input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<reponame>dashirn/FAST_RNN_JAX
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 29 16:30:31 2021
@author: dashirn
Train modular network on task primitives & task-specific readouts.
Dependencies: pickle, jax, numpy, rnn_build_modular_g, rnn_tasks
"""
import pickle
import jax.numpy as np
from jax import random #https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#%F0%9F%94%AA-Random-Numbers
random_key = random.PRNGKey(0)
import numpy as onp
# from jax.ops import index_update as jupdateidx
# import matplotlib.pyplot as plt
#Import files
from rnn_build_modular_g import create_rnn_params
from rnn_build_modular_g import train_modular_taskprimitives
from rnn_tasks import build_tasks
#Directory where to save results
save_dir = "/Users/ME"
def run_training_weightfreeze_modular_autoencode(training_params,network_params):
#Save params
task_type,build_M3_type,random_key,save_name_general,save_dir = training_params
save_name_M1 = 'M1_'+save_name_general
save_name_M2 = 'M2_'+save_name_general
save_name_M3 = 'M3_'+save_name_general
save_name_O1 = 'OutputWeights_DelayPro_'+save_name_general
save_name_O2 = 'OutputWeights_DelayAnti_'+save_name_general
save_name_O3 = 'OutputWeights_MemPro_'+save_name_general
save_name_O4 = 'OutputWeights_MemAnti_'+save_name_general
save_name_O5 = 'OutputWeights_MemDm1_'+save_name_general
save_name_O6 = 'OutputWeights_MemDm2_'+save_name_general
save_name_O7 = 'OutputWeights_ContextMemDm1_'+save_name_general
save_name_O8 = 'OutputWeights_ContextMemDm2_'+save_name_general
save_name_O9 = 'OutputWeights_MultiMem_'+save_name_general
save_name_build = save_dir + '/' + "BuildInputsTargets_" + save_name_general
T,ntime,dt,bval,sval,output_size,input_size,state_size,m1_size,\
m2_size,m3_size,m4_size,m5_size,g,batch_size,num_iters,nstep_sizes,init_step_size = network_params
input_params = (random_key,bval, sval, T, ntime, save_name_build)
rand_gen_start = 1001
#Check inputs & Targets
inputs, targets = build_tasks(batch_size, input_params,
build_M3_type,rand_gen_start,
do_plot=True, do_save=False)
####### MODULE 1 #############
if task_type == 'M_1':
save_name = save_name_M1
# #Initialization Parameters
init_params = create_rnn_params(input_size=input_size, output_size=output_size,
state_size=state_size, g=g, random_key=random_key)
###### SELECT WEIGHTS TO BE TRAINED ##########################
#Initialize recurrent weight matrix (J)
weightfreeze_mask_recur = onp.concatenate((onp.zeros((input_size, state_size)),
onp.zeros((state_size+1, state_size))),
axis=0)
print(weightfreeze_mask_recur.shape)
weightfreeze_mask_dict = {}
weightfreeze_mask_dict['hidden unit'] = np.ones((np.shape(init_params['hidden unit'])[0],
np.shape(init_params['hidden unit'])[1]))
#Input Weights Train ON
weightfreeze_mask_recur[:input_size,:] = 1;#Input Weights TRAIN ON
#M1 Weights Train ON
weightfreeze_mask_recur[input_size:input_size+m1_size,\
:m1_size] = 1;#M1 Weights TRAIN ON
#Output Weights Train ON
# weightfreeze_mask_recur[-1:,:m1_size] = 1;
weightfreeze_mask_dict['change'] = np.array(weightfreeze_mask_recur)
#Predict Weights Train ON
weightfreeze_mask_dict['predict'] = np.ones((np.shape(init_params['predict'])[0],
np.shape(init_params['predict'])[1]))
elif task_type == 'M_2':
save_name = save_name_M2
#Load weights trained by incremental 1 & use them to initialize this training
file_loc_trained = save_dir+"/RNN_trained_params_"+save_name_M1+".pickle"
file_loc = open(file_loc_trained,'rb')
init_params = pickle.load(file_loc)#Initialization Parameters for M2
###### SELECT WEIGHTS TO BE TRAINED ##########################
#Initialize recurrent weight matrix (J)
weightfreeze_mask_recur = onp.concatenate((onp.zeros((input_size, state_size)),
onp.zeros((state_size+1, state_size))),
axis=0)
print(weightfreeze_mask_recur.shape)
weightfreeze_mask_dict = {}
weightfreeze_mask_dict['hidden unit'] = np.ones((np.shape(init_params['hidden unit'])[0],
np.shape(init_params['hidden unit'])[1]))
#Input Weights Train ON
weightfreeze_mask_recur[:input_size,:] = 1;#Input Weights TRAIN ON
########M2 Weights Train ON
# #Separate weight matrix
weightfreeze_mask_recur[input_size+m1_size:input_size+m1_size+m2_size,\
m1_size:m1_size+m2_size] = 1;#M2 Weights TRAIN ON
#Output Weights Train ON
# weightfreeze_mask_recur[-1:,m1_size:m1_size+m2_size] = 1;
weightfreeze_mask_dict['change'] = np.array(weightfreeze_mask_recur)
#Predict Weights Train ON
weightfreeze_mask_dict['predict'] = np.ones((np.shape(init_params['predict'])[0],
np.shape(init_params['predict'])[1]))
elif task_type == 'M_3':
save_name = save_name_M3
#Load weights trained by incremental 1 & use them to initialize this training
file_loc_trained = save_dir+"/RNN_trained_params_"+save_name_M2+".pickle"
file_loc = open(file_loc_trained,'rb')
init_params = pickle.load(file_loc)#Initialization Parameters for M3
###### SELECT WEIGHTS TO BE TRAINED ##########################
#Initialize recurrent weight matrix (J)
weightfreeze_mask_recur = onp.concatenate((onp.zeros((input_size, state_size)),
onp.zeros((state_size+1, state_size))),
axis=0)
print(weightfreeze_mask_recur.shape)
weightfreeze_mask_dict = {}
weightfreeze_mask_dict['hidden unit'] = np.ones((np.shape(init_params['hidden unit'])[0],
np.shape(init_params['hidden unit'])[1]))
#####Input Weights Train ON
weightfreeze_mask_recur[:input_size,:] = 1;#Input Weights TRAIN ON
##### Recurrent Weights
##Separate weights
weightfreeze_mask_recur[input_size+m1_size+m2_size:input_size+m1_size+m2_size+m3_size,\
m1_size+m2_size:m1_size+m2_size+m3_size] = 1;#M3 Weights TRAIN ON
#####Output Weights Train ON
# weightfreeze_mask_recur[-1:,m1_size+m2_size:m1_size+m2_size+m3_size] = 1;#m1_size+m2_size:m1_size+m2_size+m3_size
weightfreeze_mask_dict['change'] = np.array(weightfreeze_mask_recur)
#Predict Weights Train ON
weightfreeze_mask_dict['predict'] = np.ones((np.shape(init_params['predict'])[0],
np.shape(init_params['predict'])[1]))
elif task_type == 'O_1':
save_name = save_name_O1
#Load weights trained by incremental 1 & use them to initialize this training
# file_loc_init = save_dir+"/RNN_init_params_"+save_name_task1+".pickle"
file_loc_trained = save_dir+"/RNN_trained_params_"+save_name_M3+".pickle"
file_loc = open(file_loc_trained,'rb')
init_params = pickle.load(file_loc)#Initialization Parameters for M3
#Add another input weight vector for 2nd input (context)
input_size = 3; output_size = 3
init_params_new = create_rnn_params(input_size=input_size,output_size=output_size,
state_size=state_size, g=g, random_key=random_key)
# #Account for the additional input here
# init_params['change'] = np.concatenate((init_params_new['change'][:(input_size-1),:],
# init_params['change']),axis=0)
init_params['predict'] = init_params_new['predict']
print(init_params['change'].shape)
print(init_params['predict'].shape)
###### SELECT WEIGHTS TO BE TRAINED ##########################
#Initialize recurrent weight matrix (J)
weightfreeze_mask_recur = onp.concatenate((onp.zeros((input_size, state_size)),
onp.zeros((state_size+1, state_size))),
axis=0)
print(weightfreeze_mask_recur.shape)
weightfreeze_mask_dict = {}
weightfreeze_mask_dict['hidden unit'] = np.ones((np.shape(init_params['hidden unit'])[0],
np.shape(init_params['hidden unit'])[1]))
#####Input Weights Train ON
weightfreeze_mask_recur[:input_size,m1_size+m2_size:] = 1;#Input Weights TRAIN ON
##### Recurrent Weights OFF
#####Output Weights Train ON
# weightfreeze_mask_recur[-1:,:] = 1;
weightfreeze_mask_dict['change'] = np.array(weightfreeze_mask_recur)
#Predict Weights Train ON
weightfreeze_mask_dict['predict'] = np.ones((np.shape(init_params['predict'])[0],
np.shape(init_params['predict'])[1]))
elif task_type == 'O_2':
save_name = save_name_O2
#Load weights trained by incremental 1 & use them to initialize this training
# file_loc_init = save_dir+"/RNN_init_params_"+save_name_task1+".pickle"
file_loc_trained = save_dir+"/RNN_trained_params_"+save_name_M3+".pickle"
file_loc = open(file_loc_trained,'rb')
init_params = pickle.load(file_loc)#Initialization Parameters for M3
#Add another input weight vector for 2nd input (context)
input_size = 3; output_size = 3
init_params_new = create_rnn_params(input_size=input_size,output_size=output_size,
state_size=state_size, g=g, random_key=random_key)
# #Account for the additional input here
# init_params['change'] = np.concatenate((init_params_new['change'][:(input_size-1),:],
# init_params['change']),axis=0)
init_params['predict'] = init_params_new['predict']
print(init_params['change'].shape)
print(init_params['predict'].shape)
###### SELECT WEIGHTS TO BE TRAINED ##########################
#Initialize recurrent weight matrix (J)
weightfreeze_mask_recur = onp.concatenate((onp.zeros((input_size, state_size)),
onp.zeros((state_size+1, state_size))),
axis=0)
print(weightfreeze_mask_recur.shape)
weightfreeze_mask_dict = {}
weightfreeze_mask_dict['hidden unit'] = np.ones((np.shape(init_params['hidden unit'])[0],
np.shape(init_params['hidden unit'])[1]))
#####Input Weights Train ON
weightfreeze_mask_recur[:input_size,m1_size+m2_size:] = 1;#Input Weights TRAIN ON
##### Recurrent Weights OFF
#####Output Weights Train ON
# weightfreeze_mask_recur[-1:,:] = 1;
weightfreeze_mask_dict['change'] = np.array(weightfreeze_mask_recur)
#Predict Weights Train ON
weightfreeze_mask_dict['predict'] = np.ones((np.shape(init_params['predict'])[0],
np.shape(init_params['predict'])[1]))
elif task_type == 'O_3':
save_name = save_name_O3
#Load weights trained by incremental 1 & use them to initialize this training
# file_loc_init = save_dir+"/RNN_init_params_"+save_name_task1+".pickle"
file_loc_trained = save_dir+"/RNN_trained_params_"+save_name_M3+".pickle"
file_loc = open(file_loc_trained,'rb')
init_params = pickle.load(file_loc)#Initialization Parameters for M3
#Add another input weight vector for 2nd input (context)
input_size = 3; output_size = 3
init_params_new = create_rnn_params(input_size=input_size,output_size=output_size,
state_size=state_size, g=g, random_key=random_key)
# #Account for the additional input here
# init_params['change'] = np.concatenate((init_params_new['change'][:(input_size-1),:],
# init_params['change']),axis=0)
init_params['predict'] = init_params_new['predict']
print(init_params['change'].shape)
print(init_params['predict'].shape)
###### SELECT WEIGHTS TO BE TRAINED ##########################
#Initialize recurrent weight matrix (J)
weightfreeze_mask_recur = onp.concatenate((onp.zeros((input_size, state_size)),
onp.zeros((state_size+1, state_size))),
axis=0)
print(weightfreeze_mask_recur.shape)
weightfreeze_mask_dict = {}
weightfreeze_mask_dict['hidden unit'] = np.ones((np.shape(init_params['hidden unit'])[0],
np.shape(init_params['hidden unit'])[1]))
#####Input Weights Train ON
weightfreeze_mask_recur[:input_size,m1_size+m2_size:] = 1;#Input Weights TRAIN ON
##### Recurrent Weights OFF
#####Output Weights Train ON
# weightfreeze_mask_recur[-1:,:] = 1;
weightfreeze_mask_dict['change'] = np.array(weightfreeze_mask_recur)
#Predict Weights Train ON
weightfreeze_mask_dict['predict'] = np.ones((np.shape(init_params['predict'])[0],
np.shape(init_params['predict'])[1]))
elif task_type == 'O_4':
save_name = save_name_O4
#Load weights trained by incremental 1 & use them to initialize this training
# file_loc_init = save_dir+"/RNN_init_params_"+save_name_task1+".pickle"
file_loc_trained = save_dir+"/RNN_trained_params_"+save_name_M3+".pickle"
file_loc = open(file_loc_trained,'rb')
init_params = pickle.load(file_loc)#Initialization Parameters for M3
#Add another input weight vector for 2nd input (context)
input_size = 3; output_size = 3
init_params_new = create_rnn_params(input_size=input_size,output_size=output_size,
state_size=state_size, g=g, random_key=random_key)
# #Account for the additional input here
# init_params['change'] = np.concatenate((init_params_new['change'][:(input_size-1),:],
# init_params['change']),axis=0)
init_params['predict'] = init_params_new['predict']
print(init_params['change'].shape)
print(init_params['predict'].shape)
###### SELECT WEIGHTS TO BE TRAINED ##########################
#Initialize recurrent weight matrix (J)
weightfreeze_mask_recur = onp.concatenate((onp.zeros((input_size, state_size)),
onp.zeros((state_size+1, state_size))),
axis=0)
print(weightfreeze_mask_recur.shape)
weightfreeze_mask_dict = {}
weightfreeze_mask_dict['hidden unit'] = np.ones((np.shape(init_params['hidden unit'])[0],
np.shape(init_params['hidden unit'])[1]))
#####Input Weights Train ON
weightfreeze_mask_recur[:input_size,m1_size+m2_size:] = 1;#Input Weights TRAIN ON
##### Recurrent Weights OFF
#####Output Weights Train ON
# weightfreeze_mask_recur[-1:,:] = 1;
weightfreeze_mask_dict['change'] = np.array(weightfreeze_mask_recur)
#Predict Weights Train ON
weightfreeze_mask_dict['predict'] = np.ones((np.shape(init_params['predict'])[0],
np.shape(init_params['predict'])[1]))
elif task_type == 'O_5':#mem_dm1
save_name = save_name_O5
#Load weights trained by incremental 1 & use them to initialize this training
# file_loc_init = save_dir+"/RNN_init_params_"+save_name_task1+".pickle"
file_loc_trained = save_dir+"/RNN_trained_params_"+save_name_M3+".pickle"
file_loc = open(file_loc_trained,'rb')
init_params = pickle.load(file_loc)#Initialization Parameters for M3
#Add another input weight vector for 2nd input (context)
input_size = 2; output_size = 2
init_params_new = create_rnn_params(input_size=input_size,output_size=output_size,
state_size=state_size, g=g, random_key=random_key)
# #Account for the additional input here
init_params['change'] = init_params['change'][1:,:]
init_params['predict'] = init_params_new['predict']
print(init_params['change'].shape)
print(init_params['predict'].shape)
###### SELECT WEIGHTS TO BE TRAINED ##########################
#Initialize recurrent weight matrix (J)
weightfreeze_mask_recur = onp.concatenate((onp.zeros((input_size, state_size)),
onp.zeros((state_size+1, state_size))),
axis=0)
print(weightfreeze_mask_recur.shape)
weightfreeze_mask_dict = {}
weightfreeze_mask_dict['hidden unit'] = np.ones((np.shape(init_params['hidden unit'])[0],
np.shape(init_params['hidden unit'])[1]))
#####Input Weights Train ON
weightfreeze_mask_recur[:input_size,m1_size+m2_size:] = 1;#Input Weights TRAIN ON
##### Recurrent Weights OFF
#####Output Weights Train ON
# weightfreeze_mask_recur[-1:,:] = 1;
weightfreeze_mask_dict['change'] = np.array(weightfreeze_mask_recur)
#Predict Weights Train ON
weightfreeze_mask_dict['predict'] = np.ones((np.shape(init_params['predict'])[0],
np.shape(init_params['predict'])[1]))
elif task_type == 'O_6':#mem_dm2
save_name = save_name_O6
#Load weights trained by incremental 1 & use them to initialize this training
# file_loc_init = save_dir+"/RNN_init_params_"+save_name_task1+".pickle"
file_loc_trained = save_dir+"/RNN_trained_params_"+save_name_M3+".pickle"
file_loc = open(file_loc_trained,'rb')
init_params = pickle.load(file_loc)#Initialization Parameters for M3
#Add another input weight vector for 2nd input (context)
input_size = 2; output_size = 2
init_params_new = create_rnn_params(input_size=input_size,output_size=output_size,
state_size=state_size, g=g, random_key=random_key)
# #Account for the | |
is_bailout_needed == "n":
game_end_height = int(rpc_connection.getinfo()["blocks"])
while True:
current_height = int(rpc_connection.getinfo()["blocks"])
height_difference = current_height - game_end_height
if height_difference == 0:
print(current_height)
print(game_end_height)
print(colorize("Waiting for next block before bailout", "blue"))
time.sleep(5)
else:
break
break
break
if start_game == "n":
print("As you wish!")
input("Press [Enter] to continue...")
break
else:
print(colorize("Choose y or n!", "red"))
def rogue_newgame_multiplayer(rpc_connection):
    """Create a new multiplayer game on the ROGUE chain.

    Prompts the operator for the maximum number of players (> 1) and the
    buyin (> 0.001), then broadcasts a ``cclib newgame`` call (evalcode 17)
    via *rpc_connection* and reports the resulting txid.

    Args:
        rpc_connection: authenticated RPC proxy to the ROGUE chain daemon.
    """
    while True:
        max_players = input("Input game max. players (>1): ")
        # Guard against non-numeric input: int() would raise ValueError and
        # crash the menu loop instead of re-prompting.
        try:
            if int(max_players) > 1:
                break
        except ValueError:
            pass
        print("Please re-check your input")
        input("Press [Enter] to continue...")
    while True:
        buyin = input("Input game buyin (>0.001): ")
        # Same guard for the buyin amount.
        try:
            if float(buyin) > 0.001:
                break
        except ValueError:
            pass
        print("Please re-check your input")
        input("Press [Enter] to continue...")
    try:
        new_game_txid = rpc_connection.cclib("newgame", "17", '"[' + max_players + "," + buyin + ']"')["txid"]
        print(colorize("New multiplayer game succesfully created. txid: " + new_game_txid, "green"))
        input("Press [Enter] to continue...")
    except Exception as e:
        print("Something went wrong.")
        print(e)
        input("Press [Enter] to continue...")
def rogue_join_multiplayer_game(rpc_connection):
    """Interactively register the user for an existing multiplayer game.

    Lists open games, asks for a game txid, optionally lets the user pick
    one of their existing players (otherwise a fresh warrior is named),
    broadcasts the registration and polls the mempool until it is mined.

    Args:
        rpc_connection: RPC proxy to the ROGUE chain daemon.
    """
    while True:
        try:
            print_multiplayer_games_list(rpc_connection)
            # TODO: optional player data txid (print players you have and ask if you want to choose one)
            game_txid = input("Input txid of game you want to join: ")
            try:
                while True:
                    print_players_list(rpc_connection)
                    is_choice_needed = input("Do you want to choose a player for this game? [y/n] ")
                    if is_choice_needed == "y":
                        player_txid = input("Please input player txid: ")
                        newgame_regisration_txid = rogue_game_register(rpc_connection, game_txid, player_txid)["txid"]
                        break
                    elif is_choice_needed == "n":
                        # No existing player chosen: name a new warrior and
                        # register for the game without a player txid.
                        set_warriors_name(rpc_connection)
                        newgame_regisration_txid = rogue_game_register(rpc_connection, game_txid)["txid"]
                        break
                    else:
                        print("Please choose y or n !")
            except Exception as e:
                print("Something went wrong. Maybe you're trying to register on game twice or don't have enough funds to pay buyin.")
                print(e)
                input("Press [Enter] to continue...")
                break
            print(colorize("Succesfully registered.", "green"))
            # Poll the mempool until the registration tx disappears from it,
            # i.e. has been mined into a block.
            while True:
                mempool = rpc_connection.getrawmempool()
                if newgame_regisration_txid in mempool:
                    print(colorize("Waiting for registration transaction to be mined", "blue"))
                    time.sleep(5)
                else:
                    print(colorize("Registration transaction is mined", "green"))
                    break
            print(newgame_regisration_txid)
            input("Press [Enter] to continue...")
            break
        except KeyboardInterrupt:
            # Let the operator back out of the flow at any prompt.
            break
def print_players_list(rpc_connection):
    """Pretty-print every warrior owned by the current wallet.

    Args:
        rpc_connection: RPC proxy to the ROGUE chain daemon.
    """
    players_list = rogue_players_list(rpc_connection)
    print(colorize("\nYou own " + str(players_list["numplayerdata"]) + " warriors\n", "blue"))
    for warrior_counter, player in enumerate(players_list["playerdata"], start=1):
        player_data = rogue_player_info(rpc_connection, player)["player"]
        print(colorize("\n================================\n", "green"))
        print("Warrior " + str(warrior_counter))
        print("Name: " + player_data["pname"] + "\n")
        print("Player txid: " + player_data["playertxid"])
        print("Token txid: " + player_data["tokenid"])
        print("Hitpoints: " + str(player_data["hitpoints"]))
        print("Strength: " + str(player_data["strength"]))
        print("Level: " + str(player_data["level"]))
        print("Experience: " + str(player_data["experience"]))
        print("Dungeon Level: " + str(player_data["dungeonlevel"]))
        print("Chain: " + player_data["chain"])
        print(colorize("\nInventory:\n", "blue"))
        for item in player_data["pack"]:
            print(item)
        print("\nTotal packsize: " + str(player_data["packsize"]) + "\n")
    input("Press [Enter] to continue...")
def sell_warrior(rpc_connection):
    """Interactively place an ask (sell order) for one of our warriors.

    Shows the wallet's warriors, asks for a playertxid and a price, builds
    a tokenask for the warrior's token and broadcasts it.

    Args:
        rpc_connection: RPC proxy to the ROGUE chain daemon.
    """
    print(colorize("Your brave warriors: \n", "blue"))
    print_players_list(rpc_connection)
    print("\n")
    while True:
        need_sell = input("Do you want to place order to sell any? [y/n]: ")
        if need_sell == "y":
            playertxid = input("Input playertxid of warrior you want to sell: ")
            price = input("Input price (in ROGUE coins) you want to sell warrior for: ")
            # A bad playertxid makes the lookup raise; report and bail out.
            try:
                tokenid = rogue_player_info(rpc_connection, playertxid)["player"]["tokenid"]
            except Exception as e:
                print(e)
                print("Something went wrong. Be careful with input next time.")
                input("Press [Enter] to continue...")
                break
            token_ask_raw = rpc_connection.tokenask("1", tokenid, price)
            try:
                token_ask_txid = rpc_connection.sendrawtransaction(token_ask_raw["hex"])
            except Exception as e:
                print(e)
                print(token_ask_raw)
                print("Something went wrong. Be careful with input next time.")
                input("Press [Enter] to continue...")
                break
            print(colorize("Ask succesfully placed. Ask txid is: " + token_ask_txid, "green"))
            input("Press [Enter] to continue...")
            break
        # BUG FIX: was a second plain `if`, leaving the else clause attached
        # to it alone; elif makes the y/n/other chain explicit.
        elif need_sell == "n":
            print("As you wish!")
            input("Press [Enter] to continue...")
            break
        else:
            print(colorize("Choose y or n!", "red"))
#TODO: have to combine into single scanner with different cases
def is_warrior_alive(rpc_connection, warrior_txid):
    """Return True if the warrior's 1-satoshi marker UTXO is still unspent.

    A living player is represented by an unspent 0.00000001 output on its
    player transaction; once that marker output is spent (gettxout returns
    nothing) the warrior counts as dead.

    Args:
        rpc_connection: RPC proxy to the ROGUE chain daemon.
        warrior_txid: player transaction id to check.
    """
    raw_transaction = rpc_connection.getrawtransaction(warrior_txid, 1)
    for vout in raw_transaction["vout"]:
        # NOTE(review): equality against the float literal assumes the RPC
        # layer returns plain floats for "value"; if it returns Decimal this
        # comparison may never match -- confirm against the RPC wrapper.
        if vout["value"] == 0.00000001 and rpc_connection.gettxout(raw_transaction["txid"], vout["n"]):
            return True
    return False
def warriors_scanner(rpc_connection):
    """Collect every living on-chain warrior that the wallet does not own.

    Returns:
        dict mapping token id -> player data dict.
    """
    start_time = time.time()
    token_list = rpc_connection.tokenlist()
    my_warriors_list = rogue_players_list(rpc_connection)
    warriors_list = {}
    for token in token_list:
        player_info = rogue_player_info(rpc_connection, token)
        # Skip tokens that are not players at all.
        if "status" in player_info and player_info["status"] == "error":
            continue
        # Skip warriors we already own.
        if player_info["player"]["playertxid"] in my_warriors_list["playerdata"]:
            continue
        # Skip dead warriors.
        if not is_warrior_alive(rpc_connection, player_info["player"]["playertxid"]):
            continue
        warriors_list[token] = player_info["player"]
    print("--- %s seconds ---" % (time.time() - start_time))
    return warriors_list
def warriors_scanner_for_rating(rpc_connection):
    """Resolve every token to its newest player state and keep the living ones.

    Returns:
        dict mapping playertxid -> player data dict.
    """
    print("It can take some time")
    token_list = rpc_connection.tokenlist()
    my_warriors_list = rogue_players_list(rpc_connection)
    actual_playerids = []
    warriors_list = {}
    for token in token_list:
        player_info = rogue_player_info(rpc_connection, token)
        if "status" in player_info and player_info["status"] == "error":
            continue
        # Follow the baton chain to the most recent player transaction.
        while "batontxid" in player_info["player"].keys():
            player_info = rogue_player_info(rpc_connection, player_info["player"]["batontxid"])
        actual_playerids.append(player_info["player"]["playertxid"])
    for player_id in actual_playerids:
        player_info = rogue_player_info(rpc_connection, player_id)
        if is_warrior_alive(rpc_connection, player_info["player"]["playertxid"]):
            warriors_list[player_id] = player_info["player"]
    return warriors_list
def warriors_scanner_for_dex(rpc_connection):
    """Collect every on-chain warrior not owned by this wallet (for DEX views).

    Returns:
        dict mapping token id -> player data dict.
    """
    start_time = time.time()
    token_list = rpc_connection.tokenlist()
    my_warriors_list = rogue_players_list(rpc_connection)
    warriors_list = {}
    for token in token_list:
        player_info = rogue_player_info(rpc_connection, token)
        # Skip non-player tokens.
        if "status" in player_info and player_info["status"] == "error":
            continue
        # Skip warriors we already own.
        if player_info["player"]["tokenid"] in my_warriors_list["playerdata"]:
            continue
        warriors_list[token] = player_info["player"]
    print("--- %s seconds ---" % (time.time() - start_time))
    return warriors_list
def print_warrior_list(rpc_connection):
    """Pretty-print every living, non-owned warrior found on the ROGUE chain.

    Args:
        rpc_connection: RPC proxy to the ROGUE chain daemon.
    """
    players_list = warriors_scanner(rpc_connection)
    print(colorize("All warriors on ROGUE chain: \n", "blue"))
    for warrior_counter, player in enumerate(players_list, start=1):
        player_data = rogue_player_info(rpc_connection, player)["player"]
        print(colorize("\n================================\n", "green"))
        print("Warrior " + str(warrior_counter))
        print("Name: " + player_data["pname"] + "\n")
        print("Player txid: " + player_data["playertxid"])
        print("Token txid: " + player_data["tokenid"])
        print("Hitpoints: " + str(player_data["hitpoints"]))
        print("Strength: " + str(player_data["strength"]))
        print("Level: " + str(player_data["level"]))
        print("Experience: " + str(player_data["experience"]))
        print("Dungeon Level: " + str(player_data["dungeonlevel"]))
        print("Chain: " + player_data["chain"])
        print(colorize("\nInventory:\n", "blue"))
        for item in player_data["pack"]:
            print(item)
        print("\nTotal packsize: " + str(player_data["packsize"]) + "\n")
    input("Press [Enter] to continue...")
def place_bid_on_warriror(rpc_connection):
    """Interactively place a token bid (buy order) on another player's warrior.

    Shows all warriors on chain, asks for a playertxid and a price, and
    broadcasts a tokenbid for the warrior's token.

    Args:
        rpc_connection: RPC proxy to the ROGUE chain daemon.
    """
    # print_warrior_list() only prints; its None return value was previously
    # assigned to an unused local.
    print_warrior_list(rpc_connection)
    # TODO: have to drop my warriors or at least print my warriors ids
    while True:
        need_buy = input("Do you want to place order to buy some warrior? [y/n]: ")
        if need_buy == "y":
            playertxid = input("Input playertxid of warrior you want to place bid for: ")
            price = input("Input price (in ROGUE coins) you want to buy warrior for: ")
            # Guard the lookup like sell_warrior() does: a bad txid would
            # otherwise raise out of the menu loop.
            try:
                tokenid = rogue_player_info(rpc_connection, playertxid)["player"]["tokenid"]
            except Exception as e:
                print(e)
                print("Something went wrong. Be careful with input next time.")
                input("Press [Enter] to continue...")
                break
            token_bid_raw = rpc_connection.tokenbid("1", tokenid, price)
            try:
                token_bid_txid = rpc_connection.sendrawtransaction(token_bid_raw["hex"])
            except Exception as e:
                print(e)
                print(token_bid_raw)
                print("Something went wrong. Be careful with input next time.")
                input("Press [Enter] to continue...")
                break
            print(colorize("Bid succesfully placed. Bid txid is: " + token_bid_txid, "green"))
            input("Press [Enter] to continue...")
            break
        # BUG FIX: was a second plain `if`; elif makes the y/n/other chain explicit.
        elif need_buy == "n":
            print("As you wish!")
            input("Press [Enter] to continue...")
            break
        else:
            print(colorize("Choose y or n!", "red"))
def check_incoming_bids(rpc_connection):
    """Return all open bid ('b') orders placed against warriors we own.

    Returns:
        list of order dicts as returned by tokenorders.
    """
    # TODO: have to scan for warriors which are in asks as well
    incoming_orders = []
    players_list = rogue_players_list(rpc_connection)
    for player in players_list["playerdata"]:
        token_id = rogue_player_info(rpc_connection, player)["player"]["tokenid"]
        orders = rpc_connection.tokenorders(token_id)
        incoming_orders.extend(order for order in orders if order["funcid"] == "b")
    return incoming_orders
def print_icoming_bids(rpc_connection):
    """Print every incoming bid on our warriors and optionally fill one.

    For each bid found by check_incoming_bids(), prints the warrior's stats
    and the order details, then offers to fill a bid by txid.

    Args:
        rpc_connection: RPC proxy to the ROGUE chain daemon.
    """
    incoming_bids = check_incoming_bids(rpc_connection)
    for bid in incoming_bids:
        print("Recieved bid for warrior " + bid["tokenid"])
        player_data = rogue_player_info(rpc_connection, bid["tokenid"])["player"]
        print(colorize("\n================================\n", "green"))
        print("Name: " + player_data["pname"] + "\n")
        print("Player txid: " + player_data["playertxid"])
        print("Token txid: " + player_data["tokenid"])
        print("Hitpoints: " + str(player_data["hitpoints"]))
        print("Strength: " + str(player_data["strength"]))
        print("Level: " + str(player_data["level"]))
        print("Experience: " + str(player_data["experience"]))
        print("Dungeon Level: " + str(player_data["dungeonlevel"]))
        print("Chain: " + player_data["chain"])
        print(colorize("\nInventory:\n", "blue"))
        for item in player_data["pack"]:
            print(item)
        print("\nTotal packsize: " + str(player_data["packsize"]) + "\n")
        print(colorize("\n================================\n", "blue"))
        print("Order info: \n")
        print("Bid txid: " + bid["txid"])
        print("Price: " + str(bid["price"]) + "\n")
    if len(incoming_bids) == 0:
        print(colorize("There is no any incoming orders!", "blue"))
        input("Press [Enter] to continue...")
    else:
        while True:
            want_to_sell = input("Do you want to fill any incoming bid? [y/n]: ")
            if want_to_sell == "y":
                bid_txid = input("Input bid txid you want to fill: ")
                # Find the matching bid and fill it for its full totalrequired.
                for bid in incoming_bids:
                    if bid_txid == bid["txid"]:
                        tokenid = bid["tokenid"]
                        fill_sum = bid["totalrequired"]
                        fillbid_hex = rpc_connection.tokenfillbid(tokenid, bid_txid, str(fill_sum))
                        try:
                            fillbid_txid = rpc_connection.sendrawtransaction(fillbid_hex["hex"])
                        except Exception as e:
                            print(e)
                            print(fillbid_hex)
                            print("Something went wrong. Be careful with input next time.")
                            input("Press [Enter] to continue...")
                            # Abandon this bid on broadcast failure.
                            break
                        print(colorize("Warrior succesfully sold. Txid is: " + fillbid_txid, "green"))
                        input("Press [Enter] to continue...")
                # Leave the prompt loop whether or not a bid matched.
                break
            if want_to_sell == "n":
                print("As you wish!")
                input("Press [Enter] to continue...")
                break
            else:
                print(colorize("Choose y or n!", "red"))
def find_warriors_asks(rpc_connection):
warriors_list = warriors_scanner_for_dex(rpc_connection)
warriors_asks = []
for player in warriors_list:
orders = rpc_connection.tokenorders(player)
if len(orders) > 0:
for order in orders:
if order["funcid"] == "s":
warriors_asks.append(order)
for ask in warriors_asks:
print(colorize("\n================================\n", "green"))
print("Warrior selling on marketplace: " + ask["tokenid"])
player_data = rogue_player_info(rpc_connection, ask["tokenid"])["player"]
print("Name: " + player_data["pname"] + "\n")
print("Player txid: " + player_data["playertxid"])
print("Token txid: " + player_data["tokenid"])
print("Hitpoints: " + str(player_data["hitpoints"]))
print("Strength: " + str(player_data["strength"]))
print("Level: " + str(player_data["level"]))
print("Experience: " + | |
i != len(spu.servers) -1:
servers += ','
sock.Success( servers )
def do_crutservers( self, sock, args ):
    """do_crutservers(sock, args)
    Sends the space-separated CRUT server URL list for the caller's node."""
    crut_servers = sock.node.crutservers
    if not crut_servers:
        sock.Failure( SockWrapper.UNKNOWNPARAM, "CRUTClient %d doesn't have servers" % (sock.SPUid) )
        return
    # Reply format: "<count> url1 url2 ...".
    urls = [url for (node, url) in crut_servers]
    sock.Success( "%d " % len(crut_servers) + " ".join(urls) )
def do_crutclients(self, sock, args ):
    """Sends the space-separated CRUT client URL list for the caller's node."""
    crut_clients = sock.node.crutclients
    # Not an error: e.g. the last node in a fan configuration has no clients.
    if not crut_clients:
        sock.Success("0 CRUTserver doesn't have clients.")
        return
    # Reply format: "<count> url1 url2 ...".
    urls = [url for (node, url) in crut_clients]
    sock.Success( "%d " % len(crut_clients) + " ".join(urls) )
def do_serverids( self, sock, args ):
    """do_serverids(sock, args)
    Sends the list of server IDs.
    XXX How is this different from do_servers? (ahern)
    """
    # XXX this might only be temporary (BrianP)
    if sock.SPUid == -1:
        sock.Failure( SockWrapper.UNKNOWNSPU, "You can't ask for server ids without telling me what SPU id you are!" )
        return
    spu = allSPUs[sock.SPUid]
    if not spu.servers:
        sock.Failure( SockWrapper.UNKNOWNPARAM, "SPU %d doesn't have servers!" % (sock.SPUid) )
        return
    id_strings = []
    for (node, url) in spu.servers:
        if node == None:
            sock.Failure( SockWrapper.UNKNOWNSERVER, "Sorry, I don't know what SPU the server is running, you didn't tell me." )
            return
        # Each server is identified by the ID of its first SPU.
        id_strings.append("%d" % (node.SPUs[0].ID))
    sock.Success( "%d " % len(spu.servers) + ' '.join(id_strings) )
def do_tiles( self, sock, args ):
    """do_tiles(sock, args)
    Returns the list of tiles associated with a SPU's Nth server."""
    # Note, an SPU asks for the tiles, but the tiles are really associated
    # with the servers that the (tilesort) SPU will talk to. The arg to
    # this query indicates which server to return the tiles for.
    if sock.SPUid == -1:
        sock.Failure( SockWrapper.UNKNOWNSPU, "You can't ask for tiles without telling me what SPU id you are!" )
        return
    spu = allSPUs[sock.SPUid]
    if len(spu.servers) == 0:
        sock.Failure( SockWrapper.UNKNOWNPARAM, "SPU %d doesn't have servers!" % (sock.SPUid) )
        return
    server_num = int(args)
    if server_num < 0 or server_num >= len(spu.servers):
        sock.Failure( SockWrapper.UNKNOWNSERVER, "SPU %d doesn't have a server numbered %d" % (sock.SPUid, server_num) )
        # BUG FIX: previously fell through and indexed spu.servers out of
        # range, raising IndexError after already reporting the failure.
        return
    (node, url) = spu.servers[server_num]
    if node == None:
        sock.Failure( SockWrapper.UNKNOWNSERVER, "No tiles for Null node")
        return
    self.tileReply( sock, node )
def do_servertiles( self, sock, args ):
    """do_servertiles(sock, args)
    Sends the defined tiles for a server."""
    server_node = sock.node
    if server_node == None or not isinstance(server_node, CRNetworkNode):
        sock.Failure( SockWrapper.UNKNOWNSERVER, "You can't ask for tiles without telling me what server you are!" )
        return
    self.tileReply( sock, server_node )
def do_server_param( self, sock, args ):
    """Return a server parameter to the calling SPU.
    Args string is: "<server_num> <param_name>"."""
    if sock.SPUid == -1:
        sock.Failure( SockWrapper.UNKNOWNSPU, "You can't ask for SPU parameters without telling me what SPU id you are!" )
        return
    spu = allSPUs[sock.SPUid]
    # was string.split(args); equivalent, and drops the string-module dependency
    args = args.split()
    server_num = int(args[0])
    param = args[1]
    if server_num < 0 or server_num >= len(spu.servers):
        sock.Failure( SockWrapper.UNKNOWNSERVER, "SPU %d doesn't have a server numbered %d" % (sock.SPUid, server_num) )
        # BUG FIX: must stop here; indexing below would raise IndexError
        # after the failure had already been reported.
        return
    (node, url) = spu.servers[server_num]
    # was node.config.has_key(param); `in` works on both Python 2 and 3
    if param in node.config:
        sock.Success( node.config[param] )
    else:
        # Unknown params reply with an empty value rather than an error.
        sock.Success( "" )
def tileReply( self, sock, node ):
    """tileReply(sock, node)
    Packages up a tile message for socket communication.
    """
    tile_list = node.tiles
    if not tile_list:
        sock.Failure( SockWrapper.UNKNOWNPARAM, "server doesn't have tiles!" )
        return
    # Reply format: "<count> x y w h,x y w h,..." (tiles comma-separated).
    tile_strings = ["%d %d %d %d" % tile for tile in tile_list]
    sock.Success( "%d " % len(tile_list) + ",".join(tile_strings) )
def do_serverdisplaytiles( self, sock, args ):
"""do_serverdisplaytiles(sock, args)
Sends the defined tiles for a server."""
if sock.node == None or not isinstance(sock.node,CRNetworkNode):
sock.Failure( SockWrapper.UNKNOWNSERVER, "You can't ask for tiles without telling me what server you are!" )
return
self.displaytileReply( sock, sock.node )
def displaytileReply( self, sock, node ):
"""tileReply(sock, node)
Packages up a tile message for socket communication.
"""
if len(node.tiles_on_displays) == 0:
sock.Failure( SockWrapper.UNKNOWNPARAM, "server doesn't have tiles!" )
return
tiles = "%d " % len(node.tiles_on_displays)
for i in range(len(node.tiles_on_displays)):
tile = node.tiles_on_displays[i]
tiles += "%d %d %d %d %d" % tile
if i != len(node.tiles) - 1:
tiles += ","
sock.Success( tiles )
def do_displays( self, sock, args ):
"""do_displays(sock, args)
Send the displays associated with a SPU"""
n_displays = 0
for spu in range(len(allSPUs)):
n_displays += len(allSPUs[spu].displays)
displays = "%d " % n_displays
for spu in range(len(allSPUs)):
for i in range(len(allSPUs[spu].displays)):
display = allSPUs[spu].displays[i]
tmp_display = "%d %d %d %s %s" % display
reggie = re.compile('\]|\[|,')
displays += "%s" % reggie.sub(' ', tmp_display)
if i != len(allSPUs[spu].displays) - 1:
displays += ","
sock.Success( displays )
def do_display_tiles( self, sock, args ):
"""do_tiles(sock, args)
Sends the defined tiles for a SPU."""
if sock.SPUid == -1:
sock.Failure( SockWrapper.UNKNOWNSPU, "You can't ask for tiles without telling me what SPU id you are!" )
return
spu = allSPUs[sock.SPUid]
if len(spu.servers) == 0:
sock.Failure( SockWrapper.UNKNOWNPARAM, "SPU %d doesn't have servers!" % (sock.SPUid) )
return
server_num = int(args)
if server_num < 0 or server_num >= len(spu.servers):
sock.Failure( SockWrapper.UNKNOWNSERVER, "SPU %d doesn't have a server numbered %d" % (sock.SPUid, server_num) )
(node, url) = spu.servers[server_num]
self.displayTileReply( sock, node )
def displayTileReply( self, sock, node ):
"""displayTileReply(sock, node)
Packages up a tile message for socket communication.
"""
if len(node.tiles_on_displays) == 0:
sock.Failure( SockWrapper.UNKNOWNPARAM, "server doesn't have display tiles!" )
return
tiles = "%d " % len(node.tiles_on_displays)
for i in range(len(node.tiles_on_displays)):
tile = node.tiles_on_displays[i]
tiles += "%d %d %d %d %d" % tile
if i != len(node.tiles_on_displays) - 1:
tiles += ","
sock.Success( tiles )
def do_getvncclient( self, sock, args ):
"""do_clients(sock, args)
Like do_clients, return list of clients of this server, but this
function is for vnc only.
Note that the client/server terminology of Chromium (in this
configuration anyway) is just the opposite of VNC's terminology."""
# NOTE: we ignore args (the hostname)
if sock.node == None or not isinstance(sock.node, CRVNCServerNode):
sock.Failure( SockWrapper.UNKNOWNSERVER,
"You can't ask for vnc clients without telling " +
"me which VNC server node you are!" )
return
# Just find the replicate SPU
for i in allSPUs.keys():
spu = allSPUs[i]
if spu.name == "replicate":
sock.Success("1 tcpip %d" % spu.ID);
return
sock.Failure(SockWrapper.NOTHINGTOSAY,
"getvncclient: Didn't find VNC ApplicationNode and SPU")
def do_clients( self, sock, args ):
"""Returns a list of the clients who talk to this server.
Example: '2 tcpip 4, ib 5' means there are two clients. The first
is SPU #4 using TCP/IP, the second is SPU #5 using Infiniband."""
if sock.node == None or not isinstance(sock.node,CRNetworkNode):
sock.Failure( SockWrapper.UNKNOWNSERVER, "You can't ask for clients without telling me what server you are!" )
return
total_clients = len(sock.node.clients) + len(sock.node.file_clients)
clients = "%d " % total_clients
for i in range(len(sock.node.clients)):
(spu, protocol) = sock.node.clients[i]
clients += "%s %d" % (protocol, spu.ID)
if i != total_clients-1:
clients += ','
for i in range(len(sock.node.file_clients)):
fname = sock.node.file_clients[i]
clients += "%s %d" % (fname, -1)
if i-len(sock.node.clients) != total_clients-1:
clients += ','
sock.Success( clients )
def do_reset( self, sock, args ):
"""do_reset(sock, args)
Resets the mothership to its initial state."""
for node in self.nodes:
node.spokenfor = 0
node.spusloaded = 0
node.crut_spokenfor = 0
# respawn auto-start nodes
for cb in CR.startupCallbacks:
cb(self)
spawner = CRSpawner( self.nodes )
spawner.start()
sock.Success( "Server Reset" )
def do_rank( self, sock, args ):
"""do_rank( sock, args )
Retrieves the node's rank and sends it on the socket (for Quadrics)."""
if sock.node == None:
sock.Failure( SockWrapper.UNKNOWNSERVER, "Identify yourself!" )
return
if not sock.node.config.has_key( 'rank' ):
sock.Failure( SockWrapper.UNKNOWNPARAM, "Node didn't say what it's rank is." )
return
sock.Success( sock.node.config['rank'] )
    def do_disconnect( self, sock, args ):
        """do_disconnect(sock, args)
        Disconnects from clients.

        Sends a final "Bye" acknowledgement before tearing the
        connection down via ClientDisconnect."""
        sock.Success( "Bye" )
        self.ClientDisconnect( sock )
    def do_logperf( self, sock, args ):
        """do_logperf(sock, args)
        Logs Data to a logfile.

        The raw args string is written verbatim via CROutput; the
        "Dumped" reply merely acknowledges receipt."""
        CROutput("%s" % args)
        sock.Success( "Dumped" )
def do_gettilelayout( self, sock, args | |
get an error at plot-time.
if hasattr(tohist1, 'compressed'):
tohist1 = tohist1.compressed()
if hasattr(tohist2, 'compressed'):
tohist2 = tohist2.compressed()
# Compute 2-D histogram
hist, xedges, yedges = \
np.histogram2d(tohist1, tohist2, bins=b, weights=weights)
hist = hist.T
# Recover bin centers
bc = []
for i, edges in enumerate([xedges, yedges]):
bc.append(bin_e2c(edges))
# Determine mapping between likelihood and confidence contours
if color_by_like:
# Get likelihood contours (relative to peak) that enclose
# nu-% of the area
if contour_method == 'raw':
nu, levels = error_2D(None, None, hist, None, nu=like,
method='raw')
else:
nu, levels = error_2D(to_hist[0], to_hist[1], self.L / self.L.max(),
bins=[binvec[0], binvec[1]], nu=nu, method=contour_method)
if fill:
if excluded and len(nu) == 1:
# Fill the entire window with cross-hatching
x1, x2 = ax.get_xlim()
y1, y2 = ax.get_ylim()
x_polygon = [x1, x2, x2, x1]
y_polygon = [y1, y1, y2, y2]
ax.fill(x_polygon, y_polygon, color="none", hatch='X',
edgecolor=kwargs['color'])
# Now, fill the enclosed area with white
ax.contourf(bc[0], bc[1], hist / hist.max(),
levels, color='w', colors='w', zorder=2)
# Draw an outline too
ax.contour(bc[0], bc[1], hist / hist.max(),
levels, colors=kwargs['color'], linewidths=1,
zorder=2)
else:
ax.contourf(bc[0], bc[1], hist / hist.max(),
levels, zorder=3, **kwargs)
else:
ax.contour(bc[0], bc[1], hist / hist.max(),
levels, zorder=4, **kwargs)
else:
if fill:
cs = ax.contourf(bc[0], bc[1], hist / hist.max(),
zorder=3, **kw)
else:
cs = ax.contour(bc[0], bc[1], hist / hist.max(),
zorder=4, **kw)
# Force linear
if not gotax:
ax.set_xscale('linear')
ax.set_yscale('linear')
# Add nice labels (or try to)
self.set_axis_labels(ax, pars, take_log, un_log, None, labels)
# Rotate ticks?
for tick in ax.get_xticklabels():
tick.set_rotation(45.)
for tick in ax.get_yticklabels():
tick.set_rotation(45.)
pl.draw()
return ax
def Contour(self, pars, c, levels=None, leveltol=1e-6, ivar=None, take_log=False,
un_log=False, multiplier=1., ax=None, fig=1, fill=True,
inline_labels=False, manual=None, cax=None, use_colorbar=True,
cb_kwargs={}, **kwargs):
"""
Draw contours that are NOT associated with confidence levels.
..note:: To draw many contours in same plane, just call this
function repeatedly.
Should use pl.contour if we're plotting on a regular grid, i.e.,
the parameter space of a 2-D model grid with the color axis
some derived quantity.
Parameters
----------
pars : list
List of parameters defining the plane on which to draw contours.
c : str
Name of parameter or blob that we're to draw contours of.
levels : list
[Optional] list of levels for
"""
# Only make a new plot window if there isn't already one
if ax is None:
gotax = False
fig = pl.figure(fig)
ax = fig.add_subplot(111)
else:
gotax = True
cb = None
if (pars[0] in self.parameters) and (pars[1] in self.parameters):
xdata, ydata, zdata = self._reshape_data(pars, c, ivar=ivar,
take_log=take_log, un_log=un_log, multiplier=multiplier)
if fill:
kw = kwargs.copy()
kw.update(cb_kwargs)
if levels is not None:
CS = ax.contourf(xdata, ydata, zdata.T, levels, **kw)
else:
CS = ax.contourf(xdata, ydata, zdata.T, **kw)
if use_colorbar:
cb = pl.colorbar(CS, cax=cax, **cb_kwargs)
else:
if levels is not None:
CS = ax.contour(xdata, ydata, zdata.T, levels, **kwargs)
else:
CS = ax.contour(xdata, ydata, zdata.T, **kwargs)
if inline_labels:
pl.clabel(CS, ineline=1, fontsize=10, manual=manual)
else:
p = list(pars) + [c]
# Grab all the data we need
data = self.ExtractData(p, ivar=ivar,
take_log=take_log, un_log=un_log, multiplier=multiplier)
xdata = data[p[0]]
ydata = data[p[1]]
zdata = data[p[2]]
for i, level in enumerate(levels):
# Find indices of appropriate elements
cond = np.abs(zdata - level) < leveltol
elements = np.argwhere(cond).squeeze()
order = np.argsort(xdata[elements])
kw = {}
for kwarg in kwargs.keys():
if type(kwargs[kwarg]) == tuple:
kw[kwarg] = kwargs[kwarg][i]
else:
kw[kwarg] = kwargs[kwarg]
ax.plot(xdata[elements][order], ydata[elements][order], **kw)
pl.draw()
return ax, cb
def ContourScatter(self, x, y, c, z=None, ax=None, fig=1, Nscat=1e4,
take_log=False, cmap='jet', alpha=1.0, bins=20, vmin=None, vmax=None,
color_by_like=False, like=[0.95, 0.68], zbins=None, labels=None,
**kwargs):
"""
Show contour plot in 2-D plane, and add colored points for third axis.
Parameters
----------
x : str
Fields for the x-axis.
y : str
Fields for the y-axis.
c : str
Name of parameter to represent with colored points.
z : int, float, str
Redshift (if investigating blobs)
Nscat : int
Number of samples plot.
Returns
-------
Three objects: the main Axis instance, the scatter plot instance,
and the colorbar object.
"""
if type(take_log) == bool:
take_log = [take_log] * 3
if labels is None:
labels = default_labels
else:
labels_tmp = default_labels.copy()
labels_tmp.update(labels)
labels = labels_tmp
if type(z) is not list:
z = [z] * 3
pars = [x, y]
axes = []
for i, par in enumerate(pars):
if par in self.parameters:
axes.append(self.chain[:,self.parameters.index(par)])
elif par in self.blob_names:
axes.append(self.blobs[:,self.blob_redshifts.index(z[i]),
self.blob_names.index(par)])
elif par in self.derived_blob_names:
axes.append(self.derived_blobs[:,self.blob_redshifts.index(z[i]),
self.derived_blob_names.index(par)])
for i in range(2):
if take_log[i]:
axes[i] = np.log10(axes[i])
xax, yax = axes
if c in self.parameters:
zax = self.chain[:,self.parameters.index(c)].ravel()
elif c in self.all_blob_names:
zax = self.ExtractData(c)[c]
elif c in self.derived_blob_names:
zax = self.derived_blobs[:,self.blob_redshifts.index(z[-1]),
self.derived_blob_names.index(c)]
if zax.shape[0] != self.chain.shape[0]:
if self.chain.shape[0] > zax.shape[0]:
xax = xax[0:self.blobs.shape[0]]
yax = yax[0:self.blobs.shape[0]]
print("Looks like calculation was terminated after chain " +\
"was written to disk but before blobs. How unlucky!")
print("Applying cludge to ensure shape match...")
else:
raise ValueError('Shape mismatch between blobs and chain!')
if take_log[2]:
zax = np.log10(zax)
z.pop(-1)
ax = self.PosteriorPDF(pars, z=z, take_log=take_log, fill=False,
bins=bins, ax=ax, fig=fig, color_by_like=color_by_like, like=like,
**kwargs)
# Pick out Nscat random points to plot
mask = np.zeros_like(xax, dtype=bool)
rand = np.arange(len(xax))
np.random.shuffle(rand)
mask[rand < Nscat] = True
if zbins is not None:
cmap_obj = eval('mpl.colorbar.cm.{!s}'.format(cmap))
#if take_log[2]:
# norm = mpl.colors.LogNorm(zbins, cmap_obj.N)
#else:
if take_log[2]:
norm = mpl.colors.BoundaryNorm(np.log10(zbins), cmap_obj.N)
else:
norm = mpl.colors.BoundaryNorm(zbins, cmap_obj.N)
else:
norm = None
scat = ax.scatter(xax[mask], yax[mask], c=zax[mask], cmap=cmap,
zorder=1, edgecolors='none', alpha=alpha, vmin=vmin, vmax=vmax,
norm=norm)
cb = pl.colorbar(scat)
cb.set_alpha(1)
cb.draw_all()
if c in labels:
cblab = labels[c]
elif '{' in c:
cblab = labels[c[0:c.find('{')]]
else:
cblab = c
if take_log[2]:
cb.set_label(logify_str(cblab))
else:
cb.set_label(cblab)
cb.update_ticks()
pl.draw()
return ax, scat, cb
def TrianglePlot(self, pars=None, ivar=None, take_log=False, un_log=False,
multiplier=1, fig=1, mp=None, inputs={}, tighten_up=0.0, ticks=5,
bins=20, scatter=False, polygons=False,
skip=0, skim=1, stop=None, oned=True, twod=True, fill=True,
show_errors=False, label_panels=None, return_axes=False,
fix=True, skip_panels=[], mp_kwargs={}, inputs_scatter=False,
input_mkw={},
**kwargs):
"""
Make an NxN panel plot showing 1-D and 2-D posterior PDFs.
Parameters
----------
pars : list
Parameters to include in triangle plot.
1-D PDFs along diagonal will follow provided order of parameters
from left to right. This list can contain the names of parameters,
so long as the file prefix.pinfo.pkl exists, otherwise it should
be the indices where the desired parameters live in the second
dimension of the MCMC chain.
NOTE: These can alternatively be the names of arbitrary meta-data
blobs.
If None, this will plot *all* parameters, so be careful!
fig : int
ID number for plot window.
bins : int, np.ndarray
Number of bins in each dimension. Or, array of bins to use
for each parameter. If the latter, the bins should be in the
*final* units of the quantities of interest. For example, if
you apply a multiplier or take_log, the bins should be in the
native units times the multiplier or in the log10 of the native
units (or both).
ivar : int, float, str, list
If plotting arbitrary meta-data blobs, must choose a redshift.
Can be 'B', 'C', or 'D' to extract blobs at 21-cm turning points,
or simply a number. If it's a list, it must have the same
length as pars. This is how one can make a triangle plot
comparing the same quantities at different redshifts.
input : dict
Dictionary of parameter:value pairs representing the input
values for all model parameters being fit. If supplied, lines
will be drawn on each panel denoting these values.
skip : int
Number of steps at beginning of chain to exclude.
stop: int
Number of steps to exclude from the end of the chain.
skim : int
Only take every skim'th step from the chain.
oned : bool
Include the 1-D marginalized PDFs?
fill : bool
Use filled contours? If False, will use open contours instead.
color_by_like : bool
If True, set contour levels by confidence regions enclosing nu-%
of the likelihood. Set parameter `like` to modify these levels.
like : list
List of levels, default is 1,2, and 3 sigma contours (i.e.,
like=[0.68, 0.95])
skip_panels : list
List of panel numbers | |
Point(2,2)
>>> pl = Plane(Point(0,0,1), Vector(0,0,1))
>>> result = pt.project_3D(pl, 2)
>>> print(result)
Point(2.0,2.0,1.0)
"""
if coordinate_index==0:
point=plane.point_yz(self.x,self.y)
elif coordinate_index==1:
point=plane.point_zx(self.x,self.y)
elif coordinate_index==2:
point=plane.point_xy(self.x,self.y)
else:
raise ValueError('coordinate_index must be between 0 and 2')
return point
def to_tuple(self):
"""Returns a tuple representation of the point.
:returns: The coordinates as a tuple.
For a point, this can also be achieved by creating a
tuple of the point itself (i.e. :code:`tuple(pt)`).
:rtype: tuple
.. code-block:: python
>>> from crossproduct import Point
>>> pt = Point(2,2)
>>> result = pt.to_tuple()
>>> print(result)
(2.0,2.0)
"""
return tuple(self)
@property
def x(self):
"""The x coordinate of the point.
:rtype: float
.. rubric:: Code Example
.. code-block:: python
>>> from crossproduct import Point
>>> pt = Point(0,1,2)
>>> print(pt.x)
0.0
"""
return self[0]
@property
def y(self):
"""The y coordinate of the point.
:rtype: float
.. rubric:: Code Example
.. code-block:: python
>>> from crossproduct import Point
>>> pt = Point(0,1,2)
>>> print(pt.y)
1.0
"""
return self[1]
@property
def z(self):
"""The z coordinate of the point.
:raises IndexError: If point is a 2D point.
:rtype: float
.. rubric:: Code Example
.. code-block:: python
>>> from crossproduct import Point
>>> pt = Point(0,1,2)
>>> print(pt.z)
2.0
"""
return self[2]
class Points(collections.abc.MutableSequence):
    """A mutable sequence of points.

    Iterating over a Points object yields its Point instances; indexing,
    appending, inserting and deleting are all supported.

    :param points: An argument list of Point instances.

    .. rubric:: Code Example

    .. code-block:: python

        >>> from crossproduct import Point, Points
        >>> pts = Points(Point(0,0), Point(1,0))
        >>> print(pts)
        Points(Point(0.0,0.0), Point(1.0,0.0))
        >>> print(pts[1])
        Point(1.0,0.0)

    """

    def __init__(self, *points):
        ""
        self._points = list(points)

    def __delitem__(self, index):
        ""
        del self._points[index]

    def __eq__(self, points):
        """Test whether this sequence equals the supplied sequence.

        :param points: The points sequence to be tested.
        :type points: Points

        :return: True only if `points` is a Points instance whose items
            compare equal to this sequence's items; otherwise False.
        :rtype: bool

        .. rubric:: Code Example

        .. code-block:: python

            >>> from crossproduct import Point, Points
            >>> pts1 = Points(Point(0,0), Point(1,0))
            >>> pts2 = Points(Point(0,0), Point(1,0))
            >>> result = pts1 == pts2
            >>> print(result)
            True

        """
        return isinstance(points, Points) and self._points == points._points

    def __getitem__(self, index):
        ""
        return self._points[index]

    def __len__(self):
        ""
        return len(self._points)

    def __repr__(self):
        ""
        return 'Points(%s)' % ', '.join(str(pt) for pt in self)

    def __setitem__(self, index, value):
        ""
        self._points[index] = value

    def insert(self, index, value):
        "(Required by the MutableSequence abstract base class.)"
        return self._points.insert(index, value)

    def project_3D(self, plane, coordinate_index):
        """Project these 2D points onto a 3D plane.

        :param plane: The plane for the projection.
        :type plane: Plane
        :param coordinate_index: The index of the coordinate which was
            ignored to create the 2D projection (0=x, 1=y, 2=z).
        :type coordinate_index: int

        :return: Sequence of 3D points projected from the 2D points.
        :rtype: Points

        :Example:

        .. code-block:: python

            >>> pt = Points(Point2D(2,2))
            >>> pl = Plane3D(Point3D(0,0,1), Vector3D(0,0,1))
            >>> result = pts.project_3D(pl, 2)
            Points(Point3D(2,2,1))

        """
        projected = [pt.project_3D(plane, coordinate_index) for pt in self]
        return Points(*projected)

    def plot(self, ax, *args, **kwargs):
        """Plot every point on the supplied matplotlib axes.

        :param ax: A 2D or 3D Axes instance.
        :type ax: matplotlib.axes.Axes, mpl_toolkits.mplot3d.axes3d.Axes3D
        :param args: positional arguments forwarded to each point's plot.
        :param kwargs: keyword arguments forwarded to each point's plot.
        """
        for pt in self:
            pt.plot(ax, *args, **kwargs)

    def remove_points_in_segments(self, segments):
        """Remove, in place, every point contained by any of the segments.

        :param segments: The segments to check.
        :type segments: Segments

        :return: None, changes are made in place.
        :rtype: None

        .. rubric:: Code Example

        .. code-block:: python

            >>> from crossproduct import Point, Points
            >>> pts = Points(Point(0,0), Point(1,0))
            >>> segments = Segments(Segment(Point(0,0), Point(0,1)))
            >>> pts.remove_points_in_segments(segments)
            >>> print(pts)
            Points(Point(1.0,0.0))

        """
        # Iterate over a reversed snapshot so removal during the loop
        # cannot skip elements.
        for pt in self[::-1]:
            if segments.contains(pt):
                self.remove(pt)

    def to_tuple(self):
        """Return a tuple of the points' coordinate tuples.

        :returns: A tuple of the points tuples.
        :rtype: tuple

        .. rubric:: Code Example

        .. code-block:: python

            >>> from crossproduct import Point, Points
            >>> pts = Points(Point(0,0), Point(1,0))
            >>> result = pts.to_tuple()
            >>> print(result)
            ((0.0, 0.0), (1.0, 0.0))

        """
        return tuple(tuple(pt) for pt in self)
class Vector(collections.abc.Sequence):
"""A vector, as described by xy or xyz coordinates.
In *crossproduct* a Vector object is a immutable sequence.
Iterating over a Vector will provide its coordinates.
Indexing a vector will return the coordinate for that index (0=x, 1=y, 2=z).
:param coordinates: Argument list of two (xy) or three (xyz) coordinates.
Coordinates should be of type int, float or similar numeric. These values
are converted to floats.
:raises ValueError: If less then 2 or more than 3 arguments are supplied.
.. rubric:: Code Example
.. code-block:: python
>>> from crossproduct import Vector
>>> v = Vector(1,2)
>>> print(v)
Vector(1.0,2.0)
.. seealso:: `<https://geomalgorithms.com/points_and_vectors.html#Basic-Definitions>`_
"""
def __add__(self,vector):
"""Addition of this vector and a supplied vector.
:param vector: A vector.
:type vector: Vector
:rtype: Vector
.. rubric:: Code Example
.. code-block:: python
>>> from crossproduct import Vector
>>> v = Vector(1,2)
>>> result = v + Vector(1,1)
>>> print(result)
Vector(2.0,3.0)
.. seealso:: `<https://geomalgorithms.com/points_and_vectors.html#Vector-Addition>`_
"""
zipped=itertools.zip_longest(self,vector) # missing values filled with None
try:
coordinates=[a+b for a,b in zipped]
except TypeError: # occurs if, say, a or b is None
raise ValueError('Vectors to add must be of the same length.')
return Vector(*coordinates)
def __eq__(self,vector):
"""Tests if this vector and the supplied vector have the same coordinates.
A tolerance value is used so coordinates with very small difference
are considered equal.
:param vector: The vector to be tested.
:type vector: Vector
:raises ValueError: If points are not of the same length.
:return: True if the vector coordinates are the same, otherwise False.
:rtype: bool
.. rubric:: Code Example
.. code-block:: python
>>> from crossproduct import Vector
>>> result = Vector(1,2) == Vector(2,2)
>>> print(result)
False
"""
zipped=itertools.zip_longest(self,vector) # missing values filled with None
try:
result=[math.isclose(a, b, abs_tol=ABS_TOL) for a,b in zipped]
except TypeError: # occurs | |
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib.postgres.search import SearchQuery
from django.contrib.postgres.search import SearchVector
from django.core.files.storage import get_storage_class
from django.db import models
from django.db.models import Q, F, Case, When, Value, Sum, Min, Max, OuterRef, Subquery, Count, CharField
from django.db.models.functions import Length
from django.views import View
from django.views.generic.detail import SingleObjectMixin
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import get_language_info
from django.utils.decorators import method_decorator
from django.utils.text import slugify
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_http_methods
from django.http import HttpResponseForbidden
from django.views.decorators.cache import cache_control
from collections import Counter
import json
import datetime
import re
import pytz
import logging
from studygroups.decorators import user_is_group_facilitator
from studygroups.decorators import user_is_team_organizer
from studygroups.models import Course
from studygroups.models import StudyGroup
from studygroups.models import Application
from studygroups.models import Meeting
from studygroups.models import Team
from studygroups.models import TeamMembership
from studygroups.models import TeamInvitation
from studygroups.models import Announcement
from studygroups.models import generate_meetings_from_dates
from studygroups.models import get_json_response
from studygroups.models.course import course_platform_from_url
from studygroups.models.team import eligible_team_by_email_domain
from uxhelpers.utils import json_response
from api.geo import getLatLonDelta
from api import schema
from api.forms import ImageForm
logger = logging.getLogger(__name__)
def studygroups(request):
    """JSON list of published learning circles, optionally filtered by
    ?course_id=.

    # TODO remove this API endpoint, where is it currently being used??
    """
    study_groups = StudyGroup.objects.published()
    course_id = request.GET.get('course_id')
    if course_id is not None:
        study_groups = study_groups.filter(course_id=course_id)

    def serialize(sg):
        data = {
            "name": sg.name,
            "course_title": sg.course.title,
            "facilitator": sg.facilitator.first_name + " " + sg.facilitator.last_name,
            "venue": sg.venue_name,
            "venue_address": sg.venue_address + ", " + sg.city,
            "city": sg.city,
            "day": sg.day(),
            "start_date": sg.start_date,
            "meeting_time": sg.meeting_time,
            "time_zone": sg.timezone_display(),
            "end_time": sg.end_time(),
            "weeks": sg.meeting_set.active().count(),
            "url": f"{settings.PROTOCOL}://{settings.DOMAIN}" + reverse('studygroups_signup', args=(slugify(sg.venue_name, allow_unicode=True), sg.id,)),
        }
        if sg.image:
            data["image_url"] = f"{settings.PROTOCOL}://{settings.DOMAIN}" + sg.image.url
        #TODO else set default image URL
        return data

    return json_response(request, [serialize(sg) for sg in study_groups])
class CustomSearchQuery(SearchQuery):
    """ use to_tsquery to support partial matches """
    """ NOTE: This is potentially unsafe!!"""
    def as_sql(self, compiler, connection):
        # Strip characters meaningful to to_tsquery from the raw search
        # string (so user input cannot inject tsquery syntax), then
        # normalize to lowercase.
        query = re.sub(r'[!\'()|&\:=,\.\ \-\<\>@]+', ' ', self.value).strip().lower()
        # "foo bar" -> "foo:* & bar:*": prefix-match every term.
        tsquery = ":* & ".join(query.split(' '))
        tsquery += ":*"
        params = [tsquery]
        if self.config:
            # A search configuration (language) was supplied; compile it
            # into the SQL and cast to regconfig.
            config_sql, config_params = compiler.compile(self.config)
            template = 'to_tsquery({}::regconfig, %s)'.format(config_sql)
            params = config_params + [tsquery]
        else:
            template = 'to_tsquery(%s)'
        if self.invert:
            # Negated lookup (~SearchQuery) -- double bang coerces to bool.
            template = '!!({})'.format(template)
        return template, params
def serialize_learning_circle(sg):
    """Serialize a StudyGroup into the dict shape used by the API."""
    site_root = f"{settings.PROTOCOL}://{settings.DOMAIN}"

    if sg.course.discourse_topic_url:
        discourse_url = sg.course.discourse_topic_url
    else:
        discourse_url = site_root + reverse("studygroups_generate_course_discourse_topic", args=(sg.course.id,))

    data = {
        "course": {
            "id": sg.course.pk,
            "title": sg.course.title,
            "provider": sg.course.provider,
            "link": sg.course.link,
            "course_page_url": site_root + reverse('studygroups_course_page', args=(sg.course.id,)),
            "discourse_topic_url": discourse_url,
        },
        "id": sg.id,
        "name": sg.name,
        "facilitator": sg.facilitator.first_name,
        "venue": sg.venue_name,
        "venue_address": sg.venue_address + ", " + sg.city,
        "venue_website": sg.venue_website,
        "city": sg.city,
        "region": sg.region,
        "country": sg.country,
        "country_en": sg.country_en,
        "latitude": sg.latitude,
        "longitude": sg.longitude,
        "place_id": sg.place_id,
        "online": sg.online,
        "language": sg.language,
        "day": sg.day(),
        "start_date": sg.start_date,
        "start_datetime": sg.local_start_date(),
        "meeting_time": sg.meeting_time,
        "time_zone": sg.timezone_display(),
        "last_meeting_date": sg.end_date,  # TODO rename to end_date or last_meeting_date - ie make consistent
        "end_time": sg.end_time(),
        "weeks": sg.weeks if sg.draft else sg.meeting_set.active().count(),  # TODO
        "url": site_root + reverse('studygroups_signup', args=(slugify(sg.venue_name, allow_unicode=True), sg.id,)),
        "report_url": sg.report_url(),
        "studygroup_path": reverse('studygroups_view_study_group', args=(sg.id,)),
        "draft": sg.draft,
        "signup_count": sg.application_set.active().count(),
        "signup_open": sg.signup_open and sg.end_date > datetime.date.today(),
    }
    if sg.image:
        data["image_url"] = site_root + sg.image.url
    # TODO else set default image URL
    if sg.signup_question:
        data["signup_question"] = sg.signup_question
    # Annotated attributes exist only on some querysets.
    if hasattr(sg, 'next_meeting_date'):
        data["next_meeting_date"] = sg.next_meeting_date
    if hasattr(sg, 'status'):
        data["status"] = sg.status
    return data
def _intCommaList(csv):
values = csv.split(',') if csv else []
cleaned = []
for value in values:
try:
v = int(value)
cleaned += [v]
except ValueError:
return None, 'Not a list of integers seperated by commas'
return cleaned, None
def _limit_offset(request):
if 'offset' in request.GET or 'limit' in request.GET:
try:
offset = int(request.GET.get('offset', 0))
except ValueError as e:
offset = 0
try:
limit = int(request.GET.get('limit', 100))
except ValueError as e:
limit = 100
return limit, offset
@method_decorator(cache_control(max_age=15*60), name='dispatch')
class LearningCircleListView(View):
    """Read-only JSON API listing learning circles (study groups).

    Supports filtering by id, draft, current user, scope, free-text
    search, course, city, team, active state, geographic bounding box,
    topics, weekdays and signup state, plus ordering and limit/offset
    pagination.  Responses are cached for 15 minutes.
    """

    def get(self, request):
        # Validators for the recognized query parameters.  Each entry
        # returns (clean_value, error_message_or_None).
        query_schema = {
            "latitude": schema.floating_point(),
            "longitude": schema.floating_point(),
            "distance": schema.floating_point(),
            "offset": schema.integer(),
            "limit": schema.integer(),
            "weekdays": _intCommaList,
            "user": schema.boolean(),
            "scope": schema.text(),
            "draft": schema.boolean(),
            "team_id": schema.integer(),
            "order": lambda v: (v, None) if v in ['name', 'start_date', 'created_at', 'first_meeting_date', 'last_meeting_date', None] else (None, "must be 'name', 'created_at', 'first_meeting_date', 'last_meeting_date', or 'start_date'"),
        }
        data = schema.django_get_to_dict(request.GET)
        # NOTE(review): clean_data is unused; filters below read
        # request.GET directly after validation succeeds.
        clean_data, errors = schema.validate(query_schema, data)
        if errors != {}:
            return json_response(request, {"status": "error", "errors": errors})

        study_groups = StudyGroup.objects.published().prefetch_related('course', 'meeting_set', 'application_set').order_by('id')

        if 'draft' in request.GET:
            # Include drafts by switching to the wider "active" queryset.
            study_groups = StudyGroup.objects.active().order_by('id')

        if 'id' in request.GET:
            id = request.GET.get('id')
            study_groups = StudyGroup.objects.filter(pk=int(id))

        if 'user' in request.GET:
            # Limit to circles facilitated by the requesting user.
            user_id = request.user.id
            study_groups = study_groups.filter(facilitator=user_id)

        today = datetime.date.today()
        # (Removed an unused `active_meetings` queryset that was built
        # here but never referenced.)

        # TODO status is being used by the learning circle search page?
        study_groups = study_groups.annotate(
            status=Case(
                When(signup_open=True, start_date__gt=today, then=Value('upcoming')),
                When(signup_open=True, start_date__lte=today, end_date__gte=today, then=Value('in_progress')),
                When(signup_open=False, end_date__gte=today, then=Value('closed')),
                default=Value('completed'),
                output_field=CharField(),
            ),
        )

        # TODO scope is used by dashboard?
        if 'scope' in request.GET:
            scope = request.GET.get('scope')
            upcoming_meetings = Meeting.objects.filter(study_group=OuterRef('pk'), deleted_at__isnull=True, meeting_date__gte=today).order_by('meeting_date')
            if scope == "active":
                study_groups = study_groups\
                    .annotate(next_meeting_date=Subquery(upcoming_meetings.values('meeting_date')[:1]))\
                    .filter(Q(end_date__gte=today) | Q(draft=True))
            elif scope == "upcoming":
                study_groups = study_groups\
                    .annotate(next_meeting_date=Subquery(upcoming_meetings.values('meeting_date')[:1]))\
                    .filter(Q(start_date__gt=today) | Q(draft=True))
            elif scope == "current":
                study_groups = study_groups\
                    .annotate(next_meeting_date=Subquery(upcoming_meetings.values('meeting_date')[:1]))\
                    .filter(start_date__lte=today, end_date__gte=today)
            elif scope == "completed":
                study_groups = study_groups\
                    .filter(end_date__lt=today)

        # Full-text search over location, course and facilitator fields.
        q = request.GET.get('q', '').strip()
        if q:
            tsquery = CustomSearchQuery(q, config='simple')
            study_groups = study_groups.annotate(
                search=SearchVector(
                    'city',
                    'name',
                    'course__title',
                    'course__provider',
                    'course__topics',
                    'venue_name',
                    'venue_address',
                    'venue_details',
                    'facilitator__first_name',
                    'facilitator__last_name',
                    config='simple'
                )
            ).filter(search=tsquery)

        if 'course_id' in request.GET:
            study_groups = study_groups.filter(
                course_id=request.GET.get('course_id')
            )

        city = request.GET.get('city')
        if city is not None:
            study_groups = study_groups.filter(city=city)

        team_id = request.GET.get('team_id')
        if team_id is not None:
            # Circles facilitated by any active member of the team.
            team = Team.objects.get(pk=team_id)
            members = team.teammembership_set.active().values('user')
            team_users = User.objects.filter(pk__in=members)
            study_groups = study_groups.filter(facilitator__in=team_users)

        # TODO How is this different from scope=active?
        if 'active' in request.GET:
            active = request.GET.get('active') == 'true'
            if active:
                study_groups = study_groups.filter(end_date__gte=today)
            else:
                study_groups = study_groups.filter(end_date__lt=today)

        if 'latitude' in request.GET and 'longitude' in request.GET:
            # work with floats for ease
            latitude = float(request.GET.get('latitude'))
            longitude = float(request.GET.get('longitude'))
            distance = float(request.GET.get('distance', False) or 50)
            lat_delta, lon_delta = getLatLonDelta(latitude, longitude, distance)
            lat_min = max(-90, latitude - lat_delta)
            lat_max = min(90, latitude + lat_delta)
            lon_min = max(-180, longitude - lon_delta)
            lon_max = min(180, longitude + lon_delta)
            # NOTE doesn't wrap around,
            # iow, something at lat=45, lon=-189 and distance=1000 won't match
            # lat=45, lon=189 even though they are only 222 km apart.
            study_groups = study_groups.filter(
                latitude__gte=lat_min,
                latitude__lte=lat_max,
                longitude__gte=lon_min,
                longitude__lte=lon_max
            )
            # NOTE could use haversine approximation to filter more accurately

        if 'topics' in request.GET:
            # OR together a case-insensitive containment test per topic.
            topics = request.GET.get('topics').split(',')
            query = Q(course__topics__icontains=topics[0])
            for topic in topics[1:]:
                query = Q(course__topics__icontains=topic) | query
            study_groups = study_groups.filter(query)

        if 'weekdays' in request.GET:
            weekdays = request.GET.get('weekdays').split(',')
            query = None
            for weekday in weekdays:
                # The API uses Monday=0..Sunday=6 (like datetime.weekday()),
                # while Django's __week_day lookup uses Sunday=1..Saturday=7.
                # BUG FIX: the original `int(weekday) + 2 % 7` parsed as
                # `int(weekday) + (2 % 7)`, mapping Sunday (6) to the
                # invalid value 8 so Sunday circles never matched.
                weekday = (int(weekday) + 1) % 7 + 1
                query = query | Q(start_date__week_day=weekday) if query else Q(start_date__week_day=weekday)
            study_groups = study_groups.filter(query)

        # TODO this conflates signup open and active
        study_groups_signup_open = study_groups.filter(signup_open=True, end_date__gte=today)
        study_groups_signup_closed = study_groups.filter(Q(signup_open=False) | Q(end_date__lt=today))

        if 'signup' in request.GET:
            signup_open = request.GET.get('signup') == 'open'
            if signup_open:
                study_groups = study_groups_signup_open
            else:
                study_groups = study_groups_signup_closed

        order = request.GET.get('order', None)
        if order == 'name':
            study_groups = study_groups.order_by('name')
        elif order == 'start_date':
            study_groups = study_groups.order_by('-start_date')
        elif order == 'created_at':
            study_groups = study_groups.order_by('-created_at')
        elif order == 'first_meeting_date':
            study_groups = study_groups.order_by('start_date')
        elif order == 'last_meeting_date':
            study_groups = study_groups.order_by('-end_date')

        data = {
            'count': study_groups.count(),
            'signup_open_count': study_groups_signup_open.count(),
            'signup_closed_count': study_groups_signup_closed.count(),
        }
        if 'offset' in request.GET or 'limit' in request.GET:
            limit, offset = _limit_offset(request)
            data['offset'] = offset
            data['limit'] = limit
            study_groups = study_groups[offset:offset+limit]

        data['items'] = [ serialize_learning_circle(sg) for sg in study_groups ]
        return json_response(request, data)
class LearningCircleTopicListView(View):
    """Return topic counts for courses used by upcoming learning circles.

    Response shape: ``{"topics": {topic: count, ...}}`` where count is
    the number of matching courses mentioning that (comma-separated)
    topic.
    """

    def get(self, request):
        # Study groups that still have a meeting scheduled in the future.
        study_group_ids = Meeting.objects.active().filter(
            meeting_date__gte=timezone.now()
        ).values('study_group')
        # (Removed a dead `course_ids = None` that was immediately
        # overwritten.)
        course_ids = StudyGroup.objects.published().filter(id__in=study_group_ids).values('course')
        topics = Course.objects.active()\
            .filter(unlisted=False)\
            .filter(id__in=course_ids)\
            .exclude(topics='')\
            .values_list('topics')
        # Each row is a 1-tuple holding a comma-separated topic string;
        # split and normalize before counting.
        topics = [
            item.strip().lower() for sublist in topics for item in sublist[0].split(',')
        ]
        data = {}
        # dict(Counter(...)) replaces the redundant
        # `{k: v for k, v in list(Counter(topics).items())}`.
        data['topics'] = dict(Counter(topics))
        return json_response(request, data)
def _studygroup_object_for_map(sg):
active = sg.end_date > datetime.date.today()
report_available = sg.learnersurveyresponse_set.count() > 0
data = {
"id": sg.id,
"title": sg.name,
"latitude": sg.latitude,
"longitude": sg.longitude,
"city": sg.city,
"start_date": sg.start_date,
"active": active
}
if active:
data["url"] = settings.PROTOCOL + '://' + settings.DOMAIN + reverse('studygroups_signup', args=(slugify(sg.venue_name, allow_unicode=True), sg.id,))
elif report_available:
data["report_url"] = sg.report_url()
return data
class LearningCirclesMapView(View):
    """Publish every published learning circle as a map marker item."""

    def get(self, request):
        groups = StudyGroup.objects.published().select_related('course').prefetch_related("learnersurveyresponse_set")
        payload = {'items': [_studygroup_object_for_map(group) for group in groups]}
        return json_response(request, payload)
def _course_check(course_id):
    """Look up a Course by primary key.

    Returns ``(course, None)`` when found, otherwise
    ``(None, error_message)``.  Uses a single EAFP query instead of the
    original ``exists()`` + ``get()`` pair, which hit the database twice
    and was racy between the two calls.
    """
    try:
        return Course.objects.get(pk=int(course_id)), None
    except Course.DoesNotExist:
        return None, 'Course matching ID not found'
def serialize_course(course):
data = {
"id": course.id,
"title": course.title,
"provider": course.provider,
"platform": course.platform,
"link": course.link,
"caption": course.caption,
"on_demand": course.on_demand,
"topics": [t.strip() for t | |
pool._reclaimed.append(new_d)
# We're inside GC!
# pool._append_to_queue is not useful because
# deque.append causes deadlock
else:
warnings.warn(UserWarning("DataPool: an object collected during cyclic garbage collection; discarded"))
# data is discarded
@staticmethod
def _check_cleanness(d, in_pool_ok=False):
    # Ensure *d* is not already managed by a pool before registering it.
    # If *d* has no handle attribute yet, mark it as IN_POOL (EAFP: the
    # AttributeError path is the "clean object" success case).
    # With in_pool_ok=True, an existing IN_POOL marker is tolerated;
    # any other handle value means the data is leased or dead.
    try:
        h = d.__handle
        if not in_pool_ok or h is not IN_POOL:
            raise ValueError("data is already managed by DataPool")
    except AttributeError:
        d.__handle = IN_POOL
def _setup_lease(self, d, ref, forced=False):
    # Attach a lease handle to *d*, tying its lifetime to *ref*:
    # when *ref* dies, the registered callback reclaims *d* into the
    # pool.  With forced=True the IN_POOL precondition is skipped.
    if not forced:
        if not (d.__handle == IN_POOL):
            raise ValueError("data was not in DataPool")
    # Single-element list so callbacks can both reach d and later have
    # the d -> handle cycle cut (see _clear_handle_content).
    refback_obj = [d]
    if self._gc_recovery is None:
        # no gc_recovery hook configured: use a strict
        # weakref.finalize-based finalizer on the referent
        r = wr(ref)
        f = weakref.finalize(ref, DataPool._reclaim, refback_obj)
    else:
        # we have a recovery clue from dead zombie: use closed graph method
        r = f = _Rref(ref, refback_obj)
    d.__handle = _Handle(
        watch_target = r,
        datapool = wr(self),
        refback = refback_obj,
        finalizer = f)
def _append_to_queue(self, d):
    # Put an IN_POOL-marked object onto the free queue, under the pool
    # lock.  (Not safe to call from GC/finalizer context; reclaim paths
    # elsewhere avoid this method.)
    assert(d.__handle is IN_POOL)
    with self._lock:
        self._queue.append(d)
@staticmethod
def _check_alive_leased_data(d):
    # Verify that *d* is currently leased and its referent is alive.
    # Returns (handle, ref, pool).  Holding the strong `ref` prevents
    # the referent from dying — and thus the finalizer from running —
    # for as long as the caller keeps the returned tuple.
    handle = d.__handle
    if type(handle) is not _Handle:
        raise ValueError("data is not leased")
    ref = handle.watch_target()
    # inhibit finalizer here.
    if not ref:
        raise RuntimeError("pool-managed data is dangling; referent lost.")
    # here, finalizer is not running
    # state may be changed: need recheck.
    # (The finalizer may have run between the first read and acquiring
    # the strong ref, so the handle must be re-read and re-validated.)
    handle = d.__handle
    if type(handle) is not _Handle:
        raise RuntimeError("pool-managed data is dead during processing; referent lost.")
    # pool may be None if the owning DataPool was garbage-collected.
    pool = handle.datapool()
    assert(handle.watch_target() is ref)
    return (handle, ref, pool)
@staticmethod
def _clear_handle_content(handle, finalizer_detach=True):
    # Dismantle a _Handle so its parts can be collected: break the
    # refback->data link (the deliberate cycle keeping data alive),
    # optionally deactivate the finalizer, and drop all fields.
    handle.refback[0] = None # cut circular dependency
    handle.refback = None
    if finalizer_detach:
        handle.finalizer.detach()
    handle.finalizer = None
    handle.watch_target = None
@classmethod
def _get_names(self):
    # Compute the (name-mangled) attribute names this class stores on
    # managed data objects — e.g. '_DataPool__handle' — by assigning
    # them to a scratch instance and reading its __dict__.  Used below
    # to build required_slot_names; the method is deleted afterwards.
    # NOTE(review): parameter is conventionally `cls` for a classmethod.
    class _NAMES: pass
    d = _NAMES()
    d.__handle = None
    # NOTE(review): `r` appears unused; presumably exercises
    # weakref-ability of the scratch object — confirm before removing.
    r = wr(d)
    return tuple(d.__dict__.keys())
# Slot names that classes hosting pool-managed data must declare
# (includes '__weakref__' so instances can be weakly referenced).
DataPool.required_slot_names = DataPool._get_names() + ('__weakref__',)
# may be used if __weakref__ already exists
DataPool.required_slot_names_direct = DataPool._get_names()
# One-shot setup helper; remove it from the class namespace.
del DataPool._get_names
class PooledDataMixin:
    """Convenience mixin exposing DataPool operations as instance methods.

    Every method simply delegates to the corresponding classmethod on
    DataPool.  A class mixing this in must declare the slot names given
    in `DataPool.required_slot_names`.
    """
    __slots__ = ()

    def replace_with(self, new):
        """Substitute *new* for this object in its pool lease."""
        return DataPool.replace_data(self, new)

    def return_to_pool(self):
        """Hand this object back to the pool it was leased from."""
        return DataPool.return_to_pool(self)

    def remove_from_pool(self):
        """Permanently detach this object from pool management."""
        return DataPool.kill(self)

    def update_referent(self, new):
        """Re-point the lease's watched referent at *new*."""
        return DataPool.update_referent(self, new)

    def get_referent(self):
        """Return the referent currently watched for this object."""
        return DataPool.get_referent(self)
class PooledDataBase(PooledDataMixin):
    """Ready-made base class for DataPool-managed data.

    Inheriting from this (as the single base) declares the slots that
    DataPool requires, so subclasses need no extra bookkeeping.
    """
    __slots__ = DataPool.required_slot_names
"""# Internal states:
0: data not registered:
__handle not defined
Allowed_action:
put
put_and_use
1: data in pool:
__handle = IN_POOL
contained in pool.queue
Allowed_action:
lease to r -> state 2
1L: data in dead pool:
__handle = IN_POOL
container pool garbage-collected
Allowed_action:
nothing
1D: data killed:
__handle = DEAD
NOT contained in pool.queue
Allowed_action:
nothing
2: data leased to r:
__handle = [watch_target = wr(r)
datapool = wr(pool)
refback = [d]
finalizer = final(__refback)]
Events:
- r lost -> finalizer_called
- pool lost -> state 3
Allowed_action:
- return_to_pool
- replace_with
- kill
3: pool lost:
__handle = [watch_target = wr(r)
datapool = wr(None)
refback = [d]
finalizer = final(__refback)]
Events:
- r lost -> finalizer_called
Allowed_action:
- return_to_pool
- replace_with
- kill
Mutual exclusion:
Each actions and finalizers must not run in parallel.
._check_alive_leased_data() will carefully check
that finalizer is not already running, and that
finalizer will not run during the actions.
Actions and Events:
finalizer_called:
SYNCHRONIZE check state:
- from state 1/1L/1D: no op (should not happen, no error reports in finalizer)
- from state 2: __handle.datapool = wr(pool)
clear __handle's content
__handle = IN_POOL
putback to pool.queue
[ now state 1 ]
- from state 3: __handle.datapool = wr(None)
clear __handle's content
__handle = IN_POOL
not returned to pool
[ now state 1L ]
Action return_to_pool:
SYNCHRONIZE check_state
- from state 1, 1L, 1D: Error
- deactivate finalizer
- from state 2: __handle.datapool = wr(pool)
clear __handle's content
__handle = IN_POOL
putback to pool.queue
[ now state 1 ]
- from state 3: __handle.datapool = wr(None)
clear __handle's content
__handle = IN_POOL
not returned to pool
[ now state 1L ]
Action replace_with:
SYNCHRONIZE check_state
- from state 0/1/1L: error
- from state 2/3:
old.__handle = DEAD
[old is state DEAD, new is state 0, refback points to old.
finalizer will not be called here, because we have ref in SYNCHRONIZE.]
handle.refback <- [new]
[old is state DEAD, new is state 0, refback points to new.
finalizer will not be called here, because we have ref in SYNCHRONIZE.]
new.__handle = old.handle
[old is state 1D, new is state 2/3.]
[now finalizer may be activated.]
Action kill:
SYNCHRONIZE check_state
- from state 0, 1, 1L: Error
- from state 2/3:
- deactivate finalizer
clear __handle's content
__handle = DEAD
[ now state 1D ]
Reference graph:
_________________(expected)---------------
| |
| pool |
| ^ |
| : |
v : |
obj ----> handle ----> (weakref) .....> referent
^ | / ^ |
| | /(*A) (*B): (virtually)
| | | : |
\ v v (*B) : v
-------- refback_obj <-------------- finalizer <- (weakref module)
Solid lines represent strong references, and
Dotted lines represent weak references.
References marked with (*A) exist when gc_recovery is enabled.
Those with (*B) exist when gc_recovery is not used.
- The refback_obj is referenced strongly from finalizer or weakref.
It will keep obj alive when referent is dying. The callbacks will
reclaim the obj into pool before it is trashed.
- No strong reference to the referent: referent is expected to be
collected by reference-counting GC.
- There is a cycles around the obj. It will be usually eliminated
during object reclaim by clearing refback_obj -> obj and obj ->
handle links.
- Having a finalizer is virtually equivalent to having a strong
reference from referent to the finalizer.
So, when gc_recovery is not used, if there is a strong reference
from obj to referent, there causes a virtual circular dependency
around obj: obj -> referent -> finalizer -> refback_obj -> obj.
Because obj is strongly kept alive by the finalizer for
reclaiming, referent cannot be collected by both reference
counting and mark-sweep GCs, resulting in memory leak.
Also, if there is other data to the same referent, those will also
be considered a part of cyclic objects (because it is internally
referenced from the referent).
Use of `gc_recovery` hook parameter
When gc_recovery hook is enabled, such leased data with dependency
cycles can be collected by GC. To enable that, the reference
structure is modified so that refback_obj is directly pointed from
the weak reference, not using the finalizer indirection.
When the whole cycle is collected by the mark-sweep GC, all
objects are instantly considered dead; an object finalizer
(__del__) associated to the weakref will be kicked and it will
resurrect obj to return the data to pools. However, at this
moment, the object is already marked dead by GC.
Such marked-dead objects have some tricky behaviors: its
finalizers will not be called again, existing weakrefs to those
objects are eliminated, etc. Such objects might be safe or unsafe
to be reused, depending on the situation. It is the reason that
this hook is not enabled by default.
The `gc_recovery` hook is responsible to make such a dead object
useful again; the hook will receive a dead object, and assumed to
return a "clean" copy of it. If non-null object is returned, it
will be returned back to the associated pool. If the hook returns
None, the object is not resurrected.
Either `copy.copy` or `copy.deepcopy` can be used as `gc_recovery`
hook, depending on a situation. Alternatively, if the data's
internal liveness does not cause any issues, `True` can be passed
to reuse the dead object as is. `False` will silently discard the
dead.
It also has another side effect; if the referent has dropped all
strong references to the pooled-managed object before itself is
unreferenced, and the caller also lose the reference to the pooled
object (to be used for `return_to_pool`), the object will become
dead and collected by the GC, before the referent is collected.
The object have to be returned to | |
mars", u"%d. apríl",
u"%d. maí", u"%d. júní", u"%d. júlí", u"%d. ágúst",
u"%d. september", u"%d. október", u"%d. nóvember",
u"%d. desember"])
# Day-of-month format registrations (e.g. "en:May 12"), one call per
# language.  addFmt1 takes an explicit pattern per month; addFmt2 takes
# a single pattern applied to every month (the trailing boolean
# presumably controls capitalization of the month name — TODO confirm).
addFmt2('it', False, u"%%d %s", False)
addFmt1('ja', False, makeMonthList(u"%d月%%d日"))
addFmt2('jv', False, u"%%d %s", True)
addFmt2('ka', False, u"%%d %s")
addFmt1('ko', False, makeMonthList(u"%d월 %%d일"))
addFmt1('ku', False, [u"%d'ê rêbendanê", u"%d'ê reşemiyê", u"%d'ê adarê",
                      u"%d'ê avrêlê", u"%d'ê gulanê", u"%d'ê pûşperê",
                      u"%d'ê tîrmehê", u"%d'ê gelawêjê", u"%d'ê rezberê",
                      u"%d'ê kewçêrê", u"%d'ê sermawezê", u"%d'ê berfanbarê"])
addFmt1('la', False, [u"%d Ianuarii", u"%d Februarii", u"%d Martii",
                      u"%d Aprilis", u"%d Maii", u"%d Iunii", u"%d Iulii",
                      u"%d Augusti", u"%d Septembris", u"%d Octobris",
                      u"%d Novembris", u"%d Decembris"])
addFmt2('lb', False, u"%%d. %s", True)
addFmt1('li', False, [u"%d januari", u"%d februari", u"%d miert", u"%d april",
                      u"%d mei", u"%d juni", u"%d juli", u"%d augustus",
                      u"%d september", u"%d oktober", u"%d november",
                      u"%d december"])
addFmt1('lt', False, [u"Sausio %d", u"Vasario %d", u"Kovo %d", u"Balandžio %d",
                      u"Gegužės %d", u"Birželio %d", u"Liepos %d",
                      u"Rugpjūčio %d", u"Rugsėjo %d", u"Spalio %d",
                      u"Lapkričio %d", u"Gruodžio %d"])
addFmt2('lv', False, u"%%d. %s", False)
addFmt2('mhr', False, u"%%d %s", False)
addFmt1('mk', False, [u"%d јануари", u"%d февруари", u"%d март", u"%d април",
                      u"%d мај", u"%d јуни", u"%d јули", u"%d август",
                      u"%d септември", u"%d октомври", u"%d ноември",
                      u"%d декември"])
addFmt2('ml', False, u"%s %%d")
addFmt2('ms', False, u"%%d %s", True)
addFmt2('nap', False, u"%%d 'e %s", False)
addFmt2('nds', False, u"%%d. %s", True)
addFmt1('nl', False, [u"%%d %s" % v
                      for v in [u"januari", u"februari", u"maart", u"april",
                                u"mei", u"juni", u"juli", u"augustus",
                                u"september", u"oktober", u"november",
                                u"december"]])
addFmt1('nn', False, [u"%%d. %s" % v
                      for v in [u"januar", u"februar", u"mars", u"april",
                                u"mai", u"juni", u"juli", u"august",
                                u"september", u"oktober", u"november",
                                u"desember"]])
addFmt2('no', False, u"%%d. %s", False)
addFmt1('oc', False, [u"%d de genièr", u"%d de febrièr", u"%d de març",
                      u"%d d'abril", u"%d de mai", u"%d de junh",
                      u"%d de julhet", u"%d d'agost", u"%d de setembre",
                      u"%d d'octobre", u"%d de novembre", u"%d de decembre"])
# None entries mean "no known local pattern for that month".
addFmt1('os', False, [u"%d январы", u"%d февралы", u"%d мартъийы",
                      u"%d апрелы", u"%d майы", None, u"%d июлы", None,
                      u"%d сентябры", None, u"%d ноябры", u"%d декабры"])
addFmt1('pl', False, [u"%d stycznia", u"%d lutego", u"%d marca",
                      u"%d kwietnia", u"%d maja", u"%d czerwca", u"%d lipca",
                      u"%d sierpnia", u"%d września", u"%d października",
                      u"%d listopada", u"%d grudnia"])
addFmt2('pt', False, u"%%d de %s", True)
addFmt2('ro', False, u"%%d %s", False)
addFmt1('ru', False, [u"%d января", u"%d февраля", u"%d марта",
                      u"%d апреля", u"%d мая", u"%d июня", u"%d июля",
                      u"%d августа", u"%d сентября", u"%d октября",
                      u"%d ноября", u"%d декабря"])
addFmt2('sco', False, u"%%d %s", True)
addFmt2('scn', False, u"%%d di %s", False)
addFmt1('se', False, [u"ođđajagimánu %d.", u"guovvamánu %d.", u"njukčamánu %d.",
                      u"cuoŋománu %d.", u"miessemánu %d.", u"geassemánu %d.",
                      u"suoidnemánu %d.", u"borgemánu %d.", u"čakčamánu %d.",
                      u"golggotmánu %d.", u"skábmamánu %d.", u"juovlamánu %d."])
addFmt1('sh', False, makeMonthList(u"%%d.%d."))
addFmt2('simple', False, u"%s %%d", True)
addFmt2('sk', False, u"%%d. %s", False)
addFmt2('sl', False, u"%%d. %s", False)
addFmt1('sq', False, [u"%d Janar", u"%d Shkurt", u"%d Mars", u"%d Prill",
                      u"%d Maj", u"%d Qershor", u"%d Korrik", u"%d Gusht",
                      u"%d Shtator", u"%d Tetor", u"%d Nëntor", u"%d Dhjetor"])
addFmt2('sr', False, u"%%d. %s", False)
addFmt2('su', False, u"%%d %s", True)
addFmt2('sv', False, u"%%d %s", False)
addFmt2('ta', False, u"%s %%d")
addFmt2('te', False, u"%s %%d")
addFmt2('th', False, u"%%d %s") # %%T
addFmt2('tl', False, u"%s %%d")
addFmt2('tr', False, u"%%d %s", True)
addFmt2('tt', False, u"%%d. %s", True)
addFmt1('uk', False, [u"%d січня", u"%d лютого", u"%d березня", u"%d квітня",
                      u"%d травня", u"%d червня", u"%d липня", u"%d серпня",
                      u"%d вересня", u"%d жовтня", u"%d листопада",
                      u"%d грудня"])
addFmt1('ur', False, [u"%d جنوری", u"%d فروری", u"%d مارچ", u"%d اپریل",
                      u"%d مئ", u"%d جون", u"%d جلائ", u"%d اگست", u"%d ستمب",
                      u"%d اکتوبر", u"%d نومب", u"%d دسمب"])
addFmt2('vec', False, u"%%d de %s", False)
addFmt1('vi', False, makeMonthList(u"%%d tháng %d"))
addFmt2('vo', False, u"%s %%d", False)
addFmt1('zh', False, makeMonthList(u"%d月%%d日"))
# Walloon names depend on the day number, thus we must generate various
# different patterns.
# The eval() calls below operate only on locally-built strings (month
# names from the lists above), never on external input.
waMonthNames = [u"djanvî", u"fevrî", u"måss", u"avri", u"may", u"djun",
                u"djulete", u"awousse", u"setimbe", u"octôbe", u"nôvimbe",
                u"decimbe"]
# For month names beginning with a consonant...
for i in (0, 1, 2, 4, 5, 6, 8, 10, 11):
    formats[dayMnthFmts[i]]['wa'] = eval(
        (u'lambda m: multi(m, [' +
         u'(lambda v: dh_dayOfMnth(v, u"%%dî d\' %s"), lambda p: p == 1),' +
         u'(lambda v: dh_dayOfMnth(v, u"%%d d\' %s"), lambda p: p in [2,3,20,22,23]),' +
         u'(lambda v: dh_dayOfMnth(v, u"%%d di %s"), alwaysTrue)])')
        % (waMonthNames[i], waMonthNames[i], waMonthNames[i]))
# For month names beginning with a vowel...
for i in (3, 7, 9):
    formats[dayMnthFmts[i]]['wa'] = eval(
        (u'lambda m: multi(m, [' +
         u'(lambda v: dh_dayOfMnth(v, u"%%dî d\' %s"), lambda p: p == 1),' +
         u'(lambda v: dh_dayOfMnth(v, u"%%d d\' %s"), alwaysTrue)])')
        % (waMonthNames[i], waMonthNames[i]))
# Breton ('br') uses "1añ" for the 1st of every month, and number without
# suffix for all other days
brMonthNames = makeMonthNamedList('br', u"%s", True)
for i in range(0, 12):
    formats[dayMnthFmts[i]]['br'] = eval(
        (u'lambda m: multi(m, [' +
         u'(lambda v: dh_dayOfMnth(v, u"%%dañ %s"), lambda p: p == 1),' +
         u'(lambda v: dh_dayOfMnth(v, u"%%d %s"), alwaysTrue)])')
        % (brMonthNames[i], brMonthNames[i]))
#
# Month of the Year: "en:May 1976"
#
# Same addFmt1/addFmt2 conventions as the day-of-month section above,
# with the second argument True for month-of-year formats.
addFmt2('af', True, u"%s %%d", True)
addFmt2('ar', True, u"%s %%d")
addFmt2('ang', True, u"%s %%d", True)
addFmt2('cs', True, u"%s %%d")
addFmt2('de', True, u"%s %%d", True)
addFmt1('el', True, [u"Ιανουάριος %d", u"Φεβρουάριος %d", u"Μάρτιος %d",
                     u"Απρίλιος %d", u"Μάιος %d", u"Ιούνιος %d", u"Ιούλιος %d",
                     u"Άυγουστος %d", u"Σεπτέμβριος %d", u"Οκτώβριος %d",
                     u"Νοέμβριος %d", u"Δεκέμβριος %d"])
addFmt2('en', True, u"%s %%d", True)
addFmt2('eo', True, u"%s de %%d")
addFmt2('es', True, u"%s de %%d", True)
addFmt2('et', True, u"%s %%d", True)
addFmt2('fi', True, u"%s %%d", True)
addFmt1('fr', True, [u"Janvier %d", u"Février %d", u"Mars %d", u"Avril %d",
                     u"Mai %d", u"Juin %d", u"Juillet %d", u"Août %d",
                     u"Septembre %d", u"Octobre %d", u"Novembre %d",
                     u"Décembre %d"])
addFmt2('he', True, u"%s %%d", True)
addFmt2('it', True, u"Attualità/Anno %%d - %s", True)
addFmt1('ja', True, [u"「最近の出来事」%%d年%d月" % mm for mm in range(1, 13)])
addFmt2('ka', True, u"%s, %%d")
addFmt1('ko', True, [u"%d년 1월", u"%d년 2월", u"%d년 3월", u"%d년 4월",
                     u"%d년 5월", u"%d년 6월", u"%d년 7월", u"%d년 8월",
                     u"%d년 9월", u"%d년 10월", u"%d년 11월", u"%d년 12월"])
addFmt1('li', True, [u"januari %d", u"februari %d", u"miert %d", u"april %d",
                     u"mei %d", u"juni %d", u"juli %d", u"augustus %d",
                     u"september %d", u"oktober %d", u"november %d",
                     u"december %d"])
addFmt1('nl', True, [u"Januari %d", u"Februari %d", u"Maart %d", u"April %d",
                     u"Mei %d", u"Juni %d", u"Juli %d", u"Augustus %d",
                     u"September %d", u"Oktober %d", u"November %d",
                     u"December %d"])
addFmt2('pl', True, u"%s %%d", True)
# Only March is known for Sicilian month-of-year pages.
addFmt1('scn', True, [None, None, u"Marzu %d", None, None, None, None, None,
                      None, None, None, None])
addFmt2('simple', True, u"%s %%d", True)
addFmt2('sk', True, u"%s %%d")
addFmt2('sv', True, u"%s %%d", True)
addFmt2('th', True, u"%s พ.ศ. %%T")
addFmt2('tl', True, u"%s %%d")
addFmt2('tt', True, u"%s, %%d", True)
addFmt1('ur', True, [u"%d01مبم", u"%d02مبم", u"%d03مبم", u"%d04مبم",
                     u"%d05مبم", u"%d06مبم", u"%d07مبم", u"%d08مبم", u"%d09مبم",
                     u"%d10مبم", u"%d11مبم", u"%d12مبم"])
addFmt2('uk', True, u"%s %%d", True)
addFmt1('vi', True, makeMonthList(u"Tháng %d năm %%d"))
addFmt1('zh', True, makeMonthList(u"%%d年%d月"))
addFmt1('zh-min-nan', True, makeMonthList(u"%%d nî %d goe̍h"))
# This table defines the limits for each type of format data.
# Each item is a tuple with
# - a predicate function which returns True if the value falls
# within acceptable limits, False otherwise,
# - start value
# - end value
#
# TODO: Before compat 19d1cf9e (2006), there was a 'step' in the tuple,
# used exclusively by DecadeAD and DecadeBC to increment by 10 years.
# "and v%10==0" should be added to the limitation predicate for those two.
formatLimits = {
'MonthName': (lambda v: 1 <= v and v < 13, 1, 13),
'Number': (lambda v: 0 <= v and v < 1000000, 0, 1001),
'YearAD': (lambda v: 0 <= v and v < 2501, 0, 2501),
'YearBC': (lambda v: 0 <= v and v < 4001, 0, 501), # zh: has years as old as 前1700年
'DecadeAD': (lambda v: 0 <= v and v < 2501, 0, 2501),
'DecadeBC': (lambda v: 0 <= v and v < 4001, 0, 501), # zh: has decades as old as 前1700年代
# Some centuries use Roman numerals or a given list
# do not exceed them in testing
'CenturyAD': (lambda v: 1 <= v and v < 41, 1, 23),
'CenturyBC': (lambda v: 1 <= v and v < 91, 1, 23),
'CenturyAD_Cat': (lambda v: 1 <= v and v < 41, 1, 23),
'CenturyBC_Cat': (lambda v: 1 <= v and v < 41, 1, 23),
# For millenniums, only test first 3 AD Millenniums and 1 BC Millennium
'MillenniumAD': (lambda v: 1 <= v and v < 6, 1, 4),
'MillenniumBC': (lambda v: 1 <= v and v < 20, 1, 2),
'Cat_Year_MusicAlbums': (lambda v: 1950 <= v and v < 2021, 1950, 2021),
'Cat_BirthsAD': (lambda v: 0 <= v and v < 2501, 0, 2501),
'Cat_DeathsAD': (lambda v: 0 <= v and v < 2501, 0, 2501),
'Cat_BirthsBC': (lambda v: 0 <= v and v < 4001, 0, 501),
'Cat_DeathsBC': (lambda v: | |
# auto_punch_a_card.py
# coding:utf-8
# Copyright (c) 2020 YBM.eli0t All rights reserved.
# Power by Eli0t
# 目前需要填写:
# 编号
# amap_key(获取位置和经纬度) 高德地图 API 的 key 使用API前您需先申请Key。若无高德地图API账号需要先申请账号。
# whoami_value(可以通过编号计算出,目前仅支持 1872 中队)
# X-CSRF-Token 变化情况未知
# cookie 一般不变化
import requests
import json
import time
import logging
import ast
import sys
import yaml
logging.getLogger().setLevel(logging.INFO)
# Load user configuration (student number, AMap key, session cookies,
# address components, health status) from config.yaml.
with open('config.yaml', 'r') as config:
    information = yaml.load(config.read(), Loader=yaml.FullLoader)
number = information['number']
amap_key = information['amap_key']
Cookie = information['Cookie']
X_CSRF_Token = information['X_CSRF_Token']
Province_GPS = information['Province_GPS']
City_GPS = information['City_GPS']
District_GPS = information['District_GPS']
Detail_GPS = information['Detail_GPS']
health = information['health']
name = ''
# Second-resolution timestamp right-padded with zeros to 13 digits
# (NOTE: not a true millisecond timestamp — mimics the client's format).
time_ = int(str(int(time.time())).ljust(13,'0'))
format_date = time.strftime( "%Y-%m-%d", time.localtime())
ts = time.strptime(format_date, "%Y-%m-%d")
# Today's midnight, also padded to 13 digits.
date_ = int(str(int(time.mktime(ts))).ljust(13, '0'))
# First three characters of the student number identify the class.
Class = number[:3]
# Per-user id derived from a base hex constant plus the number's last
# two digits (only squad 1872 is supported, per the header notes).
whoami_value = str(hex(0x5e565f3eb897e100066ca24a + int(number[-2:])))[2:]
telephone = ''
null = ''
t = round(time.time() * 1000)
# 13-digit millisecond timestamp
# Resolve location from the public IP; returns [province, city, longitude, latitude].
def IP_get_addr_and_itude():
    """Return ``[province, city, longitude, latitude]`` for the public IP.

    Discovers the public IP via api.ipify.org, then resolves it with the
    AMap (Gaode) IP geolocation API using the configured ``amap_key``.
    """
    logging.info("\033[1;32m ** 位置通过 IP 获取开始 ** \033[0m")
    get_IP_url = 'https://api.ipify.org/?format=json'
    # Parse the JSON responses properly instead of ast.literal_eval,
    # which cannot handle JSON true/false/null.
    IP = requests.get(get_IP_url).json()['ip']
    print(" 当前 IP :" + IP)
    # BUG FIX: the key placeholder was missing from the URL, so the
    # request carried a literal "<KEY>" and the second .format() argument
    # (amap_key) was silently ignored.
    amap_API_url_get_addr = 'http://restapi.amap.com/v3/ip?ip={}&key={}'.format(IP, amap_key)
    addr_IP_get = requests.get(amap_API_url_get_addr).json()
    City_IP_GPS = addr_IP_get['city']
    print(" 当前城市(通过 IP 获取):" + City_IP_GPS)
    Province_IP_GPS = addr_IP_get['province']
    print(" 当前省份(通过 IP 获取):" + Province_IP_GPS)
    # 'rectangle' is "lon1,lat1;lon2,lat2"; use the first corner.
    Longitude_IP_GPS = addr_IP_get['rectangle'].split(';')[0].split(',')[0]
    Latitude_IP_GPS = addr_IP_get['rectangle'].split(';')[0].split(',')[1]
    print(" 当前经度(通过 IP 获取):" + Longitude_IP_GPS)
    print(" 当前纬度(通过 IP 获取):" + Latitude_IP_GPS)
    return [Province_IP_GPS, City_IP_GPS, Longitude_IP_GPS, Latitude_IP_GPS]
# 通过详细位置获取经纬度,返回一个包含 经度、纬度 的列表
def location_get_itude(Province_GPS, City_GPS, District_GPS, Detail_GPS, amap_key):
    """Geocode a detailed address to [longitude, latitude] via AMap.

    Each coordinate is zero-padded to 17 characters and suffixed with '1'
    to mimic the precision of a real GPS reading.

    Returns:
        list: [longitude, latitude] as padded strings.
    """
    logging.info("\033[1;32m ** 详细位置获取经纬度开始 ** \033[0m")
    address = Province_GPS + City_GPS + District_GPS + Detail_GPS
    amap_API_url_get_L = 'https://restapi.amap.com/v3/geocode/geo?address={}&key={}'.format(address, amap_key)
    response = requests.get(amap_API_url_get_L)
    # Index the first geocode directly instead of the original
    # str()-slice-then-literal_eval round trip, which broke whenever the API
    # returned more than one geocode in the list.
    geocode = ast.literal_eval(response.text)['geocodes'][0]
    GPS = geocode['location'].split(",")
    Longitude = GPS[0].ljust(17, '0') + '1'
    print(" 当前经度(通过详细地址获取):" + Longitude)
    Latitude = GPS[1].ljust(17, '0') + '1'
    print(" 当前纬度(通过详细地址获取):" + Latitude)
    return [Longitude, Latitude]
# -- Check-in request templates ----------------------------------------------
# Everything below builds the URLs, headers and form-query payloads used by
# the jiandaoyun check-in flow; the actual requests are issued in __main__.
push_data = time.strftime(number + "%Y%m%d", time.localtime())
logging.info("\033[1;32m ** Get ready ** \033[0m")
session = requests.Session()
# Not fully developed
'''
url_get_Cookie_1 = 'https://wxwork.jiandaoyun.com/wxwork/ww0f8c29c5c7b53b53/dashboard'
header_get_Cookie_1 = {
'Host': 'wxwork.jiandaoyun.com',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'zh-cn',
'Connection': 'keep-alive',
'Accept-Encoding': 'gzip, deflate, br',
'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 wxwork/3.0.25 MicroMessenger/7.0.1 Language/zh'
}
re = session.get(url_get_Cookie_1, headers = header_get_Cookie_1, timeout = 20)
print(re.text)
print(re.cookies)
input()
url_get_Cookie_2 = 'https://wxwork.jiandaoyun.com/wxwork/ww0f8c29c5c7b53b53/dashboard?code=jJJCuqSe2mRLKIVzaqz9XuvrvdaDbQDkPVojLaSgCSA&state=5fd23bf5fdf61734d1dcb73'
header_get_Cookie_2 = {
'Host': 'wxwork.jiandaoyun.com',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 wxwork/3.0.25 MicroMessenger/7.0.1 Language/zh',
'Accept-Language': 'zh-cn',
'Accept-Encoding': 'gzip, deflate, br',
'Connection': 'keep-alive',
'Cookie': "JDY_SID=s%3AtMXH2b4RlXPSCg1CFgpp_Lr4BKHT0kiQ.ls1J9cw2K90%2BpcGlYBtrtquA625DqQOwSd8QDGqimrM; _csrf=s%3AFZPR-zsYhX9xBkcReVSfNNji.WlSihcpP4JXGyzh32aO5AEL%2Bcms%2BfhiYdIxBqraUoNM"
}
re = session.get(url_get_Cookie_2, headers = header_get_Cookie_2, timeout = 20)
print(re.text)
input()
'''
url_start_ready_1 = 'https://wxwork.jiandaoyun.com/wxwork/ww0f8c29c5c7b53b53/dashboard'
header_start_ready_1 = {
    'Host': 'wxwork.jiandaoyun.com',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'If-None-Match': 'W/"1aa4-cm7T6S4W9E82aHiZK14XgeRHHqg"',
    'Connection': 'keep-alive',
    'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 wxwork/3.0.25 MicroMessenger/7.0.1 Language/zh',
    'Accept-Language': 'zh-cn',
    'Accept-Encoding': 'gzip, deflate, br',
    'Cookie': Cookie
}
url_start_ready_2 = 'https://wxwork.jiandaoyun.com/dashboard/apps'
header_start_ready_2 = {
    'Host': 'wxwork.jiandaoyun.com',
    'Accept': '*/*',
    'X-Requested-With': 'XMLHttpRequest',
    'Content-Type' : 'application/json;charset=UTF-8',
    'X-JDY-Ver': 'undefined',
    'Origin' : 'https://wxwork.jiandaoyun.com',
    'Referer': 'https://wxwork.jiandaoyun.com/wxwork/ww0f8c29c5c7b53b53/dashboard',
    'Connection': 'keep-alive',
    'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 wxwork/3.0.25 MicroMessenger/7.0.1 Language/zh',
    'Accept-Language': 'zh-cn',
    'Accept-Encoding': 'gzip, deflate, br',
    'X-CSRF-Token': X_CSRF_Token,
    'Cookie' : Cookie
}
data_start_ready_2 = {
    'corpId': 'ww0f8c29c5c7b53b53'
}
url_start_ready_3 = 'https://wxwork.jiandaoyun.com/dashboard/workflow/todo_count'
header_start_ready_3 = header_start_ready_2
data_start_ready_3 = data_start_ready_2
url_start_ready_4 = 'https://track.jiandaoyun.com/log?e=custom_app_post_m&t={}&u=ww0f8c29c5c7b53b53-{}&c=ww0f8c29c5c7b53b53'.format(t, number)
header_start_ready_4 = {
    'Host': 'track.jiandaoyun.com',
    'Accept': 'image/webp,image/png,image/svg+xml,image/*;q=0.8,video/*;q=0.8,*/*;q=0.5',
    'Connection': 'keep-alive',
    'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 wxwork/3.0.25 MicroMessenger/7.0.1 Language/zh',
    'Accept-Language': 'zh-cn',
    'Referer': 'https://wxwork.jiandaoyun.com/wxwork/ww0f8c29c5c7b53b53/dashboard',
    'Accept-Encoding': 'gzip, deflate, br',
    'X-CSRF-Token': X_CSRF_Token
}
url_ready_1 = "https://track.jiandaoyun.com/log?e=app_visit_from_mobile&t={}&u=ww0f8c29c5c7b53b53-{}&c=ww0f8c29c5c7b53b53".format(t, number)
header_ready_1 = header_start_ready_4
# 2020-07-12T07:03:15.045Z
url_ready_2 = 'https://wxwork.jiandaoyun.com/_/app/5e3abc6eb2603a0006585067'
header_ready_2 = {
    'Host': 'wxwork.jiandaoyun.com',
    'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 wxwork/3.0.25 MicroMessenger/7.0.1 Language/zh',
    'Referer': 'https://wxwork.jiandaoyun.com/wxwork/ww0f8c29c5c7b53b53/dashboard',
    'Accept': '*/*',
    'Accept-Language': 'zh-cn',
    'Accept-Encoding': 'gzip, deflate, br',
    'Content-Type': 'application/json;charset=UTF-8',
    'X-JDY-Ver': 'undefined',
    'X-Requested-With': 'XMLHttpRequest',
    'Origin': 'https://wxwork.jiandaoyun.com',
    'Connection': 'keep-alive',
    'X-CSRF-Token': X_CSRF_Token,
    'Cookie': Cookie
}
data_ready_2 = {
    'appId': '5e3abc6eb2603a0006585067'
    # This app id is presumably the same for the whole school
}
# 2020-07-12T07:03:15.052Z
url_ready_3 = 'https://wxwork.jiandaoyun.com/_/app/5e3abc6eb2603a0006585067/workflow/query_data_count'
header_ready_3 = header_ready_2
data_ready_3 = {
    'type': 'todo'
}
# 2020-07-12T07:03:15.296Z
url_1 = 'https://wxwork.jiandaoyun.com/_/app/5e3abc6eb2603a0006585067/form/5e329f810cc34e0006b9706b'
header = {
    'Host': 'wxwork.jiandaoyun.com',
    'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 wxwork/3.0.25 MicroMessenger/7.0.1 Language/zh',
    'Referer': 'https://wxwork.jiandaoyun.com/wxwork/ww0f8c29c5c7b53b53/dashboard',
    'Accept': '*/*',
    'Accept-Language': 'zh-cn',
    'Accept-Encoding': 'gzip, deflate, br',
    'Content-Type': 'application/json;charset=UTF-8',
    'X-CSRF-Token': X_CSRF_Token,
    'X-JDY-Ver': 'undefined',
    'X-Requested-With': 'XMLHttpRequest',
    'Origin': 'https://wxwork.jiandaoyun.com',
    'Connection': 'keep-alive',
    'Cookie': Cookie,
    'TE': 'Trailers'
}
data_1 = {
    "appId": "5e3abc6eb2603a0006585067",
    "entryId": "5e329f810cc34e0006b9706b"
}
# "text" : "{\"appId\":\"5e3abc6eb2603a0006585067\",\"entryId\":\"5e329f810cc34e0006b9706b\"}"
# 1619 2020-07-12T07:03:17.821Z
url_middle = 'https://wxwork.jiandaoyun.com/_/app/5e3abc6eb2603a0006585067/get_vip_pack'
header_middle = header_ready_2
data_middle = data_1
# Form-link query: look up this user's record by the user widget.
data_2_1 = {
    "formId" : "5e351e41041a5f0006de5b66",
    "appId" : "5e3abc6eb2603a0006585067",
    "entryId" : "5e329f810cc34e0006b9706b",
    "dataId" : '',
    "filter" : {
        "rel" : "and",
        "cond" : [
            {
                "entryId" : "5e351e41041a5f0006de5b66",
                "value" : [
                    whoami_value ],
                "method" : "in",
                "type" : "user",
                "field" : "_widget_1582792744211"
            }
        ]
    },
    "field" : "_widget_1580539457795"
}
# get number
url_all = 'https://wxwork.jiandaoyun.com/_/data/link'
data_2 = {
    "formId" : "5e351e41041a5f0006de5b66",
    "appId" : "5e3abc6eb2603a0006585067",
    "entryId" : "5e329f810cc34e0006b9706b",
    "dataId" : '',
    "filter" : {
        "rel" : "and",
        "cond" : [
            {
                "entryId" : "5e351e41041a5f0006de5b66",
                "value" : [
                    number
                ],
                "method" : "in",
                "type" : "text",
                "field" : "_widget_1580539457795"
            },
            {
                "entryId" : "5e351e41041a5f0006de5b66",
                "value" : [
                    whoami_value
                ],
                "method" : "in",
                "type" : "user",
                "field" : "_widget_1582792744211"
            }
        ]
    },
    "field" : "_widget_1580539457756"
}
# get name 1
# 2460 2020-07-12T07:03:18.697Z
data_2_2 = {
    "formId" : "5e351e41041a5f0006de5b66",
    "appId" : "5e3abc6eb2603a0006585067",
    "entryId" : "5e329f810cc34e0006b9706b",
    "dataId" : '',
    "filter" : {
        "rel" : "and",
        "cond" : [
            {
                "entryId" : "5e351e41041a5f0006de5b66",
                "value" : [
                    whoami_value
                ],
                "method" : "in",
                "type" : "user",
                "field" : "_widget_1582792744211"
            }
        ]
    },
    "field" : "_widget_1580539457756"
}
# get name 2
data_3 = {
    "field":"_widget_1581313193925",
    "formId":"5e351e41041a5f0006de5b66",
    "appId":"5e3abc6eb2603a0006585067",
    "entryId":"5e329f810cc34e0006b9706b",
    "dataId": "",
    "filter":{
        "cond":[
            {"type":"user",
             "field":"_widget_1582792744211",
             "method":"in",
             "value":[whoami_value],
             "entryId":"5e351e41041a5f0006de5b66"}
        ],
        "rel":"and"}
}
# 2630 2020-07-12T07:03:18.715Z
data_5 = {
    "field":"_widget_1581313193966",
    "formId":"5e351e41041a5f0006de5b66",
    "appId":"5e3abc6eb2603a0006585067",
    "entryId":"5e329f810cc34e0006b9706b",
    "dataId": "",
    "filter":{
        "cond":[
            {"type":"user",
             "field":"_widget_1582792744211",
             "method":"in",
             "value":[whoami_value],
             "entryId":"5e351e41041a5f0006de5b66"}
        ],
        "rel":"and"}
}
data_6 = {
    "field":"_widget_1581313194033",
    "formId":"5e351e41041a5f0006de5b66",
    "appId":"5e3abc6eb2603a0006585067",
    "entryId":"5e329f810cc34e0006b9706b",
    "dataId": "",
    "filter":{
        "cond":[
            {"type":"user",
             "field":"_widget_1582792744211",
             "method":"in",
             "value":[whoami_value],
             "entryId":"5e351e41041a5f0006de5b66"}
        ],
        "rel":"and"}
}
# 2020-07-12T07:03:19.023Z
data_8 = {
    "field":"_widget_1580372529999",
    "formId":"5e4212ae1ce79100064f49bd",
    "appId":"5e3abc6eb2603a0006585067",
    "entryId":"5e329f810cc34e0006b9706b",
    "dataId": "",
    "filter":{
        "cond":[
            {"type":"text",
             "field":"_widget_1581486714569",
             "method":"in",
             "value":[number+name],
             "entryId":"5e4212ae1ce79100064f49bd"}
        ],
        "rel":"and"}
}
# 3869 _widget_1580372529999
# 2020-07-12T07:03:19.196Z
data_9 = {
    "field":"_widget_1580372530349",
    # NOTE(review): "<KEY>" appears to be a redacted form id (the sibling
    # queries use "5e4212ae1ce79100064f49bd") -- restore before use.
    "formId":"<KEY>",
    "appId":"5e3abc6eb2603a0006585067",
    "entryId":"5e329f810cc34e0006b9706b",
    "dataId": "",
    "filter":{
        "cond":[
            {"type":"text",
             "field":"_widget_1581486714569",
             "method":"in",
             "value":[number+name],
             "entryId":"5e4212ae1ce79100064f49bd"}
        ],
        "rel":"and"}
}
# 533 _widget_1580372530349
# 2020-07-12T07:03:19.364Z
url_push = 'https://wxwork.jiandaoyun.com/_/data/create'
if __name__ == '__main__':
for i in range(1, len(sys.argv)):
if sys.argv[i] == '-i':
list_ip = IP_get_addr_and_itude()
Detail_GPS_push = ''
Province_GPS_push = list_ip[0]
City_GPS_push = list_ip[1]
Longitude_push = list_ip[2]
Latitude_push = list_ip[3]
elif sys.argv[i] == '-c':
Detail_GPS_push = Detail_GPS
Province_GPS_push = Province_GPS
City_GPS_push = City_GPS
list_itude = location_get_itude(Province_GPS, City_GPS, District_GPS, Detail_GPS, amap_key)
Longitude_push = list_itude[0]
Latitude_push = list_itude[1]
# 读取位置参数
time.sleep(1)
re = session.get(url_start_ready_1, headers = header_start_ready_1, timeout = 20)
print(re.status_code)
re = session.post(url_start_ready_2, headers = header_start_ready_2, data = json.dumps(data_start_ready_2), timeout = 20)
print(re.status_code)
re = session.post(url_start_ready_3, headers = header_start_ready_3, data = json.dumps(data_start_ready_3), timeout = 20)
print(re.status_code)
re = session.get(url_start_ready_4, headers = header_start_ready_4, timeout = 20)
print(re.status_code)
re = session.get(url_ready_1, headers = header_ready_1, timeout = 20)
print(re.status_code)
re = session.post(url_ready_2, headers = header_ready_2, data = json.dumps(data_ready_2), timeout = 20)
print(re.status_code)
re = session.post(url_ready_3, headers = header_ready_3, data = json.dumps(data_ready_2), timeout = 20)
print(re.status_code)
re = session.post(url_1, headers = header, data = json.dumps(data_1), timeout = 20)
print(' get if seccess ' + str(re.status_code))
re = session.post(url_middle, headers = header_middle, data = json.dumps(data_middle), timeout = 20)
print(str(re.status_code))
logging.info("\033[1;32m all ready to start ! \033[0m")
re = session.post(url_all, headers = header, data = json.dumps(data_2_1), timeout = 20)
number = ast.literal_eval(re.text)['value']
print(' get number ' + number)
re = session.post(url_all, headers = header, data = json.dumps(data_2), timeout = 20)
name = ast.literal_eval(re.text)['value']
print(' get name ' + name)
re = session.post(url_all, headers = header, data = json.dumps(data_2_2), timeout = 20)
name_2 = ast.literal_eval(re.text)['value']
print(' get name 2 ' + name_2)
re = session.post(url_all, headers = header, data = json.dumps(data_3), timeout = 20)
sex = ast.literal_eval(re.text)['value']
print(' get sex ' + sex)
# attention
data_4 = {
"field":"_widget_1580282769605",
"formId":"<KEY>",
"appId":"5e3abc6eb2603a0006585067",
"entryId":"5e329f810cc34e0006b9706b",
"dataId": "",
"filter":{
"cond":[
{"type":"text",
"field":"_widget_1581486714569",
"method":"in",
"value":[number+name],
"entryId":"5e4212ae1ce79100064f49bd"}
],
"rel":"and"}
}
# old type
re = session.post(url_all, headers = header, data = json.dumps(data_4), timeout = 20)
telephone = ast.literal_eval(re.text)['value']
print(' get telephone ' + telephone)
re = session.post(url_all, headers = header, data = json.dumps(data_5), timeout = 20)
sdept = ast.literal_eval(re.text)['value']
print(' get sdept ' + sdept)
re = session.post(url_all, headers = header, data = json.dumps(data_6), timeout = 20)
specialty = ast.literal_eval(re.text)['value']
print(' get specialty ' + specialty)
# attention
data_6_2 = {
"formId" : "5e351e41041a5f0006de5b66",
"appId" : "5e3abc6eb2603a0006585067",
"entryId" : "5e329f810cc34e0006b9706b",
"dataId" : '',
"filter" : {
"rel" : "and",
"cond" : [
{
"entryId" : "5e351e41041a5f0006de5b66",
"value" : [
number+name
| |
< ranges[0] + ranges[1]:
random_angle += offset_angles[0]
else:
random_angle += offset_angles[1]
hue = ((reference_angle + random_angle) / 360) % 1
saturation = random.uniform(saturation_range[0], saturation_range[1])
luminance = lerp(luminance_range[0], luminance_range[1], i / (count - 1))
colours[i] = hsl_to_rgb(hue, saturation, luminance)
return colours
# Cube Helix Palette Generation...............................................
def cube_helix(
        levels, start_hue, rotations,
        saturation_range=(0, 1), lightness_range=(0, 1), gamma=1):
    """
    Generate a cube-helix colour table.

    Based on Dave Green's public domain (Unlicense license) Fortran 77
    implementation. Returns (colours, low, high) where low/high count the
    individual channel values that had to be clipped to [0, 1].
    """
    clipped_low = 0
    clipped_high = 0
    colours = [None] * levels
    for step in range(levels):
        lightness = lerp(lightness_range[0], lightness_range[1], step / levels)
        saturation = lerp(saturation_range[0], saturation_range[1], lightness)
        angle = TAU * (start_hue / 3 + 1 + rotations * lightness)
        # Gamma correction is applied after the hue angle is derived.
        shade = math.pow(lightness, gamma)
        amplitude = saturation * shade * (1 - shade) / 2
        cos_a = math.cos(angle)
        sin_a = math.sin(angle)
        channels = [
            shade + amplitude * (-0.14861 * cos_a + 1.78277 * sin_a),
            shade + amplitude * (-0.29227 * cos_a - 0.90649 * sin_a),
            shade + amplitude * (1.97294 * cos_a),
        ]
        # Clip each channel into [0, 1], tallying how often each bound hit.
        for channel_index, channel in enumerate(channels):
            if channel < 0:
                channels[channel_index] = 0
                clipped_low += 1
            elif channel > 1:
                channels[channel_index] = 1
                clipped_high += 1
        colours[step] = Colour3.to_byte3(tuple(channels))
    return colours, clipped_low, clipped_high
def fetch_colour(colours, value, low, high):
    """Map `value` in [low, high] to an interpolated colour from `colours`.

    BUG FIX: the original indexed `colours[index + 1]` unconditionally, so
    any value in the last bucket (including value == high, where the
    fractional position reaches len(colours)) raised IndexError.  Indices
    are now clamped to the final palette entry; interior behaviour is
    unchanged.
    """
    u = unlerp(low, high, value)
    v = lerp(0, len(colours), u)
    v_truncated = math.floor(v)
    t = v - v_truncated
    last = len(colours) - 1
    index = min(int(v_truncated), last)
    next_index = min(index + 1, last)
    return rgb_lerp(colours[index], colours[next_index], t)
# Geometry Utilities..........................................................
class Vector2:
    """Static helpers that treat plain 2-tuples as 2D vectors."""

    @staticmethod
    def add(u, w):
        return (u[0] + w[0], u[1] + w[1])

    @staticmethod
    def subtract(u, w):
        return (u[0] - w[0], u[1] - w[1])

    @staticmethod
    def multiply(factor, v):
        return (factor * v[0], factor * v[1])

    @staticmethod
    def divide(v, divisor):
        return (v[0] / divisor, v[1] / divisor)

    @staticmethod
    def length(v):
        x, y = v
        return math.sqrt(x ** 2 + y ** 2)

    @staticmethod
    def distance(a, b):
        delta_x = b[0] - a[0]
        delta_y = b[1] - a[1]
        return math.sqrt(delta_x * delta_x + delta_y * delta_y)

    @staticmethod
    def scale(factor, v):
        return (factor * v[0], factor * v[1])

    @staticmethod
    def perp(v):
        # Clockwise perpendicular: (x, y) -> (y, -x).
        return (v[1], -v[0])

    @staticmethod
    def floor(v):
        # NOTE(review): int() truncates toward zero, so this is not a true
        # floor for negative components -- confirm callers only pass
        # non-negative coordinates.
        return (int(v[0]), int(v[1]))

    @staticmethod
    def dot(a, b):
        return (a[0] * b[0]) + (a[1] * b[1])

    @staticmethod
    def lerp(a, b, t):
        # Component-wise linear interpolation via the module-level lerp().
        return (lerp(a[0], b[0], t), lerp(a[1], b[1], t))

    @staticmethod
    def limit_length(v, limit):
        magnitude = Vector2.length(v)
        if magnitude > limit:
            return Vector2.scale(limit / magnitude, v)
        return v
def get_circle_bounds(center, radius):
    """Return the (left, top, right, bottom) pixel box enclosing a circle."""
    x, y = center
    return (int(x - radius), int(y - radius),
            int(x + radius + 1), int(y + radius + 1))
def point_in_box(point, box):
    """True when point lies in box = (left, top, right, bottom); the right
    and bottom edges are exclusive."""
    x, y = point
    left, top, right, bottom = box
    return left <= x <= right - 1 and top <= y <= bottom - 1
def clip_box(box, bounds):
    """Intersect two (left, top, right, bottom) boxes."""
    left = max(box[0], bounds[0])
    top = max(box[1], bounds[1])
    right = min(box[2], bounds[2])
    bottom = min(box[3], bounds[3])
    return (left, top, right, bottom)
def clip_point(point, box):
    """Clamp a point into box, keeping it off the exclusive right/bottom edges."""
    x = min(max(point[0], box[0]), box[2] - 1)
    y = min(max(point[1], box[1]), box[3] - 1)
    return (x, y)
# Canvas......................................................................
def unpack_rg(rg):
    """Split a packed value into (r, g) bytes; r occupies the low byte."""
    low_byte = rg & 0xFF
    high_byte = (rg >> 8) & 0xFF
    return (low_byte, high_byte)
def unpack_rgb(rgb):
    """Split a packed value into (r, g, b) bytes; r occupies the lowest byte."""
    return tuple((rgb >> shift) & 0xFF for shift in (0, 8, 16))
def unpack_rgba(rgba):
    """Split a packed value into (r, g, b, a) bytes; r occupies the lowest byte."""
    return tuple((rgba >> shift) & 0xFF for shift in (0, 8, 16, 24))
def pack_rg(rg):
return (rg[1] << 8) | (rg[0])
def pack_rgb(rgb):
return (rgb[2] << 16) | (rgb[1] << 8) | (rgb[0])
def pack_rgba(rgba):
return (rgba[3] << 24) | (rgba[2] << 16) | (rgba[1] << 8) | (rgba[0])
# There doesn't seem to be any way to query the size in bytes of array
# typecodes. So, for each of these make_array functions, make a zero-item
# array and take its itemsize, then make the real array.
def make_array8(count):
    """Return a zero-initialised array of `count` unsigned 8-bit integers."""
    item_size = array('B').itemsize
    return array('B', bytearray(item_size * count))
def make_array16(count):
    """Return a zero-initialised array of `count` unsigned 16-bit integers."""
    item_size = array('H').itemsize
    return array('H', bytearray(item_size * count))
def make_array32(count):
    """Return a zero-initialised array of `count` unsigned integers of at
    least 32 bits.

    There is no fixed-width typecode, so use 'I' when it is 4 bytes and
    fall back to 'L' otherwise.  BUG FIX: the fallback previously sized its
    buffer with the 'I' item size, yielding the wrong element count (or a
    ValueError) on platforms where 'L' is wider than 'I'.
    """
    int_size = array('I').itemsize
    if int_size == 4:
        return array('I', bytearray(int_size * count))
    long_size = array('L').itemsize
    return array('L', bytearray(long_size * count))
def make_array_float(count):
    """Return a zero-initialised array of `count` double-precision floats."""
    item_size = array('d').itemsize
    return array('d', bytearray(item_size * count))
class Canvas:
    """
    A raw pixel buffer with mode-tagged accessors.

    Attrs:
        pixels = an array of unsigned 1, 2, or 4 byte values
        size = a tuple containing the dimensions in pixels (width, height)
        mode = one of the strings "R", "RG", "RGB", "RGBA", "F"
    """
    def __init__(self, mode, size, pixels=None):
        # An existing buffer may be supplied; otherwise a zeroed array of
        # the element width matching `mode` is allocated.
        if mode not in ["R", "RG", "RGB", "RGBA", "F"]:
            raise ValueError(
                """mode must be one of the strings {"R", "RG", "RGB",\
"RGBA", "F"}.""")
        if pixels is not None:
            self.pixels = pixels
        else:
            count = size[0] * size[1]
            if mode == "R":
                self.pixels = make_array8(count)
            elif mode == "RG":
                self.pixels = make_array16(count)
            elif mode == "RGB" or mode == "RGBA":
                self.pixels = make_array32(count)
            elif mode == "F":
                self.pixels = make_array_float(count)
        self.size = size
        self.mode = mode
    def get_bounds(self):
        """Full-canvas box as (left, top, right, bottom)."""
        return (0, 0, self.size[0], self.size[1])
    def get_pixel_r(self, point):
        """Read a single-channel pixel at (x, y)."""
        assert point_in_box(point, self.get_bounds())
        index = self.size[0] * point[1] + point[0]
        return self.pixels[index]
    def get_pixel_rg(self, point):
        """Read a two-channel pixel at (x, y) as an (r, g) tuple."""
        assert point_in_box(point, self.get_bounds())
        index = self.size[0] * point[1] + point[0]
        return unpack_rg(self.pixels[index])
    def get_pixel_rgb(self, point):
        """Read a three-channel pixel at (x, y) as an (r, g, b) tuple."""
        assert point_in_box(point, self.get_bounds())
        index = self.size[0] * point[1] + point[0]
        return unpack_rgb(self.pixels[index])
    def get_pixel_rgba(self, point):
        """Read a four-channel pixel at (x, y) as an (r, g, b, a) tuple."""
        assert point_in_box(point, self.get_bounds())
        index = self.size[0] * point[1] + point[0]
        return unpack_rgba(self.pixels[index])
    def get_pixel_f(self, point):
        """Read a float pixel at (x, y)."""
        assert point_in_box(point, self.get_bounds())
        index = self.size[0] * point[1] + point[0]
        return self.pixels[index]
    def put_pixel_r(self, point, pixel):
        """Write a single-channel pixel at (x, y)."""
        assert point_in_box(point, self.get_bounds())
        index = self.size[0] * point[1] + point[0]
        self.pixels[index] = pixel
    def put_pixel_rg(self, point, pixel):
        """Write an (r, g) pixel at (x, y)."""
        assert point_in_box(point, self.get_bounds())
        index = self.size[0] * point[1] + point[0]
        self.pixels[index] = pack_rg(pixel)
    def put_pixel_rgb(self, point, pixel):
        """Write an (r, g, b) pixel at (x, y)."""
        assert point_in_box(point, self.get_bounds())
        index = self.size[0] * point[1] + point[0]
        self.pixels[index] = pack_rgb(pixel)
    def put_pixel_rgba(self, point, pixel):
        """Write an (r, g, b, a) pixel at (x, y)."""
        assert point_in_box(point, self.get_bounds())
        index = self.size[0] * point[1] + point[0]
        self.pixels[index] = pack_rgba(pixel)
    def put_pixel_f(self, point, pixel):
        """Write a float pixel at (x, y)."""
        assert point_in_box(point, self.get_bounds())
        index = self.size[0] * point[1] + point[0]
        self.pixels[index] = pixel
    def put_and_premultiply_alpha(self, alpha):
        """Add an alpha channel to an RGB image."""
        assert self.size == alpha.size
        assert self.mode == "RGB"
        for i in range(len(self.pixels)):
            rgb = unpack_rgb(self.pixels[i])
            a_byte = alpha.pixels[i]
            # Scale each colour channel by the alpha (premultiplied alpha).
            a = byte_to_unorm(a_byte)
            rgb = (
                int(a * rgb[0]),
                int(a * rgb[1]),
                int(a * rgb[2]))
            self.pixels[i] = (a_byte << 24) | pack_rgb(rgb)
        self.mode = "RGBA"
    def convert(self, mode):
        """
        The implemented conversions are:
        -float (F) to 1-channel (R)
        -1-channel (R) to 3-channel (RGB)
        """
        count = len(self.pixels)
        if mode == "R":
            if self.mode == "F":
                # Assumes the float pixels are already roughly in [0, 255];
                # values outside are clamped.
                pixels = array('B', bytearray(count))
                for i in range(count):
                    pixels[i] = clamp(int(self.pixels[i]), 0, 255)
            else:
                raise NotImplementedError(
                    "An F mode image can only be converted to an image of "
                    "mode R.")
        elif mode == "RGB":
            if self.mode == "R":
                # Replicate the grey value into all three channels.
                pixels = make_array32(count)
                for i in range(count):
                    r = self.pixels[i]
                    pixels[i] = (r << 16) | (r << 8) | (r)
            elif self.mode == "RG":
                # NOTE(review): this copies the packed 16-bit RG values
                # unchanged into another 16-bit buffer, so the result is not
                # an actual RGB expansion -- looks unfinished; confirm
                # before relying on RG -> RGB conversion.
                pixels = make_array16(count)
                for i in range(count):
                    pixels[i] = self.pixels[i]
            elif self.mode == "RGB":
                return self.crop(self.get_bounds())
            else:
                # NOTE(review): this message reads backwards -- it is
                # raised when converting *to* RGB from an unsupported
                # source mode, not from an RGB image.
                raise NotImplementedError(
                    "An RGB mode image can only be converted to an image of "
                    "mode R or RG.")
        return Canvas(mode=mode, size=self.size, pixels=pixels)
    def crop(self, box):
        """Only RGB and RGBA cropping is implemented!"""
        if self.mode not in ["RGB", "RGBA"]:
            raise NotImplementedError(
                "Only images of mode RGB and RGBA can be cropped.")
        # Clip the requested box to the canvas, then copy row by row.
        box = clip_box(box, self.get_bounds())
        width = box[2] - box[0]
        height = box[3] - box[1]
        copied_pixels = make_array32(width * height)
        i = 0
        for y in range(box[1], box[3]):
            for x in range(box[0], box[2]):
                copied_pixels[i] = self.pixels[self.size[0] * y + x]
                i += 1
        return Canvas(
            mode=self.mode, size=(width, height), pixels=copied_pixels)
    def copy(self, image, box):
        """Copy the pixels of this canvas inside `box` into `image`,
        top-left aligned.  Assumes `image` is at least as large as the
        clipped box -- TODO confirm at call sites."""
        box = clip_box(box, self.get_bounds())
        for y in range(box[1], box[3]):
            for x in range(box[0], box[2]):
                si = self.size[0] * y + x
                ci = image.size[0] * (y - box[1]) + (x - box[0])
                image.pixels[ci] = self.pixels[si]
# Line Drawing................................................................
def sign(x):
    """Return 1, -1, or 0 matching the sign of x."""
    if x > 0:
        return 1
    if x < 0:
        return -1
    return 0
def draw_line_without_clipping(image, a, b, colour):
dx = b[0] - a[0]
dy = b[1] - a[1]
adx = abs(dx)
ady = abs(dy)
sdx = sign(dx)
sdy | |
'handles': 99,
'pass': 84,
'off_rebound': 36,
'dunk': 30
},
'defense': {
'def_rebound': 43,
'inside_defense': 32,
'post_defense': 32,
'outside_defense': 62,
'block': 45,
'steal': 66
},
'physical': {
'speed': 86,
'vertical': 81,
'strength': 37
}
}, tendencies={
'offense': {
'shoot_mid': 80,
'shoot_three': 80,
'pass': 50,
'attack_rim': 94,
'post_up': 45
},
'defense': {
'steal': 65,
'block': 30,
'intercept': 45
}
}, avatar="https://nypost.com/wp-content/uploads/sites/2/2020/02/kyrie-irving-4.jpg?quality=80&strip=all",
description="<NAME> is an American professional basketball player for the Brooklyn Nets of the National Basketball Association (NBA). He was named the Rookie of the Year after being selected by the Cleveland Cavaliers with the first overall pick in the 2011 NBA draft. A seven-time All-Star and three-time member of the All-NBA Team, he won an NBA championship with the Cavaliers in 2016.",
image="https://static01.nyt.com/images/2021/10/08/lens/08nba-kyrie-01/merlin_195444735_cfee78bc-ed13-4a08-9267-501cc59d7a9c-superJumbo.jpg",
tier=1).save()
# Tier-1 player cards persisted via .save() -- NOTE(review): the Player model
# (an ORM/ODM document class, presumably) is defined elsewhere; names were
# redacted to "<NAME>" in this copy of the data.
# Kareem Abdul-Jabbar (per the description text): dominant post scorer/defender.
Player(name="<NAME>",
       nickname="Cap",
       height=86,
       attributes={
           'offense': {
               'layup': 84,
               'post_shot': 94,
               'mid_range': 88,
               'three': 30,
               'handles': 48,
               'pass': 58,
               'off_rebound': 95,
               'dunk': 95
           },
           'defense': {
               'def_rebound': 96,
               'inside_defense': 96,
               'post_defense': 96,
               'outside_defense': 37,
               'block': 88,
               'steal': 56
           },
           'physical': {
               'speed': 78,
               'vertical': 68,
               'strength': 83
           }
       }, tendencies={
           'offense': {
               'shoot_mid': 50,
               'shoot_three': 20,
               'pass': 40,
               'attack_rim': 99,
               'post_up': 99
           },
           'defense': {
               'steal': 50,
               'block': 95,
               'intercept': 65
           }
       }, avatar="https://fadeawayworld.net/.image/ar_1:1%2Cc_fill%2Ccs_srgb%2Cfl_progressive%2Cq_auto:good%2Cw_1200/MTg0NDg1MzE4NzEwMjczMTQ0/kareem-abdul-jabbar-iso-1981.jpg",
       description="<NAME> is an American former professional basketball player for the Milwaukee Bucks and the Los Angeles Lakers. During his career as a center, Abdul-Jabbar was a record six-time NBA Most Valuable Player (MVP), a record 19-time NBA All-Star, a 15-time All-NBA selection, and an 11-time NBA All-Defensive Team member. A member of six NBA championship teams as a player and two more as an assistant coach, Abdul-Jabbar twice was voted NBA Finals MVP.",
       image="https://www.doubleclutch.uk/wp-content/uploads/2020/04/Kareem_Abdul_Jabbar-e1587670066296.jpg",
       tier=1).save()
# "Melo": elite mid-range/post scorer.
Player(name="<NAME>",
       nickname="Melo",
       height=80,
       attributes={
           'offense': {
               'layup': 89,
               'post_shot': 95,
               'mid_range': 97,
               'three': 80,
               'handles': 94,
               'pass': 84,
               'off_rebound': 45,
               'dunk': 85
           },
           'defense': {
               'def_rebound': 78,
               'inside_defense': 73,
               'post_defense': 73,
               'outside_defense': 72,
               'block': 68,
               'steal': 78
           },
           'physical': {
               'speed': 92,
               'vertical': 85,
               'strength': 82
           }
       }, tendencies={
           'offense': {
               'shoot_mid': 75,
               'shoot_three': 60,
               'pass': 30,
               'attack_rim': 80,
               'post_up': 95
           },
           'defense': {
               'steal': 40,
               'block': 60,
               'intercept': 39
           }
       }, avatar="https://a.espncdn.com/combiner/i?img=%2Fphoto%2F2021%2F0224%2Fr818864_1296x729_16%2D9.jpg",
       description="<NAME> is an American professional basketball player for the Los Angeles Lakers of the National Basketball Association (NBA). He has been named an NBA All-Star ten times and an All-NBA Team member six times. He played college basketball for the Syracuse Orange, winning a national championship as a freshman in 2003 while being named the NCAA Tournament's Most Outstanding Player. During the NBA's 75th anniversary, he was named one of the 75 Greatest Players in NBA History.[2]",
       image="https://vip.nypost.com/wp-content/uploads/sites/2/2020/03/carmelo-anthony.jpg?quality=90&strip=all",
       tier=1).save()
# "Splash Brother": elite shooter card.
Player(name="<NAME>",
       nickname="Splash Brother",
       height=78,
       attributes={
           'offense': {
               'layup': 88,
               'post_shot': 78,
               'mid_range': 94,
               'three': 97,
               'handles': 76,
               'pass': 77,
               'off_rebound': 36,
               'dunk': 65
           },
           'defense': {
               'def_rebound': 47,
               'inside_defense': 64,
               'post_defense': 64,
               'outside_defense': 91,
               'block': 51,
               'steal': 59
           },
           'physical': {
               'speed': 69,
               'vertical': 67,
               'strength': 55
           }
       }, tendencies={
           'offense': {
               'shoot_mid': 80,
               'shoot_three': 90,
               'pass': 30,
               'attack_rim': 70,
               'post_up': 40
           },
           'defense': {
               'steal': 80,
               'block': 50,
               'intercept': 76
           }
       }, avatar="https://cdn.nba.com/manage/2020/11/klay-thompson-iso-1568x1045.jpg",
       description="<NAME> is an American professional basketball player for the Golden State Warriors of the National Basketball Association (NBA). He is credited as one of the greatest shooters in NBA history.[2][3] A three-time NBA champion with the Warriors, he is a five-time NBA All-Star and a two-time All-NBA Third Team honoree. He has also been named to the NBA All-Defensive Second Team.",
       image="https://www.nbcsports.com/sites/rsnunited/files/article/hero/Klay-Thompson-Shooting-USATSI-17087442.jpg",
       tier=1).save()
# "CB4" (Chris Bosh per the description): stretch big.
Player(name="<NAME>",
       nickname="CB4",
       height=83,
       attributes={
           'offense': {
               'layup': 88,
               'post_shot': 80,
               'mid_range': 84,
               'three': 79,
               'handles': 65,
               'pass': 69,
               'off_rebound': 87,
               'dunk': 85
           },
           'defense': {
               'def_rebound': 87,
               'inside_defense': 85,
               'post_defense': 85,
               'outside_defense': 60,
               'block': 78,
               'steal': 59
           },
           'physical': {
               'speed': 48,
               'vertical': 65,
               'strength': 85
           }
       }, tendencies={
           'offense': {
               'shoot_mid': 70,
               'shoot_three': 50,
               'pass': 55,
               'attack_rim': 88,
               'post_up': 80
           },
           'defense': {
               'steal': 60,
               'block': 85,
               'intercept': 45
           }
       }, avatar="http://images.thepostgame.com/assets/public/styles/slideshow_image/public/GettyImages-150590242-compressor.jpg",
       description="While at Toronto, Bosh became a five-time NBA All-Star, was named to the All-NBA Second Team once, played for the U.S. national team (with whom he won a gold medal at the 2008 Summer Olympics), and supplanted former fan favorite Vince Carter as the face and leader of the Raptors franchise. In the 2006–07 season, Bosh led the Raptors to their first playoff appearance in five years and their first-ever division title. He left Toronto in 2010 as the franchise's all-time leader in points, rebounds, blocks, and minutes played.",
       image="https://www.cleveland.com/resizer/MH_H8zEUC6FWyOtbQDo1bzWCuaY=/1280x0/smart/advancelocal-adapter-image-uploads.s3.amazonaws.com/image.cleveland.com/home/cleve-media/width2048/img/cavs_impact/photo/chris-bosh-2f5fc84c60ad8d8f.jpg",
       tier=1).save()
# "Russ" (Bill Russell per the description): all-time defensive anchor.
Player(name="<NAME>",
       nickname="Russ",
       height=82,
       attributes={
           'offense': {
               'layup': 76,
               'post_shot': 65,
               'mid_range': 45,
               'three': 29,
               'handles': 45,
               'pass': 78,
               'off_rebound': 97,
               'dunk': 95
           },
           'defense': {
               'def_rebound': 98,
               'inside_defense': 98,
               'post_defense': 98,
               'outside_defense': 55,
               'block': 99,
               'steal': 72
           },
           'physical': {
               'speed': 84,
               'vertical': 88,
               'strength': 94
           }
       }, tendencies={
           'offense': {
               'shoot_mid': 30,
               'shoot_three': 10,
               'pass': 60,
               'attack_rim': 80,
               'post_up': 75
           },
           'defense': {
               'steal': 70,
               'block': 99,
               'intercept': 87
           }
       }, avatar="https://www.giantbomb.com/a/uploads/square_medium/14/141373/2681643-0711686871-dna06.jpg",
       description="A five-time NBA Most Valuable Player and a 12-time All-Star, he was the centerpiece of the Celtics dynasty that won eleven NBA championships during his 13-year career. Russell and <NAME> of the National Hockey League are tied for the record of the most championships won by an athlete in a North American sports league. Russell led the San Francisco Dons to two consecutive NCAA championships in 1955 and 1956, and he captained the gold-medal winning U.S. national basketball team at the 1956 Summer Olympics.",
       image="https://s-i.huffpost.com/gadgets/slideshows/304026/slide_304026_2589444_free.jpg",
       tier=1).save()
# "The Worm" (Dennis Rodman per the description): rebounding specialist.
Player(name="<NAME>",
       nickname="The Worm",
       height=79,
       attributes={
           'offense': {
               'layup': 55,
               'post_shot': 48,
               'mid_range': 65,
               'three': 62,
               'handles': 45,
               'pass': 61,
               'off_rebound': 99,
               'dunk': 90
           },
           'defense': {
               'def_rebound': 98,
               'inside_defense': 98,
               'post_defense': 98,
               'outside_defense': 77,
               'block': 50,
               'steal': 57
           },
           'physical': {
               'speed': 78,
               'vertical': 88,
               'strength': 86
           }
       }, tendencies={
           'offense': {
               'shoot_mid': 30,
               'shoot_three': 10,
               'pass': 30,
               'attack_rim': 40,
               'post_up': 60
           },
           'defense': {
               'steal': 70,
               'block': 99,
               'intercept': 87
           }
       }, avatar="https://images.7news.com.au/publication/C-1046789/59774a3762246f68c2bceca98719361c6a8d46cc.jpg?imwidth=650&impolicy=sevennews_v2",
       description="Rodman played at the small forward position in his early years before becoming a power forward. He earned NBA All-Defensive First Team honors seven times and won the NBA Defensive Player of the Year Award twice. He also led the NBA in rebounds per game for a record seven consecutive years and won five NBA championships.",
       image="https://www.rollingstone.com/wp-content/uploads/2020/04/dennis-rodman-tattoo-t-shirt.jpg",
       tier=1).save()
# "Zeke": playmaking point guard.
Player(name="<NAME>",
       nickname="Zeke",
       height=73,
       attributes={
           'offense': {
               'layup': 97,
               'post_shot': 29,
               'mid_range': 84,
               'three': 72,
               'handles': 97,
               'pass': 94,
               'off_rebound': 35,
               'dunk': 65
           },
           'defense': {
               'def_rebound': 45,
               'inside_defense': 40,
               'post_defense': 40,
               'outside_defense': 81,
               'block': 35,
               'steal': 71
           },
           'physical': {
               'speed': 92,
               'vertical': 74,
               'strength': 60
           }
       }, tendencies={
           'offense': {
               'shoot_mid': 80,
               'shoot_three': 65,
               'pass': 79,
               'attack_rim': 90,
               'post_up': 40
           },
           'defense': {
               'steal': 85,
               'block': 30,
               'intercept': 74
           }
       }, avatar="https://th.bing.com/th/id/OIP.xkNKvPjHyXt-whUQbzK6bwHaEK?pid=ImgDet&rs=1",
       description="A point guard, the 12-time NBA All-Star was named one of the 50 Greatest Players in NBA History as well as the 75 Greatest Players, and inducted into the Naismith Memorial Basketball Hall of Fame. He played his entire professional career for the Detroit Pistons of the National Basketball Association (NBA).",
       image="https://th.bing.com/th/id/R.57c9dbd49ba36969e21aef35228a4467?rik=7lGk1xMz0OFqGA&riu=http%3a%2f%2fimages.complex.com%2fcomplex%2fimage%2fupload%2fc_limit%2cw_680%2ff_auto%2cfl_lossy%2cpg_1%2cq_auto%2fp7pkha6ef63hoq2vyjhr.jpg&ehk=wkCsEHX%2foJ4yXQ3PyY%2bzbeufl26SuWgU%2ffeSJL0bsnQ%3d&risl=&pid=ImgRaw&r=0",
       tier=1).save()
Player(name="<NAME>",
nickname="Big Pat",
height=84,
attributes={
'offense': {
'layup': 69,
'post_shot': 98,
'mid_range': 83,
'three': 35,
'handles': 40,
'pass': 64,
'off_rebound': 67,
'dunk': 95
},
'defense': {
'def_rebound': 92,
'inside_defense': 94,
'post_defense': 94,
'outside_defense': 35,
'block': 90,
'steal': 59
},
'physical': {
'speed': 45,
'vertical': 46,
'strength': 92
}
}, tendencies={
'offense': {
'shoot_mid': 40,
'shoot_three': 10,
'pass': 50,
'attack_rim': 78,
'post_up': 99
},
'defense': {
'steal': 55,
'block': 89,
'intercept': 59
}
}, avatar="https://comicvine.gamespot.com/a/uploads/square_medium/11/114183/5167058-20150805-1-ewing.jpg",
description="He had a seventeen-year NBA career, predominantly playing for the New York Knicks, where he was an eleven-time all-star and named to seven All-NBA teams. The Knicks appeared in the NBA Finals twice (1994 and 1999) during his tenure. He won Olympic gold medals as a member of the 1984 and 1992 United States men's Olympic basketball teams. Ewing was selected as one of the | |
<filename>WassersteinGAN/src/utils/data_utils.py
import cv2
import glob
import h5py
import imageio
import matplotlib.pylab as plt
import matplotlib.gridspec as gridspec
import numpy as np
import os
from scipy import stats
from keras.datasets import mnist, cifar10
from keras.optimizers import Adam, SGD, RMSprop
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import np_utils
def normalization(X, image_data_format="channels_last"):
    """Scale images from [0, 255] to [-1, 1].

    The original implementation had byte-identical branches for both
    channel orderings, so ``image_data_format`` is kept only for
    interface compatibility and does not affect the result.
    """
    X = X / 255.
    # Map [0, 1] -> [-1, 1]; same formula regardless of channel ordering.
    return (X - 0.5) / 0.5
def inverse_normalization(X):
    """Map images from [-1, 1] back to uint8 pixel values in [0, 255]."""
    rescaled = (X * 0.5 + 0.5) * 255.
    return rescaled.astype(np.uint8)
def load_mnist(image_data_format):
    """Load MNIST, reshape for the given channel ordering, normalize to [-1, 1] and one-hot encode labels."""
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    # Insert the single grayscale channel axis where the backend expects it.
    if image_data_format == 'channels_first':
        sample_shape = (1, 28, 28)
    else:
        sample_shape = (28, 28, 1)
    X_train = X_train.reshape((X_train.shape[0],) + sample_shape).astype('float32')
    X_test = X_test.reshape((X_test.shape[0],) + sample_shape).astype('float32')
    X_train = normalization(X_train, image_data_format)
    X_test = normalization(X_test, image_data_format)
    # Derive class count from the labels actually present.
    nb_classes = len(np.unique(np.hstack((y_train, y_test))))
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    return X_train, Y_train, X_test, Y_test
def load_cifar10(image_data_format):
    """Load CIFAR-10, reshape for the given channel ordering, normalize to [-1, 1] and one-hot encode labels."""
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()
    # Put the 3 color channels where the backend expects them.
    if image_data_format == 'channels_first':
        sample_shape = (3, 32, 32)
    else:
        sample_shape = (32, 32, 3)
    X_train = X_train.reshape((X_train.shape[0],) + sample_shape).astype('float32')
    X_test = X_test.reshape((X_test.shape[0],) + sample_shape).astype('float32')
    X_train = normalization(X_train, image_data_format)
    X_test = normalization(X_test, image_data_format)
    # CIFAR labels come as column vectors, hence vstack (vs hstack for MNIST).
    nb_classes = len(np.unique(np.vstack((y_train, y_test))))
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    return X_train, Y_train, X_test, Y_test
def load_celebA(img_dim, image_data_format):
    """Load preprocessed CelebA images from HDF5 and normalize to [-1, 1]."""
    h5_path = "../../data/processed/CelebA_%s_data.h5" % img_dim
    with h5py.File(h5_path, "r") as hf:
        X_real_train = hf["data"][:].astype(np.float32)
    X_real_train = normalization(X_real_train, image_data_format)
    # The HDF5 file stores NCHW; move channels last when requested.
    if image_data_format == "channels_last":
        X_real_train = X_real_train.transpose(0, 2, 3, 1)
    return X_real_train
def load_image_dataset(dset, img_dim, image_data_format, batch_size):
    """Return (real images, optional keras batch generator) for a named dataset or image directory.

    For the known datasets the generator is None and all images are returned
    in memory; any other value of dset is treated as a directory of images.
    """
    X_batch_gen = None
    loaders = {
        "celebA": lambda: load_celebA(img_dim, image_data_format),
        "mnist": lambda: load_mnist(image_data_format)[0],
        "cifar10": lambda: load_cifar10(image_data_format)[0],
    }
    if dset in loaders:
        X_real_train = loaders[dset]()
    else:
        X_batch_gen = data_generator_from_dir(dset, (img_dim, img_dim), batch_size, image_data_format)
        X_real_train = next(X_batch_gen)
    return X_real_train, X_batch_gen
def data_generator_from_dir(data_dir, target_size, batch_size, image_data_format="channels_last"):
    """Create a keras ImageDataGenerator flow over all images under data_dir.

    Images are normalized to [-1, 1] via ``normalization``.

    Raises:
        ValueError: if the number of images is not a multiple of batch_size
            (the training loop assumes full batches), or if keras finds no
            images (images must live in at least one subdirectory).
    """
    print("Loading data from", data_dir)
    # Check if number of files in data_dir is a multiple of batch_size
    number_of_images = sum(len(files) for _, _, files in os.walk(data_dir))
    if number_of_images % batch_size != 0:
        raise ValueError("ERROR: # of images in " + str(data_dir) + " found by keras.ImageDataGenerator is not a multiple of the batch_size ( " + str(batch_size) + " )!\nFound " + str(number_of_images) + " images. Add " + str(batch_size - number_of_images % batch_size) + " more image(s), or delete " + str(number_of_images % batch_size) + " image(s).")
    # Normalize every image on the fly as it is read from disk.
    data_generator_args = dict(preprocessing_function=normalization)
    image_datagen = ImageDataGenerator(**data_generator_args)
    # class_mode=None: yield only images, no labels (GAN training).
    image_data_generator = image_datagen.flow_from_directory(data_dir, target_size=target_size, batch_size=batch_size, class_mode=None, seed=29)
    if len(image_data_generator) == 0:
        # Typo fix: "modre" -> "more" in the original message.
        raise ValueError("ERROR: # of images found by keras.ImageDataGenerator is 0!\nPlease save the images in the data_dir into at least one more directory, preferably into classes. Given data_dir:", data_dir)
    return image_data_generator
def load_toy(n_mixture=8, std=0.01, radius=1.0, pts_per_mixture=5000):
    """Sample a 2D Gaussian-mixture "ring" toy dataset.

    Mixture means are evenly spaced on a circle of the given radius; each
    component contributes pts_per_mixture points with covariance std * I.
    Returns an (n_mixture * pts_per_mixture, 2) array, grouped by component.
    """
    angles = np.linspace(0, 2 * np.pi, n_mixture + 1)[:-1]
    cov = std * np.eye(2)
    samples = []
    for theta in angles:
        center = np.array([radius * np.sin(theta), radius * np.cos(theta)])
        samples.append(np.random.multivariate_normal(center, cov, pts_per_mixture))
    return np.concatenate(samples, axis=0)
def get_optimizer(opt, lr):
    """Return a keras optimizer instance for the given name and learning rate.

    Raises:
        ValueError: if opt is not one of "SGD", "RMSprop" or "Adam".
            (The original silently returned None, which only failed later
            inside model.compile.)
    """
    if opt == "SGD":
        return SGD(lr=lr)
    if opt == "RMSprop":
        return RMSprop(lr=lr)
    if opt == "Adam":
        # Keras spells the first-moment decay "beta_1"; the original passed
        # "beta1", which raises a TypeError inside the Adam constructor.
        # beta_1=0.5 is the usual GAN setting (Radford et al., DCGAN).
        return Adam(lr=lr, beta_1=0.5)
    raise ValueError("Unknown optimizer: %s" % opt)
def gen_batch(X, X_batch_gen, batch_size):
    """Infinite generator of training batches.

    Draws random batches (no repeats within a batch) from the in-memory
    array X, or defers to the keras directory generator when one is given.
    """
    while True:
        if X_batch_gen is not None:
            yield next(X_batch_gen)
        else:
            idx = np.random.choice(X.shape[0], batch_size, replace=False)
            yield X[idx]
def sample_noise(noise_scale, batch_size, noise_dim):
    """Draw a (batch_size, noise_dim[0]) matrix of centered Gaussian noise."""
    shape = (batch_size, noise_dim[0])
    return np.random.normal(loc=0.0, scale=noise_scale, size=shape)
def get_disc_batch(X_real_batch, generator_model, batch_counter, batch_size, noise_dim, noise_scale=0.5):
    """Produce one discriminator training pair: (real images, generated images).

    batch_counter is unused but kept for interface compatibility with the
    training loop.
    """
    # Fresh noise -> fake images from the generator.
    z = sample_noise(noise_scale, batch_size, noise_dim)
    X_disc_gen = generator_model.predict(z, batch_size=batch_size)
    # Trim the real batch to the same size.
    X_disc_real = X_real_batch[:batch_size]
    return X_disc_real, X_disc_gen
def save_model_weights(generator_model, discriminator_model, DCGAN_model, e,
                       save_weights_every_n_epochs=5, save_only_last_n_weights=10, model_name="WGAN"):
    """Purge old checkpoints, then save all three models every n epochs.

    NOTE(review): '%5d' space-pads the epoch in the filename; presumably
    '%05d' was intended — confirm before changing, since purge_weights
    globs and sorts these names.
    """
    purge_weights(generator_model, discriminator_model, DCGAN_model, save_only_last_n_weights, model_name)
    if (e + 1) % save_weights_every_n_epochs != 0:
        return
    print("Saving weight...")
    model_path = os.path.join("../../models", model_name)
    for model in (generator_model, discriminator_model, DCGAN_model):
        weights_path = os.path.join(model_path, '%s_epoch%5d.h5' % (model.name, e))
        model.save_weights(weights_path, overwrite=True)
def purge_weights(generator_model, discriminator_model, DCGAN_model, n, model_name):
    """Delete all but the newest n saved weight files for each of the three models."""
    for model in (generator_model, discriminator_model, DCGAN_model):
        weight_files = sorted(glob.glob('../../models/%s/%s*' % (model_name, model.name)))
        # Everything except the (lexicographically) last n files is stale.
        for stale in weight_files[:-n]:
            os.remove(os.path.realpath(stale))
def plot_generated_batch(X_real, generator_model, epoch_number, batch_size,
                         noise_dim, image_data_format, model_name,
                         noise_scale=0.5, suffix='training', MAX_FRAMES_PER_GIF=100):
    """Save a side-by-side grid of generated vs real images and extend a progress GIF.

    Writes <model_name>_current_batch_<suffix>.png and appends one frame to
    <model_name>_<suffix>.gif under ../../figures/<model_name>/; when the GIF
    exceeds MAX_FRAMES_PER_GIF frames it is split into numbered files.

    Fixes vs original: the bare ``except:`` around the GIF read (which also
    swallowed KeyboardInterrupt/SystemExit) is narrowed to ``except Exception``,
    and dead commented-out plotting code is removed.
    """
    # Generate images: 8 fakes on top, 8 reals below, tiled 4 per row.
    X_gen = sample_noise(noise_scale, batch_size, noise_dim)
    X_gen = generator_model.predict(X_gen)
    X_real = inverse_normalization(X_real)
    X_gen = inverse_normalization(X_gen)
    Xg = X_gen[:8]
    Xr = X_real[:8]
    if image_data_format == "channels_last":
        X = np.concatenate((Xg, Xr), axis=0)
        list_rows = []
        for i in range(int(X.shape[0] / 4)):
            Xr = np.concatenate([X[k] for k in range(4 * i, 4 * (i + 1))], axis=1)
            list_rows.append(Xr)
        Xr = np.concatenate(list_rows, axis=0)
    if image_data_format == "channels_first":
        X = np.concatenate((Xg, Xr), axis=0)
        list_rows = []
        for i in range(int(X.shape[0] / 4)):
            Xr = np.concatenate([X[k] for k in range(4 * i, 4 * (i + 1))], axis=2)
            list_rows.append(Xr)
        Xr = np.concatenate(list_rows, axis=1)
        # Convert to HWC so cv2/imageio can handle it below.
        Xr = Xr.transpose(1, 2, 0)
    # Render the epoch number into a 32px banner above the image grid.
    text_image = cv2.putText(np.zeros((32, Xr.shape[1], Xr.shape[2])),
                             '%s epoch' % str(epoch_number), (10, 10), cv2.FONT_HERSHEY_SIMPLEX, .4, (255, 255, 255), 1, cv2.LINE_AA).astype('uint8')
    image = np.vstack((text_image, Xr))
    imageio.imsave(os.path.join("../../figures", model_name, model_name + "_current_batch_%s.png" % suffix), image)
    # Make gif: re-read any existing frames so the GIF grows across calls.
    gif_frames = []
    try:
        gif_frames_reader = imageio.get_reader(os.path.join("../../figures", model_name, model_name + "_%s.gif" % suffix))
        for frame in gif_frames_reader:
            gif_frames.append(frame[:, :, :3])
    except Exception:
        # No previous GIF yet (or unreadable) — start a fresh one.
        pass
    # Append a new frame: banner + first generated image.
    im = cv2.putText(np.concatenate((np.zeros((32, Xg[0].shape[1], Xg[0].shape[2])), Xg[0]), axis=0),
                     '%s epoch' % str(epoch_number), (10, 10), cv2.FONT_HERSHEY_SIMPLEX, .4, (255, 255, 255), 1, cv2.LINE_AA).astype('uint8')
    gif_frames.append(im)
    # If frames exceeds, save as different file
    if len(gif_frames) > MAX_FRAMES_PER_GIF:
        print("Splitting the GIF...")
        gif_frames_00 = gif_frames[:MAX_FRAMES_PER_GIF]
        num_of_gifs_already_saved = len(glob.glob(os.path.join("../../figures", model_name, model_name + "_%s_*.gif" % suffix)))
        print("Saving", os.path.join("../../figures", model_name, model_name + "_%s_%03d.gif" % (suffix, num_of_gifs_already_saved)))
        imageio.mimsave(os.path.join("../../figures", model_name, model_name + "_%s_%03d.gif" % (suffix, num_of_gifs_already_saved)), gif_frames_00)
        gif_frames = gif_frames[MAX_FRAMES_PER_GIF:]
    # Save gif
    print("Saving", os.path.join("../../figures", model_name, model_name + "_%s.gif" % suffix))
    imageio.mimsave(os.path.join("../../figures", model_name, model_name + "_%s.gif" % suffix), gif_frames)
def plot_losses(disc_losses, disc_losses_real, disc_losses_gen, gen_losses,
                model_name, init_epoch=0):
    """Plot discriminator/generator loss curves and save them as a PNG."""
    epochs = init_epoch + np.arange(len(disc_losses))
    plt.figure()
    curves = [(disc_losses, 2, 'D'),
              (disc_losses_real, 1, 'D_real'),
              (disc_losses_gen, 1, 'D_gen'),
              (gen_losses, 2, 'G')]
    for series, width, label in curves:
        plt.plot(epochs, series, linewidth=width, label=label)
    plt.legend()
    plt.title("Losses")
    plt.xlabel("Epochs")
    out_path = os.path.join("../../figures", model_name, model_name + "_losses.png")
    plt.savefig(out_path, bbox_inches='tight')
    plt.clf()
    plt.close()
def plot_generated_toy_batch(X_real, generator_model, discriminator_model, noise_dim, gen_iter, noise_scale=0.5):
    """Visualize the toy 2D mixture: real-data KDE, discriminator contour, generated scatter.

    Saves the figure to ../../figures/toy_dataset_iter<gen_iter>.jpg.
    NOTE(review): X_real is unused; the real-data KDE is recomputed from a
    fresh load_toy() sample instead — confirm whether that is intended.
    """
    # Generate images
    X_gen = sample_noise(noise_scale, 10000, noise_dim)
    X_gen = generator_model.predict(X_gen)
    # Get some toy data to plot KDE of real data
    data = load_toy(pts_per_mixture=200)
    x = data[:, 0]
    y = data[:, 1]
    xmin, xmax = -1.5, 1.5
    ymin, ymax = -1.5, 1.5
    # Peform the kernel density estimate on a 100x100 grid
    xx, yy = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
    positions = np.vstack([xx.ravel(), yy.ravel()])
    values = np.vstack([x, y])
    kernel = stats.gaussian_kde(values)
    f = np.reshape(kernel(positions).T, xx.shape)
    # Plot the contour of the real-data density
    fig = plt.figure(figsize=(10,10))
    plt.suptitle("Generator iteration %s" % gen_iter, fontweight="bold", fontsize=22)
    ax = fig.gca()
    ax.contourf(xx, yy, f, cmap='Blues', vmin=np.percentile(f,80), vmax=np.max(f), levels=np.linspace(0.25, 0.85, 30))
    # Also plot the contour of the discriminator
    delta = 0.025
    xmin, xmax = -1.5, 1.5
    ymin, ymax = -1.5, 1.5
    # Create mesh of (x, y) positions and score each with the discriminator
    XX, YY = np.meshgrid(np.arange(xmin, xmax, delta), np.arange(ymin, ymax, delta))
    arr_pos = np.vstack((np.ravel(XX), np.ravel(YY))).T
    # Get Z = predictions
    ZZ = discriminator_model.predict(arr_pos)
    ZZ = ZZ.reshape(XX.shape)
    # Plot contour
    ax.contour(XX, YY, ZZ, cmap="Blues", levels=np.linspace(0.25, 0.85, 10))
    dy, dx = np.gradient(ZZ)  # computed for the (disabled) streamline overlay below
    # Add streamlines
    # plt.streamplot(XX, YY, dx, dy, linewidth=0.5, cmap="magma", density=1, arrowsize=1)
    # Scatter generated data (first 1000 points only, to keep the plot readable)
    plt.scatter(X_gen[:1000, 0], X_gen[:1000, 1], s=20, color="coral", marker="o")
    # Proxy artists for the legend (the contours have no direct legend handles)
    l_gen = plt.Line2D((0,1),(0,0), color='coral', marker='o', linestyle='', markersize=20)
    l_D = plt.Line2D((0,1),(0,0), color='steelblue', linewidth=3)
    l_real = plt.Rectangle((0, 0), 1, 1, fc="steelblue")
    # Create legend from custom artist/label lists
    # bbox_to_anchor = (0.4, 1)
    ax.legend([l_real, l_D, l_gen], ['Real data KDE', 'Discriminator contour',
                                     'Generated data'], fontsize=18, loc="upper left")
    ax.set_xlim(xmin, xmax)
    ax.set_ylim(ymin, ymax + 0.8)  # extra headroom for the legend
    plt.savefig("../../figures/toy_dataset_iter%s.jpg" % gen_iter)
    plt.clf()
    plt.close()
if __name__ == '__main__':
data = load_toy(pts_per_mixture=200)
x = data[:, 0]
y = data[:, 1]
xmin, xmax = -1.5, 1.5
ymin, ymax = -1.5, 1.5
# Peform the kernel density estimate
xx, yy = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
positions = np.vstack([xx.ravel(), yy.ravel()])
values = np.vstack([x, y])
kernel = stats.gaussian_kde(values)
f = np.reshape(kernel(positions).T, xx.shape)
fig = plt.figure()
gen_it | |
FIXME: Should have more efficient open_tagfile() that
# does all checksums in one go while writing through,
# adding checksums after closing.
# Below probably OK for now as metadata files
# are not too large..?
checksums[SHA1] = checksum_copy(tag_file, hasher=hashlib.sha1)
tag_file.seek(0)
checksums[SHA256] = checksum_copy(tag_file, hasher=hashlib.sha256)
tag_file.seek(0)
checksums[SHA512] = checksum_copy(tag_file, hasher=hashlib.sha512)
rel_path = _posix_path(os.path.relpath(path, self.folder))
self.tagfiles.add(rel_path)
self.add_to_manifest(rel_path, checksums)
if timestamp is not None:
self._file_provenance[rel_path] = {"createdOn": timestamp.isoformat()}
def _ro_aggregates(self):
    # type: () -> List[Dict[str, Any]]
    """Gather dictionary of files to be added to the manifest.

    Covers three groups: bagged payload files (aggregated by sha1 hash),
    tag files under metadata/workflow/snapshot, and any externally added
    URI aggregates.
    """
    def guess_mediatype(rel_path):
        # type: (str) -> Dict[str, str]
        """Return the mediatypes (and conformsTo, when known) for a tag file path."""
        media_types = {
            # Adapted from
            # https://w3id.org/bundle/2014-11-05/#media-types
            "txt": TEXT_PLAIN,
            "ttl": 'text/turtle; charset="UTF-8"',
            "rdf": 'application/rdf+xml',
            "json": 'application/json',
            "jsonld": 'application/ld+json',
            "xml": 'application/xml',
            ##
            "cwl": 'text/x+yaml; charset="UTF-8"',
            "provn": 'text/provenance-notation; charset="UTF-8"',
            "nt": 'application/n-triples',
        }
        conforms_to = {
            "provn": 'http://www.w3.org/TR/2013/REC-prov-n-20130430/',
            "cwl": 'https://w3id.org/cwl/',
        }
        # Stricter PROV mapping, applied only to files under the provenance folder.
        prov_conforms_to = {
            "provn": 'http://www.w3.org/TR/2013/REC-prov-n-20130430/',
            "rdf": 'http://www.w3.org/TR/2013/REC-prov-o-20130430/',
            "ttl": 'http://www.w3.org/TR/2013/REC-prov-o-20130430/',
            "nt": 'http://www.w3.org/TR/2013/REC-prov-o-20130430/',
            "jsonld": 'http://www.w3.org/TR/2013/REC-prov-o-20130430/',
            "xml": 'http://www.w3.org/TR/2013/NOTE-prov-xml-20130430/',
            "json": 'http://www.w3.org/Submission/2013/SUBM-prov-json-20130424/',
        }
        extension = rel_path.rsplit(".", 1)[-1].lower()  # type: Optional[str]
        if extension == rel_path:
            # No ".", no extension
            extension = None
        local_aggregate = {}  # type: Dict[str, Any]
        if extension in media_types:
            local_aggregate["mediatype"] = media_types[extension]
        if extension in conforms_to:
            # TODO: Open CWL file to read its declared "cwlVersion", e.g.
            # cwlVersion = "v1.0"
            local_aggregate["conformsTo"] = conforms_to[extension]
        if (rel_path.startswith(_posix_path(PROVENANCE))
                and extension in prov_conforms_to):
            if ".cwlprov" in rel_path:
                # Our own!
                local_aggregate["conformsTo"] = [prov_conforms_to[extension], CWLPROV_VERSION]
            else:
                # Some other PROV
                # TODO: Recognize ProvOne etc.
                local_aggregate["conformsTo"] = prov_conforms_to[extension]
        return local_aggregate

    aggregates = []  # type: List[Dict]
    # 1) Payload (data/) files that were bagged.
    for path in self.bagged_size.keys():
        aggregate_dict = {}  # type: Dict[str, Any]
        (folder, filename) = posixpath.split(path)
        # NOTE: Here we end up aggregating the abstract
        # data items by their sha1 hash, so that it matches
        # the entity() in the prov files.
        # TODO: Change to nih:sha-256; hashes
        # https://tools.ietf.org/html/rfc6920#section-7
        aggregate_dict["uri"] = 'urn:hash::sha1:' + filename
        aggregate_dict["bundledAs"] = {
            # The arcp URI is suitable ORE proxy; local to this Research Object.
            # (as long as we don't also aggregate it by relative path!)
            "uri": self.base_uri + path,
            # relate it to the data/ path
            "folder": "/%s/" % folder,
            "filename": filename,
        }
        if path in self._file_provenance:
            # Made by workflow run, merge captured provenance
            aggregate_dict["bundledAs"].update(self._file_provenance[path])
        else:
            # Probably made outside wf run, part of job object?
            pass
        if path in self._content_types:
            aggregate_dict["mediatype"] = self._content_types[path]
        aggregates.append(aggregate_dict)
    # 2) Tag files (metadata/, workflow/, snapshot/).
    for path in self.tagfiles:
        if (not (path.startswith(METADATA) or path.startswith(WORKFLOW) or
                 path.startswith(SNAPSHOT))):
            # probably a bagit file
            continue
        if path == posixpath.join(METADATA, "manifest.json"):
            # Should not really be there yet! But anyway, we won't
            # aggregate it.
            continue
        rel_aggregates = {}  # type: Dict[str, Any]
        # These are local paths like metadata/provenance - but
        # we need to relativize them for our current directory for
        # as we are saved in metadata/manifest.json
        uri = posixpath.relpath(path, METADATA)
        rel_aggregates["uri"] = uri
        rel_aggregates.update(guess_mediatype(path))
        if path in self._file_provenance:
            # Propagate file provenance (e.g. timestamp)
            rel_aggregates.update(self._file_provenance[path])
        elif not path.startswith(SNAPSHOT):
            # make new timestamp?
            rel_aggregates.update(self._self_made())
        aggregates.append(rel_aggregates)
    # 3) External URIs registered via add_uri().
    aggregates.extend(self._external_aggregates)
    return aggregates
def add_uri(self, uri, timestamp=None):
    # type: (str, Optional[datetime.datetime]) -> Dict
    """Aggregate an external URI into the manifest and return its entry."""
    self.self_check()
    aggregate = self._self_made(timestamp=timestamp)
    aggregate["uri"] = uri
    self._external_aggregates.append(aggregate)
    return aggregate
def add_annotation(self, about, content, motivated_by="oa:describing"):
    # type: (str, List[str], str) -> str
    """Cheap URI relativize for current directory and /."""
    self.self_check()
    metadata_base = self.base_uri + METADATA + "/"
    # Rewrite content URIs relative to metadata/ (and ../ for the RO root).
    relativized = [c.replace(metadata_base, "").replace(self.base_uri, "../")
                   for c in content]
    uri = uuid.uuid4().urn
    self.annotations.append({
        "uri": uri,
        "about": about,
        "content": relativized,
        "oa:motivatedBy": {"@id": motivated_by}
    })
    return uri
def _ro_annotations(self):
    # type: () -> List[Dict]
    """Build the RO bundle annotations: description, provenance links, and workflow highlights."""
    anns = []  # type: List[Dict]
    # The RO itself is described by its root folder.
    anns.append({
        "uri": uuid.uuid4().urn,
        "about": self.ro_uuid.urn,
        "content": "/",
        # https://www.w3.org/TR/annotation-vocab/#named-individuals
        "oa:motivatedBy": {"@id": "oa:describing"}
    })
    # How was it run?
    # FIXME: Only primary*
    prov_files = [posixpath.relpath(p, METADATA)
                  for p in self.tagfiles
                  if p.startswith(_posix_path(PROVENANCE)) and "/primary." in p]
    anns.append({
        "uri": uuid.uuid4().urn,
        "about": self.ro_uuid.urn,
        "content": prov_files,
        # Modulation of https://www.w3.org/TR/prov-aq/
        "oa:motivatedBy": {"@id": "http://www.w3.org/ns/prov#has_provenance"}
    })
    packed_path = posixpath.join("..", WORKFLOW, "packed.cwl")
    # Where is the main workflow?
    anns.append({
        "uri": uuid.uuid4().urn,
        "about": packed_path,
        "oa:motivatedBy": {"@id": "oa:highlighting"}
    })
    anns.append({
        "uri": uuid.uuid4().urn,
        "about": self.ro_uuid.urn,
        "content": [packed_path,
                    posixpath.join("..", WORKFLOW, "primary-job.json")],
        "oa:motivatedBy": {"@id": "oa:linking"}
    })
    # Add user-added annotations at end
    anns.extend(self.annotations)
    return anns
def _authored_by(self):
# type: () -> Dict
authored_by = {}
if self.orcid:
authored_by["orcid"] = self.orcid
if self.full_name:
authored_by["name"] = self.full_name
if not self.orcid:
authored_by["uri"] = USER_UUID
if authored_by:
return {"authoredBy": authored_by}
return {}
def _write_ro_manifest(self):
    # type: () -> None
    """Serialize the research object manifest to metadata/manifest.json.

    Combines aggregates, annotations, authorship and self-made provenance
    into a single JSON-LD document written through write_bag_file (so it is
    checksummed like any other tag file).
    """
    # Does not have to be this order, but it's nice to be consistent
    manifest = OrderedDict()  # type: Dict[str, Any]
    # @base makes relative URIs in the manifest resolve against the
    # metadata/ folder of this RO's base URI.
    manifest["@context"] = [
        {"@base": "%s%s/" % (self.base_uri, _posix_path(METADATA))},
        "https://w3id.org/bundle/context"
    ]
    manifest["id"] = "/"
    manifest["conformsTo"] = CWLPROV_VERSION
    filename = "manifest.json"
    manifest["manifest"] = filename
    manifest.update(self._self_made())
    manifest.update(self._authored_by())
    manifest["aggregates"] = self._ro_aggregates()
    manifest["annotations"] = self._ro_annotations()
    json_manifest = json_dumps(manifest, indent=4, ensure_ascii=False)
    rel_path = posixpath.join(_posix_path(METADATA), filename)
    with self.write_bag_file(rel_path) as manifest_file:
        manifest_file.write(json_manifest + "\n")
def _write_bag_info(self):
    # type: () -> None
    """Write the BagIt bag-info.txt metadata file for this research object."""
    with self.write_bag_file("bag-info.txt") as info_file:
        info_file.write(u"Bag-Software-Agent: %s\n" % self.cwltool_version)
        # FIXME: require sha-512 of payload to comply with profile?
        # FIXME: Update profile
        info_file.write(u"BagIt-Profile-Identifier: https://w3id.org/ro/bagit/profile\n")
        info_file.write(u"Bagging-Date: %s\n" % datetime.date.today().isoformat())
        info_file.write(u"External-Description: Research Object of CWL workflow run\n")
        if self.full_name:
            info_file.write(u"Contact-Name: %s\n" % self.full_name)
        # NOTE: We can't use the urn:uuid:{UUID} of the workflow run (a prov:Activity)
        # as identifier for the RO/bagit (a prov:Entity). However the arcp base URI is good.
        info_file.write(u"External-Identifier: %s\n" % self.base_uri)
        # Calculate size of data/ (assuming no external fetch.txt files)
        total_size = sum(self.bagged_size.values())
        num_files = len(self.bagged_size)
        # Payload-Oxum is the BagIt "octetstream sum": <total bytes>.<file count>
        info_file.write(u"Payload-Oxum: %d.%d\n" % (total_size, num_files))
    _logger.debug(u"[provenance] Generated bagit metadata: %s",
                  self.folder)
def generate_snapshot(self, prov_dep):
    # type: (MutableMapping[Text, Any]) -> None
    """Copy all of the CWL files to the snapshot/ directory.

    Walks a CWL file/dependency record: each "location" entry is copied into
    snapshot/ (recursing into "secondaryFiles"/"listing" sub-records) and
    registered as a tag file with the source's modification time.
    """
    self.self_check()
    for key, value in prov_dep.items():
        if key == "location" and value.split("/")[-1]:
            filename = value.split("/")[-1]
            path = os.path.join(self.folder, SNAPSHOT, filename)
            filepath = ''
            if "file://" in value:
                # Strip the URI scheme prefix to get a filesystem path.
                filepath = value[7:]
            else:
                filepath = value
            # FIXME: What if destination path already exists?
            if os.path.exists(filepath):
                try:
                    if os.path.isdir(filepath):
                        shutil.copytree(filepath, path)
                    else:
                        shutil.copy(filepath, path)
                    # Record the source's mtime as the snapshot's provenance timestamp.
                    timestamp = datetime.datetime.fromtimestamp(os.path.getmtime(filepath))
                    self.add_tagfile(path, timestamp)
                except PermissionError:
                    pass  # FIXME: avoids duplicate snapshotting; need better solution
        elif key in ("secondaryFiles", "listing"):
            for files in value:
                if isinstance(files, MutableMapping):
                    # Recurse into nested file records.
                    self.generate_snapshot(files)
                else:
                    pass
def packed_workflow(self, packed):  # type: (Text) -> None
    """Pack CWL description to generate re-runnable CWL object in RO."""
    self.self_check()
    rel_path = posixpath.join(_posix_path(WORKFLOW), "packed.cwl")
    # YAML is always UTF8, but json.dumps gives us str in py2, so open the
    # bag file in binary mode and encode explicitly.
    with self.write_bag_file(rel_path, encoding=None) as pack_file:
        pack_file.write(packed.encode(ENCODING))
    _logger.debug(u"[provenance] Added packed workflow: %s", rel_path)
def has_data_file(self, sha1hash):  # type: (str) -> bool
    """Confirms the presence of the given file in the RO."""
    # Payload files live under data/<first two hex chars>/<full sha1>.
    hash_path = os.path.join(self.folder, DATA, sha1hash[0:2], sha1hash)
    return os.path.isfile(hash_path)
def add_data_file(self, from_fp, timestamp=None, content_type=None):
    # type: (IO, Optional[datetime.datetime], Optional[str]) -> Text
    """Copy inputs to data/ folder.

    Streams from_fp into a temp file while checksumming, then renames it
    into the content-addressed data/<2-char prefix>/<hash> layout and
    registers it in the bagit manifest. Returns the relative posix path.
    """
    self.self_check()
    tmp_dir, tmp_prefix = os.path.split(self.temp_prefix)
    with tempfile.NamedTemporaryFile(
            prefix=tmp_prefix, dir=tmp_dir, delete=False) as tmp:
        checksum = checksum_copy(from_fp, tmp)
    # Calculate hash-based file path
    folder = os.path.join(self.folder, DATA, checksum[0:2])
    path = os.path.join(folder, checksum)
    # os.rename assumed safe, as our temp file should
    # be in same file system as our temp folder
    if not os.path.isdir(folder):
        os.makedirs(folder)
    os.rename(tmp.name, path)
    # Relative posix path
    # (to avoid \ on Windows)
    rel_path = _posix_path(os.path.relpath(path, self.folder))
    # Register in bagit checksum
    if Hasher == hashlib.sha1:
        self._add_to_bagit(rel_path, sha1=checksum)
    else:
        _logger.warning(
            u"[provenance] Unknown hash method %s for bagit manifest",
            Hasher)
        # Inefficient, bagit support need to checksum again
        self._add_to_bagit(rel_path)
    _logger.debug(u"[provenance] Added data file %s", path)
    if timestamp is not None:
        # Remember when the workflow produced this file.
        self._file_provenance[rel_path] = self._self_made(timestamp)
    _logger.debug(u"[provenance] Relative path for data file %s", rel_path)
    if content_type is not None:
        self._content_types[rel_path] = content_type
    return rel_path
def _self_made(self, timestamp=None):
# type: (Optional[datetime.datetime]) -> Dict[str, Any]
if timestamp is None:
timestamp = datetime.datetime.now()
return {
"createdOn": timestamp.isoformat(),
"createdBy": {"uri": self.engine_uuid,
"name": self.cwltool_version}
}
def add_to_manifest(self, rel_path, checksums):
# type: (Text, Dict[str,str]) -> None
"""Add files to the research object manifest."""
self.self_check()
if posixpath.isabs(rel_path):
raise ValueError("rel_path must be relative: %s" % rel_path)
if posixpath.commonprefix(["data/", rel_path]) == "data/":
# payload file, go to manifest
manifest = "manifest"
else:
# metadata file, go to tag | |
= np.eye(self.bsize, dtype=dtype)
# return W
# return _initializer
def checker_init(self):
    """Return an initializer producing a checkerboard 0/1 gate vector.

    Block w at grid position (c, k) gets gate 1 when c and k have the same
    parity, 0 otherwise.
    """
    def _initializer(shape, dtype=np.float32, partition_info=None):
        gate = np.empty(self.blocks, dtype=dtype)
        for w, (c, k) in enumerate(self.updat_list):
            # (c & 1) ^ (k & 1) == (c ^ k) & 1: 1 when parities differ.
            gate[w] = ((c ^ k) & 1) ^ 1
        return gate
    return _initializer
# grid = []
# for c in range(5):
# row = []
# for k in range(5):
# row.append((c & 1) ^ (k & 1) ^ 1)
# grid.append(row)
# for row in grid:
# print(row)
def fprop_test(self, I, W, gate=None):
    """Dense numpy reference for the block-sparse forward pass O = I x W.

    gate (per-block scalar, optional) is only honored on the feature-first
    layout (axis == 0), matching the original reference.
    """
    bsize = self.bsize
    if self.axis:
        # Feature axis last: I is (N, CB*bsize).
        I = I.reshape((-1, self.CB, bsize))
        O = np.zeros((I.shape[0], self.KB, bsize))
        for k, lut in self.fprop_list:
            for c, w in lut:
                O[:, k, :] += np.dot(I[:, c, :], W[w, :, :])  # NC x CK = NK
        return O.reshape(I.shape[0], -1)
    # Feature axis first: I is (CB*bsize, N).
    N = I[0].size
    I = I.reshape((self.CB, bsize, N))
    O = np.zeros((self.KB, bsize, N))
    for k, lut in self.fprop_list:
        for c, w in lut:
            if gate is not None and gate[w] == 0.0:
                continue  # fully gated-off block contributes nothing
            contrib = np.dot(W[w, :, :].T, I[c, :, :])  # CK.T x CN = KN
            O[k, :, :] += contrib if gate is None else contrib * gate[w]
    return O.reshape(-1, N)
def bprop_test(self, E, W, gate=None):
    """Dense numpy reference for the block-sparse backward pass B = E x W.T.

    gate is only honored on the feature-first layout (axis == 0), matching
    the original reference.
    """
    bsize = self.bsize
    if self.axis:
        # Feature axis last: E is (N, KB*bsize).
        E = E.reshape((-1, self.KB, bsize))
        B = np.zeros((E.shape[0], self.CB, bsize))
        for c, lut in self.bprop_list:
            for k, w in lut:
                B[:, c, :] += np.dot(E[:, k, :], W[w, :, :].T)  # NK x CK.T = NC
        return B.reshape(E.shape[0], -1)
    # Feature axis first: E is (KB*bsize, N).
    N = E[0].size
    E = E.reshape((self.KB, bsize, N))
    B = np.zeros((self.CB, bsize, N))
    for c, lut in self.bprop_list:
        for k, w in lut:
            if gate is not None and gate[w] == 0.0:
                continue  # fully gated-off block contributes nothing
            contrib = np.dot(W[w, :, :], E[k, :, :])  # CK x KN = CN
            B[c, :, :] += contrib if gate is None else contrib * gate[w]
    return B.reshape(-1, N)
def updat_test(self, I, E, gate=None, dw_gated=False):
    """Dense numpy reference for the block-sparse weight gradient U = I.T x E.

    Gating only applies on the feature-first layout (axis == 0) and only
    when dw_gated is set, matching the original reference.
    """
    U = np.zeros(self.w_shape)
    bsize = self.bsize
    if self.axis:
        # Feature axis last.
        I = I.reshape((-1, self.CB, bsize))
        E = E.reshape((-1, self.KB, bsize))
        for w, (c, k) in enumerate(self.updat_list):
            U[w, :, :] = np.dot(I[:, c, :].T, E[:, k, :])  # NC.T x NK = CK
        return U
    # Feature axis first.
    I = I.reshape((self.CB, bsize, -1))
    E = E.reshape((self.KB, bsize, -1))
    gating = dw_gated and gate is not None
    for w, (c, k) in enumerate(self.updat_list):
        if gating and gate[w] == 0.0:
            continue  # gated-off blocks keep a zero gradient
        prod = np.dot(I[c, :, :], E[k, :, :].T)  # CN x KN.T = CK
        U[w, :, :] = prod * gate[w] if gating else prod
    return U
def l2_normalize_test(self, W, epsilon=1e-12):
    """Numpy reference: L2-normalize W per column across each output block group.

    Does not mutate the input; returns a normalized copy.
    """
    W = W.copy()
    for k, lut in self.fprop_list:
        block_ids = [w for c, w in lut]
        # Stack all blocks feeding output k; columns are normalized jointly.
        stacked = W[block_ids, :, :].reshape(-1, self.bsize)
        sq_sum = np.sum(np.square(stacked), axis=0, keepdims=True)
        # epsilon clamp avoids division by ~zero for empty columns.
        norm = np.sqrt(np.maximum(sq_sum, epsilon))
        for w in block_ids:
            W[w, :, :] /= norm
    return W
def l2_normalize_grad_test(self, W, U, epsilon=1e-12):
    """Numpy reference gradient of l2_normalize_test.

    Overwrites U in place with the gradient of the L2-normalized weights,
    processing each output block-column group independently, and returns U.
    """
    for k, lut in self.fprop_list:
        ws = [w for c, w in lut]
        # Stack the group's blocks the same way as the forward pass.
        W2 = W[ws,:,:].reshape(-1, self.bsize)
        U2 = U[ws,:,:].reshape(-1, self.bsize)
        sum_sqr_w = np.sum(np.square(W2), axis=0, keepdims=True)
        max_w = np.maximum(sum_sqr_w, epsilon)
        # Gradient of W/sqrt(max(sum W^2, eps)); the (sum_sqr_w >= epsilon)
        # mask zeroes the projection term for columns clamped by epsilon.
        norm_grad = ( U2 + W2 * (sum_sqr_w >= epsilon) * np.sum(-U2 * W2 / max_w, axis=0, keepdims=True) ) / np.sqrt(max_w)
        norm_grad = norm_grad.reshape(-1, self.bsize, self.bsize)
        for i, w in enumerate(ws):
            U[w,:,:] = norm_grad[i]
    return U
def l2_normalize(self, W, gain=None, epsilon=1e-12, dtype=tf.float32):
    """On-device L2 normalization of W, optionally scaled by a gain tensor."""
    l2_lut = get_constant(self.l2_lut, name="l2")
    common = dict(TY=dtype, epsilon=epsilon, K=self.K, shared=self.l2_shared, bsize=self.bsize)
    if gain is None:
        W, _ = l2_normalize_ck(W, l2_lut, **common)
    else:
        W, _ = l2_normalize_gain_ck(W, gain, l2_lut, **common)
    return W
def matmul(self, I, W, gate=None, gate_grad=False, dw_gated=False, name=None, bench=0):
    """Alias for __call__, kept for API symmetry with tf.matmul."""
    return self(I, W, gate=gate, gate_grad=gate_grad, dw_gated=dw_gated, name=name, bench=bench)
def __call__(self, I, W, gate=None, gate_grad=False, dw_gated=False, name=None, bench=0):
    """Build the block-sparse matmul op O = I x W.

    gate, when given, is a per-block multiplier tensor; gate_grad requests
    gradients for it and dw_gated applies it to the weight gradient too.
    """
    if name is None:
        # Zero-padded counter keeps op names ordered; block_reduced_full_dw
        # and group_param_grads sort ops by this name suffix.
        name = self.name + ("_%06d" % self.count)
        self.count += 1
    # The custom op takes gate as a (possibly empty) list input.
    if gate is None:
        gate = []
    else:
        gate = [gate]
    #assert self.bsize == 8 and self.axis == 0, "blocksparse gating only implemented for block_size 8 on axis 0"
    fprop_lut = get_constant(self.fprop_lut, name="fprop")
    bprop_lut = get_constant(self.bprop_lut, name="bprop")
    updat_lut = get_constant(self.updat_lut, name="updat")
    O, _ = blocksparse_matmul(
        I, W, fprop_lut, bprop_lut, updat_lut, gate,
        gated_dw=bool(dw_gated), gate_grad=bool(gate_grad),
        blocks=self.blocks, bsize=self.bsize, axis=self.axis, C=self.C, K=self.K,
        segments=self.fprop_segments, segments_dx=self.bprop_segments,
        locks=self.fprop_locks, locks_dx=self.bprop_locks,
        shared=self.fprop_shared, shared_dx=self.bprop_shared, bench=bench, name=name
    )
    #print(O.op.name, O.op.device)
    return O
@ops.RegisterGradient("BlocksparseMatmul")
def blocksparse_matmul_grad(op, dy, temp):
    """Gradient of BlocksparseMatmul: dx via a transposed matmul, dw via the update op.

    Returns one gradient per op input (x, w, fprop_lut, bprop_lut,
    updat_lut[, gate]); the three LUT inputs are constants and get None.
    """
    # Mirror the forward op's attributes onto the backward ops.
    blocks    = op.get_attr("blocks")
    bsize     = op.get_attr("bsize")
    axis      = op.get_attr("axis")
    C         = op.get_attr("C")
    K         = op.get_attr("K")
    segments  = op.get_attr("segments_dx")
    shared    = op.get_attr("shared_dx")
    locks     = op.get_attr("locks_dx")
    gated_dw  = op.get_attr("gated_dw")
    gate_grad = op.get_attr("gate_grad")
    bench     = op.get_attr("bench")
    x = op.inputs[0]
    w = op.inputs[1]
    lut_dx = op.inputs[3]
    lut_dw = op.inputs[4]
    # gate is the optional sixth input.
    gate = [op.inputs[5]] if len(op.inputs) > 5 else []
    name = op.name.split('/')[-1]
    dx, _ = blocksparse_matmul_dx(
        dy, w, lut_dx, gate, gated_dw=gated_dw, gate_grad=gate_grad,
        blocks=blocks, bsize=bsize, axis=axis, C=K, K=C,  # swap C,K
        segments=segments, locks=locks, shared=shared,
        bench=bench, name=name+"_bprop")
    dw = blocksparse_matmul_dw(
        [x], [dy], lut_dw, gate, gated_dw=gated_dw, gate_grad=gate_grad,
        blocks=blocks, bsize=bsize, axis=axis, C=C, K=K,
        bench=bench, name=name+"_updat")
    # print(dx.op.name, dx.op.device)
    # print(dw.op.name, dw.op.device)
    if len(gate) == 0:
        # No gate input: 5 inputs, 5 gradients.
        return (dx, dw, None, None, None)
    elif gate_grad:
        # Also produce the gate gradient (and adjust dw accordingly).
        dw, dg = blocksparse_matmul_dg(dw, w, gate[0])
    else:
        dg = None
    return (dx, dw, None, None, None, dg)
@ops.RegisterGradient("L2NormalizeCK")
def blocksparse_l2_normalize_grad_ck(op, dy, sum_sqr_x):
    """Gradient of L2NormalizeCK; the lut input (inputs[1]) gets no gradient."""
    epsilon = op.get_attr("epsilon")
    K       = op.get_attr("K")
    shared  = op.get_attr("shared")
    bsize   = op.get_attr("bsize")
    # outputs[1] is the forward pass's cached sum-of-squares.
    grad_x = l2_normalize_grad_ck(dy, op.inputs[0], op.outputs[1], op.inputs[1], epsilon=epsilon, K=K, shared=shared, bsize=bsize)
    return (grad_x, None)
@ops.RegisterGradient("L2NormalizeGainCK")
def blocksparse_l2_normalize_grad_ck(op, dy, sum_sqr_x):
    """Gradient of L2NormalizeGainCK: gradients for x and gain; none for the lut."""
    # NOTE(review): this function reuses the name of the L2NormalizeCK
    # gradient above, shadowing it at module level. Harmless since
    # registration happens via the decorator, but worth renaming.
    epsilon = op.get_attr("epsilon")
    K       = op.get_attr("K")
    shared  = op.get_attr("shared")
    bsize   = op.get_attr("bsize")
    # outputs[1] is the forward pass's cached sum-of-squares.
    grad_x, grad_g = l2_normalize_gain_grad_ck(
        dy, op.inputs[0], op.inputs[1], op.outputs[1], op.inputs[2], epsilon=epsilon, K=K, shared=shared, bsize=bsize)
    return (grad_x, grad_g, None)
# Utils for graph re-writing
def block_reduced_full_dw(param_grad, scale=1.0, norm="max", group_size=8):
    """Walks a param grad's producer graph, groups BlocksparseMatmulDW ops and
    inserts blocksparse_reduced_dw ops that accumulate a reduced full dw.

    :param param_grad: gradient tensor whose producers are searched
    :param scale: host-side scalar; zero skips the compute for this op
    :param norm: "max" for max(abs()), anything else for l2_norm()
    :param group_size: number of DW ops fused per reduced op (max 8)
    :return: the final reduced dw tensor
    """
    # max(abs()) or l2_norm()
    # NOTE: `norm` is reused as an int flag here (0 = max, 1 = l2).
    norm = 0 if norm.lower() == "max" else 1
    # host side scalar, if zero will cause compute for this op to be skipped.
    scale = scalar_constant(scale, dtype=tf.float32)
    assert group_size <= 8
    # backward walk param grad to find BlocksparseMatmulDW ops
    # this should only hit BlocksparseMatmulDWs, BlocksparseMatmulDGs, AddNs or FloatCasts
    # NOTE: `ops` locally shadows the imported tensorflow `ops` module.
    ops = get_parents(param_grad, "BlocksparseMatmulDW")
    if len(ops) < 1:
        raise ValueError("BlocksparseMatmulDW op not found")
    # this sorting is dependent on the op names being correctly ordered.
    ops.sort(key=lambda op: op.name.split('/')[-1], reverse=True)
    # use the parent scope for the new ops
    scope = ops[-1].name.split('/')
    scope = '/'.join(scope[0:-1])
    # we're going to be using absolute names, so clear name_scope
    with tf.name_scope(None):
        dw_full = None
        offset = 0
        while offset < len(ops):
            # Inputs (x, dy) of up to group_size consecutive DW ops.
            xs = [op.inputs[0] for op in ops[offset:offset+group_size] ]
            gs = [op.inputs[1] for op in ops[offset:offset+group_size] ]
            # Get the corresponding activation grad op for the last param grad op in the group
            bprop = None
            for consumer in gs[-1].consumers():
                if consumer.type == "BlocksparseMatmulDX":
                    bprop = consumer
                    break
            assert bprop is not None
            # get attributes of first op in group
            up = ops[offset]
            bsize = up.get_attr("bsize")
            axis = up.get_attr("axis")
            name = "%s/block_reduced_full_dw_%03d" % (scope, offset)
            # Chain the previous partial result into the next reduced op.
            dw_full = [] if dw_full is None else [dw_full]
            dw_full, _, _ = blocksparse_reduced_dw(xs, gs, scale, dw_full, bsize=bsize, norm=norm, axis=axis, name=name)
            # force the dw op before any more time steps are processed
            bprop._add_control_input(dw_full.op)
            offset += group_size
    return dw_full
def group_param_grads(param_grad, group_size=8):
assert group_size <= 8
# backward walk param grad to find BlocksparseMatmulDW ops
# this should only hit BlocksparseMatmulDWs, BlocksparseMatmulDGs, AddNs or FloatCasts
ops = get_parents(param_grad, "BlocksparseMatmulDW")
if len(ops) <= 1:
return param_grad
# this sorting is dependent on the op names being correctly ordered.
ops.sort(key=lambda op: op.name.split('/')[-1], reverse=True)
# for x in ops:
# print(x.name)
# print("")
# exit()
segment_size = len(ops)
if ops[0].get_attr("gate_grad") and len(ops[0].inputs) == 4:
gate_count = dict()
max_count = 0
for op in ops:
gate = op.inputs[3]
count = gate_count.get(gate, 0) + 1
gate_count[gate] = count
max_count = max(max_count, count)
for count in gate_count.values():
if count != max_count:
raise ValueError("Non-uniform gate broadcasting detected.")
segment_size = max_count
if group_size > segment_size:
group_size = segment_size
else:
assert segment_size % group_size == 0
# nothing to rewrite here.
if segment_size == 1:
return param_grad
# use the parent scope for the new ops
scope = ops[-1].name.split('/')
scope = '/'.join(scope[0:-1])
# we're going to be using absolute names, so clear name_scope
with tf.name_scope(None):
dw = None
dws = list()
offset = 0
seg_cnt = 0
while offset < len(ops):
xs | |
<gh_stars>1-10
"""
.. py:module:: modules
:synopsis: The module management
.. moduleauthor:: <NAME> <<EMAIL>>
This is the docstring of the :py:mod:`modules` module.
"""
from collections import namedtuple, defaultdict
import importlib as imp
import inspect
import os
import random
import string
import sys
import threading
import traceback
from .listener import Listeners
PERFORM_KINDS = {"conf", "run", "loop_first", "loop_next", "loop_end"}
RETURN_KINDS = {"init", *PERFORM_KINDS}
LISTENER_KINDS = {"order", "dependency", "workflow"}
GLOBAL_NS = ""
PLUGINS_PATH = "plugins"
def _load_module(name, path):
    """
    Load and register a given module.

    :param name: the name of the module
    :type name: str
    :param path: the path to the module file; only used in messages here,
        since the actual import is done as ``src.<name>`` (see below)
    :type path: str

    For loading a package, give the path of the package's
    ``__init__.py`` file as path.

    :return: a 2-tuple ``(metadata_list, init_ret_list)`` holding the
        metadata and the ``register`` return value of every successfully
        registered module, or ``((), ())`` if the module couldn't be loaded.
    """
    # Sentinel returned on any failure; shape matches the success case.
    RETURN_BAD = ((),())

    # Load the module
    # NOTE(review): `path` is not used for importing — the module is imported
    # by name under the 'src' package; confirm this matches _search_modules.
    try:
        mod = imp.import_module('src.'+name)
    except Exception as e:
        print("\nImporting:\n\tname: {}\n\tpath: {}".format(name, path)) #DEBUG
        print("Cannot load module '{}' from '{}':\n{}: {}".format(name, path, e.__class__.__name__, e), file=sys.stderr)
        return RETURN_BAD

    # Check if module is valid (has `register` function)
    if not hasattr(mod, 'register'):
        print("Ignoring invalid plugin {} at {}:\nNo 'register' function found.".format(name, path), file=sys.stderr)
        return RETURN_BAD

    # Register the module
    meta = ModuleMetadata(mod)

    # First, pre-fill auto-detected version and functions
    if hasattr(mod, '__version__'):
        meta.version = mod.__version__
    for perf_kind in PERFORM_KINDS:
        if hasattr(mod, perf_kind):
            meta.set_fun(perf_kind, getattr(mod, perf_kind))

    # Second, check if module wants to register more modules.
    # A module requests extra metadata slots via a `more_meta` keyword
    # parameter on its `register` function: a default of None/empty means
    # one extra module, an integer default means that many extra modules.
    MetadataRegisterer = namedtuple("MoreMetadata", ("meta", "ret"))
    more_meta = ()
    meta_templates = {}
    try:
        # Prepare requested module metadata instances
        reg_param = inspect.signature(mod.register).parameters.get("more_meta")
        if reg_param:
            reg_param = reg_param.default
            if reg_param is None or reg_param is inspect.Parameter.empty:
                # Metadata for single module as scalar
                more_meta = (MetadataRegisterer(ModuleMetadata(), {}),)
                meta_templates = {"more_meta": more_meta[0]}
            else:
                # Tuple of metadata for multiple modules
                n_meta = int(reg_param)
                more_meta = tuple(MetadataRegisterer(ModuleMetadata(), {}) for _ in range(n_meta))
                meta_templates = {"more_meta": more_meta}
    # A non-callable `register` or a non-integer default is simply treated
    # as "no extra modules requested".
    except TypeError:
        pass
    except ValueError:
        pass

    # Third, let module fill in its properties
    try:
        init_ret = mod.register(meta, **meta_templates)
    except Exception as e:
        print("\nIgnore module '{}' due to exception:".format(name),
                file=sys.stderr, end='')
        _print_exception_string(e)
        return RETURN_BAD

    # Check meta data
    return_meta = []
    return_init_ret = []
    for m, r in (MetadataRegisterer(meta, init_ret), *more_meta):
        check_failed = m.check()

        # Ignore bad module
        if check_failed:
            print(f"Ignoring invalid plugin '{m.name}' ({m.id}):\n{check_failed}", file=sys.stderr)
            continue

        # Append good module to return list
        return_meta.append(m)
        return_init_ret.append(r)

        # Memorize return data of kind "init"
        if r:
            m.set_ret("init", tuple(r.keys()))
            #print(f"Return data of '{m.id}': {r}") #DEBUG

    # Return
    return return_meta, return_init_ret
def _search_modules(plugins_path):
"""Find modules to be loaded."""
modules = set()
# Search plugins directory for plugins
for f in os.listdir(plugins_path):
# Ignore files starting with a dot
if f.startswith(('.', '_')):
continue
# Get file parts and full path
name, ext = os.path.splitext(f)
fp = os.path.join(plugins_path, f)
# Check for valid module (or package) name
isValid = False
if os.path.isdir(fp) and os.path.isfile(os.path.join(fp, '__init__.py')):
# The path is a package
fp = os.path.join(fp, '__init__.py')
isValid = True
elif ext.startswith('.py') and (len(ext) == 3 or (len(ext) == 4 and ext[-1] in 'co')):
# The path is a module
isValid = True
# Skip invalid file names
if not isValid:
continue
# Add file to list of potential modules
modules.add((name, fp))
return modules
def _parse_version(ver, isComparison=False):
"""
Parse a version string.
The version string should consist of numbers
separated by dots, e.g. "1.0.2", "2", or "3".
Different versions of a plugin should have different version
strings such that the version string of the newer version is
the larger operand in version comparison.
For version comparison, the string will be split at the dots,
and the resulting substrings will be compared beginning with
the first using python’s default comparison operators.
Multiple consecutive dots are ignored.
An empty version can also be specified by ``None``, and a version
consisting of a single number can also be specified as a
positive integer number.
The version is returned as a tuple of strings, as an empty tuple
for an unspecified version or as ``None`` for an invalid argument.
:param ver: the version string
:type ver: str
:param isComparison: boolean flag whether ver is a comparison
:type isComparison: bool
:return: A tuple of subversion strings, obtained by splitting
the version string at dots.
If ``isComparison`` is ``True``, the comparison mode is returned
before the tuple of subversion strings.
The comparison mode is one of the following strings:
``>=``, ``<=``, ``!=``, ``>``, ``<``, ``=``
"""
# Catch special cases
if ver is None or ver is () or ver is '':
return (None, ()) if isComparison else ()
elif isinstance(ver, int) and ver >= 0:
ver = str(ver)
#return ((str(ver),)
elif not isinstance(ver, str):
return None
# Parse version string
# TODO: add optional dependency ('?')
comp_flags = ('>=', '<=', '!=', '>', '<', '=', '==')
#starts_with_comparison = ver.startswith(comp_flags)
if isComparison:
if ver[:2] in comp_flags:
comp_mode = ver[:2]
ver = ver[2:]
elif ver[0] in comp_flags:
comp_mode = ver[0]
ver = ver[1:]
else:
comp_mode = '='
# Split version string into subversions
ver = tuple([v for v in ver.split('.') if v])
if isComparison:
return comp_mode, ver
else:
return ver
def _check_versions(version_present, comp_mode, version_required):
"""
Check if a version fulfills a version requirement.
TODO: possibly wrong results for subversionstrings
with different lengths
:param version_present: The version of the plugin to be evaluated
:param comp_mode: The comparison mode
:param version_required: The required version
:return: ``True`` if version fulfills requirement, else ``False``.
"""
# TODO: correct for strings with different lengths
# TODO: add optional dependency ('?')
if not version_present and not version_required:
return True
elif comp_mode == '>=':
for vp, vr in zip(version_present, version_required):
if vp < vr:
return False
if len(version_present) < len(version_required):
return False
return True
elif comp_mode == '<=':
for vp, vr in zip(version_present, version_required):
if vp > vr:
return False
if len(version_present) > len(version_required):
return False
return True
elif comp_mode == '!=':
for vp, vr in zip(version_present, version_required):
if vp != vr:
return True
if len(version_present) == len(version_required):
return False
return True
elif comp_mode == '>':
for vp, vr in zip(version_present, version_required):
if vp > vr:
return True
elif vp < vr:
return False
if len(version_present) > len(version_required):
return True
return False
elif comp_mode == '<':
for vp, vr in zip(version_present, version_required):
if vp < vr:
return True
elif vp < vr:
return False
if len(version_present) < len(version_required):
return True
return False
elif comp_mode == '=' or comp_mode == '==':
if len(version_present) != len(version_required):
return False
for vp, vr in zip(version_present, version_required):
if vp != vr:
return False
return True
# This is never reached for a valid comp_mode
return False
def _parse_dep(dep):
"""
Parse the dependency data inserted by the plugin.
:param dep: The dependency data provided by the plugin
:return: A (possibly empty) tuple of dependencies,
or ``None`` if dependency data is invalid
The expected dependency data is::
[tuple of] tuple of ("id", [tuple of] ("conf_ret" | "run_ret"), [tuple of] [(<, >) [=]] "version" )
"""
# Expects:
# [tuple of] tuple of ("id", [tuple of] ("conf_ret" | "run_ret"), [tuple of] [(<, >) [=]] "version" )
# Returns:
# tuple of (tuple of ("id", tuple of ("conf_ret" | "run_ret"), tuple of (<cmp_mode>, "version") ))
# Returns None if input is invalid
# No dependencies
if not dep:
return ()
# Depending on only one module; convert to tuple
if isinstance(dep[0], str):
dep = (dep,)
# Write all dependencies to standardized structure
new = []
isValid = True
for d in dep:
n = [None, None, None]
try:
# "id" is a string
n[0] = d[0]
# "conf_ret" is a string or an iterable of strings
if isinstance(d[1], str):
n[1] = (d[1],)
else:
n[1] = d[1]
# "version" is a string or a tuple of strings
if len(d) > 2:
if isinstance(d[2], str):
versions = (d[2],)
else:
versions = d[2]
new_versions = []
for ver in versions:
cmp_mode, ver_nr = _parse_version(ver, True)
if cmp_mode and ver_nr:
new_versions.append((cmp_mode, ver_nr))
n[2] | |
self.ax_table is not None:
self.ax_table_ref = model.TableM(self.ax_table)
if self.ath_table is not None:
self.ath_table_ref = model.TableM(self.ath_table)
if self.az_table is not None:
self.az_table_ref = model.TableM(self.az_table)
if self.ge_table is not None:
self.ge_table_ref = model.TableM(self.ge_table)
    def uncross_reference(self) -> None:
        """Removes cross-reference links.

        Resolves every table attribute back to its plain integer id (via the
        corresponding ``*_table()`` accessor, which reads the ref while it is
        still set), then clears all ``*_ref`` attributes.
        """
        self.mid = self.Mid()
        self.mid_ref = None
        # Resolve ids first — the accessors below read the *_ref attributes,
        # so they must run before the refs are cleared.
        self.ex_table = self.Ex_table()
        self.eth_table = self.Eth_table()
        self.ez_table = self.Ez_table()
        self.nuth_table = self.Nuth_table()
        self.nuxz_table = self.Nuxz_table()
        self.rho_table = self.Rho_table()
        self.gzx_table = self.Gzx_table()
        self.ax_table = self.Ax_table()
        self.ath_table = self.Ath_table()
        self.az_table = self.Az_table()
        self.ge_table = self.Ge_table()
        # Drop the cross-reference objects.
        self.ex_table_ref = None
        self.eth_table_ref = None
        self.ez_table_ref = None
        self.nuth_table_ref = None
        self.nuxz_table_ref = None
        self.rho_table_ref = None
        self.gzx_table_ref = None
        self.ax_table_ref = None
        self.ath_table_ref = None
        self.az_table_ref = None
        self.ge_table_ref = None
    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a MATT3 card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        mid = integer(card, 1, 'mid')
        ex_table = integer_or_blank(card, 2, 'T(EX)')
        eth_table = integer_or_blank(card, 3, 'T(ETH)')
        # NOTE(review): fields 4, 9, 10 and 15 are skipped here — presumably
        # unused positions in the MATT3 card layout; confirm against the QRG.
        ez_table = integer_or_blank(card, 5, 'T(EZ)')
        nuth_table = integer_or_blank(card, 6, 'T(NUTH)')
        nuxz_table = integer_or_blank(card, 7, 'T(NUXZ)')
        rho_table = integer_or_blank(card, 8, 'T(RHO)')
        gzx_table = integer_or_blank(card, 11, 'T(GZX)')
        ax_table = integer_or_blank(card, 12, 'T(AX)')
        ath_table = integer_or_blank(card, 13, 'T(ATH)')
        az_table = integer_or_blank(card, 14, 'T(AZ)')
        ge_table = integer_or_blank(card, 16, 'T(GE)')
        assert len(card) <= 16, 'len(MATT3 card) = %i\ncard=%s' % (len(card), card)
        return MATT3(mid, ex_table, eth_table, ez_table,
                     nuth_table, nuxz_table, rho_table, gzx_table,
                     ax_table, ath_table, az_table, ge_table, comment=comment)
def Ex_table(self):
if self.ex_table_ref is not None:
return self.ex_table_ref.tid
return self.ex_table
def Eth_table(self):
if self.eth_table_ref is not None:
return self.eth_table_ref.tid
return self.eth_table
def Ez_table(self):
if self.ez_table_ref is not None:
return self.ez_table_ref.tid
return self.eth_table
def Nuth_table(self):
if self.nuth_table_ref is not None:
return self.nuth_table_ref.tid
return self.nuth_table
def Nuxz_table(self):
if self.nuxz_table_ref is not None:
return self.nuxz_table_ref.tid
return self.nuxz_table
def Rho_table(self):
if self.rho_table_ref is not None:
return self.rho_table_ref.tid
return self.rho_table
def Gzx_table(self):
if self.gzx_table_ref is not None:
return self.gzx_table_ref.tid
return self.gzx_table
def Ax_table(self):
if self.ax_table_ref is not None:
return self.ax_table_ref.tid
return self.ax_table
def Ath_table(self):
if self.ath_table_ref is not None:
return self.ath_table_ref.tid
return self.ath_table
def Az_table(self):
if self.az_table_ref is not None:
return self.az_table_ref.tid
return self.az_table
def Ge_table(self):
if self.ge_table_ref is not None:
return self.ge_table_ref.tid
return self.ge_table
    def raw_fields(self):
        # Card fields in MATT3 order; the None entries are the unused field
        # positions of the card image (they line up with the positions skipped
        # in add_card).
        list_fields = [
            'MATT3', self.Mid(), self.Ex_table(), self.Eth_table(), self.Ez_table(),
            self.Nuth_table(), self.Nuxz_table(), self.Rho_table(), None, None,
            self.Gzx_table(), self.Ax_table(), self.Ath_table(), self.Az_table(),
            None, self.Ge_table(),
        ]
        return list_fields
    def repr_fields(self):
        # MATT3 has no defaulted fields to hide, so repr == raw.
        return self.raw_fields()
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
class MATT4(MaterialDependenceThermal):
    """
    Specifies temperature-dependent thermal material properties on MAT4
    entry fields via TABLEMi entries.

    +-------+-------+-------+-------+--------+-------+-------+---------+
    |   1   |   2   |   3   |   4   |   5    |   6   |   7   |    8    |
    +=======+=======+=======+=======+========+=======+=======+=========+
    | MATT4 |  MID  | T(K)  | T(CP) |        | T(H)  | T(mu) | T(HGEN) |
    +-------+-------+-------+-------+--------+-------+-------+---------+
    """
    type = 'MATT4'

    @classmethod
    def _init_from_empty(cls):
        # Minimal instance (mid=1, no tables) for empty-card initialization.
        mid = 1
        return MATT4(mid, k_table=None, cp_table=None, h_table=None,
                     mu_table=None, hgen_table=None, comment='')

    def __init__(self, mid, k_table=None, cp_table=None, h_table=None,
                 mu_table=None, hgen_table=None, comment=''):
        """
        Creates a MATT4 card.

        Parameters
        ----------
        mid : int
            id of the material this card modifies
        k_table, cp_table, h_table, mu_table, hgen_table : int; default=None
            TABLEMi ids for conductivity, specific heat, free convection,
            viscosity and heat generation; an id of 0 is treated as None
        comment : str; default=''
            a comment for the card
        """
        MaterialDependenceThermal.__init__(self)
        if comment:
            self.comment = comment

        # A table id of 0 means "no table"
        if k_table == 0:
            k_table = None
        if cp_table == 0:
            cp_table = None
        if h_table == 0:
            h_table = None
        if mu_table == 0:
            mu_table = None
        if hgen_table == 0:
            hgen_table = None

        self.mid = mid
        self.k_table = k_table
        self.cp_table = cp_table
        self.h_table = h_table
        self.mu_table = mu_table
        self.hgen_table = hgen_table

        self.mid_ref = None
        self.k_table_ref = None
        self.cp_table_ref = None
        self.h_table_ref = None
        self.mu_table_ref = None
        self.hgen_table_ref = None

    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a MATT4 card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        mid = integer(card, 1, 'mid')
        k_table = integer_or_blank(card, 2, 'T(K)')
        cp_table = integer_or_blank(card, 3, 'T(CP)')
        # field 4 is blank in the card layout
        h_table = integer_or_blank(card, 5, 'T(H)')
        mu_table = integer_or_blank(card, 6, 'T(mu)')
        hgen_table = integer_or_blank(card, 7, 'T(HGEN)')

        assert len(card) <= 8, 'len(MATT4 card) = %i\ncard=%s' % (len(card), card)
        return MATT4(mid, k_table, cp_table, h_table, mu_table,
                     hgen_table, comment=comment)

    @classmethod
    def add_op2_data(cls, data, comment=''):
        """
        Adds a MATT4 card from the OP2

        Parameters
        ----------
        data : List[varies]
            a list of fields defined in OP2 format
        comment : str; default=''
            a comment for the card
        """
        (mid, k_table, cp_table, null, h_table, mu_table, hgen_table) = data
        assert null == 0, data
        return MATT4(mid, k_table, cp_table, h_table, mu_table,
                     hgen_table, comment=comment)

    def cross_reference(self, model):
        """
        Cross links the card so referenced cards can be extracted directly

        Parameters
        ----------
        model : BDF()
            the BDF object
        """
        msg = ', which is required by MATT4 mid=%s' % self.mid
        self.mid_ref = model.Material(self.mid, msg=msg)
        self._xref_table(model, 'k_table', msg=msg)
        self._xref_table(model, 'cp_table', msg=msg)
        self._xref_table(model, 'h_table', msg=msg)
        self._xref_table(model, 'mu_table', msg=msg)
        self._xref_table(model, 'hgen_table', msg=msg)

    def uncross_reference(self) -> None:
        """Removes cross-reference links"""
        self.mid = self.Mid()
        # Resolve table ids before dropping the refs the accessors read.
        self.k_table = self.K_table()
        self.cp_table = self.Cp_table()
        self.h_table = self.H_table()
        self.mu_table = self.Mu_table()
        self.hgen_table = self.Hgen_table()
        # (a duplicated `self.mid_ref = None` line was removed here)
        self.mid_ref = None
        self.k_table_ref = None
        self.cp_table_ref = None
        self.h_table_ref = None
        self.mu_table_ref = None
        self.hgen_table_ref = None

    def K_table(self):
        return self._get_table('k_table')

    def Cp_table(self):
        return self._get_table('cp_table')

    def H_table(self):
        return self._get_table('h_table')

    def Mu_table(self):
        return self._get_table('mu_table')

    def Hgen_table(self):
        return self._get_table('hgen_table')

    def raw_fields(self):
        # Card fields in MATT4 order; the None fills the blank field 4.
        list_fields = [
            'MATT4', self.Mid(), self.K_table(), self.Cp_table(),
            None,
            self.H_table(), self.Mu_table(), self.Hgen_table()
        ]
        return list_fields

    def repr_fields(self):
        return self.raw_fields()

    def write_card(self, size: int=8, is_double: bool=False) -> str:
        card = self.repr_fields()
        if size == 8:
            return self.comment + print_card_8(card)
        return self.comment + print_card_16(card)
class MATT5(MaterialDependenceThermal):
"""
    Specifies temperature-dependent material properties on MAT5 entry
    fields via TABLEMi entries.
+-------+---------+---------+--------+--------+--------+--------+--------+-------+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+=======+=========+=========+========+========+========+========+========+=======+
| MATT5 | MID | T(Kxx) | T(Kxy) | T(Kxz) | T(Kyy) | T(Kyz) | T(Kzz) | T(CP) |
+-------+---------+---------+--------+--------+--------+--------+--------+-------+
| | | T(HGEN) | | | | | | |
+-------+---------+---------+--------+--------+--------+--------+--------+-------+
"""
type = 'MATT5'
    @classmethod
    def _init_from_empty(cls):
        # Minimal instance (mid=1, no tables) — presumably used by the
        # empty-card initialization machinery; confirm against the base class.
        mid = 1
        return MATT5(mid, kxx_table=None, kxy_table=None, kxz_table=None, kyy_table=None,
                     kyz_table=None, kzz_table=None, cp_table=None, hgen_table=None, comment='')
    def __init__(self, mid, kxx_table=None, kxy_table=None, kxz_table=None,
                 kyy_table=None, kyz_table=None, kzz_table=None,
                 cp_table=None, hgen_table=None, comment=''):
        """
        Creates a MATT5 card.

        Parameters
        ----------
        mid : int
            id of the material this card modifies
        kxx_table ... kzz_table : int; default=None
            TABLEMi ids for the conductivity matrix terms
        cp_table : int; default=None
            TABLEMi id for specific heat
        hgen_table : int; default=None
            TABLEMi id for heat generation
        comment : str; default=''
            a comment for the card

        NOTE(review): unlike MATT4.__init__, table ids of 0 are NOT normalized
        to None here (only add_op2_data does that) — confirm this asymmetry is
        intentional.
        """
        MaterialDependenceThermal.__init__(self)
        if comment:
            self.comment = comment
        self.mid = mid
        self.kxx_table = kxx_table
        self.kxy_table = kxy_table
        self.kxz_table = kxz_table
        self.kyy_table = kyy_table
        self.kyz_table = kyz_table
        self.kzz_table = kzz_table
        self.cp_table = cp_table
        self.hgen_table = hgen_table

        self.mid_ref = None
        self.kxx_table_ref = None
        self.kxy_table_ref = None
        self.kxz_table_ref = None
        self.kyy_table_ref = None
        self.kyz_table_ref = None
        self.kzz_table_ref = None
        self.cp_table_ref = None
        self.hgen_table_ref = None
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a MATT5 card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
mid = integer(card, 1, 'mid')
kxx_table = integer_or_blank(card, 2, 'T(Kxx)')
kxy_table = integer_or_blank(card, 3, 'T(Kxy)')
kxz_table = integer_or_blank(card, 5, 'T(Kxz)')
kyy_table = integer_or_blank(card, 6, 'T(Kyy)')
kyz_table = integer_or_blank(card, 7, 'T(Kyz)')
kzz_table = integer_or_blank(card, 8, 'T(Kyz)')
cp_table = integer_or_blank(card, 9, 'T(Kyz)')
hgen_table = integer_or_blank(card, 11, 'T(HGEN)')
assert len(card) <= 12, 'len(MATT5 card) = %i\ncard=%s' % (len(card), card)
return MATT5(mid, kxx_table, kxy_table, kxz_table, kyy_table,
kyz_table, kzz_table, cp_table, hgen_table,
comment=comment)
@classmethod
def add_op2_data(cls, data, comment=''):
"""
Adds a MATT5 card from the OP2
Parameters
----------
data : List[varies]
a list of fields defined in OP2 format
comment : str; default=''
a comment for the card
"""
(mid, kxx_table, kxy_table, kxz_table, kyy_table, kyz_table, kzz_table,
cp_table, null, hgen_table) = data
if kxx_table == 0:
kxx_table = None
if kxy_table == 0:
kxy_table = None
if kxz_table == 0:
kxz_table = None
if kyy_table == 0:
kyy_table = None
if kyz_table == 0:
kyz_table = None
if kzz_table == 0:
kzz_table = None
if cp_table == 0:
cp_table = None
if hgen_table == 0:
hgen_table = None
assert null == 0, data
return MATT5(mid, kxx_table, kxy_table, kxz_table, kyy_table,
kyz_table, kzz_table, cp_table, hgen_table,
comment=comment)
def cross_reference(self, model):
"""
Cross links the card so referenced cards can be extracted directly
Parameters
----------
model : BDF()
the BDF object
"""
msg = ', which is required by MATT5 mid=%s' % self.mid
self.mid_ref = model.Material(self.mid, msg=msg)
self._xref_table(model, 'kxx_table', msg=msg)
self._xref_table(model, 'kxy_table', msg=msg)
self._xref_table(model, 'kxz_table', msg=msg)
self._xref_table(model, 'kyy_table', msg=msg)
self._xref_table(model, 'kyz_table', msg=msg)
| |
"""
Core functionality and main computation functions are defined here. There are
low level implementations:
* :func:`cross_correlate`
* :func:`cross_correlate_fft`
* :func:`auto_correlate`
* :func:`auto_correlate_fft`
* :func:`cross_difference`
* :func:`auto_difference`
Functions for calculating tau-dependent mean signal and mean square
of the signal that are needed for normalization:
* :func:`cross_sum`
* :func:`cross_sum_fft`
* :func:`auto_sum`
* :func:`auto_sum_fft`
High-level functions (in-memory calculation):
* :func:`ccorr` to calculate cross-correlation/difference functions
* :func:`acorr` to calculate auto-correlation/difference functions
For out-of-memory analysis use:
* :func:`iccorr` to calculate cross-correlation/difference functions
* :func:`iacorr` to calculate auto-correlation/difference functions
Finally, normalization of the results:
* :func:`normalize` to normalize the outputs of the above functions.
"""
from __future__ import absolute_import, print_function, division
import numpy as np
from cddm.conf import CDTYPE, FDTYPE, IDTYPE,I64DTYPE
from cddm.print_tools import print1,print2, print_frame_rate, enable_prints, disable_prints
import time
from functools import reduce
from cddm.fft import _fft
from cddm._core_nb import _cross_corr_fft_regular, _cross_corr_fft, \
_auto_corr_fft_regular,_auto_corr_fft,\
_cross_corr_regular, _cross_corr, _cross_corr_vec, \
_cross_diff_regular,_cross_diff,_cross_diff_vec, \
_auto_corr_regular,_auto_corr, _auto_corr_vec, \
_auto_diff_regular,_auto_diff, _auto_diff_vec,\
_cross_sum_regular,_cross_sum,_cross_sum_vec,_cross_sum_complex_vec,\
_cross_sum_fft,_cross_sum_complex_fft,_fill_ones, abs2,\
_add_count_cross,_add_count_auto, _add_count_cross_complex
#complex versions
from cddm._core_nb import _cross_corr_complex_regular, _cross_corr_complex, _cross_corr_complex_vec, \
_auto_corr_complex_regular,_auto_corr_complex, _auto_corr_complex_vec, _cross_sum_complex, _cross_sum_complex_regular,_cross_sum_complex_regular_inverted
#imported for backward compatibility
from cddm.norm import NORM_STANDARD, NORM_SUBTRACTED, NORM_STRUCTURED, NORM_WEIGHTED, NORM_COMPENSATED
from cddm.norm import weight_from_data, weighted_sum, scale_factor, normalize, norm_flags, take_data
#--------------------------
#Core correlation functions
def _move_axis_and_align(f, axis, new_axis = -2, align = False):
"""Swaps axes of input data and aligns it by copying entire data."""
f = np.asarray(f)
if f.ndim > 1:
f = np.moveaxis(f,axis,new_axis)
if align == True:
f = f.copy()
return f
def _inspect_cross_arguments(f1,f2,t1,t2,axis,n, aout, dtype = CDTYPE, aout_full_size = False):
    """Inspects and returns processed input arguments for cross_* functions.

    Validates shapes, converts data to the working dtype, determines the
    output length `n` (from the caller, from `aout`, or from the time span),
    and shifts the time arrays so that min(t1, t2) == 0.
    Returns ``(f1, f2, t1, t2, axis, n)``.
    """
    # t1/t2 must both be given or both be omitted (regular-spaced data).
    if (t1 is None and t2 is not None) or (t2 is None and t1 is not None):
        raise ValueError("You must define both `t1` and `t2`")
    elif t1 is not None:
        t1 = np.asarray(t1, I64DTYPE)
        t2 = np.asarray(t2, I64DTYPE)
    axis = int(axis)
    f1 = np.asarray(f1, dtype)
    f2 = np.asarray(f2, dtype)
    # When dtype is None, asarray above kept the input dtype; promote anything
    # that is not already a supported float/complex type to FDTYPE.
    if f1.dtype not in (FDTYPE, CDTYPE) and dtype is None:
        f1 = np.asarray(f1, FDTYPE)
    if f2.dtype not in (FDTYPE, CDTYPE) and dtype is None:
        f2 = np.asarray(f2, FDTYPE)
    ndim = f1.ndim
    if ndim == 0 or f1.shape != f2.shape:
        raise ValueError("Wrong shape of input arrays")
    if n is None:
        if aout is not None:
            if not isinstance(aout, np.ndarray):
                raise TypeError("aout must be a valid numpy array.")
            if aout_full_size == False:
                n = aout.shape[-1]
            else:
                # full-size (complex) output holds 2*n-1 entries
                n = (aout.shape[-1]+1)//2
        else:
            n = _determine_full_length_cross(f1.shape,t1,t2,axis)
    # Shift both time arrays by the common minimum so times start at 0.
    if t1 is not None and t2 is not None:
        t1min,t2min = t1.min(), t2.min()
        t1 = t1 - min(t1min,t2min)
        t2 = t2 - min(t1min,t2min)
    return f1,f2,t1,t2,axis,n
def _determine_full_length_auto(shape,t,axis):
if t is None:
#output size is input size
n = shape[axis]
else:
#output size depends on the max and min values of t
#determine appropriate length of the equivalent regular-spaced data
tmin,tmax = t.min(), t.max()
n = 1 + tmax -tmin
return n
def _determine_full_length_cross(shape,t1,t2,axis):
if t1 is None and t2 is None:
#output size is input size
n = shape[axis]
else:
#output size depends on the max and min values of t
#determine appropriate length of the equivalent regular-spaced data
t1min,t1max = t1.min(), t1.max()
t2min,t2max = t2.min(), t2.max()
n = 1 + max(t2max-t1min, t1max - t2min)
return n
def _inspect_auto_arguments(f,t,axis,n,aout, dtype = CDTYPE):
    """Inspects and returns processed input arguments for auto_* functions.

    Validates the input shape, converts data to the working dtype, determines
    the output length `n` (from the caller, from `aout`, or from the time
    span), and shifts `t` so it starts at 0.
    Returns ``(f, t, axis, n)``.
    """
    if t is not None:
        t = np.asarray(t, I64DTYPE)
    axis = int(axis)
    f = np.asarray(f, dtype)
    # When dtype is None, asarray above kept the input dtype; promote anything
    # that is not already a supported float/complex type to FDTYPE.
    if f.dtype not in (FDTYPE, CDTYPE) and dtype is None:
        f = np.asarray(f, FDTYPE)
    ndim = f.ndim
    if ndim == 0:
        raise ValueError("Wrong shape of input arrays")
    if n is None:
        if aout is not None:
            if not isinstance(aout, np.ndarray):
                raise TypeError("aout must be a valid numpy array.")
            n = aout.shape[-1]
        else:
            n = _determine_full_length_auto(f.shape,t,axis)
    # Shift times so they start at 0.
    if t is not None:
        tmin = t.min()
        t = t - tmin
    return f,t,axis,n
def _transpose_data(data, axis = -2):
    # Moves the given axis to the last position (returns a view, no copy).
    return np.moveaxis(data,axis,-1)
def _default_out(out, data_shape, n, calc_axis, dtype = FDTYPE, complex = False):
    """Allocates (or validates) the output array for correlation results.

    When `out` is None a zero array of the expected shape/dtype is created;
    otherwise `out` is moved so `calc_axis` is last and its shape and dtype
    are validated. NOTE: the parameter name `complex` shadows the builtin but
    is kept for API compatibility.
    """
    shape = list(data_shape)
    if complex == True:
        # complex (two-sided) output holds 2*n - 1 entries along calc_axis
        shape[calc_axis] = n*2 -1
    else:
        shape[calc_axis] = n
    if out is None:
        out = np.zeros(shape, dtype)
    else:
        # A user-provided array determines its own length along calc_axis;
        # only the remaining dimensions and the dtype are validated.
        shape[calc_axis] = out.shape[-1]
        out = _transpose_data(out, calc_axis)
        if out.shape != tuple(shape):
            raise ValueError("Wrong output array shape")
        if out.dtype not in (FDTYPE,CDTYPE):
            raise ValueError("Wrong output dtype")
    return out
def cross_correlate_fft(f1,f2, t1 = None, t2 = None, axis = 0, n = None, complex = False, aout = None):
    """Calculates cross-correlation function of two equal sized input arrays using FFT.

    For large arrays and large n, this is faster than correlate. The output of
    this function is identical to the output of cross_correlate.

    See :func:`cross_correlate` for details.
    """
    #use None for "n" argument to determine length needed for FFT
    f1,f2,t1,t2,axis,n = _inspect_cross_arguments(f1,f2,t1,t2,axis,n,aout, aout_full_size = complex)

    #determine fft length
    length = _determine_full_length_cross(f1.shape,t1,t2,axis)

    #algorithm needs calculation to be done over the last axis.. so move it here
    f1 = np.moveaxis(f1,axis, -1)
    f2 = np.moveaxis(f2,axis, -1)

    complex = bool(complex)
    dtype = CDTYPE if complex else FDTYPE

    out = _default_out(aout,f1.shape,n,-1, dtype = dtype, complex = complex)

    # NOTE(review): `out` is passed twice to the kernels below — presumably
    # one slot is an in/out accumulator and one the result buffer; confirm
    # against the _core_nb kernel signatures.
    if t1 is None:
        #regular spaced data algorithm
        return _cross_corr_fft_regular(f1,f2,out, out)
    else:
        #random spaced data algorithm
        return _cross_corr_fft(f1,f2,t1,t2,length,out, out)
def auto_correlate_fft(f, t = None, axis = 0, n = None, complex = False, aout = None):
    """Calculates auto-correlation function of input array using FFT.

    For large arrays and large n, this is faster than correlate. The output of
    this function is identical to the output of auto_correlate.

    See :func:`auto_correlate` for details.
    """
    f,t,axis,n = _inspect_auto_arguments(f,t,axis,n,aout)

    #determine fft length
    length = _determine_full_length_auto(f.shape,t,axis)

    #algorithm needs calculation to be done over the last axis.. so move it here
    f = np.moveaxis(f,axis, -1)

    complex = bool(complex)
    dtype = CDTYPE if complex else FDTYPE

    # NOTE(review): unlike cross_correlate_fft, `complex` is not forwarded to
    # _default_out here, so the two-sided (2*n-1) output length is never
    # allocated — confirm whether the complex option is supported for the
    # auto variant.
    out = _default_out(aout,f.shape,n,-1, dtype = dtype)

    if t is None:
        #regular spaced data algorithm
        return _auto_corr_fft_regular(f,out, out)
    else:
        #random spaced data algorithm
        return _auto_corr_fft(f,t,length,out, out)
def _is_aligned(data, axis, align):
try:
return (((axis == -1 or axis == data.ndim-1) and data.data.contiguous == True) or align == True)
except:
#python 2.7 just return False
return False
def thread_frame_shape(shape, thread_divisor = None, force_2d = False):
    """Computes new frame shape for threaded computaton.

    Parameters
    ----------
    shape : tuple of ints
        Input frame shape
    thread_divisor : int
        An integer that divides the flattend frame shape. This number determines
        number of threads.
    force_2d : bool
        If 1d data, make it 2d regardless of thread_divisor value.

    Returns
    -------
    shape : tuple
        A length 2 shape
    """
    # 1D data is promoted to 2D on request by using a divisor of 1.
    if thread_divisor is None and force_2d == True and len(shape) == 1:
        thread_divisor = 1
    if thread_divisor is None:
        # nothing to do — keep the frame shape as-is
        return shape
    total = reduce(lambda x, y: x * y, shape)
    try:
        columns = total // int(thread_divisor)
    except ValueError:
        raise ValueError("Invalid `thread_divisor`")
    new_shape = (thread_divisor, columns)
    # the divisor must split the flattened frame exactly
    if total != new_shape[0] * new_shape[1]:
        raise ValueError("`thread_divisor` incompatible with input array's shape")
    return new_shape
def reshape_input(f, axis = 0, thread_divisor = None, mask = None):
    """Reshapes input data for faster threaded calculation.

    Parameters
    ----------
    f : ndarray
        Input array.
    axis : int
        Axis over which the computation is performed.
    thread_divisor : int
        An integer that divides the flattened frame size; determines the
        number of threads.
    mask : ndarray
        A boolean mask array. If provided, input data is masked first, then
        reshaped. This only works with axis = 0.

    Returns
    -------
    array, old_shape : ndarray, tuple
        Reshaped array and the original frame-shape tuple. The old frame
        shape is needed for reshaping of output data with
        :func:`reshape_output`.
    """
    f = np.asarray(f)
    axis = list(range(f.ndim))[axis]  # normalize a negative axis to positive
    if mask is not None:
        if axis != 0:
            raise ValueError("Mask can only be applied when axis = 0")
        f = f[:, mask]
    frame_shape = list(f.shape)
    n_along_axis = frame_shape.pop(axis)
    # Unmasked data may be promoted to 2d frames for threading.
    make_2d = mask is None
    threaded_shape = list(thread_frame_shape(tuple(frame_shape), thread_divisor, make_2d))
    threaded_shape.insert(axis, n_along_axis)
    return f.reshape(threaded_shape), tuple(frame_shape)
def reshape_frame(frame, shape, mask = None):
    """Reshape a computed frame back to its original shape; with a mask,
    scatter the values into a full-size array and fill masked-out entries
    with NaN."""
    restored = frame.reshape(shape)
    if mask is None:
        return restored
    result = np.empty(mask.shape, restored.dtype)
    result[mask] = restored
    result[np.logical_not(mask)] = np.nan
    return result
def reshape_output(data, shape | |
3, 1)
# Open to remove white holes
# masked = imopen(masked, 3, 2)
# masked = imfill(masked)
kernel_dilation = np.ones((5, 5), np.uint8)
masked = cv2.dilate(masked, kernel_dilation, iterations=2)
# Apply foreground mask (dilated) to the image and perform detection on that
# masked = cv2.bitwise_and(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), masked)
# Invert frame such that black pixels are foreground
masked = cv2.bitwise_not(masked)
# keypoints = []
# Blob detection
keypoints = detector.detect(masked)
n_keypoints = len(keypoints)
centroids = np.zeros((n_keypoints, 2))
sizes = np.zeros((n_keypoints, 2))
for i in range(n_keypoints):
centroids[i] = keypoints[i].pt
centroids[i] += origin
sizes[i] = keypoints[i].size
return centroids, sizes, masked
def detect_objects_large(frame, mask, fgbg, detector, origin):
    """Detect large moving objects in `frame` using background subtraction
    and external-contour extraction.

    Parameters
    ----------
    frame : ndarray
        Current video frame (format as supplied by the caller).
    mask : ndarray
        Region mask forwarded to `average_brightness`.
    fgbg :
        Background-subtractor model applied to the normalized frame.
    detector :
        Unused in this variant; kept so the signature matches the blob-based
        detector.
    origin : sequence of 2 numbers
        Offset added to every centroid (crop origin in the full frame).

    Returns
    -------
    centroids, sizes, masked : (n, 2) ndarray, (n, 2) ndarray, ndarray
        Object centres in full-frame coordinates, bounding-box (w, h) pairs,
        and the final foreground mask.
    """
    masked = cv2.convertScaleAbs(frame, alpha=1, beta=0)
    gain = 15
    # Normalize brightness before subtraction so lighting changes do not
    # register as motion.
    masked = cv2.convertScaleAbs(masked, alpha=1, beta=256 - average_brightness(16, frame, mask) + gain)
    masked = fgbg.apply(masked, learningRate=-1)
    kernel = np.ones((5, 5), np.uint8)
    # Remove noise, then dilate to merge fragments of the same object.
    masked = cv2.morphologyEx(masked, cv2.MORPH_OPEN, kernel, iterations=1)
    masked = cv2.dilate(masked, kernel, iterations=int(4 * SCALE_FACTOR))
    contours, hierarchy = cv2.findContours(masked, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    n_keypoints = len(contours)
    centroids = np.zeros((n_keypoints, 2))
    sizes = np.zeros((n_keypoints, 2))
    for i, contour in enumerate(contours):
        x, y, w, h = cv2.boundingRect(contour)
        M = cv2.moments(contour)
        if M['m00'] != 0:
            centroids[i] = [int(M['m10'] / M['m00']), int(M['m01'] / M['m00'])]
        else:
            # BUG FIX: degenerate (zero-area) contours have m00 == 0, which
            # previously raised ZeroDivisionError; fall back to the
            # bounding-box centre.
            centroids[i] = [x + w // 2, y + h // 2]
        centroids[i] += origin
        sizes[i] = (w, h)
    return centroids, sizes, masked
def predict_new_locations_of_tracks(tracks):
    """Advance every track's Kalman filter by one time step (predict phase)."""
    for current_track in tracks:
        current_track.kalmanFilter.predict()
# Assigns detections to tracks using Munkre's Algorithm with cost based on euclidean distance,
# with detections being located too far from existing tracks being designated as unassigned detections
# and tracks without any nearby detections being designated as unassigned tracks
# Assigns detections to tracks using Munkre's Algorithm with cost based on euclidean distance,
# with detections being located too far from existing tracks being designated as unassigned detections
# and tracks without any nearby detections being designated as unassigned tracks
def detection_to_track_assignment(tracks, centroids, cost_of_non_assignment):
    """Match detections to tracks with the Hungarian algorithm.

    Parameters
    ----------
    tracks : list
        Existing tracks; each exposes ``kalmanFilter.x`` whose first two
        entries are the predicted (x, y) position.
    centroids : ndarray
        (n, 2) detected object centres.
    cost_of_non_assignment : float
        Distance beyond which a track/detection is left unmatched.

    Returns
    -------
    assignments, unassigned_tracks, unassigned_detections : ndarray
        Rows of (track_idx, detection_idx) coordinates in the cost matrix.
    """
    m, n = len(tracks), len(centroids)
    k, l = min(m, n), max(m, n)
    # Square cost matrix padded with "dummy" partners so every real track or
    # detection can also be matched to nothing at a fixed cost.
    cost = np.zeros((k + l, k + l))
    # Top-left m x n block: Euclidean distance of every detection from each
    # track's predicted location.
    for i, track in enumerate(tracks):
        # BUG FIX: removed a leftover per-iteration `time.time()` call whose
        # result was never used, plus unused extra_tracks/extra_detections.
        track_location = track.kalmanFilter.x[:2]
        cost[i, :n] = np.array([distance.euclidean(track_location, centroid) for centroid in centroids])
    # Top-right corner: dummy detections (price of leaving a track unassigned).
    cost[:m, n:] = np.ones((m, m)) * cost_of_non_assignment
    # Bottom-left corner: dummy tracks (price of leaving a detection unassigned).
    cost[m:, :n] = np.ones((n, n)) * cost_of_non_assignment
    # The bottom-right corner stays 0 so surplus dummies always pair with
    # each other.
    row_ind, col_ind = linear_sum_assignment(cost)
    assignments_all = np.column_stack((row_ind, col_ind))
    # Top-left corner: real track matched to real detection -> valid assignment.
    assignments = assignments_all[(assignments_all < [m, n]).all(axis=1)]
    # Top-right corner: real track matched to a dummy detection -> the track
    # is treated as invisible this frame.
    unassigned_tracks = assignments_all[
        (assignments_all >= [0, n]).all(axis=1) & (assignments_all < [m, k + l]).all(axis=1)]
    # Bottom-left corner: real detection matched to a dummy track -> a new
    # track will be created for it.
    unassigned_detections = assignments_all[
        (assignments_all >= [m, 0]).all(axis=1) & (assignments_all < [k + l, n]).all(axis=1)]
    return assignments, unassigned_tracks, unassigned_detections
# Using the coordinates of valid assignments which correspond to the detection and track indices,
# update the track with the matched detection
def update_assigned_tracks(assignments, tracks, centroids, sizes):
    """Correct each matched track's Kalman filter with its assigned detection
    and refresh the track's visibility bookkeeping."""
    for track_idx, detection_idx in assignments:
        matched_track = tracks[track_idx]
        matched_track.kalmanFilter.update(centroids[detection_idx])
        matched_track.size = sizes[detection_idx]
        matched_track.age += 1
        matched_track.totalVisibleCount += 1
        # Seen this frame, so the invisible streak resets.
        matched_track.consecutiveInvisibleCount = 0
# Existing tracks without a matching detection are aged and considered invisible for the frame
# Existing tracks without a matching detection are aged and considered invisible for the frame
def update_unassigned_tracks(unassigned_tracks, tracks):
    """Age every track that received no detection this frame and extend its
    invisible streak."""
    for entry in unassigned_tracks:
        lost_track = tracks[entry[0]]
        lost_track.age += 1
        lost_track.consecutiveInvisibleCount += 1
# If any track has been invisible for too long, or generated by a flash, it will be removed from the list of tracks
# If any track has been invisible for too long, or generated by a flash, it will be removed from the list of tracks
def delete_lost_tracks(tracks):
    """Prune tracks that are likely noise (young with poor visibility) or that
    have been invisible longer than the filter can be trusted."""
    if len(tracks) == 0:
        return tracks
    invisible_for_too_long = 3 * FPS
    age_threshold = 1 * FPS
    doomed = []
    for candidate in tracks:
        visibility = candidate.totalVisibleCount / candidate.age
        # Young tracks with low visibility are probably noise; long-invisible
        # tracks can no longer be located accurately.
        is_noise = candidate.age < age_threshold and visibility < 0.8
        is_long_lost = candidate.consecutiveInvisibleCount >= invisible_for_too_long
        if is_noise or is_long_lost:
            doomed.append(candidate)
    return [candidate for candidate in tracks if candidate not in doomed]
# Detections not assigned an existing track are given their own track, initialized with the location of the detection
# Detections not assigned an existing track are given their own track, initialized with the location of the detection
def create_new_tracks(unassigned_detections, next_id, tracks, centroids, sizes):
    """Start a new constant-velocity Kalman track for every detection that
    could not be matched to an existing track.

    Parameters
    ----------
    unassigned_detections : ndarray
        Rows whose second entry is the detection index.
    next_id : int
        Id to give the first new track.
    tracks : list
        Track list to append to (mutated in place).
    centroids, sizes : ndarray
        Detection centres and bounding-box sizes.

    Returns
    -------
    int
        The id to use for the next new track.
    """
    # NOTE: removed an unused local `dt = 1 / FPS` that only fed a dead,
    # commented-out tuning experiment.
    for unassigned_detection in unassigned_detections:
        detection_idx = unassigned_detection[1]
        centroid = centroids[detection_idx]
        size = sizes[detection_idx]
        track = Track(next_id, size)
        # Constant-velocity model: state is [x, y, vx, vy] with a unit time step.
        track.kalmanFilter.x = [centroid[0], centroid[1], 0, 0]
        # State transition matrix.
        track.kalmanFilter.F = np.array([[1., 0, 1, 0],
                                         [0, 1, 0, 1],
                                         [0, 0, 1, 0],
                                         [0, 0, 0, 1]])
        # Measurement function: only position is observed.
        track.kalmanFilter.H = np.array([[1., 0, 0, 0],
                                         [0, 1, 0, 0]])
        # NOTE(review): the covariance/noise values below look hand-tuned
        # (the original author flagged uncertainty here) — confirm against
        # tracker performance before changing.
        track.kalmanFilter.P = np.diag([200., 200, 50, 50])   # state covariance
        track.kalmanFilter.Q = np.diag([100., 100, 25, 25])   # process noise
        track.kalmanFilter.R = 100                            # measurement noise
        tracks.append(track)
        next_id += 1
    return next_id
def filter_tracks(frame, masked, tracks, counter, origin):
# Minimum number of frames to remove noise seems to be somewhere in the range of 30
# Actually, I feel having both might be redundant together with the deletion criteria
min_track_age = max(1.0 * FPS, 30) # seconds * FPS to give number of frames in seconds
# This has to be less than or equal to the minimum age | |
}
},
}
# 'covsurver_prot_mutations': {
# '$regex': mut, '$options': 'i'
# },
query_this_week = query.copy()
query_prev_week = query.copy()
query_this_week['collection_date'] = {'$lt': today_date, '$gte': last_week_date}
query_prev_week['collection_date'] = {'$lt': last_week_date, '$gte': previous_week_date}
results_this_week = collection_db.count_documents(query_this_week)
results_prev_week = collection_db.count_documents(query_prev_week)
if denominator_prev_week != 0:
perc_prev_week = (results_prev_week / denominator_prev_week) * 100
else:
perc_prev_week = 0
if denominator_this_week != 0:
perc_this_week = (results_this_week / denominator_this_week) * 100
else:
perc_this_week = 0
diff_perc = abs(perc_this_week - perc_prev_week)
full_object = {f'{location_granularity[0]}': location_0,
f'{location_granularity[1]}': location_1,
f'{location_granularity[2]}': location_2,
'lineage': lineage,
'mut': mut,
'total_seq_world_prev_week': denominator_world_prev_week,
'total_seq_world_this_week': denominator_world_this_week,
'total_seq_pop_prev_week': denominator_prev_week,
'total_seq_pop_this_week': denominator_this_week,
'count_prev_week': results_prev_week,
'count_this_week': results_this_week,
'perc_prev_week': perc_prev_week,
'perc_this_week': perc_this_week,
'diff_perc': diff_perc,
'date': today_date.strftime("%Y-%m-%d"),
'granularity': location_granularity[2],
'location': location_2
}
# all_mutation_for_lineage_for_geo_previous_week.append(full_object)
all_all_mut_for_lineage_for_geo.append(full_object)
populate_aggregate_place_dict(full_object, location_granularity, today_date,
granularity=1)
populate_aggregate_place_dict(full_object, location_granularity, today_date,
granularity=0)
populate_aggregate_place_dict(full_object, location_granularity, today_date,
granularity=-1)
# name_csv = lineage + '.csv'
# directory = f'CSV_examples/{location_0}/{location_1}/{location_2}'
# if not os.path.exists(directory):
# os.makedirs(directory)
# table = pd.DataFrame(all_mutation_for_lineage_for_geo_previous_week)
# table.to_csv(rf'{directory}/{name_csv}',
# index=False, header=True)
else:
print('======> ERROR', mut)
for aggregated_loc in dict_aggregated_place:
single_obj = dict_aggregated_place[aggregated_loc].copy()
final_obj = get_final_object_aggregated_place(single_obj)
all_all_mut_for_lineage_for_geo.append(final_obj)
table2 = pd.DataFrame(all_all_mut_for_lineage_for_geo)
file_name = 'database_variants_' + today_date.strftime("%Y-%m-%d") + '.json'
table2.to_json(f'{file_name}', orient='records', lines=True)
end = timeit.default_timer()
print("TIMER ", end - start)
print("fine request all_mutation all_lineages")
def prova_query():
    """Ad-hoc sanity query: count per-mutation occurrences of lineage
    B.1.617.2 in England for the week ending 2021-08-08, printing the result."""
    date1 = datetime.strptime('2021-08-08', '%Y-%m-%d')
    # One-week window ending at date1. BUG FIX: the original
    # `date1.replace(day=date1.day)` was a no-op and has been dropped;
    # needless f-string prefixes on constant keys were removed too.
    date2 = date1 - timedelta(days=7)
    query = [
        {"$match": {
            'c_coll_date_prec': {
                '$eq': 2
            },
            'collection_date': {
                '$lt': date1,
                '$gte': date2
            },
            'location.geo_group': {
                '$eq': 'Europe'
            },
            'location.country': {
                '$eq': 'United Kingdom'
            },
            'location.region': {
                '$eq': 'England'
            },
            'covv_lineage': {
                '$eq': 'B.1.617.2'
            },
        }},
        {"$unwind": "$muts"},
        {"$group": {"_id": {'lin': '$covv_lineage',
                            'pro': "$muts.pro",
                            'org': "$muts.org",
                            'loc': "$muts.loc",
                            'alt': "$muts.alt",
                            },
                    "count": {"$sum": 1}}},
    ]
    res = collection_db.aggregate(query, allowDiskUse=True)
    print("prova results: ", list(res))
# Geo fields ordered from coarsest to finest granularity.
all_geo_granularity = ['geo_group', 'country', 'region']
# prova_query()
# Weekly analysis dates to (re)compute, most recent first.
array_date = ['2021-10-31', '2021-10-24', '2021-10-17']
for single_date in array_date:
    start = timeit.default_timer()
    date = datetime.strptime(single_date, '%Y-%m-%d')
    # Per-run accumulators consumed by the aggregation helpers above.
    dict_count_aggregated_place = {}
    dict_aggregated_place = {}
    # NOTE(review): the actual computation call is commented out, so this loop
    # currently only resets the accumulators for each date — confirm intent.
    # get_all_geo_last_week(location_granularity=all_geo_granularity, today_date=date)
#######################################################
@api.route('/getMostImportantResult')
class FieldList(Resource):
    @api.doc('get_most_important_result')
    def get(self):
        """Return merged analysis rows (last four weekly analyses) whose three
        p-values are all significant (< 0.05) for at least one analysed week."""
        # NOTE(review): the analysis date is hard-coded — confirm it tracks
        # the latest completed weekly analysis.
        today_date = datetime.strptime("2021-10-31", '%Y-%m-%d')
        # The analysis date plus the three preceding weeks. (The original
        # manual while-loop also did a no-op `.replace(day=day)` each step.)
        array_of_dates = [today_date - timedelta(days=7 * week) for week in range(4)]
        where_part = {'$and': [{'$or': [{'analysis_date': {'$eq': dt}} for dt in array_of_dates]}]}
        query = [{'$match': where_part}]
        results = collection_result_variant_db.aggregate(query, allowDiskUse=True)
        array_results = []
        # Flatten Mongo rows: format dates and derive a display 'location'.
        for res in results:
            single_obj = {}
            for key in res:
                if key == 'analysis_date':
                    single_obj[key] = str(res[key].strftime('%Y-%m-%d'))
                elif key == 'granularity':
                    single_obj[key] = res[key]
                    # 'world' rows carry no location field of their own.
                    if res[key] == 'world':
                        single_obj['location'] = 'World'
                    else:
                        single_obj['location'] = res[res['granularity']]
                else:
                    single_obj[key] = res[key]
            array_results.append(single_obj)
        # Merge the per-week rows into one object per location/lineage/mutation.
        result_dict = create_unique_array_results(array_results, today_date)
        array_to_return = []
        for single_line in result_dict.values():
            # `array_date` is the module-level list of analysed week strings.
            for one_date in array_date:
                p_with = 'p_value_with_mut_' + one_date
                p_without = 'p_value_without_mut_' + one_date
                p_comparative = 'p_value_comparative_mut_' + one_date
                if p_with in single_line:
                    if single_line[p_with] < 0.05 and \
                            single_line[p_without] < 0.05 and \
                            single_line[p_comparative] < 0.05:
                        # Keep the row once and stop scanning further weeks.
                        array_to_return.append(single_line)
                        break
        return array_to_return
@api.route('/getAllGeo')
class FieldList(Resource):
    @api.doc('get_all_geo')
    def get(self):
        """Return the distinct continents, countries and regions present in
        the variant-results collection (insertion order preserved)."""
        pipeline = [
            {"$group": {"_id": {'geo_group': '$geo_group',
                                'country': "$country",
                                'region': "$region",
                                },
                        }
             },
        ]
        grouped = collection_result_variant_db.aggregate(pipeline, allowDiskUse=True)
        array_continent = []
        array_country = []
        array_region = []
        for single_item in grouped:
            geo = single_item['_id']
            # Deduplicate while keeping first-seen order.
            if geo['geo_group'] not in array_continent:
                array_continent.append(geo['geo_group'])
            if geo['country'] not in array_country:
                array_country.append(geo['country'])
            if geo['region'] not in array_region:
                array_region.append(geo['region'])
        return {'continent': array_continent, 'country': array_country, 'region': array_region}
@api.route('/getAllLineage')
class FieldList(Resource):
    @api.doc('get_all_lineage')
    def get(self):
        """Return the sorted list of distinct lineages present in the
        variant-results collection."""
        pipeline = [
            {"$group": {"_id": {'lineage': '$lineage',
                                },
                        }
             },
        ]
        grouped = collection_result_variant_db.aggregate(pipeline, allowDiskUse=True)
        lineages = [single_item['_id']['lineage'] for single_item in grouped]
        lineages.sort()
        return lineages
@api.route('/getStatistics')
class FieldList(Resource):
    @api.doc('get_statistics')
    def post(self):
        """Return merged weekly statistic rows for one location (and an
        optional lineage), covering the hard-coded analysis date plus the
        three preceding weeks.

        Payload: ``{'granularity': ..., 'value': <location>, 'lineage': ...}``.
        """
        payload = api.payload
        granularity = payload['granularity']
        location = payload['value']
        lineage = payload['lineage']
        # NOTE(review): analysis date is hard-coded — presumably the latest
        # completed weekly analysis; confirm it is kept up to date.
        today_date = datetime.strptime(f"2021-10-31", '%Y-%m-%d')
        array_of_dates = [today_date]
        i = 0
        # Build the list of four analysis dates, one week apart.
        # (`replace(day=...)` below is a no-op retained from an older variant.)
        while i < 3:
            # if i == 0:
            #     prev_date = array_of_dates[i].replace(day=array_of_dates[i].day) - timedelta(days=7)
            # else:
            prev_date = array_of_dates[i].replace(day=array_of_dates[i].day) - timedelta(days=7)
            array_of_dates.append(prev_date)
            i = i + 1
        # Restrict the query to rows from any of those analysis dates.
        where_part = {}
        array_or = []
        for dt in array_of_dates:
            specific_or = {'analysis_date': {'$eq': dt}}
            array_or.append(specific_or)
        where_part['$and'] = [{'$or': array_or}]
        # Each branch selects the requested granularity plus (where one
        # exists) the next finer granularity so the client can drill down.
        real_granularity_1 = 'world'
        real_granularity_2 = 'geo_group'
        if granularity == 'region':
            # Region is the finest level: no second granularity is added.
            real_granularity_1 = granularity
            array_conditions = []
            specific_or_1 = {'granularity': {'$eq': real_granularity_1}}
            array_conditions.append(specific_or_1)
            where_part['$or'] = array_conditions
            # where_part['granularity'] = {'$eq': granularity}
            where_part[granularity] = {'$eq': location}
        elif granularity == 'country':
            real_granularity_1 = granularity
            real_granularity_2 = 'region'
            array_conditions = []
            specific_or_1 = {'granularity': {'$eq': real_granularity_1}}
            array_conditions.append(specific_or_1)
            specific_or_2 = {'granularity': {'$eq': real_granularity_2}}
            array_conditions.append(specific_or_2)
            where_part['$or'] = array_conditions
            # where_part['granularity'] = {'$eq': granularity}
            where_part[granularity] = {'$eq': location}
        elif granularity == 'continent':
            # Continents are stored under the 'geo_group' field.
            real_granularity_1 = 'geo_group'
            real_granularity_2 = 'country'
            array_conditions = []
            specific_or_1 = {'granularity': {'$eq': real_granularity_1}}
            array_conditions.append(specific_or_1)
            specific_or_2 = {'granularity': {'$eq': real_granularity_2}}
            array_conditions.append(specific_or_2)
            where_part['$or'] = array_conditions
            # where_part['granularity'] = {'$eq': real_granularity_1}
            where_part['geo_group'] = {'$eq': location}
        else:
            # Default: world-level rows plus the continent breakdown.
            array_conditions = []
            specific_or_1 = {'granularity': {'$eq': real_granularity_1}}
            array_conditions.append(specific_or_1)
            specific_or_2 = {'granularity': {'$eq': real_granularity_2}}
            array_conditions.append(specific_or_2)
            where_part['$or'] = array_conditions
            # where_part['granularity'] = {'$eq': real_granularity}
        if lineage is not None:
            where_part['lineage'] = {'$eq': lineage}
        match_part = {'$match': where_part}
        query = [match_part]
        results = collection_result_variant_db.aggregate(query, allowDiskUse=True)
        array_results = []
        # Flatten Mongo rows: format dates and derive a display 'location'.
        for res in list(results):
            single_obj = {}
            for key in res:
                if key == 'analysis_date':
                    single_obj[key] = str(res[key].strftime('%Y-%m-%d'))
                elif key == 'granularity':
                    single_obj[key] = res[key]
                    # 'world' rows carry no location field of their own.
                    if res[key] == 'world':
                        single_obj['location'] = 'World'
                    else:
                        single_obj['location'] = res[res['granularity']]
                else:
                    single_obj[key] = res[key]
            array_results.append(single_obj)
        # Merge the per-week rows into one object per location/lineage/mutation.
        result_dict = create_unique_array_results(array_results, today_date)
        array_to_return = list(result_dict.values())
        return array_to_return
def create_unique_array_results(array_results, today_date):
result_dict = {}
for single_res in array_results:
single_obj = {}
if single_res['location'] is None:
location = 'none'
else:
location = single_res['location']
if single_res['lineage'] is None:
lineage = 'none'
else:
lineage = single_res['lineage']
if single_res['mut'] is None:
mut = 'none'
else:
mut = single_res['mut']
if single_res['granularity'] is None:
granularity = 'none'
else:
granularity = single_res['granularity']
id_single_obj = location + granularity + lineage + mut
if id_single_obj not in result_dict:
analysis_date = single_res['analysis_date']
for key in single_res:
if key == 'p_value_comparative_mut' or \
key == 'p_value_without_mut' or \
key == 'diff_perc_without_mut' or \
key == 'perc_without_mut_this_week' or \
key == 'perc_without_mut_prev_week' or \
key == 'count_without_mut_this_week' or \
key == 'count_without_mut_prev_week' or \
key == 'p_value_with_mut' or \
key == 'diff_perc_with_mut' or \
key == 'perc_with_mut_this_week' or \
key == 'perc_with_mut_prev_week' or \
key == 'count_with_mut_this_week' or \
key == 'count_with_mut_prev_week':
new_key = key + '_' + analysis_date
single_obj[new_key] = single_res[key]
elif key == 'total_seq_pop_this_week_with_mut':
new_key = 'total_seq_pop_this_week' + '_' + analysis_date
single_obj[new_key] = single_res[key]
elif key == 'total_seq_pop_prev_week_with_mut':
new_key = 'total_seq_pop_prev_week' + '_' + analysis_date
single_obj[new_key] = single_res[key]
elif key == 'analysis_date':
single_obj[key] = str(today_date.strftime('%Y-%m-%d'))
elif key == 'mut':
single_obj['protein'] = single_res[key].split('_')[0]
single_obj[key] = single_res[key].split('_')[1]
else:
if key != 'total_seq_world_prev_week' and \
key != 'total_seq_world_this_week':
single_obj[key] = single_res[key]
key_lineage_1 = 'total_seq_lineage_this_week' + '_' + analysis_date
single_obj[key_lineage_1] = single_res['count_with_mut_this_week'] \
+ single_res['count_without_mut_this_week']
key_lineage_2 = 'total_seq_lineage_prev_week' + '_' + analysis_date
single_obj[key_lineage_2] = single_res['count_with_mut_prev_week'] \
+ single_res['count_without_mut_prev_week']
key_diff = 'diff_perc' + '_' + analysis_date
if single_obj[key_lineage_1] != 0:
factor_1 = single_res['count_with_mut_this_week'] / single_obj[key_lineage_1]
else:
factor_1 = 0
if single_obj[key_lineage_2] != 0:
factor_2 = single_res['count_with_mut_prev_week'] / single_obj[key_lineage_2]
else:
factor_2 = 0
single_obj[key_diff] = (factor_1 - factor_2)*100
result_dict[id_single_obj] = single_obj
else:
analysis_date = single_res['analysis_date']
for key in single_res:
if key == 'p_value_comparative_mut' or \
key == 'p_value_without_mut' or \
key == 'diff_perc_without_mut' or \
key == 'perc_without_mut_this_week' or \
key == 'perc_without_mut_prev_week' or \
key == 'count_without_mut_this_week' or \
key == 'count_without_mut_prev_week' or \
key == 'p_value_with_mut' or \
key == 'diff_perc_with_mut' or \
key == 'perc_with_mut_this_week' or \
key == 'perc_with_mut_prev_week' or \
key == 'count_with_mut_this_week' or \
key == 'count_with_mut_prev_week':
new_key = key + '_' + analysis_date
result_dict[id_single_obj][new_key] = single_res[key]
elif key == 'total_seq_pop_this_week_with_mut':
new_key = 'total_seq_pop_this_week' + '_' + analysis_date
result_dict[id_single_obj][new_key] = single_res[key]
elif key == 'total_seq_pop_prev_week_with_mut':
new_key = 'total_seq_pop_prev_week' + '_' + analysis_date
result_dict[id_single_obj][new_key] = single_res[key]
key_lineage_1 = 'total_seq_lineage_this_week' + '_' + analysis_date
result_dict[id_single_obj][key_lineage_1] = single_res['count_with_mut_this_week'] \
+ single_res['count_without_mut_this_week']
key_lineage_2 = 'total_seq_lineage_prev_week' + '_' + analysis_date
result_dict[id_single_obj][key_lineage_2] = single_res['count_with_mut_prev_week'] \
+ single_res['count_without_mut_prev_week']
key_diff = 'diff_perc' + '_' + analysis_date
if result_dict[id_single_obj][key_lineage_1] != 0:
factor_1 = single_res['count_with_mut_this_week'] / result_dict[id_single_obj][key_lineage_1]
else:
factor_1 = 0
if result_dict[id_single_obj][key_lineage_2] != 0:
factor_2 = single_res['count_with_mut_prev_week'] / result_dict[id_single_obj][key_lineage_2]
else:
factor_2 = 0
| |
from __future__ import print_function, division, absolute_import
import numpy as np
from scipy.misc import imsave, imread, imresize
from sklearn.feature_extraction.image import reconstruct_from_patches_2d, extract_patches_2d
from scipy.ndimage.filters import gaussian_filter
from keras import backend as K
import os
import time
'''
_image_scale_multiplier is a special variable which is used to alter image size.
The default image size is 32x32. If a true upscaling model is used, then the input image size is 16x16,
which not offer adequate training samples.
'''
# Patch geometry: source images are resized to img_size x img_size and cut
# into patches on a `stride` grid (both scaled by _image_scale_multiplier).
_image_scale_multiplier = 1
img_size = 128 * _image_scale_multiplier
stride = 64 * _image_scale_multiplier
# The strided grid must tile the image exactly.
assert (img_size ** 2) % (stride ** 2) == 0, "Number of images generated from strided subsample of the image needs to be \n" \
                                             "a positive integer. Change stride such that : \n" \
                                             "(img_size ** 2) / (stride ** 2) is a positive integer."
# NOTE(review): machine-specific absolute paths below — confirm before
# running outside the original author's environment.
input_path = r"D:\Yue\Documents\Datasets\train2014\train2014\\"  # r"input_images/"
validation_path = r"val_images/"  # r"D:\Yue\Documents\Datasets\MSCOCO\val\valset\\" # r"val_images/"
validation_set5_path = validation_path + "set5/"
validation_set14_path = validation_path + "set14/"
base_dataset_dir = os.path.expanduser("~") + "/Image Super Resolution Dataset/"
output_path = base_dataset_dir + "train_images/train/"
validation_output_path = base_dataset_dir + r"train_images/validation/"
if not os.path.exists(output_path):
    os.makedirs(output_path)
# def transform_images(directory, output_directory, scaling_factor=2, max_nb_images=-1, true_upscale=False):
# index = 1
#
# if not os.path.exists(output_directory + "X/"):
# os.makedirs(output_directory + "X/")
#
# if not os.path.exists(output_directory + "y/"):
# os.makedirs(output_directory + "y/")
#
# # For each image in input_images directory
# nb_images = len([name for name in os.listdir(directory)])
#
# if max_nb_images != -1:
# print("Transforming %d images." % max_nb_images)
# else:
# assert max_nb_images <= nb_images, "Max number of images must be less than number of images in path"
# print("Transforming %d images." % (nb_images))
#
# if nb_images == 0:
# print("Extract the training images or images from imageset_91.zip (found in the releases of the project) "
# "into a directory with the name 'input_images'")
# print("Extract the validation images or images from set5_validation.zip (found in the releases of the project) "
# "into a directory with the name 'val_images'")
# exit()
#
# for file in os.listdir(directory):
# img = imread(directory + file, mode='RGB')
#
# # Resize to 256 x 256
# img = imresize(img, (img_size, img_size))
#
# # Create patches
# hr_patch_size = (16 * scaling_factor * _image_scale_multiplier)
# nb_hr_images = (img_size ** 2) // (stride ** 2)
#
# hr_samples = np.empty((nb_hr_images, hr_patch_size, hr_patch_size, 3))
#
# image_subsample_iterator = subimage_generator(img, stride, hr_patch_size, nb_hr_images)
#
# stride_range = np.sqrt(nb_hr_images).astype(int)
#
# i = 0
# for j in range(stride_range):
# for k in range(stride_range):
# hr_samples[i, :, :, :] = next(image_subsample_iterator)
# i += 1
#
# lr_patch_size = 16 * _image_scale_multiplier
#
# t1 = time.time()
# # Create nb_hr_images 'X' and 'Y' sub-images of size hr_patch_size for each patch
# for i in range(nb_hr_images):
# ip = hr_samples[i]
# # Save ground truth image X
# imsave(output_directory + "/y/" + "%d_%d.png" % (index, i + 1), ip)
#
# # Apply Gaussian Blur to Y
# op = gaussian_filter(ip, sigma=0.5)
#
# # Subsample by scaling factor to Y
# op = imresize(op, (lr_patch_size, lr_patch_size), interp='bicubic')
#
# if not true_upscale:
# # Upscale by scaling factor to Y
# op = imresize(op, (hr_patch_size, hr_patch_size), interp='bicubic')
#
# # Save Y
# imsave(output_directory + "/X/" + "%d_%d.png" % (index, i+1), op)
#
# print("Finished image %d in time %0.2f seconds. (%s)" % (index, time.time() - t1, file))
# index += 1
#
# if max_nb_images > 0 and index >= max_nb_images:
# print("Transformed maximum number of images. ")
# break
#
# print("Images transformed. Saved at directory : %s" % (output_directory))
def transform_images_temp(directory, output_directory, scaling_factor=2, max_nb_images=-1, true_upscale=False,
                          id_advance=0):
    """Cut every image in `directory` into 64x64 ground-truth patches and
    32x32 (optionally re-upscaled) degraded inputs for super-resolution
    training.

    Ground-truth patches go under ``<output_directory>/y/`` and inputs under
    ``<output_directory>/X/`` with matching ``<image>_<patch>.png`` names so
    the X/y pairs line up.

    Parameters
    ----------
    directory : str
        Folder of source images.
    output_directory : str
        Destination root; ``X/`` and ``y/`` subfolders are created if missing.
    scaling_factor : int
        Nominal super-resolution factor. NOTE(review): patch sizes are
        hard-coded to 64/32 below, so values other than 2 are effectively
        ignored — confirm.
    max_nb_images : int
        Stop after this many images; -1 means process everything.
    true_upscale : bool
        When True, keep inputs at 32x32 instead of re-upscaling them to 64x64.
    id_advance : int
        Offset added to image indices so several runs can share one dataset.
    """
    index = 1
    if not os.path.exists(output_directory + "X/"):
        os.makedirs(output_directory + "X/")
    if not os.path.exists(output_directory + "y/"):
        os.makedirs(output_directory + "y/")
    # For each image in input_images directory
    nb_images = len([name for name in os.listdir(directory)])
    if max_nb_images != -1:
        # BUG FIX: validate the cap where it matters (the assert previously
        # sat in the -1 branch, where it was vacuously true).
        assert max_nb_images <= nb_images, "Max number of images must be less than number of images in path"
        print("Transforming %d images." % max_nb_images)
    else:
        print("Transforming %d images." % (nb_images))
    if nb_images == 0:
        print("Extract the training images or images from imageset_91.zip (found in the releases of the project) "
              "into a directory with the name 'input_images'")
        print("Extract the validation images or images from set5_validation.zip (found in the releases of the project) "
              "into a directory with the name 'val_images'")
        exit()
    for file in os.listdir(directory):
        img = imread(directory + file, mode='RGB')
        # Resize to img_size x img_size (not 256; the old comment was stale).
        img = imresize(img, (img_size, img_size))
        # Create patches
        hr_patch_size = 64
        lr_patch_size = 32
        nb_hr_images = (img_size ** 2) // (stride ** 2)
        hr_samples = np.empty((nb_hr_images, hr_patch_size, hr_patch_size, 3))
        image_subsample_iterator = subimage_generator(img, stride, hr_patch_size, nb_hr_images)
        stride_range = np.sqrt(nb_hr_images).astype(int)
        i = 0
        for j in range(stride_range):
            for k in range(stride_range):
                hr_samples[i, :, :, :] = next(image_subsample_iterator)
                i += 1
        t1 = time.time()
        # Create nb_hr_images 'X' and 'y' sub-images for each patch
        for i in range(nb_hr_images):
            ip = hr_samples[i]
            # Save ground truth patch under y/
            imsave(output_directory + "/y/" + "%d_%d.png" % (index + id_advance, i + 1), ip)
            # Subsample by the scaling factor to produce the low-res input
            op = imresize(ip, (lr_patch_size, lr_patch_size), interp='bicubic')
            if not true_upscale:
                # Upscale back so input and target share the same size
                op = imresize(op, (hr_patch_size, hr_patch_size), interp='bicubic')
            # BUG FIX: use the same patch number as the y/ file above (the X
            # name previously added id_advance to the patch index too, so X/y
            # pairs diverged whenever id_advance != 0).
            imsave(output_directory + "/X/" + "%d_%d.png" % (index + id_advance, i + 1), op)
        print("Finished image %d in time %0.2f seconds. (%s)" % (index + id_advance, time.time() - t1, file))
        index += 1
        if max_nb_images > 0 and index >= max_nb_images:
            print("Transformed maximum number of images. ")
            break
    print("Images transformed. Saved at directory : %s" % (output_directory))
def image_count():
    """Number of training input patches currently on disk under X/."""
    return len(os.listdir(output_path + "X/"))
def val_image_count():
    """Number of validation input patches currently on disk under X/."""
    return len(os.listdir(validation_output_path + "X/"))
def subimage_generator(img, stride, patch_size, nb_hr_images):
    """Yield ``patch_size`` x ``patch_size`` crops of `img` on a `stride`
    grid (row-major), using the module-level ``img_size`` as the grid extent.

    NOTE(review): the outer ``range(nb_hr_images)`` loop restarts the whole
    grid, so this generator can yield the same patch sequence up to
    ``nb_hr_images`` times; callers in this file consume exactly
    ``nb_hr_images`` patches via ``next``, so only the first grid pass is
    normally used — confirm before simplifying.
    """
    for _ in range(nb_hr_images):
        for x in range(0, img_size, stride):
            for y in range(0, img_size, stride):
                # Edge crops may be smaller than patch_size if stride does
                # not tile img_size with patch_size; callers rely on exact
                # tiling (see the module-level assert).
                subimage = img[x : x + patch_size, y : y + patch_size, :]
                yield subimage
def make_patches(x, scale, patch_size, upscale=True, verbose=1):
    """Split image `x` (rows, cols, channels) into dense overlapping
    ``patch_size`` x ``patch_size`` patches, optionally bicubic-upscaling it
    by `scale` first. (`verbose` is accepted for interface compatibility.)"""
    rows, cols = x.shape[:2]
    if upscale:
        x = imresize(x, (rows * scale, cols * scale))
    return extract_patches_2d(x, (patch_size, patch_size))
def combine_patches(in_patches, out_shape, scale):
    """Reassemble overlapping ``in_patches`` into one image of ``out_shape``.

    ``scale`` is accepted for signature symmetry with ``make_patches`` but
    is not used by the reconstruction.
    """
    return reconstruct_from_patches_2d(in_patches, out_shape)
def image_generator(directory, scale_factor=2, target_shape=None, channels=3, small_train_images=False, shuffle=True,
                    batch_size=32, nb_inputs=1, seed=None):
    """Infinite generator of (input batch, target batch) image pairs.

    Reads paired PNGs from ``directory + "X/"`` (network inputs) and
    ``directory + "y/"`` (ground truth), scales pixel values to [0, 1]
    floats, and yields batches forever (Keras ``fit_generator`` style).
    Shapes are chosen up front from ``target_shape`` /
    ``small_train_images`` / the backend dim ordering; the module-level
    global ``_image_scale_multiplier`` scales the base 16/32-pixel sizes.

    :param directory: root folder containing ``X/`` and ``y/`` subfolders.
    :param scale_factor: super-resolution upscaling factor.
    :param target_shape: explicit (rows, cols) output shape, or None to
        use the 16/32-pixel defaults.
    :param channels: number of color channels (3 assumed for "th" paths).
    :param small_train_images: if True, X images are the low-res size and
        are resized to 32 * _image_scale_multiplier on load.
    :param shuffle: whether _index_generator shuffles indices per epoch.
    :param batch_size: images per yielded batch.
    :param nb_inputs: if > 1, the input batch is replicated into a list of
        that length (multi-input models).
    :param seed: RNG seed forwarded to _index_generator.
    """
    if not target_shape:
        if small_train_images:
            if K.image_dim_ordering() == "th":
                # channels-first: low-res input, scaled-up target
                image_shape = (channels, 16 * _image_scale_multiplier, 16 * _image_scale_multiplier)
                y_image_shape = (channels, 16 * scale_factor * _image_scale_multiplier,
                                 16 * scale_factor * _image_scale_multiplier)
            else:
                # image_shape = (16 * _image_scale_multiplier, 16 * _image_scale_multiplier, channels)
                # y_image_shape = (16 * scale_factor * _image_scale_multiplier,
                #                  16 * scale_factor * _image_scale_multiplier, channels)
                image_shape = (32 * _image_scale_multiplier, 32 * _image_scale_multiplier, channels)
                y_image_shape = (32 * scale_factor * _image_scale_multiplier,
                                 32 * scale_factor * _image_scale_multiplier, channels)
        else:
            # input already upscaled: X and y share one shape
            if K.image_dim_ordering() == "th":
                image_shape = (channels, 32 * scale_factor * _image_scale_multiplier, 32 * scale_factor * _image_scale_multiplier)
                y_image_shape = image_shape
            else:
                image_shape = (32 * scale_factor * _image_scale_multiplier, 32 * scale_factor * _image_scale_multiplier,
                               channels)
                y_image_shape = image_shape
    else:
        if small_train_images:
            # NOTE(review): target_shape is mutated here (divided by
            # scale_factor) to derive the low-res input shape.
            if K.image_dim_ordering() == "th":
                y_image_shape = (3,) + target_shape
                target_shape = (target_shape[0] * _image_scale_multiplier // scale_factor,
                                target_shape[1] * _image_scale_multiplier // scale_factor)
                image_shape = (3,) + target_shape
            else:
                y_image_shape = target_shape + (channels,)
                target_shape = (target_shape[0] * _image_scale_multiplier // scale_factor,
                                target_shape[1] * _image_scale_multiplier // scale_factor)
                image_shape = target_shape + (channels,)
        else:
            if K.image_dim_ordering() == "th":
                image_shape = (channels,) + target_shape
                y_image_shape = image_shape
            else:
                image_shape = target_shape + (channels,)
                y_image_shape = image_shape
    # X/ and y/ are expected to contain identically-named file pairs
    file_names = [f for f in sorted(os.listdir(directory + "X/"))]
    X_filenames = [os.path.join(directory, "X", f) for f in file_names]
    y_filenames = [os.path.join(directory, "y", f) for f in file_names]
    nb_images = len(file_names)
    print("Found %d images." % nb_images)
    index_generator = _index_generator(nb_images, batch_size, shuffle, seed)
    while 1:
        index_array, current_index, current_batch_size = next(index_generator)
        batch_x = np.zeros((current_batch_size,) + image_shape)
        batch_y = np.zeros((current_batch_size,) + y_image_shape)
        for i, j in enumerate(index_array):
            x_fn = X_filenames[j]
            img = imread(x_fn, mode='RGB')
            if small_train_images:
                # resize low-res inputs to the fixed network input size
                img = imresize(img, (32 * _image_scale_multiplier, 32 * _image_scale_multiplier))
            img = img.astype('float32') / 255.
            if K.image_dim_ordering() == "th":
                # HWC -> CHW for channels-first backends
                batch_x[i] = img.transpose((2, 0, 1))
            else:
                batch_x[i] = img
            y_fn = y_filenames[j]
            img = imread(y_fn, mode="RGB")
            img = img.astype('float32') / 255.
            if K.image_dim_ordering() == "th":
                batch_y[i] = img.transpose((2, 0, 1))
            else:
                batch_y[i] = img
        if nb_inputs == 1:
            yield (batch_x, batch_y)
        else:
            # replicate the input batch for multi-input models
            batch_x = [batch_x for i in range(nb_inputs)]
            yield batch_x, batch_y
def _index_generator(N, batch_size=32, shuffle=True, seed=None):
batch_index = 0
total_batches_seen = 0
while 1:
| |
else:
# print('empty table')
pass
# print(database.queued_queries)
# for query in database.queued_queries:
# print(query)
# database.query(query)
database.execute_queue()
# print(database.path)
return {'tables': newtables, 'output': output}
def addeditpartlist(d, output={'message': ''}):
    # NOTE(review): `output` is a mutable default argument -- the same dict is
    # shared across every call that omits it, so 'message' text accumulates
    # between calls. Prefer `output=None` plus an in-body default. Left
    # unchanged here to preserve behavior.
    from iiutilities import dblib, datalib
    """
    TODO: Optimize query structure
    This operates either on partdata OR partsdata list. Still does not optimize to run queries at once. Runs them
    in series. An obvious place for optimization
    We are going to use this for BOMs, orders, and assemblies.
    Minor differences are contained in meta, with exceptions for items:
    Order and assembly items have status field. This will just magically appear in the items, however, as taken
    from the itemslist in the globals
    We typically grab all properties of a part from the stock database before we call this, in the UI populated fields.
    We don't always want to, however, so we have the option of filling from the stock database. This means we can be
    incomplete. So 'add three of part A003' with 'copystock'='missing' will add all of the part data to the BOM or
    order, minimizing required data transfer and simplifying operations. 'copystock'='all' will overwrite all sent data
    except for quantity and partid.
    So to use this function as a 'refresh data from stock', simply call it, for example, with a bomname and a list of
    parts with 'copystock'='all'
    """
    # NOTE(review): the triple-quoted string above is not the function
    # docstring (it follows the import statement), so it is a no-op
    # expression; moving it above the import would make it a real docstring.
    # Defaults
    settings = {'addqty':False, 'copystock':None}
    settings.update(d)
    # Resolve database paths for each list type we may operate on.
    stockdbpath = sysvars.dirs.dbs.stock
    ordersdbpath = sysvars.dirs.dbs.orders
    bomsdbpath = sysvars.dirs.dbs.boms
    inventoriesdbpath = sysvars.dirs.dbs.inventories
    assembliesdbpath = sysvars.dirs.dbs.assemblies
    # Dispatch on which list keyword was supplied.
    # NOTE(review): `type` shadows the builtin of the same name (and appears
    # unused below); `tablekey`/`activedbpath`/`listpartproperties` carry the
    # per-list configuration.
    if 'bomname' in settings:
        output['message'] += 'bomname key present. '
        type = 'bom'
        activedbpath = bomsdbpath
        tablekey = 'bomname'
        listpartproperties = tableitems.bompart_schema.columns()
    elif 'ordername' in settings:
        output['message'] += 'ordername key present. '
        type = 'order'
        activedbpath = ordersdbpath
        tablekey = 'ordername'
        listpartproperties = tableitems.orderpart_schema.columns()
    elif 'assemblyname' in settings:
        output['message'] += 'assemblyname key present. '
        type = 'assembly'
        activedbpath = assembliesdbpath
        tablekey = 'assemblyname'
        listpartproperties = tableitems.assemblypart_schema.columns()
    elif 'inventoryname' in settings:
        output['message'] += 'inventoryname key present. '
        type = 'inventory'
        activedbpath = inventoriesdbpath
        tablekey = 'inventoryname'
        listpartproperties = tableitems.inventorypart_schema.columns()
    else:
        output['message'] += 'No suitable keyword present for command. Terminating. '
        return output
    if 'partdata' not in settings and 'partsdata' not in settings:
        output['message'] += 'No partdata or partsdata present in request. Terminating. '
        return output
    # Always operate on a list of parts to speed things up.
    if 'partdata' in settings:
        settings['partsdata'] = [settings['partdata']]
    if 'message' not in output:
        output['message'] = ''
    tablename = str(d[tablekey])
    activedb = dblib.sqliteDatabase(activedbpath)
    # Determine whether or not the part already exists in the BOM
    existing_parts = activedb.read_table(tablename)
    # This is just columns
    # Eventually this should be more robust and actually enforce types by pragma
    ordercolumns = activedb.get_pragma_names(tablename)
    print('partsdata')
    print(settings['partsdata'])
    for index, part_to_modify in enumerate(settings['partsdata']):
        print('{}: Part to modify \n{}\n'.format(index, part_to_modify))
        if settings['copystock'] in ['all', 'missing']:
            # Get the stock part entry for reference and backfill purposes
            # NOTE(review): bare except silently maps any lookup failure to
            # "part not in stock"; consider narrowing.
            try:
                stockpart = dblib.readonedbrow(stockdbpath, 'stock', condition="partid='" + part_to_modify['partid'] + "'")[0]
            except:
                stockpart = None
                pass
                # print('error in stockpart result')
            # revert to no copystock if we can't find the part
            # NOTE(review): this mutates `settings`, so the downgrade persists
            # for all remaining parts in this call.
            if not stockpart:
                settings['copystock'] = None
                stockpart = {}
        else:
            stockpart = {}
        """
        We are going to totally rebuild the database if database format changes.
        TODO: Totally fix and unkludge this using new schema.
        We are only going to do
        this, however, if a sample part that exists in the database does not contain all the fields of the new entry
        We do this because there are concurrency issues with recreating simultaneously with, for example multiple
        asynchronous calls. Insert operations are atomic, however, so if we can run insert whenever possible, we will
        do that.
        """
        # Test: (again, this should eventually test pragma and properly form a database using schema)
        # `inclusive` == True means the existing table already has every
        # schema column, so a plain insert is safe (no rebuild needed).
        inclusive = True
        if ordercolumns:
            for property in listpartproperties:
                if property not in ordercolumns:
                    inclusive = False
                    break
        newparts = []
        thenewpart = {}
        """
        We iterate over every part in the existing database table.
        We make a new part.
        If the part we are modifying matches, matchpart = True and partexists = True
        TODO: OMG FIX THIS UGLY MESS
        """
        partexists = False
        for existing_part in existing_parts:
            newpart = {'partid': existing_part['partid']}
            matchpart = False
            print('existing part, part to modify')
            print(existing_part)
            print(part_to_modify)
            if existing_part['partid'] == part_to_modify['partid']:
                output['message'] += 'Part ' + existing_part['partid'] + ' / ' + part_to_modify['partid'] + ' was found. '
                matchpart = True
                partexists = True
            # If we have a match, copy all data from previous part and stockpart where appropriate
            # depending on backfill options.
            for property in listpartproperties:
                if matchpart:
                    # add qty if requested. otherwise just paste new value
                    if property == 'qty':
                        # print(partdatum['partid'] + ', ' + str(settings['addqty']))
                        if settings['addqty']:
                            # print('partdatum qty ' + str(partdatum['qty'] + ', orderpart qty ' + str(orderpart['qty'])))
                            newpart['qty'] = float(part_to_modify['qty']) + float(existing_part['qty'])
                        else:
                            newpart['qty'] = part_to_modify['qty']
                    elif settings['copystock'] == 'all' and property != 'qty':
                        # get all part data from stock entry
                        # except qty, which is special
                        newpart[property] = stockpart[property]
                    else:
                        if property in part_to_modify:
                            # print('property ' + property + ' found in partdata')
                            # make sure not empty
                            if part_to_modify[property]:
                                newpart[property] = part_to_modify[property]
                                continue
                        # Combined elif via continue
                        # Have to protect against properties that are in order and not stock
                        if settings['copystock'] == 'missing' and property in stockpart:
                            # get part data if available from stock entry
                            newpart[property] = stockpart[property]
                        else:
                            # print('empty property ' + property)
                            newpart[property] = ''
                # If we don't have a match, just copy existing properties, mapped appropriately
                else:
                    if property in existing_part:
                        newpart[property] = existing_part[property]
                    else:
                        newpart[property] = ''
            # We make a single copy to use if we are not reconstructing database
            newparts.append(newpart)
            if matchpart:
                thenewpart = newpart.copy()
        if not partexists:
            output['message'] += 'Part not found in existing bom. Creating from scratch. '
            if 'partid' in part_to_modify:
                output['message'] += 'key partdata[partid] found in d with value ' + part_to_modify['partid'] + '. '
                newpart = {'partid': part_to_modify['partid']}
                for property in listpartproperties:
                    if settings['copystock'] == 'all' and property != 'qty':
                        # get all part data from stock entry
                        # except qty, which is special
                        newpart[property] = stockpart[property]
                    else:
                        if property in part_to_modify:
                            # print('property ' + property + ' found')
                            # make sure not empty
                            if part_to_modify[property]:
                                newpart[property] = part_to_modify[property]
                                continue
                            else:
                                # print('property empty.')
                                pass
                        # Have to protect against properties that are in order and not stock
                        # print('input dictionary' )
                        # print(d)
                        if settings['copystock'] == 'missing':
                            # print('at copystock for property ' + property)
                            pass
                        if settings['copystock'] == 'missing' and property in stockpart:
                            # get part data if available from stock entry
                            newpart[property] = stockpart[property]
                        else:
                            newpart[property] = ''
                newparts.append(newpart)
                thenewpart = newpart.copy()
            else:
                output['message'] += 'key partdata[partid] not found in d. '
        output['message'] += 'Reinserting. '
        if inclusive:
            output['message'] += 'Structure was found to be inclusive. Not rebuilding. '
            # if partexists:
            #     activedb.insert
            #
            #     dblib.sqlitedeleteitem(activedb, tablename, "partid='" + thenewpart['partid'] + "'")
            # # print('THE NEW NPART')
            # # print(thenewpart)
            # try:
            # assumes uniquekey and auto replace
            activedb.insert(tablename, thenewpart)
            # dblib.insertstringdicttablelist(activedb, tablename, [thenewpart], droptable=False)
            # except:
            #     output['message'] += 'Error in query on "' + activedb.path + '" + and table "' + tablename + '. '
        else:
            output['message'] += 'Structure was not found to be inclusive. rebuilding. '
            dblib.insertstringdicttablelist(activedb, tablename, newparts, droptable=True)
    # Recalculate quantities. Autotyped based on kwargs
    recalcpartdata(**{tablekey: tablename})
    return output
def refreshpartsfromstock(d, output={'message': ''}):
from iiutilities import dblib
notouchkeys = ['qty', 'partid']
if 'bomname' or 'assemblyname' in d:
if 'bomname' in d:
output['message'] += 'bomname found. '
databasepath = sysvars.dirs.dbs.boms
tablename = d['bomname']
schema = tableitems.bompart_schema
elif 'assemblyname' in d:
output['message'] += 'assemblyname found. '
databasepath = sysvars.dirs.dbs.assemblies
tablename = d['assemblyname']
schema = tableitems.assemblypart_schema
database = dblib.sqliteDatabase(databasepath)
stock_database = dblib.sqliteDatabase(sysvars.dirs.dbs.stock)
if 'partids' in d:
output['message'] += 'partids found. '
for partid in d['partids']:
output['message'] += 'processing ' + partid + '. '
| |
space
if rowCount > 12:
self.figure.subplots_adjust(hspace=0)
else:
self.figure.subplots_adjust(hspace=0.1)
axisRangeNumbers = (0, 1)
self.setAxisRange('x', axisRangeNumbers, 0)
# turn off grid
self.grid = False
class GraphColorGridLegend(Graph):
    '''
    Grid of discrete colored "blocks" where each block can be labeled
    Data is provided as a list of lists of colors, where colors are specified as a hex triplet,
    or the common HTML color codes, and based on analysis-specific mapping of colors to results.
    >>> #_DOCS_SHOW g = graph.primitives.GraphColorGridLegend()
    >>> g = graph.primitives.GraphColorGridLegend(doneAction=None) #_DOCS_HIDE
    >>> data = []
    >>> data.append(('Major', [('C#', '#00AA55'), ('D-', '#5600FF'), ('G#', '#2B00FF')]))
    >>> data.append(('Minor', [('C#', '#004600'), ('D-', '#00009b'), ('G#', '#00009B')]))
    >>> g.data = data
    >>> g.process()
    .. image:: images/GraphColorGridLegend.*
        :width: 600
    '''
    _DOC_ATTR = {
        'hideLeftBottomSpines': 'bool to hide the left and bottom axis spines; default True',
    }
    # identifier used by the Graph framework for this plot type
    graphType = 'colorGridLegend'
    figureSizeDefault = (5, 1.5)
    keywordConfigurables = Graph.keywordConfigurables + ('hideLeftBottomSpines',)

    def __init__(self, *args, **keywords):
        # set before super().__init__ so keyword processing can override it
        self.hideLeftBottomSpines = True
        super().__init__(*args, **keywords)
        if 'title' not in keywords:
            self.title = 'Legend'

    def renderSubplot(self, subplot):
        '''Draw one legend row (its own sub-axes) per (label, rowData)
        pair in ``self.data``, then strip ticks/labels from the hosting
        subplot so only the rows are visible.'''
        for i, rowLabelAndData in enumerate(self.data):
            rowLabel = rowLabelAndData[0]
            rowData = rowLabelAndData[1]
            self.makeOneRowOfGraph(self.figure, i, rowLabel, rowData)
        self.setAxisRange('x', (0, 1), 0)
        # hide tick marks on the hosting subplot; the per-row axes carry labels
        allTickLines = subplot.get_xticklines() + subplot.get_yticklines()
        for j, line in enumerate(allTickLines):
            line.set_visible(False)
        # sets the space between subplots
        # top and bottom here push diagram more toward center of frame
        # may be useful in other graphs
        # ,
        self.figure.subplots_adjust(hspace=1.5, top=0.75, bottom=0.2)
        self.setAxisLabel('y', '')
        self.setAxisLabel('x', '')
        self.setTicks('y', [])
        self.setTicks('x', [])

    def makeOneRowOfGraph(self, figure, rowIndex, rowLabel, rowData):
        # noinspection PyShadowingNames
        '''
        Makes a subplot for one row of data (such as for the Major label)
        and returns a matplotlib.axes.AxesSubplot instance representing the subplot.
        Here we create an axis with a part of Scriabin's mapping of colors
        to keys in Prometheus: The Poem of Fire.
        >>> import matplotlib.pyplot
        >>> colorLegend = graph.primitives.GraphColorGridLegend()
        >>> rowData = [('C', '#ff0000'), ('G', '#ff8800'), ('D', '#ffff00'),
        ...            ('A', '#00ff00'), ('E', '#4444ff')]
        >>> colorLegend.data = [['Scriabin Mapping', rowData]]
        >>> fig = matplotlib.pyplot.figure()
        >>> subplot = colorLegend.makeOneRowOfGraph(fig, 0, 'Scriabin Mapping', rowData)
        >>> subplot
        <AxesSubplot:>
        '''
        # environLocal.printDebug(['rowLabel', rowLabel, i])
        # one unit-height bar per color swatch, at x = 1, 2, 3, ...
        positions = []
        heights = []
        subColors = []
        for j, oneColorMapping in enumerate(rowData):
            positions.append(1.0 + j)
            subColors.append(oneColorMapping[1])  # second value is colors
            heights.append(1)
        # add a new subplot for each row
        posTriple = (len(self.data), 1, rowIndex + 1)
        # environLocal.printDebug(['posTriple', posTriple])
        ax = figure.add_subplot(*posTriple)
        # ax is an Axes object
        # 1 here is width
        width = 1
        ax.bar(positions, heights, width, color=subColors, linewidth=0.3, edgecolor='#000000')
        # lower thickness of spines
        for spineArtist in ax.spines.values():
            # spineArtist.set_color('none')  # don't draw spine
            spineArtist.set_linewidth(0.3)
            spineArtist.set_color('#000000')
        # remove all ticks for subplots
        allTickLines = ax.get_xticklines() + ax.get_yticklines()
        for j, line in enumerate(allTickLines):
            line.set_visible(False)
        # need one label for each left side; 0.5 is in the middle
        ax.set_yticks([0.5])
        ax.set_yticklabels([rowLabel],
                           fontsize=self.tickFontSize,
                           family=self.fontFamily,
                           horizontalalignment='right',
                           verticalalignment='center')  # one label for one tick
        # need a label for each bars
        ax.set_xticks([x + 1 for x in range(len(rowData))])
        # get labels from row data; first of pair
        # need to push y down as need bottom alignment for lower case
        substitutedAccidentalLabels = [accidentalLabelToUnicode(x)
                                       for x, unused_y in rowData]
        ax.set_xticklabels(
            substitutedAccidentalLabels,
            fontsize=self.tickFontSize,
            family=self.fontFamily,
            horizontalalignment='center',
            verticalalignment='center',
            y=-0.4)
        # this is the scaling to see all bars; not necessary
        ax.set_xlim([0.5, len(rowData) + 0.5])
        return ax
class GraphHorizontalBar(Graph):
    '''
    Numerous horizontal bars in discrete channels, where bars
    can be incomplete and/or overlap.
    Data provided is a list of pairs, where the first value becomes the key,
    the second value is a list of x-start, x-length values.
    >>> a = graph.primitives.GraphHorizontalBar()
    >>> a.doneAction = None #_DOCS_HIDE
    >>> data = [('Chopin', [(1810, 1849-1810)]),
    ...         ('Schumanns', [(1810, 1856-1810), (1819, 1896-1819)]),
    ...         ('Brahms', [(1833, 1897-1833)])]
    >>> a.data = data
    >>> a.process()
    .. image:: images/GraphHorizontalBar.*
        :width: 600
    '''
    _DOC_ATTR = {
        'barSpace': 'Amount of vertical space each bar takes; default 8',
        'margin': 'Space around the bars, default 2',
    }
    graphType = 'horizontalBar'
    figureSizeDefault = (10, 4)
    keywordConfigurables = Graph.keywordConfigurables + (
        'barSpace', 'margin')

    def __init__(self, *args, **keywords):
        # channel geometry defaults; set before super() so keyword
        # configuration can override them
        self.barSpace = 8
        self.margin = 2
        super().__init__(*args, **keywords)
        if 'alpha' not in keywords:
            self.alpha = 0.6

    @property
    def barHeight(self):
        # drawable height of a bar: channel space minus top+bottom margins
        return self.barSpace - (self.margin * 2)

    def renderSubplot(self, subplot):
        '''Draw one broken_barh channel per (key, points) pair in
        ``self.data`` and derive axis ranges and ticks from the data.

        NOTE(review): if no entry contributes any points, ``min(xPoints)``
        below raises ValueError on the empty list -- confirm callers never
        pass all-empty data.
        '''
        self.figure.subplots_adjust(left=0.15)
        yPos = 0
        xPoints = []  # store all to find min/max
        yTicks = []  # a list of label, value pairs
        xTicks = []
        keys = []
        i = 0
        # TODO: check data orientation; flips in some cases
        for info in self.data:
            # entries are (key, points) or (key, points, formatDict)
            if len(info) == 2:
                key, points = info
                unused_formatDict = {}
            else:
                key, points, unused_formatDict = info
            keys.append(key)
            # provide a list of start, end points;
            # then start y position, bar height
            faceColor = self.nextColor()
            if points:
                yRange = (yPos + self.margin,
                          self.barHeight)
                subplot.broken_barh(points,
                                    yRange,
                                    facecolors=faceColor,
                                    alpha=self.alpha)
                for xStart, xLen in points:
                    xEnd = xStart + xLen
                    for x in [xStart, xEnd]:
                        if x not in xPoints:
                            xPoints.append(x)
            # ticks are value, label
            yTicks.append([yPos + self.barSpace * 0.5, key])
            # yTicks.append([key, yPos + self.barSpace * 0.5])
            yPos += self.barSpace
            i += 1
        xMin = min(xPoints)
        xMax = max(xPoints)
        xRange = xMax - xMin
        # environLocal.printDebug(['got xMin, xMax for points', xMin, xMax, ])
        self.setAxisRange('y', (0, len(keys) * self.barSpace))
        self.setAxisRange('x', (xMin, xMax))
        self.setTicks('y', yTicks)
        # first, see if ticks have been set externally
        if 'ticks' in self.axis['x'] and not self.axis['x']['ticks']:
            # auto-generate ~10 x ticks across the data range
            rangeStep = int(xMin + round(xRange / 10))
            if rangeStep == 0:
                rangeStep = 1
            for x in range(int(math.floor(xMin)),
                           round(math.ceil(xMax)),
                           rangeStep):
                xTicks.append([x, f'{x}'])
            self.setTicks('x', xTicks)
class GraphHorizontalBarWeighted(Graph):
'''
Numerous horizontal bars in discrete channels,
where bars can be incomplete and/or overlap, and
can have different heights and colors within their
respective channel.
'''
_DOC_ATTR = {
'barSpace': 'Amount of vertical space each bar takes; default 8',
'margin': 'Space around the bars, default 2',
}
graphType = 'horizontalBarWeighted'
figureSizeDefault = (10, 4)
keywordConfigurables = Graph.keywordConfigurables + (
'barSpace', 'margin')
def __init__(self, *args, **keywords):
self.barSpace = 8
self.margin = 0.25 # was 8; determines space between channels
super().__init__(*args, **keywords)
# this default alpha is used if not specified per bar
if 'alpha' not in keywords:
self.alpha = 1
# example data
# data = [
# ('Violins', [(3, 5, 1, '#fff000'), (1, 12, 0.2, '#3ff203')] ),
# ('Celli', [(2, 7, 0.2, '#0ff302'), (10, 3, 0.6, '#ff0000', 1)] ),
# ('Clarinet', [(5, 1, 0.5, '#3ff203')] ),
# ('Flute', [(5, 1, 0.1, '#00ff00'), (7, 20, 0.3, '#00ff88')] ),
# ]
@property
def barHeight(self):
return self.barSpace - (self.margin * 2)
def renderSubplot(self, subplot):
# might need more space here for larger y-axis labels
self.figure.subplots_adjust(left=0.15)
yPos = 0
xPoints = [] # store all to find min/max
yTicks = [] # a list of label, value pairs
# xTicks = []
keys = []
i = 0
# reversing data to present in order
self.data = list(self.data)
self.data.reverse()
for key, points in self.data:
keys.append(key)
xRanges = []
yRanges = []
alphas = []
colors = []
for i, data in enumerate(points):
x = 0
span = None
heightScalar = 1
color = self.nextColor()
alpha = self.alpha
yShift = 0 # between -1 and 1
if len(data) == 3:
x, span, heightScalar = data
elif len(data) == 4:
x, span, heightScalar, color = data
elif len(data) == 5:
x, span, heightScalar, color, alpha = data
elif len(data) == 6:
x, span, heightScalar, color, alpha, yShift = data
# filter color value
color = getColor(color)
# add to x ranges
xRanges.append((x, span))
colors.append(color)
alphas.append(alpha)
# x points used to get x ticks
if x not in xPoints:
xPoints.append(x)
if (x + span) not in xPoints:
xPoints.append(x + span)
# TODO: add high/low shift to position w/n range
# provide a list of start, end points;
# then start y position, bar height
h = self.barHeight * heightScalar
yAdjust = (self.barHeight - h) * 0.5
yShiftUnit = self.barHeight * (1 - heightScalar) * 0.5
adjustedY = yPos + self.margin + yAdjust + (yShiftUnit * yShift)
yRanges.append((adjustedY, h))
for i, xRange in enumerate(xRanges):
# note: can get ride of bounding lines by providing
# | |
note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func GetAntiAliasingOffsetInfo(CATBSTR ioAdminLevel,
| CATBSTR ioLocked) As boolean
|
| Retrieves information about the AntiAliasingOffset setting
| parameter.
| Refer to SettingController for a detailed description.
:param str io_admin_level:
:param str io_locked:
:return: bool
:rtype: bool
"""
return self.visualization_setting_att.GetAntiAliasingOffsetInfo(io_admin_level, io_locked)
def get_auxiliary_drill_viewer_info(self, io_admin_level: str, io_locked: str) -> bool:
    """Retrieve admin-level/lock information for the AuxiliaryDrillViewer setting.

    Wraps ``VisualizationSettingAtt.GetAuxiliaryDrillViewerInfo`` (CAA V5);
    refer to SettingController for the detailed description.

    :param str io_admin_level:
    :param str io_locked:
    :return: bool
    :rtype: bool
    """
    setting_att = self.visualization_setting_att
    return setting_att.GetAuxiliaryDrillViewerInfo(io_admin_level, io_locked)
def get_back_face_culling_mode(self) -> int:
    """Return the BackFaceCullingMode setting value.

    Wraps ``VisualizationSettingAtt.GetBackFaceCullingMode`` (CAA V5); the
    value is one of the four CATBackFaceCullingMode enumeration values.

    :return: int
    :rtype: int
    """
    setting_att = self.visualization_setting_att
    return setting_att.GetBackFaceCullingMode()
def get_back_face_culling_mode_info(self, io_admin_level: str, io_locked: str) -> bool:
    """Retrieve admin-level/lock information for the BackFaceCullingMode setting.

    Wraps ``VisualizationSettingAtt.GetBackFaceCullingModeInfo`` (CAA V5);
    refer to SettingController for the detailed description.

    :param str io_admin_level:
    :param str io_locked:
    :return: bool
    :rtype: bool
    """
    setting_att = self.visualization_setting_att
    return setting_att.GetBackFaceCullingModeInfo(io_admin_level, io_locked)
def get_background_rgb(self, io_r: int, io_g: int, io_b: int) -> None:
    """Read the BackgroundRGB setting parameter.

    Wraps ``VisualizationSettingAtt.GetBackgroundRGB`` (CAA V5); the three
    arguments correspond to the ioR/ioG/ioB longs of the COM signature.

    :param int io_r:
    :param int io_g:
    :param int io_b:
    :return: None
    :rtype: None
    """
    setting_att = self.visualization_setting_att
    return setting_att.GetBackgroundRGB(io_r, io_g, io_b)
def get_background_rgb_info(self, io_admin_level: str, io_locked: str) -> bool:
    """Retrieve admin-level/lock information for the BackgroundRGB setting.

    Wraps ``VisualizationSettingAtt.GetBackgroundRGBInfo`` (CAA V5); refer
    to SettingController for the detailed description.

    :param str io_admin_level:
    :param str io_locked:
    :return: bool
    :rtype: bool
    """
    setting_att = self.visualization_setting_att
    return setting_att.GetBackgroundRGBInfo(io_admin_level, io_locked)
def get_border_edges_mode_info(self, io_admin_level: str, io_locked: str) -> bool:
    """Retrieve admin-level/lock information for the BorderEdgesMode setting.

    Wraps ``VisualizationSettingAtt.GetBorderEdgesModeInfo`` (CAA V5);
    refer to SettingController for the detailed description.

    :param str io_admin_level:
    :param str io_locked:
    :return: bool
    :rtype: bool
    """
    setting_att = self.visualization_setting_att
    return setting_att.GetBorderEdgesModeInfo(io_admin_level, io_locked)
def get_border_edges_rgb(self, io_r: int, io_g: int, io_b: int) -> None:
    """Read the BorderEdgesRGB setting parameter.

    Wraps ``VisualizationSettingAtt.GetBorderEdgesRGB`` (CAA V5); the three
    arguments correspond to the ioR/ioG/ioB longs of the COM signature.

    :param int io_r:
    :param int io_g:
    :param int io_b:
    :return: None
    :rtype: None
    """
    setting_att = self.visualization_setting_att
    return setting_att.GetBorderEdgesRGB(io_r, io_g, io_b)
def get_border_edges_rgb_info(self, io_admin_level: str, io_locked: str) -> bool:
    """Retrieve admin-level/lock information for the BorderEdgesRGB setting.

    Wraps ``VisualizationSettingAtt.GetBorderEdgesRGBInfo`` (CAA V5); refer
    to SettingController for the detailed description.

    :param str io_admin_level:
    :param str io_locked:
    :return: bool
    :rtype: bool
    """
    setting_att = self.visualization_setting_att
    return setting_att.GetBorderEdgesRGBInfo(io_admin_level, io_locked)
def get_border_edges_thickness_info(self, io_admin_level: str, io_locked: str) -> bool:
    """Retrieve admin-level/lock information for the BorderEdgesThickness setting.

    Wraps ``VisualizationSettingAtt.GetBorderEdgesThicknessInfo`` (CAA V5);
    refer to SettingController for the detailed description.

    :param str io_admin_level:
    :param str io_locked:
    :return: bool
    :rtype: bool
    """
    setting_att = self.visualization_setting_att
    return setting_att.GetBorderEdgesThicknessInfo(io_admin_level, io_locked)
def get_bounding_box_selection_mode_info(self, io_admin_level: str, io_locked: str) -> bool:
    """Retrieve admin-level/lock information for the BoundingBoxSelectionMode setting.

    Wraps ``VisualizationSettingAtt.GetBoundingBoxSelectionModeInfo``
    (CAA V5); refer to SettingController for the detailed description.

    :param str io_admin_level:
    :param str io_locked:
    :return: bool
    :rtype: bool
    """
    setting_att = self.visualization_setting_att
    return setting_att.GetBoundingBoxSelectionModeInfo(io_admin_level, io_locked)
def get_color_background_mode_info(self, io_admin_level: str, io_locked: str) -> bool:
    """Retrieve admin-level/lock information for the ColorBackgroundMode setting.

    Wraps ``VisualizationSettingAtt.GetColorBackgroundModeInfo`` (CAA V5);
    refer to SettingController for the detailed description.

    :param str io_admin_level:
    :param str io_locked:
    :return: bool
    :rtype: bool
    """
    setting_att = self.visualization_setting_att
    return setting_att.GetColorBackgroundModeInfo(io_admin_level, io_locked)
def get_default_diffuse_ambient_coefficient_info(self, io_admin_level: str, io_locked: str) -> bool:
    """Retrieve admin-level/lock information for the DefaultDiffuseAmbientCoefficient setting.

    Wraps ``VisualizationSettingAtt.GetDefaultDiffuseAmbientCoefficientInfo``
    (CAA V5); refer to SettingController for the detailed description.

    :param str io_admin_level:
    :param str io_locked:
    :return: bool
    :rtype: bool
    """
    setting_att = self.visualization_setting_att
    return setting_att.GetDefaultDiffuseAmbientCoefficientInfo(io_admin_level, io_locked)
def get_default_shininess_info(self, io_admin_level: str, io_locked: str) -> bool:
    """Retrieve admin-level/lock information for the DefaultShininess setting.

    Wraps ``VisualizationSettingAtt.GetDefaultShininessInfo`` (CAA V5);
    refer to SettingController for the detailed description.

    :param str io_admin_level:
    :param str io_locked:
    :return: bool
    :rtype: bool
    """
    setting_att = self.visualization_setting_att
    return setting_att.GetDefaultShininessInfo(io_admin_level, io_locked)
def get_default_specular_coefficient_info(self, io_admin_level: str, io_locked: str) -> bool:
    """Retrieve admin-level/lock information for the DefaultSpecularCoefficient setting.

    Wraps ``VisualizationSettingAtt.GetDefaultSpecularCoefficientInfo``
    (CAA V5); refer to SettingController for the detailed description.

    :param str io_admin_level:
    :param str io_locked:
    :return: bool
    :rtype: bool
    """
    setting_att = self.visualization_setting_att
    return setting_att.GetDefaultSpecularCoefficientInfo(io_admin_level, io_locked)
def get_display_current_scale_info(self, io_admin_level: str, io_locked: str) -> bool:
    """Retrieve admin-level/lock information for the DisplayCurrentScale setting.

    NOTE: the generated docstring wrongly referred to the
    ``SetStereoModeLock`` parameter; this method wraps
    ``VisualizationSettingAtt.GetDisplayCurrentScaleInfo`` (CAA V5).
    Refer to SettingController for the detailed description.

    :param str io_admin_level:
    :param str io_locked:
    :return: bool
    :rtype: bool
    """
    return self.visualization_setting_att.GetDisplayCurrentScaleInfo(io_admin_level, io_locked)
def get_display_drill_list_info(self, io_admin_level: str, io_locked: str) -> bool:
    """Retrieve admin-level/lock information for the DisplayDrillList setting.

    Wraps ``VisualizationSettingAtt.GetDisplayDrillListInfo`` (CAA V5);
    refer to SettingController for the detailed description.

    :param str io_admin_level:
    :param str io_locked:
    :return: bool
    :rtype: bool
    """
    setting_att = self.visualization_setting_att
    return setting_att.GetDisplayDrillListInfo(io_admin_level, io_locked)
def get_display_immersive_drill_viewer_info(self, io_admin_level: str, io_locked: str) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func GetDisplayImmersiveDrillViewerInfo(CATBSTR
| ioAdminLevel,
| CATBSTR ioLocked) As boolean
|
| Retrieves information about the DisplayImmersiveDrillViewer setting
| parameter.
| Refer to SettingController for a detailed description.
:param str io_admin_level:
:param str io_locked:
:return: bool
:rtype: bool
"""
return self.visualization_setting_att.GetDisplayImmersiveDrillViewerInfo(io_admin_level, io_locked)
def get_dynamic_cull_info(self, io_admin_level: str, io_locked: str) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func GetDynamicCullInfo(CATBSTR ioAdminLevel,
| CATBSTR ioLocked) As boolean
|
| Retrieves information about the DynamicCull setting
| parameter.
| Refer to SettingController for a detailed description.
:param str io_admin_level:
:param str io_locked:
:return: bool
:rtype: bool
"""
return self.visualization_setting_att.GetDynamicCullInfo(io_admin_level, io_locked)
def get_dynamic_lod_info(self, io_admin_level: str, io_locked: str) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func GetDynamicLODInfo(CATBSTR ioAdminLevel,
| CATBSTR ioLocked) As boolean
|
| Retrieves information about the DynamicLOD setting
| parameter.
| Refer to SettingController for a detailed description.
:param str io_admin_level:
:param str io_locked:
:return: bool
:rtype: bool
"""
return | |
model_version=expected['model_version'],
prediction_ids=ids,
prediction_labels=labels,
features=None,
feature_names_overwrite=None,
prediction_timestamps=None)
for _, bulk in records.items():
assert isinstance(bulk, public__pb2.BulkRecord)
for r in bulk.records:
assert isinstance(r, public__pb2.Record)
assert not bool(r.organization_key)
assert not bool(r.model_id)
assert not bool(r.prediction.features)
def test_build_bulk_prediction_with_feature_names_overwrites():
    """Bulk predictions built with ``feature_names_overwrite`` must carry the
    overwritten names on every serialized feature."""
    client = get_stubbed_client()
    features, labels, ids = mock_dataframes_clean_nan(file_to_open)
    overwrite = ['mask_' + str(i) for i in range(len(features.columns))]
    records = client.bulk_log(model_id=expected['model'],
                              model_version=expected['model_version'],
                              prediction_ids=ids,
                              prediction_labels=labels,
                              features=features,
                              feature_names_overwrite=overwrite,
                              prediction_timestamps=None)
    for bulk in records.values():
        assert isinstance(bulk, public__pb2.BulkRecord)
        for rec in bulk.records:
            assert isinstance(rec, public__pb2.Record)
            # org key / model id live on the bulk envelope, not each record
            assert not bool(rec.organization_key)
            assert not bool(rec.model_id)
            assert bool(rec.prediction.features)
            for feature_name in rec.prediction.features:
                assert feature_name in overwrite
def test_build_bulk_actuals_dataframes():
    """Bulk actuals from dataframes serialize every row into a numeric actual
    record with a default (zero) timestamp."""
    client = get_stubbed_client()
    _, labels, ids = mock_dataframes_clean_nan(file_to_open)
    bulk_records = client.bulk_log(model_id=expected['model'],
                                   prediction_ids=ids,
                                   actual_labels=labels)
    seen = 0
    for indexes, bulk in bulk_records.items():
        # a single bundle covering the full id range is expected
        assert indexes == (0, len(ids))
        assert bulk.organization_key == expected['organization_key']
        assert bulk.model_id == expected['model']
        assert not hasattr(bulk, 'timestamp')
        for record in bulk.records:
            assert isinstance(record, public__pb2.Record)
            assert isinstance(record.actual.label, public__pb2.Label)
            assert record.prediction_id == ids[0][seen]
            assert record.actual.label.WhichOneof('data') == 'numeric'
            # no timestamp supplied -> proto default of zero
            assert record.actual.timestamp.seconds == 0
            assert record.actual.timestamp.nanos == 0
            seen += 1
    assert seen == len(ids)
def test_validate_bulk_predictions_timestamp_out_of_range():
    """A bulk prediction timestamp more than a year in the future must be
    rejected with ``ValueError``.

    Uses ``pytest.raises`` instead of the manual try/except/isinstance
    pattern, consistent with the other validation tests in this module.
    """
    c = get_stubbed_client()
    features, labels, ids = mock_dataframes_clean_nan(file_to_open)
    current_time = datetime.datetime.now().timestamp()
    earlier_time = (datetime.datetime.now() - datetime.timedelta(days=30)).timestamp()
    prediction_timestamps = np.linspace(earlier_time, current_time, num=len(ids))
    prediction_timestamps = pd.Series(prediction_timestamps.astype(int))
    # break one of the timestamps: push it past the one-year validation window
    prediction_timestamps.iloc[4] = int(current_time) + (366 * 24 * 60 * 60)
    with pytest.raises(ValueError):
        c.bulk_log(
            model_id=expected['model'],
            model_version=expected['model_version'],
            prediction_ids=ids,
            prediction_labels=labels,
            features=features,
            prediction_timestamps=prediction_timestamps,
        )
def test_validate_bulk_predictions_with_nan():
    """Null prediction labels must be rejected with a descriptive error."""
    client = get_stubbed_client()
    features, labels, ids = mock_dataframes_clean_nan(file_to_open)
    # intentionally assign np.nan to labels
    labels.loc[labels.sample(frac=0.1).index, 0] = np.nan
    with pytest.raises(ValueError) as excinfo:
        client.bulk_log(
            model_id=expected['model'],
            model_version=expected['model_version'],
            prediction_ids=ids,
            prediction_labels=labels,
            features=features,
            feature_names_overwrite=None,
            prediction_timestamps=None)
    assert str(excinfo.value) == "prediction labels cannot contain null value"
def test_validate_bulk_predictions_mismatched_shapes():
    """Any input whose length disagrees with the others must raise ValueError.

    The four copy-pasted try/except blocks of the original are collapsed into
    one parameterized loop using ``pytest.raises`` (already the idiom used by
    ``test_validate_bulk_predictions_with_nan``).
    """
    c = get_stubbed_client()
    features, labels, ids = mock_dataframes_clean_nan(file_to_open)
    feature_names_overwrite = [
        'mask_' + str(i) for i in range(len(features.columns))
    ]
    # each entry truncates exactly one of the aligned inputs
    mismatched_cases = [
        dict(prediction_ids=ids[3:], prediction_labels=labels,
             features=features, feature_names_overwrite=feature_names_overwrite),
        dict(prediction_ids=ids, prediction_labels=labels,
             features=features[3:], feature_names_overwrite=None),
        dict(prediction_ids=ids, prediction_labels=labels[3:],
             features=None, feature_names_overwrite=None),
        dict(prediction_ids=ids, prediction_labels=labels,
             features=features, feature_names_overwrite=feature_names_overwrite[3:]),
    ]
    for case_kwargs in mismatched_cases:
        with pytest.raises(ValueError):
            c.bulk_log(model_id=expected['model'],
                       model_version=expected['model_version'],
                       prediction_timestamps=None,
                       **case_kwargs)
def test_build_bulk_prediction_with_prediction_timestamps():
    """Per-row prediction timestamps are accepted and attached to each record."""
    client = get_stubbed_client()
    features, labels, ids = mock_dataframes_clean_nan(file_to_open)
    # one strictly increasing timestamp per row, anchored at "now"
    timestamps = [int(time.time()) + i for i in range(features.shape[0])]
    records = client.bulk_log(model_id=expected['model'],
                              model_version=expected['model_version'],
                              prediction_ids=ids,
                              prediction_labels=labels,
                              features=features,
                              feature_names_overwrite=None,
                              prediction_timestamps=timestamps)
    for bulk in records.values():
        assert isinstance(bulk, public__pb2.BulkRecord)
        for rec in bulk.records:
            assert isinstance(rec, public__pb2.Record)
            assert not bool(rec.organization_key)
            assert not bool(rec.model_id)
            assert bool(rec.prediction.features)
            assert rec.prediction.timestamp is not None
def test_handle_log_prediction_with_prediction_timestamps():
    """A single log() call propagates the given timestamp into the proto."""
    now = int(time.time())
    client = get_stubbed_client()
    record = client.log(model_id=expected['model'],
                        model_version=expected['model_version'],
                        prediction_id=expected['prediction_id'],
                        prediction_label=expected['value_binary'],
                        features=expected['features'],
                        prediction_timestamp=now)
    assert isinstance(record.prediction, public__pb2.Prediction)
    assert bool(record.prediction.features)
    # seconds must round-trip exactly
    assert record.prediction.timestamp.seconds == now
def test_build_bulk_predictions_index():
    """Prediction ids supplied as a pandas Index-backed Series serialize the
    same as a plain id column."""
    client = get_stubbed_client()
    features, labels, idx = mock_dataframes_clean_nan(file_to_open)
    # round-trip the ids through a DataFrame index to get an index-backed Series
    ids = pd.DataFrame(index=idx.values, data=idx.values).index.to_series()
    bulk_records = client.bulk_log(model_id=expected['model'],
                                   prediction_ids=ids,
                                   prediction_labels=labels,
                                   features=features,
                                   model_version=expected['model_version'],
                                   feature_names_overwrite=None,
                                   prediction_timestamps=None)
    seen = 0
    for bulk in bulk_records.values():
        assert bulk.organization_key == expected['organization_key']
        assert bulk.model_id == expected['model']
        assert not hasattr(bulk, 'timestamp')
        for record in bulk.records:
            assert isinstance(record, public__pb2.Record)
            assert isinstance(record.prediction.label, public__pb2.Label)
            assert len(record.prediction.features) == features.shape[1]
            assert record.prediction.label.WhichOneof('data') == 'numeric'
            assert record.prediction_id in idx.values
            seen += 1
    assert seen == len(ids)
def test_build_bulk_actuals_index():
    """Actual labels keyed by an index-backed id Series serialize correctly."""
    client = get_stubbed_client()
    _, labels, idx = mock_dataframes_clean_nan(file_to_open)
    ids = pd.DataFrame(index=idx.values, data=idx.values).index.to_series()
    bulk_records = client.bulk_log(model_id=expected['model'],
                                   prediction_ids=ids,
                                   actual_labels=labels)
    seen = 0
    for bulk in bulk_records.values():
        assert bulk.organization_key == expected['organization_key']
        assert bulk.model_id == expected['model']
        assert not hasattr(bulk, 'timestamp')
        for record in bulk.records:
            assert isinstance(record, public__pb2.Record)
            assert isinstance(record.actual.label, public__pb2.Label)
            assert record.prediction_id == ids[seen][0]
            assert record.actual.label.WhichOneof('data') == 'numeric'
            assert record.prediction_id in idx.values
            seen += 1
    assert seen == len(ids)
def test_build_bulk_binary_predictions():
    """Boolean prediction labels serialize with the 'binary' oneof."""
    client = get_stubbed_client()
    features, _, idx = mock_dataframes_clean_nan(file_to_open)
    ids = pd.DataFrame(index=idx.values, data=idx.values).index.to_series()
    # derive a boolean label column from the mpg feature
    features['pred'] = features['mpg'].apply(lambda x: x > 15)
    bulk_records = client.bulk_log(model_id=expected['model'],
                                   prediction_ids=ids,
                                   prediction_labels=features['pred'],
                                   features=features,
                                   model_version=expected['model_version'],
                                   feature_names_overwrite=None,
                                   prediction_timestamps=None)
    seen = 0
    for bulk in bulk_records.values():
        assert bulk.organization_key == expected['organization_key']
        assert bulk.model_id == expected['model']
        assert not hasattr(bulk, 'timestamp')
        for record in bulk.records:
            assert isinstance(record, public__pb2.Record)
            assert isinstance(record.prediction.label, public__pb2.Label)
            assert len(record.prediction.features) == features.shape[1]
            assert record.prediction.label.WhichOneof('data') == 'binary'
            assert record.prediction_id in idx.values
            seen += 1
    assert seen == len(ids)
def test_build_bulk_binary_actuals():
    """Boolean actual labels serialize with the 'binary' oneof."""
    client = get_stubbed_client()
    features, _, idx = mock_dataframes_clean_nan(file_to_open)
    # derive a boolean actual column from the mpg feature
    features['actual'] = features['mpg'].apply(lambda x: x > 15)
    ids = pd.DataFrame(index=idx.values, data=idx.values).index.to_series()
    bulk_records = client.bulk_log(model_id=expected['model'],
                                   prediction_ids=ids,
                                   actual_labels=features['actual'])
    seen = 0
    for bulk in bulk_records.values():
        assert bulk.organization_key == expected['organization_key']
        assert bulk.model_id == expected['model']
        assert not hasattr(bulk, 'timestamp')
        for record in bulk.records:
            assert isinstance(record, public__pb2.Record)
            assert isinstance(record.actual.label, public__pb2.Label)
            assert record.prediction_id == ids[seen][0]
            assert record.actual.label.WhichOneof('data') == 'binary'
            assert record.prediction_id in idx.values
            seen += 1
    assert seen == len(ids)
def test_build_feature_importances():
    """A log() call with only shap_values yields a FeatureImportances record."""
    client = get_stubbed_client()
    record = client.log(model_id=expected['model'],
                        prediction_id=expected['prediction_id'],
                        shap_values=expected['feature_importances'])
    assert isinstance(record, public__pb2.Record)
    assert isinstance(record.feature_importances, public__pb2.FeatureImportances)
    assert record.organization_key == expected['organization_key']
    assert record.model_id == expected['model']
    assert record.prediction_id == expected['prediction_id']
    importances = record.feature_importances.feature_importances
    assert len(importances) == len(expected['feature_importances'])
def test_prediction_timestamp_out_of_range():
    """A single-record timestamp more than a year ahead must raise ValueError.

    Rewritten with ``pytest.raises`` for consistency with the other
    validation tests in this module.
    """
    c = get_stubbed_client()
    with pytest.raises(ValueError):
        c.log(
            model_id=expected['model'],
            prediction_id=expected['prediction_id'],
            model_version=expected['model_version'],
            model_type=ModelTypes.CATEGORICAL,
            prediction_label='HOTDOG',
            features=expected['features'],
            # 380 days ahead: past the one-year validation window
            prediction_timestamp=int(time.time()) + (380 * 24 * 60 * 60),
        )
def test_build_missing_data():
    """log() with neither labels, features nor shap values must raise.

    Rewritten with ``pytest.raises`` for consistency with the other
    validation tests in this module.
    """
    c = get_stubbed_client()
    # Error because everything is None
    with pytest.raises(ValueError):
        c.log(model_id=expected['model'],
              prediction_id=expected['prediction_id'])
def test_build_feature_importances_error_empty_data():
    """An empty shap_values mapping must raise ValueError.

    Rewritten with ``pytest.raises`` for consistency with the other
    validation tests in this module.
    """
    c = get_stubbed_client()
    # Error because no feature_importances were provided
    with pytest.raises(ValueError):
        c.log(model_id=expected['model'],
              prediction_id=expected['prediction_id'],
              shap_values={})
def test_build_feature_importances_error_wrong_data_type():
    """Non-numeric shap values must raise TypeError.

    Rewritten with ``pytest.raises`` for consistency with the other
    validation tests in this module.
    """
    c = get_stubbed_client()
    with pytest.raises(TypeError):
        c.log(model_id=expected['model'],
              prediction_id=expected['prediction_id'],
              # feature importances should be float, so this will produce an error
              shap_values={"a": "string"})
def test_build_bulk_feature_importances():
    """Bulk shap values produce one FeatureImportances record per id."""
    client = get_stubbed_client()
    features, _, pred_ids = mock_dataframes_clean_nan(file_to_open)
    # random importances, one row per prediction id, one column per feature
    data = np.random.rand(len(pred_ids), len(features.columns))
    feature_importances = pd.DataFrame(data=data, columns=features.columns)
    ids = pd.DataFrame(index=pred_ids.values, data=pred_ids.values).index.to_series()
    bulk_records = client.bulk_log(model_id=expected['model'],
                                   prediction_ids=ids,
                                   shap_values=feature_importances)
    seen = 0
    for bulk in bulk_records.values():
        assert bulk.organization_key == expected['organization_key']
        assert bulk.model_id == expected['model']
        assert not hasattr(bulk, 'timestamp')
        for record in bulk.records:
            assert isinstance(record, public__pb2.Record)
            fi = record.feature_importances
            assert isinstance(fi, public__pb2.FeatureImportances)
            assert len(fi.feature_importances) == len(features.columns)
            assert record.prediction_id == ids[seen][0]
            assert record.prediction_id in pred_ids.values
            seen += 1
    assert seen == len(ids)
def test_build_bulk_feature_importances_error_mismatch():
    """A shap-value frame shorter than the id series must raise ValueError.

    Rewritten with ``pytest.raises`` for consistency with the other
    validation tests in this module.
    """
    c = get_stubbed_client()
    features, _, pred_ids = mock_dataframes_clean_nan(file_to_open)
    # Make the length of feature importances data array mismatch the number of prediction ids
    data = np.random.rand(len(pred_ids) - 1, len(features.columns))
    feature_importances = pd.DataFrame(data=data, columns=features.columns)
    ids = pd.DataFrame(index=pred_ids.values, data=pred_ids.values).index.to_series()
    with pytest.raises(ValueError):
        c.bulk_log(model_id=expected['model'],
                   prediction_ids=ids,
                   shap_values=feature_importances)
# def test_build_bulk_feature_importances_error_wrong_data_type():
# features, _, pred_ids = mock_dataframes(file_to_open)
#
# # Replace one of the rows in the feature importances data with values of the wrong data type (i.e. not float)
# data = np.random.rand(len(pred_ids) - 1, len(features.columns))
# data_wrong_type = np.ones(len(features.columns), dtype=bool)
#
# data = np.vstack((data, data_wrong_type))
# feature_importances = pd.DataFrame(data=data, columns=features.columns)
# ids = pd.DataFrame(index=pred_ids.values, data=pred_ids.values).index.to_series()
#
# ex = None
# try:
# bulk_fi = BulkFeatureImportances(organization_key=expected['organization_key'],
# model_id=expected['model'],
# prediction_ids=ids,
# feature_importances=feature_importances)
#
# bulk_fi.validate_inputs()
# except Exception as err:
# # caused by wrong type
# ex = err
#
# assert isinstance(ex, ValueError)
def test_build_training_records():
    """Every row of the training frame becomes one PreProductionRecord.

    Fix: the original reused the name ``recs`` for both the
    ``TrainingRecords`` builder and the per-bundle record list inside the
    loop, shadowing the builder; distinct names are used here.
    """
    features, labels, _ = mock_dataframes_clean_nan(file_to_open)
    training_recs = TrainingRecords(organization_key=expected['organization_key'],
                                    model_id=expected['model'],
                                    model_type=ModelTypes.NUMERIC,
                                    model_version=expected['model_version'],
                                    prediction_labels=labels,
                                    actual_labels=labels,
                                    features=features)
    bundles = training_recs.build_proto()
    record_count = 0
    for _, bundle_records in bundles.items():
        for rec in bundle_records:
            record_count += 1
            assert isinstance(rec, public__pb2.PreProductionRecord)
            assert isinstance(rec.training_record, public__pb2.PreProductionRecord.TrainingRecord)
            assert isinstance(rec.training_record.record, public__pb2.Record)
            inner = rec.training_record.record
            assert inner.organization_key == expected['organization_key']
            assert inner.model_id == expected['model']
            prediction = inner.prediction_and_actual.prediction
            assert prediction.model_version == expected['model_version']
            assert isinstance(prediction.label, public__pb2.Label)
            assert len(prediction.features) == features.shape[1]
            assert prediction.label.WhichOneof('data') == 'numeric'
            # no timestamp supplied -> proto default of zero
            assert prediction.timestamp.seconds == 0
            assert prediction.timestamp.nanos == 0
    assert record_count == len(labels)
def test_send_validation_records():
c = get_stubbed_client()
features, labels, pred_ids = mock_dataframes_clean_nan(file_to_open)
t = [int(time.time()) + i for i in range(features.shape[0])]
# make life a bit easier and just take the first record
features = features[:1]
labels = labels[:1]
pred_ids = pred_ids[:1]
t = t[:1]
result = c.log_validation_records(
model_id=expected['model'],
model_version=expected['model_version'],
batch_id=expected['batch'],
prediction_labels=labels,
actual_labels=labels,
prediction_ids=pred_ids,
model_type=ModelTypes.NUMERIC,
features=features,
prediction_timestamps=t,
)
# test values in single record
expected_prediction_id = pred_ids[0][0]
for _, recs in result.items():
for rec in recs:
assert isinstance(rec, public__pb2.PreProductionRecord)
assert isinstance(rec.validation_record, public__pb2.PreProductionRecord.ValidationRecord)
assert isinstance(rec.validation_record.record, public__pb2.Record)
assert rec.validation_record.batch_id == expected['batch']
| |
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-节点管理(BlueKing-BK-NODEMAN) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at https://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.http import JsonResponse
from django.utils.translation import ugettext_lazy as _
from rest_framework.decorators import action
from rest_framework.response import Response
from apps.generic import ModelViewSet
from apps.node_man import exceptions
from apps.node_man.constants import IamActionType
from apps.node_man.handlers.iam import IamHandler
from apps.node_man.handlers.permission import PackagePermission
from apps.node_man.handlers.plugin_v2 import PluginV2Handler
from apps.node_man.models import GsePluginDesc
from apps.node_man.serializers import plugin_v2
from apps.utils.local import get_request_username
from common.api import NodeApi
class PluginV2ViewSet(ModelViewSet):
    """V2 plugin management viewset: listing, editing, packaging and
    deploying GSE plugins. Package-level access is guarded by
    ``PackagePermission``."""

    model = GsePluginDesc
    # queryset is resolved per-action via handlers/NodeApi, not the ORM default
    queryset = None
    permission_classes = (PackagePermission,)
    def list(self, request, *args, **kwargs):
        """
        @api {GET} /v2/plugin/ 插件列表
        @apiName list_plugin
        @apiGroup plugin_v2
        @apiParam {String} [search] 插件别名&名称模糊搜索
        @apiParam {Boolean} [simple_all] 返回全部数据(概要信息,`id`, `description`, `name`),默认`False`
        @apiParam {Int} [page] 当前页数,默认`1`
        @apiParam {Int} [pagesize] 分页大小,默认`10`
        @apiParam {object} [sort] 排序
        @apiParam {String=["name", "category", "creator", "scenario", "description"]} [sort.head] 排序字段
        @apiParam {String=["ASC", "DEC"]} [sort.sort_type] 排序类型
        @apiParamExample {Json} 请求参数
        {
        }
        @apiSuccessExample {json} 成功返回:
        {
            "total": 2,
            "list": [
                {
                    "id": 1,
                    "description": "系统基础信息采集",
                    "name": "basereport",
                    "category": "官方插件",
                    "nodes_number": 123,
                    "source_app_code": "bk_nodeman",
                    "scenario": "CMDB上的实时数据,蓝鲸监控里的主机监控,包含CPU,内存,磁盘等",
                    "deploy_type": "整包部署"
                },
                {
                    "id": 2,
                    "description": "监控采集器",
                    "name": "bkmonitorbeat",
                    "category": "第三方插件",
                    "nodes_number": 321,
                    "source_app_code": "bk_monitor",
                    "scenario": "蓝鲸监控采集器,支持多种协议及多任务的采集,提供多种运行模式和热加载机制",
                    "deploy_type": "Agent自动部署"
                }
            ]
        }
        """
        # List plugins. Swap in the list serializer so the query params above
        # are validated for this action, then delegate to the handler.
        self.serializer_class = plugin_v2.PluginListSerializer
        return Response(PluginV2Handler.list_plugin(self.validated_data))
    def retrieve(self, request, *args, **kwargs):
        """
        @api {GET} /v2/plugin/{{pk}}/ 插件详情
        @apiName retrieve_plugin
        @apiGroup plugin_v2
        @apiParamExample {Json} 请求参数
        {
        }
        @apiSuccessExample {json} 成功返回:
        {
            "id": 1,
            "description": "系统基础信息采集",
            "name": "basereport",
            "category": "官方插件",
            "source_app_code": "bk_nodeman",
            "scenario": "CMDB上的实时数据,蓝鲸监控里的主机监控,包含CPU,内存,磁盘等",
            "deploy_type": "整包部署",
            "plugin_packages": [
                {
                    "id": 1,
                    "pkg_name": "basereport-10.1.12.tgz",
                    "project": "basereport",
                    "version": "10.1.12",
                    "config_templates": [
                        {"id": 1, "name": "basereport.conf", "version": "10.1", "is_main": true}
                    ],
                    "os": "linux",
                    "cpu_arch": "x86_64",
                    "support_os_cpu": "linux_x86_64",
                    "pkg_mtime": "2019-11-25 21:58:30",
                    "is_ready": True
                },
                {
                    "id": 2,
                    "pkg_name": "bkmonitorbeat-1.7.1.tgz",
                    "project": "bkmonitorbeat",
                    "version": "1.7.1",
                    "config_templates": [
                        {"id": 1, "name": "child1.conf", "version": "1.0", "is_main": false},
                        {"id": 2, "name": "child2.conf", "version": "1.1", "is_main": false},
                        {"id": 3, "name": "bkmonitorbeat.conf", "version": "0.1", "is_main": true}
                    ],
                    "os": "windows",
                    "cpu_arch": "x86",
                    "support_os_cpu": "windows_x86",
                    "pkg_mtime": "2019-11-25 21:58:30",
                    "is_ready": True
                }
            ]
        }
        """
        # Retrieve plugin details plus an "operate" permission flag for the caller.
        is_superuser = IamHandler().is_superuser(get_request_username())
        perms_ids = []
        if not is_superuser:
            # Check permissions: fetch the plugin-package-operate policy for this user
            perms_ids = IamHandler().fetch_policy(get_request_username(), [IamActionType.plugin_pkg_operate])[
                IamActionType.plugin_pkg_operate
            ]
        data = NodeApi.plugin_retrieve({"plugin_id": kwargs["pk"]})
        # Superusers can always operate; others only if the plugin id is in their policy
        data["permissions"] = {"operate": int(kwargs["pk"]) in perms_ids if not is_superuser else True}
        return Response(data)
    def update(self, request, *args, **kwargs):
        """
        @api {PUT} /v2/plugin/{{pk}}/ 编辑插件
        @apiName update_plugin
        @apiGroup plugin_v2
        @apiParam {String} description 插件别名
        @apiParamExample {Json} 请求参数
        {
            "description": "bkcloud",
        }
        """
        # Edit a plugin: only the description (alias) field is writable here.
        self.serializer_class = plugin_v2.PluginEditSerializer
        gse_plugin_desc = GsePluginDesc.objects.filter(id=kwargs["pk"]).first()
        if not gse_plugin_desc:
            raise exceptions.PluginNotExistError(_("不存在ID为: {id} 的插件").format(id=kwargs["pk"]))
        gse_plugin_desc.description = self.validated_data["description"]
        # update_fields keeps the write narrow to the edited column
        gse_plugin_desc.save(update_fields=["description"])
        return Response({})
    @action(detail=False, methods=["POST"], serializer_class=plugin_v2.PluginListHostSerializer)
    def list_plugin_host(self, request):
        """
        @api {POST} /v2/plugin/list_plugin_host/ 查询插件下主机
        @apiName list_plugin_host
        @apiGroup plugin_v2
        @apiParam {String} project 插件名称
        @apiParam {Int[]} [bk_biz_id] 业务ID
        @apiParam {Int[]} [bk_host_id] 主机ID
        @apiParam {List} [conditions] 搜索条件,支持os_type, ip, status, version, bk_cloud_id, node_from <br>
        query: IP、操作系统、Agent状态、Agent版本、云区域 单/多模糊搜索 <br>
        topology: 拓扑搜索,传入bk_set_ids, bk_module_ids
        @apiParam {List} [nodes] 拓扑节点, 例如:[{"bk_biz_id": 1, "bk_inst_id": 10, "bk_obj_id": "module"}, ...]
        @apiParam {Int[]} [exclude_hosts] 跨页全选排除主机
        @apiParam {Int} [page] 当前页数,默认为`1`
        @apiParam {Int} [pagesize] 分页大小,默认为`10`,`-1` 表示跨页全选
        @apiParamExample {Json} 请求参数
        {
            "description": "bkcloud",
        }
        :param request:
        :return: {
            "total": 1,
            "list": [
                {
                    "bk_cloud_id": 1,
                    "bk_cloud_name": "云区域名称",
                    "bk_biz_id": 2,
                    "bk_biz_name": "业务名称",
                    "bk_host_id": 1,
                    "os_type": "linux",
                    "inner_ip": "127.0.0.1",
                    "status": "RUNNING",
                    "plugin_status": {
                        "test_plugin": {
                            "version": "1.0.0",
                            "status": "RUNNING"
                        }
                    }
                }
            ]
        }
        """
        # Query the hosts a plugin is deployed on; filtering/paging handled by the handler.
        return Response(PluginV2Handler.list_plugin_host(params=self.validated_data))
    @action(detail=False, methods=["POST"], serializer_class=plugin_v2.PluginRegisterSerializer)
    def create_register_task(self, request):
        """
        @api {POST} /v2/plugin/create_register_task/ 创建注册任务
        @apiName create_register_task
        @apiGroup plugin_v2
        @apiParam {String} file_name 文件名
        @apiParam {Boolean} is_release 是否已发布
        @apiParam {Boolean} [is_template_load] 是否需要读取配置文件,缺省默认为`false`
        @apiParam {Boolean} [is_template_overwrite] 是否可以覆盖已经存在的配置文件,缺省默认为`false`
        @apiParam {List} [select_pkg_abs_paths] 指定注册包相对路径列表,缺省默认全部导入
        @apiParamExample {Json} 请求参数
        {
            "file_name": "bkunifylogbeat-7.1.28.tgz",
            "is_release": True,
            "select_pkg_abs_paths": ["bkunifylogbeat_linux_x86_64/bkunifylogbeat"]
        }
        @apiSuccessExample {json} 成功返回:
        {
            "job_id": 1
        }
        """
        # Create an async plugin-package registration task; returns its job id.
        return Response(NodeApi.create_register_task(self.validated_data))
    @action(detail=False, methods=["GET"], serializer_class=plugin_v2.PluginRegisterTaskSerializer)
    def query_register_task(self, request):
        """
        @api {GET} /v2/plugin/query_register_task/ 查询插件注册任务
        @apiName query_register_task
        @apiGroup plugin_v2
        @apiParam {Int} job_id 任务ID
        @apiParamExample {Json} 请求参数
        {
            "job_id": 1
        }
        @apiSuccessExample {json} 成功返回:
        {
            "is_finish": False,
            "status": "RUNNING",
            "message": "~",
        }
        """
        # Poll the status of a previously created registration task.
        return Response(NodeApi.query_register_task(self.validated_data))
    @action(detail=False, methods=["POST"], serializer_class=plugin_v2.PkgStatusOperationSerializer)
    def package_status_operation(self, request):
        """
        @api {POST} /v2/plugin/package_status_operation/ 插件包状态类操作
        @apiName package_status_operation
        @apiGroup plugin_v2
        @apiParam {String} operation 状态操作 `release`-`上线`,`offline`-`下线` `ready`-`启用`,`stop`-`停用`
        @apiParam {Int[]} [id] 插件包id列表,`id`和(`name`, `version`)至少有一个
        @apiParam {String} [name] 插件包名称
        @apiParam {String} [version] 版本号
        @apiParam {String} [cpu_arch] CPU类型,`x86` `x86_64` `powerpc`
        @apiParam {String} [os] 系统类型,`linux` `windows` `aix`
        @apiParam {String[]} [md5_list] md5列表
        @apiParamExample {Json} 请求参数
        {
        }
        @apiSuccessExample {json} 返回操作成功的插件包id列表:
        [1, 2, 4]
        """
        # Bulk status change (release/offline/ready/stop) on plugin packages.
        return Response(NodeApi.package_status_operation(self.validated_data))
    @action(detail=False, methods=["POST"], serializer_class=plugin_v2.ExportSerializer)
    def create_export_task(self, request):
        """
        @api {POST} /v2/plugin/create_export_task/ 触发插件打包导出
        @apiName create_export_plugin_task
        @apiGroup plugin_v2
        @apiParam {Object} query_params 插件信息,version, project, os[可选], cpu_arch[可选]
        @apiParam {String} category 插件类别
        @apiParamExample {Json} 请求参数
        {
            "category": "gse_plugin",
            "query_params": {
                "project": "test_plugin",
                "version": "1.0.0"
            }
        }
        @apiSuccessExample {json} 成功返回:
        {
            "job_id": 1
        }
        """
        # Kick off an async export (pack + download) task, stamped with the requester.
        params = self.validated_data
        params["creator"] = get_request_username()
        return Response(NodeApi.create_export_task(params))
    @action(detail=False, methods=["GET"], serializer_class=plugin_v2.QueryExportTaskSerializer)
    def query_export_task(self, request):
        """
        @api {GET} /v2/plugin/query_export_task/ 获取一个导出任务结果
        @apiName query_export_plugin_task
        @apiGroup plugin_v2
        @apiParam {Int} job_id 任务ID
        @apiParamExample {Json} 请求参数
        {
            "job_id": 1
        }
        @apiSuccessExample {json} 成功返回:
        {
            "is_finish": True,
            "is_failed": False,
            "download_url": "http://127.0.0.1//backend/export/download/",
            "error_message": "haha"
        }
        """
        # Poll the result (and download URL) of an export task.
        return Response(NodeApi.query_export_task(self.validated_data))
    @action(detail=False, methods=["POST"], serializer_class=plugin_v2.PluginParseSerializer)
    def parse(self, request):
        """
        @api {POST} /v2/plugin/parse/ 解析插件包
        @apiName plugin_parse
        @apiGroup plugin_v2
        @apiParam {String} file_name 文件名
        @apiParamExample {Json} 请求参数
        {
            "file_name": "basereport-10.1.12.tgz"
        }
        @apiSuccessExample {json} 成功返回:
        [
            {
                "result": True,
                "message": "新增插件",
                "pkg_abs_path": "basereport_linux_x86_64/basereport",
                "pkg_name": "basereport-10.1.12",
                "project": "basereport",
                "version": "10.1.12",
                "category": "官方插件",
                "config_templates": [
                    {"name": "child1.conf", "version": "1.0", "is_main": false},
                    {"name": "child2.conf", "version": "1.1", "is_main": false},
                    {"name": "basereport-main.config", "version": "0.1", "is_main": true}
                ],
                "os": "x86_64",
                "cpu_arch": "linux",
                "description": "高性能日志采集"
            },
            {
                "result": False,
                "message": "缺少project.yaml文件",
                "pkg_abs_path": "external_bkmonitorbeat_windows_x32/bkmonitorbeat",
                "pkg_name": None,
                "project": None,
                "version": None,
                "category": None,
                "config_templates": [],
                "os": "x32",
                "cpu_arch": "windows",
                "description": None
            },
        ]
        """
        # Parse an uploaded plugin archive into per-sub-package metadata.
        return Response(NodeApi.parse(self.validated_data))
    @action(detail=False, methods=["POST"], serializer_class=plugin_v2.PluginStatusOperationSerializer)
    def plugin_status_operation(self, request):
        """
        @api {POST} /v2/plugin/plugin_status_operation/ 插件状态类操作
        @apiName plugin_status_operation
        @apiGroup plugin_v2
        @apiParam {String} operation 状态操作 `ready`-`启用`,`stop`-`停用`
        @apiParam {Int[]} id 插件id列表
        @apiParamExample {Json} 请求参数
        {
            "operation": "stop",
            "id": [1, 2]
        }
        @apiSuccessExample {json} 返回操作成功的插件id列表:
        [1, 2]
        """
        # Bulk enable/disable (ready/stop) on plugins.
        return Response(NodeApi.plugin_status_operation(self.validated_data))
    @action(detail=True, methods=["GET"], serializer_class=plugin_v2.PluginQueryHistorySerializer)
    def history(self, request, pk):
        """
        @api {GET} /v2/plugin/{{pk}}/history/ 插件包历史
        @apiName plugin_history
        @apiGroup plugin_v2
        @apiParam {String} [os] 系统类型,`windows` `linux` `aix`
        @apiParam {String} [cpu_arch] cpu位数,`x86` `x86_64` `powerpc`
        @apiParam {Int[]} [pkg_ids] 插件包id列表
        @apiParamExample {Json} 请求参数
        {
        }
        @apiSuccessExample {json} 成功返回:
        [
            {
                "id": 1,
                "pkg_name": "basereport-1.0.tgz",
                "project": "basereport",
                "version": "1.0",
                "pkg_size": 4391830,
                "md5": "35bf230be9f3c1b878ef7665be34e14e",
                "nodes_number": 1,
                "config_templates": [
                    {"id": 1, "name": "bkunifylogbeat.conf", "version": "1.0", "is_main": false},
                    {"id": 2, "name": "bkunifylogbeat1.conf", "version": "1.1", "is_main": false},
                    {"id": 3, "name": "bkunifylogbeat-main.config", "version": "0.1", "is_main": true}
                ],
                "pkg_mtime": "2019-11-25 21:58:30",
                "is_ready": True,
                "is_release_version": True
            },
            {
                "id": 2,
                "pkg_name": "basereport-1.1.tgz",
                "project": "basereport",
                "version": "1.1",
                "md5": "35bf230be9f3c1b878ef7665be34e14e",
                "nodes_number": 1,
                "pkg_size": 4391830,
                "config_templates": [
                    {"id": 4, "name": "child1.conf", "version": "1.0", "is_main": false},
                    {"id": 5, "name": "child2.conf", "version": "2.0", "is_main": false},
                    {"id": 6, "name": "bkunifylogbeat-main.config", "version": "0.2", "is_main": true}
                ],
                "pkg_mtime": "2019-11-25 22:01:30",
                "is_ready": True,
                // 最新上传的包
                "is_newest": True,
                "is_release_version": True
            },
        ]
        """
        # Package history for one plugin; inject the URL pk into the validated params.
        params = self.validated_data
        params["plugin_id"] = pk
        return Response(PluginV2Handler.history(params))
    @action(detail=False, methods=["POST"], serializer_class=plugin_v2.PluginUploadSerializer)
    def upload(self, request):
        """
        @api {POST} /v2/plugin/upload/ 插件上传
        @apiName plugin_upload
        @apiGroup plugin_v2
        @apiParam {File} package_file 插件压缩包
        @apiParam {String} [module] 插件类别,缺省默认为`gse_plugin`
        @apiParamExample {Json} 请求参数
        {
        }
        @apiSuccessExample {json} 成功返回:
        {
            "id": 3,
            "name": "test_plugin-7.1.28.tgz",
            "pkg_size": 5587006
        }
        """
        # Validate manually against request.data (multipart upload includes the
        # file object), rather than going through self.validated_data.
        ser = self.serializer_class(data=request.data)
        ser.is_valid(raise_exception=True)
        data = ser.validated_data
        return JsonResponse(
            PluginV2Handler.upload(
                package_file=data["package_file"], module=data["module"], username=get_request_username()
            )
        )
@action(detail=False, methods=["POST"], serializer_class=plugin_v2.PluginFetchConfigVarsSerializer)
def fetch_config_variables(self, request):
"""
@api {POST} /v2/plugin/fetch_config_variables/ 获取配置模板参数
@apiName fetch_config_variables
@apiGroup plugin_v2
@apiParam {Int[]} config_tpl_ids 配置模板id列表
@apiParamExample {Json} 请求参数
{
"config_tpl_ids": [1, 2]
}
@apiSuccessExample {json} 成功返回:
[
{
"id": 1,
"name": "bkmonitorbeat.conf",
"version": "1.0.0",
"is_main": true,
"creator": "system",
"variables": {
"type": "object",
"properties": {
"tasks": {
"title": "tasks",
"type": "array",
"items": {
"title": "task",
"type": "object",
"properties": {
"bk_biz_id": {
"title": "bk_biz_id",
"_required": true,
"type": "any"
},
"task_list": {
"title": "task_list",
"type": "array",
"items": {
"title": "task",
"type": "object",
"properties": {
"pattern": {
"title": "pattern",
"type": "string",
"_required": true
| |
# gh_stars: 1-10 (scrape artifact converted to a comment so the file parses)
import torch
import torch.nn as nn
import torch.nn.functional as F
import itertools
from util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks
class ForkGANModel(BaseModel):
"""
This class implements the ForkGAN model, for learning image-to-image translation without paired data.
"""
@staticmethod
def modify_commandline_options(parser, is_train=True):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
"""
parser.set_defaults(no_dropout=True) # default CycleGAN did not use dropout
if is_train:
parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')
parser.add_argument('--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)')
parser.add_argument('--lambda_identity', type=float, default=0.5, help='use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1')
return parser
def __init__(self, opt):
"""Initialize the ForkGAN class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseModel.__init__(self, opt)
# specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
self.loss_names = ['G', 'G_adv_total', 'G_adv', 'G_rec', 'G_recfake', 'G_cycle', 'G_rec', 'G_percep', 'cls', 'G_cls']
self.loss_names.append('D')
self.loss_names.append('RC')
self.loss_names.append('RF')
# specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
# visual_names_A = ['real_A', 'fake_B', 'rec_realA', 'rec_fakeB']
# visual_names_B = ['real_B', 'fake_A', 'rec_realB', 'rec_fakeA']
visual_names_A = ['realA_percep', 'fakeB_percep']
visual_names_B = ['realB_percep', 'fakeA_percep']
if self.isTrain and self.opt.lambda_identity > 0.0: # if identity loss is used, we also visualize idt_B=G_A(B) ad idt_A=G_A(B)
visual_names_A.append('idt_B')
visual_names_B.append('idt_A')
self.visual_names = visual_names_A + visual_names_B # combine visualizations for A and B
# specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>.
if self.isTrain:
self.model_names = ['G_A', 'G_B', 'C', 'D_A', 'D_B', 'RC_A', 'RC_B', 'RF_A', 'RF_B']
else: # during test time, only load G
self.model_names = ['G_A', 'G_B']
# define networks (both Generators and discriminators)
# The naming is different from those used in the paper.
# Code (vs. paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)
self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.norm, not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.norm, not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
if self.isTrain: # define discriminators and domain-agnostic classifier
# define discriminators
self.netD_A = networks.define_D(opt.input_nc, opt.ndf, opt.n_scale, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.n_scale, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
self.netRC_A = networks.define_D(opt.input_nc, opt.ndf, opt.n_scale, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
self.netRC_B = networks.define_D(opt.input_nc, opt.ndf, opt.n_scale, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
self.netRF_A = networks.define_D(opt.input_nc, opt.ndf, opt.n_scale, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
self.netRF_B = networks.define_D(opt.input_nc, opt.ndf, opt.n_scale, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
# define domain-agnostic classifier
self.netC = networks.define_C(256, opt.ndf, opt.n_scale, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
if self.isTrain:
if opt.lambda_identity > 0.0: # only works when input and output images have the same number of channels
assert(opt.input_nc == opt.output_nc)
self.fake_A_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images
self.fake_B_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images
# define loss functions
self.criterionGAN_list = networks.GANLoss_List(opt.gan_mode).to(self.device) # define GAN loss list.
self.criterionCycle = torch.nn.L1Loss()
self.criterionReconstruction = torch.nn.L1Loss()
self.criterionIdt = torch.nn.L1Loss()
# initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_C = torch.optim.Adam(itertools.chain(self.netC.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_RC = torch.optim.Adam(itertools.chain(self.netRC_A.parameters(), self.netRC_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_RF = torch.optim.Adam(itertools.chain(self.netRF_A.parameters(), self.netRF_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers.append(self.optimizer_G)
self.optimizers.append(self.optimizer_C)
self.optimizers.append(self.optimizer_D)
self.optimizers.append(self.optimizer_RC)
self.optimizers.append(self.optimizer_RF)
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input (dict): include the data itself and its metadata information.
The option 'direction' can be used to swap domain A and domain B.
"""
AtoB = self.opt.direction == 'AtoB'
self.real_A = input['A' if AtoB else 'B'].to(self.device)
self.real_B = input['B' if AtoB else 'A'].to(self.device)
self.image_paths = input['A_paths' if AtoB else 'B_paths']
self.A_label = torch.zeros([self.real_A.size(0)], dtype=torch.long).to(self.device)
self.B_label = torch.ones([self.real_B.size(0)], dtype=torch.long).to(self.device)
    def forward(self):
        """Run forward pass; called by both functions <optimize_parameters> and <test>.

        Each generator call unpacks to three values: the translated image, a
        reconstruction of the generator's own input, and an intermediate
        "percep" feature map that is fed to the classifier netC below.
        NOTE(review): tuple semantics inferred from attribute names and their
        use in backward_G — confirm against networks.define_G.
        """
        # Obtain outputs from each generator G_A and G_B
        self.fake_B, self.rec_realA, self.realA_percep = self.netG_A(self.real_A) # G_A(A)
        self.fake_A_, self.rec_fakeB, self.fakeB_percep = self.netG_B(self.fake_B) # G_B(G_A(A)) -> cycle back to A
        self.fake_A, self.rec_realB, self.realB_percep = self.netG_B(self.real_B) # G_B(B)
        self.fake_B_, self.rec_fakeA, self.fakeA_percep = self.netG_A(self.fake_A) # G_A(G_B(B)) -> cycle back to B
        if self.isTrain:
            # Obtain outputs from domain-agnostic classifier
            # (logits later consumed by backward_G / backward_C)
            self.realA_percep_logit = self.netC(self.realA_percep)
            self.realB_percep_logit = self.netC(self.realB_percep)
            self.fakeA_percep_logit = self.netC(self.fakeA_percep)
            self.fakeB_percep_logit = self.netC(self.fakeB_percep)
##########################################################################
# Define generator loss
##########################################################################
    def backward_G(self):
        """Calculate the loss for generators G_A and G_B.

        Combines adversarial terms (translation, reconstruction, refinement),
        a classification term, a perceptual term, a cycle term and an L1
        reconstruction term, then backpropagates through the generators.
        """
        lambda_idt = self.opt.lambda_identity
        lambda_A = self.opt.lambda_A
        lambda_B = self.opt.lambda_B
        # Identity loss
        if lambda_idt > 0:
            # G_A should be identity if real_B is fed: ||G_A(B) - B||
            self.idt_A, _, _ = self.netG_A(self.real_B)
            self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt
            # G_B should be identity if real_A is fed: ||G_B(A) - A||
            self.idt_B, _, _ = self.netG_B(self.real_A)
            self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt
        else:
            self.loss_idt_A = 0
            self.loss_idt_B = 0
        # NOTE(review): loss_idt_A / loss_idt_B are computed but never added
        # to self.loss_G below, so the identity terms currently do not affect
        # the gradients — confirm whether this is intentional.
        # Initialize generator's loss items
        self.loss_G_adv_total = 0.0
        self.loss_G_adv = 0.0
        self.loss_G_rec = 0.0
        self.loss_G_recfake = 0.0
        # NOTE(review): every iteration evaluates the same discriminator
        # outputs, so this loop effectively scales the adversarial terms by
        # opt.n_d — confirm the scaling is intended.
        for i in range(self.opt.n_d):
            # Generator adversarial loss
            self.loss_G_A = self.criterionGAN_list(self.netD_A(self.fake_B), True)
            self.loss_G_B = self.criterionGAN_list(self.netD_B(self.fake_A), True)
            # Generator adversarial reconstruction loss
            self.loss_G_A_rec = self.criterionGAN_list(self.netRC_A(self.rec_realA), True)
            self.loss_G_B_rec = self.criterionGAN_list(self.netRC_B(self.rec_realB), True)
            # Generator adversarial refine loss
            self.loss_G_A_ref = self.criterionGAN_list(self.netRF_A(self.rec_fakeB), True)
            self.loss_G_B_ref = self.criterionGAN_list(self.netRF_B(self.rec_fakeA), True)
            self.loss_G_adv += 0.5 * (self.loss_G_A + self.loss_G_B)
            self.loss_G_rec += 0.5 * (self.loss_G_A_rec + self.loss_G_B_rec)
            self.loss_G_recfake += 0.5 * (self.loss_G_A_ref + self.loss_G_B_ref)
            # GAN adversarial loss
            self.loss_G_adv_total += self.loss_G_adv + self.loss_G_rec + self.loss_G_recfake
        # Generator classification loss: the generator tries to make netC
        # label translated features with the *source* image's domain.
        self.loss_G_cls = F.cross_entropy(self.fakeA_percep_logit.reshape(-1, 2), self.A_label)*0.5 +\
                          F.cross_entropy(self.fakeB_percep_logit.reshape(-1, 2), self.B_label)*0.5
        # Generator perceptual loss (L1 between width-averaged feature maps)
        self.loss_G_percep = torch.mean(torch.abs(torch.mean(self.realA_percep, dim=3) - torch.mean(self.fakeB_percep, dim=3))) +\
                             torch.mean(torch.abs(torch.mean(self.realB_percep, dim=3) - torch.mean(self.fakeA_percep, dim=3)))
        # Generator cycle loss
        self.loss_G_cycle_A = self.criterionCycle(self.real_A, self.fake_A_)
        self.loss_G_cycle_B = self.criterionCycle(self.real_B, self.fake_B_)
        self.loss_G_cycle = self.loss_G_cycle_A + self.loss_G_cycle_B
        # Generator reconstruction loss
        # NOTE(review): this OVERWRITES the adversarial-reconstruction value
        # accumulated into self.loss_G_rec in the loop above (that value is
        # already folded into loss_G_adv_total), so the logged 'G_rec' ends up
        # being the L1 term only — confirm this aliasing is intended.
        self.loss_G_rec_A = self.criterionReconstruction(self.rec_realA, self.real_A)
        self.loss_G_rec_B = self.criterionReconstruction(self.rec_realB, self.real_B)
        self.loss_G_rec = self.loss_G_rec_A + self.loss_G_rec_B
        # combined loss and calculate gradients
        self.loss_G = self.loss_G_adv_total + self.loss_G_cls + self.loss_G_percep +\
                      self.opt.gamma * self.loss_G_cycle + self.opt.eps * self.loss_G_rec
        # retain_graph=True: the forward graph is reused by a later backward
        # pass (e.g. backward_C), so it must not be freed here.
        self.loss_G.backward(retain_graph=True)
##########################################################################
# Define domain-agnostic classifier loss
##########################################################################
def backward_C(self):
"""Calculate domain-agnostic classifier loss for generator and itself"""
self.loss_cls = F.cross_entropy(self.realA_percep_logit.reshape(-1, 2), self.A_label)*0.25 +\
F.cross_entropy(self.realB_percep_logit.reshape(-1, 2), self.B_label)*0.25 +\
F.cross_entropy(self.fakeB_percep_logit.reshape(-1, 2), self.A_label)*0.25 +\
F.cross_entropy(self.fakeA_percep_logit.reshape(-1, 2), self.B_label)*0.25
self.loss_cls.backward()
##########################################################################
# Define discriminator loss (same as CycleGAN) ===> translation
##########################################################################
def backward_D_basic(self, netD, real, fake):
"""Calculate GAN loss for the discriminator
Parameters:
netD (network) -- the discriminator D
real (tensor array) -- real images
fake (tensor array) -- images generated by a generator
Return the discriminator loss.
We also call loss_D.backward() to calculate the gradients.
"""
# Real
pred_real = netD(real)
loss_D_real = self.criterionGAN_list(pred_real, True)
# Fake
pred_fake = netD(fake.detach())
loss_D_fake = self.criterionGAN_list(pred_fake, False)
# Combined loss and calculate gradients
loss_D = (loss_D_real + loss_D_fake) * 0.5
return loss_D
    def backward_D(self):
        """Calculate GAN loss for discriminator D
        We also call loss_D.backward() to calculate the gradients for each discriminator.
        """
        self.loss_D_list = []
        for i in range(self.opt.n_d):
            # Draw (possibly historical) fakes from the image pools, as in
            # CycleGAN, to stabilize discriminator training.
            fake_B = self.fake_B_pool.query(self.fake_B)
            self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B)
            fake_A = self.fake_A_pool.query(self.fake_A)
            self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A)
            self.loss_D_item = self.loss_D_A + self.loss_D_B
            # backward() inside the loop: gradients accumulate across the
            # opt.n_d iterations before the optimizer steps.
            self.loss_D_item.backward()
            self.loss_D_list.append(self.loss_D_item)
        # Aggregate reported under the 'D' entry of loss_names.
        self.loss_D = sum(self.loss_D_list)
##########################################################################
# Define reconstruction loss ===> reconstruction
##########################################################################
def backward_RC_basic(self, netRC, real, fake):
"""Calculate GAN loss for the discriminator
Parameters:
netRC (network) -- the discriminator RC
real (tensor array) -- real images
fake (tensor array) -- images generated by a generator
Return the discriminator loss.
"""
# Real
pred_real = netRC(real)
loss_RC_real = self.criterionGAN_list(pred_real, True)
# Fake
pred_fake = netRC(fake.detach())
loss_RC_fake = self.criterionGAN_list(pred_fake, False)
# Combined loss and calculate gradients
loss_RC = (loss_RC_real + loss_RC_fake) * 0.5
return loss_RC
def backward_RC(self):
"""Calculate GAN loss for discriminator RC
We also call loss_RC.backward() to calculate the gradients for each discriminator.
"""
self.loss_RC_list = []
for | |
import numpy as np
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
# Fail fast with an explicit error: a bare `assert` is silently stripped when
# Python runs with -O, which would let the script continue without a GPU.
if not torch.cuda.is_available():
    raise RuntimeError("CUDA is required but no CUDA device is available")
cuda_device = torch.device("cuda") # device object representing GPU
#This model stands for MobileNet ImageNet
#model_id = 10 (regardless of the file name)
class custom_cnn_10(torch.nn.Module):
    def __init__(self, input_features, reshape = True, widen_list = None, decompo_list = None, dummy_list = None, deepen_list = None, skipcon_list = None, kerneladd_list = None):
        """Build a MobileNetV2-style ImageNet classifier (model_id = 10).

        Parameters:
            input_features -- fan-in used by reset_parameters() to derive the
                              uniform initialization bound 1/sqrt(input_features).
            reshape        -- when True, forward() first reshapes the input to
                              (-1, 3, 224, 224).
            widen_list, decompo_list, dummy_list, deepen_list, skipcon_list,
            kerneladd_list -- optional architecture-mutation descriptors;
                              stored on the instance, but no use is visible in
                              this file chunk — presumably consumed by
                              external tooling (confirm before removing).

        The bracketed comments below appear to mirror MobileNetV2's
        inverted-residual table rows [t, c, n, s] (expansion, channels,
        repeats, stride) — NOTE(review): confirm against the paper.
        """
        super(custom_cnn_10,self).__init__()
        self.reshape = reshape
        self.widen_list = widen_list
        self.decompo_list = decompo_list
        self.dummy_list = dummy_list
        self.deepen_list = deepen_list
        self.skipcon_list = skipcon_list
        self.kerneladd_list = kerneladd_list
        self.relu = torch.nn.ReLU6(inplace=True)
        self.logsoftmax = torch.nn.LogSoftmax(dim = 1)
        self.maxpool2x2 = torch.nn.MaxPool2d(kernel_size=2, stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # Stem: 3x3 stride-2 convolution
        self.conv0 = torch.nn.Conv2d(3, 32, (3, 3), stride=(2, 2), padding=(1, 1), bias=False)
        self.conv_bn0 = torch.nn.BatchNorm2d(32)
        # [1, 16, 1, 1]
        self.conv1 = torch.nn.Conv2d(32, 32, (3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)
        self.conv_bn1 = torch.nn.BatchNorm2d(32)
        self.conv2 = torch.nn.Conv2d(32, 16, (1, 1), stride=(1, 1), padding=(0, 0), bias=False)
        self.conv_bn2 = torch.nn.BatchNorm2d(16)
        # [6, 24, 2, 2]
        self.conv3 = torch.nn.Conv2d(16, 96, (1, 1), stride=(1, 1), padding=(0, 0), bias=False)
        self.conv_bn3 = torch.nn.BatchNorm2d(96)
        self.conv4 = torch.nn.Conv2d(96, 96, (3, 3), stride=(2, 2), padding=(1, 1), groups=96, bias=False)
        self.conv_bn4 = torch.nn.BatchNorm2d(96)
        self.conv5 = torch.nn.Conv2d(96, 24, (1, 1), stride=(1, 1), padding=(0, 0), bias=False)
        self.conv_bn5 = torch.nn.BatchNorm2d(24)
        self.conv6 = torch.nn.Conv2d(24, 144, (1, 1), stride=(1, 1), padding=(0, 0), bias=False)
        self.conv_bn6 = torch.nn.BatchNorm2d(144)
        self.conv7 = torch.nn.Conv2d(144, 144, (3, 3), stride=(1, 1), padding=(1, 1), groups=144, bias=False)
        self.conv_bn7 = torch.nn.BatchNorm2d(144)
        self.conv8 = torch.nn.Conv2d(144, 24, (1, 1), stride=(1, 1), padding=(0, 0), bias=False)
        self.conv_bn8 = torch.nn.BatchNorm2d(24)
        # [6, 32, 3, 2],
        self.conv9 = torch.nn.Conv2d(24, 144, (1, 1), stride=(1, 1), padding=(0, 0), bias=False)
        self.conv_bn9 = torch.nn.BatchNorm2d(144)
        self.conv10 = torch.nn.Conv2d(144, 144, (3, 3), stride=(2, 2), padding=(1, 1), groups=144, bias=False)
        self.conv_bn10 = torch.nn.BatchNorm2d(144)
        self.conv11 = torch.nn.Conv2d(144, 32, (1, 1), stride=(1, 1), padding=(0, 0), bias=False)
        self.conv_bn11 = torch.nn.BatchNorm2d(32)
        self.conv12 = torch.nn.Conv2d(32, 192, (1, 1), stride=(1, 1), padding=(0, 0), bias=False)
        self.conv_bn12 = torch.nn.BatchNorm2d(192)
        self.conv13 = torch.nn.Conv2d(192, 192, (3, 3), stride=(1, 1), padding=(1, 1), groups=192, bias=False)
        self.conv_bn13 = torch.nn.BatchNorm2d(192)
        self.conv14 = torch.nn.Conv2d(192, 32, (1, 1), stride=(1, 1), padding=(0, 0), bias=False)
        self.conv_bn14 = torch.nn.BatchNorm2d(32)
        self.conv15 = torch.nn.Conv2d(32, 192, (1, 1), stride=(1, 1), padding=(0, 0), bias=False)
        self.conv_bn15 = torch.nn.BatchNorm2d(192)
        self.conv16 = torch.nn.Conv2d(192, 192, (3, 3), stride=(1, 1), padding=(1, 1), groups=192, bias=False)
        self.conv_bn16 = torch.nn.BatchNorm2d(192)
        self.conv17 = torch.nn.Conv2d(192, 32, (1, 1), stride=(1, 1), padding=(0, 0), bias=False)
        self.conv_bn17 = torch.nn.BatchNorm2d(32)
        # [6, 64, 4, 2],
        self.conv18 = torch.nn.Conv2d(32, 192, (1, 1), stride=(1, 1), padding=(0, 0), bias=False)
        self.conv_bn18 = torch.nn.BatchNorm2d(192)
        self.conv19 = torch.nn.Conv2d(192, 192, (3, 3), stride=(2, 2), padding=(1, 1), groups=192, bias=False)
        self.conv_bn19 = torch.nn.BatchNorm2d(192)
        self.conv20 = torch.nn.Conv2d(192, 64, (1, 1), stride=(1, 1), padding=(0, 0), bias=False)
        self.conv_bn20 = torch.nn.BatchNorm2d(64)
        self.conv21 = torch.nn.Conv2d(64, 384, (1, 1), stride=(1, 1), padding=(0, 0), bias=False)
        self.conv_bn21 = torch.nn.BatchNorm2d(384)
        self.conv22 = torch.nn.Conv2d(384, 384, (3, 3), stride=(1, 1), padding=(1, 1), groups=384, bias=False)
        self.conv_bn22 = torch.nn.BatchNorm2d(384)
        self.conv23 = torch.nn.Conv2d(384, 64, (1, 1), stride=(1, 1), padding=(0, 0), bias=False)
        self.conv_bn23 = torch.nn.BatchNorm2d(64)
        self.conv24 = torch.nn.Conv2d(64, 384, (1, 1), stride=(1, 1), padding=(0, 0), bias=False)
        self.conv_bn24 = torch.nn.BatchNorm2d(384)
        self.conv25 = torch.nn.Conv2d(384, 384, (3, 3), stride=(1, 1), padding=(1, 1), groups=384, bias=False)
        self.conv_bn25 = torch.nn.BatchNorm2d(384)
        self.conv26 = torch.nn.Conv2d(384, 64, (1, 1), stride=(1, 1), padding=(0, 0), bias=False)
        self.conv_bn26 = torch.nn.BatchNorm2d(64)
        self.conv27 = torch.nn.Conv2d(64, 384, (1, 1), stride=(1, 1), padding=(0, 0), bias=False)
        self.conv_bn27 = torch.nn.BatchNorm2d(384)
        self.conv28 = torch.nn.Conv2d(384, 384, (3, 3), stride=(1, 1), padding=(1, 1), groups=384, bias=False)
        self.conv_bn28 = torch.nn.BatchNorm2d(384)
        self.conv29 = torch.nn.Conv2d(384, 64, (1, 1), stride=(1, 1), padding=(0, 0), bias=False)
        self.conv_bn29 = torch.nn.BatchNorm2d(64)
        # [6, 96, 3, 1],
        self.conv30 = torch.nn.Conv2d(64, 384, (1, 1), stride=(1, 1), padding=(0, 0), bias=False)
        self.conv_bn30 = torch.nn.BatchNorm2d(384)
        self.conv31 = torch.nn.Conv2d(384, 384, (3, 3), stride=(1, 1), padding=(1, 1), groups=384, bias=False)
        self.conv_bn31 = torch.nn.BatchNorm2d(384)
        self.conv32 = torch.nn.Conv2d(384, 96, (1, 1), stride=(1, 1), padding=(0, 0), bias=False)
        self.conv_bn32 = torch.nn.BatchNorm2d(96)
        self.conv33 = torch.nn.Conv2d(96, 576, (1, 1), stride=(1, 1), padding=(0, 0), bias=False)
        self.conv_bn33 = torch.nn.BatchNorm2d(576)
        self.conv34 = torch.nn.Conv2d(576, 576, (3, 3), stride=(1, 1), padding=(1, 1), groups=576, bias=False)
        self.conv_bn34 = torch.nn.BatchNorm2d(576)
        self.conv35 = torch.nn.Conv2d(576, 96, (1, 1), stride=(1, 1), padding=(0, 0), bias=False)
        self.conv_bn35 = torch.nn.BatchNorm2d(96)
        self.conv36 = torch.nn.Conv2d(96, 576, (1, 1), stride=(1, 1), padding=(0, 0), bias=False)
        self.conv_bn36 = torch.nn.BatchNorm2d(576)
        self.conv37 = torch.nn.Conv2d(576, 576, (3, 3), stride=(1, 1), padding=(1, 1), groups=576, bias=False)
        self.conv_bn37 = torch.nn.BatchNorm2d(576)
        self.conv38 = torch.nn.Conv2d(576, 96, (1, 1), stride=(1, 1), padding=(0, 0), bias=False)
        self.conv_bn38 = torch.nn.BatchNorm2d(96)
        # [6, 160, 3, 2],
        self.conv39 = torch.nn.Conv2d(96, 576, (1, 1), stride=(1, 1), padding=(0, 0), bias=False)
        self.conv_bn39 = torch.nn.BatchNorm2d(576)
        self.conv40 = torch.nn.Conv2d(576, 576, (3, 3), stride=(2, 2), padding=(1, 1), groups=576, bias=False)
        self.conv_bn40 = torch.nn.BatchNorm2d(576)
        self.conv41 = torch.nn.Conv2d(576, 160, (1, 1), stride=(1, 1), padding=(0, 0), bias=False)
        self.conv_bn41 = torch.nn.BatchNorm2d(160)
        self.conv42 = torch.nn.Conv2d(160, 960, (1, 1), stride=(1, 1), padding=(0, 0), bias=False)
        self.conv_bn42 = torch.nn.BatchNorm2d(960)
        self.conv43 = torch.nn.Conv2d(960, 960, (3, 3), stride=(1, 1), padding=(1, 1), groups=960, bias=False)
        self.conv_bn43 = torch.nn.BatchNorm2d(960)
        self.conv44 = torch.nn.Conv2d(960, 160, (1, 1), stride=(1, 1), padding=(0, 0), bias=False)
        self.conv_bn44 = torch.nn.BatchNorm2d(160)
        self.conv45 = torch.nn.Conv2d(160, 960, (1, 1), stride=(1, 1), padding=(0, 0), bias=False)
        self.conv_bn45 = torch.nn.BatchNorm2d(960)
        self.conv46 = torch.nn.Conv2d(960, 960, (3, 3), stride=(1, 1), padding=(1, 1), groups=960, bias=False)
        self.conv_bn46 = torch.nn.BatchNorm2d(960)
        self.conv47 = torch.nn.Conv2d(960, 160, (1, 1), stride=(1, 1), padding=(0, 0), bias=False)
        self.conv_bn47 = torch.nn.BatchNorm2d(160)
        # [6, 320, 1, 1],
        self.conv48 = torch.nn.Conv2d(160, 960, (1, 1), stride=(1, 1), padding=(0, 0), bias=False)
        self.conv_bn48 = torch.nn.BatchNorm2d(960)
        self.conv49 = torch.nn.Conv2d(960, 960, (3, 3), stride=(1, 1), padding=(1, 1), groups=960, bias=False)
        self.conv_bn49 = torch.nn.BatchNorm2d(960)
        self.conv50 = torch.nn.Conv2d(960, 320, (1, 1), stride=(1, 1), padding=(0, 0), bias=False)
        self.conv_bn50 = torch.nn.BatchNorm2d(320)
        # Head: the only convolution with a bias term (no bias=False here).
        self.conv51 = torch.nn.Conv2d(320, 1280, (1, 1), stride=(1, 1), padding=(0, 0))
        self.conv_bn51 = torch.nn.BatchNorm2d(1280)
        self.classifier = torch.nn.Linear(1280, 1000)
        # Uniform re-initialization of ALL parameters (incl. BatchNorm) —
        # see reset_parameters for the bound.
        self.reset_parameters(input_features)
def reset_parameters(self, input_features):
stdv = 1.0 / math.sqrt(input_features)
for weight in self.parameters():
weight.data.uniform_(-stdv, +stdv)
def forward(self, X1):
if self.reshape:
X1 = X1.reshape(-1, 3, 224, 224)
X1 = self.conv0(X1)
X1 = self.conv_bn0(X1)
X1 = self.relu(X1)
X1 = self.conv1(X1)
X1 = self.conv_bn1(X1)
X1 = self.relu(X1)
X1 = self.conv2(X1)
X1 = self.conv_bn2(X1)
# [6, 24, 2, 2]
X1 = self.conv3(X1)
X1 = self.conv_bn3(X1)
X1 = self.relu(X1)
X1 = self.conv4(X1)
X1 = self.conv_bn4(X1)
X1 = self.relu(X1)
X1 = self.conv5(X1)
X1 = self.conv_bn5(X1)
X0_skip = X1
X1 = self.conv6(X1)
X1 = self.conv_bn6(X1)
X1 = self.relu(X1)
X1 = self.conv7(X1)
X1 = self.conv_bn7(X1)
X1 = self.relu(X1)
X1 = self.conv8(X1)
X1 = self.conv_bn8(X1)
X1 = X1 + X0_skip
# [6, 32, 3, 2],
X1 = self.conv9(X1)
X1 = self.conv_bn9(X1)
X1 = self.relu(X1)
X1 = self.conv10(X1)
X1 = self.conv_bn10(X1)
X1 = self.relu(X1)
X1 = self.conv11(X1)
X1 = self.conv_bn11(X1)
X0_skip = X1
X1 = self.conv12(X1)
X1 = self.conv_bn12(X1)
X1 = self.relu(X1)
X1 = self.conv13(X1)
X1 = self.conv_bn13(X1)
X1 = self.relu(X1)
X1 = self.conv14(X1)
X1 = self.conv_bn14(X1)
X1 = X1 + X0_skip
X0_skip = X1
X1 = self.conv15(X1)
X1 = self.conv_bn15(X1)
X1 = self.relu(X1)
X1 = self.conv16(X1)
X1 = self.conv_bn16(X1)
X1 = self.relu(X1)
X1 = self.conv17(X1)
X1 = self.conv_bn17(X1)
X1 = X1 + X0_skip
# [6, 64, 4, 2],
X1 = self.conv18(X1)
X1 = self.conv_bn18(X1)
X1 = self.relu(X1)
X1 = self.conv19(X1)
X1 = self.conv_bn19(X1)
X1 = self.relu(X1)
X1 = self.conv20(X1)
X1 = self.conv_bn20(X1)
X0_skip = X1
X1 = self.conv21(X1)
X1 = self.conv_bn21(X1)
X1 = self.relu(X1)
X1 = self.conv22(X1)
X1 = self.conv_bn22(X1)
X1 = self.relu(X1)
X1 = self.conv23(X1)
X1 = self.conv_bn23(X1)
X1 = X1 + X0_skip
X0_skip = X1
X1 = self.conv24(X1)
X1 = self.conv_bn24(X1)
X1 = self.relu(X1)
X1 = self.conv25(X1)
X1 = self.conv_bn25(X1)
X1 = self.relu(X1)
X1 = self.conv26(X1)
X1 = self.conv_bn26(X1)
X1 = X1 + X0_skip
X0_skip = X1
X1 = self.conv27(X1)
X1 = self.conv_bn27(X1)
X1 = self.relu(X1)
X1 = self.conv28(X1)
X1 = self.conv_bn28(X1)
X1 = self.relu(X1)
X1 = self.conv29(X1)
X1 = self.conv_bn29(X1)
X1 = X1 + X0_skip
# [6, 96, 3, 1],
X1 = self.conv30(X1)
X1 = self.conv_bn30(X1)
X1 = self.relu(X1)
X1 = self.conv31(X1)
X1 = self.conv_bn31(X1)
X1 = self.relu(X1)
X1 = self.conv32(X1)
X1 = self.conv_bn32(X1)
X0_skip = X1
X1 = self.conv33(X1)
X1 = self.conv_bn33(X1)
X1 = self.relu(X1)
X1 = self.conv34(X1)
X1 = self.conv_bn34(X1)
X1 = self.relu(X1)
X1 = self.conv35(X1)
X1 = self.conv_bn35(X1)
X1 = X1 + X0_skip
X0_skip = X1
X1 = self.conv36(X1)
X1 | |
node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
    def buildAttributes(self, node, attrs, already_processed):
        # userParameters declares no XML attributes, so there is nothing to
        # parse; hook kept for the generateDS build protocol.
        pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'userParameterLong':
obj_ = userParameterLongType.factory()
obj_.build(child_)
self.userParameterLong.append(obj_)
obj_.original_tagname_ = 'userParameterLong'
elif nodeName_ == 'userParameterDouble':
obj_ = userParameterDoubleType.factory()
obj_.build(child_)
self.userParameterDouble.append(obj_)
obj_.original_tagname_ = 'userParameterDouble'
elif nodeName_ == 'userParameterString':
obj_ = userParameterStringType.factory()
obj_.build(child_)
self.userParameterString.append(obj_)
obj_.original_tagname_ = 'userParameterString'
elif nodeName_ == 'userParameterBase64':
obj_ = userParameterBase64Type.factory()
obj_.build(child_)
self.userParameterBase64.append(obj_)
obj_.original_tagname_ = 'userParameterBase64'
# end class userParameters
class accelerationFactorType(GeneratedsSuper):
    """generateDS binding for the <accelerationFactor> element.

    Holds two optional integer children, kspace_encoding_step_1 and
    kspace_encoding_step_2 (parsed as ints in buildChildren below).
    """
    # subclass/superclass hooks used by the generateDS factory pattern.
    subclass = None
    superclass = None
    def __init__(self, kspace_encoding_step_1=None, kspace_encoding_step_2=None):
        # Remembered by build() callers so export() re-emits the original tag.
        self.original_tagname_ = None
        self.kspace_encoding_step_1 = kspace_encoding_step_1
        self.kspace_encoding_step_2 = kspace_encoding_step_2
    def factory(*args_, **kwargs_):
        # Instantiate a registered subclass when present, else this class.
        if accelerationFactorType.subclass:
            return accelerationFactorType.subclass(*args_, **kwargs_)
        else:
            return accelerationFactorType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Java-style accessors kept for generateDS API compatibility.
    def get_kspace_encoding_step_1(self): return self.kspace_encoding_step_1
    def set_kspace_encoding_step_1(self, kspace_encoding_step_1): self.kspace_encoding_step_1 = kspace_encoding_step_1
    def get_kspace_encoding_step_2(self): return self.kspace_encoding_step_2
    def set_kspace_encoding_step_2(self, kspace_encoding_step_2): self.kspace_encoding_step_2 = kspace_encoding_step_2
    def hasContent_(self):
        # True when at least one child element is populated.
        if (
            self.kspace_encoding_step_1 is not None or
            self.kspace_encoding_step_2 is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='accelerationFactorType', namespacedef_='', pretty_print=True):
        # Serialize this element (and its children) to *outfile* as XML.
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        # Re-emit under the tag name the element was originally parsed from.
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='accelerationFactorType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='accelerationFactorType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # No children: collapse to a self-closing tag.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='accelerationFactorType'):
        # No XML attributes for this type; hook kept for the export protocol.
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='accelerationFactorType', fromsubclass_=False, pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.kspace_encoding_step_1 is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%skspace_encoding_step_1>%s</%skspace_encoding_step_1>%s' % (namespace_, self.gds_format_integer(self.kspace_encoding_step_1, input_name='kspace_encoding_step_1'), namespace_, eol_))
        if self.kspace_encoding_step_2 is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%skspace_encoding_step_2>%s</%skspace_encoding_step_2>%s' % (namespace_, self.gds_format_integer(self.kspace_encoding_step_2, input_name='kspace_encoding_step_2'), namespace_, eol_))
    def exportLiteral(self, outfile, level, name_='accelerationFactorType'):
        # Emit a Python-literal rendering (generateDS debugging aid).
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        if self.kspace_encoding_step_1 is not None:
            showIndent(outfile, level)
            outfile.write('kspace_encoding_step_1=%d,\n' % self.kspace_encoding_step_1)
        if self.kspace_encoding_step_2 is not None:
            showIndent(outfile, level)
            outfile.write('kspace_encoding_step_2=%d,\n' % self.kspace_encoding_step_2)
    def build(self, node):
        # Populate this instance from an ElementTree node; returns self so
        # calls can be chained.
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Parse the two integer children; non-integer text raises a parse error.
        if nodeName_ == 'kspace_encoding_step_1':
            sval_ = child_.text
            try:
                ival_ = int(sval_)
            except (TypeError, ValueError) as exp:
                raise_parse_error(child_, 'requires integer: %s' % exp)
            ival_ = self.gds_validate_integer(ival_, node, 'kspace_encoding_step_1')
            self.kspace_encoding_step_1 = ival_
        elif nodeName_ == 'kspace_encoding_step_2':
            sval_ = child_.text
            try:
                ival_ = int(sval_)
            except (TypeError, ValueError) as exp:
                raise_parse_error(child_, 'requires integer: %s' % exp)
            ival_ = self.gds_validate_integer(ival_, node, 'kspace_encoding_step_2')
            self.kspace_encoding_step_2 = ival_
# end class accelerationFactorType
class parallelImagingType(GeneratedsSuper):
subclass = None
superclass = None
    def __init__(self, accelerationFactor=None, calibrationMode=None, interleavingDimension=None):
        # Remembered by build() callers so export() re-emits the original tag.
        self.original_tagname_ = None
        self.accelerationFactor = accelerationFactor
        self.calibrationMode = calibrationMode
        # Validation only *warns* (never raises) when the value falls outside
        # the XSD enumeration.
        self.validate_calibrationModeType(self.calibrationMode)
        self.interleavingDimension = interleavingDimension
        self.validate_interleavingDimensionType(self.interleavingDimension)
def factory(*args_, **kwargs_):
if parallelImagingType.subclass:
return parallelImagingType.subclass(*args_, **kwargs_)
else:
return parallelImagingType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_accelerationFactor(self): return self.accelerationFactor
def set_accelerationFactor(self, accelerationFactor): self.accelerationFactor = accelerationFactor
def get_calibrationMode(self): return self.calibrationMode
def set_calibrationMode(self, calibrationMode): self.calibrationMode = calibrationMode
def get_interleavingDimension(self): return self.interleavingDimension
def set_interleavingDimension(self, interleavingDimension): self.interleavingDimension = interleavingDimension
def validate_calibrationModeType(self, value):
# Validate type calibrationModeType, a restriction on xs:string.
if value is not None and Validate_simpletypes_:
value = str(value)
enumerations = ['embedded', 'interleaved', 'separate', 'external', 'other']
enumeration_respectee = False
for enum in enumerations:
if value == enum:
enumeration_respectee = True
break
if not enumeration_respectee:
warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on calibrationModeType' % {"value" : value.encode("utf-8")} )
def validate_interleavingDimensionType(self, value):
# Validate type interleavingDimensionType, a restriction on xs:string.
if value is not None and Validate_simpletypes_:
value = str(value)
enumerations = ['phase', 'repetition', 'contrast', 'average', 'other']
enumeration_respectee = False
for enum in enumerations:
if value == enum:
enumeration_respectee = True
break
if not enumeration_respectee:
warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on interleavingDimensionType' % {"value" : value.encode("utf-8")} )
def hasContent_(self):
if (
self.accelerationFactor is not None or
self.calibrationMode is not None or
self.interleavingDimension is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='parallelImagingType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='parallelImagingType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='parallelImagingType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='parallelImagingType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='parallelImagingType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.accelerationFactor is not None:
self.accelerationFactor.export(outfile, level, namespace_, name_='accelerationFactor', pretty_print=pretty_print)
if self.calibrationMode is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%scalibrationMode>%s</%scalibrationMode>%s' % (namespace_, self.gds_format_string(quote_xml(self.calibrationMode).encode(ExternalEncoding), input_name='calibrationMode'), namespace_, eol_))
if self.interleavingDimension is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sinterleavingDimension>%s</%sinterleavingDimension>%s' % (namespace_, self.gds_format_string(quote_xml(self.interleavingDimension).encode(ExternalEncoding), input_name='interleavingDimension'), namespace_, eol_))
def exportLiteral(self, outfile, level, name_='parallelImagingType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.accelerationFactor is not None:
showIndent(outfile, level)
outfile.write('accelerationFactor=model_.accelerationFactorType(\n')
self.accelerationFactor.exportLiteral(outfile, level, name_='accelerationFactor')
showIndent(outfile, level)
outfile.write('),\n')
if self.calibrationMode is not None:
showIndent(outfile, level)
outfile.write('calibrationMode=%s,\n' % quote_python(self.calibrationMode).encode(ExternalEncoding))
if self.interleavingDimension is not None:
showIndent(outfile, level)
outfile.write('interleavingDimension=%s,\n' % quote_python(self.interleavingDimension).encode(ExternalEncoding))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'accelerationFactor':
obj_ = accelerationFactorType.factory()
obj_.build(child_)
self.accelerationFactor = obj_
obj_.original_tagname_ = 'accelerationFactor'
elif nodeName_ == 'calibrationMode':
calibrationMode_ = child_.text
calibrationMode_ = self.gds_validate_string(calibrationMode_, node, 'calibrationMode')
self.calibrationMode = calibrationMode_
# validate type calibrationModeType
self.validate_calibrationModeType(self.calibrationMode)
elif nodeName_ == 'interleavingDimension':
interleavingDimension_ = child_.text
interleavingDimension_ = self.gds_validate_string(interleavingDimension_, node, 'interleavingDimension')
self.interleavingDimension = interleavingDimension_
# validate type interleavingDimensionType
self.validate_interleavingDimensionType(self.interleavingDimension)
# end class parallelImagingType
# Maps each XML tag name to the generated class used to parse that element.
# Several distinct tags (slice, phase, repetition, contrast, segment, average,
# kspace_encoding_step_*) deliberately share limitType.
# NOTE(review): 'kspace_encoding_step_0' looks unusual next to step_1/step_2
# -- confirm against the XSD this file was generated from.
GDSClassesMapping = {
    'parallelImaging': parallelImagingType,
    'set': limitType,
    'experimentalConditions': experimentalConditionsType,
    'accelerationFactor': accelerationFactorType,
    'acquisitionSystemInformation': acquisitionSystemInformationType,
    'userParameterLong': userParameterLongType,
    'sequenceParameters': sequenceParametersType,
    'userParameterDouble': userParameterDoubleType,
    'slice': limitType,
    'reconSpace': encodingSpaceType,
    'studyInformation': studyInformationType,
    'contrast': limitType,
    'userParameterString': userParameterStringType,
    'kspace_encoding_step_1': limitType,
    'kspace_encoding_step_0': limitType,
    'kspace_encoding_step_2': limitType,
    'measurementInformation': measurementInformationType,
    'encodingLimits': encodingLimitsType,
    'phase': limitType,
    'repetition': limitType,
    'segment': limitType,
    'encodedSpace': encodingSpaceType,
    'userParameterBase64': userParameterBase64Type,
    'measurementDependency': measurementDependencyType,
    'subjectInformation': subjectInformationType,
    'trajectoryDescription': trajectoryDescriptionType,
    'coilLabel': coilLabelType,
    'average': limitType,
}
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
    """Print the command-line usage text, then terminate with exit status 1."""
    print(USAGE_TEXT)
    raise SystemExit(1)
def get_root_tag(node):
    """Resolve an XML root node to ``(tag name, generated class or None)``.

    Looks the tag up in GDSClassesMapping first, then falls back to a
    module-global class of the same name.
    """
    tag = Tag_pattern_.match(node.tag).groups()[-1]
    return tag, GDSClassesMapping.get(tag, globals().get(tag))
def parse(inFileName, silence=False):
    """Parse *inFileName* into a generated object tree and return its root.

    Unless *silence* is true, the tree is re-exported as XML on stdout.
    Falls back to ismrmrdHeader when the root tag is not recognised.
    """
    document = parsexml_(inFileName)
    root_node = document.getroot()
    root_tag, root_class = get_root_tag(root_node)
    if root_class is None:
        root_tag = 'ismrmrdHeader'
        root_class = ismrmrdHeader
    root_obj = root_class.factory()
    root_obj.build(root_node)
    # Drop the DOM reference so Python can reclaim its memory.
    document = None
    if not silence:
        sys.stdout.write('<?xml version="1.0" ?>\n')
        root_obj.export(
            sys.stdout, 0, name_=root_tag,
            namespacedef_='',
            pretty_print=True)
    return root_obj
def parseEtree(inFileName, silence=False):
    """Parse *inFileName* and also build an etree mirror of the object tree.

    Returns ``(root object, root element, mapping, reverse mapping)``.
    NOTE(review): etree_.tostring(..., encoding="utf-8") returns bytes;
    writing that to sys.stdout only works on a bytes-capable stream
    (Python 2) -- confirm before running under Python 3.
    """
    document = parsexml_(inFileName)
    root_node = document.getroot()
    root_tag, root_class = get_root_tag(root_node)
    if root_class is None:
        root_tag = 'ismrmrdHeader'
        root_class = ismrmrdHeader
    root_obj = root_class.factory()
    root_obj.build(root_node)
    # Drop the DOM reference so Python can reclaim its memory.
    document = None
    mapping = {}
    root_element = root_obj.to_etree(None, name_=root_tag, mapping_=mapping)
    reverse_mapping = root_obj.gds_reverse_node_mapping(mapping)
    if not silence:
        content = etree_.tostring(
            root_element, pretty_print=True,
            xml_declaration=True, encoding="utf-8")
        sys.stdout.write(content)
        sys.stdout.write('\n')
    return root_obj, root_element, mapping, reverse_mapping
def parseString(inString, silence=False):
    """Parse an XML document held in the string *inString* and return its root.

    Unless *silence* is true, the resulting tree is re-exported on stdout.
    Falls back to ismrmrdHeader when the root tag is not recognised.
    """
    # BUG FIX: the unconditional 'from StringIO import StringIO' is Python-2
    # only and raises ImportError on Python 3.  Try the legacy module first
    # (it accepts both str and unicode on Python 2), then fall back to io.
    try:
        from StringIO import StringIO  # Python 2
    except ImportError:
        from io import StringIO  # Python 3
    doc = parsexml_(StringIO(inString))
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        rootTag = 'ismrmrdHeader'
        rootClass = ismrmrdHeader
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    if not silence:
        sys.stdout.write('<?xml version="1.0" ?>\n')
        rootObj.export(
            sys.stdout, 0, name_=rootTag,
            namespacedef_='')
    return rootObj
def parseLiteral(inFileName, silence=False):
    """Parse *inFileName*; unless *silence*, dump the tree on stdout as
    Python constructor-literal code importable via ``schema``."""
    document = parsexml_(inFileName)
    root_node = document.getroot()
    root_tag, root_class = get_root_tag(root_node)
    if root_class is None:
        root_tag = 'ismrmrdHeader'
        root_class = ismrmrdHeader
    root_obj = root_class.factory()
    root_obj.build(root_node)
    # Drop the DOM reference so Python can reclaim its memory.
    document = None
    if not silence:
        sys.stdout.write('#from schema import *\n\n')
        sys.stdout.write('import schema as model_\n\n')
        sys.stdout.write('rootObj = model_.rootClass(\n')
        root_obj.exportLiteral(sys.stdout, 0, name_=root_tag)
        sys.stdout.write(')\n')
    return root_obj
def main():
    """Command-line entry point: parse the single XML file argument."""
    args = sys.argv[1:]
    if len(args) != 1:
        usage()
    parse(args[0])
if __name__ == | |
<filename>scripts/klf14_b6ntac_exp_0075_pipeline_v4_validation.py
"""
Validate pipeline v4:
* segmentation
* dmap (0056)
* contour (0070)
* classifier (0074)
* segmentation correction (0053) networks
"""
"""
This file is part of Cytometer
Copyright 2021 Medical Research Council
SPDX-License-Identifier: Apache-2.0
Author: <NAME> <<EMAIL>>
"""
# script name to identify this experiment
experiment_id = 'klf14_b6ntac_exp_0075_pipeline_v4_validation'
# cross-platform home directory
from pathlib import Path
home = str(Path.home())
# PyCharm automatically adds cytometer to the python path, but this doesn't happen if the script is run
# with "python scriptname.py"
import os
import sys
sys.path.extend([os.path.join(home, 'Software/cytometer')])
import warnings
import pickle
import pandas as pd
import time
import re
# other imports
from enum import IntEnum
from PIL import Image, ImageDraw, ImageEnhance
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
from scipy.stats import linregress, mode
from skimage.morphology import remove_small_holes, binary_closing, binary_dilation
from scipy.ndimage.morphology import binary_fill_holes
import cv2
# limit number of GPUs
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'
os.environ['KERAS_BACKEND'] = 'tensorflow'
import keras
import keras.backend as K
import cytometer.utils
import cytometer.data
import tensorflow as tf
# limit GPU memory used
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.95
set_session(tf.Session(config=config))
# specify data format as (n, row, col, channel)
K.set_image_data_format('channels_last')
# Toggle for the interactive plotting blocks scattered through this script.
DEBUG = False
# number of folds for k-fold cross validation
n_folds = 10
# number of epochs for training
epochs = 25
# area (pixel**2) of the smallest object we accept as a cell (pi * (16 pixel)**2 = 804.2 pixel**2)
smallest_cell_area = 1500
# training window length
training_window_len = 401
# remove from training cells that don't have a good enough overlap with a reference label
smallest_dice = 0.5
# segmentations with Dice >= threshold are accepted
dice_threshold = 0.9
# segmentation parameters
min_cell_area = 75
median_size = 0
closing_size = 11
contour_seed_threshold = 0.005
batch_size = 16
local_threshold_block_size = 41
# rough_foreground_mask() parameters
downsample_factor = 8.0
dilation_size = 25
component_size_threshold = 0
# NOTE(review): 'treshold' is misspelled, but the name must keep matching the
# rough_foreground_mask() keyword used below -- rename in both places or not at all.
hole_size_treshold = 8000
'''Directories and filenames
'''
# data paths
root_data_dir = os.path.join(home, 'Data/cytometer_data/klf14')
histology_dir = os.path.join(root_data_dir, 'Maz Yon')
training_dir = os.path.join(root_data_dir, 'klf14_b6ntac_training')
training_data_dir = os.path.join(root_data_dir, 'klf14_b6ntac_training')
training_non_overlap_data_dir = os.path.join(root_data_dir, 'klf14_b6ntac_training_non_overlap')
training_augmented_dir = os.path.join(root_data_dir, 'klf14_b6ntac_training_augmented')
saved_models_dir = os.path.join(root_data_dir, 'saved_models')
metainfo_dir = os.path.join(home, 'Data/cytometer_data/klf14')
# model names
dmap_model_basename = 'klf14_b6ntac_exp_0056_cnn_dmap_model'
contour_model_basename = 'klf14_b6ntac_exp_0070_cnn_contour_after_dmap_model'
classifier_model_basename = 'klf14_b6ntac_exp_0074_cnn_tissue_classifier_fcn'
correction_model_basename = 'klf14_b6ntac_exp_0053_cnn_quality_network_fcn_overlapping_scaled_contours'
# load list of images, and indices for training vs. testing indices
kfold_filename = os.path.join(saved_models_dir, 'klf14_b6ntac_exp_0055_cnn_contour_kfold_info.pickle')
with open(kfold_filename, 'rb') as f:
    aux = pickle.load(f)
file_list = aux['file_list']
idx_test_all = aux['idx_test']
idx_train_all = aux['idx_train']
# correct home directory
file_list = [x.replace('/users/rittscher/rcasero', home) for x in file_list]
# number of images
n_im = len(file_list)
# CSV file with metainformation of all mice
metainfo_csv_file = os.path.join(metainfo_dir, 'klf14_b6ntac_meta_info.csv')
metainfo = pd.read_csv(metainfo_csv_file)
'''
************************************************************************************************************************
Prepare the testing data:
This is computed once, and then saved to
'klf14_b6ntac_exp_0075_pipeline_v4_validation_data.npz'.
In subsequent runs, the data is loaded from that file.
Apply classifier trained with each 10 folds to the other fold.
************************************************************************************************************************
'''
'''Load the test data
'''
# file name for pre-computed data
data_filename = os.path.join(saved_models_dir, experiment_id + '_data.npz')
if os.path.isfile(data_filename):
    # load pre-computed data
    aux = np.load(data_filename)
    im_array_all = aux['im_array_all']
    rough_mask_all = aux['rough_mask_all']
    out_class_all = aux['out_class_all']
    out_mask_all = aux['out_mask_all']
    i_all = aux['i_all']
    del aux
else: # pre-compute the validation data and save to file
    # start timer
    t0 = time.time()
    # init output
    im_array_all = []
    rough_mask_all = []
    out_class_all = []
    out_mask_all = []
    contour_type_all = []
    i_all = []
    # correct home directory in file paths
    file_list = cytometer.data.change_home_directory(list(file_list), '/users/rittscher/rcasero', home, check_isfile=True)
    # loop files with hand traced contours
    for i, file_svg in enumerate(file_list):
        '''Read histology training window
        '''
        print('file ' + str(i) + '/' + str(len(file_list) - 1))
        # change file extension from .svg to .tif
        file_tif = file_svg.replace('.svg', '.tif')
        # open histology training image
        im = Image.open(file_tif)
        # make array copy
        im_array = np.array(im)
        if DEBUG:
            enhancer = ImageEnhance.Contrast(im)
            enhanced_im = enhancer.enhance(4.0)
            plt.clf()
            plt.imshow(im)
            plt.clf()
            plt.imshow(enhanced_im)
        '''Rough segmentation'''
        # recover the full-slide filename and the (row, col) window position
        # encoded in the training-file name
        histology_filename = os.path.basename(file_svg)
        aux = re.split('_row', histology_filename)
        histology_filename = aux[0] + '.ndpi'
        histology_filename = os.path.join(histology_dir, histology_filename)
        aux = aux[1].replace('.svg', '')
        aux = re.split('_', aux)
        row = np.int32(aux[1])
        col = np.int32(aux[3])
        # rough segmentation of the tissue in the full histology image (not just the training window)
        rough_mask, im_downsampled = \
            cytometer.utils.rough_foreground_mask(histology_filename, downsample_factor=downsample_factor,
                                                  dilation_size=dilation_size,
                                                  component_size_threshold=component_size_threshold,
                                                  hole_size_treshold=hole_size_treshold,
                                                  return_im=True)
        # crop full histology to only the training image
        # NOTE(review): PIL's im.size is (width, height); size[0] is used for
        # the row extent and size[1] for the column extent below, which only
        # matches for square windows -- confirm.
        row_0 = np.int32(np.round((row - 500) / downsample_factor))
        row_end = row_0 + np.int32(np.round(im.size[0] / downsample_factor))
        col_0 = np.int32(np.round((col - 500) / downsample_factor))
        col_end = col_0 + np.int32(np.round(im.size[1] / downsample_factor))
        # crop rough mask and downsampled image
        im_crop = im_downsampled[row_0:row_end, col_0:col_end]
        rough_mask_crop = rough_mask[row_0:row_end, col_0:col_end]
        # upsample image and mask
        # NOTE(review): Image.LINEAR is a legacy alias of Image.BILINEAR in
        # Pillow -- confirm it exists in the pinned Pillow version.
        im_crop = Image.fromarray(im_crop)
        im_crop = im_crop.resize(size=(1001, 1001), resample=Image.LINEAR)
        im_crop = np.array(im_crop)
        rough_mask_crop = Image.fromarray(rough_mask_crop)
        rough_mask_crop = rough_mask_crop.resize(size=(1001, 1001), resample=Image.NEAREST)
        rough_mask_crop = np.array(rough_mask_crop)
        if DEBUG:
            plt.contour(rough_mask_crop, colors='k')
        '''Read contours
        '''
        # read the ground truth cell contours in the SVG file. This produces a list [contour_0, ..., contour_N-1]
        # where each contour_i = [(X_0, Y_0), ..., (X_P-1, X_P-1)]
        cell_contours = cytometer.data.read_paths_from_svg_file(file_svg, tag='Cell', add_offset_from_filename=False,
                                                                minimum_npoints=3)
        other_contours = cytometer.data.read_paths_from_svg_file(file_svg, tag='Other', add_offset_from_filename=False,
                                                                 minimum_npoints=3)
        brown_contours = cytometer.data.read_paths_from_svg_file(file_svg, tag='Brown', add_offset_from_filename=False,
                                                                 minimum_npoints=3)
        contours = cell_contours + other_contours + brown_contours
        # make a list with the type of cell each contour is classified as
        contour_type = [np.zeros(shape=(len(cell_contours),), dtype=np.uint8), # 0: white-adipocyte
                        np.ones(shape=(len(other_contours),), dtype=np.uint8), # 1: other types of tissue
                        np.ones(shape=(len(brown_contours),), dtype=np.uint8)] # 1: brown cells (treated as "other" tissue)
        contour_type = np.concatenate(contour_type)
        contour_type_all.append(contour_type)
        print('Cells: ' + str(len(cell_contours)))
        print('Other: ' + str(len(other_contours)))
        print('Brown: ' + str(len(brown_contours)))
        if (len(contours) == 0):
            print('No contours... skipping')
            continue
        # initialise arrays for training
        out_class = np.zeros(shape=im_array.shape[0:2], dtype=np.uint8)
        out_mask = np.zeros(shape=im_array.shape[0:2], dtype=np.uint8)
        if DEBUG:
            plt.clf()
            plt.imshow(im_array)
            plt.scatter((im_array.shape[1] - 1) / 2.0, (im_array.shape[0] - 1) / 2.0)
        # loop ground truth cell contours
        for j, contour in enumerate(contours):
            if DEBUG:
                plt.clf()
                plt.imshow(im_array)
                plt.plot([p[0] for p in contour], [p[1] for p in contour])
                xy_c = (np.mean([p[0] for p in contour]), np.mean([p[1] for p in contour]))
                plt.scatter(xy_c[0], xy_c[1])
            # rasterise current ground truth segmentation
            cell_seg_gtruth = Image.new("1", im_array.shape[0:2][::-1], "black") # I = 32-bit signed integer pixels
            draw = ImageDraw.Draw(cell_seg_gtruth)
            draw.polygon(contour, outline="white", fill="white")
            # NOTE(review): np.bool is deprecated (removed in NumPy >= 1.24);
            # plain bool does the same here -- confirm the pinned NumPy version.
            cell_seg_gtruth = np.array(cell_seg_gtruth, dtype=np.bool)
            if DEBUG:
                plt.clf()
                plt.subplot(121)
                plt.imshow(im_array)
                plt.plot([p[0] for p in contour], [p[1] for p in contour])
                xy_c = (np.mean([p[0] for p in contour]), np.mean([p[1] for p in contour]))
                plt.scatter(xy_c[0], xy_c[1])
                plt.subplot(122)
                plt.imshow(im_array)
                plt.contour(cell_seg_gtruth.astype(np.uint8))
            # add current object to training output and mask
            out_mask[cell_seg_gtruth] = 1
            out_class[cell_seg_gtruth] = contour_type[j]
        # end for j, contour in enumerate(contours):
        if DEBUG:
            plt.clf()
            plt.subplot(121)
            plt.imshow(im_array)
            plt.contour(out_mask.astype(np.uint8), colors='r')
            plt.title('Mask', fontsize=14)
            plt.axis('off')
            plt.subplot(122)
            plt.imshow(im_array)
            plt.contour(out_class.astype(np.uint8), colors='k')
            plt.title('Class', fontsize=14)
            plt.axis('off')
            plt.tight_layout()
        # add dummy dimensions for keras
        im_array = np.expand_dims(im_array, axis=0)
        rough_mask_crop = np.expand_dims(rough_mask_crop, axis=0)
        out_class = np.expand_dims(out_class, axis=0)
        out_class = np.expand_dims(out_class, axis=3)
        out_mask = np.expand_dims(out_mask, axis=0)
        # convert to expected types
        im_array = im_array.astype(np.float32)
        rough_mask_crop = rough_mask_crop.astype(np.bool)
        out_class = out_class.astype(np.float32)
        out_mask = out_mask.astype(np.float32)
        # scale image intensities from [0, 255] to [0.0, 1.0]
        im_array /= 255
        # append input/output/mask for later use in training
        im_array_all.append(im_array)
        rough_mask_all.append(rough_mask_crop)
        out_class_all.append(out_class)
        out_mask_all.append(out_mask)
        i_all.append(i)
        print('Time so far: ' + str("{:.1f}".format(time.time() - t0)) + ' s')
    # collapse lists into arrays
    im_array_all = np.concatenate(im_array_all)
    rough_mask_all = np.concatenate(rough_mask_all)
    out_class_all = np.concatenate(out_class_all)
    out_mask_all = np.concatenate(out_mask_all)
    # save results to avoid having to recompute them every time
    # NOTE(review): contour_type_all is collected above but not saved here --
    # confirm it is recomputed or genuinely unused downstream.
    np.savez(data_filename, im_array_all=im_array_all, rough_mask_all=rough_mask_all, out_class_all=out_class_all,
             out_mask_all=out_mask_all, i_all=i_all)
'''
************************************************************************************************************************
Areas of manual contours (this should be redundant with section "Object-wise classification validation", but it's
a lot faster to compute if you just need the hand contour areas)
************************************************************************************************************************
'''
# start timer
t0 = time.time()
# init
df_all = pd.DataFrame()
for i_fold in range(len(idx_test_all)):
print('# Fold ' + str(i_fold) + '/' + str(len(idx_test_all) - 1))
# test and training image indices. These indices refer to file_list
idx_test = idx_test_all[i_fold]
# idx_train = idx_train_all[i_fold]
# list of test files (used later for the dataframe)
file_list_test = np.array(file_list)[idx_test]
# map the indices from file_list to im_array_all (there's an image that had no WAT or Other contours and was
# skipped)
idx_lut = np.full(shape=(len(file_list), ), fill_value=-1, dtype=idx_test.dtype)
idx_lut[i_all] = range(len(i_all))
# idx_train = idx_lut[idx_train]
idx_test = idx_lut[idx_test]
# print('## len(idx_train) = ' + str(len(idx_train)))
print('## len(idx_test) = ' + str(len(idx_test)))
# split data into training and testing
# im_array_train = im_array_all[idx_train, :, :, :]
im_array_test = im_array_all[idx_test, :, :, :]
# out_class_train = out_class_all[idx_train, :, :, :]
out_class_test = out_class_all[idx_test, :, :, :]
# out_mask_train = out_mask_all[idx_train, :, :]
out_mask_test = out_mask_all[idx_test, :, :]
# loop test images
for i, file_svg in enumerate(file_list_test):
print('# Fold ' + str(i_fold) + '/' + str(len(idx_test_all) - 1) + ', i = '
+ str(i) + '/' + str(len(idx_test) - 1))
''' Ground truth contours '''
# change file extension from .svg to .tif
file_tif | |
!= 0" % (node.offset,))
self.o()
self.o(" @property")
self.o(" def %s(self) -> typing_.Optional[bytes]:" % (uname))
self.output_doc(node, " ")
self.o(" if not self.has_%s:" % (uname))
self.o(" return None")
self.o(
" (o, s) = self._get_ptr%s(%d, scalgoproto.BYTES_MAGIC)"
% ("_inplace" if node.inplace else "", node.offset)
)
self.o(" return self._reader._data[o : o + s]")
self.o()
else:
self.o(" @property")
self.o(" def %s(self) -> bytes:" % (uname))
self.output_doc(node, " ")
self.o(
" (o, s) = self._get_ptr%s(%d, scalgoproto.BYTES_MAGIC)"
% ("_inplace" if node.inplace else "", node.offset)
)
self.o(" return self._reader._data[o : o + s]")
self.o()
def generate_union_bytes_in(self, node: Value, uname: str) -> None:
self.o(" @property")
self.o(" def %s(self) -> bytes:" % (uname))
self.output_doc(node, " ")
self.o(" assert self.is_%s" % (uname))
self.o(" (o, s) = self._get_ptr(scalgoproto.BYTES_MAGIC)")
self.o(" return self._reader._data[o : o + s]")
self.o()
def generate_bytes_out(self, node: Value, uname: str) -> None:
self.o(" @scalgoproto.Adder")
if node.inplace:
self.o(" def %s(self, value: bytes) -> None:" % (uname))
else:
self.o(
" def %s(self, value: typing_.Union[scalgoproto.BytesOut, bytes]) -> None:"
% (uname)
)
self.output_doc(node, " ")
if node.inplace:
self.o(" self._add_inplace_bytes(%d, value)" % (node.offset))
else:
self.o(" self._set_bytes(%d, value)" % (node.offset))
self.o()
def generate_union_bytes_out(
self, node: Value, uname: str, idx: int, inplace: bool
) -> None:
self.o(" @scalgoproto.Adder")
if inplace:
self.o(" def %s(self, value: bytes) -> None:" % (uname))
else:
self.o(
" def %s(self, value: typing_.Union[scalgoproto.BytesOut, bytes]) -> None:"
% (uname)
)
self.output_doc(node, " ")
if inplace:
self.o(" self._add_inplace_bytes(%d, value)" % (idx))
else:
self.o(" self._set_bytes(%d, value)" % (idx))
self.o()
    def generate_union_in(self, node: Value, uname: str, table: Table) -> None:
        """Emit the reader-side accessor for a union member of a table.

        Optional members also get a has_<uname> predicate and the accessor
        then returns None when the union is unset.  Inplace unions locate
        their payload right after the table (self._offset + self._size);
        plain unions read a 48-bit payload offset stored at node.offset + 2.
        """
        assert node.union is not None
        if node.optional:
            self.o(" @property")
            self.o(" def has_%s(self) -> bool:" % (uname,))
            self.o(" return self._get_uint16(%d, 0) != 0" % (node.offset,))
            self.o()
            self.o(" @property")
            self.o(
                " def %s(self) -> typing_.Optional[%sIn]:" % (uname, node.union.name)
            )
            self.output_doc(node, " ")
            self.o(" if not self.has_%s:" % (uname))
            self.o(" return None")
            if node.inplace:
                self.o(
                    " return %sIn(self._reader, self._get_uint16(%d, 0), self._offset + self._size, self._get_uint48(%d))"
                    % (node.union.name, node.offset, node.offset + 2)
                )
            else:
                self.o(
                    " return %sIn(self._reader, self._get_uint16(%d, 0), self._get_uint48(%d))"
                    % (node.union.name, node.offset, node.offset + 2)
                )
            self.o()
        else:
            self.o(" @property")
            self.o(" def %s(self) -> %sIn:" % (uname, node.union.name))
            self.output_doc(node, " ")
            if node.inplace:
                self.o(
                    " return %sIn(self._reader, self._get_uint16(%d, 0), self._offset + self._size, self._get_uint48(%d))"
                    % (node.union.name, node.offset, node.offset + 2)
                )
            else:
                self.o(
                    " return %sIn(self._reader, self._get_uint16(%d, 0), self._get_uint48(%d))"
                    % (node.union.name, node.offset, node.offset + 2)
                )
            self.o()
def generate_union_out(self, node: Value, uname: str, table: Table) -> None:
assert node.union is not None
self.o(" @property")
self.o(
" def %s(self) -> %s%sOut:"
% (uname, node.union.name, "Inplace" if node.inplace else "")
)
self.o(
" return %s%sOut(self._writer, self._offset + %d, self._offset + %d)"
% (
node.union.name,
"Inplace" if node.inplace else "",
node.offset,
table.bytes,
)
)
self.o()
def generate_value_in(self, table: Table, node: Value) -> None:
assert node.type_ is not None
uname = snake(self.value(node.identifier))
if node.list_:
self.generate_list_in(node, uname)
elif node.type_.type == TokenType.BOOL:
self.generate_bool_in(node, uname)
elif node.type_.type in typeMap:
self.generate_basic_in(node, uname)
elif node.enum:
self.generate_enum_in(node, uname)
elif node.struct:
self.generate_struct_in(node, uname)
elif node.table:
self.generate_table_in(node, uname)
elif node.union:
self.generate_union_in(node, uname, table)
elif node.type_.type == TokenType.TEXT:
self.generate_text_in(node, uname)
elif node.type_.type == TokenType.BYTES:
self.generate_bytes_in(node, uname)
else:
raise ICE()
def generate_value_out(self, table: Table, node: Value) -> None:
assert node.type_ is not None
uname = snake(self.value(node.identifier))
if node.list_:
self.generate_list_out(node, uname)
elif node.type_.type == TokenType.BOOL:
self.generate_bool_out(node, uname)
elif node.type_.type in typeMap:
self.generate_basic_out(node, uname)
elif node.enum:
self.generate_enum_out(node, uname)
elif node.struct:
self.generate_struct_out(node, uname)
elif node.table:
self.generate_table_out(node, uname)
elif node.union:
self.generate_union_out(node, uname, table)
elif node.type_.type == TokenType.TEXT:
self.generate_text_out(node, uname)
elif node.type_.type == TokenType.BYTES:
self.generate_bytes_out(node, uname)
else:
raise ICE()
    def generate_union_copy(self, union: Union) -> None:
        """Emit the _copy method of a union Out class.

        The generated method switches on the source union's active member;
        the leading 'if False: pass' lets every real member be a uniform
        'elif' branch.
        """
        self.o(" def _copy(self, i:%sIn) -> None:" % union.name)
        self.o(" if False:")
        self.o(" pass")
        for node in union.members:
            assert node.type_ is not None
            uuname = snake(self.value(node.identifier))
            self.o(" elif i.is_%s:" % uuname)
            if node.list_:
                self.o(
                    " self.add_%s(len(i.%s))._copy(i.%s)"
                    % (uuname, uuname, uuname)
                )
            elif (
                node.type_.type == TokenType.TEXT or node.type_.type == TokenType.BYTES
            ):
                self.o(" self.add_%s(i.%s)" % (uuname, uuname))
            elif node.table:
                # Empty tables have nothing to copy; just instantiate them.
                if node.table.empty:
                    self.o(" self.add_%s()" % (uuname))
                else:
                    self.o(" self.add_%s()._copy(i.%s)" % (uuname, uuname))
            else:
                raise ICE()
        self.o()
    def generate_union(self, union: Union) -> None:
        """Emit all Python classes for one union: the In reader class plus
        the Out and InplaceOut writer classes, recursing first into any
        directly contained tables/unions/enums/structs."""
        # Recursively generate direct contained members
        for value in union.members:
            if value.direct_table:
                self.generate_table(value.direct_table)
            if value.direct_union:
                self.generate_union(value.direct_union)
            if value.direct_enum:
                self.generate_enum(value.direct_enum)
            if value.direct_struct:
                self.generate_struct(value.direct_struct)
        self.o("class %sIn(scalgoproto.UnionIn):" % union.name)
        self.output_doc(union, " ")
        self.o(" __slots__: typing_.List[str] = []")
        self.o(" _MEMBERS = [")
        for node in union.members:
            self.o(' "%s",' % snake(self.value(node.identifier)))
        self.o(" ]")
        self.o()
        self.o(
            " def __init__(self, reader: scalgoproto.Reader, type: int, offset: int, size: int = None) -> None:"
        )
        self.o(
            ' """Private constructor. Call factory methods on scalgoproto.Reader to construct instances"""'
        )
        self.o(" super().__init__(reader, type, offset, size)")
        self.o()
        self.o(" class Type(enum.IntEnum):")
        self.o(" NONE = 0")
        # Member type ids start at 1; 0 is reserved for NONE (unset union).
        idx = 1
        for member in union.members:
            if not isinstance(member, (Table, Value)):
                raise ICE()
            self.o(" %s = %d" % (self.value(member.identifier).upper(), idx))
            idx += 1
        self.o()
        self.o(" @property")
        self.o(" def type(self) -> Type:")
        self.output_doc(union, " ")
        self.o(" return %sIn.Type(self._type)" % (union.name))
        self.o()
        # One is_<member> predicate plus a typed accessor per member.
        for member in union.members:
            assert member.type_ is not None
            n = self.value(member.identifier)
            uuname = snake(n)
            self.o(" @property")
            self.o(" def is_%s(self) -> bool:" % (uuname,))
            self.o(" return self.type == %sIn.Type.%s" % (union.name, n.upper()))
            self.o()
            if member.list_:
                self.generate_union_list_in(member, uuname)
            elif member.table:
                self.generate_union_table_in(member, uuname)
            elif member.type_.type == TokenType.BYTES:
                self.generate_union_bytes_in(member, uuname)
            elif member.type_.type == TokenType.TEXT:
                self.generate_union_text_in(member, uuname)
            else:
                raise ICE()
        self.o()
        self.o("class %sOut(scalgoproto.UnionOut):" % union.name)
        self.o(" __slots__: typing_.List[str] = []")
        self.o()
        self.o(
            " def __init__(self, writer: scalgoproto.Writer, offset: int, end: int = 0) -> None:"
        )
        self.o(
            ' """Private constructor. Call factory methods on scalgoproto.Writer to construct instances"""'
        )
        self.o(" super().__init__(writer, offset, end)")
        self.o()
        idx = 1
        for member in union.members:
            assert member.type_ is not None
            uuname = snake(self.value(member.identifier))
            if member.list_:
                self.generate_union_list_out(member, uuname, idx, False)
            elif member.table:
                self.generate_union_table_out(member, uuname, idx, False)
            elif member.type_.type == TokenType.BYTES:
                self.generate_union_bytes_out(member, uuname, idx, False)
            elif member.type_.type == TokenType.TEXT:
                self.generate_union_text_out(member, uuname, idx, False)
            else:
                raise ICE()
            idx += 1
        self.generate_union_copy(union)
        self.o()
        # Same writer class again, but with inplace adders.
        self.o("class %sInplaceOut(scalgoproto.UnionOut):" % union.name)
        self.o(" __slots__: typing_.List[str] = []")
        self.o()
        self.o(
            " def __init__(self, writer: scalgoproto.Writer, offset: int, end: int = 0) -> None:"
        )
        self.o(
            ' """Private constructor. Call factory methods on scalgoproto.Writer to construct instances"""'
        )
        self.o(" super().__init__(writer, offset, end)")
        self.o()
        idx = 1
        for member in union.members:
            assert member.type_ is not None
            uuname = snake(self.value(member.identifier))
            if member.list_:
                self.generate_union_list_out(member, uuname, idx, True)
            elif member.table:
                self.generate_union_table_out(member, uuname, idx, True)
            elif member.type_.type == TokenType.BYTES:
                self.generate_union_bytes_out(member, uuname, idx, True)
            elif member.type_.type == TokenType.TEXT:
                self.generate_union_text_out(member, uuname, idx, True)
            else:
                raise ICE()
            idx += 1
        self.generate_union_copy(union)
        self.o()
def generate_table_copy(self, table: Table) -> None:
self.o(" def _copy(self, i:%sIn) -> None:" % table.name)
for ip in (True, False):
for node in table.members:
uname = snake(self.value(node.identifier))
if bool(node.inplace) != ip:
continue
assert node.type_ is not None
if node.list_:
if node.optional:
self.o(" if i.%s is not None:" % uname)
self.o(
" self.add_%s(len(i.%s))._copy(i.%s)"
% (uname, uname, uname)
)
else:
self.o(
" self.add_%s(len(i.%s))._copy(i.%s)"
% (uname, uname, uname)
)
elif (
node.type_.type in typeMap
or node.type_.type == TokenType.BOOL
or node.enum
or node.struct
or node.type_.type == TokenType.TEXT
or node.type_.type == TokenType.BYTES
):
if (
node.optional
or node.enum
or node.type_.type == TokenType.TEXT
or node.type_.type == TokenType.BYTES
):
self.o(" if i.%s is not None:" % uname)
self.o(" self.%s = i.%s" % (uname, uname))
else:
self.o(" self.%s = i.%s" % (uname, uname))
elif node.table:
if node.optional:
self.o(" if i.%s is not None:" % (uname))
if node.table.empty:
self.o(" self.add_%s()")
else:
self.o(
" self.add_%s()._copy(i.%s)" % (uname, uname)
)
else:
if node.table.empty:
self.o(" self.add_%s()")
else:
self.o(" self.add_%s()._copy(i.%s)" % (uname, uname))
elif node.union:
self.o(" if i.%s is not None:" % (uname))
self.o(" self.%s._copy(i.%s)" % (uname, uname))
else:
raise ICE()
self.o()
def generate_table(self, table: Table) -> None:
# Recursively generate direct contained members
for value in table.members:
if value.direct_table:
self.generate_table(value.direct_table)
if value.direct_union:
self.generate_union(value.direct_union)
if value.direct_enum:
self.generate_enum(value.direct_enum)
if value.direct_struct:
self.generate_struct(value.direct_struct)
if table.empty:
return
# Generate table reader
self.o("class %sIn(scalgoproto.TableIn):" % table.name)
self.output_doc(table, " ")
self.o(" __slots__: typing_.List[str] = []")
self.o(
" _MAGIC: typing_.ClassVar[int] = 0x%08X # type: ignore" % table.magic
)
self.o(" _MEMBERS = [")
for node in table.members:
self.o(' "%s",' % snake(self.value(node.identifier)))
self.o(" ]")
self.o()
for node in table.members:
self.generate_value_in(table, node)
self.o()
assert | |
"""
Hierarchical clustering (:mod:`scipy.cluster.hierarchy`)
========================================================
.. currentmodule:: scipy.cluster.hierarchy
These functions cut hierarchical clusterings into flat clusterings
or find the roots of the forest formed by a cut by providing the flat
cluster ids of each observation.
.. autosummary::
:toctree: generated/
fcluster
fclusterdata
leaders
These are routines for agglomerative clustering.
.. autosummary::
:toctree: generated/
linkage
single
complete
average
weighted
centroid
median
ward
These routines compute statistics on hierarchies.
.. autosummary::
:toctree: generated/
cophenet
from_mlab_linkage
inconsistent
maxinconsts
maxdists
maxRstat
to_mlab_linkage
Routines for visualizing flat clusters.
.. autosummary::
:toctree: generated/
dendrogram
These are data structures and routines for representing hierarchies as
tree objects.
.. autosummary::
:toctree: generated/
ClusterNode
leaves_list
to_tree
cut_tree
optimal_leaf_ordering
These are predicates for checking the validity of linkage and
inconsistency matrices as well as for checking isomorphism of two
flat cluster assignments.
.. autosummary::
:toctree: generated/
is_valid_im
is_valid_linkage
is_isomorphic
is_monotonic
correspond
num_obs_linkage
Utility routines for plotting:
.. autosummary::
:toctree: generated/
set_link_color_palette
Utility classes:
.. autosummary::
:toctree: generated/
DisjointSet -- data structure for incremental connectivity queries
"""
# Copyright (C) <NAME>, 2007-2008. New BSD License.
# hierarchy.py (derived from cluster.py, http://scipy-cluster.googlecode.com)
#
# Author: <NAME>
# Date: September 22, 2007
#
# Copyright (c) 2007, 2008, <NAME>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# - Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# - Neither the name of the author nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import warnings
import bisect
from collections import deque
import numpy as np
from . import _hierarchy, _optimal_leaf_ordering
import scipy.spatial.distance as distance
from scipy._lib._disjoint_set import DisjointSet
_LINKAGE_METHODS = {'single': 0, 'complete': 1, 'average': 2, 'centroid': 3,
'median': 4, 'ward': 5, 'weighted': 6}
_EUCLIDEAN_METHODS = ('centroid', 'median', 'ward')
__all__ = ['ClusterNode', 'DisjointSet', 'average', 'centroid', 'complete',
'cophenet', 'correspond', 'cut_tree', 'dendrogram', 'fcluster',
'fclusterdata', 'from_mlab_linkage', 'inconsistent',
'is_isomorphic', 'is_monotonic', 'is_valid_im', 'is_valid_linkage',
'leaders', 'leaves_list', 'linkage', 'maxRstat', 'maxdists',
'maxinconsts', 'median', 'num_obs_linkage', 'optimal_leaf_ordering',
'set_link_color_palette', 'single', 'to_mlab_linkage', 'to_tree',
'ward', 'weighted']
class ClusterWarning(UserWarning):
    """Warning category emitted by scipy.cluster routines."""
def _warning(s):
    """Emit *s* as a ClusterWarning attributed to the caller's caller."""
    message = 'scipy.cluster: %s' % s
    # stacklevel=3 points the warning at the user's call site rather than at
    # this helper or the library function that invoked it.
    warnings.warn(message, ClusterWarning, stacklevel=3)
def _copy_array_if_base_present(a):
"""
Copy the array if its base points to a parent array.
"""
if a.base is not None:
return a.copy()
elif np.issubsctype(a, np.float32):
return np.array(a, dtype=np.double)
else:
return a
def _copy_arrays_if_base_present(T):
    """
    Accept a tuple of arrays T. Copy each array whose base points to a
    parent array (see `_copy_array_if_base_present`); pass the rest through
    by reference. This is useful if the arrays are being passed to a C
    function that does not do proper striding.
    """
    return [_copy_array_if_base_present(arr) for arr in T]
def _randdm(pnts):
"""
Generate a random distance matrix stored in condensed form.
Parameters
----------
pnts : int
The number of points in the distance matrix. Has to be at least 2.
Returns
-------
D : ndarray
A ``pnts * (pnts - 1) / 2`` sized vector is returned.
"""
if pnts >= 2:
D = np.random.rand(pnts * (pnts - 1) / 2)
else:
raise ValueError("The number of points in the distance matrix "
"must be at least 2.")
return D
def single(y):
    """
    Perform single/min/nearest linkage on the condensed distance matrix ``y``.

    Parameters
    ----------
    y : ndarray
        The upper triangular of the distance matrix. The result of
        ``pdist`` is returned in this form.

    Returns
    -------
    Z : ndarray
        The linkage matrix.

    See Also
    --------
    linkage : for advanced creation of hierarchical clusterings.
    scipy.spatial.distance.pdist : pairwise distance metrics

    Examples
    --------
    >>> from scipy.cluster.hierarchy import single, fcluster
    >>> from scipy.spatial.distance import pdist
    >>> X = [[0, 0], [0, 1], [1, 0],
    ...      [0, 4], [0, 3], [1, 4],
    ...      [4, 0], [3, 0], [4, 1],
    ...      [4, 4], [3, 4], [4, 3]]
    >>> Z = single(pdist(X))
    >>> fcluster(Z, 1, criterion='distance')
    array([3, 3, 3, 4, 4, 4, 2, 2, 2, 1, 1, 1], dtype=int32)

    The returned linkage matrix ``Z`` represents a dendrogram; see
    `scipy.cluster.hierarchy.linkage` for a detailed explanation of its
    contents, `scipy.cluster.hierarchy.fcluster` for cutting it into flat
    clusters, and `scipy.cluster.hierarchy.dendrogram` for plotting it.
    """
    # Thin convenience wrapper: single linkage with Euclidean metric.
    return linkage(y, method='single', metric='euclidean')
def complete(y):
"""
Perform complete/max/farthest point linkage on a condensed distance matrix.
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the `linkage` function documentation for more information
on its structure.
See Also
--------
linkage : for advanced creation of hierarchical clusterings.
scipy.spatial.distance.pdist : pairwise distance metrics
Examples
--------
>>> from scipy.cluster.hierarchy import complete, fcluster
>>> from scipy.spatial.distance import pdist
First, we need a toy dataset to play with::
x x x x
x x
x x
x x x x
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
Then, we get a condensed distance matrix from this dataset:
>>> y = pdist(X)
Finally, we can perform the clustering:
>>> Z = complete(y)
>>> Z
array([[ 0. , 1. , 1. , 2. ],
[ 3. , 4. , 1. , 2. ],
[ 6. , 7. , 1. , 2. ],
[ 9. , 10. , 1. , 2. ],
[ 2. , 12. , 1.41421356, 3. ],
[ 5. , 13. , 1.41421356, 3. ],
[ 8. , 14. , 1.41421356, 3. ],
[11. , 15. , 1.41421356, 3. ],
[16. , 17. , 4.12310563, 6. ],
[18. , 19. , 4.12310563, 6. ],
[20. , 21. , 5.65685425, 12. ]])
The linkage matrix ``Z`` represents a dendrogram - see
`scipy.cluster.hierarchy.linkage` for a detailed explanation of its
contents.
We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster
each initial point would belong given a distance threshold:
>>> fcluster(Z, 0.9, criterion='distance')
array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32)
>>> fcluster(Z, 1.5, criterion='distance')
array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)
>>> fcluster(Z, 4.5, criterion='distance')
array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2], dtype=int32)
>>> fcluster(Z, 6, criterion='distance')
array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)
| |
<reponame>openamundsen/openamundsen
import loguru
from openamundsen import constants, errors, forcing, util
import pandas as pd
from pathlib import Path
import xarray as xr
def read_meteo_data(
    meteo_format,
    meteo_data_dir,
    start_date,
    end_date,
    meteo_crs=None,
    grid_crs=None,
    bounds=None,
    exclude=None,
    include=None,
    freq='H',
    aggregate=False,
    logger=None,
):
    """
    Read all available stations in NetCDF or CSV format for a given period.

    In case of NetCDF input, all available .nc files in `meteo_data_dir` are
    used.
    For CSV input a file named `stations.csv` must be provided in
    the specified directory containing the metadata (IDs, names, x/y
    coordinates, altitudes) of the stations. Station files must be in the same
    directory and must be named `<station_id>.csv`

    Parameters
    ----------
    meteo_format : str
        Data format (either 'netcdf' or 'csv').
    meteo_data_dir : path-like
        Location of the NetCDF files.
    start_date : datetime-like
        Start date.
    end_date : datetime-like
        End date.
    meteo_crs : str
        CRS of the station coordinates specified in the stations.csv file
        (required only when `meteo_format` is 'csv').
    grid_crs : str
        CRS of the model grid.
    bounds : list or None
        If specified, use only stations within the region specified as a list of
        (x_min, y_min, x_max, y_max) coordinates in the model grid CRS.
    exclude: list or None
        List of station IDs to exclude.
    include: list or None
        List of station IDs to include even if otherwise excluded via `bounds`
        or `exclude`.
    freq : str
        Pandas-compatible frequency string (e.g. '3H') to which the data should
        be resampled.
    aggregate : boolean, default False
        Aggregate data when downsampling to a lower frequency or take
        instantaneous values.
    logger : logger, default None
        Logger to use for status messages.

    Returns
    -------
    ds : Dataset
        Station data.

    Raises
    ------
    errors.MeteoDataError
        If no stations are found, `meteo_format` is unsupported, or the
        available data does not cover the requested period.
    """
    if logger is None:
        logger = loguru.logger

    meteo_data_dir = Path(meteo_data_dir)

    meta = _read_meteo_metadata(meteo_format, meteo_data_dir, meteo_crs, grid_crs)
    if meta.empty:
        raise errors.MeteoDataError('No stations found')

    meta = _apply_station_rules(meta, bounds, exclude, include)
    if meta.empty:
        raise errors.MeteoDataError('No stations available after applying station rules')

    datasets = []
    for station_id in meta.index:
        if meteo_format == 'netcdf':
            filename = meteo_data_dir / f'{station_id}.nc'
            # BUG FIX: the log message previously did not contain the filename
            logger.info(f'Reading meteo file: {filename}')
            ds = read_netcdf_meteo_file(filename)
        elif meteo_format == 'csv':
            filename = meteo_data_dir / f'{station_id}.csv'
            logger.info(f'Reading meteo file: {filename}')
            ds = read_csv_meteo_file(
                filename,
                station_id,
                meta.loc[station_id, 'name'],
                meta.loc[station_id, 'x'],
                meta.loc[station_id, 'y'],
                meta.loc[station_id, 'alt'],
                grid_crs,
            )
        else:
            # Fail fast with a clear message instead of hitting an
            # UnboundLocalError on `ds` below.
            raise errors.MeteoDataError(f'Unsupported meteo format: {meteo_format}')

        ds = _slice_and_resample_dataset(ds, start_date, end_date, freq, aggregate=aggregate)

        # ds.sizes is the non-deprecated equivalent of ds.dims for size lookups
        if ds.sizes['time'] == 0:
            logger.warning('File contains no meteo data for the specified period')
        else:
            datasets.append(ds)

    if len(datasets) == 0:
        raise errors.MeteoDataError('No meteo data available for the specified period')

    ds_combined = forcing.combine_point_datasets(datasets)
    dates_combined = ds_combined.time.to_index()
    if dates_combined[0] > start_date or dates_combined[-1] < end_date:
        raise errors.MeteoDataError('Insufficient meteo data available.\n'
                                    f'Requested period: {start_date}..{end_date}\n'
                                    f'Available period: {dates_combined[0]}..{dates_combined[-1]}')

    return ds_combined
def read_netcdf_meteo_file(filename):
    """
    Read a meteo station data file in NetCDF format.

    Variables found in the file are renamed according to
    constants.NETCDF_VAR_MAPPINGS; everything else (attaching the station ID
    derived from the file name, and any further validation) is delegated to
    forcing.make_point_dataset.

    NOTE(review): an earlier docstring also claimed unit checks and
    precipitation rate-to-sum conversion happen here; neither is visible in
    this function — presumably they happen inside
    ``forcing.make_point_dataset``. Verify before relying on it.

    Parameters
    ----------
    filename : path-like
        NetCDF filename.

    Returns
    -------
    ds : Dataset
        Station data.
    """
    ds = xr.load_dataset(filename)
    # rename variables (only those that actually appear in the file)
    ds_vars = list(ds.variables.keys())
    rename_vars = set(constants.NETCDF_VAR_MAPPINGS.keys()) & set(ds_vars)
    rename_dict = {v: constants.NETCDF_VAR_MAPPINGS[v] for v in rename_vars}
    ds = ds.rename_vars(rename_dict)
    ds = forcing.make_point_dataset(data=ds, point_id=_netcdf_station_id(filename))
    return ds
def read_csv_meteo_file(filename, station_id, station_name, x, y, alt, crs):
    """
    Read a meteorological data file in CSV format and return it as a Dataset.

    Unlike in read_netcdf_meteo_file, here it is assumed that precipitation is
    specified as a sum over the time step (i.e., kg m-2) instead of a rate (kg
    m-2 s-1).

    Parameters
    ----------
    filename : path-like
        CSV filename.
    station_id : str
        Station ID (must be unique).
    station_name : str
        Station name.
    x : float
        Station x coordinate.
    y : float
        Station y coordinate.
    alt : float
        Station altitude.
    crs : str
        CRS of the x/y coordinates.

    Returns
    -------
    ds : Dataset
        Station data.
    """
    # Mapping from CSV column names to internal parameter names. The internal
    # names currently equal the file column names, so this rename is a no-op
    # kept as an extension point.
    param_mappings = {name: name for name in (
        'temp', 'precip', 'rel_hum', 'sw_in', 'wind_speed',
    )}

    lon, lat = util.transform_coords(x, y, crs, constants.CRS_WGS84)

    df = pd.read_csv(filename, parse_dates=True, index_col=0)
    df = df.rename(columns=param_mappings)

    # TODO check if the time step is really 3600 s and if precip is really a sum

    return forcing.make_point_dataset(
        data=df,
        point_id=station_id,
        name=station_name,
        lon=lon,
        lat=lat,
        alt=alt,
    )
def _slice_and_resample_dataset(ds, start_date, end_date, freq, aggregate=False):
    """
    Slice a dataset to a given date range and optionally resample to a given
    time frequency.

    Parameters
    ----------
    ds : xr.Dataset
        Dataset.
    start_date : datetime-like
        Start date.
    end_date : datetime-like
        End date.
    freq : str
        Pandas-compatible frequency string (e.g. '3H'). Must be an exact subset
        of the original frequency of the data.
    aggregate : boolean, default False
        Aggregate data when downsampling to a lower frequency or take
        instantaneous values.

    Returns
    -------
    ds : Dataset
        Sliced and resampled dataset.

    Raises
    ------
    errors.MeteoDataError
        If the dataset has missing records or non-uniform timesteps.
    """
    freq_td = util.offset_to_timedelta(freq)
    inferred_freq = ds.time.to_index().inferred_freq
    td_1d = pd.Timedelta('1d')
    if inferred_freq is None and ds.dims['time'] > 2:
        # ("> 2" because inferring the frequency requires at least 3 points, so for shorter
        # time series inferred_freq is always None)
        raise errors.MeteoDataError('File contains missing records or non-uniform timesteps')
    if (
        aggregate
        and freq_td == td_1d
        and util.offset_to_timedelta(inferred_freq) < td_1d
    ):
        # When aggregating sub-daily data to daily values, extend the slice to
        # midnight of the following day so the final day's aggregation window
        # is complete (the surplus timestep is trimmed again during
        # resampling, see _resample_dataset).
        end_date = (end_date + td_1d).normalize()
    ds = ds.sel(time=slice(start_date, end_date))
    # Resample only when the data frequency could be inferred and actually
    # differs from the requested frequency.
    if inferred_freq is not None and inferred_freq != freq:
        ds = _resample_dataset(ds, freq, aggregate=aggregate)
    return ds
def _resample_dataset(ds, freq, aggregate=False):
    """
    Resample a dataset to a given time frequency.

    Parameters
    ----------
    ds : xr.Dataset
        Dataset.
    freq : str
        Pandas-compatible frequency string (e.g. '3H'). Must be an exact subset
        of the original frequency of the data.
    aggregate : boolean, default False
        Aggregate data when downsampling to a lower frequency or take
        instantaneous values.

    Returns
    -------
    ds_res : Dataset
        Resampled dataset.

    Raises
    ------
    errors.MeteoDataError
        If `freq` is longer than 1 day or not an exact subset of the original
        data frequency.
    """
    td = util.offset_to_timedelta(freq)
    td_1d = pd.Timedelta('1d')
    # Bin labeling differs by target frequency: sub-daily bins are labeled
    # with their right edge, daily bins with their left edge (the day they
    # aggregate over).
    if td < td_1d:
        resample_kwargs = dict(label='right', closed='right', origin='start')
    elif td == td_1d:
        resample_kwargs = dict(label='left', closed='right', origin='start')
    else:
        raise errors.MeteoDataError('Resampling to frequencies > 1 day is not supported')
    # ds.resample() is extremely slow for some reason, so we resample using pandas
    # (lon/lat/alt are constant per station and re-attached at the end)
    df = ds.to_dataframe().drop(columns=['lon', 'lat', 'alt'])
    if aggregate:
        # Calculate averages
        df_res = df.resample(freq, **resample_kwargs).mean()
        # We might end up with an extra bin after resampling; take only the dates which we would
        # have taken when using instantaneous values
        dates = df.asfreq(freq).index
        if td == td_1d:
            # When resampling from sub-daily to daily timesteps, df also includes the first timestep
            # of the day after the end date
            dates = dates[:-1]
        df_res = df_res.loc[dates]
    else:
        # Take the instantaneous values
        df_res = df.asfreq(freq)
    # Precipitation is summed up regardless of the aggregation setting
    # (skipna=False so that any missing value makes the sum NaN instead of
    # silently under-counting)
    if 'precip' in df:
        df_res['precip'] = df['precip'].resample(
            freq,
            **resample_kwargs,
        ).agg(pd.Series.sum, skipna=False)
    # Check if the desired frequency is a subset of the original frequency of the
    # data (e.g., resampling hourly to 3-hourly data is ok, but not hourly to
    # 1.5-hourly, or upsampling in general)
    if not df.index.intersection(df_res.index).equals(df_res.index):
        raise errors.MeteoDataError(f'Resampling from freq "{df.index.inferred_freq}" '
                                    f'to "{freq}" not supported')
    # Create a new dataset with the resampled time series
    ds_res = ds[['lon', 'lat', 'alt', 'time']]
    ds_res['time'] = df_res.index
    ds_res.attrs = ds.attrs
    for param in df_res.columns:
        ds_res[param] = df_res[param]
    return ds_res
def _netcdf_station_id(filename):
"""
Return the station ID of a station in NetCDF format, i.e., the base name of
the file without the .nc extension.
"""
return Path(filename).stem
def _read_meteo_metadata(meteo_format, meteo_data_dir, meteo_crs, grid_crs):
"""
Read the metadata of the available meteorological stations.
Parameters
----------
meteo_format : str
Data format (either 'netcdf' or 'csv').
meteo_data_dir : path-like
Location of the NetCDF files.
meteo_crs : str
CRS of the station coordinates specified in the stations.csv file
(required only when `meteo_format` is 'csv').
grid_crs : str
CRS of the model grid.
Returns
-------
meta : DataFrame
DataFrame containing the station IDs as index and the columns `name`,
`x` (x coordinate in the grid CRS), `y` (y coordinate in the grid CRS),
and `alt`.
"""
if meteo_format == 'netcdf':
nc_files = sorted(list(meteo_data_dir.glob('*.nc')))
if len(nc_files) == 0:
raise errors.MeteoDataError(f'No meteo data | |
from __future__ import absolute_import
import random
import logging
from datetime import datetime
from time import time
from django.utils import timezone
from django.conf import settings
import sentry_sdk
from sentry_sdk.tracing import Span
from sentry_relay.processing import StoreNormalizer
from sentry import features, reprocessing, options
from sentry.relay.config import get_project_config
from sentry.datascrubbing import scrub_data
from sentry.constants import DEFAULT_STORE_NORMALIZER_ARGS
from sentry.attachments import attachment_cache
from sentry.tasks.base import instrumented_task
from sentry.utils import metrics
from sentry.utils.safe import safe_execute
from sentry.stacktraces.processing import process_stacktraces, should_process_for_stacktraces
from sentry.utils.canonical import CanonicalKeyDict, CANONICAL_TYPES
from sentry.utils.dates import to_datetime
from sentry.utils.sdk import set_current_project
from sentry.models import ProjectOption, Activity, Project, Organization
from sentry.eventstore.processing import event_processing_store
# Module-level loggers for the event-store pipeline: one for failures, one
# for informational status messages.
error_logger = logging.getLogger("sentry.errors.events")
info_logger = logging.getLogger("sentry.store")
# Is reprocessing on or off by default?
REPROCESSING_DEFAULT = False
class RetryProcessing(Exception):
    """Marker exception signalling that event processing should be retried
    (raised/handled by callers outside this view)."""
class RetrySymbolication(Exception):
    """Marker exception signalling that symbolication should be re-attempted.

    ``retry_after`` is the delay before the retry (used as a Celery countdown
    by the handler, so presumably seconds — see _do_symbolicate_event).
    """

    # NOTE: super().__init__() is deliberately not called; Exception.__new__
    # already records the constructor arguments in .args.
    def __init__(self, retry_after=None):
        self.retry_after = retry_after
@metrics.wraps("should_process")
def should_process(data):
    """Quick check if processing is needed at all.

    Transactions are never processed. Any other event needs processing when
    at least one v2 plugin offers preprocessors or enhancers for it, or when
    stacktrace processing applies.
    """
    from sentry.plugins.base import plugins

    if data.get("type") == "transaction":
        return False

    for plugin in plugins.all(version=2):
        # Per plugin, check preprocessors before enhancers; stop at the
        # first non-empty result.
        for getter in (plugin.get_event_preprocessors, plugin.get_event_enhancers):
            if safe_execute(getter, data=data, _with_transaction=False):
                return True

    return bool(should_process_for_stacktraces(data))
def submit_process(
    project, from_reprocessing, cache_key, event_id, start_time, data, data_has_changed=None,
):
    """Enqueue the (re)processing task for an event.

    ``project`` and ``data`` are accepted for signature compatibility with
    the other submit_* helpers but are not forwarded to the task.
    """
    if from_reprocessing:
        task = process_event_from_reprocessing
    else:
        task = process_event
    task.delay(
        cache_key=cache_key,
        start_time=start_time,
        event_id=event_id,
        data_has_changed=data_has_changed,
    )
def sample_symbolicate_event_apm():
    """Randomly decide whether to trace a symbolicate_event run.

    The sampling rate comes from
    settings.SENTRY_SYMBOLICATE_EVENT_APM_SAMPLING (default 0, i.e. never).
    """
    sampling_rate = getattr(settings, "SENTRY_SYMBOLICATE_EVENT_APM_SAMPLING", 0)
    return random.random() < sampling_rate
def submit_symbolicate(project, from_reprocessing, cache_key, event_id, start_time, data):
    """Enqueue the symbolication task for an event.

    ``project`` and ``data`` are accepted for signature compatibility with
    the other submit_* helpers but are not forwarded to the task.
    """
    if from_reprocessing:
        task = symbolicate_event_from_reprocessing
    else:
        task = symbolicate_event
    task.delay(cache_key=cache_key, start_time=start_time, event_id=event_id)
def submit_save_event(project, cache_key, event_id, start_time, data):
    """Enqueue the final save_event task for an event.

    When a cache key is available the inline payload is dropped from the task
    arguments (presumably re-fetched from the event processing store by the
    task — avoids serializing it through the broker; TODO confirm).
    """
    payload = None if cache_key else data
    save_event.delay(
        cache_key=cache_key,
        data=payload,
        start_time=start_time,
        event_id=event_id,
        project_id=project.id,
    )
def sample_process_event_apm():
    """Randomly decide whether to trace a process_event run.

    The sampling rate comes from settings.SENTRY_PROCESS_EVENT_APM_SAMPLING
    (default 0, i.e. never).
    """
    sampling_rate = getattr(settings, "SENTRY_PROCESS_EVENT_APM_SAMPLING", 0)
    return random.random() < sampling_rate
def _do_preprocess_event(cache_key, data, start_time, event_id, process_task, project):
    """
    Shared implementation of the preprocess_event tasks.

    Loads the event payload (from `data` or the event processing store) and
    routes it to the next pipeline stage: symbolication, plugin/stacktrace
    processing, or directly to save_event.

    :param string cache_key: cache key of the event payload
    :param dict data: the event payload (fetched from the cache when None)
    :param int start_time: timestamp when the event was ingested
    :param string event_id: the event identifier
    :param process_task: the process task to enqueue if processing is needed;
        also used to detect whether we are in the reprocessing pipeline
    :param project: the Project instance (looked up from cache when None)
    """
    from sentry.lang.native.processing import should_process_with_symbolicator
    if cache_key and data is None:
        data = event_processing_store.get(cache_key)
    if data is None:
        # Payload expired or was never stored; count and bail out.
        metrics.incr("events.failed", tags={"reason": "cache", "stage": "pre"}, skip_internal=False)
        error_logger.error("preprocess.failed.empty", extra={"cache_key": cache_key})
        return
    # Keep the raw payload around: downstream submit_* calls receive the
    # original dict, not the CanonicalKeyDict wrapper.
    original_data = data
    data = CanonicalKeyDict(data)
    project_id = data["project"]
    set_current_project(project_id)
    if project is None:
        project = Project.objects.get_from_cache(id=project_id)
    else:
        # Sanity check: a caller-supplied project must match the payload.
        assert project.id == project_id, (project.id, project_id)
    from_reprocessing = process_task is process_event_from_reprocessing
    if should_process_with_symbolicator(data):
        submit_symbolicate(
            project, from_reprocessing, cache_key, event_id, start_time, original_data
        )
        return
    if should_process(data):
        submit_process(
            project,
            from_reprocessing,
            cache_key,
            event_id,
            start_time,
            original_data,
            data_has_changed=False,
        )
        return
    # Nothing to symbolicate or process — save directly.
    submit_save_event(project, cache_key, event_id, start_time, original_data)
@instrumented_task(
    name="sentry.tasks.store.preprocess_event",
    queue="events.preprocess_event",
    time_limit=65,
    soft_time_limit=60,
)
def preprocess_event(
    cache_key=None, data=None, start_time=None, event_id=None, project=None, **kwargs
):
    """
    Task entry point of the normal (non-reprocessing) preprocess stage;
    delegates to _do_preprocess_event with process_event as follow-up task.

    :param string cache_key: cache key of the event payload
    :param dict data: the event payload (fetched from the cache when None)
    :param int start_time: timestamp when the event was ingested
    :param string event_id: the event identifier
    :param project: the Project instance (looked up from cache when None)
    """
    return _do_preprocess_event(
        cache_key=cache_key,
        data=data,
        start_time=start_time,
        event_id=event_id,
        process_task=process_event,
        project=project,
    )
@instrumented_task(
    name="sentry.tasks.store.preprocess_event_from_reprocessing",
    queue="events.reprocessing.preprocess_event",
    time_limit=65,
    soft_time_limit=60,
)
def preprocess_event_from_reprocessing(
    cache_key=None, data=None, start_time=None, event_id=None, project=None, **kwargs
):
    """
    Reprocessing-pipeline variant of preprocess_event; identical except that
    it runs on a dedicated queue and hands over to
    process_event_from_reprocessing.

    :param string cache_key: cache key of the event payload
    :param dict data: the event payload (fetched from the cache when None)
    :param int start_time: timestamp when the event was ingested
    :param string event_id: the event identifier
    :param project: the Project instance (looked up from cache when None)
    """
    return _do_preprocess_event(
        cache_key=cache_key,
        data=data,
        start_time=start_time,
        event_id=event_id,
        process_task=process_event_from_reprocessing,
        project=project,
    )
def _do_symbolicate_event(cache_key, start_time, event_id, symbolicate_task, data=None):
    """
    Shared implementation of the symbolicate_event tasks.

    Loads the event payload (from `data` or the event processing store), runs
    the native symbolication function on it, and hands the (possibly
    modified) payload on to the process stage. Transient symbolicator
    failures (RetrySymbolication) are re-enqueued via the "sleep" queue until
    SYMBOLICATOR_PROCESS_EVENT_HARD_TIMEOUT is exceeded, after which the
    event continues unsymbolicated with error flags set.

    :param string cache_key: cache key of the event payload
    :param int start_time: timestamp when the event was ingested
    :param string event_id: the event identifier (overridden below by the
        payload's own event_id)
    :param symbolicate_task: the task this call runs as; used to detect the
        reprocessing pipeline
    :param dict data: the event payload (fetched from the cache when None)
    """
    from sentry.lang.native.processing import get_symbolication_function
    if data is None:
        data = event_processing_store.get(cache_key)
    if data is None:
        # Payload expired or was never stored; count and bail out.
        metrics.incr(
            "events.failed", tags={"reason": "cache", "stage": "symbolicate"}, skip_internal=False
        )
        error_logger.error("symbolicate.failed.empty", extra={"cache_key": cache_key})
        return
    data = CanonicalKeyDict(data)
    project_id = data["project"]
    set_current_project(project_id)
    event_id = data["event_id"]
    symbolication_function = get_symbolication_function(data)
    has_changed = False
    from_reprocessing = symbolicate_task is symbolicate_event_from_reprocessing
    try:
        with sentry_sdk.start_span(op="tasks.store.symbolicate_event.symbolication") as span:
            # (sic) "symbolicaton_function": existing span-data key, typo kept.
            span.set_data("symbolicaton_function", symbolication_function.__name__)
            with metrics.timer("tasks.store.symbolicate_event.symbolication"):
                symbolicated_data = symbolication_function(data)
            span.set_data("symbolicated_data", bool(symbolicated_data))
        if symbolicated_data:
            data = symbolicated_data
            has_changed = True
    except RetrySymbolication as e:
        if start_time and (time() - start_time) > settings.SYMBOLICATOR_PROCESS_EVENT_WARN_TIMEOUT:
            error_logger.warning(
                "symbolicate.slow", extra={"project_id": project_id, "event_id": event_id}
            )
        if start_time and (time() - start_time) > settings.SYMBOLICATOR_PROCESS_EVENT_HARD_TIMEOUT:
            # Do not drop event but actually continue with rest of pipeline
            # (persisting unsymbolicated event)
            error_logger.exception(
                "symbolicate.failed.infinite_retry",
                extra={"project_id": project_id, "event_id": event_id},
            )
            data.setdefault("_metrics", {})["flag.processing.error"] = True
            data.setdefault("_metrics", {})["flag.processing.fatal"] = True
            has_changed = True
        else:
            # Requeue the task in the "sleep" queue
            retry_symbolicate_event.apply_async(
                args=(),
                kwargs={
                    "symbolicate_task_name": symbolicate_task.__name__,
                    "task_kwargs": {
                        "cache_key": cache_key,
                        "event_id": event_id,
                        "start_time": start_time,
                    },
                },
                countdown=e.retry_after,
            )
            return
    except Exception:
        # Any other failure is terminal for symbolication: flag the event and
        # let it continue through the pipeline unsymbolicated.
        error_logger.exception("tasks.store.symbolicate_event.symbolication")
        data.setdefault("_metrics", {})["flag.processing.error"] = True
        data.setdefault("_metrics", {})["flag.processing.fatal"] = True
        has_changed = True
    # We cannot persist canonical types in the cache, so we need to
    # downgrade this.
    if isinstance(data, CANONICAL_TYPES):
        data = dict(data.items())
    if has_changed:
        cache_key = event_processing_store.store(data)
    process_task = process_event_from_reprocessing if from_reprocessing else process_event
    _do_process_event(
        cache_key=cache_key,
        start_time=start_time,
        event_id=event_id,
        process_task=process_task,
        data=data,
        data_has_changed=has_changed,
        from_symbolicate=True,
    )
@instrumented_task(
    name="sentry.tasks.store.symbolicate_event",
    queue="events.symbolicate_event",
    time_limit=65,
    soft_time_limit=60,
)
def symbolicate_event(cache_key, start_time=None, event_id=None, **kwargs):
    """
    Handles event symbolication using the external service: symbolicator.

    The actual work happens in _do_symbolicate_event; this wrapper only adds
    an (APM-sampled) tracing span around it.

    :param string cache_key: the cache key for the event data
    :param int start_time: the timestamp when the event was ingested
    :param string event_id: the event identifier
    """
    with sentry_sdk.start_span(
        Span(
            op="tasks.store.symbolicate_event",
            transaction="TaskSymbolicateEvent",
            sampled=sample_symbolicate_event_apm(),
        )
    ):
        return _do_symbolicate_event(
            cache_key=cache_key,
            start_time=start_time,
            event_id=event_id,
            symbolicate_task=symbolicate_event,
        )
@instrumented_task(
    name="sentry.tasks.store.symbolicate_event_from_reprocessing",
    queue="events.reprocessing.symbolicate_event",
    time_limit=65,
    soft_time_limit=60,
)
def symbolicate_event_from_reprocessing(cache_key, start_time=None, event_id=None, **kwargs):
    """
    Reprocessing-pipeline variant of symbolicate_event; identical except that
    it runs on a dedicated queue and passes itself as `symbolicate_task` so
    the follow-up work stays in the reprocessing pipeline.

    :param string cache_key: the cache key for the event data
    :param int start_time: the timestamp when the event was ingested
    :param string event_id: the event identifier
    """
    with sentry_sdk.start_span(
        Span(
            op="tasks.store.symbolicate_event_from_reprocessing",
            transaction="TaskSymbolicateEvent",
            sampled=sample_symbolicate_event_apm(),
        )
    ):
        return _do_symbolicate_event(
            cache_key=cache_key,
            start_time=start_time,
            event_id=event_id,
            symbolicate_task=symbolicate_event_from_reprocessing,
        )
@instrumented_task(
    name="sentry.tasks.store.retry_symbolicate_event",
    queue="sleep",
    time_limit=(60 * 5) + 5,
    soft_time_limit=60 * 5,
)
def retry_symbolicate_event(symbolicate_task_name, task_kwargs, **kwargs):
    """
    The only purpose of this task is be enqueued with some ETA set. This is
    essentially an implementation of ETAs on top of Celery's existing ETAs, but
    with the intent of having separate workers wait for those ETAs.
    """
    # Whitelist of re-dispatchable tasks; anything else is rejected.
    known_tasks = {
        "symbolicate_event": symbolicate_event,
        "symbolicate_event_from_reprocessing": symbolicate_event_from_reprocessing,
    }
    if symbolicate_task_name not in known_tasks:
        raise ValueError(
            "Invalid argument for symbolicate_task_name: %s" % (symbolicate_task_name,)
        )
    known_tasks[symbolicate_task_name].delay(**task_kwargs)
@instrumented_task(
    name="sentry.tasks.store.retry_process_event",
    queue="sleep",
    time_limit=(60 * 5) + 5,
    soft_time_limit=60 * 5,
)
def retry_process_event(process_task_name, task_kwargs, **kwargs):
    """
    The only purpose of this task is be enqueued with some ETA set. This is
    essentially an implementation of ETAs on top of Celery's existing ETAs, but
    with the intent of having separate workers wait for those ETAs.
    """
    # Whitelist of re-dispatchable tasks; anything else is rejected.
    known_tasks = {
        "process_event": process_event,
        "process_event_from_reprocessing": process_event_from_reprocessing,
    }
    if process_task_name not in known_tasks:
        raise ValueError("Invalid argument for process_task_name: %s" % (process_task_name,))
    known_tasks[process_task_name].delay(**task_kwargs)
def _do_process_event(
cache_key,
start_time,
event_id,
process_task,
data=None,
data_has_changed=None,
from_symbolicate=False,
):
from sentry.plugins.base import plugins
if data is None:
data = event_processing_store.get(cache_key)
if data is None:
metrics.incr(
"events.failed", tags={"reason": "cache", "stage": "process"}, skip_internal=False
)
error_logger.error("process.failed.empty", extra={"cache_key": cache_key})
return
data = CanonicalKeyDict(data)
project_id = data["project"]
set_current_project(project_id)
event_id = data["event_id"]
with sentry_sdk.start_span(op="tasks.store.process_event.get_project_from_cache"):
project = Project.objects.get_from_cache(id=project_id)
with metrics.timer("tasks.store.process_event.organization.get_from_cache"):
project._organization_cache = Organization.objects.get_from_cache(
id=project.organization_id
)
has_changed = bool(data_has_changed)
with sentry_sdk.start_span(op="tasks.store.process_event.get_reprocessing_revision"):
# Fetch the reprocessing revision
reprocessing_rev = reprocessing.get_reprocessing_revision(project_id)
# Stacktrace based event processors.
with sentry_sdk.start_span(op="task.store.process_event.stacktraces"):
with metrics.timer(
"tasks.store.process_event.stacktraces", tags={"from_symbolicate": from_symbolicate}
):
new_data = process_stacktraces(data)
if new_data is not None:
has_changed = True
data = new_data
# Second round of datascrubbing after stacktrace and language-specific
# processing. First round happened as part of ingest.
#
# *Right now* the only sensitive data that is added in stacktrace
# processing are usernames in filepaths, so we run directly after
# stacktrace processors.
#
# We do not yet want to deal with context data produced by plugins like
# sessionstack or fullstory (which are in `get_event_preprocessors`), as
# this data is very unlikely to be sensitive data. This is why scrubbing
# happens somewhere in the middle of the pipeline.
#
# On the other hand, Javascript event error translation is happening after
# this block because it uses `get_event_preprocessors` instead of
# `get_event_enhancers`.
#
# We are fairly confident, however, that this should run *before*
# re-normalization as it is hard to find sensitive data in partially
# trimmed strings.
if (
has_changed
and options.get("processing.can-use-scrubbers")
and features.has("organizations:datascrubbers-v2", project.organization, actor=None)
):
with sentry_sdk.start_span(op="task.store.datascrubbers.scrub"):
with metrics.timer(
"tasks.store.datascrubbers.scrub", tags={"from_symbolicate": from_symbolicate}
):
project_config = get_project_config(project)
new_data = safe_execute(scrub_data, project_config=project_config, event=data.data)
# XXX(markus): When datascrubbing is finally "totally stable", we might want
# to drop the event if it crashes to avoid saving PII
if new_data is not None:
data.data = new_data
# TODO(dcramer): ideally we would know if data changed by default
# Default event processors.
for plugin in plugins.all(version=2):
with sentry_sdk.start_span(op="task.store.process_event.preprocessors") as span:
span.set_data("plugin", plugin.slug)
span.set_data("from_symbolicate", from_symbolicate)
with metrics.timer(
"tasks.store.process_event.preprocessors",
tags={"plugin": plugin.slug, "from_symbolicate": from_symbolicate},
):
processors = safe_execute(
plugin.get_event_preprocessors, data=data, _with_transaction=False
)
for processor in processors or ():
try:
result = processor(data)
except Exception:
error_logger.exception("tasks.store.preprocessors.error")
data.setdefault("_metrics", {})["flag.processing.error"] = True
has_changed = True
else:
if result:
data = result
has_changed = True
assert data["project"] == project_id, "Project cannot be mutated by plugins"
# We cannot persist canonical types in the cache, so we need to
# downgrade this.
if isinstance(data, CANONICAL_TYPES):
data = dict(data.items())
if has_changed:
# Run some of normalization again such that we don't:
# - persist e.g. incredibly large stacktraces from minidumps
# - store event timestamps that are older than our retention window
# (also happening with minidumps)
normalizer = StoreNormalizer(
remove_other=False, is_renormalize=True, **DEFAULT_STORE_NORMALIZER_ARGS
)
data = normalizer.normalize_event(dict(data))
issues = data.get("processing_issues")
try:
if issues and create_failed_event(
cache_key,
data,
project_id,
list(issues.values()),
event_id=event_id,
start_time=start_time,
reprocessing_rev=reprocessing_rev,
):
return
except RetryProcessing:
# If `create_failed_event` indicates that we need to retry we
# invoke ourselves | |
<reponame>googleapis/googleapis-gen<gh_stars>1-10
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.analytics.admin_v1alpha.services.analytics_admin_service import pagers
from google.analytics.admin_v1alpha.types import analytics_admin
from google.analytics.admin_v1alpha.types import resources
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.protobuf import wrappers_pb2 # type: ignore
from .transports.base import AnalyticsAdminServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import AnalyticsAdminServiceGrpcAsyncIOTransport
from .client import AnalyticsAdminServiceClient
class AnalyticsAdminServiceAsyncClient:
    """Service Interface for the Analytics Admin API (GA4)."""

    # Thin async wrapper: endpoint resolution, credentials and resource-path
    # templating are all delegated to the synchronous client held here.
    _client: AnalyticsAdminServiceClient

    # Default API endpoints, re-exported from the sync client.
    DEFAULT_ENDPOINT = AnalyticsAdminServiceClient.DEFAULT_ENDPOINT
    DEFAULT_MTLS_ENDPOINT = AnalyticsAdminServiceClient.DEFAULT_MTLS_ENDPOINT

    # Resource-path builders and parsers, re-exported from the sync client so
    # callers can use them directly on the async class (generated code).
    account_path = staticmethod(AnalyticsAdminServiceClient.account_path)
    parse_account_path = staticmethod(AnalyticsAdminServiceClient.parse_account_path)
    account_summary_path = staticmethod(AnalyticsAdminServiceClient.account_summary_path)
    parse_account_summary_path = staticmethod(AnalyticsAdminServiceClient.parse_account_summary_path)
    android_app_data_stream_path = staticmethod(AnalyticsAdminServiceClient.android_app_data_stream_path)
    parse_android_app_data_stream_path = staticmethod(AnalyticsAdminServiceClient.parse_android_app_data_stream_path)
    conversion_event_path = staticmethod(AnalyticsAdminServiceClient.conversion_event_path)
    parse_conversion_event_path = staticmethod(AnalyticsAdminServiceClient.parse_conversion_event_path)
    custom_dimension_path = staticmethod(AnalyticsAdminServiceClient.custom_dimension_path)
    parse_custom_dimension_path = staticmethod(AnalyticsAdminServiceClient.parse_custom_dimension_path)
    custom_metric_path = staticmethod(AnalyticsAdminServiceClient.custom_metric_path)
    parse_custom_metric_path = staticmethod(AnalyticsAdminServiceClient.parse_custom_metric_path)
    data_retention_settings_path = staticmethod(AnalyticsAdminServiceClient.data_retention_settings_path)
    parse_data_retention_settings_path = staticmethod(AnalyticsAdminServiceClient.parse_data_retention_settings_path)
    data_sharing_settings_path = staticmethod(AnalyticsAdminServiceClient.data_sharing_settings_path)
    parse_data_sharing_settings_path = staticmethod(AnalyticsAdminServiceClient.parse_data_sharing_settings_path)
    display_video360_advertiser_link_path = staticmethod(AnalyticsAdminServiceClient.display_video360_advertiser_link_path)
    parse_display_video360_advertiser_link_path = staticmethod(AnalyticsAdminServiceClient.parse_display_video360_advertiser_link_path)
    display_video360_advertiser_link_proposal_path = staticmethod(AnalyticsAdminServiceClient.display_video360_advertiser_link_proposal_path)
    parse_display_video360_advertiser_link_proposal_path = staticmethod(AnalyticsAdminServiceClient.parse_display_video360_advertiser_link_proposal_path)
    enhanced_measurement_settings_path = staticmethod(AnalyticsAdminServiceClient.enhanced_measurement_settings_path)
    parse_enhanced_measurement_settings_path = staticmethod(AnalyticsAdminServiceClient.parse_enhanced_measurement_settings_path)
    firebase_link_path = staticmethod(AnalyticsAdminServiceClient.firebase_link_path)
    parse_firebase_link_path = staticmethod(AnalyticsAdminServiceClient.parse_firebase_link_path)
    global_site_tag_path = staticmethod(AnalyticsAdminServiceClient.global_site_tag_path)
    parse_global_site_tag_path = staticmethod(AnalyticsAdminServiceClient.parse_global_site_tag_path)
    google_ads_link_path = staticmethod(AnalyticsAdminServiceClient.google_ads_link_path)
    parse_google_ads_link_path = staticmethod(AnalyticsAdminServiceClient.parse_google_ads_link_path)
    google_signals_settings_path = staticmethod(AnalyticsAdminServiceClient.google_signals_settings_path)
    parse_google_signals_settings_path = staticmethod(AnalyticsAdminServiceClient.parse_google_signals_settings_path)
    ios_app_data_stream_path = staticmethod(AnalyticsAdminServiceClient.ios_app_data_stream_path)
    parse_ios_app_data_stream_path = staticmethod(AnalyticsAdminServiceClient.parse_ios_app_data_stream_path)
    measurement_protocol_secret_path = staticmethod(AnalyticsAdminServiceClient.measurement_protocol_secret_path)
    parse_measurement_protocol_secret_path = staticmethod(AnalyticsAdminServiceClient.parse_measurement_protocol_secret_path)
    property_path = staticmethod(AnalyticsAdminServiceClient.property_path)
    parse_property_path = staticmethod(AnalyticsAdminServiceClient.parse_property_path)
    user_link_path = staticmethod(AnalyticsAdminServiceClient.user_link_path)
    parse_user_link_path = staticmethod(AnalyticsAdminServiceClient.parse_user_link_path)
    web_data_stream_path = staticmethod(AnalyticsAdminServiceClient.web_data_stream_path)
    parse_web_data_stream_path = staticmethod(AnalyticsAdminServiceClient.parse_web_data_stream_path)

    # Common (project/organization/location/etc.) path helpers, likewise
    # re-exported from the sync client.
    common_billing_account_path = staticmethod(AnalyticsAdminServiceClient.common_billing_account_path)
    parse_common_billing_account_path = staticmethod(AnalyticsAdminServiceClient.parse_common_billing_account_path)
    common_folder_path = staticmethod(AnalyticsAdminServiceClient.common_folder_path)
    parse_common_folder_path = staticmethod(AnalyticsAdminServiceClient.parse_common_folder_path)
    common_organization_path = staticmethod(AnalyticsAdminServiceClient.common_organization_path)
    parse_common_organization_path = staticmethod(AnalyticsAdminServiceClient.parse_common_organization_path)
    common_project_path = staticmethod(AnalyticsAdminServiceClient.common_project_path)
    parse_common_project_path = staticmethod(AnalyticsAdminServiceClient.parse_common_project_path)
    common_location_path = staticmethod(AnalyticsAdminServiceClient.common_location_path)
    parse_common_location_path = staticmethod(AnalyticsAdminServiceClient.parse_common_location_path)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
    """Creates an instance of this client using the provided credentials
    info.

    Args:
        info (dict): The service account private key info.
        args: Additional arguments to pass to the constructor.
        kwargs: Additional arguments to pass to the constructor.
    Returns:
        AnalyticsAdminServiceAsyncClient: The constructed client.
    """
    # Reuse the sync client's classmethod implementation, but bind it to the
    # async class so the returned instance is an async client.
    factory = AnalyticsAdminServiceClient.from_service_account_info.__func__  # type: ignore
    return factory(AnalyticsAdminServiceAsyncClient, info, *args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
    """Creates an instance of this client using the provided credentials
    file.

    Args:
        filename (str): The path to the service account private key json
            file.
        args: Additional arguments to pass to the constructor.
        kwargs: Additional arguments to pass to the constructor.
    Returns:
        AnalyticsAdminServiceAsyncClient: The constructed client.
    """
    # Reuse the sync client's classmethod implementation, but bind it to the
    # async class so the returned instance is an async client.
    factory = AnalyticsAdminServiceClient.from_service_account_file.__func__  # type: ignore
    return factory(AnalyticsAdminServiceAsyncClient, filename, *args, **kwargs)

# Backwards-compatible alias kept for callers using the older name.
from_service_account_json = from_service_account_file
@property
def transport(self) -> AnalyticsAdminServiceTransport:
    """Returns the transport used by the client instance.

    Returns:
        AnalyticsAdminServiceTransport: The transport used by the client instance.
    """
    # Transport lives on the wrapped synchronous client.
    return self._client.transport

# Resolve transport classes via the wrapped sync client's metaclass hook;
# partial() pre-binds the class argument so this behaves like a classmethod.
get_transport_class = functools.partial(type(AnalyticsAdminServiceClient).get_transport_class, type(AnalyticsAdminServiceClient))
def __init__(self, *,
        credentials: ga_credentials.Credentials = None,
        transport: Union[str, AnalyticsAdminServiceTransport] = "grpc_asyncio",
        client_options: ClientOptions = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        ) -> None:
    """Instantiates the analytics admin service client.

    Args:
        credentials (Optional[google.auth.credentials.Credentials]): The
            authorization credentials to attach to requests. These
            credentials identify the application to the service; if none
            are specified, the client will attempt to ascertain the
            credentials from the environment.
        transport (Union[str, ~.AnalyticsAdminServiceTransport]): The
            transport to use. If set to None, a transport is chosen
            automatically.
        client_options (ClientOptions): Custom options for the client. It
            won't take effect if a ``transport`` instance is provided.
            (1) The ``api_endpoint`` property can be used to override the
            default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
            environment variable can also be used to override the endpoint:
            "always" (always use the default mTLS endpoint), "never" (always
            use the default regular endpoint) and "auto" (auto switch to the
            default mTLS endpoint if client certificate is present, this is
            the default value). However, the ``api_endpoint`` property takes
            precedence if provided.
            (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
            is "true", then the ``client_cert_source`` property can be used
            to provide client certificate for mutual TLS transport. If
            not provided, the default SSL client certificate will be used if
            present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
            set, no client certificate will be used.

    Raises:
        google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
            creation failed for any reason.
    """
    # All configuration handling (endpoint/mTLS resolution, credentials)
    # is delegated to the synchronous client; this class only wraps its
    # transport with async method stubs.
    self._client = AnalyticsAdminServiceClient(
        credentials=credentials,
        transport=transport,
        client_options=client_options,
        client_info=client_info,
    )
async def get_account(self,
        request: analytics_admin.GetAccountRequest = None,
        *,
        name: str = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
        ) -> resources.Account:
    r"""Lookup for a single Account.

    Args:
        request (:class:`google.analytics.admin_v1alpha.types.GetAccountRequest`):
            The request object. Request message for GetAccount RPC.
        name (:class:`str`):
            Required. The name of the account to lookup.
            Format: accounts/{account}  Example: "accounts/100".
            This corresponds to the ``name`` field on the ``request``
            instance; if ``request`` is provided, this should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.analytics.admin_v1alpha.types.Account:
            A resource message representing a Google Analytics account.
    """
    # A pre-built request and flattened field arguments are mutually
    # exclusive. Note: any([name]) is the truthiness check used across the
    # generated surface (an empty string does not count as "set").
    if request is not None and any([name]):
        raise ValueError("If the `request` argument is set, then none of "
                         "the individual field arguments should be set.")

    # Coerce whatever we were given into a proper protobuf request, then
    # apply the flattened field, if any.
    request = analytics_admin.GetAccountRequest(request)
    if name is not None:
        request.name = name

    # Wrap the transport method with retry/timeout handling and friendly
    # error translation.
    rpc = gapic_v1.method_async.wrap_method(
        self._client._transport.get_account,
        default_timeout=60.0,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # The resource name must also travel in the routing header metadata.
    routing_header = gapic_v1.routing_header.to_grpc_metadata(
        (("name", request.name),)
    )
    metadata = tuple(metadata) + (routing_header,)

    # Issue the RPC and hand back the Account resource.
    return await rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
async def list_accounts(self,
        request: analytics_admin.ListAccountsRequest = None,
        *,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
        ) -> pagers.ListAccountsAsyncPager:
    r"""Returns all accounts accessible by the caller.

    Note that these accounts might not currently have GA4 properties.
    Soft-deleted (ie: "trashed") accounts are excluded by default.
    Returns an empty list if no relevant accounts are found.

    Args:
        request (:class:`google.analytics.admin_v1alpha.types.ListAccountsRequest`):
            The request object. Request message for ListAccounts RPC.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.analytics.admin_v1alpha.services.analytics_admin_service.pagers.ListAccountsAsyncPager:
            Iterating over this object will yield results and resolve
            additional pages automatically.
    """
    # Coerce whatever we were given into a proper protobuf request.
    request = analytics_admin.ListAccountsRequest(request)

    # Wrap the transport method with retry/timeout handling and friendly
    # error translation.
    rpc = gapic_v1.method_async.wrap_method(
        self._client._transport.list_accounts,
        default_timeout=60.0,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # Fetch the first page.
    first_page = await rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )

    # Wrap the response in an async pager (`__aiter__`) so subsequent pages
    # are fetched transparently using the same wrapped RPC and metadata.
    return pagers.ListAccountsAsyncPager(
        method=rpc,
        request=request,
        response=first_page,
        metadata=metadata,
    )
async def delete_account(self,
request: analytics_admin.DeleteAccountRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Marks target Account as soft-deleted (ie: "trashed")
and returns it.
This API does not have a method to restore soft-deleted
accounts. However, they can be restored using the Trash
Can UI.
If the accounts are not restored before the expiration
time, the account and all child resources (eg:
Properties, GoogleAdsLinks, Streams, | |
CV scores
cv_trainmisclassifiedsamples.append(train[cv_pipeline.modelParameters['Logistic']['MisclassifiedSamples']])
cv_trainclasspredictions.append([*zip(train, cv_pipeline.modelParameters['Logistic']['ClassPredictions'])])
# TODO: add the roc curve interpolation in the fitting method
cv_trainroc_curve.append(cv_pipeline.modelParameters['Logistic']['ROC'])
testscores = cv_pipeline.transform(x=xtest)
class_score = cv_pipeline.logreg_algorithm.decision_function(testscores)
y_pred = cv_pipeline.predict(xtest)
if n_classes == 2:
test_accuracy = metrics.accuracy_score(ytest, y_pred)
test_precision = metrics.precision_score(ytest, y_pred)
test_recall = metrics.recall_score(ytest, y_pred)
test_auc_area = metrics.roc_auc_score(ytest, class_score)
test_f1_score = metrics.f1_score(ytest, y_pred)
test_zero_oneloss = metrics.zero_one_loss(ytest, y_pred)
test_matthews_mcc = metrics.matthews_corrcoef(ytest, y_pred)
else:
test_accuracy = metrics.accuracy_score(ytest, y_pred)
test_precision = metrics.precision_score(ytest, y_pred, average='weighted')
test_recall = metrics.recall_score(ytest, y_pred, average='weighted')
#test_auc_area = metrics.roc_auc_score(ytest, class_score)
test_f1_score = metrics.f1_score(ytest, y_pred, average='weighted')
test_zero_oneloss = metrics.zero_one_loss(ytest, y_pred)
test_matthews_mcc = np.nan
# Check the actual indexes in the original samples
test_misclassified_samples = test[np.where(ytest.ravel() != y_pred.ravel())[0]]
test_classpredictions = [*zip(test, y_pred)]
test_conf_matrix = metrics.confusion_matrix(ytest, y_pred)
# TODO: Apply the same ROC curve interpolation as the fit method
#test_roc_curve = metrics.roc_curve(ytest, class_score[:, 0], pos_label=0)
test_roc_curve = 1
# Test metrics
cv_testaccuracy[cvround] = test_accuracy
cv_testprecision[cvround] = test_precision
cv_testrecall[cvround] = test_recall
#cv_testauc[cvround, y_nvars] = test_auc_area
cv_testf1[cvround] = test_f1_score
cv_testmatthews_mcc[cvround] = test_matthews_mcc
cv_testzerooneloss[cvround] = test_zero_oneloss
# Check this indexes, same as CV scores
cv_testmisclassifiedsamples.append(test_misclassified_samples)
cv_testroc_curve.append(test_roc_curve)
cv_testconfusionmatrix.append(test_conf_matrix)
cv_testclasspredictions.append(test_classpredictions)
# Align model parameters to account for sign indeterminacy.
# The criteria here used is to select the sign that gives a more similar profile (by L1 distance) to the loadings from
# on the model fitted with the whole data. Any other parameter can be used, but since the loadings in X capture
# the covariance structure in the X data block, in theory they should have more pronounced features even in cases of
# null X-Y association, making the sign flip more resilient.
for cvround in range(0, ncvrounds):
for currload in range(0, self.ncomps):
# evaluate based on loadings _p
choice = np.argmin(
np.array([np.sum(np.abs(self.loadings_p[:, currload] - cv_loadings_p[cvround, :, currload])),
np.sum(np.abs(
self.loadings_p[:, currload] - cv_loadings_p[cvround, :, currload] * -1))]))
if choice == 1:
cv_loadings_p[cvround, :, currload] = -1 * cv_loadings_p[cvround, :, currload]
cv_loadings_q[cvround, :, currload] = -1 * cv_loadings_q[cvround, :, currload]
cv_weights_w[cvround, :, currload] = -1 * cv_weights_w[cvround, :, currload]
cv_weights_c[cvround, :, currload] = -1 * cv_weights_c[cvround, :, currload]
cv_rotations_ws[cvround, :, currload] = -1 * cv_rotations_ws[cvround, :, currload]
cv_rotations_cs[cvround, :, currload] = -1 * cv_rotations_cs[cvround, :, currload]
cv_train_scores_t.append([*zip(train, -1 * cv_pipeline.scores_t)])
cv_train_scores_u.append([*zip(train, -1 * cv_pipeline.scores_u)])
cv_test_scores_t.append([*zip(test, -1 * cv_pipeline.scores_t)])
cv_test_scores_u.append([*zip(test, -1 * cv_pipeline.scores_u)])
else:
cv_train_scores_t.append([*zip(train, cv_pipeline.scores_t)])
cv_train_scores_u.append([*zip(train, cv_pipeline.scores_u)])
cv_test_scores_t.append([*zip(test, cv_pipeline.scores_t)])
cv_test_scores_u.append([*zip(test, cv_pipeline.scores_u)])
# Calculate Q-squareds
q_squaredy = 1 - (pressy / ssy)
q_squaredx = 1 - (pressx / ssx)
# Store the aggregated cross-validation results. NOTE(review): several
# entries below look like copy-paste slips (flagged inline) — confirm
# against the rest of the class before relying on the "Stdev" values.
self.cvParameters = {'PLS': {'Q2X': q_squaredx, 'Q2Y': q_squaredy,
                             'MeanR2X_Training': np.mean(R2X_training),
                             'MeanR2Y_Training': np.mean(R2Y_training),
                             'StdevR2X_Training': np.std(R2X_training),
                             # NOTE(review): std taken over R2X_training but stored
                             # under the R2Y key — presumably np.std(R2Y_training)
                             # was intended; flagged only, not changed here.
                             'StdevR2Y_Training': np.std(R2X_training),
                             'MeanR2X_Test': np.mean(R2X_test),
                             'MeanR2Y_Test': np.mean(R2Y_test),
                             'StdevR2X_Test': np.std(R2X_test),
                             'StdevR2Y_Test': np.std(R2Y_test)},
                     'Logistic': dict()}
# Means and standard deviations...
self.cvParameters['PLS']['Mean_Loadings_q'] = cv_loadings_q.mean(0)
self.cvParameters['PLS']['Stdev_Loadings_q'] = cv_loadings_q.std(0)
self.cvParameters['PLS']['Mean_Loadings_p'] = cv_loadings_p.mean(0)
# NOTE(review): std taken over cv_loadings_q but stored under
# 'Stdev_Loadings_p' — presumably cv_loadings_p.std(0) was intended;
# flagged only, not changed here.
self.cvParameters['PLS']['Stdev_Loadings_p'] = cv_loadings_q.std(0)
self.cvParameters['PLS']['Mean_Weights_c'] = cv_weights_c.mean(0)
self.cvParameters['PLS']['Stdev_Weights_c'] = cv_weights_c.std(0)
self.cvParameters['PLS']['Mean_Weights_w'] = cv_weights_w.mean(0)
# NOTE(review): key is 'Stdev_Loadings_w' while the value is the std of
# cv_weights_w — 'Stdev_Weights_w' would match 'Mean_Weights_w' above, but
# renaming would change the public dict layout, so only flagging it here.
self.cvParameters['PLS']['Stdev_Loadings_w'] = cv_weights_w.std(0)
self.cvParameters['PLS']['Mean_Rotations_ws'] = cv_rotations_ws.mean(0)
self.cvParameters['PLS']['Stdev_Rotations_ws'] = cv_rotations_ws.std(0)
self.cvParameters['PLS']['Mean_Rotations_cs'] = cv_rotations_cs.mean(0)
self.cvParameters['PLS']['Stdev_Rotations_cs'] = cv_rotations_cs.std(0)
self.cvParameters['PLS']['Mean_Beta'] = cv_betacoefs.mean(0)
self.cvParameters['PLS']['Stdev_Beta'] = cv_betacoefs.std(0)
self.cvParameters['PLS']['Mean_VIP'] = cv_vipsw.mean(0)
self.cvParameters['PLS']['Stdev_VIP'] = cv_vipsw.std(0)
self.cvParameters['Logistic']['Mean_MCC'] = cv_testmatthews_mcc.mean(0)
self.cvParameters['Logistic']['Stdev_MCC'] = cv_testmatthews_mcc.std(0)
self.cvParameters['Logistic']['Mean_Recall'] = cv_testrecall.mean(0)
self.cvParameters['Logistic']['Stdev_Recall'] = cv_testrecall.std(0)
self.cvParameters['Logistic']['Mean_Precision'] = cv_testprecision.mean(0)
self.cvParameters['Logistic']['Stdev_Precision'] = cv_testprecision.std(0)
self.cvParameters['Logistic']['Mean_Accuracy'] = cv_testaccuracy.mean(0)
self.cvParameters['Logistic']['Stdev_Accuracy'] = cv_testaccuracy.std(0)
self.cvParameters['Logistic']['Mean_f1'] = cv_testf1.mean(0)
self.cvParameters['Logistic']['Stdev_f1'] = cv_testf1.std(0)
self.cvParameters['Logistic']['Mean_0-1Loss'] = cv_testzerooneloss.mean(0)
self.cvParameters['Logistic']['Stdev_0-1Loss'] = cv_testzerooneloss.std(0)
self.cvParameters['Logistic']['Mean_Coefs'] = cv_logisticcoefs.mean(0)
self.cvParameters['Logistic']['Stdev_Coefs'] = cv_logisticcoefs.std(0)
# Save everything found during CV
if outputdist is True:
# Apart from R'2s and scores, the PLS parameters and Logistic regression coefficients are
# All relevant from a training set point of view
self.cvParameters['PLS']['CVR2X_Training'] = R2X_training
self.cvParameters['PLS']['CVR2Y_Training'] = R2Y_training
self.cvParameters['PLS']['CVR2X_Test'] = R2X_test
self.cvParameters['PLS']['CVR2Y_Test'] = R2Y_test
self.cvParameters['PLS']['CV_Loadings_q'] = cv_loadings_q
self.cvParameters['PLS']['CV_Loadings_p'] = cv_loadings_p
self.cvParameters['PLS']['CV_Weights_c'] = cv_weights_c
self.cvParameters['PLS']['CV_Weights_w'] = cv_weights_w
self.cvParameters['PLS']['CV_Rotations_ws'] = cv_rotations_ws
self.cvParameters['PLS']['CV_Rotations_cs'] = cv_rotations_cs
self.cvParameters['PLS']['CV_TestScores_t'] = cv_test_scores_t
self.cvParameters['PLS']['CV_TestScores_u'] = cv_test_scores_u
self.cvParameters['PLS']['CV_TrainScores_t'] = cv_train_scores_t
self.cvParameters['PLS']['CV_TrainScores_u'] = cv_train_scores_u
self.cvParameters['PLS']['CV_Beta'] = cv_betacoefs
self.cvParameters['PLS']['CV_VIPw'] = cv_vipsw
self.cvParameters['Logistic']['CV_Coefs'] = cv_logisticcoefs
# CV Test set metrics - The metrics which matter to benchmark classifier
self.cvParameters['Logistic']['CV_TestMCC'] = cv_testmatthews_mcc
self.cvParameters['Logistic']['CV_TestRecall'] = cv_testrecall
self.cvParameters['Logistic']['CV_TestPrecision'] = cv_testprecision
self.cvParameters['Logistic']['CV_TestAccuracy'] = cv_testaccuracy
self.cvParameters['Logistic']['CV_Testf1'] = cv_testf1
self.cvParameters['Logistic']['CV_Test0-1Loss'] = cv_testzerooneloss
self.cvParameters['Logistic']['CV_TestROC'] = cv_testroc_curve
self.cvParameters['Logistic']['CV_TestConfusionMatrix'] = cv_testconfusionmatrix
self.cvParameters['Logistic']['CV_TestSamplePrediction'] = cv_testclasspredictions
self.cvParameters['Logistic']['CV_TestMisclassifiedsamples'] = cv_testmisclassifiedsamples
# CV Train parameters - so we can keep a look on model performance in training set
self.cvParameters['Logistic']['CV_TrainMCC'] = cv_trainmatthews_mcc
self.cvParameters['Logistic']['CV_TrainRecall'] = cv_trainrecall
self.cvParameters['Logistic']['CV_TrainPrecision'] = cv_trainprecision
self.cvParameters['Logistic']['CV_TrainAccuracy'] = cv_trainaccuracy
self.cvParameters['Logistic']['CV_Trainf1'] = cv_trainf1
self.cvParameters['Logistic']['CV_Train0-1Loss'] = cv_trainzerooneloss
self.cvParameters['Logistic']['CV_TrainROC'] = cv_trainroc_curve
self.cvParameters['Logistic']['CV_TrainConfusionMatrix'] = cv_trainconfusionmatrix
self.cvParameters['Logistic']['CV_TrainSamplePrediction'] = cv_trainclasspredictions
self.cvParameters['Logistic']['CV_TrainMisclassifiedsamples'] = cv_trainmisclassifiedsamples
return None
except TypeError as terp:
raise terp
def permutation_test(self, x, y, nperms=1000, cv_method=KFold(7, shuffle=True), **permtest_kwargs):
"""
Permutation test for the classifier. Outputs permuted null distributions for model performance metrics (Q2X/Q2Y)
and most model parameters.
:param x: Data matrix to fit the PLS model.
:type x: numpy.ndarray, shape [n_samples, n_features]
:param y: Data matrix to fit the PLS model.
:type y: numpy.ndarray, shape [n_samples, n_features]
:param int nperms: Number of permutations to perform.
:param cv_method: An instance of a scikit-learn CrossValidator object.
:type cv_method: BaseCrossValidator or BaseShuffleSplit
:param kwargs permtest_kwargs: Keyword arguments to be passed to the .fit() method during cross-validation and model fitting.
:return: Permuted null distributions for model parameters and the permutation p-value for the Q2Y value.
:rtype: dict
"""
try:
# Check if global model is fitted... and if not, fit it using all of X
if self._isfitted is False or self.loadings_p is None:
self.fit(x, y, **permtest_kwargs)
# Make a copy of the object, to ensure the internal state doesn't come out differently from the
# cross validation method call...
permute_class = deepcopy(self)
if x.ndim > 1:
x_nvars = x.shape[1]
else:
x_nvars = 1
if y.ndim > 1:
y_nvars = y.shape[1]
else:
y_nvars = 1
n_classes = np.unique(y).size
# Initialize data structures for permuted distributions
perm_loadings_q = np.zeros((nperms, y_nvars, self.ncomps))
perm_loadings_p = np.zeros((nperms, x_nvars, self.ncomps))
perm_weights_c = np.zeros((nperms, y_nvars, self.ncomps))
perm_weights_w = np.zeros((nperms, x_nvars, self.ncomps))
perm_rotations_cs = np.zeros((nperms, y_nvars, self.ncomps))
perm_rotations_ws = np.zeros((nperms, x_nvars, self.ncomps))
perm_beta = np.zeros((nperms, x_nvars, y_nvars))
perm_vipsw = np.zeros((nperms, x_nvars))
permuted_R2Y = np.zeros(nperms)
permuted_R2X = np.zeros(nperms)
permuted_Q2Y = np.zeros(nperms)
permuted_Q2X = np.zeros(nperms)
permuted_R2Y_test = np.zeros(nperms)
permuted_R2X_test = np.zeros(nperms)
perm_logisticcoefs = np.zeros((nperms, self.ncomps))
perm_trainprecision = np.zeros(nperms)
perm_trainrecall = np.zeros(nperms)
perm_trainaccuracy = np.zeros(nperms)
perm_trainauc = np.zeros(nperms)
perm_trainmatthews_mcc = np.zeros(nperms)
perm_trainzerooneloss = np.zeros(nperms)
perm_trainf1 = np.zeros(nperms)
perm_trainclasspredictions = list()
perm_trainroc_curve = list()
perm_trainconfusionmatrix = list()
perm_trainmisclassifiedsamples = list()
perm_testprecision = np.zeros(nperms)
perm_testrecall = np.zeros(nperms)
perm_testaccuracy = np.zeros(nperms)
perm_testauc = np.zeros(nperms)
perm_testmatthews_mcc = np.zeros(nperms)
perm_testzerooneloss = np.zeros(nperms)
perm_testf1 = np.zeros(nperms)
perm_testclasspredictions = list()
perm_testroc_curve = list()
perm_testconfusionmatrix = list()
perm_testmisclassifiedsamples = list()
for permutation in range(0, nperms):
# Copy original column order, shuffle array in place...
perm_y = np.random.permutation(y)
# ... Fit model and replace original data
permute_class.fit(x, perm_y, **permtest_kwargs)
permute_class.cross_validation(x, perm_y, cv_method=cv_method, **permtest_kwargs)
permuted_R2Y[permutation] = permute_class.modelParameters['R2Y']
permuted_R2X[permutation] = permute_class.modelParameters['R2X']
permuted_Q2Y[permutation] = permute_class.cvParameters['Q2Y']
permuted_Q2X[permutation] = permute_class.cvParameters['Q2X']
# Store the loadings for each permutation component-wise
perm_loadings_q[permutation, :, :] = permute_class.loadings_q
perm_loadings_p[permutation, :, :] = permute_class.loadings_p
perm_weights_c[permutation, :, :] = permute_class.weights_c
perm_weights_w[permutation, :, :] = permute_class.weights_w
perm_rotations_cs[permutation, :, :] = permute_class.rotations_cs
perm_rotations_ws[permutation, :, :] = permute_class.rotations_ws
perm_beta[permutation, :, :] = permute_class.beta_coeffs
perm_vipsw[permutation, :] = permute_class.VIP()
# Align model parameters due to sign indeterminacy.
# Solution provided is to select the sign that gives a more similar profile to the
# Loadings calculated with the whole data.
for perm_round in range(0, nperms):
for currload in range(0, self.ncomps):
# evaluate based on loadings _p
choice = np.argmin(np.array(
[np.sum(np.abs(self.loadings_p[:, currload] - perm_loadings_p[perm_round, :, currload])),
np.sum(np.abs(self.loadings_p[:, currload] - perm_loadings_p[perm_round, :, currload] * -1))]))
if choice == 1:
perm_loadings_p[perm_round, :, currload] = -1 * perm_loadings_p[perm_round, :, currload]
perm_loadings_q[perm_round, :, currload] = -1 * perm_loadings_q[perm_round, :, currload]
perm_weights_w[perm_round, :, currload] = -1 * perm_weights_w[perm_round, :, currload]
perm_weights_c[perm_round, :, currload] = -1 * perm_weights_c[perm_round, :, currload]
perm_rotations_ws[perm_round, :, currload] = -1 * perm_rotations_ws[perm_round, :, currload]
perm_rotations_cs[perm_round, :, currload] = -1 * perm_rotations_cs[perm_round, :, currload]
# Pack everything into a nice data structure and return
# Calculate p-value for Q2Y as well
permutationTest = dict()
permutationTest['R2Y'] = permuted_R2Y
permutationTest['R2X'] = permuted_R2X
permutationTest['Q2Y'] = permuted_Q2Y
permutationTest['Q2X'] = permuted_Q2X
permutationTest['R2Y_Test'] = permuted_R2Y_test
permutationTest['R2X_Test'] = permuted_R2X_test
permutationTest['Loadings_p'] = perm_loadings_p
permutationTest['Loadings_q'] = perm_loadings_q
permutationTest['Weights_c'] = | |
lemma == 'vienas':
word = 'vienoms'
elif lemma.endswith('dešimtis'):
word = lemma[:-2] + 'ims'
elif lemma.endswith('etas') \
or (num.num_type == 'kiek'
and (lemma == 'šimtas' or lemma == 'milijonas' or lemma == 'milijardas')) \
or lemma == 'ketvertas':
return
elif lemma.endswith('as'):
if num.degree == 'aukšč':
word = lemma[:-2] + 'iausioms'
elif num.degree == 'aukšt':
word = lemma[:-2] + 'esnėms'
else:
word = lemma[:-2] + 'oms'
elif lemma == 'tūkstantis':
return
elif lemma.endswith('is'):
word = lemma[:-2] + 'ėms'
if rqrd_infl == 'G':
if lemma == 'vienas':
word = 'vienas'
elif lemma.endswith('dešimtis'):
word = lemma
elif lemma.endswith('etas') \
or (num.num_type == 'kiek'
and (lemma == 'šimtas' or lemma == 'milijonas' or lemma == 'milijardas')) \
or lemma == 'ketvertas':
return
elif lemma.endswith('as'):
if num.degree == 'aukšč':
word = lemma[:-2] + 'iausias'
elif num.degree == 'aukšt':
word = lemma[:-2] + 'esnes'
else:
word = lemma[:-2] + 'as'
elif lemma == 'tūkstantis':
return
elif lemma.endswith('is'):
word = lemma[:-2] + 'es'
if rqrd_infl == 'Įn':
if lemma == 'vienas':
word = 'vienomis'
elif lemma.endswith('dešimtis'):
word = lemma[:-2] + 'imis'
elif lemma.endswith('etas') \
or (num.num_type == 'kiek'
and (lemma == 'šimtas' or lemma == 'milijonas' or lemma == 'milijardas')) \
or lemma == 'ketvertas':
return
elif lemma.endswith('as'):
if num.degree == 'aukšč':
word = lemma[:-2] + 'iausiomis'
elif num.degree == 'aukšt':
word = lemma[:-2] + 'esnėmis'
else:
word = lemma[:-2] + 'omis'
elif lemma == 'tūkstantis':
return
elif lemma.endswith('is'):
word = lemma[:-2] + 'ėmis'
if rqrd_infl == 'Vt':
if lemma == 'vienas':
word = 'vienose'
elif lemma.endswith('dešimtis'):
word = lemma[:-2] + 'yse'
elif lemma.endswith('etas') \
or (num.num_type == 'kiek'
and (lemma == 'šimtas' or lemma == 'milijonas' or lemma == 'milijardas')) \
or lemma == 'ketvertas':
return
elif lemma.endswith('as'):
if num.degree == 'aukšč':
word = lemma[:-2] + 'iausiose'
elif num.degree == 'aukšt':
word = lemma[:-2] + 'esnėse'
else:
word = lemma[:-2] + 'ose'
elif lemma == 'tūkstantis':
return
elif lemma.endswith('is'):
word = lemma[:-2] + 'ėse'
if word is None:
return
else:
if rqrd_gen is not None and rqrd_num is not None and rqrd_infl is not None:
n = Numeral(word, lemma, num.num_form, num.num_type, rqrd_gen, rqrd_num, rqrd_infl, num.dfnt, degree=num.degree)
elif rqrd_gen is not None and rqrd_infl is not None:
n = Numeral(word, lemma, num.num_form, num.num_type, rqrd_gen, infl=rqrd_infl, number=num.number, dfnt=num.dfnt, degree=num.degree)
elif rqrd_num is not None and rqrd_infl is not None:
n = Numeral(word, lemma, num.num_form, num.num_type, gender=num.gender, number=rqrd_num, infl=rqrd_infl, degree=num.degree)
elif rqrd_infl is not None:
n = Numeral(word, lemma, num.num_form, num.num_type, gender=num.gender, number=num.number, infl=rqrd_infl, degree=num.degree)
else:
n = Numeral(word, lemma, num.num_form, num.num_type, gender=num.gender, number=num.number, infl=num.infl, degree=num.degree)
return n
def num_def(num, rqrd_gen, rqrd_num=None, rqrd_infl=None):
    """Decline definite numerals. Only possible with ordinal numerals.

    :param num: Numeral object (only ``lemma`` and ``degree`` are read before
        a form is found; the remaining attributes feed the returned Numeral)
    :param rqrd_gen: the gender ('vyr'/'mot') the numeral should be inflected for
    :param rqrd_num: the number ('vns' for singular; anything else is plural)
    :param rqrd_infl: the case ('V', 'K', 'N', 'G', 'Įn', 'Vt')
    :return: a Numeral in the requested definite form, or None when no form
        exists for the requested gender/number/case combination
    """
    real_lemma = num.lemma
    lemma = num.lemma
    # Fix: pre-initialise `word`. Previously an unmatched combination (e.g.
    # gender 'bev' or an unknown case) raised UnboundLocalError at the final
    # `if word is None` check instead of returning None.
    word = None
    # only ordinal numerals can be definite
    if num.degree is None or num.degree == 'nelygin' or num.degree == 'aukšč':
        if num.degree == 'aukšč':
            # superlative stem, e.g. 'pirmas' -> 'pirmiausias'
            lemma = lemma[:-2] + 'iausias'
        if rqrd_num == 'vns':
            if rqrd_gen == 'vyr':
                if rqrd_infl == 'V':
                    word = lemma + 'is'
                elif rqrd_infl == 'K':
                    word = lemma[:-2] + 'ojo'
                elif rqrd_infl == 'N':
                    word = lemma[:-2] + 'ajam'
                elif rqrd_infl == 'G':
                    word = lemma[:-2] + 'ąjį'
                elif rqrd_infl == 'Įn':
                    word = lemma[:-2] + 'uoju'
                elif rqrd_infl == 'Vt':
                    word = lemma[:-2] + 'ajame'
            elif rqrd_gen == 'mot':  # neuter or fem
                if rqrd_infl == 'V':
                    word = lemma[:-2] + 'oji'
                elif rqrd_infl == 'K':
                    word = lemma[:-2] + 'osios'
                elif rqrd_infl == 'N':
                    word = lemma[:-2] + 'ajai'
                elif rqrd_infl == 'G':
                    word = lemma[:-2] + 'ąją'
                elif rqrd_infl == 'Įn':
                    word = lemma[:-2] + 'ąja'
                elif rqrd_infl == 'Vt':
                    word = lemma[:-2] + 'ojoje'
        else:  # plural num
            if rqrd_gen == 'vyr':
                if rqrd_infl == 'V':
                    if lemma.endswith('ias'):
                        word = lemma[:-2] + 'eji'
                    else:
                        word = lemma[:-2] + 'ieji'
                elif rqrd_infl == 'K':
                    word = lemma[:-2] + 'ųjų'
                elif rqrd_infl == 'N':
                    if lemma.endswith('ias'):
                        word = lemma[:-2] + 'esiems'
                    else:
                        word = lemma[:-2] + 'iesiems'
                elif rqrd_infl == 'G':
                    word = lemma[:-2] + 'uosius'
                elif rqrd_infl == 'Įn':
                    word = lemma[:-2] + 'aisiais'
                elif rqrd_infl == 'Vt':
                    word = lemma[:-2] + 'uosiuose'
            elif rqrd_gen == 'mot':  # neuter or fem
                if rqrd_infl == 'V':
                    word = lemma[:-2] + 'osios'
                elif rqrd_infl == 'K':
                    word = lemma[:-2] + 'ųjų'
                elif rqrd_infl == 'N':
                    word = lemma[:-2] + 'osioms'
                elif rqrd_infl == 'G':
                    word = lemma[:-2] + 'ąsias'
                elif rqrd_infl == 'Įn':
                    word = lemma[:-2] + 'osiomis'
                elif rqrd_infl == 'Vt':
                    word = lemma[:-2] + 'osiose'
    else:
        # comparative stem, e.g. 'pirmesnis'
        lemma = lemma[:-2] + 'esnis'
        if rqrd_num == 'vns':
            if rqrd_gen == 'vyr':
                if rqrd_infl == 'V':
                    word = lemma[:-2] + 'ysis'
                elif rqrd_infl == 'K':
                    word = lemma[:-2] + 'iojo'
                elif rqrd_infl == 'N':
                    word = lemma[:-2] + 'iajam'
                elif rqrd_infl == 'G':
                    word = lemma[:-2] + 'įjį'
                elif rqrd_infl == 'Įn':
                    word = lemma[:-2] + 'iuoju'
                elif rqrd_infl == 'Vt':
                    word = lemma[:-2] + 'iajame'
            elif rqrd_gen == 'mot':  # neuter or fem
                if rqrd_infl == 'V':
                    word = lemma[:-2] + 'ioji'
                elif rqrd_infl == 'K':
                    word = lemma[:-2] + 'iosios'
                elif rqrd_infl == 'N':
                    word = lemma[:-2] + 'iajai'
                elif rqrd_infl == 'G':
                    word = lemma[:-2] + 'iąją'
                elif rqrd_infl == 'Įn':
                    word = lemma[:-2] + 'iąja'
                elif rqrd_infl == 'Vt':
                    word = lemma[:-2] + 'iojoje'
        else:  # plural num
            if rqrd_gen == 'vyr':
                if rqrd_infl == 'V':
                    word = lemma[:-2] + 'ieji'
                elif rqrd_infl == 'K':
                    word = lemma[:-2] + 'iųjų'
                elif rqrd_infl == 'N':
                    word = lemma[:-2] + 'iesiems'
                elif rqrd_infl == 'G':
                    word = lemma[:-2] + 'iuosius'
                elif rqrd_infl == 'Įn':
                    word = lemma[:-2] + 'iaisiais'
                elif rqrd_infl == 'Vt':
                    word = lemma[:-2] + 'iuosiuose'
            elif rqrd_gen == 'mot':  # neuter or fem
                if rqrd_infl == 'V':
                    word = lemma[:-2] + 'iosios'
                elif rqrd_infl == 'K':
                    word = lemma[:-2] + 'iųjų'
                elif rqrd_infl == 'N':
                    word = lemma[:-2] + 'iosioms'
                elif rqrd_infl == 'G':
                    word = lemma[:-2] + 'iąsias'
                elif rqrd_infl == 'Įn':
                    word = lemma[:-2] + 'iosiomis'
                elif rqrd_infl == 'Vt':
                    word = lemma[:-2] + 'iosiose'
    if word is None:
        return
    else:
        return Numeral(word, real_lemma, num.num_form, num.num_type, rqrd_gen, rqrd_num, rqrd_infl, True, degree=num.degree)
def xpos_to_feats(pos):
"""Transform the XPOS format into the universal FEATS format
:param pos: the part of speech the XPOS informaton of which needs to be transformed
:return:
result TODO: remove result
feats transformed xpos
"""
# būtasis dažninis has Iter Aspect in MATAS and Hab aspect in ALKSNIS
# ALKSNIS has Mood=Nec, which is just dlv in xpos
all = {'gender':{'mot':'Fem', 'vyr':'Masc', 'bev':'Neut'}, 'number': {'vns': 'Sing', 'dgs': 'Plur'},
'infl': {'V':'Nom', 'K': 'Gen', 'N': 'Dat', 'G':'Acc', 'Įn':'Ins', 'Vt':'Loc', 'Š':'Voc', 'Il': 'Il'},
'nums': {'kiek':'Card', 'kelint':'Ord'},
'tense': {'es':'Pres', 'būt-k':'Past|Aspect=Perf', 'būt-d':'Past|Aspect=Hab', 'būs':'Fut', 'būt':'Past'},
'mood': {'tiesiog':'Ind', 'tar':'Cnd', 'liep': 'Imp', 'reik':'Nec'}, 'voice': {'neveik':'Pass', 'veik':'Act'},
'verb_form': {'asm':'Fin', 'dlv':'Part', 'pad':'Ger', 'padlv':'Ger', 'pusd':'Conv', 'bndr':'Inf', 'būdn':'Conv'},
'polarity':{'neig':'Neg', 'teig':'Pos'}, 'definite':{'įvardž':'Def', 'neįvardž':'Ind'},
'degree':{'nelygin':'Pos', 'aukšt':'Cmp', 'aukšč':'Sup'}, 'num_form': {'arab':'Digit', 'rom':'Roman', 'mišr':'Combi', 'raid':'Word'},
'num_type': {'kiek':'Card', 'kelint':'Ord', 'daugin':'Mult', 'kuopin':'Sets', 'trup':'Frac'}}
result = dict()
feats = list()
# find the features and their equivalents in the dictionary of dictionaries
for a in vars(pos):
if vars(pos)[a] is not None and a != 'word' and a != 'lemma' \
and a != 'rflx' and a != 'person' and a != 'proper' and a != 'dfnt':
result[a] = all[a][vars(pos)[a]]
if isinstance(pos, Verb):
if pos.rflx:
feats.append('Reflex=Yes')
if pos.person is not None:
feats.append('Person=' + pos.person)
for key in result.keys():
if key == 'gender':
feats.append('Gender=' + result[key])
elif key == 'number':
feats.append('Number=' + result[key])
elif key == 'infl':
feats.append('Case=' + result[key])
elif key == 'tense':
feats.append('Tense=' + result[key])
elif key == 'mood':
feats.append('Mood=' + result[key])
elif | |
from __future__ import absolute_import
from builtins import str
from builtins import range
from builtins import object
try:
from collections import OrderedDict # 2.7
except ImportError:
from sqlalchemy.util import OrderedDict
from ckan.lib import helpers as h
from logging import getLogger
import re
from . import helpers
log = getLogger(__name__)
class Package2Pod(object):
    """Convert CKAN package dicts into POD (Project Open Data) dataset dicts.

    All methods are static; the class acts as a namespace. Conversion is
    driven by a JSON "export map" that describes, per POD key, where in the
    CKAN package the value lives and how to transform it. ``seen_identifiers``
    is shared state consumed by the validator to spot duplicate dataset
    identifiers within one export run.
    """

    def __init__(self):
        pass

    # Identifiers already emitted during the current export; handed to
    # datajsonvalidator.do_validation by validate().
    seen_identifiers = None

    @staticmethod
    def wrap_json_catalog(dataset_dict, json_export_map):
        """Wrap converted datasets in a catalog envelope.

        The export map's 'catalog_headers' entries are copied first, in
        order, followed by a 'dataset' key holding *dataset_dict*.
        """
        header_items = [(key, value) for key, value in json_export_map.get('catalog_headers').items()]
        return OrderedDict(header_items + [('dataset', dataset_dict)])

    @staticmethod
    def filter(content):
        """Strip redaction tags, then surrounding whitespace, from a string.

        Non-string values pass through unchanged.
        """
        if not isinstance(content, str):
            return content
        content = Package2Pod.strip_redacted_tags(content)
        content = helpers.strip_if_string(content)
        return content

    @staticmethod
    def strip_redacted_tags(content):
        """Remove ``[[REDACTED...]]`` markers from *content* (strings only)."""
        if not isinstance(content, str):
            return content
        return re.sub(helpers.REDACTED_TAGS_REGEX, '', content)

    @staticmethod
    def mask_redacted(content, reason):
        """Redact *content* for the given *reason*.

        If partial-redaction markers are present only the marked spans are
        removed; otherwise the whole value is replaced with a
        ``[[REDACTED-EX <reason>]]`` placeholder. Without a *reason* the
        value is returned unchanged (None/empty becomes '').
        """
        if not content:
            content = ''
        if reason:
            # check if field is partially redacted
            masked = content
            for redact in re.findall(helpers.PARTIAL_REDACTION_REGEX, masked):
                masked = masked.replace(redact, '')
            if len(masked) < len(content):
                return masked
            return '[[REDACTED-EX ' + reason + ']]'
        return content

    @staticmethod
    def convert_package(package, json_export_map, redaction_enabled=False):
        """Convert one CKAN *package*, validating when the export map asks.

        :raises Exception: re-raises anything from mapping/validation after
            logging the failing file and line for diagnosis.
        """
        import os
        import sys
        try:
            dataset = Package2Pod.export_map_fields(package, json_export_map, redaction_enabled)
            # skip validation if we export whole /data.json catalog
            if json_export_map.get('validation_enabled'):
                return Package2Pod.validate(package, dataset)
            return dataset
        except Exception as e:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            filename = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            log.error("%s : %s : %s : %s", exc_type, filename, exc_tb.tb_lineno, str(e))
            raise e

    @staticmethod
    def export_map_fields(package, json_export_map, redaction_enabled=False):
        """Map CKAN package fields onto a POD dataset dict per the export map.

        Redaction is only honoured for non-public access levels. Empty
        values (None, "", []) are dropped from the result because CKAN
        rejects them at harvest time.
        """
        import os
        import sys
        public_access_level = helpers.get_extra(package, 'public_access_level')
        if not public_access_level or public_access_level not in ['non-public', 'restricted public']:
            redaction_enabled = False
        Wrappers.redaction_enabled = redaction_enabled
        json_fields = json_export_map.get('dataset_fields_map')
        try:
            dataset = OrderedDict([("@type", "dcat:Dataset")])
            Wrappers.pkg = package
            Wrappers.full_field_map = json_fields
            for key, field_map in json_fields.items():
                field_type = field_map.get('type', 'direct')
                is_extra = field_map.get('extra')
                array_key = field_map.get('array_key')
                field = field_map.get('field')
                split = field_map.get('split')
                wrapper = field_map.get('wrapper')
                default = field_map.get('default')
                if redaction_enabled and field and 'publisher' != field and 'direct' != field_type:
                    redaction_reason = helpers.get_extra(package, 'redacted_' + field, False)
                    # keywords(tags) have some UI-related issues with this, so we'll check both versions here
                    if not redaction_reason and 'tags' == field:
                        redaction_reason = helpers.get_extra(package, 'redacted_tag_string', False)
                    if redaction_reason:
                        dataset[key] = '[[REDACTED-EX ' + redaction_reason + ']]'
                        continue
                if 'direct' == field_type and field:
                    if is_extra:
                        dataset[key] = helpers.strip_if_string(helpers.get_extra(package, field, default))
                    else:
                        dataset[key] = helpers.strip_if_string(package.get(field, default))
                    if redaction_enabled and 'publisher' != field:
                        redaction_reason = helpers.get_extra(package, 'redacted_' + field, False)
                        if redaction_reason:
                            dataset[key] = Package2Pod.mask_redacted(dataset[key], redaction_reason)
                            continue
                    else:
                        dataset[key] = Package2Pod.filter(dataset[key])
                elif 'array' == field_type:
                    if is_extra:
                        found_element = helpers.strip_if_string(helpers.get_extra(package, field))
                        if found_element:
                            if helpers.is_redacted(found_element):
                                dataset[key] = found_element
                            elif split:
                                # FIX: `string.split(s, sep)` was removed in Python 3;
                                # use the equivalent str method instead.
                                dataset[key] = [Package2Pod.filter(x) for x in found_element.split(split)]
                    else:
                        if array_key:
                            dataset[key] = [Package2Pod.filter(t[array_key]) for t in package.get(field, {})]
                if wrapper:
                    # Dispatch to the named static helper on Wrappers.
                    method = getattr(Wrappers, wrapper)
                    if method:
                        Wrappers.current_field_map = field_map
                        dataset[key] = method(dataset.get(key))
            # CKAN doesn't like empty values on harvest, let's get rid of them
            # Remove entries where value is None, "", or empty list []
            dataset = OrderedDict([(x, y) for x, y in dataset.items() if y is not None and y != "" and y != []])
            return dataset
        except Exception as e:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            filename = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            log.error("%s : %s : %s : %s", exc_type, filename, exc_tb.tb_lineno, str(e))
            raise e

    @staticmethod
    def validate(pkg, dataset_dict):
        """Validate a converted dataset against the POD schema.

        :return: the normalised dataset on success, or an OrderedDict of
            errors (id/name/title/organization/errors) when validation fails.
        """
        import os
        import sys
        global currentPackageOrg
        try:
            # When saved from UI DataQuality value is stored as "on" instead of True.
            # Normalise the string variants to real booleans.
            dataset_dict = OrderedDict(dataset_dict)
            if dataset_dict.get('dataQuality') == "on" \
                    or dataset_dict.get('dataQuality') == "true" \
                    or dataset_dict.get('dataQuality') == "True":
                dataset_dict['dataQuality'] = True
            elif dataset_dict.get('dataQuality') == "false" \
                    or dataset_dict.get('dataQuality') == "False":
                dataset_dict['dataQuality'] = False
            errors = []
            try:
                from .datajsonvalidator import do_validation
                do_validation([dict(dataset_dict)], errors, Package2Pod.seen_identifiers)
            except Exception as e:
                errors.append(("Internal Error", ["Something bad happened: " + str(e)]))
            if len(errors) > 0:
                for error in errors:
                    # logging's warn() is a deprecated alias of warning().
                    log.warning(error)
                try:
                    currentPackageOrg
                except NameError:
                    # inventory_publisher never ran for this package.
                    currentPackageOrg = 'unknown'
                return OrderedDict([
                    ('id', pkg.get('id')),
                    ('name', Package2Pod.filter(pkg.get('name'))),
                    ('title', Package2Pod.filter(pkg.get('title'))),
                    ('organization', Package2Pod.filter(currentPackageOrg)),
                    ('errors', errors),
                ])
            return dataset_dict
        except Exception as e:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            filename = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            log.error("%s : %s : %s", exc_type, filename, exc_tb.tb_lineno)
            raise e
class Wrappers(object):
    def __init__(self):
        # Wrappers is used purely as a namespace of static helpers plus
        # class-level state; instances themselves carry nothing.
        pass
redaction_enabled = False
pkg = None
current_field_map = None
full_field_map = None
bureau_code_list = None
resource_formats = None
@staticmethod
def catalog_publisher(value):
publisher = None
if value:
publisher = helpers.get_responsible_party(value)
if not publisher and 'organization' in Wrappers.pkg and 'title' in Wrappers.pkg.get('organization'):
publisher = Wrappers.pkg.get('organization').get('title')
return OrderedDict([
("@type", "org:Organization"),
("name", publisher)
])
    @staticmethod
    def inventory_publisher(value):
        """Build the (possibly nested) publisher object for an inventory export.

        Reads the primary publisher from the package extra named by the
        current field map, then chains the publisher_1..publisher_5 extras
        into a 'subOrganizationOf' hierarchy (primary publisher innermost).
        Also updates the module-level ``currentPackageOrg`` used by
        Package2Pod.validate in its error reports.
        """
        global currentPackageOrg
        publisher = helpers.strip_if_string(helpers.get_extra(Wrappers.pkg, Wrappers.current_field_map.get('field')))
        if publisher is None:
            return None
        currentPackageOrg = publisher
        organization_list = list()
        organization_list.append([
            ('@type', 'org:Organization'),  # optional
            ('name', Package2Pod.filter(publisher)),  # required
        ])
        # Each publisher_N extra contributes one parent level to the tree.
        for i in range(1, 6):
            pub_key = 'publisher_' + str(i)  # e.g. publisher_1
            if helpers.get_extra(Wrappers.pkg, pub_key):  # e.g. package.extras.publisher_1
                organization_list.append([
                    ('@type', 'org:Organization'),  # optional
                    ('name', Package2Pod.filter(helpers.get_extra(Wrappers.pkg, pub_key))),  # required
                ])
                currentPackageOrg = Package2Pod.filter(helpers.get_extra(Wrappers.pkg, pub_key))  # e.g. GSA
        if Wrappers.redaction_enabled:
            redaction_mask = helpers.get_extra(Wrappers.pkg, 'redacted_' + Wrappers.current_field_map.get('field'), False)
            if redaction_mask:
                return OrderedDict(
                    [
                        ('@type', 'org:Organization'),  # optional
                        ('name', '[[REDACTED-EX ' + redaction_mask + ']]'),  # required
                    ]
                )
        # so now we should have list() organization_list e.g.
        # (
        #   [('@type', 'org:Org'), ('name','GSA')],
        #   [('@type', 'org:Org'), ('name','OCSIT')]
        # )
        # Fold left-to-right: each later entry wraps the previous tree
        # under 'subOrganizationOf'.
        size = len(organization_list)  # e.g. 2
        tree = organization_list[0]
        for i in range(1, size):
            tree = organization_list[i] + [('subOrganizationOf', OrderedDict(tree))]
        return OrderedDict(tree)
# used by get_accrual_periodicity
accrual_periodicity_dict = {
'completely irregular': 'irregular',
'decennial': 'R/P10Y',
'quadrennial': 'R/P4Y',
'annual': 'R/P1Y',
'bimonthly': 'R/P2M', # or R/P0.5M
'semiweekly': 'R/P3.5D',
'daily': 'R/P1D',
'biweekly': 'R/P2W', # or R/P0.5W
'semiannual': 'R/P6M',
'biennial': 'R/P2Y',
'triennial': 'R/P3Y',
'three times a week': 'R/P0.33W',
'three times a month': 'R/P0.33M',
'continuously updated': 'R/PT1S',
'monthly': 'R/P1M',
'quarterly': 'R/P3M',
'semimonthly': 'R/P0.5M',
'three times a year': 'R/P4M',
'weekly': 'R/P1W',
'hourly': 'R/PT1H',
'continual': 'R/PT1S',
'fortnightly': 'R/P0.5M',
'annually': 'R/P1Y',
'biannualy': 'R/P0.5Y',
'asneeded': 'irregular',
'irregular': 'irregular',
'notplanned': 'irregular',
'unknown': 'irregular',
'not updated': 'irregular'
}
@staticmethod
def fix_accrual_periodicity(frequency):
return Wrappers.accrual_periodicity_dict.get(str(frequency).lower().strip(), frequency)
    @staticmethod
    def build_contact_point(someValue):
        """Assemble the vcard:Contact 'contactPoint' object for a package.

        Name and email are resolved via the export map's contactPoint sub-map,
        with fallbacks to the legacy "Contact Name" extra, package.maintainer
        and package.maintainer_email. Redaction masks apply when enabled.
        Returns None when the export map has no contactPoint mapping.
        """
        import os
        import sys
        try:
            contact_point_map = Wrappers.full_field_map.get('contactPoint').get('map')
            if not contact_point_map:
                return None
            package = Wrappers.pkg
            # Contact name: extras or direct package field, falling back to
            # the "Contact Name" extra, then package.maintainer.
            if contact_point_map.get('fn').get('extra'):
                fn = helpers.get_extra(package, contact_point_map.get('fn').get('field'),
                                       helpers.get_extra(package, "Contact Name",
                                                         package.get('maintainer')))
            else:
                fn = package.get(contact_point_map.get('fn').get('field'),
                                 helpers.get_extra(package, "Contact Name",
                                                   package.get('maintainer')))
            fn = helpers.get_responsible_party(fn)
            if Wrappers.redaction_enabled:
                redaction_reason = helpers.get_extra(package, 'redacted_' + contact_point_map.get('fn').get('field'), False)
                if redaction_reason:
                    fn = Package2Pod.mask_redacted(fn, redaction_reason)
            else:
                fn = Package2Pod.filter(fn)
            # Email: same extras/direct resolution with maintainer_email fallback.
            if contact_point_map.get('hasEmail').get('extra'):
                email = helpers.get_extra(package, contact_point_map.get('hasEmail').get('field'),
                                          package.get('maintainer_email'))
            else:
                email = package.get(contact_point_map.get('hasEmail').get('field'),
                                    package.get('maintainer_email'))
            # POD requires a mailto: URI for non-redacted addresses.
            if email and not helpers.is_redacted(email) and '@' in email:
                email = 'mailto:' + email
            if Wrappers.redaction_enabled:
                redaction_reason = helpers.get_extra(package, 'redacted_' + contact_point_map.get('hasEmail').get('field'),
                                                     False)
                if redaction_reason:
                    email = Package2Pod.mask_redacted(email, redaction_reason)
            else:
                email = Package2Pod.filter(email)
            # Omit fn/hasEmail keys entirely when empty.
            contact_point = OrderedDict([('@type', 'vcard:Contact')])
            if fn:
                contact_point['fn'] = fn
            if email:
                contact_point['hasEmail'] = email
            return contact_point
        except Exception as e:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            filename = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            log.error("%s : %s : %s", exc_type, filename, exc_tb.tb_lineno)
            raise e
@staticmethod
def inventory_parent_uid(parent_dataset_id):
if parent_dataset_id:
import ckan.model as model
parent = model.Package.get(parent_dataset_id)
parent_uid = parent.extras.col.target['unique_id'].value
if parent_uid:
parent_dataset_id = parent_uid
return parent_dataset_id
@staticmethod
def generate_distribution(someValue):
arr = []
package = Wrappers.pkg
distribution_map = Wrappers.full_field_map.get('distribution').get('map')
if not distribution_map or 'resources' not in package:
return arr
for r in package["resources"]:
resource = OrderedDict([('@type', "dcat:Distribution")])
for pod_key, json_map in distribution_map.items():
value = helpers.strip_if_string(r.get(json_map.get('field'), json_map.get('default')))
if Wrappers.redaction_enabled:
if 'redacted_' + json_map.get('field') in r and r.get('redacted_' + json_map.get('field')):
value = Package2Pod.mask_redacted(value, r.get('redacted_' + json_map.get('field')))
else:
value = Package2Pod.filter(value)
# filtering/wrapping if defined by export_map
wrapper = json_map.get('wrapper')
if wrapper:
method = getattr(Wrappers, wrapper)
if method:
value = method(value)
if value:
resource[pod_key] = value
# inventory rules
res_url = helpers.strip_if_string(r.get('url'))
if Wrappers.redaction_enabled:
if 'redacted_url' in r and r.get('redacted_url'):
res_url = '[[REDACTED-EX ' + r.get('redacted_url') + ']]'
else:
res_url = Package2Pod.filter(res_url)
if res_url:
res_url = res_url.replace('http://[[REDACTED', '[[REDACTED')
res_url = res_url.replace('http://http', 'http')
if r.get('resource_type') in ['api', 'accessurl']:
resource['accessURL'] = res_url
if 'mediaType' in resource:
resource.pop('mediaType')
else:
if 'accessURL' in resource:
resource.pop('accessURL')
resource['downloadURL'] = res_url
if 'mediaType' not in resource:
log.warn("Missing mediaType for resource in package | |
# Source repository: adsharma/pyserde
import dataclasses
import decimal
import enum
import itertools
import logging
import pathlib
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Tuple
import pytest
import more_itertools
import serde
import serde.compat
from serde import asdict, astuple, deserialize, from_dict, from_tuple, serialize
from serde.json import from_json, to_json
from serde.msgpack import from_msgpack, to_msgpack
from serde.toml import from_toml, to_toml
from serde.yaml import from_yaml, to_yaml
from . import data
from .data import Bool, Float, Int, ListPri, NestedPri, NestedPriOpt, Pri, PriDefault, PriOpt, Str
# Module-level logger for this test suite.
log = logging.getLogger('test')
# serde.init(True) — presumably toggles a debug/verbose mode; confirm
# against serde's documentation.
serde.init(True)
# (serializer, deserializer) pairs, one list per supported data format.
format_dict: List = [(asdict, from_dict)]
format_tuple: List = [(astuple, from_tuple)]
format_json: List = [(to_json, from_json)]
format_msgpack: List = [(to_msgpack, from_msgpack)]
format_yaml: List = [(to_yaml, from_yaml)]
format_toml: List = [(to_toml, from_toml)]
all_formats: List = format_dict + format_tuple + format_json + format_msgpack + format_yaml + format_toml
# Decorator-option variants exercised by most parametrized tests.
opt_case: List = [{}, {'rename_all': 'camelcase'}, {'rename_all': 'snakecase'}]
# (value, declared type) pairs used to parametrize round-trip tests.
types: List = [
    (10, int),  # Primitive
    ('foo', str),
    (100.0, float),
    (True, bool),
    (10, Optional[int]),  # Optional
    ('foo', Optional[str]),
    (100.0, Optional[float]),
    (True, Optional[bool]),
    (None, Optional[int]),
    (None, Optional[str]),
    (None, Optional[float]),
    (None, Optional[bool]),
    (Pri(10, 'foo', 100.0, True), Pri),  # dataclass
    (Pri(10, 'foo', 100.0, True), Optional[Pri]),
    (None, Optional[Pri]),
    (pathlib.Path('/tmp/foo'), pathlib.Path),  # Extended types
    (pathlib.Path('/tmp/foo'), Optional[pathlib.Path]),
    (None, Optional[pathlib.Path]),
    (decimal.Decimal(10), decimal.Decimal),
]
# Every unordered pair of `types` entries, flattened to (t, T, u, U).
types_combinations: List = list(map(lambda c: list(more_itertools.flatten(c)), itertools.combinations(types, 2)))
def make_id_from_dict(d: Dict) -> str:
    """Build a short pytest id for an option dict: 'none' or '<key>-<value>'."""
    if not d:
        return 'none'
    first_key = next(iter(d))
    return f'{first_key}-{d[first_key]}'
def opt_case_ids():
    """Yield a pytest id string for every entry of ``opt_case``."""
    return (make_id_from_dict(case) for case in opt_case)
def type_ids():
    """Human-readable pytest ids for the ``types`` table, e.g. ``int(10)``."""
    from serde.compat import typename

    def label(pair: Tuple):
        value, typ = pair
        return f'{typename(typ)}({value})'

    return map(label, types)
def type_combinations_ids():
    """Pytest ids for the flattened pairs in ``types_combinations``."""
    from serde.compat import typename

    def label(quad: Tuple):
        first_val, first_type, second_val, second_type = quad
        return f'{typename(first_type)}({first_val})-{typename(second_type)}({second_val})'

    return map(label, types_combinations)
@pytest.mark.parametrize('t,T', types, ids=type_ids())
@pytest.mark.parametrize('opt', opt_case, ids=opt_case_ids())
@pytest.mark.parametrize('se,de', all_formats)
def test_simple(se, de, opt, t, T):
    """Round-trip a single value of declared type T through every format pair."""
    log.info(f'Running test with se={se.__name__} de={de.__name__} opts={opt}')
    @deserialize(**opt)
    @serialize(**opt)
    @dataclass
    class C:
        i: int
        t: T
    c = C(10, t)
    # serialize then deserialize must reproduce an equal instance
    assert c == de(C, se(c))
def test_non_dataclass():
    """serde decorators must reject classes that are not dataclasses."""
    with pytest.raises(TypeError):
        @deserialize
        @serialize
        class NotADataclass:
            i: int
def test_forward_declaration():
    """A dataclass may reference a not-yet-defined class via a string annotation."""
    @serialize
    @deserialize
    @dataclass
    class Foo:
        bar: 'Bar'
    @serialize
    @deserialize
    @dataclass
    class Bar:
        i: int
    h = Foo(bar=Bar(i=10))
    assert h.bar.i == 10
    # The forward reference stays a string on the dataclass field metadata.
    assert 'Bar' == dataclasses.fields(Foo)[0].type
@pytest.mark.parametrize('opt', opt_case, ids=opt_case_ids())
@pytest.mark.parametrize('se,de', all_formats)
def test_list(se, de, opt):
    """Round-trip typed lists of primitives and an untyped (bare) List field."""
    @deserialize(**opt)
    @serialize(**opt)
    @dataclass
    class PriList:
        i: List[int]
        s: List[str]
        f: List[float]
        b: List[bool]
    p = PriList([10, 10], ['foo', 'bar'], [10.0, 10.0], [True, False])
    assert p == de(PriList, se(p))
    @deserialize(**opt)
    @serialize(**opt)
    @dataclass
    class BareList:
        i: List
    p = BareList([10])
    assert p == de(BareList, se(p))
    # List can contain different types (except Toml).
    if se is not to_toml:
        p = BareList([10, 'foo', 10.0, True])
        assert p == de(BareList, se(p))
@pytest.mark.parametrize('opt', opt_case, ids=opt_case_ids())
@pytest.mark.parametrize('se,de', all_formats)
def test_dict(se, de, opt):
    """Round-trip typed dicts of primitives and an untyped (bare) Dict field."""
    @deserialize(**opt)
    @serialize(**opt)
    @dataclass
    class PriDict:
        i: Dict[int, int]
        s: Dict[str, str]
        f: Dict[float, float]
        b: Dict[bool, bool]
    if se in (to_json, to_msgpack, to_toml):
        # JSON, Msgpack, Toml don't allow non string key.
        p = PriDict({'10': 10}, {'foo': 'bar'}, {'100.0': 100.0}, {'True': False})
        assert p == de(PriDict, se(p))
    else:
        p = PriDict({10: 10}, {'foo': 'bar'}, {100.0: 100.0}, {True: False})
        assert p == de(PriDict, se(p))
    @deserialize(**opt)
    @serialize(**opt)
    @dataclass
    class BareDict:
        d: Dict
    p = BareDict({'10': 10})
    assert p == de(BareDict, se(p))
    # A bare Dict may mix value types.
    p = BareDict({'10': 10, 'foo': 'bar', '100.0': 100.0, 'True': False})
    assert p == de(BareDict, se(p))
@pytest.mark.parametrize('opt', opt_case, ids=opt_case_ids())
@pytest.mark.parametrize('se,de', all_formats)
def test_enum(se, de, opt):
    """Round-trip enum fields (plain, IntEnum, nested) with and without defaults.

    Also verifies that enum-compatible raw values are coerced back into the
    declared enum members on deserialization.
    """
    from .data import E, IE
    from serde.compat import is_enum
    class Inner(enum.IntEnum):
        V0 = enum.auto()
        V1 = enum.auto()
        V2 = enum.auto()
    class NestedEnum(enum.Enum):
        V = Inner.V0
    @deserialize(**opt)
    @serialize(**opt)
    @dataclass
    class Foo:
        e: E
        ie: IE
        ne: NestedEnum
        e2: E = E.S
        ie2: IE = IE.V1
        ne2: NestedEnum = NestedEnum.V
    f = Foo(E.I, IE.V0, NestedEnum.V)
    ff = de(Foo, se(f))
    assert f == ff
    assert is_enum(ff.e) and isinstance(ff.e, E)
    assert is_enum(ff.ie) and isinstance(ff.ie, IE)
    assert is_enum(ff.ne) and isinstance(ff.ne, NestedEnum)
    assert is_enum(ff.e2) and isinstance(ff.e2, E)
    assert is_enum(ff.ie2) and isinstance(ff.ie2, IE)
    assert is_enum(ff.ne2) and isinstance(ff.ne2, NestedEnum)
    # pyserde automatically convert enum compatible value.
    f = Foo('foo', 2, Inner.V0, True, 10, Inner.V0)
    ff = de(Foo, se(f))
    assert is_enum(ff.e) and isinstance(ff.e, E) and ff.e == E.S
    assert is_enum(ff.ie) and isinstance(ff.ie, IE) and ff.ie == IE.V1
    assert is_enum(ff.ne) and isinstance(ff.ne, NestedEnum) and ff.ne == NestedEnum.V
    assert is_enum(ff.e2) and isinstance(ff.e2, E) and ff.e2 == E.B
    assert is_enum(ff.ie2) and isinstance(ff.ie2, IE) and ff.ie2 == IE.V2
    assert is_enum(ff.ne2) and isinstance(ff.ne2, NestedEnum) and ff.ne2 == NestedEnum.V
@pytest.mark.parametrize('se,de', all_formats)
def test_enum_imported(se, de):
    """EnumInClass from the shared test-data module round-trips through every format."""
    from .data import EnumInClass
    c = EnumInClass()
    cc = de(EnumInClass, se(c))
    assert c == cc
@pytest.mark.parametrize('opt', opt_case, ids=opt_case_ids())
@pytest.mark.parametrize('se,de', all_formats)
def test_tuple(se, de, opt):
    """Homogeneous, variant, bare, and wrapper-typed Tuple fields round-trip."""
    @deserialize(**opt)
    @serialize(**opt)
    @dataclass
    class Homogeneous:
        i: Tuple[int, int]
        s: Tuple[str, str]
        f: Tuple[float, float]
        b: Tuple[bool, bool]

    p = Homogeneous((10, 20), ('a', 'b'), (10.0, 20.0), (True, False))
    assert p == de(Homogeneous, se(p))
    # Lists are accepted as input, but the round-tripped instance does NOT equal
    # the list-built one (note the `!=` below) — presumably deserialization
    # yields tuples for Tuple fields; confirm against pyserde docs.
    p = Homogeneous([10, 20], ['a', 'b'], [10.0, 20.0], [True, False])
    assert p != de(Homogeneous, se(p))

    @deserialize(**opt)
    @serialize(**opt)
    @dataclass
    class Variant:
        t: Tuple[int, str, float, bool]

    # Toml doesn't support variant type of array.
    if se is not to_toml:
        p = Variant((10, 'a', 10.0, True))
        assert p == de(Variant, se(p))

    @deserialize(**opt)
    @serialize(**opt)
    @dataclass
    class BareTuple:
        t: Tuple  # bare Tuple: element types are not constrained

    p = BareTuple((10, 20))
    assert p == de(BareTuple, se(p))

    @deserialize(**opt)
    @serialize(**opt)
    @dataclass
    class Nested:
        i: Tuple[Int, Int]
        s: Tuple[Str, Str]
        f: Tuple[Float, Float]
        b: Tuple[Bool, Bool]

    # hmmm.. Nested tuple doesn't work ..
    # (wrapper-typed tuples are only exercised for non-Toml formats here)
    if se is not to_toml:
        p = Nested((Int(10), Int(20)), (Str("a"), Str("b")), (Float(10.0), Float(20.0)), (Bool(True), Bool(False)))
        assert p == de(Nested, se(p))
@pytest.mark.parametrize('se,de', all_formats)
def test_dataclass_default_factory(se, de):
    """A field with default_factory receives its default when absent from the input."""
    @deserialize
    @serialize
    @dataclass
    class Foo:
        foo: str
        items: Dict[str, int] = field(default_factory=dict)

    f = Foo('bar')
    assert f == de(Foo, se(f))
    assert {'foo': 'bar', 'items': {}} == asdict(f)
    # 'items' is missing from the mapping, so default_factory supplies {}.
    assert f == from_dict(Foo, {'foo': 'bar'})
@pytest.mark.parametrize('se,de', all_formats)
def test_default(se, de):
    """Defaulted fields may be omitted from the input in any prefix combination."""
    p = PriDefault()
    assert p == de(PriDefault, se(p))
    p = PriDefault()
    assert p == from_dict(PriDefault, {})
    assert p == from_dict(PriDefault, {'i': 10})
    assert p == from_dict(PriDefault, {'i': 10, 's': 'foo'})
    assert p == from_dict(PriDefault, {'i': 10, 's': 'foo', 'f': 100.0})
    assert p == from_dict(PriDefault, {'i': 10, 's': 'foo', 'f': 100.0, 'b': True})
    # The declared defaults are visible via the dataclasses introspection API.
    assert 10 == dataclasses.fields(PriDefault)[0].default
    assert 'foo' == dataclasses.fields(PriDefault)[1].default
    assert 100.0 == dataclasses.fields(PriDefault)[2].default
    assert True is dataclasses.fields(PriDefault)[3].default
@pytest.mark.parametrize('se,de', (format_dict + format_tuple + format_json + format_msgpack + format_yaml))
def test_list_pri(se, de):
    """A bare list of dataclasses round-trips, including the empty list."""
    p = [data.PRI, data.PRI]
    assert p == de(ListPri, se(p))
    p = []
    assert p == de(ListPri, se(p))
@pytest.mark.parametrize('se,de', (format_dict + format_tuple + format_json + format_msgpack + format_yaml))
def test_dict_pri(se, de):
    """A bare dict of dataclasses round-trips, including the empty dict."""
    p = {'1': data.PRI, '2': data.PRI}
    assert p == de(data.DictPri, se(p))
    p = {}
    assert p == de(data.DictPri, se(p))
def test_json():
    """to_json emits the expected JSON for a dataclass and for plain values."""
    p = Pri(10, 'foo', 100.0, True)
    s = '{"i": 10, "s": "foo", "f": 100.0, "b": true}'
    assert s == to_json(p)
    # Non-dataclass values serialize as plain JSON too.
    assert '10' == to_json(10)
    assert '[10, 20, 30]' == to_json([10, 20, 30])
    assert '{"foo": 10, "fuga": 10}' == to_json({'foo': 10, 'fuga': 10})
def test_msgpack():
    """Round-trip MessagePack with the default (named/map) encoding, pinned to exact bytes."""
    p = Pri(10, 'foo', 100.0, True)
    d = b'\x84\xa1i\n\xa1s\xa3foo\xa1f\xcb@Y\x00\x00\x00\x00\x00\x00\xa1b\xc3'
    assert d == to_msgpack(p)
    assert p == from_msgpack(Pri, d)
def test_msgpack_named():
    """named=False packs field values positionally (msgpack array) instead of as a map."""
    p = Pri(10, 'foo', 100.0, True)
    d = b'\x94\n\xa3foo\xcb@Y\x00\x00\x00\x00\x00\x00\xc3'
    assert d == to_msgpack(p, named=False)
    assert p == from_msgpack(Pri, d, named=False)
def test_from_dict():
    """asdict/from_dict round-trip for a dataclass and for dict/list/tuple containers of it."""
    p = Pri(10, 'foo', 100.0, True)
    d = {'i': 10, 's': 'foo', 'f': 100.0, 'b': True}
    assert d == asdict(p)
    assert p == from_dict(Pri, d)
    p = {'p': Pri(10, 'foo', 100.0, True)}
    d = {'p': {'i': 10, 's': 'foo', 'f': 100.0, 'b': True}}
    assert d == asdict(p)
    assert p == from_dict(Dict[str, Pri], d)
    p = [Pri(10, 'foo', 100.0, True)]
    # NOTE(review): asdict of a *list* compares equal to a tuple here — confirm
    # that the tuple result is intended behavior.
    d = ({'i': 10, 's': 'foo', 'f': 100.0, 'b': True},)
    assert d == asdict(p)
    assert p == from_dict(List[Pri], d)
    p = (Pri(10, 'foo', 100.0, True),)
    d = ({'i': 10, 's': 'foo', 'f': 100.0, 'b': True},)
    assert d == asdict(p)
    assert p == from_dict(Tuple[Pri], d)
def test_from_tuple():
    """astuple/from_tuple round-trip for a dataclass and for dict/list/tuple containers of it."""
    p = Pri(10, 'foo', 100.0, True)
    d = (10, 'foo', 100.0, True)
    assert d == astuple(p)
    assert p == from_tuple(Pri, d)
    p = {'p': Pri(10, 'foo', 100.0, True)}
    d = {'p': (10, 'foo', 100.0, True)}
    assert d == astuple(p)
    assert p == from_tuple(Dict[str, Pri], d)
    p = [Pri(10, 'foo', 100.0, True)]
    d = ((10, 'foo', 100.0, True),)
    assert d == astuple(p)
    assert p == from_tuple(List[Pri], d)
    p = (Pri(10, 'foo', 100.0, True),)
    d = ((10, 'foo', 100.0, True),)
    assert d == astuple(p)
    assert p == from_tuple(Tuple[Pri], d)
@pytest.mark.parametrize('se,de', all_formats)
def test_rename(se, de):
    """serde_rename metadata maps a field to a different serialized name."""
    @deserialize
    @serialize
    @dataclass
    class Foo:
        # Serialized as "class", which could not be used as an attribute name.
        class_name: str = field(metadata={'serde_rename': 'class'})

    f = Foo(class_name='foo')
    assert f == de(Foo, se(f))
@pytest.mark.parametrize('se,de', format_json + format_yaml + format_toml + format_msgpack)
def test_rename_all(se, de):
@deserialize(rename_all='camelcase')
@serialize(rename_all='camelcase')
@dataclass
| |
the same thing.
"""
if not context.MPI_DISTRIBUTABLE or serial:
return runActionsInSerial(o, r, cs, actions)
useForComputation = [True] * context.MPI_SIZE
if numPerNode != None:
if numPerNode < 1:
raise ValueError("numPerNode must be >= 1")
numThisNode = {nodeName: 0 for nodeName in context.MPI_NODENAMES}
for rank, nodeName in enumerate(context.MPI_NODENAMES):
useForComputation[rank] = numThisNode[nodeName] < numPerNode
numThisNode[nodeName] += 1
numBatches = int(
math.ceil(
len(actions) / float(len([rank for rank in useForComputation if rank]))
)
)
runLog.extra(
"Running {} MPI actions in parallel over {} batches".format(
len(actions), numBatches
)
)
queue = list(actions) # create a new list.. we will use as a queue
results = []
batchNum = 0
while queue:
actionsThisRound = []
for useRank in useForComputation:
actionsThisRound.append(queue.pop(0) if useRank and queue else None)
realActions = [
(context.MPI_NODENAMES[rank], rank, act)
for rank, act in enumerate(actionsThisRound)
if act is not None
]
batchNum += 1
runLog.extra(
"Distributing {} MPI actions for parallel processing (batch {} of {}):\n{}".format(
len(realActions),
batchNum,
numBatches,
tabulate.tabulate(realActions, headers=["Nodename", "Rank", "Action"]),
)
)
distrib = DistributionAction(actionsThisRound)
distrib.broadcast()
results.append(distrib.invoke(o, r, cs))
return results
def runActionsInSerial(o, r, cs, actions):
    """Invoke each MpiAction one after another on the local process.

    Notes
    -----
    Each action has its ``serial`` attribute set to :code:`True` for the duration of
    its ``invoke`` call (the `MpiAction.broadcast` and `MpiAction.gather` methods then
    basically just return the value being supplied) and reset to :code:`False` afterwards.
    """
    total = len(actions)
    runLog.extra("Running {} MPI actions in serial".format(total))
    results = []
    for position, action in enumerate(actions, start=1):
        action.serial = True
        runLog.extra("Running action {} of {}: {}".format(position, total, action))
        results.append(action.invoke(o, r, cs))
        action.serial = False  # return to original state
    return results
class DistributionAction(MpiAction):
    """
    This MpiAction scatters the workload of multiple actions to available resources.

    Notes
    -----
    This currently only works from the root (of COMM_WORLD). Eventually, it would be nice to make
    it possible for sub-tasks to manage their own communicators and spawn their own work within some
    sub-communicator.

    This performs an MPI Split operation and takes over the context.MPI_COMM and associated variables.
    For this reason, it is possible that when someone thinks they have distributed information to all
    nodes, it may only be a subset that was necessary to perform the number of actions needed by this
    DistributionAction.
    """

    def __init__(self, actions):
        MpiAction.__init__(self)
        # List of MpiActions to scatter from root; None on worker ranks (see __reduce__).
        self._actions = actions

    def __reduce__(self):
        """Reduce prevents from unnecessary actions to others, after all we only want to scatter.
        Consequently, the worker nodes _actions will be None.
        """
        return DistributionAction, (None,)

    def invokeHook(self):
        """
        Overrides invokeHook to distribute work amongst available resources as requested.

        Notes
        -----
        This method is intentionally non-recursive: the scattered sub-actions run under
        a Split communicator with MPI_DISTRIBUTABLE forced to False.
        """
        # Stash the global MPI context so it can be restored after the Split below.
        canDistribute = context.MPI_DISTRIBUTABLE
        mpiComm = context.MPI_COMM
        mpiRank = context.MPI_RANK
        mpiSize = context.MPI_SIZE
        mpiNodeNames = context.MPI_NODENAMES
        if self.cs["verbosity"] == "debug" and mpiRank == 0:
            runLog.debug("Printing diagnostics for MPI actions!")
            objectCountDict = collections.defaultdict(int)
            for debugAction in self._actions:
                utils.classesInHierarchy(debugAction, objectCountDict)
                for objekt, count in objectCountDict.items():
                    runLog.debug(
                        "There are {} {} in MPI action {}".format(
                            count, objekt, debugAction
                        )
                    )
        try:
            # Each rank receives exactly one entry of self._actions (possibly None).
            action = mpiComm.scatter(self._actions, root=0)
            # create a new communicator that only has these specific dudes running
            context.MPI_DISTRIBUTABLE = False
            hasAction = action is not None
            context.MPI_COMM = mpiComm.Split(int(hasAction))
            context.MPI_RANK = context.MPI_COMM.Get_rank()
            context.MPI_SIZE = context.MPI_COMM.Get_size()
            context.MPI_NODENAMES = context.MPI_COMM.allgather(context.MPI_NODENAME)
            if hasAction:
                return action.invoke(self.o, self.r, self.cs)
        finally:
            # restore the global variables
            context.MPI_DISTRIBUTABLE = canDistribute
            context.MPI_COMM = mpiComm
            context.MPI_RANK = mpiRank
            context.MPI_SIZE = mpiSize
            context.MPI_NODENAMES = mpiNodeNames
class MpiActionError(Exception):
    """Raised when an error condition occurs during an MpiAction."""
class DistributeStateAction(MpiAction):
    """MpiAction that syncs all nodes with the reactor, the cs, and the interfaces."""

    def __init__(self, skipInterfaces=False):
        MpiAction.__init__(self)
        # When True, interfaces are not re-broadcast; the operator is just reattached.
        self._skipInterfaces = skipInterfaces
    def invokeHook(self):
        r"""Sync up all nodes with the reactor, the cs, and the interfaces.

        Notes
        -----
        This is run by all workers and the master any time the code needs to sync all processors.
        """
        if context.MPI_SIZE <= 1:
            runLog.extra("Not distributing state because there is only one processor")
            return
        # Detach phase:
        # The Reactor and the interfaces have links to the Operator, which contains Un-MPI-able objects
        # like the MPI Comm and the SQL database connections.
        runLog.info("Distributing State")
        start = timeit.default_timer()
        try:
            # Order matters: settings first, then the reactor (which needs cs),
            # then parameter assignments and interfaces.
            cs = self._distributeSettings()
            self._distributeReactor(cs)
            self._distributeParamAssignments()
            if self._skipInterfaces:
                self.o.reattach(self.r, cs)  # may be redundant?
            else:
                self._distributeInterfaces()
            # lastly, make sure the reactor knows it is up to date
            # the operator/interface attachment may invalidate some of the cache, but since
            # all the underlying data is the same, ultimately all state should be (initially) the
            # same.
            # XXX: this is an indication we need to revamp either how the operator attachment works
            # or how the interfaces are distributed.
            self.r._markSynchronized()  # pylint: disable=protected-access
        except (cPickle.PicklingError, TypeError) as error:
            runLog.error("Failed to transmit on distribute state root MPI bcast")
            runLog.error(error)
            # workers are still waiting for a reactor object
            if context.MPI_RANK == 0:
                _diagnosePickleError(self.o)
                context.MPI_COMM.bcast("quit")  # try to get the workers to quit.
            raise
        if context.MPI_RANK != 0:
            self.r.core.regenAssemblyLists()  # pylint: disable=no-member
        # check to make sure that everything has been properly reattached
        if self.r.core.getFirstBlock().r is not self.r:  # pylint: disable=no-member
            raise RuntimeError("Block.r is not self.r. Reattach the blocks!")
        beforeCollection = timeit.default_timer()
        # force collection; we've just created a bunch of objects that don't need to be used again.
        runLog.debug("Forcing garbage collection.")
        gc.collect()
        stop = timeit.default_timer()
        runLog.extra(
            "Distributed state in {}s, garbage collection took {}s".format(
                beforeCollection - start, stop - beforeCollection
            )
        )
    def _distributeSettings(self):
        """Broadcast the settings object from rank 0 to all ranks and adopt it locally.

        :return: The synchronized settings object.
        :raises RuntimeError: if the broadcast value is not a ``settings.Settings``.
        """
        if context.MPI_RANK == 0:
            runLog.debug("Sending the settings object")
        self.cs = cs = self.broadcast(self.o.cs)
        if isinstance(cs, settings.Settings):
            # Workers log at the (typically quieter) branch verbosity.
            runLog.setVerbosity(
                cs["verbosity"] if context.MPI_RANK == 0 else cs["branchVerbosity"]
            )
            runLog.debug("Received settings object")
        else:
            raise RuntimeError("Failed to transmit settings, received: {}".format(cs))
        if context.MPI_RANK != 0:
            settings.setMasterCs(cs)
            self.o.cs = cs
        return cs
    def _distributeReactor(self, cs):
        """Broadcast the reactor from rank 0; workers adopt the copy, the master keeps its own.

        :raises RuntimeError: if the broadcast value is not a ``reactors.Reactor``.
        """
        runLog.debug("Sending the Reactor object")
        r = self.broadcast(self.r)
        if isinstance(r, reactors.Reactor):
            runLog.debug("Received reactor")
        else:
            raise RuntimeError("Failed to transmit reactor, received: {}".format(r))
        if context.MPI_RANK == 0:
            # on the master node this unfortunately created a __deepcopy__ of the reactor;
            # delete it so the master keeps its original reactor object
            del r
        else:
            # workers adopt the broadcast copy as their reactor
            self.r = r
            self.o.r = r
        self.r.o = self.o
        runLog.debug(
            "The reactor has {} assemblies".format(len(self.r.core.getAssemblies()))
        )
        # Synchronize the global assembly counter so new assemblies get unique numbers.
        numAssemblies = self.broadcast(assemblies.getAssemNum())
        assemblies.setAssemNumCounter(numAssemblies)
        # attach here so any interface actions use a properly-setup reactor.
        self.o.reattach(self.r, cs)  # sets r and cs
def _distributeParamAssignments(self):
data = dict()
if context.MPI_RANK == 0:
data = {
(pName, pdType.__name__): pDef.assigned
for (
pName,
pdType,
), pDef in parameterDefinitions.ALL_DEFINITIONS.items()
}
data = context.MPI_COMM.bcast(data, root=0)
if context.MPI_RANK != 0:
for (pName, pdType), pDef in parameterDefinitions.ALL_DEFINITIONS.items():
pDef.assigned = data[pName, pdType.__name__]
def _distributeInterfaces(self):
"""
Distribute the interfaces to all MPI nodes.
Interface copy description
Since interfaces store information that can influence a calculation, it is important
in branch searches to make sure that no information is carried forward from these
runs on either the master node or the workers. However, there are interfaces that
cannot be distributed, making this a challenge. To solve this problem, any interface
that cannot be distributed is simply re-initialized. If any information needs to be
given to the worker nodes on a non-distributable interface, additional function definitions
(and likely soul searching as to why needed distributable information is on a
non-distributable interface) are required to pass the information around.
See Also
--------
armi.interfaces.Interface.preDistributeState : runs on master before DS
armi.interfaces.Interface.postDistributeState : runs on master after DS
armi.interfaces.Interface.interactDistributeState : runs on workers after DS
"""
if context.MPI_RANK == 0:
# These run on the master node. (Worker nodes run sychronized code below)
toRestore = {}
for i in self.o.getInterfaces():
if i.distributable() == interfaces.Interface.Distribute.DUPLICATE:
runLog.debug("detaching interface {0}".format(i.name))
i.detachReactor()
toRestore[i] = i.preDistributeState()
# Verify that the interface stacks are identical.
runLog.debug("Sending the interface names and flags")
_dumIList = self.broadcast(
[(i.name, i.distributable()) for i in self.o.getInterfaces()]
)
# transmit interfaces
for i in self.o.getInterfaces():
# avoid sending things that don't pickle, like the database.
if i.distributable() == interfaces.Interface.Distribute.DUPLICATE:
runLog.debug("Sending the interface {0}".format(i))
_idum = self.broadcast(i) # don't send the reactor or operator
i.postDistributeState(toRestore[i])
i.attachReactor(self.o, self.r)
else:
# These run on the worker nodes.
# verify identical interface stack
# This list is (interfaceName, distributable) tuples)
interfaceList = self.broadcast(None)
for iName, distributable in interfaceList:
iOld = self.o.getInterface(iName)
if distributable == interfaces.Interface.Distribute.DUPLICATE:
| |
the number of returned results to this many.
:param str filter: Expression to filter the result set. Defaults to filter down to active instruments only, i.e. those that have not been deleted. Read more about filtering results from LUSID here https://support.lusid.com/filtering-results-from-lusid.
:param list[str] instrument_property_keys: A list of property keys from the \"Instrument\" domain to decorate onto each instrument. These take the format {domain}/{scope}/{code} e.g. \"Instrument/system/Name\".
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: PagedResourceListOfInstrument
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_instruments_with_http_info(**kwargs) # noqa: E501
    def list_instruments_with_http_info(self, **kwargs):  # noqa: E501
        """[EARLY ACCESS] List instruments  # noqa: E501
        List all the instruments that have been mastered in the LUSID instrument master. The maximum number of instruments that this method can list per request is 2,000.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_instruments_with_http_info(async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param datetime as_at: The asAt datetime at which to list the instruments. Defaults to return the latest version of each instruments if not specified.
        :param str effective_at: The effective datetime or cut label at which to list the instruments. Defaults to the current LUSID system datetime if not specified.
        :param str page: The pagination token to use to continue listing instruments from a previous call to list instruments. This value is returned from the previous call. If a pagination token is provided the sortBy, filter, effectiveAt, and asAt fields must not have changed since the original request. Also, if set, a start value cannot be provided.
        :param list[str] sort_by: Order the results by these fields. Use use the '-' sign to denote descending order e.g. -MyFieldName.
        :param int start: When paginating, skip this number of results.
        :param int limit: When paginating, limit the number of returned results to this many.
        :param str filter: Expression to filter the result set. Defaults to filter down to active instruments only, i.e. those that have not been deleted. Read more about filtering results from LUSID here https://support.lusid.com/filtering-results-from-lusid.
        :param list[str] instrument_property_keys: A list of property keys from the \"Instrument\" domain to decorate onto each instrument. These take the format {domain}/{scope}/{code} e.g. \"Instrument/system/Name\".
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(PagedResourceListOfInstrument, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        local_var_params = locals()
        # Names accepted by this endpoint; any other kwarg is a caller error.
        all_params = ['as_at', 'effective_at', 'page', 'sort_by', 'start', 'limit', 'filter', 'instrument_property_keys']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method list_instruments" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # Client-side validation of the pagination bound before making the request.
        # NOTE(review): the docstring says a maximum of 2,000 per request, while this
        # validates `limit` <= 5000 — confirm which bound the API actually enforces.
        if 'limit' in local_var_params and local_var_params['limit'] > 5000:  # noqa: E501
            raise ApiValueError("Invalid value for parameter `limit` when calling `list_instruments`, must be a value less than or equal to `5000`")  # noqa: E501
        if 'limit' in local_var_params and local_var_params['limit'] < 1:  # noqa: E501
            raise ApiValueError("Invalid value for parameter `limit` when calling `list_instruments`, must be a value greater than or equal to `1`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        # Map the accepted kwargs onto query-string parameters (camelCase API names).
        query_params = []
        if 'as_at' in local_var_params:
            query_params.append(('asAt', local_var_params['as_at']))  # noqa: E501
        if 'effective_at' in local_var_params:
            query_params.append(('effectiveAt', local_var_params['effective_at']))  # noqa: E501
        if 'page' in local_var_params:
            query_params.append(('page', local_var_params['page']))  # noqa: E501
        if 'sort_by' in local_var_params:
            query_params.append(('sortBy', local_var_params['sort_by']))  # noqa: E501
            collection_formats['sortBy'] = 'multi'  # noqa: E501
        if 'start' in local_var_params:
            query_params.append(('start', local_var_params['start']))  # noqa: E501
        if 'limit' in local_var_params:
            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
        if 'filter' in local_var_params:
            query_params.append(('filter', local_var_params['filter']))  # noqa: E501
        if 'instrument_property_keys' in local_var_params:
            query_params.append(('instrumentPropertyKeys', local_var_params['instrument_property_keys']))  # noqa: E501
            collection_formats['instrumentPropertyKeys'] = 'multi'  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['text/plain', 'application/json', 'text/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['oauth2']  # noqa: E501
        # set the LUSID header
        header_params['X-LUSID-SDK-Language'] = 'Python'
        header_params['X-LUSID-SDK-Version'] = '0.11.2342'
        return self.api_client.call_api(
            '/api/instruments', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='PagedResourceListOfInstrument',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
    def update_instrument_identifier(self, identifier_type, identifier, update_instrument_identifier_request, **kwargs):  # noqa: E501
        """[EARLY ACCESS] Update instrument identifier  # noqa: E501
        Update, insert or delete a single instrument identifier for a single instrument. If it is not being deleted the identifier will be updated if it already exists and inserted if it does not.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.update_instrument_identifier(identifier_type, identifier, update_instrument_identifier_request, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str identifier_type: The identifier to use to resolve the instrument e.g. \"Figi\". (required)
        :param str identifier: The original value of the identifier for the requested instrument. (required)
        :param UpdateInstrumentIdentifierRequest update_instrument_identifier_request: The identifier to update or remove. This may or may not be the same identifier used to resolve the instrument. (required)
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: Instrument
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Delegate to the _with_http_info variant, returning only the response body.
        kwargs['_return_http_data_only'] = True
        return self.update_instrument_identifier_with_http_info(identifier_type, identifier, update_instrument_identifier_request, **kwargs)  # noqa: E501
def update_instrument_identifier_with_http_info(self, identifier_type, identifier, update_instrument_identifier_request, **kwargs): # noqa: E501
"""[EARLY ACCESS] Update instrument identifier # noqa: E501
Update, insert or delete a single instrument identifier for a single instrument. If it is not being deleted the identifier will be updated if it already exists and inserted if it does not. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_instrument_identifier_with_http_info(identifier_type, identifier, update_instrument_identifier_request, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str identifier_type: The identifier to use to resolve the instrument e.g. \"Figi\". (required)
:param str identifier: The original value of the identifier for the requested instrument. (required)
:param UpdateInstrumentIdentifierRequest update_instrument_identifier_request: The identifier to update or remove. This may or may not be the same identifier used to resolve the instrument. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(Instrument, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['identifier_type', 'identifier', 'update_instrument_identifier_request'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method update_instrument_identifier" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'update_instrument_identifier_request' is set
if ('update_instrument_identifier_request' not in local_var_params or
local_var_params['update_instrument_identifier_request'] is None):
raise ApiValueError("Missing the required parameter `update_instrument_identifier_request` when calling `update_instrument_identifier`") # noqa: E501
collection_formats = {}
path_params = {}
if 'identifier_type' in local_var_params:
path_params['identifierType'] = local_var_params['identifier_type'] # noqa: E501
if 'identifier' in local_var_params:
path_params['identifier'] = local_var_params['identifier'] # noqa: E501
| |
pdf_service = conf['PDF_SERVICE']
if pdf_service == 'DocRaptor':
from cla.models.docraptor_models import DocRaptor as pdf
elif pdf_service == 'MockDocRaptor':
from cla.models.docraptor_models import MockDocRaptor as pdf
else:
raise Exception('Invalid PDF service selected in configuration: %s' % pdf_service)
pdf_instance = pdf()
if initialize:
pdf_instance.initialize(conf)
return pdf_instance
def get_key_value_store_service(conf=None):
    """
    Helper function to get the configured key-value store service instance.

    :param conf: Same as get_database_models(). Falls back to the global
        ``cla.conf`` when not provided.
    :type conf: dict
    :return: The key-value store service instance based on configuration specified.
    :rtype: KeyValueStore
    :raises Exception: if the configured store name is not supported.
    """
    if conf is None:
        conf = cla.conf
    # Read from the resolved conf. (Bug fix: this previously read the global
    # ``cla.conf['KEYVALUE']`` unconditionally, silently ignoring a caller-supplied
    # ``conf`` argument.)
    keyvalue = conf['KEYVALUE']
    if keyvalue == 'Memory':
        from hug.store import InMemoryStore as Store
    elif keyvalue == 'DynamoDB':
        from cla.models.dynamo_models import Store
    else:
        raise Exception('Invalid key-value store selected in configuration: %s' % keyvalue)
    return Store()
def get_supported_repository_providers():
    """
    Return the mapping of supported repository service providers.

    :return: Dictionary of supported repository service providers in the following
        format: {'<provider_name>': <provider_class>}
    :rtype: dict
    """
    from cla.models.github_models import GitHub, MockGitHub
    # GitLab support is not wired up; only the GitHub variants are exposed.
    providers = {
        'github': GitHub,
        'mock_github': MockGitHub,
    }
    return providers
def get_repository_service(provider, initialize=True):
    """
    Get a repository service instance by provider name.

    :param provider: The provider to load.
    :type provider: string
    :param initialize: Whether or not to call the initialize() method on the object.
    :type initialize: boolean
    :return: A repository provider instance (GitHub, Gerrit, etc).
    :rtype: RepositoryService
    :raises NotImplementedError: if the provider name is not supported.
    """
    supported = get_supported_repository_providers()
    if provider not in supported:
        raise NotImplementedError('Provider not supported')
    service = supported[provider]()
    if initialize:
        service.initialize(cla.conf)
    return service
def get_repository_service_by_repository(repository, initialize=True):
    """
    Helper function to get a repository service provider instance based
    on a repository.

    :param repository: The repository object or repository_id.
    :type repository: cla.models.model_interfaces.Repository | string
    :param initialize: Whether or not to call the initialize() method on the object.
    :type initialize: boolean
    :return: A repository provider instance (GitHub, Gerrit, etc).
    :rtype: RepositoryService
    """
    repository_model = get_database_models()['Repository']
    if isinstance(repository, repository_model):
        repo = repository
    else:
        # Treat the argument as a repository_id and load the model from storage.
        repo = repository_model()
        repo.load(repository)
    return get_repository_service(repo.get_repository_type(), initialize)
def get_supported_document_content_types():  # pylint: disable=invalid-name
    """
    Return the list of supported document content types.

    :return: List of supported document content types.
    :rtype: list
    """
    supported_types = ['pdf', 'url+pdf', 'storage+pdf']
    return supported_types
def get_project_document(project, document_type, major_version, minor_version):
    """
    Helper function to get the specified document from a project.

    :param project: The project model object to look in.
    :type project: cla.models.model_interfaces.Project
    :param document_type: The type of document (individual or corporate).
    :type document_type: string
    :param major_version: The major version number to look for.
    :type major_version: integer
    :param minor_version: The minor version number to look for.
    :type minor_version: integer
    :return: The document model if found, otherwise None.
    :rtype: cla.models.model_interfaces.Document | None
    """
    if document_type == 'individual':
        candidates = project.get_project_individual_documents()
    else:
        candidates = project.get_project_corporate_documents()
    # Return the first document matching the exact (major, minor) version pair.
    return next(
        (
            doc
            for doc in candidates
            if doc.get_document_major_version() == major_version
            and doc.get_document_minor_version() == minor_version
        ),
        None,
    )
def get_project_latest_individual_document(project_id):
    """
    Helper function to return the latest individual document belonging to a project.

    :param project_id: The project ID in question.
    :type project_id: string
    :return: Latest ICLA document object for this project.
    :rtype: cla.models.model_instances.Document
    """
    project = get_project_instance()
    project.load(str(project_id))
    # Find the highest (major, minor) version among the ICLA documents.
    docs = project.get_project_individual_documents()
    major, minor = get_last_version(docs)
    return project.get_project_individual_document(major, minor)
# TODO Heller remove
def get_project_latest_corporate_document(project_id):
    """
    Helper function to return the latest corporate document belonging to a project.

    :param project_id: The project ID in question.
    :type project_id: string
    :return: Latest CCLA document object for this project.
    :rtype: cla.models.model_instances.Document
    """
    project = get_project_instance()
    project.load(str(project_id))
    # Find the highest (major, minor) version among the CCLA documents.
    docs = project.get_project_corporate_documents()
    major, minor = get_last_version(docs)
    return project.get_project_corporate_document(major, minor)
def get_last_version(documents):
    """
    Helper function to get the last version of the list of documents provided.

    :param documents: List of documents to check.
    :type documents: [cla.models.model_interfaces.Document]
    :return: 2-item tuple containing (major, minor) version number.
    :rtype: tuple
    """
    # (0, -1) is the sentinel result for an empty document list.
    best_major, best_minor = 0, -1
    for doc in documents:
        major = doc.get_document_major_version()
        minor = doc.get_document_minor_version()
        if major > best_major:
            # New highest major version: its minor wins unconditionally.
            best_major, best_minor = major, minor
        elif major == best_major and minor > best_minor:
            best_minor = minor
    return best_major, best_minor
def user_icla_check(user: User, project: Project, signature: Signature, latest_major_version=False) -> bool:
    """Return True when the given ICLA signature is signed and approved.

    When ``latest_major_version`` is True, the signature must additionally
    match the latest major version of the project's individual documents.
    """
    cla.log.debug(f'ICLA signature found for user: {user} on project: {project}, '
                  f'signature_id: {signature.get_signature_id()}')

    # Optionally reject signatures made against an outdated document version.
    if latest_major_version:
        major, _ = get_last_version(project.get_project_individual_documents())
        if signature.get_signature_document_major_version() != major:
            cla.log.debug(f'User: {user} only has an old document version signed '
                          f'(v{signature.get_signature_document_major_version()}) - needs a new version')
            return False

    # Guard clauses: not signed, then signed-but-unapproved, then success.
    if not signature.get_signature_signed():
        cla.log.debug(f'User: {user} has ICLA with signature_id: {signature.get_signature_id()}, '
                      f'project: {project}, but has not been signed or approved yet')
        return False
    if not signature.get_signature_approved():
        cla.log.debug(f'User: {user} has ICLA signed with signature_id: {signature.get_signature_id()}, '
                      f'project: {project}, but has not been approved yet')
        return False
    cla.log.debug(f'User: {user} has ICLA signed and approved signature_id: {signature.get_signature_id()} '
                  f'for project: {project}')
    return True
def user_ccla_check(user: User, project: Project, signature: Signature) -> bool:
    """Return True when the given CCLA signature is both signed and approved."""
    cla.log.debug(f'CCLA signature found for user: {user} on project: {project}, '
                  f'signature_id: {signature.get_signature_id()}')
    signed = signature.get_signature_signed()
    if signed and signature.get_signature_approved():
        cla.log.debug(f'User: {user} has a signed and approved CCLA for project: {project}')
        return True
    if signed:
        # Signed but still waiting for approval.
        cla.log.debug(f'User: {user} has CCLA signed with signature_id: {signature.get_signature_id()}, '
                      f'project: {project}, but has not been approved yet')
    else:
        # Neither signed nor approved yet.
        cla.log.debug(f'User: {user} has CCLA with signature_id: {signature.get_signature_id()}, '
                      f'project: {project}, but has not been signed or approved yet')
    return False
def user_signed_project_signature(user: User, project: Project) -> bool:
"""
Helper function to check if a user has signed a project signature tied to a repository.
Will consider both ICLA and employee signatures.
:param user: The user object to check for.
:type user: cla.models.model_interfaces.User
:param project: the project model
:type project: cla.models.model_interfaces.Project
:return: Whether or not the user has an signature that's signed and approved
for this project.
:rtype: boolean
"""
fn = 'utils.user_signed_project_signature'
# Check if we have an ICLA for this user
cla.log.debug(f'{fn} - checking to see if user has signed an ICLA, user: {user}, project: {project}')
signature = user.get_latest_signature(project.get_project_id(), signature_signed=True, signature_approved=True)
icla_pass = False
if signature is not None:
icla_pass = True
else:
cla.log.debug(f'{fn} - ICLA signature NOT found for User: {user} on project: {project}')
# If we passed the ICLA check - good, return true, no need to check CCLA
if icla_pass:
cla.log.debug(
f'{fn} - ICLA signature check passed for User: {user} on project: {project} - skipping CCLA check')
return True
else:
cla.log.debug(
f'{fn} - ICLA signature check failed for User: {user} on project: {project} - will now check CCLA')
# Check if we have an CCLA for this user
company_id = user.get_user_company_id()
ccla_pass = False
if company_id is not None:
cla.log.debug(f'{fn} - CCLA signature check - user has a company: {company_id} - '
'looking up user\'s employee acknowledgement...')
# Get employee signature
employee_signature = user.get_latest_signature(
project.get_project_id(),
company_id=company_id,
signature_signed=True,
signature_approved=True)
if employee_signature is not None:
cla.log.debug(f'{fn} - CCLA signature check - located employee acknowledgement - '
f'signature id: {employee_signature.get_signature_id()}')
cla.log.debug(f'{fn} - CCLA signature check - loading company record by id: {company_id}...')
company = get_company_instance()
company.load(company_id)
# Get CCLA signature of company to access whitelist
cla.log.debug(f'{fn} - CCLA signature check - loading signed CCLA for project|company, '
f'user: {user}, project_id: {project}, company_id: {company_id}')
signature = company.get_latest_signature(
project.get_project_id(), signature_signed=True, signature_approved=True)
# Don't check the version for employee signatures.
if signature is not None:
cla.log.debug(f'{fn} - CCLA signature check - loaded signed CCLA for project|company, '
f'user: {user}, project_id: {project}, company_id: {company_id}, '
f'signature_id: {signature.get_signature_id()}')
# Verify if user has been approved: https://github.com/communitybridge/easycla/issues/332
cla.log.debug(f'{fn} - CCLA signature check - '
'checking to see if the user is in one of the approval lists...')
if user.is_approved(signature):
ccla_pass = True
else:
# Set user signatures approved = false due to user failing whitelist checks
cla.log.debug(f'{fn} - user not in one of the approval lists - '
'marking signature approved = false for '
f'user: {user}, project_id: {project}, company_id: {company_id}')
user_signatures = user.get_user_signatures(
project_id=project.get_project_id(), company_id=company_id, signature_approved=True,
signature_signed=True
)
for signature | |
Shape_Writer(self.log,self.shapeOutFutureFilename,method_object.outputFutureDetailFieldsShape,self.boolAddShapeFields ,self.userFieldShapeMap, self.referentialSpaceWKT)
self.sendMessage("INFO","Create future - Shape File Output Handler")
else:
self.sendMessage("INFO","Process Cancel Request By User - Create Shape File Output Handler")
return False
# Create JSON file for Java Callback
if self.planHeatDMM.data.processContinue == True:
self.jsonArgs4Java(self.log, self.fileJsonArg,self.folderProject,self.shpFilename,self.logFileName,self.lidarDTMFolder,\
self.lidarDSMFolder, self.referentialSpaceEPSG, self.fieldsSHPJavaPosition,self.buildingUseFloorHeightDict)
self.sendMessage("INFO","Create JSON File for Java Process Ok")
else:
self.sendMessage("INFO","Process Cancel Request By User - Create JSON File for Java Process")
return False
# CALL TO JAVA PROCESS
if self.planHeatDMM.data.processContinue == True:
if Config.LAUNCH_JAVA_PROCESS in ("Y","y"):
self.sendMessage("INFO","Call to Java Process")
self.message_update.emit("Running Java Process...",self.planHeatDMM)
self.javaLaunchProcess(self.log,self.javaLog, self.fileJava, self.fileJar,self.fileLib, self.mainJavaClass, self.fileJsonArg)
self.planHeatDMM.resources.javaProcessObject = None
self.sendMessage("INFO","Finish Java Process Ok")
else:
self.sendMessage("INFO","Process Cancel Request By User - Call to Java Process")
return False
#Read CSV in file
if self.planHeatDMM.data.processContinue == True:
self.sendMessage("INFO","Reading java CSV")
data = self.inputCsvFile.csv_read()
self.sendMessage("INFO","Finish CSV Read Ok")
else:
self.sendMessage("INFO","Process Cancel Request By User - Reading CSV file")
return False
self.progress_total.emit(len(data),self.planHeatDMM)
building_list = []
self.sendMessage("INFO","Data Calculate Processing Start")
# Process Data
for i, row in enumerate(data):
if self.planHeatDMM.data.processContinue == True:
building = Building(self.log,self.projectName,self.areaName,self.country_id,row)
self.message_update.emit("Processing data calculation - Building {}/{}".format(i+1,len(data)),self.planHeatDMM)
self.assignBuildingShapeGeometryAndRecord(self.inputShpFile,building)
self.progress_update.emit(i+1,self.planHeatDMM)
building = method_object.calculateConsumption(building)
building_list.append(building)
if self.boolHourlyDetailFile and building.Regstatus and building.Regprocess:
#write rows on CSV file with baseline Hourly per building
self.outputHourlyBaselineCSVFile.writeRowsCSV(building.hourlyBaselineDemandList)
#write rows on CSV file with Future Hourly per building
if self.boolRetrofittedScenarios:
self.outputHourlyFutureCSVFile.writeRowsCSV(building.hourlyFutureDemandList)
building.hourlyBaselineDemandList = []
building.hourlyFutureDemandList = []
else:
self.sendMessage("INFO","Process Cancel Request By User - Data Calculate Processing")
return False
self.progress_update.emit(len(data),self.planHeatDMM)
self.sendMessage("INFO", "Processing data calculation - Building {}/{}".format(len(data),len(data)))
self.sendMessage("INFO", "Free memory reources - CSV input file and Geometry index")
self.freeMemoryResources(self.inputCsvFile,self.inputShpFile.geometryAndRecordBuildingIndex)
#Retrieve totals for selected calculation method
if self.planHeatDMM.data.processContinue == True:
self.sendMessage("INFO", "Calculate Totalized Data.")
method_object.calculateTotalizedConsumptionDemand()
self.sendMessage("INFO", "Calculate Totalized Data Ok")
else:
self.log.write_log("INFO", "Process Cancel Request By User - Calculate Totalized Data")
return False
#Write Baseline Detail CSV file
if self.planHeatDMM.data.processContinue == True:
self.sendMessage("INFO", "Writing Output CSV - Baseline Detail file")
self.outputDetailBaselineCSVFile.writeRowsCSV(building_list)
self.sendMessage("INFO", "Writing Output CSV - Baseline Detail file Ok")
else:
self.sendMessage("INFO", "Process Cancel Request By User - Writing CSV file")
return False
#Write Future Detail CSV file
if self.boolRetrofittedScenarios == True:
if self.planHeatDMM.data.processContinue == True:
self.sendMessage("INFO", "Writing Output CSV - Future Detail file")
self.outputDetailFutureCSVFile.writeRowsCSV(building_list)
self.sendMessage("INFO", "Writing Output CSV - Future Detail file Ok")
else:
self.sendMessage("INFO", "Process Cancel Request By User - Writing CSV file")
return False
#Write Baseline Totalized CSV file
if self.planHeatDMM.data.processContinue == True:
self.sendMessage("INFO", "Writing Output CSV - Baseline Totalized file")
self.outputTotalizedBaselineCSVFile.writeRowsCSV(method_object.baselineTotalizedDemandList)
self.sendMessage("INFO", "Writing Output CSV - Baseline Totalized file Ok")
else:
self.sendMessage("INFO", "Process Cancel Request By User - Writing CSV file")
return False
#Write Future Totalized CSV file
if self.boolRetrofittedScenarios == True:
if self.planHeatDMM.data.processContinue == True:
self.sendMessage("INFO", "Writing Output CSV - Future Totalized file")
self.outputTotalizedFutureCSVFile.writeRowsCSV(method_object.futureTotalizedDemandList)
self.sendMessage("INFO", "Writing Output CSV - Future Totalized file Ok")
else:
self.sendMessage("INFO", "Process Cancel Request By User - Writing CSV file")
return False
self.sendMessage("INFO", "Free memory resources output CSV files")
self.freeMemoryResources(self.outputDetailBaselineCSVFile,self.outputDetailFutureCSVFile,\
self.outputHourlyBaselineCSVFile,self.outputHourlyFutureCSVFile,\
self.outputTotalizedBaselineCSVFile, self.outputTotalizedFutureCSVFile,callGC=False)
self.sendMessage("INFO", "Free memory resources method object data")
self.freeMemoryResources(method_object)
#Populate SHP file - Baseline
if self.planHeatDMM.data.processContinue == True:
self.sendMessage("INFO", "Populate Baseline Output Shape file")
self.outputBaselineSHPFile.populateAll(building_list)
self.sendMessage("INFO", "Populate Baseline Output Shape file Ok")
else:
self.sendMessage("INFO", "Process Cancel Request By User - populate Qgis Files")
return False
# Save QGIS files - Baseline
if self.planHeatDMM.data.processContinue == True:
self.sendMessage("INFO", "Saving Output Baseline Qgis files")
self.outputBaselineSHPFile.saveQgisFiles()
self.sendMessage("INFO", "Saving Output Baseline Qgis files Ok")
else:
self.sendMessage("INFO", "Process Cancel Request By User - Writing Qgis files")
return False
self.sendMessage("INFO", "Free memory resources baseline shape file")
self.freeMemoryResources(self.outputBaselineSHPFile)
if self.boolRetrofittedScenarios == True:
#Populate SHP file - Future
if self.planHeatDMM.data.processContinue == True:
self.sendMessage("INFO", "Populate Future Output Shape file")
self.outputFutureSHPFile.populateAll(building_list)
self.sendMessage("INFO", "Populate Future Output Shape file Ok")
else:
self.sendMessage("INFO", "Process Cancel Request By User - populate Qgis Files")
return False
# Save QGIS files - Future
if self.planHeatDMM.data.processContinue == True:
self.sendMessage("INFO", "Saving Output Future Qgis files")
self.outputFutureSHPFile.saveQgisFiles()
self.sendMessage("INFO", "Saving Output Future Qgis files Ok")
else:
self.sendMessage("INFO", "Process Cancel Request By User - Writing Qgis files")
return False
self.sendMessage("INFO", "Free memory resources future shape file")
self.freeMemoryResources(self.outputFutureSHPFile, callGC=False)
self.sendMessage("INFO", "Free memory resources building list data")
self.freeMemoryResources(building_list)
self.total, self.ok, self.error, self.skip = showResults(building_list)
result = "Processed:{} buildings - Ok:{} - Error:{} - Skipped:{}".format(self.total,self.ok, self.error,self.skip)
self.sendMessage("INFO", result)
self.log.write_log("INFO", "Simplified Proccess End")
return True
self.exec_()
except Exception as e:
self.log.write_log("ERROR ", "Thread process - Unexpected error:" + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))
self.showMessageDialog.emit("CRITICAL","ERROR", "process Unexpected error:" + str(e),self.planHeatDMM)
return False
def freeMemoryResources(self, *args, callGC=True, **kwargs):
    """Best-effort memory cleanup hook used between processing stages.

    The previous implementation looped ``del arg`` over the positional
    arguments, but deleting the loop variable only unbinds a local name -
    it cannot release the caller's references, so that loop was a no-op
    and has been removed. Objects become collectable once the caller drops
    its own references; ``gc.collect()`` then reclaims reference cycles.

    :param args: objects the caller wants released (informational only).
    :param callGC: when True, force a full garbage-collection pass.
    :param kwargs: accepted for interface compatibility; unused.
    """
    try:
        if callGC:
            gc.collect()
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; the log message is unchanged.
        self.log.write_log("ERROR ", "freeMemoryResources Unexpected error:" + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))
def assignBuildingShapeGeometryAndRecord(self, shpReader, building):
    """Attach the shapefile geometry and record matching ``building.reference``.

    On a missing reference (KeyError) or any other failure the error is
    logged and ``building.Regstatus`` is set to False instead of raising.
    """
    try:
        # Resolve the building reference to its position in the shape list.
        idx = shpReader.geometryAndRecordBuildingIndex[str(building.reference)]
        entry = shpReader.geometryAndRecordBuilding[idx]
        building.shpGeometryData = entry.shape
        building.shpRecordData = entry.record
    except KeyError:
        self.log.write_log("ERROR ","assignBuildingShapeGeometryAndRecord Not Exists building reference in shapefile id:" + str(building.reference))
        building.Regstatus = False
    except:
        self.log.write_log("ERROR ","assignBuildingShapeGeometryAndRecord Unexpected error:" + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))
        building.Regstatus = False
def sendMessage(self, level, message):
    """Write *message* to the process log and mirror it to the GUI.

    Failures are logged and surfaced through the showMessageDialog signal
    instead of propagating to the caller.
    """
    try:
        self.log.write_log(level, message)
        self.message_update.emit(message, self.planHeatDMM)
    except Exception as e:
        self.log.write_log("ERROR ", "process Unexpected error:" + str(e))
        self.showMessageDialog.emit(
            "ERROR",
            "process Unexpected error:" + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]),
            self.planHeatDMM,
        )
def jsonArgs4Java(self, log, fileArg, folderProject, shpFilename, logFileName, lidarDTMFolder, lidarDSMFolder, referentialSpace, fieldMapping, buildingUseFloorHeightDict):
    """Serialize the arguments needed by the external Java process to a JSON file.

    Key order is preserved via OrderedDict. Any failure is logged and
    re-raised to the caller.
    """
    try:
        with open(fileArg, "w") as jsonFile:
            # Build the payload in the exact order the Java side documents.
            payload = OrderedDict()
            payload["fieldMapping"] = fieldMapping
            payload["logFileName"] = logFileName
            payload["lidarDTMFolder"] = lidarDTMFolder
            payload["lidarDSMFolder"] = lidarDSMFolder
            payload["shpFilename"] = shpFilename
            payload["referentialSpace"] = referentialSpace
            payload["folderProject"] = folderProject
            payload["floorHeight"] = buildingUseFloorHeightDict
            jsonFile.write(dumps(payload))
        log.write_log("INFO", "Write JSON Args File")
    except:
        log.write_log("ERROR", "jsonArgs4Java unexpected error: " + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))
        raise
def javaLaunchProcess(self,log,javaLog, fileJava, fileJar,fileLib, mainJavaClass,fileJsonArg):
    """Launch the external Java process and block until it terminates.

    :param log: application logger exposing write_log(level, message).
    :param javaLog: path of the file that captures the Java console output.
    :param fileJava: prefix prepended to the 'java' command (path or empty).
    :param fileJar: path of the application jar; must exist.
    :param fileLib: additional classpath entry appended after the jar.
    :param mainJavaClass: fully qualified Java class to run.
    :param fileJsonArg: path of the JSON arguments file passed to Java.
    :raises NotFoundResourceException: if fileJar does not exist.
    :raises JavaProcessException: on a non-zero Java exit status, unless
        the user has requested cancellation (processContinue is False).
    """
    try:
        javaConsoleOutput = ""
        run_process = ""
        if os.path.isfile(fileJar):
            if platform.system() == 'Windows':
                #Add command Windows, to do not visible CMD
                # Probe 'java -version' with the CREATE_NO_WINDOW creation
                # flag; if the probe (or the flag) fails, fall back to
                # subprocess.SW_HIDE so the real run still hides the console.
                try:
                    run_test_process = fileJava + 'java -version '
                    CREATE_NO_WINDOW = 0x08000000  # Win32 process-creation flag
                    java_process = subprocess.Popen(run_test_process,shell=False,stdout=subprocess.DEVNULL,stderr=subprocess.DEVNULL,creationflags = CREATE_NO_WINDOW)
                    WindowMode = CREATE_NO_WINDOW
                except Exception as e:
                    print("excepcion " + str(e))
                    WindowMode = subprocess.SW_HIDE
                #run_process = fileJava + 'java -cp "' + fileJar + ';' + fileLib + '" ' + mainJavaClass + ' ' + fileJsonArg
            else:
                WindowMode=0
            foutput = open(javaLog, "w")
            # NOTE(review): ';' is the Windows classpath separator; POSIX JVMs
            # expect ':' - confirm this branch is only ever run on Windows.
            run_process = fileJava + 'java -XX:+UseG1GC -Xms1g -Xmx4g -cp "' + fileJar + ';' + fileLib + '" ' + mainJavaClass + ' ' + fileJsonArg
            #self.planHeatDMM.resources.javaProcessObject = subprocess.run(run_process,check=True,shell=False,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
            #java_process = subprocess.Popen(run_process,shell=False,stdout=subprocess.DEVNULL,stderr=subprocess.DEVNULL,creationflags = WindowMode)
            java_process = subprocess.Popen(run_process,shell=False,stdout=foutput,stderr=foutput,creationflags = WindowMode)
            # Expose the handle so a cancel request elsewhere can act on it.
            self.planHeatDMM.resources.javaProcessObject = java_process
            log.write_log("INFO","Java execute command " + str(java_process.args))
            # Block until Java exits; console output lands in javaLog.
            returnCode = java_process.wait()
            foutput.close()
            if returnCode and self.planHeatDMM.data.processContinue is not False:
                #Process Error
                # A non-zero status only counts as an error when the user has
                # not cancelled the run.
                raise JavaProcessException("Error on Java Process , exit status code:{:d}".format(returnCode))
        else:
            raise NotFoundResourceException("jar file not found at location " + fileJar)
    except JavaProcessException:
        log.write_log("ERROR","Execute error " + run_process)
        log.write_log("ERROR", "javaLaunchProcess JavaProcessException JAVA error: " + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))
        raise
    except subprocess.CalledProcessError as e:
        # NOTE(review): CalledProcessError is raised by subprocess.run(check=True)
        # (the commented-out variant above), not by Popen.wait - presumably kept
        # for safety; confirm before removing.
        javaConsoleOutput = str(e.stdout, 'utf-8', errors='ignore')
        log.write_log("ERROR","Java Console Output " + javaConsoleOutput)
        log.write_log("ERROR","Execute error " + run_process)
        log.write_log("ERROR", "javaLaunchProcess CalledProcessError JAVA error: " + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))
        raise
    except OSError as e:
        javaConsoleOutput = str(e)
        log.write_log("ERROR","Java Console Output " + javaConsoleOutput)
        log.write_log("ERROR","Execute error " + run_process)
        log.write_log("ERROR", "javaLaunchProcess OSError JAVA error: " + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))
        raise
    except Exception as e:
        log.write_log("ERROR", "javaLaunchProcess launching new process JAVA Unexpected error: " + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))
        raise
def initizalizeLogExecuteOptions(self, file_handler):
    """Truncate the execute-options file when option logging is enabled.

    Only acts when Config.OPTIONS_FILE is 'y' (case-insensitive); errors
    are logged and swallowed.
    """
    try:
        if Config.OPTIONS_FILE.lower() == "y":
            # Opening in "w" mode truncates any previous content.
            with open(file_handler, "w") as optionsFile:
                pass
    except Exception as e:
        self.log.write_log("ERROR", "initizalizeLogExecuteOptions Unexpected error: " + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))
def writeLogExecuteOptions(self,file_handler, message):
try:
if Config.OPTIONS_FILE.lower() == "y":
with open(file_handler, "a") | |
<gh_stars>1-10
# @Author : <NAME>
# @Email : <EMAIL>
# Simulation period and output options passed to EnergyPlus.
SimuData = \
{ 'Begin_Day_of_Month' : 1,
  'Begin_Month' : 1,
  'End_Day_of_Month' : 31,
  'End_Month' : 12,
  'SaveLogFiles' : False, #if True, the computing folder is not removed, thus all EnergyPlus output files are preserved
  #'FloorZoningLevel' : True, #1 zone per floor; if False --> 1 zone per building bloc
}
# Files need to be located in the weather folder of EnergyPlus, as the same path is used afterward to launch the simulation.
WeatherFile = \
{'Loc' : 'WeatherData/USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw', #WE_Stockholm.Arlanda.024600_IWEC.epw'
}
# This dict gives all the material characteristics.
# There are 2 layers maximum; the words Inertia and Insulation are key factors further in the code. If one layer is wanted, just comment the other one.
# The basement is considered not heated and thus never gets an insulation layer.
BaseMaterial = \
{'Window' : {'UFactor' : 1.9,
             'Solar_Heat_Gain_Coefficient' : 0.7,
             'Visible_Transmittance' : 0.8,
            },
 'Wall Inertia' : {'Thickness' : 0.2, #this layer will be considered also for the basement walls
             'Conductivity' : 0.9,
             'Roughness' : "Rough",
             'Density' : 2300,
             'Specific_Heat' : 1000,
            },
 'Wall Insulation' : {'Thickness' : 0.2,
             'Conductivity' : 0.03,
             'Roughness' : "Rough",
             'Density' : 150,
             'Specific_Heat' : 1000,
             #'Thermal_Absorptance' : 0.001,
            },
 'Basement Floor' : {'Thickness' : 0.1, #this layer will be considered also for the basement floor
             'Conductivity' : 0.9,
             'Roughness' : "Rough",
             'Density' : 2300,
             'Specific_Heat' : 1000,
            },
 # 'Basement Floor Insulation' : {'Thickness' : 0.05, #not needed as even without basement the Heated1rstFloor is taken for the first floor
 #             'Conductivity' : 0.25*0.1,
 #             'Roughness' : "Rough",
 #             'Density' : 1000,
 #             'Specific_Heat' : 1000,
 #            },
 # 'Roof Inertia' : {'Thickness' : 0.05, #not needed unless one wants to have inertia in the roof layer
 #             'Conductivity' : 0.15*0.1,
 #             'Roughness' : "Rough",
 #             'Density' : 1000,
 #             'Specific_Heat' : 1000,
 #            },
 'Roof Insulation' : {'Thickness' : 0.3,
             'Conductivity' : 0.03,
             'Roughness' : "Rough",
             'Density' : 150,
             'Specific_Heat' : 1000,
             #'Thermal_Absorptance' : 0.001,
            },
 'Heated1rstFloor Inertia' : {'Thickness' : 0.1,
             'Conductivity' : 0.9,
             'Roughness' : "Rough",
             'Density' : 2300,
             'Specific_Heat' : 1000,
            },
 'Heated1rstFloor Insulation' : {'Thickness' : 0.15,
             'Conductivity' : 0.035,
             'Roughness' : "Rough",
             'Density' : 150,
             'Specific_Heat' : 1000,
             #'Thermal_Absorptance' : 0.001,
            },
}
# This dict is for specification of the internal-mass equivalence.
# The material should represent the overall mean material of all partitions and furniture.
# The weight per zone area gives the quantity, and the average thickness enables computing the surface for heat transfer:
# the mass gives a volume thanks to the density, which gives a surface thanks to the average thickness.
InternalMass = \
{'HeatedZoneIntMass' : {
             'Thickness' : 0.1, #m this will define the surface in contact with the zone
             'Conductivity' : 0.3,
             'Roughness' : "Rough",
             'Density' : 600,
             'Specific_Heat' : 1400,
             'WeightperZoneArea' : 40, #kg/m2
            },
 'NonHeatedZoneIntMass' : {
             'Thickness' : 0.1, #m this will define the surface in contact with the zone
             'Conductivity' : 0.3,
             'Roughness' : "Rough",
             'Density' : 600,
             'Specific_Heat' : 1400,
             'WeightperZoneArea' : 40, #kg/m2
            },
}
# This dict gives the elements for the Domestic Hot Water: the external file for the water taps and the inlet cold water temperature.
# If empty it is no longer taken into account. If new files are given, these should be present in the External file folder.
ExtraEnergy = \
{} #if no Domestic Hot Water is to be considered, it still needs an empty dict
# ExtraEnergy = \
# {'Name' : 'DHW',
#  'WatertapsFile':'ExternalFiles/mDHW_Sum_over_40.txt', #this file is in l/min and will be converted into m3/s afterward. it needs to have hourly values
#  'ColdWaterTempFile' :'ExternalFiles/ColdWaterTemp.txt',
#  'HotWaterSetTemp': 55,
#  'WaterTapsMultiplier':1e-4/6/40, #this is because the file given above is for 40 apartments and is in l/min where we need m3/s. in the code it is afterward multiplied by the number of apartments in the building
# }
# This dict is for the shading paradigm. There are two files that we need: the first one is the main geojson that contains all buildings and their properties;
# the other one contains, for each shading surface id, the vertex points and the building id in order to catch its height.
# To externalize as much as possible, these elements are reported in the dict below.
GeomElement = \
{'BuildIDKey' : ['<KEY>', 'FormularId','objectid','OBJECTID'],
 'ShadingIdKey' : 'vaggid',
 'BuildingIdKey' : 'byggnadsid',
 'VertexKey':'geometries',
 'MaxShadingDist': 200,
 'DistanceTolerance': 0.2, #this is a threshold below which every edge is removed and vertexes merged
}
# This dict gives information on occupancy times each day. If DCV = True, the airflow will follow the number of persons
# and the schedule; if not, it will be based only on the extra airflow rate but without schedule (all the time).
# If some separation (ventilation and people) is needed, then people heat generation should be converted into Electric Load, as thus airflow can be
# related to a schedule; otherwise... impossible.
BasisElement = \
{'Office_Open': '08:00',
 'Office_Close': '18:00',
 'DemandControlledVentilation' : True,
 'OccupBasedFlowRate': 7, # l/s/person
 'OccupHeatRate' : 70, #W per person
 'EnvLeak': 0.8, # l/s/m2 at 50Pa
 'BasementAirLeak': 1, #in Air change rate [vol/hour]
 'wwr': 0.25,
 'ExternalInsulation' : False,
 'ElecYearlyLoad' :15, #this is the W/m2 value that will be applied constantly for appliances and occupancy consumption impact. It is replaced by the values in EPCs if available
 'IntLoadType' : 'Cste', #change either by 'Cste', 'winter', or 'summer' for reversed sigmoid or sigmoid; this will generate hourly values files in the InputFiles folder
 'IntLoadMultiplier': 1, #this is a multiplier the modeler would like to play with for calibration
 'IntLoadCurveShape':3, #this defines the slope of the curves
 'OffOccRandom' : False,
 'AreaBasedFlowRate' : 0.35, #l/s/m2
 'AreaBasedFlowRateDefault' : 0.35, #l/s/m2 This will not be changed by EPCs and is needed if EPCs report only the balanced ventilation flow with HR for buildings that have 2 systems and one without recovery.
 'setTempUpL' : [25,25], #only one has to be defined for no temperature modulation
 'setTempLoL' : [21,21], #only one has to be defined for no temperature modulation
 'ComfortTempOff' :'23:00', #hour at which the first temperature set point is considered
 'ComfortTempOn': '06:00', #hour at which the second temperature set point is considered
 'ACH_freecool' :4, #this is the vol/hr of extra ventilation when free cooling is on
 'intT_freecool' : 26, #internal temperature threshold for free cooling (opening windows with fixed ACH)
 'dT_freeCool': 1, #Tint-Text to authorize free cooling to be turned on
 'AirRecovEff' : 0.65, #efficiency if heat is recovered from ventilation
 'HVACLimitMode': 'NoLimit', #'LimitCapacity', #can be NoLimit or LimitFlowRate or LimitFlowRateAndCapacity
 'HVACPowLimit' : 25, #in Watt/m2
}
# Definition of persons/m2... completely arbitrary, but we still need some values.
# These proposals are taken from <NAME>'s report and from personal suggestions.
# To be enhanced!!! BBR also gives some.
OccupType = \
{'Residential_key' : 'EgenAtempBostad', 'Residential_Rate': [0.02, 0.02],
 'Hotel_key' : 'EgenAtempHotell', 'Hotel_Rate': [0.01, 0.02],
 'Restaurant_key' : 'EgenAtempRestaurang', 'Restaurant_Rate': [0.01, 0.09],
 'Office_key' : 'EgenAtempKontor', 'Office_Rate': [0.01, 0.09],
 'FoodMarket_key' : 'EgenAtempLivsmedel', 'FoodMarket_Rate': [0.01, 0.09],
 'GoodsMarket_key' : 'EgenAtempButik', 'GoodsMarket_Rate': [0.01, 0.09],
 'Shopping_key' : 'EgenAtempKopcentrum', 'Shopping_Rate': [0.01, 0.09], #'I still wonder what is the difference with goods'
 'Hospital24h_key' : 'EgenAtempVard', 'Hospital24h_Rate': [0.01, 0.09],
 'Hospitalday_key' : 'EgenAtempHotell', 'Hospitalday_Rate': [0.01, 0.09],
 'School_key' : 'EgenAtempSkolor', 'School_Rate': [0.01, 0.1],
 'IndoorSports_key' : 'EgenAtempBad', 'IndoorSports_Rate': [0.01, 0.1],
 'Other_key' : 'EgenAtempOvrig', 'Other_Rate': [0.01, 0.1],
 'AssmbPlace_key' : 'EgenAtempTeater', 'AssmbPlace_Rate': [0.01, 0.2],
 # 'OccupRate': OccupRate,
}
# This dict deals with the ventilation systems (Swedish EPC keys).
VentSyst = \
{'BalX' : 'VentTypFTX',
 'Exh' : 'VentTypF',
 'Bal' : 'VentTypFT',
 'Nat' : 'VentTypSjalvdrag',
 'ExhX' : 'VentTypFmed',
}
# This dict defines the acceptable limits for the elements listed, as well as the Swedish key for the DataBase.
DBLimits = \
{'surface_key': ['EgenAtemp','SHAPE.AREA'], 'surface_lim': [0, 50000],
 'nbfloor_key': 'EgenAntalPlan', 'nbfloor_lim': [0, 100],
 'nbBasefloor_key': 'EgenAntalKallarplan', 'nbBasefloor_lim': [0, 4],
 'year_key': 'EgenNybyggAr', 'year_lim': [0, 2022],
 'nbAppartments_key': 'EgenAntalBolgh', 'nbAppartments_lim':[0, 100],
 'height_key': ['height', 'SHAPE.LEN','st_lengthshape'], 'height_lim': [0, 100],
 'AreaBasedFlowRate_key': 'EgenProjVentFlode', 'AreaBasedFlowRate_lim': [0.35, 10],
 'nbStairwell_key': 'EgenAntalTrapphus', 'nbStairwell_lim': [0, 100],
}
#this dict defines the EPC measured key word
EPCMeters = \
{'Heating':
{'OilHeating_key' : '<KEY>', 'OilHeatingCOP' : 0.85,
'GasHeating_key' : 'EgiGasUPPV', 'GasHeatingCOP' : 0.9,
'WoodHeating_key' : 'EgiVedUPPV', 'WoodHeatingCOP' : 0.75,
'PelletHeating_key' : 'EgiFlisUPPV', 'PelletHeatingCOP' : 0.75,
'BioFuelHeating_key' : 'EgiOvrBiobransleUPPV', 'BioFuelHeatingCOP' : 0.75,
'ElecWHeating_key' : 'EgiElVattenUPPV', 'ElecWHeatingCOP' : 1,
'ElecHeating_key' : 'EgiElDirektUPPV', 'ElecHeatingCOP' : | |
import re
import sys
sys.path.append('deps')
import glob
import serial
import subprocess
import time
import os
import zipfile
from collections import namedtuple
from os import listdir
from os.path import isfile, join
import platform
# Defaults
# Each DEFAULT_* value below can be overridden at runtime through an
# environment variable of the same name minus the DEFAULT_ prefix
# (see the "Overrides from env vars" section further down).
DEFAULT_VERSION = 'v1.1.0'
DEFAULT_CHOOSE_OPERATION = False
DEFAULT_INTERACTIVE = False
DEFAULT_DEBUG = False
DEFAULT_SILENT = True
DEFAULT_HOST = 'https://pythings.io'
DEFAULT_ARTIFACTS_PATH = 'artifacts'
DEFAULT_ALLOW_DOWNLOAD = False
DEFAULT_EXPERIMENTAL = False
# Default Python
# Probe for a working interpreter launcher, in order of preference:
# "python3", then the Windows "py" launcher, then plain "python".
# subprocess.call returns the exit status; a non-zero status is turned
# into OSError so that both "command failed" and "command not found"
# (FileNotFoundError, a subclass of OSError) take the same fallback path.
try:
    # Can we use "python3"?
    if subprocess.call(['python3', '--version'], stdout=open(os.devnull, 'w'), stderr=subprocess.STDOUT):
        raise OSError
except OSError:
    try:
        # Can we use "py"?
        if subprocess.call(['py', '--version'], stdout=open(os.devnull, 'w'), stderr=subprocess.STDOUT):
            raise OSError
    except OSError as e:
        # Fallback on a generic Python
        DEFAULT_PYTHON='python'
    else:
        # Use Py
        DEFAULT_PYTHON='py'
else:
    # Use Python3
    DEFAULT_PYTHON='python3'
# Platform-specific
# BUG FIX: the original set PLATFORM_DEFAULT_PYTHON only on the non-Windows
# branch, so any later use of it on Windows raised a NameError. It is now
# defined unconditionally (same value the non-Windows branch used).
PLATFORM_DEFAULT_PYTHON = 'python'
if platform.system() == 'Windows':
    # cmd.exe quoting uses double quotes.
    PLATFORM_SAFE_QUOTE = '"'
else:
    # POSIX shells: single quote is the safe literal quote.
    PLATFORM_SAFE_QUOTE = '\''
# Booleanize utility
def booleanize(var):
    """Coerce *var* to a bool.

    Accepts bools (returned as-is), strings (only the literal "false",
    in any case, is falsy) and numbers (falsy only when zero). Any other
    type prints an error and terminates the process with status 1.
    """
    if isinstance(var, bool):
        return var
    if isinstance(var, str):
        # Only the literal string "false" counts as False.
        return var.lower() != 'false'
    if isinstance(var, (int, float)):
        return var != 0
    print('')
    print('Value "{}" is not valid'.format(var))
    print('')
    sys.exit(1)
# Overrides from env vars
# Each setting falls back to its DEFAULT_* value above; boolean-ish values
# go through booleanize(), where only the literal string "false" (any
# case) or the number 0 is treated as False.
VERSION = os.environ.get('VERSION', DEFAULT_VERSION)
CHOOSE_OPERATION = booleanize(os.environ.get('CHOOSE_OPERATION', DEFAULT_CHOOSE_OPERATION))
INTERACTIVE = booleanize(os.environ.get('INTERACTIVE', DEFAULT_INTERACTIVE))
DEBUG = booleanize(os.environ.get('DEBUG', DEFAULT_DEBUG))
SILENT = booleanize(os.environ.get('SILENT', DEFAULT_SILENT))
PYTHON = os.environ.get('PYTHON', DEFAULT_PYTHON)
HOST = os.environ.get('HOST', DEFAULT_HOST)
ARTIFACTS_PATH = os.environ.get('ARTIFACTS_PATH', DEFAULT_ARTIFACTS_PATH)
ALLOW_DOWNLOAD = booleanize(os.environ.get('ALLOW_DOWNLOAD', DEFAULT_ALLOW_DOWNLOAD))
EXPERIMENTAL = booleanize(os.environ.get('EXPERIMENTAL', DEFAULT_EXPERIMENTAL))
# Extra external settings
# These have no defaults: None means "ask the user or autodetect later".
PORT = os.environ.get('PORT', None)
PLATFORM = os.environ.get('PLATFORM', None)
OPERATION = os.environ.get('OPERATION', None)
FROZEN = os.environ.get('FROZEN', None)
if FROZEN is not None:
    FROZEN = booleanize(FROZEN)
# Support vars
only_console = False
serial_port = None
# Parse command line arguments: "--console" is a flag, the script name is
# skipped, and any other argument is taken as the serial port.
for arg in sys.argv:
    if arg.endswith('.py'):
        continue
    if arg == '--console':
        only_console = True
    else:
        if serial_port:
            raise ValueError('Two serial port arguments given or unrecognized option "{}"'.format(arg))
        # FIX: this conflict check was previously written as
        # "if serial_port and PORT" *after* the raise above, making it
        # unreachable — a command-line port was then silently overridden
        # by the PORT env var below. Reject the conflict explicitly.
        if PORT:
            raise ValueError('Cannot set port both via env var and command line arguments')
        serial_port = arg
# Set serial port if given via env var
if PORT:
    serial_port = PORT
#========================
# Utility functions
#========================
def sanitize_file_chars(string):
    """Quote *string* so it can be used as a single shell argument.

    Follows the POSIX single-quoting rule (a single-quote cannot occur
    inside single-quotes): the string is split on ``'`` and every piece
    is wrapped in the platform's safe quote character, then the wrapped
    pieces are joined with that same quote character.
    """
    quote = PLATFORM_SAFE_QUOTE
    quoted_pieces = [quote + piece + quote for piece in string.split("'")]
    return quote.join(quoted_pieces)
def serial_ports():
    """Return the names of serial ports that can actually be opened.

    :raises EnvironmentError:
        On platforms other than Windows, Linux, Cygwin and macOS.
    :returns:
        A list of usable serial port names.
    """
    if sys.platform.startswith('win'):
        candidates = ['COM%s' % (i + 1) for i in range(256)]
    elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
        # this excludes your current terminal "/dev/tty"
        candidates = glob.glob('/dev/tty[A-Za-z]*')
    elif sys.platform.startswith('darwin'):
        candidates = glob.glob('/dev/tty.*')
    else:
        raise EnvironmentError('Unsupported platform')
    usable = []
    for name in candidates:
        try:
            # Opening (and immediately closing) the port proves it exists
            # and is not exclusively held by another process.
            serial.Serial(name).close()
        except (OSError, serial.SerialException):
            continue
        usable.append(name)
    return usable
def valid(line):
    """Return True when *line* looks like meaningful board output.

    A line is considered valid if it is non-empty and contains any of the
    log-level, banner or error keywords below; everything else is noise.
    """
    if not line:
        return False
    keywords = (
        # Log levels
        'INFO', 'DEBUG', 'ERROR', 'WARNING', 'CRITICAL',
        # Startup banner lines
        'Version:', 'Platform', 'Thing ID', '|-------', 'Starting',
        # Tracebacks and error text
        'Error', 'Exception', 'File', 'Cannot',
    )
    return any(token in line for token in keywords)
def sanitize_encoding(text):
    """Encode *text* as UTF-8 bytes, silently dropping unencodable characters."""
    return text.encode(encoding="utf-8", errors="ignore")
def format_shell_error(stdout, stderr, exit_code):
    """Return a human-readable report of a failed shell command.

    Shows the exit code plus the (encoding-sanitized) stdout and stderr,
    framed by banner lines.
    """
    banner = '#---------------------------------'
    parts = [
        '\n' + banner,
        '\n# Shell exited with exit code {}'.format(exit_code),
        '\n' + banner + '\n',
        '\nStandard output: "',
        str(sanitize_encoding(stdout)),
        '"\n\nStandard error: "',
        str(sanitize_encoding(stderr)),
        '"\n\n',
        banner + '\n',
        '# End Shell output\n',
        banner + '\n',
    ]
    return ''.join(parts)
def os_shell(command, capture=False, verbose=False, interactive=False, silent=False):
    """Run *command* in a shell.

    By default prints a failure report on error and returns True/False.
    With ``capture=True`` returns an ``Output(stdout, stderr, exit_code)``
    namedtuple instead of printing. With ``verbose`` or ``interactive``
    the command inherits the terminal and only a boolean is returned.
    ``capture`` and ``verbose`` are mutually exclusive.
    """
    if capture and verbose:
        raise Exception('You cannot ask at the same time for capture and verbose, sorry')

    # Log command
    if DEBUG:
        print('Executing command: {}'.format(command))

    # Interactive/verbose mode: let the command talk to the terminal directly.
    if verbose or interactive:
        return subprocess.call(command, shell=True) == 0

    # Otherwise capture both streams.
    process = subprocess.Popen(
        command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True
    )
    stdout, stderr = process.communicate()
    exit_code = process.wait()

    # Decode (Python 3) and strip a single trailing newline, if present.
    stdout = stdout.decode(encoding='UTF-8')
    stderr = stderr.decode(encoding='UTF-8')
    if stdout.endswith('\n'):
        stdout = stdout[:-1]
    if stderr.endswith('\n'):
        stderr = stderr[:-1]

    Output = namedtuple('Output', 'stdout stderr exit_code')
    if capture:
        return Output(stdout, stderr, exit_code)
    if exit_code != 0:
        print(format_shell_error(stdout, stderr, exit_code))
        return False
    if not silent:
        # Just print stdout and stderr cleanly
        print(stdout)
        print(stderr)
    return True
def abort(msg):
    """Print *msg* and terminate the installer with exit status 1.

    In interactive runs (no OPERATION env var, or INTERACTIVE set) wait
    for a keypress first so the user can read the message.
    """
    print('\n{}\n'.format(msg))
    if (not OPERATION) or INTERACTIVE:
        print('\nPress any key to exit')
        try:
            raw_input()  # Python 2
        except:
            input()  # Python 3 (raw_input is undefined there)
        print('')
    sys.exit(1)
def download(url, dest=''):
    """Download *url* and save it under *dest* using the URL's basename.

    Aborts the installer when downloads are disabled (ALLOW_DOWNLOAD is
    False). Works on both Python 2 (urllib2) and Python 3 (urllib.request).
    """
    if not ALLOW_DOWNLOAD:
        abort('Sorry, I could not find the file within the installer and I am not allowed to download.')
    file_name = url.split('/')[-1]
    try:
        # Python 2
        import urllib2
        response = urllib2.urlopen(url)
        data = response.read()
    except ImportError:
        # Python 3
        import urllib.request
        response = urllib.request.urlopen(url)
        data = response.read()
    # FIX: the Python-2 branch previously opened the output file with a
    # bare open()/close() pair, leaking the handle if the write raised.
    # Both branches now download first and write under a context manager.
    with open(dest + file_name, 'wb') as f:
        f.write(data)
#========================
# Main
#========================
try:
if not only_console:
if not OPERATION:
print('')
print('-----------------------------------------------')
print(' Welcome to PythingsOS {} installer '.format(VERSION))
print('-----------------------------------------------')
print('')
print('Notes:')
#print(' * An active Internet connection is required.')
print(' * An active serial connection to your board is required.')
print(' - Most common USB-to-serial drivers here: {}/downloads.'.format(HOST))
print(' - Some boards require quality USB cables, switch cable in case of problems.')
print(' * On Linux, run this program as root (i.e. "sudo installer.sh")')
print('')
# Create tmp dir if not already present
if not os.path.isdir('tmp'):
os.mkdir('tmp')
if not PLATFORM:
print('On which platform do you want to install?')
print('')
print(' 1) Esp8266')
print(' 2) Esp32')
print(' 3) Esp32 + SIM800')
if EXPERIMENTAL:
print(' 4) Esp8266 + SIM800 (experimental)')
print('')
sys.stdout.write('Your choice (number): ')
try:
platform_id = input()
except:
abort('Error, please type a valid numerical choice')
platforms={}
platforms[1] = 'esp8266'
platforms[2] = 'esp32'
platforms[3] = 'esp32_sim800'
if EXPERIMENTAL:
platforms[4] = 'esp8266_sim800'
try:
platform_id = int(platform_id)
platform = platforms[platform_id]
except:
abort('Error, please type a valid numerical choice')
else:
if not PLATFORM in ['esp8266', 'esp8266_sim800', 'esp32', 'esp32_sim800']:
abort('Error, got unsupported platform "{}"'.format(PLATFORM))
platform = PLATFORM
else:
platform_id = None
platform = None
if (not only_console) and CHOOSE_OPERATION:
print('')
print('What operation do you want to perform?')
print('')
print(' 1) Flash and install PythingsOS')
print(' 2) Only install PythingsOS')
print(' 3) Open a serial console')
print('')
sys.stdout.write('Your choice (number): ')
try:
operation = int(input())
if not operation in [1,2,3]:
raise
except:
abort('Error, please type a valid numerical choice')
else:
if only_console:
operation=3
else:
operation=1
print('----------')
print(platform)
#==========================
# Set operations
#==========================
if OPERATION:
if OPERATION == 'flash':
flash = True
copy = False
console = False
elif OPERATION == 'copy':
flash = False
copy = True
console = False
elif OPERATION == 'console':
flash = False
copy = False
console = True
else:
abort('Error, got invalid OPERATION: "{}"'.format(OPERATION))
else:
# Set steps
if operation == 1:
flash = True
copy = True
console = True
elif operation == 2:
flash = False
copy = True
console = True
elif operation == 3:
flash = False
copy = False
console = True
else:
abort('Consistency exception')
# Ask also if frozen or non frozen for esp8266
frozen = False
if flash and platform=='esp8266_sim800':
print('')
print('')
print('PythingsOS for the Esp8266 + SIM800 platform only comes in frozen version.')
print('')
print('With a frozen version you will not be able to update PythingsOS')
print('remotely as it will be frozen into the firmware, but you will')
print('have more memory for your Apps and | |
dictionary
Keyword arguments passed to plt.quiver()
Returns
-------
quiver : matplotlib.pyplot.quiver
Vectors of specific discharge.
"""
warnings.warn(
"plot_discharge() has been deprecated and will be replaced "
"in version 3.3.5. Use plot_vector() instead, which should "
"follow after postprocessing.get_specific_discharge()",
DeprecationWarning,
)
if self.mg.grid_type != "structured":
err_msg = "Use plot_specific_discharge for " "{} grids".format(
self.mg.grid_type
)
raise NotImplementedError(err_msg)
else:
if self.mg.top is None:
err = (
"StructuredGrid must have top and "
"botm defined to use plot_discharge()"
)
raise AssertionError(err)
delr = self.mg.delr
delc = self.mg.delc
top = np.copy(self.mg.top)
botm = np.copy(self.mg.botm)
laytyp = None
hnoflo = 999.0
hdry = 999.0
laycbd = None
if self.model is not None:
if self.model.laytyp is not None:
laytyp = self.model.laytyp
if self.model.hnoflo is not None:
hnoflo = self.model.hnoflo
if self.model.hdry is not None:
hdry = self.model.hdry
if self.model.laycbd is not None:
laycbd = self.model.laycbd
if laycbd is not None and 1 in laycbd:
active = np.ones((botm.shape[0],), dtype=int)
kon = 0
for cbd in laycbd:
if cbd > 0:
kon += 1
active[kon] = 0
botm = botm[active == 1]
# If no access to head or laytyp, then calculate confined saturated
# thickness by setting laytyp to zeros
if head is None or laytyp is None:
head = np.zeros(botm.shape, np.float32)
laytyp = np.zeros((botm.shape[0],), dtype=int)
# calculate the saturated thickness
sat_thk = plotutil.PlotUtilities.saturated_thickness(
head, top, botm, laytyp, [hnoflo, hdry]
)
# Calculate specific discharge
qx, qy, qz = plotutil.PlotUtilities.centered_specific_discharge(
frf, fff, flf, delr, delc, sat_thk
)
return self.plot_vector(qx, qy, istep, jstep, normalize, **kwargs)
def plot_pathline(self, pl, travel_time=None, **kwargs):
    """
    Plot the MODPATH pathlines.

    Parameters
    ----------
    pl : list of rec arrays or a single rec array
        rec array or list of rec arrays is data returned from
        modpathfile PathlineFile get_data() or get_alldata()
        methods. Data in rec array is 'x', 'y', 'z', 'time',
        'k', and 'particleid'.
    travel_time : float or str
        travel_time is a travel time selection for the displayed
        pathlines. If a float is passed then pathlines with times
        less than or equal to the passed time are plotted. If a
        string is passed a variety of logical constraints can be added
        in front of a time value (<=, <, >=, >), e.g.
        travel_time='< 10000'. (default is None)
    kwargs : layer, ax, colors. The remaining kwargs are passed
        into the LineCollection constructor. If layer='all',
        pathlines are output for all layers

    Returns
    -------
    lc : matplotlib.collections.LineCollection
    """
    from matplotlib.collections import LineCollection

    # make sure pathlines is a list
    if not isinstance(pl, list):
        pl = [pl]

    # Resolve the layer selection; "all" (kon == -1) plots every layer.
    if "layer" in kwargs:
        kon = kwargs.pop("layer")
        if isinstance(kon, bytes):
            kon = kon.decode()
        if isinstance(kon, str):
            if kon.lower() == "all":
                kon = -1
            else:
                kon = self.layer
    else:
        kon = self.layer

    marker = kwargs.pop("marker", None)
    markersize = kwargs.pop("markersize", None)
    markersize = kwargs.pop("ms", markersize)
    markercolor = kwargs.pop("markercolor", None)
    markerevery = kwargs.pop("markerevery", 1)
    ax = kwargs.pop("ax", self.ax)

    if "colors" not in kwargs:
        kwargs["colors"] = "0.5"

    linecol = []
    markers = []
    for p in pl:
        tp = plotutil.filter_modpath_by_travel_time(p, travel_time)

        # transform data!
        x0r, y0r = geometry.transform(
            tp["x"],
            tp["y"],
            self.mg.xoffset,
            self.mg.yoffset,
            self.mg.angrot_radians,
        )
        # build polyline array
        arr = np.vstack((x0r, y0r)).T

        # select based on layer
        if kon >= 0:
            kk = p["k"].copy().reshape(p.shape[0], 1)
            kk = np.repeat(kk, 2, axis=1)
            arr = np.ma.masked_where((kk != kon), arr)
        else:
            arr = np.ma.asarray(arr)

        # append line to linecol if there is some unmasked segment.
        # FIX: this append was previously duplicated (the same
        # "if not arr.mask.all(): linecol.append(arr)" block appeared
        # twice), so every pathline was drawn twice in the collection.
        if not arr.mask.all():
            linecol.append(arr)
            if marker is not None:
                for xy in arr[::markerevery]:
                    if not np.all(xy.mask):
                        markers.append(xy)

    # create line collection
    lc = None
    if len(linecol) > 0:
        lc = LineCollection(linecol, **kwargs)
        ax.add_collection(lc)
        if marker is not None:
            markers = np.array(markers)
            ax.plot(
                markers[:, 0],
                markers[:, 1],
                lw=0,
                marker=marker,
                color=markercolor,
                ms=markersize,
            )
    return lc
def plot_timeseries(self, ts, travel_time=None, **kwargs):
    """
    Plot MODPATH timeseries records.

    Thin wrapper around :meth:`plot_pathline`: the timeseries rec
    array(s) are drawn as pathlines, with a user-supplied line ``color``
    also reused as the marker color.

    Parameters
    ----------
    ts : list of rec arrays or a single rec array
        Data returned from modpathfile TimeseriesFile get_data() or
        get_alldata(); fields are 'x', 'y', 'z', 'time', 'k' and
        'particleid'.
    travel_time : float or str
        Travel-time selection, e.g. ``10000`` or ``'< 10000'``; passed
        through unchanged to plot_pathline. (default is None)
    kwargs : layer, ax, colors. The remaining kwargs are passed
        into the LineCollection constructor.

    Returns
    -------
    lo : list of Line2D objects
    """
    # Markers inherit the explicit line color, if one was given.
    if "color" in kwargs:
        kwargs["markercolor"] = kwargs["color"]
    return self.plot_pathline(ts, travel_time=travel_time, **kwargs)
def plot_endpoint(
    self,
    ep,
    direction="ending",
    selection=None,
    selection_direction=None,
    **kwargs
):
    """
    Plot the MODPATH endpoints.

    Parameters
    ----------
    ep : rec array
        A numpy recarray with the endpoint particle data from the
        MODPATH 6 endpoint file.
    direction : str
        Whether 'starting' or 'ending' particle locations are plotted.
        (default is 'ending')
    selection : tuple
        Zero-based (layer, row, column) used to select a subset of
        particle endpoints (e.g. a well location for a capture zone).
        If None, all endpoints for *direction* are plotted.
    selection_direction : str
        Whether the selection applies to starting or ending locations;
        defaults to the opposite of *direction* when a selection is given.
    kwargs : ax, c, s or size, colorbar, colorbar_label, shrink. The
        remaining kwargs are passed into matplotlib's scatter. With
        colorbar=True a colorbar is added, labelled by colorbar_label
        and sized by shrink.

    Returns
    -------
    sp : matplotlib.pyplot.scatter
    """
    ax = kwargs.pop("ax", self.ax)

    tep, _, xp, yp = plotutil.parse_modpath_selection_options(
        ep, direction, selection, selection_direction
    )

    # Color: a flat user-supplied color, or travel time by default.
    if "c" in kwargs:
        c = np.empty((tep.shape[0]), dtype="S30")
        c.fill(kwargs.pop("c"))
    else:
        c = tep["time"] - tep["time0"]

    # Marker size: "s" or "size", both interpreted as a diameter and
    # squared into the area matplotlib expects.
    s = kwargs.pop("s", np.sqrt(50))
    s = float(kwargs.pop("size", s)) ** 2.0

    # Colorbar options.
    createcb = kwargs.pop("colorbar", False)
    colorbar_label = kwargs.pop("colorbar_label", "Endpoint Time")
    shrink = float(kwargs.pop("shrink", 1.0))

    # Rotate/translate model coordinates into map coordinates.
    x0r, y0r = geometry.transform(
        tep[xp],
        tep[yp],
        self.mg.xoffset,
        self.mg.yoffset,
        self.mg.angrot_radians,
    )
    pts = np.vstack((x0r, y0r)).T

    sp = ax.scatter(pts[:, 0], pts[:, 1], c=c, s=s, **kwargs)

    # Optional colorbar for travel times.
    if createcb:
        cb = plt.colorbar(sp, ax=ax, shrink=shrink)
        cb.set_label(colorbar_label)
    return sp
class DeprecatedMapView(PlotMapView):
"""
Deprecation handler for the PlotMapView class
Parameters
----------
model : flopy.modflow.Modflow object
modelgrid : flopy.discretization.Grid object
ax : matplotlib.pyplot.axes object
layer : int
model layer to plot, default is layer 1
extent : tuple of floats
(xmin, xmax, ymin, ymax) will be used to specify axes limits. If None
then these will be calculated based on grid, coordinates, and rotation.
"""
def __init__(
    self, model=None, modelgrid=None, ax=None, layer=0, extent=None
):
    # Deprecation shim: forwards all arguments unchanged to the modern
    # PlotMapView constructor.
    super().__init__(
        model=model, modelgrid=modelgrid, ax=ax, layer=layer, extent=extent
    )
def plot_discharge(
self,
frf,
fff,
dis=None,
flf=None,
head=None,
istep=1,
jstep=1,
normalize=False,
**kwargs
):
"""
Use quiver to plot vectors. Deprecated method that uses
the old function call to pass the method to PlotMapView
Parameters
----------
frf : numpy.ndarray
MODFLOW's 'flow right face'
fff : numpy.ndarray
MODFLOW's 'flow front face'
| |
is not None:
self.receive_phone_num = m.get('receive_phone_num')
if m.get('auth_instance_biz_uuid') is not None:
self.auth_instance_biz_uuid = m.get('auth_instance_biz_uuid')
return self
class SendDasSmsResponse(TeaModel):
    """Response of the SendDasSms operation."""

    # All serializable fields of this response, in wire order.
    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
    ):
        # Unique id of the request, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; "OK" generally means the call succeeded.
        self.result_code = result_code
        # Human-readable description of any error.
        self.result_msg = result_msg

    def validate(self):
        # No required fields on a response.
        pass

    def to_map(self):
        result = {}
        for key in ('req_msg_id', 'result_code', 'result_msg'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or {}
        for key in ('req_msg_id', 'result_code', 'result_msg'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class AuthDasSmsRequest(TeaModel):
    """Request of the AuthDasSms operation (SMS-code authorization)."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        verification_code: str = None,
        auth_instance_biz_uuid: str = None,
        auth_person_enterprise_info: AuthPersonEnterpriseInfo = None,
        auth_person_individual_info: AuthPersonIndividualInfo = None,
    ):
        # Authorization token in OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # SMS verification code.
        self.verification_code = verification_code
        # biz_uuid of the authorization instance.
        self.auth_instance_biz_uuid = auth_instance_biz_uuid
        # Enterprise info of the authorizing party.
        self.auth_person_enterprise_info = auth_person_enterprise_info
        # Personal info of the authorizing person.
        self.auth_person_individual_info = auth_person_individual_info

    def validate(self):
        # verification_code and auth_instance_biz_uuid are mandatory.
        self.validate_required(self.verification_code, 'verification_code')
        self.validate_required(self.auth_instance_biz_uuid, 'auth_instance_biz_uuid')
        for sub_model in (self.auth_person_enterprise_info,
                          self.auth_person_individual_info):
            if sub_model:
                sub_model.validate()

    def to_map(self):
        result = {}
        for key in ('auth_token', 'product_instance_id',
                    'verification_code', 'auth_instance_biz_uuid'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        if self.auth_person_enterprise_info is not None:
            result['auth_person_enterprise_info'] = self.auth_person_enterprise_info.to_map()
        if self.auth_person_individual_info is not None:
            result['auth_person_individual_info'] = self.auth_person_individual_info.to_map()
        return result

    def from_map(self, m: dict = None):
        m = m or {}
        for key in ('auth_token', 'product_instance_id',
                    'verification_code', 'auth_instance_biz_uuid'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        if m.get('auth_person_enterprise_info') is not None:
            self.auth_person_enterprise_info = AuthPersonEnterpriseInfo().from_map(
                m['auth_person_enterprise_info'])
        if m.get('auth_person_individual_info') is not None:
            self.auth_person_individual_info = AuthPersonIndividualInfo().from_map(
                m['auth_person_individual_info'])
        return self
class AuthDasSmsResponse(TeaModel):
    """Response of the AuthDasSms operation."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        auth_status: str = None,
        vc: str = None,
    ):
        # Unique id of the request, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; "OK" generally means the call succeeded.
        self.result_code = result_code
        # Human-readable description of any error.
        self.result_msg = result_msg
        # Authorization status.
        self.auth_status = auth_status
        # Verifiable credential (VC) content.
        self.vc = vc

    def validate(self):
        # No required fields on a response.
        pass

    def to_map(self):
        result = {}
        for key in ('req_msg_id', 'result_code', 'result_msg',
                    'auth_status', 'vc'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or {}
        for key in ('req_msg_id', 'result_code', 'result_msg',
                    'auth_status', 'vc'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class VerifyDasEnterpriseRequest(TeaModel):
    """Request of the VerifyDasEnterprise operation (enterprise VC check)."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        auth_instance_biz_uuid: str = None,
        vc: str = None,
        be_authed_person_info: BeAuthedPersonInfo = None,
        auth_person_enterprise_info: AuthPersonEnterpriseInfo = None,
        data_source_info: List[DataSource] = None,
    ):
        # Authorization token in OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Application id of the authorized enterprise.
        self.auth_instance_biz_uuid = auth_instance_biz_uuid
        # Full content of the verifiable credential (VC).
        self.vc = vc
        # Info of the enterprise being authorized.
        self.be_authed_person_info = be_authed_person_info
        # Enterprise info of the authorizing party.
        self.auth_person_enterprise_info = auth_person_enterprise_info
        # Data sources that need to be accessed.
        self.data_source_info = data_source_info

    def validate(self):
        # Everything except the OAuth token / instance id is mandatory.
        self.validate_required(self.auth_instance_biz_uuid, 'auth_instance_biz_uuid')
        self.validate_required(self.vc, 'vc')
        self.validate_required(self.be_authed_person_info, 'be_authed_person_info')
        if self.be_authed_person_info:
            self.be_authed_person_info.validate()
        self.validate_required(self.auth_person_enterprise_info, 'auth_person_enterprise_info')
        if self.auth_person_enterprise_info:
            self.auth_person_enterprise_info.validate()
        self.validate_required(self.data_source_info, 'data_source_info')
        for item in self.data_source_info or []:
            if item:
                item.validate()

    def to_map(self):
        result = {}
        for key in ('auth_token', 'product_instance_id',
                    'auth_instance_biz_uuid', 'vc'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        if self.be_authed_person_info is not None:
            result['be_authed_person_info'] = self.be_authed_person_info.to_map()
        if self.auth_person_enterprise_info is not None:
            result['auth_person_enterprise_info'] = self.auth_person_enterprise_info.to_map()
        result['data_source_info'] = []
        if self.data_source_info is not None:
            for item in self.data_source_info:
                result['data_source_info'].append(item.to_map() if item else None)
        return result

    def from_map(self, m: dict = None):
        m = m or {}
        for key in ('auth_token', 'product_instance_id',
                    'auth_instance_biz_uuid', 'vc'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        if m.get('be_authed_person_info') is not None:
            self.be_authed_person_info = BeAuthedPersonInfo().from_map(
                m['be_authed_person_info'])
        if m.get('auth_person_enterprise_info') is not None:
            self.auth_person_enterprise_info = AuthPersonEnterpriseInfo().from_map(
                m['auth_person_enterprise_info'])
        self.data_source_info = []
        if m.get('data_source_info') is not None:
            for item in m.get('data_source_info'):
                self.data_source_info.append(DataSource().from_map(item))
        return self
class VerifyDasEnterpriseResponse(TeaModel):
    """Response of the VerifyDasEnterprise operation."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        success: bool = None,
        failed_reason: str = None,
    ):
        # Unique id of the request, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; "OK" generally means the call succeeded.
        self.result_code = result_code
        # Human-readable description of any error.
        self.result_msg = result_msg
        # Verification result.
        self.success = success
        # Reason the verification failed, if it did.
        self.failed_reason = failed_reason

    def validate(self):
        # No required fields on a response.
        pass

    def to_map(self):
        result = {}
        for key in ('req_msg_id', 'result_code', 'result_msg',
                    'success', 'failed_reason'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or {}
        for key in ('req_msg_id', 'result_code', 'result_msg',
                    'success', 'failed_reason'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class VerifyDasIndividualRequest(TeaModel):
    """Request of the VerifyDasIndividual operation (individual VC check)."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        auth_instance_biz_uuid: str = None,
        vc: str = None,
        be_authed_person_info: BeAuthedPersonInfo = None,
        auth_person_individual_info: AuthPersonIndividualInfo = None,
        data_source_info: List[DataSource] = None,
    ):
        # Authorization token in OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # biz_uuid of the authorization instance.
        self.auth_instance_biz_uuid = auth_instance_biz_uuid
        # Full content of the verifiable credential (VC).
        self.vc = vc
        # Info of the enterprise being authorized.
        self.be_authed_person_info = be_authed_person_info
        # Personal info of the authorizing person.
        self.auth_person_individual_info = auth_person_individual_info
        # Data sources that need to be accessed.
        self.data_source_info = data_source_info

    def validate(self):
        # Everything except the OAuth token / instance id is mandatory.
        self.validate_required(self.auth_instance_biz_uuid, 'auth_instance_biz_uuid')
        self.validate_required(self.vc, 'vc')
        self.validate_required(self.be_authed_person_info, 'be_authed_person_info')
        if self.be_authed_person_info:
            self.be_authed_person_info.validate()
        self.validate_required(self.auth_person_individual_info, 'auth_person_individual_info')
        if self.auth_person_individual_info:
            self.auth_person_individual_info.validate()
        self.validate_required(self.data_source_info, 'data_source_info')
        for item in self.data_source_info or []:
            if item:
                item.validate()

    def to_map(self):
        result = {}
        for key in ('auth_token', 'product_instance_id',
                    'auth_instance_biz_uuid', 'vc'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        if self.be_authed_person_info is not None:
            result['be_authed_person_info'] = self.be_authed_person_info.to_map()
        if self.auth_person_individual_info is not None:
            result['auth_person_individual_info'] = self.auth_person_individual_info.to_map()
        result['data_source_info'] = []
        if self.data_source_info is not None:
            for item in self.data_source_info:
                result['data_source_info'].append(item.to_map() if item else None)
        return result

    def from_map(self, m: dict = None):
        m = m or {}
        for key in ('auth_token', 'product_instance_id',
                    'auth_instance_biz_uuid', 'vc'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        if m.get('be_authed_person_info') is not None:
            self.be_authed_person_info = BeAuthedPersonInfo().from_map(
                m['be_authed_person_info'])
        if m.get('auth_person_individual_info') is not None:
            self.auth_person_individual_info = AuthPersonIndividualInfo().from_map(
                m['auth_person_individual_info'])
        self.data_source_info = []
        if m.get('data_source_info') is not None:
            for item in m.get('data_source_info'):
                self.data_source_info.append(DataSource().from_map(item))
        return self
class VerifyDasIndividualResponse(TeaModel):
    """Response of the VerifyDasIndividual operation."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        success: bool = None,
        failed_reason: str = None,
    ):
        # Unique id of the request, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; "OK" generally means the call succeeded.
        self.result_code = result_code
        # Human-readable description of any error.
        self.result_msg = result_msg
        # Verification result.
        self.success = success
        # Reason the verification failed, if it did.
        self.failed_reason = failed_reason

    def validate(self):
        # No required fields on a response.
        pass

    def to_map(self):
        result = {}
        for key in ('req_msg_id', 'result_code', 'result_msg',
                    'success', 'failed_reason'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or {}
        for key in ('req_msg_id', 'result_code', 'result_msg',
                    'success', 'failed_reason'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class CreateDasDatasourceRequest(TeaModel):
def __init__(
self,
auth_token: str = None,
product_instance_id: str = None,
name: str = | |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument
"""Common Winograd implementation for Adreno backend"""
import tvm
from tvm import te
from tvm import autotvm
from tvm.topi import nn
from tvm.topi.utils import get_const_int, get_const_tuple, traverse_inline
from ..nn.winograd_util import winograd_transform_matrices
from .utils import (
split_to_chunks,
pack_input,
pack_filter,
bind_data_copy,
get_texture_storage,
infer_tile_size,
)
def conv2d_winograd_comp(
cfg, data, kernel, strides, padding, dilation, out_dtype, args, pre_computed, layout
):
"""Compute declaration for winograd
Parameters
----------
cfg: ConfigEntity
The config for this template
data: tvm.te.Tensor
4-D or 5-D Data tensor with shape NCHW or NCHW4c
kernel: tvm.te.Tensor
4-D or 5-D tensor with shape OIHW or OIHW4o
strides: int or a list/tuple of two ints
stride size, or [stride_height, stride_width]
padding: int or a list/tuple of 2 or 4 ints
padding size, or
[pad_height, pad_width] for 2 ints, or
[pad_top, pad_left, pad_bottom, pad_right] for 4 ints
dilation: int or a list/tuple of two ints
dilation size, or [dilation_height, dilation_width]
out_dtype: str
The output type. This is used for mixed precision.
args: dict
Dictionary with additional arguments, e.g. accumulator type
pre_computed: bool
Flag if weights were pre computed if true or the weights should be
computed in runtime
layout: str
NHWC or NCHW values are accepted
Returns
-------
output: tvm.te.Tensor
4-D or 5-D with shape NCHW or NCHW4c
"""
assert layout in ("NCHW", "NHWC")
tile_size = infer_tile_size(data, layout)
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
HSTR, WSTR = (strides, strides) if isinstance(strides, int) else strides
convert_from4d = False
if len(data.shape) == 4:
if layout == "NCHW":
N, DCI, H, W = get_const_tuple(data.shape)
else:
N, H, W, DCI = get_const_tuple(data.shape)
if not pre_computed:
if layout == "NCHW":
out_channels, CI, KH, KW = get_const_tuple(kernel.shape)
else:
KH, KW, CI, out_channels = get_const_tuple(kernel.shape)
else:
alpha, _, CI, out_channels = get_const_tuple(kernel.shape)
KH = KW = alpha + 1 - tile_size
in_channel_chunks, in_channel_block, in_channel_tail = split_to_chunks(CI, 4)
out_channel_chunks, out_channel_block, out_channel_tail = split_to_chunks(out_channels, 4)
if autotvm.GLOBAL_SCOPE.in_tuning is True:
if layout == "NCHW":
dshape = (N, in_channel_chunks, H, W, in_channel_block)
else:
dshape = (N, H, W, in_channel_chunks, in_channel_block)
if not pre_computed: # kernel tensor is raw tensor, do strict check
if layout == "NCHW":
kshape = (out_channel_chunks, CI, KH, KW, out_channel_block)
else:
kshape = (KH, KW, CI, out_channel_chunks, out_channel_block)
else:
kshape = (alpha, alpha, CI, out_channel_chunks, out_channel_block)
data = tvm.te.placeholder(dshape, data.dtype, name="data_placeholder")
kernel = tvm.te.placeholder(kshape, kernel.dtype, name="kernel_placeholder")
else:
convert_from4d = True
data = pack_input(
data, layout, N, in_channel_chunks, in_channel_block, in_channel_tail, H, W
)
kernel_layout = "OIHW" if layout == "NCHW" else "HWIO"
if not pre_computed: # kernel tensor is raw tensor, do strict check
kernel = pack_filter(
kernel,
kernel_layout,
out_channel_chunks,
out_channel_block,
out_channel_tail,
CI,
in_channel_chunks,
in_channel_block,
in_channel_tail,
KH,
KW,
)
else:
kernel = pack_filter(
kernel,
"HWIO",
out_channel_chunks,
out_channel_block,
out_channel_tail,
CI,
in_channel_chunks,
in_channel_block,
in_channel_tail,
alpha,
alpha,
)
if layout == "NCHW":
N, DCI, H, W, CB = get_const_tuple(data.shape)
else:
N, H, W, DCI, CB = get_const_tuple(data.shape)
if not pre_computed: # kernel tensor is raw tensor, do strict check
if layout == "NCHW":
CO, CI, KH, KW, COB = get_const_tuple(kernel.shape)
else:
KH, KW, CI, CO, COB = get_const_tuple(kernel.shape)
alpha = KW + tile_size - 1
assert HSTR == 1 and WSTR == 1 and KH == KW
else:
alpha, _, CI, CO, COB = get_const_tuple(kernel.shape)
KH = KW = alpha + 1 - tile_size
assert HSTR == 1 and WSTR == 1 and dilation_h == 1 and dilation_w == 1
if isinstance(N, tvm.tir.Any):
N = tvm.te.size_var("n")
if not isinstance(H, int) or not isinstance(W, int):
raise RuntimeError(
"adreno winograd conv2d doesn't support dynamic input\
height or width."
)
pt, pl, pb, pr = nn.get_pad_tuple(padding, (KH, KW))
if layout == "NCHW":
data_pad = nn.pad(data, (0, 0, pt, pl, 0), (0, 0, pb, pr, 0), name="data_pad")
else:
data_pad = nn.pad(data, (0, pt, pl, 0, 0), (0, pb, pr, 0, 0), name="data_pad")
r = KW
m = tile_size
A, B, G = winograd_transform_matrices(m, r, out_dtype)
H = (H + pt + pb - KH) // HSTR + 1
W = (W + pl + pr - KW) // WSTR + 1
nH, nW = (H + m - 1) // m, (W + m - 1) // m
P = N * nH * nW if isinstance(N, int) else nH * nW
# transform kernel
if not pre_computed:
r_kh = te.reduce_axis((0, KH), name="r_kh")
r_kw = te.reduce_axis((0, KW), name="r_kw")
if layout == "NCHW":
kernel_pack = te.compute(
(alpha, alpha, CI, CO, COB),
lambda eps, nu, ci, co, cob: te.sum(
kernel[co][ci][r_kh][r_kw][cob] * G[eps][r_kh] * G[nu][r_kw], axis=[r_kh, r_kw]
),
name="kernel_pack",
)
else:
kernel_pack = te.compute(
(alpha, alpha, CI, CO, COB),
lambda eps, nu, ci, co, cob: te.sum(
kernel[r_kh][r_kw][ci][co][cob] * G[eps][r_kh] * G[nu][r_kw], axis=[r_kh, r_kw]
),
name="kernel_pack",
)
else:
kernel_pack = kernel
idxdiv = tvm.tir.indexdiv
idxmod = tvm.tir.indexmod
if layout == "NCHW":
N, CI, H, W, CB = get_const_tuple(data.shape)
else:
N, H, W, CI, CB = get_const_tuple(data.shape)
# pack input tile
if layout == "NCHW":
input_tile = te.compute(
(alpha, alpha, CI, P, CB),
lambda eps, nu, c, p, cb: data_pad[idxdiv(p, (nH * nW))][c][
idxmod(idxdiv(p, nW), nH) * m + eps
][idxmod(p, nW) * m + nu][cb],
name="d",
)
else:
input_tile = te.compute(
(alpha, alpha, CI, P, CB),
lambda eps, nu, c, p, cb: data_pad[idxdiv(p, (nH * nW))][
idxmod(idxdiv(p, nW), nH) * m + eps
][idxmod(p, nW) * m + nu][c][cb],
name="d",
)
# transform data
r_a = te.reduce_axis((0, alpha), "r_a")
r_b = te.reduce_axis((0, alpha), "r_a")
data_pack = te.compute(
(P, CI, alpha, alpha, CB),
lambda p, ci, eps, nu, cb: te.sum(
input_tile[r_a][r_b][ci][p][cb] * B[r_a][eps] * B[r_b][nu], axis=[r_a, r_b]
),
name="data_pack",
)
# repack transformed data
data_pack_trans = te.compute(
(alpha, alpha, CI, P, CB),
lambda eps, nu, c, p, cb: data_pack[p][c][eps][nu][cb],
name="data_pack_trans",
)
# do batch gemm
ci = te.reduce_axis((0, CI), name="ci")
cb = te.reduce_axis((0, CB), name="cb")
bgemm = te.compute(
(alpha, alpha, CO, P, COB),
lambda eps, nu, co, p, cob: te.sum(
(
kernel_pack[eps][nu][ci * CB + cb][co][cob] * data_pack_trans[eps][nu][ci][p][cb]
).astype(args["accumulator"]),
axis=[ci, cb],
),
name="bgemm",
)
# inverse transform
r_a = te.reduce_axis((0, alpha), "r_a")
r_b = te.reduce_axis((0, alpha), "r_a")
inverse = te.compute(
(CO, P, m, m, COB),
lambda co, p, vh, vw, cob: te.sum(
bgemm[r_a][r_b][co][p][cob] * (A[r_a][vh] * A[r_b][vw]).astype(args["accumulator"]),
axis=[r_a, r_b],
),
name="inverse",
)
# output
if layout == "NCHW":
if convert_from4d and autotvm.GLOBAL_SCOPE.in_tuning is False:
output = te.compute(
(N, out_channels, H, W),
lambda n, c, h, w: inverse[c // CB][n * nH * nW + idxdiv(h, m) * nW + idxdiv(w, m)][
idxmod(h, m)
][idxmod(w, m)][c % CB].astype(out_dtype),
name="output",
tag="cast_from_acc" + args["accumulator"][-2:],
)
else:
output = te.compute(
(N, CO, H, W, COB),
lambda n, co, h, w, cob: inverse[co][
n * nH * nW + idxdiv(h, m) * nW + idxdiv(w, m)
][idxmod(h, m)][idxmod(w, m)][cob].astype(out_dtype),
name="output",
tag="cast_from_acc" + args["accumulator"][-2:],
)
else:
if convert_from4d and autotvm.GLOBAL_SCOPE.in_tuning is False:
output = te.compute(
(N, H, W, out_channels),
lambda n, h, w, c: inverse[c // CB][n * nH * nW + idxdiv(h, m) * nW + idxdiv(w, m)][
idxmod(h, m)
][idxmod(w, m)][c % CB].astype(out_dtype),
name="output",
tag="cast_from_acc" + args["accumulator"][-2:],
)
else:
output = te.compute(
(N, H, W, CO, COB),
lambda n, h, w, co, cob: inverse[co][
n * nH * nW + idxdiv(h, m) * nW + idxdiv(w, m)
][idxmod(h, m)][idxmod(w, m)][cob].astype(out_dtype),
name="output",
tag="cast_from_acc" + args["accumulator"][-2:],
)
if isinstance(N, int):
cfg.add_flop(2 * N * | |
user.user_agent = request.user_agent
user.save()
if not service:
# TODO: check that return_to is a local url
redirect = return_to
else:
raise BadRequestError("Invalid service '%s'." % service)
if params.get('invitoken'):
confirm_invitation(request)
return {
'auth': True,
'redirect': redirect,
'csrf_token': get_csrf_token(request),
}
@view_config(route_name='switch_context', request_method='GET')
@view_config(route_name='switch_context_org', request_method='GET')
def switch_org(request):
    """
    Switch user's context.
    Personal or organizational
    ---
    org_id:
      description: The team's org id
      type: string
      required: true
    """
    org_id = request.matchdict.get('org_id')
    user = user_from_request(request)
    return_to = params_from_request(request).get('return_to', '')
    org = None
    if org_id:
        try:
            org = Organization.objects.get(id=org_id)
        except me.DoesNotExist:
            raise ForbiddenError()
        # Members of the org itself may switch into it; for sub-orgs, owners
        # of the parent organization are additionally allowed.
        allowed = list(org.members)
        if org.parent:
            allowed += org.parent.teams.get(name='Owners').members
        if user not in allowed:
            raise ForbiddenError()
    # No org_id means switching back to the personal context (org=None).
    reissue_cookie_session(request, user, org=org, after=1)
    raise RedirectError(urllib.parse.unquote(return_to) or '/')
@view_config(route_name='login', request_method='GET')
@view_config(route_name='login_service', request_method='GET')
def login_get(request):
    """
    User visits login form.
    If there is a 'return_to' parameter the user will be redirected to this
    local url upon successful authentication.
    There is also an optional 'service' parameter, mainly meant to be used for
    SSO.
    ---
    return_to:
      description: if exists, redirect user
      type: string
    service:
      description: used for SSO
      type: string
    """
    # A GET may carry the same credentials a POST would; delegate to login()
    # and redirect straight away if it succeeds.
    try:
        ret = login(request)
        if ret['auth']:
            return HTTPFound(ret['redirect'])
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Any failure here simply means the visitor is not
        # authenticated yet, so fall through to the sign-in page.
        pass
    service = request.matchdict.get('service', '')
    params = params_from_request(request)
    return_to = params.get('return_to', '')
    invitoken = params.get('invitoken', '')
    try:
        user_from_request(request)
        # Already logged in: honor return_to, otherwise go to the root page.
        if not service:
            return HTTPFound(urllib.parse.unquote(return_to) or '/')
        raise BadRequestError("Invalid service '%s'." % service)
    except UserUnauthorizedError:
        # Anonymous visitor: redirect to the sign-in page, carrying over any
        # return_to / invitation token as query parameters.
        path = "sign-in"
        query_params = {}
        if return_to:
            query_params['return_to'] = return_to
        if invitoken:
            query_params['invitoken'] = invitoken
        if query_params:
            path += '?' + urllib.parse.urlencode(query_params)
        return HTTPFound(path)
@view_config(route_name='logout', request_method=('GET', 'POST'))
def logout(request):
    """
    User logs out.
    If user is an admin under su, he returns to his regular user.

    API tokens cannot be "logged out"; they must be revoked via /tokens.
    """
    user = user_from_request(request)
    session = session_from_request(request)
    if isinstance(session, ApiToken):
        raise ForbiddenError('If you wish to revoke a token use the /tokens'
                             ' path')
    real_user = session.get_user(effective=False)
    if user != real_user:
        # Admin was su'd into another user: drop back to the real account.
        # `Logger.warn` is a deprecated alias; use `warning`.
        log.warning("Su logout")
        reissue_cookie_session(request, real_user)
    else:
        reissue_cookie_session(request)
    return HTTPFound('/')
@view_config(route_name='register', request_method='POST', renderer='json')
def register(request):
    """
    New user signs up.

    Handles plain registration, demo requests and private-beta requests.
    Returns a JSON payload with a user-facing message plus the (possibly
    None) user id and analytics id.
    """
    params = params_from_request(request)
    email = params.get('email')
    promo_code = params.get('promo_code')
    name = params.get('name')
    token = params.get('token')
    selected_plan = params.get('selected_plan')
    request_demo = params.get('request_demo', False)
    request_beta = params.get('request_beta', False)
    if not email or not email.strip():
        raise RequiredParameterMissingError('email')
    if not name or not name.strip():
        raise RequiredParameterMissingError('name')
    # isinstance is the idiomatic type check (was: type(request_demo) != bool).
    if not isinstance(request_demo, bool):
        raise BadRequestError('Request demo must be a boolean value')
    # Split the full name into first name and (optional) last name.
    name = name.strip().split(" ", 1)
    email = email.strip().lower()
    # NOTE: a former `if type(name) == str: name = name.encode(...)` branch
    # was removed: `name` is always a list after split(), so it was dead code
    # (and encoding would have produced bytes under Python 3 anyway).
    if not request_beta:
        try:
            user = User.objects.get(email=email)
            if user.status == 'confirmed' and not request_demo:
                raise ConflictError("User already registered "
                                    "and confirmed email.")
        except me.DoesNotExist:
            first_name = name[0]
            last_name = name[1] if len(name) > 1 else ""
            user, org = register_user(email, first_name, last_name, 'email',
                                      selected_plan, promo_code, token,
                                      request=request)
        if user.status == 'pending':
            # if user is not confirmed yet resend the email
            subject = config.CONFIRMATION_EMAIL_SUBJECT.format(
                portal_name=config.PORTAL_NAME)
            body = config.CONFIRMATION_EMAIL_BODY.format(
                fname=user.first_name, ip_addr=ip_from_request(request),
                portal_uri=config.CORE_URI, follow_us=config.FOLLOW_US,
                portal_name=config.PORTAL_NAME,
                activation_key=user.activation_key)
            if not send_email(subject, body, user.email):
                raise ServiceUnavailableError("Could not send "
                                              "confirmation email.")
    # TODO: Move to mist.billing or remove altogether
    if request_demo:
        # if user requested a demo then notify the mist.api team
        subject = "Demo request"
        body = "User %s has requested a demo\n" % user.email
        tasks.send_email.send(subject, body, config.NOTIFICATION_EMAIL['demo'])
        user.requested_demo = True
        user.demo_request_date = time()
        user.save()
        msg = (
            "Dear %s %s, we will contact you within 24 hours to schedule a "
            "demo. In the meantime, we sent you an activation email so you"
            " can create an account to test Mist.io. If the email doesn't"
            " appear in your inbox, check your spam folder."
        ) % (user.first_name, user.last_name)
    elif request_beta:
        user = None
        # if user requested the private beta then notify the mist.api team.
        # The name is interpolated as str: the old `.encode('utf-8',
        # 'ignore')` produced bytes, which render as "b'...'" in Python 3.
        subject = "Private beta request"
        body = "User %s <%s> has requested access to the private beta\n" % (
            params.get('name'), email)
        tasks.send_email.send(subject, body, config.NOTIFICATION_EMAIL['demo'])
        msg = (
            "Dear %s, we will contact you within 24 hours with more "
            "information about the Mist.io private beta program. In the "
            "meantime, if you have any questions don't hesitate to contact"
            " us at <EMAIL>"
        ) % params.get('name')
    else:
        msg = (
            "Dear %s,\n"
            "you will soon receive an activation email. "
            "If it does not appear in your Inbox within "
            "a few minutes, please check your spam folder.\n"
        ) % user.first_name
    return {
        'msg': msg,
        'user_ga_id': user and user.get_external_id('ga'),
        'user_id': user and user.id}
@view_config(route_name='confirm', request_method='GET')
def confirm(request):
    """
    Confirm a user's email address when signing up.
    After registering, the user is sent a confirmation email to his email
    address with a link containing a token that directs the user to this view
    to confirm his email address.
    If invitation token exists redirect to set_password or to social auth
    """
    params = params_from_request(request)
    key = params.get('key')
    if not key:
        raise RequiredParameterMissingError('key')
    try:
        user = User.objects.get(activation_key=key)
    except me.DoesNotExist:
        return HTTPFound('/error?msg=bad-key')
    if user.status != 'pending' or user.password:
        # if user has an invitation token but has been confirmed call the
        # confirm invitation token
        if params.get('invitoken'):
            return confirm_invitation(request)
        else:
            return HTTPFound('/error?msg=already-confirmed')
    # Derive a one-time password-set token from the activation key and wrap
    # it together with the user's email into an encrypted key handed to the
    # next step (set_password or an SSO login).
    token = hashlib.sha1(key.encode()).hexdigest()
    key = encrypt("%s:%s" % (token, user.email), config.SECRET)
    user.password_set_token = token
    user.password_set_token_created = time()
    user.password_set_user_agent = request.user_agent
    log.debug("will now save (register)")
    # NOTE(review): explicit write concern presumably forces the token to be
    # durable before the redirect is issued — confirm against deployment docs.
    user.save(write_concern={'w': 1, 'fsync': True})
    invitoken = params.get('invitoken')
    # Pick the destination for the first sign-in method enabled in config;
    # email sign-in takes priority over the SSO providers.
    if config.ALLOW_SIGNIN_EMAIL:
        url = request.route_url('set_password', _query={'key': key})
    elif config.ALLOW_SIGNIN_GOOGLE:
        url = '/social_auth/login/google-oauth2?key=%s' % key
    elif config.ALLOW_SIGNIN_GITHUB:
        url = '/social_auth/login/github-oauth2?key=%s' % key
    elif config.ALLOW_SIGNIN_MS365:
        url = '/social_auth/login/azuread-oauth2?key=%s' % key
    elif config.ALLOW_SIGNIN_CILOGON:
        url = '/social_auth/login/cilogon-oauth2?key=%s' % key
    else:
        log.error('Confirm invitation attempt with sign-in disabled')
        raise ForbiddenError("No sign-in method configured.")
    if invitoken:
        # Propagate a valid invitation token so the next step can join the
        # user to the inviting organization; unknown tokens are dropped.
        try:
            MemberInvitation.objects.get(token=invitoken)
            url += '&invitoken=' + invitoken
        except me.DoesNotExist:
            pass
    return HTTPFound(url)
@view_config(route_name='forgot_password', request_method='POST')
def forgot_password(request):
    """
    User visits password forgot form and submits his email
    or user presses the set password button in the account page
    and has registered through the SSO and has no previous
    password set in the database. In the latter case the email
    will be fetched from the session.
    """
    try:
        email = user_from_request(request).email
    except UserUnauthorizedError:
        email = params_from_request(request).get('email', '')
    try:
        user = User.objects.get(email=email)
    except (UserNotFoundError, me.DoesNotExist):
        # Respond OK regardless, so this endpoint cannot be used to probe
        # which email addresses have accounts.
        return OK
    if user.status != 'confirmed':
        # Unconfirmed account: re-send the confirmation email instead of a
        # password-reset link.
        user.activation_key = get_secure_rand_token()
        user.save()
        sent = send_email(
            config.CONFIRMATION_EMAIL_SUBJECT.format(
                portal_name=config.PORTAL_NAME),
            config.CONFIRMATION_EMAIL_BODY.format(
                fname=user.first_name, ip_addr=ip_from_request(request),
                portal_uri=config.CORE_URI, follow_us=config.FOLLOW_US,
                portal_name=config.PORTAL_NAME,
                activation_key=user.activation_key),
            user.email)
        if not sent:
            raise ServiceUnavailableError("Could not send confirmation email.")
        return OK
    # Confirmed account: mint a reset token and email an encrypted link.
    reset_token = get_secure_rand_token()
    user.password_reset_token = reset_token
    user.password_reset_token_created = time()
    user.password_reset_token_ip_addr = ip_from_request(request)
    log.debug("will now save (forgot)")
    user.save()
    subject = config.RESET_PASSWORD_EMAIL_SUBJECT.format(
        portal_name=config.PORTAL_NAME
    )
    body = config.RESET_PASSWORD_EMAIL_BODY.format(
        fname=user.first_name, portal_name=config.PORTAL_NAME,
        portal_uri=config.CORE_URI,
        ip_addr=user.password_reset_token_ip_addr,
        activation_key=encrypt("%s:%s" % (reset_token, email), config.SECRET)
    )
    if not send_email(subject, body, email):
        log.info("Failed to send email to user %s for forgot password link" %
                 user.email)
        raise ServiceUnavailableError()
    log.info("Sent email to user %s\n%s" % (email, body))
    return OK
# SEC
@view_config(route_name='reset_password', request_method=('GET', 'POST'))
def reset_password(request):
    """
    User visits reset password form and posts his email address
    If he is logged in when he presses the link then he will be logged out
    and then redirected to the landing page with the reset password token.

    GET renders the landing page carrying the token; POST applies the new
    password. Both require a valid, unexpired `key` query parameter.
    """
    params = params_from_request(request)
    key = params.get('key')
    if not key:
        raise BadRequestError("Reset password token is missing")
    reissue_cookie_session(request)  # logout
    # SEC decrypt key using secret
    # NOTE(review): bare `except:` — consider narrowing to Exception so
    # SystemExit/KeyboardInterrupt are not converted into a 400.
    try:
        (token, email) = decrypt(key, config.SECRET).split(':')
    except:
        raise BadRequestError("invalid password token.")
    try:
        user = User.objects.get(email=email)
    except (UserNotFoundError, me.DoesNotExist):
        raise UserUnauthorizedError()
    # SEC check status, token, expiration
    if token != user.password_reset_token:
        raise BadRequestError("Invalid reset password token.")
    delay = time() - user.password_reset_token_created
    if delay > config.RESET_PASSWORD_EXPIRATION_TIME:
        raise MethodNotAllowedError("Password reset token has expired.")
    if request.method == 'GET':
        # Render the landing page; the client-side app picks up the token
        # from the URL and presents the password form.
        build_path = ''
        if config.JS_BUILD and not params.get('debug'):
            build_path = '/build/%s/bundled/' % config.VERSION.get('sha')
        # NOTE(review): this mutates the shared config.HOMEPAGE_INPUTS dict
        # in place rather than a copy — confirm that is intended.
        template_inputs = config.HOMEPAGE_INPUTS
        template_inputs['build_path'] = build_path
        template_inputs['csrf_token'] = json.dumps(get_csrf_token(request))
        get_landing_template(build_path)
        return render_to_response('templates/landing.pt', template_inputs)
    elif request.method == 'POST':
        password = params.get('password', '')
        if not password:
            raise RequiredParameterMissingError('password')
        # change password
        user.set_password(password)
        user.status = 'confirmed'
        # in case the use has been with a pending confirm state
        # Zeroing the creation timestamp invalidates the token against reuse.
        user.password_reset_token_created = 0
        user.save()
        reissue_cookie_session(request, user)
        return OK
    raise BadRequestError("Bad method %s" % request.method)
@view_config(route_name='request_whitelist_ip', request_method='POST')
def | |
#!/usr/bin/env python3
"""A pipeline to extract repeat expansion variants from ClinVar XML dump. For documentation refer to README.md"""
from collections import Counter
import logging
import numpy as np
import pandas as pd
from eva_cttv_pipeline import clinvar_xml_utils
from . import biomart, clinvar_identifier_parsing
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
STANDARD_CHROMOSOME_NAMES = {str(c) for c in range(1, 23)} | {'X', 'Y', 'M', 'MT'}
def none_to_nan(*args):
    """Return *args* as a list with every None replaced by np.nan.

    Keeps values consistent inside a Pandas dataframe, where missing data
    is represented as NaN rather than None.
    """
    return list(map(lambda value: np.nan if value is None else value, args))
def load_clinvar_data(clinvar_xml):
    """Load ClinVar data, preprocess, and return it as a Pandas dataframe.

    Returns a tuple of (variants dataframe with columns Name, RCVaccession,
    GeneSymbol, HGNC_ID; Counter of microsatellite categories observed).
    """
    # Iterate through ClinVar XML records
    variant_data = []  # To populate the return dataframe (see columns below)
    stats = Counter()
    # Number of records processed so far. Previously the final log statement
    # used the loop index `i`, which raised NameError on an empty dataset and
    # under-reported the total by one otherwise.
    record_count = 0
    for i, clinvar_record in enumerate(clinvar_xml_utils.ClinVarDataset(clinvar_xml)):
        record_count = i + 1
        if i and i % 100000 == 0:
            total_repeat_expansion_variants = stats[clinvar_xml_utils.ClinVarRecordMeasure.MS_REPEAT_EXPANSION] + \
                stats[clinvar_xml_utils.ClinVarRecordMeasure.MS_NO_COMPLETE_COORDS]
            logger.info(f'Processed {i} records, collected {total_repeat_expansion_variants} repeat expansion variant '
                        f'candidates')
        # Skip a record if it does not contain variant information
        if not clinvar_record.measure:
            continue
        # Repeat expansion events come in two forms: with explicit coordinates and allele sequences (CHROM/POS/REF/ALT),
        # or without them. In the first case we can compute the explicit variant length as len(ALT) - len(REF). In the
        # second case, which is more rare but still important, we have to resort to parsing HGVS-like variant names.
        stats[clinvar_record.measure.microsatellite_category] += 1
        # Skip the record if it's a deletion or a short insertion
        if not clinvar_record.measure.is_repeat_expansion_variant:
            continue
        # Extract gene symbol(s). Here and below, dashes are sometimes assigned to be compatible with the variant
        # summary format which was used previously.
        gene_symbols = clinvar_record.measure.preferred_gene_symbols
        if not gene_symbols:
            gene_symbols = ['-']
        # Extract HGNC ID; only unambiguous (single id, single symbol) cases are kept.
        hgnc_ids = clinvar_record.measure.hgnc_ids
        hgnc_id = hgnc_ids[0] if len(hgnc_ids) == 1 and len(gene_symbols) == 1 else '-'
        # Append data strings, one row per gene symbol
        for gene_symbol in gene_symbols:
            variant_data.append([
                clinvar_record.measure.name,
                clinvar_record.accession,
                gene_symbol,
                hgnc_id
            ])
    total_repeat_expansion_variants = stats[clinvar_xml_utils.ClinVarRecordMeasure.MS_REPEAT_EXPANSION] + \
        stats[clinvar_xml_utils.ClinVarRecordMeasure.MS_NO_COMPLETE_COORDS]
    logger.info(f'Done. A total of {record_count} records, {total_repeat_expansion_variants} repeat expansion variant '
                f'candidates')
    variants = pd.DataFrame(variant_data, columns=('Name', 'RCVaccession', 'GeneSymbol', 'HGNC_ID'))
    # Since the same record can have coordinates in multiple builds, it can be repeated. Remove duplicates
    variants = variants.drop_duplicates()
    # Sort values by variant name
    return variants.sort_values(by=['Name']), stats
def parse_variant_identifier(row):
    """Parse the variant name and fill derived characteristics into new
    columns of *row*, converting None results to NaN. Returns the row."""
    parsed = clinvar_identifier_parsing.parse_variant_identifier(str(row.Name))
    (row['TranscriptID'], row['CoordinateSpan'],
     row['RepeatUnitLength'], row['IsProteinHGVS']) = none_to_nan(*parsed)
    return row
def annotate_ensembl_gene_info(variants):
    """Annotate the `variants` dataframe with information about Ensembl gene ID and name.

    Returns a new dataframe with EnsemblGeneID, EnsemblGeneName,
    EnsemblChromosomeName and GeneAnnotationSource columns added. Rows may be
    duplicated (exploded) when one identifier maps to several Ensembl genes.
    """
    # Ensembl gene ID can be determined using three ways, listed in the order of decreasing priority. Having multiple
    # ways is necessary because no single method works on all ClinVar variants.
    gene_annotation_sources = (
        # Dataframe column   Biomart column       Filtering function
        ('HGNC_ID',          'hgnc_id',           lambda i: i.startswith('HGNC:')),
        ('GeneSymbol',       'external_gene_name', lambda i: i != '-'),
        ('TranscriptID',     'refseq_mrna',       lambda i: pd.notnull(i)),
    )
    # This copy of the dataframe is required to facilitate filling in data using the `combine_first()` method. This
    # allows us to apply priorities: e.g., if a gene ID was already populated using HGNC_ID, it will not be overwritten
    # by a gene ID determined using GeneSymbol.
    variants_original = variants.copy(deep=True)
    for column_name_in_dataframe, column_name_in_biomart, filtering_function in gene_annotation_sources:
        # Get all identifiers we want to query BioMart with
        identifiers_to_query = sorted({
            i for i in variants[column_name_in_dataframe]
            if filtering_function(i)
        })
        # Query BioMart for Ensembl Gene IDs
        annotation_info = biomart.query_biomart(
            key_column=(column_name_in_biomart, column_name_in_dataframe),
            query_column=('ensembl_gene_id', 'EnsemblGeneID'),
            identifier_list=identifiers_to_query,
        )
        # Make note where the annotations came from
        annotation_info['GeneAnnotationSource'] = column_name_in_dataframe
        # Combine the information we received with the *original* dataframe (a copy made before any iterations of this
        # cycle were allowed to run). This is similar to SQL merge.
        annotation_df = pd.merge(variants_original, annotation_info, on=column_name_in_dataframe, how='left')
        # Update main dataframe with the new values. This replaces the NaN values in the dataframe with the ones
        # available in another dataframe we just created, `annotation_df`.
        variants = variants \
            .set_index([column_name_in_dataframe]) \
            .combine_first(annotation_df.set_index([column_name_in_dataframe]))
    # Reset index to default
    variants.reset_index(inplace=True)
    # Some records are being annotated to multiple Ensembl genes. For example, HGNC:10560 is being resolved to
    # ENSG00000285258 and ENSG00000163635. We need to explode dataframe by that column.
    variants = variants.explode('EnsemblGeneID')
    # Based on the Ensembl gene ID, annotate (1) gene name and (2) which chromosome it is on
    gene_query_columns = (
        ('external_gene_name', 'EnsemblGeneName'),
        ('chromosome_name', 'EnsemblChromosomeName'),
    )
    for column_name_in_biomart, column_name_in_dataframe in gene_query_columns:
        # NOTE(review): query_biomart presumably returns a one-item list per
        # key here, given the assert below — confirm against the biomart module.
        annotation_info = biomart.query_biomart(
            key_column=('ensembl_gene_id', 'EnsemblGeneID'),
            query_column=(column_name_in_biomart, column_name_in_dataframe),
            identifier_list=sorted({str(i) for i in variants['EnsemblGeneID'] if str(i).startswith('ENSG')}),
        )
        variants = pd.merge(variants, annotation_info, on='EnsemblGeneID', how='left')
        # Check that there are no multiple mappings for any given ID
        assert variants[column_name_in_dataframe].str.len().dropna().max() == 1, \
            'Found multiple gene ID → gene attribute mappings!'
        # Convert the one-item list into a plain column
        variants = variants.explode(column_name_in_dataframe)
    return variants
def determine_repeat_type(row):
    """Classify a variant row's repeat expansion type.

    Sets row['RepeatType'] to one of:
      * trinucleotide_repeat_expansion, corresponding to SO:0002165
      * short_tandem_repeat_expansion, corresponding to SO:0002162
      * NaN when the type cannot be determined
    and row['RecordIsComplete'] to whether the row has every field required
    for the final "consequences" table. Returns the modified row.
    """
    if row['IsProteinHGVS']:
        # Protein-level HGVS affects whole amino acids, implying units of 3.
        repeat_type = 'trinucleotide_repeat_expansion'
    else:
        # Prefer the unit length parsed directly from the HGVS-like base
        # sequence; fall back to the start/end coordinate span.
        unit_length = row['RepeatUnitLength']
        if pd.isnull(unit_length):
            unit_length = row['CoordinateSpan']
        if pd.isnull(unit_length):
            repeat_type = np.nan
        elif unit_length % 3 == 0:
            repeat_type = 'trinucleotide_repeat_expansion'
        else:
            repeat_type = 'short_tandem_repeat_expansion'
    # Names spelling out a plain deletion must not be treated as repeat
    # *expansions*. They can reach this point because records without explicit
    # allele sequences cannot be verified as true expansions earlier on.
    if row['Name'].endswith(('del', 'del)')):
        repeat_type = np.nan
    row['RepeatType'] = repeat_type
    row['RecordIsComplete'] = (
        pd.notnull(row['EnsemblGeneID']) and
        pd.notnull(row['EnsemblGeneName']) and
        pd.notnull(row['RepeatType']) and
        row['EnsemblChromosomeName'] in STANDARD_CHROMOSOME_NAMES
    )
    return row
def generate_output_files(variants, output_consequences, output_dataframe):
    """Postprocess and output final tables.

    Writes the full analysis dataframe to `output_dataframe` and a six-column
    consequences table (compatible with the consequence mapping pipeline) to
    `output_consequences`.
    """
    # Rearrange order of dataframe columns
    variants = variants[
        ['Name', 'RCVaccession', 'GeneSymbol', 'HGNC_ID',
         'RepeatUnitLength', 'CoordinateSpan', 'IsProteinHGVS', 'TranscriptID',
         'EnsemblGeneID', 'EnsemblGeneName', 'EnsemblChromosomeName', 'GeneAnnotationSource',
         'RepeatType', 'RecordIsComplete']
    ]
    # Write the full dataframe. This is used for debugging and investigation purposes.
    # Fix: sort_values returns a new dataframe; the previous code discarded
    # the result, so the output file was effectively unsorted.
    variants = variants.sort_values(by=['Name', 'RCVaccession', 'GeneSymbol'])
    variants.to_csv(output_dataframe, sep='\t', index=False)
    # Generate consequences table: one set of repeat types per (RCV, gene)
    consequences = variants[variants['RecordIsComplete']] \
        .groupby(['RCVaccession', 'EnsemblGeneID', 'EnsemblGeneName'])['RepeatType'] \
        .apply(set).reset_index(name='RepeatType')
    if consequences.empty:
        logger.info('There are no records ready for output')
        return
    # Check that for every (RCV, gene) pair there is only one consequence type
    assert consequences['RepeatType'].str.len().dropna().max() == 1, 'Multiple (RCV, gene) → variant type mappings!'
    # Get rid of sets
    consequences['RepeatType'] = consequences['RepeatType'].apply(list)
    consequences = consequences.explode('RepeatType')
    # Form a six-column file compatible with the consequence mapping pipeline, for example:
    # RCV000005966    1    ENSG00000156475    PPP2R2B    trinucleotide_repeat_expansion    0
    consequences['PlaceholderOnes'] = 1
    consequences['PlaceholderZeroes'] = 0
    consequences = consequences[['RCVaccession', 'PlaceholderOnes', 'EnsemblGeneID', 'EnsemblGeneName', 'RepeatType',
                                 'PlaceholderZeroes']]
    consequences.sort_values(by=['RepeatType', 'RCVaccession', 'EnsemblGeneID'], inplace=True)
    # Check that there are no empty cells in the final consequences table
    assert consequences.isnull().to_numpy().sum() == 0
    # Write the consequences table. This is used by the main evidence string generation pipeline.
    consequences.to_csv(output_consequences, sep='\t', index=False, header=False)
    # Output statistics
    logger.info(f'Generated {len(consequences)} consequences in total:')
    logger.info(f'  {sum(consequences.RepeatType == "trinucleotide_repeat_expansion")} trinucleotide repeat expansion')
    logger.info(f'  {sum(consequences.RepeatType == "short_tandem_repeat_expansion")} short tandem repeat expansion')
def main(clinvar_xml, output_consequences, output_dataframe):
"""Process data and generate output files.
Args:
clinvar_xml: filepath to the ClinVar XML file.
output_consequences: filepath to the output file with variant consequences. The file uses a 6-column format
compatible with the VEP mapping pipeline (see /vep-mapping-pipeline/README.md).
output_dataframe: filepath to the output file with the full dataframe used in the analysis. This will contain
all relevant columns and can be used for review or debugging purposes."""
logger.info('Load and preprocess variant data')
variants, s = load_clinvar_data(clinvar_xml)
# Output ClinVar record statistics
logger.info(f'''
Microsatellite records: {sum(s.values())}
With complete | |
<gh_stars>0
##########################################################################
#
# Common code generation functions
#
# These functions are called from the target-specific code generators
#
##########################################################################
import re
import datetime
import glob
import os
def comment_remover(text):
    """Strip C/C++ comments from *text* while leaving string literals intact.

    The pattern matches either a comment (line or block) or a quoted string;
    matched comments are dropped, matched strings are kept verbatim so that
    comment markers inside literals are never removed.
    """
    comment_or_string = re.compile(
        r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"',
        re.DOTALL | re.MULTILINE
    )

    def keep_or_drop(match):
        token = match.group(0)
        # Comments start with '/'; anything else the pattern matched is a
        # string literal and must survive untouched.
        return '' if token.startswith('/') else token

    return comment_or_string.sub(keep_or_drop, text)
def remove_trailing_w_space(text):
    """Strip trailing whitespace from every line of *text*.

    Replaces the previous index-scanning implementation, which had two
    defects: `text.find("\\n", line_start+1)` skipped a newline located
    exactly at `line_start`, silently collapsing runs of three or more
    consecutive newlines (e.g. 'a\\n\\n\\nb' became 'a\\n\\nb'); and it built
    the result with repeated string concatenation (quadratic).
    """
    return '\n'.join(line.rstrip() for line in text.split('\n'))
def extract_includes(text):
    """Collect '#include ...' lines that appear outside of function bodies.

    Tracks brace depth line by line; an include directive is only recorded
    while the running depth is zero. Quoted strings, escaped quotes and
    inline '{...}' blocks are scrubbed from each line before counting
    braces so they cannot skew the depth.
    """
    include_pattern = r'([ \t]*#include[\s]+[\'\"<][\w\.]+[\'\">])'
    found = []
    brace_depth = 0
    for line in text.split('\n'):
        # Scrub constructs that could confuse the brace counting below.
        cleaned = re.sub(r"\\\'", '', line)        # escaped single quotes
        cleaned = re.sub(r"\\\"", '', cleaned)     # escaped double quotes
        cleaned = re.sub(r'"[^"]*"', '', cleaned)  # double-quoted strings
        cleaned = re.sub(r"'[^']*'", '', cleaned)  # single-quoted strings
        cleaned = re.sub(r'{[^{]*}', '', cleaned)  # inline scoped logic ( {...} )
        brace_depth += cleaned.count('{') - cleaned.count('}')
        if brace_depth != 0:
            continue
        if re.search(include_pattern, line):
            found.append(line)
    return found
def para_parse(text, j, op_b, cl_b):
    """Return the index in *text* where the brace block starting at *j* closes.

    Walks forward from *j*, incrementing on *op_b* and decrementing on
    *cl_b*, and returns the first position where the running depth is zero.
    Note: if text[j] is neither brace the depth is already zero and *j*
    itself is returned immediately (callers pass j at an opening brace).
    """
    depth = 0
    pos = j
    while True:
        ch = text[pos]
        if ch == op_b:
            depth += 1
        elif ch == cl_b:
            depth -= 1
        if depth == 0:
            return pos
        pos += 1
def replace_local_includes_with_file_contents(text, search_dir):
    """Replace occurrences of '#include "<FILE>"' with <FILE>'s contents.

    <FILE> is located by walking *search_dir*; its contents are inserted with
    the include line's leading whitespace and with comments stripped. Lines
    that are not quoted includes are passed through unchanged.

    Bug fixed: the previous version indexed `re.findall(...)[0]`, which
    raised IndexError on any line containing '#include' that did not match
    the quoted-include pattern (system includes such as '#include <stdio.h>',
    or includes mentioned mid-line); its `len(matches) != 2` fallback was
    unreachable. Such lines are now simply copied through.

    Exits the process (as before) if a quoted include cannot be located.
    """
    include_rgx = r'^([\s]*)#include[\s]+"([\w\.]+)"'
    text2 = ''
    for line in text.split('\n'):
        match = re.match(include_rgx, line)
        if match is None:
            # Not a quoted local include -- keep the line as-is.
            text2 += line + '\n'
            continue
        leading_whitespace, include_item = match.groups()
        include_item_filepath = ""
        # Search recursively for the first file whose basename matches.
        for r, d, f in os.walk(search_dir):
            for f_item in f:
                if f_item == include_item:
                    include_item_filepath = os.path.join(r, f_item)
                    break
            if include_item_filepath != "":
                break
        if include_item_filepath == "":
            print("Failed to locate file '{0}'".format(include_item))
            quit()
        with open(include_item_filepath, 'r') as fh:
            include_file_text = comment_remover(fh.read())
        for include_line in include_file_text.split('\n'):
            text2 += leading_whitespace + include_line + '\n'
    return text2
def get_stride_string(g_m, maps, mapnames, name):
    """Return the SoA stride identifier for op_arg *g_m* of kernel *name*.

    Direct args use the kernel-wide direct stride constant; globals use the
    total thread count; indirect args share one stride constant per distinct
    map, keyed by the first argument that uses the same map name.
    """
    OP_ID = 1; OP_GBL = 2; OP_MAP = 3;
    arg_kind = maps[g_m]
    if arg_kind == OP_ID:
        return 'direct_' + name + '_stride_OP2CONSTANT'
    if arg_kind == OP_GBL:
        return '(gridDim%x*blockDim%x)'
    # Indirect: reuse the constant of the first arg with this map name.
    first_use = mapnames.index(mapnames[g_m])
    return 'opDat{0}_{1}_stride_OP2CONSTANT'.format(first_use, name)
# Matches values consisting only of digits, spaces, parentheses and the
# characters + - * \ . % -- i.e. macro values that are fully numeric and can
# be folded with eval(). Note '/' is NOT in the class, so values containing
# division are never treated as resolved arithmetic.
arithmetic_regex_pattern = r'^[ \(\)\+\-\*\\\.\%0-9]+$'
def op_parse_macro_defs(text):
    """Scan *text* for C '#define NAME VALUE' lines.

    Returns a dict mapping macro name -> raw (unevaluated) value string.
    A later definition of the same name overwrites an earlier one.
    """
    macro_def_pattern = r'(\n|^)[ ]*(#define[ ]+)([A-Za-z0-9\_]+)[ ]+([0-9A-Za-z\_\.\+\-\*\/\(\) ]+)'
    # findall with four capture groups always yields 4-tuples: the macro
    # name is group 3 and its value group 4.
    return {name: value for _, _, name, value in re.findall(macro_def_pattern, text)}
def self_evaluate_macro_defs(macro_defs):
    """Recursively evaluate C macro definitions that refer to other detected macros.

    Mutates *macro_defs* in place: macro values that reference other macros
    have those references substituted until fully numeric, after which they
    are folded with eval(). Values are only eval()'d once they match
    `arithmetic_regex_pattern` (digits/operators only), so this is bounded
    arithmetic rather than arbitrary code execution.

    Bug fixed: the previous version did `del macro_defs[k]` while iterating
    `macro_defs.keys()` directly, which raises RuntimeError on Python 3
    ("dictionary changed size during iteration"); key snapshots are used
    instead.
    """
    ## First, calculate the expected number of substitutions to perform.
    ## This is used as an upper bound to detect substitution loops.
    num_subs_expected = 0
    for k_val in macro_defs.values():
        if re.search(arithmetic_regex_pattern, k_val) is not None:
            continue  # already fully numeric
        for token in re.findall(r'([a-zA-Z0-9_]+)', k_val):
            if re.search(arithmetic_regex_pattern, token) is None and token in macro_defs:
                num_subs_expected += 1
    substitutions_performed = True
    num_subs_performed = 0
    while substitutions_performed:
        substitutions_performed = False
        # Snapshot the keys: the body may delete self-referential entries.
        for k in list(macro_defs.keys()):
            k_val = macro_defs[k]
            if re.search(arithmetic_regex_pattern, k_val) is not None:
                ## This macro definition is numeric
                continue
            if k == k_val:
                # Self-referential definition can never resolve; drop it.
                del macro_defs[k]
                continue
            ## If value of key 'k' depends on value of other keys,
            ## then substitute in their values:
            for k2 in list(macro_defs.keys()):
                if k == k2:
                    continue
                pattern = r'(^|[^a-zA-Z0-9_])' + k2 + r'($|[^a-zA-Z0-9_])'
                if re.search(pattern, k_val) is None:
                    continue
                ## The macro "k" refers to macro "k2"
                k2_val = macro_defs[k2]
                if re.search(arithmetic_regex_pattern, k2_val) is None:
                    # 'k2_val' has not been resolved. Wait for this to occur before
                    # substituting its value into 'k_val', as this minimises the total
                    # number of substitutions performed across all macros and so
                    # improves detection of infinite substitution loops.
                    continue
                macro_defs[k] = re.sub(pattern, "\\g<1>" + k2_val + "\\g<2>", k_val)
                k_val = macro_defs[k]
                substitutions_performed = True
                num_subs_performed += 1
                if num_subs_performed > num_subs_expected:
                    print("WARNING: " + str(num_subs_performed) + " macro substitutions performed, but expected " + str(num_subs_expected) + ", probably stuck in a loop.")
                    return
    ## Evaluate any mathematical expressions:
    for k in list(macro_defs.keys()):
        val = macro_defs[k]
        if re.search(arithmetic_regex_pattern, val) is None:
            continue
        try:
            # Safe here: 'val' matched the arithmetic pattern, so it contains
            # only digits, operators and parentheses.
            res = eval(val)
        except Exception:
            continue
        if not isinstance(res, str) and str(res) != val:
            macro_defs[k] = str(res)
def evaluate_macro_defs_in_string(macro_defs, string):
    """Recursively substitute known C macro definitions into *string*.

    Macro references are replaced by their values until none remain; if the
    fully-substituted result is purely arithmetic it is folded with eval().
    Returns the resolved string, or None if a substitution loop is detected
    (matching the original contract).
    """
    ## First, calculate the expected number of substitutions, used as an
    ## upper bound to detect substitution loops:
    num_subs_expected = 0
    if re.search(arithmetic_regex_pattern, string) is None:
        for token in re.findall(r'([a-zA-Z0-9_]+)', string):
            if re.search(arithmetic_regex_pattern, token) is None and token in macro_defs.keys():
                num_subs_expected = num_subs_expected + 1
    resolved = string
    progressed = True
    subs_done = 0
    while progressed:
        progressed = False
        for macro, value in macro_defs.items():
            pattern = r'(^|[^a-zA-Z0-9_])' + macro + r'($|[^a-zA-Z0-9_])'
            if re.search(pattern, resolved) is None:
                continue
            ## 'resolved' references macro, so substitute in its definition:
            resolved = re.sub(pattern, "\\g<1>" + value + "\\g<2>", resolved)
            progressed = True
            subs_done = subs_done + 1
            if subs_done > num_subs_expected:
                print("WARNING: " + str(subs_done) + " macro substitutions performed, but expected " + str(num_subs_expected) + ", probably stuck in a loop.")
                return
    if re.search(arithmetic_regex_pattern, resolved) is not None:
        try:
            result = eval(resolved)
        except:
            # Unparseable arithmetic: hand back the substituted text as-is.
            return resolved
        if not isinstance(result, str):
            resolved = str(result)
    return resolved
def create_kernel_info(kernel, inc_stage = 0):
OP_ID = 1; OP_GBL = 2; OP_MAP = 3;
OP_READ = 1; OP_WRITE = 2; OP_RW = 3;
OP_INC = 4; OP_MAX = 5; OP_MIN = 6;
name = kernel['name']
nargs = kernel['nargs']
dims = kernel['dims']
maps = kernel['maps']
var = kernel['var']
typs = kernel['typs']
accs = kernel['accs']
idxs = kernel['idxs']
inds = kernel['inds']
soaflags = kernel['soaflags']
optflags = kernel['optflags']
decl_filepath = kernel['decl_filepath']
ninds = kernel['ninds']
inddims = kernel['inddims']
indaccs = kernel['indaccs']
indtyps = kernel['indtyps']
invinds = kernel['invinds']
mapnames = kernel['mapnames']
invmapinds = kernel['invmapinds']
mapinds = kernel['mapinds']
nmaps = 0
if ninds > 0:
nmaps = max(mapinds)+1
nargs_novec = nargs
vec = [m for m in range(0,nargs) if int(idxs[m])<0 and maps[m] == OP_MAP]
if len(vec) > 0:
unique_args = [1];
vec_counter = 1;
vectorised = []
new_dims = []
new_maps = []
new_vars = []
new_typs = []
new_accs = []
new_idxs = []
new_inds = []
new_soaflags = []
new_optflags = []
new_mapnames = []
for m in range(0,nargs):
if int(idxs[m])<0 and maps[m] == OP_MAP:
if m > 0:
unique_args = unique_args + [len(new_dims)+1]
temp = [0]*(-1*int(idxs[m]))
for i in range(0,-1*int(idxs[m])):
temp[i] = var[m]
new_vars = new_vars+temp
for i in range(0,-1*int(idxs[m])):
temp[i] = typs[m]
new_typs = new_typs+temp
for i in range(0,-1*int(idxs[m])):
temp[i] = dims[m]
new_dims = new_dims+temp
new_maps = new_maps+[maps[m]]*int(-1*int(idxs[m]))
new_mapnames = new_mapnames+[mapnames[m]]*int(-1*int(idxs[m]))
new_soaflags = new_soaflags+[soaflags[m]]*int(-1*int(idxs[m]))
new_optflags | |
* self.t)
X0 += 0.00000000150 * math.cos(0.46939872247 + 104371.52614468009 * self.t)
X0 += 0.00000000166 * math.cos(0.65776448592 + 522.8212355773 * self.t)
X0 += 0.00000000149 * math.cos(4.09099834194 + 23889.0596157707 * self.t)
X0 += 0.00000000125 * math.cos(0.99047205683 + 65697.31390725628 * self.t)
X0 += 0.00000000127 * math.cos(3.92999950538 + 54294.81396101029 * self.t)
X0 += 0.00000000136 * math.cos(1.42756921141 + 102233.09252340188 * self.t)
X0 += 0.00000000140 * math.cos(3.82491395518 + 52808.83383994509 * self.t)
X0 += 0.00000000171 * math.cos(2.58259586325 + 78187.68717093048 * self.t)
X0 += 0.00000000119 * math.cos(1.41260374923 + 178063.61231144409 * self.t)
X0 += 0.00000000121 * math.cos(3.91577422121 + 181506.18725640948 * self.t)
X0 += 0.00000000150 * math.cos(2.94179740209 + 13362.6935242827 * self.t)
X0 += 0.00000000160 * math.cos(2.07478360203 + 955.8435590921 * self.t)
X0 += 0.00000000122 * math.cos(2.54246856782 + 52072.9573264133 * self.t)
X0 += 0.00000000139 * math.cos(5.71618549148 + 97113.18079218028 * self.t)
X0 += 0.00000000116 * math.cos(4.06977577347 + 104344.74283677948 * self.t)
X0 += 0.00000000127 * math.cos(0.21852764054 + 26521.8586969345 * self.t)
X0 += 0.00000000113 * math.cos(1.81808535780 + 154938.58977164488 * self.t)
X0 += 0.00000000116 * math.cos(1.53733085408 + 110634.93223377169 * self.t)
X0 += 0.00000000120 * math.cos(4.73916175135 + 23969.3830986793 * self.t)
X0 += 0.00000000117 * math.cos(2.35333449828 + 18093.6185170335 * self.t)
X0 += 0.00000000114 * math.cos(0.81259306552 + 25754.2910182883 * self.t)
X0 += 0.00000000150 * math.cos(0.60833287071 + 25551.34244696229 * self.t)
X0 += 0.00000000109 * math.cos(2.00086908622 + 40565.01050729069 * self.t)
X0 += 0.00000000110 * math.cos(2.36264207110 + 24176.9474758405 * self.t)
X0 += 0.00000000107 * math.cos(0.12185511336 + 68242.11596210669 * self.t)
X0 += 0.00000000112 * math.cos(4.84462127363 + 26422.0028998271 * self.t)
X0 += 0.00000000107 * math.cos(5.97982710676 + 90695.99589260388 * self.t)
X0 += 0.00000000104 * math.cos(1.75659774658 + 156101.06447605268 * self.t)
X0 += 0.00000000111 * math.cos(4.99889923860 + 116918.00808376308 * self.t)
X0 += 0.00000000121 * math.cos(1.56849356350 + 28287.2343023447 * self.t)
X0 += 0.00000000108 * math.cos(6.08227647204 + 44181.03402364069 * self.t)
X0 += 0.00000000102 * math.cos(4.15125783923 + 26198.3532802771 * self.t)
X0 += 0.00000000143 * math.cos(5.40448013475 + 50579.86365834729 * self.t)
X0 += 0.00000000108 * math.cos(1.57765174204 + 26107.32908499049 * self.t)
X0 += 0.00000000103 * math.cos(1.01910656324 + 29416.28261533789 * self.t)
X0 += 0.00000000102 * math.cos(2.83999953225 + 74923.34081550628 * self.t)
X0 += 0.00000000111 * math.cos(3.14675866322 + 51639.24558853649 * self.t)
X0 += 0.00000000100 * math.cos(5.42391958785 + 25035.8785758693 * self.t)
X0 += 0.00000000097 * math.cos(5.13819824333 + 323.74923414091 * self.t)
X0 += 0.00000000100 * math.cos(3.26784777828 + 104565.15547921829 * self.t)
X0 += 0.00000000099 * math.cos(0.40287203553 + 31415.6230674405 * self.t)
X0 += 0.00000000120 * math.cos(0.30671438664 + 134991.71302241329 * self.t)
X0 += 0.00000000131 * math.cos(2.63461906713 + 647.25465079831 * self.t)
X0 += 0.00000000128 * math.cos(4.13013360989 + 104138.55728834229 * self.t)
X0 += 0.00000000110 * math.cos(0.16249290358 + 24357.0246061251 * self.t)
X0 += 0.00000000105 * math.cos(5.76370877998 + 26067.9895631909 * self.t)
X0 += 0.00000000095 * math.cos(2.53553314645 + 52705.25343075949 * self.t)
X0 += 0.00000000105 * math.cos(5.29014619681 + 55516.6625273317 * self.t)
X0 += 0.00000000093 * math.cos(5.06220815245 + 52126.05347872769 * self.t)
X0 += 0.00000000092 * math.cos(3.89022682970 + 54509.2464935039 * self.t)
X0 += 0.00000000096 * math.cos(3.48344687819 + 743.23387801611 * self.t)
X0 += 0.00000000109 * math.cos(4.33764824375 + 632.5399218297 * self.t)
X0 += 0.00000000090 * math.cos(3.28421553573 + 27999.3464422749 * self.t)
X0 += 0.00000000094 * math.cos(0.26566762837 + 103291.98681862408 * self.t)
X0 += 0.00000000111 * math.cos(4.40219993403 + 40565.4981422577 * self.t)
X0 += 0.00000000101 * math.cos(3.06854770182 + 51951.70530492999 * self.t)
X0 += 0.00000000102 * math.cos(1.77060203080 + 104355.73771913828 * self.t)
X0 += 0.00000000088 * math.cos(0.98000029962 + 1588.82907780029 * self.t)
X0 += 0.00000000102 * math.cos(5.00530419072 + 26941.3433408097 * self.t)
X0 += 0.00000000109 * math.cos(0.41184272678 + 60056.13925397089 * self.t)
X0 += 0.00000000093 * math.cos(5.48203494654 + 27780.31262856009 * self.t)
X0 += 0.00000000089 * math.cos(3.15455699832 + 68050.18006102808 * self.t)
X0 += 0.00000000106 * math.cos(3.72898790593 + 6770.9544187291 * self.t)
X0 += 0.00000000101 * math.cos(0.37716551293 + 52250.8316991992 * self.t)
X0 += 0.00000000086 * math.cos(0.08251374650 + 52325.61329776649 * self.t)
X0 += 0.00000000092 * math.cos(5.92652248722 + 140653.04507156549 * self.t)
X0 += 0.00000000107 * math.cos(1.95514516893 + 157483.26240853728 * self.t)
X0 += 0.00000000087 * math.cos(2.25938740860 + 25938.5837619231 * self.t)
X0 += 0.00000000094 * math.cos(4.55110857685 + 51219.76094466129 * self.t)
X0 += 0.00000000089 * math.cos(5.24168644118 + 36109.9842391571 * self.t)
X0 += 0.00000000093 * math.cos(3.23654834978 + 103.3365917021 * self.t)
X0 += 0.00000000098 * math.cos(5.50021939322 + 27819.2693119903 * self.t)
X0 += 0.00000000084 * math.cos(3.07323794777 + 53228.31848382029 * self.t)
X0 += 0.00000000087 * math.cos(4.71717400578 + 52065.84377941249 * self.t)
X0 += 0.00000000099 * math.cos(4.86243388147 + 97581.14578253469 * self.t)
X0 += 0.00000000105 * math.cos(5.43405944979 + 70269.42480018188 * self.t)
X0 += 0.00000000083 * math.cos(1.07166783581 + 51219.96272405629 * self.t)
X0 += 0.00000000101 * math.cos(0.37289734688 + 52226.0467225361 * self.t)
X0 += 0.00000000087 * math.cos(3.22552375788 + 104778.45457465628 * self.t)
X0 += 0.00000000102 * math.cos(6.17051419926 + 12432.2864678813 * self.t)
X0 += 0.00000000082 * math.cos(3.53594042739 + 78731.91823256049 * self.t)
X0 += 0.00000000101 * math.cos(5.35965052966 + 119117.0954270501 * self.t)
X0 += 0.00000000108 * math.cos(1.06265370367 + 49527.59527515889 * self.t)
X0 += 0.00000000092 * math.cos(4.58719441891 + 51859.65823097529 * self.t)
X0 += 0.00000000080 * math.cos(5.90301150782 + 53242.5455778219 * self.t)
X0 += 0.00000000107 * math.cos(3.33033819724 + 25447.76203777669 * self.t)
X0 += 0.00000000088 * math.cos(6.01318518947 + 12725.20961729149 * self.t)
X0 += 0.00000000091 * math.cos(3.91995839732 + 26300.95841952869 * self.t)
X0 += 0.00000000080 * math.cos(3.22630056109 + 105940.92927906409 * self.t)
X0 += 0.00000000076 * math.cos(1.13669532403 + 102133.09927959349 * self.t)
X0 += 0.00000000079 * math.cos(4.83610406929 + 80482.22271142589 * self.t)
X0 += 0.00000000091 * math.cos(4.23329610745 + 51013.57539622409 * self.t)
X0 += 0.00000000094 * math.cos(2.96601881549 + 2222.1004520805 * self.t)
X0 += 0.00000000089 * math.cos(2.52058786962 + 137678.43511695448 * self.t)
X0 += 0.00000000075 * math.cos(4.78568609813 + 3327.89183879669 * self.t)
X0 += 0.00000000089 * math.cos(0.85977762746 + 1910.9556657337 * self.t)
X0 += 0.00000000098 * math.cos(3.32594040773 + 50056.79860528649 * self.t)
X0 += 0.00000000074 * math.cos(1.41768963459 + 25977.94063783829 * self.t)
X0 += 0.00000000089 * math.cos(0.85266701038 + 52492.4419702885 * self.t)
X0 += 0.00000000073 * math.cos(5.13639330582 + 50697.1835265675 * self.t)
X0 += 0.00000000102 * math.cos(3.34303114673 + 104347.97504842229 * self.t)
X0 += 0.00000000072 * math.cos(3.04850836782 + 170049.41410852008 * self.t)
X0 += 0.00000000078 * math.cos(5.73381249578 + 103285.36090659029 * self.t)
X0 += 0.00000000075 * math.cos(1.55658438412 + 52595.53474450709 * self.t)
X0 += 0.00000000074 * math.cos(2.73392405787 + 25021.6514818677 * self.t)
X0 += 0.00000000075 * math.cos(1.65865555855 + 77844.46859833089 * self.t)
X0 += 0.00000000078 * math.cos(1.07533201236 + 74.53778108379 * self.t)
X0 += 0.00000000094 * math.cos(3.12911978302 + 52698.62751872569 * self.t)
X0 += 0.00000000095 * math.cos(5.99575918602 + 25938.09612695609 * self.t)
X0 += 0.00000000087 * math.cos(5.82357433280 + 72602.13355808688 * self.t)
X0 += 0.00000000070 * math.cos(5.67887850233 + 27670.10630734069 * self.t)
X0 += 0.00000000076 * math.cos(0.64559937719 + 104358.96993078108 * self.t)
X0 += 0.00000000074 * math.cos(5.28706150347 + 25954.0379094727 * self.t)
X0 += 0.00000000070 * math.cos(1.27402715914 + 66653.40128383189 * self.t)
X0 += 0.00000000071 * math.cos(3.97675324486 + 24998.4381678641 * self.t)
X0 += 0.00000000073 * math.cos(0.37426023469 + 26222.2560086427 * self.t)
X0 += 0.00000000081 * math.cos(4.55346032047 + 8194.0315157251 * self.t)
X0 += 0.00000000066 * math.cos(4.21780979859 + 78270.58180110609 * self.t)
X0 += 0.00000000072 * math.cos(1.38206164812 + 426.3543733925 * self.t)
X0 += 0.00000000078 * math.cos(5.28440082300 + 104202.29318664569 * self.t)
X0 += 0.00000000066 * math.cos(3.65294907987 + 89586.12970554348 * self.t)
X0 += 0.00000000081 * math.cos(4.51389667692 + 26080.54577708989 * self.t)
X0 += 0.00000000068 * math.cos(2.13854484878 + 207644.08701988788 * self.t)
X0 += 0.00000000065 * math.cos(5.87225172961 + 12546.72575656651 * self.t)
X0 += 0.00000000074 * math.cos(3.91991873665 + 97670.63153038069 * self.t)
X0 += 0.00000000067 * math.cos(2.82169598550 + 2111.8941308611 * self.t)
X0 += 0.00000000079 * math.cos(5.28726952189 + 77830.24150432929 * self.t)
X0 += 0.00000000083 * math.cos(3.78654945074 + 522.3336006103 * self.t)
X0 += 0.00000000079 * math.cos(3.79951632035 + 25446.73339731871 * self.t)
X0 += 0.00000000063 * math.cos(6.08370597700 + 70268.93716521488 * self.t)
X0 += 0.00000000074 * math.cos(2.86346034860 + 95247.46175469569 * self.t)
X0 += 0.00000000085 * math.cos(5.74305012713 + 72935.98949885629 * self.t)
X0 += 0.00000000062 * math.cos(3.50617407914 + 150244.58681693728 * self.t)
X0 += 0.00000000081 * math.cos(6.24341823549 + 77733.77464214448 * self.t)
X0 += 0.00000000077 * math.cos(1.16703890175 + 316.6356871401 * self.t)
X0 += 0.00000000075 * math.cos(1.04471353660 + 33967.74847742969 * self.t)
X0 += 0.00000000071 * math.cos(3.33594132090 + 27684.3334013423 * self.t)
X0 += 0.00000000061 * math.cos(4.37869496731 + 846.3266522347 * self.t)
X0 += 0.00000000064 * math.cos(1.68211518159 + 27177.8557502513 * self.t)
X0 += 0.00000000073 * math.cos(4.46095079744 + 26083.7779887327 * self.t)
X0 += 0.00000000075 * math.cos(1.85897859977 + 26729.56052079669 * self.t)
X0 += 0.00000000071 * math.cos(4.93586530137 + 17893.8716258491 * self.t)
| |
<reponame>vbisserie/elastalert2<filename>tests/alerters/pagerduty_test.py
import json
import mock
import pytest
from requests import RequestException
from elastalert.alerters.pagerduty import PagerDutyAlerter
from elastalert.loaders import FileRulesLoader
from elastalert.util import EAException
def test_pagerduty_alerter():
    """PagerDuty v1 event: default settings post a trigger with an empty incident key."""
    rule = {
        'name': 'Test PD Rule',
        'type': 'any',
        'pagerduty_service_key': 'magicalbadgers',
        'pagerduty_client_name': 'ponies inc.',
        'alert': [],
    }
    loader = FileRulesLoader({})
    loader.load_modules(rule)
    alerter = PagerDutyAlerter(rule)
    sample = {'@timestamp': '2017-01-01T00:00:00', 'somefield': 'foobarbaz'}
    with mock.patch('requests.post') as post_mock:
        alerter.alert([sample])
    post_mock.assert_called_once_with(
        'https://events.pagerduty.com/generic/2010-04-15/create_event.json',
        data=mock.ANY, headers={'content-type': 'application/json'}, proxies=None)
    actual = json.loads(post_mock.call_args_list[0][1]['data'])
    assert actual == {
        'client': 'ponies inc.',
        'description': 'Test PD Rule',
        'details': {
            'information': 'Test PD Rule\n\n@timestamp: 2017-01-01T00:00:00\nsomefield: foobarbaz\n'
        },
        'event_type': 'trigger',
        'incident_key': '',
        'service_key': 'magicalbadgers',
    }
def test_pagerduty_alerter_v2():
    """PagerDuty v2 event: static payload fields are copied into the enqueue body."""
    rule = {
        'name': 'Test PD Rule',
        'type': 'any',
        'pagerduty_service_key': 'magicalbadgers',
        'pagerduty_client_name': 'ponies inc.',
        'pagerduty_api_version': 'v2',
        'pagerduty_v2_payload_class': 'ping failure',
        'pagerduty_v2_payload_component': 'mysql',
        'pagerduty_v2_payload_group': 'app-stack',
        'pagerduty_v2_payload_severity': 'error',
        'pagerduty_v2_payload_source': 'mysql.host.name',
        'alert': [],
    }
    loader = FileRulesLoader({})
    loader.load_modules(rule)
    alerter = PagerDutyAlerter(rule)
    sample = {'@timestamp': '2017-01-01T00:00:00', 'somefield': 'foobarbaz'}
    with mock.patch('requests.post') as post_mock:
        alerter.alert([sample])
    post_mock.assert_called_once_with(
        'https://events.pagerduty.com/v2/enqueue',
        data=mock.ANY, headers={'content-type': 'application/json'}, proxies=None)
    actual = json.loads(post_mock.call_args_list[0][1]['data'])
    assert actual == {
        'client': 'ponies inc.',
        'payload': {
            'class': 'ping failure',
            'component': 'mysql',
            'group': 'app-stack',
            'severity': 'error',
            'source': 'mysql.host.name',
            'summary': 'Test PD Rule',
            'custom_details': {
                'information': 'Test PD Rule\n\n@timestamp: 2017-01-01T00:00:00\nsomefield: foobarbaz\n'
            },
            'timestamp': '2017-01-01T00:00:00',
        },
        'event_action': 'trigger',
        'dedup_key': '',
        'routing_key': 'magicalbadgers',
    }
def test_pagerduty_alerter_v2_payload_class_args():
    """v2 payload class with args: no format placeholders means the class string is kept verbatim."""
    rule = {
        'name': 'Test PD Rule',
        'type': 'any',
        'pagerduty_service_key': 'magicalbadgers',
        'pagerduty_client_name': 'ponies inc.',
        'pagerduty_api_version': 'v2',
        'pagerduty_v2_payload_class': 'somefield',
        'pagerduty_v2_payload_class_args': ['@timestamp', 'somefield'],
        'pagerduty_v2_payload_component': 'mysql',
        'pagerduty_v2_payload_group': 'app-stack',
        'pagerduty_v2_payload_severity': 'error',
        'pagerduty_v2_payload_source': 'mysql.host.name',
        'alert': [],
    }
    loader = FileRulesLoader({})
    loader.load_modules(rule)
    alerter = PagerDutyAlerter(rule)
    sample = {'@timestamp': '2017-01-01T00:00:00', 'somefield': 'foobarbaz'}
    with mock.patch('requests.post') as post_mock:
        alerter.alert([sample])
    post_mock.assert_called_once_with(
        'https://events.pagerduty.com/v2/enqueue',
        data=mock.ANY, headers={'content-type': 'application/json'}, proxies=None)
    actual = json.loads(post_mock.call_args_list[0][1]['data'])
    assert actual == {
        'client': 'ponies inc.',
        'payload': {
            'class': 'somefield',
            'component': 'mysql',
            'group': 'app-stack',
            'severity': 'error',
            'source': 'mysql.host.name',
            'summary': 'Test PD Rule',
            'custom_details': {
                'information': 'Test PD Rule\n\n@timestamp: 2017-01-01T00:00:00\nsomefield: foobarbaz\n'
            },
            'timestamp': '2017-01-01T00:00:00',
        },
        'event_action': 'trigger',
        'dedup_key': '',
        'routing_key': 'magicalbadgers',
    }
def test_pagerduty_alerter_v2_payload_component_args():
    """v2 payload component with args: no format placeholders means the component string is kept verbatim."""
    rule = {
        'name': 'Test PD Rule',
        'type': 'any',
        'pagerduty_service_key': 'magicalbadgers',
        'pagerduty_client_name': 'ponies inc.',
        'pagerduty_api_version': 'v2',
        'pagerduty_v2_payload_class': 'ping failure',
        'pagerduty_v2_payload_component': 'somefield',
        'pagerduty_v2_payload_component_args': ['@timestamp', 'somefield'],
        'pagerduty_v2_payload_group': 'app-stack',
        'pagerduty_v2_payload_severity': 'error',
        'pagerduty_v2_payload_source': 'mysql.host.name',
        'alert': [],
    }
    loader = FileRulesLoader({})
    loader.load_modules(rule)
    alerter = PagerDutyAlerter(rule)
    sample = {'@timestamp': '2017-01-01T00:00:00', 'somefield': 'foobarbaz'}
    with mock.patch('requests.post') as post_mock:
        alerter.alert([sample])
    post_mock.assert_called_once_with(
        'https://events.pagerduty.com/v2/enqueue',
        data=mock.ANY, headers={'content-type': 'application/json'}, proxies=None)
    actual = json.loads(post_mock.call_args_list[0][1]['data'])
    assert actual == {
        'client': 'ponies inc.',
        'payload': {
            'class': 'ping failure',
            'component': 'somefield',
            'group': 'app-stack',
            'severity': 'error',
            'source': 'mysql.host.name',
            'summary': 'Test PD Rule',
            'custom_details': {
                'information': 'Test PD Rule\n\n@timestamp: 2017-01-01T00:00:00\nsomefield: foobarbaz\n'
            },
            'timestamp': '2017-01-01T00:00:00',
        },
        'event_action': 'trigger',
        'dedup_key': '',
        'routing_key': 'magicalbadgers',
    }
def test_pagerduty_alerter_v2_payload_group_args():
    """v2 payload group with args: no format placeholders means the group string is kept verbatim."""
    rule = {
        'name': 'Test PD Rule',
        'type': 'any',
        'pagerduty_service_key': 'magicalbadgers',
        'pagerduty_client_name': 'ponies inc.',
        'pagerduty_api_version': 'v2',
        'pagerduty_v2_payload_class': 'ping failure',
        'pagerduty_v2_payload_component': 'mysql',
        'pagerduty_v2_payload_group': 'somefield',
        'pagerduty_v2_payload_group_args': ['@timestamp', 'somefield'],
        'pagerduty_v2_payload_severity': 'error',
        'pagerduty_v2_payload_source': 'mysql.host.name',
        'alert': [],
    }
    loader = FileRulesLoader({})
    loader.load_modules(rule)
    alerter = PagerDutyAlerter(rule)
    sample = {'@timestamp': '2017-01-01T00:00:00', 'somefield': 'foobarbaz'}
    with mock.patch('requests.post') as post_mock:
        alerter.alert([sample])
    post_mock.assert_called_once_with(
        'https://events.pagerduty.com/v2/enqueue',
        data=mock.ANY, headers={'content-type': 'application/json'}, proxies=None)
    actual = json.loads(post_mock.call_args_list[0][1]['data'])
    assert actual == {
        'client': 'ponies inc.',
        'payload': {
            'class': 'ping failure',
            'component': 'mysql',
            'group': 'somefield',
            'severity': 'error',
            'source': 'mysql.host.name',
            'summary': 'Test PD Rule',
            'custom_details': {
                'information': 'Test PD Rule\n\n@timestamp: 2017-01-01T00:00:00\nsomefield: foobarbaz\n'
            },
            'timestamp': '2017-01-01T00:00:00',
        },
        'event_action': 'trigger',
        'dedup_key': '',
        'routing_key': 'magicalbadgers',
    }
def test_pagerduty_alerter_v2_payload_source_args():
    """v2 payload source with args: no format placeholders means the source string is kept verbatim."""
    rule = {
        'name': 'Test PD Rule',
        'type': 'any',
        'pagerduty_service_key': 'magicalbadgers',
        'pagerduty_client_name': 'ponies inc.',
        'pagerduty_api_version': 'v2',
        'pagerduty_v2_payload_class': 'ping failure',
        'pagerduty_v2_payload_component': 'mysql',
        'pagerduty_v2_payload_group': 'app-stack',
        'pagerduty_v2_payload_severity': 'error',
        'pagerduty_v2_payload_source': 'somefield',
        'pagerduty_v2_payload_source_args': ['@timestamp', 'somefield'],
        'alert': [],
    }
    loader = FileRulesLoader({})
    loader.load_modules(rule)
    alerter = PagerDutyAlerter(rule)
    sample = {'@timestamp': '2017-01-01T00:00:00', 'somefield': 'foobarbaz'}
    with mock.patch('requests.post') as post_mock:
        alerter.alert([sample])
    post_mock.assert_called_once_with(
        'https://events.pagerduty.com/v2/enqueue',
        data=mock.ANY, headers={'content-type': 'application/json'}, proxies=None)
    actual = json.loads(post_mock.call_args_list[0][1]['data'])
    assert actual == {
        'client': 'ponies inc.',
        'payload': {
            'class': 'ping failure',
            'component': 'mysql',
            'group': 'app-stack',
            'severity': 'error',
            'source': 'somefield',
            'summary': 'Test PD Rule',
            'custom_details': {
                'information': 'Test PD Rule\n\n@timestamp: 2017-01-01T00:00:00\nsomefield: foobarbaz\n'
            },
            'timestamp': '2017-01-01T00:00:00',
        },
        'event_action': 'trigger',
        'dedup_key': '',
        'routing_key': 'magicalbadgers',
    }
def test_pagerduty_alerter_v2_payload_custom_details():
    """v2 custom_details: values are looked up in the match; missing fields become None."""
    rule = {
        'name': 'Test PD Rule',
        'type': 'any',
        'pagerduty_service_key': 'magicalbadgers',
        'pagerduty_client_name': 'ponies inc.',
        'pagerduty_api_version': 'v2',
        'pagerduty_v2_payload_class': 'ping failure',
        'pagerduty_v2_payload_component': 'mysql',
        'pagerduty_v2_payload_group': 'app-stack',
        'pagerduty_v2_payload_severity': 'error',
        'pagerduty_v2_payload_source': 'mysql.host.name',
        'pagerduty_v2_payload_custom_details': {'a': 'somefield', 'c': 'f'},
        'alert': [],
    }
    loader = FileRulesLoader({})
    loader.load_modules(rule)
    alerter = PagerDutyAlerter(rule)
    sample = {'@timestamp': '2017-01-01T00:00:00', 'somefield': 'foobarbaz'}
    with mock.patch('requests.post') as post_mock:
        alerter.alert([sample])
    post_mock.assert_called_once_with(
        'https://events.pagerduty.com/v2/enqueue',
        data=mock.ANY, headers={'content-type': 'application/json'}, proxies=None)
    actual = json.loads(post_mock.call_args_list[0][1]['data'])
    assert actual == {
        'client': 'ponies inc.',
        'payload': {
            'class': 'ping failure',
            'component': 'mysql',
            'group': 'app-stack',
            'severity': 'error',
            'source': 'mysql.host.name',
            'summary': 'Test PD Rule',
            'custom_details': {
                'a': 'foobarbaz',
                'c': None,
                'information': 'Test PD Rule\n\n@timestamp: 2017-01-01T00:00:00\nsomefield: foobarbaz\n'
            },
            'timestamp': '2017-01-01T00:00:00',
        },
        'event_action': 'trigger',
        'dedup_key': '',
        'routing_key': 'magicalbadgers',
    }
def test_pagerduty_alerter_v2_payload_include_all_info():
    """v2 include_all_info=False: custom_details is posted empty."""
    rule = {
        'name': 'Test PD Rule',
        'type': 'any',
        'pagerduty_service_key': 'magicalbadgers',
        'pagerduty_client_name': 'ponies inc.',
        'pagerduty_api_version': 'v2',
        'pagerduty_v2_payload_class': 'ping failure',
        'pagerduty_v2_payload_component': 'mysql',
        'pagerduty_v2_payload_group': 'app-stack',
        'pagerduty_v2_payload_severity': 'error',
        'pagerduty_v2_payload_source': 'mysql.host.name',
        'pagerduty_v2_payload_include_all_info': False,
        'alert': [],
    }
    loader = FileRulesLoader({})
    loader.load_modules(rule)
    alerter = PagerDutyAlerter(rule)
    sample = {'@timestamp': '2017-01-01T00:00:00', 'somefield': 'foobarbaz'}
    with mock.patch('requests.post') as post_mock:
        alerter.alert([sample])
    post_mock.assert_called_once_with(
        'https://events.pagerduty.com/v2/enqueue',
        data=mock.ANY, headers={'content-type': 'application/json'}, proxies=None)
    actual = json.loads(post_mock.call_args_list[0][1]['data'])
    assert actual == {
        'client': 'ponies inc.',
        'payload': {
            'class': 'ping failure',
            'component': 'mysql',
            'group': 'app-stack',
            'severity': 'error',
            'source': 'mysql.host.name',
            'summary': 'Test PD Rule',
            'custom_details': {},
            'timestamp': '2017-01-01T00:00:00',
        },
        'event_action': 'trigger',
        'dedup_key': '',
        'routing_key': 'magicalbadgers',
    }
def test_pagerduty_alerter_custom_incident_key():
    """A literal ``pagerduty_incident_key`` is forwarded verbatim (v1 API)."""
    rule = {
        'name': 'Test PD Rule',
        'type': 'any',
        'pagerduty_service_key': 'magicalbadgers',
        'pagerduty_client_name': 'ponies inc.',
        'pagerduty_incident_key': 'custom key',
        'alert': [],
    }
    loader = FileRulesLoader({})
    loader.load_modules(rule)
    alerter = PagerDutyAlerter(rule)
    with mock.patch('requests.post') as post:
        alerter.alert([{'@timestamp': '2017-01-01T00:00:00',
                        'somefield': 'foobarbaz'}])
    # Pin the endpoint and headers; the body is checked via json.loads
    # since serialized key order is not guaranteed.
    post.assert_called_once_with(
        alerter.url, data=mock.ANY,
        headers={'content-type': 'application/json'}, proxies=None)
    sent = json.loads(post.call_args_list[0][1]['data'])
    assert sent == {
        'client': 'ponies inc.',
        'description': 'Test PD Rule',
        'details': {
            'information': 'Test PD Rule\n\n@timestamp: 2017-01-01T00:00:00\nsomefield: foobarbaz\n'
        },
        'event_type': 'trigger',
        'incident_key': 'custom key',
        'service_key': 'magicalbadgers',
    }
def test_pagerduty_alerter_custom_incident_key_with_args():
    """``pagerduty_incident_key_args`` formats match fields into the key.

    ``'custom {0}'`` with args ``['somefield']`` must render as
    ``'custom foobarbaz'`` for this match.
    """
    rule = {
        'name': 'Test PD Rule',
        'type': 'any',
        'pagerduty_service_key': 'magicalbadgers',
        'pagerduty_client_name': 'ponies inc.',
        'pagerduty_incident_key': 'custom {0}',
        'pagerduty_incident_key_args': ['somefield'],
        'alert': []
    }
    rules_loader = FileRulesLoader({})
    rules_loader.load_modules(rule)
    alert = PagerDutyAlerter(rule)
    match = {
        '@timestamp': '2017-01-01T00:00:00',
        'somefield': 'foobarbaz'
    }
    with mock.patch('requests.post') as mock_post_request:
        alert.alert([match])
    expected_data = {
        'client': 'ponies inc.',
        'description': 'Test PD Rule',
        'details': {
            'information': 'Test PD Rule\n\n@timestamp: 2017-01-01T00:00:00\nsomefield: foobarbaz\n'
        },
        'event_type': 'trigger',
        # 'somefield' substituted into the key template.
        'incident_key': 'custom foobarbaz',
        'service_key': 'magicalbadgers',
    }
    mock_post_request.assert_called_once_with(alert.url, data=mock.ANY, headers={'content-type': 'application/json'}, proxies=None)
    assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data'])
def test_pagerduty_alerter_custom_alert_subject():
    """A static ``alert_subject`` replaces the rule name as the description."""
    rule = {
        'name': 'Test PD Rule',
        'type': 'any',
        'alert_subject': 'Hungry kittens',
        'pagerduty_service_key': 'magicalbadgers',
        'pagerduty_client_name': 'ponies inc.',
        'pagerduty_incident_key': 'custom {0}',
        'pagerduty_incident_key_args': ['somefield'],
        'alert': []
    }
    rules_loader = FileRulesLoader({})
    rules_loader.load_modules(rule)
    alert = PagerDutyAlerter(rule)
    match = {
        '@timestamp': '2017-01-01T00:00:00',
        'somefield': 'foobarbaz'
    }
    with mock.patch('requests.post') as mock_post_request:
        alert.alert([match])
    expected_data = {
        'client': 'ponies inc.',
        # alert_subject wins over the rule name for the description...
        'description': 'Hungry kittens',
        'details': {
            # ...but the details text still uses the rule name.
            'information': 'Test PD Rule\n\n@timestamp: 2017-01-01T00:00:00\nsomefield: foobarbaz\n'
        },
        'event_type': 'trigger',
        'incident_key': 'custom foobarbaz',
        'service_key': 'magicalbadgers',
    }
    mock_post_request.assert_called_once_with(alert.url, data=mock.ANY, headers={'content-type': 'application/json'}, proxies=None)
    assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data'])
def test_pagerduty_alerter_custom_alert_subject_with_args():
    """Subject and incident key are formatted from different match fields.

    ``alert_subject_args`` pulls 'somefield' into the description while
    ``pagerduty_incident_key_args`` pulls 'someotherfield' into the key.
    """
    rule = {
        'name': 'Test PD Rule',
        'type': 'any',
        'alert_subject': '{0} kittens',
        'alert_subject_args': ['somefield'],
        'pagerduty_service_key': 'magicalbadgers',
        'pagerduty_client_name': 'ponies inc.',
        'pagerduty_incident_key': 'custom {0}',
        'pagerduty_incident_key_args': ['someotherfield'],
        'alert': []
    }
    rules_loader = FileRulesLoader({})
    rules_loader.load_modules(rule)
    alert = PagerDutyAlerter(rule)
    match = {
        '@timestamp': '2017-01-01T00:00:00',
        'somefield': 'Stinky',
        'someotherfield': 'foobarbaz'
    }
    with mock.patch('requests.post') as mock_post_request:
        alert.alert([match])
    expected_data = {
        'client': 'ponies inc.',
        'description': 'Stinky kittens',
        'details': {
            'information': 'Test PD Rule\n\n@timestamp: 2017-01-01T00:00:00\nsomefield: Stinky\nsomeotherfield: foobarbaz\n'
        },
        'event_type': 'trigger',
        'incident_key': 'custom foobarbaz',
        'service_key': 'magicalbadgers',
    }
    mock_post_request.assert_called_once_with(alert.url, data=mock.ANY, headers={'content-type': 'application/json'}, proxies=None)
    assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data'])
def test_pagerduty_alerter_custom_alert_subject_with_args_specifying_trigger():
    """An explicit ``pagerduty_event_type: trigger`` behaves like the default.

    Same shape as the previous test, but with the event type spelled out in
    the rule; the outgoing ``event_type`` must still be 'trigger'.
    """
    rule = {
        'name': 'Test PD Rule',
        'type': 'any',
        'alert_subject': '{0} kittens',
        'alert_subject_args': ['somefield'],
        'pagerduty_service_key': 'magicalbadgers',
        'pagerduty_event_type': 'trigger',
        'pagerduty_client_name': 'ponies inc.',
        'pagerduty_incident_key': 'custom {0}',
        'pagerduty_incident_key_args': ['someotherfield'],
        'alert': []
    }
    rules_loader = FileRulesLoader({})
    rules_loader.load_modules(rule)
    alert = PagerDutyAlerter(rule)
    match = {
        '@timestamp': '2017-01-01T00:00:00',
        'somefield': 'Stinkiest',
        'someotherfield': 'foobarbaz'
    }
    with mock.patch('requests.post') as mock_post_request:
        alert.alert([match])
    expected_data = {
        'client': 'ponies inc.',
        'description': 'Stinkiest kittens',
        'details': {
            'information': 'Test PD Rule\n\n@timestamp: 2017-01-01T00:00:00\nsomefield: Stinkiest\nsomeotherfield: foobarbaz\n'
        },
        'event_type': 'trigger',
        'incident_key': 'custom foobarbaz',
        'service_key': 'magicalbadgers',
    }
    mock_post_request.assert_called_once_with(alert.url, data=mock.ANY, headers={'content-type': 'application/json'}, proxies=None)
    assert expected_data == json.loads(mock_post_request.call_args_list[0][1]['data'])
def test_pagerduty_alerter_proxy():
rule = {
'name': 'Test PD Rule',
'type': 'any',
'alert_subject': '{0} kittens',
'alert_subject_args': ['somefield'],
'pagerduty_service_key': 'magicalbadgers',
'pagerduty_event_type': 'trigger',
'pagerduty_client_name': 'ponies inc.',
'pagerduty_incident_key': 'custom {0}',
'pagerduty_incident_key_args': ['someotherfield'],
'pagerduty_proxy': 'http://proxy.url',
'alert': []
}
rules_loader = FileRulesLoader({})
rules_loader.load_modules(rule)
alert = PagerDutyAlerter(rule)
match = {
'@timestamp': '2017-01-01T00:00:00',
'somefield': 'Stinkiest',
'someotherfield': 'foobarbaz'
}
with mock.patch('requests.post') as | |
"4355 4452 4523",
45885: "4355 4452 4524",
45886: "4355 4452 4525",
45887: "4355 4452 4526",
45888: "4355 4452 4527",
45889: "4355 4452 4528",
45890: "4355 4452 4529",
45891: "4355 4452 4530",
45892: "4355 4452 4531",
45893: "4355 4452 4532",
45894: "4355 4452 4533",
45895: "4355 4452 4534",
45896: "4355 4452 4535",
45897: "4355 4452 4536",
45898: "4355 4452 4537",
45899: "4355 4452 4538",
45900: "4355 4452 4539",
45901: "4355 4452 4540",
45902: "4355 4452 4541",
45903: "4355 4452 4542",
45904: "4355 4452 4543",
45905: "4355 4452 4544",
45906: "4355 4452 4545",
45907: "4355 4452 4546",
45908: "4355 4453",
45909: "4355 4453 4520",
45910: "4355 4453 4521",
45911: "4355 4453 4522",
45912: "4355 4453 4523",
45913: "4355 4453 4524",
45914: "4355 4453 4525",
45915: "4355 4453 4526",
45916: "4355 4453 4527",
45917: "4355 4453 4528",
45918: "4355 4453 4529",
45919: "4355 4453 4530",
45920: "4355 4453 4531",
45921: "4355 4453 4532",
45922: "4355 4453 4533",
45923: "4355 4453 4534",
45924: "4355 4453 4535",
45925: "4355 4453 4536",
45926: "4355 4453 4537",
45927: "4355 4453 4538",
45928: "4355 4453 4539",
45929: "4355 4453 4540",
45930: "4355 4453 4541",
45931: "4355 4453 4542",
45932: "4355 4453 4543",
45933: "4355 4453 4544",
45934: "4355 4453 4545",
45935: "4355 4453 4546",
45936: "4355 4454",
45937: "4355 4454 4520",
45938: "4355 4454 4521",
45939: "4355 4454 4522",
45940: "4355 4454 4523",
45941: "4355 4454 4524",
45942: "4355 4454 4525",
45943: "4355 4454 4526",
45944: "4355 4454 4527",
45945: "4355 4454 4528",
45946: "4355 4454 4529",
45947: "4355 4454 4530",
45948: "4355 4454 4531",
45949: "4355 4454 4532",
45950: "4355 4454 4533",
45951: "4355 4454 4534",
45952: "4355 4454 4535",
45953: "4355 4454 4536",
45954: "4355 4454 4537",
45955: "4355 4454 4538",
45956: "4355 4454 4539",
45957: "4355 4454 4540",
45958: "4355 4454 4541",
45959: "4355 4454 4542",
45960: "4355 4454 4543",
45961: "4355 4454 4544",
45962: "4355 4454 4545",
45963: "4355 4454 4546",
45964: "4355 4455",
45965: "4355 4455 4520",
45966: "4355 4455 4521",
45967: "4355 4455 4522",
45968: "4355 4455 4523",
45969: "4355 4455 4524",
45970: "4355 4455 4525",
45971: "4355 4455 4526",
45972: "4355 4455 4527",
45973: "4355 4455 4528",
45974: "4355 4455 4529",
45975: "4355 4455 4530",
45976: "4355 4455 4531",
45977: "4355 4455 4532",
45978: "4355 4455 4533",
45979: "4355 4455 4534",
45980: "4355 4455 4535",
45981: "4355 4455 4536",
45982: "4355 4455 4537",
45983: "4355 4455 4538",
45984: "4355 4455 4539",
45985: "4355 4455 4540",
45986: "4355 4455 4541",
45987: "4355 4455 4542",
45988: "4355 4455 4543",
45989: "4355 4455 4544",
45990: "4355 4455 4545",
45991: "4355 4455 4546",
45992: "4355 4456",
45993: "4355 4456 4520",
45994: "4355 4456 4521",
45995: "4355 4456 4522",
45996: "4355 4456 4523",
45997: "4355 4456 4524",
45998: "4355 4456 4525",
45999: "4355 4456 4526",
46000: "4355 4456 4527",
46001: "4355 4456 4528",
46002: "4355 4456 4529",
46003: "4355 4456 4530",
46004: "4355 4456 4531",
46005: "4355 4456 4532",
46006: "4355 4456 4533",
46007: "4355 4456 4534",
46008: "4355 4456 4535",
46009: "4355 4456 4536",
46010: "4355 4456 4537",
46011: "4355 4456 4538",
46012: "4355 4456 4539",
46013: "4355 4456 4540",
46014: "4355 4456 4541",
46015: "4355 4456 4542",
46016: "4355 4456 4543",
46017: "4355 4456 4544",
46018: "4355 4456 4545",
46019: "4355 4456 4546",
46020: "4355 4457",
46021: "4355 4457 4520",
46022: "4355 4457 4521",
46023: "4355 4457 4522",
46024: "4355 4457 4523",
46025: "4355 4457 4524",
46026: "4355 4457 4525",
46027: "4355 4457 4526",
46028: "4355 4457 4527",
46029: "4355 4457 4528",
46030: "4355 4457 4529",
46031: "4355 4457 4530",
46032: "4355 4457 4531",
46033: "4355 4457 4532",
46034: "4355 4457 4533",
46035: "4355 4457 4534",
46036: "4355 4457 4535",
46037: "4355 4457 4536",
46038: "4355 4457 4537",
46039: "4355 4457 4538",
46040: "4355 4457 4539",
46041: "4355 4457 4540",
46042: "4355 4457 4541",
46043: "4355 4457 4542",
46044: "4355 4457 4543",
46045: "4355 4457 4544",
46046: "4355 4457 4545",
46047: "4355 4457 4546",
46048: "4355 4458",
46049: "4355 4458 4520",
46050: "4355 4458 4521",
46051: "4355 4458 4522",
46052: "4355 4458 4523",
46053: "4355 4458 4524",
46054: "4355 4458 4525",
46055: "4355 4458 4526",
46056: "4355 4458 4527",
46057: "4355 4458 4528",
46058: "4355 4458 4529",
46059: "4355 4458 4530",
46060: "4355 4458 4531",
46061: "4355 4458 4532",
46062: "4355 4458 4533",
46063: "4355 4458 4534",
46064: "4355 4458 4535",
46065: "4355 4458 4536",
46066: "4355 4458 4537",
46067: "4355 4458 4538",
46068: "4355 4458 4539",
46069: "4355 4458 4540",
46070: "4355 4458 4541",
46071: "4355 4458 4542",
46072: "4355 4458 4543",
46073: "4355 4458 4544",
46074: "4355 4458 4545",
46075: "4355 4458 4546",
46076: "4355 4459",
46077: "4355 4459 4520",
46078: "4355 4459 4521",
46079: "4355 4459 4522",
46080: "4355 4459 4523",
46081: "4355 4459 4524",
46082: "4355 4459 4525",
46083: "4355 4459 4526",
46084: "4355 4459 4527",
46085: "4355 4459 4528",
46086: "4355 4459 4529",
46087: "4355 4459 4530",
46088: "4355 4459 4531",
46089: "4355 4459 4532",
46090: "4355 4459 4533",
46091: "4355 4459 4534",
46092: "4355 4459 4535",
46093: "4355 4459 4536",
46094: "4355 4459 4537",
46095: "4355 4459 4538",
46096: "4355 4459 4539",
46097: "4355 4459 4540",
46098: "4355 4459 4541",
46099: "4355 4459 4542",
46100: "4355 4459 4543",
46101: "4355 4459 4544",
46102: "4355 4459 4545",
46103: "4355 4459 4546",
46104: "4355 4460",
46105: "4355 4460 4520",
46106: "4355 4460 4521",
46107: "4355 4460 4522",
46108: "4355 4460 4523",
46109: "4355 4460 4524",
46110: "4355 4460 4525",
46111: "4355 4460 4526",
46112: "4355 4460 4527",
46113: "4355 4460 4528",
46114: "4355 4460 4529",
46115: "4355 4460 4530",
46116: "4355 4460 4531",
46117: "4355 4460 4532",
46118: "4355 4460 4533",
46119: "4355 4460 4534",
46120: "4355 4460 4535",
46121: "4355 4460 4536",
46122: "4355 4460 4537",
46123: "4355 4460 4538",
46124: "4355 4460 4539",
46125: "4355 4460 4540",
46126: "4355 4460 4541",
46127: "4355 4460 4542",
46128: "4355 4460 4543",
46129: "4355 4460 4544",
46130: "4355 4460 4545",
46131: "4355 4460 4546",
46132: "4355 4461",
46133: "4355 4461 4520",
46134: "4355 4461 4521",
46135: "4355 4461 4522",
46136: "4355 4461 4523",
46137: "4355 4461 4524",
46138: "4355 4461 4525",
46139: "4355 4461 4526",
46140: "4355 4461 4527",
46141: "4355 4461 4528",
46142: "4355 4461 4529",
46143: "4355 4461 4530",
46144: "4355 4461 4531",
46145: "4355 4461 4532",
46146: "4355 4461 4533",
46147: "4355 4461 4534",
46148: "4355 4461 4535",
46149: "4355 4461 4536",
46150: "4355 4461 4537",
46151: "4355 4461 4538",
46152: "4355 4461 4539",
46153: "4355 4461 4540",
46154: "4355 4461 4541",
46155: "4355 4461 4542",
46156: "4355 4461 4543",
46157: "4355 4461 4544",
46158: "4355 4461 4545",
46159: "4355 4461 4546",
46160: "4355 4462",
46161: "4355 4462 4520",
46162: "4355 4462 4521",
46163: "4355 4462 4522",
46164: "4355 4462 4523",
46165: "4355 4462 4524",
46166: "4355 4462 4525",
46167: "4355 4462 4526",
46168: "4355 4462 4527",
46169: "4355 4462 4528",
46170: "4355 4462 4529",
46171: "4355 4462 4530",
46172: "4355 4462 4531",
46173: "4355 4462 4532",
46174: "4355 4462 4533",
46175: "4355 4462 4534",
46176: "4355 4462 4535",
46177: "4355 4462 4536",
46178: "4355 4462 4537",
46179: "4355 4462 4538",
46180: "4355 4462 4539",
46181: "4355 4462 4540",
46182: "4355 4462 4541",
46183: "4355 4462 4542",
46184: "4355 4462 4543",
46185: "4355 4462 4544",
46186: "4355 4462 4545",
46187: "4355 4462 4546",
46188: "4355 4463",
46189: "4355 4463 4520",
46190: "4355 4463 4521",
46191: "4355 4463 4522",
46192: "4355 4463 4523",
46193: "4355 4463 4524",
46194: "4355 4463 4525",
46195: "4355 4463 4526",
46196: "4355 4463 4527",
46197: "4355 4463 4528",
46198: "4355 4463 4529",
46199: "4355 4463 4530",
46200: "4355 4463 4531",
46201: "4355 4463 4532",
46202: "4355 4463 4533",
46203: "4355 4463 4534",
46204: "4355 4463 4535",
46205: "4355 4463 4536",
46206: "4355 4463 4537",
| |
<reponame>Jappenn/CCL
import tempfile
import numpy as np
from numpy.testing import (
assert_raises, assert_no_warnings, assert_almost_equal)
import pytest
import pyccl as ccl
def test_parameters_lcdm_defaults():
    """A minimal flat LCDM cosmology gets the documented default parameters."""
    cosmo = ccl.Cosmology(
        Omega_c=0.25,
        Omega_b=0.05,
        h=0.7,
        A_s=2.1e-9,
        n_s=0.96)
    # Inputs come back unchanged; Omega_m = Omega_c + Omega_b here.
    assert np.allclose(cosmo['Omega_c'], 0.25)
    assert np.allclose(cosmo['Omega_b'], 0.05)
    assert np.allclose(cosmo['Omega_m'], 0.30)
    # Flat by default: curvature and its derived quantities are zero.
    assert np.allclose(cosmo['Omega_k'], 0)
    assert np.allclose(cosmo['sqrtk'], 0)
    assert np.allclose(cosmo['k_sign'], 0)
    # Dark-energy equation of state defaults to a cosmological constant.
    assert np.allclose(cosmo['w0'], -1)
    assert np.allclose(cosmo['wa'], 0)
    assert np.allclose(cosmo['H0'], 70)
    assert np.allclose(cosmo['h'], 0.7)
    assert np.allclose(cosmo['A_s'], 2.1e-9)
    assert np.allclose(cosmo['n_s'], 0.96)
    # Normalised via A_s, so sigma8 (and z_star) come back as NaN.
    assert np.isnan(cosmo['sigma8'])
    assert np.isnan(cosmo['z_star'])
    assert np.allclose(cosmo['Neff'], 3.046)
    # No massive neutrinos by default: all of Neff is relativistic.
    assert cosmo['N_nu_mass'] == 0
    assert np.allclose(cosmo['N_nu_rel'], 3.046)
    assert np.allclose(cosmo['sum_nu_masses'], 0)
    assert np.allclose(cosmo['m_nu'], 0)
    assert np.allclose(cosmo['Omega_nu_mass'], 0)
    assert np.allclose(cosmo['T_CMB'], ccl.physical_constants.T_CMB)
    assert np.allclose(cosmo['bcm_ks'], 55.0)
    assert np.allclose(cosmo['bcm_log10Mc'], np.log10(1.2e14))
    assert np.allclose(cosmo['bcm_etab'], 0.5)
    # Modified growth is off unless z_mg/df_mg arrays are supplied.
    assert not cosmo['has_mgrowth']
    assert cosmo['nz_mgrowth'] == 0
    assert cosmo['z_mgrowth'] is None
    assert cosmo['df_mgrowth'] is None
    # these are defined in the code via some constants so
    # going to test the total
    # Omega_nu_rel
    # Omega_g
    # Omega_l
    assert np.allclose(
        cosmo['Omega_l'] + cosmo['Omega_m'] + cosmo['Omega_g'] +
        cosmo['Omega_nu_rel'] + cosmo['Omega_nu_mass'] + cosmo['Omega_k'],
        1)
@pytest.mark.parametrize('m_nu_type', ['normal', 'inverted', 'single'])
def test_parameters_nu(m_nu_type):
    """Splitting a 0.15 eV total mass must honour the hierarchy constants.

    For 'normal'/'inverted' the squared-mass differences must match the
    DELTAM12_sq / DELTAM13_sq_* physical constants; for 'single' the whole
    mass sits on the first state.
    """
    cosmo = ccl.Cosmology(
        Omega_c=0.25,
        Omega_b=0.05,
        h=0.7,
        A_s=2.1e-9,
        n_s=0.96,
        wa=0.01,
        w0=-1,
        Neff=3.046,
        Omega_k=0.0,
        m_nu=0.15,
        m_nu_type=m_nu_type
    )
    if m_nu_type == 'inverted':
        assert np.allclose(cosmo['m_nu'][1]**2 - cosmo['m_nu'][0]**2,
                           ccl.physical_constants.DELTAM12_sq,
                           atol=1e-4, rtol=0)
        # The inverted hierarchy uses the negative-branch 1-3 splitting.
        assert np.allclose(
            cosmo['m_nu'][2]**2 - cosmo['m_nu'][0]**2,
            ccl.physical_constants.DELTAM13_sq_neg, atol=1e-4, rtol=0)
    elif m_nu_type == 'normal':
        assert np.allclose(cosmo['m_nu'][1]**2 - cosmo['m_nu'][0]**2,
                           ccl.physical_constants.DELTAM12_sq,
                           atol=1e-4, rtol=0)
        # The normal hierarchy uses the positive-branch 1-3 splitting.
        assert np.allclose(
            cosmo['m_nu'][2]**2 - cosmo['m_nu'][0]**2,
            ccl.physical_constants.DELTAM13_sq_pos, atol=1e-4, rtol=0)
    elif m_nu_type == 'single':
        # 'single' puts all of m_nu on one state; the other two are zero.
        assert np.allclose(cosmo['m_nu'][0], 0.15, atol=1e-4, rtol=0)
        assert np.allclose(cosmo['m_nu'][1], 0., atol=1e-4, rtol=0)
        assert np.allclose(cosmo['m_nu'][2], 0., atol=1e-4, rtol=0)
def test_parameters_nu_Nnurel_neg():
    """Three massive states with Neff=3 must be rejected (ValueError)."""
    # Neff below the massive-neutrino count would leave a negative
    # relativistic contribution, so the constructor must refuse it.
    with pytest.raises(ValueError):
        ccl.Cosmology(Omega_c=0.27, Omega_b=0.049, h=0.67, sigma8=0.8,
                      n_s=0.96, m_nu=[0.03, 0.02, 0.04], Neff=3.,
                      m_nu_type='list')
def test_parameters_nu_list():
    """Explicit per-state masses (m_nu_type='list') are stored verbatim."""
    cosmo = ccl.Cosmology(
        Omega_c=0.25,
        Omega_b=0.05,
        h=0.7,
        A_s=2.1e-9,
        n_s=0.96,
        m_nu=[0.1, 0.01, 0.003],
        m_nu_type='list')
    assert np.allclose(cosmo['Omega_c'], 0.25)
    assert np.allclose(cosmo['Omega_b'], 0.05)
    # Omega_m includes the massive-neutrino density on top of c + b.
    assert np.allclose(cosmo['Omega_m'] - cosmo['Omega_nu_mass'], 0.30)
    assert np.allclose(cosmo['Omega_k'], 0)
    assert np.allclose(cosmo['sqrtk'], 0)
    assert np.allclose(cosmo['k_sign'], 0)
    assert np.allclose(cosmo['w0'], -1)
    assert np.allclose(cosmo['wa'], 0)
    assert np.allclose(cosmo['H0'], 70)
    assert np.allclose(cosmo['h'], 0.7)
    assert np.allclose(cosmo['A_s'], 2.1e-9)
    assert np.allclose(cosmo['n_s'], 0.96)
    # Normalised via A_s, so sigma8 and z_star come back as NaN.
    assert np.isnan(cosmo['sigma8'])
    assert np.isnan(cosmo['z_star'])
    assert np.allclose(cosmo['T_CMB'], ccl.physical_constants.T_CMB)
    assert np.allclose(cosmo['bcm_ks'], 55.0)
    assert np.allclose(cosmo['bcm_log10Mc'], np.log10(1.2e14))
    assert np.allclose(cosmo['bcm_etab'], 0.5)
    assert not cosmo['has_mgrowth']
    assert cosmo['nz_mgrowth'] == 0
    assert cosmo['z_mgrowth'] is None
    assert cosmo['df_mgrowth'] is None
    # these are defined in the code via some constants so
    # going to test the total
    # Omega_nu_rel
    # Omega_g
    # Omega_l
    assert np.allclose(
        cosmo['Omega_l'] + cosmo['Omega_m'] + cosmo['Omega_g'] +
        cosmo['Omega_nu_rel'] + cosmo['Omega_k'],
        1)
    assert np.allclose(cosmo['Neff'], 3.046)
    assert cosmo['N_nu_mass'] == 3
    assert np.allclose(cosmo['sum_nu_masses'], 0.1 + 0.01 + 0.003)
    assert np.allclose(cosmo['m_nu'], [0.1, 0.01, 0.003])
def test_parameters_nu_normal():
    """A 0.3 eV total mass with a normal hierarchy round-trips correctly."""
    cosmo = ccl.Cosmology(
        Omega_c=0.25, Omega_b=0.05, h=0.7, A_s=2.1e-9, n_s=0.96,
        m_nu=0.3, m_nu_type='normal')
    # Parameters that must come back exactly as configured (or at their
    # documented defaults); checked in one table-driven pass.
    expected = {
        'Omega_c': 0.25, 'Omega_b': 0.05, 'Omega_k': 0, 'sqrtk': 0,
        'k_sign': 0, 'w0': -1, 'wa': 0, 'H0': 70, 'h': 0.7,
        'A_s': 2.1e-9, 'n_s': 0.96,
        'T_CMB': ccl.physical_constants.T_CMB,
        'bcm_ks': 55.0, 'bcm_log10Mc': np.log10(1.2e14), 'bcm_etab': 0.5,
        'Neff': 3.046, 'sum_nu_masses': 0.3,
    }
    for name, value in expected.items():
        assert np.allclose(cosmo[name], value), name
    # Omega_m includes the massive-neutrino density on top of c + b.
    assert np.allclose(cosmo['Omega_m'] - cosmo['Omega_nu_mass'], 0.30)
    # Normalised via A_s, so sigma8 and z_star come back as NaN.
    assert np.isnan(cosmo['sigma8'])
    assert np.isnan(cosmo['z_star'])
    # No modified-growth arrays were supplied.
    assert not cosmo['has_mgrowth']
    assert cosmo['nz_mgrowth'] == 0
    assert cosmo['z_mgrowth'] is None
    assert cosmo['df_mgrowth'] is None
    # The density parameters must close to unity.
    assert np.allclose(
        cosmo['Omega_l'] + cosmo['Omega_m'] + cosmo['Omega_g'] +
        cosmo['Omega_nu_rel'] + cosmo['Omega_k'],
        1)
    assert cosmo['N_nu_mass'] == 3
def test_parameters_nu_inverted():
    """A 0.3 eV total mass with an inverted hierarchy round-trips correctly."""
    cosmo = ccl.Cosmology(
        Omega_c=0.25, Omega_b=0.05, h=0.7, A_s=2.1e-9, n_s=0.96,
        m_nu=0.3, m_nu_type='inverted')
    # Parameters that must come back exactly as configured (or at their
    # documented defaults); checked in one table-driven pass.
    expected = {
        'Omega_c': 0.25, 'Omega_b': 0.05, 'Omega_k': 0, 'sqrtk': 0,
        'k_sign': 0, 'w0': -1, 'wa': 0, 'H0': 70, 'h': 0.7,
        'A_s': 2.1e-9, 'n_s': 0.96,
        'T_CMB': ccl.physical_constants.T_CMB,
        'bcm_ks': 55.0, 'bcm_log10Mc': np.log10(1.2e14), 'bcm_etab': 0.5,
        'Neff': 3.046, 'sum_nu_masses': 0.3,
    }
    for name, value in expected.items():
        assert np.allclose(cosmo[name], value), name
    # Omega_m includes the massive-neutrino density on top of c + b.
    assert np.allclose(cosmo['Omega_m'] - cosmo['Omega_nu_mass'], 0.30)
    # Normalised via A_s, so sigma8 and z_star come back as NaN.
    assert np.isnan(cosmo['sigma8'])
    assert np.isnan(cosmo['z_star'])
    # No modified-growth arrays were supplied.
    assert not cosmo['has_mgrowth']
    assert cosmo['nz_mgrowth'] == 0
    assert cosmo['z_mgrowth'] is None
    assert cosmo['df_mgrowth'] is None
    # The density parameters must close to unity.
    assert np.allclose(
        cosmo['Omega_l'] + cosmo['Omega_m'] + cosmo['Omega_g'] +
        cosmo['Omega_nu_rel'] + cosmo['Omega_k'],
        1)
    assert cosmo['N_nu_mass'] == 3
def test_parameters_nu_equal():
    """A 0.3 eV total with m_nu_type='equal' gives three 0.1 eV states."""
    cosmo = ccl.Cosmology(
        Omega_c=0.25, Omega_b=0.05, h=0.7, A_s=2.1e-9, n_s=0.96,
        m_nu=0.3, m_nu_type='equal')
    # Parameters that must come back exactly as configured (or at their
    # documented defaults); checked in one table-driven pass.
    expected = {
        'Omega_c': 0.25, 'Omega_b': 0.05, 'Omega_k': 0, 'sqrtk': 0,
        'k_sign': 0, 'w0': -1, 'wa': 0, 'H0': 70, 'h': 0.7,
        'A_s': 2.1e-9, 'n_s': 0.96,
        'T_CMB': ccl.physical_constants.T_CMB,
        'bcm_ks': 55.0, 'bcm_log10Mc': np.log10(1.2e14), 'bcm_etab': 0.5,
        'Neff': 3.046, 'sum_nu_masses': 0.3,
    }
    for name, value in expected.items():
        assert np.allclose(cosmo[name], value), name
    # Omega_m includes the massive-neutrino density on top of c + b.
    assert np.allclose(cosmo['Omega_m'] - cosmo['Omega_nu_mass'], 0.30)
    # Normalised via A_s, so sigma8 and z_star come back as NaN.
    assert np.isnan(cosmo['sigma8'])
    assert np.isnan(cosmo['z_star'])
    # No modified-growth arrays were supplied.
    assert not cosmo['has_mgrowth']
    assert cosmo['nz_mgrowth'] == 0
    assert cosmo['z_mgrowth'] is None
    assert cosmo['df_mgrowth'] is None
    # The density parameters must close to unity.
    assert np.allclose(
        cosmo['Omega_l'] + cosmo['Omega_m'] + cosmo['Omega_g'] +
        cosmo['Omega_nu_rel'] + cosmo['Omega_k'],
        1)
    assert cosmo['N_nu_mass'] == 3
    # 'equal' splits the total evenly: each of the three states is 0.1 eV.
    assert np.allclose(cosmo['m_nu'], 0.1)
@pytest.mark.parametrize('m_nu,kind', [(0.05, 'normal'), (0.09, 'inverted')])
def test_parameters_nu_unphysical_raises(m_nu, kind):
    """Totals below each hierarchy's minimum must raise ValueError."""
    base = dict(Omega_c=0.25, Omega_b=0.05, h=0.7, A_s=2.1e-9, n_s=0.96)
    with pytest.raises(ValueError):
        ccl.Cosmology(m_nu=m_nu, m_nu_type=kind, **base)
def test_parameters_valid_input():
    """
    Check that valid parameter arguments are accepted.
    """
    # A plain flat LCDM plus each optional knob in turn: curvature,
    # Neff, massive neutrinos, and a (w0, wa) dark-energy equation of state.
    assert_no_warnings(ccl.Cosmology, Omega_c=0.25, Omega_b=0.05, h=0.7,
                       A_s=2.1e-9, n_s=0.96)
    assert_no_warnings(ccl.Cosmology, Omega_c=0.25, Omega_b=0.05, h=0.7,
                       A_s=2.1e-9, n_s=0.96, Omega_k=0.05)
    assert_no_warnings(ccl.Cosmology, Omega_c=0.25, Omega_b=0.05, h=0.7,
                       A_s=2.1e-9, n_s=0.96, Neff=2.046)
    assert_no_warnings(ccl.Cosmology, Omega_c=0.25, Omega_b=0.05, h=0.7,
                       A_s=2.1e-9, n_s=0.96, Neff=3.046, m_nu=0.06)
    assert_no_warnings(ccl.Cosmology, Omega_c=0.25, Omega_b=0.05, h=0.7,
                       A_s=2.1e-9, n_s=0.96, w0=-0.9)
    assert_no_warnings(ccl.Cosmology, Omega_c=0.25, Omega_b=0.05, h=0.7,
                       A_s=2.1e-9, n_s=0.96, w0=-0.9, wa=0.1)
    # Check that kwarg order doesn't matter
    assert_no_warnings(ccl.Cosmology, h=0.7, Omega_c=0.25, Omega_b=0.05,
                       A_s=2.1e-9, n_s=0.96)
    # Try a set of parameters with non-zero mu0 / Sig0
    assert_no_warnings(ccl.Cosmology, h=0.7, Omega_c=0.25, Omega_b=0.05,
                       A_s=2.1e-9, n_s=0.96, mu_0=0.1, sigma_0=0.1)
def test_parameters_missing():
    """
    Check that errors are raised when compulsory parameters are missing, but
    not when non-compulsory ones are.
    """
    assert_raises(ValueError, ccl.Cosmology, Omega_c=0.25)
    # Check that a single missing compulsory parameter is noticed
    assert_raises(ValueError, ccl.Cosmology, Omega_c=0.25, Omega_b=0.05,
                  h=0.7, A_s=2.1e-9)
    assert_raises(ValueError, ccl.Cosmology, Omega_c=0.25, Omega_b=0.05,
                  h=0.7, n_s=0.96)
    assert_raises(ValueError, ccl.Cosmology, Omega_c=0.25, Omega_b=0.05,
                  A_s=2.1e-9, n_s=0.96)
    assert_raises(ValueError, ccl.Cosmology, Omega_c=0.25,
                  h=0.7, A_s=2.1e-9, n_s=0.96)
    assert_raises(ValueError, ccl.Cosmology, Omega_b=0.05,
                  h=0.7, A_s=2.1e-9, n_s=0.96)
    # Make sure that compulsory parameters are compulsory
    # (passing None explicitly must not be accepted as "use the default").
    assert_raises(
        ValueError, ccl.Cosmology,
        Omega_c=0.25, Omega_b=0.05, h=0.7, A_s=2.1e-9, n_s=0.96,
        Omega_k=None)
    assert_raises(
        ValueError, ccl.Cosmology,
        Omega_c=0.25, Omega_b=0.05, h=0.7, A_s=2.1e-9, n_s=0.96,
        w0=None)
    assert_raises(
        ValueError, ccl.Cosmology,
        Omega_c=0.25, Omega_b=0.05, h=0.7, A_s=2.1e-9, n_s=0.96,
        wa=None)
    # Check that sigma8 vs A_s is handled ok.
    # Both given -> ambiguous normalisation; neither given -> no normalisation.
    assert_raises(
        ValueError, ccl.Cosmology,
        Omega_c=0.25, Omega_b=0.05, h=0.7, n_s=0.8,
        A_s=2.1e-9, sigma8=0.7)
    assert_raises(
        ValueError, ccl.Cosmology,
        Omega_c=0.25, Omega_b=0.05, h=0.7, n_s=0.8)
    # Make sure that optional parameters are optional
    assert_no_warnings(
        ccl.Cosmology,
        Omega_c=0.25, Omega_b=0.05, h=0.7, A_s=2.1e-9, n_s=0.96,
        z_mg=None, df_mg=None)
    assert_no_warnings(
        ccl.Cosmology,
        Omega_c=0.25, Omega_b=0.05, h=0.7, A_s=2.1e-9, n_s=0.96,
        z_mg=None)
    assert_no_warnings(
        ccl.Cosmology,
        Omega_c=0.25, Omega_b=0.05, h=0.7, A_s=2.1e-9, n_s=0.96,
        df_mg=None)
def test_parameters_set():
    """
    Check that a Cosmology object doesn't let parameters be set.
    """
    # NOTE(review): despite the docstring, no assignment to `params` is
    # exercised below — only A_s/sigma8 magnitude validation and an
    # unknown-key lookup. Confirm whether a set-prevention check is missing.
    params = ccl.Cosmology(
        Omega_c=0.25, Omega_b=0.05, h=0.7, A_s=2.1e-9,
        n_s=0.96)
    # Check that values of sigma8 and A_s won't be misinterpreted by the C code
    assert_raises(ValueError, ccl.Cosmology, Omega_c=0.25, Omega_b=0.05,
                  h=0.7, A_s=2e-5, n_s=0.96)
    assert_raises(ValueError, ccl.Cosmology, Omega_c=0.25, Omega_b=0.05,
                  h=0.7, sigma8=9e-6, n_s=0.96)
    # Check that error is raised when unrecognized parameter requested
    assert_raises(KeyError, lambda: params['wibble'])
def test_parameters_mgrowth():
    """
    Check that valid modified growth inputs are allowed, and invalid ones are
    rejected.
    """
    zarr = np.linspace(0., 1., 15)
    dfarr = 0.1 * np.ones(15)

    def f_func(z):
        # Callable stand-in: callables are NOT an accepted df_mg form.
        return 0.1 * z

    # Valid constructions: z_mg/df_mg as matching arrays or matching lists,
    # repeated for each Omega_g variant.
    for omega_g in [None, 0.0, 0.1]:
        assert_no_warnings(
            ccl.Cosmology,
            Omega_c=0.25, Omega_b=0.05, h=0.7, A_s=2.1e-9, n_s=0.96,
            z_mg=zarr, df_mg=dfarr, Omega_g=omega_g)
        assert_no_warnings(
            ccl.Cosmology,
            Omega_c=0.25, Omega_b=0.05, h=0.7, A_s=2.1e-9, n_s=0.96,
            z_mg=[0., 0.1, 0.2],
            df_mg=[0.1, 0.1, 0.1], Omega_g=omega_g)
        # Invalid constructions: one array without the other, a scalar or a
        # callable where an array is required.
        assert_raises(
            ValueError, ccl.Cosmology,
            Omega_c=0.25, Omega_b=0.05, h=0.7, A_s=2.1e-9, n_s=0.96,
            z_mg=zarr, Omega_g=omega_g)
        assert_raises(
            ValueError, ccl.Cosmology,
            Omega_c=0.25, Omega_b=0.05, h=0.7, A_s=2.1e-9, n_s=0.96,
            df_mg=dfarr, Omega_g=omega_g)
        assert_raises(
            ValueError, ccl.Cosmology,
            Omega_c=0.25, Omega_b=0.05, h=0.7, A_s=2.1e-9, n_s=0.96,
            z_mg=None,
            df_mg=dfarr, Omega_g=omega_g)
        assert_raises(
            ValueError, ccl.Cosmology,
            Omega_c=0.25, Omega_b=0.05, h=0.7, A_s=2.1e-9, n_s=0.96,
            z_mg=zarr,
            df_mg=0.1, Omega_g=omega_g)
        assert_raises(
            ValueError, ccl.Cosmology,
            Omega_c=0.25, Omega_b=0.05, h=0.7, A_s=2.1e-9, n_s=0.96,
            z_mg=zarr,
            df_mg=f_func, Omega_g=omega_g)
        # Mis-matched array sizes and dimensionality
        assert_raises(
            ValueError, ccl.Cosmology,
            Omega_c=0.25, Omega_b=0.05, h=0.7, A_s=2.1e-9, n_s=0.96,
            z_mg=zarr,
            df_mg=dfarr[1:], Omega_g=omega_g)
        assert_raises(
            ValueError, ccl.Cosmology,
            Omega_c=0.25, Omega_b=0.05, h=0.7, A_s=2.1e-9, n_s=0.96,
            z_mg=zarr,
            df_mg=np.column_stack((dfarr, dfarr)), Omega_g=omega_g)
def test_parameters_read_write():
"""Check that Cosmology objects can be read and written"""
params = ccl.Cosmology(
Omega_c=0.25, Omega_b=0.05, h=0.7, A_s=2.1e-9, n_s=0.96,
m_nu=[0.02, 0.1, 0.05], m_nu_type='list',
z_mg=[0.0, 1.0], df_mg=[0.01, 0.0])
# Make a temporary file name
with tempfile.NamedTemporaryFile(delete=False) as tmpfile:
temp_file_name = tmpfile.name
# Write out and then read in the parameters from that file
assert_raises(IOError, params.write_yaml, "/bogus/file/name")
params.write_yaml(temp_file_name)
params2 = ccl.Cosmology.read_yaml(temp_file_name)
# Check the | |
from django.shortcuts import render, redirect
from django.utils.html import escape
from . import forms
from django.views.generic import TemplateView
from django.contrib.auth import logout, authenticate, login
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, JsonResponse
import json
from rest_framework.permissions import IsAuthenticated, AllowAny
from rest_framework.authtoken.models import Token
from rest_framework import generics, status
from .models import Teacher, Student, Lecture, Div, Subject, AppUser
from .models import SubjectTeacher, StudentLecture, StudentDivision
from .serializers import (TeacherSerializer, StudentSerializer, LectureSerializer, DivSerializer, SubjectSerializer,
ShortTeacherSerializer)
from rest_framework.authentication import TokenAuthentication
import datetime
import csv
class HomePage(TemplateView):
    """Static landing page for the attendance app."""
    template_name = 'Attendance/index.html'
class ThanksPage(TemplateView):
    """Static confirmation page shown after a successful logout."""
    template_name = 'Attendance/logout_success.html'
def login_user_teacher(request):
    """Log a teacher in via the HTML login form.

    On a POST with valid credentials for an active account, logs the user
    in and redirects to the dashboard; otherwise re-renders the login form.
    """
    # Check the HTTP method, not the POST dict: ``if request.POST:`` is
    # falsy for a POST with an empty body, silently skipping authentication.
    if request.method == 'POST':
        # .get() avoids a KeyError (-> 500) when a field is missing;
        # authenticate() simply returns None for None credentials.
        username = request.POST.get('username')
        password = request.POST.get('password')
        user = authenticate(username=username, password=password)
        if user is not None and user.is_active:
            login(request, user)
            return redirect('Attendance:dash')
    return render(request, 'Attendance/login.html', context={'form': forms.TeacherLoginForm()})
@login_required
def dash(request):
    """Post-login dashboard page; requires an authenticated session."""
    return render(request, 'Attendance/login_success.html')
# Token-authenticated REST API endpoint views
class TeachersSubjectDataView(generics.GenericAPIView):
    """Return the subjects a teacher teaches, the subjects of any division
    they are class teacher of this calendar year, and that division.

    GET /<teacherId>/ -> 200 with taught_subjects / class_subjects /
    division_they_are_class_teacher_of, or 400 for an unknown teacherId.
    """
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)

    def get(self, request, *args, **kwargs):
        teacherId = kwargs['teacherId']
        try:
            teacher = Teacher.objects.get(teacherID=teacherId)
        except Exception as e:
            response_data = {'error_message': "Invalid TeacherId" + str(e)}
            return JsonResponse(response_data, status=status.HTTP_400_BAD_REQUEST)
        divisions = Div.objects.filter(classteacher=teacher, calendar_year=datetime.date.today().year)
        class_subjects = []
        for div in divisions:
            # dict.fromkeys dedupes while preserving query order;
            # list(set(...)) made the response ordering vary per request.
            unique_subjects = list(dict.fromkeys(
                st.subject for st in SubjectTeacher.objects.filter(div=div)))
            for subject in unique_subjects:
                subject_json = SubjectSerializer(subject).data
                subject_json["div"] = str(div)
                class_subjects.append(subject_json)
        taught_subjects = []
        for st in SubjectTeacher.objects.filter(teacher=teacher):
            subject_json = SubjectSerializer(st.subject).data
            subject_json["div"] = str(st.div)
            taught_subjects.append(subject_json)
        # Last division of type "Class" wins; None (serialized as nulls)
        # when the teacher is not a class teacher.
        division = None
        for div in divisions:
            if div.get_class_type() == "Class":
                division = div
        response_data = {
            'taught_subjects': taught_subjects,
            'class_subjects': class_subjects,
            'division_they_are_class_teacher_of': DivSerializer(division).data,
        }
        return JsonResponse(response_data, status=status.HTTP_200_OK)
class LoginTeacherView(generics.GenericAPIView):
    """Issue a DRF auth token for a teacher given teacherId + password.

    Accepts either a JSON body or form-encoded POST data. Returns 200 with
    a token on success, 400 otherwise.
    """
    permission_classes = (AllowAny,)

    def post(self, request, *args, **kwargs):
        # Prefer a JSON body; fall back to form fields if parsing fails.
        try:
            form_data = json.loads(request.body.decode())
            teacherId = escape(form_data['teacherId'])
            password = escape(form_data['password'])
        except Exception:
            teacherId = escape(request.POST.get('teacherId'))
            password = escape(request.POST.get('password'))
        # An unknown teacherId previously raised Teacher.DoesNotExist and
        # surfaced as a 500; report it as a client error instead.
        try:
            teacher = Teacher.objects.get(teacherID=teacherId)
        except Teacher.DoesNotExist:
            response_data = {'error_message': "Cannot log you in"}
            return JsonResponse(response_data, status=status.HTTP_400_BAD_REQUEST)
        user = authenticate(username=teacher.user.username, password=password)
        if user is not None:
            token, _ = Token.objects.get_or_create(user=user)
            login(request, user)
            response_data = {
                'token': token.key,
            }
            return JsonResponse(response_data, status=status.HTTP_200_OK)
        else:
            response_data = {'error_message': "Cannot log you in"}
            return JsonResponse(response_data, status=status.HTTP_400_BAD_REQUEST)
class LogoutTeacherView(generics.GenericAPIView):
    """Invalidate the caller's token and close the session."""
    permission_classes = (IsAuthenticated,)

    def get(self, request):
        # Deleting the token forces the client to authenticate again.
        request.user.auth_token.delete()
        logout(request)
        return JsonResponse(
            {'success_message': 'Successfully logged you out'},
            status=status.HTTP_200_OK,
        )
class SignUpTeacherView(generics.GenericAPIView):
    """Create a Teacher account (superuser only) and return its auth token."""
    permission_classes = (IsAuthenticated,)

    def post(self, request, *args, **kwargs):
        if not request.user.is_superuser:
            response_data = {'error_message': "You are not authorised to use this endpoint. You aren't a superuser!"}
            return JsonResponse(response_data, status=status.HTTP_400_BAD_REQUEST)
        # Accept either a JSON body or classic form-encoded POST data.
        try:
            form_data = json.loads(request.body.decode())
            teacherId = escape(form_data['teacherId'])
            password = escape(form_data['password'])
            f_name = escape(form_data['fname'])
            l_name = escape(form_data['lname'])
            specialization = escape(form_data['spec'])
        except Exception:
            teacherId = escape(request.POST.get('teacherId'))
            password = escape(request.POST.get('password'))
            f_name = escape(request.POST.get('fname'))
            l_name = escape(request.POST.get('lname'))
            specialization = escape(request.POST.get('spec'))
        try:
            # Bug fix: objects.create(password=...) used to persist the raw
            # password until set_password() + save() overwrote it. Build the
            # user in memory and hash the password before the first save.
            user = AppUser(username=teacherId)
            user.first_name = f_name
            user.last_name = l_name
            user.set_password(password)
            user.is_teacher = True
            user.save()
            teacher = Teacher.objects.create(user=user, teacherID=teacherId, specialization=specialization)
            teacher.save()
            login(request, user)
            token, _ = Token.objects.get_or_create(user=user)
            response_data = {
                'token': token.key,
            }
            return JsonResponse(response_data, status=status.HTTP_200_OK)
        except Exception as e:
            response_data = {'error_message': "Cannot sign you up due to " + str(e)}
            return JsonResponse(response_data, status=status.HTTP_400_BAD_REQUEST)
class GenericLoginView(generics.GenericAPIView):
    """Single login endpoint for both students and teachers.

    Returns the DRF token, role flags, and the role-specific serialized profile.
    """
    permission_classes = (AllowAny,)

    def post(self, request, *args, **kwargs):
        # Accept either a JSON body or classic form-encoded POST data.
        try:
            form_data = json.loads(request.body.decode())
            user_id = escape(form_data['id'])
            password = escape(form_data['password'])
        except Exception:
            user_id = escape(request.POST.get('id'))
            password = escape(request.POST.get('password'))
        user = authenticate(username=user_id, password=password)
        if user is not None:
            # One token per user; also open a session for session-based clients.
            token, _ = Token.objects.get_or_create(user=user)
            login(request, user)
            response_data = {
                'isStudent': user.is_student,
                'isTeacher': user.is_teacher,
                'token': token.key,
            }
            # Attach the role-specific profile (student or short teacher view).
            if user.is_student:
                response_data['user'] = StudentSerializer(Student.objects.get(user=user)).data
            else:
                response_data['user'] = ShortTeacherSerializer(Teacher.objects.get(user=user)).data
            response_data['user']['name'] = user.getname()
            return JsonResponse(response_data, status=status.HTTP_200_OK)
        else:
            response_data = {'error_message': "Cannot log you in"}
            return JsonResponse(response_data, status=status.HTTP_400_BAD_REQUEST)
class GetAttendanceOfDay(generics.GenericAPIView):
    """Attendance lists of a division for one subject on one day, keyed by
    lecture time. Class teachers see all lectures of the division; other
    teachers only see the lectures they taught themselves.
    """
    permission_classes = (IsAuthenticated,)

    def get(self, request, *args, **kwargs):
        # URL kwargs: subject, div ('<yearname>_<division>'), optional date 'd-m-y'.
        subject_name = kwargs['subject']
        div = kwargs['div']
        try:
            date = kwargs['date']
            d, m, y = date.split('-')
            date = datetime.datetime(int(y), int(m), int(d)).date()
        except KeyError:
            # No date given: default to today.
            date = datetime.date.today()
        yearname, division = div.split("_")
        year = Div.yearnameToYear(yearname)
        # Months before June belong to the even semester of that year,
        # June onwards to the odd one.
        if date.month < 6:
            semester = year * 2
        else:
            semester = year * 2 - 1
        try:
            subject = Subject.objects.get(name=subject_name)
            div = Div.objects.get(division=division, semester=semester, calendar_year=datetime.date.today().year)
        except Subject.DoesNotExist:
            response_data = {'error_message': "Subject " + subject_name + " Does Not Exist"}
            return JsonResponse(response_data, status=status.HTTP_400_BAD_REQUEST)
        except Div.DoesNotExist:
            # div is still the raw URL string here (the lookup failed).
            response_data = {'error_message': "Division " + div + " Does Not Exist"}
            return JsonResponse(response_data, status=status.HTTP_400_BAD_REQUEST)
        teacher = Teacher.objects.get(user=request.user)
        # Class teachers can see every lecture of the division; others only theirs.
        if div.classteacher and div.classteacher.teacherID == teacher.teacherID:
            lecs = Lecture.objects.filter(date=date, div=div, subject=subject, attendanceTaken=True)
        else:
            lecs = Lecture.objects.filter(date=date, teacher=teacher, div=div, subject=subject, attendanceTaken=True)
        attendance_list = {}
        for lec in lecs:
            lecTime = lec.getTimeString()
            attendance_list[lecTime] = []
            # Present = has a StudentLecture row for this lecture; everyone
            # else enrolled in the division is absent.
            student_lecs = StudentLecture.objects.filter(lecture=lec)
            present_students = [sl.student for sl in student_lecs]
            student_divs = StudentDivision.objects.filter(division=div)
            div_students = [sd.student for sd in student_divs]
            absent_students = list(set(div_students) - set(present_students))
            for student in present_students:
                student_json = StudentSerializer(student).data
                student_json["attendance"] = 1
                attendance_list[lecTime].append(student_json)
            for student in absent_students:
                student_json = StudentSerializer(student).data
                student_json["attendance"] = 0
                attendance_list[lecTime].append(student_json)
            attendance_list[lecTime].sort(key=lambda x: x["sapID"])
        # Flatten the {time: entries} mapping into a list for the response.
        final_attendance_list = []
        for lecTime in attendance_list:
            attendance_object = {}
            attendance_object['time'] = lecTime
            attendance_object['attendance_list'] = attendance_list[lecTime]
            final_attendance_list.append(attendance_object)
        response_data = {
            'attendance': final_attendance_list,
        }
        return JsonResponse(response_data, status=status.HTTP_200_OK)
class GetAttendanceOfRange(generics.GenericAPIView):
    """Per-lecture attendance of a division for one subject over a date range.

    Both dates must fall in the same semester (month < 6 vs month >= 6).
    """
    permission_classes = (IsAuthenticated,)

    def get(self, request, *args, **kwargs):
        # URL kwargs: subject, div ('<yearname>_<division>'),
        # optional date_from / date_to as 'd-m-y' (default: today).
        subject_name = kwargs['subject']
        div = kwargs['div']
        try:
            date_from = kwargs['date_from']
            d, m, y = date_from.split('-')
            date_from = datetime.datetime(int(y), int(m), int(d)).date()
        except KeyError:
            date_from = datetime.date.today()
        try:
            date_to = kwargs['date_to']
            d, m, y = date_to.split('-')
            date_to = datetime.datetime(int(y), int(m), int(d)).date()
        except KeyError:
            date_to = datetime.date.today()
        yearname, division = div.split("_")
        year = Div.yearnameToYear(yearname)
        # Months before June belong to the even semester, June onwards to the
        # odd one; a range spanning both is rejected.
        if date_from.month < 6 and date_to.month < 6:
            semester = year * 2
        elif date_from.month >= 6 and date_to.month >= 6:
            semester = year * 2 - 1
        else:
            response_data = {'error_message': "Dates are not from the same semester."}
            return JsonResponse(response_data, status=status.HTTP_400_BAD_REQUEST)
        try:
            subject = Subject.objects.get(name=subject_name)
            div = Div.objects.get(division=division, semester=semester, calendar_year=datetime.date.today().year)
        except Subject.DoesNotExist:
            response_data = {'error_message': "Subject " + subject_name + " Does Not Exist"}
            return JsonResponse(response_data, status=status.HTTP_400_BAD_REQUEST)
        except Div.DoesNotExist:
            # div is still the raw URL string here (the lookup failed).
            response_data = {'error_message': "Division " + div + " Does Not Exist"}
            return JsonResponse(response_data, status=status.HTTP_400_BAD_REQUEST)
        teacher = Teacher.objects.get(user=request.user)
        # Class teachers see every lecture of the division; others only theirs.
        if div.classteacher and div.classteacher.teacherID == teacher.teacherID:
            lecs = Lecture.objects.filter(date__lte=date_to, date__gte=date_from, div=div, subject=subject,
                                          attendanceTaken=True)
        else:
            lecs = Lecture.objects.filter(date__lte=date_to, date__gte=date_from, teacher=teacher,
                                          div=div, subject=subject, attendanceTaken=True)
        attendance_list = {}
        for lec in lecs:
            lecDateTime = lec.getDateTimeString()
            attendance_list[lecDateTime] = []
            # Present = has a StudentLecture row; everyone else in the division
            # counts as absent.
            student_lecs = StudentLecture.objects.filter(lecture=lec)
            present_students = [sl.student for sl in student_lecs]
            student_divs = StudentDivision.objects.filter(division=div)
            div_students = [sd.student for sd in student_divs]
            absent_students = list(set(div_students) - set(present_students))
            for student in present_students:
                student_json = StudentSerializer(student).data
                student_json["attendance"] = 1
                attendance_list[lecDateTime].append(student_json)
            for student in absent_students:
                student_json = StudentSerializer(student).data
                student_json["attendance"] = 0
                attendance_list[lecDateTime].append(student_json)
            attendance_list[lecDateTime].sort(key=lambda x: x["sapID"])
        # Flatten the {datetime: entries} mapping into a list for the response.
        final_attendance_list = []
        for lecDateTime in attendance_list:
            attendance_object = {}
            attendance_object['time'] = lecDateTime
            attendance_object['attendance_list'] = attendance_list[lecDateTime]
            final_attendance_list.append(attendance_object)
        response_data = {
            'attendance': final_attendance_list,
        }
        return JsonResponse(response_data, status=status.HTTP_200_OK)
class GetAttendanceOfStudent(generics.GenericAPIView):
    """Attendance report of one student for one subject, restricted to the
    requesting teacher's lectures (or all lectures when they are the class
    teacher of one of the student's divisions)."""
    permission_classes = (IsAuthenticated,)

    def multiple_lectures(self, lecture):
        """Weight of a lecture: practicals always count as 1; long theory
        slots count as 2 (>90 and <=150 min) or 3 (>150 and <210 min)."""
        if lecture.div.get_class_type() == 'Practical':
            return 1
        start = datetime.datetime.combine(lecture.date, lecture.startTime)
        end = datetime.datetime.combine(lecture.date, lecture.endTime)
        difference = end - start
        td = difference.total_seconds() / 60
        if td > 90 and td <= 150:
            return 2
        elif td > 150 and td < 210:
            return 3
        return 1

    def get(self, request, *args, **kwargs):
        # URL kwargs: subject (name), sapID (student SAP number).
        subject_name = kwargs['subject']
        student_sap = int(kwargs['sapID'])
        try:
            subject = Subject.objects.get(name=subject_name)
            student = Student.objects.get(sapID=student_sap)
        except Subject.DoesNotExist:
            response_data = {'error_message': "Subject " + subject_name + " Does Not Exist"}
            return JsonResponse(response_data, status=status.HTTP_400_BAD_REQUEST)
        except Student.DoesNotExist:
            response_data = {'error_message': "Student with SAP " + str(student_sap) + " Does Not Exist"}
            return JsonResponse(response_data, status=status.HTTP_400_BAD_REQUEST)
        divs = list(student.div.all())
        class_teacher_list = [div.classteacher for div in divs]
        teacher = Teacher.objects.get(user=request.user)
        if teacher in class_teacher_list:
            lecs = Lecture.objects.filter(div__in=divs, subject=subject, attendanceTaken=True)
        else:
            lecs = Lecture.objects.filter(teacher=teacher, div__in=divs, subject=subject, attendanceTaken=True)
        if lecs:
            lecs = list(lecs)
            lecs.sort(key=lambda x: x.date)
            attendance_list = []
            attendance_count = 0
            attendance_total = 0
            for lecture in lecs:
                lecture_json = LectureSerializer(lecture).data
                # Re-order ISO 'y-m-d' into 'd-m-y' for the client.
                lecture_json["date"] = "-".join(lecture_json["date"].split('-')[::-1])
                weight = self.multiple_lectures(lecture)
                # Bug fix: the total used to be incremented only for absences,
                # which inflated the percentage and raised ZeroDivisionError
                # for a student with full attendance. Every lecture now
                # contributes its weight to the total.
                attendance_total += weight
                try:
                    StudentLecture.objects.get(student=student, lecture=lecture)
                    lecture_json["attendance"] = 1
                    attendance_count += weight
                except StudentLecture.DoesNotExist:
                    lecture_json["attendance"] = 0
                attendance_list.append(lecture_json)
            # attendance_total > 0 here (lecs is non-empty and weights >= 1),
            # but guard anyway for safety.
            if attendance_total:
                attendance_percentage = attendance_count * 100 / attendance_total
            else:
                attendance_percentage = 0
            response_data = {
                'attendance': attendance_list,
                'attendance_count': attendance_count,
                'attendance_total': attendance_total,
                'attendance_percentage': attendance_percentage,
            }
        else:
            response_data = {
                'attendance': [],
            }
        return JsonResponse(response_data, status=status.HTTP_200_OK)
class EditAttendanceOfDay(generics.GenericAPIView):
permission_classes = (IsAuthenticated,)
def post(self, request, *args, **kwargs):
subject_name = kwargs['subject']
div = kwargs['div']
try:
form_data = json.loads(request.body.decode())
attendance_list = form_data['attendance_list']
except Exception:
attendance_list = request.POST.get('attendance_list')
try:
date = kwargs['date']
d, m, y = date.split('-')
date = datetime.datetime(int(y), int(m), int(d)).date()
except KeyError:
date = datetime.date.today()
yearname, division = div.split("_")
year = Div.yearnameToYear(yearname)
if date.month < 6:
semester = year * 2
else:
semester = year * 2 - 1
try:
subject = Subject.objects.get(name=subject_name)
div = Div.objects.get(division=division, semester=semester, calendar_year=datetime.date.today().year)
except Subject.DoesNotExist:
response_data = | |
self.temp.append(float(vals[6]))
self.tide_grav.append(0.0)
self.airpress_grav.append(0.0)
class Survey(Chanlist):
    """
    Survey reading dataset built from gravimeter recording files.

    A Survey instance must be fully set up (network, meters, raw data)
    before it is handed to the Campaign class.

    Record markers in the raw files:
      55555  end of one closed loop
      99999  end of the whole survey
      44444 / 66666  interruption within one day
    """

    def __init__(self, name, time_tag):
        """Create an empty survey labelled *name* observed at *time_tag*."""
        super(Survey, self).__init__()  # call properties from the baseclass
        self._name = name
        self._time_tag = time_tag
        self.meter_list = []       # Meter instances used in this survey
        self.loop_list = []        # Loop instances parsed from the raw file
        self.survey_table = []     # flat recording rows built by corr_aux_effect
        self._meter_sf_index = []  # 20191024: per-meter scale-factor indices
        self._meter_sf = []        # scale factors mirrored from the meters

    def __str__(self):
        """Print a short summary; the return value is a fixed banner."""
        print('Number of gravimeter used in Survey: %d' % (len(self.meter_list)))
        print('Number of loops in Survey: %d' % (len(self.loop_list)))
        print('Number of recordings can be used: %d' % (len(self.survey_table)))
        return "====Survey print by BACGS===="

    def add_meter(self, meter):
        """Register a Meter, rejecting non-Meter inputs and duplicate serials."""
        if not isinstance(meter, Meter):
            raise ValueError('Input is not a instance of Meter class!')
        for i in self.meter_list:
            if (i.msn == meter.msn):
                raise ValueError('Meter {} has been added in the survey.'.format(meter.msn))
        self.meter_list.append(meter)

    @property
    def name(self):
        return self._name

    @property
    def time_tag(self):
        return self._time_tag

    @property
    def meter_sf_index(self):
        return self._meter_sf_index

    @meter_sf_index.setter
    def meter_sf_index(self, value):
        # Meters must already be registered so their scale factors can be cached.
        if not isinstance(value, list):
            raise ValueError('Input is not a instance of List!')
        if len(self.meter_list) < 1:
            raise ValueError('Meter MUST be firstly added in Survey!')
        self._meter_sf_index = value
        self._meter_sf = [mm.msf for mm in self.meter_list]

    @property
    def net(self):
        return self._net

    @net.setter
    def net(self, value):
        if not isinstance(value, Network):
            raise ValueError('Input is not a instance of Network class!')
        self._net = value

    @staticmethod
    def tran_datetime(obstime):  # eg. 1304070850
        """Decode a YYMMDDHHMM integer into
        [year, month, day, hour, minute, ordinal_day, fractional_hour].

        Two-digit years > 50 are mapped to 19xx, otherwise to 20xx.
        """
        if not isinstance(obstime, int):
            raise ValueError('Datetime must be a integer type with 10 bit!')
        timeval = float(obstime)
        iy = int(timeval / 1E8)
        im = int(timeval / 1E6) - iy * 100
        idd = int(timeval / 1E4) - iy * 10000 - im * 100
        ih = int(timeval / 100) - iy * 1000000 - im * 10000 - idd * 100
        imin = timeval - iy * 100000000 - im * 1000000 - idd * 10000 - ih * 100
        hour_val = ih + imin / 60
        if iy > 50:
            iy = 1900 + iy
        else:
            iy = 2000 + iy
        days_val = date(iy, im, idd).toordinal()
        time_list = [iy, im, idd, ih, imin, days_val, hour_val]
        return time_list

    def _get_pnt_loc(self, sid):
        """
        Get location (lon, lat, elev) from the Network instance by station ID.

        Returns zeros when the station is unknown.
        """
        lon = 0.0
        lat = 0.0
        elev = 0.0  # bug fix: was left unbound when sid was not found
        for snet in self.net.sta_list:
            if (snet._sid == sid):
                lon = snet.slon
                lat = snet.slat
                elev = snet._elev
                break
        return lon, lat, elev

    def _get_meter_sf(self, mname):
        """
        Get scalefactor of meter *mname*.

        Raises ValueError for an unknown meter (previously this crashed with
        an opaque UnboundLocalError).
        """
        for sname in self.meter_list:
            if (sname.msn == mname):
                return sname._scalefactor
        raise ValueError('Meter {} is not registered in the survey!'.format(mname))

    def corr_aux_effect(self, alpha=1.16, beta=-0.3):
        """Build survey_table rows with Earth-tide and air-pressure corrections.

        For LCG-type meters that carry a calibration table the raw readings
        are first converted to mGal via read2mgal. Tide gravity comes from
        the Longman model; observation times are shifted by -8 hours
        (local UTC+8 -> UTC). beta*0.001 converts pressure to gravity.
        """
        j = 0
        for slp in self.loop_list:
            i = 0
            gra_list = []
            # Use the calibration table of the meter that recorded this loop.
            for sname in self.meter_list:
                if (sname.msn == slp.metername[0]):
                    if (len(sname.table_data) > 1):
                        table_list = sname.table_data
                        gra_list = self.read2mgal(slp.obsval, table_list)
                    break
            # Just for LCG meter
            for stime in slp.obstime:
                tt = self.tran_datetime(int(stime))
                slocid = slp.stationid[i]
                slon, slat, selev = self._get_pnt_loc(slocid)
                gdate = datetime(int(tt[0]), int(tt[1]), int(tt[2]),
                                 int(tt[3]), int(tt[4]), 00)
                gdate = gdate - timedelta(hours=8)  # local time -> UTC
                g1 = tide.TideModel()
                gm, gs, g = g1.solve_longman(slat, slon, selev, gdate)
                slp.tide_grav[i] = -g  # *alpha -1.0 ****
                slp.airpress_grav[i] = slp.airpress[i] * beta * 0.001
                sfval = self._get_meter_sf(slp.metername[i])
                # Fall back to raw readings when no calibration table applied.
                if (len(gra_list) == 0):
                    slist = [slp.metername[i], slocid, tt[5], tt[6], slp.obsval[i], sfval,
                             slp.tide_grav[i], slp.airpress_grav[i], i + 1]
                else:
                    slist = [slp.metername[i], slocid, tt[5], tt[6], gra_list[i], sfval,
                             slp.tide_grav[i], slp.airpress_grav[i], i + 1]
                self.survey_table.append(slist)
                i += 1
            j += 1
        return

    @staticmethod
    def read2mgal(read_list, table_list):
        """Transform the reading to gravity value using table_data of meter.
        read2mgal is staticmethod
        Method : x1val*(table(j,2)+(dtd(i)-table(j,1))*table(j,3))

        NOTE(review): a reading outside the table range is silently skipped,
        so the result can be shorter than read_list and later indexing by
        position may mis-align — confirm tables always cover the full range.

        Returns
        -------
        gra_list: the gravity values
        """
        gra_list = []
        if len(table_list) > 0:
            for reading in read_list:
                i = 0
                for table in table_list[:-1]:
                    i += 1
                    tt = [float(f) for f in table]
                    tt1 = [float(f) for f in table_list[i]]
                    if (reading >= tt[0]) and (reading < tt1[0]):
                        gval = tt[1] + (reading - tt[0]) * tt[2]
                        gra_list.append(gval)
        return gra_list

    def read_survey_file(self, filename, rename=''):
        """Parse a raw recording file into Loop objects appended to loop_list.

        *rename*, when non-empty, overrides the meter name of every record.
        Marker records 55555/33333/44444/66666 close the current loop;
        99999 closes the last loop and stops parsing. Errors are reported on
        stdout; nothing is raised.
        """
        if not isinstance(self.net, Network):
            raise ValueError('Property net must be set!')
        if len(self.meter_list) < 1:
            raise ValueError('Property meter must more than 1 set!')
        try:
            # Bug fix: the file handle was never closed; the with-block
            # guarantees it is released even on parse errors.
            with open(filename, 'r') as fh:
                if len(rename.strip()) > 0:
                    print('{} has all been renamed by {}'.format(filename, rename))
                i = 0
                flag = 0
                for line in fh:
                    i += 1
                    line = line.strip()  # Clean line
                    # Skip blank lines, comments and short header-like lines.
                    if (not line) or (line[0].lower() == 'a') or (line[0] == '#') or (len(line) > 6 and len(line) < 15):
                        i -= 1
                        print(line)
                        continue
                    vals = line.split()
                    if len(rename.strip()) > 0:
                        vals[0] = rename.strip()
                    if (i == 1): l1tmp = Loop(vals[0], vals[2])
                    if (flag == 1):
                        # Previous record closed a loop: start a new one.
                        l1tmp = Loop(vals[0], vals[2])
                        flag = 0
                    if (line[0:5] == '55555') or (line[0:5] == '33333') or \
                       (line[0:5] == '44444') or (line[0:5] == '66666'):
                        self.loop_list.append(l1tmp)  # loop
                        flag = 1
                    elif (line[0:5] == '99999'):
                        self.loop_list.append(l1tmp)
                        break  # loop
                    else:
                        l1tmp.add_recording(vals)
        except IOError:
            print('No file : %s' % (filename))
        except ValueError:
            print('Reading at line %d : check raw data file' % (i))
        except IndexError:
            print('Reading at line %d : check raw data file: possibly last line?' % (i))
class Campaign(object):
"""
经典单期平差,贝叶斯平差, 动态平差,混合平差,格值估计,
type = 1.单期; 2.动态; 3.混合;
eg: gravwork = Campaign('IGP201702',type = 1) #1.初始化平差对象
m1 = Meter('G147') #2.初始化重力仪
m1.read_table('table_name')
m1.set_scale = 1.000002
n1 = Network('Huabei') #3.读取重力测网信息(点坐标,类型等)
n1.read_netinfo('datsfilename')
s1 = Survey('survey_name1','time_tag1') #4.初始化一次测量
s1.set_net(n1)
s1.add_meters(meter_List)
s1.read_raw_data('datsfilename')
s1.corr_earthtide(alpha = 1.16)
s1.corr_atmosphere(beta = -0.3)
print(s1) #输出测量基本信息
s2 = Survey('survey_name2','time_tag2')
...
stagrav1 = Station('BJT01') #台站校正后的日均值数据-混合平差
stagrav1.set_meter = mxx
stagrav1.set_tide_free_data = dataframe1
ag = AGstation()
gravwork.add_ag_sta(ag) #添加绝对点信息
gravwork.add_surveys(survey_list) #添加测量到平差任务
gravwork.adj_method = 1 #平差方法选择
gravwork.pre_adj #准备平差矩阵(optional)
gravwork.run_adj #5.运行平差
gravwork.export_station() #输出点值结果
gravwork.export_drift() #输出漂移结果
gravwork.export_error()
"""
survey_dic = {}
_adj_method = 1
__adj_method = {1: 'cls', 2:'bay', 3: 'bay1',4: 'bay2'}
#survey_list = []
#agstation_list = []
_mat_list = []
_obs_list = []
def __init__(self, name, camp_type):
"""
"""
self._name = name
self._type = camp_type
self.survey_list = []
self.agstation_list = []
self._adj_method = 1
def __str__(self):
"""Override the built-in method 'print' when applied to such object
"""
print('Number of Survey used in Campaign: %d'%(len(self.survey_list)))
return "====Campaign print by BACGS===="
@property
def adj_method(self):
    # Current adjustment method: initially the int 1; after assignment via
    # the setter it is a lowercase name ('cls', 'bay', 'bay1', 'bay2').
    return self._adj_method

@adj_method.setter
def adj_method(self, value):
    """Select the adjustment method by numeric code (1-4) or by name.

    Raises ValueError for floats, out-of-range codes, unknown names, and
    any other type.
    """
    if isinstance(value, float):
        raise ValueError('Adjustment method cannot be an float!')
    if isinstance(value, int):
        if value < 1 or value > 4:
            raise ValueError('Adjustment method should be 1-4!')
        self._adj_method = self.__adj_method[value]
    elif isinstance(value, str):
        # index() raises ValueError when the name is unknown — this call is
        # the validation, its result is deliberately discarded.
        list(self.__adj_method.values()).index(value.lower())
        self._adj_method = value.lower()
    else:
        raise ValueError('Adjustment method wrong!!!')
@property
def mat_list(self):
    # Design matrices prepared by pre_adj().
    return self._mat_list

@property
def obs_list(self):
    # Observation vectors prepared by pre_adj().
    return self._obs_list
def add_ag_sta(self, ag):
    """Append an absolute-gravity station to the campaign.

    Raises ValueError when *ag* is not an AGstation instance.
    """
    if isinstance(ag, AGstation):
        self.agstation_list.append(ag)
    else:
        raise ValueError('Input is not a instance of AGstation class!')
def add_ag_from_file(self, filename):
"""add the absolute gravity station to the Campaign class from the file
"""
#20191102
try:
print(filename)
f = open(filename, 'r')
#print("number of lines: %d"%len([1 for line in open(filename, 'r')]))
for line in f:
line = line.strip()
if (not line) or (line[0] == '#'):
print('Heading of AG file:',line)
continue
vals=line.split()
ag1 = AGstation(vals[0],vals[1],vals[2],vals[3],vals[4],vals[5])
#('白山洞绝对','11014121','A', 116.169, 40.018, 212.5)
ag1.ref_gra = float(vals[6]) #1110.54453
ag1.ref_gra_err = float(vals[7])#5.0E-3
print('AG station {} has been loaded.'.format(vals[0]))
self.agstation_list.append(ag1)
f.close
except IOError:
print('No file : %s' %(filename))
except ValueError:
print('check raw data file')
except IndexError:
print('check raw data file: possibly last line?')
def add_surveys(self, survey):
    """Append a Survey instance to the campaign, validating its type."""
    if isinstance(survey, Survey):
        self.survey_list.append(survey)
    else:
        raise ValueError('Input is not a instance of Survey class!')
def pre_adj(self):
"""
checking survey information
generated matrix
"""
try:
#sur_tab = self.survey_list[0].survey_table #only one survey can be used
self._mat_list, self._obs_list = self.survey_matrix(0)
#print(_mat_list)
except:
return False
print('Please check the survey data file')
return True
@staticmethod
def tran_obs_array(rec_vals, pnt_id, kt):
"""
Matrix data type used Numpy ndarray. eg: np.ndarray([3,2])
Matlab old version code likes:
[Ut,Ur,dcnos,dcnoe,hoursdd1,ett1,prr1,DYn]=
tra2array(point_no21,point_no_ord,day_num1,data_num1,
g1_val,et1_val,hour1_val,press_val1,kt2);
(pnt_ids, pnt_id0, day_num, data_num, g1_val, et_val,
hour_val, press_val, kt):
"""
mlen = len(pnt_id) | |
<reponame>googleinterns/advertiser-quality-from-sites
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, os, math, time, itertools, shutil, random
from selenium import webdriver
import argparse
sys.path.append(os.getcwd() + '/..')
import numpy as np
import pandas as pd
from tqdm import tqdm
import socket
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow.python.util.deprecation as deprecation
deprecation._PRINT_DEPRECATION_WARNINGS = False
import tensorflow.compat.v1 as tf
tf.logging.set_verbosity(tf.logging.ERROR)
import tensorflow as tftwo
from tensorflow import keras
from bert import BertModelLayer
from bert.loader import StockBertConfig, map_stock_config_to_params, load_stock_weights
from bert.tokenization.bert_tokenization import FullTokenizer
from recursive_model import tree_lib
from source.utils import utils
from source.utils.utils import overrides
CHROME_PATH = '/home/vahidsanei_google_com/chromedriver/chromedriver'
class Configuration():
    """Hyper-parameter container for the DOM-based model.

    Resolves the BERT checkpoint/config/vocabulary paths from
    *bert_folder_path* and builds the tokenizer.
    """
    def __init__(self, input_df_train=None, input_df_test=None, lr=1e-2, adam_lr=1e-5, n_epochs=20, l2=0.0001, val_split_ratio=None, anneal_factor=1.5,
                 anneal_tolerance=0.99, patience_early_stopping=5, max_depth=4, n_classes=None, bert_folder_path=None,
                 bert_embedding_size=768, embedding_size=768, keep_prob=0.6, max_content_length=128, is_multicase=True, verbose=1, url_link=None, chrome_path=CHROME_PATH):
        # Bug fix: validation was done with `assert`, which is stripped under
        # `python -O`; raise an explicit error instead.
        if bert_folder_path is None:
            raise ValueError('input files for BERT must be specified')
        self.input_df_train = input_df_train
        self.input_df_test = input_df_test
        self.lr = lr
        self.adam_lr = adam_lr
        self.n_epochs = n_epochs
        self.l2 = l2
        self.val_split_ratio = val_split_ratio
        self.anneal_factor = anneal_factor
        self.anneal_tolerance = anneal_tolerance
        self.patience_early_stopping = patience_early_stopping
        self.max_depth = max_depth
        self.n_classes = n_classes
        self.bert_folder_path = bert_folder_path
        self.bert_ckpt_file = os.path.join(self.bert_folder_path, 'bert_model.ckpt')
        self.bert_config_file = os.path.join(self.bert_folder_path, 'bert_config.json')
        self.tokenizer = FullTokenizer(vocab_file=os.path.join(self.bert_folder_path, 'vocab.txt'))
        # the output size of uncased_L-12_H-768_A-12 is 768
        self.bert_embedding_size = bert_embedding_size
        self.embedding_size = embedding_size
        self.keep_prob = keep_prob
        self.max_content_length = max_content_length
        self.is_multicase = is_multicase
        self.verbose = verbose
        self.url_link = url_link
        # save weights after set value steps, to remove the constructed trees from RAM (to avoid OOM)
        self.AVOID_OOM_AT = 100
        self.model_name = None
        self.chrome_path = chrome_path
class DOMBasedModel():
def __init__(self, configuration):
    """
    Setting up a DOM-based Model and convert HTML contents to Tree strings.

    Training mode (configuration.url_link is None): reads train/test trees,
    pre-computes BERT embeddings for every leaf, and optionally carves a
    validation split off the test set.
    Inference mode: fetches the given URL with Selenium and converts its
    HTML into a single encoded tree.

    Args:
        configuration: Set up a model based on the hyperparameters of Configuration
    """
    self.configuration = configuration
    self.__set_model_name__()
    self.bert_model = self.load_bert()
    # if the url_link is not given train the model
    # otherwise predict the rating (or category) of the given url link
    if self.configuration.url_link is None:
        self.train_trees = tree_lib.read_trees(self.configuration.input_df_train, verbose=self.configuration.verbose)
        self.test_trees = tree_lib.read_trees(self.configuration.input_df_test, verbose=self.configuration.verbose)
        if self.configuration.verbose > 0:
            sys.stdout.write(' [Preprocessing step] Get embeddings of leaves contents:\n')
            sys.stdout.flush()
        # Embed every leaf once up front so BERT is not re-run inside the
        # optimization loop.
        for tree in (tqdm(self.train_trees) if self.configuration.verbose > 0 else self.train_trees):
            self.__content_to_embedding__(tree.root)
        for tree in (tqdm(self.test_trees) if self.configuration.verbose > 0 else self.test_trees):
            self.__content_to_embedding__(tree.root)
        # Optional validation split taken from the tail of the test set.
        self.val_trees = None
        if self.configuration.val_split_ratio is not None:
            split_size = int((1.0 - self.configuration.val_split_ratio) * len(self.test_trees))
            self.val_trees = self.test_trees[split_size:]
            self.test_trees = self.test_trees[:split_size]
        if self.configuration.verbose > 0:
            sys.stdout.write(f' Training data distribution with {len(self.train_trees)} entries\n')
            sys.stdout.flush()
            self.__cls_distributions__(self.train_trees)
            if self.val_trees is not None:
                sys.stdout.write(f' Validation data distribution with {len(self.val_trees)} entries\n')
                sys.stdout.flush()
                self.__cls_distributions__(self.val_trees)
            sys.stdout.write(f' Test data distribution with {len(self.test_trees)} entries\n')
            sys.stdout.flush()
            self.__cls_distributions__(self.test_trees)
            sys.stdout.write('*' * 100 + '\n')
            sys.stdout.flush()
        self.weights_path = os.path.join('.', 'weights', f'{self.configuration.model_name}.ckpt')
        self.best_weights_path = os.path.join('.', 'weights', f'best.{self.configuration.model_name}.ckpt')
    else:
        browser = webdriver.Chrome(executable_path=self.configuration.chrome_path)
        browser.get(self.configuration.url_link)
        # get the HTML conent of the given url link
        # and convert it to a tree string
        html_content = browser.page_source
        tree_string = tree_lib.html_to_encoded_tree(html_content, max_depth=4, label=-1)
        self.url_tree = tree_lib.Tree(tree_string)
        self.__content_to_embedding__(self.url_tree.root)
    # The model runs TF1-style graphs/sessions (see predict/run_epoch).
    tf.disable_eager_execution()
def __set_model_name__(self):
self.configuration.model_name = f'{socket.gethostname()}_{self.__class__.__name__}_cls={self.configuration.n_classes}'
def __cls_distributions__(self, trees):
"""
A helper function to show the distribution of classes in the input.
Args:
trees: tree strings that resembles the HTML contents
"""
cls_count = {}
for tree in trees:
if tree.label not in cls_count: cls_count[tree.label] = 1
else: cls_count[tree.label] += 1
total = sum(cls_count.values())
cls_count = dict(sorted(cls_count.items()))
for key, value in cls_count.items():
sys.stdout.write(f' Class {key} has {round(value / total * 100, 2)}% of entries.\n')
sys.stdout.flush()
def __content_to_embedding__(self, node):
    """
    A recursive function to convert the texts of leaves of the tree to embedding using BERT

    Leaves get a `bert_embedding` attribute with the [CLS] vector of their
    (padded/truncated) token ids; internal nodes just recurse.
    """
    if node.is_leaf == True:
        # NOTE(review): run_eagerly is re-set before every predict() call —
        # presumably required for single-sample prediction; confirm.
        self.bert_model.run_eagerly=True
        node.bert_embedding = np.asarray(self.bert_model.predict(np.expand_dims(self.__cut_with_padding__(node.content), 0)))
    else:
        for child in node.children:
            self.__content_to_embedding__(child)
def __cut_with_padding__(self, content):
tokens = self.configuration.tokenizer.tokenize(content)
ids = self.configuration.tokenizer.convert_tokens_to_ids(tokens)
# pad the remaining cells with zero
ids = ids + [0 for _ in range(self.configuration.max_content_length - len(ids))]
ids = ids[:self.configuration.max_content_length]
return ids
def __evaluate_prediction__(self, prediction, truth):
prediction = np.asarray(prediction)
truth = np.asarray(truth)
if self.configuration.is_multicase == True:
prediction = prediction.astype(int)
# computes top 1 acc
return np.mean(np.equal(prediction, truth))
else:
# computes rmse, mae respectively
return np.sqrt((np.square(np.subtract(prediction, truth))).mean(axis=0)), np.median(np.abs(np.subtract(prediction, truth)))
def load_bert(self):
    """Build a Keras model mapping token-id sequences to the BERT [CLS]
    embedding, with weights restored from the stock checkpoint files."""
    try:
        with tftwo.io.gfile.GFile(self.configuration.bert_config_file, 'r') as gf:
            bert_config = StockBertConfig.from_json_string(gf.read())
            bert_params = map_stock_config_to_params(bert_config)
            bert_params.adapter_size = None
            bert = BertModelLayer.from_params(bert_params, name='bert')
    except Exception as e:
        # Surface config-loading problems but keep the traceback.
        print(e)
        raise e
    input_ = keras.layers.Input(shape=(self.configuration.max_content_length, ), dtype='int64', name="input_ids")
    x = bert(input_)
    # Keep only the embedding of the first ([CLS]) token.
    output_ = keras.layers.Lambda(lambda seq: seq[:,0,:])(x)
    model = keras.Model(inputs=input_, outputs=output_)
    model.build(input_shape=(None, self.configuration.max_content_length))
    # Weights must be loaded after build() so the variables exist.
    load_stock_weights(bert, self.configuration.bert_ckpt_file)
    return model
def get_tensor(self, node):
    """
    Recursively creates a computational graph based on the structure of DOM HTML
    returns: a tensorflow embedding of a node in DOM
    """
    if node.is_leaf == True:
        # Leaf: project the precomputed BERT embedding into the model's
        # embedding space (variables created by get_variables, reused here).
        with tf.variable_scope('bert_layer', reuse=True):
            W_bert = tf.get_variable('W_bert')
            b_bert = tf.get_variable('b_bert')
            node_tensor = tf.nn.relu(tf.add(tf.matmul(node.bert_embedding, W_bert), b_bert))
    else:
        # Internal node: concatenate the children embeddings and pass them
        # through the layer whose index matches the number of children.
        children_tensors_list = []
        for child in node.children:
            children_tensors_list.append(self.get_tensor(child))
        children_tensors = tf.concat(children_tensors_list, 1)
        with tf.variable_scope(f'layer{len(node.children)}', reuse=True):
            W = tf.get_variable(f'W{len(node.children)}')
            b = tf.get_variable(f'b{len(node.children)}')
            node_tensor = tf.nn.relu(tf.add(tf.matmul(children_tensors, W), b))
            # Dropout only on internal nodes, keep_prob from the configuration.
            node_tensor = tf.nn.dropout(node_tensor, rate=1.0 - self.configuration.keep_prob)
    return node_tensor
def _compute_logit(self, tree):
    """Unnormalized class scores for a whole tree: root embedding passed
    through the output layer (variables reused from get_variables)."""
    root_tensor = self.get_tensor(tree.root)
    with tf.variable_scope('output_layer', reuse=True):
        W = tf.get_variable('W0')
        b = tf.get_variable('b0')
        logit = tf.add(tf.matmul(root_tensor, W), b)
    return logit
def get_variables(self):
    """
    Define the trainable parameters including weights and biases

    One (W, b) pair per child-arity d in 1..max_depth (the 'layer{d}' scope
    used by get_tensor for a node with d children), plus the output layer
    and the BERT projection layer.
    """
    with tf.variable_scope('output_layer'):
        tf.get_variable('W0', [self.configuration.embedding_size, self.configuration.n_classes])
        tf.get_variable('b0', [1, self.configuration.n_classes])
    for d in range(1, self.configuration.max_depth + 1):
        with tf.variable_scope(f'layer{d}'):
            tf.get_variable(f'W{d}', [self.configuration.embedding_size * d, self.configuration.embedding_size])
            tf.get_variable(f'b{d}', [1, self.configuration.embedding_size])
    with tf.variable_scope('bert_layer'):
        tf.get_variable('W_bert', [self.configuration.bert_embedding_size, self.configuration.embedding_size])
        tf.get_variable('b_bert', [1, self.configuration.embedding_size])
def _compute_loss(self, label, logit):
    """Cross-entropy of *logit* against *label* plus an L2 penalty on every
    weight matrix (output layer, per-arity layers and the BERT projection)."""
    cross_entropy_loss = tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit, labels=tf.constant(label)))
    Ws = []
    with tf.variable_scope('output_layer', reuse=True):
        W = tf.get_variable('W0')
        Ws.append(W)
    for d in range(1, self.configuration.max_depth + 1):
        with tf.variable_scope(f'layer{d}', reuse=True):
            W = tf.get_variable(f'W{d}')
            Ws.append(W)
    with tf.variable_scope('bert_layer', reuse=True):
        W = tf.get_variable('W_bert')
        Ws.append(W)
    # Biases are deliberately excluded from the regularizer.
    total_weight_loss = tf.reduce_sum([tf.nn.l2_loss(W) for W in Ws])
    total_loss = self.configuration.l2 * total_weight_loss + cross_entropy_loss
    return total_loss
def tree_loss(self, tree, logit=None):
if logit is None: logit = self._compute_logit(tree)
return self._compute_loss([tree.label], logit)
def predict(self, trees, weights_path):
predictions, losses = [], []
pos = 0
if self.configuration.verbose > 0:
sys.stdout.write(' Prediction:\n')
sys.stdout.flush()
rogbar = keras.utils.Progbar(len(trees))
while pos < len(trees):
with tf.Graph().as_default(), tf.Session() as sess:
self.get_variables()
saver = tf.train.Saver()
saver.restore(sess, weights_path)
# To avoid OOM, we break the loop so we can save the model and restore it back.
for _ in range(self.configuration.AVOID_OOM_AT):
if pos >= len(trees): break
tree = trees[pos]
logit = self._compute_logit(tree)
tf_prediction = tf.argmax(logit, 1)
y_pred = sess.run(tf_prediction)[0]
predictions.append(y_pred)
tree_loss = sess.run(self.tree_loss(tree, logit=logit))
losses.append(tree_loss)
pos += 1
if self.configuration.verbose > 0:
rogbar.update(pos)
return predictions, losses
    def run_epoch(self, epoch_number):
        """Train on every tree once and evaluate on train/validation sets.

        The graph and session are rebuilt every ``AVOID_OOM_AT`` trees: the
        checkpoint is saved at the end of each sub-batch and restored at the
        start of the next, which bounds graph growth (each tree adds its own
        loss/optimizer ops).

        Returns (train_eval, val_eval, train_losses, val_losses); the val
        entries are None when no validation set was provided.
        """
        # Reshuffle the training order each epoch.
        random.shuffle(self.train_trees)
        losses = []
        pos = 0
        if not os.path.exists('./weights'):
            os.makedirs('./weights')
        while pos < len(self.train_trees):
            with tf.Graph().as_default(), tf.Session() as sess:
                self.get_variables()
                # Fresh variables only at the very start of training; every
                # subsequent sub-batch resumes from the saved checkpoint.
                if epoch_number == 0 and pos == 0:
                    sess.run(tf.global_variables_initializer())
                else:
                    saver = tf.train.Saver()
                    saver.restore(sess, self.weights_path)
                # To avoid OOM, we break the loop so we can save the model and restore it back.
                for i in range(self.configuration.AVOID_OOM_AT):
                    if pos >= len(self.train_trees): break
                    tree = self.train_trees[pos]
                    loss = self.tree_loss(tree)
                    # A new minimize op per tree; plain SGD adds no slot
                    # variables, so no re-initialization is needed here.
                    optimizer = tf.train.GradientDescentOptimizer(self.configuration.lr).minimize(loss)
                    l, _ = sess.run([loss, optimizer])
                    losses.append(l)
                    if self.configuration.verbose > 0:
                        sys.stdout.write(f'\r Epoch:{epoch_number + 1}/{self.configuration.n_epochs}, {pos + 1}/{len(self.train_trees)}: loss={np.mean(losses)}')
                        sys.stdout.flush()
                    pos += 1
                # Persist progress so the next sub-batch can restore it.
                saver = tf.train.Saver()
                saver.save(sess, self.weights_path)
        if self.configuration.verbose > 0:
            sys.stdout.write('\n')
            sys.stdout.flush()
        train_predictions, train_losses = self.predict(self.train_trees, weights_path=self.weights_path)
        if self.val_trees is None:
            val_predictions, val_losses = None, None
        else:
            val_predictions, val_losses = self.predict(self.val_trees, weights_path=self.weights_path)
        train_labels = [t.label for t in self.train_trees]
        val_labels = None if self.val_trees is None else [t.label for t in self.val_trees]
        train_eval = self.__evaluate_prediction__(train_predictions, train_labels)
        val_eval = None if self.val_trees is None else self.__evaluate_prediction__(val_predictions, val_labels)
        return train_eval, val_eval, train_losses, val_losses
    def train_model(self):
        """Full training loop: run `n_epochs` epochs, log metrics, anneal the
        learning rate when progress stalls, and snapshot the weights that
        achieve the best validation loss into `best_weights_path`.

        NOTE(review): if no validation set is provided, the best-weights
        checkpoint files are never copied — confirm callers then use
        `weights_path` directly.
        """
        train_losses = []
        train_evals = []
        val_losses = []
        val_evals = []
        last_epoch_loss = float('inf')
        self.best_val_loss = float('inf')
        best_val_epoch = -1
        for epoch in range(self.configuration.n_epochs):
            start_time = time.time()
            train_eval, val_eval, epoch_train_losses, epoch_val_losses = self.run_epoch(epoch_number=epoch)
            epoch_time = time.time() - start_time
            train_loss = np.mean(epoch_train_losses)
            train_evals.append(train_eval)
            train_losses.append(train_loss)
            # Multicase = classification (scalar accuracy); otherwise the
            # eval is a (rmse, mae) pair for regression.
            if self.configuration.is_multicase == True:
                if val_eval is not None:
                    val_loss = np.mean(epoch_val_losses)
                    val_losses.append(val_loss)
                    val_evals.append(val_eval)
                    sys.stdout.write(f' Train acc = {round(train_eval * 100, 3)}%, Val acc = {round(val_eval * 100, 3)}%, Train loss = {train_loss}, Val loss = {val_loss}, epoch time={round(epoch_time, 2)}s\n')
                else:
                    sys.stdout.write(f' Train acc = {round(train_eval * 100, 3)}%, Train loss = {train_loss}, epoch time={round(epoch_time, 2)}s\n')
            else:
                if val_eval is not None:
                    val_loss = np.mean(epoch_val_losses)
                    val_losses.append(val_loss)
                    val_evals.append(val_eval)
                    sys.stdout.write(f' Train rmse = {train_eval[0]}, Train mae = {train_eval[1]}, Val rmse = {val_eval[0]}, Val mae = {val_eval[1]}, Train loss = {train_loss}, Val loss = {val_loss}, epoch time={round(epoch_time, 2)}s\n')
                else:
                    sys.stdout.write(f' Train rmse = {train_eval[0]}, Train mae = {train_eval[1]}, Train loss = {train_loss}, epoch time={round(epoch_time, 2)}s\n')
            sys.stdout.write(('*' * 150) + '\n')
            sys.stdout.flush()
            # lr is a learning rate and is used only for non-fast DOM Based Models.
            # Anneal when this epoch's loss failed to improve enough.
            if train_losses[-1] > last_epoch_loss * self.configuration.anneal_tolerance:
                self.configuration.lr /= self.configuration.anneal_factor
            last_epoch_loss = train_losses[-1]
            # Keep a copy of the checkpoint with the best validation loss
            # (TF checkpoints are three files: data, index, meta).
            if val_eval is not None and np.mean(epoch_val_losses) < self.best_val_loss:
                self.best_val_loss = np.mean(epoch_val_losses)
                best_val_epoch = epoch
                shutil.copyfile(f'{self.weights_path}.data-00000-of-00001', f'{self.best_weights_path}.data-00000-of-00001')
                shutil.copyfile(f'{self.weights_path}.index', f'{self.best_weights_path}.index')
                shutil.copyfile(f'{self.weights_path}.meta', f'{self.best_weights_path}.meta')
def evaluate_testset(self):
test_predictions, test_losses = self.predict(self.test_trees, self.best_weights_path)
test_labels = [t.label for t in self.test_trees]
test_eval = self.__evaluate_prediction__(test_predictions, test_labels)
if self.configuration.is_multicase == True:
sys.stdout.write(f' Test acc = {round(test_eval * 100, 3)}%\n')
else:
sys.stdout.write(f' Test rmse = {test_eval[0]}, Test mae = {test_eval[1]}\n')
class DOMBasedModelWithLessParams(DOMBasedModel):
    """DOM-based model variant that shares a single weight matrix across all
    internal nodes (folding children pairwise) instead of one matrix per
    fan-out as in the parent class.
    """
    def __init__(self, configuration):
        """
        Setting up another version of DOM-based Model which uses
        less trainable weight variables.
        """
        super().__init__(configuration)

    @overrides(DOMBasedModel)
    def get_tensor(self, node):
        # Leaves: project the node's BERT embedding into model space.
        if node.is_leaf == True:
            with tf.variable_scope('bert_layer', reuse=True):
                W_bert = tf.get_variable('W_bert')
                b_bert = tf.get_variable('b_bert')
                node_tensor = tf.nn.relu(tf.matmul(node.bert_embedding, W_bert) + b_bert)
        else:
            # Internal nodes: fold children left-to-right through one shared
            # [2*emb, emb] matrix; a single child passes through unchanged.
            with tf.variable_scope(f'internal_layer', reuse=True):
                W = tf.get_variable(f'W_internal')
                b = tf.get_variable(f'b_internal')
                for idx, child in enumerate(node.children):
                    if idx == 0: node_tensor = self.get_tensor(child)
                    else:
                        children_tensors = tf.concat([node_tensor, self.get_tensor(child)], 1)
                        node_tensor = tf.nn.relu(tf.matmul(children_tensors, W) + b)
            # NOTE(review): dropout placed after the folding loop, mirroring
            # the parent implementation — confirm against the original file.
            node_tensor = tf.nn.dropout(node_tensor, rate=1.0 - self.configuration.keep_prob)
        return node_tensor

    @overrides(DOMBasedModel)
    def _compute_loss(self, label, logit):
        """Cross-entropy plus L2 over the three weight matrices (no biases)."""
        cross_entropy_loss = tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit, labels=tf.constant(label)))
        Ws = []
        with tf.variable_scope('output_layer', reuse=True):
            W = tf.get_variable('W0')
            Ws.append(W)
        with tf.variable_scope(f'internal_layer', reuse=True):
            W = tf.get_variable(f'W_internal')
            Ws.append(W)
        with tf.variable_scope('bert_layer', reuse=True):
            W = tf.get_variable('W_bert')
            Ws.append(W)
        total_weight_loss = tf.reduce_sum([tf.nn.l2_loss(W) for W in Ws])
        total_loss = self.configuration.l2 * total_weight_loss + cross_entropy_loss
        return total_loss

    @overrides(DOMBasedModel)
    def get_variables(self):
        """Create the output, shared-internal, and BERT-projection variables."""
        with tf.variable_scope('output_layer'):
            tf.get_variable('W0', [self.configuration.embedding_size, self.configuration.n_classes])
            tf.get_variable('b0', [1, self.configuration.n_classes])
        with tf.variable_scope(f'internal_layer'):
            # Shared across all internal nodes: consumes two concatenated
            # child embeddings at a time.
            tf.get_variable(f'W_internal', [self.configuration.embedding_size * 2, self.configuration.embedding_size])
            tf.get_variable(f'b_internal', [1, self.configuration.embedding_size])
        with tf.variable_scope('bert_layer'):
            tf.get_variable('W_bert', [self.configuration.bert_embedding_size, self.configuration.embedding_size])
            tf.get_variable('b_bert', [1, self.configuration.embedding_size])
class DOMBasedModelWithParamsForDepths(DOMBasedModel):
def __init__(self, configuration):
"""
Setting up another version of DOM-based Model which has
different weights for different depths.
"""
super().__init__(configuration)
@overrides(DOMBasedModel)
def get_tensor(self, node):
if node.is_leaf == True:
with tf.variable_scope('bert_layer', reuse=True):
W_bert = | |
get average of vertex-level data within parcels
# set all NaN values to 0 before calling `_stats` because we are
# returning sums, so the 0 values won't impact the sums (if we left
# the NaNs then all parcels with even one NaN entry would be NaN)
currdata = np.squeeze(data[start:end, idx])
isna = np.isnan(currdata)
counts, sums = _stats(np.nan_to_num(currdata), labels, indices)
# however, we do need to account for the NaN values in the counts
# so that our means are similar to what we'd get from e.g.,
# np.nanmean here, our "sums" are the counts of NaN values in our
# parcels
_, nacounts = _stats(isna, labels, indices)
counts = (np.asanyarray(counts, dtype=float)
- np.asanyarray(nacounts, dtype=float))
with np.errstate(divide='ignore', invalid='ignore'):
currdata = sums / counts
# get indices of unkown and corpuscallosum and delete from parcels
inds = sorted([
names.index(f) for f in set(drop_labels) & set(names)
])
currdata = np.delete(currdata, inds)
# store parcellated data
reduced[n_parc:n_parc + len(names) - len(inds), idx] = currdata
start = end
n_parc += len(names) - len(inds)
return np.squeeze(reduced)
def _get_fsaverage_coords(version='fsaverage', surface='sphere'):
    """
    Gets vertex coordinates for specified `surface` of fsaverage `version`

    Parameters
    ----------
    version : str, optional
        One of {'fsaverage', 'fsaverage3', 'fsaverage4', 'fsaverage5',
        'fsaverage6'}. Default: 'fsaverage'
    surface : str, optional
        Surface for which to return vertex coordinates. Default: 'sphere'

    Returns
    -------
    coords : (N, 3) numpy.ndarray
        xyz coordinates of vertices for {left,right} hemisphere
    hemiid : (N,) numpy.ndarray
        Array denoting hemisphere designation of entries in `coords`, where
        `hemiid=0` denotes the left and `hemiid=1` the right hemisphere
    """
    # get coordinates and hemisphere designation for spin generation
    lhsphere, rhsphere = fetch_fsaverage(version)[surface]
    coords, hemi = [], []
    for n, sphere in enumerate([lhsphere, rhsphere]):
        coords.append(read_geometry(sphere)[0])
        hemi.append(np.ones(len(coords[-1])) * n)
    # np.vstack is the supported equivalent of np.row_stack, which was
    # deprecated in NumPy 1.24 and removed in NumPy 2.0.
    return np.vstack(coords), np.hstack(hemi)
def _get_fsaverage_spins(version='fsaverage', spins=None, n_rotate=1000,
**kwargs):
"""
Generates spatial permutation resamples for fsaverage `version`
If `spins` are provided then performs checks to confirm they are valid
Parameters
----------
version : str, optional
Specifies which version of `fsaverage` for which to generate spins.
Must be one of {'fsaverage', 'fsaverage3', 'fsaverage4', 'fsaverage5',
'fsaverage6'}. Default: 'fsaverage'
spins : array_like, optional
Pre-computed spins to use instead of generating them on the fly. If not
provided will use other provided parameters to create them. Default:
None
n_rotate : int, optional
Number of rotations to generate. Default: 1000
return_cost : bool, optional
Whether to return cost array (specified as Euclidean distance) for each
coordinate for each rotation. Currently this option is not supported if
pre-computed `spins` are provided. Default: True
kwargs : key-value pairs
Keyword arguments passed to `netneurotools.stats.gen_spinsamples`
Returns
--------
spins : (N, S) numpy.ndarray
Resampling array
"""
if spins is None:
coords, hemiid = _get_fsaverage_coords(version, 'sphere')
spins = gen_spinsamples(coords, hemiid, n_rotate=n_rotate,
**kwargs)
if kwargs.get('return_cost', False):
return spins
spins = np.asarray(spins, dtype='int32')
if spins.shape[-1] != n_rotate:
warnings.warn('Shape of provided `spins` array does not match '
'number of rotations requested with `n_rotate`. '
'Ignoring specified `n_rotate` parameter and using '
'all provided `spins`.')
n_rotate = spins.shape[-1]
return spins, None
def spin_data(data, *, lhannot, rhannot, version='fsaverage', n_rotate=1000,
              spins=None, drop_labels=None, verbose=False, **kwargs):
    """
    Projects parcellated `data` to surface, rotates, and re-parcellates

    Projection to the surface uses `{lh,rh}annot` files. Rotation uses vertex
    coordinates from the specified fsaverage `version` and relies on
    :func:`netneurotools.stats.gen_spinsamples`. Re-parcellated data will not
    be exactly identical to original values due to re-averaging process.
    Parcels subsumed by regions in `drop_labels` will be listed as NaN.

    Parameters
    ----------
    data : (N,) numpy.ndarray
        Parcellated data to be rotated. Parcels should be ordered by [left,
        right] hemisphere; ordering within hemisphere should correspond to the
        provided `{lh,rh}annot` annotation files.
    {lh,rh}annot : str
        Path to .annot file containing labels to parcels on the {left,right}
        hemisphere
    version : str, optional
        Specifies which version of `fsaverage` provided annotation files
        correspond to. Must be one of {'fsaverage', 'fsaverage3', 'fsaverage4',
        'fsaverage5', 'fsaverage6'}. Default: 'fsaverage'
    n_rotate : int, optional
        Number of rotations to generate. Default: 1000
    spins : array_like, optional
        Pre-computed spins to use instead of generating them on the fly. If not
        provided will use other provided parameters to create them. Default:
        None
    drop_labels : list, optional
        Specifies regions in {lh,rh}annot that are not present in `data`. NaNs
        will be inserted in place of the these regions in the returned data. If
        not specified, parcels defined in `netneurotools.freesurfer.FSIGNORE`
        are assumed to not be present. Default: None
    verbose : bool, optional
        Whether to print occasional status messages. Default: False
    kwargs : key-value pairs
        Keyword arguments passed to `netneurotools.stats.gen_spinsamples`

    Returns
    -------
    rotated : (N, `n_rotate`) numpy.ndarray
        Rotated `data`
    cost : (N, `n_rotate`,) numpy.ndarray
        Cost (specified as Euclidean distance) of re-assigning each coordinate
        for every rotation in `spinsamples`. Only provided if `return_cost` is
        True.
    """
    if drop_labels is None:
        drop_labels = FSIGNORE

    # get coordinates and hemisphere designation for spin generation
    vertices = parcels_to_vertices(data, lhannot=lhannot, rhannot=rhannot,
                                   drop_labels=drop_labels)

    # get spins + cost (if requested)
    spins, cost = _get_fsaverage_spins(version=version, spins=spins,
                                       n_rotate=n_rotate,
                                       verbose=verbose, **kwargs)
    # The annot files must describe the same surface mesh as the spins.
    if len(vertices) != len(spins):
        raise ValueError('Provided annotation files have a different '
                         'number of vertices than the specified fsaverage '
                         'surface.\n    ANNOTATION: {} vertices\n    '
                         'FSAVERAGE:  {} vertices'
                         .format(len(vertices), len(spins)))

    # Apply each rotation at the vertex level, then average back to parcels.
    spun = np.zeros(data.shape + (n_rotate,))
    for n in range(n_rotate):
        if verbose:
            msg = f'Reducing vertices to parcels: {n:>5}/{n_rotate}'
            print(msg, end='\b' * len(msg), flush=True)
        spun[..., n] = vertices_to_parcels(vertices[spins[:, n]],
                                           lhannot=lhannot, rhannot=rhannot,
                                           drop_labels=drop_labels)

    if verbose:
        # Erase the progress message before returning.
        print(' ' * len(msg) + '\b' * len(msg), end='', flush=True)

    if kwargs.get('return_cost', False):
        return spun, cost

    return spun
def spin_parcels(*, lhannot, rhannot, version='fsaverage', n_rotate=1000,
spins=None, drop_labels=None, verbose=False, **kwargs):
"""
Rotates parcels in `{lh,rh}annot` and re-assigns based on maximum overlap
Vertex labels are rotated with :func:`netneurotools.stats.gen_spinsamples`
and a new label is assigned to each *parcel* based on the region maximally
overlapping with its boundaries.
Parameters
----------
{lh,rh}annot : str
Path to .annot file containing labels to parcels on the {left,right}
hemisphere
version : str, optional
Specifies which version of `fsaverage` provided annotation files
correspond to. Must be one of {'fsaverage', 'fsaverage3', 'fsaverage4',
'fsaverage5', 'fsaverage6'}. Default: 'fsaverage'
n_rotate : int, optional
Number of rotations to generate. Default: 1000
spins : array_like, optional
Pre-computed spins to use instead of generating them on the fly. If not
provided will use other provided parameters to create them. Default:
None
drop_labels : list, optional
Specifies regions in {lh,rh}annot that are not present in `data`. NaNs
will be inserted in place of the these regions in the returned data. If
not specified, parcels defined in `netneurotools.freesurfer.FSIGNORE`
are assumed to not be present. Default: None
seed : {int, np.random.RandomState instance, None}, optional
Seed for random number generation. Default: None
verbose : bool, optional
Whether to print occasional status messages. Default: False
return_cost : bool, optional
Whether to return cost array (specified as Euclidean distance) for each
coordinate for each rotation. Default: True
kwargs : key-value pairs
Keyword arguments passed to `netneurotools.stats.gen_spinsamples`
Returns
-------
spinsamples : (N, `n_rotate`) numpy.ndarray
Resampling matrix to use in permuting data parcellated with labels from
{lh,rh}annot, where `N` is the number of parcels. Indices of -1
indicate that the parcel was completely encompassed by regions in
`drop` and should be ignored.
cost : (N, `n_rotate`,) numpy.ndarray
Cost (specified as Euclidean distance) of re-assigning each coordinate
for every rotation in `spinsamples`. Only provided if `return_cost` is
True.
"""
def overlap(vals):
""" Returns most common non-negative value in `vals`; -1 if all neg
"""
vals = np.asarray(vals)
vals, counts = np.unique(vals[vals > 0], return_counts=True)
try:
return vals[counts.argmax()]
except ValueError:
return -1
if drop_labels is None:
drop_labels = FSIGNORE
drop_labels = _decode_list(drop_labels)
# get vertex-level labels (set drop labels to - values)
vertices, end = [], 0
for n, annot in enumerate([lhannot, rhannot]):
labels, ctab, names = read_annot(annot)
names = _decode_list(names)
todrop = set(names) & set(drop_labels)
inds = [names.index(f) - n for n, f in enumerate(todrop)]
labs = np.arange(len(names) - len(inds)) + (end - (len(inds) * n))
insert = np.arange(-1, -(len(inds) + 1), -1)
vertices.append(np.insert(labs, inds, insert)[labels])
end += len(names)
vertices = np.hstack(vertices)
labels = | |
import ssl
import logging
import datetime
import pytz
from multidict import CIMultiDictProxy
from typing import List, Optional
from cryptoxlib.CryptoXLibClient import CryptoXLibClient, RestCallType
from cryptoxlib.clients.bitpanda import enums
from cryptoxlib.clients.bitpanda.exceptions import BitpandaRestException, BitpandaException
from cryptoxlib.clients.bitpanda.functions import map_pair
from cryptoxlib.Pair import Pair
from cryptoxlib.WebsocketMgr import WebsocketMgr, Subscription
from cryptoxlib.clients.bitpanda.BitpandaWebsocket import BitpandaWebsocket
LOG = logging.getLogger(__name__)
class BitpandaClient(CryptoXLibClient):
REST_API_URI = "https://api.exchange.bitpanda.com/public/v1/"
    def __init__(self, api_key: str = None, api_trace_log: bool = False,
                 ssl_context: ssl.SSLContext = None) -> None:
        """Create a Bitpanda Pro client.

        api_key is only required for the signed (account) endpoints.
        """
        super().__init__(api_trace_log, ssl_context)
        self.api_key = api_key

    def _get_rest_api_uri(self) -> str:
        # Base URI prepended to every REST resource path.
        return self.REST_API_URI

    def _sign_payload(self, rest_call_type: RestCallType, resource: str, data: dict = None, params: dict = None, headers: dict = None) -> None:
        # Bitpanda authenticates via a bearer token only; request body/params
        # are not signed. NOTE(review): assumes `headers` is a dict and
        # `api_key` was provided — a None api_key raises TypeError here.
        headers["Authorization"] = "Bearer " + self.api_key
def _preprocess_rest_response(self, status_code: int, headers: 'CIMultiDictProxy[str]', body: Optional[dict]) -> None:
if str(status_code)[0] != '2':
raise BitpandaRestException(status_code, body)
    def _get_websocket_mgr(self, subscriptions: List[Subscription], startup_delay_ms: int = 0,
                           ssl_context = None) -> WebsocketMgr:
        # Factory hook used by the base class to build the exchange-specific
        # websocket manager; the api_key enables private channels.
        return BitpandaWebsocket(subscriptions = subscriptions, api_key = self.api_key, ssl_context = ssl_context,
                                 startup_delay_ms = startup_delay_ms)
    async def get_currencies(self) -> dict:
        """Return all currencies listed on the exchange (public endpoint)."""
        return await self._create_get("currencies")

    async def get_fee_groups(self) -> dict:
        """Return the exchange-wide fee tiers (public endpoint)."""
        return await self._create_get("fees")

    async def get_account_balances(self) -> dict:
        """Return balances of the authenticated account (signed request)."""
        return await self._create_get("account/balances", signed = True)

    async def get_account_fees(self) -> dict:
        """Return fee settings of the authenticated account (signed request)."""
        return await self._create_get("account/fees", signed = True)
async def get_account_orders(self, from_timestamp: datetime.datetime = None, to_timestamp: datetime.datetime = None,
pair: Pair = None, with_cancelled_and_rejected: str = None,
with_just_filled_inactive: str = None,
with_just_orders: str = None, max_page_size: str = None, cursor: str = None) -> dict:
params = BitpandaClient._clean_request_params({
"with_cancelled_and_rejected": with_cancelled_and_rejected,
"with_just_filled_inactive": with_just_filled_inactive,
"with_just_orders": with_just_orders,
"max_page_size": max_page_size,
"cursor": cursor,
})
if pair is not None:
params["instrument_code"] = map_pair(pair)
if from_timestamp is not None:
params["from"] = from_timestamp.astimezone(pytz.utc).isoformat()
if to_timestamp is not None:
params["to"] = to_timestamp.astimezone(pytz.utc).isoformat()
return await self._create_get("account/orders", params = params, signed = True)
async def get_account_order(self, order_id: str) -> dict:
return await self._create_get("account/orders/" + order_id, signed = True)
async def get_account_order_trades(self, order_id: str) -> dict:
return await self._create_get("account/orders/" + order_id + "/trades", signed = True)
async def get_account_trades(self, from_timestamp: datetime.datetime = None, to_timestamp: datetime.datetime = None,
pair: Pair = None, max_page_size: str = None, cursor: str = None) -> dict:
params = BitpandaClient._clean_request_params({
"max_page_size": max_page_size,
"cursor": cursor,
})
if pair is not None:
params["instrument_code"] = map_pair(pair)
if from_timestamp is not None:
params["from"] = from_timestamp.astimezone(pytz.utc).isoformat()
if to_timestamp is not None:
params["to"] = to_timestamp.astimezone(pytz.utc).isoformat()
return await self._create_get("account/trades", params = params, signed = True)
    async def get_account_trade(self, trade_id: str) -> dict:
        """Fetch a single trade of the account by its id (signed request)."""
        return await self._create_get("account/trades/" + trade_id, signed = True)

    async def get_account_trading_volume(self) -> dict:
        """Return the account's trailing trading volume (signed request)."""
        return await self._create_get("account/trading-volume", signed = True)
async def create_market_order(self, pair: Pair, side: enums.OrderSide, amount: str, client_id: str = None) -> dict:
data = {
"instrument_code": map_pair(pair),
"side": side.value,
"type": enums.OrderType.MARKET.value,
"amount": amount
}
if client_id is not None:
data['client_id'] = client_id
return await self._create_post("account/orders", data = data, signed = True)
async def create_limit_order(self, pair: Pair, side: enums.OrderSide, amount: str, limit_price: str,
time_in_force: enums.TimeInForce = None, client_id: str = None) -> dict:
data = {
"instrument_code": map_pair(pair),
"side": side.value,
"type": enums.OrderType.LIMIT.value,
"amount": amount,
"price": limit_price
}
if client_id is not None:
data['client_id'] = client_id
if time_in_force is not None:
data['time_in_force'] = time_in_force.value
return await self._create_post("account/orders", data = data, signed = True)
async def create_stop_limit_order(self, pair: Pair, side: enums.OrderSide, amount: str, limit_price: str,
stop_price: str,
time_in_force: enums.TimeInForce = None, client_id: str = None) -> dict:
data = {
"instrument_code": map_pair(pair),
"side": side.value,
"type": enums.OrderType.STOP_LIMIT.value,
"amount": amount,
"price": limit_price,
"trigger_price": stop_price
}
if client_id is not None:
data['client_id'] = client_id
if time_in_force is not None:
data['time_in_force'] = time_in_force.value
return await self._create_post("account/orders", data = data, signed = True)
async def delete_account_orders(self, pair: Pair = None, ids: List[str] = None) -> dict:
params = {}
if pair is not None:
params["instrument_code"] = map_pair(pair)
if ids is not None:
params['ids'] = ','.join(ids)
return await self._create_delete("account/orders", params = params, signed = True)
async def delete_account_order(self, order_id: str = None, client_id: str = None) -> dict:
if order_id is None and client_id is None:
raise BitpandaException('One of order_id/client_id has to be provided.')
if order_id is not None and client_id is not None:
raise BitpandaException('Only one of order_id/client_id can be provided.')
if order_id is not None:
return await self._create_delete("account/orders/" + order_id, signed = True)
else:
return await self._create_delete("account/orders/client/" + client_id, signed = True)
async def update_order(self, amount: str, order_id: str = None, client_id: str = None) -> dict:
if order_id is None and client_id is None:
raise BitpandaException('One of order_id/client_id has to be provided.')
if order_id is not None and client_id is not None:
raise BitpandaException('Only one of order_id/client_id can be provided.')
data = {
"amount": amount
}
if order_id is not None:
return await self._create_put("account/orders/" + order_id, data = data, signed = True)
else:
return await self._create_put("account/orders/client/" + client_id, data = data, signed = True)
async def get_candlesticks(self, pair: Pair, unit: enums.TimeUnit, period: str, from_timestamp: datetime.datetime,
to_timestamp: datetime.datetime) -> dict:
params = {
"unit": unit.value,
"period": period,
"from": from_timestamp.astimezone(pytz.utc).isoformat(),
"to": to_timestamp.astimezone(pytz.utc).isoformat(),
}
return await self._create_get("candlesticks/" + map_pair(pair), params = params)
    async def get_instruments(self) -> dict:
        """Return all tradeable instruments (public endpoint)."""
        return await self._create_get("instruments")

    async def get_order_book(self, pair: Pair, level: str = None, depth: str = None) -> dict:
        """Return the order book for `pair`; `level`/`depth` are optional
        API query parameters and are omitted when None."""
        params = BitpandaClient._clean_request_params({
            "level": level,
            "depth": depth
        })
        return await self._create_get("order-book/" + map_pair(pair), params = params)

    async def get_time(self) -> dict:
        """Return the exchange server time (public endpoint)."""
        return await self._create_get("time")

    async def get_market_tickers(self) -> dict:
        """Return tickers for every market (public endpoint)."""
        return await self._create_get("market-ticker")

    async def get_market_ticker(self, pair: Pair) -> dict:
        """Return the ticker for a single market (public endpoint)."""
        return await self._create_get("market-ticker/" + map_pair(pair))
async def get_price_tick(self, pair: Pair, from_timestamp: datetime.datetime = None,
to_timestamp: datetime.datetime = None) -> dict:
params = {}
if from_timestamp is not None:
params['from'] = from_timestamp.astimezone(pytz.utc).isoformat()
if to_timestamp is not None:
params['to'] = to_timestamp.astimezone(pytz.utc).isoformat()
return await self._create_get("price-ticks/" + map_pair(pair), params = params)
    async def create_deposit_crypto_address(self, currency: str) -> dict:
        """Create a new crypto deposit address for `currency` (signed)."""
        data = {
            "currency": currency
        }
        return await self._create_post("account/deposit/crypto", data = data, signed = True)

    async def get_deposit_crypto_address(self, currency: str) -> dict:
        """Return the existing crypto deposit address for `currency` (signed)."""
        return await self._create_get("account/deposit/crypto/" + currency, signed = True)

    async def get_fiat_deposit_info(self) -> dict:
        """Return EUR (SEPA) fiat deposit details for the account (signed)."""
        return await self._create_get("account/deposit/fiat/EUR", signed = True)
async def withdraw_crypto(self, currency: str, amount: str, address: str, destination_tag: str = None) -> dict:
data = {
'currency': currency,
'amount': amount,
'recipient': {
'address': address
}
}
if destination_tag is not None:
data['recipient']['destination_tag'] = destination_tag
return await self._create_post("account/withdraw/crypto", data = data, signed = True)
async def withdraw_fiat(self, currency: str, amount: str, payout_account_id: str) -> dict:
data = {
'currency': currency,
'amount': amount,
'payout_account_id': payout_account_id
}
return await self._create_post("account/withdraw/fiat", data = data, signed = True)
async def get_deposits(self, from_timestamp: datetime.datetime = None,
to_timestamp: datetime.datetime = None,
currency: str = None,
max_page_size: int = None,
cursor: int = None) -> dict:
params = self._clean_request_params({
'currency_code': currency,
'max_page_size': max_page_size,
'cursor': cursor
})
if from_timestamp is not None:
params['from'] = from_timestamp.astimezone(pytz.utc).isoformat()
if to_timestamp is not None:
params['to'] = to_timestamp.astimezone(pytz.utc).isoformat()
return await self._create_get("account/deposits", params = params, signed = True)
    async def get_bitpanda_deposits(self, from_timestamp: datetime.datetime = None,
                                    to_timestamp: datetime.datetime = None,
                                    currency: str = None,
                                    max_page_size: int = None,
                                    cursor: int = None) -> dict:
        """List deposits made via the Bitpanda platform (signed request).

        None-valued arguments are omitted from the query; timestamps are
        serialized as UTC ISO-8601 strings.
        """
        # _clean_request_params (inherited) drops None-valued entries.
        params = self._clean_request_params({
            'currency_code': currency,
            'max_page_size': max_page_size,
            'cursor': cursor
        })
        if from_timestamp is not None:
            params['from'] = from_timestamp.astimezone(pytz.utc).isoformat()
        if to_timestamp is not None:
            params['to'] = to_timestamp.astimezone(pytz.utc).isoformat()
        return await self._create_get("account/deposits/bitpanda", params = params, signed = True)
    async def get_withdrawals(self, from_timestamp: datetime.datetime = None,
                              to_timestamp: datetime.datetime = None,
                              currency: str = None,
                              max_page_size: int = None,
                              cursor: int = None) -> dict:
        """List the account's withdrawals (signed request).

        None-valued arguments are omitted from the query; timestamps are
        serialized as UTC ISO-8601 strings.
        """
        params = self._clean_request_params({
            'currency_code': currency,
            'max_page_size': max_page_size,
            'cursor': cursor
        })
        if from_timestamp is not None:
            params['from'] = from_timestamp.astimezone(pytz.utc).isoformat()
        if to_timestamp is not None:
            params['to'] = to_timestamp.astimezone(pytz.utc).isoformat()
        return await self._create_get("account/withdrawals", params = params, signed = True)
    async def get_bitpanda_withdrawals(self, from_timestamp: datetime.datetime = None,
                                       to_timestamp: datetime.datetime = None,
                                       currency: str = None,
                                       max_page_size: int = None,
                                       cursor: int = None) -> dict:
        """List withdrawals made via the Bitpanda platform (signed request).

        None-valued arguments are omitted from the query; timestamps are
        serialized as UTC ISO-8601 strings.
        """
        params = self._clean_request_params({
            'currency_code': currency,
            'max_page_size': max_page_size,
            'cursor': cursor
        })
        if from_timestamp is not None:
            params['from'] = from_timestamp.astimezone(pytz.utc).isoformat()
        if to_timestamp is not None:
            params['to'] = to_timestamp.astimezone(pytz.utc).isoformat()
        return await self._create_get("account/withdrawals/bitpanda", params = params, signed = True)
async def toggle_best_fee_collection(self, indicator: bool) -> dict:
data = {
'collect_fees_in_best': indicator
}
return await self._create_post("account/fees", data = data, signed = True)
async def auto_cancel_all_orders(self, timeout_ms: int) | |
= list(iinc)
aiinc.extend(iisnc)
miianc = max(aiinc)
# Header
if np.isfinite(fill_value):
fval = str(fill_value)
else:
fval = 'NaN'
if header:
var, svar = _get_header(
f, head, sep, iinc, iisnc,
squeeze=squeeze,
fill=fill, fill_value=fval, sfill_value=sfill_value,
strip=strip, full_header=full_header,
transpose=transpose, strarr=strarr)
f.close()
return var, svar
# Values - first line
if (miianc >= nres) and (not fill):
f.close()
if sep is None:
sres = ' '.join(res)
else:
sres = sep.join(res)
raise ValueError('Line has not enough columns to index: ' + sres)
var = list()
svar = list()
if iinc:
null = _line2var(res, var, iinc, strip)
if iisnc:
null = _line2var(res, svar, iisnc, False if strip is None else strip)
# Values - rest of file
for line in f:
s = str(line.rstrip('\r\n'))
if len(s) == 0:
if skip_blank:
continue
else:
break
if comment is not None:
if (s[0] in comment):
continue
res = s.split(sep)
nres = len(res)
if (miianc >= nres) and (not fill):
f.close()
raise ValueError('Line has not enough columns to index: ' + s)
if iinc:
null = _line2var(res, var, iinc, strip)
if iisnc:
null = _line2var(res, svar, iisnc,
False if strip is None else strip)
f.close()
# Return correct shape and type
if var:
var = np.array(var, dtype=str)
if fill:
var = np.where(var == '', fval, var)
var = np.array(var, dtype=float)
if squeeze:
var = var.squeeze()
if transpose:
var = var.T
if return_list:
if var.ndim == 1:
var = [ i for i in var ]
else:
var = [ [ var[i, j] for j in range(var.shape[1]) ]
for i in range(var.shape[0]) ]
if svar:
svar = np.array(svar, dtype=str)
if fill:
svar = np.where(svar == '', sfill_value, svar)
if squeeze:
svar = svar.squeeze()
if transpose:
svar = svar.T
if return_list:
if svar.ndim == 1:
svar = [ i for i in svar ]
else:
svar = [ [ svar[i, j] for j in range(svar.shape[1]) ]
for i in range(svar.shape[0]) ]
return var, svar
# --------------------------------------------------------------------
def fread(infile,
          nc=0, cname=None, snc=0, sname=None,
          **kwargs):
    """
    Read floats from a file into 2D float array
    Columns can be picked specifically by index or name. The header can be read
    separately with the (almost) same call as reading the floats.
    Parameters
    ----------
    infile : str
        Source file name
    nc : int or iterable, optional
        Number of columns to be read as floats [default: all (*nc=0*)]. *nc*
        can be an int or a vector of column indexes, starting with 0.
        *nc<=0* reads all columns.
    cname : iterable of str, optional
        Columns for floats can be chosen by the values in the first header
        line; must be an iterable with strings.
    snc : int or iterable, optional
        Not used in fread; will be silently ignored.
    sname : iterable of str, optional
        Not used in fread; will be silently ignored.
    **kwargs : dict, optional
        All other keywords will be passed to `fsread`.
    Returns
    -------
    array of floats
        Array of numbers in file, or header.
    Notes
    -----
    If *header==True* then skip is counterintuitive because it is
    actually the number of header rows to be read. This is to
    be able to have the exact same call of the function, once
    with *header=False* and once with *header=True*.
    Blank lines are not filled but are taken as end of file if *fill=True*.
    Examples
    --------
    Create some data
    >>> filename = 'test.dat'
    >>> with open(filename,'w') as ff:
    ...     print('head1 head2 head3 head4', file=ff)
    ...     print('1.1 1.2 1.3 1.4', file=ff)
    ...     print('2.1 2.2 2.3 2.4', file=ff)
    Read sample file in different ways
    >>> # data
    >>> print(fread(filename, skip=1))
    [[1.1 1.2 1.3 1.4]
     [2.1 2.2 2.3 2.4]]
    >>> print(fread(filename, skip=2))
    [[2.1 2.2 2.3 2.4]]
    >>> print(fread(filename, skip=1, cskip=1))
    [[1.2 1.3 1.4]
     [2.2 2.3 2.4]]
    >>> print(fread(filename, nc=2, skip=1, cskip=1))
    [[1.2 1.3]
     [2.2 2.3]]
    >>> print(fread(filename, nc=[1,3], skip=1))
    [[1.2 1.4]
     [2.2 2.4]]
    >>> print(fread(filename, nc=1, skip=1))
    [[1.1]
     [2.1]]
    >>> print(fread(filename, nc=1, skip=1, squeeze=True))
    [1.1 2.1]
    >>> # header
    >>> print(fread(filename, nc=2, skip=1, header=True))
    [['head1', 'head2']]
    >>> print(fread(filename, nc=2, skip=1, header=True, full_header=True))
    ['head1 head2 head3 head4']
    >>> print(fread(filename, nc=1, skip=2, header=True))
    [['head1'], ['1.1']]
    >>> print(fread(filename, nc=1, skip=2, header=True, squeeze=True))
    ['head1', '1.1']
    >>> print(fread(filename, nc=1, skip=2, header=True, strarr=True))
    [['head1']
     ['1.1']]
    Create data with blank lines
    >>> with open(filename, 'a') as ff:
    ...     print('', file=ff)
    ...     print('3.1 3.2 3.3 3.4', file=ff)
    >>> print(fread(filename, skip=1))
    [[1.1 1.2 1.3 1.4]
     [2.1 2.2 2.3 2.4]]
    >>> print(fread(filename, skip=1, skip_blank=True, comment='#!'))
    [[1.1 1.2 1.3 1.4]
     [2.1 2.2 2.3 2.4]
     [3.1 3.2 3.3 3.4]]
    Create data with comment lines
    >>> with open(filename, 'a') as ff:
    ...     print('# First comment', file=ff)
    ...     print('! Second 2 comment', file=ff)
    ...     print('4.1 4.2 4.3 4.4', file=ff)
    >>> print(fread(filename, skip=1))
    [[1.1 1.2 1.3 1.4]
     [2.1 2.2 2.3 2.4]]
    >>> print(fread(filename, skip=1, nc=[2], skip_blank=True, comment='#'))
    [[1.3]
     [2.3]
     [3.3]
     [2. ]
     [4.3]]
    >>> print(fread(filename, skip=1, skip_blank=True, comment='#!'))
    [[1.1 1.2 1.3 1.4]
     [2.1 2.2 2.3 2.4]
     [3.1 3.2 3.3 3.4]
     [4.1 4.2 4.3 4.4]]
    >>> print(fread(filename, skip=1, skip_blank=True, comment=('#','!')))
    [[1.1 1.2 1.3 1.4]
     [2.1 2.2 2.3 2.4]
     [3.1 3.2 3.3 3.4]
     [4.1 4.2 4.3 4.4]]
    >>> print(fread(filename, skip=1, skip_blank=True, comment=['#','!']))
    [[1.1 1.2 1.3 1.4]
     [2.1 2.2 2.3 2.4]
     [3.1 3.2 3.3 3.4]
     [4.1 4.2 4.3 4.4]]
    Add a line with fewer columns
    >>> with open(filename, 'a') as ff:
    ...     print('5.1 5.2', file=ff)
    >>> print(fread(filename, skip=1))
    [[1.1 1.2 1.3 1.4]
     [2.1 2.2 2.3 2.4]]
    >>> print(fread(filename, skip=1, skip_blank=True, comment='#!',
    ...             fill=True, fill_value=-1))
    [[ 1.1  1.2  1.3  1.4]
     [ 2.1  2.2  2.3  2.4]
     [ 3.1  3.2  3.3  3.4]
     [ 4.1  4.2  4.3  4.4]
     [ 5.1  5.2 -1.  -1. ]]
    >>> # transpose
    >>> print(fread(filename, skip=1))
    [[1.1 1.2 1.3 1.4]
     [2.1 2.2 2.3 2.4]]
    >>> print(fread(filename, skip=1, transpose=True))
    [[1.1 2.1]
     [1.2 2.2]
     [1.3 2.3]
     [1.4 2.4]]
    Create some more data with Nan and Inf
    >>> filename1 = 'test1.dat'
    >>> with open(filename1,'w') as ff:
    ...     print('head1 head2 head3 head4', file=ff)
    ...     print('1.1 1.2 1.3 1.4', file=ff)
    ...     print('2.1 nan Inf "NaN"', file=ff)
    Treat Nan and Inf with automatic strip of " and '
    >>> print(fread(filename1, skip=1, transpose=True))
    [[1.1 2.1]
     [1.2 nan]
     [1.3 inf]
     [1.4 nan]]
    Create some more data with escaped numbers
    >>> filename2 = 'test2.dat'
    >>> with open(filename2,'w') as ff:
    ...     print('head1 head2 head3 head4', file=ff)
    ...     print('"1.1" "1.2" "1.3" "1.4"', file=ff)
    ...     print('2.1 nan Inf "NaN"', file=ff)
    Strip
    >>> print(fread(filename2, skip=1, transpose=True, strip='"'))
    [[1.1 2.1]
     [1.2 nan]
     [1.3 inf]
     [1.4 nan]]
    Create more data with an extra (shorter) header line
    >>> filename3 = 'test3.dat'
    >>> with open(filename3,'w') as ff:
    ...     print('Extra header', file=ff)
    ...     print('head1 head2 head3 head4', file=ff)
    ...     print('1.1 1.2 1.3 1.4', file=ff)
    ...     print('2.1 2.2 2.3 2.4', file=ff)
    >>> print(fread(filename3, skip=2, hskip=1))
    [[1.1 1.2 1.3 1.4]
     [2.1 2.2 2.3 2.4]]
    >>> print(fread(filename3, nc=2, skip=2, hskip=1, header=True))
    [['head1', 'head2']]
    >>> # cname
    >>> print(fread(filename, cname='head2', skip=1, skip_blank=True,
    ...             comment='#!', squeeze=True))
    [1.2 2.2 3.2 4.2 5.2]
    >>> print(fread(filename, cname=['head1','head2'], skip=1,
    ...             skip_blank=True, comment='#!'))
    [[1.1 1.2]
     [2.1 2.2]
     [3.1 3.2]
     [4.1 4.2]
     [5.1 5.2]]
    >>> print(fread(filename, cname=['head1','head2'], skip=1, skip_blank=True,
    ...             comment='#!', header=True))
    [['head1', 'head2']]
    >>> print(fread(filename, cname=['head1','head2'], skip=1, skip_blank=True,
    ...             comment='#!', header=True, full_header=True))
    ['head1 head2 head3 head4']
    >>> print(fread(filename, cname=[' head1','head2'], skip=1,
    ...             skip_blank=True, comment='#!', hstrip=False))
    [[1.2]
     [2.2]
     [3.2]
     [4.2]
     [5.2]]
    Clean up doctest
    >>> import os
    >>> os.remove(filename)
    >>> os.remove(filename1)
    >>> os.remove(filename2)
    >>> os.remove(filename3)
    """
    # nc=0 in fread and sread reads all columns
    if nc == 0 and cname is None:
        nc = -1
    # fsread returns (float_columns, string_columns); fread never requests
    # string columns, so the second element is always empty and discarded.
    dat, _ = fsread(infile, nc=nc, cname=cname, snc=0, sname=None,
                    **kwargs)
    return dat
# --------------------------------------------------------------------
def sread(infile,
nc=0, cname=None, snc=0, sname=None,
fill_value='', sfill_value='',
header=False, full_header=False,
**kwargs):
"""
Read strings from a file into 2D string array
Columns can be picked specifically by index or name. The header can be read
separately with the (almost) same call as reading the strings.
Parameters
----------
infile : str
Source file name
nc : int or iterable, optional
Number of columns to be read as strings [default: all (*nc=0*)]. *nc*
can be an int or a vector of | |
<reponame>elemental-lf/docker-unoconv
import os
import subprocess
from collections import namedtuple
from io import BytesIO, SEEK_SET
from typing import ByteString, List, Optional, Tuple, BinaryIO
from PIL import Image
from fs import open_fs
from celery import Celery
from fs.errors import ResourceNotFound
# Celery application; broker and task settings come from unoconv.celeryconfig.
app = Celery('unoconv')
app.config_from_object('unoconv.celeryconfig')
# One supported input document type: its MIME type, the LibreOffice document
# family ("doctype"), the unoconv import filter name (None lets LibreOffice
# auto-detect) and the canonical file extension.
_ImportFormat = namedtuple('ImportFormat', ['mime_type', 'document_type', 'import_filter', 'extension'])
# Requested output sizing: explicit pixel/logical sizes plus flags asking for
# one side to be computed from the source image's aspect ratio.
_Dimensions = namedtuple(
    'Dimensions', ['pixel_height', 'pixel_width', 'logical_height', 'logical_width', 'scale_height', 'scale_width'])
# "No sizing requested" sentinel: all sizes unset, no aspect-ratio scaling.
_null_dimensions = _Dimensions(None, None, None, None, False, False)
# yapf: disable
# Lookup table of supported import formats. Order matters:
# _determine_import_format returns the first matching entry.
# NOTE(review): several entries repeat the same extension or MIME pairing
# (e.g. '.stw', '.stc', '.xlsx') — presumably intentional aliases; verify
# against the unoconv filter list.
FORMATS = [
    _ImportFormat(mime_type='application/vnd.oasis.opendocument.graphics', document_type='presentation', import_filter='odg', extension='.odg'),
    _ImportFormat(mime_type='application/vnd.oasis.opendocument.graphics-template', document_type='graphics', import_filter='otg', extension='.otg'),
    _ImportFormat(mime_type='application/vnd.oasis.opendocument.graphics-flat-xml', document_type='graphics', import_filter='fodg', extension='.fodg'),
    _ImportFormat(mime_type='application/vnd.oasis.opendocument.presentation', document_type='presentation', import_filter='odp', extension='.odp'),
    _ImportFormat(mime_type='application/vnd.oasis.opendocument.presentation-template', document_type='presentation', import_filter='otp', extension='.otp'),
    _ImportFormat(mime_type='application/vnd.oasis.opendocument.presentation-flat-xml', document_type='presentation', import_filter='fodp', extension='.fodp'),
    _ImportFormat(mime_type='application/vnd.oasis.opendocument.spreadsheet', document_type='spreadsheet', import_filter='ods', extension='.ods'),
    _ImportFormat(mime_type='application/vnd.oasis.opendocument.spreadsheet-template', document_type='spreadsheet', import_filter='ots', extension='.ots'),
    _ImportFormat(mime_type='application/vnd.oasis.opendocument.spreadsheet-flat-xml', document_type='spreadsheet', import_filter='fods', extension='.fods'),
    _ImportFormat(mime_type='application/vnd.oasis.opendocument.text', document_type='document', import_filter='odt', extension='.odt'),
    _ImportFormat(mime_type='application/vnd.oasis.opendocument.text-flat-xml', document_type='document', import_filter='fodt', extension='.fodt'),
    _ImportFormat(mime_type='application/vnd.oasis.opendocument.text-template', document_type='document', import_filter='ott', extension='.ott'),
    _ImportFormat(mime_type='application/vnd.oasis.opendocument.text-master-template', document_type='global', import_filter='otm', extension='.otm'),
    _ImportFormat(mime_type='application/vnd.sun.xml.calc', document_type='spreadsheet', import_filter='sxc', extension='.sxc'),
    _ImportFormat(mime_type='application/vnd.sun.xml.calc.template', document_type='spreadsheet', import_filter='stc', extension='.stc'),
    _ImportFormat(mime_type='application/vnd.sun.xml.draw', document_type='graphics', import_filter='sxd', extension='.sxd'),
    _ImportFormat(mime_type='application/vnd.sun.xml.draw.template', document_type='graphics', import_filter='std', extension='.std'),
    _ImportFormat(mime_type='application/vnd.sun.xml.impress', document_type='presentation', import_filter='sxi', extension='.sxi'),
    _ImportFormat(mime_type='application/vnd.sun.xml.impress.template', document_type='presentation', import_filter='sti', extension='.sti'),
    _ImportFormat(mime_type='application/vnd.sun.xml.math', document_type='formula', import_filter='sxm', extension='.sxm'),
    _ImportFormat(mime_type='application/vnd.sun.xml.writer', document_type='document', import_filter='sxw', extension='.sxw'),
    _ImportFormat(mime_type='application/vnd.sun.xml.writer.global', document_type='document', import_filter='sxg', extension='.sxg'),
    _ImportFormat(mime_type='application/vnd.sun.xml.writer.template', document_type='document', import_filter='stw', extension='.stw'),
    _ImportFormat(mime_type='application/vnd.sun.xml.writer.web', document_type='document', import_filter='stw', extension='.stw'),
    _ImportFormat(mime_type='application/msword', document_type='document', import_filter='doc', extension='.doc'),
    _ImportFormat(mime_type='application/msword', document_type='document', import_filter='doc', extension='.dot'),
    _ImportFormat(mime_type='application/x-mswrite', document_type='document', import_filter=None, extension='.wri'),
    _ImportFormat(mime_type='application/vnd.ms-works', document_type='document', import_filter=None, extension='.wps'),
    _ImportFormat(mime_type='application/vnd.ms-word.document.macroEnabled.12', document_type='document', import_filter=None, extension='.docm'),
    _ImportFormat(mime_type='application/vnd.ms-word.template.macroEnabled.12', document_type='document', import_filter='dotm', extension='.dotm'),
    _ImportFormat(mime_type='application/vnd.ms-powerpoint', document_type='presentation', import_filter='ppt', extension='.ppt'),
    _ImportFormat(mime_type='application/vnd.ms-powerpoint.presentation.macroEnabled.12', document_type='presentation', import_filter=None, extension='.pptm'),
    _ImportFormat(mime_type='application/vnd.ms-powerpoint', document_type='presentation', import_filter='pps', extension='.pps'),
    _ImportFormat(mime_type='application/vnd.ms-powerpoint.slideshow.macroEnabled.12', document_type='presentation', import_filter='pps', extension='.ppsm'),
    _ImportFormat(mime_type='application/vnd.ms-excel', document_type='spreadsheet', import_filter='xls', extension='.xls'),
    _ImportFormat(mime_type='application/vnd.ms-excel.sheet.macroEnabled.12', document_type='spreadsheet', import_filter='xls', extension='.xlsm'),
    _ImportFormat(mime_type='application/vnd.ms-excel', document_type='spreadsheet', import_filter='xlt', extension='.xlt'),
    _ImportFormat(mime_type='application/vnd.ms-excel.sheet.macroEnabled.12', document_type='spreadsheet', import_filter='xltm', extension='.xltm'),
    _ImportFormat(mime_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', document_type='spreadsheet', import_filter='xlsx', extension='.xlsx'),
    _ImportFormat(mime_type='application/vnd.openxmlformats-officedocument.spreadsheetml.template', document_type='spreadsheet', import_filter='xlsx', extension='.xlsx'),
    _ImportFormat(mime_type='application/vnd.openxmlformats-officedocument.presentationml.presentation', document_type='presentation', import_filter='pptx', extension='.pptx'),
    _ImportFormat(mime_type='application/vnd.openxmlformats-officedocument.presentationml.template', document_type='presentation', import_filter='pptx', extension='.pptx'),
    _ImportFormat(mime_type='application/vnd.openxmlformats-officedocument.presentationml.slideshow', document_type='presentation', import_filter='pptx', extension='.pptx'),
    _ImportFormat(mime_type='application/vnd.openxmlformats-officedocument.presentationml.slide', document_type='presentation', import_filter='pptx', extension='.pptx'),
    _ImportFormat(mime_type='application/vnd.openxmlformats-officedocument.wordprocessingml.document', document_type='document', import_filter='docx', extension='.docx'),
    _ImportFormat(mime_type='application/vnd.openxmlformats-officedocument.wordprocessingml.template', document_type='document', import_filter='docx', extension='.dotx'),
    _ImportFormat(mime_type='application/wps-office.doc', document_type='document', import_filter='doc', extension='.doc'),
    _ImportFormat(mime_type='application/wps-office.docx', document_type='document', import_filter='docx', extension='.docx'),
    _ImportFormat(mime_type='application/wps-office.xls', document_type='spreadsheet', import_filter='xls', extension='.xls'),
    _ImportFormat(mime_type='application/wps-office.xlsx', document_type='spreadsheet', import_filter='xlsx', extension='.xlsx'),
    _ImportFormat(mime_type='application/wps-office.ppt', document_type='presentation', import_filter='ppt', extension='.ppt'),
    _ImportFormat(mime_type='application/wps-office.pptx', document_type='presentation', import_filter='pptx', extension='.pptx'),
    _ImportFormat(mime_type='application/docbook+xml', document_type='document', import_filter='docbook', extension='.docbook'),
    _ImportFormat(mime_type='text/csv', document_type='spreadsheet', import_filter='csv', extension='.csv'),
    _ImportFormat(mime_type='text/spreadsheet', document_type='spreadsheet', import_filter='slk', extension='.slk'),
    _ImportFormat(mime_type='application/vnd.stardivision.draw', document_type='graphics', import_filter='sda', extension='.sda'),
    _ImportFormat(mime_type='application/vnd.stardivision.calc', document_type='spreadsheet', import_filter='sdc', extension='.sdc'),
    _ImportFormat(mime_type='application/vnd.sun.xml.calc.template', document_type='spreadsheet', import_filter='stc', extension='.stc'),
    _ImportFormat(mime_type='application/vnd.stardivision.impress', document_type='presentation', import_filter='sdd', extension='.sdd'),
    _ImportFormat(mime_type='application/vnd.stardivision.writer', document_type='document', import_filter='sdw', extension='.sdw'),
    _ImportFormat(mime_type='application/x-starwriter', document_type='document', import_filter='sdw', extension='.sdw'),
    _ImportFormat(mime_type='image/tiff', document_type='graphics', import_filter='tiff', extension='.tiff'),
    _ImportFormat(mime_type='image/tiff', document_type='graphics', import_filter='tiff', extension='.tif'),
    _ImportFormat(mime_type='image/emf', document_type='graphics', import_filter='emf', extension='.emf'),
    _ImportFormat(mime_type='image/x-emf', document_type='graphics', import_filter='emf', extension='.emf'),
    _ImportFormat(mime_type='image/x-svm', document_type='graphics', import_filter='svm', extension='.svm'),
    _ImportFormat(mime_type='image/wmf', document_type='graphics', import_filter='wmf', extension='.wmf'),
    _ImportFormat(mime_type='image/x-wmf', document_type='graphics', import_filter='wmf', extension='.wmf'),
    _ImportFormat(mime_type='image/x-pict', document_type='graphics', import_filter='pct', extension='.pct'),
    _ImportFormat(mime_type='image/x-cmx', document_type='graphics', import_filter='cmx', extension='.cmx'),
    _ImportFormat(mime_type='image/svg+xml', document_type='graphics', import_filter='svg', extension='.svg'),
    _ImportFormat(mime_type='image/bmp', document_type='graphics', import_filter='bmp', extension='.bmp'),
    _ImportFormat(mime_type='image/x-ms-bmp', document_type='graphics', import_filter='bmp', extension='.bmp'),
    _ImportFormat(mime_type='image/x-eps', document_type='graphics', import_filter='eps', extension='.eps'),
    _ImportFormat(mime_type='application/rtf', document_type='document', import_filter='rtf', extension='.rtf'),
    _ImportFormat(mime_type='text/rtf', document_type='document', import_filter='rtf', extension='.rtf'),
]
# yapf: enable
# Default timeout (seconds) for a single unoconv invocation.
UNOCONV_DEFAULT_TIMEOUT = 300
def _determine_import_format(mime_type: str, extension: str) -> Optional[_ImportFormat]:
    """Resolve the best matching import format for a document.

    Matching runs in three passes of decreasing specificity: MIME type
    and extension together, then extension alone, then MIME type alone.
    The first matching entry of FORMATS wins; None when nothing matches.
    """
    usable_extension = extension is not None and extension not in ['.', '']
    # Pass 1: both criteria must agree.
    if mime_type is not None and usable_extension:
        for candidate in FORMATS:
            if (candidate.mime_type, candidate.extension) == (mime_type, extension):
                return candidate
    # Pass 2: extension only.
    if usable_extension:
        for candidate in FORMATS:
            if candidate.extension == extension:
                return candidate
    # Pass 3: MIME type only.
    if mime_type is not None:
        for candidate in FORMATS:
            if candidate.mime_type == mime_type:
                return candidate
    return None
@app.task
def supported_import_format(*, mime_type: str = None, extension: str = None) -> bool:
    """Celery task: report whether a document of this type can be imported."""
    return _determine_import_format(mime_type, extension) is not None
def _call_unoconv(*, args: List[str], data: BinaryIO, timeout: int) -> BytesIO:
    """Run unoconv with the given options, feeding *data* on stdin.

    Parameters
    ----------
    args : list of str
        Conversion options (``--format`` etc.); the list is no longer
        mutated in place (the previous implementation appended to it).
    data : binary file-like
        Source document; read fully and passed to unoconv's stdin.
    timeout : int
        Timeout in seconds, applied both to unoconv itself and to the
        subprocess call.

    Returns
    -------
    BytesIO with the converted document.

    Raises
    ------
    RuntimeError
        On non-zero exit status, timeout, empty output or any other failure.
    """
    # Build a fresh argument vector so the caller's list is left untouched.
    argv = ['unoconv'] + list(args) + ['--stdin', '--stdout', '--timeout', str(timeout)]
    try:
        # Unfortunately we can't pass the file like object directly to subprocess.run as it requires a real
        # OS file descriptor underneath.
        result = subprocess.run(
            argv, input=data.read(), stdout=subprocess.PIPE, stderr=subprocess.PIPE, timeout=timeout)
    except subprocess.CalledProcessError as exception:
        # Only possible if check=True is ever added; read everything from the
        # exception object — `result` is unbound in this branch (the previous
        # code referenced result.returncode here, a latent NameError).
        raise RuntimeError(f'unoconv invocation failed with return code {exception.returncode} and output: ' +
                           exception.stderr.decode('utf-8', errors='ignore').replace('\n', ', ')) from None
    except subprocess.TimeoutExpired as exception:
        # TimeoutExpired.stderr may be None when nothing was captured yet.
        captured = exception.stderr.decode('utf-8', errors='ignore').replace('\n', ', ') if exception.stderr else ''
        raise RuntimeError('unoconv invocation failed due to timeout with output: ' + captured) from None
    except Exception as exception:
        raise RuntimeError(f'unoconv invocation failed with a {type(exception).__name__} exception: {str(exception)}.') from None
    decoded_stderr = result.stderr.decode('utf-8', errors='ignore').replace('\n', ', ')
    if result.returncode != 0:
        raise RuntimeError(f'unoconv invocation failed with return code {result.returncode} and output: ' + decoded_stderr)
    if len(result.stdout) == 0:
        # A zero exit code with no payload still counts as a failure.
        raise RuntimeError('unoconv invocation was successful but did not return any data. Output on stderr was: ' + decoded_stderr)
    return BytesIO(result.stdout)
def _populate_args_for_image(*, import_format: _ImportFormat, export_format_name: str,
                             dimensions: _Dimensions) -> List[str]:
    """Assemble the unoconv argument list for an image export.

    Emits the export format, the optional doctype / import filter from
    *import_format*, and one ``-e Key=Value`` pair for every dimension
    that is set (truthy) in *dimensions*.
    """
    argv = ['--format', export_format_name]
    if import_format.document_type is not None:
        argv += ['--doctype', import_format.document_type]
    if import_format.import_filter is not None:
        argv += ['--import-filter-name', import_format.import_filter]
    size_options = (('PixelHeight', dimensions.pixel_height),
                    ('PixelWidth', dimensions.pixel_width),
                    ('LogicalHeight', dimensions.logical_height),
                    ('LogicalWidth', dimensions.logical_width))
    for option_name, value in size_options:
        if value:
            argv += ['-e', f'{option_name}={value}']
    return argv
def _scale_dimensions(*, data: BinaryIO, dimensions: _Dimensions) -> _Dimensions:
    """Compute concrete output dimensions that preserve the aspect ratio.

    *data* must hold a rendered image of the document; its width/height
    ratio drives the side that was asked to be scaled via the
    scale_height / scale_width flags in *dimensions*.
    """
    assert dimensions.scale_height or dimensions.scale_width
    try:
        image = Image.open(data)
        image.load()
    except Exception as exception:
        raise RuntimeError(f'Loading internal image data failed with a {type(exception).__name__} exception: {str(exception)}.') from None
    adjust_height = dimensions.scale_height
    adjust_width = dimensions.scale_width
    # When both sides were requested, only scale along the dominant side so
    # the image keeps its proportions.
    if adjust_height and adjust_width:
        if image.height / image.width < 1.0:
            # Image is wider than high: the height will be derived.
            adjust_width = False
        else:
            # Image is higher than wide: the width will be derived.
            adjust_height = False
    if adjust_height:
        new_pixel_width = dimensions.pixel_width
        new_pixel_height = round(dimensions.pixel_width * image.height / image.width)
        new_logical_width = dimensions.logical_width
        if dimensions.logical_width is not None:
            new_logical_height = round(dimensions.logical_width * image.height / image.width)
        else:
            new_logical_height = dimensions.logical_height
    elif adjust_width:
        new_pixel_height = dimensions.pixel_height
        new_pixel_width = round(dimensions.pixel_height * image.width / image.height)
        new_logical_height = dimensions.logical_height
        if dimensions.logical_height is not None:
            new_logical_width = round(dimensions.logical_height * image.width / image.height)
        else:
            new_logical_width = dimensions.logical_width
    return _Dimensions(
        pixel_height=new_pixel_height,
        pixel_width=new_pixel_width,
        logical_height=new_logical_height,
        logical_width=new_logical_width,
        scale_height=dimensions.scale_height,
        scale_width=dimensions.scale_width)
def _convert_to_jpg(*, data: BinaryIO, import_format: _ImportFormat, dimensions: _Dimensions, quality: int,
                    timeout: int) -> BytesIO:
    """Convert a document to JPEG via unoconv.

    When aspect-ratio scaling was requested, a throw-away lowest-quality
    rendering is produced first so the source aspect ratio can be
    measured; the real conversion then runs with the scaled dimensions.
    """
    if dimensions.scale_height or dimensions.scale_width:
        probe_args = _populate_args_for_image(
            import_format=import_format, export_format_name='jpg', dimensions=_null_dimensions)
        probe_args.extend(['-e', 'Quality=1'])
        probe_image = _call_unoconv(args=probe_args, data=data, timeout=timeout)
        dimensions = _scale_dimensions(data=probe_image, dimensions=dimensions)
    final_args = _populate_args_for_image(
        import_format=import_format, export_format_name='jpg', dimensions=dimensions)
    if quality is not None:
        final_args.extend(['-e', f'Quality={quality}'])
    # Rewind: the probe pass (if any) consumed the stream.
    data.seek(0, SEEK_SET)
    return _call_unoconv(args=final_args, data=data, timeout=timeout)
def _convert_to_png(*, data: BinaryIO, import_format: _ImportFormat, dimensions: _Dimensions, compression: int,
                    timeout: int) -> BytesIO:
    """Convert a document to PNG via unoconv.

    When aspect-ratio scaling was requested, a throw-away low-compression
    rendering is produced first so the source aspect ratio can be
    measured; the real conversion then runs with the scaled dimensions.
    """
    if dimensions.scale_height or dimensions.scale_width:
        probe_args = _populate_args_for_image(
            import_format=import_format, export_format_name='png', dimensions=_null_dimensions)
        probe_args.extend(['-e', 'Compression=1'])
        probe_image = _call_unoconv(args=probe_args, data=data, timeout=timeout)
        dimensions = _scale_dimensions(data=probe_image, dimensions=dimensions)
    final_args = _populate_args_for_image(
        import_format=import_format, export_format_name='png', dimensions=dimensions)
    if compression is not None:
        final_args.extend(['-e', f'Compression={compression}'])
    # Rewind: the probe pass (if any) consumed the stream.
    data.seek(0, SEEK_SET)
    return _call_unoconv(args=final_args, data=data, timeout=timeout)
def _convert_to_pdf(*, data: BinaryIO, import_format: _ImportFormat, paper_format: str, paper_orientation: str,
                    timeout: int) -> BytesIO:
    """Convert a document to PDF via unoconv, optionally forcing paper size and orientation."""
    pdf_args = ['--format', 'pdf']
    if import_format.document_type is not None:
        pdf_args += ['--doctype', import_format.document_type]
    if import_format.import_filter is not None:
        pdf_args += ['--import-filter-name', import_format.import_filter]
    printer_options = (('PaperFormat', paper_format), ('PaperOrientation', paper_orientation))
    for option_name, value in printer_options:
        if value is not None:
            pdf_args += ['-P', f'{option_name}={value}']
    return _call_unoconv(args=pdf_args, data=data, timeout=timeout)
def _read_data(*, fs_url: str, file: str, mime_type: str, extension: str) -> Tuple[_ImportFormat, BytesIO]:
    """Load *file* from the PyFilesystem URL *fs_url* and resolve its import format.

    The extension is derived from the file name unless given explicitly.
    Raises FileNotFoundError when the file is missing, ValueError when the
    document type is unsupported, and RuntimeError on other I/O errors.
    """
    try:
        with open_fs(fs_url) as filesystem:
            payload = BytesIO(filesystem.readbytes(file))
    except ResourceNotFound:
        raise FileNotFoundError(f'Input file {file} not found.')
    except Exception as exception:
        raise RuntimeError(f'Reading file failed with a {type(exception).__name__} exception: {str(exception)}.') from None
    effective_extension = extension if extension is not None else os.path.splitext(file)[1]
    import_format = _determine_import_format(mime_type, effective_extension)
    if import_format is None:
        raise ValueError('Unsupported input document type.')
    return import_format, payload
def _write_data(*, fs_url: str, file: str, data: BinaryIO) -> None:
    """Write the remaining bytes of *data* to *file* on the PyFilesystem URL *fs_url*."""
    try:
        with open_fs(fs_url) as filesystem:
            filesystem.writebytes(file, data.read())
    except Exception as exception:
        raise RuntimeError(f'Writing file failed with a {type(exception).__name__} exception: {str(exception)}.') from None
def _build_dimensions(*, pixel_height: Optional[int], pixel_width: Optional[int], logical_height: Optional[int],
                      logical_width: Optional[int], scale_height: bool, scale_width: bool) -> _Dimensions:
    """Validate sizing arguments and bundle them into a _Dimensions tuple.

    Rules enforced:
      * scaling one side requires the opposite pixel size to be given;
      * without scaling, pixel height and width must be set together;
      * every size must be non-negative when given (zero is accepted).

    Raises ValueError when a rule is violated.
    """
    if scale_height and pixel_width is None:
        raise ValueError('When scaling the height the pixel width must be specified.')
    if scale_width and pixel_height is None:
        raise ValueError('When scaling the width the pixel height must be specified.')
    # XOR: exactly one of the two pixel sizes given is inconsistent.
    if not scale_height and not scale_width and (pixel_height is None) != (pixel_width is None):
        raise ValueError('Both pixel height and width must be set or unset.')
    # The checks below allow zero; the messages previously said "positive",
    # which did not match the `< 0` tests — they now say "non-negative".
    if pixel_height is not None and pixel_height < 0:
        raise ValueError('The pixel height must be a non-negative integer.')
    if pixel_width is not None and pixel_width < 0:
        raise ValueError('The pixel width must be a non-negative integer.')
    if logical_height is not None and logical_height < 0:
        raise ValueError('The logical height must be a non-negative integer.')
    if logical_width is not None and logical_width < 0:
        raise ValueError('The logical width must be a non-negative integer.')
    return _Dimensions(
        pixel_height=pixel_height,
        pixel_width=pixel_width,
        logical_height=logical_height,
        logical_width=logical_width,
        scale_height=scale_height,
        scale_width=scale_width)
@app.task
def generate_preview_jpg(*,
input_fs_url: str,
input_file: str,
output_fs_url: str,
output_file: str,
mime_type: str = None,
extension: str = None,
pixel_height: int = None,
pixel_width: int = None,
maintain_ratio: bool = False,
quality: int = None,
timeout: int = UNOCONV_DEFAULT_TIMEOUT):
dimensions = _build_dimensions(
pixel_height=pixel_height,
pixel_width=pixel_width,
logical_height=None,
logical_width=None,
scale_height=maintain_ratio,
| |
= (centroid[1] * self.j["transform"]["scale"][1]) + self.j["transform"]["translate"][1]
centroid[2] = (centroid[2] * self.j["transform"]["scale"][2]) + self.j["transform"]["translate"][2]
return centroid
else:
return None
def get_identifier(self):
"""
Returns the identifier of this file.
If there is one in metadata, it will be returned. Otherwise, the filename will.
"""
if "metadata" in self.j:
if "citymodelIdentifier" in self.j["metadata"]:
cm_id = self.j["metadata"]["citymodelIdentifier"]
if cm_id:
template = "{cm_id} ({file_id})"
else:
template = "{file_id}"
if "metadata" in self.j:
if "fileIdentifier" in self.j["metadata"]:
return template.format(cm_id=cm_id, file_id=self.j["metadata"]["fileIdentifier"])
if self.path:
return os.path.basename(self.path)
return "unknown"
def get_title(self):
"""
Returns the description of this file from metadata.
If there is none, the identifier will be returned, instead.
"""
if "metadata" in self.j:
if "datasetTitle" in self.j["metadata"]:
return self.j["metadata"]["datasetTitle"]
return self.get_identifier()
    def get_subset_bbox(self, bbox, exclude=False):
        """Return a new CityJSON with the CityObjects whose centroid lies in
        *bbox* = (minx, miny, maxx, maxy); exclude=True inverts the selection.

        Children/parents of the centroid-selected objects are added to keep
        the hierarchy complete. Geometry, templates, appearance and metadata
        are carried over via the subset helpers.
        NOTE(review): the copied CityObjects share references with self.j, so
        mutating the subset mutates the original — confirm this is intended.
        """
        # print ('get_subset_bbox')
        #-- new sliced CityJSON object
        cm2 = CityJSON()
        cm2.j["version"] = self.j["version"]
        cm2.path = self.path
        if "transform" in self.j:
            cm2.j["transform"] = self.j["transform"]
        re = set()
        # Select by centroid-in-bbox test; objects without a centroid are skipped.
        for coid in self.j["CityObjects"]:
            centroid = self.get_centroid(coid)
            if ((centroid is not None) and
                (centroid[0] >= bbox[0]) and
                (centroid[1] >= bbox[1]) and
                (centroid[0] < bbox[2]) and
                (centroid[1] < bbox[3]) ):
                re.add(coid)
        # Keep a copy of the centroid-selected IDs before the optional inversion.
        re2 = copy.deepcopy(re)
        if exclude == True:
            allkeys = set(self.j["CityObjects"].keys())
            # Symmetric difference = complement of the selection.
            re = allkeys ^ re
        #-- also add the parent-children
        # NOTE(review): with exclude=True this still adds the relatives of the
        # *originally selected* (now excluded) objects — verify this is intended.
        for theid in re2:
            if "children" in self.j['CityObjects'][theid]:
                for child in self.j['CityObjects'][theid]['children']:
                    re.add(child)
            if "parents" in self.j['CityObjects'][theid]:
                for each in self.j['CityObjects'][theid]['parents']:
                    re.add(each)
        for each in re:
            cm2.j["CityObjects"][each] = self.j["CityObjects"][each]
        #-- geometry
        subset.process_geometry(self.j, cm2.j)
        #-- templates
        subset.process_templates(self.j, cm2.j)
        #-- appearance
        if ("appearance" in self.j):
            cm2.j["appearance"] = {}
            subset.process_appearance(self.j, cm2.j)
        #-- metadata
        # Best effort: files without metadata simply skip the lineage update.
        try:
            cm2.j["metadata"] = copy.deepcopy(self.j["metadata"])
            cm2.update_metadata(overwrite=True, new_uuid=True)
            fids = [fid for fid in cm2.j["CityObjects"]]
            cm2.add_lineage_item("Subset of {} by bounding box {}".format(self.get_identifier(), bbox), features=fids)
        except:
            pass
        return cm2
def is_co_toplevel(self, co):
if ('toplevel' in co):
return co['toplevel']
if co["type"] in TOPLEVEL:
return True
else:
return False
def number_top_co(self):
count = 0
allkeys = list(self.j["CityObjects"].keys())
for k in allkeys:
if self.is_co_toplevel(self.j["CityObjects"][k]):
count += 1
return count
def get_ordered_ids_top_co(self, limit, offset):
re = []
allkeys = list(self.j["CityObjects"].keys())
for k in allkeys:
if self.is_co_toplevel(self.j["CityObjects"][k]):
re.append(k)
return re[offset:(offset+limit)]
def get_subset_random(self, number=1, exclude=False):
random.seed()
total = len(self.j["CityObjects"])
if number > total:
number = total
allkeys = list(self.j["CityObjects"].keys())
re = set()
count = 0
while (count < number):
t = allkeys[random.randint(0, total - 1)]
if self.is_co_toplevel(self.j["CityObjects"][t]):
re.add(t)
count += 1
if exclude == True:
sallkeys = set(self.j["CityObjects"].keys())
re = sallkeys ^ re
re = list(re)
cm = self.get_subset_ids(re)
try:
cm.j["metadata"]["lineage"][-1]["processStep"]["description"] = "Random subset of {}".format(self.get_identifier())
except:
pass
return cm
    def get_subset_ids(self, lsIDs, exclude=False):
        """Return a new CityJSON restricted to the CityObjects selected from
        *lsIDs* (hierarchy resolution is delegated to subset.select_co_ids).

        With exclude=True every object NOT selected is kept instead.
        Geometry, templates, appearance and metadata are carried over.
        NOTE(review): the copied CityObjects share references with self.j —
        mutating the subset mutates the original; confirm this is intended.
        """
        #-- new sliced CityJSON object
        cm2 = CityJSON()
        cm2.j["version"] = self.j["version"]
        cm2.path = self.path
        if "extensions" in self.j:
            cm2.j["extensions"] = self.j["extensions"]
        if "transform" in self.j:
            cm2.j["transform"] = self.j["transform"]
        #-- copy selected CO to the j2
        re = subset.select_co_ids(self.j, lsIDs)
        if exclude == True:
            allkeys = set(self.j["CityObjects"].keys())
            # Symmetric difference = complement of the selection.
            re = allkeys ^ re
        for each in re:
            cm2.j["CityObjects"][each] = self.j["CityObjects"][each]
        #-- geometry
        subset.process_geometry(self.j, cm2.j)
        #-- templates
        subset.process_templates(self.j, cm2.j)
        #-- appearance
        if ("appearance" in self.j):
            cm2.j["appearance"] = {}
            subset.process_appearance(self.j, cm2.j)
        #-- metadata
        # Best effort: files without metadata simply skip the lineage update.
        try:
            cm2.j["metadata"] = copy.deepcopy(self.j["metadata"])
            cm2.update_metadata(overwrite=True, new_uuid=True)
            fids = [fid for fid in cm2.j["CityObjects"]]
            cm2.add_lineage_item("Subset of {} based on user specified IDs".format(self.get_identifier()), features=fids)
        except:
            pass
        return cm2
def get_subset_cotype(self, cotype, exclude=False):
    """Return a new CityJSON with only the City Objects of type *cotype*.

    Child types (BuildingPart, BridgeInstallation, ...) are selected together
    with their parent type so related objects are not orphaned.

    :param cotype: City Object type to select, e.g. "Building"
    :param exclude: if True, keep every type EXCEPT the selected ones
    :returns: a new CityJSON instance; the original model is not modified
    """
    # print ('get_subset_cotype')
    lsCOtypes = [cotype]
    if cotype == 'Building':
        lsCOtypes.append('BuildingInstallation')
        lsCOtypes.append('BuildingPart')
    if cotype == 'Bridge':
        lsCOtypes.append('BridgePart')
        lsCOtypes.append('BridgeInstallation')
        lsCOtypes.append('BridgeConstructionElement')
    if cotype == 'Tunnel':
        lsCOtypes.append('TunnelInstallation')
        lsCOtypes.append('TunnelPart')
    #-- new sliced CityJSON object
    cm2 = CityJSON()
    cm2.j["version"] = self.j["version"]
    cm2.path = self.path
    #-- bug fix: also carry over "extensions", for consistency with
    #-- get_subset_ids(); the copied COs may rely on extension definitions
    if "extensions" in self.j:
        cm2.j["extensions"] = self.j["extensions"]
    if "transform" in self.j:
        cm2.j["transform"] = self.j["transform"]
    #-- copy selected CO to the j2
    for theid in self.j["CityObjects"]:
        if exclude == False:
            if self.j["CityObjects"][theid]["type"] in lsCOtypes:
                cm2.j["CityObjects"][theid] = self.j["CityObjects"][theid]
        else:
            if self.j["CityObjects"][theid]["type"] not in lsCOtypes:
                cm2.j["CityObjects"][theid] = self.j["CityObjects"][theid]
    #-- geometry
    subset.process_geometry(self.j, cm2.j)
    #-- templates
    subset.process_templates(self.j, cm2.j)
    #-- appearance
    if ("appearance" in self.j):
        cm2.j["appearance"] = {}
        subset.process_appearance(self.j, cm2.j)
    #-- metadata (best-effort; skipped when absent or malformed)
    try:
        cm2.j["metadata"] = copy.deepcopy(self.j["metadata"])
        cm2.update_metadata(overwrite=True, new_uuid=True)
        cm2.add_lineage_item("Subset of {} by object type {}".format(self.get_identifier(), cotype))
    except:
        pass
    return cm2
def get_textures_location(self):
    """Get the location of the texture files

    Assumes that all textures are in the same location. Relative paths
    are expanded to absolute paths.

    :returns: path to the directory or URL of the texture files
    :rtype: string (path) or None (on failure)
    :raises: NotADirectoryError
    """
    if "appearance" not in self.j or "textures" not in self.j["appearance"]:
        print("This file does not have textures")
        return None
    #-- the first texture's location stands in for all of them
    p = self.j["appearance"]["textures"][0]["image"]
    cj_dir = os.path.dirname(self.path)
    if re.match(r'http[s]?://|www\.', p):
        #-- bug fix: return the URL string itself instead of the re.Match
        #-- object; the docstring promises a string, and callers such as
        #-- copy_textures() join file names onto the returned value
        return p
    d = os.path.dirname(p)
    if len(d) == 0:
        # textures are in the same dir as the cityjson file
        return cj_dir
    elif not os.path.isabs(d):
        if os.path.isdir(os.path.abspath(d)):
            # texture dir is not necessarily in the same dir
            # as the input file
            return os.path.abspath(d)
        elif os.path.isdir(os.path.join(cj_dir, d)):
            # texture dir is a subdirectory at the input file
            return os.path.join(cj_dir, d)
        else:
            raise NotADirectoryError("Texture directory '%s' not found" % d)
    return None
def update_textures_location(self, new_loc, relative=True):
    """Updates the location of the texture files

    If the new location is a directory in the local file system, it is
    expected to exist with the texture files in it.

    :param new_loc: path to new texture directory, or a URL prefix
    :type new_loc: string
    :param relative: create texture links relative to the CityJSON file
    :type relative: boolean
    :returns: None -- modifies the CityJSON
    :raises: InvalidOperation, NotADirectoryError
    """
    if not self.get_textures_location():
        raise InvalidOperation("Cannot update textures in a city model without textures")
    if re.match(r'http[s]?://|www\.', new_loc):
        #-- remote location: keep the URL prefix verbatim
        base = new_loc
    else:
        base = os.path.abspath(new_loc)
        if not os.path.isdir(base):
            #-- bug fix: typo in the error message ("exits" -> "exist")
            raise NotADirectoryError("%s does not exist" % base)
        if relative:
            base = os.path.relpath(base, os.path.dirname(self.path))
    #-- re-point every texture at the new base location
    for t in self.j["appearance"]["textures"]:
        t["image"] = os.path.join(base, os.path.basename(t["image"]))
def copy_textures(self, new_loc, json_path):
    """Copy the texture files to a new location

    :param new_loc: path to new texture directory
    :type new_loc: string
    :param json_path: path to the CityJSON file directory
    :type json_path: string
    :returns: None -- modifies the CityJSON
    :raises: InvalidOperation, IOError
    """
    curr_loc = self.get_textures_location()
    if not curr_loc:
        raise InvalidOperation("Cannot copy textures from a city model without textures")
    apath = os.path.abspath(new_loc)
    if not os.path.isdir(apath):
        os.mkdir(apath)
    #-- bug fix: os.path.abspath() always returns a truthy string, so the
    #-- old `if not os.path.abspath(json_path)` never detected relative
    #-- paths; isabs() is the intended test
    if not os.path.isabs(json_path):
        jpath = os.path.abspath(json_path)
    else:
        jpath = json_path
    #-- bug fix: the original reused `curr_path` as the loop variable, so
    #-- the finally-block "restored" self.path to the last texture's path;
    #-- keep the saved original in its own variable
    orig_path = self.path
    try:
        # self.path must temporarily point at json_path so that
        # update_textures_location() computes links relative to it
        self.path = jpath
        for t in self.j["appearance"]["textures"]:
            f = os.path.basename(t["image"])
            shutil.copy(os.path.join(curr_loc, f), apath)
        # update the location relative to the CityJSON file
        self.update_textures_location(apath, relative=True)
        print("Textures copied to", apath)
    except IOError:
        raise
    finally:
        self.path = orig_path
def validate_textures(self):
    """Check if the texture files exist (not implemented yet).

    :raises: NotImplementedError -- always, until implemented
    """
    #-- bug fix: `raise NotImplemented` raises a TypeError in Python 3,
    #-- because NotImplemented is a constant, not an exception class
    raise NotImplementedError
def remove_textures(self):
    """Strip all texture data from the model.

    Deletes the per-CityObject "texture" members and the texture-related
    entries under "appearance"; drops "appearance" entirely once empty.

    :returns: True
    """
    for co in self.j["CityObjects"].values():
        if "texture" in co:
            del co["texture"]
    if "appearance" in self.j:
        app = self.j["appearance"]
        for member in ("textures", "vertices-texture", "default-theme-texture"):
            if member in app:
                del app[member]
        # print (len(self.j["appearance"]))
        if not app:
            del self.j["appearance"]
    return True
def remove_materials(self):
    """Strip all material data from the model.

    Deletes the per-CityObject "material" members and the material-related
    entries under "appearance"; drops "appearance" entirely once empty.

    :returns: True
    """
    for co in self.j["CityObjects"].values():
        if "material" in co:
            del co["material"]
    if "appearance" in self.j:
        app = self.j["appearance"]
        for member in ("materials", "default-theme-material"):
            if member in app:
                del app[member]
        if not app:
            del self.j["appearance"]
    return True
def number_city_objects(self):
    """Count the top-level City Objects in the model."""
    return sum(
        1 for co in self.j["CityObjects"].values() if self.is_co_toplevel(co)
    )
def get_info(self, long=False):
info = collections.OrderedDict()
info["cityjson_version"] = self.get_version()
info["epsg"] = self.get_epsg()
info["bbox"] = self.get_bbox()
if "extensions" in self.j:
d = set()
for i in self.j["extensions"]:
d.add(i)
info["extensions"] = sorted(list(d))
info["transform/compressed"] = "transform" in self.j
info["cityobjects_total"] = self.number_city_objects()
d = set()
for key in self.j["CityObjects"]:
d.add(self.j['CityObjects'][key]['type'])
info["cityobjects_present"] = sorted(list(d))
if 'appearance' in self.j:
info["materials"] = 'materials' in self.j['appearance']
info["textures"] = 'textures' in self.j['appearance']
else:
info["materials"] = False
info["textures"] = False
if long == False:
return json.dumps(info, indent=2)
#-- all/long version
info["vertices_total"] = len(self.j["vertices"])
d.clear()
lod = set()
sem_srf = set()
co_attributes = set()
for key in self.j["CityObjects"]:
if 'attributes' in self.j['CityObjects'][key]:
for attr in self.j['CityObjects'][key]['attributes'].keys():
co_attributes.add(attr)
for geom | |
# Repository: Luojiahong/Pycker
# -*- coding: utf-8 -*-
"""
Pycker Viewer provides a GUI to visualize seismic traces and manually pick
first break arrival times.
Author: <NAME> <<EMAIL>>
License: MIT
"""
from matplotlib.figure import Figure
from matplotlib.gridspec import GridSpec
from matplotlib.ticker import FormatStrFormatter
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from obspy.signal.filter import lowpass, highpass
from obspy.core.utcdatetime import UTCDateTime
import numpy as np
from ..pick import Pick
from ..wiggle import wiggle
from ..read_stream import StreamReader
import os, sys
if sys.version_info[0] < 3:
import Tkinter as tk
import tkFileDialog as tkfile
import tkMessageBox as tkmessage
import ttk
import tkFont as font
else:
import tkinter as tk
import tkinter.filedialog as tkfile
import tkinter.messagebox as tkmessage
import tkinter.ttk as ttk
from tkinter import font
from .ttk_spinbox import Spinbox
try:
import cPickle as pickle
except ImportError:
import pickle
__all__ = [ "PyckerGUI", "main" ]
class PyckerGUI():
    """
    GUI for Pycker.

    Pycker Viewer provides a GUI to visualize seismic traces and manually pick
    first break arrival times.

    Parameters
    ----------
    master : tkinter object
        tkinter root window.
    ncolumn : int, default 2
        Number of columns in non-gather plot.
    """

    # Class-level defaults; instances rebind these as they run
    master = None
    # picks: one slot per imported file (filled in import_traces)
    picks = None
    # True until the first data directory has been imported
    _first_import = True
    _current_file = None
    _current_index = None
    # Delay-unit choices offered by the OptionMenu in the Data frame
    UNITS = [ "samples", "s", "ms", "us" ]
def __init__(self, master, ncolumn = 2):
    """Build the main window, configure fonts, and create all widgets.

    :param master: tkinter root window
    :param ncolumn: number of columns in the non-gather plot
    """
    self._ncolumn = ncolumn
    self.master = master
    master.title("Pycker Viewer")
    # Route the window-close button through the confirmation dialog
    master.protocol("WM_DELETE_WINDOW", self.close_window)
    master.geometry("1200x700")
    master.minsize(1200, 700)
    # Nanosecond precision for displayed ObsPy timestamps
    UTCDateTime.DEFAULT_PRECISION = 9
    default_font = font.nametofont("TkDefaultFont")
    default_font.configure(family = "Helvetica", size = 9)
    master.option_add("*Font", default_font)
    self._stread = StreamReader()
    # Order matters: tk variables must exist before the widgets that bind them
    self.define_variables()
    self.trace_variables()
    self.init_variables()
    self.init_containers()
    self.menubar()
    self.init_frame1()
    self.init_frame2()
    self.init_frame3()
    self.footer()
def about(self):
    """Pop up the "About" dialog with version and author information."""
    lines = (
        "Pycker Viewer 1.0",
        "A picker program for first break arrival times",
        "",
        "Created by <NAME>",
    )
    tkmessage.showinfo("About", "\n".join(lines))
def menubar(self):
    """Build the File / View / Help menu bar and attach it to the root window."""
    menubar = tk.Menu(self.master)
    # File
    filemenu = tk.Menu(menubar, tearoff = 0)
    filemenu.add_command(label = "Import all picks", command = self.import_all_picks)
    filemenu.add_separator()
    filemenu.add_command(label = "Export current pick", command = self.export_current_pick)
    filemenu.add_command(label = "Export all picks", command = self.export_all_picks)
    filemenu.add_separator()
    filemenu.add_command(label = "Exit", command = self.close_window)
    # View
    viewmenu = tk.Menu(menubar, tearoff = 0)
    viewmenu.add_checkbutton(label = "Gather", onvalue = 1, offvalue = 0, variable = self.plot_type, command = self.plot)
    viewmenu.add_checkbutton(label = "Fill", onvalue = 1, offvalue = 0, variable = self.fill, command = self.plot)
    # Time axis (attached below as a sub-menu of View)
    taxismenu = tk.Menu(viewmenu, tearoff = 0)
    taxismenu.add_checkbutton(label = "Seconds", onvalue = 1, offvalue = 0, variable = self.taxis_seconds, command = self._set_taxis_seconds)
    taxismenu.add_checkbutton(label = "Samples", onvalue = 1, offvalue = 0, variable = self.taxis_samples, command = self._set_taxis_samples)
    # Help
    helpmenu = tk.Menu(menubar, tearoff = 0)
    helpmenu.add_command(label = "About", command = self.about)
    # Display menu bar
    menubar.add_cascade(label = "File", menu = filemenu)
    menubar.add_cascade(label = "View", menu = viewmenu)
    menubar.add_cascade(label = "Help", menu = helpmenu)
    viewmenu.add_cascade(label = "Time axis", menu = taxismenu)
    self.master.config(menu = menubar)
def init_containers(self):
    """Lay out the top-level frames: main area (data panel + canvas) and footer."""
    self.root_container = ttk.Frame(self.master)
    self.root_container.place(relwidth = 1, relheight = 1, anchor = "nw")
    # Row 0 (main area) absorbs all extra space; row 1 is the fixed footer
    self.root_container.grid_rowconfigure(0, weight = 1)
    self.root_container.grid_columnconfigure(0, weight = 1)
    self.footer_container = ttk.Frame(self.root_container, width = 300, height = 35)
    self.footer_container.grid(row = 1, column = 0, sticky = "ew")
    self.main_container = ttk.Frame(self.root_container, width = 300, height = 300)
    self.main_container.grid(row = 0, column = 0, sticky = "nsew")
    self.main_container.grid_rowconfigure(0, weight = 1)
    # Column 1 (traces canvas) stretches; column 0 (data panel) keeps its size
    self.main_container.grid_columnconfigure(1, weight = 1)
    self.data_container = ttk.Frame(self.main_container, width = 300, height = 300)
    self.data_container.grid(row = 0, column = 0, padx = 5, sticky = "ns")
    self.data_container.grid_rowconfigure(1, weight = 1)
    self.data_container.grid_columnconfigure(0, weight = 1)
    self.canvas_container = ttk.LabelFrame(self.main_container, text = "Traces", width = 300, height = 300)
    self.canvas_container.grid(row = 0, column = 1, padx = 5, sticky = "nsew")
def init_frame1(self):
    """Build the "Data" frame: directory chooser plus processing options
    (normalize, sampling rate, low/highpass filters, delay) and Apply."""
    self.frame1 = ttk.LabelFrame(self.data_container, text = "Data", borderwidth = 2, relief = "groove", width = 330, height = 250)
    self.frame1.grid_columnconfigure(0, weight = 1)
    self.frame1.grid_rowconfigure(0, weight = 1)
    self.frame1.grid(row = 0, column = 0)
    # directory
    data_label = ttk.Label(self.frame1, text = "Directory")
    data_entry = ttk.Entry(self.frame1, width = 25, textvariable = self.input_dirname,
                           takefocus = True)
    data_import_button = ttk.Button(self.frame1, text = "Import", command = self.import_traces,
                                    takefocus = False)
    # norm
    norm_button = ttk.Checkbutton(self.frame1, text = "Normalize", variable = self.normalize,
                                  takefocus = False)
    norm_spinbox = Spinbox(self.frame1, from_ = 0.01, to_ = 1., increment = 0.01, textvariable = self.perc,
                           width = 7, justify = "right", takefocus = True)
    # sampling_rate
    fs_button = ttk.Checkbutton(self.frame1, text = "Sampling rate (Hz)", variable = self.enforce_fs,
                                takefocus = False)
    fs_entry = ttk.Entry(self.frame1, width = 10, textvariable = self.sampling_rate,
                         justify = "right", takefocus = True)
    # low
    low_button = ttk.Checkbutton(self.frame1, text = "Lowpass (Hz)", variable = self.lowpass,
                                 takefocus = False)
    low_entry = ttk.Entry(self.frame1, width = 10, textvariable = self.lpcut,
                          justify = "right", takefocus = True)
    # high
    high_button = ttk.Checkbutton(self.frame1, text = "Highpass (Hz)", variable = self.highpass,
                                  takefocus = False)
    high_entry = ttk.Entry(self.frame1, width = 10, textvariable = self.hpcut,
                           justify = "right", takefocus = True)
    # delay
    delay_button = ttk.Checkbutton(self.frame1, text = "Delay", variable = self.delay,
                                   takefocus = False)
    delay_entry = ttk.Entry(self.frame1, width = 6, textvariable = self.delay_val,
                            justify = "right", takefocus = True)
    delay_option_menu = ttk.OptionMenu(self.frame1, self.delay_unit, self.delay_unit.get(), *self.UNITS)
    delay_option_menu.config(width = 7)
    # apply
    apply_button = ttk.Button(self.frame1, text = "Apply", command = self.apply,
                              takefocus = False)
    # Layout
    data_label.grid(row = 0, column = 0, padx = 5, sticky = "sw")
    data_entry.grid(row = 1, column = 0, columnspan = 2, padx = 5, pady = 5, sticky = "we")
    data_import_button.grid(row = 1, column = 2, padx = 5, pady = 5, sticky = "e")
    norm_button.grid(row = 2, column = 0, padx = 5, pady = 1, sticky = "w")
    norm_spinbox.grid(row = 2, column = 2, ipadx = 8, padx = 5, pady = 1)
    fs_button.grid(row = 3, column = 0, padx = 5, pady = 1, sticky = "w")
    fs_entry.grid(row = 3, column = 2, padx = 5, pady = 1)
    low_button.grid(row = 4, column = 0, padx = 5, pady = 1, sticky = "w")
    low_entry.grid(row = 4, column = 2, padx = 5, pady = 1)
    high_button.grid(row = 5, column = 0, padx = 5, pady = 1, sticky = "w")
    high_entry.grid(row = 5, column = 2, padx = 5, pady = 1)
    delay_button.grid(row = 6, column = 0, padx = 5, pady = 1, sticky = "w")
    delay_entry.grid(row = 6, column = 1, padx = 5, pady = 1)
    delay_option_menu.grid(row = 6, column = 2, padx = 5, pady = 1, sticky = "ew")
    apply_button.grid(row = 7, column = 2, padx = 5, pady = 5, sticky = "se")
def init_frame2(self):
    """Create the "Files" frame that will hold the event list."""
    self.frame2 = ttk.LabelFrame(
        self.data_container,
        text = "Files",
        borderwidth = 2,
        relief = "groove",
        width = 100,
        height = 100,
    )
    self.frame2.grid(row = 1, column = 0, sticky = "nsew")
def init_frame3(self):
    """Embed the matplotlib figure, canvas and toolbar in the traces frame."""
    self.frame3 = ttk.Frame(self.canvas_container, borderwidth = 0)
    self.frame3.pack()
    self.fig = Figure(figsize = (12, 8), facecolor = "white", dpi = 150)
    self.canvas = FigureCanvasTkAgg(self.fig, master = self.frame3)
    # NOTE(review): NavigationToolbar2TkAgg was removed in matplotlib 3.x
    # (renamed NavigationToolbar2Tk) -- confirm the pinned matplotlib version
    self.toolbar = NavigationToolbar2TkAgg(self.canvas, self.frame3)
    self.toolbar.update()
    self.canvas.get_tk_widget().pack()
    self.toolbar.pack(side = "top", fill = "both", expand = 1)
    # Clicking on an artist in the figure routes to the picking handler
    self.canvas.mpl_connect("pick_event", self.OnPick)
def footer(self):
    """Place the Exit button at the bottom-right of the footer bar."""
    quit_button = ttk.Button(self.footer_container, text = "Exit", command = self.close_window)
    quit_button.place(width = 100, relx = 1, rely = 1, x = -5, y = -5, anchor = "se")
def close_window(self):
    """Ask for confirmation, then tear down the application."""
    if tkmessage.askyesno("Exit", "Do you really want to exit?"):
        self.close()
def import_traces(self):
    """Ask for a data directory, read its seismic files and fill the file list.

    Resets the figure and any previous selection; on an empty/incompatible
    directory an error dialog is shown and the directory entry is cleared.
    """
    dirname = tkfile.askdirectory(title = "Open data directory",
                                  initialdir = os.getcwd(),
                                  )
    # Empty string means the user cancelled the dialog
    if len(dirname) > 0:
        dirname += "/"
        self.input_dirname.set(dirname)
        self.fig.clear()
        self.canvas.draw()
        if not self._first_import:
            # Drop the previous file list and selection before re-importing
            self.frame2.forget()
            self._current_file = None
            self._current_index = None
        else:
            self._first_import = False
        self.init_frame2()
        # List all files in data directory
        self._filenames = self._stread.read_dir(dirname)
        nsrc = len(self._filenames)
        # One pick slot per source file
        self.picks = [ None ] * nsrc
        if nsrc < 1:
            tkmessage.showerror("Error", "Chosen directory is empty or contains incompatible files.")
            self.input_dirname.set("")
            # NOTE(review): this `pass` does not abort the method; nothing
            # further runs only because the else-branch is skipped
            pass
        else:
            # Scrollbar
            scrollbar = ttk.Scrollbar(self.frame2)
            scrollbar.pack(side = "right", fill = "y")
            # Listbox
            event_list = tk.Listbox(self.frame2, yscrollcommand = scrollbar.set)
            for item in self._filenames:
                event_list.insert(tk.END, item)
            event_list.bind("<Double-Button-1>", self.OnDoubleClick)
            event_list.bind("<Down>", self.OnEntryDown)
            event_list.bind("<Up>", self.OnEntryUp)
            # Layout
            event_list.pack(expand = "y", fill = "both")
            scrollbar.config(command = event_list.yview)
def apply(self):
if self._current_file is None:
tkmessage.showerror("Error", "No event chosen yet.")
else:
| |
# Symbol re-declaration script (disassembler API): for each address, clear any
# previously defined items with del_items() and then apply the recovered,
# mangled C++ prototype with SetType(). The del_items()/SetType() pairs below
# are grouped by subsystem of the target binary.
del_items(0x8007DD34)
SetType(0x8007DD34, "int GetTpY__FUs(unsigned short tpage)")
del_items(0x8007DD50)
SetType(0x8007DD50, "int GetTpX__FUs(unsigned short tpage)")
del_items(0x8007DD5C)
SetType(0x8007DD5C, "void Remove96__Fv()")
#-- application entry / main loop and task handling
del_items(0x8007DD94)
SetType(0x8007DD94, "void AppMain()")
del_items(0x8007DE3C)
SetType(0x8007DE3C, "void MAIN_RestartGameTask__Fv()")
del_items(0x8007DE68)
SetType(0x8007DE68, "void GameTask__FP4TASK(struct TASK *T)")
del_items(0x8007DF5C)
SetType(0x8007DF5C, "void MAIN_MainLoop__Fv()")
del_items(0x8007DFA4)
SetType(0x8007DFA4, "void CheckMaxArgs__Fv()")
#-- GPU command queue (GPUQ_*)
del_items(0x8007DFD8)
SetType(0x8007DFD8, "unsigned char GPUQ_InitModule__Fv()")
del_items(0x8007DFE4)
SetType(0x8007DFE4, "void GPUQ_FlushQ__Fv()")
del_items(0x8007E158)
SetType(0x8007E158, "void GPUQ_LoadImage__FP4RECTli(struct RECT *Rect, long ImgHandle, int Offset)")
del_items(0x8007E20C)
SetType(0x8007E20C, "void GPUQ_DiscardHandle__Fl(long hnd)")
del_items(0x8007E2AC)
SetType(0x8007E2AC, "void GPUQ_LoadClutAddr__FiiiPv(int X, int Y, int Cols, void *Addr)")
del_items(0x8007E348)
SetType(0x8007E348, "void GPUQ_MoveImage__FP4RECTii(struct RECT *R, int x, int y)")
#-- primitive buffers and ordering tables (PRIM_*)
del_items(0x8007E3E8)
SetType(0x8007E3E8, "unsigned char PRIM_Open__FiiiP10SCREEN_ENVUl(int Prims, int OtSize, int Depth, struct SCREEN_ENV *Scr, unsigned long MemType)")
del_items(0x8007E504)
SetType(0x8007E504, "unsigned char InitPrimBuffer__FP11PRIM_BUFFERii(struct PRIM_BUFFER *Pr, int Prims, int OtSize)")
del_items(0x8007E5E0)
SetType(0x8007E5E0, "void PRIM_Clip__FP4RECTi(struct RECT *R, int Depth)")
del_items(0x8007E708)
SetType(0x8007E708, "unsigned char PRIM_GetCurrentScreen__Fv()")
del_items(0x8007E714)
SetType(0x8007E714, "void PRIM_FullScreen__Fi(int Depth)")
del_items(0x8007E750)
SetType(0x8007E750, "void PRIM_Flush__Fv()")
del_items(0x8007E964)
SetType(0x8007E964, "unsigned long *PRIM_GetCurrentOtList__Fv()")
del_items(0x8007E970)
SetType(0x8007E970, "void ClearPbOnDrawSync(struct PRIM_BUFFER *Pb)")
del_items(0x8007E9AC)
SetType(0x8007E9AC, "unsigned char ClearedYet__Fv()")
del_items(0x8007E9B8)
SetType(0x8007E9B8, "void PrimDrawSycnCallBack()")
del_items(0x8007E9D8)
SetType(0x8007E9D8, "void SendDispEnv__Fv()")
del_items(0x8007E9FC)
SetType(0x8007E9FC, "struct POLY_F4 *PRIM_GetNextPolyF4__Fv()")
del_items(0x8007EA14)
SetType(0x8007EA14, "struct POLY_FT4 *PRIM_GetNextPolyFt4__Fv()")
del_items(0x8007EA2C)
SetType(0x8007EA2C, "struct POLY_GT4 *PRIM_GetNextPolyGt4__Fv()")
del_items(0x8007EA44)
SetType(0x8007EA44, "struct POLY_G4 *PRIM_GetNextPolyG4__Fv()")
del_items(0x8007EA5C)
SetType(0x8007EA5C, "struct POLY_F3 *PRIM_GetNextPolyF3__Fv()")
del_items(0x8007EA74)
SetType(0x8007EA74, "struct DR_MODE *PRIM_GetNextDrArea__Fv()")
del_items(0x8007EA8C)
SetType(0x8007EA8C, "bool ClipRect__FRC4RECTR4RECT(struct RECT *ClipRect, struct RECT *RectToClip)")
del_items(0x8007EBA0)
SetType(0x8007EBA0, "bool IsColiding__FRC4RECTT0(struct RECT *ClipRect, struct RECT *NewRect)")
#-- video / display control (VID_*)
del_items(0x8007EC08)
SetType(0x8007EC08, "void VID_AfterDisplay__Fv()")
del_items(0x8007EC28)
SetType(0x8007EC28, "void VID_ScrOn__Fv()")
del_items(0x8007EC50)
SetType(0x8007EC50, "void VID_DoThisNextSync__FPFv_v(void (*Func)())")
del_items(0x8007ECA8)
SetType(0x8007ECA8, "unsigned char VID_NextSyncRoutHasExecuted__Fv()")
del_items(0x8007ECB4)
SetType(0x8007ECB4, "unsigned long VID_GetTick__Fv()")
del_items(0x8007ECC0)
SetType(0x8007ECC0, "void VID_DispEnvSend()")
del_items(0x8007ED18)
SetType(0x8007ED18, "void VID_SetXYOff__Fii(int x, int y)")
del_items(0x8007ED28)
SetType(0x8007ED28, "int VID_GetXOff__Fv()")
del_items(0x8007ED34)
SetType(0x8007ED34, "int VID_GetYOff__Fv()")
del_items(0x8007ED40)
SetType(0x8007ED40, "void VID_SetDBuffer__Fb(bool DBuf)")
#-- memory helpers, pad input, misc
del_items(0x8007EECC)
SetType(0x8007EECC, "void MyFilter__FUlUlPCc(unsigned long MemType, unsigned long Size, char *Name)")
del_items(0x8007EED4)
SetType(0x8007EED4, "void SlowMemMove__FPvT0Ul(void *Dest, void *Source, unsigned long size)")
del_items(0x8007EEF4)
SetType(0x8007EEF4, "int GetTpY__FUs_addr_8007EEF4(unsigned short tpage)")
del_items(0x8007EF10)
SetType(0x8007EF10, "int GetTpX__FUs_addr_8007EF10(unsigned short tpage)")
del_items(0x8007EF1C)
SetType(0x8007EF1C, "struct FileIO *SYSI_GetFs__Fv()")
del_items(0x8007EF28)
SetType(0x8007EF28, "struct FileIO *SYSI_GetOverlayFs__Fv()")
del_items(0x8007EF34)
SetType(0x8007EF34, "void SortOutFileSystem__Fv()")
del_items(0x8007F070)
SetType(0x8007F070, "void MemCb__FlPvUlPCcii(long hnd, void *Addr, unsigned long Size, char *Name, int Users, int TimeStamp)")
del_items(0x8007F090)
SetType(0x8007F090, "void Spanker__Fv()")
del_items(0x8007F0D0)
SetType(0x8007F0D0, "void GaryLiddon__Fv()")
del_items(0x8007F0D8)
SetType(0x8007F0D8, "void ReadPad__Fi(int NoDeb)")
del_items(0x8007F248)
SetType(0x8007F248, "void DummyPoll__Fv()")
del_items(0x8007F250)
SetType(0x8007F250, "void DaveOwens__Fv()")
del_items(0x8007F278)
SetType(0x8007F278, "unsigned short GetCur__C4CPad(struct CPad *this)")
del_items(0x8007F2A0)
SetType(0x8007F2A0, "unsigned char CheckActive__4CPad(struct CPad *this)")
del_items(0x8007F2AC)
SetType(0x8007F2AC, "int GetTpY__FUs_addr_8007F2AC(unsigned short tpage)")
del_items(0x8007F2C8)
SetType(0x8007F2C8, "int GetTpX__FUs_addr_8007F2C8(unsigned short tpage)")
del_items(0x8007F2D4)
SetType(0x8007F2D4, "void TimSwann__Fv()")
#-- file I/O class hierarchy (FileIO base, PCIO, SysObj, DatIO)
del_items(0x8007F2DC)
SetType(0x8007F2DC, "struct FileIO *__6FileIOUl(struct FileIO *this, unsigned long OurMemId)")
del_items(0x8007F32C)
SetType(0x8007F32C, "void ___6FileIO(struct FileIO *this, int __in_chrg)")
del_items(0x8007F380)
SetType(0x8007F380, "long Read__6FileIOPCcUl(struct FileIO *this, char *Name, unsigned long RamId)")
del_items(0x8007F4E8)
SetType(0x8007F4E8, "int FileLen__6FileIOPCc(struct FileIO *this, char *Name)")
del_items(0x8007F54C)
SetType(0x8007F54C, "void FileNotFound__6FileIOPCc(struct FileIO *this, char *Name)")
del_items(0x8007F56C)
SetType(0x8007F56C, "bool StreamFile__6FileIOPCciPFPUciib_bii(struct FileIO *this, char *Name, int Slice, bool (*Func)(), int Offset, int Size)")
del_items(0x8007F64C)
SetType(0x8007F64C, "bool ReadAtAddr__6FileIOPCcPUci(struct FileIO *this, char *Name, unsigned char *Dest, int Len)")
del_items(0x8007F710)
SetType(0x8007F710, "void DumpOldPath__6FileIO(struct FileIO *this)")
del_items(0x8007F774)
SetType(0x8007F774, "void SetSearchPath__6FileIOPCc(struct FileIO *this, char *Path)")
del_items(0x8007F850)
SetType(0x8007F850, "bool FindFile__6FileIOPCcPc(struct FileIO *this, char *Name, char *Buffa)")
del_items(0x8007F964)
SetType(0x8007F964, "char *CopyPathItem__6FileIOPcPCc(struct FileIO *this, char *Dst, char *Src)")
del_items(0x8007FA0C)
SetType(0x8007FA0C, "void LockSearchPath__6FileIO(struct FileIO *this)")
del_items(0x8007FA64)
SetType(0x8007FA64, "void UnlockSearchPath__6FileIO(struct FileIO *this)")
del_items(0x8007FABC)
SetType(0x8007FABC, "bool SearchPathExists__6FileIO(struct FileIO *this)")
del_items(0x8007FAD0)
SetType(0x8007FAD0, "bool Save__6FileIOPCcPUci(struct FileIO *this, char *Name, unsigned char *Addr, int Len)")
del_items(0x8007FB0C)
SetType(0x8007FB0C, "struct PCIO *__4PCIOUl(struct PCIO *this, unsigned long OurMemId)")
del_items(0x8007FB74)
SetType(0x8007FB74, "void ___4PCIO(struct PCIO *this, int __in_chrg)")
del_items(0x8007FBCC)
SetType(0x8007FBCC, "bool FileExists__4PCIOPCc(struct PCIO *this, char *Name)")
del_items(0x8007FC10)
SetType(0x8007FC10, "bool LoReadFileAtAddr__4PCIOPCcPUci(struct PCIO *this, char *Name, unsigned char *Dest, int Len)")
del_items(0x8007FCD4)
SetType(0x8007FCD4, "int GetFileLength__4PCIOPCc(struct PCIO *this, char *Name)")
del_items(0x8007FD8C)
SetType(0x8007FD8C, "bool LoSave__4PCIOPCcPUci(struct PCIO *this, char *Name, unsigned char *Addr, int Len)")
del_items(0x8007FE60)
SetType(0x8007FE60, "bool LoStreamFile__4PCIOPCciPFPUciib_bii(struct PCIO *this, char *Name, int Slice, bool (*Func)(), int Offset, int Size)")
del_items(0x80080070)
SetType(0x80080070, "struct SysObj *__6SysObj(struct SysObj *this)")
del_items(0x80080088)
SetType(0x80080088, "void *__nw__6SysObji(int Amount)")
del_items(0x800800B4)
SetType(0x800800B4, "void *__nw__6SysObjiUl(int Amount, unsigned long RamID)")
del_items(0x80080130)
SetType(0x80080130, "void __dl__6SysObjPv(void *ptr)")
del_items(0x8008019C)
SetType(0x8008019C, "struct DatIO *__5DatIOUl(struct DatIO *this, unsigned long OurMemId)")
del_items(0x800801D8)
SetType(0x800801D8, "void ___5DatIO(struct DatIO *this, int __in_chrg)")
del_items(0x80080230)
SetType(0x80080230, "bool FileExists__5DatIOPCc(struct DatIO *this, char *Name)")
del_items(0x80080270)
SetType(0x80080270, "bool LoReadFileAtAddr__5DatIOPCcPUci(struct DatIO *this, char *Name, unsigned char *Dest, int Len)")
del_items(0x80080330)
SetType(0x80080330, "int GetFileLength__5DatIOPCc(struct DatIO *this, char *Name)")
del_items(0x800803E4)
SetType(0x800803E4, "bool LoSave__5DatIOPCcPUci(struct DatIO *this, char *Name, unsigned char *Addr, int Len)")
del_items(0x8008048C)
SetType(0x8008048C, "bool LoStreamFile__5DatIOPCciPFPUciib_bii(struct DatIO *this, char *Name, int Slice, bool (*Func)(), int Offset, int Size)")
#-- texture data (TextDat) and creature animation tables
del_items(0x80080698)
SetType(0x80080698, "struct TextDat *__7TextDat(struct TextDat *this)")
del_items(0x800806D8)
SetType(0x800806D8, "void ___7TextDat(struct TextDat *this, int __in_chrg)")
del_items(0x80080720)
SetType(0x80080720, "void Use__7TextDat(struct TextDat *this)")
del_items(0x80080914)
SetType(0x80080914, "bool TpLoadCallBack__FPUciib(unsigned char *Mem, int ReadSoFar, int Size, bool LastChunk)")
del_items(0x800809BC)
SetType(0x800809BC, "void StreamLoadTP__7TextDat(struct TextDat *this)")
del_items(0x80080A74)
SetType(0x80080A74, "void FinishedUsing__7TextDat(struct TextDat *this)")
del_items(0x80080AE4)
SetType(0x80080AE4, "void MakeBlockOffsetTab__7TextDat(struct TextDat *this)")
del_items(0x80080B54)
SetType(0x80080B54, "long MakeOffsetTab__C9CBlockHdr(struct CBlockHdr *this)")
del_items(0x80080C80)
SetType(0x80080C80, "void SetUVTp__7TextDatP9FRAME_HDRP8POLY_FT4ii(struct TextDat *this, struct FRAME_HDR *Fr, struct POLY_FT4 *FT4, int XFlip, int YFlip)")
del_items(0x80080D80)
SetType(0x80080D80, "struct POLY_FT4 *PrintMonster__7TextDatiiibi(struct TextDat *this, int Frm, int X, int Y, bool XFlip, int OtPos)")
del_items(0x8008118C)
SetType(0x8008118C, "void PrepareFt4__7TextDatP8POLY_FT4iiiii(struct TextDat *this, struct POLY_FT4 *FT4, int Frm, int X, int Y, int XFlip, int YFlip)")
del_items(0x800813F8)
SetType(0x800813F8, "unsigned char *GetDecompBufffer__7TextDati(struct TextDat *this, int Size)")
del_items(0x80081558)
SetType(0x80081558, "void SetUVTpGT4__7TextDatP9FRAME_HDRP8POLY_GT4ii(struct TextDat *this, struct FRAME_HDR *Fr, struct POLY_GT4 *FT4, int XFlip, int YFlip)")
del_items(0x80081658)
SetType(0x80081658, "void PrepareGt4__7TextDatP8POLY_GT4iiiii(struct TextDat *this, struct POLY_GT4 *GT4, int Frm, int X, int Y, int XFlip, int YFlip)")
del_items(0x800818B0)
SetType(0x800818B0, "void SetUVTpGT3__7TextDatP9FRAME_HDRP8POLY_GT3(struct TextDat *this, struct FRAME_HDR *Fr, struct POLY_GT3 *GT3)")
del_items(0x80081934)
SetType(0x80081934, "void PrepareGt3__7TextDatP8POLY_GT3iii(struct TextDat *this, struct POLY_GT3 *GT3, int Frm, int X, int Y)")
del_items(0x80081AFC)
SetType(0x80081AFC, "struct POLY_FT4 *PrintFt4__7TextDatiiiiii(struct TextDat *this, int Frm, int X, int Y, int XFlip, int OtPos, int YFlip)")
del_items(0x80081C50)
SetType(0x80081C50, "struct POLY_GT4 *PrintGt4__7TextDatiiiiii(struct TextDat *this, int Frm, int X, int Y, int XFlip, int OtPos, int YFlip)")
del_items(0x80081DA4)
SetType(0x80081DA4, "struct POLY_GT3 *PrintGt3__7TextDatiiii(struct TextDat *this, int Frm, int X, int Y, int OtPos)")
del_items(0x80081E88)
SetType(0x80081E88, "void DecompFrame__7TextDatP9FRAME_HDR(struct TextDat *this, struct FRAME_HDR *Fr)")
del_items(0x80081FE0)
SetType(0x80081FE0, "void MakeCreatureOffsetTab__7TextDat(struct TextDat *this)")
del_items(0x80082120)
SetType(0x80082120, "void MakePalOffsetTab__7TextDat(struct TextDat *this)")
del_items(0x8008221C)
SetType(0x8008221C, "void InitData__7TextDat(struct TextDat *this)")
del_items(0x80082248)
SetType(0x80082248, "void DumpData__7TextDat(struct TextDat *this)")
del_items(0x80082390)
SetType(0x80082390, "struct TextDat *GM_UseTexData__Fi(int Id)")
del_items(0x800824B0)
SetType(0x800824B0, "void GM_FinishedUsing__FP7TextDat(struct TextDat *Fin)")
del_items(0x80082504)
SetType(0x80082504, "void SetPal__7TextDatP9FRAME_HDRP8POLY_FT4(struct TextDat *this, struct FRAME_HDR *Fr, struct POLY_FT4 *FT4)")
del_items(0x800825C8)
SetType(0x800825C8, "int GetFrNum__7TextDatiiii(struct TextDat *this, int Creature, int Action, int Direction, int Frame)")
del_items(0x8008261C)
SetType(0x8008261C, "bool IsDirAliased__7TextDatiii(struct TextDat *this, int Creature, int Action, int Direction)")
del_items(0x80082674)
SetType(0x80082674, "void DoDecompRequests__7TextDat(struct TextDat *this)")
del_items(0x80082798)
SetType(0x80082798, "void FindDecompArea__7TextDatR4RECT(struct TextDat *this, struct RECT *R)")
del_items(0x80082870)
SetType(0x80082870, "struct CTextFileInfo *GetFileInfo__7TextDati(struct TextDat *this, int Id)")
del_items(0x800828C0)
SetType(0x800828C0, "int GetSize__C15CCreatureAction(struct CCreatureAction *this)")
del_items(0x800828E8)
SetType(0x800828E8, "int GetFrNum__C15CCreatureActionii(struct CCreatureAction *this, int Direction, int Frame)")
del_items(0x80082990)
SetType(0x80082990, "void InitDirRemap__15CCreatureAction(struct CCreatureAction *this)")
del_items(0x80082A50)
SetType(0x80082A50, "int GetFrNum__C12CCreatureHdriii(struct CCreatureHdr *this, int Action, int Direction, int Frame)")
del_items(0x80082A94)
SetType(0x80082A94, "struct CCreatureAction *GetAction__C12CCreatureHdri(struct CCreatureHdr *this, int ActNum)")
del_items(0x80082B24)
SetType(0x80082B24, "void InitActionDirRemaps__12CCreatureHdr(struct CCreatureHdr *this)")
del_items(0x80082B94)
SetType(0x80082B94, "int GetSize__C12CCreatureHdr(struct CCreatureHdr *this)")
del_items(0x80082C00)
SetType(0x80082C00, "long LoadDat__C13CTextFileInfo(struct CTextFileInfo *this)")
del_items(0x80082C50)
SetType(0x80082C50, "long LoadHdr__C13CTextFileInfo(struct CTextFileInfo *this)")
del_items(0x80082C78)
SetType(0x80082C78, "long GetFile__C13CTextFileInfoPc(struct CTextFileInfo *this, char *Ext)")
del_items(0x80082D14)
SetType(0x80082D14, "bool HasFile__C13CTextFileInfoPc(struct CTextFileInfo *this, char *Ext)")
del_items(0x80082D7C)
SetType(0x80082D7C, "void Un64__FPUcT0l(unsigned char *Src, unsigned char *Dest, long SizeBytes)")
#-- screens, parts and blocks
del_items(0x80082E50)
SetType(0x80082E50, "struct CScreen *__7CScreen(struct CScreen *this)")
del_items(0x80082E84)
SetType(0x80082E84, "void Load__7CScreeniii(struct CScreen *this, int Id, int tpx, int tpy)")
del_items(0x80083124)
SetType(0x80083124, "void Unload__7CScreen(struct CScreen *this)")
del_items(0x80083148)
SetType(0x80083148, "void Display__7CScreeniiii(struct CScreen *this, int Id, int tpx, int tpy, int fadeval)")
del_items(0x80083428)
SetType(0x80083428, "void SetRect__5CPartR7TextDatR4RECT(struct CPart *this, struct TextDat *TDat, struct RECT *R)")
del_items(0x800834A4)
SetType(0x800834A4, "void GetBoundingBox__6CBlockR7TextDatR4RECT(struct CBlock *this, struct TextDat *TDat, struct RECT *R)")
del_items(0x80083600)
SetType(0x80083600, "void _GLOBAL__D_DatPool()")
del_items(0x80083658)
SetType(0x80083658, "void _GLOBAL__I_DatPool()")
#-- PRIM_GetPrim overloads and small TextDat accessors
del_items(0x800836AC)
SetType(0x800836AC, "void PRIM_GetPrim__FPP8POLY_GT3(struct POLY_GT3 **Prim)")
del_items(0x80083728)
SetType(0x80083728, "void PRIM_GetPrim__FPP8POLY_GT4(struct POLY_GT4 **Prim)")
del_items(0x800837A4)
SetType(0x800837A4, "void PRIM_GetPrim__FPP8POLY_FT4(struct POLY_FT4 **Prim)")
del_items(0x80083820)
SetType(0x80083820, "bool CanXferFrame__C7TextDat(struct TextDat *this)")
del_items(0x80083848)
SetType(0x80083848, "bool CanXferPal__C7TextDat(struct TextDat *this)")
del_items(0x80083870)
SetType(0x80083870, "bool IsLoaded__C7TextDat(struct TextDat *this)")
del_items(0x8008387C)
SetType(0x8008387C, "int GetTexNum__C7TextDat(struct TextDat *this)")
del_items(0x80083888)
SetType(0x80083888, "struct CCreatureHdr *GetCreature__7TextDati(struct TextDat *this, int Creature)")
del_items(0x80083900)
SetType(0x80083900, "int GetNumOfCreatures__7TextDat(struct TextDat *this)")
del_items(0x80083914)
SetType(0x80083914, "void SetFileInfo__7TextDatPC13CTextFileInfoi(struct TextDat *this, struct CTextFileInfo *NewInfo, int NewTexNum)")
del_items(0x80083920)
SetType(0x80083920, "int GetNumOfFrames__7TextDat(struct TextDat *this)")
del_items(0x80083934)
SetType(0x80083934, "struct PAL *GetPal__7TextDati(struct TextDat *this, int PalNum)")
del_items(0x80083950)
SetType(0x80083950, "struct FRAME_HDR *GetFr__7TextDati(struct TextDat *this, int FrNum)")
del_items(0x8008396C)
SetType(0x8008396C, "char *GetName__C13CTextFileInfo(struct CTextFileInfo *this)")
del_items(0x80083978)
SetType(0x80083978, "bool HasDat__C13CTextFileInfo(struct CTextFileInfo *this)")
del_items(0x800839A0)
SetType(0x800839A0, "bool HasTp__C13CTextFileInfo(struct CTextFileInfo *this)")
del_items(0x800839C8)
SetType(0x800839C8, "int GetSize__C6CBlock(struct CBlock *this)")
del_items(0x800839DC)
SetType(0x800839DC, "struct CdIO *__4CdIOUl(struct CdIO *this, unsigned long OurMemId)")
del_items(0x80083A20)
SetType(0x80083A20, "void ___4CdIO(struct CdIO *this, int __in_chrg)")
del_items(0x80083A78)
SetType(0x80083A78, "bool FileExists__4CdIOPCc(struct CdIO *this, char *Name)")
del_items(0x80083A9C)
SetType(0x80083A9C, "bool LoReadFileAtAddr__4CdIOPCcPUci(struct CdIO *this, char *Name, unsigned char *Dest, int Len)")
del_items(0x80083B20)
SetType(0x80083B20, "int GetFileLength__4CdIOPCc(struct CdIO *this, char *Name)")
del_items(0x80083B44)
SetType(0x80083B44, "bool LoSave__4CdIOPCcPUci(struct CdIO *this, char *Name, unsigned char *Addr, int Len)")
del_items(0x80083C24)
SetType(0x80083C24, "void LoStreamCallBack__Fi(int handle)")
del_items(0x80083C34)
SetType(0x80083C34, "bool CD_GetCdlFILE__FPCcP7CdlFILE(char *Name, struct CdlFILE *RetFile)")
del_items(0x80083D80)
SetType(0x80083D80, "bool LoStreamFile__4CdIOPCciPFPUciib_bii(struct CdIO *this, char *Name, int Slice, bool (*Func)(), int Offset, int Size)")
del_items(0x8008400C)
SetType(0x8008400C, "bool LoAsyncStreamFile__4CdIOPCciPFPUciib_bii(struct CdIO *this, char *Name, int Slice, bool (*Func)(), int Offset, int Size)")
del_items(0x8008416C)
SetType(0x8008416C, "void BL_InitEAC__Fv()")
del_items(0x80084258)
SetType(0x80084258, "long BL_ReadFile__FPcUl(char *Name, unsigned long RamId)")
del_items(0x80084384)
SetType(0x80084384, "long BL_AsyncReadFile__FPcUl(char *Name, unsigned long RamId)")
del_items(0x800844F8)
SetType(0x800844F8, "void BL_LoadDirectory__Fv()")
del_items(0x80084620)
SetType(0x80084620, "void BL_LoadStreamDir__Fv()")
del_items(0x80084900)
SetType(0x80084900, "struct STRHDR *BL_MakeFilePosTab__FPUcUl(unsigned char *BL_DirPtr, unsigned long NoStreamFiles)")
del_items(0x80084A00)
SetType(0x80084A00, "struct STRHDR *BL_FindStreamFile__FPcc(char *Name, char LumpID)")
del_items(0x80084BCC)
SetType(0x80084BCC, "bool BL_FileExists__FPcc(char *Name, char LumpID)")
del_items(0x80084BF0)
SetType(0x80084BF0, "int BL_FileLength__FPcc(char *Name, char LumpID)")
del_items(0x80084C24)
SetType(0x80084C24, "bool BL_LoadFileAtAddr__FPcPUcc(char *Name, unsigned char *Dest, char LumpID)")
del_items(0x80084D0C)
SetType(0x80084D0C, "bool BL_AsyncLoadDone__Fv()")
del_items(0x80084D18)
SetType(0x80084D18, "void BL_WaitForAsyncFinish__Fv()")
del_items(0x80084D64)
SetType(0x80084D64, "void BL_AsyncLoadCallBack__Fi(int handle)")
del_items(0x80084D94)
SetType(0x80084D94, "long BL_LoadFileAsync__FPcc(char *Name, char LumpID)")
del_items(0x80084F0C)
SetType(0x80084F0C, "bool BL_AsyncLoadFileAtAddr__FPcPUcc(char *Name, unsigned char *Dest, char LumpID)")
del_items(0x80084FD4)
SetType(0x80084FD4, "struct STRHDR *BL_OpenStreamFile__FPcc(char *Name, char LumpID)")
del_items(0x80085000)
SetType(0x80085000, "bool BL_CloseStreamFile__FP6STRHDR(struct STRHDR *StreamHDR)")
del_items(0x80085038)
SetType(0x80085038, "int LZNP_Decode__FPUcT0(unsigned char *in, unsigned char *out)")
del_items(0x8008510C)
SetType(0x8008510C, "void *Tmalloc__Fi(int MemSize)")
del_items(0x80085230)
SetType(0x80085230, "void Tfree__FPv(void *Addr)")
del_items(0x800852E0)
SetType(0x800852E0, "void InitTmalloc__Fv()")
del_items(0x80085308)
SetType(0x80085308, "void strupr__FPc(char *Buffa)")
del_items(0x8008535C)
SetType(0x8008535C, "void PauseTask__FP4TASK(struct TASK *T)")
del_items(0x800853A8)
SetType(0x800853A8, "int GetPausePad__Fv()")
del_items(0x8008549C)
SetType(0x8008549C, "bool TryPadForPause__Fi(int PadNum)")
del_items(0x800854C8)
SetType(0x800854C8, "void DoPause__14CPauseMessagesi(struct CPauseMessages *this, int nPadNum)")
del_items(0x8008570C)
SetType(0x8008570C, "bool DoPausedMessage__14CPauseMessages(struct CPauseMessages *this)")
del_items(0x80085A24)
SetType(0x80085A24, "int DoQuitMessage__14CPauseMessages(struct CPauseMessages *this)")
del_items(0x80085B44)
SetType(0x80085B44, "bool AreYouSureMessage__14CPauseMessages(struct CPauseMessages *this)")
del_items(0x80085C48)
SetType(0x80085C48, "bool PA_SetPauseOk__Fb(bool NewPause)")
del_items(0x80085C58)
SetType(0x80085C58, "bool PA_GetPauseOk__Fv()")
del_items(0x80085C64)
SetType(0x80085C64, "void MY_PausePrint__17CTempPauseMessageiPciP4RECT(struct CTempPauseMessage *this, int s, char *Txt, int Menu, struct RECT *PRect)")
del_items(0x80085E34)
SetType(0x80085E34, "void InitPrintQuitMessage__17CTempPauseMessage(struct CTempPauseMessage *this)")
del_items(0x80085E3C)
SetType(0x80085E3C, "void PrintQuitMessage__17CTempPauseMessagei(struct CTempPauseMessage *this, int Menu)")
del_items(0x80085F58)
SetType(0x80085F58, "void LeavePrintQuitMessage__17CTempPauseMessagei(struct CTempPauseMessage *this, int Menu)")
del_items(0x80085F60)
SetType(0x80085F60, "void InitPrintAreYouSure__17CTempPauseMessage(struct CTempPauseMessage *this)")
del_items(0x80085F68)
SetType(0x80085F68, "void PrintAreYouSure__17CTempPauseMessagei(struct CTempPauseMessage *this, int Menu)")
del_items(0x80086084)
SetType(0x80086084, "void LeavePrintAreYouSure__17CTempPauseMessagei(struct CTempPauseMessage *this, int Menu)")
del_items(0x8008608C)
SetType(0x8008608C, "void InitPrintPaused__17CTempPauseMessage(struct CTempPauseMessage *this)")
del_items(0x80086094)
SetType(0x80086094, "void ShowInActive__17CTempPauseMessage(struct CTempPauseMessage *this)")
del_items(0x80086174)
SetType(0x80086174, "void PrintPaused__17CTempPauseMessage(struct CTempPauseMessage *this)")
del_items(0x800862C4)
SetType(0x800862C4, "void LeavePrintPaused__17CTempPauseMessage(struct CTempPauseMessage *this)")
del_items(0x800862CC)
SetType(0x800862CC, "void ___17CTempPauseMessage(struct CTempPauseMessage *this, int __in_chrg)")
del_items(0x800862F4)
SetType(0x800862F4, "void _GLOBAL__D_DoPause__14CPauseMessagesi()")
del_items(0x8008631C)
SetType(0x8008631C, "void _GLOBAL__I_DoPause__14CPauseMessagesi()")
del_items(0x80086344)
SetType(0x80086344, "struct CTempPauseMessage *__17CTempPauseMessage(struct CTempPauseMessage *this)")
del_items(0x80086388)
SetType(0x80086388, "void ___14CPauseMessages(struct CPauseMessages *this, int __in_chrg)")
del_items(0x800863BC)
SetType(0x800863BC, "struct CPauseMessages *__14CPauseMessages(struct CPauseMessages *this)")
del_items(0x800863D0)
SetType(0x800863D0, "void | |
'vcmpunord_ssd xmm8, xmm15, qword ptr [r8]')
Buffer = b'\xc4\x01\x83\xc2\x00\x14\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 64)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpneq_ussd xmm8, xmm15, qword ptr [r8]')
Buffer = b'\xc4\x01\x83\xc2\x00\x15\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 64)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpnlt_uqsd xmm8, xmm15, qword ptr [r8]')
Buffer = b'\xc4\x01\x83\xc2\x00\x16\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 64)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpnle_uqsd xmm8, xmm15, qword ptr [r8]')
Buffer = b'\xc4\x01\x83\xc2\x00\x17\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 64)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpord_ssd xmm8, xmm15, qword ptr [r8]')
Buffer = b'\xc4\x01\x83\xc2\x00\x18\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 64)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpeq_ussd xmm8, xmm15, qword ptr [r8]')
Buffer = b'\xc4\x01\x83\xc2\x00\x19\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 64)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpnge_uqsd xmm8, xmm15, qword ptr [r8]')
Buffer = b'\xc4\x01\x83\xc2\x00\x1a\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 64)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpngt_uqsd xmm8, xmm15, qword ptr [r8]')
Buffer = b'\xc4\x01\x83\xc2\x00\x1b\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 64)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpfalse_ossd xmm8, xmm15, qword ptr [r8]')
Buffer = b'\xc4\x01\x83\xc2\x00\x1c\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 64)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpneq_ossd xmm8, xmm15, qword ptr [r8]')
Buffer = b'\xc4\x01\x83\xc2\x00\x1d\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 64)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpge_oqsd xmm8, xmm15, qword ptr [r8]')
Buffer = b'\xc4\x01\x83\xc2\x00\x1e\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 64)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmpgt_oqsd xmm8, xmm15, qword ptr [r8]')
Buffer = b'\xc4\x01\x83\xc2\x00\x1f\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + SSE_REG + REG8)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + SSE_REG + REG15)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
assert_equal(myDisasm.Argument3.ArgType, + MEMORY_TYPE)
assert_equal(myDisasm.Argument3.ArgSize, 64)
assert_equal(myDisasm.Argument3.AccessMode, READ)
assert_equal(myDisasm.Argument4.ArgType, + CONSTANT_TYPE+ABSOLUTE_)
assert_equal(myDisasm.Argument4.ArgSize, 8)
assert_equal(myDisasm.Argument4.AccessMode, READ)
assert_equal(myDisasm.CompleteInstr, 'vcmptrue_ussd xmm8, xmm15, qword ptr [r8]')
def test_mpx(self):
Buffer = b'\xf3\x41\x0f\x1a\x0b\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.CompleteInstr, 'bndcl bnd1, dword ptr [r11]')
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + MPX_REG + REG1)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, MEMORY_TYPE)
assert_equal(myDisasm.Argument2.ArgSize, 32)
assert_equal(myDisasm.Argument2.AccessMode, READ)
Buffer = b'\xf3\x0f\x1a\x0b\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.CompleteInstr, 'bndcl bnd1, dword ptr [rbx]')
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + MPX_REG + REG1)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, MEMORY_TYPE)
assert_equal(myDisasm.Argument2.ArgSize, 32)
assert_equal(myDisasm.Argument2.AccessMode, READ)
Buffer = b'\xf2\x41\x0f\x1a\x0b\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.CompleteInstr, 'bndcu bnd1, dword ptr [r11]')
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + MPX_REG + REG1)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, MEMORY_TYPE)
assert_equal(myDisasm.Argument2.ArgSize, 32)
assert_equal(myDisasm.Argument2.AccessMode, READ)
Buffer = b'\xf2\x0f\x1a\x0b\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.CompleteInstr, 'bndcu bnd1, dword ptr [rbx]')
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + MPX_REG + REG1)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, MEMORY_TYPE)
assert_equal(myDisasm.Argument2.ArgSize, 32)
assert_equal(myDisasm.Argument2.AccessMode, READ)
Buffer = b'\xf2\x41\x0f\x1b\x0b\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.CompleteInstr, 'bndcn bnd1, dword ptr [r11]')
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + MPX_REG + REG1)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, MEMORY_TYPE)
assert_equal(myDisasm.Argument2.ArgSize, 32)
assert_equal(myDisasm.Argument2.AccessMode, READ)
Buffer = b'\xf2\x0f\x1b\x0b\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.CompleteInstr, 'bndcn bnd1, dword ptr [rbx]')
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + MPX_REG + REG1)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, MEMORY_TYPE)
assert_equal(myDisasm.Argument2.ArgSize, 32)
assert_equal(myDisasm.Argument2.AccessMode, READ)
Buffer = b'\xf3\x41\x0f\x1b\x0b\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.CompleteInstr, 'bndmk bnd1, dword ptr [r11]')
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + MPX_REG + REG1)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, MEMORY_TYPE)
assert_equal(myDisasm.Argument2.ArgSize, 32)
assert_equal(myDisasm.Argument2.AccessMode, READ)
Buffer = b'\xf3\x0f\x1b\x0b\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.CompleteInstr, 'bndmk bnd1, dword ptr [rbx]')
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + MPX_REG + REG1)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, READ)
assert_equal(myDisasm.Argument2.ArgType, MEMORY_TYPE)
assert_equal(myDisasm.Argument2.ArgSize, 32)
assert_equal(myDisasm.Argument2.AccessMode, READ)
Buffer = b'\x66\x0f\x1a\xc0\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 0
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.CompleteInstr, 'bndmov bnd0, bnd0')
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + MPX_REG + REG0)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, WRITE)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + MPX_REG + REG0)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
Buffer = b'\x66\x0f\x1a\x0b\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.CompleteInstr, 'bndmov bnd1, dqword ptr [rbx]')
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + MPX_REG + REG1)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, WRITE)
assert_equal(myDisasm.Argument2.ArgType, MEMORY_TYPE)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
Buffer = b'\x66\x0f\x1a\x0b\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 0
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.CompleteInstr, 'bndmov bnd1, qword ptr [ebx]')
assert_equal(myDisasm.Argument1.ArgType, REGISTER_TYPE + MPX_REG + REG1)
assert_equal(myDisasm.Argument1.ArgSize, 128)
assert_equal(myDisasm.Argument1.AccessMode, WRITE)
assert_equal(myDisasm.Argument2.ArgType, MEMORY_TYPE)
assert_equal(myDisasm.Argument2.ArgSize, 64)
assert_equal(myDisasm.Argument2.AccessMode, READ)
Buffer = b'\x66\x0f\x1b\x0b\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 0
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.CompleteInstr, 'bndmov qword ptr [ebx], bnd1')
assert_equal(myDisasm.Argument1.ArgType, MEMORY_TYPE)
assert_equal(myDisasm.Argument1.ArgSize, 64)
assert_equal(myDisasm.Argument1.AccessMode, WRITE)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + MPX_REG + REG1)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
Buffer = b'\x0f\x1b\x0c\x10\x11\x11\x11\x11\x11\x11\x11\x11\x11'
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer(Buffer,len(Buffer))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.CompleteInstr, 'bndstx dword ptr [rax+rdx], bnd1')
assert_equal(myDisasm.Argument1.ArgType, MEMORY_TYPE)
assert_equal(myDisasm.Argument1.ArgSize, 32)
assert_equal(myDisasm.Argument1.AccessMode, WRITE)
assert_equal(myDisasm.Argument2.ArgType, REGISTER_TYPE + MPX_REG + REG1)
assert_equal(myDisasm.Argument2.ArgSize, 128)
assert_equal(myDisasm.Argument2.AccessMode, READ)
def test_lock(self):
'''Minimal regression tests for https://github.com/BeaEngine/beaengine/issues/9'''
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer('f04889ce'.decode('hex'))
myDisasm.EIP = addressof(Target)
InstrLength = Disasm(addressof(myDisasm))
assert_equal(myDisasm.CompleteInstr, 'lock mov rsi, rcx')
assert_equal(myDisasm.Prefix.LockPrefix, 1)
myDisasm = DISASM()
myDisasm.Archi = 64
Target = create_string_buffer('4889ce'.decode('hex'))
myDisasm.EIP = addressof(Target)
InstrLength | |
""" miscellaneous sorting / groupby utilities """
from collections import defaultdict
from typing import (
TYPE_CHECKING,
Callable,
DefaultDict,
Dict,
Iterable,
List,
Optional,
Tuple,
Union,
)
import numpy as np
from pandas._libs import algos, hashtable, lib
from pandas._libs.hashtable import unique_label_indices
from pandas._typing import IndexKeyFunc
from pandas.core.dtypes.common import (
ensure_int64,
ensure_platform_int,
is_extension_array_dtype,
)
from pandas.core.dtypes.generic import ABCMultiIndex
from pandas.core.dtypes.missing import isna
import pandas.core.algorithms as algorithms
from pandas.core.construction import extract_array
if TYPE_CHECKING:
from pandas import MultiIndex
from pandas.core.indexes.base import Index
_INT64_MAX = np.iinfo(np.int64).max
def get_indexer_indexer(
    target: "Index",
    level: Union[str, int, List[str], List[int]],
    ascending: bool,
    kind: str,
    na_position: str,
    sort_remaining: bool,
    key: IndexKeyFunc,
) -> Optional[np.ndarray]:
    # Annotation fix: ``np.array`` is the constructor function, not a type;
    # the returned indexer is an ``np.ndarray`` (or None).
    """
    Helper method that return the indexer according to input parameters for
    the sort_index method of DataFrame and Series.

    Parameters
    ----------
    target : Index
    level : int or level name or list of ints or list of level names
    ascending : bool or list of bools, default True
    kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
    na_position : {'first', 'last'}, default 'last'
    sort_remaining : bool, default True
    key : callable, optional

    Returns
    -------
    Optional[ndarray]
        The indexer for the new index.
    """
    # Apply the key function (if any) before sorting; for MultiIndex targets
    # ``levels`` restricts the mapping to the requested level(s).
    target = ensure_key_mapped(target, key, levels=level)
    target = target._sort_levels_monotonic()
    if level is not None:
        _, indexer = target.sortlevel(
            level, ascending=ascending, sort_remaining=sort_remaining
        )
    elif isinstance(target, ABCMultiIndex):
        indexer = lexsort_indexer(
            target._get_codes_for_sorting(), orders=ascending, na_position=na_position
        )
    else:
        # Check monotonic-ness before sort an index (GH 11080)
        if (ascending and target.is_monotonic_increasing) or (
            not ascending and target.is_monotonic_decreasing
        ):
            # Already in the requested order -- no reindexing needed.
            return None
        indexer = nargsort(
            target, kind=kind, ascending=ascending, na_position=na_position
        )
    return indexer
def get_group_index(labels, shape, sort: bool, xnull: bool):
    """
    For the particular label_list, gets the offsets into the hypothetical list
    representing the totally ordered cartesian product of all possible label
    combinations, *as long as* this space fits within int64 bounds;
    otherwise, though group indices identify unique combinations of
    labels, they cannot be deconstructed.

    - If `sort`, rank of returned ids preserve lexical ranks of labels.
      i.e. returned id's can be used to do lexical sort on labels;
    - If `xnull` nulls (-1 labels) are passed through.

    Parameters
    ----------
    labels : sequence of arrays
        Integers identifying levels at each location
    shape : sequence of ints
        Number of unique levels at each location
    sort : bool
        If the ranks of returned ids should match lexical ranks of labels
    xnull : bool
        If true nulls are excluded. i.e. -1 values in the labels are
        passed through.

    Returns
    -------
    An array of type int64 where two elements are equal if their corresponding
    labels are equal at all location.

    Notes
    -----
    The length of `labels` and `shape` must be identical.
    """
    def _int64_cut_off(shape) -> int:
        # Index of the first level at which the running product of level
        # sizes would overflow int64; len(shape) if it never overflows.
        acc = 1
        for i, mul in enumerate(shape):
            acc *= int(mul)
            if not acc < _INT64_MAX:
                return i
        return len(shape)
    def maybe_lift(lab, size):
        # promote nan values (assigned -1 label in lab array)
        # so that all output values are non-negative
        return (lab + 1, size + 1) if (lab == -1).any() else (lab, size)
    labels = map(ensure_int64, labels)
    if not xnull:
        # With xnull=False, nulls (-1) become an ordinary category: shift
        # every level up by one so they map to 0 instead of -1.
        labels, shape = map(list, zip(*map(maybe_lift, labels, shape)))
    labels = list(labels)
    shape = list(shape)
    # Iteratively process all the labels in chunks sized so less
    # than _INT64_MAX unique int ids will be required for each chunk
    while True:
        # how many levels can be done without overflow:
        nlev = _int64_cut_off(shape)
        # compute flat ids for the first `nlev` levels
        stride = np.prod(shape[1:nlev], dtype="i8")
        out = stride * labels[0].astype("i8", subok=False, copy=False)
        for i in range(1, nlev):
            if shape[i] == 0:
                stride = 0
            else:
                stride //= shape[i]
            out += labels[i] * stride
        if xnull:  # exclude nulls
            # Any -1 label in any processed level poisons the whole row.
            mask = labels[0] == -1
            for lab in labels[1:nlev]:
                mask |= lab == -1
            out[mask] = -1
        if nlev == len(shape):  # all levels done!
            break
        # compress what has been done so far in order to avoid overflow
        # to retain lexical ranks, obs_ids should be sorted
        comp_ids, obs_ids = compress_group_index(out, sort=sort)
        # Recurse iteratively: the compressed ids become a single synthetic
        # level prepended to the levels not yet processed.
        labels = [comp_ids] + labels[nlev:]
        shape = [len(obs_ids)] + shape[nlev:]
    return out
def get_compressed_ids(labels, sizes):
    """
    Compress a cartesian-product group index into dense ids.

    The raw group index is an offset into the cartesian product of all
    possible labels, which can be huge; this computes offsets (comp_ids)
    into the list of unique observed labels (obs_group_ids) instead.

    Parameters
    ----------
    labels : list of label arrays
    sizes : list of size of the levels

    Returns
    -------
    tuple of (comp_ids, obs_group_ids)
    """
    return compress_group_index(
        get_group_index(labels, sizes, sort=True, xnull=False), sort=True
    )
def is_int64_overflow_possible(shape) -> bool:
    """Return True if the product of `shape` could exceed the int64 range."""
    # Python ints are arbitrary precision, so the product itself cannot
    # overflow while we compute it.
    product = 1
    for dim in shape:
        product *= int(dim)
    return product >= _INT64_MAX
def decons_group_index(comp_labels, shape):
    # reconstruct labels
    #
    # Inverse of the flat-id computation in get_group_index: peels off one
    # level at a time via modular arithmetic, working from the last level
    # (smallest stride) backwards.  Only valid when the full cartesian
    # product of `shape` fits in int64.
    if is_int64_overflow_possible(shape):
        # at some point group indices are factorized,
        # and may not be deconstructed here! wrong path!
        raise ValueError("cannot deconstruct factorized group indices!")
    label_list = []
    factor = 1
    y = 0
    x = comp_labels
    for i in reversed(range(len(shape))):
        # extract level i's codes; `y` removes the contribution of levels
        # already decoded, `factor` is the stride of level i
        labels = (x - y) % (factor * shape[i]) // factor
        # propagate null group ids (-1) into every reconstructed level
        np.putmask(labels, comp_labels < 0, -1)
        label_list.append(labels)
        y = labels * factor
        factor *= shape[i]
    # levels were decoded last-to-first; restore original order
    return label_list[::-1]
def decons_obs_group_ids(comp_ids, obs_ids, shape, labels, xnull: bool):
    """
    Reconstruct labels from observed group ids.

    Parameters
    ----------
    comp_ids : array of compressed group ids
    obs_ids : array of observed (unique) group ids
    shape : sequence of ints, number of unique levels at each location
    labels : sequence of label arrays
    xnull : bool
        If nulls are excluded; i.e. -1 labels are passed through.
    """
    if not xnull:
        # mirror the maybe_lift shift applied in get_group_index: levels
        # containing -1 were promoted by one
        lift = np.fromiter(((a == -1).any() for a in labels), dtype="i8")
        shape = np.asarray(shape, dtype="i8") + lift
    if not is_int64_overflow_possible(shape):
        # obs ids are deconstructable! take the fast route!
        out = decons_group_index(obs_ids, shape)
        # undo the lift where it was applied
        return out if xnull or not lift.any() else [x - y for x, y in zip(out, lift)]
    # slow path: pick one representative row index per unique group id and
    # read its labels directly
    i = unique_label_indices(comp_ids)
    i8copy = lambda a: a.astype("i8", subok=False, copy=True)
    return [i8copy(lab[i]) for lab in labels]
def indexer_from_factorized(labels, shape, compress: bool = True):
    """Return an indexer that lexically sorts the factorized `labels`."""
    group_index = get_group_index(labels, shape, sort=True, xnull=False)
    if compress:
        group_index, obs_ids = compress_group_index(group_index, sort=True)
        ngroups = len(obs_ids)
    else:
        # empty input -> 0 groups, otherwise max id + 1
        ngroups = (group_index.size and group_index.max()) + 1
    return get_group_index_sorter(group_index, ngroups)
def lexsort_indexer(
    keys, orders=None, na_position: str = "last", key: Optional[Callable] = None
):
    """
    Performs lexical sorting on a set of keys

    Parameters
    ----------
    keys : sequence of arrays
        Sequence of ndarrays to be sorted by the indexer
    orders : boolean or list of booleans, optional
        Determines the sorting order for each element in keys. If a list,
        it must be the same length as keys. This determines whether the
        corresponding element in keys should be sorted in ascending
        (True) or descending (False) order. if bool, applied to all
        elements as above. if None, defaults to True.
    na_position : {'first', 'last'}, default 'last'
        Determines placement of NA elements in the sorted list ("last" or "first")
    key : Callable, optional
        Callable key function applied to every element in keys before sorting
        .. versionadded:: 1.0.0

    Returns
    -------
    ndarray
        Indexer that lexically sorts `keys`.

    Raises
    ------
    ValueError
        If `na_position` is not "first" or "last".
    """
    from pandas.core.arrays import Categorical

    # Validate once, up front, instead of re-checking inside the per-key
    # loop (which also silently skipped validation when `keys` was empty).
    if na_position not in ["last", "first"]:
        raise ValueError(f"invalid na_position: {na_position}")

    labels = []
    shape = []
    if isinstance(orders, bool):
        orders = [orders] * len(keys)
    elif orders is None:
        orders = [True] * len(keys)

    keys = [ensure_key_mapped(k, key) for k in keys]

    for k, order in zip(keys, orders):
        # factorize: codes are ints in [0, n), -1 marks missing values
        cat = Categorical(k, ordered=True)
        n = len(cat.categories)
        codes = cat.codes.copy()
        mask = cat.codes == -1

        if order:  # ascending
            if na_position == "last":
                codes = np.where(mask, n, codes)
            elif na_position == "first":
                # shift everything up by one so NA (-1) becomes 0
                codes += 1
        else:  # not order means descending
            if na_position == "last":
                codes = np.where(mask, n, n - codes - 1)
            elif na_position == "first":
                codes = np.where(mask, 0, n - codes)

        if mask.any():
            # reserve an extra slot for the NA bucket
            n += 1

        shape.append(n)
        labels.append(codes)

    return indexer_from_factorized(labels, shape)
def nargsort(
items,
kind: str = "quicksort",
ascending: bool = True,
na_position: str = "last",
key: Optional[Callable] = None,
mask: Optional[np.ndarray] = None,
):
"""
Intended to be a drop-in replacement for np.argsort which handles NaNs.
Adds ascending, na_position, and key parameters.
(GH #6399, #5231, #27237)
Parameters
----------
kind : str, default 'quicksort'
ascending : bool, default True
na_position : {'first', 'last'}, default 'last'
key : Optional[Callable], default None
mask : Optional[np.ndarray], default None
Passed when called by ExtensionArray.argsort.
"""
if key is not None:
items = ensure_key_mapped(items, key)
return nargsort(
items,
kind=kind,
ascending=ascending,
na_position=na_position,
key=None,
mask=mask,
)
items = extract_array(items)
if mask is None:
mask = | |
0xa2, 0x22, 0xa0,
0x64, 0x42, 0x42, 0x4a, 0xa4, 0x66, 0x62, 0x28,
0x20, 0xb6, 0x22, 0xe6, 0x24, 0x04, 0x1f, 0x48,
0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x4f,
0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x48, 0x00, 0x80,
0x04, 0x00, 0x48, 0x80, 0x24, 0x04, 0x00, 0x20,
0x04, 0x00, 0x80, 0x04, 0x20, 0x04, 0x42, 0x20,
0x04, 0x00, 0x00, 0x00, 0xcf, 0xa8, 0x08, 0x2a,
0xb5, 0x11, 0xa2, 0x12, 0x1b, 0x21, 0x6a, 0xb1,
0x11, 0xb2, 0x62, 0x91, 0x21, 0x2b, 0x16, 0x19,
0xb2, 0x62, 0x99, 0x21, 0x2b, 0x94, 0x11, 0x2b,
0x94, 0x22, 0x2f, 0x14, 0x2b, 0xf2, 0x42, 0xb1,
0x22, 0x2f, 0x14, 0xab, 0x42, 0x4e, 0xb1, 0x2e,
0x42, 0x1c, 0xea, 0x22, 0xe4, 0x15, 0xe2, 0x22,
0xe4, 0x1d, 0xe2, 0x22, 0xe4, 0x1d, 0xe2, 0x22,
0xa4, 0x2d, 0x2e, 0x42, 0x1b, 0x2d, 0x2e, 0x42,
0x1b, 0x29, 0x26, 0xb2, 0x91, 0x22, 0xb6, 0x91,
0x36, 0x62, 0x1b, 0x68, 0x23, 0x96, 0x61, 0x2b,
0x86, 0x19, 0xb6, 0x42, 0x98, 0x41, 0x2b, 0x84,
0x2a, 0xb4, 0x42, 0xaa, 0x42, 0x2b, 0xa4, 0x2a,
0xb4, 0x43, 0xaa, 0x42, 0x4a, 0xea, 0x22, 0xf4,
0x99, 0x41, 0x80, 0x94, 0x41, 0x48, 0x11, 0x42,
0x19, 0x38, 0x42, 0x11, 0x2b, 0x14, 0x11, 0x2b,
0x94, 0x11, 0x2b, 0xb4, 0x11, 0x2b, 0xb4, 0xf0,
0x42, 0x91, 0xf0, 0x42, 0x91, 0xd0, 0x12, 0x19,
0xd2, 0x12, 0xc9, 0x52, 0x1c, 0xc9, 0x82, 0x1e,
0x81, 0x2c, 0x64, 0x19, 0x24, 0x96, 0xc1, 0x43,
0x96, 0x41, 0x32, 0x91, 0x2c, 0xb4, 0x91, 0x44,
0xb2, 0x91, 0x24, 0x34, 0x91, 0x21, 0x13, 0x38,
0x42, 0x11, 0x29, 0x18, 0xb1, 0x42, 0x1a, 0x91,
0xa2, 0xb0, 0x42, 0x08, 0x29, 0x08, 0x29, 0x18,
0x92, 0x82, 0x24, 0x4f, 0xb5, 0x0e, 0x20, 0x88,
0x04, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x00,
0x00, 0x00, 0x10, 0x02, 0x18, 0x80, 0x08, 0x00,
0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x80, 0x02, 0x00, 0x00, 0x00, 0x50, 0x12,
0xb0, 0xd1, 0x03, 0x00, 0x00, 0x00, 0x22, 0x00,
0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x42, 0x90,
0x21, 0x20, 0x22, 0x04, 0x20, 0x04, 0x28, 0x22,
0x00, 0x20, 0x01, 0x20, 0x22, 0x08, 0x00, 0x00,
0x80, 0x04, 0x48, 0x00, 0x00, 0x80, 0x84, 0x04,
0x80, 0xf2, 0x88, 0x22, 0x80, 0x21, 0x28, 0x82,
0x02, 0x80, 0xa8, 0x48, 0x22, 0x1a, 0x04, 0x00,
0x88, 0x80, 0x82, 0x02, 0x80, 0x04, 0x48, 0x42,
0x88, 0x00, 0xc0, 0x41, 0x00, 0x20, 0x04, 0x22,
0x00, 0x20, 0x02, 0x62, 0x42, 0x20, 0x14, 0x11,
0x92, 0x41, 0x20, 0x84, 0x08, 0x42, 0x8e, 0x41,
0x48, 0x00, 0x11, 0xe0, 0x14, 0x02, 0x9f, 0x86,
0x06, 0x20, 0x0c, 0x28, 0x00, 0xa8, 0x00, 0x42,
0x00, 0x22, 0x00, 0x22, 0x00, 0x00, 0x2c, 0x84,
0xc4, 0x41, 0x92, 0xa0, 0x48, 0x80, 0x84, 0x04,
0x22, 0x11, 0x00, 0x20, 0x21, 0x04, 0x22, 0x28,
0x00, 0x42, 0x11, 0x20, 0x24, 0xa4, 0x42, 0x4a,
0x08, 0x42, 0x88, 0x00, 0x42, 0x17, 0x24, 0x40,
0x21, 0xd2, 0xb1, 0x01, 0x18, 0x20, 0x02, 0x00,
0xa0, 0x48, 0x22, 0x12, 0x00, 0x20, 0x08, 0x80,
0x02, 0x28, 0x48, 0x00, 0x28, 0x80, 0x02, 0x10,
0x01, 0x00, 0x2a, 0x04, 0x22, 0x00, 0x20, 0x02,
0x4a, 0x88, 0x04, 0x40, 0x02, 0x14, 0x00, 0x00,
0x30, 0x41, 0x48, 0x00, 0x00, 0x4a, 0x02, 0xeb,
0xbb, 0x20, 0x02, 0x00, 0x80, 0x02, 0x00, 0x00,
0x48, 0x00, 0x00, 0x20, 0x01, 0x48, 0x18, 0x20,
0x04, 0x42, 0x22, 0x42, 0x20, 0x85, 0x61, 0x14,
0x20, 0xa1, 0x42, 0x20, 0x02, 0x00, 0x80, 0x04,
0x20, 0x02, 0x42, 0x82, 0x20, 0x08, 0x48, 0x20,
0x88, 0x22, 0x28, 0x04, 0x4a, 0x02, 0xef, 0xd5,
0x07, 0x00, 0x32, 0x80, 0x03, 0x2a, 0xa1, 0x41,
0x12, 0x58, 0x80, 0x01, 0x00, 0x52, 0x12, 0x42,
0x22, 0x60, 0x12, 0x80, 0x22, 0x02, 0xa0, 0x12,
0x1a, 0x22, 0x02, 0xb8, 0x52, 0x30, 0x12, 0x20,
0x84, 0xa1, 0x12, 0x11, 0x18, 0x00, 0x48, 0xa0,
0x84, 0xa2, 0x42, 0x00, 0x00, 0x2a, 0x04, 0x80,
0x84, 0x82, 0x08, 0x82, 0x7f, 0x8c, 0x01, 0x28,
0x20, 0x01, 0x38, 0x80, 0x22, 0x21, 0x01, 0x20,
0x04, 0x00, 0x18, 0x12, 0x42, 0x22, 0x10, 0x01,
0x2a, 0x24, 0x02, 0x20, 0x82, 0xa3, 0x12, 0xe0,
0x12, 0x8d, 0x01, 0x00, 0x48, 0x00, 0x20, 0x21,
0x04, 0x80, 0x02, 0xe2, 0x82, 0x20, 0x08, 0x48,
0x62, 0x82, 0x22, 0x48, 0x80, 0x88, 0x02, 0x87,
0x68, 0x00, 0x20, 0x02, 0x20, 0x22, 0x21, 0x04,
0x58, 0x80, 0x01, 0x00, 0x48, 0x20, 0x01, 0x42,
0x1a, 0x02, 0x00, 0x48, 0x22, 0x1a, 0x04, 0x48,
0x12, 0x80, 0x84, 0xb1, 0x22, 0x05, 0x22, 0x12,
0x32, 0x11, 0x00, 0x80, 0x04, 0x8a, 0x04, 0x48,
0x00, 0x00, 0x00, 0x80, 0x88, 0x06, 0x42, 0x88,
0x2f, 0x59, 0x08, 0x42, 0x18, 0x80, 0x21, 0x04,
0x28, 0x22, 0x48, 0x42, 0x20, 0xa4, 0x21, 0x20,
0x81, 0x22, 0xa4, 0x24, 0x20, 0x02, 0x20, 0x81,
0x01, 0x42, 0x80, 0x84, 0x04, 0x48, 0x00, 0x22,
0x22, 0x20, 0x04, 0x00, 0x00, 0x42, 0x00, 0x00,
0x28, 0x00, 0x82, 0x00, 0x00, 0x82, 0x00, 0x88,
0x5f, 0x5b, 0x03, 0x20, 0x26, 0x01, 0x38, 0x20,
0x81, 0x81, 0x22, 0x21, 0x01, 0x28, 0x48, 0x00,
0x20, 0x01, 0x00, 0x00, 0x4a, 0x21, 0x02, 0x12,
0x00, 0x18, 0x12, 0x20, 0x86, 0x04, 0x12, 0x62,
0xa0, 0x12, 0x00, 0x28, 0x20, 0x26, 0x02, 0x22,
0xe8, 0x48, 0x20, 0x02, 0x00, 0x22, 0x20, 0x24,
0x22, 0xf4, 0x92, 0xed, 0x00, 0x00, 0x28, 0x80,
0x01, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x18,
0x40, 0x81, 0x24, 0x04, 0x42, 0x28, 0x48, 0x22,
0x28, 0x80, 0x88, 0x02, 0x00, 0x42, 0x30, 0x12,
0x80, 0x86, 0x02, 0x28, 0x00, 0x00, 0x80, 0x98,
0x42, 0xa0, 0x84, 0x80, 0x04, 0x22, 0x28, 0x20,
0xf2, 0xf6, 0x56, 0x00, 0x18, 0xa0, 0x12, 0x38,
0x18, 0x28, 0x28, 0x48, 0x42, 0x20, 0x24, 0x21,
0x84, 0x81, 0x01, 0x28, 0x80, 0x42, 0x81, 0xa4,
0x41, 0x48, 0x42, 0x4a, 0xa2, 0x14, 0x28, 0x2a,
0x84, 0xa1, 0x49, 0x22, 0x48, 0x62, 0xa0, 0x41,
0x10, 0xa2, 0x21, 0x42, 0x28, 0x22, 0x42, 0x2a,
0x24, 0x02, 0x22, 0x42, 0x8a, 0x04, 0xa2, 0x00,
0x42, 0x00, 0x68, 0x20, 0xf8, 0x57, 0xc7, 0x80,
0x84, 0x82, 0x01, 0x42, 0x80, 0x21, 0x21, 0x22,
0x81, 0x01, 0x00, 0x20, 0x22, 0xa4, 0x41, 0x18,
0x00, 0x00, 0x80, 0x02, 0x80, 0x04, 0x00, 0x80,
0x02, 0x28, 0x80, 0x82, 0x01, 0x22, 0x00, 0x28,
0x00, 0x00, 0x88, 0x30, 0x42, 0xa0, 0x84, 0x00,
0x22, 0xa8, 0x80, 0xa2, 0x42, 0xbf, 0x15, 0x08,
0x48, 0x1a, 0x02, 0x22, 0x00, 0x62, 0x68, 0x28,
0x28, 0x80, 0x02, 0x42, 0x00, 0x00, 0x20, 0x82,
0x24, 0x24, 0x03, 0x48, 0x18, 0x22, 0x42, 0x2a,
0x04, 0x80, 0x82, 0x01, 0x22, 0xa0, 0x21, 0xa0,
0x41, 0x20, 0x01, 0x62, 0x00, 0x00, 0x28, 0x82,
0x20, 0x24, 0x08, 0x88, 0x80, 0x84, 0x88, 0x24,
0xf4, 0x5f, 0xde, 0x00, 0x80, 0x01, 0x00, 0x20,
0x21, 0x24, 0xa4, 0x52, 0x48, 0x22, 0x00, 0x32,
0x42, 0x28, 0x28, 0x00, 0x00, 0x48, 0x00, 0x12,
0x12, 0x00, 0x28, 0x20, 0x84, 0x04, 0x00, 0x80,
0x22, 0x84, 0x84, 0x01, 0x42, 0x28, 0x00, 0x20,
0x24, 0x06, 0x00, 0x48, 0x42, 0x28, 0x00, 0xf0,
0xbe, 0x33, 0x20, 0x01, 0x28, 0x4a, 0x21, 0x21,
0x82, 0x02, 0x00, 0x00, 0x48, 0x20, 0x06, 0x12,
0x20, 0x01, 0x00, 0x80, 0x21, 0x24, 0x07, 0x80,
0x81, 0x22, 0x21, 0x84, 0x06, 0x00, 0x00, 0x00,
0x28, 0x32, 0x00, 0x20, 0x88, 0x22, 0x08, 0xa2,
0x28, 0x80, 0x02, 0x80, 0x22, 0x24, 0x22, 0xb2,
0x9a, 0x0d, 0x00, 0x00, 0x20, 0x81, 0x01, 0x38,
0x18, 0x00, 0x18, 0x80, 0x01, 0x00, 0x42, 0x42,
0x80, 0x02, 0x28, 0x00, 0x80, 0x22, 0x82, 0x24,
0x02, 0x00, 0x48, 0x48, 0x80, 0x04, 0x00, 0x42,
0x00, 0x00, 0x80, 0x04, 0x00, 0x62, 0x00, 0x28,
0x20, 0x82, 0x08, 0x2f, 0xa7, 0x0e, 0x42, 0x68,
0x80, 0x02, 0x20, 0xa4, 0x21, 0x1a, 0x04, 0x00,
0x00, 0x00, 0x68, 0x80, 0x04, 0x22, 0x2a, 0x01,
0x00, 0x20, 0x04, 0x00, 0x1a, 0x04, 0x00, 0x00,
0x22, 0x20, 0x06, 0x28, 0x00, 0x00, 0x28, 0x20,
0x28, 0x04, 0x20, 0x08, 0x88, 0x22, 0x82, 0x00,
0x14, 0x1d, 0xb3, 0x00, 0x80, 0x04, 0x80, 0x02,
0x00, 0x48, 0x28, 0x80, 0x81, 0x24, 0x24, 0x02,
0x00, 0x18, 0x00, 0x22, 0x80, 0x02, 0x28, 0x00,
0x18, 0x80, 0x04, 0x00, 0x80, 0x84, 0x04, 0x20,
0x01, 0x22, 0x22, 0x00, 0x00, 0x82, 0x00, 0xa0,
0x84, 0x20, 0x04, 0x2a, 0x08, 0x88, 0x7f, 0xba,
0x06, 0x00, 0x58, 0x00, 0x00, 0x48, 0x00, 0x20,
0x84, 0x04, 0xa0, 0x14, 0x12, 0x80, 0x02, 0x80,
0x24, 0x81, 0x02, 0x00, 0x1a, 0x02, 0x62, 0x20,
0x23, 0x22, 0x02, 0x22, 0x18, 0x1a, 0x06, 0x22,
0x48, 0x00, 0x20, 0x2c, 0x28, | |
'''
<NAME>
Python F-16 subf16
outputs aircraft state vector derivative
'''
# x[0] = air speed, VT (ft/sec)
# x[1] = angle of attack, alpha (rad)
# x[2] = angle of sideslip, beta (rad)
# x[3] = roll angle, phi (rad)
# x[4] = pitch angle, theta (rad)
# x[5] = yaw angle, psi (rad)
# x[6] = roll rate, P (rad/sec)
# x[7] = pitch rate, Q (rad/sec)
# x[8] = yaw rate, R (rad/sec)
# x[9] = northward horizontal displacement, pn (feet)
# x[10] = eastward horizontal displacement, pe (feet)
# x[11] = altitude, h (feet)
# x[12] = engine thrust dynamics lag state, pow
#
# u[0] = throttle command 0.0 < u(1) < 1.0
# u[1] = elevator command in degrees
# u[2] = aileron command in degrees
# u[3] = rudder command in degrees
#
from math import sin, cos, pi
from adc import adc, adc_tf
from tgear import tgear, tgear_tf
from pdot import pdot, pdot_tf
from thrust import thrust, thrust_tf
from cx import cx
from cy import cy
from cz import cz
from cl import cl
from dlda import dlda
from dldr import dldr
from cm import cm
from cn import cn
from dnda import dnda
from dndr import dndr
from dampp import dampp, dampp_tf
from Morellif16 import Morellif16
def subf16_model(x, u, model, adjust_cy=True, multipliers=None):
    '''output aircraft state vector derivative for a given input
    The reference for the model is Appendix A of Stevens & Lewis
    if multipliers is not None, it should be a 7-tuple:
    xcg_mult, cxt_mult, cyt_mult, czt_mult, clt_mult, cmt_mult, cnt_mult
    xcg is x component of center of gravity (between 0.0 and 1.0, default 0.35)
    cxt is the x-axis aerodynamic force coefficient
    cyt is the sideforce coefficient
    czt is the z-axis force coefficient
    clt is a sum of the rolling moment coefficients
    cmt is the pitching moment coefficient
    cnt is a sum of the yawing moment coefficients

    Returns (xd, Nz, Ny, az, ay): the state derivative, the normal and side
    load factors, and the normal and side accelerations shifted to the
    pilot station (xa feet ahead of the c.g.).
    '''
    assert model == 'stevens' or model == 'morelli'
    assert len(x) == 13
    assert len(u) == 4
    assert multipliers is None or len(multipliers) == 7
    xcg = 0.35
    if multipliers is not None:
        xcg *= multipliers[0]
    # unpack controls: throttle, elevator, aileron, rudder
    thtlc, el, ail, rdr = u
    # aircraft constants; c1..c9 are the inertia-derived coefficients of
    # the moment equations (values from Stevens & Lewis, Appendix A)
    s = 300
    b = 30
    cbar = 11.32
    rm = 1.57e-3
    xcgr = .35
    he = 160.0
    c1 = -.770
    c2 = .02755
    c3 = 1.055e-4
    c4 = 1.642e-6
    c5 = .9604
    c6 = 1.759e-2
    c7 = 1.792e-5
    c8 = -.7336
    c9 = 1.587e-5
    rtod = 57.29578  # radians to degrees
    g = 32.17  # gravitational acceleration, ft/sec^2
    # derivative vector: start from a copy of the state, overwrite each slot
    xd = x.copy()
    vt = x[0]
    # the aero models below take alpha/beta in degrees
    alpha = x[1]*rtod
    beta = x[2]*rtod
    phi = x[3]
    theta = x[4]
    psi = x[5]
    p = x[6]
    q = x[7]
    r = x[8]
    alt = x[11]
    power = x[12]
    # air data computer and engine model
    amach, qbar = adc(vt, alt)
    cpow = tgear(thtlc)
    xd[12] = pdot(power, cpow)
    t = thrust(power, alt, amach)
    # normalize surface deflections (aileron by 20 deg, rudder by 30 deg)
    dail = ail/20
    drdr = rdr/30
    # component build up
    if model == 'stevens':
        # stevens & lewis (look up table version)
        cxt = cx(alpha, el)
        cyt = cy(beta, ail, rdr)
        czt = cz(alpha, beta, el)
        clt = cl(alpha, beta) + dlda(alpha, beta) * dail + dldr(alpha, beta) * drdr
        cmt = cm(alpha, el)
        cnt = cn(alpha, beta) + dnda(alpha, beta) * dail + dndr(alpha, beta) * drdr
    else:
        # morelli model (polynomial version); takes angles in radians
        cxt, cyt, czt, clt, cmt, cnt = Morellif16(alpha*pi/180, beta*pi/180, el*pi/180, ail*pi/180, rdr*pi/180, \
            p, q, r, cbar, b, vt, xcg, xcgr)
    # multipliers adjustment (scales each aero coefficient independently)
    if multipliers is not None:
        cxt *= multipliers[1]
        cyt *= multipliers[2]
        czt *= multipliers[3]
        clt *= multipliers[4]
        cmt *= multipliers[5]
        cnt *= multipliers[6]
    # add damping derivatives
    tvt = .5 / vt
    b2v = b * tvt
    cq = cbar * q * tvt
    # get ready for state equations
    d = dampp(alpha)
    cxt = cxt + cq * d[0]
    cyt = cyt + b2v * (d[1] * r + d[2] * p)
    czt = czt + cq * d[3]
    clt = clt + b2v * (d[4] * r + d[5] * p)
    cmt = cmt + cq * d[6] + czt * (xcgr-xcg)
    cnt = cnt + b2v * (d[7] * r + d[8] * p)-cyt * (xcgr-xcg) * cbar/b
    # body-axis velocity components (NOTE: `u` here shadows the control
    # input argument, which has already been fully unpacked above)
    cbta = cos(x[2])
    u = vt * cos(x[1]) * cbta
    v = vt * sin(x[2])
    w = vt * sin(x[1]) * cbta
    sth = sin(theta)
    cth = cos(theta)
    sph = sin(phi)
    cph = cos(phi)
    spsi = sin(psi)
    cpsi = cos(psi)
    qs = qbar * s
    qsb = qs * b
    rmqs = rm * qs
    gcth = g * cth
    qsph = q * sph
    ay = rmqs * cyt
    az = rmqs * czt
    # force equations
    udot = r * v-q * w-g * sth + rm * (qs * cxt + t)
    vdot = p * w-r * u + gcth * sph + ay
    wdot = q * u-p * v + gcth * cph + az
    dum = (u * u + w * w)
    xd[0] = (u * udot + v * vdot + w * wdot)/vt
    xd[1] = (u * wdot-w * udot)/dum
    xd[2] = (vt * vdot-v * xd[0]) * cbta/dum
    # kinematics
    xd[3] = p + (sth/cth) * (qsph + r * cph)
    xd[4] = q * cph-r * sph
    xd[5] = (qsph + r * cph)/cth
    # moments
    xd[6] = (c2 * p + c1 * r + c4 * he) * q + qsb * (c3 * clt + c4 * cnt)
    xd[7] = (c5 * p-c7 * he) * r + c6 * (r * r-p * p) + qs * cbar * c7 * cmt
    xd[8] = (c8 * p-c2 * r + c9 * he) * q + qsb * (c4 * clt + c9 * cnt)
    # navigation: body-to-earth rotation (direction cosine terms)
    t1 = sph * cpsi
    t2 = cph * sth
    t3 = sph * spsi
    s1 = cth * cpsi
    s2 = cth * spsi
    s3 = t1 * sth-cph * spsi
    s4 = t3 * sth + cph * cpsi
    s5 = sph * cth
    s6 = t2 * cpsi + t3
    s7 = t2 * spsi-t1
    s8 = cph * cth
    xd[9] = u * s1 + v * s3 + w * s6    # north speed
    xd[10] = u * s2 + v * s4 + w * s7   # east speed
    xd[11] = u * sth-v * s5-w * s8      # vertical speed
    # outputs
    xa = 15.0  # sets distance normal accel is in front of the c.g. (xa = 15.0 at pilot)
    az = az-xa * xd[7]  # moves normal accel in front of c.g.
    ####################################
    ###### peter additionls below ######
    if adjust_cy:
        ay = ay+xa*xd[8]  # moves side accel in front of c.g.
    # For extraction of Nz
    Nz = (-az / g) - 1  # zeroed at 1 g, positive g = pulling up
    Ny = ay / g
    return xd, Nz, Ny, az, ay
import tensorflow as tf
def subf16_model_tf(x, u, multipliers):
with tf.name_scope("subf16_model"):
xcg = 0.35
if multipliers is not None:
xcg *= multipliers[0]
thtlc = u[0]
el = u[1]
ail = u[2]
rdr = u[3]
s = 300
b = 30
cbar = 11.32
rm = 1.57e-3
xcgr = .35
he = 160.0
c1 = -.770
c2 = .02755
c3 = 1.055e-4
c4 = 1.642e-6
c5 = .9604
c6 = 1.759e-2
c7 = 1.792e-5
c8 = -.7336
c9 = 1.587e-5
rtod = 57.29578
g = 32.17
vt = x[0]
alpha = x[1]*rtod
beta = x[2]*rtod
phi = x[3]
theta = x[4]
psi = x[5]
p = x[6]
q = x[7]
r = x[8]
alt = x[11]
power = x[12]
# air data computer and engine model
amach, qbar = adc_tf(vt, alt)
cpow = tgear_tf(thtlc)
xd12 = pdot_tf(power, cpow)
t = thrust_tf(power, alt, amach)
# morelli model (polynomial version)
cxt, cyt, czt, clt, cmt, cnt = Morellif16(alpha*pi/180, beta*pi/180, el*pi/180, ail*pi/180, rdr*pi/180, \
p, q, r, cbar, b, vt, xcg, xcgr)
# multipliers adjustement
if multipliers is not None:
cxt *= multipliers[1]
cyt *= | |
= self.labels[:i] + new_labels + self.labels[i + 1:]
self.data = np.reshape(self.data, new_shape)
self.labels = new_labels
    def contract_internal(self, label1, label2, index1=0, index2=0):
        """Contract two indices of this tensor with each other (a trace).

        By default will contract the first index with label1 with the
        first index with label2. index1 and index2 can be specified to contract
        indices that are not the first with the specified label.
        """
        # positions of every axis carrying each label
        label1_indices = [i for i, x in enumerate(self.labels) if x == label1]
        label2_indices = [i for i, x in enumerate(self.labels) if x == label2]
        index_to_contract1 = label1_indices[index1]
        index_to_contract2 = label2_indices[index2]
        # np.trace sums the diagonal over the two axes, i.e. contracts them
        self.data = np.trace(self.data, axis1=index_to_contract1, axis2=
        index_to_contract2)
        # The following removes the contracted indices from the list of labels
        self.labels = [label for j, label in enumerate(self.labels)
                       if j not in [index_to_contract1, index_to_contract2]]
    # aliases for contract_internal
    trace = contract_internal
    tr = contract_internal
    def consolidate_indices(self, labels=[]):
        """Combines all indices with the same label into a single label.
        If `labels` keyword argument is non-empty, only labels in `labels` will
        be consolidated. Puts labels in alphabetical order (and reshapes data
        accordingly) if `labels` is empty.
        """
        # NOTE(review): the mutable default `labels=[]` is only read, never
        # mutated, so it is harmless here; a None default would be safer style.
        labels_unique = sorted(set(self.labels))
        if len(labels) !=0:
            #If `labels` is set, only consolidate indices in `labels`
            labels_unique=[x for x in labels_unique if x in labels]
        for p, label in enumerate(labels_unique):
            indices = [i for i, j in enumerate(self.labels) if j == label]
            # Put all of these indices together, consecutively starting at p
            for k, q in enumerate(indices):
                self.data = np.rollaxis(self.data, q, p + k)
            # Total dimension of all indices with label
            total_dim = self.data.shape[p]
            for r in range(1, len(indices)):
                total_dim = total_dim * self.data.shape[p + r]
            # New shape after consolidating all indices with label into
            # one at position p
            new_shape = (list(self.data.shape[0:p]) + [total_dim] +
                         list(self.data.shape[p + len(indices):]))
            self.data = np.reshape(self.data, tuple(new_shape))
            # Update self.labels
            # Remove all instances of label from self.labels
            new_labels = [x for x in self.labels if x != label]
            # Reinsert label at position p
            new_labels.insert(p, label)
            self.labels = new_labels
    def sort_labels(self):
        """Put labels in alphabetical order, consolidating duplicates
        (delegates to `consolidate_indices` with no label filter)."""
        self.consolidate_indices()
def copy(self):
"""Creates a copy of the tensor that does not point to the original"""
"""Never use A=B in python as modifying A will modify B"""
return Tensor(data=self.data.copy(), labels=copy.copy(self.labels))
def move_index(self, label, position):
"""Change the order of the indices by moving the first index with label
`label` to position `position`, possibly shifting other indices forward
or back in the process. """
index = self.labels.index(label)
# Move label in list
self.labels.pop(index)
self.labels.insert(position, label)
# To roll axis of self.data
# Not 100% sure why, but need to add 1 when rolling an axis backward
if position <= index:
self.data = np.rollaxis(self.data, index, position)
else:
self.data = np.rollaxis(self.data, index, position + 1)
    def move_indices(self, labels, position,
                     preserve_relative_order=False):
        """Move indices with labels in `labels` to consecutive positions
        starting at `position`. If `preserve_relative_order`==True, the
        relative order of the moved indices will be identical to their order in
        the original tensor. If not, the relative order will be determined by
        the order in the `labels` argument.

        Examples
        --------
        First initialise a random tensor.
        >>> from tncontract import random_tensor
        >>> t=random_tensor(2,3,4,5,6, labels=["a", "b", "c", "b", "d"])
        Now we move the indices labelled "d", "b" and "c" to position 0 (i.e.
        the beginning). When preserve_relative_order is True, the relative
        order of these indices is identical to the original tensor.
        >>> t.move_indices(["d","b","c"], 0, preserve_relative_order=True)
        >>> print(t)
        Tensor object:
        Data type: float64
        Number of indices: 5
        Index labels:
        0. (dim=3) b
        1. (dim=4) c
        2. (dim=5) b
        3. (dim=6) d
        4. (dim=2) a
        If, on the other hand, preserve_relative_order is False, the order of
        the indices is determined by the order in which they appear in the
        `labels` argument of `move_indices`. In this case, "d" comes first
        then the "b" indices then "c".
        >>> t=random_tensor(2,3,4,5,6, labels=["a", "b", "c", "b", "d"])
        >>> t.move_indices(["d","b","c"], 0, preserve_relative_order=False)
        >>> print(t)
        Tensor object:
        Data type: float64
        Number of indices: 5
        Index labels:
        0. (dim=6) d
        1. (dim=3) b
        2. (dim=5) b
        3. (dim=4) c
        4. (dim=2) a
        """
        # accept a single label as a convenience
        if not isinstance(labels, list):
            labels = [labels]
        # Strategy: first push every index to be moved to the END of the
        # axis list (establishing the desired relative order there), then
        # shift that trailing group back to `position`.
        if preserve_relative_order:
            orig_labels = self.labels.copy()
            n_indices_to_move = 0
            # walk labels in their original tensor order
            for label in orig_labels:
                if label in labels:
                    # Move label to end of list
                    self.move_index(label, len(self.labels) - 1)
                    n_indices_to_move += 1
        else:
            # Remove duplicates
            unique_labels = []
            for label in labels:
                if label not in unique_labels:
                    unique_labels.append(label)
            labels = unique_labels
            n_indices_to_move = 0
            # walk labels in caller-specified order; move every axis
            # carrying each label
            for label in labels:
                for i in range(self.labels.count(label)):
                    # Move label to end of list
                    self.move_index(label, len(self.labels) - 1)
                    n_indices_to_move += 1
        if position + n_indices_to_move > len(self.labels):
            raise ValueError("Specified position too far right.")
        # All indices to move are at the end of the array
        # Now put put them in desired place
        for j in range(n_indices_to_move):
            old_index = len(self.labels) - n_indices_to_move + j
            label = self.labels[old_index]
            # Move label in list
            self.labels.pop(old_index)
            self.labels.insert(position + j, label)
            # Reshape accordingly
            self.data = np.rollaxis(self.data, old_index, position + j)
def conjugate(self):
self.data = self.data.conjugate()
    def inv(self):
        """Invert the tensor data in place.

        NOTE(review): np.linalg.inv requires `data` to be a (stack of)
        square matrix/matrices; no check is performed here.
        """
        self.data = np.linalg.inv(self.data)
def add_suffix_to_labels(self, suffix):
"""Warning: by changing the labels, e.g. with this method,
the MPS will no longer be in the correct form for various MPS functions
."""
new_labels = []
for label in self.labels:
new_labels.append(label + suffix)
self.labels = new_labels
    def add_dummy_index(self, label, position=0):
        """Add an additional index to the tensor with dimension 1, and label
        specified by the index "label". The position argument specifies where
        the index will be inserted. """
        # Will insert an axis of length 1 in the first position,
        # then shift it to the requested position via move_index
        self.data = self.data[np.newaxis, :]
        self.labels.insert(0, label)
        self.move_index(label, position)
def remove_all_dummy_indices(self, labels=None):
"""Removes all dummy indices (i.e. indices with dimension 1)
which have labels specified by the labels argument. None
for the labels argument implies all labels."""
orig_shape = self.shape
for i, x in enumerate(self.labels):
if labels != None:
if x in labels and orig_shape[i] == 1:
self.move_index(x, 0)
self.data = self.data[0]
self.labels = self.labels[1:]
elif orig_shape[i] == 1:
self.move_index(x, 0)
self.data = self.data[0]
self.labels = self.labels[1:]
def index_dimension(self, label):
"""Will return the dimension of the first index with label=label"""
index = self.labels.index(label)
return self.data.shape[index]
    def to_matrix(self, row_labels):
        """
        Convert tensor to a matrix regarding row_labels as row index
        (output) and the remaining indices as column index (input).

        Delegates to the module-level `tensor_to_matrix` helper.
        """
        return tensor_to_matrix(self, row_labels)
def pad_index(self, label, inc, before=False):
"""
Increase the dimension of first index with `label` by `inc` by padding
with zeros.
By default zeros are appended after the last edge of the axis in
question, e.g., [1,2,3] -> [1,2,3,0..0]. If `before=True` the zeros
will be padded before the first edge of the index instead,
e.g., [1,2,3] -> [0,..,0,1,2,3].
See also
--------
numpy.pad
"""
if before:
npad = ((inc, 0),)
else:
npad = ((0, inc),)
index = self.labels.index(label)
npad = ((0, 0),) * (index) + npad + ((0, 0),) * (self.rank - index - 1)
self.data = np.pad(self.data, npad, mode='constant', constant_values=0)
    def contract(self, *args, **kwargs):
        """
        A method that calls the function `contract`, passing `self` as the
        first argument.

        The result replaces this tensor's data and labels in place.

        See also
        --------
        contract (function)
        """
        t = contract(self, *args, **kwargs)
        self.data = t.data
        self.labels = t.labels
    @property
    def shape(self):
        """Tuple of index dimensions (mirrors `numpy.ndarray.shape`)."""
        return self.data.shape
    @property
    def rank(self):
        """Number of indices of the tensor."""
        return len(self.shape)
    def norm(self):
        """Return the frobenius norm of the tensor, i.e. the square root of
        the sum of absolute values squared of every element. """
        return np.linalg.norm(self.data)
class ToContract():
"""A simple class that contains a Tensor and a list of indices (labels) of
that tensor which are to be contracted with another tensor. Used in
__mul__, __rmul__ for convenient tensor contraction."""
def __init__(self, tensor, labels):
self.tensor = tensor
self.labels = labels
def __mul__(self, other):
# If label argument is not a tuple, simply use that as the argument to
# contract function. Otherwise convert to a list.
if not isinstance(self.labels, tuple):
labels1 = self.labels
else:
labels1 = list(self.labels)
if not isinstance(other.labels, tuple):
labels2 = other.labels
else:
labels2 = list(other.labels)
return contract(self.tensor, other.tensor, | |
<reponame>oleksiilytvyn/grailkit
# -*- coding: UTF-8 -*-
"""
Implements access to bible files.
Grail bible format consists of standard grail DNA format plus
two additional tables: `books` and `verses`.
SQL structure:
.. code:: sql
CREATE TABLE books
(id INTEGER PRIMARY KEY AUTOINCREMENT,
osisid TEXT,
name TEXT,
title TEXT,
abbr TEXT );
CREATE TABLE verses(osisid TEXT, book INT, chapter INT, verse INT, text TEXT );
:copyright: (c) 2017-2020 by <NAME> (http://alexlitvin.name).
:license: MIT, see LICENSE for more details.
"""
from __future__ import annotations
from typing import Dict, List, Optional
import os
import re
import glob
import json
import sqlite3
import logging
from grailkit import PATH_SHARED
from grailkit.util import copy_file, default_key, file_exists
from grailkit.dna import DNA
def verse_factory(cursor: sqlite3.Cursor, row: sqlite3.Row) -> Verse:
    """Parse sqlite row into Verse object.

    Intended for use as a sqlite3 ``row_factory``; the ``cursor`` argument
    is required by that protocol but is not used here.

    Args:
        cursor (sqlite3.Cursor): sqlite3 cursor
        row (sqlite3.Row): row data

    Returns:
        Verse object parsed from sqlite row
    """
    return Verse.from_sqlite(row)
def book_factory(cursor: sqlite3.Cursor, row: sqlite3.Row) -> Book:
    """Parse sqlite row into Book object.

    Intended for use as a sqlite3 ``row_factory``; the ``cursor`` argument
    is required by that protocol but is not used here.

    Args:
        cursor (sqlite3.Cursor): sqlite3 cursor
        row (sqlite3.Row): row data

    Returns:
        Book object parsed from sqlite row
    """
    return Book.from_sqlite(row)
class BibleError(Exception):
    """Base error thrown when a bible file could not be read."""
    pass
class Verse:
    """Representation of a single Bible verse, as stored in the `verses` table."""
    def __init__(self):
        """Create an empty verse (book 1, chapter 1, verse 1, empty text)."""
        self._osisid = ""
        self._book = ""
        self._book_id = 1
        self._chapter = 1
        self._verse = 1
        self._text = ""
    @property
    def type(self) -> int:
        """Return type of the DNA node (int)."""
        return DNA.TYPE_VERSE
    @property
    def book(self) -> str:
        """Return book name (str)."""
        return self._book
    @property
    def book_id(self) -> int:
        """Return book id (int)."""
        return self._book_id
    @property
    def chapter(self) -> int:
        """Return chapter number (int)."""
        return self._chapter
    @property
    def verse(self) -> int:
        """Return verse number (int)."""
        return self._verse
    @property
    def reference(self) -> str:
        """Return complete reference string.
        Examples:
            Genesis 1:1
            1 Corinthians 2:12
        """
        return "%s %d:%d" % (self._book, self._chapter, self._verse)
    @property
    def name(self) -> str:
        """Return verse text followed by its reference on a new line."""
        # (fixed: removed a stray trailing semicolon)
        return "%s\n%s" % (self._text, self.reference)
    @property
    def text(self) -> str:
        """Text of verse."""
        return self._text
    def parse(self, row: sqlite3.Row) -> None:
        """Parse sqlite row into verse.

        Expected row layout (per the assignments below):
        (osisid, book_id, chapter, verse, text, book_name).
        """
        self._book = row[5]
        self._book_id = row[1]
        self._chapter = row[2]
        self._verse = row[3]
        self._text = row[4]
        self._osisid = row[0]
    @staticmethod
    def from_sqlite(row: sqlite3.Row) -> Verse:
        """Parse sqlite row and return Verse.

        Args:
            row: sqlite3 row

        Returns:
            Verse parsed from given sqlite row
        """
        verse = Verse()
        verse.parse(row)
        return verse
class Book:
    """A single book of the bible, as stored in the `books` table."""
    def __init__(self):
        """Initialise an empty book record."""
        self._id = 0
        self._abbr = ""
        self._name = ""
        self._title = ""
        self._osisid = ""
    @property
    def type(self) -> int:
        """Return type of DNA node (int)."""
        return DNA.TYPE_BOOK
    @property
    def id(self) -> int:
        """Numeric identifier of the book (int)."""
        return self._id
    @property
    def abbr(self) -> str:
        """Abbreviated book name (str)."""
        return self._abbr
    @property
    def name(self) -> str:
        """Short display name of the book (str)."""
        return self._name
    @property
    def title(self) -> str:
        """Full name of book, it might be bigger than name."""
        return self._title
    @property
    def osisid(self) -> str:
        """OSIS identifier, can be used for cross-referencing."""
        return self._osisid
    def parse(self, row: sqlite3.Row) -> None:
        """Fill this book from a sqlite row: (id, osisid, name, title, abbr)."""
        self._id, self._osisid, self._name, self._title, self._abbr = (
            row[0], row[1], row[2], row[3], row[4])
    @staticmethod
    def from_sqlite(row: sqlite3.Row) -> Book:
        """Build and return a Book parsed from the given sqlite row."""
        book = Book()
        book.parse(row)
        return book
class BibleInfo:
    """Read-only metadata describing a bible file."""

    def __init__(self):
        """Create an empty metadata object with default values."""
        self._file = ""
        self._date = ""
        self._title = ""
        self._subject = ""
        self._language = ""
        self._publisher = ""
        self._copyright = ""
        self._identifier = ""
        self._description = ""
        self._version = 1

    @property
    def file(self) -> str:
        """File location."""
        return self._file

    @property
    def date(self) -> str:
        """Date of publication."""
        return self._date

    @property
    def title(self) -> str:
        """Bible title."""
        return self._title

    @property
    def subject(self) -> str:
        """Subject of a bible."""
        return self._subject

    @property
    def language(self) -> str:
        """Language of bible."""
        return self._language

    @property
    def publisher(self) -> str:
        """Publisher information."""
        return self._publisher

    @property
    def copyright(self) -> str:
        """Copyright information."""
        return self._copyright

    @property
    def identifier(self) -> str:
        """Bible identifier, must be unique."""
        return self._identifier

    @property
    def description(self) -> str:
        """A little description of Bible."""
        return self._description

    @property
    def version(self) -> int:
        """Schema version number."""
        return self._version

    @staticmethod
    def from_json(data: dict) -> BibleInfo:
        """Fill properties from a parsed json object.

        Args:
            data (dict): parsed json object

        Returns:
            BibleInfo object
        """
        info = BibleInfo()
        info._file = default_key(data, 'file', '')
        # the remaining keys all share default_key's own fallback value
        for key in ('date', 'title', 'subject', 'language', 'publisher',
                    'copyright', 'identifier', 'description'):
            setattr(info, '_' + key, default_key(data, key))
        # NOTE(review): 'version' is not read from `data`; it keeps the
        # default of 1 — confirm this matches the file schema.
        return info
class Bible(DNA):
"""Representation of grail bible file.
This class gives you read only access to file
"""
# file extension
_file_extension = ".grail-bible"
def __init__(self, file_path: str):
"""Read grail bible file into Bible class.
Args:
file_path (str): file location
Raises:
DNAError if file does not exists
"""
super(Bible, self).__init__(file_path, create=False)
# read bible info
self._date = self._get(0, "date", default="")
self._title = self._get(0, "title", default="Untitled")
self._subject = self._get(0, "subject", default="")
self._language = self._get(0, "language", default="unknown")
self._publisher = self._get(0, "publisher", default="Unknown Publisher")
self._copyright = self._get(0, "copyright", default="copyright information unavailable")
self._identifier = self._get(0, "identifier", default="NONE")
self._description = self._get(0, "description", default="")
self._version = self._get(0, "version", default=1)
    @property
    def date(self) -> str:
        """Date of publication."""
        return self._date

    @property
    def title(self) -> str:
        """Bible title."""
        return self._title

    @property
    def subject(self) -> str:
        """Subject of a bible."""
        return self._subject

    @property
    def language(self) -> str:
        """Language of bible."""
        return self._language

    @property
    def publisher(self) -> str:
        """Publisher information."""
        return self._publisher

    @property
    def copyright(self) -> str:
        """Copyright information."""
        return self._copyright

    @property
    def identifier(self) -> str:
        """Bible identifier, must be unique."""
        return self._identifier

    @property
    def description(self) -> str:
        """A little description of Bible."""
        return self._description

    @property
    def version(self) -> int:
        """Schema version number.

        Note: annotation corrected from ``str`` to ``int`` — the stored
        value is read in ``__init__`` with ``default=1``.
        """
        return self._version
    def books(self) -> List[Book]:
        """Return list of all books.

        Runs a SELECT over the whole `books` table; rows are adapted to
        Book objects by `book_factory`.
        """
        return self._db.all("""SELECT
            `books`.`id`,
            `books`.`osisid`,
            `books`.`name`,
            `books`.`title`,
            `books`.`abbr`
            FROM books""", factory=book_factory)
    def book(self, book: int) -> Book:
        """Return single book.

        Args:
            book (int): book id

        Returns:
            Book with the given id, built by `book_factory`.
        """
        return self._db.get("""SELECT
            `books`.`id`,
            `books`.`osisid`,
            `books`.`name`,
            `books`.`title`,
            `books`.`abbr` FROM books WHERE id = ?""", (book,), factory=book_factory)
    def chapter(self, book: int, chapter: int) -> List[Verse]:
        """Return all verses in chapter.

        Args:
            book (int): book id
            chapter (int): chapter id

        Returns:
            Verses ordered by verse number; the book name is joined in
            so each Verse can render its reference.
        """
        return self._db.all("""SELECT
            `verses`.`osisid`,
            `verses`.`book`,
            `verses`.`chapter`,
            `verses`.`verse`,
            `verses`.`text`,
            `books`.`name` as book_name
            FROM verses
            LEFT JOIN `books` ON `verses`.`book` = `books`.`id`
            WHERE `verses`.`book` = ? AND `verses`.`chapter` = ?
            ORDER BY `verses`.`verse` ASC""", (book, chapter), factory=verse_factory)
    def verse(self, book: int, chapter: int, verse: int) -> Verse:
        """Return single verse.

        Args:
            book (int): book id
            chapter (int): chapter id
            verse (int): verse number

        Returns:
            Verse built by `verse_factory`, with the book name joined in.
        """
        return self._db.get("""SELECT
            `verses`.`osisid`,
            `verses`.`book`,
            `verses`.`chapter`,
            `verses`.`verse`,
            `verses`.`text`,
            `books`.`name` as book_name
            FROM verses
            LEFT JOIN `books` ON `verses`.`book` = `books`.`id`
            WHERE `verses`.`book` = ?
            AND `verses`.`chapter` = ?
            AND `verses`.`verse` = ?""",
                            (book, chapter, verse), factory=verse_factory)
    def count_verses(self, book: int, chapter: int) -> int:
        """Return number of verses in chapter.

        Args:
            book (int): book id
            chapter (int): chapter id
        """
        return self._db.get("SELECT COUNT(*) as count FROM verses WHERE book = ? AND chapter = ?",
                            (book, chapter))["count"]
    def count_chapters(self, book: int) -> int:
        """Return number of chapters in book.

        Args:
            book (int): book id
        """
        # counts rows with verse = 1 — assumes every chapter contains a
        # verse numbered 1, which holds for standard versification.
        return self._db.get("SELECT COUNT(*) as count FROM verses WHERE book = ? AND verse = 1",
                            (book,))["count"]
    def match_book(self, keyword: str) -> List[Book]:
        """Find books by keyword.

        Args:
            keyword (str): search phrase

        Returns:
            Books whose title/short/full columns contain the keyword
            (case-insensitive substring match).
        """
        # wrap for SQL LIKE substring matching
        keyword = "%" + keyword + "%"
        # NOTE(review): `lowercase(...)` is not a built-in SQLite function
        # (the built-in is lower()); presumably it is registered on the
        # connection via create_function — confirm.
        # NOTE(review): this WHERE clause references `short` and `full`
        # columns, while every other query uses `name`/`title`/`abbr` —
        # verify these columns exist in the books schema.
        return self._db.all("""SELECT
            `books`.`id`,
            `books`.`osisid`,
            `books`.`name`,
            `books`.`title`,
            `books`.`abbr`
            FROM books
            WHERE
            lowercase(`title`) LIKE lowercase( ? )
            OR lowercase(`short`) LIKE lowercase( ? )
            OR lowercase(`full`) LIKE lowercase( ? )
            """, (keyword, keyword, keyword), factory=book_factory)
def match_reference(self, keyword: str, limit: int = 3) -> List[Verse]:
"""find verse by keyword.
Args:
keyword (str): keywords
limit (int): limit results
"""
# default values
chapter = 1
verse = 1
keyword = keyword.lstrip().rstrip().lower()
# match book
match = re.search(r'([0-9]+)$', keyword)
# match book and chapter
match_chapter = re.search(r'([0-9]+)([\D])([0-9]+)$', keyword)
# match book, chapter and verse
match_verse = re.search(r'([0-9]+)([\D])([0-9]+)-([0-9]+)$', keyword)
if match_verse:
chapter = int(match_verse.group(1))
verse = int(match_verse.group(3))
keyword = re.sub(r'([0-9]+)([\D])([0-9]+)-([0-9]+)$', '', keyword)
| |
""" imported math and Mathematics class """
import math
class Mathematics():
    """Interactive, stdin/stdout driven collection of math operations.

    Every operation reads its operands with input() and prints the result;
    none of the methods return values.  ``main`` drives the menu loop and
    ``continue_ask`` asks whether to stay in this menu.
    """

    @classmethod
    def print_menu(cls):
        """Print the operation menu."""
        print("\n\n***** Mathematical Operations *****")
        print("1- Factorial\n2- Multiplication Table\n3- Fibonacci\n4- Sum up to given num")
        print("5- Decimal to others\n6- Advanced Calculator\n7- Divide remain")
        print("8- Greatest Common Divisor\n9- Logarithm")
        print("10- Power of the given number\n11- Square Root\n12- Radian to Degree")
        print("13- Degree to Radian\n14- Radian to Sinus\n15- Radian to Cosinus")
        print("16- Radian to Tangent\n17- Sinus to Radian")
        print("18- Cosinus to Radian\n19- Tangent to Radian\n20- Basic Calculator")
        print("00- EXIT to Main Menu\n99- EXIT")

    @classmethod
    def factorial(cls):
        """Calculate n! for a user-supplied integer.

        Bug fix: the accumulator used to be read from the user
        ("Input Factorial"), which scaled every result by an arbitrary
        starting value; factorial must always accumulate from 1.
        """
        print("***** WELCOME TO FACTORIAL CALCULATOR *****")
        num = int(input(" Input number : "))
        fact = 1  # was erroneously int(input(" Input Factorial : "))
        if num < 0:
            print("Sorry, factorial does not exist for negative numbers\n**********************")
        elif num == 0:
            print("The factorial of 0 is 1\n**********************")
        else:
            for i in range(1, num + 1):
                fact = fact * i
            print("The factorial of", num, "is", fact, "\n**********************")

    @classmethod
    def multiplication_table(cls):
        """Print the multiplication table of a number, up to that number."""
        print("***** WELCOME TO MULTIPLICATION TABLE CALCULATOR *****")
        num1 = int(input(" Input number :"))
        for i in range(1, num1 + 1):
            print(num1, 'x', i, '=', i * num1, "\n************************")

    @classmethod
    def fibonacci(cls):
        """Print the first n Fibonacci numbers."""
        print("***** WELCOME TO FIBONACCI CALCULATOR *****")
        nterms = int(input("How many terms? "))
        first, second = 0, 1
        count = 0
        if nterms <= 0:
            print("Please enter a positive integer")
        elif nterms == 1:
            print("Fibonacci sequence upto", nterms, ":")
            print(first)
        else:
            print("Fibonacci sequence:\n")
            while count < nterms:
                print(first)
                nth = first + second
                first = second
                second = nth
                count += 1
            print("\n ***********************")

    @classmethod
    def sum_up_to_num(cls):
        """Print the sum 1 + 2 + ... + n for a user-supplied n."""
        print("***** WELCOME TO SUM UP TO NUMBER CALCULATOR *****")
        num1 = int(input("Enter a positive number : "))
        if num1 < 0:
            print("Enter a postive number.")
        else:
            sum1 = 0
            while num1 > 0:
                sum1 += num1
                num1 -= 1
            print(f"The sum is : {sum1}")

    @classmethod
    def dec_to_others(cls):
        """Convert a decimal number to binary, octal and hexadecimal."""
        print("***** WELCOME DECIMAL CALCULATOR *****")
        dec = int(input("Enter the decimal number : "))
        print("The decimal value of", dec, "is: ")
        print(bin(dec), "in binary.")
        print(oct(dec), "in octal.")
        print(hex(dec), "in hexadecimal.")

    @classmethod
    def advanced_calculator(cls):
        """Evaluate an arbitrary expression typed by the user.

        SECURITY: eval() on raw user input executes arbitrary Python —
        acceptable only for a local, trusted, interactive tool like this
        one; never expose this to untrusted input.
        """
        print("***** WELCOME TO ADVANCED CALCULATOR *****")
        calc = input("Type calculation:\n")
        print("Answer: " + str(eval(calc)))

    @classmethod
    def fmod(cls):
        """Print the floating-point remainder of a division."""
        print("***** WELCOME TO DIVIDE REMAIN CALCULATOR *****")
        div = float(input("Enter the number to divide : "))
        remain = float(input("Enter the division number : "))
        result = math.fmod(div, remain)
        print(f"Answer: {result}")

    @classmethod
    def gcd(cls):
        """Print the greatest common divisor of two integers."""
        print("***** WELCOME TO GREATEST COMMON DIVISOR CALCULATOR *****")
        first = int(input("Enter the first number : "))
        second = int(input("Enter the second number : "))
        result = math.gcd(first, second)
        print(f"The result is : {result}")

    @classmethod
    def log(cls):
        """Print log of the first number in the base of the second."""
        print("***** WELCOME TO LOG OF FIRST VALUE OF SECOND CALCULATOR *****")
        first = int(input("Enter the first number : "))
        second = int(input("Enter the second number : "))
        result = math.log(first, second)
        print(f"The result is : {result}")

    @classmethod
    def power(cls):
        """Print the first number raised to the power of the second."""
        print("***** WELCOME TO 1. NUMBERS POWER OF 2. NUMBER CALCULATOR *****")
        first = int(input("Enter the first number : "))
        second = int(input("Enter the second number : "))
        result = math.pow(first, second)
        print(f"The result is : {result}")

    @classmethod
    def sqrt(cls):
        """Print the square root of a number."""
        print("***** WELCOME TO SQUARE ROOT CALCULATOR *****")
        number = math.sqrt(int(input("Enter the number : ")))
        print(f"The result is : {number}")

    @classmethod
    def rad_deg(cls):
        """Convert radians to degrees and print the result."""
        print("***** WELCOME TO RADIAN TO DEGREE CONVERTER *****")
        number = math.degrees(float(input("Enter the radian : ")))
        print(f"Entered radian is equal to {number} degrees.")

    @classmethod
    def deg_rad(cls):
        """Convert degrees to radians and print the result."""
        print("***** WELCOME TO DEGREE TO RADIAN CONVERTER *****")
        number = math.radians(float(input("Enter the degree : ")))
        print(f"Entered degree is equal to {number} radian.")

    @classmethod
    def sin(cls):
        """Print the sine of an angle given in radians."""
        print("***** WELCOME TO RADIAN TO SINUS CONVERTER *****")
        number = math.sin(float(input("Enter the radian : ")))
        print(f"Entered radian is equal to sin {number} .")

    @classmethod
    def cos(cls):
        """Print the cosine of an angle given in radians."""
        print("***** WELCOME TO RADIAN TO COSINUS CONVERTER *****")
        number = math.cos(float(input("Enter the radian : ")))
        print(f"Entered radian is equal to cos {number} .")

    @classmethod
    def tan(cls):
        """Print the tangent of an angle given in radians."""
        print("***** WELCOME TO RADIAN TO TANGENT CONVERTER *****")
        number = math.tan(float(input("Enter the radian : ")))
        print(f"Entered radian is equal to tan {number} .")

    @classmethod
    def sin_rad(cls):
        """Print the arcsine (in radians) of a sine value.

        Bug fix: the input was parsed with int(), but asin's domain is
        [-1, 1], so fractional inputs (the normal case) crashed; it is now
        parsed with float(), consistent with cos_rad and tan_rad.
        """
        print("***** WELCOME TO SINUS TO RADIAN CONVERTER *****")
        number = math.asin(float(input("Enter the SIN : ")))
        print(f"Entered sin is equal to radian {number} .")

    @classmethod
    def cos_rad(cls):
        """Print the arccosine (in radians) of a cosine value."""
        print("***** WELCOME TO COSINUS TO RADIAN CONVERTER *****")
        number = math.acos(float(input("Enter the COS : ")))
        print(f"Entered cos is equal to radian {number} .")

    @classmethod
    def tan_rad(cls):
        """Print the arctangent (in radians) of a tangent value."""
        print("***** WELCOME TO TANGENT TO RADIAN CONVERTER *****")
        number = math.atan(float(input("Enter the TAN : ")))
        print(f"Entered tan is equal to radian {number} .")

    def main(self):
        """Menu loop: dispatch the chosen operation until exit."""
        work_on = True
        while work_on:
            self.print_menu()
            choice = input("Enter your choice : ")
            if choice == "1":
                self.factorial()
                self.continue_ask()
            elif choice == "2":
                self.multiplication_table()
                self.continue_ask()
            elif choice == "3":
                self.fibonacci()
                self.continue_ask()
            elif choice == "4":
                self.sum_up_to_num()
                self.continue_ask()
            elif choice == "5":
                self.dec_to_others()
                self.continue_ask()
            elif choice == "6":
                self.advanced_calculator()
                self.continue_ask()
            elif choice == "7":
                self.fmod()
                self.continue_ask()
            elif choice == "8":
                self.gcd()
                self.continue_ask()
            elif choice == "9":
                self.log()
                self.continue_ask()
            elif choice == "10":
                self.power()
                self.continue_ask()
            elif choice == "11":
                self.sqrt()
                self.continue_ask()
            elif choice == "12":
                self.rad_deg()
                self.continue_ask()
            elif choice == "13":
                self.deg_rad()
                self.continue_ask()
            elif choice == "14":
                self.sin()
                self.continue_ask()
            elif choice == "15":
                self.cos()
                self.continue_ask()
            elif choice == "16":
                self.tan()
                self.continue_ask()
            elif choice == "17":
                self.sin_rad()
                self.continue_ask()
            elif choice == "18":
                self.cos_rad()
                self.continue_ask()
            elif choice == "19":
                self.tan_rad()
                self.continue_ask()
            elif choice == "20":
                calc = Calculator()
                calc.main()
            elif choice == "99":
                self.quit()
            elif choice == "00":
                break
            else:
                # Bug fix: message previously said "1-19" although the menu
                # offers 20 operations plus the 00 exit option.
                print("INVALID INPUT! PLEASE CHOOSE BETWEEN 1-20 OR 00/99 TO EXIT")
                continue

    def continue_ask(self):
        """Ask whether to continue in this menu or quit.

        NOTE: answering "Y" re-enters main() recursively rather than
        returning to the existing loop; fine for interactive use, but
        very long sessions grow the call stack.
        """
        continue_on = True
        while continue_on:
            print("Do you want to continue Mathematic Calculators (Y/N) ? ")
            continue_ans = str(input().upper())
            if continue_ans == "Y":
                self.main()
            elif continue_ans == "N":
                self.quit()
            else:
                print("INVALID INPUT. PLEASE CHOOSE Y OR N ")
                continue

    @classmethod
    def quit(cls):
        """Quit the program by raising SystemExit."""
        raise SystemExit
'''
Calculator
'''
class Calculator():
    """Simple two-operand arithmetic; every operation is a classmethod."""

    @classmethod
    def add(cls, number1, number2):
        """Return the sum of *number1* and *number2*."""
        total = number1 + number2
        return total
@classmethod
def substract(cls, number1, number2):
"""substract method"""
return number1 - number2
@classmethod
def multiply(cls, number1, number2):
"""multiply method"""
return number1 - number2
@classmethod
def divide(cls, number1, number2):
"""division method"""
return number1 / number2
@classmethod
def modulo(cls, number1, number2):
"""modulo method"""
return number1 % number2
@classmethod
def power(cls,number1, number2):
"""power method"""
return number1 ** number2
@classmethod
def floor_division(cls,number1, number2):
"""floor division method"""
return number1 // number2
def main(self):
"""main menu"""
while True:
print("Please select an operation :\n1- Add\n2- Substract")
print("3- Multiply\n4- Divide")
print("5- Modulo\n6- Power of the given number\n7- Floor Division")
print("99- EXIT to Maths Menu")
select = float(input("Select operation : "))
if select == 1:
num1 = float(input("Enter number 1 : "))
num2 = float(input("Enter number 2 : "))
print(num1 , " + " , num2 , " = " , self.add(num1, num2))
cont = input("Do you want to continue y/n ")
if cont == "n":
break
elif select == 2:
num1 = float(input("Enter number 1 : "))
num2 = float(input("Enter number 2 : "))
print(num1 , " - ", num2 , " = " , self.substract(num1, num2))
cont = input("Do you want to continue y/n ")
if cont == "n":
break
elif select == 3:
num1 = float(input("Enter number 1 : "))
num2 = float(input("Enter number 2 : "))
print(num1 , " * ", num2 , " = " , self.multiply(num1, num2))
cont = input("Do you want to continue y/n ")
if cont == "n":
break
elif select == 4:
num1 = float(input("Enter number 1 : "))
num2 = float(input("Enter number 2 : "))
print(num1 , " / ", num2 , " = " , self.divide(num1, num2))
| |
<filename>tensorflow_datasets/text/civil_comments.py<gh_stars>0
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CivilComments from Jigsaw Unintended Bias Kaggle Competition."""
import ast
import csv
import os
import numpy as np
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
# General (main) citation for CivilComments and CivilCommentsIdentities.
_CITATION = """
@article{DBLP:journals/corr/abs-1903-04561,
author = {<NAME> and
<NAME> and
<NAME> and
<NAME> and
<NAME>},
title = {Nuanced Metrics for Measuring Unintended Bias with Real Data for Text
Classification},
journal = {CoRR},
volume = {abs/1903.04561},
year = {2019},
url = {http://arxiv.org/abs/1903.04561},
archivePrefix = {arXiv},
eprint = {1903.04561},
timestamp = {Sun, 31 Mar 2019 19:01:24 +0200},
biburl = {https://dblp.org/rec/bib/journals/corr/abs-1903-04561},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""
# Citation for CivilCommentsCovert.
_COVERT_CITATION = """
@inproceedings{lees-etal-2021-capturing,
title = "Capturing Covertly Toxic Speech via Crowdsourcing",
author = "<NAME> and
Borkan, Daniel and
<NAME> and
<NAME> and
<NAME>",
booktitle = "Proceedings of the First Workshop on Bridging Human{--}Computer Interaction and Natural Language Processing",
month = apr,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2021.hcinlp-1.3",
pages = "14--20"
}
"""
# Citation for CivilComments Toxic Spans.
_SPANS_CITATION = """
@inproceedings{pavlopoulos-etal-2021-semeval,
title = "{S}em{E}val-2021 Task 5: Toxic Spans Detection",
author = "<NAME> and Sorensen, Jeffrey and <NAME> and <NAME>",
booktitle = "Proceedings of the 15th International Workshop on Semantic Evaluation (SemEval-2021)",
month = aug,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.semeval-1.6",
doi = "10.18653/v1/2021.semeval-1.6",
pages = "59--69",
}
"""
# Citation for CivilComments Context.
_CONTEXT_CITATION = """
@misc{pavlopoulos2020toxicity,
title={Toxicity Detection: Does Context Really Matter?},
author={<NAME> and <NAME> and <NAME> and <NAME> and <NAME>},
year={2020}, eprint={2006.00998}, archivePrefix={arXiv}, primaryClass={cs.CL}
}
"""
_COMMON_DESCRIPTION = """
This version of the CivilComments Dataset provides access to the primary
seven labels that were annotated by crowd workers, the toxicity and other
tags are a value between 0 and 1 indicating the fraction of annotators that
assigned these attributes to the comment text.
The other tags are only available for a fraction of the input examples. They
are currently ignored for the main dataset; the CivilCommentsIdentities set
includes those labels, but only consists of the subset of the data with them.
The other attributes that were part of the original CivilComments release are
included only in the raw data. See the Kaggle documentation for more details
about the available features.
The comments in this dataset come from an archive of the Civil Comments
platform, a commenting plugin for independent news sites. These public comments
were created from 2015 - 2017 and appeared on approximately 50 English-language
news sites across the world. When Civil Comments shut down in 2017, they chose
to make the public comments available in a lasting open archive to enable future
research. The original data, published on figshare, includes the public comment
text, some associated metadata such as article IDs, timestamps and
commenter-generated "civility" labels, but does not include user ids. Jigsaw
extended this dataset by adding additional labels for toxicity, identity
mentions, as well as covert offensiveness. This data set is an exact replica of
the data released for the Jigsaw Unintended Bias in Toxicity Classification
Kaggle challenge. This dataset is released under CC0, as is the underlying
comment text.
For comments that have a parent_id also in the civil comments data, the
text of the previous comment is provided as the "parent_text" feature. Note
that the splits were made without regard to this information, so using previous
comments may leak some information. The annotators did not have access to the
parent text when making the labels.
"""
_CC_DESCRIPTION = """
The CivilComments set here includes all the data, but only the basic seven
labels (toxicity, severe_toxicity, obscene, threat, insult, identity_attack, and
sexual_explicit).
"""
_CC_IDENTITIES_DESCRIPTION = """
The CivilCommentsIdentities set here includes an extended set of identity labels
in addition to the basic seven labels. However, it only includes the subset
(roughly a quarter) of the data with all these features.
"""
_CC_COVERT_DESCRIPTION = """
The CivilCommentsCovert set is a subset of CivilCommentsIdentities with ~20% of
the train and test splits further annotated for covert offensiveness, in
addition to the toxicity and identity labels. Raters were asked to categorize
comments as one of explicitly, implicitly, not, or not sure if offensive, as
well as whether it contained different types of covert offensiveness. The full
annotation procedure is detailed in a forthcoming paper at
https://sites.google.com/corp/view/hciandnlp/accepted-papers.
"""
_CC_SPANS_DESCRIPTION = """
The CivilComments Toxic Spans are a subset of CivilComments that is
labeled at the span level - the indices of all character (unicode codepoints)
boundaries that were tagged as toxic by a majority of the annotators is
returned in a 'spans' feature.
"""
_CC_CONTEXT_DESCRIPTION = """
The CivilComments Toxic Spans are a subset of CivilComments that was
labeled by making available to the labelers the parent_text. It includes
a contextual_toxicity feature.
"""
# Source archive for all CivilComments configs.
_DOWNLOAD_URL = 'https://storage.googleapis.com/jigsaw-unintended-bias-in-toxicity-classification/civil_comments_v1.2.zip'

# Identity-mention labels; values are the fraction of annotators (0..1)
# that assigned the attribute to a comment.
IDENTITY_LABELS = [
    'male', 'female', 'transgender', 'other_gender', 'heterosexual',
    'homosexual_gay_or_lesbian', 'bisexual', 'other_sexual_orientation',
    'christian', 'jewish', 'muslim', 'hindu', 'buddhist', 'atheist',
    'other_religion', 'black', 'white', 'asian', 'latino',
    'other_race_or_ethnicity', 'physical_disability',
    'intellectual_or_learning_disability', 'psychiatric_or_mental_illness',
    'other_disability'
]

# Covert-offensiveness labels, added only in 'covert' mode (see _labels).
COVERT_LABELS = [
    'explicitly_offensive', 'implicitly_offensive', 'not_sure_offensive',
    'not_offensive', 'covert_humor', 'covert_obfuscation',
    'covert_emoticons_emojis', 'covert_sarcasm', 'covert_microaggression',
    'covert_masked_harm', 'covert_political'
]
def _labels(mode):
"""Return the list of label features appropriate for the mode."""
if mode == 'spans':
return ['spans']
labels = [
'toxicity', 'severe_toxicity', 'obscene', 'threat', 'insult',
'identity_attack', 'sexual_explicit'
]
if mode in ['identity', 'covert']:
labels += IDENTITY_LABELS
if mode == 'covert':
labels += COVERT_LABELS
if mode == 'context':
labels += ['contextual_toxicity']
return labels
def _parse_common(row):
"""Parse common elements to TF Example from CSV row for non-spans mode."""
example = {}
example['id'] = row['id']
example['text'] = row['comment_text']
example['parent_text'] = row['parent_text']
parent_id = row['parent_id'] or 0
example['parent_id'] = int(float(parent_id))
example['article_id'] = int(row['article_id'])
return example
def _parse_row_as_example(row, mode):
  """Parse elements to TF Example, as appropriate for specified mode.

  Returns None when any label required by the mode is missing, so the
  caller can skip incomplete rows.
  """
  example = _parse_common(row)
  labels = _labels(mode)
  if any(not row[label] for label in labels):
    return None
  for label in labels:
    example[label] = float(row[label])
  return example
def _parse_spans_row_as_example(row):
  """Parse elements to TF Example for toxic spans mode.

  The 'spans' CSV column holds a Python-literal list of character indices;
  it is decoded safely with ast.literal_eval, never eval.
  """
  example = _parse_common(row)
  span_indices = ast.literal_eval(row['spans'])
  example['spans'] = np.array(span_indices, dtype=np.int32)
  return example
class CivilCommentsConfig(tfds.core.BuilderConfig):
  """Configuration for `CivilComments`."""

  def __init__(self, name, description, mode):
    """Create a config; `mode` selects the label set (see _labels)."""
    super().__init__(name=name, description=description)
    self.mode = mode
class CivilComments(tfds.core.GeneratorBasedBuilder):
  """Classification and tagging of 2M comments on news sites.

  This version of the CivilComments Dataset provides access to the primary
  seven labels that were annotated by crowd workers, the toxicity and other
  tags are a value between 0 and 1 indicating the fraction of annotators that
  assigned these attributes to the comment text.

  The other tags are only available for a fraction of the input examples. They
  are currently ignored for the main dataset; the CivilCommentsIdentities set
  includes those labels, but only consists of the subset of the data with them.
  The other attributes that were part of the original CivilComments release are
  included only in the raw data. See the Kaggle documentation for more details
  about the available features.
  """

  # One BuilderConfig per released variant; `mode` picks the label set
  # exposed by _labels and the parsing path for each CSV row.
  BUILDER_CONFIGS = [
      CivilCommentsConfig(
          name='CivilComments', description=_CC_DESCRIPTION, mode='base'),
      CivilCommentsConfig(
          name='CivilCommentsIdentities',
          description=_CC_IDENTITIES_DESCRIPTION,
          mode='identity'),
      CivilCommentsConfig(
          name='CivilCommentsCovert',
          description=_CC_COVERT_DESCRIPTION,
          mode='covert'),
      CivilCommentsConfig(
          name='CivilCommentsToxicSpans',
          description=_CC_SPANS_DESCRIPTION,
          mode='spans'),
      CivilCommentsConfig(
          name='CivilCommentsInContext',
          description=_CC_CONTEXT_DESCRIPTION,
          mode='context'),
  ]

  VERSION = tfds.core.Version('1.2.2')
  # Newest entries first; keep in sync with VERSION above.
  RELEASE_NOTES = {
      '1.2.2': 'Update to reflect context only having a train split.',
      '1.2.1': 'Fix incorrect formatting in context splits.',
      '1.2.0': 'Add toxic spans, context, and parent comment text features.',
      '1.1.3': 'Corrected id types from float to string.',
      '1.1.2': 'Added separate citation for CivilCommentsCovert dataset.',
      '1.1.1': 'Added CivilCommentsCovert config with correct checksum.',
      '1.1.0': 'Added CivilCommentsCovert config.',
      '1.0.1': 'Added a unique id for each comment.',
      '1.0.0': 'Initial full release.',
  }
  def _info(self):
    """Build DatasetInfo; features and citation depend on the config mode."""
    mode = self.builder_config.mode
    # Per-mode citation; base and identity share the main citation.
    citation = {
        'base': _CITATION,
        'identity': _CITATION,
        'covert': _COVERT_CITATION,
        'spans': _SPANS_CITATION,
        'context': _CONTEXT_CITATION
    }[mode]
    # Features common to every mode.
    features = {
        'text': tfds.features.Text(),
        'id': tf.string,
        'parent_text': tfds.features.Text(),
        'parent_id': tf.int32,
        'article_id': tf.int32,
    }
    if mode == 'spans':
      # Spans are variable-length lists of character indices, not floats.
      features['spans'] = tfds.features.Tensor(shape=(None,), dtype=tf.int32)
      supervised_value = 'spans'
    else:
      # All other modes expose float annotator-fraction labels.
      for label in _labels(mode):
        features[label] = tf.float32
      supervised_value = 'toxicity'
    return tfds.core.DatasetInfo(
        builder=self,
        description=_COMMON_DESCRIPTION,
        features=tfds.features.FeaturesDict(features),
        # The supervised_keys version is very impoverished.
        supervised_keys=('text', supervised_value),
        homepage='https://www.kaggle.com/c/jigsaw-unintended-bias-in-toxicity-classification/data',
        citation=citation,
    )
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
dl_path = | |
<filename>nerblackbox/modules/ner_training/metrics/ner_metrics.py
from dataclasses import dataclass
from dataclasses import asdict
from typing import List, Tuple, Callable
import numpy as np
from sklearn.metrics import accuracy_score as accuracy_sklearn
from sklearn.metrics import precision_score as precision_sklearn
from sklearn.metrics import recall_score as recall_sklearn
from sklearn.metrics import precision_recall_fscore_support as prf_sklearn
from sklearn.exceptions import UndefinedMetricWarning
import warnings
from seqeval.metrics import precision_score as precision_seqeval
from seqeval.metrics import recall_score as recall_seqeval
from seqeval.metrics import f1_score as f1_seqeval
from seqeval.scheme import IOB2, BILOU
from nerblackbox.modules.ner_training.annotation_tags.tags import Tags
class NerMetrics:
    """
    On the token level, the tags are evaluated in the given annotation scheme (e.g. plain, BIO)
    On the entity level, the tags are evaluated in the BIO scheme (after converting if needed)
    """

    def __init__(
        self,
        true_flat,
        pred_flat,
        level,
        scheme,
        classes=None,
        class_index=None,
        verbose=False,
    ):
        """
        :param true_flat: [np array] of shape [batch_size * seq_length]
        :param pred_flat: [np array] of shape [batch_size * seq_length]
        :param level: [str] 'token' or 'entity'
        :param scheme: [str] e.g. 'plain', 'bio'
        :param classes: [optional, list] of [str] labels to take into account for metrics -> if level = 'token'
        :param class_index: [optional, int] index to take into account for metrics -> if level = 'entity'
        :param verbose: [optional, bool] if True, show verbose output
        """
        self.true_flat = true_flat  # token -> plain. entity -> plain, bio, bilou
        self.pred_flat = pred_flat  # token -> plain. entity -> plain, bio, bilou
        self.scheme = scheme  # token -> plain. entity -> plain, bio, bilou
        self.classes = classes
        self.class_index = class_index
        self.level = level
        self.verbose = verbose

        # seqeval needs a concrete scheme class: BILOU stays BILOU,
        # everything else (plain, bio) is evaluated as bio / IOB2.
        if self.scheme == "bilou":
            self.scheme_entity = "bilou"
            self.scheme_entity_seqeval = BILOU
        else:  # plain, bio
            self.scheme_entity = "bio"
            self.scheme_entity_seqeval = IOB2

        self.results = Results()
        # presumably stored in place of a metric that cannot be computed —
        # confirm against the metric helper methods.
        self.failure_value = -1

        assert self.level in [
            "token",
            "entity",
        ], f"ERROR! level = {self.level} unknown."

        if self.level == "entity":
            # entity-level evaluation works on scheme-converted tag lists
            self.true_flat_bio: List[str] = Tags(self.true_flat,).convert_scheme(
                source_scheme=self.scheme, target_scheme=self.scheme_entity
            )  # entity -> bio, bilou
            self.pred_flat_bio: List[str] = Tags(self.pred_flat).convert_scheme(
                source_scheme=self.scheme, target_scheme=self.scheme_entity
            )  # entity -> bio, bilou

            # ASR: repair predictions that violate the annotation scheme and
            # record the abidance rate (fraction already consistent).
            self.pred_flat_bio_corrected: List[str]
            self.pred_flat_bio_corrected, self.results.asr_abidance = Tags(
                self.pred_flat_bio
            ).restore_annotation_scheme_consistency(
                scheme=self.scheme_entity
            )  # entity -> bio, bilou
def results_as_dict(self):
return asdict(self.results)
    def compute(self, _metrics):
        """
        computes selected metrics
        ----------------------------------------------------------
        :param _metrics: [list] of [str], e.g. ['acc', 'precision']
        :return: -
        """
        # Turn warnings (e.g. sklearn's UndefinedMetricWarning) into errors
        # while computing, so ill-defined metrics do not pass silently;
        # presumably the metric helpers catch them — confirm.
        warnings.filterwarnings("error")
        if "acc" in _metrics:
            self.accuracy()
        # precision/recall/f1 depend on the well-defined classes, which is
        # therefore computed before any of them.
        if "precision" in _metrics or "recall" in _metrics or "f1" in _metrics:
            self._compute_well_defined_classes()
        if "precision" in _metrics or "f1" in _metrics:
            self.precision()
        if "recall" in _metrics or "f1" in _metrics:
            self.recall()
        if "f1" in _metrics:
            self.f1_score()
        # any ASR metric triggers the full ASR computation
        if (
            "asr_abidance" in _metrics
            or "asr_precision" in _metrics
            or "asr_recall" in _metrics
            or "asr_f1" in _metrics
        ):
            self.compute_asr_results()
        warnings.resetwarnings()
def accuracy(self):
"""
computes accuracy of predictions (_np_logits) w.r.t. ground truth (_np_label_ids)
---------------------------------------------------------------------------------
:return: acc [np float]
"""
self.results.acc = accuracy_sklearn(
self.true_flat, self.pred_flat, normalize=True
)
def precision(self):
"""
computes precision (macro/micro) of predictions (_pred_flat) w.r.t. ground truth (_true_flat)
Returns:
precision_micro [np array] for all examples
precision_macro [np array] for each class, then averaged
"""
if self.level == "token":
self.results.precision_micro = self._token_evaluation(
evaluation_function=precision_sklearn, average="micro"
)
self.results.precision_macro = self._token_evaluation(
evaluation_function=precision_sklearn, average="macro"
)
elif self.level == "entity":
self.results.precision_micro = self._entity_evaluation_micro(
evaluation_function=precision_seqeval
)
self.results.precision_macro = self._entity_evaluation_macro(
evaluation_function=precision_seqeval,
)
def recall(self):
"""
computes recall (macro/micro) of predictions (_pred_flat) w.r.t. ground truth (_true_flat)
Returns:
recall_micro [np array] for all examples
recall_macro [np array] for each class, then averaged
"""
if self.level == "token":
self.results.recall_micro = self._token_evaluation(
evaluation_function=recall_sklearn, average="micro"
)
self.results.recall_macro = self._token_evaluation(
evaluation_function=recall_sklearn, average="macro"
)
elif self.level == "entity":
self.results.recall_micro = self._entity_evaluation_micro(
evaluation_function=recall_seqeval
)
self.results.recall_macro = self._entity_evaluation_macro(
evaluation_function=recall_seqeval
)
def f1_score(self):
"""
computes f1 score (macro/micro) of predictions (_pred_flat) w.r.t. ground truth (_true_flat)
Returns:
f1_score_micro [np array] for all examples
f1_score_macro [np array] for each class, then averaged
"""
if self.level == "token":
self.results.f1_micro = self._token_evaluation(
evaluation_function=prf_sklearn, average="micro"
)
self.results.f1_macro = self._token_evaluation(
evaluation_function=prf_sklearn, average="macro"
)
elif self.level == "entity":
self.results.f1_micro, self.results.f1_macro = self._entity_evaluation_f1(
evaluation_function=f1_seqeval,
)
def compute_asr_results(self):
"""
computes
- self.results.asr_precision_micro
- self.results.asr_recall_micro
- self.results.asr_f1_micro
"""
def _entity_evaluation_micro_asr(evaluation_function: Callable) -> float:
"""helper function"""
try:
metric = evaluation_function(
[self.true_flat_bio],
[self.pred_flat_bio_corrected], # corrected !!!
average="micro",
mode="strict",
scheme=self.scheme_entity_seqeval,
)
except UndefinedMetricWarning as e:
if self.verbose:
print(e)
metric = self.failure_value
return metric
self.results.asr_precision_micro = _entity_evaluation_micro_asr(
evaluation_function=precision_seqeval
)
self.results.asr_recall_micro = _entity_evaluation_micro_asr(
evaluation_function=recall_seqeval
)
self.results.asr_f1_micro = _entity_evaluation_micro_asr(
evaluation_function=f1_seqeval
)
    def _token_evaluation(self, evaluation_function: Callable, average: str) -> float:
        """
        compute precision/recall/f1 on token level
        Args:
            evaluation_function: precision_sklearn, recall_sklearn, prf_sklearn
            average: 'micro' or 'macro'
        Returns:
            metric: precision/recall/f1 on token level, 'micro' or 'macro' averaged,
                    or self.failure_value (-1) if undefined for a single class
        """
        assert evaluation_function in [
            precision_sklearn,
            recall_sklearn,
            prf_sklearn,
        ], f"evaluation function = {evaluation_function} unknown / not allowed."
        assert average in ["micro", "macro"], f"average = {average} unknown."
        if self.classes is None or len(self.classes) > 1:  # "all" / "fil"
            # Several (or all) classes are averaged: an undefined per-class
            # metric is silently counted as 0 (zero_division=0).
            if evaluation_function != prf_sklearn:
                metric = evaluation_function(
                    self.true_flat,
                    self.pred_flat,
                    labels=self.classes,
                    average=average,
                    zero_division=0,
                )
            else:
                # prf_sklearn returns (precision, recall, fbeta, support);
                # only the f-score is used here.
                _, _, metric, _ = prf_sklearn(
                    self.true_flat,
                    self.pred_flat,
                    labels=self.classes,
                    average=average,
                    zero_division=0,
                )
        else:
            # Exactly one class: an undefined metric must be reported as
            # failure_value. zero_division="warn" emits UndefinedMetricWarning,
            # which compute() escalated to an exception via
            # warnings.filterwarnings("error").
            try:
                if evaluation_function != prf_sklearn:
                    metric = evaluation_function(
                        self.true_flat,
                        self.pred_flat,
                        labels=self.classes,
                        average=average,
                        zero_division="warn",
                    )
                else:
                    _, _, metric, _ = prf_sklearn(
                        self.true_flat,
                        self.pred_flat,
                        labels=self.classes,
                        average=average,
                        warn_for=("precision", "recall", "f-score"),
                        zero_division="warn",
                    )
            except UndefinedMetricWarning as e:
                if self.verbose:
                    print(e)
                metric = self.failure_value
        return metric
    def _entity_evaluation_micro(self, evaluation_function: Callable) -> float:
        """
        compute precision/recall micro average on entity level
        Args:
            evaluation_function: precision_seqeval, recall_seqeval
        Returns:
            metric: precision/recall on entity level, 'micro' averaged,
                    or self.failure_value (-1) if the metric is undefined
        """
        assert evaluation_function in [
            precision_seqeval,
            recall_seqeval,
        ], f"evaluation function = {evaluation_function} unknown / not allowed."
        if self.class_index is None:  # "fil"
            # All (filtered) classes together. UndefinedMetricWarning is raised
            # as an exception because compute() escalated warnings to errors.
            try:
                metric = evaluation_function(
                    [self.true_flat_bio],
                    [self.pred_flat_bio],
                    average="micro",
                    mode="strict",
                    scheme=self.scheme_entity_seqeval,
                )
            except UndefinedMetricWarning as e:
                if self.verbose:
                    print(e)
                metric = self.failure_value
        else:  # "ind"
            # Single class selected via class_index from the per-class results
            # (average=None yields one value per class).
            try:
                metric = evaluation_function(
                    [self.true_flat_bio],
                    [self.pred_flat_bio],
                    mode="strict",
                    scheme=self.scheme_entity_seqeval,
                    average=None,
                    zero_division="warn",
                )[self.class_index]
            except UndefinedMetricWarning:
                # Some class is undefined; recompute with zero_division=0 and
                # check whether the selected class is the affected one.
                try:
                    metric = evaluation_function(
                        [self.true_flat_bio],
                        [self.pred_flat_bio],
                        mode="strict",
                        scheme=self.scheme_entity_seqeval,
                        average=None,
                        zero_division=0,
                    )[self.class_index]
                except IndexError:
                    # class_index is not present in the per-class results
                    metric = self.failure_value
                if metric == 0:
                    # Disambiguate a genuine 0 from an undefined metric: with
                    # zero_division=1 an undefined metric flips to 1.
                    metric = evaluation_function(
                        [self.true_flat_bio],
                        [self.pred_flat_bio],
                        mode="strict",
                        scheme=self.scheme_entity_seqeval,
                        average=None,
                        zero_division=1,
                    )[self.class_index]
                    if metric == 1:
                        metric = self.failure_value
            except IndexError:
                # class_index is not present in the per-class results
                metric = self.failure_value
        return metric
    def _compute_well_defined_classes(self) -> None:
        """
        determine the classes for which precision AND recall are well-defined
        Created Attributes:
            results.classindices_macro: list of indices of well-defined classes in terms of precision, recall, f1
            results.numberofclasses_macro: number of well-defined classes in terms of precision, recall, f1
        """
        def _get_index_list(
            evaluation_function: Callable, true_array, pred_array, scheme_seqeval=None
        ):
            # Return indices of classes whose metric is well-defined.
            # seqeval needs mode/scheme kwargs; sklearn must not receive them.
            kwargs = (
                {"mode": "strict", "scheme": scheme_seqeval}
                if scheme_seqeval is not None
                else {}
            )
            try:
                # zero_division="warn" raises UndefinedMetricWarning (escalated
                # to an exception in compute()) if any class is undefined.
                metric_list = evaluation_function(
                    true_array,
                    pred_array,
                    average=None,
                    zero_division="warn",
                    **kwargs,
                )
                index_list = [i for i in range(len(metric_list))]
            except UndefinedMetricWarning:
                # At least one class is undefined; collect the well-defined ones.
                metric_list_all = evaluation_function(
                    true_array,
                    pred_array,
                    average=None,
                    zero_division=0,
                    **kwargs,
                )
                index_list = list()
                for index, metric_elem in enumerate(metric_list_all):
                    if metric_elem != 0:
                        index_list.append(index)
                    else:
                        # A 0 may mean "undefined": with zero_division=1 an
                        # undefined metric flips to 1, a genuine 0 stays 0.
                        metric_elem_alt = evaluation_function(
                            true_array,
                            pred_array,
                            average=None,
                            zero_division=1,
                            **kwargs,
                        )[index]
                        if metric_elem_alt != 1:
                            index_list.append(index)
            return index_list
        if self.level == "token":
            index_list_precision = _get_index_list(
                evaluation_function=precision_sklearn,
                true_array=self.true_flat,
                pred_array=self.pred_flat,
            )
            index_list_recall = _get_index_list(
                evaluation_function=recall_sklearn,
                true_array=self.true_flat,
                pred_array=self.pred_flat,
            )
        else:
            index_list_precision = _get_index_list(
                evaluation_function=precision_seqeval,
                true_array=[self.true_flat_bio],
                pred_array=[self.pred_flat_bio],
                scheme_seqeval=self.scheme_entity_seqeval,
            )
            index_list_recall = _get_index_list(
                evaluation_function=recall_seqeval,
                true_array=[self.true_flat_bio],
                pred_array=[self.pred_flat_bio],
                scheme_seqeval=self.scheme_entity_seqeval,
            )
        # Well-defined for macro-averaging = well-defined for both metrics.
        self.results.classindices_macro = tuple(
            [index for index in index_list_precision if index in index_list_recall]
        )
        if self.level == "token":
            self.results.numberofclasses_macro = (
                len(self.results.classindices_macro) - 1
            )  # disregard "O" label
        else:
            self.results.numberofclasses_macro = len(self.results.classindices_macro)
def _entity_evaluation_macro(
self,
evaluation_function: Callable,
) -> float:
"""
compute precision/recall macro average on entity level
Args:
evaluation_function: precision_seqeval, recall_seqeval
Returns:
metric: precision/recall on entity level, 'macro' averaged on well-defined classes
"""
assert evaluation_function in [
precision_seqeval,
recall_seqeval,
], f"evaluation function = {evaluation_function} unknown / not allowed."
metric = evaluation_function(
[self.true_flat_bio],
[self.pred_flat_bio],
mode="strict",
scheme=self.scheme_entity_seqeval,
average="macro",
zero_division=0,
)
return metric
def _entity_evaluation_f1(
self, evaluation_function: Callable
) -> Tuple[float, float]:
"""
compute f1 micro or macro average on entity level
Args:
evaluation_function: f1_seqeval
Returns:
f1_micro: f1 on entity level, 'micro' averaged
f1_macro: f1 on entity level, 'macro' averaged on well-defined classes
"""
assert evaluation_function in [
f1_seqeval
], f"evaluation function = {evaluation_function} unknown / not allowed."
# ensure that precision and recall have been called:
# self.precision()
# self.recall()
# f1_micro
if (
self.results.precision_micro == self.failure_value
or self.results.recall_micro == self.failure_value
):
f1_micro = self.failure_value
else:
if self.class_index is None: # "fil"
f1_micro = evaluation_function(
[self.true_flat_bio],
[self.pred_flat_bio],
average="micro",
mode="strict",
scheme=self.scheme_entity_seqeval,
)
else: # "ind"
f1_micro = evaluation_function(
[self.true_flat_bio],
[self.pred_flat_bio],
mode="strict",
scheme=self.scheme_entity_seqeval,
average=None,
zero_division="warn",
)[self.class_index]
# f1_macro
if (
self.results.precision_macro == | |
# Copyright 2015 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
NSX-V3 Plugin security & Distributed Firewall integration module
"""
from neutron_lib import constants
from oslo_log import log
from oslo_utils import excutils
from vmware_nsxlib.v3 import exceptions
from vmware_nsxlib.v3 import nsx_constants as consts
from vmware_nsxlib.v3 import utils
LOG = log.getLogger(__name__)
PORT_SG_SCOPE = 'os-security-group'
MAX_NSGROUPS_CRITERIA_TAGS = 10
class NsxLibNsGroup(utils.NsxLibApiBase):
    """Helper for NSX NSGroup resources.

    An NSGroup mirrors a neutron security group on the NSX backend; its
    members are the logical ports bound to that security group.
    """

    def __init__(self, client, max_attempts, firewall_section_handler):
        # Keep a handle on the firewall-section helper so NSGroup updates
        # can be propagated to the section enforcing the group's rules.
        self.firewall_section = firewall_section_handler
        super(NsxLibNsGroup, self).__init__(client, max_attempts)

    def update_on_backend(self, context, security_group,
                          nsgroup_id, section_id,
                          log_sg_allowed_traffic):
        """Sync a neutron security-group update to the NSX backend.

        Updates the NSGroup's name/description and the corresponding
        firewall section, including the per-rule logging flags.
        """
        name = self.get_name(security_group)
        description = security_group['description']
        # Logging is on when enabled globally or on this security group.
        logging = (log_sg_allowed_traffic or
                   security_group[consts.LOGGING])
        rules = self.firewall_section._process_rules_logging_for_update(
            section_id, logging)
        self.update(nsgroup_id, name, description)
        self.firewall_section.update(section_id, name, description,
                                     rules=rules)

    def get_name(self, security_group):
        """Return the NSGroup display name for a security group dict."""
        # NOTE(roeyc): We add the security-group id to the NSGroup name,
        # for usability purposes.
        return '%(name)s - %(id)s' % security_group

    def get_lport_tags(self, secgroups):
        """Return the NSX tag list representing a port's security groups.

        :raises NumberOfNsgroupCriteriaTagsReached: when more groups are
            given than NSX membership-criteria tags allow.
        """
        if len(secgroups) > MAX_NSGROUPS_CRITERIA_TAGS:
            raise exceptions.NumberOfNsgroupCriteriaTagsReached(
                max_num=MAX_NSGROUPS_CRITERIA_TAGS)
        tags = []
        for sg in secgroups:
            tags = utils.add_v3_tag(tags, PORT_SG_SCOPE, sg)
        if not tags:
            # This port shouldn't be associated with any security-group
            tags = [{'scope': PORT_SG_SCOPE, 'tag': None}]
        return tags

    def update_lport(self, context, lport_id, original, updated):
        """Update a logical port's NSGroup memberships.

        Adds the port to the newly attached groups and removes it from the
        detached ones. If one of the additions hits group capacity, all
        additions are rolled back before raising.

        :raises SecurityGroupMaximumCapacityReached: with ``sg_id`` set to
            the NSGroup that was actually full.
        """
        added = set(updated) - set(original)
        removed = set(original) - set(updated)
        for nsgroup_id in added:
            try:
                self.add_members(
                    nsgroup_id, consts.TARGET_TYPE_LOGICAL_PORT,
                    [lport_id])
            except exceptions.NSGroupIsFull:
                # Remember which group hit capacity: the rollback loop below
                # uses its own loop variable so the raised exception reports
                # the group that was actually full (previously it reported
                # whichever group the rollback loop ended on).
                full_nsgroup_id = nsgroup_id
                for added_nsgroup_id in added:
                    # NOTE(roeyc): If the port was not added to the nsgroup
                    # yet, then this request will silently fail.
                    self.remove_member(
                        added_nsgroup_id, consts.TARGET_TYPE_LOGICAL_PORT,
                        lport_id)
                raise exceptions.SecurityGroupMaximumCapacityReached(
                    sg_id=full_nsgroup_id)
            except exceptions.ResourceNotFound:
                with excutils.save_and_reraise_exception():
                    LOG.error("NSGroup %s doesn't exists", nsgroup_id)
        for nsgroup_id in removed:
            self.remove_member(
                nsgroup_id, consts.TARGET_TYPE_LOGICAL_PORT, lport_id)

    def get_nsservice(self, resource_type, **properties):
        """Build an NSX 'service' body of the given resource type."""
        service = {'resource_type': resource_type}
        service.update(properties)
        return {'service': service}

    def get_nsgroup_complex_expression(self, expressions):
        """Combine several membership expressions into a complex one."""
        return {'resource_type': consts.NSGROUP_COMPLEX_EXP,
                'expressions': expressions}

    def get_switch_tag_expression(self, scope, tag):
        """Membership criterion matching logical switches by tag."""
        return {'resource_type': consts.NSGROUP_TAG_EXP,
                'target_type': consts.TARGET_TYPE_LOGICAL_SWITCH,
                'scope': scope,
                'tag': tag}

    def get_port_tag_expression(self, scope, tag):
        """Membership criterion matching logical ports by tag."""
        return {'resource_type': consts.NSGROUP_TAG_EXP,
                'target_type': consts.TARGET_TYPE_LOGICAL_PORT,
                'scope': scope,
                'tag': tag}

    def create(self, display_name, description, tags,
               membership_criteria=None):
        """Create an NSGroup, optionally with membership criteria."""
        body = {'display_name': display_name,
                'description': description,
                'tags': tags,
                'members': []}
        if membership_criteria:
            # Allow caller to pass a list of membership criteria.
            # The 'else' branch is maintained for backwards compatibility
            # with callers that send a single membership criterion.
            if isinstance(membership_criteria, list):
                body.update({'membership_criteria': membership_criteria})
            else:
                body.update({'membership_criteria': [membership_criteria]})
        return self.client.create('ns-groups', body)

    def list(self):
        """List all NSGroups without resolving member references."""
        return self.client.list(
            'ns-groups?populate_references=false').get('results', [])

    def update(self, nsgroup_id, display_name=None, description=None,
               membership_criteria=None, members=None):
        """Update an NSGroup; None-valued fields are left unchanged."""
        # Using internal method so we can access max_attempts in the decorator
        @utils.retry_upon_exception(
            exceptions.StaleRevision,
            max_attempts=self.nsxlib_config.max_attempts)
        def _do_update():
            # Read-modify-write; retried on StaleRevision conflicts.
            nsgroup = self.read(nsgroup_id)
            if display_name is not None:
                nsgroup['display_name'] = display_name
            if description is not None:
                nsgroup['description'] = description
            if members is not None:
                nsgroup['members'] = members
            if membership_criteria is not None:
                nsgroup['membership_criteria'] = [membership_criteria]
            return self.client.update(
                'ns-groups/%s' % nsgroup_id, nsgroup)
        return _do_update()

    def get_member_expression(self, target_type, target_id):
        """Simple expression matching a single member by id."""
        return {
            'resource_type': consts.NSGROUP_SIMPLE_EXP,
            'target_property': 'id',
            'target_type': target_type,
            'op': consts.EQUALS,
            'value': target_id}

    def _update_with_members(self, nsgroup_id, members, action):
        """POST a member add/remove action to an NSGroup."""
        members_update = 'ns-groups/%s?action=%s' % (nsgroup_id, action)
        return self.client.create(members_update, members)

    def add_members(self, nsgroup_id, target_type, target_ids):
        """Add members to an NSGroup.

        :raises NSGroupIsFull: when the backend rejects the addition with a
            generic ManagerError (assumed to mean capacity; see REVISIT).
        """
        members = []
        for target_id in target_ids:
            member_expr = self.get_member_expression(
                target_type, target_id)
            members.append(member_expr)
        members = {'members': members}
        try:
            return self._update_with_members(
                nsgroup_id, members, consts.NSGROUP_ADD_MEMBERS)
        except (exceptions.StaleRevision, exceptions.ResourceNotFound):
            raise
        except exceptions.ManagerError:
            # REVISIT(roeyc): A ManagerError might have been raised for a
            # different reason, e.g - NSGroup does not exists.
            LOG.warning("Failed to add %(target_type)s resources "
                        "(%(target_ids)s) to NSGroup %(nsgroup_id)s",
                        {'target_type': target_type,
                         'target_ids': target_ids,
                         'nsgroup_id': nsgroup_id})
            raise exceptions.NSGroupIsFull(nsgroup_id=nsgroup_id)

    def remove_member(self, nsgroup_id, target_type,
                      target_id, verify=False):
        """Remove a member; with verify=True a missing member raises."""
        member_expr = self.get_member_expression(
            target_type, target_id)
        members = {'members': [member_expr]}
        try:
            return self._update_with_members(
                nsgroup_id, members, consts.NSGROUP_REMOVE_MEMBERS)
        except exceptions.ManagerError:
            if verify:
                raise exceptions.NSGroupMemberNotFound(member_id=target_id,
                                                       nsgroup_id=nsgroup_id)

    def read(self, nsgroup_id):
        """Fetch an NSGroup with member references resolved."""
        return self.client.get(
            'ns-groups/%s?populate_references=true' % nsgroup_id)

    def delete(self, nsgroup_id):
        """Force-delete an NSGroup; a missing group is logged, not raised."""
        try:
            return self.client.delete(
                'ns-groups/%s?force=true' % nsgroup_id)
        # FIXME(roeyc): Should only except NotFound error.
        except Exception:
            LOG.debug("NSGroup %s does not exists for delete request.",
                      nsgroup_id)

    def find_by_display_name(self, display_name):
        """Return all NSGroups whose display_name matches exactly."""
        found = []
        for resource in self.list():
            if resource['display_name'] == display_name:
                found.append(resource)
        return found
class NsxLibFirewallSection(utils.NsxLibApiBase):
def add_member_to_fw_exclude_list(self, target_id, target_type):
resource = 'firewall/excludelist?action=add_member'
body = {"target_id": target_id,
"target_type": target_type}
self.client.create(resource, body)
def remove_member_from_fw_exclude_list(self, target_id, target_type):
resource = ('firewall/excludelist?action=remove_member&object_id='
+ target_id)
self.client.create(resource)
def get_excludelist(self):
return self.client.list('firewall/excludelist')
def _get_direction(self, sg_rule):
return (
consts.IN if sg_rule['direction'] == 'ingress'
else consts.OUT
)
def _get_l4_protocol_name(self, protocol_number):
if protocol_number is None:
return
protocol_number = constants.IP_PROTOCOL_MAP.get(protocol_number,
protocol_number)
protocol_number = int(protocol_number)
if protocol_number == 6:
return consts.TCP
elif protocol_number == 17:
return consts.UDP
elif protocol_number == 1:
return consts.ICMPV4
else:
return protocol_number
def get_nsservice(self, resource_type, **properties):
service = {'resource_type': resource_type}
service.update(properties)
return {'service': service}
    def _decide_service(self, sg_rule):
        """Translate a neutron security-group rule into an NSX service body.

        Returns an L4 port-set service for TCP/UDP, an ICMP-type service
        for ICMPv4, a raw IP-protocol service for any other protocol, and
        None when the rule has no protocol.
        """
        l4_protocol = self._get_l4_protocol_name(sg_rule['protocol'])
        direction = self._get_direction(sg_rule)
        if l4_protocol in [consts.TCP, consts.UDP]:
            # If port_range_min is not specified then we assume all ports are
            # matched, relying on neutron to perform validation.
            source_ports = []
            if sg_rule['port_range_min'] is None:
                destination_ports = []
            elif sg_rule['port_range_min'] != sg_rule['port_range_max']:
                # NSX API requires a non-empty range (e.g - '22-23')
                destination_ports = ['%(port_range_min)s-%(port_range_max)s'
                                     % sg_rule]
            else:
                destination_ports = ['%(port_range_min)s' % sg_rule]
            if direction == consts.OUT:
                # For egress rules the configured ports are matched as
                # source ports instead of destination ports.
                source_ports, destination_ports = destination_ports, []
            return self.get_nsservice(
                consts.L4_PORT_SET_NSSERVICE,
                l4_protocol=l4_protocol,
                source_ports=source_ports,
                destination_ports=destination_ports)
        elif l4_protocol == consts.ICMPV4:
            # For ICMP rules, port_range_min/max carry the ICMP type/code.
            return self.get_nsservice(
                consts.ICMP_TYPE_NSSERVICE,
                protocol=l4_protocol,
                icmp_type=sg_rule['port_range_min'],
                icmp_code=sg_rule['port_range_max'])
        elif l4_protocol is not None:
            return self.get_nsservice(
                consts.IP_PROTOCOL_NSSERVICE,
                protocol_number=l4_protocol)
def _build(self, display_name, description, applied_tos, tags):
return {'display_name': display_name,
'description': description,
'stateful': True,
'section_type': consts.FW_SECTION_LAYER3,
'applied_tos': [self.get_nsgroup_reference(t_id)
for t_id in applied_tos],
'tags': tags}
def create_empty(self, display_name, description,
applied_tos, tags,
operation=consts.FW_INSERT_BOTTOM,
other_section=None):
resource = 'firewall/sections?operation=%s' % operation
body = self._build(display_name, description,
applied_tos, tags)
if other_section:
resource += '&id=%s' % other_section
return self.client.create(resource, body)
def create_with_rules(self, display_name, description, applied_tos=None,
tags=None, operation=consts.FW_INSERT_BOTTOM,
other_section=None, rules=None):
resource = 'firewall/sections?operation=%s' % operation
body = {
'display_name': display_name,
'description': description,
'stateful': True,
'section_type': consts.FW_SECTION_LAYER3,
'applied_tos': applied_tos or [],
'rules': rules or [],
'tags': tags or []
}
if rules:
resource += '&action=create_with_rules'
if other_section:
resource += '&id=%s' % other_section
return self.client.create(resource, body)
    def update(self, section_id, display_name=None, description=None,
               applied_tos=None, rules=None, tags_update=None, force=False):
        """Update a firewall section; None-valued fields are left unchanged.

        When ``rules`` is given, the section is updated together with its
        rules (POST with action=update_with_rules); otherwise a plain
        update is issued. ``force`` adds the X-Allow-Overwrite header.
        """
        # Using internal method so we can access max_attempts in the decorator
        @utils.retry_upon_exception(
            exceptions.StaleRevision,
            max_attempts=self.nsxlib_config.max_attempts)
        def _do_update():
            # Read-modify-write; retried on StaleRevision conflicts.
            resource = 'firewall/sections/%s' % section_id
            section = self.read(section_id)
            if rules is not None:
                resource += '?action=update_with_rules'
                section.update({'rules': rules})
            if display_name is not None:
                section['display_name'] = display_name
            if description is not None:
                section['description'] = description
            if applied_tos is not None:
                section['applied_tos'] = [self.get_nsgroup_reference(nsg_id)
                                          for nsg_id in applied_tos]
            if tags_update is not None:
                section['tags'] = utils.update_v3_tags(section.get('tags', []),
                                                       tags_update)
            headers = None
            if force:
                # shared sections (like default section) can serve multiple
                # openstack deployments. If some operate under protected
                # identities, force-overwrite is needed.
                # REVISIT(annak): find better solution for shared sections
                headers = {'X-Allow-Overwrite': 'true'}
            if rules is not None:
                # Rule updates must go through POST (create) with the
                # update_with_rules action appended above.
                return self.client.create(resource, section, headers=headers)
            elif any(p is not None for p in (display_name, description,
                                             applied_tos)):
                return self.client.update(resource, section, headers=headers)
        return _do_update()
def read(self, section_id):
resource = 'firewall/sections/%s' % section_id
return self.client.get(resource)
def list(self):
resource = 'firewall/sections'
return self.client.list(resource).get('results', [])
def delete(self, section_id):
resource = 'firewall/sections/%s?cascade=true' % section_id
return self.client.delete(resource)
def get_nsgroup_reference(self, nsgroup_id):
return {'target_id': nsgroup_id,
'target_type': consts.NSGROUP}
def get_logicalport_reference(self, port_id):
return {'target_id': port_id,
'target_type': consts.TARGET_TYPE_LOGICAL_PORT}
def get_ip_cidr_reference(self, ip_cidr_block, ip_protocol):
target_type = (consts.TARGET_TYPE_IPV4ADDRESS
if ip_protocol == consts.IPV4
else consts.TARGET_TYPE_IPV6ADDRESS)
return {'target_id': ip_cidr_block,
'target_type': target_type}
def get_rule_address(self, target_id, display_name=None, is_valid=True,
target_type=consts.TARGET_TYPE_IPV4ADDRESS):
return {'target_display_name': display_name or '',
'target_id': target_id,
'is_valid': is_valid,
'target_type': target_type}
def get_l4portset_nsservice(self, sources=None, destinations=None,
protocol=consts.TCP):
return {
'service': {
'resource_type': 'L4PortSetNSService',
'source_ports': sources or [],
'destination_ports': destinations or [],
'l4_protocol': protocol}
}
def get_rule_dict(self, display_name, sources=None, destinations=None,
direction=consts.IN_OUT, ip_protocol=consts.IPV4_IPV6,
services=None, action=consts.FW_ACTION_ALLOW,
logged=False, disabled=False, applied_tos=None):
rule_dict = {'display_name': display_name,
'direction': direction,
'ip_protocol': ip_protocol,
'action': action,
'logged': logged,
'disabled': disabled,
'sources': sources or [],
'destinations': destinations or [],
'services': services or []}
if applied_tos is not None:
rule_dict['applied_tos'] = applied_tos
return rule_dict
def add_rule(self, rule, section_id):
resource = 'firewall/sections/%s/rules' % section_id
params = '?operation=insert_bottom'
return self.client.create(resource + params, rule)
def add_rules(self, rules, section_id):
resource = 'firewall/sections/%s/rules' | |
import argparse
import gc
import math
import os
from argparse import Namespace
from datetime import timedelta
from multiprocessing import cpu_count
from typing import List
import joblib
import numpy as np
import pandas as pd
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchmetrics
import transformers
from pytorch_lightning import seed_everything
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.loggers import WandbLogger
from sklearn.metrics import matthews_corrcoef
from sklearn.preprocessing import OrdinalEncoder
from torch import nn as nn
from torch.nn.utils.rnn import pack_sequence, pad_packed_sequence
from torch.utils.data import DataLoader, Dataset
from torchmetrics import Metric
from transformers import (AdamW, AutoConfig, AutoModel, AutoModelWithLMHead,
AutoTokenizer, get_linear_schedule_with_warmup, BertModel, BigBirdTokenizer, BertConfig)
import wandb
# In[2]:
def str2bool(v):
    """Interpret common truthy strings ('yes', 'true', 't', '1'), case-insensitively."""
    truthy_values = ("yes", "true", "t", "1")
    return v.lower() in truthy_values
seed_everything(314)  # pin the global RNG seed for reproducible runs
# In[3]:
from typing import Any, List, Optional, Tuple, Union, Dict
class MCC(Metric):
    """Torchmetrics metric that accumulates all predictions/targets and
    computes the Matthews correlation coefficient over everything seen."""

    def __init__(
        self,
        compute_on_step: bool = True,
        dist_sync_on_step: bool = False,
        process_group: Optional[Any] = None,
    ):
        super().__init__(
            compute_on_step=compute_on_step,
            dist_sync_on_step=dist_sync_on_step,
            process_group=process_group,
        )
        # Keep the raw tensors; MCC cannot be averaged across batches.
        self.add_state("preds", default=[], dist_reduce_fx=None)
        self.add_state("target", default=[], dist_reduce_fx=None)

    def update(self, preds: torch.Tensor, target: torch.Tensor):
        """Flatten and stash one batch of predictions and targets."""
        self.preds.append(preds.flatten().long())
        self.target.append(target.flatten().long())

    def compute(self) -> Union[Tuple[torch.Tensor, torch.Tensor, torch.Tensor],
                               Tuple[List[torch.Tensor], List[torch.Tensor], List[torch.Tensor]]]:
        """Return (mcc, preds, target) over all accumulated batches."""
        all_preds = torch.cat(self.preds, dim=0).cpu().numpy()
        all_target = torch.cat(self.target, dim=0).cpu().numpy()
        mcc_value = matthews_corrcoef(all_preds, all_target)
        return mcc_value, all_preds, all_target
class ModelBigBird(nn.Module):
    """BigBird-based binary classifier.

    Runs the input through a pretrained encoder, concatenates the [CLS]
    vectors of the last four hidden layers and classifies them with a
    small MLP head (optionally LayerNorm- or BatchNorm-regularized).
    """

    def __init__(self, model_name: str, bertconfig: BertConfig, drop_mult: float, use_special_classifier: str):
        """
        Args:
            model_name: HF hub name / path of the pretrained encoder.
            bertconfig: config object attached to the loaded encoder.
            drop_mult: dropout probability used throughout the head.
            use_special_classifier: 'ln' (LayerNorm), 'bn' (BatchNorm1d)
                or 'none' for a plain MLP head.

        Raises:
            ValueError: for an unknown ``use_special_classifier`` value.
        """
        super().__init__()
        self.bert = transformers.AutoModel.from_pretrained(model_name, add_pooling_layer=False)
        self.bert.config = bertconfig
        self.dropout_mult = drop_mult
        self.dropout = nn.Dropout(self.dropout_mult)
        # Head sizes: 4 concatenated [CLS] states -> hidden -> hidden/2 -> 1 logit.
        sizes_classifier = [self.bert.config.hidden_size*4, self.bert.config.hidden_size, int(self.bert.config.hidden_size//2), 1]
        if use_special_classifier == 'ln':
            norm_layer = nn.LayerNorm
        elif use_special_classifier == 'bn':
            norm_layer = nn.BatchNorm1d
        elif use_special_classifier == 'none':
            norm_layer = None
        else:
            # Previously an unknown value silently left ``classifier`` unset,
            # failing later with AttributeError; fail fast instead.
            raise ValueError(
                f'use_special_classifier must be "ln", "bn" or "none", '
                f'got {use_special_classifier!r}')
        # Build the head; module order matches the original hand-written
        # Sequential blocks, so state dicts remain index-compatible.
        layers = []
        for i in range(3):
            layers.append(self.dropout)
            layers.append(nn.Linear(sizes_classifier[i], sizes_classifier[i + 1]))
            if norm_layer is not None:
                layers.append(norm_layer(sizes_classifier[i + 1]))
            if i < 2:
                # No activation after the final (logit) layer.
                layers.append(Mish())
        self.classifier = nn.Sequential(*layers)

    def forward(self, input_ids: torch.Tensor, attention_masks: torch.Tensor, device='cuda'):
        """Return raw classification logits of shape (batch, 1).

        ``device`` is kept for backward compatibility but is unused.
        """
        out_bert = self.bert(input_ids, attention_masks, output_hidden_states=True)
        # NOTE(review): assumes index 1 of the encoder output holds the
        # hidden-states tuple (pooling layer disabled) — as in the original.
        hidden_states = out_bert[1]
        # [CLS] (position 0) embedding of the last four encoder layers,
        # oldest layer first — same order as the original h09..h12 concat.
        cls_vectors = [hidden_states[-k][:, 0] for k in (4, 3, 2, 1)]
        concat_hidden = torch.cat(cls_vectors, axis=-1)
        return self.classifier(concat_hidden)

    def freeze_bert_encoder(self):
        """Disable gradients for all encoder parameters."""
        print('Freezing all bert encoder')
        for param in self.bert.parameters():
            param.requires_grad = False

    def unfreeze_bert_encoder(self):
        """Enable gradients for all encoder parameters."""
        # Typo fix: message previously read 'Unreezing'.
        print('Unfreezing all bert encoder')
        for param in self.bert.parameters():
            param.requires_grad = True

    def unfreeze_bert_encoder_last_layers(self):
        """Enable gradients only for the last encoder layer and the pooler."""
        print('Unfreezing bert encoder last layers')
        for name, param in self.bert.named_parameters():
            if "encoder.layer.11" in name or "pooler" in name:
                param.requires_grad = True

    def unfreeze_bert_encoder_pooler_layer(self):
        """Enable gradients only for the pooler layer."""
        print('Unfreezing bert encoder last pooler layer')
        for name, param in self.bert.named_parameters():
            if "pooler" in name:
                print(name)
                param.requires_grad = True
class EncodeCollateFn:
    """Collate function: truncates long documents (keeping head and tail),
    tokenizes the batch, and returns (input_ids, attention_mask, labels)."""

    def __init__(self, tokenizer: AutoTokenizer, max_input_length=7680):
        self.tokenizer = tokenizer
        self.max_tokens = max_input_length

    def slice_text(self, text):
        """If the whitespace-split text exceeds max_tokens words, keep only
        the first and the last max_tokens//2 words."""
        words = text.split()
        if len(words) > self.max_tokens:
            head = words[:self.max_tokens//2]
            tail = words[-self.max_tokens//2:]
            text = ' '.join(head + tail)
        return text

    def __call__(self, batch):
        texts = [self.slice_text(sample[0]) for sample in batch]
        labels = torch.tensor([sample[1] for sample in batch], dtype=torch.int8)
        assert type(texts) == list, 'Needs to be a list of strings'
        encoded = self.tokenizer(texts, return_tensors='pt', padding=True,
                                 truncation=True, max_length=self.max_tokens)
        return encoded['input_ids'], encoded['attention_mask'], labels
# from https://github.com/digantamisra98/Mish/blob/b5f006660ac0b4c46e2c6958ad0301d7f9c59651/Mish/Torch/mish.py
@torch.jit.script
def mish(input):
    """Mish activation: x * tanh(softplus(x))."""
    softplus_x = F.softplus(input)
    return input * torch.tanh(softplus_x)
class Mish(nn.Module):
    """nn.Module wrapper around the scripted ``mish`` activation."""

    def forward(self, input):
        output = mish(input)
        return output
class JEFDataset(Dataset):
    """Dataset of court-ruling texts read from a parquet file.

    Maps the label column to binary (PROVIMENTO=1, NÃO PROVIMENTO=0);
    very large files are restricted to the last five years of rulings.
    """

    def __init__(self, path, dep_var, text_col, lowercase):
        # path: parquet file path; dep_var: label column name;
        # text_col: text column name; lowercase: lowercase the text if True.
        super().__init__()
        self.dep_var = dep_var
        self.text_col = text_col
        all_columns = [self.dep_var, self.text_col] + ['date_appeal_panel_ruling']
        data = pd.read_parquet(path, columns=all_columns)
        if len(data) > 600_000:
            # Very large datasets: keep only the most recent 5 years.
            print(f'Previous size of training data: {len(data)}. Selecting only last 5 years of the training dataset')
            data.date_appeal_panel_ruling = pd.to_datetime(data.date_appeal_panel_ruling, infer_datetime_format=True, yearfirst=True, dayfirst=False)
            thresh = data.date_appeal_panel_ruling.max() - timedelta(days=365*5)
            data = data[data.date_appeal_panel_ruling >= thresh].copy()
            print(f'New size of training data: {len(data)}')
            # NOTE(review): the date column is only dropped in this branch;
            # smaller files keep it into self.data, and dropna() below may
            # then remove rows with missing dates — confirm this is intended.
            data.drop('date_appeal_panel_ruling', axis=1, inplace=True)
        # Partial grants count as grants; any other label is dropped.
        data[self.dep_var] = data[self.dep_var].replace('PROVIMENTO PARCIAL', 'PROVIMENTO')
        data = data[data[self.dep_var].isin(['PROVIMENTO', 'NÃO PROVIMENTO'])]
        data[self.dep_var] = data[self.dep_var].map({'NÃO PROVIMENTO': 0, 'PROVIMENTO': 1})
        if lowercase:
            data[self.text_col] = data[self.text_col].str.lower()
        print(f'Size before: {len(data)} - {path.split("/")[-1]}')
        data.dropna(inplace=True)
        print(f'Size after: {len(data)} - {path.split("/")[-1]}')
        data.reset_index(drop=True, inplace=True)
        self.data = data.copy()

    def __getitem__(self, idx):
        # Return (text, label) for one row.
        return self.data.loc[idx, self.text_col], self.data.loc[idx, self.dep_var]

    def __len__(self):
        return len(self.data)
class TrainingModule(pl.LightningModule):
    def __init__(self, hparams):
        """Build tokenizer, metrics, BigBird model and loss from hparams."""
        super().__init__()
        self.save_hyperparameters(hparams)
        self.num_labels = len(self.hparams['labels'])
        self.tokenizer = BigBirdTokenizer.from_pretrained(self.hparams['bert_model_path'])
        config = AutoConfig.from_pretrained(self.hparams['bert_model_path'])
        config.__setattr__('num_labels', len(self.hparams['labels']))
        self.accuracy = torchmetrics.Accuracy()
        self.mcc = MCC()
        # Best/last MCC bookkeeping; -1 marks "not computed yet".
        self.valor_mcc = {'val_mcc': -1, 'test_mcc': -1}
        self.best_mcc = -1.0
        self.precision_metric = torchmetrics.Precision(num_classes=self.num_labels)
        self.recall_metric = torchmetrics.Recall(num_classes=self.num_labels)
        #self.confmat = torchmetrics.ConfusionMatrix(num_classes=self.num_labels)
        self.f1_score = torchmetrics.F1(num_classes=self.num_labels)
        self.model = ModelBigBird(self.hparams['bert_model_path'],
                                  bertconfig=config,
                                  drop_mult=self.hparams.drop_mult,
                                  use_special_classifier=self.hparams.use_special_classifier)
        # Select which encoder parameters are trainable.
        if self.hparams.bert_unfreeze_mode == 'encoder_last':
            self.model.freeze_bert_encoder()
            self.model.unfreeze_bert_encoder_last_layers()
        elif self.hparams.bert_unfreeze_mode == 'pooler_last':
            self.model.freeze_bert_encoder()
            self.model.unfreeze_bert_encoder_pooler_layer()
        elif self.hparams.bert_unfreeze_mode == 'all':
            self.model.unfreeze_bert_encoder()
        elif self.hparams.bert_unfreeze_mode == 'none':
            self.model.freeze_bert_encoder()
        if self.hparams.weighted_loss:
            # Class weighting for imbalanced labels (BCE pos_weight).
            weights = torch.FloatTensor(self.hparams.train_weights)
            print(f'Using weighted loss: {weights}')
        else:
            weights = None
        # weight = torch.FloatTensor(self.hparams.train_weights).to(self.hparams.device) if self.hparams.train_weights is not None else None)
        self.loss = nn.BCEWithLogitsLoss(pos_weight=weights)
        self.lr = self.hparams.lr
        # NOTE(review): save_hyperparameters() was already called above with
        # explicit hparams; this second call looks redundant — confirm.
        self.save_hyperparameters()
def step(self, batch, step_name='train'):
thresh = self.hparams.thresh_step
input_ids, attention_masks, y = batch
logits = self.forward(input_ids, attention_masks).squeeze()
y = y.type_as(logits)
loss = self.loss(logits, y)
if step_name == 'train':
self.log('train_loss', loss, on_step=True, on_epoch=True,
logger=True, prog_bar=True, sync_dist=True, sync_dist_op='mean')
result = {'train_loss': loss}
return loss
else:
self.log(f'{step_name}_loss', loss, on_step=False, on_epoch=True, logger=True,
prog_bar=True, reduce_fx=torch.mean, sync_dist=True, sync_dist_op='mean')
y_pred = torch.sigmoid(logits)
y_pred = torch.where(y_pred > thresh, 1.0, 0.0).long()
y = y.long()
#y_pred = torch.argmax(logits, dim=1)
self.accuracy(y_pred, y)
#self.mymetric(y_pred, y)
self.precision_metric(y_pred, y)
self.recall_metric(y_pred, y)
self.f1_score(y_pred, y)
#self.confmat(y_pred, y)
self.mcc(y_pred, y)
result = {f'{step_name}_loss': loss}
return result
def calculate_metrics(self, outputs, step_name='val'):
mcc, preds, target = self.mcc.compute()
tn = ((target == preds) & (target == 0)).sum()
tp = ((target == preds) & (target == 1)).sum()
fn = ((target != preds) & (target == 1)).sum()
fp = ((target != preds) & (target == 0)).sum()
outs = {}
outs[f'{step_name}_acc'] = self.accuracy.compute()
outs[f'{step_name}_loss'] = torch.mean(torch.tensor([i[f'{step_name}_loss'] for i in outputs]))
outs[f'{step_name}_tn'] = tn
outs[f'{step_name}_fp'] = fp
outs[f'{step_name}_fn'] = fn
outs[f'{step_name}_tp'] = tp
outs[f'{step_name}_f1_score'] = self.f1_score.compute()
outs[f'{step_name}_precision'] = self.precision_metric.compute()
outs[f'{step_name}_recall'] = self.recall_metric.compute()
outs[f'{step_name}_mcc'] = mcc
#outs[f'{step_name}_mccold'] = mcc2
#confmat = self.confmat.compute().long().detach().cpu().numpy()
self.recall_metric.reset()
self.precision_metric.reset()
self.f1_score.reset()
self.accuracy.reset()
self.mcc.reset()
#self.confmat.reset()
if float(mcc) > self.best_mcc:
self.best_mcc = float(mcc)
self.log('best_mcc', mcc)
if self.valor_mcc[f'{step_name}_mcc'] < float(outs[f'{step_name}_mcc']):
self.valor_mcc[f'{step_name}_mcc'] = float(outs[f'{step_name}_mcc'])
print(matthews_corrcoef(preds, target), mcc, len(target), len(preds))
if self.global_rank == 0:
print(matthews_corrcoef(preds, target), mcc, len(target), len(preds))
if self.valor_mcc[f'{step_name}_mcc'] < float(outs[f'{step_name}_mcc']):
self.valor_mcc[f'{step_name}_mcc'] = float(outs[f'{step_name}_mcc'])
#self.logger.experiment.log({f"best_mcc-confusion_matrix" : wandb.plot.confusion_matrix(preds=preds, y_true=target, class_names=[i[:3].upper() for i in self.hparams.labels])}, commit=False)
#print("\n\nCONFUSION MATRIX:\n", confmat, "\n")
print(f"{step_name}_acc: {float(outs[f'{step_name}_acc']):.5f}")
print(f"{step_name}_mcc: {float(outs[f'{step_name}_mcc']):.5f}")
print(f"Number of cases: {int(tn+fp+fn+tp)}")
print('\n')
for k, v in outs.items():
self.log(k, v)
def forward(self, input_ids, attention_masks, *args):
return self.model(input_ids, attention_masks, *args)
def training_step(self, batch, batch_idx):
return self.step(batch, "train")
def validation_step(self, batch, batch_idx):
return self.step(batch, "val")
def test_step(self, batch, batch_idx):
return self.step(batch, "test")
def validation_epoch_end(self, outputs: List[dict]):
return self.calculate_metrics(outputs, step_name='val')
def test_epoch_end(self, outputs: List[dict]):
return self.calculate_metrics(outputs, step_name='test')
def train_dataloader(self):
return self.create_data_loader(self.hparams.train_path, shuffle=True)
def val_dataloader(self):
return self.create_data_loader(self.hparams.val_path)
def test_dataloader(self):
return self.create_data_loader(self.hparams.test_path)
def create_data_loader(self, ds_path: str, shuffle=False):
#print(self.hparams.cat_names)
return DataLoader(
JEFDataset(ds_path, self.hparams.dep_var, self.hparams.text_col, self.hparams.lowercase),
batch_size=self.hparams.batch_size,
shuffle=shuffle,
pin_memory=True,
drop_last=True,
num_workers=int(cpu_count()),
collate_fn=EncodeCollateFn(self.tokenizer)
)
def setup(self, stage):
if stage == 'fit':
total_devices = max(1, self.hparams.n_gpus) * 1
train_batches = len(self.train_dataloader()) // total_devices
self.train_steps = math.ceil((self.hparams.epochs * train_batches) / self.hparams.accumulate_grad_batches)
self.train_steps = int(math.ceil(self.train_steps * 1.01))#1.04)
def configure_optimizers(self):
train_steps = self.train_steps
optimizer = torch.optim.AdamW([p for p in self.model.parameters() if p.requires_grad], lr=self.lr, weight_decay=0.1)
lr_scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer,
max_lr=self.lr,
total_steps=train_steps,
three_phase=True,
epochs=self.hparams.epochs)
return [optimizer], [{"scheduler": lr_scheduler, "interval": "step"}]
def _setup_parser():
    """Set up Python's ArgumentParser with data, model, trainer, and other arguments.

    ``add_help`` is disabled so that ``--help``/``-h`` can be registered
    explicitly as the last argument.

    Returns:
        argparse.ArgumentParser: the configured parser.
    """
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument("--drop_mult", type=float)
    parser.add_argument("--use_special_classifier", type=str)
    parser.add_argument("--lr", type=float)
    parser.add_argument("--thresh_step", type=float)
    parser.add_argument("--accumulate_grad_batches", type=int)
    parser.add_argument("--gradient_clip_val", type=float)
    # Fixed: a stray trailing comma after this call used to build a useless
    # one-element tuple.
    parser.add_argument("--lowercase", type=str)
    parser.add_argument("--stochastic_weight_avg", type=str)
    parser.add_argument("--epochs", type=int)
    parser.add_argument("--batch_size", type=int)
    parser.add_argument("--project_name", type=str)
    parser.add_argument("--experiment_name", type=str)
    parser.add_argument("--train_path", type=str)
    parser.add_argument("--valid_path", type=str)
    parser.add_argument("--test_path", type=str)
    parser.add_argument("--help", "-h", action="help")
    return parser
def main():
"""
Run an experiment.
Sample command:
```
python training/run_experiment.py --max_epochs=3 --gpus='0,' --num_workers=20 --model_class=MLP --data_class=MNIST
```
"""
parser = _setup_parser()
args = parser.parse_args()
train_path = args.train_path
val_path = args.valid_path
test_path = args.test_path
assert os.path.exists(train_path), f"File doesn't exist: {train_path}"
assert os.path.exists(val_path), f"File doesn't exist: {val_path}"
assert os.path.exists(test_path), f"File doesn't exist: {val_path}"
df_train = pd.read_parquet(train_path, columns=['label'])
df_train.label = df_train.label.replace('PROVIMENTO PARCIAL', 'PROVIMENTO')
df_train.label = df_train.label.replace('NÃO PROVIMENTO', 0)
df_train.label = df_train.label.replace('PROVIMENTO', 1)
correct_output = torch.tensor(df_train.label.values)
trn_wgts = ((correct_output.shape[0] / torch.sum(correct_output, dim=0))-1)
trn_wgts = trn_wgts.cpu().numpy()
del(df_train)
hparams = Namespace(
train_path=train_path,
val_path=val_path,
test_path=test_path,
batch_size=args.batch_size, #12 3090
epochs=args.epochs, #7
drop_mult=args.drop_mult,
use_special_classifier=args.use_special_classifier,
lowercase=str2bool(args.lowercase),
lr=args.lr,
thresh_step=args.thresh_step,
accumulate_grad_batches=args.accumulate_grad_batches,
gradient_clip_val=args.gradient_clip_val,
stochastic_weight_avg=str2bool(args.stochastic_weight_avg),
dep_var = 'label',
text_col = "preprocessed_full_text_first_instance_court_ruling",
bert_model_path='./bigbird-jus',
labels=[0, 1],
sync_batchnorm=True,
device='cuda',
train_weights=trn_wgts,
bert_unfreeze_mode='encoder_last', # 'encoder_last', 'pooler_last', 'all', 'none'
weighted_loss=True,
precision='bf16' ,
n_gpus= 2,
deterministic=True,
)
module = TrainingModule(hparams)
gc.collect()
torch.cuda.empty_cache()
PROJECT_NAME = args.project_name
EXPERIMENT_NAME = args.experiment_name
lr_logger = LearningRateMonitor(logging_interval='step', log_momentum=True)
if len(EXPERIMENT_NAME) > 2:
wandb_logger = WandbLogger(name=EXPERIMENT_NAME, project=PROJECT_NAME, offline=False)
else:
wandb_logger = WandbLogger(project=PROJECT_NAME)
EXPERIMENT_NAME = | |
None
self.ClusterIp = None
self.NodePort = None
self.CpuLimit = None
self.MemLimit = None
self.AccessType = None
self.UpdateType = None
self.UpdateIvl = None
self.ProtocolPorts = None
self.Envs = None
self.ApplicationName = None
self.Message = None
self.Status = None
self.MicroserviceType = None
self.CpuRequest = None
self.MemRequest = None
self.SubnetId = None
self.GroupResourceType = None
self.InstanceCount = None
self.UpdatedTime = None
self.MaxSurge = None
self.MaxUnavailable = None
def _deserialize(self, params):
self.GroupId = params.get("GroupId")
self.GroupName = params.get("GroupName")
self.InstanceNum = params.get("InstanceNum")
self.CurrentNum = params.get("CurrentNum")
self.CreateTime = params.get("CreateTime")
self.Server = params.get("Server")
self.Reponame = params.get("Reponame")
self.TagName = params.get("TagName")
self.ClusterId = params.get("ClusterId")
self.ClusterName = params.get("ClusterName")
self.NamespaceId = params.get("NamespaceId")
self.NamespaceName = params.get("NamespaceName")
self.ApplicationId = params.get("ApplicationId")
self.LbIp = params.get("LbIp")
self.ApplicationType = params.get("ApplicationType")
self.ClusterIp = params.get("ClusterIp")
self.NodePort = params.get("NodePort")
self.CpuLimit = params.get("CpuLimit")
self.MemLimit = params.get("MemLimit")
self.AccessType = params.get("AccessType")
self.UpdateType = params.get("UpdateType")
self.UpdateIvl = params.get("UpdateIvl")
if params.get("ProtocolPorts") is not None:
self.ProtocolPorts = []
for item in params.get("ProtocolPorts"):
obj = ProtocolPort()
obj._deserialize(item)
self.ProtocolPorts.append(obj)
if params.get("Envs") is not None:
self.Envs = []
for item in params.get("Envs"):
obj = Env()
obj._deserialize(item)
self.Envs.append(obj)
self.ApplicationName = params.get("ApplicationName")
self.Message = params.get("Message")
self.Status = params.get("Status")
self.MicroserviceType = params.get("MicroserviceType")
self.CpuRequest = params.get("CpuRequest")
self.MemRequest = params.get("MemRequest")
self.SubnetId = params.get("SubnetId")
self.GroupResourceType = params.get("GroupResourceType")
self.InstanceCount = params.get("InstanceCount")
self.UpdatedTime = params.get("UpdatedTime")
self.MaxSurge = params.get("MaxSurge")
self.MaxUnavailable = params.get("MaxUnavailable")
class CosCredentials(AbstractModel):
    """Temporary COS account credentials.

    Fields (each may be ``None`` when the backend returns no value):
    ``SessionToken`` (session token), ``TmpAppId`` (temporary application
    ID), ``TmpSecretId`` (temporary caller identity ID), ``TmpSecretKey``
    (temporary secret key), ``ExpiredTime`` (expiry timestamp, int) and
    ``Domain`` (owning domain).
    """

    _SCALAR_FIELDS = ("SessionToken", "TmpAppId", "TmpSecretId",
                      "TmpSecretKey", "ExpiredTime", "Domain")

    def __init__(self):
        for name in self._SCALAR_FIELDS:
            setattr(self, name, None)

    def _deserialize(self, params):
        # Copy every scalar field straight from the response dict.
        for name in self._SCALAR_FIELDS:
            setattr(self, name, params.get(name))
class CosDownloadInfo(AbstractModel):
    """Information required to download an object from COS.

    Fields (each may be ``None``): ``Bucket`` (bucket name), ``Region``
    (region), ``Path`` (object path) and ``Credentials`` (authentication
    info, a :class:`CosCredentials`).
    """

    _SCALAR_FIELDS = ("Bucket", "Region", "Path")

    def __init__(self):
        for name in self._SCALAR_FIELDS:
            setattr(self, name, None)
        self.Credentials = None

    def _deserialize(self, params):
        for name in self._SCALAR_FIELDS:
            setattr(self, name, params.get(name))
        raw_creds = params.get("Credentials")
        if raw_creds is not None:
            self.Credentials = CosCredentials()
            self.Credentials._deserialize(raw_creds)
class CosUploadInfo(AbstractModel):
    """Information required to upload a package to COS.

    Fields (each may be ``None``): ``PkgId`` (package ID), ``Bucket``
    (bucket), ``Region`` (target region), ``Path`` (storage path) and
    ``Credentials`` (authentication info, a :class:`CosCredentials`).
    """

    _SCALAR_FIELDS = ("PkgId", "Bucket", "Region", "Path")

    def __init__(self):
        for name in self._SCALAR_FIELDS:
            setattr(self, name, None)
        self.Credentials = None

    def _deserialize(self, params):
        for name in self._SCALAR_FIELDS:
            setattr(self, name, params.get(name))
        raw_creds = params.get("Credentials")
        if raw_creds is not None:
            self.Credentials = CosCredentials()
            self.Credentials._deserialize(raw_creds)
class CreateApplicationRequest(AbstractModel):
    """CreateApplication request body.

    Fields: ``ApplicationName`` (application name), ``ApplicationType``
    (V: VM app, C: container app, S: serverless app), ``MicroserviceType``
    (M: service-mesh app, N: normal app, G: gateway app),
    ``ApplicationDesc`` (description), ``ApplicationLogConfig`` (log
    config, deprecated), ``ApplicationResourceType`` (resource type,
    deprecated) and ``ApplicationRuntimeType`` (runtime type).
    """

    _SCALAR_FIELDS = ("ApplicationName", "ApplicationType",
                      "MicroserviceType", "ApplicationDesc",
                      "ApplicationLogConfig", "ApplicationResourceType",
                      "ApplicationRuntimeType")

    def __init__(self):
        for name in self._SCALAR_FIELDS:
            setattr(self, name, None)

    def _deserialize(self, params):
        for name in self._SCALAR_FIELDS:
            setattr(self, name, params.get(name))
class CreateApplicationResponse(AbstractModel):
    """CreateApplication response body.

    ``Result`` is the new application ID (may be ``None``); ``RequestId``
    is the unique request ID returned with every call, needed when
    reporting issues.
    """

    def __init__(self):
        self.Result = None
        self.RequestId = None

    def _deserialize(self, params):
        for name in ("Result", "RequestId"):
            setattr(self, name, params.get(name))
class CreateClusterRequest(AbstractModel):
    """CreateCluster request body.

    Fields: ``ClusterName`` (cluster name), ``ClusterType`` (cluster type),
    ``VpcId`` (VPC ID), ``ClusterCIDR`` (CIDR assigned to cluster
    containers and service IPs), ``ClusterDesc`` (remarks), ``TsfRegionId``
    (owning TSF region), ``TsfZoneId`` (owning TSF availability zone),
    ``SubnetId`` (VPC subnet ID) and ``ClusterVersion`` (cluster version).
    """

    _SCALAR_FIELDS = ("ClusterName", "ClusterType", "VpcId", "ClusterCIDR",
                      "ClusterDesc", "TsfRegionId", "TsfZoneId", "SubnetId",
                      "ClusterVersion")

    def __init__(self):
        for name in self._SCALAR_FIELDS:
            setattr(self, name, None)

    def _deserialize(self, params):
        for name in self._SCALAR_FIELDS:
            setattr(self, name, params.get(name))
class CreateClusterResponse(AbstractModel):
    """CreateCluster response body.

    ``Result`` is the new cluster ID; ``RequestId`` is the unique request
    ID returned with every call, needed when reporting issues.
    """

    def __init__(self):
        self.Result = None
        self.RequestId = None

    def _deserialize(self, params):
        for name in ("Result", "RequestId"):
            setattr(self, name, params.get(name))
class CreateConfigRequest(AbstractModel):
    """CreateConfig request body.

    Fields: ``ConfigName`` (config item name), ``ConfigVersion`` (version),
    ``ConfigValue`` (value), ``ApplicationId`` (application ID),
    ``ConfigVersionDesc`` (version description), ``ConfigType`` (value
    type) and ``EncodeWithBase64`` (bool, whether the value is
    Base64-encoded).
    """

    _SCALAR_FIELDS = ("ConfigName", "ConfigVersion", "ConfigValue",
                      "ApplicationId", "ConfigVersionDesc", "ConfigType",
                      "EncodeWithBase64")

    def __init__(self):
        for name in self._SCALAR_FIELDS:
            setattr(self, name, None)

    def _deserialize(self, params):
        for name in self._SCALAR_FIELDS:
            setattr(self, name, params.get(name))
class CreateConfigResponse(AbstractModel):
    """CreateConfig response body.

    ``Result`` is ``True`` on successful creation, ``False`` otherwise
    (may be ``None``); ``RequestId`` is the unique request ID returned
    with every call, needed when reporting issues.
    """

    def __init__(self):
        self.Result = None
        self.RequestId = None

    def _deserialize(self, params):
        for name in ("Result", "RequestId"):
            setattr(self, name, params.get(name))
class CreateContainGroupRequest(AbstractModel):
    """CreateContainGroup request body.

    Describes the container deployment group to create: the owning
    application (``ApplicationId``) and namespace (``NamespaceId``), the
    group name (1-60 chars, starts with a letter or underscore), instance
    count, access type (0: public, 1: in-cluster, 2: NodePort), the
    protocol/port mappings (``ProtocolPorts``, list of ``ProtocolPort``),
    target cluster and subnet, K8S CPU/memory requests and limits for the
    main, agent and istio-proxy containers, the group comment (max 200
    chars), the group resource type, and the update strategy
    (``UpdateType`` 0: fast, 1: rolling with ``UpdateIvl`` as the rolling
    interval).
    """

    _LEADING_FIELDS = ("ApplicationId", "NamespaceId", "GroupName",
                       "InstanceNum", "AccessType")
    _TRAILING_FIELDS = ("ClusterId", "CpuLimit", "MemLimit", "GroupComment",
                        "UpdateType", "UpdateIvl", "CpuRequest",
                        "MemRequest", "GroupResourceType", "SubnetId",
                        "AgentCpuRequest", "AgentCpuLimit",
                        "AgentMemRequest", "AgentMemLimit",
                        "IstioCpuRequest", "IstioCpuLimit",
                        "IstioMemRequest", "IstioMemLimit")

    def __init__(self):
        for name in self._LEADING_FIELDS:
            setattr(self, name, None)
        self.ProtocolPorts = None
        for name in self._TRAILING_FIELDS:
            setattr(self, name, None)

    def _deserialize(self, params):
        for name in self._LEADING_FIELDS:
            setattr(self, name, params.get(name))
        raw_ports = params.get("ProtocolPorts")
        if raw_ports is not None:
            self.ProtocolPorts = []
            for entry in raw_ports:
                port = ProtocolPort()
                port._deserialize(entry)
                self.ProtocolPorts.append(port)
        for name in self._TRAILING_FIELDS:
            setattr(self, name, params.get(name))
class CreateContainGroupResponse(AbstractModel):
    """CreateContainGroup response body.

    ``Result`` is the ID of the successfully created deployment group, or
    ``None`` on failure; ``RequestId`` is the unique request ID returned
    with every call, needed when reporting issues.
    """

    def __init__(self):
        self.Result = None
        self.RequestId = None

    def _deserialize(self, params):
        for name in ("Result", "RequestId"):
            setattr(self, name, params.get(name))
class CreateGroupRequest(AbstractModel):
"""CreateGroup请求参数结构体
"""
def __init__(self):
"""
:param | |
<reponame>justyncw/STAR_MELT<filename>utils_saha_av.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 10 11:23:41 2021
@author: jcampbellwhite001
"""
from numpy import *
from scipy.special import gamma
from matplotlib import *
from matplotlib.pyplot import *
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
from scipy import optimize
#from meanclip import meanclip
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
from mpl_toolkits.axes_grid1.parasite_axes import SubplotHost
from scipy.stats import spearmanr
from astropy.io import fits
import time
from scipy import interpolate
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import numpy as np
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import utils_shared_variables as USH
starttime=time.time()
def saha_av(em_lines_fit,N_range=[3,15],T_range=[1000,15500]):
#subplots_adjust(left=0.15, bottom=0.15, right=0.97, top=0.97, wspace=0.1, hspace=0.05)
matplotlib.rc('font', family='serif',size=14)
#----------------------------------------------
#natural constants
clightkms=299792. #km/s
clight=29979245800.0 #cm/s
eVtoK=1.16045221e4 #from NIST conversion from eV to K
kboltzmann= 1.3806e-16 #erg/K
hplanck=6.626e-27 #erg s
me=9.109e-28 #electron mass in g
xi_Fe= 7.9024681 #ionization potential of Fe in eV
xi_He=24.58741
xiFe=xi_Fe*eVtoK #ionization potential of Fe in K
xiHe=xi_He*eVtoK #ionization potential of He in K
#numberobs=3 #number of observations
#minaki=1e5 #Minimum Aki to be considered
#new: here I put the N and T ranges so I can find them easier.
N=10**arange(3,15,0.05) #for real
T=arange(1000.,12500.,25.)
#N=10**arange(3,15,0.5) #
#T=arange(1000.,12500.,500.)
N=10**arange(N_range[0],N_range[1],0.3)
T=arange(T_range[0],T_range[1],25)
#read in dataframe generated from STAR_MELT gauss_stats()
# D = pd.read_csv('saha_av_df_test.csv',delimiter=',',index_col=0)
# #min Aki
# D.drop(D[D.Aki < 1e5].index, inplace=True)
# D.drop(D[D.int_flux > 100].index, inplace=True)
# D.drop(D[D.w0==8217.48].index, inplace=True)
# D.drop(D[D.element=='Fe II'].index, inplace=True)
D=em_lines_fit
#remove any duplicate entries for the same w0 and mjd
D.drop_duplicates(subset=['w0','mjd'], inplace=True)
print('Saha calculation for',D.element.any())
if D.element.any() != 'Fe' and D.element.any() != 'He':
print('ERROR, only have partition fn for Fe or He')
if D.element.any()=='Fe':
xi=xiFe
elif D.element.any()=='He':
xi=xiHe
#only keep lines that have data for all dates given in D
numberobs=len(unique(D.mjd))
value_counts = D['w0'].value_counts()
to_remove = value_counts[value_counts < numberobs].index
D = D[~D.w0.isin(to_remove)]
dates=D.mjd.to_numpy()
#create df of the fluxes and the erros for each wl
int_fluxs=D.pivot(index='w0',columns='mjd',values='int_flux').to_numpy()
errors=D.pivot(index='w0',columns='mjd',values='gof').to_numpy()
# if D['element'].str.contains('FeI').any() == False:
# D['ele'] = np.where(
# D['sp_num'] == 1, 'FeI', np.where(
# D['sp_num'] == 2, 'FeII', -1))
# else:
# D['ele']=D['element']
#keep one of each line for theoretical caluclations
D_lines=D.drop_duplicates(subset=['w0']).sort_values('w0')
ele=D_lines.element.to_numpy()
sp=D_lines.sp_num.to_numpy()
wl=D_lines.w0.to_numpy()
aki=D_lines.Aki.to_numpy()
gi=D_lines.g_k.to_numpy()
gj=D_lines.g_i.to_numpy()
ej=D_lines.Ei.to_numpy()
qual=D_lines.Acc.to_numpy()
# t1=int_fluxs[:,0]
# e1=errors[:,0]
# t2=int_fluxs[:,1]
# e2=errors[:,1]
# t3=int_fluxs[:,2]
# e3=errors[:,2]
t1=int_fluxs
e1=errors
qualo=zeros([size(wl)])
for i in range(0, size(wl)):
if qual[i]=='AAA':
qualo[i]=0.01
if qual[i]=='A+':
qualo[i]=0.02
if qual[i]=='AA':
qualo[i]=0.02
if qual[i]=='A':
qualo[i]=0.03
if qual[i]=='B+':
qualo[i]=0.07
if qual[i]=='B':
qualo[i]=0.1
if qual[i]=='C+':
qualo[i]=0.18
if qual[i]=='C':
qualo[i]=0.25
if qual[i]=='D+':
qualo[i]=0.4
if qual[i]=='D':
qualo[i]=0.5
if qual[i]=='E':
qualo[i]=0.5
else:
qualo[i]=0.1
#From now on, I only work with the "faint" lines in the hope they won't be saturated.
#Calculate the scale factor: since data are not calibrated.
# scal=mean([t1,t2,t3])
# scal1=mean(t1)
# scal2=mean(t2)
# scal3=mean(t3)
# t1s=t1/scal1
# e1s=e1/scal1
# t2s=t2/scal2
# e2s=e2/scal2
# t3s=t3/scal3
# e3s=e2/scal3
scal=mean(t1)
t1s=t1/scal
e1s=e1/scal
"""errorbar(wl,t1s,yerr=e1s, fmt='b.',ecolor='b')
errorbar(wl,t2s,yerr=e2s, fmt='r.',ecolor='r')
errorbar(wl,t3s,yerr=e3s, fmt='g.',ecolor='g')
errorbar(wl,t4s,yerr=e4s, fmt='m.',ecolor='m')
errorbar(wl,t5s,yerr=e5s, fmt='c.',ecolor='c')
errorbar(wl,t6s,yerr=e6s, fmt='k.',ecolor='k')
errorbar(wl,t7s,yerr=e7s, fmt='w.',ecolor='grey')
errorbar(wl,t8s,yerr=e8s, fmt='y.',ecolor='Orange')"""
#-----------------------------
#Read in partition functions, taken from NIST, for Fe I and Fe II:
#The partition function is trickier, so I am using some values from NIST
#measured for various T and then interpolating
#You get the values from NIST level form https://physics.nist.gov/PhysRefData/ASD/levels_form.html
#For Fe I
t_part_eV=[0.05,0.1,0.25,0.5,0.75,1.0,1.25,1.5,2.0,2.5,3.0,3.5,4.0,4.5,6,]
t_part=transpose(transpose(t_part_eV)*eVtoK) #transposing or it complains in unit conversion
z_part_FeI=[12.79,16.57,21.71,30.89,47.99,78.54,127.43,196.65,388.89,628.86,889.55, 1152.77, 1408.13,1650.38 , 2284.12]
z_part_HeI=[1,1,1,1,1,1,1,1,1.01,1.08,1.42,2.33,4.16,7.18,24.66]
if D.element.any()=='Fe':
U=interpolate.interp1d(t_part,z_part_FeI)
elif D.element.any()=='He':
U=interpolate.interp1d(t_part,z_part_HeI)
#For Fe II
t_part_eV=[0.05,0.1,0.25,0.5,1.0,1.5,2.0,2.5,3.0,3.5,4.0,4.5,6]
t_part=transpose(transpose(t_part_eV)*eVtoK) #transposing or it complains in unit conversion
z_part_FeII=[15.00,21.19,33.78,46.73,76.51,121.10,184.22,270.66,381.79,515.11,666.02,829.49, 1353.32] #FeII
z_part_HeII=[2,2,2,2,2,2,2,2,2,2,2,2.01,2.19]
if D.element.any()=='Fe':
U1=interpolate.interp1d(t_part,z_part_FeII)
elif D.element.any()=='He':
U1=interpolate.interp1d(t_part,z_part_HeII)
#Define the functions that I need here:
def B_einst(Aul,gu,gl,wavelength): #remember I am giving lambda in AA
return Aul*(gu/gl)*(wavelength*1e-8)**3/(2*hplanck*clight)
def saha(t, ne, xj): #gives the ratio nj+1/nj, assumes xj is in K or needs a kboltzmann*t
return (2*np.pi*me*kboltzmann*t/hplanck**2)**(3./2.)*(1/ne)*(2*U1(t)/U(t))*exp(-xj/t)
def boltzmann(t,gi,gj,xj): #gives the ratio between i and j, assumes xj is in K or needs a kboltzmann*t
return ((1.0*gi)/(1.0*gj))* exp(-xj/t)
def opthinratio(a1,a2,l1,l2): #optically thin limit of the intensity ratio for lines with wavelentths l1,l2 and aki a1, a2 respectively
return (a1*l2)/(a2*l1) #intensity ratio of line 1/ line2
#All the ratios are relative. I need to give them with respect to the lowest
#Minimum for the Fe I transitions
fol=(sp!=2)
try:
ej_feI_min=min(ej[fol])
except(ValueError):
ej_feI_min=nan
#and for the Fe II transitions:
fil=(sp==2)
try:
ej_feII_min=min(ej[fil])
except(ValueError):
ej_feII_min=nan
#Now what I need for the Saha part is the ionization potential (OK, that's xiFe in K or xi in eV)
#but for the Boltzmann relation I need the excitation between the levels.
# ---------------------------------------------------------------------------
# Compute per-line theoretical fluxes ("ratiolines") over a (T, N) grid using
# Saha (Fe II only) and Boltzmann factors, then normalize by the mean Fe I
# flux.  Relies on variables defined earlier in the file: ej, hplanck, clight,
# wl, sp, aki, gi, gj, kboltzmann, eVtoK, T, N, xi, ele, saha(), boltzmann().
# NOTE(review): indentation was reconstructed from context during review --
# verify block nesting (especially the fornorm accumulation) against the
# original file.
# ---------------------------------------------------------------------------
#This is relative, so I need to choose one (e.g. the lowest ionization potential for upper level)
#and make them all relative to this one.
#The energy of the upper level is the lower level plus hnu=h*c/lambda
# NOTE(review): wl*1.e-8 suggests wavelengths in Angstrom converted to cm for
# cgs constants -- TODO confirm the unit system of hplanck/clight/kboltzmann.
eup= ej + (hplanck*clight/(wl*1.e-8))*(1/(kboltzmann*eVtoK))
#Then I get the minimum excitation energy, which is the one I can use as a reference for Boltzmann.
# Boolean mask for non-Fe II transitions (species code 2 denotes Fe II).
fol=(sp!=2)
try:
    ei_feI_min=min(eup[fol])
except(ValueError):
    # min() of an empty selection: no Fe I transitions in this line list.
    ei_feI_min=nan
#and for the Fe II transitions:
fil=(sp==2)
try:
    ei_feII_min=min(eup[fil])
except(ValueError):
    # No Fe II transitions present.
    ei_feII_min=nan
#Note: this means that at the end everything will be in terms of the lowest energy level
#but since I will normalize things again, that should be fine.
#Thus, the excitation energy relative to the lower one:
x_rel=zeros([size(wl)])
nFeII=0. #to count the number of II
for i in range(0, size(wl)):
    if sp[i]==2:
        # Excitation energy relative to the lowest Fe II upper level, in K.
        x_rel[i]= (eup[i]-ei_feII_min)*eVtoK
        nFeII=nFeII+1.
    else:
        x_rel[i]=(eup[i]-ei_feI_min)*eVtoK
#-----------------------------
#Now, for all the densities and temperatures in the array, calculate the result.
#I call it ratioline although it is the non-scaled flux per line.
ratiolines=zeros([size(wl), size(T), size(N)]) #first index is the line, second one is the T, third is N.
fornorm=zeros([size(T), size(N)])
for k in range(0, size(wl)): #for the line
    for i in range(0, size(T)): #for the temperature
        for j in range(0, size(N)): #for the density
            #now the line ratio is obtained by taking into account Saha (for ions), Boltzmann (for all) and the natural ratio (for all)
            if sp[k]==2: # and aki[k]<1e5:
                # Fe II: ionized species, so the Saha factor enters as well.
                ratiolines[k,i,j]=(aki[k]/(1.e-8*wl[k]))*saha(T[i], N[j], xi)*boltzmann(T[i],gi[k],gj[k], x_rel[k])
                #print 'For FeII, T, N, line ratio', wl[k],T[i], log10(N[j]), ratiolines[k,i,j]
                #print 'Aki term, saha, boltzmann', (aki[k]/wl[k]), saha(T[i], N[j], xi), boltzmann(T[i],gi[k],gj[k], x_rel[k])
            #else
            if sp[k]!=2: # and aki[k]<1e5: #this is for the FeI, which I also use for normalization
                ratiolines[k,i,j]=(aki[k]/(1.e-8*wl[k]))*boltzmann(T[i],gi[k],gj[k], x_rel[k])
                #print 'For FeI, T, N, line ratio', wl[k],T[i], log10(N[j]), ratiolines[k,i,j]
                #print 'Aki term, boltzmann', (aki[k]/wl[k]), boltzmann(T[i],gi[k],gj[k], x_rel[k])
                # Only Fe I lines contribute to the normalization sum, per the
                # divisor (size(wl) - nFeII) below -- TODO confirm nesting.
                fornorm[i,j]=fornorm[i,j]+ratiolines[k,i,j] #sum them all FeIs for normalization
#Now that all the line ratios are done, I normalize them so that they are kind of reasonable:
fornorm=fornorm/(1.*size(wl)-1.*nFeII) #per line normalization, count only Fe I lines
#print 'Old ratiolines', ratiolines
for k in range(0, size(wl)): #for the line
    for i in range(0, size(T)): #for the temperature
        for j in range(0, size(N)): #for the density
            if fornorm[i,j]>0:
                #print 'Old ratiolines', ratiolines[k,i,j]
                ratiolines[k,i,j]=ratiolines[k,i,j]/fornorm[i,j]
                #print 'New ratiolines', ratiolines[k,i,j]
            else:
                # No Fe I flux at this (T, N): normalization undefined.
                ratiolines[k,i,j]=nan
#print 'New ratiolines', ratiolines
#Note: The Fe I ratiolines are quite reasonable, the Fe II go crazy in the extremes.
for l in range(0, size(wl)):
    print('Range of line ratios for line lambda=', l, ele[l],int(sp[l]),np.round(wl[l],2), np.min(ratiolines[l,:,:]),'-', np.max(ratiolines[l,:,:]))
# print('As of here, I have ratiolines which are the theoretical fluxes')
# print('for each line, T, N, and normalized to some decent number.')
# print('What I do now is to compare them with the observed ones.')
# print('Leaving aside normalizations, the best one will be the one for which the ratios obs/theo are about the same')
# print('which is, the std of the ratios will be smallest.')
#-----------------------------
#Now it is the time to compare the data and the models.
#Note that things are still not calibrated against each other. I choose to fix one line
#(so for this line the error will be zero) and compare them all to this one.
#I may need to check other lines too in case this is bad.
#Which line to choose? The one for which the data on more nights seem robust enough.
# ts=[t1s,t2s,t3s] #put all values in the same array
# ts=transpose(ts)
# #now, ts[:,l]
<filename>ixian/task.py
# Copyright [2018-2020] <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import typing
from ixian.check.checker import hash_object
from ixian.config import CONFIG
from ixian.exceptions import AlreadyComplete
from ixian.utils.color_codes import BOLD_WHITE, ENDC, GRAY, OK_GREEN
logger = logging.getLogger(__name__)
TASKS = {}
class TaskRunner(object):
"""
A task is a wrapper around functions that adds in various functionality
such as dependencies and check functions.
func - function to run
category - category to add this task to
check - list of checkers
clean - clean before building
config - list of relevant settings to display in task help
name - name of this task
short_description - short description for task
description - long description for task
parent - parent task (this task will be run first if it is run)
children - tasks this task depends on (They will be run first)
"""
checkers = None
def __init__(
    self,
    task=None,
    func=None,
    category=None,
    check=None,
    clean=None,
    config=None,
    depends=None,
    name=None,
    parent=None,
    short_description=None,
    description=None,
):
    """Wrap a task function with dependency, checker, and registry handling.

    :param task: task object this runner wraps (None for virtual targets)
    :param func: function to run (None for virtual targets)
    :param category: category to group this task under; stored uppercased
    :param check: checker or list of checkers gating execution
    :param clean: callable run before building when cleaning
    :param config: list of relevant settings to display in task help
    :param depends: initial list of dependency tasks or task names
    :param name: task name; falls back to ``func.__name__`` when omitted
    :param parent: parent task name(s) this task is added to as a dependency
    :param short_description: one-line description for task listings
    :param description: long description for task help
    """
    self.task = task
    self.func = func
    self._depends = depends or []
    self.category = category.upper() if category else None
    self.clean = clean
    self.short_description = short_description or ""
    self.config = config
    # determine task name: fall back to the wrapped function's name.
    # (Previously self.name was never assigned when name=None, which raised
    # AttributeError at the registry lookup below.)
    if name is not None:
        self.name = name
    elif func is not None:
        self.name = func.__name__
    else:
        self.name = None
    # determine description
    self.description = description
    # Add task to global registry. Merge virtual target's dependencies if they exist.
    if self.name in TASKS:
        task_instance = TASKS[self.name]
        # The task is virtual if there is no func; replace it but keep the
        # dependencies that were already attached to the placeholder.
        if task_instance.func is None:
            self.add_dependency(*task_instance._depends)
            task_instance = self
        else:
            logger.warning("Duplicate task definition: {}".format(self.name))
    else:
        task_instance = self
    TASKS[self.name] = task_instance
    # add task to VirtualTargets if a parent is specified
    if parent:
        for parent in parent if isinstance(parent, list) else [parent]:
            self.add_to_parent(parent)
    # Setup checkers: always stored as a list (or left as the class default None).
    if check:
        if isinstance(check, (list, tuple)):
            self.checkers = check
        else:
            self.checkers = [check]
def __str__(self):
return f"<{type(self).__name__}@{id(self)} func={self.name}>"
def __unicode__(self):
return f"<{type(self).__name__}@{id(self)} func={self.name}>"
def __repr__(self):
return f"<{type(self).__name__}@{id(self)} func={self.name}>"
@property
def in_context(self):
if not self.task:
return False
return self.task.contexts is True or CONFIG.RUN_CONTEXT in self.task.contexts
def add_to_parent(self, name: str):
    """Add this task as a dependency of another task.

    This is a grouping method that allows modules to inject
    dependencies into common targets.
    If the target task is not defined a no-op task will be created to wrap
    the added tasks.

    :param name: name of parent task to add task to
    :return: parent task
    """
    try:
        # EAFP lookup in the global TASKS registry.
        parent = TASKS[name]
    except KeyError:
        # VirtualTarget wasn't defined explicitly, or task hasn't been loaded yet.
        # create a TaskRunner for the target. If an explicit task is loaded after
        # it will replace this and assume the children that were already added.
        parent = TaskRunner(name=name)
        # NOTE: TaskRunner.__init__ also registers itself; this assignment is
        # therefore redundant but kept for clarity/explicitness.
        TASKS[name] = parent
    parent.add_dependency(self)
    return parent
def __call__(self, *args, **kwargs):
    # Delegate to execute(); note that `args` is intentionally forwarded as a
    # single tuple because execute() takes the task args as one positional
    # parameter rather than *args.
    return self.execute(args, **kwargs)
def execute(self, args, **kwargs):
    """Execute this task.

    Executes this task including any dependencies that fail their checks.
    If a dependency fails its check then this task will execute even if
    its own checks pass.

    Tasks and dependencies may be forced by passing ``force=True`` or
    ``force_all=True`` as kwargs.
    Tasks and dependency clean methods may be run by passing ``clean=True``
    or ``clean_all=True`` as kwargs. Clean implies ``force=True``.

    :param args: tuple of args to pass through to the task function
    :param kwargs: options for task execution (clean/clean_all/force/force_all)
    :return: return value from task function
    """
    # NOTE: `clean` is read with .get (left in kwargs) while the other flags
    # are popped -- TODO confirm whether anything downstream relies on that.
    clean_root = kwargs.get("clean", False)
    clean_all = kwargs.pop("clean_all", False)
    force_root = kwargs.pop("force", False)
    force_all = kwargs.pop("force_all", False)
    # Flag implications: clean => force; clean_all => clean + force for all.
    if clean_root:
        force_root = True
    if clean_all:
        clean_root = True
        force_all = True
    if force_all:
        force_root = True
    # save force to task instance so it may be referenced downstream
    # TODO: this should be passing in `force`
    self.force = True
    args_as_str = CONFIG.format(" ".join([str(arg) for arg in args]))
    logger.debug(f"[exec] {self.name}({args_as_str}) force={force_root} clean={clean_root}")
    def execute_node(node, clean, force, args=None):
        # Resolve this tree node's runner from the global registry; walk the
        # dependency tree depth-first, running cleans/checks/functions.
        runner = TASKS[node["name"]]
        if runner and runner.clean and clean:
            logger.debug(f"Cleaning Task: {runner.clean}")
            runner.clean()
        complete_dependencies = 0
        # Dependencies always receive the *_all flags (not this node's own
        # clean/force); AlreadyComplete signals a dependency that was skipped.
        for dependency in node["dependencies"]:
            try:
                execute_node(dependency, clean_all, force_all)
            except AlreadyComplete:
                complete_dependencies += 1
        dependencies_complete = complete_dependencies == len(node["dependencies"])
        # Execute function if there is one. Targets may not have a function. If any dependency
        # was run, then this task must run too.
        if runner and runner.func:
            passes, checkers = runner.check(force)
            if dependencies_complete and passes:
                logger.debug(f"[skip] {node['name']}, already complete.")
                raise AlreadyComplete()
            else:
                # set tasks force attribute so it's setup the same as if it were run directly.
                runner.task.__task__.force = force
                return_value = runner.func(*args or [])
                # save checker only after function has completed successfully. Save should be
                # called even if force=True
                if checkers:
                    for checker in checkers:
                        checker.save()
                logger.debug(f"[fini] {runner.name}")
                return return_value
    # Only the root node receives the user-supplied args.
    return execute_node(self.tree(flatten=False), clean_root, force_root, args)
def check(self, force: bool = False) -> (bool, list):
"""Return True if the task is complete based on configured checks.
If the task does not have a checker this method always returns `False`.
:param force: override the check and return True if True.
:return:
"""
checkers = [checker.clone() for checker in self.checkers] if self.checkers else None
passes = False
if self.checkers:
if force:
passes = False
else:
checks = [checker.check() for checker in checkers]
passes = all(checks)
return passes, checkers
def state(self, shallow: bool = True) -> typing.Optional[dict]:
"""
Calculates a dict of state generated from the tasks checkers.
:param shallow: only return hash for dependencies
:return: dict of state returned from checkers
"""
if self.checkers is None and self.depends is None:
return None
checkers = (
[checker.clone() for checker in self.checkers if checker.contribute_to_task_state]
if self.checkers
else None
)
depends = {}
for dependency in self.depends:
name = dependency.name
if shallow:
depends[name] = dependency.hash()
else:
depends[name] = dependency.state()
return {
"depends": depends,
"checks": [
{
"class": f"{type(checker).__module__}.{type(checker).__name__}",
"state": checker.state(),
}
for checker in checkers
],
}
def hash(self):
    # Hash of the shallow state dict; lets parent tasks detect changes in
    # this task without recursing into its full dependency state.
    return hash_object(self.state(shallow=True))
def add_dependency(self, *tasks):
self._depends.extend(tasks)
@property
def depends(self) -> list:
return [
dependency if isinstance(dependency, TaskRunner) else TASKS[CONFIG.format(dependency)]
for dependency in self._depends
]
def render_help(self, buffer) -> None:
    """render the "help" command

    Renders ixian internal help for the task. This help should explain
    how to use the task via ixian.
    Many tasks are proxies to other tools (e.g. npm, pipenv, etc). This
    help shouldn't try to replace that. Proxy tasks should indicate as such
    and include an example how to reach the tool's built-in help (--help)

    combines:
    - Name of task
    - Docstring as long description
    - task status tree

    :param buffer: writable text stream the help is rendered into
    """
    # Local import avoids issues at module load time.
    from ixian.config import CONFIG
    # NAME section: task name plus one-line summary.
    buffer.write(BOLD_WHITE)
    buffer.write("NAME\n")
    buffer.write(ENDC)
    buffer.write(f" {self.name} -- {self.short_description}\n")
    # DESCRIPTION section: long description, expanded via CONFIG.format.
    buffer.write(BOLD_WHITE)
    buffer.write("\nDESCRIPTION\n")
    buffer.write(ENDC)
    if self.description:
        buffer.write(CONFIG.format(self.description))
    # CONFIGURATION section: each entry is a "{VAR}" placeholder string;
    # config[1:-1] strips the surrounding braces for the key column.
    if self.config:
        buffer.write(BOLD_WHITE)
        buffer.write("\nCONFIGURATION\n")
        buffer.write(ENDC)
        # Column width from the longest placeholder -- TODO confirm the -1
        # offset lines values up as intended (braces vs ':' accounting).
        padding = max(len(config) for config in self.config) - 1
        for config in self.config:
            buffer.write(
                " - {key} {value}\n".format(
                    key="{key}:".format(key=config[1:-1]).ljust(padding),
                    value=CONFIG.format(config),
                )
            )
    # STATUS section: dependency tree with pass/fail icons.
    buffer.write(BOLD_WHITE)
    buffer.write("\n\nSTATUS\n")
    buffer.write(ENDC)
    self.render_status(buffer)
def render_status(self, buffer) -> None:
    """render task status.

    Display the dependency tree for the task.

    Formatting/Readability optimizations:
    - Tree trimming: Redundant nodes are trimmed from the status tree.
      If A and B both depend on C then C will only be shown once.

    :param buffer: writable text stream the tree is rendered into
    """
    def render_task(node, indent=0):
        # render task status for one node, then recurse into dependencies.
        # Nodes with name=None are anonymous grouping nodes: they emit no
        # line and do not increase the indent.
        if node["name"] is not None:
            passes = node["passes"]
            if passes:
                icon = OK_GREEN + "✔" + ENDC
            else:
                icon = GRAY + "○" + ENDC
            if indent:
                spacer = " " * indent
            else:
                spacer = ""
            task_line = f'{spacer}{icon} {node["name"]}\n'
            buffer.write(task_line)
            indent += 2
        for dependency in node["dependencies"]:
            render_task(dependency, indent=indent)
    # self.status() is defined elsewhere in this class -- presumably returns
    # the tree() structure annotated with "passes" flags; verify there.
    render_task(self.status(), indent=0)
def tree(self, dedupe: bool = True, flatten: bool = True) -> dict:
"""
Return tree of tasks, with this task as the root.
:param dedupe: remove duplicates from tree
:param flatten: flatten single item dependcy lists into the parent
:return:
"""
| |
# -*- coding: utf-8 -*-
"""
Keras (with Tensorflow) re-implementation of TP-GAN.
Main part of this code is implemented reffering to author's pure Tensorflow implementation.
https://github.com/HRLTY/TP-GAN
original paper
<NAME>., <NAME>., <NAME>., & <NAME>. (2017). Beyond face rotation: Global and local perception gan for photorealistic and identity preserving frontal view synthesis. arXiv preprint arXiv:1704.04086.
https://arxiv.org/abs/1704.04086
@author: yhiro
"""
from keras import backend as K
from keras.layers import Input, Add, Maximum, Dense, Activation, BatchNormalization, Conv2D, Conv2DTranspose, Reshape, Flatten, Concatenate, Lambda, MaxPooling2D, ZeroPadding2D, Dropout, AveragePooling2D, Average
from keras.optimizers import SGD, Adam
from keras.layers.advanced_activations import LeakyReLU
from keras.callbacks import TensorBoard, Callback
from keras.models import Model, Sequential
from keras import regularizers
from keras.initializers import Constant, RandomNormal, TruncatedNormal, Zeros
from keras import losses
from keras.utils import multi_gpu_model
import tensorflow as tf
import os
import numpy as np
import re
import cv2
from PIL import Image
import matplotlib.pyplot as plt
#from keras_tpgan import multipie_gen
import multipie_gen
tf.logging.set_verbosity(tf.logging.WARN) # record warnning message
class TPGAN():
def __init__(self, lcnn_extractor_weights='',#'D:/desktop/tpgan_keras/extract29v2_lr0.00010_loss0.997_valacc1.000_epoch1110.hdf5'
             base_filters=64, gpus=1,
             generator_weights='',#'D:/desktop/tpgan_keras/generator/epoch0480_loss0.560.hdf5'
             classifier_weights='',#'D:/desktop/tpgan_keras/classifier/epoch0480_loss0.560 (1).hdf5'
             discriminator_weights=''):#'D:/desktop/tpgan_keras/discriminator/epoch0480_loss0.222.hdf5'
    # LCNN: "Lookup-based Convolutional Neural Network" per the original
    # author's note -- NOTE(review): the class used below is LightCNN, so
    # "Light CNN" may be the intended expansion; confirm.
    """
    initialize TP-GAN network with given weights file. if weights file is None, the weights are initialized by default initializer.

    Args:
        lcnn_extractor_weights (str): Light-CNN weights file which is trained with celeb-1M and fine-tuned with MULTI-PIE.
        base_filters (int): base filters count of TP-GAN. default 64.
        gpus (int): number of gpus to use.
        generator_weights (str): trained generator weights file path. it is used to resume training. not required when train from scratch.
        classifier_weights (str): trained classifier weights file path. it is used to resume training. not required when training from scratch.
        discriminator_weights (str): trained discriminator weights file path. it is used to resume training. not required when training from scratch.
    """
    self.gpus = gpus
    self.base_filters = base_filters
    # Weight file paths are kept so the lazy singleton getters below can
    # load them on first access.
    self.generator_weights = generator_weights
    self.discriminator_weights = discriminator_weights
    self.classifier_weights = classifier_weights
    # Lazily-built singleton sub-models (see discriminator()/generator()/...).
    self._discriminator = None
    self._generator = None
    self._classifier = None
    self._parts_rotator = None # per-part face rotator networks (original comment in Chinese: "into the face")
    self.generator_train_model = None
    self.discriminator_train_model = None
    # Resume epoch counters parsed from the weights file names (0 = scratch).
    self.gen_current_epochs = self.current_epoch_from_weights_file(self.generator_weights)
    self.disc_current_epochs = self.current_epoch_from_weights_file(self.discriminator_weights)
    self.class_epochs = self.current_epoch_from_weights_file(self.classifier_weights)
    # Identity-feature extractor used by the perceptual/identity losses.
    self.lcnn = LightCNN(extractor_type='29v2', extractor_weights=lcnn_extractor_weights)
def current_epoch_from_weights_file(self, weights_file):
if weights_file is not None:
try:
ret_epochs = int(re.match(r'.*epoch([0-9]+).*.hdf5', weights_file).groups()[0])
except:
ret_epochs = 0
else:
ret_epochs = 0
return ret_epochs
def discriminator(self):
"""
getter of singleton discriminator
"""
if self._discriminator is None:
self._discriminator = self.build_discriminator(base_filters=self.base_filters)
if self.discriminator_weights is not None:
self._discriminator.load_weights(self.discriminator_weights)
return self._discriminator
def generator(self):
    """
    getter of singleton generator

    Builds the model on first access. Unlike the discriminator/classifier
    getters, loading of generator_weights is commented out below --
    presumably disabled intentionally during development; note that
    gen_current_epochs is still derived from generator_weights in __init__,
    so resuming will count epochs without restoring weights. Verify.
    """
    if self._generator is None:
        self._generator = self.build_generator(base_filters=self.base_filters) #
        # if self.generator_weights is not None:
        #     self._generator.load_weights(self.generator_weights)
    return self._generator
def classifier(self):
"""
getter of singleton classifier
"""
if self._classifier is None:
self._classifier = self.build_classifier()
if self.classifier_weights is not None:
self._classifier.load_weights(self.classifier_weights)
return self._classifier
def parts_rotator(self):
"""
getter of singleton part rotator for each part; left eye, right eye, nose, and mouth.
"""
if self._parts_rotator is None:
self._parts_rotator = self.build_parts_rotator(base_filters=self.base_filters)
return self._parts_rotator
def _add_activation(self, X, func='relu'):
"""
private func to add activation layer
"""
if func is None:
return X
elif func == 'relu':
return Activation('relu')(X)
elif func == 'lrelu':
return LeakyReLU()(X)
else:
raise Exception('Undefined function for activation: ' + func)
def _res_block(self, X, kernel_size, batch_norm=False, activation=None, name=None):
    """
    private func to add a residual block: two same-padding convolutions
    (optionally batch-normalized), each followed by the given activation,
    plus a shortcut connection from the block input.

    Args:
        X: input tensor.
        kernel_size: kernel size for both convolution layers.
        batch_norm (bool): use BatchNormalization (convs then have no bias).
        activation (str): activation name understood by _add_activation,
            or None for no activation.
        name (str): layer-name prefix.

    Returns:
        output tensor of the residual block.
    """
    X_shortcut = X
    if batch_norm:
        X = Conv2D(X.shape[-1].value, kernel_size=kernel_size, strides=(1, 1), padding='same', name=name+'_c1_0', use_bias=False, kernel_initializer=TruncatedNormal(stddev=0.02))(X)
        X = BatchNormalization(epsilon=1e-5, name=name+'_c1_0_bn')(X)
    else:
        X = Conv2D(X.shape[-1].value, kernel_size=kernel_size, strides=(1, 1), padding='same', name=name+'_c1_1', kernel_initializer=TruncatedNormal(stddev=0.02), bias_initializer=Zeros())(X)
    # Bug fix: the activation result was previously discarded (bare
    # `self._add_activation(X, activation)`), so the configured activation
    # never entered the functional graph. Capture the returned tensor.
    X = self._add_activation(X, activation)
    if batch_norm:
        X = Conv2D(X.shape[-1].value, kernel_size=kernel_size, strides=(1, 1), padding='same', use_bias=False, name=name+'_c2_0', kernel_initializer=TruncatedNormal(stddev=0.02))(X)
        X = BatchNormalization(epsilon=1e-5, name=name+'_c2_0_bn')(X)
    else:
        X = Conv2D(X.shape[-1].value, kernel_size=kernel_size, strides=(1, 1), padding='same', name=name+'_c2_1', kernel_initializer=TruncatedNormal(stddev=0.02), bias_initializer=Zeros())(X)
    # Same fix for the second activation.
    X = self._add_activation(X, activation)
    # Shortcut connection completes the residual block.
    X = Add()([X_shortcut, X])
    return X
def build_generator(self, name="generator", base_filters=64):
"""
build generator model.
"""
def combine_parts(size_hw, leye, reye, nose, mouth):
img_h, img_w = size_hw
leye_img = ZeroPadding2D(padding=((int(multipie_gen.LEYE_Y - multipie_gen.EYE_H/2), img_h - int(multipie_gen.LEYE_Y + multipie_gen.EYE_H/2)),
(int(multipie_gen.LEYE_X - multipie_gen.EYE_W/2), img_w - int(multipie_gen.LEYE_X + multipie_gen.EYE_W/2))))(leye)
reye_img = ZeroPadding2D(padding=((int(multipie_gen.REYE_Y - multipie_gen.EYE_H/2), img_h - int(multipie_gen.REYE_Y + multipie_gen.EYE_H/2)),
(int(multipie_gen.REYE_X - multipie_gen.EYE_W/2), img_w - int(multipie_gen.REYE_X + multipie_gen.EYE_W/2))))(reye)
nose_img = ZeroPadding2D(padding=((int(multipie_gen.NOSE_Y - multipie_gen.NOSE_H/2), img_h - int(multipie_gen.NOSE_Y + multipie_gen.NOSE_H/2)),
(int(multipie_gen.NOSE_X - multipie_gen.NOSE_W/2), img_w - int(multipie_gen.NOSE_X + multipie_gen.NOSE_W/2))))(nose)
mouth_img = ZeroPadding2D(padding=((int(multipie_gen.MOUTH_Y - multipie_gen.MOUTH_H/2), img_h - int(multipie_gen.MOUTH_Y + multipie_gen.MOUTH_H/2)),
(int(multipie_gen.MOUTH_X - multipie_gen.MOUTH_W/2), img_w - int(multipie_gen.MOUTH_X + multipie_gen.MOUTH_W/2))))(mouth)
return Maximum()([leye_img, reye_img, nose_img, mouth_img])
full_name = name
# shorten name
name = name[0]
in_img = Input(shape=(multipie_gen.IMG_H, multipie_gen.IMG_W, 3))
#print('in_img',in_img)
mc_in_img128 = Concatenate()([in_img, Lambda(lambda x: x[:,:,::-1,:])(in_img)]) #[::-1]將字串或陣列倒序排列
#print('Lambda(lambda x: x[:,:,::-1,:])(in_img)',Lambda(lambda x: x[:,:,::-1,:])(in_img))
#print('mc_in_img128',mc_in_img128)
mc_in_img64 = Lambda(lambda x: tf.image.resize_bilinear(x, [multipie_gen.IMG_H//2, multipie_gen.IMG_W//2]))(mc_in_img128)
mc_in_img32 = Lambda(lambda x: tf.image.resize_bilinear(x, [multipie_gen.IMG_H//4, multipie_gen.IMG_W//4]))(mc_in_img64)
#conv0
c128 = Conv2D(base_filters, (7, 7), padding='same', strides=(1, 1), name=name+'_c128', use_bias=True, kernel_initializer=TruncatedNormal(stddev=0.02), bias_initializer=Zeros())(mc_in_img128)
c128 = self._add_activation(c128, 'lrelu')
c128r = self._res_block(c128, (7, 7), batch_norm=True, activation='lrelu', name=name+'_c128_r')
print('c128',c128)
#conv1
c64 = Conv2D(base_filters, (5, 5), padding='same', strides=(2, 2), name=name+'_c64', use_bias=False, kernel_initializer=TruncatedNormal(stddev=0.02))(c128r)
c64 = BatchNormalization(epsilon=1e-5, name=name+'_c64_bn')(c64)
c64 = self._add_activation(c64, 'lrelu')
c64r = self._res_block(c64, (5, 5), batch_norm=True, activation='lrelu', name=name+'_c64_r')
print('c64',c64)
#conv2
c32 = Conv2D(base_filters*2, (3, 3), padding='same', strides=(2, 2), name=name+'_c32', use_bias=False, kernel_initializer=TruncatedNormal(stddev=0.02))(c64r)
c32 = BatchNormalization(epsilon=1e-5, name=name+'_c32_bn')(c32)
c32 = self._add_activation(c32, 'lrelu')
c32r = self._res_block(c32, (3, 3), batch_norm=True, activation='lrelu', name=name+'_c32_r')
print('c32',c32)
#conv3
c16 = Conv2D(base_filters*4, (3, 3), padding='same', strides=(2, 2), name=name+'_c16', use_bias=False, kernel_initializer=TruncatedNormal(stddev=0.02))(c32r)
c16 = BatchNormalization(epsilon=1e-5, name=name+'_c16_bn')(c16)
c16 = self._add_activation(c16, 'lrelu')
c16r = self._res_block(c16, (3, 3), batch_norm=True, activation='lrelu', name=name+'_c16_r')
print('c16',c16)
#conv4
c8 = Conv2D(base_filters*8, (3, 3), padding='same', strides=(2, 2), name=name+'_c8', use_bias=False, kernel_initializer=TruncatedNormal(stddev=0.02))(c16r)
c8 = BatchNormalization(epsilon=1e-5, name=name+'_c8_bn')(c8)
c8 = self._add_activation(c8, 'lrelu')
print('c8',c8)
c8r = self._res_block(c8, (3, 3), batch_norm=True, activation='lrelu', name=name+'_c8_r')
c8r2 = self._res_block(c8r, (3, 3), batch_norm=True, activation='lrelu', name=name+'_c8_r2')
c8r3 = self._res_block(c8r2, (3, 3), batch_norm=True, activation='lrelu', name=name+'_c8_r3')
c8r4 = self._res_block(c8r3, (3, 3), batch_norm=True, activation='lrelu', name=name+'_c8_r4')
#paper的fc1是使用linear而不是dense
fc1 = Dense(512, name=name+'_fc1', kernel_initializer=RandomNormal(stddev=0.02), bias_initializer=Constant(0.1), kernel_regularizer=regularizers.l2(0.005))(Flatten()(c8r4))
fc2 = Maximum()([Lambda(lambda x: x[:, :256])(fc1), Lambda(lambda x: x[:, 256:])(fc1)])
in_noise = Input(shape=(100,))
fc2_with_noise = Concatenate()([fc2, in_noise])
#print('fc1',fc1)
#print('fc2',fc2)
#print('in_noise',in_noise)
#print('fc2_with_noise',fc2_with_noise)
#test_out = self.model.predict(fc2_with_noise)
#fc3 = Dense(8*8*base_filters, name=name+'_fc3', kernel_initializer=RandomNormal(stddev=0.02), bias_initializer=Constant(0.1))(fc2_with_noise)
#f8 = Conv2DTranspose(base_filters, (8, 8), padding='valid', strides=(1, 1), name=name+'_f8', activation='relu', kernel_initializer=RandomNormal(stddev=0.02), bias_initializer=Zeros())(Reshape((1, 1, fc3.shape[-1].value))(fc3))
f8 = Conv2DTranspose(base_filters, (8, 8), padding='valid', strides=(1, 1), name=name+'_f8', activation='relu', kernel_initializer=RandomNormal(stddev=0.02), bias_initializer=Zeros())(Reshape((1, 1, fc2_with_noise.shape[-1].value))(fc2_with_noise))
#f32 = Conv2DTranspose(base_filters//2, (3, 3), padding='same', strides=(4, 4), name=name+'_f16', activation='relu', kernel_initializer=RandomNormal(stddev=0.02), bias_initializer=Zeros())(f8)
f32 = Conv2DTranspose(base_filters//2, (3, 3), padding='same', strides=(4, 4), name=name+'_f32', activation='relu', kernel_initializer=RandomNormal(stddev=0.02), bias_initializer=Zeros())(f8)
f64 = Conv2DTranspose(base_filters//4, (3, 3), padding='same', strides=(2, 2), name=name+'_f64', activation='relu', kernel_initializer=RandomNormal(stddev=0.02), bias_initializer=Zeros())(f32)
f128 = Conv2DTranspose(base_filters//8, (3, 3), padding='same', strides=(2, 2), name=name+'_f128', activation='relu', kernel_initializer=RandomNormal(stddev=0.02), bias_initializer=Zeros())(f64)
# size8
d8 = Concatenate(name=name+'_d8')([c8r4, f8])
#d8r = self._res_block(d8, (3, 3), batch_norm=True, activation='lrelu', name=name+'_d8_r')
#d8r2 = self._res_block(d8r, (3, 3), batch_norm=True, activation='lrelu', name=name+'_d8_r2')
#d8r3 = self._res_block(d8r2, (3, 3), batch_norm=True, activation='lrelu', name=name+'_d8_r3')
print('c8r4',c8r4)
print('f8',f8)
print('d8',d8)
# size16
d16 = Conv2DTranspose(base_filters*8, (3, 3), padding='same', strides=(2, 2), name=name+'_d16', use_bias=False, kernel_initializer=RandomNormal(stddev=0.02))(d8)
#d16 = Conv2DTranspose(base_filters*8, (3, 3), padding='same', strides=(2, 2), name=name+'_d16', use_bias=False, kernel_initializer=RandomNormal(stddev=0.02))(d8r3)
d16 = BatchNormalization(epsilon=1e-5, name=name+'_d16_bn')(d16)
d16 = self._add_activation(d16, 'relu')
#d16r = self._res_block(c16r, (3, 3), batch_norm=True, activation='lrelu', name=name+'_d16_r')
d16r = self._res_block(d16, (3, 3), batch_norm=True, activation='lrelu', name=name+'_d16_r')
#d16r2 = self._res_block(Concatenate()([d16, d16r]), (3, 3), batch_norm=True, activation='lrelu', name=name+'_d16_r2')
#d16r3 = self._res_block(d16r2, (3, 3), batch_norm=True, activation='lrelu', name=name+'_d16_r3')
# size32
d32 = Conv2DTranspose(base_filters*4, (3, 3), padding='same', strides=(2, 2), name=name+'_d32', use_bias=False, kernel_initializer=RandomNormal(stddev=0.02))(Concatenate()([d16r,c16r]))
#d32 = Conv2DTranspose(base_filters*4, (3, 3), padding='same', strides=(2, 2), name=name+'_d32', use_bias=False, kernel_initializer=RandomNormal(stddev=0.02))(d16r3)
d32 = BatchNormalization(epsilon=1e-5, name=name+'_d32_bn')(d32)
d32 = self._add_activation(d32, 'relu')
#d32r = self._res_block(Concatenate()([c32r, mc_in_img32, f32]), (3, 3), batch_norm=True, activation='lrelu', name=name+'_d32_r')
#d32r2 = self._res_block(Concatenate()([d32, d32r]), (3, 3), batch_norm=True, activation='lrelu', name=name+'_d32_r2')
#d32r3 = self._res_block(d32r2, (3, 3), batch_norm=True, activation='lrelu', name=name+'_d32_r3')
d32r = self._res_block(d32, (3, 3), batch_norm=True, activation='lrelu', name=name+'_d32_r3')
#img32 = Conv2D(3, (3, 3), padding='same', strides=(1, 1), activation='tanh', name=name+'_img32', kernel_initializer=TruncatedNormal(stddev=0.02), bias_initializer=Zeros())(d32r3)
img32= Conv2D(3, (3, 3), padding='same', strides=(1, 1), name=name+'_img32', use_bias=False, kernel_initializer=TruncatedNormal(stddev=0.02))(d32r)
# size64
d64 = Conv2DTranspose(base_filters*2, (3, 3), padding='same', strides=(2, 2), name=name+'_d64', use_bias=False, kernel_initializer=RandomNormal(stddev=0.02))(Concatenate()([d32r,f32,c32,mc_in_img32]))
d64 = BatchNormalization(epsilon=1e-5, name=name+'_d64_bn')(d64)
d64 = self._add_activation(d64, 'relu')
#d64r = self._res_block(Concatenate()([c64r, mc_in_img64, f64]), (5, 5), batch_norm=True, activation='lrelu', name=name+'_d64_r')
d64r = self._res_block(d64, (5, 5), batch_norm=True, activation='lrelu', name=name+'_d64_r')
#
img64= Conv2D(3, (3, 3), padding='same', strides=(1, 1), name=name+'_img64', use_bias=False, kernel_initializer=TruncatedNormal(stddev=0.02))(d64r)
#interpolated64 = Lambda(lambda x: tf.image.resize_bilinear(x, [64, 64]))(img32) # Use Lambda layer to wrap tensorflow func, resize_bilinear
| |
<gh_stars>10-100
import qctests.ICDC_aqc_01_level_order as ICDC
import qctests.ICDC_aqc_06_n_temperature_extrema as ICDC_nte
import util.testingProfile
import util.main as main
import numpy as np
##### ICDC number of temperature extrema.
##### --------------------------------------------------
class TestClass:
    """Unit tests for the ICDC number-of-temperature-extrema QC check."""
    # Shared QC parameters; 'unit' selects the unit-test table.
    parameters = {
        'db': 'iquod.db',
        'table': 'unit'
    }
    def setUp(self):
        # refresh this table every test
        # NOTE(review): setUp/tearDown are unittest/nose-style hook names;
        # confirm the test runner actually invokes them under pytest.
        ICDC.loadParameters(self.parameters)
    def tearDown(self):
        # Drop the scratch table created by loadParameters.
        main.dbinteract('DROP TABLE icdclevelorder;')
    def test_ICDC_n_temperature_extrema(self):
        '''Make sure code processes data supplied by <NAME>
        correctly.
        '''
        # `data` is the module-level fixture string defined below: profiles
        # introduced by an 'HH' header row, followed by one row per level
        # containing depth, temperature, and an expected-flag column.
        lines = data.splitlines()
        for i, line in enumerate(lines):
            if line[0:2] == 'HH':
                header = line.split()
                # Last header token is e.g. '21OSD': the level count with a
                # 3-character probe-type suffix, which is stripped here.
                nlevels = int(header[-1][:-3])
                depths = []
                temps = []
                qctruth = []
                for j in range(nlevels):
                    d = lines[i + j + 1].split()
                    depths.append(float(d[0]))
                    temps.append(float(d[1]))
                    # Nonzero third column marks a level expected to be flagged.
                    qctruth.append(int(d[2]) > 0)
                p = util.testingProfile.fakeProfile(temps, depths, uid=i)
                qc = ICDC_nte.test(p, self.parameters)
                assert np.array_equal(qc, qctruth), 'Failed profile with header ' + line
# Data provided by <NAME>, ICDC, University of Hamburg.
data = '''
HH 6682679 -11.700 -81.433 1968 2 11 21OSD
.0 25.090 1
9.0 25.060 1
10.0 23.780 1
19.0 24.920 1
20.0 23.750 1
28.0 21.440 1
29.0 23.780 1
47.0 19.510 1
49.0 23.780 1
71.0 17.410 1
77.0 23.770 1
94.0 15.650 1
100.0 23.720 1
141.0 13.180 1
145.0 21.930 1
188.0 12.360 1
191.0 19.160 1
328.0 13.220 1
439.0 10.710 1
540.0 7.650 1
640.0 4.650 1
HH 3230656 20.580 -156.100 1967 6 14 378CTD
.0 26.370 1
2.0 26.370 1
4.0 26.370 1
6.0 26.370 1
8.0 26.370 1
10.0 26.360 1
12.0 26.350 1
14.0 26.330 1
16.0 26.240 1
18.0 26.100 1
20.0 25.960 1
22.0 25.820 1
24.0 25.670 1
26.0 25.600 1
28.0 25.580 1
30.0 25.530 1
32.0 25.440 1
34.0 25.310 1
36.0 25.170 1
38.0 25.090 1
40.0 25.060 1
42.0 25.010 1
44.0 24.940 1
46.0 24.870 1
48.0 24.810 1
50.0 24.670 1
52.0 24.390 1
54.0 24.160 1
56.0 24.050 1
58.0 23.930 1
60.0 23.750 1
62.0 23.520 1
64.0 23.320 1
66.0 23.220 1
68.0 23.090 1
70.0 22.860 1
72.0 22.650 1
76.0 22.410 1
78.0 22.220 1
80.0 22.000 1
82.0 21.880 1
84.0 21.790 1
86.0 21.650 1
88.0 21.560 1
90.0 21.460 1
92.0 21.300 1
94.0 22.520 1
96.0 21.090 1
98.0 21.060 1
100.0 21.020 1
102.0 20.970 1
104.0 20.930 1
106.0 20.870 1
108.0 20.770 1
110.0 20.690 1
112.0 20.620 1
114.0 15.960 1
116.0 20.500 1
118.0 20.460 1
120.0 20.410 1
122.0 20.290 1
124.0 20.050 1
126.0 19.770 1
128.0 19.600 1
130.0 19.480 1
132.0 19.380 1
134.0 20.560 1
136.0 19.170 1
138.0 19.090 1
140.0 18.980 1
142.0 18.830 1
144.0 18.700 1
146.0 18.580 1
148.0 18.440 1
150.0 18.250 1
152.0 18.050 1
154.0 19.270 1
156.0 17.770 1
158.0 17.630 1
160.0 17.540 1
162.0 17.490 1
164.0 17.430 1
166.0 17.310 1
168.0 17.210 1
170.0 17.160 1
172.0 17.180 1
174.0 17.900 1
176.0 17.090 1
178.0 17.030 1
180.0 16.980 1
182.0 16.880 1
184.0 16.750 1
186.0 16.620 1
188.0 16.490 1
190.0 16.350 1
192.0 16.180 1
194.0 17.170 1
196.0 15.750 1
198.0 15.610 1
200.0 15.530 1
202.0 15.470 1
204.0 15.400 1
206.0 15.340 1
208.0 15.270 1
210.0 15.190 1
212.0 15.090 1
214.0 15.010 1
216.0 14.910 1
218.0 14.790 1
220.0 14.680 1
222.0 14.550 1
224.0 14.370 1
226.0 14.150 1
228.0 13.990 1
230.0 13.940 1
232.0 13.850 1
234.0 13.650 1
236.0 13.480 1
238.0 13.390 1
240.0 13.350 1
242.0 13.320 1
244.0 13.280 1
246.0 13.210 1
248.0 13.130 1
250.0 13.030 1
252.0 12.910 1
254.0 12.760 1
256.0 12.560 1
258.0 12.270 1
260.0 11.940 1
262.0 11.740 1
264.0 11.660 1
266.0 11.590 1
268.0 11.540 1
270.0 11.500 1
272.0 11.470 1
274.0 11.440 1
276.0 11.420 1
278.0 11.400 1
280.0 11.360 1
282.0 11.290 1
284.0 11.200 1
286.0 11.100 1
288.0 10.980 1
290.0 10.840 1
292.0 10.700 1
294.0 10.580 1
296.0 10.480 1
298.0 10.410 1
300.0 10.350 1
302.0 10.250 1
304.0 10.140 1
306.0 10.070 1
308.0 10.030 1
310.0 9.990 1
312.0 9.960 1
314.0 9.910 1
316.0 9.850 1
318.0 9.780 1
320.0 9.730 1
322.0 9.700 1
324.0 9.670 1
326.0 9.650 1
328.0 9.630 1
330.0 9.610 1
332.0 9.580 1
334.0 9.490 1
336.0 9.340 1
338.0 9.230 1
340.0 9.180 1
342.0 9.150 1
344.0 9.130 1
346.0 9.120 1
348.0 9.090 1
350.0 9.010 1
352.0 8.900 1
354.0 8.770 1
356.0 8.660 1
358.0 8.570 1
360.0 8.510 1
362.0 8.470 1
364.0 8.430 1
366.0 8.370 1
368.0 8.310 1
370.0 8.280 1
372.0 8.300 1
374.0 8.330 1
376.0 8.300 1
378.0 8.260 1
380.0 8.250 1
382.0 8.250 1
384.0 8.250 1
386.0 8.240 1
388.0 8.230 1
390.0 8.220 1
392.0 8.200 1
394.0 8.190 1
396.0 8.190 1
398.0 8.190 1
400.0 8.130 1
402.0 8.000 1
404.0 7.910 1
406.0 7.860 1
408.0 7.820 1
410.0 7.810 1
412.0 7.780 1
414.0 7.710 1
416.0 7.630 1
418.0 7.570 1
420.0 7.540 1
422.0 7.510 1
424.0 7.490 1
426.0 7.460 1
428.0 7.410 1
430.0 7.380 1
432.0 7.350 1
434.0 7.320 1
436.0 7.280 1
438.0 7.240 1
440.0 7.220 1
442.0 7.190 1
444.0 7.170 1
446.0 7.160 1
448.0 7.150 1
450.0 7.100 1
452.0 7.020 1
454.0 6.980 1
456.0 6.970 1
458.0 6.960 1
460.0 6.940 1
462.0 6.900 1
464.0 6.850 1
466.0 6.820 1
468.0 6.820 1
470.0 6.840 1
472.0 6.850 1
474.0 6.830 1
476.0 6.790 1
478.0 6.740 1
480.0 6.690 1
482.0 6.660 1
484.0 6.640 1
486.0 6.610 1
488.0 6.560 1
490.0 6.540 1
492.0 6.530 1
494.0 6.520 1
496.0 6.520 1
498.0 6.530 1
500.0 6.520 1
502.0 6.530 1
504.0 6.530 1
506.0 6.530 1
508.0 6.520 1
510.0 6.520 1
512.0 6.520 1
514.0 6.510 1
516.0 6.490 1
518.0 6.470 1
520.0 6.440 1
522.0 6.410 1
524.0 6.390 1
526.0 6.390 1
528.0 6.390 1
530.0 6.400 1
532.0 6.400 1
534.0 6.400 1
536.0 6.390 1
538.0 6.390 1
540.0 6.390 1
542.0 6.380 1
544.0 6.370 1
546.0 6.360 1
548.0 6.350 1
550.0 6.310 1
552.0 6.260 1
554.0 6.220 1
556.0 6.200 1
558.0 6.170 1
560.0 6.150 1
562.0 6.140 1
564.0 6.120 1
566.0 6.100 1
568.0 6.070 1
570.0 6.060 1
572.0 6.050 1
574.0 6.040 1
576.0 6.030 1
578.0 6.010 1
580.0 5.970 1
582.0 5.940 1
584.0 5.910 1
586.0 5.900 1
588.0 5.900 1
590.0 5.880 1
592.0 5.860 1
594.0 5.850 1
596.0 5.850 1
598.0 5.840 1
600.0 5.820 1
602.0 5.790 1
604.0 5.770 1
606.0 5.760 1
608.0 5.750 1
610.0 5.740 1
612.0 5.730 1
614.0 5.720 1
616.0 5.720 1
618.0 5.710 1
620.0 5.690 1
622.0 5.660 1
624.0 5.620 1
626.0 5.610 1
628.0 5.600 1
630.0 5.590 1
632.0 5.570 1
634.0 5.550 1
636.0 5.540 1
638.0 5.540 1
640.0 5.540 1
642.0 5.530 1
644.0 5.530 1
646.0 5.520 1
648.0 5.510 1
650.0 5.510 1
652.0 5.490 1
654.0 5.470 1
656.0 5.450 1
658.0 5.440 1
660.0 5.420 1
662.0 5.420 1
664.0 5.420 1
666.0 5.400 1
668.0 5.370 1
670.0 5.340 1
672.0 5.330 1
674.0 5.310 1
676.0 5.300 1
678.0 5.300 1
680.0 5.300 1
682.0 5.290 1
684.0 5.280 1
686.0 5.280 1
688.0 5.280 1
690.0 5.270 1
692.0 5.250 1
694.0 5.240 1
696.0 5.220 1
698.0 5.210 1
700.0 5.200 1
702.0 5.190 1
704.0 5.180 1
706.0 5.180 1
708.0 5.180 1
710.0 5.170 1
712.0 5.170 1
714.0 5.150 1
716.0 5.140 | |
= status
ret['cbfired'] = cbfired
ret['elapsed'] = int(time.time() - cmdstart)
if verbose:
self.debug("done with exec")
except CommandTimeoutException, cte:
self.lastexitcode = SshConnection.cmd_timeout_err_code
elapsed = str(int(time.time() - start))
self.debug("Command (" + cmd + ") timeout exception after " + str(elapsed) + " seconds\nException")
raise cte
return ret
def refresh_connection(self):
"""
Attempts to establish a new ssh connection to replace the old 'connection' of this
ssh obj.
"""
if self.connection:
self.connection.close()
self.connection = self.get_ssh_connection(self.host,
username=self.username,
password=<PASSWORD>,
keypath=self.keypath,
proxy_username=self.proxy_username,
proxy_password=self.proxy_password,
proxy_keypath=self.proxy_keypath,
enable_ipv6_dns=self.enable_ipv6_dns,
timeout=self.timeout,
retry=self.retry,
verbose=self.debug_connect)
def get_ssh_connection(self,
hostname,
username="root",
password=None,
keypath=None,
proxy=None,
proxy_username=None,
proxy_password=<PASSWORD>,
proxy_keypath=None,
key_files=None,
enable_ipv6_dns=None,
port=22,
timeout=60,
retry=1,
verbose=False):
"""
Create a paramiko ssh session to hostname. Will attempt to authenticate first with a keypath if provided,
if the sshkey file path is not provided. username and password will be used to authenticate. This leaves out
the case where a password is passed as the password needed to unlock the key file. This 3rd case may need to be
added but may mask failures in tests for key insertion when using tests who's images have baked in passwords for
login access(tbd).
Upon success returns a paramiko sshclient with an established connection.
:param hostname: - mandatory - hostname or ip to establish ssh connection with
:param username: - optional - username used to authenticate ssh session
:param password: - optional - password used to authenticate ssh session
:param keypath: - optional - full path to sshkey file used to authenticate ssh session
:param proxy: - optional - host to proxy ssh connection through
:param proxy_username: - optional ssh username of proxy host for authentication
:param proxy_password: - optional ssh password of proxy host for authentication
:param proxy_keypath: - optional path to ssh key to use for proxy authentication
:param timeout: - optional - tcp timeout
:param enable_ipv6_dns: - optional - boolean to avoid ipv6 dns 'AAAA' lookups
:param retry: - optional - Number of attempts to establish ssh connection for errors outside of authentication
:param port: - optional - port to connect to, default 22
:param verbose: - optional - enable verbose debug output
"""
connected = False
iplist = []
ip = None
key_files = key_files or self.key_files or []
if key_files and not isinstance(key_files, types.ListType):
key_files = key_files.split(',')
proxy_ip = None
if not key_files and password is None and keypath is None and not self.find_keys:
raise Exception("ssh_connect: Need to set password, keypath, keyfiles, or find_keys")
if enable_ipv6_dns is None:
enable_ipv6_dns = self.enable_ipv6_dns
proxy = proxy or self.proxy
self.debug("ssh_connect args:\nhostname:" + str(hostname)
+ "\nusername:" + str(username)
+ "\npassword:" + str(password)
+ "\nkeypath:" + str(keypath)
+ "\nproxy_username:" + str(proxy_username)
+ "\nproxy_password" + str(proxy_password)
+ "\nproxy_keypath" + str(proxy_keypath)
+ "\ntimeout:" + str(timeout)
+ "\nretry:" + str(retry),verbose=verbose)
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
hostname = str(hostname.strip())
if not enable_ipv6_dns:
self.debug('IPV6 DNS lookup disabled, do IPV4 resolution and pass IP to connect()',verbose=verbose)
# Paramiko uses family type 'AF_UNSPEC' which does both ipv4/ipv6 lookups and can cause some DNS servers
# to fail in their response(s). Hack to avoid ipv6 lookups...
# Try ipv4 dns resolution of 'hostname', and pass the ip instead of a hostname to
# Paramiko's connect to avoid the potential ipv6 'AAAA' lookup...
iplist = self.get_ipv4_lookup(hostname,verbose=verbose)
if not iplist:
iplist = [hostname]
attempt = 0
#adjust retry count for debug 'readability' ie 'attempt 1' vs 'attempt 0'
retry += 1
while (attempt < retry) and not connected:
attempt += 1
proxy_transport = None
for ip in iplist:
if self.proxy:
if not enable_ipv6_dns:
proxy_ip = self.get_ipv4_lookup(self.proxy, verbose=verbose)[0]
proxy_transport = self.get_proxy_transport(proxy_host=proxy,
dest_host=ip,
port=port,
proxy_username=proxy_username,
proxy_password=<PASSWORD>_password,
proxy_keypath=proxy_keypath)
if proxy_transport:
ssh._transport = proxy_transport
else:
ssh._transport = paramiko.Transport(ip)
ssh._transport.start_client()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
self.debug("SSH connection attempt(" + str(attempt) +" of " + str(retry) + "), host:'"
+ str(username) + "@" + str(hostname) + "', using ipv4:" + str(ip) +
", thru proxy:'" + str(proxy_ip) + "'")
if keypath is None and password:
self.debug("Using username:"+username+" and password:"+str(self.mask_password(password)),
verbose=verbose)
ssh._transport.auth_password(username, password)
#ssh.connect(ip, username=username, password=password, timeout=timeout)
connected = True
break
elif keypath:
self.debug("Using Keypath:" + keypath, verbose=verbose)
priv_key = paramiko.RSAKey.from_private_key_file(keypath)
ssh._transport.auth_publickey(username,priv_key)
#ssh.connect(ip, port=port, username=username, key_filename=keypath, timeout=timeout)
connected = True
break
elif key_files or self.find_keys:
self.debug("Using local keys, no keypath/password provided.", verbose=verbose)
ssh._auth(username, password, None, key_files, True, True)
#ssh.connect(ip, port=port, username=username, key_filename=keypath, timeout=timeout)
connected = True
except paramiko.ssh_exception.SSHException, se:
self.debug("Failed to connect to " + hostname + ", retry in 10 seconds. Err:" + str(se))
time.sleep(10)
pass
if connected:
via_string = ''
if proxy_transport:
proxy_host,port = ssh._transport.getpeername()
via_string = ' via proxy host:'+str(proxy_host)+':'+str(port)
self.debug('SSH - Connected to ' + str(ip)+str(via_string))
break
if not connected:
raise Exception(
'Failed to connect to "' + str(hostname) + '", attempts:' + str(attempt) + ". IPs tried:" + ",".join(
iplist))
#self.debug("Returning ssh connection to: "+ hostname)
return ssh
    def get_ipv4_lookup(self, hostname, port=22, verbose=False):
        """
        Do an ipv4 lookup of 'hostname' and return list of any resolved ip addresses
        :param hostname: hostname to resolve
        :param port: port to include in lookup, default is ssh port 22
        :param verbose: boolean to print addditional debug
        :return: list of ip addresses (strings in a.b.c.d format)
        """
        get_ipv4_ip = False
        iplist = []
        try:
            # inet_aton succeeds for anything it can parse as an IPv4 address,
            # including shorthand like '10.1'; a non-address hostname raises
            # socket.error and falls through to the DNS lookup below.
            if socket.inet_aton(hostname):
                ipcheck = re.compile("^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$")
                # Only a full dotted-quad is treated as "already an IP";
                # shorthand forms still go through getaddrinfo.
                if not ipcheck.match(hostname):
                    get_ipv4_ip = True
                self.debug(str(hostname) + ", is already an ip, dont do host lookup...", verbose=verbose)
                # This is already an ip don't look it up (regex might be better here?)
        except socket.error:
            get_ipv4_ip = True
        if get_ipv4_ip:
            try:
                #ipv4 lookup host for ssh connection...
                # AF_INET restricts the lookup to IPv4 ('A' records only).
                addrs = socket.getaddrinfo(hostname, port, socket.AF_INET, socket.IPPROTO_IP, socket.IPPROTO_TCP)
                for addr in addrs:
                    iplist.append(str(addr[4][0]))
                self.debug('Resolved hostname:' + str(hostname) + ' to IP(s):' + ",".join(iplist),verbose=verbose)
            except Exception, de:
                # Best-effort: a failed lookup returns an empty list rather
                # than raising, so callers can fall back to the raw hostname.
                self.debug('Error looking up DNS ip for hostname:' + str(hostname) + ", err:" + str(de))
        else:
            #hostname is an ipv4 address...
            iplist = [hostname]
        return iplist
def mask_password(self,pass_string):
"""
Replace all but first and last chars with '*' of provided password string.
:param pass_string: string representing a password to hide/format
:return: Formatted hidden password
"""
password = copy.copy(pass_string)
show = ""
if not password:
return password
if len(password) > 3:
length = len(password)-2
else:
length = len(password)
for x in xrange(length):
show += '*'
if len(password) > 3:
show = password[0]+show
show += password[len(password)-1]
return show
def expect_password_cb(self,
buf,
password,
prompt="^Password",
nextcb=None,
cbargs=[],
retry=0,
password_attempts=0,
verbose=False):
'''
Sample callback to handle password prompts to be provided to ssh.cmd()
:param buf: output from cmd()
:param password: string password to be supplied to a detected 'password' prompt
:param nextcb: function/method callback to be returned, this cmd() will use to handle it's future output.
:param prompt: regex string used to match prompt. case insensitive match used
:
'''
ret = SshCbReturn(stop=False)
#newbuf = None
def debug(msg, ssh=self):
if verbose:
ssh.debug(msg)
def add_to_buffer(lines_to_add, newbuf):
for line in lines_to_add:
debug('Adding line to buf:"' + str(line) +'"')
if newbuf is None:
newbuf = line+ '\n'
else:
newbuf += line + '\n'
return newbuf
bufadd = lambda line: add_to_buffer(line, ret.buf)
debug('STARTING expect_password_cb: password:' + str(password)+", prompt:"+str(prompt))
debug('Starting buf:"' + str(buf) + '"')
#Create a callback return obj for the cmd() loop to consume...
lines = buf.splitlines()
#See if we've already gone through password dialog, if so there may be a left over newline. Remove it.
if password_attempts and lines[0] == "":
debug('Removing first blank line(s) after sending password')
lines.pop(0)
if not lines:
#self.debug('Skipping blanklines...')
ret.buf = None
ret.nextargs=[password, prompt, nextcb, cbargs, retry, password_attempts, verbose]
return ret
#See if we have a prompt for password, assume we only have one match and were blocking waiting on password input
prompt_indices = [i for i, s in enumerate(lines) if re.match(prompt, s, re.IGNORECASE)]
if prompt_indices:
debug('Got password prompt, sending password...')
#Check to see if we've already tried a password, and if we should retry or fail
if password_attempts > retry:
raise CommandExpectPasswordException("Password dialog attempts:" + str(password_attempts) +
" exceeded retry limit:" + str(retry))
prompt_index = prompt_indices[0]
#Add any lines other than password prompt back to return buffer
| |
<reponame>inetrg/spoki<filename>evaluation/src/cse/logs/phasematcher.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
These classes reads JSON data from a variety sources and parse them into
objects that support a `from_dict` method.
"""
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "Copyright 2018-2021"
import gzip
import json
from collections import defaultdict
from datetime import datetime, timezone
from cse.types.analyzer import Analyzer
# -- packet keys --------------------------------------------------------------
# Canonical slots of a matched event record: irregular/regular SYN and their
# corresponding ACKs. Used to normalize dicts in filter_dict_elems().
packet_keys = ["isyn", "iack", "rsyn", "rack"]
# -- time ---------------------------------------------------------------------
def get_unix_timestamp(datetime_object):
    """Convert *datetime_object* to an integer UNIX timestamp (UTC)."""
    utc_time = datetime_object.astimezone(timezone.utc)
    return int(utc_time.timestamp())
def is_within_timeout(con, ts_syn, to):
    """Return True when the SYN at *ts_syn* belongs to confirmation *con*.

    The SYN must arrive within *to* seconds after the probe confirmation,
    or precede it by at most one second (clock-skew allowance).
    """
    if con is None:
        return False
    ts_con = get_unix_timestamp(con.timestamp)
    if ts_con <= ts_syn:
        # SYN must be received within 'to' secs of the probe confirmation.
        return (ts_syn - ts_con) < to
    # SYN slightly before the confirmation: tolerate up to one second.
    return (ts_con - ts_syn) <= 1
# -- syn matching -------------------------------------------------------------
def is_matching_ack(syn, req, ack):
    """Return True when *ack* acknowledges the probe *req* for *syn*.

    The source port is not part of the key, so it must agree across the
    SYN, the probe request, and the ACK; the ACK's sequence number must
    equal the probe's expected ack number.
    """
    if req is None:
        return False
    ports_agree = syn.payload.sport == req.dport == ack.payload.sport
    sequence_agrees = req.anum == ack.payload.snum
    return ports_agree and sequence_agrees
# -- count list over keys -----------------------------------------------------
def count(collection):
    """Total number of items across all value lists of *collection*."""
    return sum(len(bucket) for bucket in collection.values())
def count_unanswered(collection):
    """Count items whose probe_request is None across all value lists."""
    return sum(
        1
        for bucket in collection.values()
        for item in bucket
        if item.probe_request is None
    )
# -- filtering tuples -----------------------------------------------------
def filter_list(lst, batch_id, tag):
    """Remove all events with *batch_id* from the dict-of-lists *lst*.

    Each removed event is serialized into a record dict carrying its
    timestamp, the given *tag*, and the event under the tag's slot (the
    other packet slots are None). Keys whose lists become empty are
    deleted from *lst* in place. Returns the list of removed records.
    """
    removed = []
    empty_keys = []
    for key, events in lst.items():
        survivors = []
        for event in events:
            if event.batch_id != batch_id:
                survivors.append(event)
                continue
            record = {
                "ts": get_unix_timestamp(event.packet.timestamp),
                "isyn": None,
                "iack": None,
                "rsyn": None,
                "rack": None,
                "tag": tag,
            }
            record[tag] = event.to_dict()
            removed.append(record)
        if not survivors:
            # Defer deletion: mutating lst while iterating it is unsafe.
            empty_keys.append(key)
        elif len(survivors) != len(events):
            lst[key] = survivors
    for key in empty_keys:
        del lst[key]
    return removed
def filter_dict_elems(lst, batch_id, tag, key_with_packet):
    """Remove all matched-event dicts with *batch_id* from *lst* in place.

    Like filter_list(), but each element of the value lists is itself a
    dict of events; *key_with_packet* names the slot whose packet carries
    the batch id and timestamp. Removed elements are serialized with all
    packet_keys slots (missing slots become None) plus 'ts' and 'tag'.
    Returns the list of removed records.
    """
    removed = []
    keys_to_delete = []
    for key, evs in lst.items():
        keep = []
        for ev in evs:
            pkt = ev[key_with_packet]
            if pkt.batch_id == batch_id:
                d = {
                    "ts": get_unix_timestamp(pkt.packet.timestamp),
                    "tag": tag,
                }
                # Normalize: every packet slot is present, None when absent.
                for default_key in packet_keys:
                    if default_key not in ev or ev[default_key] is None:
                        d[default_key] = None
                    else:
                        d[default_key] = ev[default_key].to_dict()
                removed.append(d)
            else:
                keep.append(ev)
        # Deletion is deferred so the dict is not mutated while iterating.
        if len(keep) == 0:
            keys_to_delete.append(key)
        elif len(evs) != len(keep):
            lst[key] = keep
    for key in keys_to_delete:
        del lst[key]
    return removed
# -- phase matching -----------------------------------------------------------
class PhaseMatcher:
    def __init__(self, phase_timeout):
        """Set up the per-key event stores.

        :param phase_timeout: timedelta; maximum gap between the phases of a
            two-phase connection attempt.
        """
        # Single Event objects.
        self.irregular_syns = defaultdict(lambda: [])
        self.regular_syns = defaultdict(lambda: [])
        self.acks = defaultdict(lambda: [])
        # Dict with keys 'isyn', 'iack'.
        self.irregular_acked = defaultdict(lambda: [])
        # Dict with keys 'rsyn', 'rack'.
        self.regular_acked = defaultdict(lambda: [])
        # Dict with keys 'isyn', 'iack', and 'rsyn', 'rack'.
        # The ack keys might be None.
        self.two_phase_no_ack = defaultdict(lambda: [])
        # The key 'iack' might be None.
        self.two_phase = defaultdict(lambda: [])
        # Dict as above, but includes an 'ident' member.
        self.repeated_connection_attempts = defaultdict(lambda: [])
        # First idea:
        #   batch_id -> (saddr, daddr, sport, dport) -> snum
        # But that doesn't work because we can see retransmits over the "edges"
        # of batches we read in.
        self.retransmit_cache = defaultdict(lambda: set())
        # retransmit_cache = defaultdict(lambda: defaultdict(lambda: 0))
        self.retransmits = defaultdict(lambda: [])
        # Timeout converted once to whole seconds for integer comparisons.
        self.phase_timeout_s = int(phase_timeout.total_seconds())
# -- event processing -----------------------------------------------------
    def add_event(self, event):
        """Classify one packet event and try to match it to earlier events.

        RSTs are dropped, retransmits are recorded separately; SYNs and ACKs
        are matched against the stored irregular/regular/two-phase state and
        stashed under their flow key when no match is found.
        """
        if "rst" in event.packet.payload.flags:
            return
        # Filter retransmits.
        if self.is_retransmit(event):
            self.retransmits[event.batch_id].append(event)
            return
        # Do things ...
        lyz = Analyzer(event.packet)
        key = event.get_key()
        if lyz.is_irregular_syn():
            # Irregular SYN
            self.irregular_syns[key].append(event)
        elif lyz.is_syn():
            # Regular SYN.
            # Match against irregular SYN within the interval.
            if not self.try_match_rsyn_to_isyn(event, key):
                # Match against irregular SYN with ACK within the interval.
                if not self.try_match_rsyn_to_isyn_acked(event, key):
                    self.regular_syns[key].append(event)
        elif lyz.is_ack():
            # ACKs.
            # Check regular SYNs (more likely)
            if not self.try_match_ack_to_rsyn(event, key):
                # Check irregular SYNs (less likely)
                if not self.try_match_ack_to_isyn(event, key):
                    # Check two-phase event without ACK.
                    if not self.try_match_ack_to_tp(event, key):
                        # Just save it.
                        self.acks[key].append(event)
        elif lyz.is_syn_ack():
            # Nothing to do?
            pass
        elif lyz.is_rst():
            # Nothing to do?
            pass
        else:
            # flags = ", ".join(event.packet.payload.flags)
            # print(
            #     f"unknown packet with flags '{flags}': "
            #     f"{json.dumps(event.packet.to_dict())}"
            # )
            # TODO: Try to match these?
            pass
    def try_match_everything(self):
        """Re-run matching for all stored ACKs and regular SYN sequences.

        Later-arriving counterparts may have been added since the events were
        first stored, so each category gets a second matching pass.
        """
        # - ACKS
        matched_later = 0
        for key, ack_list in self.acks.items():
            remaining = []
            for ack in ack_list:
                # Most likely?
                if not self.try_match_ack_to_rsyn(ack, key):
                    # Check irregular SYNs (less likely)
                    if not self.try_match_ack_to_isyn(ack, key):
                        # Check two-phase event without ACK.
                        if not self.try_match_ack_to_tp(ack, key):
                            # Just save it.
                            remaining.append(ack)
                        else:
                            matched_later += 1
                    else:
                        matched_later += 1
                else:
                    matched_later += 1
            after = len(remaining)
            self.acks[key] = remaining
            assert len(self.acks[key]) == after, "jo, what?"
        print(f"[PHASE] matched {matched_later} ACKS")
        # - Phases
        matched_later = 0
        for key, rsyns in self.regular_syns.items():
            # NOTE(review): 'remaining' is assigned but never used here.
            remaining = []
            matched_index = None
            for i, rsyn in enumerate(rsyns):
                if self.try_match_rsyn_to_isyn(rsyn, key):
                    matched_index = i
                    matched_later += 1
                    break
                if self.try_match_rsyn_to_isyn_acked(rsyn, key):
                    matched_index = i
                    matched_later += 1
                    break
            # 'i' equals matched_index after the break; at most one rsyn is
            # removed per key per call.
            if matched_index is not None:
                del rsyns[i]
        print(f"[PHASE] matched {matched_later} phases (rsyns)")
        matched_later = 0
        for key, rseqs in self.regular_acked.items():
            # NOTE(review): 'remaining' is assigned but never used here.
            remaining = []
            matched_index = None
            # This could be the second phase.
            for i, rseq in enumerate(rseqs):
                if self.try_match_rsyn_acked_to_isyn(rseq, key):
                    matched_index = i
                    matched_later += 1
                    break
                if self.try_match_rsyn_acked_to_isyn_acked(rseq, key):
                    matched_index = i
                    matched_later += 1
                    break
            if matched_index is not None:
                del rseqs[i]
        print(f"[PHASE] matched {matched_later} phases (acked rsyns)")
    def find_repeated_connections(self):
        """Match acked regular sequences against earlier first phases.

        Sequences that match a stored two-phase event (with or without ACK)
        are removed from regular_acked; internal asserts verify the
        bookkeeping balances.
        """
        before = count(self.regular_acked)
        now_empty_keys = []
        total_deleting = 0
        for key, rseqs in self.regular_acked.items():
            keep = []
            before_for_key = len(rseqs)
            deleting = 0
            for rseq in rseqs:
                if not self.try_match_rsyn_acked_to_tp_no_ack(rseq, key):
                    if not self.try_match_rsyn_acked_to_tp(rseq, key):
                        keep.append(rseq)
                    else:
                        deleting += 1
                else:
                    deleting += 1
            if len(keep) == 0:
                # Defer deletion until after iteration.
                now_empty_keys.append(key)
            else:
                self.regular_acked[key] = keep
            after_for_key = len(keep)
            assert (
                after_for_key + deleting
            ) == before_for_key, "deletion didn't work"
            total_deleting += deleting
        for key in now_empty_keys:
            total_deleting += len(self.regular_acked[key])
            del self.regular_acked[key]
        after = count(self.regular_acked)
        assert (after + total_deleting) == before, "Couldn't delete all rseqs"
        print(f"[PHASE] matched {before - after} reg. seq. to an earlier first phase")
# -- evict and write ------------------------------------------------------
    def evict(self, batch_id):
        """Drain every store of events belonging to *batch_id*.

        Serialized records from all categories are collected, sorted by
        timestamp, and returned; stored ACKs are discarded rather than
        written. The repeated-connection store is cleared entirely.
        """
        # Collect everything we want to write to a file.
        elems = []
        # Write irregular_syns
        # print("[PHASE] filtering irregular syns")
        before = count(self.irregular_syns)
        tmp = filter_list(self.irregular_syns, batch_id, "isyn")
        remaining = count(self.irregular_syns)
        print(f"[PHASE] evicting {len(tmp)} isyns")
        print(f"[PHASE] remaining: {remaining}")
        after = count(self.irregular_syns)
        assert (after + len(tmp)) == before, "isyn deletion failed."
        elems.extend(tmp)
        # Write regular_syns
        # print("[PHASE] filtering regular syns")
        before = count(self.regular_syns)
        tmp = filter_list(self.regular_syns, batch_id, "rsyn")
        remaining = count(self.regular_syns)
        print(f"[PHASE] evicting {len(tmp)} rsyns")
        print(f"[PHASE] remaining: {remaining}")
        after = count(self.regular_syns)
        assert (after + len(tmp)) == before, "rsyn deletion failed."
        elems.extend(tmp)
        # Cleanup acks (no writing).
        before = count(self.acks)
        tmp = filter_list(self.acks, batch_id, "ack")
        remaining = count(self.acks)
        print(f"[PHASE] evicting {len(tmp)} acks")
        print(f"[PHASE] remaining: {remaining}")
        after = count(self.acks)
        assert (after + len(tmp)) == before, "ack deletion failed."
        tmp.clear()
        # Write irregular_acked
        # print("[PHASE] filtering irregular acked")
        before = count(self.irregular_acked)
        tmp = filter_dict_elems(self.irregular_acked, batch_id, "isyn (acked)", "isyn")
        remaining = count(self.irregular_acked)
        print(f"[PHASE] evicting {len(tmp)} acked isyns")
        print(f"[PHASE] remaining: {remaining}")
        after = count(self.irregular_acked)
        assert (after + len(tmp)) == before, "iacked deletion failed."
        elems.extend(tmp)
        # Write regular_acked
        # print("[PHASE] filtering regular acked")
        before = count(self.regular_acked)
        tmp = filter_dict_elems(self.regular_acked, batch_id, "rsyn (acked)", "rsyn")
        remaining = count(self.regular_acked)
        print(f"[PHASE] evicting {len(tmp)} acked rsyns")
        print(f"[PHASE] remaining: {remaining}")
        after = count(self.regular_acked)
        assert (after + len(tmp)) == before, "racked deletion failed."
        elems.extend(tmp)
        # Write two_phase_no_ack
        # print("[PHASE] filtering two phase no ack")
        before = count(self.two_phase_no_ack)
        tmp = filter_dict_elems(
            self.two_phase_no_ack, batch_id, "two-phase (no ack)", "isyn"
        )
        remaining = count(self.two_phase_no_ack)
        print(f"[PHASE] evicting {len(tmp)} two phase events without ack")
        print(f"[PHASE] remaining: {remaining}")
        after = count(self.two_phase_no_ack)
        assert (after + len(tmp)) == before, "tp-no-ack deletion failed."
        elems.extend(tmp)
        # Write two_phase
        # print("[PHASE] filtering two phase")
        before = count(self.two_phase)
        tmp = filter_dict_elems(self.two_phase, batch_id, "two-phase", "isyn")
        remaining = count(self.two_phase)
        print(f"[PHASE] evicting {len(tmp)} two phase events with ack")
        print(f"[PHASE] remaining: {remaining}")
        after = count(self.two_phase)
        assert (after + len(tmp)) == before, "tp deletion failed."
        elems.extend(tmp)
        # Repeated connection attempts
        self.repeated_connection_attempts.clear()
        # (sic: typo 'apptempts' kept — runtime string)
        err_msg = "failed to clear repeated connection apptempts"
        assert len(self.repeated_connection_attempts) == 0, err_msg
        print(f"[PHASE] cleaned up {len(elems)} elements")
        print("[PHASE] sorting them ...")
        elems.sort(key=lambda x: x["ts"])
        return elems
def evict_and_write_matches(self, datasource, batch_id, file_timestamp):
print(f"[PHASE] evicting batches with id {batch_id}")
fn = f"{datasource}-events-{file_timestamp}.json.gz"
print(f"[PHASE] writing {fn}")
with gzip.open(fn, "wt", newline="") as | |
flags["ZF"]:
return next_addr
if opcode == "jne" and not flags["ZF"]:
return next_addr
if opcode == "jg" and not flags["ZF"] and (flags["SF"] == flags["OF"]):
return next_addr
if opcode == "jge" and (flags["SF"] == flags["OF"]):
return next_addr
if opcode == "ja" and not flags["CF"] and not flags["ZF"]:
return next_addr
if opcode == "jae" and not flags["CF"]:
return next_addr
if opcode == "jl" and (flags["SF"] != flags["OF"]):
return next_addr
if opcode == "jle" and (flags["ZF"] or (flags["SF"] != flags["OF"])):
return next_addr
if opcode == "jb" and flags["CF"]:
return next_addr
if opcode == "jbe" and (flags["CF"] or flags["ZF"]):
return next_addr
if opcode == "jo" and flags["OF"]:
return next_addr
if opcode == "jno" and not flags["OF"]:
return next_addr
if opcode == "jz" and flags["ZF"]:
return next_addr
if opcode == "jnz" and flags["OF"]:
return next_addr
return None
def take_snapshot(self):
"""
Take a snapshot of current process
Warning: this is not thread safe, do not use with multithread program
Returns:
- dictionary of snapshot data
"""
if not self.getpid():
return None
maps = self.get_vmmap()
if not maps:
return None
snapshot = {}
# get registers
snapshot["reg"] = self.getregs()
# get writable memory regions
snapshot["mem"] = {}
for (start, end, perm, _) in maps:
if "w" in perm:
snapshot["mem"][start] = self.dumpmem(start, end)
return snapshot
def save_snapshot(self, filename=None):
"""
Save a snapshot of current process to file
Warning: this is not thread safe, do not use with multithread program
Args:
- filename: target file to save snapshot
Returns:
- Bool
"""
if not filename:
filename = self.get_config_filename("snapshot")
snapshot = self.take_snapshot()
if not snapshot:
return False
# dump to file
fd = open(filename, "wb")
pickle.dump(snapshot, fd, pickle.HIGHEST_PROTOCOL)
fd.close()
return True
def give_snapshot(self, snapshot):
"""
Restore a saved snapshot of current process
Warning: this is not thread safe, do not use with multithread program
Returns:
- Bool
"""
if not snapshot or not self.getpid():
return False
# restore memory regions
for (addr, buf) in snapshot["mem"].items():
self.writemem(addr, buf)
# restore registers, SP will be the last one
for (r, v) in snapshot["reg"].items():
self.execute("set $%s = 0x%x" % (r, v))
if r.endswith("sp"):
sp = v
self.execute("set $sp = 0x%x" % sp)
return True
def restore_snapshot(self, filename=None):
"""
Restore a saved snapshot of current process from file
Warning: this is not thread safe, do not use with multithread program
Args:
- file: saved snapshot
Returns:
- Bool
"""
if not filename:
filename = self.get_config_filename("snapshot")
fd = open(filename, "rb")
snapshot = pickle.load(fd)
return self.give_snapshot(snapshot)
#########################
# Memory Operations #
#########################
    @memoized
    def get_vmmap(self, name=None):
        """
        Get virtual memory mapping address ranges of debugged process
        Args:
            - name: name/address of binary/library to get mapping range (String)
                + name = "binary" means debugged program
                + name = "all" means all virtual maps
        Returns:
            - list of virtual mapping ranges (start(Int), end(Int), permission(String), mapname(String))
        """
        def _get_offline_maps():
            # No live process: approximate code/rodata/data ranges from the
            # ELF section headers of the target binary.
            name = self.getfile()
            if not name:
                return None
            headers = self.elfheader()
            binmap = []
            hlist = [x for x in headers.items() if x[1][2] == 'code']
            hlist = sorted(hlist, key=lambda x:x[1][0])
            binmap += [(hlist[0][1][0], hlist[-1][1][1], "rx-p", name)]
            hlist = [x for x in headers.items() if x[1][2] == 'rodata']
            hlist = sorted(hlist, key=lambda x:x[1][0])
            binmap += [(hlist[0][1][0], hlist[-1][1][1], "r--p", name)]
            hlist = [x for x in headers.items() if x[1][2] == 'data']
            hlist = sorted(hlist, key=lambda x:x[1][0])
            binmap += [(hlist[0][1][0], hlist[-1][1][1], "rw-p", name)]
            return binmap
        def _get_allmaps_osx(pid, remote=False):
            # Parse the output of the external 'vmmap' tool on macOS.
            maps = []
            #_DATA 00007fff77975000-00007fff77976000 [ 4K] rw-/rw- SM=COW /usr/lib/system/libremovefile.dylib
            pattern = re.compile("([^\n]*)\s* ([0-9a-f][^-\s]*)-([^\s]*) \[.*\]\s([^/]*).* (.*)")
            if remote: # remote target, not yet supported
                return maps
            else: # local target
                try: out = execute_external_command("/usr/bin/vmmap -w %s" % self.getpid())
                except: error_msg("could not read vmmap of process")
                matches = pattern.findall(out)
                if matches:
                    for (name, start, end, perm, mapname) in matches:
                        if name.startswith("Stack"):
                            mapname = "[stack]"
                        start = to_int("0x%s" % start)
                        end = to_int("0x%s" % end)
                        if mapname == "":
                            mapname = name.strip()
                        maps += [(start, end, perm, mapname)]
            return maps
        def _get_allmaps_freebsd(pid, remote=False):
            # Parse /proc/<pid>/map (requires procfs to be mounted).
            maps = []
            mpath = "/proc/%s/map" % pid
            # 0x8048000 0x8049000 1 0 0xc36afdd0 r-x 1 0 0x1000 COW NC vnode /path/to/file NCH -1
            pattern = re.compile("0x([0-9a-f]*) 0x([0-9a-f]*)(?: [^ ]*){3} ([rwx-]*)(?: [^ ]*){6} ([^ ]*)")
            if remote: # remote target, not yet supported
                return maps
            else: # local target
                try: out = open(mpath).read()
                except: error_msg("could not open %s; is procfs mounted?" % mpath)
                matches = pattern.findall(out)
                if matches:
                    for (start, end, perm, mapname) in matches:
                        # Heuristic: high writable mappings are the stack.
                        if start[:2] in ["bf", "7f", "ff"] and "rw" in perm:
                            mapname = "[stack]"
                        start = to_int("0x%s" % start)
                        end = to_int("0x%s" % end)
                        if mapname == "-":
                            # Anonymous region adjacent to a file mapping
                            # inherits that file's name.
                            if start == maps[-1][1] and maps[-1][-1][0] == "/":
                                mapname = maps[-1][-1]
                            else:
                                mapname = "mapped"
                        maps += [(start, end, perm, mapname)]
            return maps
        def _get_allmaps_linux(pid, remote=False):
            # Parse /proc/<pid>/maps, fetching it through gdb for remote targets.
            maps = []
            mpath = "/proc/%s/maps" % pid
            #00400000-0040b000 r-xp 00000000 08:02 538840  /path/to/file
            pattern = re.compile("([0-9a-f]*)-([0-9a-f]*) ([rwxps-]*)(?: [^ ]*){3} *(.*)")
            if remote: # remote target
                tmp = tmpfile()
                self.execute("remote get %s %s" % (mpath, tmp.name))
                tmp.seek(0)
                out = tmp.read()
                tmp.close()
            else: # local target
                out = open(mpath).read()
            matches = pattern.findall(out)
            if matches:
                for (start, end, perm, mapname) in matches:
                    start = to_int("0x%s" % start)
                    end = to_int("0x%s" % end)
                    if mapname == "":
                        mapname = "mapped"
                    maps += [(start, end, perm, mapname)]
            return maps
        result = []
        pid = self.getpid()
        if not pid: # not running, try to use elfheader()
            try:
                return _get_offline_maps()
            except:
                return []
        # retrieve all maps
        os = self.getos()
        rmt = self.is_target_remote()
        maps = []
        try:
            if os == "FreeBSD": maps = _get_allmaps_freebsd(pid, rmt)
            elif os == "Linux" : maps = _get_allmaps_linux(pid, rmt)
            elif os == "Darwin" : maps = _get_allmaps_osx(pid, rmt)
        except Exception as e:
            if config.Option.get("debug") == "on":
                msg("Exception: %s" %e)
                traceback.print_exc()
        # select maps matched specific name
        if name == "binary":
            name = self.getfile()
        if name is None or name == "all":
            name = ""
        # 'name' may be a substring filter or a single address to look up.
        if to_int(name) is None:
            for (start, end, perm, mapname) in maps:
                if name in mapname:
                    result += [(start, end, perm, mapname)]
        else:
            addr = to_int(name)
            for (start, end, perm, mapname) in maps:
                if start <= addr and addr < end:
                    result += [(start, end, perm, mapname)]
        return result
    @memoized
    def get_vmrange(self, address, maps=None):
        """
        Get virtual memory mapping range of an address
        Args:
            - address: target address (Int)
            - maps: only find in provided maps (List)
        Returns:
            - tuple of virtual memory info (start, end, perm, mapname)
        """
        if address is None:
            return None
        if maps is None:
            maps = self.get_vmmap()
        if maps:
            for (start, end, perm, mapname) in maps:
                if start <= address and end > address:
                    return (start, end, perm, mapname)
        # failed to get the vmmap
        else:
            try:
                # Probe readability via gdb and synthesize a page-aligned
                # range; permissions are assumed 'rwx' since the real map is
                # unavailable.
                gdb.selected_inferior().read_memory(address, 1)
                start = address & 0xfffffffffffff000
                end = start + 0x1000
                return (start, end, 'rwx', 'unknown')
            except:
                return None
@memoized
def is_executable(self, address, maps=None):
"""
Check if an address is executable
Args:
- address: target address (Int)
- maps: only check in provided maps (List)
Returns:
- True if address belongs to an executable address range (Bool)
"""
vmrange = self.get_vmrange(address, maps)
if vmrange and "x" in vmrange[2]:
return True
else:
return False
@memoized
def is_writable(self, address, maps=None):
"""
Check if an address is writable
Args:
- address: target address (Int)
- maps: only check in provided maps (List)
Returns:
- True if address belongs to a writable address range (Bool)
"""
vmrange = self.get_vmrange(address, maps)
if vmrange and "w" in vmrange[2]:
return True
else:
return False
@memoized
def is_address(self, value, maps=None):
"""
Check if a value is a valid address (belongs to a memory region)
Args:
- value (Int)
- maps: only check in provided maps (List)
Returns:
- True if value belongs to an address range (Bool)
"""
vmrange = self.get_vmrange(value, maps)
return vmrange is not None
@memoized
def get_disasm(self, address, count=1):
"""
Get the ASM code of instruction at address
Args:
- address: address to read instruction (Int)
- | |
<filename>wtfile.py
from functools import partial
import fnmatch
import glob
import os
import re
import stat
import shutil as sh
import sys
VERBOSE = False
__print = print # pylint: disable=invalid-name
def print(*_, **__):
    # Intentionally shadows the builtin (bound above as __print): output is
    # emitted when the module-wide VERBOSE flag is set or when the call passes
    # verbose=True — which is the default, so plain calls still print.
    verbose = __.pop('verbose', True)
    if VERBOSE or verbose:
        __print(*_, **__)
def TODO(*_):
    # Marker for unimplemented code paths. 'scan' is hard-wired to False, so
    # the NotImplementedError branch is currently dead; under CI (CI env var
    # set) it is an explicit no-op as well.
    scan = False
    if os.getenv('CI'):
        pass
    elif scan:
        raise NotImplementedError(*_)
# chores
# **************************************************************************
__all__ = ['F']
__author__ = 'Sy<<EMAIL>>'
__doc__ = """
An aggressive alternative to pathlib.path and path.py which supports
>>> f = F('/home/sy', 'test.cc')
>>> f.ext
.cc
>>> f.ext('h')
/home/sy/test.h
>>> f.stem('name')
/home/sy/name.h
>>> f.name
name.h
>>> filepath = os.path.join('/home/sy', f.name)
/home/sy/name.h
Q&A
Q: How do I know whether a specific action does IO operation or not?
A: If it's an action adaptable to both path/io, it depends on a parameter
called 'dry' which is False on default. Otherwise it just manipulates
the path string.
Q: Exceptions?
A: There is no extra exception introduced, all operations are ported to module
os or module os.path, wtfile itself is not supposed to corrupt.
If the error message is in Chinese, it's raised by wtfile, otherwise it
is supposed to be raised by python module(if not, fire an issue).
"""
LINESEP = os.linesep
# Newline variants; the *_U lists additionally cover the Unicode separators
# NEL (U+0085), LINE SEPARATOR (U+2028) and PARAGRAPH SEPARATOR (U+2029).
# '\r\n' is listed first so the regex alternation matches CRLF as one unit.
LINESEPS = ['\r\n', '\r', '\n']
LINESEPS_U = LINESEPS + ['\u0085', '\u2028', '\u2029']
P_NEWLINE = re.compile('|'.join(LINESEPS))
P_NEWLINE_U = re.compile('|'.join(LINESEPS_U))
# Anchored variants: match only a trailing newline at the end of a string.
P_NEWLINE_END = re.compile(r'(?:{0})$'.format(P_NEWLINE.pattern))
P_NEWLINE_END_U = re.compile(r'(?:{0})$'.format(P_NEWLINE_U.pattern))
class classproperty(property):  # pylint: disable=invalid-name
    """Read-only property evaluated against the owning class.

    Accessing the attribute on either the class or an instance calls the
    wrapped getter with the class object; assignment through an instance
    raises AttributeError.
    """

    def __get__(self, obj, owner):
        # Always hand the getter the *class*, regardless of whether access
        # happened via the class or via an instance.
        return self.fget(owner)

    def __set__(self, *_):
        raise AttributeError("Read only classproperty")
class FMeta(type):
    # Placeholder metaclass for FBase; adds no behaviour yet but reserves a
    # hook for future class-level customisation.
    pass
class FBase(str, metaclass=FMeta):
    """Immutable str subclass representing a filesystem path fragment.

    Positional constructor arguments are joined with os.path.join; derived
    values keep the original's mode and parent via _derive_().
    """
    # Path backend; all join/split operations go through this module.
    module = os.path
    def __new__(cls, *_, **__):
        print('__new__', _, __, verbose=False)
        if _:
            return super(FBase, cls).__new__(cls, cls.module.join(*_))
        # No components: os.path.join('') yields the empty string.
        return super(FBase, cls).__new__(cls, cls.module.join(''))
    def __init__(self, *_, mode='t', parent=None): # pylint: disable=super-init-not-called
        # str is immutable, so the value was fixed in __new__; only the
        # bookkeeping attributes are set here.
        self._parent = parent
        self._mode = mode # 't'/'b'
        self.__bwd = None
    def _derive_(self, *_):
        # Build a sibling object of the same concrete type, propagating
        # mode and parent.
        return type(self)(*_, mode=self._mode, parent=self._parent)
    def __add__(self, other):
        # String concatenation stays inside the F type family.
        return self._derive_(str.__add__(self, other))
    def __radd__(self, other):
        return self._derive_(other.__add__(self))
    def __call__(self):
        # Subclasses define call semantics (e.g. FName/FStem/FExt setters).
        raise NotImplementedError()
    def __repr__(self):
        # Verbose mode exposes the concrete type for debugging.
        if VERBOSE:
            return f'{type(self).__name__}({super(FBase, self).__repr__()})'
        return f'{super(FBase, self).__repr__()}'
    def to_str(self):
        """Return the value as a plain built-in str."""
        return str(self)
class FPath(FBase):
    """Pure string-level path manipulation; never touches the filesystem."""

    def __div__(self, rest):
        """Join with the / operator: F('/a') / 'b' -> F('/a/b')."""
        return self._derive_(self, rest)
    __truediv__ = __div__

    def __rdiv__(self, rest):
        """Right-hand join: 'a' / F('b') -> F('a/b')."""
        return self._derive_(rest, self)
    __rtruediv__ = __rdiv__

    def __fspath__(self):
        """PEP519"""
        return self

    # deprecated
    # realpath(misleading, implicit api)
    # commonpath
    # commonprefix
    # lexists

    @property
    def parent(self):
        """Return the directory name of pathname path.
        This is the first element of the pair returned by passing path to the
        function split() and proxied by F.
        """
        return self._derive_(self.module.dirname(self))

    @property
    def name(self):
        """Return the base name of pathname path.
        This is the second element of the pair returned by passing path to the
        function split() and proxied by FName.
        """
        return FName(self.module.basename(self), parent=self)

    @property
    def stem(self):
        """Return the final path component without its extension (FStem).

        Fix: the previous implementation used ``name.replace(ext, '')``,
        which strips *every* occurrence of the suffix (e.g. 'a.tar.tar'
        became 'a' instead of 'a.tar'). splitext removes only the last one.
        """
        stem = self.module.splitext(self.module.basename(self))[0]
        return FStem(stem, parent=self)

    @property
    def ext(self):
        """Return the extension, including the leading dot, as FExt."""
        ext = self.module.splitext(self)[1]
        return FExt(ext, parent=self)

    def cd(self, target):
        """cd relative path with dry path
        Different from os.path.join, supports relative path like cd('..'), cd
        ('../sy') and specially cd('...').
        Different from with FIO block, it doesn't really do IO chdir, you can
        do it like
        ```python
        with F.DIR.cd('../Colors') as folder:
            pass
        ```
        """
        if target == '...':
            # '...' is shorthand for two levels up.
            return self.parent.parent
        return self._derive_(self, target).norm()

    def norm(self):
        """Normalize by collapsing redundant separators and up-level
        references so that A//B, A/B/, A/./B and A/foo/../B all become A/B.
        This string manipulation may change the meaning of a path that
        contains symbolic links.
        """
        return self._derive_(self.module.normpath(self))
    normal = norm

    def match(self, pattern):
        """Test whether the filename string matches the pattern string,
        returning True or False.
        If the operating system is case-insensitive, will be normalized to all
        lower- or upper-case before the comparison is performed. fnmatchcase()
        can be used to perform a case-sensitive comparison, regardless of
        whether that's standard for the operating system.
        """
        return fnmatch.fnmatch(self, pattern)

    def matchcase(self, pattern):
        """Case-sensitive variant of match()."""
        return fnmatch.fnmatchcase(self, pattern)
class FIO(FBase):
    def __enter__(self):
        """cd dir
        with F('/home', 'user') as cwd:
            print(F().cwd)
        """
        # Remember where we came from so __exit__ can chdir back.
        self.__bwd = self.cwd
        os.chdir(self)
        return self
    def __exit__(self, *_):
        """Restore the working directory saved by __enter__."""
        try:
            bwd = self.__bwd
            del self.__bwd
            os.chdir(bwd)
        except AttributeError:
            # No saved cwd: __exit__ without a matching __enter__ (or called
            # twice). Per module policy, wtfile errors carry Chinese messages.
            raise TypeError('我来到你的城市,走过你来时的路。')
    def __iter__(self):
        """ different to for child in f.children, it joins the paths """
        if not self.isdir():
            # Files iterate over their lines; read() is presumably defined on
            # the combined F class elsewhere in this file -- TODO confirm.
            yield from self.read().split('\n')
        else:
            # Directories yield full child paths, not bare names.
            for child in self.children:
                yield self._derive_(self, child)
    @property
    def cwd(self):
        """Current working directory wrapped in this path type."""
        return self._derive_(os.getcwd())
    @property
    def abspath(self):
        """Return a normalized absolutized version of the pathname path.
        On most platforms, this is equivalent to calling the function
        normpath() as follows: normpath(join(os.getcwd(), path)).
        """
        # IO-dependent: resolves against the *current* working directory.
        return self._derive_(self.module.abspath(self))
    @property
    def children(self):
        """Plain list of entry names in this directory (os.listdir)."""
        return os.listdir(self)
    @property
    def siblings(self):
        """Entries in the parent directory other than this entry itself."""
        return [file for file in self.parent.children if file != self.name]
    @property
    def root(self):
        """First component of the path: '/', '.', '~' or the leading name.

        A path with no '/' at all is its own root.
        """
        raw = self.to_str()
        if '/' not in raw:
            return self
        if raw.startswith('/'):
            return self._derive_('/')
        if raw.startswith('.'):
            return self._derive_('.')
        if raw.startswith('~'):
            return self._derive_('~')
        return self._derive_(raw.split('/')[0])
# @property
# def drive(self):
# """deprecated(windows only)"""
# return self._derive_(self.module.splitdrive(self))
    def glob(self, pathname, *, relative=False, recursive=False):
        """List paths matching ``pathname`` as instances of this class.

        relative=True resolves the pattern against self first.
        NOTE(review): cd() lives on FPath, so this assumes the concrete class
        mixes in both FPath and FIO -- confirm.
        """
        if relative:
            pathname = self.cd(pathname)
        return list(map(type(self), glob.glob(pathname, recursive=recursive)))
    def iglob(self, pathname, *, relative=False, recursive=False):
        """Lazy variant of glob(): yields matches one by one."""
        if relative:
            pathname = self.cd(pathname)
        yield from map(type(self), glob.iglob(pathname, recursive=recursive))
    def exists(self):
        """Return True if the path exists (follows symlinks)."""
        return self.module.exists(self)
    def isabs(self):
        """Return True if path is an absolute pathname.
        On Unix, that means it begins with a slash, on Windows that it begins
        with a (back)slash after chopping off a potential drive letter.
        """
        # Pure os.path proxy; no IO.
        return self.module.isabs(self)
    def isfile(self):
        """Return True if path is an existing regular file.
        This follows symbolic links, so both islink() and isfile() can be true
        for the same path.
        """
        return self.module.isfile(self)
    def isdir(self):
        """Return True if path is an existing directory.
        This follows symbolic links, so both islink() and isdir() can be true
        for the same path.
        """
        return self.module.isdir(self)
    def islink(self):
        """Return True if path refers to an existing directory entry that is a
        symbolic link. Always False if symbolic links are not supported by the
        Python runtime.
        """
        return self.module.islink(self)
    def ismount(self):
        """Return True if pathname path is a mount point: a point in a file
        system where a different file system has been mounted. On POSIX, the
        function checks whether path's parent, path/.., is on a different
        device than path, or whether path/.. and path point to the same i-node
        on the same device — this should detect mount points for all Unix and
        POSIX variants. It is not able to reliably detect bind mounts on the
        same filesystem. On Windows, a drive letter root and a share UNC are
        always mount points, and for any other path GetVolumePathName is
        called to see if it is different from the input path.
        """
        return self.module.ismount(self)
    def mkdir(self, dirname=None, mode=0o777):
        """Create a directory named path with numeric mode mode.
        If the directory already exists, FileExistsError is raised.
        On some systems, mode is ignored. Where it is used, the current umask
        value is first masked out. If bits other than the last 9 (i.e. the
        last 3 digits of the octal representation of the mode) are set, their
        meaning is platform-dependent. On some platforms, they are ignored
        and you should call chmod() explicitly to set them.
        This function can also support paths relative to directory
        descriptors.
        """
        # With dirname, create a child directory; otherwise create self.
        path = self if not dirname else self / dirname
        os.mkdir(path, mode)
        # Return the created directory as a path object.
        return self._derive_(path)
    def mkfile(self, filename=None, mode=0o600):
        """Create an empty filesystem node and return it as a path object.

        NOTE(review): backed by os.mknod, which may require privileges or be
        unavailable on some platforms (e.g. Windows) -- confirm portability.
        """
        path = self if not filename else self / filename
        os.mknod(path, mode)
        return self._derive_(path)
    # alias
    mknod = mkfile
    touch = mkfile
    def linkto(self, src):
        """Create a symbolic link pointing to src named self."""
        os.symlink(src, self)
        # Return the link *target* as a path object.
        return self._derive_(src)
    def linkfrom(self, dst):
        """Create a symbolic link pointing to self named dst."""
        os.symlink(self, dst)
        # Return the newly created link as a path object.
        return self._derive_(dst)
def rm(self, f=False): # pylint: disable=invalid-name
if self.isdir():
def onerror(_, path, __):
if f:
os.chmod(path, stat.S_IWRITE)
os.rmdir(path)
sh.rmtree(self, onerror=onerror)
elif self.isfile():
os.remove(self)
else:
raise TypeError("此情无计可消除,才下眉头,却上心头。")
    def clear(self, target=None, f=False):
        """Remove a file/dir and recreate it.

        Returns the recreated path object.
        NOTE(review): if the path does not exist at all this falls through
        and returns None without creating anything -- confirm intended.
        """
        path = self if not target else self.cd(target)
        if path.isfile():
            path.rm(f)
            return path.mkfile()
        if path.exists():
            # Existing but not a regular file: treat as a directory.
            path.rm(f)
            return path.mkdir()
    @property
    def size(self):
        """Size in bytes; for a directory, the shallow sum of child files."""
        return self.getSize(deep=False)
    def getSize(self, inode=False, deep=True):
        """Return the size of a file, or the summed size of a directory.

        inode=True returns this entry's own size without recursing;
        deep=False sums only the immediate child files.
        """
        if inode or self.isfile():
            return self.module.getsize(self)
        size = 0
        # __iter__ yields joined child paths for directories.
        for child in self:
            if child.isfile():
                size += self.module.getsize(child)
            elif deep:
                size += child.getSize()
        return size
@property
def atime(self):
"""Return the | |
common.Checkpointer(
ckpt_dir=os.path.join(self._train_dir, 'policy'),
max_to_keep=self._max_ckpt,
behave_metrics=metric_utils.MetricsGroup(
self._behavior_metrics + [self._iteration_metric],
'behavior_metrics'),
**all_iterable)
else:
self._rb_checkpointer = tf.train.CheckpointManager(
tf.train.Checkpoint(replay_buffer=self._replay_buffer),
directory=os.path.join(self._train_dir, 'replay_buffer'),
max_to_keep=10)
self._policy_checkpointer = tf.train.CheckpointManager(
tf.train.Checkpoint(
behave_metrics=metric_utils.MetricsGroup(
self._behavior_metrics + [self._iteration_metric],
'behavior_metrics'),
**all_iterable),
directory=os.path.join(self._train_dir, 'policy'),
max_to_keep=self._max_ckpt)
  def record_log_policy_metric(self, worker_index, env_step):
    """Record log policy metric.

    Writes, per worker, a one-hot selection-rate metric (1.0 only for the
    worker chosen this round), plus bandit Q / PBT-id / hparam summaries
    when PBT or bandit selection is enabled, then hparam-bucketed selection
    metrics. Emits to both TF summaries and CSV.
    """
    for i, worker in enumerate(self._worker_names):
      # One-hot: 1.0 for the selected worker, 0.0 for everyone else.
      self._update_metrics(self._policy_metrics[worker],
                           [float(i == worker_index)])
      for metric in self._policy_metrics[worker]:
        add_summary(self._train_file_writer, 'PolicyMetrics/' + metric.name,
                    metric.result(), env_step)
        write_csv(self._csv_dir, metric.name, metric.result(), env_step,
                  self._iteration_metric.result())
      if self._pbt or self._use_bandit:
        add_summary(self._train_file_writer,
                    'QMetrics/' + self._bandit_arm_q[worker].name,
                    self._bandit_arm_q[worker].result(), env_step)
        write_csv(self._csv_dir, self._bandit_arm_q[worker].name,
                  self._bandit_arm_q[worker].result(), env_step,
                  self._iteration_metric.result())
        logging.info('worker id:%s', self._agent_names[worker])
        # Agent names encode "<base>-<pbt_id>-<parent_id>[-hparams...]".
        add_summary(self._train_file_writer, 'WorkerID/' + 'pbt_id_' + worker,
                    int(self._agent_names[worker].split('-')[1]), env_step)
        add_summary(self._train_file_writer,
                    'WorkerID/' + 'pbt_parent_id_' + worker,
                    int(self._agent_names[worker].split('-')[2]), env_step)
        add_summary(
            self._train_file_writer, 'WorkerHparam/' + 'architect_' + worker,
            np.where(self._hyper_range['architect'] ==
                     self._hparams[worker].architect)[0][0], env_step)
        add_summary(self._train_file_writer, 'WorkerHparam/' + 'lr_' + worker,
                    self._hparams[worker].lr, env_step)
        add_summary(self._train_file_writer, 'WorkerHparam/' + 'edecay_' + worker,
                    self._hparams[worker].edecay, env_step)
        write_csv(self._csv_dir, 'worker_status_' + worker,
                  self._agent_names[worker], env_step,
                  self._iteration_metric.result())
    write_csv(self._csv_dir, 'behavior_agent_name',
              self._agent_names[self._worker_names[worker_index]], env_step,
              self._iteration_metric.result())
    for hp in self._policy_metrics_hyper:
      value = self._hparams[self._worker_names[worker_index]].get(hp)
      if hp in ['lr', 'edecay']:
        # Bucket continuous hparams into the histogram bin containing
        # log(value); np.inf is appended as the final right edge.
        histo = sorted(self._policy_metrics_hyper[hp].keys())
        histo.append(np.inf)
        histo = np.asarray(histo)
        bins = histo[np.argmax(histo >= np.log(value)) - 1]
      else:
        bins = value
      for param in self._policy_metrics_hyper[hp]:
        self._update_metrics(self._policy_metrics_hyper[hp][param],
                             [float(bins == param)])
        for metric in self._policy_metrics_hyper[hp][param]:
          add_summary(self._train_file_writer,
                      'PolicyMetrics(hparams)/' + metric.name, metric.result(),
                      env_step)
          write_csv(self._csv_dir, metric.name, metric.result(), env_step,
                    self._iteration_metric.result())
  def exploit(self, sess, way='uniform'):
    """Exploit.

    PBT exploitation step: ranks workers by bandit Q, then probabilistically
    replaces the bottom performers with mutated copies of top performers.
    ``way`` selects how replacement probabilities are drawn ('uniform' or
    'weighted'). NOTE(review): any other ``way`` value leaves top_prob /
    bottom_prob undefined and would raise NameError below -- confirm callers.
    """
    bottom_table = []
    top_table = []
    for worker in self._worker_names:
      # Re-evaluate stale arms before ranking so Q values are comparable.
      if not self._bandit_arm_q[worker].is_recent(
          update_time=self._pbt_update_requirement):
        self._count_reevaluate += 1
        logging.info('running immediate eval for agent %s, total eval:%d',
                     worker, self._count_reevaluate)
        self._update_bandit(
            sess,
            self._select_policy_way,
            worker,
            immediate_eval=True,
            push_value=self._push_when_eval)
      bottom_table.append(self._bandit_arm_q[worker].result(way=self._pbt_low))
      top_table.append(self._bandit_arm_q[worker].result(way=self._pbt_high))
    logging.info('worker name: %s', self._agent_names)
    logging.info('bottom table = %s, top table=%s', bottom_table, top_table)
    # Indices of the best/worst fractions of the population.
    top_list = np.argsort(
        top_table)[-int(len(bottom_table) * self._pbt_percent_top):]
    bottom_list = np.argsort(
        bottom_table)[:int(len(bottom_table) * self._pbt_percent_low)]
    top_table = np.asarray(top_table)
    bottom_table = np.asarray(bottom_table)
    if way == 'weighted':
      # Source probability ~ softmax of advantage over the median; drop
      # probability ~ sigmoid of IQR-normalized deficit.
      top_prob = softmax(top_table[top_list] - np.median(top_table))
      bottom_norm = (np.median(bottom_table) - bottom_table[bottom_list]) / (
          np.quantile(bottom_table, 0.75) - np.quantile(bottom_table, 0.25))
      bottom_prob = sigmoid(
          bottom_norm - 1.0, coeff=0.3, truncate=self._pbt_drop_prob)
    elif way == 'uniform':
      top_prob = np.ones_like(top_list, dtype=np.float32) / len(top_table)
      bottom_prob = np.ones_like(
          bottom_list, dtype=np.float32) * self._pbt_drop_prob
    for i, target_id in enumerate(bottom_list):
      if np.random.binomial(1, bottom_prob[i]):
        target_agent = self._worker_names[target_id]
        # Sample a source from the top performers.
        source_id = top_list[np.where(
            np.random.multinomial(1, top_prob) == 1)[0][0]]
        source_agent = self._worker_names[source_id]
        logging.info('exploit agent %s(%s) to worker %s(%s), exploration start',
                     source_agent, self._agent_names[source_agent],
                     target_agent, self._agent_names[target_agent])
        new_hparam = self._mutate(self._hparams[source_agent],
                                  self._pbt_mutation_rate,
                                  self._pbt_mutate_list)
        new_agent = self.create_or_copy_agent(
            new_hparam,
            qtype=self._dqn_type,
            device=self._device_name[target_agent],
            sess=sess,
            parent_agent=self._agents[source_agent],
            current_worker=target_agent,
            do_copy=True)
        self._collect_py_policies[target_agent] = py_tf_policy.PyTFPolicy(
            new_agent.collect_policy)
        self._select_py_policies[target_agent] = py_tf_policy.PyTFPolicy(
            new_agent.collect_policy)
        # new_agent.train(self._rb_iterator[target_agent])
        self._hparams[target_agent] = new_hparam
        self._agent_names[target_agent] = new_hparam.name
        self._agents[target_agent] = new_agent
        # The clone inherits the source's Q history under its own name.
        self._bandit_arm_q[target_agent] = copy.deepcopy(
            self._bandit_arm_q[source_agent])
        self._bandit_arm_q[target_agent].rename('QMetric_' + target_agent)
        logging.info('created new worker agent %s with hyperparam %s',
                     target_agent, self._agent_names[target_agent])
  def _mutate(self, parent_hparam, mutation_rate, mutate_list):
    """Mutate.

    Returns a deep copy of ``parent_hparam`` with each hparam in
    ``mutate_list`` perturbed (probability ``mutation_rate``) by a random
    factor from self._pbt_perturb_factors. The new name encodes
    "<base>-<new pbt id>-<parent id>-<mutated hparams>".
    """
    new_hparam = copy.deepcopy(parent_hparam)
    suffix = []
    # parent_id = parent_hparam.name.split('-')[1]
    parent_id = '0'
    for hp in mutate_list:
      if np.random.binomial(1, mutation_rate):
        coeff = np.random.choice(self._pbt_perturb_factors)
        new_val = parent_hparam.get(hp) * coeff
        if hp == 'edecay':
          # edecay is stored as an int.
          # NOTE(review): the suffix records the un-truncated float here,
          # not the stored int -- confirm intended.
          new_hparam.set_hparam(hp, int(new_val))
          suffix.append(hp + str(new_val))
        else:
          new_hparam.set_hparam(hp, new_val)
          suffix.append(hp + '{:.6f}'.format(new_val))
    # Advance the global PBT id counter before reading it for the name.
    self._pbt_id()
    new_hparam.name = '-'.join([
        parent_hparam.name.split('-')[0],
        str(self._pbt_id.result()), parent_id
    ] + suffix)
    return new_hparam
  def _train_one_step(self, sess, metric_traj):
    """Run one training step for every agent whose update period is due.

    Returns (train_step, env_step); train_step is None when no agent
    trained this call.
    """
    env_step = self._env_steps_metric.result()
    if self._use_bandit or self._pbt:
      # Feed the behavior trajectory into the bandit reward metric.
      self._update_metrics([self._bandit_reward_metric], metric_traj)
    train_step = None
    for worker_name in self._agents:
      agent = self._agents[worker_name]
      if env_step % agent.update_period == 0:
        train_step, loss = agent.train_one_step(sess, self._train_file_writer)
        self._maybe_log_train(train_step, loss, worker_name)
        add_summary(self._train_file_writer, 'TrainStep/' + worker_name,
                    train_step, env_step)
    self._maybe_record_behavior_summaries(env_step)
    return train_step, env_step
  def _update_bandit(self,
                     sess,
                     method,
                     current_agent,
                     immediate_eval=False,
                     push_value=True):
    """Update bandit.

    Pushes a reward into ``current_agent``'s Q buffer, either from an
    on-demand online evaluation (immediate_eval=True) or from the
    accumulated behavior reward metric (which is then reset).
    """
    if immediate_eval:
      # run immediate online eval for the designated agent, but don't increase
      # the discount on other agents nor increase their most_recent_time
      self._online_eval_parallel(sess, self._select_py_policies[current_agent],
                                 self._env_select, self._update_policy_period,
                                 self._selection_metrics[current_agent])
      reward = self._selection_metrics[current_agent][0].result()
    else:
      reward = self._bandit_reward_metric.result()
      self._bandit_reward_metric.reset()
    if push_value:
      if 'discount' in method:
        self._bandit_arm_q[current_agent].add_to_buffer(
            reward, discount=self._bandit_discount)
        # Unselected arms receive a discounted zero so their Q decays too.
        self._add_zero_to_bandit(current_agent, discount=self._bandit_discount)
      else:
        self._bandit_arm_q[current_agent](reward)
        self._add_zero_to_bandit(current_agent)
    else:
      # Overwrite the most recent entry instead of appending a new one.
      self._bandit_arm_q[current_agent].modify_last_buffer(reward)
    logging.info(
        'bandit updated with immediate eval=%s,push_value=%s,method=%s, newest Q=%s',
        immediate_eval, push_value, method,
        [self._bandit_arm_q[worker].result() for worker in self._worker_names])
  def _add_zero_to_bandit(self, current_agent, discount=1.0, update_time=True):
    """Push a (possibly discounted) zero reward to every arm except
    ``current_agent``, keeping all buffers the same length."""
    for worker_name in self._agents:
      if worker_name != current_agent:
        self._bandit_arm_q[worker_name].add_to_buffer(
            0.0, discount=discount, update_time=update_time)
  def _which_policy(self, sess, method):
    """Which policy.

    Returns the index (into self._worker_names) of the worker whose policy
    should act next, according to ``method``: a fixed worker
    ('default_<name>'), uniform 'random', a bandit rule ('...bandit...',
    optionally ucb/epsilon/softmax/greedy), or an online-eval rule
    ('best_online*').
    """
    if method.startswith('default_'):
      logging.info('agent_name:%s', self._worker_names)
      try:
        worker_index = np.where(
            np.array(self._worker_names) == method[8:])[0][0]
      except ValueError:
        # NOTE(review): a missing name makes [0][0] raise IndexError, not
        # ValueError, and worker_index stays unbound afterwards -- confirm
        # and tighten this handler.
        logging.info('Invalid agent name: %s', method[8:])
    elif method == 'random':
      worker_index = np.random.choice(len(self._agents))
    elif 'bandit' in method:
      _, current_epsilon = sess.run([self._select_step, self._select_epsilon])
      if 'ucb' in method:
        q_table = [
            self._bandit_arm_q[worker_name].result(
                way='ucb', coeff=self._bandit_ucb_coeff)
            for worker_name in self._worker_names
        ]
      else:
        q_table = [
            self._bandit_arm_q[worker_name].result(way='q')
            for worker_name in self._worker_names
        ]
      # select arm
      q_table = np.asarray(q_table)
      if 'epsilon' in method:
        if np.random.rand() < current_epsilon:
          worker_index = np.random.choice(len(self._agents))
        else:
          worker_index = np.argmax(q_table)
      elif 'softmax' in method:
        # Replace +/-inf entries before exponentiating so the softmax is
        # finite.
        maxval = np.max(q_table[~np.isinf(q_table)])
        q_table[np.isinf(q_table)] = maxval * 2
        soft_q = np.exp(q_table) / sum(np.exp(q_table))
        logging.info('before soft q table = %s, sum_q = %f', soft_q,
                     sum(soft_q))
        soft_q /= (soft_q.sum() + 1e-7)
        logging.info('q table = %s', q_table)
        logging.info('soft q table = %s, sum_q = %f', soft_q, sum(soft_q))
        worker_index = np.where(np.random.multinomial(1, soft_q) == 1)[0][0]
      else:  # if 'greedy' in method:
        worker_index = np.argmax(q_table)
        logging.info('q table = %s', q_table)
      # else:
      #   raise ValueError('Invalid method: %s' % method)
    elif 'online' in method:
      logging.info('selection_metrics:%s',
                   self._selection_metrics[self._worker_names[0]])
      _, current_epsilon = sess.run([self._select_step, self._select_epsilon])
      if method == 'best_online':
        online_return = [
            self._selection_metrics[worker][0].result('mean')
            for worker in self._worker_names
        ]
        worker_index = np.argmax(online_return)
      elif method == 'best_online_ucb':
        online_ucb = [
            self._selection_metrics[worker][0].result('ucb', self._ucb_coeff)
            for worker in self._worker_names
        ]
        worker_index = np.argmax(online_ucb)
      elif method == 'best_online_variance':
        online_std = [
            self._selection_metrics[worker][0].result('std')
            for worker in self._worker_names
        ]
        worker_index = np.argmax(online_std)
      elif method == 'best_online_epsilon':
        if np.random.rand() < current_epsilon:
          worker_index = np.random.choice(len(self._agents))
        else:
          online_return = [
              self._selection_metrics[worker][0].result('mean')
              for worker in self._worker_names
          ]
          worker_index = np.argmax(online_return)
      else:
        raise ValueError('Invalid method: %s' % method)
    else:
      raise ValueError('Invalid method: %s' % method)
    return worker_index
  def _select_policy(self, sess, method, env_step=None):
    """Select policy.

    For 'online' methods, first refreshes every worker's selection metrics
    by running parallel online evaluation (with collect or eval policies
    depending on self._online_eval_use_train), then delegates the actual
    choice to _which_policy.
    """
    if 'online' in method:
      for worker in self._worker_names:
        if self._online_eval_use_train:
          self._online_eval_parallel(sess, self._select_py_policies[worker],
                                     self._env_select,
                                     self._eval_episode_select,
                                     self._selection_metrics[worker])
        else:
          self._online_eval_parallel(sess, self._eval_py_policies[worker],
                                     self._env_select,
                                     self._eval_episode_select,
                                     self._selection_metrics[worker])
        for metric in self._selection_metrics[worker]:
          add_summary(self._train_file_writer,
                      metric.name.split('_')[0] + '(mean)/' + metric.name,
                      metric.result('mean'), env_step)
          # add_summary(self._train_file_writer,
          #             'SelectionEvalMetrics(95ucb)/' + metric.name,
          #             metric.result('95ucb'), env_step)
    worker_index = self._which_policy(sess, method)
    return worker_index
  def _maybe_record_behavior_summaries(self, env_step):
    """Record summaries if env_step is a multiple of summary_interval."""
    if env_step % self._summary_interval == 0:
      for metric in self._behavior_metrics:
        # NOTE(review): zero results are skipped, which also drops
        # legitimately-zero metric values -- confirm intended.
        if metric.result() != 0:
          add_summary(self._train_file_writer, 'Metrics/' + metric.name,
                      metric.result(), env_step)
  def _maybe_time_train(self, train_step):
    """Maybe time train.

    Every log_interval steps, logs/records steps-per-second over the
    combined collect+train time since the last report, then resets timers.
    """
    if train_step % self._log_interval == 0:
      steps_per_sec = (
          (train_step - self._timed_at_step) /
          (self._collect_timer.value() + self._train_timer.value()))
      add_summary(self._train_file_writer, 'train_steps_per_sec', steps_per_sec,
                  train_step)
      logging.info('%.3f steps/sec', steps_per_sec)
      logging.info(
          '%s', 'collect_time = {}, train_time = {}'.format(
              self._collect_timer.value(), self._train_timer.value()))
      self._timed_at_step = train_step
      self._collect_timer.reset()
      self._train_timer.reset()
  def _maybe_log_and_reset_timer(self):
    """Log per-iteration phase timings (PBT/select/policy/checkpoint) and
    reset the corresponding timers."""
    logging.info(
        'iteration time: %s',
        'pbt_time = {}, select_time = {}, policy_time = {}, checkpoint_time = {}'
        .format(self._pbt_timer.value(), self._select_timer.value(),
                self._policy_timer.value(), self._checkpoint_timer.value()))
    self._pbt_timer.reset()
    self._select_timer.reset()
    self._policy_timer.reset()
    self._checkpoint_timer.reset()
@gin.configurable
class EvalRunner(Runner):
"""evaluate DQN on Atari."""
  def __init__(
      self,
      ucb_coeff=1.96,
      num_iterations=200,
      eval_episode_per_iteration=100,  # ALE frames
      eval_parallel_size=20,
      eval_epsilon_greedy=0.0,
      eval_interval_secs=60,
      eval_agents=None,
      **kwargs):
    """Eval-only runner: builds parallel Atari envs and per-agent metrics.

    Args:
      ucb_coeff: coefficient for UCB-style result aggregation.
      num_iterations: number of eval iterations to run.
      eval_episode_per_iteration: episodes evaluated per iteration.
      eval_parallel_size: number of parallel eval environments.
      eval_epsilon_greedy: epsilon used during evaluation.
      eval_interval_secs: min seconds between checkpoint polls.
      eval_agents: optional subset of worker names to evaluate; when set it
        replaces self._worker_names.
      **kwargs: forwarded to the base Runner.
    """
    super(EvalRunner, self).__init__(**kwargs)
    self._num_iterations = num_iterations
    self._eval_interval_secs = eval_interval_secs
    self._eval_parallel_size = eval_parallel_size
    self._eval_episode_per_iteration = eval_episode_per_iteration
    self._eval_epsilon_greedy = eval_epsilon_greedy
    self._eval_agents = eval_agents
    self._ucb_coeff = ucb_coeff
    self._env_eval = parallel_py_environment.ParallelPyEnvironment([
        lambda: suite_atari.load(  # pylint: disable=g-long-lambda
            self._env_name,
            max_episode_steps=self._max_episode_frames / ATARI_FRAME_SKIP,
            gym_env_wrappers=suite_atari.
            DEFAULT_ATARI_GYM_WRAPPERS_WITH_STACKING)
    ] * self._eval_parallel_size)
    if eval_agents:
      self._worker_names = eval_agents
    self._eval_metrics = {}
    self._eval_py_policies = {}
    for worker in self._worker_names:
      # Use the policy directly for eval.
      self._eval_metrics[worker] = [
          new_pymetrics.DistributionReturnMetric(
              name='EvalAverageReturn_' + worker, buffer_size=np.inf),
          new_pymetrics.DistributionEpisodeLengthMetric(
              name='EvalAverageEpisodeLength_' + worker, buffer_size=np.inf)
      ]
    # self.build_graph_and_assign_device()
    # self.update_train_bandit_checkpointer(update_bandit=False,use_common=True)
  def _initialize_graph(self, sess, checkpoint_path):
    """Initialize the graph for sess."""
    # Restore trained variables from the given checkpoint into the session.
    self._train_checkpointer._checkpoint.restore(  # pylint: disable=protected-access
        checkpoint_path).initialize_or_restore(sess)
    common.initialize_uninitialized_variables(sess)
    sess.run(self._init_agent_op)
    # NOTE(review): creating the eval timer as a side effect of graph init
    # (re-created on every restore) -- confirm intended.
    self._eval_timer = timer.Timer()
  def traindone(self):
    """Checkpoint-iterator timeout callback: True once the trainer has
    written the 'TrainDone' marker file, which stops the eval loop."""
    return tf.gfile.Exists(os.path.join(self._train_dir, 'TrainDone'))
def run(self):
"""Execute the eval loop."""
self._eval_file_writer = {}
for worker_name in self._eval_agents:
if not tf.gfile.Exists(os.path.join(self._train_dir, worker_name)):
tf.gfile.MakeDirs(os.path.join(self._train_dir, worker_name))
self._eval_file_writer[worker_name] = tf.summary.FileWriter(
os.path.join(self._train_dir, worker_name))
for checkpoint_path in tf.train.checkpoints_iterator(
self._train_dir,
min_interval_secs=self._eval_interval_secs,
timeout_fn=self.traindone):
logging.info('find new checkpoint %s', checkpoint_path)
step = checkpoint_path.split('ckpt-')[1]
hparam_file = os.path.join(self._hparam_dir,
'hparam-{}.json'.format(step))
if tf.gfile.Exists(hparam_file):
logging.info('hparam file %s found, loading', hparam_file)
self._load_hparam(hparam_file)
logging.info('agent_names:%s', self._agent_names)
assert not FLAGS.enable_functions
tf.reset_default_graph()
gc.collect()
self.build_graph_and_assign_device()
self._select_step = tf.Variable(
0, dtype=tf.int64, trainable=False, name='bandit_step')
# self.create_pypolicy_and_train_op()
fail = True
while fail:
# try
self.update_train_bandit_checkpointer(
update_bandit=False, use_common=True)
fail = False
logging.info('new checkpoint manager at ckpt:%s, hparam at ckpt:%s',
self._train_checkpointer._manager.latest_checkpoint, step) # pylint: disable=protected-access
else:
logging.info('hparam file not found for %s, try next ckpt', hparam_file)
continue
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
# Initialize the graph.
self._initialize_graph(sess, checkpoint_path)
logging.info('Starting evaluation')
logging.info('choosing from: %s', self._worker_names)
for worker_name in self._eval_agents:
logging.info('evaluating agent %s', worker_name)
if worker_name in self._worker_names:
agent = self._agents[worker_name]
with tf.device(self._device_name[worker_name]):
self._eval_py_policies[worker_name] = py_tf_policy.PyTFPolicy(
agent.policy)
self._online_eval_parallel(sess,
| |
"""test_mainmodel.py - tests the mainmodel module
<NAME> (TRI/Austin, Inc.)
"""
__author__ = '<NAME>'
import models.mainmodel as model
import models.dataio as dataio
import models.abstractplugin as abstractplugin
import models.config as config
import models.ultrasonicgate as ultrasonicgate
import controllers.pathfinder as pathfinder
from utils.skiptest import skipIfModuleNotInstalled
import h5py
import numpy as np
import logging
import multiprocessing
import os
import random
import shutil
import sys
import tempfile
import unittest
def deleted_user_path():
    """Utility function to delete empty folders in the user data folders,
    used to verify that MainModel will recreate missing folders as required.
    Returns a list of folders successfully deleted or None if no folders
    were deleted.

    Fixed: catches OSError instead of WindowsError -- WindowsError only
    exists on Windows (where it aliases OSError), so the old except clause
    raised NameError on other platforms when rmdir failed.
    """
    data_folders = [pathfinder.user_path(), pathfinder.data_path(), pathfinder.thumbnails_path(),
                    pathfinder.plugins_path(), pathfinder.podmodels_path(), pathfinder.adamodels_path(),
                    pathfinder.colormaps_path()]
    deleted_folders = []
    for folder in data_folders:
        # Only remove folders that exist and are empty.
        exists_and_empty = os.path.exists(folder) and not os.listdir(folder)
        if exists_and_empty:
            try:
                os.rmdir(folder)
                deleted_folders.append(folder)
            except OSError:  # folder in use (Explorer, cmd, etc.)
                pass
    if deleted_folders:
        return deleted_folders
    return None
# Define a mock plugin to inspect results of calling plugin classes
class MockPlugin(abstractplugin.TRIPlugin):
    """Mock NDIToolbox plugin used to check plugin_wrapper"""

    def __init__(self, **kwargs):
        super(MockPlugin, self).__init__(**kwargs)
        # Default config; plugin_wrapper is expected to replace this.
        self.config = {'a': 'b'}
        # Record the constructor kwargs so tests can assert on them later.
        self._data = {'kwargs': kwargs}

    @property
    def data(self):
        """Dict recording the kwargs, config and input data this plugin saw."""
        return self._data

    @data.setter
    def data(self, value):
        self._data['data'] = value

    def run(self):
        """Record the active config instead of doing real work."""
        self._data['config'] = self.config
# A MockPlugin that raises an Exception on execution
class ExceptionPlugin(MockPlugin):
    """Raises an Exception on run() - used to verify
    exception Queue messaging"""
    def run(self):
        raise Exception("Wuh-oh.")
class TestMainModel(unittest.TestCase):
"""Tests the main model"""
    def setUp(self):
        """Create a sample HDF5 data file and a MainModel under test."""
        self.sample_data = np.array(self.random_data())
        self.sample_data_basename = "sample.dat"
        self.sample_data_file = os.path.join(os.path.dirname(__file__),
                                             self.sample_data_basename)
        with h5py.File(self.sample_data_file, 'w') as fidout:
            fidout.create_dataset(self.sample_data_basename, data=self.sample_data)
        # The model only needs a controller reference; a dummy string suffices.
        self.mock_controller = ""
        self.model = model.MainModel(self.mock_controller)
        # Remember the configured log level so tests can restore it later.
        cfg = config.Configure(pathfinder.config_path())
        self.original_loglevel = cfg.get_app_option("log level")
def random_data(self):
"""Returns a list of random data"""
return [random.uniform(-100, 100) for i in range(25)]
    # Skipped when no user folder could be deleted beforehand (nothing to
    # recreate, or the folders were locked by another process).
    @unittest.skipIf(deleted_user_path() is None,
                     "User data folders in use")
    def test_check_user_path(self):
        """Verify main model creates the user data folders if not
        already in existence."""
        self.check_user_path()
    def check_user_path(self):
        """Verify user data folders were created"""
        # Shared helper (also used by test_migrate_user_path): asserts every
        # expected user data folder exists after check_user_path().
        data_folders = [pathfinder.user_path(), pathfinder.data_path(),
                        pathfinder.thumbnails_path(), pathfinder.gates_path(),
                        pathfinder.plugins_path(), pathfinder.podmodels_path(),
                        pathfinder.adamodels_path(), pathfinder.colormaps_path(),
                        pathfinder.batchoutput_path()]
        self.model.check_user_path()
        for folder in data_folders:
            self.assertTrue(os.path.exists(folder))
def test_copy_system_files(self):
"""Verify main model copies dynamic modules to the specified
folder."""
test_module_folder = os.path.dirname(__file__)
test_modules = []
temp_dest_folder = tempfile.mkdtemp()
module_files = os.listdir(test_module_folder)
for module_file in module_files:
module_name, module_extension = os.path.splitext(module_file)
if module_name.startswith("test_") and\
module_extension == os.extsep + "py":
test_modules.append(module_file)
self.model.copy_system_files(test_module_folder, temp_dest_folder)
for module_file in test_modules:
dest_module = os.path.join(temp_dest_folder, module_file)
self.assertTrue(os.path.exists(dest_module))
try:
shutil.rmtree(temp_dest_folder)
except WindowsError: # folder in use (Windows)
pass
    def test_copy_system_plugins(self):
        """Verify main model copies system plugins to the user's
        plugins folder."""
        # Delegates to the reusable helper below.
        self.copy_system_plugins()
    def test_copy_system_gates(self):
        """Verify main model copies system ultrasonic gate plugins to the user's
        gates folder."""
        # Delegates to the reusable helper below.
        self.copy_system_gates()
    def test_copy_system_colormaps(self):
        """Verify main model copies colormaps to the user's colormaps folder."""
        # Delegates to the reusable helper below.
        self.copy_system_colormaps()
    def copy_system_plugins(self):
        """Verify system plugins are copied to the user's plugins folder"""
        # Sample of system plugins to install
        system_plugins = ['medfilter_plugin.py', 'normalize_plugin.py', '__init__.py']
        # Remove first so the assertion proves the copy actually happened.
        self.remove_system_files(system_plugins, pathfinder.plugins_path())
        self.model.copy_system_plugins()
        for plugin in system_plugins:
            installed_plugin = os.path.join(pathfinder.plugins_path(), plugin)
            self.assertTrue(os.path.exists(installed_plugin))
    def copy_system_gates(self):
        """Verify system ultrasonic gate plugins are copied to user's
        gates folder"""
        gate_plugins = ['predefined_gates.py', 'additional_gates.py', '__init__.py']
        # Remove first so the assertion proves the copy actually happened.
        self.remove_system_files(gate_plugins, pathfinder.gates_path())
        self.model.copy_system_gates()
        for gate in gate_plugins:
            installed_gate = os.path.join(pathfinder.gates_path(), gate)
            self.assertTrue(os.path.exists(installed_gate))
    def copy_system_colormaps(self):
        """Verify system colormaps are copied to user's folder"""
        colormaps_folder = os.path.join(pathfinder.app_path(), 'colormaps')
        colormaps = os.listdir(colormaps_folder)
        # Remove first so the assertion proves the copy actually happened.
        self.remove_system_files(colormaps, pathfinder.colormaps_path())
        self.model.copy_system_colormaps()
        for cmap in colormaps:
            installed_cmap = os.path.join(pathfinder.colormaps_path(), cmap)
            self.assertTrue(os.path.exists(installed_cmap))
def remove_system_files(self, file_list, dest):
"""Attempts to remove every file in file_list found in dest folder.
Used to verify copying system files to user's local data folder."""
for each_file in file_list:
dest_path = os.path.join(dest, each_file)
if os.path.exists(dest_path):
try:
os.remove(dest_path)
except WindowsError: # file in use (Windows)
pass
def test_migrate_user_path(self):
"""Verify migration of the user's data folder"""
current_user_path = pathfinder.user_path()
temp_user_path = tempfile.mkdtemp()
self.model.migrate_user_path(temp_user_path)
self.check_user_path()
self.copy_system_plugins()
self.model.migrate_user_path(current_user_path)
try:
shutil.rmtree(temp_user_path)
except WindowsError: # folder in use
pass
def test_load_dynamic_modules(self):
    """Verify the main model's dynamic module loading."""
    discovered = model.load_dynamic_modules(pathfinder.plugins_path(),
                                            abstractplugin.AbstractPlugin)
    # Each entry's second element is the loaded plugin class
    for entry in discovered:
        self.assertTrue(issubclass(entry[1], abstractplugin.AbstractPlugin))
def test_load_plugins(self):
    """Verify the main model loads available plugins."""
    # Every loaded entry must carry a class derived from AbstractPlugin
    for entry in model.load_plugins():
        self.assertTrue(issubclass(entry[1], abstractplugin.AbstractPlugin))
def test_load_gates(self):
    """Verify the main model loads available gates."""
    # Every loaded entry must carry a class derived from UltrasonicGate
    for entry in model.load_gates():
        self.assertTrue(issubclass(entry[1], ultrasonicgate.UltrasonicGate))
def test_plugin_wrapper(self):
    """Verify the plugin_wrapper function properly configures and runs a plugin."""
    result_queue = multiprocessing.Queue()
    error_queue = multiprocessing.Queue()
    sample_data = np.array(self.random_data())
    sample_cfg = {'a': 'c'}
    extra_args = {'name': 'Mock Plugin', 'description': 'Mock plugin used to test plugin_wrapper'}
    model.plugin_wrapper(error_queue, MockPlugin, sample_data, result_queue,
                         sample_cfg, **extra_args)
    # The wrapper must echo back config, kwargs and data in a dict
    outcome = result_queue.get()
    self.assertTrue(isinstance(outcome, dict))
    self.assertDictEqual(outcome['config'], sample_cfg)
    self.assertDictEqual(outcome['kwargs'], extra_args)
    self.assertTrue(np.array_equal(outcome['data'], sample_data))
def test_plugin_wrapper_exceptions(self):
    """Verify the plugin_wrapper function properly returns Exception info."""
    result_queue = multiprocessing.Queue()
    error_queue = multiprocessing.Queue()
    sample_data = np.array(self.random_data())
    model.plugin_wrapper(exception_queue=error_queue,
                         plugin_cls=ExceptionPlugin,
                         plugin_data=sample_data,
                         plugin_queue=result_queue)
    # The wrapper posts (type, instance) for any exception the plugin raised
    _, raised = error_queue.get(block=True)
    self.assertTrue(isinstance(raised, Exception))
@skipIfModuleNotInstalled("tcunittest")
def test_run_plugin(self):
    """Verify the main model can run a loaded plugin."""
    sample_data = np.array(self.random_data())
    sample_cfg = {'pi': 3.141592654}
    normalize_cls = self.get_normalize_plugin()
    process, result_queue, _ = model.run_plugin(normalize_cls,
                                                data=sample_data,
                                                config=sample_cfg)
    self.assertTrue(isinstance(process, multiprocessing.Process))
    # NormalizePlugin should scale the data by its maximum value
    expected = sample_data / np.max(sample_data)
    self.assertTrue(np.array_equal(expected, result_queue.get()))
@skipIfModuleNotInstalled("tcunittest")
def test_run_plugin_exceptions(self):
    """Verify run_plugin returns exception messages in Queue."""
    # All-zero data triggers a division by zero inside NormalizePlugin
    zero_data = np.zeros(5)
    process, result_queue, error_queue = model.run_plugin(
        self.get_normalize_plugin(), data=zero_data, config={'pi': 3.141592654})
    _, raised = error_queue.get(block=True)
    self.assertTrue(isinstance(raised, Exception))
def get_normalize_plugin(self):
    """Return NDIToolbox's NormalizePlugin plugin class."""
    target = "NormalizePlugin"
    available = model.load_plugins()
    names = [entry[0] for entry in available]
    # Ensure that the normalize plugin was found
    self.assertTrue(target in names)
    return available[names.index(target)][1]
def test_get_config(self):
    """Verify returning the application's configuration."""
    expected = config.Configure(pathfinder.config_path()).config
    expected.read(pathfinder.config_path())
    returned = model.get_config().config
    returned.read(pathfinder.config_path())
    # Every section read directly must match what the model hands back
    for section in expected.sections():
        self.assertListEqual(expected.items(section), returned.items(section))
def test_copy_data(self):
    """Verify copying of sample data file to data folder."""
    self.model.copy_data(self.sample_data_file)
    copied = os.path.join(pathfinder.data_path(), self.sample_data_basename)
    self.assertTrue(os.path.exists(copied))
    os.remove(copied)  # clean up after the check
def test_remove_data(self):
    """Verify removal of a data file from the data folder."""
    self.model.copy_data(self.sample_data_file)
    copied = os.path.join(pathfinder.data_path(), self.sample_data_basename)
    self.assertTrue(os.path.exists(copied))
    self.model.remove_data(copied)
    self.assertFalse(os.path.exists(copied))
def test_remove_thumbs(self):
    """Verify remove_thumbs method deletes all files in the thumbnail folder."""
    thumbs_folder = pathfinder.thumbnails_path()
    shutil.copy(__file__, thumbs_folder)  # seed the folder with at least one file
    self.assertTrue(len(os.listdir(thumbs_folder)) > 0)
    self.model.remove_thumbs()
    self.assertListEqual(os.listdir(thumbs_folder), [])
def test_get_preview_state(self):
    """Verify returning the current setting for displaying plot thumbnails."""
    expected_state = config.Configure(
        pathfinder.config_path()).get_app_option_boolean("Enable Preview")
    self.assertEqual(expected_state, self.model.get_preview_state())
def test_set_preview_state(self):
    """Verify setting the current setting for displaying plot thumbnails."""
    original = config.Configure(
        pathfinder.config_path()).get_app_option_boolean("Enable Preview")
    self.assertEqual(original, self.model.get_preview_state())
    # Flip the setting, check it took, then restore the user's choice
    self.model.set_preview_state(not original)
    self.assertEqual(not original, self.model.get_preview_state())
    self.model.set_preview_state(original)
def test_get_coords(self):
    """Verify returning the UL corner of the main app window set in config."""
    cfg = config.Configure(pathfinder.config_path())
    str_coords = cfg.get_app_option_list("Coordinates")
    # Default must be a list: assertListEqual raises on the original (0, 0) tuple
    expected_coords = [0, 0]
    if str_coords is not None:
        expected_coords = [int(coord) for coord in str_coords]
    self.assertListEqual(expected_coords, self.model.get_coords())
def test_set_coords(self):
    """Verify setting the UL corner of the main app window in config."""
    cfg = config.Configure(pathfinder.config_path())
    str_coords = cfg.get_app_option_list("Coordinates")
    # Default must be a list: assertListEqual raises on the original (0, 0) tuple
    original_coords = [0, 0]
    if str_coords is not None:
        original_coords = [int(coord) for coord in str_coords]
    self.assertListEqual(original_coords, self.model.get_coords())
    new_coords_int = [3, 5]
    self.model.set_coords(new_coords_int)
    self.assertListEqual(new_coords_int, self.model.get_coords())
    # String coordinates must be converted back to ints by the model
    new_coords_str = ["9", "-1"]
    self.model.set_coords(new_coords_str)
    self.assertListEqual([int(coord) for coord in new_coords_str], self.model.get_coords())
    self.model.set_coords(original_coords)  # restore the user's setting
def test_get_size(self):
    """Verify returning the size of the main app window set in config."""
    cfg = config.Configure(pathfinder.config_path())
    size_setting = cfg.get_app_option_list("Window Size")
    # Fall back to the documented default when no size is configured
    expected = [300, 600] if size_setting is None else [int(dim) for dim in size_setting]
    self.assertListEqual(expected, self.model.get_window_size())
def test_set_size(self):
    """Verify setting the size of the main app window in config."""
    cfg = config.Configure(pathfinder.config_path())
    size_setting = cfg.get_app_option_list("Window Size")
    original = [300, 600] if size_setting is None else [int(dim) for dim in size_setting]
    self.assertListEqual(original, self.model.get_window_size())
    # Change the size, check it took, then restore the user's setting
    new_size = [800, 1024]
    self.model.set_window_size(new_size)
    self.assertListEqual(new_size, self.model.get_window_size())
    self.model.set_window_size(original)
def test_get_loglevel(self):
    """Verify returning the log level from config."""
    cfg = config.Configure(pathfinder.config_path())
    configured = cfg.get_app_option("log level")
    level_map = {'debug': logging.DEBUG,
                 'info': logging.INFO,
                 'warning': logging.WARNING,
                 'error': logging.ERROR,
                 'critical': logging.CRITICAL}
    # Unknown / missing settings default to WARNING
    self.assertEqual(level_map.get(configured, logging.WARNING), model.get_loglevel())
def test_set_loglevel(self):
    """Verify setting the log level in config.

    Unrecognized values (None, 'abc') must be accepted without error; only
    recognized names are checked against get_loglevel.
    """
    # Removed the unused `cfg = config.Configure(...)` local from the original.
    acceptable_log_levels = {'debug': logging.DEBUG,
                             'info': logging.INFO,
                             'warning': logging.WARNING,
                             'error': logging.ERROR,
                             'critical': logging.CRITICAL}
    for level in ['debug', 'info', 'warning', 'error', 'critical', None, 'abc']:
        model.set_loglevel(level)
        if level in acceptable_log_levels:
            self.assertEqual(acceptable_log_levels[level], model.get_loglevel())
def test_get_loglevels(self):
    """Verify returning a list of available log levels."""
    expected_levels = {'debug': logging.DEBUG,
                       'info': logging.INFO,
                       'warning': logging.WARNING,
                       'error': logging.ERROR,
                       'critical': logging.CRITICAL}
    self.assertDictEqual(expected_levels, model.available_log_levels)
def test_get_logger(self):
"""Verify returning a logger instance"""
logger = model.get_logger(__name__)
self.assertTrue(isinstance(logger, logging.Logger))
expected_logger = logging.getLogger(name='.'.join(['nditoolbox', __name__]))
self.assertEqual(expected_logger.name, logger.name)
acceptable_log_levels = [logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL]
for level in acceptable_log_levels:
| |
import re
import pymongo
from bson.dbref import DBRef
from pymongo.read_preferences import ReadPreference
from mongoengine import signals
from mongoengine.base import (
BaseDict,
BaseDocument,
BaseList,
DocumentMetaclass,
EmbeddedDocumentList,
TopLevelDocumentMetaclass,
get_document,
)
from mongoengine.common import _import_class
from mongoengine.connection import DEFAULT_CONNECTION_NAME, get_db
from mongoengine.context_managers import (
set_write_concern,
switch_collection,
switch_db,
)
from mongoengine.errors import (
InvalidDocumentError,
InvalidQueryError,
SaveConditionError,
)
from mongoengine.pymongo_support import list_collection_names
from mongoengine.queryset import (
NotUniqueError,
OperationError,
QuerySet,
transform,
)
# Public names re-exported by this module (document classes plus the
# collection/uniqueness errors callers are expected to catch).
__all__ = (
    "Document",
    "EmbeddedDocument",
    "DynamicDocument",
    "DynamicEmbeddedDocument",
    "OperationError",
    "InvalidCollectionError",
    "NotUniqueError",
    "MapReduceDocument",
)
def includes_cls(fields):
    """Return True when an index spec's first field is the `_cls` key.

    Helper used for ensuring and comparing indexes.  `fields` may hold plain
    field names or (name, direction) pairs.
    """
    if not len(fields):
        return False
    head = fields[0]
    if isinstance(head, str):
        return head == "_cls"
    if isinstance(head, (list, tuple)) and len(head):
        return head[0] == "_cls"
    return False
class InvalidCollectionError(Exception):
    """Raised when an existing collection conflicts with the requested options
    (e.g. asking for a capped collection where an incompatible one exists)."""
    pass
class EmbeddedDocument(BaseDocument, metaclass=DocumentMetaclass):
    r"""A :class:`~mongoengine.Document` stored inside another document rather
    than in its own collection.

    Use an :class:`~mongoengine.EmbeddedDocument` as the value type of an
    :class:`~mongoengine.EmbeddedDocumentField` on a
    :class:`~mongoengine.Document`.  Subclassing an embedded document creates
    a specialised variant stored alongside the base one; a hidden ``_cls``
    field records the concrete class.  Enable this behaviour by setting
    :attr:`allow_inheritance` to ``True`` in the :attr:`meta` dictionary.
    """

    __slots__ = ("_instance",)

    # my_metaclass is defined so that metaclass can be queried in Python 2 & 3
    my_metaclass = DocumentMetaclass

    # A generic embedded document has no immutable identifying properties, so
    # it is deliberately unhashable.  Define __hash__ on a subclass if your
    # embedded documents need to be hashable.
    __hash__ = None

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._instance = None
        self._changed_fields = []

    def __eq__(self, other):
        # Equal only to same-class documents with identical field data
        return isinstance(other, self.__class__) and self._data == other._data

    def __ne__(self, other):
        return not self.__eq__(other)

    def __getstate__(self):
        # The owning document is not picklable state; drop the back-reference
        state = super().__getstate__()
        state["_instance"] = None
        return state

    def __setstate__(self, state):
        super().__setstate__(state)
        self._instance = state["_instance"]

    def to_mongo(self, *args, **kwargs):
        son = super().to_mongo(*args, **kwargs)
        # A null _id would be persisted otherwise; strip it from the SON
        if "_id" in son and son["_id"] is None:
            del son["_id"]
        return son
class Document(BaseDocument, metaclass=TopLevelDocumentMetaclass):
"""The base class used for defining the structure and properties of
collections of documents stored in MongoDB. Inherit from this class, and
add fields as class attributes to define a document's structure.
Individual documents may then be created by making instances of the
:class:`~mongoengine.Document` subclass.
By default, the MongoDB collection used to store documents created using a
:class:`~mongoengine.Document` subclass will be the name of the subclass
converted to snake_case. A different collection may be specified by
providing :attr:`collection` to the :attr:`meta` dictionary in the class
definition.
A :class:`~mongoengine.Document` subclass may be itself subclassed, to
create a specialised version of the document that will be stored in the
same collection. To facilitate this behaviour a `_cls`
field is added to documents (hidden though the MongoEngine interface).
To enable this behaviour set :attr:`allow_inheritance` to ``True`` in the
:attr:`meta` dictionary.
A :class:`~mongoengine.Document` may use a **Capped Collection** by
specifying :attr:`max_documents` and :attr:`max_size` in the :attr:`meta`
dictionary. :attr:`max_documents` is the maximum number of documents that
is allowed to be stored in the collection, and :attr:`max_size` is the
maximum size of the collection in bytes. :attr:`max_size` is rounded up
to the next multiple of 256 by MongoDB internally and mongoengine before.
Use also a multiple of 256 to avoid confusions. If :attr:`max_size` is not
specified and :attr:`max_documents` is, :attr:`max_size` defaults to
10485760 bytes (10MB).
Indexes may be created by specifying :attr:`indexes` in the :attr:`meta`
dictionary. The value should be a list of field names or tuples of field
names. Index direction may be specified by prefixing the field names with
a **+** or **-** sign.
Automatic index creation can be disabled by specifying
:attr:`auto_create_index` in the :attr:`meta` dictionary. If this is set to
False then indexes will not be created by MongoEngine. This is useful in
production systems where index creation is performed as part of a
deployment system.
By default, _cls will be added to the start of every index (that
doesn't contain a list) if allow_inheritance is True. This can be
disabled by either setting cls to False on the specific index or
by setting index_cls to False on the meta dictionary for the document.
By default, any extra attribute existing in stored data but not declared
in your model will raise a :class:`~mongoengine.FieldDoesNotExist` error.
This can be disabled by setting :attr:`strict` to ``False``
in the :attr:`meta` dictionary.
"""
# my_metaclass is defined so that metaclass can be queried in Python 2 & 3
my_metaclass = TopLevelDocumentMetaclass

# NOTE(review): __objects presumably backs the name-mangled queryset
# accessor defined by the metaclass — confirm before relying on it.
__slots__ = ("__objects",)
@property
def pk(self):
    """Return the primary key, or None when no id field is defined."""
    try:
        id_field = self._meta["id_field"]
    except KeyError:
        return None
    return getattr(self, id_field)

@pk.setter
def pk(self, value):
    """Assign the primary key."""
    setattr(self, self._meta["id_field"], value)
def __hash__(self):
    """Hash by primary key; unsaved documents (no PK yet) fall back to the
    default object identity hash."""
    pk = self.pk
    if pk is not None:
        return hash(pk)
    return super(BaseDocument, self).__hash__()
@classmethod
def _get_db(cls):
    """Return the database for this model, honouring a custom db_alias."""
    alias = cls._meta.get("db_alias", DEFAULT_CONNECTION_NAME)
    return get_db(alias)
@classmethod
def _disconnect(cls):
    """Detach the Document class from the (cached) database collection"""
    # Clearing the cache forces _get_collection() to re-resolve on next use.
    cls._collection = None
@classmethod
def _get_collection(cls):
    """Return the PyMongo collection corresponding to this document.

    Upon first call, this method:

    1. Initializes a :class:`~pymongo.collection.Collection` (capped when the
       meta dictionary requests one) and caches it on the class.
    2. Creates the indexes defined in this document's :attr:`meta` dictionary,
       unless `auto_create_index` is False or we are not connected to a
       primary.
    """
    if getattr(cls, "_collection", None) is None:
        # Resolve either a capped or a regular collection.
        if cls._meta.get("max_size") or cls._meta.get("max_documents"):
            cls._collection = cls._get_capped_collection()
        else:
            cls._collection = cls._get_db()[cls._get_collection_name()]
        # Ensure indexes unless auto_create_index was set to False.
        # There is also no need to ensure indexes on a secondary.
        db = cls._get_db()
        if cls._meta.get("auto_create_index", True) and db.client.is_primary:
            cls.ensure_indexes()
    return cls._collection
@classmethod
def _get_capped_collection(cls):
    """Create a new or get an existing capped PyMongo collection.

    :raises InvalidCollectionError: if a collection with the same name
        already exists with incompatible capped options.
    """
    db = cls._get_db()
    collection_name = cls._get_collection_name()

    # Get max document limit and max byte size from meta.
    max_size = cls._meta.get("max_size") or 10 * 2 ** 20  # 10MB default
    max_documents = cls._meta.get("max_documents")

    # MongoDB will automatically raise the size to make it a multiple of
    # 256 bytes. We raise it here ourselves to be able to reliably compare
    # the options below.
    if max_size % 256:
        max_size = (max_size // 256 + 1) * 256

    # If the collection already exists and has different options
    # (i.e. isn't capped or has different max/size), raise an error.
    if collection_name in list_collection_names(
        db, include_system_collections=True
    ):
        collection = db[collection_name]
        options = collection.options()
        if options.get("max") != max_documents or options.get("size") != max_size:
            # Fixed: report the collection *name*; the original formatted
            # cls._collection, which is typically still None at this point.
            raise InvalidCollectionError(
                'Cannot create collection "{}" as a capped '
                "collection as it already exists".format(collection_name)
            )
        return collection

    # Create a new capped collection.
    opts = {"capped": True, "size": max_size}
    if max_documents:
        opts["max"] = max_documents
    return db.create_collection(collection_name, **opts)
def to_mongo(self, *args, **kwargs):
    """Serialise to SON, resolving a missing '_id' from self._data['id']."""
    son = super().to_mongo(*args, **kwargs)
    if son["_id"] is None:
        # Fall back to the raw data's id; drop '_id' entirely when that is
        # unset too, so an explicit null is never stored.
        fallback = self._data.get("id")
        if fallback is None:
            del son["_id"]
        else:
            son["_id"] = fallback
    return son
def modify(self, query=None, **update):
"""Perform an atomic update of the document in the database and reload
the document object using updated version.
Returns True if the document has been updated or False if the document
in the database doesn't match the query.
.. note:: All unsaved changes that have been made to the document are
rejected if the method returns True.
:param query: the update will be performed only if the document in the
database matches the query
:param update: Django-style update keyword arguments
"""
if query is None:
query = {}
if self.pk is None:
raise InvalidDocumentError("The document does not have a primary key.")
id_field = self._meta["id_field"]
query = query.copy() if isinstance(query, dict) else query.to_query(self)
if id_field not in query:
query[id_field] = self.pk
elif query[id_field] != self.pk:
raise InvalidQueryError(
"Invalid document modify query: | |
<filename>supersid/supersid_plot.py<gh_stars>10-100
#!/usr/bin/python
'''
supersid_plot
version: 1.3.1 enhanced for Python 2.7 and 3.3
Original Copyright: Stanford Solar Center - 2008
Copyright: <NAME> - 2012
Support one to many files as input, even in Drag & Drop
Draw multi-stations graphs
Offer the possibility to generate PDF and email it (perfect for batch mode)
Offer the possibility to fetch NOAA XRA data and add them on the plot
'''
from __future__ import print_function # use the new Python 3 'print' function
import sys
import datetime, time
import itertools
import os.path
import glob
# matplotlib tools
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter as ff
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.dates
# Internet and Email modules
import smtplib
try: # python 2.7 vs. Python 3.3
import urllib2
from email.MIMEText import MIMEText
except ImportError:
import urllib.request, urllib.error
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email import encoders, utils
import argparse
# SuperSID modules
from sidfile import SidFile
from config import Config
def sendMail(config, To_mail, msgBody, PDFfile):
    """Send the mail using the smtplib module.

    The plot (PDF file) is attached.  Server/credentials come from the
    SuperSID config object.
    """
    senderEmail = config.get("from_mail", "")
    mailserver = config.get("email_server", "")
    mailport = config.get("email_port", "")
    mailserveruser = config.get("email_login", "")        # empty/None if no login required
    mailserverpasswd = config.get("email_password", "")   # empty/None if no login required

    # create the mail message
    msg = MIMEMultipart(_subtype='html')
    msg['Subject'] = 'Auto-generated eMail from SuperSID'
    msg.attach(MIMEText(msgBody))
    # Following headers are useful to show the email correctly
    msg['From'] = senderEmail
    msg['Reply-to'] = senderEmail
    msg['To'] = To_mail
    msg['Date'] = utils.formatdate(localtime=1)

    # attach the PDF file
    ctype, encoding = ('application/pdf', None)
    maintype, subtype = ctype.split('/', 1)
    with open(PDFfile, 'rb') as pdf:
        att = MIMEBase(maintype, subtype)
        att.set_payload(pdf.read())
    encoders.encode_base64(att)
    # Fixed: attach under the base name only — the original leaked the
    # sender's full local path in the Content-Disposition header.
    att.add_header('Content-Disposition', 'attachment',
                   filename=os.path.basename(PDFfile))
    msg.attach(att)

    # Establish an SMTP object by connecting to your mail server
    s = smtplib.SMTP()
    print("Connect to:", mailserver, mailport)
    s.connect(mailserver, port=mailport)
    if mailserveruser:
        s.login(mailserveruser, mailserverpasswd)
    try:
        # Send the email - real from, real to, extra headers and content ...
        s.sendmail(senderEmail, To_mail, msg.as_string())
    finally:
        # quit() performs the SMTP QUIT exchange; the original close()
        # dropped the connection without it.
        s.quit()
    print("Email to %s sent." % To_mail)
class SUPERSID_PLOT():
    """Combine one or more SID/SuperSID csv files into a single matplotlib
    plot, optionally annotated with NOAA XRA flare events."""
    def m2hm(self, x, i):
        """Small function to format the time on horizontal axis - minor ticks"""
        t = matplotlib.dates.num2date(x)
        h = t.hour
        m = t.minute
        return '%(h)02d:%(m)02d' % {'h':h,'m':m} if h % 2 == 1 else ''  # only for odd hours
    def m2yyyymmdd(self, x, i):
        """Small function to format the date on horizontal axis - major ticks"""
        t = matplotlib.dates.num2date(x)
        y = t.year
        m = t.month
        d = t.day
        return '%(y)04d-%(m)02d-%(d)02d --' % {'y':y,'m':m, 'd': d}
    def plot_filelist(self, filelist, showPlot = True, eMail=None, pdf=None, web=False, config=None):
        """Read the files in the filelist parameters.
        All data are combined in one plot.
        That plot can be displayed or not (showPlot), sent by email (eMail provided), saved as pdf (pdf provided).
        Connection for the given days to NOAA website is possible (web) in order to draw vertical lines for XRA data."""
        emailText = []
        # NB: 'day' is bound later inside the per-file loop, before Tstamp is called
        Tstamp = lambda HHMM: datetime.datetime(year=int(day[:4]), month=int(day[4:6]), day=int(day[6:8]),
                                                hour=int(HHMM[:2]), minute=int(HHMM[2:]))
        ## Sunrise and sunset shade (kept for reference, currently disabled)
        #sun_rise = 6.0
        #sun_set = 18.0
        #plt.axvspan(0.0, sun_rise, facecolor='blue', alpha=0.2)
        #plt.axvspan(sun_set, 24.0, facecolor='blue', alpha=0.2)
        if type(filelist) is str:
            if filelist.find(',') >= 0:  # file1,file2,...,fileN given as script argument
                filelist = filelist.split(",")
            else:
                filelist = (filelist, )
        filenames = []
        # use glob for one or more files (expands ~ and wildcards)
        filenames.extend([a for a in itertools.chain.from_iterable(
            [glob.glob(os.path.expanduser(f)) for f in filelist]) ])
        #print (filenames)
        # plot's figure and axis
        fig = plt.figure()
        current_axes = fig.gca()
        current_axes.xaxis.set_minor_locator(matplotlib.dates.HourLocator())
        current_axes.xaxis.set_major_locator(matplotlib.dates.DayLocator())
        current_axes.xaxis.set_major_formatter(ff(self.m2yyyymmdd))
        current_axes.xaxis.set_minor_formatter(ff(self.m2hm))
        current_axes.set_xlabel("UTC Time")
        current_axes.set_ylabel("Signal Strength")
        ## Get data from files
        maxData, data_length = -1, -1;  # impossible values
        XRAlist = []       # flare list from NOAA
        daysList = set()   # date of NOAA's pages already retrieved, prevent multiple fetch
        figTitle = []      # list of file names (w/o path and extension) as figure's title
        # one color per station
        colorList = "brgcmy"
        colorStation = {}
        colorIdx = 0
        time.clock()  # start the CPU timer (NOTE(review): time.clock was removed in Python 3.8)
        for filename in sorted(filenames):
            figTitle.append(os.path.basename(filename)[:-4])  # extension .csv assumed
            sFile = SidFile(filename)
            for station in sFile.stations:
                # Does this station already have a color? if not, reserve one
                if station not in colorStation:
                    colorStation[station] = colorList[colorIdx % len(colorList)] + '-'  # format like 'b-'
                    colorIdx += 1
                # Add points to the plot
                plt.plot_date(sFile.timestamp, sFile.get_station_data(station), colorStation[station])
                # Extra housekeeping
                maxData = max(max(sFile.get_station_data(station)), maxData)  # maxData will be used later to put the XRA labels up
                #msg = str(len(sFile.get_station_data(station))) + " points plotted after reading " + os.path.basename(filename)
                msg = "[{}] {} points plotted after reading {}".format(station, len(sFile.get_station_data(station)), os.path.basename(filename))
                print (msg)
                emailText.append(msg)
                if web and sFile.startTime not in daysList:
                    # get the XRA data from NOAA website to draw corresponding lines on the plot
                    # fetch that day's flares on NOAA as not previously accessed
                    day = sFile.sid_params["utc_starttime"][:10].replace("-","")
                    #NOAA_URL = 'http://www.swpc.noaa.gov/ftpdir/warehouse/%s/%s_events/%sevents.txt' % (day[:4], day[:4], day)
                    #ftp://ftp.swpc.noaa.gov/pub/indices/events/20141030events.txt
                    NOAA_URL = 'ftp://ftp.swpc.noaa.gov/pub/indices/events/%sevents.txt' % (day)
                    response = None
                    if sys.version[0]<'3':  # python 2.7 vs. Python 3.3
                        try:
                            response = urllib2.urlopen(NOAA_URL)
                        except urllib2.HTTPError as err:
                            print (err,"\n",NOAA_URL)
                    else:
                        try:
                            response = urllib.request.urlopen(NOAA_URL)
                        except urllib.error.HTTPError as err:
                            print (err,"\n",NOAA_URL)
                    lastXRAlen = len(XRAlist)  # save temporarily current number of XRA events in memory
                    if response:
                        for webline in response.read().splitlines():
                            if sys.version[0]>='3': webline = str(webline, 'utf-8')  # Python 3: cast bytes to str
                            fields = webline.split()
                            if len(fields) >= 9 and not fields[0].startswith("#"):
                                if fields[1] == '+': fields.remove('+')
                                if fields[6] in ('XRA', ):  # maybe other event types could be of interest
                                    # eventName, BeginTime, MaxTime, EndTime, Particulars
                                    msg = fields[0]+" "+fields[1]+" "+fields[2]+" "+fields[3]+" "+fields[8]
                                    emailText.append(msg)
                                    print (msg)
                                    try:
                                        btime = Tstamp(fields[1])  # 'try' necessary as few occurrences of --:-- instead of HH:MM exist
                                    except:
                                        pass
                                    try:
                                        mtime = Tstamp(fields[2])
                                    except:
                                        mtime = btime
                                    try:
                                        etime = Tstamp(fields[3])
                                    except:
                                        etime = mtime
                                    XRAlist.append( (fields[0], btime, mtime, etime, fields[8]) )  # as a tuple
                    msg = str(len(XRAlist) - lastXRAlen) + " XRA events recorded by NOAA on " + day
                    emailText.append(msg)
                    print (msg)
                # keep track of the days
                daysList.add(sFile.startTime)
        print ("All files read in", time.clock(), "sec.")
        if web:  # add the lines marking the retrieved flares from NOAA
            alternate = 0
            for eventName, BeginTime, MaxTime, EndTime, Particulars in XRAlist:
                plt.vlines( [BeginTime, MaxTime, EndTime], 0, maxData,
                            color=['g','r','y'], linestyles='dotted')
                plt.text(MaxTime, alternate * maxData, Particulars, horizontalalignment='center',
                         bbox={'facecolor': 'w', 'alpha': 0.5, 'fill': True})
                alternate = 0 if alternate==1 else 1
        # plot/page size / figure size with on A4 paper
        if len(daysList) == 1:
            fig.set_size_inches(29.7 / 2.54, 21.0 / 2.54, forward=True)
        else:  # allow PDF poster for many days (monthly graph) --> use Adobe PDF Reader --> Print --> Poster mode
            fig.set_size_inches((29.7 / 2.54) * (len(daysList)/2.0), (21.0 / 2.54) / 2.0, forward=True)
        fig.subplots_adjust(bottom=0.08, left = 0.05, right = 0.98, top=0.95)
        # some cosmetics on the figure
        for label in current_axes.xaxis.get_majorticklabels():
            label.set_fontsize(8)
            label.set_rotation(30)  # 'vertical')
            #label.set_horizontalalignment='left'
        for label in current_axes.xaxis.get_minorticklabels():
            label.set_fontsize(12 if len(daysList) == 1 else 8)
        fig.suptitle(", ".join(figTitle))
        # hand-drawn legend: one colored station name per box, top-left
        xLegend = 0.03
        for station, color in colorStation.items():
            fig.text(xLegend, 0.93, station, color=color[0], fontsize=12, bbox={'fc':"w", 'pad':10, 'ec':color[0]})
            xLegend += 0.05
        # actions requested by user
        if pdf or eMail:
            pp = PdfPages(pdf or 'Image.pdf')  # in case option eMail is given but not pdf
            plt.savefig(pp, format='pdf')
            pp.close()
        if showPlot: plt.show()
        if eMail: sendMail(config, eMail, "\n".join(emailText), pdf or 'Image.pdf')
#-------------------------------------------------------------------------------
'''
For running supersid_plot.py directly from command line
'''
def do_main(filelist, showPlot=True, eMail=None, pdf=None, web=False, config=None):
    """Entry point used when supersid_plot is run from the command line."""
    plotter = SUPERSID_PLOT()
    plotter.plot_filelist(filelist, showPlot, eMail, pdf, web, config)
if __name__ == '__main__':
filenames = ""
parser = argparse.ArgumentParser(description="""Usage: supersid_plot.py filename.csv\n
Usage: supersid_plot.py "filename1.csv,filename2.csv,filename3.csv"\n
Usage: supersid_plot.py "filename*.csv"\n
Note: " are optional on Windows, mandatory on *nix\n
Other options: supersid_plot.py -h\n""")
parser.add_argument("-c", "--config", dest="cfg_filename", required=False, default='',
help="SuperSID Configuration file")
parser.add_argument("-f", "--file", dest="filename",
help="Read SID and SuperSID csv file(s). Wildcards accepted.", metavar="FILE|FILE*.csv")
parser.add_argument("-p", "--pdf", dest="pdffilename",
help="Write the plot in a PDF file.", metavar="filename.PDF")
parser.add_argument("-e", "--email", dest="email", nargs="?",
help="sends PDF file to the given email", metavar="<EMAIL>")
parser.add_argument("-n", "--noplot",
action="store_false", dest="showPlot", default=True,
help="do not display the plot. Usefull in batch mode.")
parser.add_argument("-w", "--web",
action="store_true", dest="webData", default=False,
help="Add information on flares (XRA) from NOAA website.")
parser.add_argument("-y", "--yesterday",
action="store_true", dest="askYesterday", default=False,
help="Yesterday's date is used for the file name.")
parser.add_argument("-t", | |
import discord
import asyncio
import os
import random
import traceback
import sys
from datetime import datetime, timedelta
from io import BytesIO, StringIO
from config import *
from settings import *
import json
import urllib.request
################## START INIT #####################
client = discord.Client()
# Game state (see the layout comment in on_ready): [playing?, players
# {player id: [alive, role, action, template]}, day?, [night dt, day dt],
# [elapsed night, elapsed day], first join time, last element unused here —
# NOTE(review): confirm its meaning against the command handlers.
session = [False, {}, False, [0, 0], [timedelta(0), timedelta(0)], 0, '']
# Role objects; resolved from the server's role hierarchy in on_ready()
PLAYERS_ROLE = None
ADMINS_ROLE = None
WEREWOLF_NOTIFY_ROLE = None
ratelimit_dict = {}
pingif_dict = {}
notify_me = []
faftergame = None
# 'a+' creates NOTIFY_FILE on first run; seek(0) to read the persisted list
with open(NOTIFY_FILE, 'a+') as notify_file:
    notify_file.seek(0)
    notify_me = notify_file.read().split(',')
random.seed(datetime.now())
def get_jsonparsed_data(url):
    """Fetch *url* and parse the response body as JSON.

    Returns None for HTTP error statuses (>= 400).  Non-HTTP schemes
    (e.g. file://) report no status code and are treated as success.
    The response is always closed (the original leaked it).
    """
    response = urllib.request.urlopen(url)
    try:
        code = getattr(response, "code", None)
        # code is None for non-HTTP URLs; the original's `code / 100`
        # raised TypeError in that case.
        if code is not None and code >= 400:
            return None  # url does not exist
        data = response.read().decode("utf-8")
    finally:
        response.close()
    return json.loads(data)
# Fetch the message strings for the configured language from GitHub at
# import time, falling back to English when the language file is missing.
url = "https://raw.githubusercontent.com/belguawhale/Discord-Werewolf/master/lang/" + MESSAGE_LANGUAGE + ".json"
lang = get_jsonparsed_data(url)
if not lang:
    # NOTE(review): a network failure raises inside urlopen before this
    # fallback can run — confirm whether startup should tolerate that.
    print("Could not find language {}, fallback on en".format(MESSAGE_LANGUAGE))
    lang = get_jsonparsed_data("https://raw.githubusercontent.com/belguawhale/Discord-Werewolf/master/lang/en.json")
################### END INIT ######################
@client.event
async def on_ready():
    """Startup hook: log the bot's identity and resolve the configured
    Discord role objects from the werewolf server's role hierarchy."""
    print('Logged in as')
    print(client.user.name)
    print(client.user.id)
    print('------')
    await log(0, 'on_ready triggered!')
    # [playing : True | False, players : {player id : [alive, role, action, template]}, day?, [datetime night, datetime day], [elapsed night, elapsed day], first join time]
    # Match each configured role name against the server's roles.
    for role in client.get_server(WEREWOLF_SERVER).role_hierarchy:
        if role.name == PLAYERS_ROLE_NAME:
            global PLAYERS_ROLE
            PLAYERS_ROLE = role
        if role.name == ADMINS_ROLE_NAME:
            global ADMINS_ROLE
            ADMINS_ROLE = role
        if role.name == WEREWOLF_NOTIFY_ROLE_NAME:
            global WEREWOLF_NOTIFY_ROLE
            WEREWOLF_NOTIFY_ROLE = role
    # Log the outcome: level 2 when players/admins roles are missing,
    # level 1 (less severe) for a missing notify role.
    if PLAYERS_ROLE:
        await log(0, "Players role id: " + PLAYERS_ROLE.id)
    else:
        await log(2, "Could not find players role " + PLAYERS_ROLE_NAME)
    if ADMINS_ROLE:
        await log(0, "Admins role id: " + ADMINS_ROLE.id)
    else:
        await log(2, "Could not find admins role " + ADMINS_ROLE_NAME)
    if WEREWOLF_NOTIFY_ROLE:
        await log(0, "Werewolf Notify role id: " + WEREWOLF_NOTIFY_ROLE.id)
    else:
        await log(1, "Could not find Werewolf Notify role " + WEREWOLF_NOTIFY_ROLE_NAME)
@client.event
async def on_message(message):
    """Route every incoming message: filter, rate-limit, wolfchat relay,
    then dispatch prefixed (or private unprefixed) commands.
    """
    # Ignore the bot itself, ignored users, and non-server members --
    # unless the author is an admin or the owner.
    if message.author.id in [client.user.id] + IGNORE_LIST or not client.get_server(WEREWOLF_SERVER).get_member(message.author.id):
        if not (message.author.id in ADMINS or message.author.id == OWNER_ID):
            return
    if await rate_limit(message):
        return
    if message.channel.is_private:
        await log(0, 'pm from ' + message.author.name + ' (' + message.author.id + '): ' + message.content)
        # Relay PMs from living wolfchat-role players to the pack.
        if session[0] and message.author.id in session[1].keys():
            if session[1][message.author.id][1] in WOLFCHAT_ROLES and session[1][message.author.id][0]:
                await wolfchat(message)
    if message.content.strip().startswith(BOT_PREFIX):
        # command
        # Parameters come from the lowercased content; cmd_eval/cmd_exec
        # re-split the raw content themselves to keep case.
        command = message.content.strip()[len(BOT_PREFIX):].lower().split(' ')[0]
        parameters = ' '.join(message.content.strip().lower().split(' ')[1:])
        if has_privileges(1, message) or message.channel.id == GAME_CHANNEL or message.channel.is_private:
            await parse_command(command, message, parameters)
    elif message.channel.is_private:
        # In PMs the prefix is optional.
        command = message.content.strip().lower().split(' ')[0]
        parameters = ' '.join(message.content.strip().lower().split(' ')[1:])
        await parse_command(command, message, parameters)
############# COMMANDS #############
async def cmd_shutdown(message, parameters):
    """Log the bot out, optionally (force-)stopping a running game first."""
    # "-fstop" forces the stop; "-stop <reason>" forwards the reason.
    if parameters.startswith("-fstop"):
        await cmd_fstop(message, "-force")
    elif parameters.startswith("-stop"):
        stop_args = parameters[len("-stop"):]
        await cmd_fstop(message, stop_args)
    await reply(message, "Shutting down...")
    await client.logout()
async def cmd_ping(message, parameters):
    """Reply with a randomly chosen localized ping response."""
    template = random.choice(lang['ping'])
    response = template.format(
        bot_nick=client.user.display_name, author=message.author.name, p=BOT_PREFIX)
    await reply(message, response)
async def cmd_eval(message, parameters):
    """Evaluate a Python expression and reply with its repr (debug command).

    NOTE(review): this runs eval() on user-supplied message content; it is
    only safe if command privileges restrict it to the owner/admins --
    confirm against the command table.
    """
    output = None
    # Re-split from the raw content so the expression keeps its original case.
    parameters = ' '.join(message.content.split(' ')[1:])
    if parameters == '':
        await reply(message, commands['eval'][2].format(BOT_PREFIX))
        return
    try:
        output = eval(parameters)
    except:
        await reply(message, '```\n' + str(traceback.format_exc()) + '\n```')
        traceback.print_exc()
        return
    # Allow evaluating coroutine calls, e.g. client.get_user_info(...).
    if asyncio.iscoroutine(output):
        output = await output
    if output:
        await reply(message, '```\n' + str(output) + '\n```')
    else:
        await reply(message, ':thumbsup:')
async def cmd_exec(message, parameters):
    """Execute arbitrary Python statements, capturing stdout (debug command).

    NOTE(review): exec() on user-supplied content -- must stay restricted to
    the owner/admins; confirm against the command table.
    """
    # Re-split from the raw content so the code keeps its original case.
    parameters = ' '.join(message.content.split(' ')[1:])
    if parameters == '':
        await reply(message, commands['exec'][2].format(BOT_PREFIX))
        return
    # Temporarily redirect stdout so print() output can be sent back.
    old_stdout = sys.stdout
    redirected_output = sys.stdout = StringIO()
    try:
        exec(parameters)
    except Exception:
        formatted_lines = traceback.format_exc().splitlines()
        # Last line is the exception; lines 4..-1 skip this wrapper's frames.
        await reply(message, '```py\n{}\n{}\n```'.format(formatted_lines[-1], '\n'.join(formatted_lines[4:-1])))
        return
    finally:
        # Always restore stdout, even when exec raises.
        sys.stdout = old_stdout
    if redirected_output.getvalue():
        await client.send_message(message.channel, redirected_output.getvalue())
        return
    await client.send_message(message.channel, ':thumbsup:')
async def cmd_help(message, parameters):
    """Send the help text for a command; no argument means help on 'help'."""
    topic = parameters if parameters != '' else 'help'
    if topic in commands.keys():
        await reply(message, commands[topic][2].format(BOT_PREFIX))
    else:
        await reply(message, 'No help found for command ' + topic)
async def cmd_list(message, parameters):
    """List the commands the author may use in the current context."""
    # commands[name][1] holds [channel_privileges, pm_privileges].
    priv_slot = 1 if message.channel.is_private else 0
    available = [name for name in commands.keys()
                 if has_privileges(commands[name][1][priv_slot], message)]
    await reply(message, "Available commands: " + ", ".join(available).rstrip(", "))
async def cmd_join(message, parameters):
    """Add the author to the lobby, starting the lobby timers on first join."""
    if session[0]:
        return
    if len(session[1]) >= MAX_PLAYERS:
        await reply(message, random.choice(lang['maxplayers']).format(MAX_PLAYERS))
        return
    if message.author.id in session[1]:
        await reply(message, random.choice(lang['alreadyin']).format(message.author.name))
    else:
        session[1][message.author.id] = [True, '', '', [], []]
        # First joiner opens the lobby: start the timeout loop and announce.
        if len(session[1].keys()) == 1:
            client.loop.create_task(game_start_timeout_loop())
            await client.change_presence(status=discord.Status.idle)
            await client.send_message(client.get_channel(GAME_CHANNEL), random.choice(lang['gamestart']).format(
                message.author.name, p=BOT_PREFIX))
        else:
            await client.send_message(message.channel, "**{}** joined the game and raised the number of players to **{}**.".format(
                message.author.name, len(session[1])))
        # alive, role, action, [templates], [other]
        await client.add_roles(client.get_server(WEREWOLF_SERVER).get_member(message.author.id), PLAYERS_ROLE)
        await player_idle(message)
async def cmd_leave(message, parameters):
    """Remove the author from a running game (killing them) or the lobby."""
    # Mid-game leave: mark dead, announce, strip the role, re-check win.
    if session[0] and message.author.id in list(session[1].keys()) and session[1][message.author.id][0]:
        session[1][message.author.id][0] = False
        await client.send_message(client.get_channel(GAME_CHANNEL), random.choice(lang['leavedeath']).format(message.author.name, get_role(message.author.id, 'death')))
        await client.remove_roles(client.get_server(WEREWOLF_SERVER).get_member(message.author.id), PLAYERS_ROLE)
        if session[0] and await win_condition() == None:
            await check_traitor()
    else:
        if message.author.id in session[1]:
            # Already-dead players in a running game cannot leave again.
            if session[0]:
                await reply(message, "wot?")
                return
            del session[1][message.author.id]
            await client.send_message(client.get_channel(GAME_CHANNEL), random.choice(lang['leavelobby']).format(message.author.name, len(session[1])))
            if len(session[1]) == 0:
                await client.change_presence(status=discord.Status.online)
            await client.remove_roles(client.get_server(WEREWOLF_SERVER).get_member(message.author.id), PLAYERS_ROLE)
        else:
            await reply(message, random.choice(lang['notplayingleave']))
async def cmd_fjoin(message, parameters):
    """Force the mentioned users (or raw ids) into the lobby (admin command)."""
    if session[0]:
        return
    if parameters == '':
        await reply(message, commands['fjoin'][2].format(BOT_PREFIX))
        return
    raw_members = parameters.split(' ')
    join_list = []
    join_names = []
    # Accept both real members (display their name) and bare numeric ids.
    for member in raw_members:
        if member.strip('<!@>').isdigit():
            if isinstance(client.get_server(WEREWOLF_SERVER).get_member(member.strip('<!@>')), discord.Member):
                join_list.append(member.strip('<!@>'))
                join_names.append(client.get_server(WEREWOLF_SERVER).get_member(member.strip('<!@>')).name)
            else:
                join_list.append(member.strip('<!@>'))
                join_names.append(member.strip('<!@>'))
    if join_list == []:
        await reply(message, "ERROR: no valid mentions found")
        return
    join_msg = ""
    for i, member in enumerate(join_list):
        session[1][member] = [True, '', '', [], []]
        join_msg += "**" + join_names[i] + "** was forced to join the game.\n"
        if client.get_server(WEREWOLF_SERVER).get_member(member):
            await client.add_roles(client.get_server(WEREWOLF_SERVER).get_member(member), PLAYERS_ROLE)
    join_msg += "New player count: **{}**".format(len(session[1].keys()))
    if len(session[1]) > 0:
        await client.change_presence(status=discord.Status.idle)
    await client.send_message(message.channel, join_msg)
    await log(1, "{0} ({1}) used fjoin {2}".format(message.author.name, message.author.id, parameters))
async def cmd_fleave(message, parameters):
    """Force the mentioned players (or 'all') out of the game (admin command)."""
    if parameters == '':
        await reply(message, commands['fleave'][2].format(BOT_PREFIX))
        return
    raw_members = parameters.split(' ')
    leave_list = []
    if parameters == 'all':
        leave_list = list(session[1].keys())
    else:
        for member in raw_members:
            if member.strip('<!@>').isdigit():
                leave_list.append(member.strip('<!@>'))
    if leave_list == []:
        await reply(message, "ERROR: no valid mentions found")
        return
    leave_msg = ""
    for i, member in enumerate(leave_list):
        if member in list(session[1].keys()):
            # Mid-game: kill the player; lobby: drop them entirely.
            if session[0]:
                session[1][member][0] = False
                leave_msg += "**" + get_name(member) + "** was forcibly shoved into a fire. The air smells of freshly burnt **" + get_role(member, 'death') + "**.\n"
            else:
                del session[1][member]
                leave_msg += "**" + get_name(member) + "** was forced to leave the game.\n"
            if client.get_server(WEREWOLF_SERVER).get_member(member):
                await client.remove_roles(client.get_server(WEREWOLF_SERVER).get_member(member), PLAYERS_ROLE)
    if not session[0]:
        leave_msg += "New player count: **{}**".format(len(session[1].keys()))
        if len(session[1]) == 0:
            await client.change_presence(status=discord.Status.online)
    await client.send_message(client.get_channel(GAME_CHANNEL), leave_msg)
    await log(1, "{0} ({1}) used fleave {2}".format(message.author.name, message.author.id, parameters))
    # Forced deaths may end the game or trigger traitor promotion.
    if session[0] and await win_condition() == None:
        await check_traitor()
async def cmd_refresh(message, parameters):
    """Re-download the language messages from GitHub, falling back to English."""
    if parameters == '':
        parameters = MESSAGE_LANGUAGE
    codeset = parameters
    fetched = get_jsonparsed_data(
        "https://raw.githubusercontent.com/belguawhale/Discord-Werewolf/master/lang/{}.json".format(parameters))
    if not fetched:
        # Requested language missing -> try the English fallback.
        codeset = 'en'
        fetched = get_jsonparsed_data(
            "https://raw.githubusercontent.com/belguawhale/Discord-Werewolf/master/lang/en.json")
        if not fetched:
            await reply(message, "Error: could not refresh language messages.")
            await log(2, "Refresh of language code {} and fallback failed".format(parameters))
            return
    global lang
    lang = fetched
    await reply(message, 'The messages with language code `' + codeset + '` have been refreshed from GitHub.')
async def cmd_start(message, parameters):
    """Start the game, provided the author joined and enough players exist."""
    if session[0]:
        return  # already playing
    if message.author.id not in session[1].keys():
        await reply(message, random.choice(lang['notplayingstart']))
    elif len(session[1]) < MIN_PLAYERS:
        await reply(message, random.choice(lang['minplayers']).format(MIN_PLAYERS))
    else:
        await run_game(message)
async def cmd_fstart(message, parameters):
    """Force the game to start without the author joining (admin command)."""
    if session[0]:
        return  # a game is already running
    if len(session[1]) < MIN_PLAYERS:
        await reply(message, random.choice(lang['minplayers']).format(MIN_PLAYERS))
        return
    await client.send_message(client.get_channel(GAME_CHANNEL), "**" + message.author.name + "** forced the game to start.")
    await log(1, "{0} ({1}) used fstart".format(message.author.name, message.author.id))
    await run_game(message)
async def cmd_fstop(message, parameters):
    """Forcibly end the running game, with an optional reason or '-force'
    hard-reset that tears state down without going through end_game cleanup
    first.
    """
    if not session[0]:
        await reply(message, "There is no currently running game!")
        return
    await log(1, "{0} ({1}) used fstop".format(message.author.name, message.author.id))
    msg = "Game forcibly stopped by **" + message.author.name + "**"
    if parameters == "":
        msg += "."
    elif parameters == "-force":
        if not session[0]:
            return
        msg += ". Here is some debugging info:\n```py\n{0}\n```".format(str(session))
        # Hard reset: flip the playing flag, reopen the channel, purge all
        # players and their role, and zero the day/night timers.
        session[0] = False
        perms = client.get_channel(GAME_CHANNEL).overwrites_for(client.get_server(WEREWOLF_SERVER).default_role)
        perms.send_messages = True
        await client.edit_channel_permissions(client.get_channel(GAME_CHANNEL), client.get_server(WEREWOLF_SERVER).default_role, perms)
        for player in list(list(session[1].keys())):
            del session[1][player]
            member = client.get_server(WEREWOLF_SERVER).get_member(player)
            if member:
                await client.remove_roles(member, PLAYERS_ROLE)
        session[3] = [0, 0]
        session[4] = [timedelta(0), timedelta(0)]
        await client.send_message(client.get_channel(GAME_CHANNEL), msg)
    else:
        msg += " for reason: `" + parameters + "`."
    # NOTE(review): the '-force' branch does not return, so end_game also
    # runs on already-cleared state -- confirm this is intended.
    await end_game(msg + '\n\n' + end_game_stats())
async def cmd_sync(message, parameters):
    """Reconcile the players role and channel permissions with session state."""
    server = client.get_server(WEREWOLF_SERVER)
    for member in server.members:
        should_have = member.id in session[1] and session[1][member.id][0]
        has_role = PLAYERS_ROLE in member.roles
        if should_have and not has_role:
            await client.add_roles(member, PLAYERS_ROLE)
        elif not should_have and has_role:
            await client.remove_roles(member, PLAYERS_ROLE)
    # The game channel is locked exactly while a game is running.
    perms = client.get_channel(GAME_CHANNEL).overwrites_for(server.default_role)
    perms.send_messages = not session[0]
    await client.edit_channel_permissions(client.get_channel(GAME_CHANNEL), server.default_role, perms)
    await reply(message, "Sync successful.")
async def cmd_op(message, parameters):
    """Grant the admin role to the author, or to a mentioned listed admin."""
    server = client.get_server(WEREWOLF_SERVER)
    if parameters == "":
        await client.add_roles(server.get_member(message.author.id), ADMINS_ROLE)
        await reply(message, ":thumbsup:")
        return
    target = server.get_member(parameters.strip("<!@>"))
    # Only users on the configured ADMINS list may be opped.
    if target and target.id in ADMINS:
        await client.add_roles(target, ADMINS_ROLE)
        await reply(message, ":thumbsup:")
async def cmd_deop(message, parameters):
    """Remove the admin role from the author, or from a mentioned listed admin."""
    server = client.get_server(WEREWOLF_SERVER)
    if parameters == "":
        await client.remove_roles(server.get_member(message.author.id), ADMINS_ROLE)
        await reply(message, ":thumbsup:")
        return
    target = server.get_member(parameters.strip("<!@>"))
    # Only users on the configured ADMINS list are handled.
    if target and target.id in ADMINS:
        await client.remove_roles(target, ADMINS_ROLE)
        await reply(message, ":thumbsup:")
async def cmd_role(message, parameters):
if parameters == "" | |
Bots can currently send audio
files of up to 50 MB in size, this limit may be changed in the future.
For backward compatibility, when both fields title and description are
empty and mime-type of the sent file is not "audio/mpeg", file is sent
as playable voice message. In this case, your audio must be in an .ogg
file encoded with OPUS. This will be removed in the future. You need to
use sendVoice method instead.
Args:
chat_id (int|str): Unique identifier for the message recipient - Chat id.
audio: Audio file to send. You can either pass a file_id as String to resend an audio
that is already on the Telegram servers, or upload a new audio file using
multipart/form-data.
duration (Optional[int]): Duration of sent audio in seconds.
performer (Optional[str]): Performer of sent audio.
title (Optional[str]): Title of sent audio.
caption (Optional[str]): Audio caption
disable_notification (Optional[bool]): Sends the message silently. iOS users will not
receive a notification, Android users will receive a notification with no sound.
reply_to_message_id (Optional[int]): If the message is a reply, ID of the original
message.
reply_markup (Optional[:class:`telegram.ReplyMarkup`]): Additional interface options. A
JSON-serialized object for an inline keyboard, custom reply keyboard, instructions
to remove reply keyboard or to force a reply from the user.
timeout (Optional[int|float]): Send file timeout (default: 20 seconds).
**kwargs (dict): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, instance representing the message posted.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/sendAudio'.format(self.base_url)
data = {'chat_id': chat_id, 'audio': audio}
if duration:
data['duration'] = duration
if performer:
data['performer'] = performer
if title:
data['title'] = title
if caption:
data['caption'] = caption
return url, data
@log
@message
def send_document(self,
                  chat_id,
                  document,
                  filename=None,
                  caption=None,
                  disable_notification=False,
                  reply_to_message_id=None,
                  reply_markup=None,
                  timeout=20.,
                  **kwargs):
    """Send a general file.

    Args:
        chat_id (int|str): Unique identifier of the target chat.
        document: A file_id string for a file already on the Telegram
            servers, or a new file uploaded via multipart/form-data.
        filename (Optional[str]): File name shown in the Telegram message.
        caption (Optional[str]): Document caption, 0-200 characters.
        disable_notification (Optional[bool]): Send silently.
        reply_to_message_id (Optional[int]): Id of the message replied to.
        reply_markup (Optional[:class:`telegram.ReplyMarkup`]): Extra
            interface options (inline/custom keyboard, etc.).
        timeout (Optional[int|float]): Send file timeout (default: 20s).
        **kwargs (dict): Arbitrary keyword arguments.

    Returns:
        :class:`telegram.Message`: The posted message.

    Raises:
        :class:`telegram.TelegramError`
    """
    url = '{0}/sendDocument'.format(self.base_url)
    data = {'chat_id': chat_id, 'document': document}
    # Attach only the optional fields the caller actually supplied.
    for field, value in (('filename', filename), ('caption', caption)):
        if value:
            data[field] = value
    return url, data
@log
@message
def send_sticker(self,
                 chat_id,
                 sticker,
                 disable_notification=False,
                 reply_to_message_id=None,
                 reply_markup=None,
                 timeout=None,
                 **kwargs):
    """Send a .webp sticker.

    Args:
        chat_id (int|str): Unique identifier of the target chat.
        sticker: A file_id string for a sticker already on the Telegram
            servers, or a new sticker uploaded via multipart/form-data.
        disable_notification (Optional[bool]): Send silently.
        reply_to_message_id (Optional[int]): Id of the message replied to.
        reply_markup (Optional[:class:`telegram.ReplyMarkup`]): Extra
            interface options (inline/custom keyboard, etc.).
        timeout (Optional[int|float]): If given, used as the server read
            timeout instead of the connection pool default.
        **kwargs (dict): Arbitrary keyword arguments.

    Returns:
        :class:`telegram.Message`: The posted message.

    Raises:
        :class:`telegram.TelegramError`
    """
    # Stickers carry no optional payload fields; the remaining parameters
    # are consumed by the @message decorator.
    return '{0}/sendSticker'.format(self.base_url), {'chat_id': chat_id, 'sticker': sticker}
@log
@message
def send_video(self,
               chat_id,
               video,
               duration=None,
               caption=None,
               disable_notification=False,
               reply_to_message_id=None,
               reply_markup=None,
               timeout=20.,
               **kwargs):
    """Send a video file; Telegram clients support mp4 (other formats may
    be sent as telegram.Document).

    Args:
        chat_id (int|str): Unique identifier of the target chat.
        video: A file_id string for a video already on the Telegram
            servers, or a new video uploaded via multipart/form-data.
        duration (Optional[int]): Duration of the video in seconds.
        caption (Optional[str]): Video caption.
        disable_notification (Optional[bool]): Send silently.
        reply_to_message_id (Optional[int]): Id of the message replied to.
        reply_markup (Optional[:class:`telegram.ReplyMarkup`]): Extra
            interface options (inline/custom keyboard, etc.).
        timeout (Optional[int|float]): Send file timeout (default: 20s).

    Returns:
        :class:`telegram.Message`: The posted message.

    Raises:
        :class:`telegram.TelegramError`
    """
    url = '{0}/sendVideo'.format(self.base_url)
    data = {'chat_id': chat_id, 'video': video}
    # Attach only the optional fields the caller actually supplied.
    for field, value in (('duration', duration), ('caption', caption)):
        if value:
            data[field] = value
    return url, data
@log
@message
def send_voice(self,
               chat_id,
               voice,
               duration=None,
               caption=None,
               disable_notification=False,
               reply_to_message_id=None,
               reply_markup=None,
               timeout=20.,
               **kwargs):
    """Send an audio file to be displayed as a playable voice message.

    The audio must be an .ogg file encoded with OPUS (other formats may be
    sent as Audio or Document). Bots can currently send audio files of up
    to 50 MB; this limit may change.

    Args:
        chat_id (int|str): Unique identifier of the target chat.
        voice: A file_id string for an audio file already on the Telegram
            servers, or a new audio file uploaded via multipart/form-data.
        duration (Optional[int]): Duration of the audio in seconds.
        caption (Optional[str]): Voice caption.
        disable_notification (Optional[bool]): Send silently.
        reply_to_message_id (Optional[int]): Id of the message replied to.
        reply_markup (Optional[:class:`telegram.ReplyMarkup`]): Extra
            interface options (inline/custom keyboard, etc.).
        timeout (Optional[int|float]): Send file timeout (default: 20s).
        **kwargs (dict): Arbitrary keyword arguments.

    Returns:
        :class:`telegram.Message`: The posted message.

    Raises:
        :class:`telegram.TelegramError`
    """
    url = '{0}/sendVoice'.format(self.base_url)
    data = {'chat_id': chat_id, 'voice': voice}
    # Attach only the optional fields the caller actually supplied.
    for field, value in (('duration', duration), ('caption', caption)):
        if value:
            data[field] = value
    return url, data
@log
@message
def send_video_note(self,
chat_id,
video_note,
duration=None,
length=None,
disable_notification=False,
reply_to_message_id=None,
reply_markup=None,
timeout=20.,
**kwargs):
"""As of v.4.0, Telegram clients support rounded square mp4 videos of up to 1 minute
long. Use this method to send video messages
Args:
chat_id (int|str): Unique identifier for the message recipient - Chat id.
video_note (InputFile|str): Video note to send. Pass a file_id as String to send a
video note that exists on the Telegram servers (recommended) or upload a new video.
Sending video notes by a URL is currently unsupported
duration (Optional[int]): Duration of sent audio in seconds.
length (Optional[int]): Video width and height
disable_notification (Optional[bool]): Sends the message silently. iOS users will not
receive a notification, Android users will receive a notification with no sound.
reply_to_message_id (Optional[int]): If the message is a reply, ID of the original
message.
reply_markup (Optional[:class:`telegram.ReplyMarkup`]): Additional interface options. A
JSON-serialized object for an inline keyboard, custom reply keyboard, instructions
to remove reply keyboard or to force a reply from the user.
timeout (Optional[int|float]): Send file timeout (default: 20 seconds).
**kwargs (dict): Arbitrary keyword arguments.
| |
evt
def create_aliyun_vpc_virtualrouter_entry_remote(dst_cidr_block, vrouter_uuid, vrouter_type, next_hop_type, next_hop_uuid, session_uuid=None):
    """Create a remote Aliyun VPC virtual-router entry and return its inventory."""
    act = api_actions.CreateAliyunVpcVirtualRouterEntryRemoteAction()
    act.dstCidrBlock = dst_cidr_block
    act.vRouterUuid = vrouter_uuid
    act.vRouterType = vrouter_type
    act.nextHopType = next_hop_type
    act.nextHopUuid = next_hop_uuid
    detail = (dst_cidr_block, vrouter_uuid, vrouter_type, next_hop_type, next_hop_uuid)
    test_util.action_logger('Create [VPC VirtualRouter Entry Remote:] %s %s %s %s %s' % detail)
    result = account_operations.execute_action_with_session(act, session_uuid)
    test_util.test_logger('[VPC VirtualRouter Entry Remote:] %s %s %s %s %s is created.' % detail)
    return result.inventory
def create_vpn_ipsec_config(name, pfs='group2', enc_alg='3des', auth_alg='sha1', session_uuid=None):
    """Create a VPN IPsec config and return its inventory."""
    act = api_actions.CreateVpnIpsecConfigAction()
    act.name = name
    act.pfs = pfs
    act.encAlg = enc_alg
    act.authAlg = auth_alg
    detail = (name, pfs, enc_alg, auth_alg)
    test_util.action_logger('Create [VPN IPsec Config:] %s %s %s %s' % detail)
    result = account_operations.execute_action_with_session(act, session_uuid)
    test_util.test_logger('[VPN IPsec Config:] %s %s %s %s is created.' % detail)
    return result.inventory
def create_vpn_ike_ipsec_config(name, psk, local_ip, remote_ip, pfs='group2', enc_alg='3des', auth_alg='sha1', version='ikev1', mode='main', session_uuid=None):
    """Create a VPN IKE config and return its inventory."""
    act = api_actions.CreateVpnIkeConfigAction()
    act.psk = psk
    act.pfs = pfs
    act.localIp = local_ip
    act.remoteIp = remote_ip
    act.encAlg = enc_alg
    act.authAlg = auth_alg
    act.version = version
    act.mode = mode
    act.name = name
    detail = (name, local_ip, remote_ip, psk, pfs, enc_alg, auth_alg, version, mode)
    test_util.action_logger('Create [VPN Ike Config:] %s %s %s %s %s %s %s %s %s' % detail)
    result = account_operations.execute_action_with_session(act, session_uuid)
    test_util.test_logger('[VPN Ike Config:] %s %s %s %s %s %s %s %s %s is created.' % detail)
    return result.inventory
def create_vpc_vpn_connection(user_gatway_uuid, vpn_gateway_uuid, name, local_cidr, remote_cidr, ike_config_uuid, ipsec_config_uuid, active='true', session_uuid=None):
    """Create a remote VPC VPN connection and return its inventory.

    Note: the 'user_gatway_uuid' spelling is kept for caller compatibility.
    """
    act = api_actions.CreateVpcVpnConnectionRemoteAction()
    act.userGatewayUuid = user_gatway_uuid
    act.vpnGatewayUuid = vpn_gateway_uuid
    act.name = name
    act.localCidr = local_cidr
    act.remoteCidr = remote_cidr
    act.ikeConfUuid = ike_config_uuid
    act.ipsecConfUuid = ipsec_config_uuid
    act.active = active
    test_util.action_logger('Create [VPC VPN Connection:] %s %s' % (vpn_gateway_uuid, user_gatway_uuid))
    result = account_operations.execute_action_with_session(act, session_uuid)
    test_util.test_logger('[VPC VPN Connection:] %s %s is created.' % (vpn_gateway_uuid, user_gatway_uuid))
    return result.inventory
def create_vpc_user_vpn_gateway(data_center_uuid, gw_ip, gw_name, session_uuid=None):
    """Create a remote VPC user VPN gateway and return its inventory."""
    act = api_actions.CreateVpcUserVpnGatewayRemoteAction()
    act.dataCenterUuid = data_center_uuid
    act.ip = gw_ip
    act.name = gw_name
    test_util.action_logger('Create [VPC User VPN Gateway:] %s %s' % (data_center_uuid, gw_ip))
    result = account_operations.execute_action_with_session(act, session_uuid)
    test_util.test_logger('[VPC User VPN Gateway:] %s %s is created.' % (data_center_uuid, gw_ip))
    return result.inventory
def del_vpc_user_vpn_gateway_remote(uuid, session_uuid=None):
    """Delete a remote VPC user VPN gateway and return the event."""
    delete_action = api_actions.DeleteVpcUserVpnGatewayRemoteAction()
    delete_action.uuid = uuid
    test_util.action_logger('Delete [Vpc User Vpn Gateway Remote:] %s ' % (uuid))
    result = account_operations.execute_action_with_session(delete_action, session_uuid)
    test_util.test_logger('[Vpc User Vpn Gateway Remote:] %s is deleted.' % (uuid))
    return result
def del_vpc_vpn_connection_remote(uuid, session_uuid=None):
    """Delete a remote VPC VPN connection and return the event."""
    delete_action = api_actions.DeleteVpcVpnConnectionRemoteAction()
    delete_action.uuid = uuid
    test_util.action_logger('Delete [Vpc Vpn Connection Remote:] %s ' % (uuid))
    result = account_operations.execute_action_with_session(delete_action, session_uuid)
    test_util.test_logger('[Vpc Vpn Connection Remote:] %s is deleted.' % (uuid))
    return result
def del_aliyun_route_entry_remote(uuid, session_uuid=None):
    """Delete a remote Aliyun route entry and return the event."""
    delete_action = api_actions.DeleteAliyunRouteEntryRemoteAction()
    delete_action.uuid = uuid
    test_util.action_logger('Delete [Aliyun Route Entry Remote:] %s ' % (uuid))
    result = account_operations.execute_action_with_session(delete_action, session_uuid)
    test_util.test_logger('[Aliyun Route Entry Remote:] %s is deleted.' % (uuid))
    return result
def del_vpc_vpn_gateway_local(uuid, session_uuid=None):
    """Delete a VPC VPN gateway from the local inventory and return the event."""
    delete_action = api_actions.DeleteVpcVpnGatewayLocalAction()
    delete_action.uuid = uuid
    test_util.action_logger('Delete [Vpc Vpn Gateway in local:] %s ' % (uuid))
    result = account_operations.execute_action_with_session(delete_action, session_uuid)
    test_util.test_logger('[Vpc Vpn Gateway in local:] %s is deleted.' % (uuid))
    return result
def del_vpc_vpn_connection_local(uuid, session_uuid=None):
    """Delete a VPC VPN connection from the local inventory.

    Args:
        uuid: uuid of the VPN connection to delete.
        session_uuid: optional session to run the action under.
    Returns:
        The API event returned by the delete action.
    """
    action = api_actions.DeleteVpcVpnConnectionLocalAction()
    action.uuid = uuid
    # Bug fix: the log labels were copy/pasted from other helpers
    # ('Vpc Vpn Gateway Local' / 'Router Entry Remote'); this function
    # deletes a local VPN *connection*.
    test_util.action_logger('Delete [Vpc Vpn Connection Local:] %s ' % (uuid))
    evt = account_operations.execute_action_with_session(action, session_uuid)
    test_util.test_logger('[Vpc Vpn Connection Local:] %s is deleted.' % (uuid))
    return evt
def del_vpc_ike_config_local(uuid, session_uuid=None):
    """Delete a VPC IKE config from the local inventory and return the event."""
    delete_action = api_actions.DeleteVpcIkeConfigLocalAction()
    delete_action.uuid = uuid
    test_util.action_logger('Delete [Vpc Ike Config in Local:] %s ' % (uuid))
    result = account_operations.execute_action_with_session(delete_action, session_uuid)
    test_util.test_logger('[Vpc Ike Config in Local:] %s is deleted.' % (uuid))
    return result
def del_vpc_ipsec_config_local(uuid, session_uuid=None):
    """Delete a VPC IPsec config from the local inventory and return the event."""
    delete_action = api_actions.DeleteVpcIpSecConfigLocalAction()
    delete_action.uuid = uuid
    test_util.action_logger('Delete [Vpc IPsec Config in Local:] %s ' % (uuid))
    result = account_operations.execute_action_with_session(delete_action, session_uuid)
    test_util.test_logger('[Vpc IPsec Config in Local:] %s is deleted.' % (uuid))
    return result
def del_vpc_user_vpn_gateway_local(uuid, session_uuid=None):
    """Delete a VPC user VPN gateway from the local inventory.

    Args:
        uuid: uuid of the user VPN gateway to delete.
        session_uuid: optional session to run the action under.
    Returns:
        The API event returned by the delete action.
    """
    action = api_actions.DeleteVpcUserVpnGatewayLocalAction()
    action.uuid = uuid
    # Bug fix: both log labels said 'Router Entry Remote' (copy/paste);
    # this function deletes a local user VPN gateway.
    test_util.action_logger('Delete [Vpc User Vpn Gateway Local:] %s ' % (uuid))
    evt = account_operations.execute_action_with_session(action, session_uuid)
    test_util.test_logger('[Vpc User Vpn Gateway Local:] %s is deleted.' % (uuid))
    return evt
def destroy_vm_instance(uuid, session_uuid=None):
    """Destroy a VM instance and return the event."""
    destroy_action = api_actions.DestroyVmInstanceAction()
    destroy_action.uuid = uuid
    test_util.action_logger('Destroy [VM Instance:] %s ' % (uuid))
    result = account_operations.execute_action_with_session(destroy_action, session_uuid)
    test_util.test_logger('[VM Instance:] %s is destroyed.' % (uuid))
    return result
def create_ecs_security_group_remote(name, vpc_uuid, session_uuid=None):
    """Create a remote ECS security group in the given VPC.

    Args:
        name: name of the new security group.
        vpc_uuid: uuid of the ECS VPC the group belongs to.
        session_uuid: optional session to run the action under.
    Returns:
        The inventory of the created security group.
    """
    action = api_actions.CreateEcsSecurityGroupRemoteAction()
    action.name = name
    action.vpcUuid = vpc_uuid
    test_util.action_logger('Create [Ecs Security Group Remote:] %s %s' % (name, vpc_uuid))
    evt = account_operations.execute_action_with_session(action, session_uuid)
    # Bug fix: the success log was missing its opening bracket.
    test_util.test_logger('[Ecs Security Group Remote:] %s %s is created.' % (name, vpc_uuid))
    return evt.inventory
def create_ecs_security_group_rule_remote(group_uuid, direction, protocol, port_range, cidr, policy, nic_type, priority, session_uuid=None):
    """Create a remote ECS security group rule and return its inventory."""
    act = api_actions.CreateEcsSecurityGroupRuleRemoteAction()
    act.groupUuid = group_uuid
    act.direction = direction
    act.protocol = protocol
    act.portRange = port_range
    act.cidr = cidr
    act.policy = policy
    act.nictype = nic_type
    act.priority = priority
    detail = (group_uuid, direction, protocol, port_range, cidr, policy, nic_type, priority)
    test_util.action_logger('Create [Ecs Security Group Rule Remote:] %s %s %s %s %s %s %s %s' % detail)
    result = account_operations.execute_action_with_session(act, session_uuid)
    test_util.test_logger('[Ecs Security Group Rule Remote:] %s %s %s %s %s %s %s %s is created.' % detail)
    return result.inventory
def sync_ecs_security_group_from_remote(ecs_vpc_uuid, session_uuid=None):
    """Sync ECS security groups for the given VPC from remote; return the event."""
    sync_action = api_actions.SyncEcsSecurityGroupFromRemoteAction()
    sync_action.ecsVpcUuid = ecs_vpc_uuid
    test_util.action_logger('Sync [Security Group From Remote:] %s' % (ecs_vpc_uuid))
    return account_operations.execute_action_with_session(sync_action, session_uuid)
def sync_ecs_security_group_rule_from_remote(sg_uuid, session_uuid=None):
    """Sync the rules of an ECS security group from remote.

    Args:
        sg_uuid: uuid of the ECS security group whose rules are synced.
        session_uuid: optional session to run the action under.
    Returns:
        The API event returned by the sync action.
    """
    action = api_actions.SyncEcsSecurityGroupRuleFromRemoteAction()
    action.uuid = sg_uuid
    # Bug fix: the log was copy/pasted from the group-level sync; this is
    # the *rule* sync.
    test_util.action_logger('Sync [Security Group Rule From Remote:] %s' % (sg_uuid))
    evt = account_operations.execute_action_with_session(action, session_uuid)
    return evt
def sync_vpc_vpn_gateway_from_remote(data_center_uuid, session_uuid=None):
    """Sync VPC VPN gateways of a data center from remote; return inventories."""
    sync_action = api_actions.SyncVpcVpnGatewayFromRemoteAction()
    sync_action.dataCenterUuid = data_center_uuid
    test_util.action_logger('Sync [Vpc Vpn Gateway From Remote:] %s' % (data_center_uuid))
    result = account_operations.execute_action_with_session(sync_action, session_uuid)
    return result.inventories
def sync_vpc_user_vpn_gateway_from_remote(data_center_uuid, session_uuid=None):
    """Sync the VPC user VPN gateways of a data center from remote into local.

    :param data_center_uuid: uuid of the data center to sync from
    :param session_uuid: optional session to execute the API action with
    :return: list of synced inventories
    """
    act = api_actions.SyncVpcUserVpnGatewayFromRemoteAction()
    act.dataCenterUuid = data_center_uuid
    test_util.action_logger('Sync [Vpc User Vpn Gateway From Remote:] %s' % (data_center_uuid))
    return account_operations.execute_action_with_session(act, session_uuid).inventories
def sync_vpc_vpn_connection_from_remote(data_center_uuid, session_uuid=None):
    """Sync the VPC VPN connections of a data center from remote into local.

    :param data_center_uuid: uuid of the data center to sync from
    :param session_uuid: optional session to execute the API action with
    :return: list of synced inventories
    """
    act = api_actions.SyncVpcVpnConnectionFromRemoteAction()
    act.dataCenterUuid = data_center_uuid
    test_util.action_logger('Sync [Vpc Vpn Connection From Remote:] %s' % (data_center_uuid))
    return account_operations.execute_action_with_session(act, session_uuid).inventories
def del_ecs_security_group_in_local(uuid, session_uuid=None):
    """Delete a local record of an ECS security group.

    :param uuid: uuid of the local ecs security group record
    :param session_uuid: optional session to execute the API action with
    :return: the resulting event
    """
    act = api_actions.DeleteEcsSecurityGroupInLocalAction()
    act.uuid = uuid
    test_util.action_logger('Delete [ecs security group in local:] %s' % (uuid))
    result = account_operations.execute_action_with_session(act, session_uuid)
    test_util.test_logger('[ecs security group in local:] %s is deleted.' % uuid)
    return result
def del_ecs_security_group_rule_remote(uuid, session_uuid=None):
    """Delete a remote ECS security group rule.

    :param uuid: uuid of the rule to delete
    :param session_uuid: optional session to execute the API action with
    :return: the resulting event
    """
    act = api_actions.DeleteEcsSecurityGroupRuleRemoteAction()
    act.uuid = uuid
    test_util.action_logger('Delete [Ecs Security Group Rule Remote:] %s ' % (uuid))
    result = account_operations.execute_action_with_session(act, session_uuid)
    test_util.test_logger('[Ecs Security Group Rule Remote:] %s is deleted.' % (uuid))
    return result
def del_ecs_security_group_remote(uuid, session_uuid=None):
    """Delete a remote ECS security group.

    :param uuid: uuid of the group to delete
    :param session_uuid: optional session to execute the API action with
    :return: the resulting event
    """
    act = api_actions.DeleteEcsSecurityGroupRemoteAction()
    act.uuid = uuid
    test_util.action_logger('Delete [Ecs Security Group Remote:] %s ' % (uuid))
    result = account_operations.execute_action_with_session(act, session_uuid)
    test_util.test_logger('[Ecs Security Group Remote:] %s is deleted.' % (uuid))
    return result
def create_ecs_image_from_local_image(bs_uuid, datacenter_uuid, image_uuid, name, session_uuid=None):
    """Create an ECS image on the remote side from a local ZStack image.

    :param bs_uuid: uuid of the backup storage hosting the local image
    :param datacenter_uuid: uuid of the target data center
    :param image_uuid: uuid of the local image to export
    :param name: name given to the created ecs image
    :param session_uuid: optional session to execute the API action with
    :return: the created ecs image inventory
    """
    action = api_actions.CreateEcsImageFromLocalImageAction()
    action.backupStorageUuid = bs_uuid
    action.dataCenterUuid = datacenter_uuid
    action.imageUuid = image_uuid
    action.name = name
    test_util.action_logger('Create Ecs Image from [Local image:] %s %s %s' % (bs_uuid, datacenter_uuid, image_uuid))
    evt = account_operations.execute_action_with_session(action, session_uuid)
    test_util.test_logger('Ecs Image is created from [Local image:] %s %s %s.' % (bs_uuid, datacenter_uuid, image_uuid))
    return evt.inventory
def del_ecs_image_remote(uuid, session_uuid=None):
    """Delete a remote ECS image.

    :param uuid: uuid of the ecs image to delete
    :param session_uuid: optional session to execute the API action with
    :return: the resulting event
    """
    act = api_actions.DeleteEcsImageRemoteAction()
    act.uuid = uuid
    test_util.action_logger('Delete [ecs image remote:] %s' % (uuid))
    result = account_operations.execute_action_with_session(act, session_uuid)
    test_util.test_logger('[ecs image remote:] %s is deleted.' % uuid)
    return result
def del_ecs_image_in_local(uuid, session_uuid=None):
    """Delete a local record of an ECS image.

    :param uuid: uuid of the local ecs image record
    :param session_uuid: optional session to execute the API action with
    :return: the resulting event
    """
    act = api_actions.DeleteEcsImageLocalAction()
    act.uuid = uuid
    test_util.action_logger('Delete [ecs image in local:] %s' % (uuid))
    result = account_operations.execute_action_with_session(act, session_uuid)
    test_util.test_logger('[ecs image in local:] %s is deleted.' % uuid)
    return result
def del_hybrid_eip_local(uuid, eip_type='aliyun', session_uuid=None):
    """Delete a local record of a hybrid EIP.

    :param uuid: uuid of the local hybrid eip record
    :param eip_type: hybrid cloud vendor type, defaults to 'aliyun'
    :param session_uuid: optional session to execute the API action with
    :return: the resulting event
    """
    action = api_actions.DeleteHybridEipFromLocalAction()
    action.type = eip_type
    action.uuid = uuid
    test_util.action_logger('Delete [Hybrid Eip in local:] %s' % (uuid))
    evt = account_operations.execute_action_with_session(action, session_uuid)
    test_util.test_logger('[Hybrid Eip in local:] %s is deleted.' % uuid)
    return evt
def sync_ecs_image_from_remote(datacenter_uuid, image_type='self', session_uuid=None):
    """Sync ECS images of a data center from remote into local.

    :param datacenter_uuid: uuid of the data center to sync from
    :param image_type: image ownership type, defaults to 'self'
    :param session_uuid: optional session to execute the API action with
    :return: the resulting event
    """
    act = api_actions.SyncEcsImageFromRemoteAction()
    act.dataCenterUuid = datacenter_uuid
    act.type = image_type
    test_util.action_logger('Sync [Ecs Image From Remote:] %s' % (datacenter_uuid))
    return account_operations.execute_action_with_session(act, session_uuid)
def create_ecs_instance_from_ecs_image(ecs_root_password, image_uuid, ecs_vswitch_uuid, ecs_bandwidth, ecs_security_group_uuid, instance_offering_uuid=None, instance_type=None, private_ip_address=None, allocate_public_ip='false', name=None, ecs_console_password=None, session_uuid=None):
    """Create an ECS instance from an ECS image.

    Bug fix: the password parameter default and both password assignments
    contained "<PASSWORD>" redaction residue, which is not valid Python.
    The parameters are now wired through as intended.

    :param ecs_root_password: root password of the created instance
    :param image_uuid: uuid of the ecs image to boot from
    :param ecs_vswitch_uuid: uuid of the ecs vswitch to attach to
    :param ecs_bandwidth: bandwidth of the instance
    :param ecs_security_group_uuid: uuid of the ecs security group
    :param instance_offering_uuid: optional local instance offering uuid
    :param instance_type: optional ecs instance type
    :param private_ip_address: optional fixed private ip
    :param allocate_public_ip: 'true'/'false' flag for public ip allocation
    :param name: optional instance name
    :param ecs_console_password: optional console password
    :param session_uuid: optional session to execute the API action with
    :return: the created ecs instance inventory
    """
    action = api_actions.CreateEcsInstanceFromEcsImageAction()
    action.ecsRootPassword = ecs_root_password
    action.ecsImageUuid = image_uuid
    action.ecsVSwitchUuid = ecs_vswitch_uuid
    action.instanceOfferingUuid = instance_offering_uuid
    action.instanceType = instance_type
    action.ecsBandWidth = ecs_bandwidth
    action.ecsSecurityGroupUuid = ecs_security_group_uuid
    action.privateIpAddress = private_ip_address
    action.allocatePublicIp = allocate_public_ip
    action.name = name
    action.ecsConsolePassword = ecs_console_password
    test_util.action_logger('Create Ecs Instance from [Ecs Image:] %s' % image_uuid)
    evt = account_operations.execute_action_with_session(action, session_uuid)
    test_util.test_logger('Ecs Instance is created from [Ecs Image:] %s.' % image_uuid)
    return evt.inventory
def del_ecs_instance(uuid, session_uuid=None):
    """Delete an ECS instance.

    :param uuid: uuid of the ecs instance to delete
    :param session_uuid: optional session to execute the API action with
    :return: the resulting event
    """
    act = api_actions.DeleteEcsInstanceAction()
    act.uuid = uuid
    test_util.action_logger('Delete [ecs instance:] %s' % (uuid))
    result = account_operations.execute_action_with_session(act, session_uuid)
    test_util.test_logger('[ecs instance:] %s is deleted.' % uuid)
    return result
def sync_ecs_instance_from_remote(datacenter_uuid, only_zstack=None, session_uuid=None):
    """Sync ECS instances of a data center from remote into local.

    :param datacenter_uuid: uuid of the data center to sync from
    :param only_zstack: if set, restrict sync to zstack-created instances
    :param session_uuid: optional session to execute the API action with
    :return: list of synced inventories
    """
    act = api_actions.SyncEcsInstanceFromRemoteAction()
    act.dataCenterUuid = datacenter_uuid
    act.onlyZstack = only_zstack
    test_util.action_logger('Sync [Ecs Instance From Remote:] %s' % (datacenter_uuid))
    return account_operations.execute_action_with_session(act, session_uuid).inventories
def update_ecs_instance(uuid, name=None, description=None, password=None, | |
# ebttools/gui/main.py
# -*- coding: utf-8 -*-
"""
Copyright (c) 2016 <NAME>
Main GUI class of the EBT evaluation program
Program to evaluate EBT-films
A scannend image can be loaded and an area of interest selected
From that area a dose distrubtion can be calculated using a calibration file
and that dose distrubtion is than displayed in a seperate window
calibration location into advanced settings:
change must trigger reload
load adv settings before loading calibration
load gui settings after
"""
#get ready for python 3
from __future__ import (print_function, division, absolute_import,
unicode_literals)
#standard modules
import logging #logging functionality
import numpy as np
#file manipulation and path functionality
import os
#modules for exception handling
import sys
import traceback
try:
if os.environ['QT_API'] == 'pyqt5':
from PyQt5.QtWidgets import (QMainWindow, QFileDialog, QApplication,
QMessageBox)
from PyQt5 import QtCore
from matplotlib.backends.backend_qt5agg import (FigureCanvas)
else:
from PyQt4.QtGui import (QMainWindow, QFileDialog, QApplication,
QMessageBox)
from PyQt4 import QtCore
from matplotlib.backends.backend_qt4agg import (FigureCanvas)
except ImportError:
raise ImportError("dosewidget requires PyQt4 or PyQt5. "
"QT_API: {!s}".format(os.environ['QT_API']))
#use relative import, this should only ever run as a module
#module with the dose calculation routines
from ..core import load_calibrations, DoseArray
#my custom toolbar and main ui
if os.environ['QT_API'] == 'pyqt5':
from .main_ui_qt5 import Ui_MainWindow
else:
from .main_ui_qt4 import Ui_MainWindow
from .navtoolbar import MyNavigationToolbar
#the dose display window
from .dosewidget import DoseWidget, _advSettings
#load the Qt bindings
#load matplotlib for plotting
from matplotlib.figure import Figure
from matplotlib.patches import Rectangle
#image loading functionality
from PIL import Image
#import my gui helper functions (save, load gui data, gui logger, adv settings)
from mg import pyguitools
#define a list with advanced settings (editable via formlayout)
#each list entry is a setting, with the first part as identifier and second as value
#NOTE: the "histogramm" spelling is also used as the lookup key elsewhere
#(e.g. self.settings["histogramm bins"]) -- never rename one without the other
settingsList = [("selection rectangle","red"),
                ("mirror on load",False),
                ("rotate on load",False),
                ("histogramm bins",256),
                ("histogramm min",0),
                ("histogramm max",255)]
class MainGui(QMainWindow):
"""Main GUI of the EBT evaluation program
Provides a logging window, loads the films and the calibration files.
Constructs the dose view from the selected film ROI.
"""
def __init__(self):
    """Constructor: build the UI, the log dock, the settings objects,
    the matplotlib frame, load calibrations and wire all signals.
    """
    QMainWindow.__init__(self)
    # Set up the user interface from Designer.
    self.ui = Ui_MainWindow()
    self.ui.setupUi(self)
    self.setDockOptions(QMainWindow.AnimatedDocks | QMainWindow.AllowNestedDocks)
    #add the logging window by Andreas
    self.log_dock = pyguitools.QtDockLog(datefmt=" ",infoString=False)
    self.addDockWidget(QtCore.Qt.BottomDockWidgetArea, self.log_dock)
    # default log level to info
    self.log_dock.ui.comboBox.setCurrentIndex(1)
    #initialize the advanced settings (editable via formlayout, thus the list of tuples)
    self.advSettings = pyguitools.EasyEditSettings(settingsList)
    #convert settings to a dictionary for easier access
    self.settings = self.advSettings.get_settings()
    # settings shared by all dose view tabs (imported from dosewidget)
    self.doseViewSettings = _advSettings
    # counts created dose tabs so each gets a unique number
    self.tabCounter = 0
    #matplotlib frame setup
    self.create_mplframe()
    self.load_calibrations()
    #connect slots
    #menu items
    self.ui.actionShow_Log.triggered.connect(self.show_log)
    self.ui.actionShow_Scan.triggered.connect(self.show_scan)
    self.ui.actionScan_View_Settings.triggered.connect(self.change_advSettings)
    self.ui.actionDose_View_Settings.triggered.connect(self.change_doseViewSettings)
    self.ui.actionSave_Dose_View_Values.triggered.connect(self.save_doseView_values)
    #value changes of the selection rectangle spin boxes
    self.ui.x0.valueChanged.connect(self.selection_changed)
    self.ui.x1.valueChanged.connect(self.selection_changed)
    self.ui.y0.valueChanged.connect(self.selection_changed)
    self.ui.y1.valueChanged.connect(self.selection_changed)
    #buttons
    self.ui.browseImageButton.clicked.connect(self.image_file_dialog)
    self.ui.imagePath.returnPressed.connect(self.image_path_changed)
    self.ui.loadButton.clicked.connect(self.image_path_changed)
    self.ui.showDose_button.clicked.connect(self.show_dose)
    self.ui.calcStatsButton.clicked.connect(self.area_stats)
    self.ui.showHistoButton.clicked.connect(self.histogram)
    self.ui.tabWidget.tabCloseRequested.connect(self.close_tab)
    self.ui.browseSaveTable.clicked.connect(self.save_table_file_dialog)
    self.ui.saveTablePath.returnPressed.connect(self.save_table_path_changed)
    self.ui.saveChannelData.clicked.connect(self.save_calib_data)
    self.ui.calcPhi0Button.clicked.connect(self.get_phi0)
    #read the settings file (after wiring, so restored values apply cleanly)
    self.load_settings()
    #init the save path; non-empty once check_save_table_path validated a file
    self.saveTablePath = ""
#redefining close event
def closeEvent(self,event):
    """Qt close handler: persist the GUI settings, then accept the close.

    :param event: the QCloseEvent delivered by Qt
    """
    self.save_settings()
    event.accept()
##############################################################################
# methods (internal functions not directly called by user interaction)
def create_mplframe(self):
    """Create the matplotlib frame: figure, canvas and custom toolbar,
    and add them to the image layout of the GUI.
    """
    #create figure and axes objects
    self.fig = Figure()
    self.subplot = self.fig.add_subplot(111)
    #disable axis, because it will only show an image
    self.subplot.get_yaxis().set_visible(False)
    self.subplot.get_xaxis().set_visible(False)
    #create canvas and toolbar
    self.canvas = FigureCanvas(self.fig)
    self.toolbar = MyNavigationToolbar(self.canvas, None)
    #add the canvas and toolbar to the gui
    self.ui.imageLayout.addWidget(self.canvas)
    self.ui.imageLayout.addWidget(self.toolbar)
    # 'selection_changed' is not a stock matplotlib event -- presumably a
    # custom event emitted by MyNavigationToolbar; TODO confirm in navtoolbar
    self.canvas.mpl_connect('selection_changed',self.toolbar_selection)
def load_settings(self):
    """Load the settings of the GUI via QSettings (called on startup).

    Restores window geometry/dock state, then the values of the
    individual UI elements.
    """
    #create a QSettings object to store the settings
    self.QtSettings=QtCore.QSettings("OncoRay","EBT Evaluation")
    #self.QtSettings=QtCore.QSettings("settings.ini",QtCore.QSettings.IniFormat)
    #load window settings (geometry and dock layout)
    self.QtSettings.beginGroup("MainWindow")
    self.restoreGeometry(self.QtSettings.value("geometry",QtCore.QByteArray(),type=QtCore.QByteArray))
    self.restoreState(self.QtSettings.value("state",QtCore.QByteArray(),type=QtCore.QByteArray))
    # self.resize(self.QtSettings.value("windowSize",QtCore.QSize(1024,1280),
    #                                   type=QtCore.QSize))
    self.QtSettings.endGroup()
    #load values for various elements
    self.QtSettings.beginGroup("Settings")
    pyguitools.gui_restore(self.ui,self.QtSettings)
    self.QtSettings.endGroup()
def check_save_table_path(self, path):
    """Validate the table save path and prepare the file for writing.

    If *path* is empty, a save dialog is shown. If the path was already
    validated (equals self.saveTablePath) it is returned unchanged. For an
    existing file the user may choose to overwrite, append, or abort. On
    overwrite (or a fresh file) a header line is written.

    :param path: candidate file path ("" opens a dialog)
    :return: the validated path, or "" if the user aborted
    """
    #check if path is empty and try to get something not empty
    if path == "":
        path = QFileDialog.getSaveFileName(self,caption = 'select a file to save to',
                                           directory = self.ui.saveTablePath.text(),
                                           filter="Text files (*.txt);;All files (*)",
                                           options = QFileDialog.DontConfirmOverwrite)
        #in pyqt5 a tuple is returned, unpack it
        if os.environ['QT_API'] == 'pyqt5':
            path, _ = path
        if path == "":
            return ""
        else:
            self.ui.saveTablePath.setText(path)
    #do nothing if path already checked
    if self.saveTablePath == path:
        return path
    #if it already exists ask to overwrite or append
    if os.path.isfile(path):
        title = "path already exists"
        text = (path+" already exists. Should it be overwritten? \n"
                + "No will append to the file. "
                + "Choose Abort to select another file")
        overwriteAnswer = QMessageBox.question(self,title,text,
                                               QMessageBox.Yes | QMessageBox.No | QMessageBox.Abort,
                                               QMessageBox.No)
        #if selected no, append to the file, i.e. return the path as checked
        if overwriteAnswer == QMessageBox.No:
            return path
        #empty path return on abort
        elif overwriteAnswer == QMessageBox.Abort:
            return ""
        #with yes the method continues and will overwrite the file
    #should not be a dir
    elif os.path.isdir(path):
        logging.error("specified path is a directory")
        return ""
    #in all other cases, open it for writing and write a header to it
    headerString = ("film no\t" "filename\t" "x0\ty0\tx1\ty1\t"
                    "number of pixels\t"
                    "R_avg\tR_std\t" "G_avg\tG_std\t" "B_avg\tB_std\n")
    with open(path,"w") as saveTableFile:
        saveTableFile.write(headerString)
    return path
def save_settings(self):
    """Save the settings of the GUI via QSettings (called on exit).

    Persists window geometry/dock state and the values of the UI elements.
    """
    self.QtSettings.beginGroup("MainWindow")
    self.QtSettings.setValue("geometry",self.saveGeometry())
    self.QtSettings.setValue("state",self.saveState())
    self.QtSettings.endGroup()
    #save element content
    self.QtSettings.beginGroup("Settings")
    pyguitools.gui_save(self.ui,self.QtSettings)
    self.QtSettings.endGroup()
def load_calibrations(self):
    """Load the calibration files (for dose calculation) from the package's
    "calibrations" directory and fill the calibration combo box, attaching
    each calibration's tooltip. I/O errors are logged, not raised.
    """
    try:
        self.calibrations = load_calibrations(os.path.join(os.path.dirname(__file__),
                                                           "..",
                                                           "calibrations"))
        for key in self.calibrations:
            self.ui.calibration_selection.addItem(key,self.calibrations[key])
            # the item just appended is the last one; give it its tooltip
            idx = self.ui.calibration_selection.count()-1
            self.ui.calibration_selection.setItemData(idx,
                                                      self.calibrations[key]["tooltip"],
                                                      QtCore.Qt.ToolTipRole)
        self.ui.calibration_selection.setCurrentIndex(0)
    except (IOError, OSError) as e:
        logging.error("failure while reading calibration files: " +
                      e.strerror+" at "+e.filename)
##############################################################################
# slots (user interaction functions, all connected to a signal emitted by some
# GUI element)
def area_stats(self):
    """Calculate and log simple statistics of the selected area.

    Reads the selection rectangle from the x0/x1/y0/y1 spin boxes and the
    channel from the channel combo box, then logs average (with standard
    error), standard deviation, minimum and maximum of the region.
    """
    x0 = self.ui.x0.value()
    x1 = self.ui.x1.value()
    y0 = self.ui.y0.value()
    y1 = self.ui.y1.value()
    channel = self.ui.channel_selection.itemData(self.ui.channel_selection.currentIndex())
    # slice the region once instead of four times
    region = self.npImg[y0:y1,x0:x1,channel]
    avg = np.average(region)
    std = np.std(region)
    minimum = np.min(region)
    maximum = np.max(region)
    # pixel count independent of the order in which the corners were entered
    n = (max(x1,x0)-min(x1,x0))*(max(y1,y0)-min(y1,y0))
    logging.info("### Statistics for area x: {:d} - {:d}; y: {:d} - {:d} ###".format(x0,x1,y0,y1))
    logging.info("channel: {!s}".format(self.ui.channel_selection.currentText()))
    logging.info("average: {:.3f} +- {:.3f}".format(avg,std/np.sqrt(n)))
    logging.info("standard deviation: {:.3f}".format(std))
    logging.info("maximum: {:d}".format(maximum))
    logging.info("minimum: {:d}".format(minimum))
    logging.info("--------------------------------------------------------------")
def change_advSettings(self):
    """Open the advanced scan settings dialog and refresh the cached dict."""
    self.advSettings.change_settings(title="advanced scan settings")
    self.settings = self.advSettings.get_settings()
def change_doseViewSettings(self):
    """Open the dose view settings dialog and push the new settings to all
    open dose tabs (every tab except the one labeled "scan view").
    """
    self.doseViewSettings.change_settings(title="dose view settings")
    tabs = self.ui.tabWidget.count()
    for i in range(0,tabs):
        if self.ui.tabWidget.tabText(i) != "scan view":
            widget = self.ui.tabWidget.widget(i)
            widget.set_settings(self.doseViewSettings.get_settings())
def close_tab(self, index):
    """Slot for tabCloseRequested: remove the tab at *index*."""
    self.ui.tabWidget.removeTab(index)
def get_phi0(self):
    """Use the stats from the currently selected area to fill the phi0 fields.

    For each available channel the average pixel value of the selected
    rectangle is written into the corresponding phi0 spin box.
    """
    x0 = self.ui.x0.value()
    x1 = self.ui.x1.value()
    y0 = self.ui.y0.value()
    y1 = self.ui.y1.value()
    channels = [self.ui.channel_selection.itemData(i) for i in range(self.ui.channel_selection.count())]
    # pair each channel with its phi0 input field (extra fields stay untouched)
    for ch, field in zip(channels,[self.ui.phi0Ch1,self.ui.phi0Ch2,self.ui.phi0Ch3]):
        avg = np.average(self.npImg[y0:y1,x0:x1,ch])
        field.setValue(avg)
def histogram(self):
    """Show a histogram of the selected channel over the selected area.

    Bin count and range come from the advanced settings ("histogramm ..."
    keys -- spelling must match settingsList).
    """
    channel = self.ui.channel_selection.itemData(self.ui.channel_selection.currentIndex())
    #create a window, the reference must be stored, because the window
    #gets destroyed when its reference is garbage collected
    #make plotWindow a list and append to that if multiple windows should be possible
    title = "histogram of {:s} channel".format(self.ui.channel_selection.currentText())
    self.plotWindow = pyguitools.SimplePlotWindow(name = title)
    self.plotWindow.ax1.hist(self.npImg[self.ui.y0.value():self.ui.y1.value(),
                                        self.ui.x0.value():self.ui.x1.value(),
                                        channel].flatten(),
                             bins=self.settings["histogramm bins"],
                             range=(self.settings["histogramm min"],self.settings["histogramm max"]))
    self.plotWindow.ax1.set_xlim(self.settings["histogramm min"],self.settings["histogramm max"])
    self.plotWindow.show()
def image_file_dialog(self):
    """Open a dialog to select a scanned image (browsing).

    On a successful selection the path field is updated and the image is
    loaded via image_path_changed; a cancelled dialog is only logged.
    """
    # bug fix: the first two filter entries were missing the ";;" separator,
    # so Qt treated them as a single garbled filter entry
    extensionFilter=("TIFF files (*.tiff *.TIFF *.tif *.TIF);;"
                     "Image files (*.tiff *.TIFF *.tif *.jpg *.jpeg *.png *.bmp *.gif);;"
                     "All files (*)")
    filePath =QFileDialog.getOpenFileName(self,
                                          caption = 'select a scanned image',
                                          directory = self.ui.imagePath.text(),
                                          filter = extensionFilter)
    #in pyqt5 a tuple is returned, unpack it
    if os.environ['QT_API'] == 'pyqt5':
        filePath, _ = filePath
    if filePath != '':
        self.ui.imagePath.setText(filePath)
        self.image_path_changed()
    else:
        logging.info('file selection canceled')
def image_path_changed(self):
"""trys to load a new scan
"""
#load the image, if no path given open dialog
if self.ui.imagePath.text() == "":
self.image_file_dialog()
#catch wrong path and permission errors
try:
img = Image.open(str(self.ui.imagePath.text()))
except (IOError, OSError) as e:
logging.error("failed to open file: "+str(e))
return ()
#check the format of the loaded image and adjust the input field accordingly
logging.debug("image mode: "+img.mode)
self.ui.channel_selection.clear()
if img.mode == "RGB":
self.ui.channel_selection.addItem("red",0)
self.ui.channel_selection.addItem("green",1)
self.ui.channel_selection.addItem("blue",2)
#set the phi0 input
for field in [self.ui.phi0Ch1,self.ui.phi0Ch2,self.ui.phi0Ch3]:
field.setMaximum(255)
field.setMinimum(0)
field.setEnabled(True)
self.ui.phi0LabelCh1.setText("red:")
self.ui.phi0LabelCh2.setText("green:")
self.ui.phi0LabelCh3.setText("blue:")
elif img.mode == "L" | |
None: # pragma: no cover
# This really shouldn't be possible since _register_subcommands would prevent this from happening
# but keeping in case it does for some strange reason
raise CommandSetRegistrationError('Could not find argparser for command "{}" needed by subcommand: {}'
.format(command_name, str(method)))
for action in command_parser._actions:
if isinstance(action, argparse._SubParsersAction):
action.remove_parser(subcommand_name)
break
def add_settable(self, settable: Settable) -> None:
    """
    Convenience method to add a settable parameter to ``self.settables``

    Keyed by ``settable.name``; silently replaces any existing settable
    registered under the same name.

    :param settable: Settable object being added
    """
    self.settables[settable.name] = settable
def remove_settable(self, name: str) -> None:
    """
    Convenience method for removing a settable parameter from ``self.settables``

    :param name: name of the settable being removed
    :raises: KeyError if no settable with this name exists
    """
    try:
        del self.settables[name]
    except KeyError:
        raise KeyError(name + " is not a settable parameter")
def build_settables(self):
    """Create the dictionary of user-settable parameters.

    Each entry maps a name to a Settable describing its type, help text
    and (for allow_style) the permitted choices.
    """
    self.add_settable(Settable('allow_style', str,
                               'Allow ANSI text style sequences in output (valid values: '
                               '{}, {}, {})'.format(ansi.STYLE_TERMINAL,
                                                    ansi.STYLE_ALWAYS,
                                                    ansi.STYLE_NEVER),
                               choices=[ansi.STYLE_TERMINAL, ansi.STYLE_ALWAYS, ansi.STYLE_NEVER]))
    self.add_settable(Settable('always_show_hint', bool,
                               'Display tab completion hint even when completion suggestions print'))
    self.add_settable(Settable('debug', bool, "Show full traceback on exception"))
    self.add_settable(Settable('echo', bool, "Echo command issued into output"))
    self.add_settable(Settable('editor', str, "Program used by 'edit'"))
    self.add_settable(Settable('feedback_to_output', bool, "Include nonessentials in '|', '>' results"))
    self.add_settable(Settable('max_completion_items', int,
                               "Maximum number of CompletionItems to display during tab completion"))
    self.add_settable(Settable('quiet', bool, "Don't print nonessential feedback"))
    self.add_settable(Settable('timing', bool, "Report execution times"))
# ----- Methods related to presenting output to the user -----
@property
def allow_style(self) -> str:
    """Read-only property needed to support do_set when it reads allow_style"""
    return ansi.allow_style
@allow_style.setter
def allow_style(self, new_val: str) -> None:
    """Setter property needed to support do_set when it updates allow_style

    :param new_val: one of the STYLE_* values, case-insensitive
    :raises: ValueError if new_val is not a recognized style value
    """
    # capitalize() normalizes the case so the membership test is case-insensitive
    new_val = new_val.capitalize()
    if new_val in [ansi.STYLE_TERMINAL, ansi.STYLE_ALWAYS, ansi.STYLE_NEVER]:
        ansi.allow_style = new_val
    else:
        raise ValueError("must be {}, {}, or {} (case-insensitive)".format(ansi.STYLE_TERMINAL, ansi.STYLE_ALWAYS,
                                                                           ansi.STYLE_NEVER))
def _completion_supported(self) -> bool:
    """Return whether tab completion is supported

    True only when raw input is in use, a completion key is configured,
    and a readline implementation is available.
    """
    return self.use_rawinput and self.completekey and rl_type != RlType.NONE
@property
def visible_prompt(self) -> str:
    """Read-only property to get the visible prompt with any ANSI style escape codes stripped.

    Used by transcript testing to make it easier and more reliable when users are doing things like coloring the
    prompt using ANSI color codes.

    :return: prompt stripped of any ANSI escape codes
    """
    return ansi.strip_style(self.prompt)
def poutput(self, msg: Any = '', *, end: str = '\n') -> None:
    """Print message to self.stdout and appends a newline by default

    Also handles BrokenPipeError exceptions for when a command's output has
    been piped to another process and that process terminates before the
    cmd2 command is finished executing.

    :param msg: message to print (anything convertible to a str with '{}'.format() is OK)
    :param end: string appended after the end of the message, default a newline
    """
    try:
        ansi.style_aware_write(self.stdout, "{}{}".format(msg, end))
    except BrokenPipeError:
        # This occurs if a command's output is being piped to another
        # process and that process closes before the command is
        # finished. If you would like your application to print a
        # warning message, then set the broken_pipe_warning attribute
        # to the message you want printed.
        if self.broken_pipe_warning:
            sys.stderr.write(self.broken_pipe_warning)
# noinspection PyMethodMayBeStatic
def perror(self, msg: Any = '', *, end: str = '\n', apply_style: bool = True) -> None:
    """Print message to sys.stderr

    :param msg: message to print (anything convertible to a str with '{}'.format() is OK)
    :param end: string appended after the end of the message, default a newline
    :param apply_style: If True, then ansi.style_error will be applied to the message text. Set to False in cases
                        where the message text already has the desired style. Defaults to True.
    """
    text = ansi.style_error(msg) if apply_style else "{}".format(msg)
    ansi.style_aware_write(sys.stderr, text + end)
def pwarning(self, msg: Any = '', *, end: str = '\n', apply_style: bool = True) -> None:
    """Wraps perror, but applies ansi.style_warning by default

    :param msg: message to print (anything convertible to a str with '{}'.format() is OK)
    :param end: string appended after the end of the message, default a newline
    :param apply_style: If True, then ansi.style_warning will be applied to the message text. Set to False in cases
                        where the message text already has the desired style. Defaults to True.
    """
    styled = ansi.style_warning(msg) if apply_style else msg
    # styling (if any) already applied above, so tell perror not to re-style
    self.perror(styled, end=end, apply_style=False)
def pexcept(self, msg: Any, *, end: str = '\n', apply_style: bool = True) -> None:
    """Print Exception message to sys.stderr. If debug is true, print exception traceback if one exists.

    :param msg: message or Exception to print
    :param end: string appended after the end of the message, default a newline
    :param apply_style: If True, then ansi.style_error will be applied to the message text. Set to False in cases
                        where the message text already has the desired style. Defaults to True.
    """
    # print the traceback first (debug mode only) so the summary message ends the output
    if self.debug and sys.exc_info() != (None, None, None):
        import traceback
        traceback.print_exc()

    if isinstance(msg, Exception):
        final_msg = "EXCEPTION of type '{}' occurred with message: '{}'".format(type(msg).__name__, msg)
    else:
        final_msg = "{}".format(msg)

    if apply_style:
        final_msg = ansi.style_error(final_msg)

    # hint at the debug setting, but only when it exists and is off
    if not self.debug and 'debug' in self.settables:
        warning = "\nTo enable full traceback, run the following command: 'set debug true'"
        final_msg += ansi.style_warning(warning)

    self.perror(final_msg, end=end, apply_style=False)
def pfeedback(self, msg: Any, *, end: str = '\n') -> None:
    """For printing nonessential feedback.  Can be silenced with `quiet`.
    Inclusion in redirected output is controlled by `feedback_to_output`.

    :param msg: message to print (anything convertible to a str with '{}'.format() is OK)
    :param end: string appended after the end of the message, default a newline
    """
    # guard clause: quiet mode suppresses feedback entirely
    if self.quiet:
        return
    if self.feedback_to_output:
        self.poutput(msg, end=end)
    else:
        self.perror(msg, end=end, apply_style=False)
def ppaged(self, msg: Any, *, end: str = '\n', chop: bool = False) -> None:
"""Print output using a pager if it would go off screen and stdout isn't currently being redirected.
Never uses a pager inside of a script (Python or text) or when output is being redirected or piped or when
stdout or stdin are not a fully functional terminal.
:param msg: message to print to current stdout (anything convertible to a str with '{}'.format() is OK)
:param end: string appended after the end of the message, default a newline
:param chop: True -> causes lines longer than the screen width to be chopped (truncated) rather than wrapped
- truncated text is still accessible by scrolling with the right & left arrow keys
- chopping is ideal for displaying wide tabular data as is done in utilities like pgcli
False -> causes lines longer than the screen width to wrap to the next line
- wrapping is ideal when you want to keep users from having to use horizontal scrolling
WARNING: On Windows, the text always wraps regardless of what the chop argument is set to
"""
# msg can be any type, so convert to string before checking if it's blank
msg_str = str(msg)
# Consider None to be no data to print
if msg is None or msg_str == '':
return
try:
import subprocess
# Attempt to detect if we are not running within a fully functional terminal.
# Don't try to use the pager when being run by a continuous integration system like Jenkins + pexpect.
functional_terminal = False
if self.stdin.isatty() and self.stdout.isatty():
if sys.platform.startswith('win') or os.environ.get('TERM') is not None:
functional_terminal = True
# Don't attempt to use a pager that can block if redirecting or running a script (either text or Python)
# Also only attempt to use a pager if actually running in a real fully functional terminal
if functional_terminal and not self._redirecting and not self.in_pyscript() and not self.in_script():
if ansi.allow_style.lower() == ansi.STYLE_NEVER.lower():
msg_str = ansi.strip_style(msg_str)
msg_str += end
pager = self.pager
if chop:
pager = self.pager_chop
# Prevent KeyboardInterrupts while in the pager. The pager application will
# still receive the SIGINT since it is in the same process group as us.
with self.sigint_protection:
pipe_proc = subprocess.Popen(pager, shell=True, stdin=subprocess.PIPE)
pipe_proc.communicate(msg_str.encode('utf-8', 'replace'))
else:
self.poutput(msg_str, end=end)
except BrokenPipeError:
# This occurs if a command's output is being piped to another process and that | |
2 * np.pi * self._l * x)
c = sum(a * b) / 2 * self.dim + (2 ** (self.dim - 1)) * (1 + self._alpha)
return c
def f_bowl(self, x):
    """Quadratic centre (bowl) function: sum of 20 * x_i**2."""
    return sum(20 * (x) ** 2)
def f_quad(self, x):
    """Combination of different x^n-functions: minimum over self._q branches.

    For branch i the elementwise power (x - p_i)**order_i is built up by
    repeated multiplication, then weighted with b_i and offset by v_i;
    the smallest branch sum is returned.
    """
    c = np.zeros(self._q)
    for i in range(self._q):
        xx = x - self._p[i]
        # multiply (order_i - 1) more times, so xx == (x - p_i) ** order_i
        for j in range(int(self._order[i] - 1)):
            xx = xx * (x - self._p[i])
        c[i] = sum(xx * self._b[i] + self._v[i])
    return min(c)
def f_plate(self, x):
    """Combination of linear plate functions: the largest of self._q planes.

    Each plane i is evaluated as sum(x * slope_i + intersect_i).  np.max
    replaces the original np.sort(c)[-1], which sorted the whole array only
    to take its last element -- same result (including NaN propagation),
    O(q) instead of O(q log q).
    """
    c = np.zeros(self._q)
    for i in range(self._q):
        c[i] = sum(x * self._slope[i] + self._intersect[i])
    return np.max(c)
def f_steep(self, x):
    """Steep drops: conical wells of depth c_i and radius r_i around p_i.

    Outside every well the surface is flat at 1.0; the minimum over all
    wells is returned.
    """
    vals = np.ones(self._q)
    for k in range(self._q):
        dist = np.linalg.norm(x - self._p[k])
        # inside the well radius the value drops towards c_k at the centre
        if dist <= self._r[k]:
            vals[k] = self._c[k] * (1.0 - (dist / self._r[k]) ** self._w[k])
    return min(vals)
# ......................................
# Create a list with all function classes to choose from; each class below
# appends itself after its definition
funclist = []
# ......................................
"""
Generated tunable test functions
from Ronkkonen, 2010
"""
# ......................................
class TuneMultimodal(Ronkkonen):
    """Tunable multimodal test function (f_multimodal term only)."""

    def __init__(self, seed, name):
        char = ["multimodal"]
        # use super() for consistency with TuneQuad
        super().__init__(seed, name, char=char)

    def func(self, x):
        """Evaluate the function at x."""
        c = self.f_multimodal(x)
        return c


funclist.append(TuneMultimodal)
# ......................................
class TuneBowl(Ronkkonen):
    """Tunable unimodal quadratic-bowl test function."""

    def __init__(self, seed, name):
        char = ["unimodal"]
        # use super() for consistency with TuneQuad
        super().__init__(seed, name, char=char)

    def func(self, x):
        """Evaluate the function at x."""
        c = self.f_bowl(x)
        return c


funclist.append(TuneBowl)
# ......................................
class TuneMultimodalBowl(Ronkkonen):
    """Tunable test function: weighted multimodal term plus quadratic bowl."""

    def __init__(self, seed, name):
        char = ["multimodal"]
        # use super() for consistency with TuneQuad
        super().__init__(seed, name, char=char)

    def func(self, x):
        """Evaluate 5 * f_multimodal(x) + f_bowl(x)."""
        c = 5 * self.f_multimodal(x) + self.f_bowl(x)
        return c


funclist.append(TuneMultimodalBowl)
# ......................................
class TuneQuad(Ronkkonen):
    """Tunable multimodal test function built from polynomial branches (f_quad)."""
    def __init__(self, seed, name):
        char = ["multimodal"]
        super().__init__(seed, name, char=char)

    def func(self, x):
        """Evaluate the function at x."""
        c = self.f_quad(x)
        return c

funclist.append(TuneQuad)
# ......................................
class TunePlate(Ronkkonen):
    """Tunable unimodal test function built from linear plates (f_plate)."""

    def __init__(self, seed, name):
        char = ["unimodal"]
        # use super() for consistency with TuneQuad
        super().__init__(seed, name, char=char)

    def func(self, x):
        """Evaluate the function at x."""
        c = self.f_plate(x)
        return c


funclist.append(TunePlate)
# ......................................
class TuneSteep(Ronkkonen):
    """Tunable steep-drop test function (f_steep)."""

    def __init__(self, seed, name):
        char = ["steep"]
        # use super() for consistency with TuneQuad
        super().__init__(seed, name, char=char)
        # disable rotation and offset (after the base init, so these override it);
        # this is to make sure the minima are not outside the bounds and the
        # response surface is entirely flat away from the drops
        self.o = np.zeros(self.dim, dtype=float)
        self.m = np.identity(self.dim)
        # self.getx0()

    def func(self, x):
        """Evaluate the function at x."""
        c = self.f_steep(x)
        return c


funclist.append(TuneSteep)
# ......................................
class TuneMix(Ronkkonen):
    """Random-weighted blend of the four landscape families."""
    def __init__(self, seed, name):
        # NOTE(review): self.ratio is drawn from the *global* numpy RNG before
        # the base-class __init__ runs; if Ronkkonen.__init__ seeds np.random
        # with `seed`, these mixing ratios are NOT reproducible from the seed.
        # Confirm whether that is intended before changing the order (the base
        # init may already call func(), which needs self.ratio to exist).
        self.ratio = np.random.uniform(0.0, 1.0, 4)
        Ronkkonen.__init__(self, seed, name, char=[])
        # self.getx0()

    def func(self, x):
        """Evaluate the weighted sum of the four component functions at x."""
        c = 0
        c += self.f_multimodal(x) * self.ratio[0]
        c += self.f_bowl(x) * self.ratio[1]
        c += self.f_plate(x) * self.ratio[2]
        c += self.f_steep(x) * self.ratio[3]
        return c

funclist.append(TuneMix)
# ......................................
"""
Benchmark optimization functions
from https://www.sfu.ca/~ssurjano/optimization.html
"""
# ......................................
class Levy(Function):
    """Levy function: multimodal, original bounds [-10, 10]."""

    def __init__(self, seed, name):
        super().__init__(seed, name, orig_bounds=[-10.0, 10.0],
                         char=["multimodal"])

    def func(self, x):
        """Evaluate the Levy function at point ``x``."""
        w = 1 + (x - 1) / 4
        head = np.sin(np.pi * w[0]) ** 2
        body = sum((w[:-1] - 1) ** 2 * (1 + 10 * np.sin(np.pi * w[:-1] + 1) ** 2))
        tail = (w[-1] - 1) ** 2 * (1 + np.sin(2 * np.pi * w[-1]) ** 2)
        return head + body + tail


funclist.append(Levy)
# ......................................
class Ackley(Function):
    """Ackley function: multimodal and steep, original bounds [-40, 40]."""

    def __init__(self, seed, name):
        super().__init__(seed, name, orig_bounds=[-40.0, 40.0],
                         char=["multimodal", "steep"])

    def func(self, x, a=20, b=0.2, c=2 * np.pi):
        """Evaluate Ackley at ``x`` with the standard a/b/c parameters."""
        dim = len(x)
        exp_term = -a * np.exp(-b * np.sqrt(sum(x**2) / dim))
        cos_term = -np.exp(sum(np.cos(c * x)) / dim)
        return exp_term + cos_term + a + np.exp(1)


funclist.append(Ackley)
# ......................................
class Rosenbrock(Function):
    """Rosenbrock function: unimodal valley, original bounds [-5, 10]."""

    def __init__(self, seed, name):
        super().__init__(seed, name, orig_bounds=[-5.0, 10.0],
                         char=["unimodal"])

    def func(self, x):
        """Evaluate Rosenbrock as a sum over consecutive coordinate pairs."""
        left, right = x[:-1], x[1:]
        return sum((1 - left) ** 2) + 100 * sum((right - left**2) ** 2)


funclist.append(Rosenbrock)
# ......................................
class Schwefel(Function):
    """Schwefel function: multimodal, original bounds [-500, 500]."""

    def __init__(self, seed, name):
        super().__init__(seed, name, orig_bounds=[-500.0, 500.0],
                         char=["multimodal"])

    def func(self, x):
        """Evaluate Schwefel at point ``x``."""
        return 418.9829 * len(x) - sum(x * np.sin(np.sqrt(abs(x))))


funclist.append(Schwefel)
# ......................................
class Rastrigin(Function):
    """Rastrigin function: multimodal, original bounds [-5.12, 5.12]."""

    def __init__(self, seed, name):
        super().__init__(seed, name, orig_bounds=[-5.12, 5.12],
                         char=["multimodal"])

    def func(self, x):
        """Evaluate Rastrigin at point ``x``."""
        return 10 * len(x) + sum(x**2 - 10 * np.cos(2 * np.pi * x))


funclist.append(Rastrigin)
# ......................................
# class Easom(Function): #2D
# def __init__(self,seed,name):
# super().__init__(self,seed,name)
# orig_bounds = [-100.,100.]
# self.getx0()
# self.char.append('steep')
# self.char.append('unimodal')
# def func(self, x):
# x1 = x[:-1]
# x2 = x[1:]
# c = -np.cos(x1)*np.cos(x2)*np.exp(-(x1-np.pi)**2-(x2-np.pi)**2)
# return c
# funclist.append(Easom)
# ......................................
class Styblinski(Function):
    """Styblinski-Tang function: multimodal, original bounds [-5, 5]."""

    def __init__(self, seed, name):
        super().__init__(seed, name, orig_bounds=[-5.0, 5.0],
                         char=["multimodal"])

    def func(self, x):
        """Evaluate Styblinski-Tang at point ``x``."""
        return 0.5 * sum(x**4 - 16 * x**2 + 5 * x)


funclist.append(Styblinski)
# ......................................
class Branin(Function):  # 2D
    """Branin function: multimodal, original bounds [0, 15].

    Applied pairwise over consecutive coordinates and summed.
    """

    def __init__(self, seed, name):
        super().__init__(seed, name, orig_bounds=[0.0, 15.0],
                         char=["multimodal"])

    def func(self, x):
        """Evaluate Branin; the first coordinate is shifted by 5 to correct
        for the uneven original box."""
        u = x[:-1] + 5.0
        v = x[1:]
        a, r, s = 1, 6, 10
        b = 5.1 / (4 * np.pi**2)
        c = 5 / np.pi
        t = 1 / (8 * np.pi)
        return sum(a * (v - b * u**2 + c * u - r) ** 2
                   + s * (1 - t) * np.cos(u) + s)


funclist.append(Branin)
# ......................................
class SchafferF6(Function):  # 2D
    """Schaffer F6 function: steep and multimodal, bounds [-100, 100]."""

    def __init__(self, seed, name):
        super().__init__(seed, name, orig_bounds=[-100.0, 100.0],
                         char=["steep", "multimodal"])

    def func(self, x):
        """Evaluate Schaffer F6 pairwise over consecutive coordinates."""
        u, v = x[:-1], x[1:]
        r2 = u**2 + v**2
        return sum(0.5 + (np.sin(np.sqrt(r2)) ** 2 - 0.5) / ((1 + 0.001 * r2) ** 2))


funclist.append(SchafferF6)
# ......................................
class Beale(Function):  # 2D
    """Beale function: multimodal, original bounds [-4.5, 4.5]."""

    def __init__(self, seed, name):
        super().__init__(seed, name, orig_bounds=[-4.5, 4.5],
                         char=["multimodal"])

    def func(self, x):
        """Evaluate Beale pairwise over consecutive coordinates."""
        u, v = x[:-1], x[1:]
        term1 = (1.5 - u + u * v) ** 2
        term2 = (2.25 - u + u * v**2) ** 2
        term3 = (2.625 - u + u * v**3) ** 2
        return sum(term1 + term2 + term3)


funclist.append(Beale)
# ......................................
class AckleyNo2(Function):
    """Ackley N.2 function: unimodal, original bounds [-4, 4].

    Applied pairwise over consecutive coordinates and summed.
    """

    def __init__(self, seed, name):
        super().__init__(seed, name, orig_bounds=[-4.0, 4.0],
                         char=["unimodal"])

    def func(self, x):
        """Evaluate Ackley N.2 at point ``x``."""
        # The unused `n = len(x)` from the original was removed; the
        # pairwise sum needs no explicit dimension.
        x1 = x[:-1]
        x2 = x[1:]
        return sum(-200 * np.exp(-0.2 * np.sqrt(x1**2 + x2**2)))


funclist.append(AckleyNo2)
# ......................................
class Bohachevsky(Function):
    """Bohachevsky function: unimodal, original bounds [-100, 100].

    Applied pairwise over consecutive coordinates and summed.
    """

    def __init__(self, seed, name):
        super().__init__(seed, name, orig_bounds=[-100.0, 100.0],
                         char=["unimodal"])

    def func(self, x):
        """Evaluate Bohachevsky at point ``x``."""
        # Removed the unused local `n = len(x)` from the original.
        x1 = x[:-1]
        x2 = x[1:]
        return sum(
            x1**2
            + 2 * x2**2
            - 0.3 * np.cos(3 * np.pi * x1)
            - 0.4 * np.cos(4 * np.pi * x2)
            + 0.7
        )


funclist.append(Bohachevsky)
# ......................................
class Matyas(Function):
    """Matyas function: unimodal, original bounds [-10, 10].

    Applied pairwise over consecutive coordinates and summed.
    """

    def __init__(self, seed, name):
        super().__init__(seed, name, orig_bounds=[-10.0, 10.0],
                         char=["unimodal"])

    def func(self, x):
        """Evaluate Matyas at point ``x``."""
        # Removed the unused local `n = len(x)` from the original.
        x1 = x[:-1]
        x2 = x[1:]
        return sum(0.26 * (x1**2 + x2**2) - 0.48 * x1 * x2)


funclist.append(Matyas)
# ......................................
class Zakharov(Function):
    """Zakharov function: unimodal, original bounds [-5, 10]."""

    def __init__(self, seed, name):
        super().__init__(seed, name, orig_bounds=[-5.0, 10.0],
                         char=["unimodal"])

    def func(self, x):
        """Evaluate Zakharov at point ``x``."""
        # Hoist the weighted sum: the original computed the identical
        # `sum(0.5 * arange * x)` expression twice. Also dropped the
        # unused local `n = len(x)`.
        s = sum(0.5 * np.arange(1, len(x) + 1) * x)
        return sum(x**2) + s**2 + s**4


funclist.append(Zakharov)
# ......................................
class McCormick(Function):
    """McCormick function: unimodal, original bounds [-3, 4].

    Applied pairwise over consecutive coordinates and summed.
    """

    def __init__(self, seed, name):
        super().__init__(seed, name, orig_bounds=[-3.0, 4],
                         char=["unimodal"])

    def func(self, x):
        """Evaluate McCormick at point ``x``."""
        # Removed the unused local `n = len(x)` from the original.
        x1 = x[:-1]
        x2 = x[1:]
        return sum(np.sin(x1 + x2) + (x1 - x2) ** 2 - 1.5 * x1 + 2.5 * x2 + 1)


funclist.append(McCormick)
# ......................................
class Leon(Function):
    """Leon function: unimodal, original bounds [-5, 5].

    Applied pairwise over consecutive coordinates and summed.
    """

    def __init__(self, seed, name):
        super().__init__(seed, name, orig_bounds=[-5.0, 5.0],
                         char=["unimodal"])

    def func(self, x):
        """Evaluate Leon at point ``x``."""
        # Removed the unused local `n = len(x)` from the original.
        x1 = x[:-1]
        x2 = x[1:]
        return sum(100 * (x2 - x1**3) ** 2 + (1 - x1) ** 2)


funclist.append(Leon)
# ......................................
# CEC2013 test functions are only defined for dimensions
# 2, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100.
class CEC2013(Function):
    """Wrapper around pagmo's CEC-2013 benchmark suite (28 problems)."""

    def __init__(self, seed, name, number=None):
        # `np.random.randint(1, 29)` draws from 1..28 (upper bound exclusive),
        # matching the 28 problems in the suite.
        if number is None:  # fixed: original compared with `== None`
            number = np.random.randint(1, 29)
        self.pgprob = pg.problem(pg.cec2013(prob_id=number, dim=DIMENSIONALITY))
        lower, upper = self.pgprob.get_bounds()
        orig_bounds = [lower[0], upper[0]]
        # Problems 1-5 are unimodal, the rest multimodal. The original left
        # `char` unbound (NameError) for ids outside 1..29; default to
        # multimodal instead.
        char = ["unimodal"] if 1 <= number <= 5 else ["multimodal"]
        super().__init__(seed, name, orig_bounds=orig_bounds, char=char)

    def func(self, x):
        """Evaluate the wrapped pagmo problem at point ``x``."""
        return self.pgprob.fitness(x)[0]


funclist.extend([CEC2013] * 10)  # weight this family 10x in the pool
# ......................................
class LinearRegression(Function): # 4D, minimal 2D!
# learning to opt benchmark!
def __init__(self, seed, name):
np.random.seed(seed=seed)
d = | |
<filename>VSR/Backend/TF/Framework/LayersHelper.py<gh_stars>1000+
"""
Copyright: <NAME> 2017-2020
Author: <NAME>
Email: <EMAIL>
Created Date: Sep 5th 2018
commonly used layers helper
"""
from VSR.Util import to_list
from .. import tf
from ..Util import (
SpectralNorm, TorchInitializer, pixel_shift, pop_dict_wo_keyerror, prelu
)
class Layers(object):
def batch_norm(self, x, training, decay=0.9, epsilon=1e-5, name=None):
# interesting.
return tf.layers.batch_normalization(x,
momentum=decay,
training=training,
fused=False,
epsilon=epsilon,
name=name)
def instance_norm(self, x, trainable=True, name=None, reuse=None):
from .. import tfc
with tf.variable_scope(name, 'InstanceNorm', reuse=reuse):
return tfc.layers.instance_norm(
x,
trainable=trainable,
variables_collections=[tf.GraphKeys.GLOBAL_VARIABLES])
def layer_norm(self, x, trainable=True, name=None, reuse=None):
from .. import tfc
with tf.variable_scope(name, 'LayerNorm', reuse=reuse):
return tfc.layers.layer_norm(
x,
trainable=trainable,
variables_collections=[tf.GraphKeys.GLOBAL_VARIABLES])
def group_norm(self, x, group, axis, trainable=True, name=None, reuse=None):
from .. import tfc
with tf.variable_scope(name, 'GroupNorm', reuse=reuse):
return tfc.layers.group_norm(
x, group, axis,
trainable=trainable,
variables_collections=[tf.GraphKeys.GLOBAL_VARIABLES])
  def conv2d(self, x, filters, kernel_size, strides=(1, 1), padding='same',
             data_format='channels_last', dilation_rate=(1, 1),
             activation=None, use_bias=True, use_batchnorm=False,
             use_sn=False, use_in=False, use_ln=False, use_gn=False,
             kernel_initializer='he_normal',
             kernel_regularizer='l2', **kwargs):
    """wrap a convolution for common use case

    Builds a `tf.layers.Conv2D`, optionally wraps its kernel with spectral
    normalization, then applies (in order) batch / instance / layer / group
    normalization and finally the activation.

    Args:
      x: input tensor.
      filters: number of output channels.
      kernel_size: convolution kernel size.
      activation: name or callable accepted by `self._act`.
      use_sn: apply spectral normalization to the kernel before the call.
      use_batchnorm/use_in/use_ln/use_gn: select normalization layer(s).
      kernel_initializer: 'torch' selects `TorchInitializer` and disables
        the regularizer; anything else is resolved via `self._kernel`.
      kernel_regularizer: 'l1'/'l2'/callable, resolved via `self._kernel`.
      **kwargs: forwarded to `tf.layers.Conv2D`.

    Returns:
      The convolved (and normalized / activated) tensor.
    """
    if kernel_initializer == 'torch':
      ki = TorchInitializer()
      kr = None  # torch-style init comes without weight decay
      if use_bias:
        # Bias fan-in is kernel_size^2 * input channels for 2-D convs.
        bi = TorchInitializer(kernel_size * kernel_size * x.shape[-1])
      else:
        bi = tf.zeros_initializer()
    else:
      ki, kr = self._kernel(kernel_initializer, kernel_regularizer)
      bi = tf.zeros_initializer()
    nn = tf.layers.Conv2D(filters, kernel_size, strides=strides,
                          padding=padding, data_format=data_format,
                          dilation_rate=dilation_rate, use_bias=use_bias,
                          kernel_initializer=ki, kernel_regularizer=kr,
                          bias_initializer=bi, **kwargs)
    # Build eagerly so the kernel variable exists before spectral norm.
    nn.build(x.shape.as_list())
    if use_sn:
      nn.kernel = SpectralNorm()(nn.kernel)
    x = nn(x)
    if use_batchnorm:
      x = tf.layers.batch_normalization(x, training=self.training_phase)
    if use_in:
      x = self.instance_norm(x)
    if use_ln:
      x = self.layer_norm(x)
    if use_gn:
      x = self.group_norm(x, 32, -1)
    activator = self._act(activation)
    if activation:
      x = activator(x)
    return x
  def conv3d(self, x, filters, kernel_size, strides=(1, 1, 1), padding='same',
             data_format='channels_last', dilation_rate=(1, 1, 1),
             activation=None, use_bias=True, use_batchnorm=False,
             use_in=False, use_ln=False, use_gn=False,
             kernel_initializer='he_normal', kernel_regularizer='l2',
             **kwargs):
    """3-D convolution wrapper; mirrors `conv2d` but without spectral norm.

    Builds a `tf.layers.Conv3D`, then applies the selected normalization
    layer(s) and finally the activation. See `conv2d` for argument details.
    """
    if kernel_initializer == 'torch':
      ki = TorchInitializer()
      kr = None  # torch-style init comes without weight decay
      if use_bias:
        # NOTE(review): fan-in uses kernel_size**2 even for a 3-D conv;
        # possibly should be kernel_size**3 — confirm TorchInitializer's
        # contract.
        bi = TorchInitializer(kernel_size * kernel_size * x.shape[-1])
      else:
        bi = tf.zeros_initializer()
    else:
      ki, kr = self._kernel(kernel_initializer, kernel_regularizer)
      bi = tf.zeros_initializer()
    nn = tf.layers.Conv3D(filters, kernel_size, strides=strides,
                          padding=padding, data_format=data_format,
                          dilation_rate=dilation_rate, use_bias=use_bias,
                          kernel_initializer=ki, kernel_regularizer=kr,
                          bias_initializer=bi, **kwargs)
    nn.build(x.shape.as_list())
    x = nn(x)
    if use_batchnorm:
      x = tf.layers.batch_normalization(x, training=self.training_phase)
    if use_in:
      x = self.instance_norm(x)
    if use_ln:
      x = self.layer_norm(x)
    if use_gn:
      x = self.group_norm(x, 32, -1)
    activator = self._act(activation)
    if activation:
      x = activator(x)
    return x
  def deconv2d(self, x,
               filters,
               kernel_size,
               strides=(1, 1),
               padding='same',
               data_format='channels_last',
               activation=None,
               use_bias=True,
               use_batchnorm=False,
               use_sn=False,
               use_in=False,
               use_ln=False,
               use_gn=False,
               kernel_initializer='he_normal',
               kernel_regularizer='l2',
               **kwargs):
    """warp a conv2d_transpose op for simplicity usage

    Builds a `tf.layers.Conv2DTranspose`, optionally wraps its kernel with
    spectral normalization, then applies the selected normalization
    layer(s) and finally the activation. See `conv2d` for argument details.
    """
    if kernel_initializer == 'torch':
      ki = TorchInitializer()
      kr = None  # torch-style init comes without weight decay
      if use_bias:
        # Bias fan-in is kernel_size^2 * input channels.
        bi = TorchInitializer(kernel_size * kernel_size * x.shape[-1])
      else:
        bi = tf.zeros_initializer()
    else:
      ki, kr = self._kernel(kernel_initializer, kernel_regularizer)
      bi = tf.zeros_initializer()
    nn = tf.layers.Conv2DTranspose(filters, kernel_size, strides=strides,
                                   padding=padding,
                                   data_format=data_format,
                                   use_bias=use_bias,
                                   bias_initializer=bi,
                                   kernel_initializer=ki,
                                   kernel_regularizer=kr, **kwargs)
    # Build eagerly so the kernel variable exists before spectral norm.
    nn.build(x.shape.as_list())
    if use_sn:
      nn.kernel = SpectralNorm()(nn.kernel)
    x = nn(x)
    if use_batchnorm:
      x = tf.layers.batch_normalization(x, training=self.training_phase)
    if use_in:
      x = self.instance_norm(x)
    if use_ln:
      x = self.layer_norm(x)
    if use_gn:
      x = self.group_norm(x, 32, -1)
    activator = self._act(activation)
    if activation:
      x = activator(x)
    return x
  def deconv3d(self, x,
               filters,
               kernel_size,
               strides=(1, 1, 1),
               padding='same',
               data_format='channels_last',
               activation=None,
               use_bias=True,
               use_batchnorm=False,
               use_in=False,
               use_ln=False,
               use_gn=False,
               kernel_initializer='he_normal',
               kernel_regularizer='l2',
               **kwargs):
    """3-D transposed convolution wrapper; mirrors `deconv2d` without
    spectral normalization. See `conv2d` for argument details.
    """
    if kernel_initializer == 'torch':
      ki = TorchInitializer()
      kr = None  # torch-style init comes without weight decay
      if use_bias:
        # NOTE(review): fan-in uses kernel_size**2 even for a 3-D deconv;
        # possibly should be kernel_size**3 — confirm TorchInitializer's
        # contract.
        bi = TorchInitializer(kernel_size * kernel_size * x.shape[-1])
      else:
        bi = tf.zeros_initializer()
    else:
      ki, kr = self._kernel(kernel_initializer, kernel_regularizer)
      bi = tf.zeros_initializer()
    nn = tf.layers.Conv3DTranspose(filters, kernel_size, strides=strides,
                                   padding=padding,
                                   data_format=data_format,
                                   use_bias=use_bias,
                                   bias_initializer=bi,
                                   kernel_initializer=ki,
                                   kernel_regularizer=kr, **kwargs)
    nn.build(x.shape.as_list())
    x = nn(x)
    if use_batchnorm:
      x = tf.layers.batch_normalization(x, training=self.training_phase)
    if use_in:
      x = self.instance_norm(x)
    if use_ln:
      x = self.layer_norm(x)
    if use_gn:
      x = self.group_norm(x, 32, -1)
    activator = self._act(activation)
    if activation:
      x = activator(x)
    return x
def dense(self, x, units, activation=None, use_bias=True, use_sn=False,
kernel_initializer='he_normal', kernel_regularizer='l2',
**kwargs):
act = self._act(activation)
ki, kr = self._kernel(kernel_initializer, kernel_regularizer)
nn = tf.layers.Dense(units, use_bias=use_bias,
kernel_initializer=ki,
kernel_regularizer=kr, **kwargs)
nn.build(x.shape.as_list())
if use_sn:
nn.kernel = SpectralNorm()(nn.kernel)
x = nn(x)
if act:
x = act(x)
return x
linear = dense
@staticmethod
def _act(activation):
activator = None
if activation:
if isinstance(activation, str):
if activation == 'relu':
activator = tf.nn.relu
elif activation == 'tanh':
activator = tf.nn.tanh
elif activation == 'prelu':
activator = prelu
elif activation == 'lrelu':
activator = tf.nn.leaky_relu
elif callable(activation):
activator = activation
else:
raise ValueError('invalid activation!')
return activator
def _kernel(self, kernel_initializer, kernel_regularizer):
ki = None
if isinstance(kernel_initializer, str):
if kernel_initializer == 'he_normal':
ki = tf.keras.initializers.he_normal()
elif kernel_initializer == 'he_uniform':
ki = tf.keras.initializers.he_uniform()
elif kernel_initializer == 'zeros' or kernel_initializer == 'zero':
ki = tf.keras.initializers.zeros()
elif 'truncated_normal' in kernel_initializer:
stddev = float(kernel_initializer.split('_')[-1])
ki = tf.truncated_normal_initializer(stddev=stddev)
elif 'random_normal' in kernel_initializer:
stddev = float(kernel_initializer.split('_')[-1])
ki = tf.random_normal_initializer(stddev=stddev)
elif callable(kernel_initializer):
ki = kernel_initializer
elif kernel_initializer:
raise ValueError('invalid kernel initializer!')
kr = None
if isinstance(kernel_regularizer, str):
if kernel_regularizer == 'l1':
kr = tf.keras.regularizers.l1(
self.weight_decay) if self.weight_decay else None
elif kernel_regularizer == 'l2':
kr = tf.keras.regularizers.l2(
self.weight_decay) if self.weight_decay else None
elif callable(kernel_regularizer):
kr = kernel_regularizer
elif kernel_regularizer:
raise ValueError('invalid kernel regularizer!')
return ki, kr
  def upscale(self, image, method='espcn', scale=None, direct_output=True,
              **kwargs):
    """Image up-scale layer

    Upsample `image` width and height by scale factor `scale[0]` and
    `scale[1]`. Perform upsample progressively: i.e. x12:= x2->x2->x3
    Args:
        image: tensors to upsample
        method: method could be 'espcn', 'nearest' or 'deconv'
        scale: None or int or [int, int]. If None, `scale`=`self.scale`
        direct_output: output channel is the desired RGB or Grayscale, if
          False, keep the same channels as `image`
    """
    _allowed_method = ('espcn', 'nearest', 'deconv')
    assert str(method).lower() in _allowed_method
    method = str(method).lower()
    act = kwargs.get('activator')
    ki = kwargs.get('kernel_initializer', 'he_normal')
    kr = kwargs.get('kernel_regularizer', 'l2')
    use_bias = kwargs.get('use_bias', True)
    scale_x, scale_y = to_list(scale, 2) or self.scale
    features = self.channel if direct_output else image.shape.as_list()[-1]
    # Upsample progressively by factors of 2; as soon as either remaining
    # factor is odd, do one final upsample by the full remaining factor.
    while scale_x > 1 or scale_y > 1:
      if scale_x % 2 == 1 or scale_y % 2 == 1:
        # Odd remainder: a single (scale_x, scale_y) upsample, then stop.
        if method == 'espcn':
          image = pixel_shift(self.conv2d(
              image, features * scale_x * scale_y, 3,
              use_bias=use_bias, kernel_initializer=ki, kernel_regularizer=kr),
              [scale_x, scale_y], features)
        elif method == 'nearest':
          # Duplicate channels, then rearrange them spatially.
          image = pixel_shift(
              tf.concat([image] * scale_x * scale_y, -1),
              [scale_x, scale_y],
              image.shape[-1])
        elif method == 'deconv':
          image = self.deconv2d(image, features, 3,
                                strides=[scale_y, scale_x],
                                kernel_initializer=ki,
                                kernel_regularizer=kr,
                                use_bias=use_bias)
        if act:
          image = act(image)
        break
      else:
        # Even remainder: one x2 step in each dimension per iteration.
        scale_x //= 2
        scale_y //= 2
        if method == 'espcn':
          image = pixel_shift(self.conv2d(
              image, features * 4, 3, use_bias=use_bias,
              kernel_initializer=ki, kernel_regularizer=kr), [2, 2], features)
        elif method == 'nearest':
          image = pixel_shift(
              tf.concat([image] * 4, -1),
              [2, 2],
              image.shape[-1])
        elif method == 'deconv':
          image = self.deconv2d(image, features, 3, strides=2,
                                use_bias=use_bias,
                                kernel_initializer=ki, kernel_regularizer=kr)
        if act:
          image = act(image)
    return image
def __getattr__(self, item):
from functools import partial as _p
"""Make an alignment for easy calls. You can add more patterns as below.
>>> Layers.relu_conv2d = Layers.conv2d(activation='relu')
>>> Layers.bn_conv2d = Layers.conv2d(use_batchnorm=True)
>>> Layers.sn_leaky_conv2d = Layers.conv2d(use_sn=True, activation='lrelu')
NOTE: orders do not matter.
"""
if 'conv2d' in item:
items = item.split('_')
kwargs = {
'kernel_initializer': 'he_normal',
'kernel_regularizer': 'l2',
'use_batchnorm': False,
'use_sn': False,
}
if 'bn' in items or 'batchnorm' in items:
kwargs.update(use_batchnorm=True)
if 'sn' in items or 'spectralnorm' in items:
kwargs.update(use_sn=True)
if 'relu' in items:
kwargs.update(activation='relu')
if 'leaky' in items or 'lrelu' in items or 'leakyrelu' in items:
kwargs.update(activation='lrelu')
if 'prelu' in items:
kwargs.update(activation='prelu')
if 'tanh' in items:
kwargs.update(activation='tanh')
return _p(self.conv2d, **kwargs)
elif 'conv3d' in item:
items = item.split('_')
kwargs = {
'kernel_initializer': 'he_normal',
'kernel_regularizer': 'l2',
'use_batchnorm': False,
}
if 'bn' in items or 'batchnorm' in items:
kwargs.update(use_batchnorm=True)
if 'relu' in items:
kwargs.update(activation='relu')
if 'leaky' in items or 'lrelu' in items or 'leakyrelu' in items:
kwargs.update(activation='lrelu')
if 'prelu' in items:
kwargs.update(activation='prelu')
if 'tanh' in items:
kwargs.update(activation='tanh')
return _p(self.conv3d, **kwargs)
elif 'dense' in item or 'linear' in item:
items = item.split('_')
kwargs = {
'kernel_initializer': 'he_normal',
'kernel_regularizer': 'l2',
'use_sn': False,
}
if 'sn' in items or 'spectralnorm' in items:
kwargs.update(use_sn=True)
if 'relu' in items:
kwargs.update(activation='relu')
if 'leaky' in items or 'lrelu' in items or 'leakyrelu' in items:
kwargs.update(activation='lrelu')
if 'prelu' in items:
kwargs.update(activation='prelu')
if 'tanh' in items:
kwargs.update(activation='tanh')
return _p(self.dense, **kwargs)
return None
def resblock(self, x, filters, kernel_size, strides=(1, 1), padding='same',
data_format='channels_last', activation=None, use_bias=True,
use_batchnorm=False, use_sn=False,
kernel_initializer='he_normal',
kernel_regularizer='l2', placement=None, **kwargs):
"""make a residual block
Args:
x:
filters:
kernel_size:
strides: if strides is more than 1, resblock downsample in the 2nd
conv, and the short cut 1x1 conv
padding:
data_format:
activation:
use_bias:
use_batchnorm:
use_sn:
kernel_initializer:
kernel_regularizer:
placement: 'front' or 'behind', use BN layer in front of or behind
after the | |
# Copyright 2018 GoDaddy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from uuid import UUID
from dateutil import parser
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions
from octavia_tempest_plugin.common import constants as const
from octavia_tempest_plugin.tests import test_base
from octavia_tempest_plugin.tests import waiters
CONF = config.CONF
class PoolAPITest(test_base.LoadBalancerBaseTest):
"""Test the pool object API."""
    @classmethod
    def resource_setup(cls):
        """Setup resources needed by the tests.

        Creates one load balancer and one listener shared by every test in
        this class, registering class-level cleanups for both.
        """
        super(PoolAPITest, cls).resource_setup()
        lb_name = data_utils.rand_name("lb_member_lb1_pool")
        lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
                     const.NAME: lb_name}
        cls._setup_lb_network_kwargs(lb_kwargs)
        # Fall back to the configured L4 protocol when L7 is not supported
        # by the provider under test.
        cls.protocol = const.HTTP
        cls.lb_feature_enabled = CONF.loadbalancer_feature_enabled
        if not cls.lb_feature_enabled.l7_protocol_enabled:
            cls.protocol = cls.lb_feature_enabled.l4_protocol
        lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs)
        cls.lb_id = lb[const.ID]
        cls.addClassResourceCleanup(
            cls.mem_lb_client.cleanup_loadbalancer,
            cls.lb_id)
        # Load-balancer creation uses the (longer) lb_build_* settings.
        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                cls.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.lb_build_interval,
                                CONF.load_balancer.lb_build_timeout)
        # Listener used by the pool-with-listener test cases.
        listener_name = data_utils.rand_name("lb_member_listener1_pool")
        listener_kwargs = {
            const.NAME: listener_name,
            const.PROTOCOL: cls.protocol,
            const.PROTOCOL_PORT: '80',
            const.LOADBALANCER_ID: cls.lb_id,
        }
        listener = cls.mem_listener_client.create_listener(**listener_kwargs)
        cls.listener_id = listener[const.ID]
        cls.addClassResourceCleanup(
            cls.mem_listener_client.cleanup_listener,
            cls.listener_id,
            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                cls.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
    @decorators.idempotent_id('7587fe48-87ba-4538-9f03-190911f100ff')
    def test_pool_create_standalone(self):
        """Create a pool attached directly to the load balancer."""
        self._test_pool_create(has_listener=False)
    @decorators.idempotent_id('c9c0df79-f07e-428c-ae57-b9d4078eec79')
    def test_pool_create_with_listener(self):
        """Create a pool attached to the shared listener."""
        self._test_pool_create(has_listener=True)
    def _test_pool_create(self, has_listener):
        """Tests pool create and basic show APIs.

        * Tests that users without the loadbalancer member role cannot
          create pools.
        * Create a fully populated pool.
        * Show pool details.
        * Validate the show reflects the requested values.

        Args:
            has_listener: attach the pool to the shared listener when True,
                otherwise attach it directly to the load balancer.
        """
        pool_name = data_utils.rand_name("lb_member_pool1-create")
        pool_description = data_utils.arbitrary_string(size=255)
        pool_sp_cookie_name = 'my_cookie'
        pool_kwargs = {
            const.NAME: pool_name,
            const.DESCRIPTION: pool_description,
            const.ADMIN_STATE_UP: True,
            const.PROTOCOL: self.protocol,
            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
        }
        # Session persistence is only requested when the provider supports it.
        if self.lb_feature_enabled.session_persistence_enabled:
            pool_kwargs[const.SESSION_PERSISTENCE] = {
                const.TYPE: const.SESSION_PERSISTENCE_APP_COOKIE,
                const.COOKIE_NAME: pool_sp_cookie_name,
            }
        if has_listener:
            pool_kwargs[const.LISTENER_ID] = self.listener_id
        else:
            pool_kwargs[const.LOADBALANCER_ID] = self.lb_id
        # Test that a user without the load balancer role cannot
        # create a pool
        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
            self.assertRaises(
                exceptions.Forbidden,
                self.os_primary.pool_client.create_pool,
                **pool_kwargs)
        pool = self.mem_pool_client.create_pool(**pool_kwargs)
        self.addClassResourceCleanup(
            self.mem_pool_client.cleanup_pool,
            pool[const.ID],
            lb_client=self.mem_lb_client, lb_id=self.lb_id)
        # Wait for the parent LB, then the pool itself, to go ACTIVE.
        waiters.wait_for_status(
            self.mem_lb_client.show_loadbalancer, self.lb_id,
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)
        pool = waiters.wait_for_status(
            self.mem_pool_client.show_pool,
            pool[const.ID], const.PROVISIONING_STATUS,
            const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)
        if has_listener and not CONF.load_balancer.test_with_noop:
            pool = waiters.wait_for_status(
                self.mem_pool_client.show_pool,
                pool[const.ID], const.OPERATING_STATUS,
                const.ONLINE,
                CONF.load_balancer.build_interval,
                CONF.load_balancer.build_timeout)
        # Validate the show response reflects everything we requested.
        self.assertEqual(pool_name, pool[const.NAME])
        self.assertEqual(pool_description, pool[const.DESCRIPTION])
        self.assertTrue(pool[const.ADMIN_STATE_UP])
        parser.parse(pool[const.CREATED_AT])
        parser.parse(pool[const.UPDATED_AT])
        UUID(pool[const.ID])
        # Operating status for a pool without members will be:
        if has_listener and not CONF.load_balancer.test_with_noop:
            # ONLINE if it is attached to a listener and is a live test
            self.assertEqual(const.ONLINE, pool[const.OPERATING_STATUS])
        else:
            # OFFLINE if it is just on the LB directly or is in noop mode
            self.assertEqual(const.OFFLINE, pool[const.OPERATING_STATUS])
        self.assertEqual(self.protocol, pool[const.PROTOCOL])
        self.assertEqual(1, len(pool[const.LOADBALANCERS]))
        self.assertEqual(self.lb_id, pool[const.LOADBALANCERS][0][const.ID])
        if has_listener:
            self.assertEqual(1, len(pool[const.LISTENERS]))
            self.assertEqual(self.listener_id,
                             pool[const.LISTENERS][0][const.ID])
        else:
            self.assertEmpty(pool[const.LISTENERS])
        self.assertEqual(const.LB_ALGORITHM_ROUND_ROBIN,
                         pool[const.LB_ALGORITHM])
        if self.lb_feature_enabled.session_persistence_enabled:
            self.assertIsNotNone(pool.get(const.SESSION_PERSISTENCE))
            self.assertEqual(const.SESSION_PERSISTENCE_APP_COOKIE,
                             pool[const.SESSION_PERSISTENCE][const.TYPE])
            self.assertEqual(pool_sp_cookie_name,
                             pool[const.SESSION_PERSISTENCE][
                                 const.COOKIE_NAME])
    @decorators.idempotent_id('6959a32e-fb34-4f3e-be68-8880c6450016')
    def test_pool_list(self):
        """Tests pool list API and field filtering.

        * Create a clean loadbalancer.
        * Create three pools.
        * Validates that other accounts cannot list the pools.
        * List the pools using the default sort order.
        * List the pools using descending sort order.
        * List the pools using ascending sort order.
        * List the pools returning one field at a time.
        * List the pools returning two fields.
        * List the pools filtering to one of the three.
        * List the pools filtered, one field, and sorted.
        """
        # A dedicated LB isolates this test's pools from the shared ones.
        lb_name = data_utils.rand_name("lb_member_lb2_pool-list")
        lb = self.mem_lb_client.create_loadbalancer(
            name=lb_name, provider=CONF.load_balancer.provider,
            vip_network_id=self.lb_member_vip_net[const.ID])
        lb_id = lb[const.ID]
        self.addCleanup(
            self.mem_lb_client.cleanup_loadbalancer,
            lb_id)
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                lb_id,
                                const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.lb_build_interval,
                                CONF.load_balancer.lb_build_timeout)
        # Pool 1: description 'B' (the descriptions drive the sort checks).
        pool1_name = data_utils.rand_name("lb_member_pool2-list")
        pool1_desc = 'B'
        pool1_sp_cookie_name = 'my_cookie1'
        pool1_kwargs = {
            const.NAME: pool1_name,
            const.DESCRIPTION: pool1_desc,
            const.ADMIN_STATE_UP: True,
            const.PROTOCOL: self.protocol,
            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
            const.LOADBALANCER_ID: lb_id,
        }
        if self.lb_feature_enabled.session_persistence_enabled:
            pool1_kwargs[const.SESSION_PERSISTENCE] = {
                const.TYPE: const.SESSION_PERSISTENCE_APP_COOKIE,
                const.COOKIE_NAME: pool1_sp_cookie_name,
            }
        pool1 = self.mem_pool_client.create_pool(
            **pool1_kwargs)
        self.addCleanup(
            self.mem_pool_client.cleanup_pool,
            pool1[const.ID],
            lb_client=self.mem_lb_client, lb_id=lb_id)
        pool1 = waiters.wait_for_status(
            self.mem_pool_client.show_pool, pool1[const.ID],
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                lb_id,
                                const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        # Time resolution for created_at is only to the second, and we need to
        # ensure that each object has a distinct creation time. Delaying one
        # second is both a simple and a reliable way to accomplish this.
        time.sleep(1)
        # Pool 2: description 'A'.
        pool2_name = data_utils.rand_name("lb_member_pool1-list")
        pool2_desc = 'A'
        pool2_sp_cookie_name = 'my_cookie2'
        pool2_kwargs = {
            const.NAME: pool2_name,
            const.DESCRIPTION: pool2_desc,
            const.ADMIN_STATE_UP: True,
            const.PROTOCOL: self.protocol,
            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
            const.LOADBALANCER_ID: lb_id,
        }
        if self.lb_feature_enabled.session_persistence_enabled:
            pool2_kwargs[const.SESSION_PERSISTENCE] = {
                const.TYPE: const.SESSION_PERSISTENCE_APP_COOKIE,
                const.COOKIE_NAME: pool2_sp_cookie_name,
            }
        pool2 = self.mem_pool_client.create_pool(
            **pool2_kwargs)
        self.addCleanup(
            self.mem_pool_client.cleanup_pool,
            pool2[const.ID],
            lb_client=self.mem_lb_client, lb_id=lb_id)
        pool2 = waiters.wait_for_status(
            self.mem_pool_client.show_pool, pool2[const.ID],
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                lb_id,
                                const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        # Time resolution for created_at is only to the second, and we need to
        # ensure that each object has a distinct creation time. Delaying one
        # second is both a simple and a reliable way to accomplish this.
        time.sleep(1)
        # Pool 3: description 'C', admin-down, no session persistence.
        pool3_name = data_utils.rand_name("lb_member_pool3-list")
        pool3_desc = 'C'
        pool3_kwargs = {
            const.NAME: pool3_name,
            const.DESCRIPTION: pool3_desc,
            const.ADMIN_STATE_UP: False,
            const.PROTOCOL: self.protocol,
            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
            # No session persistence, just so there's one test for that
            const.LOADBALANCER_ID: lb_id,
        }
        pool3 = self.mem_pool_client.create_pool(
            **pool3_kwargs)
        self.addCleanup(
            self.mem_pool_client.cleanup_pool,
            pool3[const.ID],
            lb_client=self.mem_lb_client, lb_id=lb_id)
        pool3 = waiters.wait_for_status(
            self.mem_pool_client.show_pool, pool3[const.ID],
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                lb_id,
                                const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        # Test that a different user cannot list pools
        if not CONF.load_balancer.RBAC_test_type == const.NONE:
            member2_client = self.os_roles_lb_member2.pool_client
            primary = member2_client.list_pools(
                query_params='loadbalancer_id={lb_id}'.format(lb_id=lb_id))
            self.assertEqual(0, len(primary))
        # Test that a user without the lb member role cannot list load
        # balancers
        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
            self.assertRaises(
                exceptions.Forbidden,
                self.os_primary.pool_client.list_pools)
        # Check the default sort order, created_at
        pools = self.mem_pool_client.list_pools(
            query_params='loadbalancer_id={lb_id}'.format(lb_id=lb_id))
        self.assertEqual(pool1[const.DESCRIPTION],
                         pools[0][const.DESCRIPTION])
        self.assertEqual(pool2[const.DESCRIPTION],
                         pools[1][const.DESCRIPTION])
        self.assertEqual(pool3[const.DESCRIPTION],
                         pools[2][const.DESCRIPTION])
        # Test sort descending by description
        pools = self.mem_pool_client.list_pools(
            query_params='loadbalancer_id={lb_id}&{sort}={descr}:{desc}'
                         .format(lb_id=lb_id, sort=const.SORT,
                                 descr=const.DESCRIPTION, desc=const.DESC))
        self.assertEqual(pool1[const.DESCRIPTION],
                         pools[1][const.DESCRIPTION])
        self.assertEqual(pool2[const.DESCRIPTION],
                         pools[2][const.DESCRIPTION])
        self.assertEqual(pool3[const.DESCRIPTION],
                         pools[0][const.DESCRIPTION])
        # Test sort ascending by description
        pools = self.mem_pool_client.list_pools(
            query_params='loadbalancer_id={lb_id}&{sort}={descr}:{asc}'
                         .format(lb_id=lb_id, sort=const.SORT,
                                 descr=const.DESCRIPTION, asc=const.ASC))
        self.assertEqual(pool1[const.DESCRIPTION],
                         pools[1][const.DESCRIPTION])
        self.assertEqual(pool2[const.DESCRIPTION],
                         pools[0][const.DESCRIPTION])
        self.assertEqual(pool3[const.DESCRIPTION],
                         pools[2][const.DESCRIPTION])
        # Test fields
        for field in const.SHOW_POOL_RESPONSE_FIELDS:
            pools = self.mem_pool_client.list_pools(
                query_params='loadbalancer_id={lb_id}&{fields}={field}'
                             .format(lb_id=lb_id,
                                     fields=const.FIELDS, field=field))
            # Each result object contains only the single requested field.
            self.assertEqual(1, len(pools[0]))
            self.assertEqual(pool1[field], pools[0][field])
            self.assertEqual(pool2[field], pools[1][field])
            self.assertEqual(pool3[field], pools[2][field])
        # Test multiple fields at the same time
        pools = self.mem_pool_client.list_pools(
            query_params='loadbalancer_id={lb_id}&{fields}={admin}&'
                         '{fields}={created}'.format(
                             lb_id=lb_id, fields=const.FIELDS,
                             admin=const.ADMIN_STATE_UP,
                             created=const.CREATED_AT))
        self.assertEqual(2, len(pools[0]))
        self.assertTrue(pools[0][const.ADMIN_STATE_UP])
        parser.parse(pools[0][const.CREATED_AT])
        self.assertTrue(pools[1][const.ADMIN_STATE_UP])
        parser.parse(pools[1][const.CREATED_AT])
        self.assertFalse(pools[2][const.ADMIN_STATE_UP])
        parser.parse(pools[2][const.CREATED_AT])
        # Test filtering
        pools = self.mem_pool_client.list_pools(
            query_params='loadbalancer_id={lb_id}&{desc}={lb_desc}'.format(
                lb_id=lb_id, desc=const.DESCRIPTION,
                lb_desc=pool2[const.DESCRIPTION]))
        self.assertEqual(1, len(pools))
        self.assertEqual(pool2[const.DESCRIPTION],
                         pools[0][const.DESCRIPTION])
        # Test combined params
        pools = self.mem_pool_client.list_pools(
            query_params='loadbalancer_id={lb_id}&{admin}={true}&'
                         '{fields}={descr}&{fields}={id}&'
                         '{sort}={descr}:{desc}'.format(
                             lb_id=lb_id, admin=const.ADMIN_STATE_UP,
                             true=const.ADMIN_STATE_UP_TRUE,
                             fields=const.FIELDS, descr=const.DESCRIPTION,
                             id=const.ID, sort=const.SORT, desc=const.DESC))
        # Should get two pools
        self.assertEqual(2, len(pools))
        # pools should have two fields
        self.assertEqual(2, len(pools[0]))
        # Should be in descending order
        self.assertEqual(pool2[const.DESCRIPTION],
                         pools[1][const.DESCRIPTION])
        self.assertEqual(pool1[const.DESCRIPTION],
                         pools[0][const.DESCRIPTION])
@decorators.idempotent_id('b7932438-1aea-4175-a50c-984fee1c0cad')
def test_pool_show(self):
    """Tests pool show API.

    * Create a fully populated pool.
    * Show pool details.
    * Validate the show reflects the requested values.
    * Validates that other accounts cannot see the pool.
    """
    pool_name = data_utils.rand_name("lb_member_pool1-show")
    pool_description = data_utils.arbitrary_string(size=255)
    pool_sp_cookie_name = 'my_cookie'
    # Fully populated create request; session persistence is only added
    # when the backend feature flag reports support for it.
    pool_kwargs = {
        const.NAME: pool_name,
        const.DESCRIPTION: pool_description,
        const.ADMIN_STATE_UP: True,
        const.PROTOCOL: self.protocol,
        const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
        const.LOADBALANCER_ID: self.lb_id,
    }
    if self.lb_feature_enabled.session_persistence_enabled:
        pool_kwargs[const.SESSION_PERSISTENCE] = {
            const.TYPE: const.SESSION_PERSISTENCE_APP_COOKIE,
            const.COOKIE_NAME: pool_sp_cookie_name,
        }
    pool = self.mem_pool_client.create_pool(**pool_kwargs)
    self.addClassResourceCleanup(
        self.mem_pool_client.cleanup_pool,
        pool[const.ID],
        lb_client=self.mem_lb_client, lb_id=self.lb_id)

    # Wait for the parent load balancer, then the pool itself, to go ACTIVE
    # before validating the show response.
    waiters.wait_for_status(
        self.mem_lb_client.show_loadbalancer, self.lb_id,
        const.PROVISIONING_STATUS, const.ACTIVE,
        CONF.load_balancer.build_interval,
        CONF.load_balancer.build_timeout)
    pool = waiters.wait_for_status(
        self.mem_pool_client.show_pool,
        pool[const.ID], const.PROVISIONING_STATUS,
        const.ACTIVE,
        CONF.load_balancer.build_interval,
        CONF.load_balancer.build_timeout)

    self.assertEqual(pool_name, pool[const.NAME])
    self.assertEqual(pool_description, pool[const.DESCRIPTION])
    self.assertTrue(pool[const.ADMIN_STATE_UP])
    # Timestamps must parse and the ID must be a well-formed UUID.
    parser.parse(pool[const.CREATED_AT])
    parser.parse(pool[const.UPDATED_AT])
    UUID(pool[const.ID])
    # Operating status for pools will always be offline without members
    self.assertEqual(const.OFFLINE, pool[const.OPERATING_STATUS])
    self.assertEqual(self.protocol, pool[const.PROTOCOL])
    self.assertEqual(1, len(pool[const.LOADBALANCERS]))
    self.assertEqual(self.lb_id, pool[const.LOADBALANCERS][0][const.ID])
    self.assertEmpty(pool[const.LISTENERS])
    self.assertEqual(const.LB_ALGORITHM_ROUND_ROBIN,
                     pool[const.LB_ALGORITHM])
    if self.lb_feature_enabled.session_persistence_enabled:
        self.assertIsNotNone(pool.get(const.SESSION_PERSISTENCE))
        self.assertEqual(const.SESSION_PERSISTENCE_APP_COOKIE,
                         pool[const.SESSION_PERSISTENCE][const.TYPE])
        self.assertEqual(pool_sp_cookie_name,
                         pool[const.SESSION_PERSISTENCE][
                             const.COOKIE_NAME])

    # Test that a user with lb_admin role can see the pool
    if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
        pool_client = self.os_roles_lb_admin.pool_client
        pool_adm = pool_client.show_pool(pool[const.ID])
        self.assertEqual(pool_name, pool_adm[const.NAME])

    # Test that a user with cloud admin role can see the pool
    if not CONF.load_balancer.RBAC_test_type == const.NONE:
        adm = self.os_admin.pool_client.show_pool(
            pool[const.ID])
        self.assertEqual(pool_name, adm[const.NAME])

    # Test that a different user, with load balancer member role, cannot
    # see this pool
    if not CONF.load_balancer.RBAC_test_type == const.NONE:
        member2_client = self.os_roles_lb_member2.pool_client
        self.assertRaises(exceptions.Forbidden,
                          member2_client.show_pool,
                          pool[const.ID])

    # Test that a user, without the load balancer member role, cannot
    # show pools
    if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
        self.assertRaises(
            exceptions.Forbidden,
            self.os_primary.pool_client.show_pool,
            pool[const.ID])
@decorators.idempotent_id('7bd0a6bf-57b4-46a6-83ef-f9991896658a')
def test_pool_update(self):
"""Tests pool update and show APIs.
* Create a fully populated pool.
* Show pool details.
* Validate the show reflects the initial values.
* Validates that other accounts cannot update the pool.
* Update the pool details.
* Show pool details.
| |
<reponame>verkaik/modflow6-parallel
# tests to ability to run flow model first followed by transport model
import os
import shutil
import numpy as np
# Optional third-party dependencies: fail fast with an actionable message.
# Fixed: the original used bare "except:", which also swallows SystemExit /
# KeyboardInterrupt and hides the original import traceback; catch
# ImportError and chain the cause instead.
try:
    import pymake
except ImportError as e:
    msg = 'Error. Pymake package is not available.\n'
    msg += 'Try installing using the following command:\n'
    msg += ' pip install https://github.com/modflowpy/pymake/zipball/master'
    raise Exception(msg) from e

try:
    import flopy
except ImportError as e:
    msg = 'Error. FloPy package is not available.\n'
    msg += 'Try installing using the following command:\n'
    msg += ' pip install flopy'
    raise Exception(msg) from e

import targets

# Resolve the mf6 executable provided by the test harness.
exe_name_mf6 = targets.target_dict['mf6']
exe_name_mf6 = os.path.abspath(exe_name_mf6)

# Test workspace; any leftovers from a previous run are removed.
data_ws = os.path.abspath('./data/prudic2004test2/')
testdir = './temp'
testgroup = 'prudic2004t2fmi'
d = os.path.join(testdir, testgroup)
if os.path.isdir(d):
    shutil.rmtree(d)

# Structured-grid dimensions and geometry for the Prudic (2004) test problem.
nlay = 8
nrow = 36
ncol = 23
delr = 405.665
delc = 403.717
top = 100.
fname = os.path.join(data_ws, 'bot1.dat')
bot0 = np.loadtxt(fname)
# Bottom of layer 1 comes from file; each deeper layer is offset 15 below it.
botm = [bot0] + [bot0 - (15. * k) for k in range(1, nlay)]
fname = os.path.join(data_ws, 'idomain1.dat')
idomain0 = np.loadtxt(fname, dtype=int)
idomain = nlay * [idomain0]
def run_flow_model():
    """Build and run the GWF (flow) model, writing heads/budgets that the
    follow-up transport model reads through the flow model interface."""
    global idomain
    name = 'flow'
    gwfname = name
    wsf = os.path.join(testdir, testgroup, name)
    sim = flopy.mf6.MFSimulation(sim_name=name, sim_ws=wsf,
                                 exe_name=exe_name_mf6)
    tdis_rc = [(1., 1, 1.), (365.25 * 25, 1, 1.)]
    nper = len(tdis_rc)
    tdis = flopy.mf6.ModflowTdis(sim, time_units='DAYS',
                                 nper=nper, perioddata=tdis_rc)

    gwf = flopy.mf6.ModflowGwf(sim, modelname=gwfname, save_flows=True)

    # ims
    hclose = 0.01
    rclose = 0.1
    nouter = 1000
    ninner = 100
    relax = 0.99
    imsgwf = flopy.mf6.ModflowIms(sim, print_option='ALL',
                                  outer_dvclose=hclose,
                                  outer_maximum=nouter,
                                  under_relaxation='NONE',
                                  inner_maximum=ninner,
                                  inner_dvclose=hclose,
                                  rcloserecord=rclose,
                                  linear_acceleration='CG',
                                  scaling_method='NONE',
                                  reordering_method='NONE',
                                  relaxation_factor=relax,
                                  filename='{}.ims'.format(gwfname))

    dis = flopy.mf6.ModflowGwfdis(gwf, nlay=nlay, nrow=nrow, ncol=ncol,
                                  delr=delr, delc=delc,
                                  top=top, botm=botm, idomain=idomain)
    # Keep the processed idomain array for reuse by the transport model.
    idomain = dis.idomain.array

    ic = flopy.mf6.ModflowGwfic(gwf, strt=50.)

    npf = flopy.mf6.ModflowGwfnpf(gwf, xt3doptions=False,
                                  save_flows=True,
                                  save_specific_discharge=True,
                                  save_saturation=True,
                                  icelltype=[1] + 7 * [0],
                                  k=250., k33=125.)

    sto_on = False
    if sto_on:
        sto = flopy.mf6.ModflowGwfsto(gwf, save_flows=True,
                                      iconvert=[1] + 7 * [0],
                                      ss=1.e-5, sy=0.3,
                                      steady_state={0: True},
                                      transient={1: False})

    oc = flopy.mf6.ModflowGwfoc(gwf,
                                budget_filerecord='{}.bud'.format(gwfname),
                                head_filerecord='{}.hds'.format(gwfname),
                                headprintrecord=[
                                    ('COLUMNS', ncol, 'WIDTH', 15,
                                     'DIGITS', 6, 'GENERAL')],
                                saverecord=[('HEAD', 'ALL'), ('BUDGET', 'ALL')],
                                printrecord=[('HEAD', 'ALL'), ('BUDGET', 'ALL')])

    rch_on = True
    if rch_on:
        rch = flopy.mf6.ModflowGwfrcha(gwf, recharge={0: 4.79e-3}, pname='RCH-1')

    # Constant heads from file (1-based k i j -> 0-based tuples).
    # Fixed: data files are now closed deterministically via "with" instead
    # of being left open for the garbage collector.
    chdlist = []
    fname = os.path.join(data_ws, 'chd.dat')
    with open(fname, 'r') as f:
        for line in f.readlines():
            ll = line.strip().split()
            if len(ll) == 4:
                k, i, j, hd = ll
                chdlist.append([(int(k) - 1, int(i) - 1, int(j) - 1,), float(hd)])
    chd = flopy.mf6.ModflowGwfchd(gwf, stress_period_data=chdlist, pname='CHD-1')

    rivlist = []
    fname = os.path.join(data_ws, 'riv.dat')
    with open(fname, 'r') as f:
        for line in f.readlines():
            ll = line.strip().split()
            if len(ll) == 7:
                k, i, j, s, c, rb, bn = ll
                rivlist.append(
                    [(int(k) - 1, int(i) - 1, int(j) - 1,), float(s), float(c),
                     float(rb), bn])
    rivra = \
        flopy.mf6.ModflowGwfriv.stress_period_data.empty(gwf, maxbound=len(rivlist),
                                                         boundnames=True)[0]
    for i, t in enumerate(rivlist):
        rivra[i] = tuple(t)
    fname = os.path.join(data_ws, 'sfr-packagedata.dat')
    sfrpd = np.genfromtxt(fname, names=True)
    sfrpackagedata = flopy.mf6.ModflowGwfsfr.packagedata.empty(gwf, boundnames=True,
                                                               maxbound=sfrpd.shape[
                                                                   0])
    # SFR package data: start from matching river columns, then override
    # with any columns provided in sfr-packagedata.dat.
    for name in sfrpackagedata.dtype.names:
        if name in rivra.dtype.names:
            sfrpackagedata[name] = rivra[name]
    for name in sfrpackagedata.dtype.names:
        if name in sfrpd.dtype.names:
            sfrpackagedata[name] = sfrpd[name]
    sfrpackagedata['boundname'] = rivra['boundname']
    fname = os.path.join(data_ws, 'sfr-connectiondata.dat')
    with open(fname) as f:
        lines = f.readlines()
    sfrconnectiondata = []
    for line in lines:
        t = line.split()
        c = []
        for v in t:
            i = int(v)
            c.append(i)
        sfrconnectiondata.append(c)
    sfrperioddata = {0: [[0, 'inflow', 86400], [18, 'inflow', 8640.]]}

    sfr_obs = {(gwfname + '.sfr.obs.csv',):
               [
                   ('reach1leakage', 'SFR', 'LONGESTRIVERINTHEWORLD1'),
                   ('reach2leakage', 'SFR', 'LONGESTRIVERINTHEWORLD2'),
                   ('reach3leakage', 'SFR', 'LONGESTRIVERINTHEWORLD3'),
                   ('reach4leakage', 'SFR', 'LONGESTRIVERINTHEWORLD4'),
               ],
               }
    sfr_obs['digits'] = 7
    sfr_obs['print_input'] = True
    sfr_obs['filename'] = gwfname + '.sfr.obs'

    sfr_on = True
    if sfr_on:
        sfr = flopy.mf6.ModflowGwfsfr(gwf,
                                      print_stage=True,
                                      print_flows=True,
                                      stage_filerecord=gwfname + '.sfr.bin',
                                      budget_filerecord=gwfname + '.sfr.bud',
                                      mover=True, pname='SFR-1',
                                      unit_conversion=128390.00,
                                      boundnames=True, nreaches=len(rivlist),
                                      packagedata=sfrpackagedata,
                                      connectiondata=sfrconnectiondata,
                                      perioddata=sfrperioddata,
                                      observations=sfr_obs)

    # Build lake horizontal/vertical connection records from the lake id map.
    fname = os.path.join(data_ws, 'lakibd.dat')
    lakibd = np.loadtxt(fname, dtype=int)
    lakeconnectiondata = []
    nlakecon = [0, 0]  # running connection count per lake
    lak_leakance = 1.
    for i in range(nrow):
        for j in range(ncol):
            if lakibd[i, j] == 0:
                continue
            else:
                ilak = lakibd[i, j] - 1
                # back
                if i > 0:
                    if lakibd[i - 1, j] == 0 and idomain[0, i - 1, j]:
                        h = [ilak, nlakecon[ilak], (0, i - 1, j), 'horizontal',
                             lak_leakance, 0.0, 0.0, delc / 2., delr]
                        nlakecon[ilak] += 1
                        lakeconnectiondata.append(h)
                # left
                # Bug fix: this test previously read
                # "lakibd[i, j - 1] and idomain[0, i, j - 1] == 0" — the
                # "== 0" was attached to the wrong operand. A horizontal
                # connection must go to an active (idomain != 0) neighbor
                # that is NOT a lake cell, as in the back/right/front cases.
                if j > 0:
                    if lakibd[i, j - 1] == 0 and idomain[0, i, j - 1]:
                        h = [ilak, nlakecon[ilak], (0, i, j - 1), 'horizontal',
                             lak_leakance, 0.0, 0.0, delr / 2., delc]
                        nlakecon[ilak] += 1
                        lakeconnectiondata.append(h)
                # right
                if j < ncol - 1:
                    if lakibd[i, j + 1] == 0 and idomain[0, i, j + 1]:
                        h = [ilak, nlakecon[ilak], (0, i, j + 1), 'horizontal',
                             lak_leakance, 0.0, 0.0, delr / 2., delc]
                        nlakecon[ilak] += 1
                        lakeconnectiondata.append(h)
                # front
                if i < nrow - 1:
                    if lakibd[i + 1, j] == 0 and idomain[0, i + 1, j]:
                        h = [ilak, nlakecon[ilak], (0, i + 1, j), 'horizontal',
                             lak_leakance, 0.0, 0.0, delc / 2., delr]
                        nlakecon[ilak] += 1
                        lakeconnectiondata.append(h)
                # vertical
                v = [ilak, nlakecon[ilak], (1, i, j), 'vertical', lak_leakance, 0.0,
                     0.0, 0.0, 0.0]
                nlakecon[ilak] += 1
                lakeconnectiondata.append(v)

    # Deactivate the layer-1 lake cells in the flow grid.
    i, j = np.where(lakibd > 0)
    idomain[0, i, j] = 0
    gwf.dis.idomain.set_data(idomain[0], layer=0, multiplier=[1])

    lakpackagedata = [[0, 44., nlakecon[0], 'lake1'],
                      [1, 35.2, nlakecon[1], 'lake2']]
    # <outletno> <lakein> <lakeout> <couttype> <invert> <width> <rough> <slope>
    outlets = [[0, 0, -1, 'MANNING', 44.5, 5.000000, 0.03, 0.2187500E-02]]

    lake_on = True
    if lake_on:
        lak = flopy.mf6.ModflowGwflak(gwf, time_conversion=86400.000,
                                      print_stage=True, print_flows=True,
                                      stage_filerecord=gwfname + '.lak.bin',
                                      budget_filerecord=gwfname + '.lak.bud',
                                      mover=True, pname='LAK-1',
                                      boundnames=True,
                                      nlakes=2, noutlets=len(outlets),
                                      outlets=outlets,
                                      packagedata=lakpackagedata,
                                      connectiondata=lakeconnectiondata)

    mover_on = True
    if mover_on:
        maxmvr, maxpackages = 2, 2
        mvrpack = [['SFR-1'], ['LAK-1']]
        mvrperioddata = [
            ['SFR-1', 5, 'LAK-1', 0, 'FACTOR', 1.],
            ['LAK-1', 0, 'SFR-1', 6, 'FACTOR', 1.],
        ]
        mvr = flopy.mf6.ModflowGwfmvr(gwf, maxmvr=maxmvr,
                                      print_flows=True,
                                      budget_filerecord=gwfname + '.mvr.bud',
                                      maxpackages=maxpackages,
                                      packages=mvrpack,
                                      perioddata=mvrperioddata)

    sim.write_simulation()
    sim.run_simulation(silent=False)

    # Spot-read the binary output so a malformed run fails here rather than
    # later in the transport model.
    fname = gwfname + '.hds'
    fname = os.path.join(wsf, fname)
    hobj = flopy.utils.HeadFile(fname, precision='double')
    head = hobj.get_data()
    hobj.file.close()

    if lake_on:
        fname = gwfname + '.lak.bin'
        fname = os.path.join(wsf, fname)
        lkstage = None
        if os.path.isfile(fname):
            lksobj = flopy.utils.HeadFile(fname, precision='double', text='stage')
            lkstage = lksobj.get_data().flatten()
            lksobj.file.close()

    if sfr_on:
        fname = gwfname + '.sfr.bin'
        fname = os.path.join(wsf, fname)
        sfstage = None
        if os.path.isfile(fname):
            bobj = flopy.utils.HeadFile(fname, precision='double', text='stage')
            sfstage = bobj.get_data().flatten()
            bobj.file.close()

    return
def run_transport_model():
name = 'transport'
gwtname = name
wst = os.path.join(testdir, testgroup, name)
sim = flopy.mf6.MFSimulation(sim_name=name, version='mf6',
exe_name=exe_name_mf6, sim_ws=wst,
continue_=False)
tdis_rc = [(1., 1, 1.), (365.25 * 25, 25, 1.)]
nper = len(tdis_rc)
tdis = flopy.mf6.ModflowTdis(sim, time_units='DAYS',
nper=nper, perioddata=tdis_rc)
gwt = flopy.mf6.ModflowGwt(sim, modelname=gwtname)
# ims
hclose = 0.001
rclose = 0.001
nouter = 50
ninner = 20
relax = 0.97
imsgwt = flopy.mf6.ModflowIms(sim, print_option='ALL',
outer_dvclose=hclose,
outer_maximum=nouter,
under_relaxation='DBD',
under_relaxation_theta=0.7,
inner_maximum=ninner,
inner_dvclose=hclose,
rcloserecord=rclose,
linear_acceleration='BICGSTAB',
scaling_method='NONE',
reordering_method='NONE',
relaxation_factor=relax,
filename='{}.ims'.format(gwtname))
sim.register_ims_package(imsgwt, [gwt.name])
dis = flopy.mf6.ModflowGwtdis(gwt, nlay=nlay, nrow=nrow, ncol=ncol,
delr=delr, delc=delc,
top=top, botm=botm, idomain=idomain)
ic = flopy.mf6.ModflowGwtic(gwt, strt=0.)
sto = flopy.mf6.ModflowGwtmst(gwt, porosity=0.3)
adv = flopy.mf6.ModflowGwtadv(gwt, scheme='TVD')
dsp = flopy.mf6.ModflowGwtdsp(gwt, alh=20., ath1=2, atv=0.2)
sourcerecarray = [()]
ssm = flopy.mf6.ModflowGwtssm(gwt, sources=sourcerecarray)
cnclist = [
[(0, 0, 11), 500.], [(0, 0, 12), 500.], [(0, 0, 13), 500.],
[(0, 0, 14), 500.],
[(1, 0, 11), 500.], [(1, 0, 12), 500.], [(1, 0, 13), 500.],
[(1, 0, 14), 500.],
]
cnc = flopy.mf6.ModflowGwtcnc(gwt, maxbound=len(cnclist),
stress_period_data=cnclist,
save_flows=False,
pname='CNC-1')
lktpackagedata = [(0, 0., 99., 999., 'mylake1'),
(1, 0., 99., 999., 'mylake2'), ]
lktperioddata = [(0, 'STATUS', 'ACTIVE'),
(1, 'STATUS', 'ACTIVE'),
]
lkt_obs = {(gwtname + '.lkt.obs.csv',):
[
('lkt1conc', 'CONCENTRATION', 1),
('lkt2conc', 'CONCENTRATION', 2),
('lkt1frommvr', 'FROM-MVR', (0,)),
('lkt2frommvr', 'FROM-MVR', (1,)),
('lkt1tomvr', 'TO-MVR', (0,)),
('lkt1bntomvr', 'TO-MVR', 'mylake1'),
],
}
lkt_obs['digits'] = 7
lkt_obs['print_input'] = True
lkt_obs['filename'] = gwtname + '.lkt.obs'
lkt_on = True
if lkt_on:
lkt = flopy.mf6.modflow.ModflowGwtlkt(gwt,
boundnames=True,
save_flows=True,
print_input=True,
print_flows=True,
print_concentration=True,
concentration_filerecord=gwtname + '.lkt.bin',
budget_filerecord=gwtname +
'.lkt.bud',
packagedata=lktpackagedata,
lakeperioddata=lktperioddata,
observations=lkt_obs,
pname='LAK-1',
auxiliary=['aux1', 'aux2'])
nreach = 38
sftpackagedata = []
for irno in range(nreach):
t = (irno, 0., 99., 999., 'myreach{}'.format(irno + 1))
sftpackagedata.append(t)
sftperioddata = [
(0, 'STATUS', 'ACTIVE'),
(0, 'CONCENTRATION', 0.)
]
sft_obs = {(gwtname + '.sft.obs.csv',):
[('sft{}conc'.format(i + 1), 'CONCENTRATION', i + 1) for i in
range(nreach)]}
# append additional obs attributes to obs dictionary
sft_obs['digits'] = 7
sft_obs['print_input'] = True
sft_obs['filename'] = gwtname + '.sft.obs'
sft_on = True
if sft_on:
sft = flopy.mf6.modflow.ModflowGwtsft(gwt,
boundnames=True,
save_flows=True,
print_input=True,
print_flows=True,
print_concentration=True,
concentration_filerecord=gwtname + '.sft.bin',
budget_filerecord=gwtname +
'.sft.bud',
packagedata=sftpackagedata,
reachperioddata=sftperioddata,
observations=sft_obs,
pname='SFR-1',
auxiliary=['aux1', 'aux2'])
pd = [
('GWFHEAD', '../flow/flow.hds', None),
('GWFBUDGET', '../flow/flow.bud', None),
('GWFMOVER', '../flow/flow.mvr.bud', None),
('LAK-1', '../flow/flow.lak.bud', None),
('SFR-1', '../flow/flow.sfr.bud', None),
]
fmi = flopy.mf6.ModflowGwtfmi(gwt, packagedata=pd)
# mover transport package
mvt = flopy.mf6.modflow.ModflowGwtmvt(gwt, print_flows=True)
| |
<filename>accbpg/functions.py<gh_stars>10-100
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
class RSmoothFunction:
    """
    Relatively-Smooth Function, can query f(x) and gradient.

    Abstract interface: subclasses must implement func_grad(); __call__ and
    gradient are conveniences defined in terms of it.
    """

    def __call__(self, x):
        # Fixed: "assert 0, msg" is stripped under "python -O", which would
        # make these stubs silently return None; raise explicitly instead.
        raise NotImplementedError("RSmoothFunction: __call__(x) is not defined")

    def gradient(self, x):
        raise NotImplementedError("RSmoothFunction: gradient(x) is not defined")

    def func_grad(self, x, flag):
        """
        flag=0: function, flag=1: gradient, flag=2: function & gradient
        """
        raise NotImplementedError("RSmoothFunction: func_grad(x, flag) is not defined")
class DOptimalObj(RSmoothFunction):
    """
    f(x) = - log(det(H*diag(x)*H')) where H is an m by n matrix, m < n
    """

    def __init__(self, H):
        self.H = H
        self.m = H.shape[0]
        self.n = H.shape[1]
        assert self.m < self.n, "DOptimalObj: need m < n"

    def __call__(self, x):
        # Function value only.
        return self.func_grad(x, flag=0)

    def gradient(self, x):
        # Gradient only.
        return self.func_grad(x, flag=1)

    def func_grad(self, x, flag=2):
        """Return f (flag=0), the gradient (flag=1), or both (flag=2)."""
        assert x.size == self.n, "DOptimalObj: x.size not equal to n"
        assert x.min() >= 0, "DOptimalObj: x needs to be nonnegative"
        HXHT = np.dot(self.H * x, self.H.T)
        if flag == 0:
            return -np.log(np.linalg.det(HXHT))
        # g[i] = - H[:,i]' * inv(HXHT) * H[:,i], computed column-wise.
        HXHTinvH = np.dot(np.linalg.inv(HXHT), self.H)
        g = -np.sum(self.H * HXHTinvH, axis=0)
        if flag == 1:
            return g
        return -np.log(np.linalg.det(HXHT)), g

    def func_grad_slow(self, x, flag=2):
        """Reference implementation with an explicit per-column loop."""
        assert x.size == self.n, "DOptimalObj: x.size not equal to n"
        assert x.min() >= 0, "DOptimalObj: x needs to be nonnegative"
        sx = np.sqrt(x)
        Hsx = self.H * sx  # numpy array broadcast
        HXHT = np.dot(Hsx, Hsx.T)
        if flag == 0:
            return -np.log(np.linalg.det(HXHT))
        Hsx = np.linalg.solve(HXHT, self.H)
        g = np.empty(self.n)
        for col in range(self.n):
            g[col] = -np.dot(self.H[:, col], Hsx[:, col])
        if flag == 1:
            return g
        return -np.log(np.linalg.det(HXHT)), g
class PoissonRegression(RSmoothFunction):
    """
    f(x) = D_KL(b, Ax) for linear inverse problem A * x = b
    """

    def __init__(self, A, b):
        assert A.shape[0] == b.shape[0], "A and b sizes not matching"
        self.A = A
        self.b = b
        self.m = A.shape[0]
        self.n = A.shape[1]

    def __call__(self, x):
        # Function value only.
        return self.func_grad(x, flag=0)

    def gradient(self, x):
        # Gradient only.
        return self.func_grad(x, flag=1)

    def func_grad(self, x, flag=2):
        """Return f (flag=0), the gradient (flag=1), or both (flag=2)."""
        assert x.size == self.n, "PoissonRegression: x.size not equal to n."
        Ax = np.dot(self.A, x)
        if flag == 0:
            return sum(self.b * np.log(self.b / Ax) + Ax - self.b)
        # Broadcast the per-row residual weight (1 - b_i/Ax_i) over A's rows
        # and sum down the columns; equivalent to the explicit loop
        #   g += (1 - b[i]/<A[i,:], x>) * A[i,:]
        g = ((1 - self.b / Ax).reshape(self.m, 1) * self.A).sum(axis=0)
        if flag == 1:
            return g
        return sum(self.b * np.log(self.b / Ax) + Ax - self.b), g
class KLdivRegression(RSmoothFunction):
    """
    f(x) = D_KL(Ax, b) for linear inverse problem A * x = b
    """

    def __init__(self, A, b):
        assert A.shape[0] == b.shape[0], "A and b size not matching"
        self.A = A
        self.b = b
        self.m = A.shape[0]
        self.n = A.shape[1]

    def __call__(self, x):
        return self.func_grad(x, flag=0)

    def gradient(self, x):
        return self.func_grad(x, flag=1)

    def func_grad(self, x, flag=2):
        """
        flag=0: function, flag=1: gradient, flag=2: function & gradient
        """
        # Fixed: the assertion message previously named "NonnegRegression"
        # (a different class), which made failures misleading to debug.
        assert x.size == self.n, "KLdivRegression: x.size not equal to n."
        Ax = np.dot(self.A, x)
        if flag == 0:
            fx = sum( Ax * np.log(Ax / self.b) - Ax + self.b )
            return fx
        # use array broadcasting; same as the loop
        #   g += np.log(Ax[i]/self.b[i]) * self.A[i,:]
        g = (np.log(Ax/self.b).reshape(self.m, 1) * self.A).sum(axis=0)
        if flag == 1:
            return g
        # return both function value and gradient
        fx = sum( Ax * np.log(Ax / self.b) - Ax + self.b )
        return fx, g
#######################################################################
class LegendreFunction:
    """
    Function of Legendre type, used as the kernel of Bregman divergence for
    composite optimization

        minimize_{x in C} f(x) + Psi(x)

    where f is L-smooth relative to a Legendre function h(x),
    Psi(x) is an additional simple convex function.
    """

    def __call__(self, x):
        # Fixed: "assert 0, msg" is stripped under "python -O"; raise instead.
        raise NotImplementedError("LegendreFunction: __call__(x) is not defined.")

    def extra_Psi(self, x):
        # Default: no additional Psi term.
        return 0

    def gradient(self, x):
        raise NotImplementedError("LegendreFunction: gradient(x) is not defined.")

    def divergence(self, x, y):
        """
        Return D(x,y) = h(x) - h(y) - <h'(y), x-y>
        """
        raise NotImplementedError("LegendreFunction: divergence(x,y) is not defined.")

    def prox_map(self, g, L):
        """
        Return argmin_{x in C} { Psi(x) + <g, x> + L * h(x) }
        """
        raise NotImplementedError("LegendreFunction: prox_map(x, L) is not defined.")

    def div_prox_map(self, y, g, L):
        """
        Return argmin_{x in C} { Psi(x) + <g, x> + L * D(x,y) }
        default implementation by calling prox_map(g - L*h'(y), L)
        """
        assert y.shape == g.shape, "Vectors y and g should have same size."
        assert L > 0, "Relative smoothness constant L should be positive."
        return self.prox_map(g - L*self.gradient(y), L)
class BurgEntropy(LegendreFunction):
    """
    h(x) = - sum_{i=1}^n log(x[i]) for x > 0
    """

    def __call__(self, x):
        assert x.min() > 0, "BurgEntropy only takes positive arguments."
        return -sum(np.log(x))

    def gradient(self, x):
        # h'(x)[i] = -1/x[i]
        assert x.min() > 0, "BurgEntropy only takes positive arguments."
        return -1 / x

    def divergence(self, x, y):
        """Bregman divergence D(x,y) = sum(x/y - log(x/y) - 1)."""
        assert x.shape == y.shape, "Vectors x and y are of different sizes."
        assert x.min() > 0 and y.min() > 0, "Entries of x or y not positive."
        ratio = x / y
        return sum(ratio - np.log(ratio) - 1)

    def prox_map(self, g, L):
        """
        Return argmin_{x > 0} { <g, x> + L * h(x) }
        This function needs to be replaced with inheritance
        """
        assert L > 0, "BurgEntropy prox_map only takes positive L value."
        assert g.min() > 0, "BurgEntropy prox_map only takes positive value."
        # Stationarity: g - L/x = 0  =>  x = L/g (element-wise).
        return L / g

    def div_prox_map(self, y, g, L):
        """
        Return argmin_{x > C} { <g, x> + L * D(x,y) }
        This is a general function that works for all derived classes
        """
        assert y.shape == g.shape, "Vectors y and g are of different sizes."
        assert y.min() > 0 and L > 0, "Either y or L is not positive."
        shifted = g - L * self.gradient(y)
        return self.prox_map(shifted, L)
class BurgEntropyL1(BurgEntropy):
    """
    h(x) = - sum_{i=1}^n log(x[i]) used in context of solving the problem
        min_{x > 0} f(x) + lamda * ||x||_1
    """

    def __init__(self, lamda=0, x_max=1e4):
        assert lamda >= 0, "BurgEntropyL1: lambda should be nonnegative."
        self.lamda = lamda
        self.x_max = x_max

    def extra_Psi(self, x):
        """
        return lamda * ||x||_1
        """
        # For x > 0 this is simply lamda * sum(x).
        return self.lamda * np.sum(x)

    def prox_map(self, g, L):
        """
        Return argmin_{x > 0} { lambda * ||x||_1 + <g, x> + L h(x) }
        !!! This proximal mapping may have unbounded solution x->infty
        """
        assert L > 0, "BurgEntropyL1: prox_map only takes positive L."
        assert g.min() > -self.lamda, "Not getting positive solution."
        # Closed form: x[i] = L / (lamda + g[i]); unbounded as g[i] -> -lamda.
        # (A cap is possible via: g = np.maximum(g, -self.lamda + 1.0 / self.x_max).)
        shifted = self.lamda + g
        return L / shifted
class BurgEntropyL2(BurgEntropy):
    """
    h(x) = - sum_{i=1}^n log(x[i]) used in context of solving the problem
        min_{x > 0} f(x) + (lambda/2) ||x||_2^2
    """

    def __init__(self, lamda=0):
        assert lamda >= 0, "BurgEntropyL2: lamda should be nonnegative."
        self.lamda = lamda

    def extra_Psi(self, x):
        """
        return (lamda/2) * ||x||_2^2
        """
        return (self.lamda / 2) * np.dot(x, x)

    def prox_map(self, g, L):
        """
        Return argmin_{x > 0} { (lamda/2) * ||x||_2^2 + <g, x> + L * h(x) }

        Element-wise, this is the positive root of lamda*x^2 + g*x - L = 0.
        """
        assert L > 0, "BurgEntropyL2: prox_map only takes positive L value."
        # Fixed: with the default lamda == 0 the quadratic-formula branch
        # divides by zero; the lamda -> 0 limit is the plain Burg-entropy
        # prox map x = L/g (which then requires g > 0 for positivity).
        if self.lamda == 0:
            assert g.min() > 0, "BurgEntropyL2: need positive g when lamda is 0."
            return L / g
        gg = g / L
        lamda_L = self.lamda / L
        return (np.sqrt(gg*gg + 4*lamda_L) - gg) / (2 * lamda_L)
class BurgEntropySimplex(BurgEntropy):
"""
h(x) = - sum_{i=1}^n log(x[i]) used in the context of solving
min_{x \in C} f(x) where C is the standard simplex, with Psi(x) = 0
"""
def __init__(self, eps=1e-8):
# eps is precision for solving prox_map using Newton's method
assert eps > 0, "BurgEntropySimplex: eps should be positive."
self.eps = eps
def prox_map(self, g, L):
"""
Return argmin_{x in C} { | |
# If successfully sent:
if bytes_sent == bytes_total:
# Show a confirmation to the user so they know the job was sucessful and provide the option to switch to
# the monitor tab.
self._success_message = Message(
i18n_catalog.i18nc("@info:status", "Print job was successfully sent to the printer."),
lifetime=5, dismissable=True,
title=i18n_catalog.i18nc("@info:title", "Data Sent"))
self._success_message.addAction("View", i18n_catalog.i18nc("@action:button", "View in Monitor"), icon=None,
description="")
self._success_message.actionTriggered.connect(self._successMessageActionTriggered)
self._success_message.show()
else:
if self._progress_message is not None:
self._progress_message.setProgress(0)
self._progress_message.hide()
def _progressMessageActionTriggered(self, message_id: Optional[str] = None, action_id: Optional[str] = None) -> None:
    """Handle the "Abort" action on the upload progress message."""
    if action_id != "Abort":
        return
    Logger.log("d", "User aborted sending print to remote.")
    if self._progress_message is not None:
        self._progress_message.hide()
    self._compressing_gcode = False
    self._sending_gcode = False
    CuraApplication.getInstance().getController().setActiveStage("PrepareStage")

    # After compressing the sliced model Cura sends data to printer, to stop receiving updates from the request
    # the "reply" should be disconnected
    if self._latest_reply_handler:
        self._latest_reply_handler.disconnect()
        self._latest_reply_handler = None
def _successMessageActionTriggered(self, message_id: Optional[str] = None, action_id: Optional[str] = None) -> None:
    """Switch to the monitor stage when the user clicks "View" on the success message."""
    if action_id != "View":
        return
    CuraApplication.getInstance().getController().setActiveStage("MonitorStage")
@pyqtSlot()
def openPrintJobControlPanel(self) -> None:
    """Open the cluster's print-jobs web page in the system browser."""
    Logger.log("d", "Opening print job control panel...")
    url = QUrl("http://" + self._address + "/print_jobs")
    QDesktopServices.openUrl(url)
@pyqtSlot()
def openPrinterControlPanel(self) -> None:
    """Open the cluster's printers web page in the system browser."""
    Logger.log("d", "Opening printer control panel...")
    url = QUrl("http://" + self._address + "/printers")
    QDesktopServices.openUrl(url)
@pyqtProperty("QVariantList", notify = printJobsChanged)
def printJobs(self) -> List[UM3PrintJobOutputModel]:
    """All print jobs currently known on the cluster (active and queued)."""
    return self._print_jobs
@pyqtProperty(bool, notify = receivedPrintJobsChanged)
def receivedPrintJobs(self) -> bool:
    """Whether at least one print-jobs response has been received from the cluster."""
    return self._received_print_jobs
@pyqtProperty("QVariantList", notify = printJobsChanged)
def queuedPrintJobs(self) -> List[UM3PrintJobOutputModel]:
    """Print jobs waiting in the queue, including errored ones."""
    return [job for job in self._print_jobs if job.state in ("queued", "error")]
@pyqtProperty("QVariantList", notify = printJobsChanged)
def activePrintJobs(self) -> List[UM3PrintJobOutputModel]:
    """Print jobs that are assigned to a printer and no longer queued."""
    return [job for job in self._print_jobs
            if job.assignedPrinter is not None and job.state != "queued"]
@pyqtProperty("QVariantList", notify = clusterPrintersChanged)
def connectedPrintersTypeCount(self) -> List[Dict[str, str]]:
    """Count connected printers per machine type, for display in the UI."""
    counts = {}  # type: Dict[str, int]
    for printer in self._printers:
        counts[printer.type] = counts.get(printer.type, 0) + 1
    # Dict insertion order matches the original first-seen ordering.
    return [{"machine_type": machine_type, "count": str(count)}
            for machine_type, count in counts.items()]
@pyqtProperty("QVariantList", notify=clusterPrintersChanged)
def printers(self):
    """The output models for all printers in the cluster."""
    return self._printers
@pyqtSlot(int, result = str)
def formatDuration(self, seconds: int) -> str:
    """Format a duration in seconds as a short human-readable string."""
    duration = Duration(seconds)
    return duration.getDisplayString(DurationFormat.Format.Short)
@pyqtSlot(int, result = str)
def getTimeCompleted(self, time_remaining: int) -> str:
    """Return the local wall-clock time (HH:MM) at which the job will finish."""
    completed = datetime.fromtimestamp(time() + time_remaining)
    return "{hour:02d}:{minute:02d}".format(hour=completed.hour, minute=completed.minute)
@pyqtSlot(int, result = str)
def getDateCompleted(self, time_remaining: int) -> str:
    """Return the completion date, e.g. "MON JAN 1" (uppercased, no zero-padded day)."""
    completed = datetime.fromtimestamp(time() + time_remaining)
    return (completed.strftime("%a %b ") + "{day}".format(day=completed.day)).upper()
@pyqtSlot(str)
def sendJobToTop(self, print_job_uuid: str) -> None:
    """Move the given print job to the front of the cluster queue.

    Lives on the output device (not the print job output model) because it
    mutates the cluster queue rather than the job itself.
    """
    data = "{\"to_position\": 0}"
    self.put("print_jobs/{uuid}/move_to_position".format(uuid=print_job_uuid), data, on_finished=None)
@pyqtSlot(str)
def deleteJobFromQueue(self, print_job_uuid: str) -> None:
    """Remove the given print job from the cluster queue.

    Lives on the output device (not the print job output model) because it
    mutates the cluster queue rather than the job itself.
    """
    self.delete("print_jobs/{uuid}".format(uuid=print_job_uuid), on_finished=None)
@pyqtSlot(str)
def forceSendJob(self, print_job_uuid: str) -> None:
    """Force the cluster to start the job even though changes are required."""
    payload = "{\"force\": true}"
    self.put("print_jobs/{uuid}".format(uuid=print_job_uuid), payload, on_finished=None)
def _printJobStateChanged(self) -> None:
    """Show a notification for each of the current user's jobs that newly finished."""
    username = self._getUserName()
    if username is None:
        return # We only want to show notifications if username is set.
    finished_jobs = [job for job in self._print_jobs if job.state == "wait_cleanup"]
    # Only jobs that were not finished at the previous check AND belong to
    # the current user get a notification.
    newly_finished_jobs = [job for job in finished_jobs if job not in self._finished_jobs and job.owner == username]
    for job in newly_finished_jobs:
        # NOTE(review): .format is applied to the source string BEFORE the
        # i18nc translation lookup, so translated catalogs keyed on the
        # unformatted string will not match — confirm this is intended.
        if job.assignedPrinter:
            job_completed_text = i18n_catalog.i18nc("@info:status", "Printer '{printer_name}' has finished printing '{job_name}'.".format(printer_name=job.assignedPrinter.name, job_name = job.name))
        else:
            job_completed_text = i18n_catalog.i18nc("@info:status", "The print job '{job_name}' was finished.".format(job_name = job.name))
        job_completed_message = Message(text=job_completed_text, title = i18n_catalog.i18nc("@info:status", "Print finished"))
        job_completed_message.show()

    # Ensure UI gets updated
    self.printJobsChanged.emit()

    # Keep a list of all completed jobs so we know if something changed next time.
    self._finished_jobs = finished_jobs
## Called when the connection to the cluster changes.
def connect(self) -> None:
    """Connect to the cluster, then push our material profiles to it."""
    super().connect()
    self.sendMaterialProfiles()
def _onGetPreviewImageFinished(self, reply: QNetworkReply) -> None:
    """Attach a downloaded preview image to the print job it belongs to."""
    reply_url = reply.url().toString()
    # The job UUID sits between "print_jobs/" and "/preview_image" in the URL.
    start = reply_url.find("print_jobs/") + len("print_jobs/")
    end = reply_url.rfind("/preview_image")
    print_job = findByKey(self._print_jobs, reply_url[start:end])
    if print_job:
        image = QImage()
        image.loadFromData(reply.readAll())
        print_job.updatePreviewImage(image)
def _update(self) -> None:
    """Poll the cluster for printers, print jobs, and any missing preview images."""
    super()._update()
    self.get("printers/", on_finished=self._onGetPrintersDataFinished)
    self.get("print_jobs/", on_finished=self._onGetPrintJobsFinished)

    # Request preview images for jobs that do not have one yet.
    for job in self._print_jobs:
        if job.getPreviewImage() is None:
            self.get("print_jobs/{uuid}/preview_image".format(uuid=job.key), on_finished=self._onGetPreviewImageFinished)
def _onGetPrintJobsFinished(self, reply: QNetworkReply) -> None:
    """Sync the local print-job models with the cluster's print-job list reply."""
    self._received_print_jobs = True
    self.receivedPrintJobsChanged.emit()

    if not checkValidGetReply(reply):
        return

    result = loadJsonFromReply(reply)
    if result is None:
        return

    print_jobs_seen = []
    job_list_changed = False
    for idx, print_job_data in enumerate(result):
        print_job = findByKey(self._print_jobs, print_job_data["uuid"])
        if print_job is None:
            # Unknown job: create a local model for it.
            print_job = self._createPrintJobModel(print_job_data)
            job_list_changed = True
        elif not job_list_changed:
            # Check if the order of the jobs has changed since the last check
            if self._print_jobs.index(print_job) != idx:
                job_list_changed = True

        self._updatePrintJob(print_job, print_job_data)

        if print_job.state != "queued" and print_job.state != "error": # Print job should be assigned to a printer.
            if print_job.state in ["failed", "finished", "aborted", "none"]:
                # Print job was already completed, so don't attach it to a printer.
                printer = None
            else:
                printer = self._getPrinterByKey(print_job_data["printer_uuid"])
        else:  # The job can "reserve" a printer if some changes are required.
            printer = self._getPrinterByKey(print_job_data["assigned_to"])

        if printer:
            printer.updateActivePrintJob(print_job)

        print_jobs_seen.append(print_job)

    # Check what jobs need to be removed.
    removed_jobs = [print_job for print_job in self._print_jobs if print_job not in print_jobs_seen]

    for removed_job in removed_jobs:
        job_list_changed = job_list_changed or self._removeJob(removed_job)

    if job_list_changed:
        # Override the old list with the new list (either because jobs were removed / added or order changed)
        self._print_jobs = print_jobs_seen
        self.printJobsChanged.emit() # Do a single emit for all print job changes.
def _onGetPrintersDataFinished(self, reply: QNetworkReply) -> None:
    """Process the reply of the periodic "printers/" GET request and sync
    self._printers with it, emitting printersChanged on any difference."""
    if not checkValidGetReply(reply):
        return
    result = loadJsonFromReply(reply)
    if result is None:
        return

    new_printer_added = False
    known_printers = []
    for printer_data in result:
        model = findByKey(self._printers, printer_data["uuid"])
        if model is None:
            model = self._createPrinterModel(printer_data)
            new_printer_added = True
        known_printers.append(model)
        self._updatePrinter(model, printer_data)

    # Anything we hold locally but the cluster no longer reports is stale.
    stale_printers = [p for p in self._printers if p not in known_printers]
    for stale in stale_printers:
        self._removePrinter(stale)

    if stale_printers or new_printer_added:
        self.printersChanged.emit()
def _createPrinterModel(self, data: Dict[str, Any]) -> PrinterOutputModel:
    """Create a printer model for the given cluster data, register it in
    self._printers and return it."""
    controller = ClusterUM3PrinterOutputController(self)
    model = PrinterOutputModel(output_controller = controller,
                               number_of_extruders = self._number_of_extruders)
    stream_url = "http://" + data["ip_address"] + ":8080/?action=stream"
    model.setCameraUrl(QUrl(stream_url))
    self._printers.append(model)
    return model
def _createPrintJobModel(self, data: Dict[str, Any]) -> UM3PrintJobOutputModel:
    """Create a print-job model from cluster print-job data.

    Builds one ExtruderConfigurationModel per extruder, fills it from the
    job's "configuration" list (entries may be missing for some extruders),
    wires up state-change notifications and returns the model.
    """
    print_job = UM3PrintJobOutputModel(output_controller=ClusterUM3PrinterOutputController(self),
                                       key=data["uuid"], name=data["name"])
    configuration = ConfigurationModel()
    extruders = [ExtruderConfigurationModel(position=idx) for idx in range(self._number_of_extruders)]
    for index in range(self._number_of_extruders):
        try:
            extruder_data = data["configuration"][index]
        except IndexError:
            # The cluster reported fewer configuration entries than extruders.
            continue
        # Use the already-fetched entry instead of re-indexing data["configuration"].
        extruder = extruders[int(extruder_data["extruder_index"])]
        extruder.setHotendID(extruder_data.get("print_core_id", ""))
        extruder.setMaterial(self._createMaterialOutputModel(extruder_data.get("material", {})))
    configuration.setExtruderConfigurations(extruders)
    print_job.updateConfiguration(configuration)
    print_job.setCompatibleMachineFamilies(data.get("compatible_machine_families", []))
    print_job.stateChanged.connect(self._printJobStateChanged)
    return print_job
def _updatePrintJob(self, print_job: UM3PrintJobOutputModel, data: Dict[str, Any]) -> None:
    """Copy time, owner, state and required configuration changes from the
    cluster data onto an existing print-job model. An UNFIXABLE impediment
    forces the state to "error" regardless of the reported status."""
    print_job.updateTimeTotal(data["time_total"])
    print_job.updateTimeElapsed(data["time_elapsed"])
    print_job.updateOwner(data["owner"])
    unfixable = any(item["severity"] == "UNFIXABLE"
                    for item in data.get("impediments_to_printing", []))
    if unfixable:
        print_job.updateState("error")
    else:
        print_job.updateState(data["status"])
    print_job.updateConfigurationChanges(self._createConfigurationChanges(data["configuration_changes_required"]))
def _createConfigurationChanges(self, data: List[Dict[str, Any]]) -> List[ConfigurationChangeModel]:
    """Translate the cluster's list of required configuration changes into
    ConfigurationChangeModel instances."""
    return [ConfigurationChangeModel(type_of_change=change["type_of_change"],
                                     index=change["index"],
                                     target_name=change["target_name"],
                                     origin_name=change["origin_name"])
            for change in data]
def _createMaterialOutputModel(self, material_data) -> MaterialOutputModel:
containers = ContainerRegistry.getInstance().findInstanceContainers(type="material", GUID=material_data["guid"])
if containers:
color = containers[0].getMetaDataEntry("color_code")
brand = containers[0].getMetaDataEntry("brand")
material_type = containers[0].getMetaDataEntry("material")
name = containers[0].getName()
else:
Logger.log("w",
"Unable to find material with guid {guid}. Using data as provided by cluster".format(
guid=material_data["guid"]))
color = material_data["color"]
brand = material_data["brand"]
material_type = material_data["material"]
name = "Empty" if material_data["material"] == "empty" else "Unknown"
return MaterialOutputModel(guid=material_data["guid"], type=material_type,
brand=brand, color=color, name=name)
def _updatePrinter(self, printer: PrinterOutputModel, data: Dict[str, Any]) -> None:
    """Copy the printer state received from the cluster onto the local model.

    Also records the uuid -> unique_name mapping needed when sending a job
    directly to a printer. Returns early (skipping all updates) when the
    machine variant has no local definition container.
    """
    # For some unknown reason the cluster wants UUID for everything, except for sending a job directly to a printer.
    # Then we suddenly need the unique name. So in order to not have to mess up all the other code, we save a mapping.
    self._printer_uuid_to_unique_name_mapping[data["uuid"]] = data["unique_name"]

    definitions = ContainerRegistry.getInstance().findDefinitionContainers(name = data["machine_variant"])
    if not definitions:
        Logger.log("w", "Unable to find definition for machine variant %s", data["machine_variant"])
        return

    machine_definition = definitions[0]

    printer.updateName(data["friendly_name"])
    printer.updateKey(data["uuid"])
    printer.updateType(data["machine_variant"])

    # Do not store the build plate information from the cluster unless this
    # machine type actually has variant build plates.
    if "build_plate" in data and machine_definition.getMetaDataEntry("has_variant_buildplates", False):
        printer.updateBuildplateName(data["build_plate"]["type"])
    if not data["enabled"]:
        printer.updateState("disabled")
    else:
        printer.updateState(data["status"])

    for index in range(0, self._number_of_extruders):
        extruder = printer.extruders[index]
        try:
            extruder_data = data["configuration"][index]
        except IndexError:
            # Cluster reported fewer configuration entries than we have extruders.
            break

        extruder.updateHotendID(extruder_data.get("print_core_id", ""))

        material_data = extruder_data["material"]
        # Only rebuild the material model when the GUID actually changed,
        # avoiding a container-registry lookup on every poll.
        if extruder.activeMaterial is None or extruder.activeMaterial.guid != material_data["guid"]:
            material = self._createMaterialOutputModel(material_data)
            extruder.updateActiveMaterial(material)
def _removeJob(self, job: UM3PrintJobOutputModel) -> bool:
    """Detach `job` from its printer and from self._print_jobs.

    Returns True when the job was known and removed, False otherwise."""
    if job not in self._print_jobs:
        return False

    assigned = job.assignedPrinter
    if assigned:
        assigned.updateActivePrintJob(None)
    job.stateChanged.disconnect(self._printJobStateChanged)
    self._print_jobs.remove(job)
    return True
def _removePrinter(self, printer: PrinterOutputModel) -> None:
self._printers.remove(printer)
if self._active_printer == | |
diagonal cases:
# first case: increasing the row number and the column number:
elif i + 4 < len(board) and j + 4 < len(board):
temp = []
for a in range(5):
temp.append(board[i+a][j+a])
if (temp == ["b"] * 5):
return 1, i, j
if (temp == ["w"] * 5):
return 0, i, j
# second case: increasing the row number but decreasing the column number:
elif i + 4 < len(board) and j - 4 < len(board):
temp_3 = []
for d in range(5):
temp_3.append(board[i+d][j-d])
if (temp_3 == ["b"] * 5):
return 1, i, j
if (temp_3 == ["w"] * 5):
return 0, i, j
if draw:
return 2
return 3
def newiswin(board):
    """Determine the state of a gomoku game on a square board.

    Args:
        board: square list-of-lists holding "b", "w" or " ".

    Returns:
        1 if black has a row of exactly five stones,
        0 if white has a row of exactly five stones,
        2 if the board is full with no winner (draw),
        3 otherwise (game continues).

    Six or more in a row is NOT a win ("exactly five" rule), so the cell
    just before and just after each candidate window is also inspected.

    Fixes over the previous version:
      * boundary guards no longer hard-code a board size of 8 — all
        bounds derive from len(board);
      * both diagonal scans now check that j + 4 is inside the board;
        previously a stone near the right edge raised IndexError
        (e.g. "b" at (0, 7) probed board[1][8]).
    """
    n = len(board)
    # Scan directions: east, south-east, north-east, south.
    directions = ((0, 1), (1, 1), (-1, 1), (1, 0))
    full = True  # stays True only if no empty cell is ever seen
    for i in range(n):
        for j in range(n):
            color = board[i][j]
            if color == ' ':
                full = False
                continue
            for d_y, d_x in directions:
                end_y = i + 4 * d_y
                end_x = j + 4 * d_x
                # The whole five-stone window must fit on the board.
                if not (0 <= end_y < n and 0 <= end_x < n):
                    continue
                if any(board[i + k * d_y][j + k * d_x] != color for k in range(1, 5)):
                    continue
                # Exactly five: the cell before the window must not extend the row...
                prev_y, prev_x = i - d_y, j - d_x
                if 0 <= prev_y < n and 0 <= prev_x < n and board[prev_y][prev_x] == color:
                    continue
                # ...and neither may the cell after it.
                next_y, next_x = i + 5 * d_y, j + 5 * d_x
                if 0 <= next_y < n and 0 <= next_x < n and board[next_y][next_x] == color:
                    continue
                return 1 if color == 'b' else 0
    if full:
        return 2
    return 3
def is_win(board):
    """Translate newiswin's numeric result code into a readable game state."""
    code = newiswin(board)
    if code == 0:
        return "White won"
    if code == 1:
        return "Black won"
    if code == 2:
        return "Draw"
    return "Continue playing"
def print_board(board):  # return void
    """Print the board: a header of column digits, one line per row
    prefixed with the row digit, and a bottom border of asterisks."""
    width = len(board[0])
    header = "*" + "|".join(str(col % 10) for col in range(width)) + "*"
    lines = [header]
    for row_idx, row in enumerate(board):
        lines.append(str(row_idx % 10) + "|".join(str(cell) for cell in row) + "*")
    lines.append("*" * (2 * width + 1))
    print("\n".join(lines))
def make_empty_board(sz):
    """Return a new sz x sz board filled with blanks; rows are independent lists."""
    return [[" " for _ in range(sz)] for _ in range(sz)]
def analysis(board):
    """Print, for each colour, the open and semi-open row counts for
    lengths 2 through 5 (as reported by detect_rows)."""
    for stone, label in (("b", "Black"), ("w", "White")):
        print("%s stones" % (label))
        for length in range(2, 6):
            open_count, semi_open_count = detect_rows(board, stone, length)
            print("Open rows of length %d: %d" % (length, open_count))
            print("Semi-open rows of length %d: %d" % (length, semi_open_count))
def play_gomoku(board_size):
    """Play an interactive gomoku game: computer ("b") vs human ("w").

    The computer opens in the centre of an empty board and otherwise moves
    via search_max; the human enters coordinates on stdin. Returns the
    final game state string from is_win.

    The previously duplicated print/analyse/check sequence after each move
    is factored into _report_position.
    """
    board = make_empty_board(board_size)
    board_height = len(board)
    board_width = len(board[0])
    while True:
        print_board(board)
        if is_empty(board):
            move_y = board_height // 2
            move_x = board_width // 2
        else:
            move_y, move_x = search_max(board)
        print("Computer move: (%d, %d)" % (move_y, move_x))
        board[move_y][move_x] = "b"
        game_res = _report_position(board)
        if game_res in ["White won", "Black won", "Draw"]:
            return game_res

        print("Your move:")
        move_y = int(input("y coord: "))
        move_x = int(input("x coord: "))
        board[move_y][move_x] = "w"
        game_res = _report_position(board)
        if game_res in ["White won", "Black won", "Draw"]:
            return game_res

def _report_position(board):
    """Print the board and its analysis, then return the current game state."""
    print_board(board)
    analysis(board)
    return is_win(board)
def put_seq_on_board(board, y, x, d_y, d_x, length, col):
    """Place `length` stones of colour `col` starting at (y, x), stepping
    by (d_y, d_x) between consecutive stones. Mutates `board` in place."""
    for step in range(length):
        board[y + step * d_y][x + step * d_x] = col
def test_is_empty():
    """Smoke test: a freshly created board must be reported as empty."""
    fresh = make_empty_board(8)
    if not is_empty(fresh):
        print("TEST CASE for is_empty FAILED")
    else:
        print("TEST CASE for is_empty PASSED")
def test_is_bounded():
    """Smoke test: a vertical run of three "w" on an open board is OPEN."""
    board = make_empty_board(8)
    length = 3
    put_seq_on_board(board, 1, 5, 1, 0, length, "w")
    print_board(board)
    outcome = is_bounded(board, 3, 5, length, 1, 0)
    if outcome == 'OPEN':
        print("TEST CASE for is_bounded PASSED")
    else:
        print("TEST CASE for is_bounded FAILED")
def test_detect_row():
    """Smoke test: detect_row finds one open, zero semi-open runs of three "w"."""
    board = make_empty_board(8)
    put_seq_on_board(board, 1, 5, 1, 0, 3, "w")
    print_board(board)
    if detect_row(board, "w", 0, 5, 3, 1, 0) == (1, 0):
        print("TEST CASE for detect_row PASSED")
    else:
        print("TEST CASE for detect_row FAILED")
def test_detect_rows():
    """Smoke test: detect_rows finds one open, zero semi-open runs of three "w"."""
    board = make_empty_board(8)
    put_seq_on_board(board, 1, 5, 1, 0, 3, "w")
    print_board(board)
    if detect_rows(board, "w", 3) == (1, 0):
        print("TEST CASE for detect_rows PASSED")
    else:
        print("TEST CASE for detect_rows FAILED")
def test_search_max():
    """Smoke test: with four "w" in column 5 and four "b" in column 6,
    the engine should complete black's row at (4, 6)."""
    board = make_empty_board(8)
    put_seq_on_board(board, 0, 5, 1, 0, 4, "w")
    put_seq_on_board(board, 0, 6, 1, 0, 4, "b")
    print_board(board)
    if search_max(board) == (4, 6):
        print("TEST CASE for search_max PASSED")
    else:
        print("TEST CASE for search_max FAILED")
def easy_testset_for_main_functions():
    """Run the quick smoke tests for the main gomoku helper functions."""
    for case in (test_is_empty, test_is_bounded, test_detect_row,
                 test_detect_rows, test_search_max):
        case()
def some_tests():
    """Set up a small example position and print its board and analysis."""
    board = make_empty_board(8)
    board[0][5] = "w"
    board[0][6] = "b"
    # Three "w" stones straight down from (5, 2).
    put_seq_on_board(board, 5, 2, 1, 0, 3, "w")
    print_board(board)
    analysis(board)

    # Expected output:
    # *0|1|2|3|4|5|6|7*
    # 0 | | | | |w|b| *
    # 1 | | | | | | | *
    # 2 | | | | | | | *
    # 3 | | | | | | | *
    # 4 | | | | | | | *
    # 5 | |w| | | | | *
    # 6 | |w| | | | | *
    # 7 | |w| | | | | *
    # *****************
    # Black stones:
    # Open rows of length 2: 0
    # Semi-open rows of length 2: 0
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.