input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
dy = self.map_canvas.yview()[0] * len(self.tilemap)
x = max(int(dx + event.x / 64), 0)
y = max(int(dy + event.y / 64), 0)
# Regular base-layer tile mode
if view_mode.get() == 0:
map_of_tiles = list(self.tilemap)
self.map_canvas.create_image((32 + 64 * x, 32 + 64 * y), image=App.tiles[selected_image.get()])
row = list(map_of_tiles[y])
row[x] = App.translate_tk2f[App.tiles[selected_image.get()]]
map_of_tiles[y] = row
# Decoration layer mode
elif view_mode.get() == 1:
map_of_tiles = list(self.decomap)
row = list(map_of_tiles[y])
self.map_canvas.create_image((32 + 64 * x, 32 + 64 * y), image=App.decos[selected_deco.get()])
if selected_deco.get() != 0:
row[x] = App.translate_tk2f[App.decos[selected_deco.get()]]
else:
row[x] = 0
map_of_tiles[y] = row
# Loading zone mode
elif view_mode.get() == 2:
# Delete loading zone
if selected_load.get() == 0:
self.map_canvas.create_image((32 + 64 * x, 32 + 64 * y), image=self.load_imgs[0])
try:
del self.loading_zones[(x, y)]
except:
pass
# Add loading zone
elif selected_load.get() == 1:
self.loading_zones[(x, y)] = []
self.map_canvas.create_image((32 + 64 * x, 32 + 64 * y), image=self.load_imgs[1])
# Configure loading zone
elif selected_load.get() == 2:
self.map_canvas.create_image((32 + 64 * x, 32 + 64 * y), image=self.load_imgs[2])
if (x, y) in self.loading_zones:
new_loading_zone = EditLoadDestination(master, title="Edit Destination", args=self.loading_zones[(x, y)]).result
if new_loading_zone != None:
self.loading_zones[(x, y)] = new_loading_zone
redraw_map_canvas()
# Copy loading zone settings
elif selected_load.get() == 3:
self.map_canvas.create_image((32 + 64 * x, 32 + 64 * y), image=self.load_imgs[3])
if (x, y) in self.loading_zones:
self.copied_load_settings = self.loading_zones[(x, y)]
# Paste loading zone settings
elif selected_load.get() == 4:
self.map_canvas.create_image((32 + 64 * x, 32 + 64 * y), image=self.load_imgs[4])
if (x, y) in self.loading_zones and self.copied_load_settings != None:
self.loading_zones[(x, y)] = self.copied_load_settings
else:
print("Unknown instruction")
# Lightmap mode
else:
# Delete light source
if selected_light.get() == 0:
try:
self.map_canvas.create_image((32 + 64 * x, 32 + 64 * y), image=self.light_imgs[0])
self.light_sources.remove((x, y))
except ValueError:
pass
# Add basic 3x3 light source
if selected_light.get() == 1:
if not (x, y) in self.light_sources:
self.light_sources.append((x, y))
self.map_canvas.create_image((32 + 64 * x, 32 + 64 * y), image=self.light_imgs[1])
except IndexError:
return
if view_mode.get() == 0:
self.tilemap = map_of_tiles
elif view_mode.get() == 1:
self.decomap = map_of_tiles
else:
pass
elif cursor_mode.get() == 1:
# Canvas dragging mode
if self.start != None:
self.map_canvas.scan_mark(self.start[0], self.start[1])
self.start = None
self.map_canvas.scan_dragto(event.x, event.y, gain=1)
else:
# Do nothing, usually to lock-out input while canvas is reloading.
pass
def redraw(event):
    """Repaint the whole map canvas after a mouse-button release."""
    redraw_map_canvas()
def set_coords_label(event):
    """Show the tile coordinate under the mouse in the status label."""
    # Scroll offset expressed in tiles (xview/yview return fractions of the map).
    scroll_x = self.map_canvas.xview()[0] * len(self.tilemap[0])
    scroll_y = self.map_canvas.yview()[0] * len(self.tilemap)
    # 64 px per tile: translate the pixel position into tile indices.
    tile_x = int(scroll_x + event.x / 64)
    tile_y = int(scroll_y + event.y / 64)
    self.coords_label.config(text="x: {}, y: {}".format(tile_x, tile_y))
self.map_canvas = Canvas(self.map_frame, width=64*16, height=64*9, bg="WHITE", bd=0)
self.map_canvas.grid(row=0, column=0)
self.map_vbar = Scrollbar(self.map_frame)
self.map_vbar.config(command=self.map_canvas.yview)
self.map_vbar.grid(row=0, column=1, sticky=N+S)
self.map_vbar.activate("slider")
self.map_hbar = Scrollbar(self.map_frame, orient=HORIZONTAL)
self.map_hbar.config(command=self.map_canvas.xview)
self.map_hbar.grid(row=1, column=0, sticky=E+W)
self.map_hbar.activate("slider")
self.map_canvas.config(scrollregion=(0, 0, 64*16, 64*9), xscrollcommand=self.map_hbar.set, yscrollcommand=self.map_vbar.set)
for i in range(0, 9 + 1):
self.map_canvas.create_line(0, 64 * i, 64 * 16, 64 * i)
for j in range(0, 16 + 1):
self.map_canvas.create_line(64 * j, 0, 64 * j, 64 * 9)
self.map_canvas.bind("<Button-1>", callback)
self.map_canvas.bind("<ButtonPress-1>", mark_start)
self.map_canvas.bind("<ButtonRelease-1>", redraw)
self.map_canvas.bind("<B1-Motion>", callback)
self.map_canvas.bind("<Motion>", set_coords_label)
# Menu bar function setup
def open_file():
    """Prompt for a .tilemap file and load every map layer from it.

    The file holds one Python literal per line: tilemap, decomap,
    colliders, loading zones, light sources, default spawn. On any
    read/parse failure the previous editor state is restored and a
    warning dialog is shown instead of crashing the editor.
    """
    f = askopenfilename(filetypes=[("Tilemap", "*.tilemap")], defaultextension=[("Tilemap", "*.tilemap")])
    # FIX: the original used `f is ''` — an identity comparison with a str
    # literal, which is fragile and a SyntaxWarning on modern Python.
    # Cancelled dialogs may also return () on some platforms; `not f`
    # covers both.
    if not f:
        return
    # Snapshot current state so a failed load can be rolled back.
    current_state = (self.tilemap, self.decomap, self.directory,
                     self.colliders, self.loading_zones,
                     self.light_sources, self.default_start)
    try:
        with open(f) as rf:
            new_tilemap = literal_eval(rf.readline())
            print("Found {} by {} tilemap".format(len(new_tilemap), len(new_tilemap[0])))
            self.tilemap = new_tilemap
            new_decomap = literal_eval(rf.readline())
            print("Found {} by {} decomap".format(len(new_decomap), len(new_decomap[0])))
            self.decomap = new_decomap
            new_colliders = literal_eval(rf.readline())
            print("Found collision list:", new_colliders)
            self.colliders = new_colliders
            new_loading_zones = literal_eval(rf.readline())
            print("Found loading zone dictionary:", new_loading_zones)
            self.loading_zones = new_loading_zones
            new_light_sources = literal_eval(rf.readline())
            print("Found light source list:", new_light_sources)
            self.light_sources = new_light_sources
            new_default_start = literal_eval(rf.readline())
            print("Found default spawn:", new_default_start)
            self.default_start = new_default_start
        redraw_map_canvas()
        self.directory = f
        saved.set(1)
    except Exception:
        # Re-enabled from the commented-out recovery path: a malformed file
        # must not leave the editor with a half-loaded map.
        showwarning("File Error", "Error: Could not open file.")
        (self.tilemap, self.decomap, self.directory,
         self.colliders, self.loading_zones,
         self.light_sources, self.default_start) = current_state
        redraw_map_canvas()
        saved.set(0)
def save_file():
    """Serialize the map to its current file, or fall back to Save-As.

    Writes one Python literal per line: tilemap, decomap, colliders,
    loading zones, light sources, default spawn — the format read back
    by open_file().
    """
    if self.directory == "no_file":
        # No destination chosen yet; the Save-As dialog also marks the map saved.
        save_file_as()
    else:
        sections = (self.tilemap, self.decomap, self.colliders,
                    self.loading_zones, self.light_sources, self.default_start)
        # FIX: use a context manager — the original open()/close() pair
        # leaked the handle whenever write() raised.
        with open(self.directory, "w") as f:
            f.write("\n".join(str(section) for section in sections))
        saved.set(1)
def save_file_as():
    """Prompt for a destination file and serialize the map into it.

    Same line-per-literal format as save_file(); also records the chosen
    path as the editor's current directory.
    """
    f = asksaveasfile(mode="w", filetypes=[("Tilemap", "*.tilemap")], defaultextension=[("Tilemap", "*.tilemap")])
    if f is None:  # dialog cancelled
        return
    sections = (self.tilemap, self.decomap, self.colliders,
                self.loading_zones, self.light_sources, self.default_start)
    self.directory = f.name
    # FIX: `with f` guarantees the already-open handle is closed even if
    # write() raises; the original's bare f.close() was skipped on error.
    with f:
        f.write("\n".join(str(section) for section in sections))
    saved.set(1)
def new():
    """Reset the editor to a blank 16x9 map, prompting about unsaved work first."""
    if saved.get() == 0:
        choice = askyesnocancel("Worldbuilder", "Progress is unsaved. Would you like to save first?", icon='warning')
        if choice is None:
            # Cancel: abort the whole operation.
            return
        if choice:
            # Yes: save, then stop (matches the original flow, which does
            # not reset the map after saving).
            save_file()
            return
        # No: fall through and discard the unsaved progress.
    self.tilemap = build_matrix(16, 9)
    self.decomap = build_matrix(16, 9)
    self.directory = "no_file"
    self.colliders = []
    self.loading_zones = {}
    self.copied_load_settings = None
    redraw_map_canvas()
def tilemap2string(tilemap, ids, spacing):
    """Render *tilemap* as a pretty-printed nested-list literal.

    Args:
        tilemap: 2D list (rows of tiles).
        ids: mapping tile-value -> numeric id; values missing from the
            mapping pass through unchanged.
        spacing: extra leading spaces for continuation rows so they line
            up under the opening bracket.

    Returns:
        (string_version, used): the formatted string, and a dict mapping
        each id that actually appears back to its original tile value
        (raises KeyError if an appearing value has no entry in *ids*,
        matching the original behavior).
    """
    rev_ids = {v: k for k, v in ids.items()}
    translated_map = [[ids.get(item, item) for item in row] for row in tilemap]
    # Collect the distinct ids in use, sorted for stable output.
    used_list = []
    for row in translated_map:
        for tile in row:
            if tile not in used_list:
                used_list.append(tile)
    used_list.sort()
    used = {tile: rev_ids[tile] for tile in used_list}
    if not translated_map:
        return "[]", used
    indent = " " * (spacing + 1)
    string_version = "["
    for row in translated_map[:-1]:
        string_version += str(row) + ",\n" + indent
    # BUG FIX: the original appended str(translated_map[:-1]) — the whole
    # map minus its final row — instead of the final row itself.
    string_version += str(translated_map[-1]) + "]"
    return string_version, used
def export_file():
    """Export the map as a game-ready .json file chosen via a save dialog.

    Tile/deco values are translated to numeric ids; a KeyError from an
    unassigned id is reported with a warning dialog instead of crashing.
    """
    f = asksaveasfile(mode='w', filetypes=[('JSON File', '*.json')], defaultextension=[('JSON File', '*.json')])
    if f is None:  # dialog cancelled
        return
    try:
        export_dict = {}
        # Translate the tilemap and decomap to numerical ID's
        translated_tilemap = [[App.tile_ids.get(item, item) for item in row]
                              for row in self.tilemap]
        translated_decomap = [[App.deco_ids.get(item, item) for item in row]
                              for row in self.decomap]
        # Export only colliders that are being used on the map.
        # NOTE(review): appended once per occurrence, so ids can repeat in
        # the output — preserved from the original; confirm the game
        # expects that.
        used_colliders = []
        for row in translated_tilemap:
            for tile in row:
                if tile in self.colliders:
                    used_colliders.append(tile)
        export_dict["colliders"] = used_colliders
        # Export tilemap and decomap
        export_dict["tilemap"] = translated_tilemap
        export_dict["decomap"] = translated_decomap
        # Loading zones: {(x, y): [level, pos]} -> list of records
        export_dict["loading_zones"] = [
            {"zone": zone, "target_level": dest[0], "target_pos": dest[1]}
            for zone, dest in self.loading_zones.items()
        ]
        # Export lightmap, default spawn, and level name (file stem)
        export_dict["lightmap"] = self.light_sources
        export_dict["spawn"] = self.default_start
        export_dict["name"] = path.splitext(path.basename(f.name))[0]
        # Save dictionary as .json file
        json.dump(export_dict, f)
    except KeyError:
        showwarning("Export Error", "One of the exported images has not been assigned an ID.")
    finally:
        # FIX: the original closed the file after the except block, so any
        # non-KeyError exception leaked the handle.
        f.close()
def add_rows():
    """Ask for a count and append that many rows to both map layers."""
    count = EnterNumber(master, title="Add Rows", text="Add Rows").result
    # Bail out on cancel or a negative count.
    if count is None or count < 0:
        return
    for _ in range(count):
        self.tilemap = add_row(self.tilemap)
        self.decomap = add_row(self.decomap)
    redraw_map_canvas()
def add_columns():
    """Ask for a count and append that many columns to both map layers."""
    count = EnterNumber(master, title="Add Column", text="Add Column").result
    # Bail out on cancel or a negative count.
    if count is None or count < 0:
        return
    for _ in range(count):
        self.tilemap = add_column(self.tilemap)
        self.decomap = add_column(self.decomap)
    redraw_map_canvas()
def delete_rows():
    """Ask for a count and delete that many rows, keeping at least 9."""
    # FIX: the dialog said "Delete Column" — a copy-paste slip from
    # delete_columns().
    number = EnterNumber(master, title="Delete Rows", text="Delete Rows").result
    # Guard against cancel and negative input (mirrors add_rows; a
    # negative count was already a no-op in the original loop).
    if number is None or number < 0:
        return
    if len(self.tilemap) - number >= 9:
        for _ in range(number):
            self.tilemap = delete_row(self.tilemap)
            self.decomap = delete_row(self.decomap)
        redraw_map_canvas()
    else:
        showwarning("Invalid Size", "Tilemaps cannot have a height smaller than 9 tiles.")
def delete_columns():
    """Ask for a count and delete that many columns, keeping at least 16."""
    count = EnterNumber(master, title="Delete Column", text="Delete Column").result
    if count is None:
        return
    # Refuse to shrink below the minimum visible width.
    if len(self.tilemap[0]) - count < 16:
        showwarning("Invalid Size", "Tilemaps cannot have a width smaller than 16 tiles.")
        return
    for _ in range(count):
        self.tilemap = delete_column(self.tilemap)
        self.decomap = delete_column(self.decomap)
    redraw_map_canvas()
def edit_default_pos():
    """Prompt for (x, y) and set the default spawn if it lies on the map."""
    new_pos = Enter2Numbers(master, title="Default Spawn Position", text="(x, y)").result
    if new_pos is None:
        return
    # FIX: valid tile indices run 0..width-1 / 0..height-1; the original
    # `<= len(...)` check accepted a position one tile off the map edge.
    on_map = (0 <= new_pos[0] < len(self.tilemap[0])
              and 0 <= new_pos[1] < len(self.tilemap))
    if on_map:
        self.default_start = new_pos
    else:
        showwarning("Invalid Position", "Spawn position must be on the map")
def check_config():
tile_ids_copy = dict(list(self.tile_ids.items())[1:])
ids = list(i[1] for i in tile_ids_copy.items())
dupes = [value for index, value in enumerate(ids) if value in ids[:index]]
if dupes != []:
showwarning("Tile Id List Error", 'Duplicate id(s) were found in the tile id list:\n{}\n\nPlease edit the list using "Manage Tile Ids" and check again for errors using "Check ID List"'.format(dupes))
deco_ids_copy = dict(list(self.deco_ids.items())[1:])
ids = list(i[1] for i in deco_ids_copy.items())
dupes = [value for index, value in enumerate(ids) if value in ids[:index]]
if dupes | |
<filename>models/tripletext2seq.py
from __future__ import print_function
import time
import math
import pickle
import tensorflow as tf
from tensorflow.python.layers.core import Dense
import numpy as np
class TripleText2SeqModel():
"""
This Model is called triples sequences to sequence model
model takes a single triple and multiple sequences as an input and outputs
a single sequence.
This model is equipped by two attention modules
- attention over the input triples
- attention over each encoded vector of each word in the
input sequences
- Triple Encoder:
- Entities Encoded through Entity Embeddings
- Predicates Encoded Through Predicate Embeddings
- Sequences Encoder:
- a separate RNN over Word Embeddings of each input sequence
Data preparation:
- This model doesn't handle Additional tokens `<unk> <rare> <pad>`
those are expected to be added beforehand to the vocabulary
- vocabulary is created offline
- The inputs to the decoder are preprocessed beforehand to start with `<s>` and end with `</s>`
- targets are decoder inputs shifted by one (to ignore start symbol)
"""
def __init__(self, config, mode='training'):
    """Store the configuration and build the whole graph.

    Args:
        config: hyperparameter/config object read throughout graph building.
        mode: one of 'training', 'evaluation', 'inference'.
    """
    print('Initializing new seq 2 seq model')
    assert mode in ('training', 'evaluation', 'inference')
    self.config = config
    self.mode = mode
    # Graph construction order matters: placeholders feed the encoders,
    # whose states seed the decoder.
    self.__create_placeholders()
    self.__create_encoder()
    self.__create_decoder()
def __create_placeholders(self):
    """
    Create every input placeholder the graph consumes: the triple inputs
    (entities, predicates, direction), the padded text-evidence inputs
    with their lengths, and — in training mode — the decoder inputs and
    the targets derived from them.
    :return:
    """
    # Encoder Inputs
    #################
    # Input Triple
    ###############
    # The input triple is given in the form of list of entities [sub,obj] and list of predicates [pred]
    # This design allows also inputting multiple triples at once since order matters [s1,s2,o1,o2] [p1,p2]
    self.encoder_entities_inputs = tf.placeholder(tf.int32, shape=[None, self.config.ENTITIESLENGTH], name="encoder_entities_inputs")
    self.encoder_predicates_inputs = tf.placeholder(tf.int32, shape=[None, self.config.PREDICATESLENGTH], name="encoder_predicates_inputs")
    # Per-example scalar multiplied into the predicate embedding
    # (presumably +1/-1 to encode triple direction — confirm with callers).
    self.encoder_predicates_direction = tf.placeholder(tf.float32, shape=[None], name="encoder_predicates_direction")
    # Input Sequences
    # textual evidences = input sequences
    ######################################
    # input sequences with padding
    # :size = NUMBER_OF_TEXTUAL_EVIDENCES x BATCHSIZE x input sequence max length
    self.encoder_text_inputs = tf.placeholder(dtype=tf.int32, shape=[self.config.NUMBER_OF_TEXTUAL_EVIDENCES, None, None], name='encoder_text_inputs')
    # actual lengths of each input sequence
    # :size = NUMBER_OF_TEXTUAL_EVIDENCES x 1
    # each batch has a fixed input sequence length
    self.encoder_text_inputs_length = tf.placeholder(dtype=tf.int32, shape=[self.config.NUMBER_OF_TEXTUAL_EVIDENCES, None], name='encoder_text_inputs_length')
    # Dynamic batch size, read from the entities placeholder at run time.
    self.batch_size = tf.shape(self.encoder_entities_inputs)[0]
    # Decoder placeholders:
    # these are the raw inputs to the decoder same as input sequences
    # output sequence with padding
    # :size = BATCHSIZE x output sequence max length
    self.decoder_inputs = tf.placeholder(tf.int32, shape=[None, None], name="decoder_inputs")
    # number indicating actual lengths of the output sequence
    # :size = BATCHSIZE x 1
    self.decoder_inputs_length = tf.placeholder(dtype=tf.int32, shape=(None,), name='decoder_inputs_length')
    if self.mode == "training":
        self.decoder_inputs_train = self.decoder_inputs
        # for training our targets are decoder inputs shifted by one (to ignore the <s> symbol)
        # as shown in figure https://www.tensorflow.org/images/basic_seq2seq.png
        self.decoder_targets_train = self.decoder_inputs[:, 1:]
        # decoder_inputs_length_train: [batch_size x 1]
        self.decoder_inputs_length_train = self.decoder_inputs_length
        # Targets drop the leading <s>, so they are one step shorter.
        self.decoder_targets_length_train = self.decoder_inputs_length - 1
        # calculating max_decoder_length
        self.decoder_max_length = tf.reduce_max(self.decoder_targets_length_train)
    elif self.mode == "inference":
        # at inference time there's no decoder input so we set the Decode length to a maximum.
        self.decoder_max_length = self.config.MAX_DECODE_LENGTH
    # NOTE(review): in 'evaluation' mode neither branch runs, so
    # decoder_max_length is never set — confirm the evaluation path.
    # global step
    self.global_step = tf.Variable(0, trainable=False, name='global_step')
def __build_single_rnn_cell(self, hidden_size):
    """Return a fresh GRU cell with *hidden_size* units.

    Dropout wrapping is intentionally left disabled for now.
    """
    return tf.nn.rnn_cell.GRUCell(hidden_size)
def __create_triple_encoder(self):
    """Encode the input triple(s) into a single fixed-size vector.

    Entities and predicates are embedded (optionally from pretrained
    tables), the predicate embedding is scaled by the direction input,
    and the embedded tokens are flattened into
    `encoder_triples_last_state` — no RNN is involved here.
    """
    print('building Triples encoder ...')
    start = time.time()
    with tf.variable_scope('encoder'):
        # Create Embeddings Weights
        if self.config.USE_PRETRAINED_KB_EMBEDDINGS:
            # FIX: open pickles in binary mode and close them deterministically.
            # The original `pickle.load(open(path))` leaked the handle and
            # fails under Python 3's default text mode.
            with open(self.config.PRETRAINED_ENTITIES_EMBEDDINGS_PATH, "rb") as emb_file:
                ent_kb_emb = pickle.load(emb_file)
            self.encoder_entities_embeddings = tf.Variable(ent_kb_emb, name="entities_embeddings", trainable=self.config.TRAIN_KB_EMBEDDINGS)
            with open(self.config.PRETRAINED_PREDICATES_EMBEDDINGS_PATH, "rb") as emb_file:
                pred_kb_emb = pickle.load(emb_file)
            self.encoder_predicates_embeddings = tf.Variable(pred_kb_emb, name="predicates_embeddings",
                                                             trainable=self.config.TRAIN_KB_EMBEDDINGS)
        else:
            # Randomly-initialized embedding tables, trained from scratch.
            self.encoder_entities_embeddings = tf.get_variable("entities_embeddings",
                                                               shape=[self.config.ENTITIES_VOCAB, self.config.ENTITIES_EMBEDDING_SIZE],
                                                               initializer=self.__helper__initializer(),
                                                               dtype=tf.float32
                                                               )
            self.encoder_predicates_embeddings = tf.get_variable("predicates_embeddings",
                                                                 shape=[self.config.PREDICATES_VOCAB,
                                                                        self.config.PREDICATES_EMBEDDING_SIZE],
                                                                 initializer=self.__helper__initializer(),
                                                                 dtype=tf.float32
                                                                 )
        # embedding the encoder inputs
        # encoder_inputs is of size [Batch size x 3]
        # encoder_inputs_embedded is of size [Batch size x 3 x TRIPLES_EMBEDDING_SIZE]
        self.encoder_entities_inputs_embedded = tf.nn.embedding_lookup(self.encoder_entities_embeddings, self.encoder_entities_inputs)
        self.encoder_predicates_inputs_embedded = tf.nn.embedding_lookup(self.encoder_predicates_embeddings, self.encoder_predicates_inputs)
        # direction: [batch] -> [batch, 1, 1] so it broadcasts over the
        # predicate embeddings.
        direction = tf.expand_dims(self.encoder_predicates_direction, axis=1)
        direction = tf.expand_dims(direction, axis=2)
        self.encoder_predicates_inputs_embedded = tf.multiply(self.encoder_predicates_inputs_embedded, direction)
        self.encoder_triples_inputs_embedded = tf.concat((self.encoder_entities_inputs_embedded, self.encoder_predicates_inputs_embedded), axis=1)
        # Encode input triple into a vector by flattening
        # [batch, tokens, emb] -> [batch, tokens * emb].
        self.encoder_triples_last_state = tf.concat(tf.unstack(self.encoder_triples_inputs_embedded, axis=1), axis=1)
    print('Building encoder in: ', time.time() - start, ' secs')
def __create_seq_encoder(self):
    """Encode each textual evidence with its own RNN.

    Populates `encoder_text_outputs` (per-step outputs) and
    `encoder_text_last_state` (final states), one entry per evidence,
    using unidirectional or stacked bidirectional GRUs depending on
    `config.ENCODER_RNN_CELL_TYPE`.
    """
    print('Building Input Sequence Encoder ...')
    start = time.time()
    with tf.variable_scope('encoder'):
        ###################
        # Word Embeddings #
        ###################
        # Create Word Embeddings Weights
        if self.config.USE_PRETRAINED_WORD_EMBEDDINGS:
            # FIX: open the pickle in binary mode and close it deterministically.
            # The original `pickle.load(open(path))` leaked the handle and
            # fails under Python 3's default text mode.
            with open(self.config.PRETRAINED_WORD_EMBEDDINGS_PATH, "rb") as emb_file:
                word_emb = pickle.load(emb_file).astype(np.float32)
            self.encoder_word_embeddings = tf.Variable(word_emb, name="encoder_word_embeddings",
                                                       trainable=self.config.TRAIN_WORD_EMBEDDINGS)
        else:
            self.encoder_word_embeddings = tf.get_variable("encoder_word_embeddings",
                                                           shape=[self.config.DECODER_VOCAB_SIZE,
                                                                  self.config.INPUT_SEQ_EMBEDDING_SIZE],
                                                           initializer=self.__helper__initializer(),
                                                           dtype=tf.float32
                                                           )
        # Embedding the encoder inputs
        # Encoder Input size = NUMBER_OF_TEXTUAL_EVIDENCES x BATCH x input_length
        # Embedded Input size = NUMBER_OF_TEXTUAL_EVIDENCES x BATCH x input_length x word_embeddings_size
        self.encoder_text_inputs_embedded = tf.nn.embedding_lookup(self.encoder_word_embeddings,
                                                                   self.encoder_text_inputs)
        #######
        # RNN #
        #######
        # building a multilayer RNN for each Textual Evidence
        # encoder_outputs: [Num_text_evidence, batch_size, max_time_step, cell_output_size]
        # encoder_state: [Num_text_evidence, batch_size, cell_output_size]
        self.encoder_text_outputs = []
        self.encoder_text_last_state = []
        self.encoder_cell = []
        # NOTE(review): a single cell instance is reused for every layer and
        # every evidence via `[rnn] * NUM_LAYERS`, so they all share weights.
        # The commented-out per-evidence construction in the original
        # suggests this was deliberate — confirm the sharing is intended.
        rnn = self.__build_single_rnn_cell(self.config.INPUT_SEQ_RNN_HIDDEN_SIZE)
        if "bi" not in self.config.ENCODER_RNN_CELL_TYPE:
            # Unidirectional: one MultiRNNCell per textual evidence.
            for _ in range(self.config.NUMBER_OF_TEXTUAL_EVIDENCES):
                self.encoder_cell.append(tf.nn.rnn_cell.MultiRNNCell([rnn] * self.config.NUM_LAYERS))
            for i in range(self.config.NUMBER_OF_TEXTUAL_EVIDENCES):
                out, state = tf.nn.dynamic_rnn(
                    cell=self.encoder_cell[i],
                    inputs=self.encoder_text_inputs_embedded[i],
                    sequence_length=self.encoder_text_inputs_length[i],
                    dtype=tf.float32
                )
                self.encoder_text_outputs.append(out)
                self.encoder_text_last_state.append(tf.squeeze(state, axis=0))
        else:
            # Bidirectional: separate forward/backward cell stacks per evidence.
            self.fwd_encoder_cell = []
            self.bw_encoder_cell = []
            for _ in range(self.config.NUMBER_OF_TEXTUAL_EVIDENCES):
                self.fwd_encoder_cell.append([rnn] * self.config.NUM_LAYERS)
                self.bw_encoder_cell.append([rnn] * self.config.NUM_LAYERS)
            for i in range(self.config.NUMBER_OF_TEXTUAL_EVIDENCES):
                out, fwd_state, bk_state = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(
                    cells_fw=self.fwd_encoder_cell[i],
                    cells_bw=self.bw_encoder_cell[i],
                    inputs=self.encoder_text_inputs_embedded[i],
                    sequence_length=self.encoder_text_inputs_length[i],
                    dtype=tf.float32
                )
                # Concatenate forward/backward along the feature axis.
                self.encoder_text_outputs.append(tf.concat(out, 2))
                self.encoder_text_last_state.append(tf.squeeze(tf.concat([fwd_state, bk_state], 2), axis=0))
    print('Building encoder in: ', time.time() - start, ' secs')
def __create_encoder(self):
    """Run both encoders and fuse their final states into one vector.

    encoder_last_state = [triple state | text state 1 | ... | text state N]
    concatenated along the feature axis.
    """
    self.__create_triple_encoder()
    self.__create_seq_encoder()
    last_states = [self.encoder_triples_last_state] + self.encoder_text_last_state
    self.encoder_last_state = tf.concat(last_states, axis=1)
def __create_decoder_cell(self):
    """Build the attention-free GRU decoder cell and its initial state."""
    self.decoder_cell = tf.nn.rnn_cell.GRUCell(self.config.DECODER_RNN_HIDDEN_SIZE)
    # Dense layer projects the concatenated encoder state down to the
    # decoder's hidden size.
    projection = Dense(self.config.DECODER_RNN_HIDDEN_SIZE)
    # Wrapped in a 1-tuple: the decoder expects a state tuple.
    self.decoder_initial_state = (projection(self.encoder_last_state),)
def __create_decoder_attention_cell_old(self):
    """
    create decoder RNN with attention (legacy variant)

    Uses a single Bahdanau attention over the concatenation of the raw
    triple embeddings and every text encoder's outputs, unlike the newer
    variant which attends to triples and text separately.
    :return:
    """
    # Attention memory: triple token embeddings followed by all text
    # outputs, joined along the time axis.
    memory = tf.concat([self.encoder_triples_inputs_embedded] + self.encoder_text_outputs, axis=1)
    self.attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(
        num_units=self.config.TRIPLES_EMBEDDING_SIZE,  # the depth of the Attention layer
        memory=memory,
        name="Attention"
    )
    # create decoder cell:
    gru = self.__build_single_rnn_cell(self.config.DECODER_RNN_HIDDEN_SIZE)
    # NOTE(review): `[gru] * NUM_LAYERS` repeats the SAME cell object, so
    # all decoder layers share weights — confirm this sharing is intended.
    self.decoder_cell_list = [gru] * self.config.NUM_LAYERS
    decoder_hidden_state_reshape = Dense(self.config.DECODER_RNN_HIDDEN_SIZE)
    # Only the top layer is wrapped with attention.
    self.decoder_cell_list[-1] = tf.contrib.seq2seq.AttentionWrapper(
        cell=self.decoder_cell_list[-1],
        attention_layer_size=self.config.DECODER_RNN_HIDDEN_SIZE,  # the output hidden size of the last decoder
        attention_mechanism=self.attention_mechanism,
        initial_cell_state=decoder_hidden_state_reshape(self.encoder_last_state),
        alignment_history=False,
        name="Attention_Wrapper"
    )
    self.decoder_cell = tf.nn.rnn_cell.MultiRNNCell(self.decoder_cell_list)
    # To be compatible with AttentionWrapper, the encoder last state
    # of the top layer should be converted into the AttentionWrapperState form
    # We can easily do this by calling AttentionWrapper.zero_state
    # self.decoder_initial_state = self.encoder_last_state
    init_state = self.decoder_cell_list[-1].zero_state(
        batch_size=self.batch_size,
        dtype=tf.float32
    )
    # a tuple because decode initial state has to take a tuple
    self.decoder_initial_state = (init_state,)
def __create_decoder_attention_cell(self):
    """
    create decoder RNN with attention

    Builds the two attention modules described in the class docstring:
    one over the triple token embeddings and one over the concatenated
    text-encoder outputs, both wrapped around the top decoder layer.
    :return:
    """
    # Attention 1: over the raw triple token embeddings.
    triple_memory = self.encoder_triples_inputs_embedded
    self.triple_attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(
        num_units=self.config.TRIPLES_EMBEDDING_SIZE,  # the depth of the Attention layer
        memory=triple_memory,
        name="TripleAttention"
    )
    # Attention 2: over every text encoder's per-step outputs, joined
    # along the time axis. Bidirectional encoders double the feature size.
    context_memory = tf.concat(self.encoder_text_outputs, axis=1)
    self.context_attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(
        num_units=self.config.INPUT_SEQ_RNN_HIDDEN_SIZE if "bi" not in self.config.ENCODER_RNN_CELL_TYPE
        else self.config.INPUT_SEQ_RNN_HIDDEN_SIZE * 2,  # the depth of the Attention layer
        memory=context_memory,
        name="ContextAttention"
    )
    # create decoder cell:
    gru = self.__build_single_rnn_cell(self.config.DECODER_RNN_HIDDEN_SIZE)
    # NOTE(review): `[gru] * NUM_LAYERS` repeats the SAME cell object, so
    # all decoder layers share weights — confirm this sharing is intended.
    self.decoder_cell_list = [gru] * self.config.NUM_LAYERS
    decoder_hidden_state_reshape = Dense(self.config.DECODER_RNN_HIDDEN_SIZE)
    # Only the top layer is wrapped; both mechanisms are attached with
    # matching per-mechanism attention layer sizes.
    self.decoder_cell_list[-1] = tf.contrib.seq2seq.AttentionWrapper(
        cell=self.decoder_cell_list[-1],
        # the output hidden size of the last decoder
        attention_layer_size=[self.config.TRIPLES_EMBEDDING_SIZE,
                              self.config.INPUT_SEQ_RNN_HIDDEN_SIZE if "bi" not in self.config.ENCODER_RNN_CELL_TYPE
                              else self.config.INPUT_SEQ_RNN_HIDDEN_SIZE * 2],
        attention_mechanism=[self.triple_attention_mechanism, self.context_attention_mechanism],
        initial_cell_state=decoder_hidden_state_reshape(self.encoder_last_state),
        alignment_history=False,
        name="Attention_Wrapper"
    )
    self.decoder_cell = tf.nn.rnn_cell.MultiRNNCell(self.decoder_cell_list)
    # To be compatible with AttentionWrapper, the encoder last state
    # of the top layer should be converted into the AttentionWrapperState form
    # We can easily do this by calling AttentionWrapper.zero_state
    # self.decoder_initial_state = self.encoder_last_state
    init_state = self.decoder_cell_list[-1].zero_state(
        batch_size=self.batch_size,
        dtype=tf.float32
    )
    # a tuple because decode initial state has to take a tuple
    self.decoder_initial_state = (init_state,)
def __create_decoder(self):
print("building decoder and attention ..")
start = time.time()
with tf.variable_scope('decoder'):
# input and output layers to the decoder
# decoder_input_layer = Dense(self.config.DECODER_RNN_HIDDEN_SIZE, dtype=tf.float32, name='decoder_input_projection')
decoder_output_layer = Dense(self.config.DECODER_VOCAB_SIZE, name="decoder_output_projection")
if self.config.COUPLE_ENCODER_DECODER_WORD_EMBEDDINGS:
# connect encoder and decoder word embeddings
self.decoder_embeddings = self.encoder_word_embeddings
elif self.config.USE_PRETRAINED_WORD_EMBEDDINGS:
word_emb = pickle.load(open(self.config.PRETRAINED_WORD_EMBEDDINGS_PATH)).astype(np.float32)
self.decoder_embeddings = tf.Variable(word_emb, name="decoder_embeddings",
trainable=self.config.TRAIN_WORD_EMBEDDINGS)
else:
self.decoder_embeddings = tf.get_variable("decoder_embeddings",
shape=[self.config.DECODER_VOCAB_SIZE, self.config.DECODER_EMBEDDING_SIZE],
initializer=self.__helper__initializer(),
dtype=tf.float32
)
if self.config.USE_ATTENTION:
self.__create_decoder_attention_cell()
else:
self.__create_decoder_cell()
######################################
# Build the decoder in training mode #
######################################
if self.mode == 'training':
# changing inputs to embeddings and then through the input projection
# decoder_inputs_embedded: [batch_size, max_time_step + 1, embedding_size]
self.decoder_inputs_embedded = tf.nn.embedding_lookup(params=self.decoder_embeddings,
ids=self.decoder_inputs_train)
# self.decoder_inputs_embedded = decoder_input_layer(self.decoder_inputs_embedded)
# Helper to feed inputs to the training:
self.training_helper = tf.contrib.seq2seq.TrainingHelper(
inputs=self.decoder_inputs_embedded,
sequence_length=self.decoder_inputs_length_train,
name='training_helper')
# Build the | |
not include PyPI.
argv.extend(
[
"--no-pypi",
*(f"--index={index}" for index in python_repos.indexes),
*(f"--repo={repo}" for repo in python_repos.repos),
"--resolver-version",
"pip-2020-resolver",
]
)
python: PythonExecutable | None = None
# NB: If `--platform` is specified, this signals that the PEX should not be built locally.
# `--interpreter-constraint` only makes sense in the context of building locally. These two
# flags are mutually exclusive. See https://github.com/pantsbuild/pex/issues/957.
if request.platforms:
# TODO(#9560): consider validating that these platforms are valid with the interpreter
# constraints.
argv.extend(request.platforms.generate_pex_arg_list())
elif request.python:
python = request.python
elif request.internal_only:
# NB: If it's an internal_only PEX, we do our own lookup of the interpreter based on the
# interpreter constraints, and then will run the PEX with that specific interpreter. We
# will have already validated that there were no platforms.
python = await Get(
PythonExecutable, InterpreterConstraints, request.interpreter_constraints
)
else:
# `--interpreter-constraint` options are mutually exclusive with the `--python` option,
# so we only specify them if we have not already located a concrete Python.
argv.extend(request.interpreter_constraints.generate_pex_arg_list())
if python:
argv.extend(["--python", python.path])
argv.append("--no-emit-warnings")
if python_setup.manylinux:
argv.extend(["--manylinux", python_setup.manylinux])
else:
argv.append("--no-manylinux")
if request.main is not None:
argv.extend(request.main.iter_pex_args())
# TODO(<NAME>): Right now any request requirements will shadow corresponding pex path
# requirements, which could lead to problems. Support shading python binaries.
# See: https://github.com/pantsbuild/pants/issues/9206
if request.pex_path:
argv.extend(["--pex-path", ":".join(pex.name for pex in request.pex_path)])
source_dir_name = "source_files"
argv.append(f"--sources-directory={source_dir_name}")
sources_digest_as_subdir = await Get(
Digest, AddPrefix(request.sources or EMPTY_DIGEST, source_dir_name)
)
additional_inputs_digest = request.additional_inputs or EMPTY_DIGEST
repository_pex_digest = repository_pex.digest if repository_pex else EMPTY_DIGEST
constraints_file_digest = EMPTY_DIGEST
requirements_file_digest = EMPTY_DIGEST
requirement_count: int
# TODO(#12314): Capture the resolve name for multiple user lockfiles.
resolve_name = (
request.requirements.options_scope_name
if isinstance(request.requirements, (ToolDefaultLockfile, ToolCustomLockfile))
else None
)
if isinstance(request.requirements, Lockfile):
is_monolithic_resolve = True
argv.extend(["--requirement", request.requirements.file_path])
argv.append("--no-transitive")
globs = PathGlobs(
[request.requirements.file_path],
glob_match_error_behavior=GlobMatchErrorBehavior.error,
description_of_origin=request.requirements.file_path_description_of_origin,
)
requirements_file_digest = await Get(Digest, PathGlobs, globs)
requirements_file_digest_contents = await Get(
DigestContents, Digest, requirements_file_digest
)
requirement_count = len(requirements_file_digest_contents[0].content.decode().splitlines())
if python_setup.invalid_lockfile_behavior in {
InvalidLockfileBehavior.warn,
InvalidLockfileBehavior.error,
}:
metadata = PythonLockfileMetadata.from_lockfile(
requirements_file_digest_contents[0].content,
request.requirements.file_path,
resolve_name,
)
_validate_metadata(metadata, request, request.requirements, python_setup)
elif isinstance(request.requirements, LockfileContent):
is_monolithic_resolve = True
file_content = request.requirements.file_content
requirement_count = len(file_content.content.decode().splitlines())
argv.extend(["--requirement", file_content.path])
argv.append("--no-transitive")
if python_setup.invalid_lockfile_behavior in {
InvalidLockfileBehavior.warn,
InvalidLockfileBehavior.error,
}:
metadata = PythonLockfileMetadata.from_lockfile(
file_content.content, resolve_name=resolve_name
)
_validate_metadata(metadata, request, request.requirements, python_setup)
requirements_file_digest = await Get(Digest, CreateDigest([file_content]))
else:
assert isinstance(request.requirements, PexRequirements)
is_monolithic_resolve = request.requirements.is_all_constraints_resolve
requirement_count = len(request.requirements.req_strings)
if request.requirements.constraints_strings:
constraints_file = "__constraints.txt"
constaints_content = "\n".join(request.requirements.constraints_strings)
constraints_file_digest = await Get(
Digest,
CreateDigest([FileContent(constraints_file, constaints_content.encode())]),
)
argv.extend(["--constraints", constraints_file])
argv.extend(request.requirements.req_strings)
merged_digest = await Get(
Digest,
MergeDigests(
(
sources_digest_as_subdir,
additional_inputs_digest,
constraints_file_digest,
requirements_file_digest,
repository_pex_digest,
*(pex.digest for pex in request.pex_path),
)
),
)
if request.internal_only or is_monolithic_resolve:
# This is a much friendlier layout for the CAS than the default zipapp.
layout = PexLayout.PACKED
else:
layout = request.layout or PexLayout.ZIPAPP
argv.extend(["--layout", layout.value])
output_files: Iterable[str] | None = None
output_directories: Iterable[str] | None = None
if PexLayout.ZIPAPP == layout:
output_files = [request.output_filename]
else:
output_directories = [request.output_filename]
process = await Get(
Process,
PexCliProcess(
python=python,
subcommand=(),
extra_args=argv,
additional_input_digest=merged_digest,
description=_build_pex_description(request),
output_files=output_files,
output_directories=output_directories,
# TODO: This is not the best heuristic for available concurrency, since the
# requirements almost certainly have transitive deps which also need building, but it
# is better than using something hardcoded.
concurrency_available=requirement_count,
),
)
process = dataclasses.replace(process, platform=platform)
# NB: Building a Pex is platform dependent, so in order to get a PEX that we can use locally
# without cross-building, we specify that our PEX command should be run on the current local
# platform.
result = await Get(ProcessResult, Process, process)
if pex_runtime_env.verbosity > 0:
log_output = result.stderr.decode()
if log_output:
logger.info("%s", log_output)
digest = (
await Get(
Digest, MergeDigests((result.output_digest, *(pex.digest for pex in request.pex_path)))
)
if request.pex_path
else result.output_digest
)
return BuildPexResult(
result=result, pex_filename=request.output_filename, digest=digest, python=python
)
def _validate_metadata(
    metadata: PythonLockfileMetadata,
    request: PexRequest,
    requirements: (Lockfile | LockfileContent),
    python_setup: PythonSetup,
) -> None:
    """Check that the lockfile is still valid for the requested PEX build.

    Compares the metadata header stored with the lockfile against the current
    requirement strings and interpreter constraints. On mismatch, either raises
    or merely warns, depending on `python_setup.invalid_lockfile_behavior`.

    :param metadata: metadata parsed from the lockfile header.
    :param request: the PEX build request being validated.
    :param requirements: the lockfile (path-based or in-memory content).
    :param python_setup: global Python subsystem options.
    :raises InvalidLockfileError: for a stale non-tool lockfile.
    :raises ValueError: for a stale tool lockfile when behavior is `error`.
    """
    # TODO(#12314): Improve this message: `Requirement.parse` raises `InvalidRequirement`, which
    # doesn't have mypy stubs at the moment; it may be hard to catch this exception and typecheck.
    req_strings = (
        {PipRequirement.parse(i) for i in requirements.req_strings}
        if requirements.req_strings is not None
        else None
    )
    validation = metadata.is_valid_for(
        requirements.lockfile_hex_digest,
        request.interpreter_constraints,
        python_setup.interpreter_universe,
        req_strings,
    )
    if validation:
        # Lockfile matches the current configuration; nothing to report.
        return

    def tool_message_parts(
        requirements: (ToolCustomLockfile | ToolDefaultLockfile),
    ) -> Iterator[str]:
        """Yield the pieces of an actionable error message for a stale tool lockfile."""
        tool_name = requirements.options_scope_name
        uses_source_plugins = requirements.uses_source_plugins
        uses_project_interpreter_constraints = requirements.uses_project_interpreter_constraints
        yield "You are using "
        if isinstance(requirements, ToolDefaultLockfile):
            yield "the `<default>` lockfile provided by Pants "
        elif isinstance(requirements, ToolCustomLockfile):
            yield f"the lockfile at {requirements.file_path} "
        yield (
            f"to install the tool `{tool_name}`, but it is not compatible with your "
            "configuration: "
            "\n\n"
        )
        # Requirements drifted from what the lockfile was generated with.
        if any(
            i == InvalidPythonLockfileReason.INVALIDATION_DIGEST_MISMATCH
            or i == InvalidPythonLockfileReason.REQUIREMENTS_MISMATCH
            for i in validation.failure_reasons
        ):
            # TODO(12314): Add message showing _which_ requirements diverged.
            yield (
                "- You have set different requirements than those used to generate the lockfile. "
                f"You can fix this by not setting `[{tool_name}].version`, "
            )
            if uses_source_plugins:
                yield f"`[{tool_name}].source_plugins`, "
            yield (
                f"and `[{tool_name}].extra_requirements`, or by using a new "
                "custom lockfile."
                "\n"
            )
        # Interpreter constraints drifted from what the lockfile supports.
        if (
            InvalidPythonLockfileReason.INTERPRETER_CONSTRAINTS_MISMATCH
            in validation.failure_reasons
        ):
            yield (
                f"- You have set interpreter constraints (`{request.interpreter_constraints}`) that "
                "are not compatible with those used to generate the lockfile "
                f"(`{metadata.valid_for_interpreter_constraints}`). "
            )
            if not uses_project_interpreter_constraints:
                yield (
                    f"You can fix this by not setting `[{tool_name}].interpreter_constraints`, "
                    "or by using a new custom lockfile. "
                )
            else:
                yield (
                    f"`{tool_name}` determines its interpreter constraints based on your code's own "
                    "constraints. To fix this error, you can either change your code's constraints "
                    f"(see {doc_url('python-interpreter-compatibility')}) or by generating a new "
                    "custom lockfile. "
                )
            yield "\n"
        yield "\n"
        # Closing advice: how to (re)generate an appropriate lockfile.
        if not isinstance(requirements, ToolCustomLockfile):
            yield (
                "To generate a custom lockfile based on your current configuration, set "
                f"`[{tool_name}].lockfile` to where you want to create the lockfile, then run "
                f"`./pants generate-lockfiles --resolve={tool_name}`. "
            )
        else:
            yield (
                "To regenerate your lockfile based on your current configuration, run "
                f"`./pants generate-lockfiles --resolve={tool_name}`. "
            )

    message: str
    if isinstance(requirements, (ToolCustomLockfile, ToolDefaultLockfile)):
        message = "".join(tool_message_parts(requirements)).strip()
    else:
        # TODO(12314): Improve this message
        raise InvalidLockfileError(f"{validation.failure_reasons}")
    if python_setup.invalid_lockfile_behavior == InvalidLockfileBehavior.error:
        raise ValueError(message)
    else:
        logger.warning("%s", message)
def _build_pex_description(request: PexRequest) -> str:
    """Choose the human-readable description shown for this PEX build process."""
    # An explicit description always wins.
    if request.description:
        return request.description

    reqs = request.requirements
    if isinstance(reqs, Lockfile):
        suffix = f"from {reqs.file_path}"
    elif isinstance(reqs, LockfileContent):
        suffix = f"from {reqs.file_content.path}"
    elif not reqs.req_strings:
        # No requirements at all: nothing to append.
        return f"Building {request.output_filename}"
    elif reqs.repository_pex:
        # Requirements are extracted from a pre-resolved repository PEX.
        return (
            f"Extracting {pluralize(len(reqs.req_strings), 'requirement')} "
            f"to build {request.output_filename} from {reqs.repository_pex.name}: "
            f"{', '.join(reqs.req_strings)}"
        )
    else:
        suffix = (
            f"with {pluralize(len(reqs.req_strings), 'requirement')}: "
            f"{', '.join(reqs.req_strings)}"
        )
    return f"Building {request.output_filename} {suffix}"
@rule
async def create_pex(request: PexRequest) -> Pex:
    """Build the requested PEX and unwrap it from the build result."""
    build_result = await Get(BuildPexResult, PexRequest, request)
    return build_result.create_pex()
@rule
async def create_optional_pex(request: OptionalPexRequest) -> OptionalPex:
    """Build a PEX when a request is present; otherwise return an empty wrapper."""
    inner_request = request.maybe_pex_request
    if inner_request is None:
        return OptionalPex(None)
    pex = await Get(Pex, PexRequest, inner_request)
    return OptionalPex(pex)
@dataclass(frozen=True)
class Script:
    """A script file, addressable for direct execution from the sandbox root."""

    path: PurePath

    @property
    def argv0(self) -> str:
        """The string to use as argv[0] when executing this script.

        A bare filename gets an explicit "./" prefix so the shell does not
        search $PATH for it; any path with a directory part is used as-is.
        """
        if self.path.parent == PurePath():
            return f"./{self.path}"
        return str(self.path)
@dataclass(frozen=True)
class VenvScript:
    """Pairs a venv script with the file content used to materialize it."""

    # The script's sandbox path wrapper, and the bytes written to that path.
    script: Script
    content: FileContent
@dataclass(frozen=True)
class VenvScriptWriter:
complete_pex_env: CompletePexEnvironment
pex: Pex
venv_dir: PurePath
@classmethod
def create(
cls, pex_environment: PexEnvironment, pex: Pex, venv_rel_dir: PurePath
) -> VenvScriptWriter:
# N.B.: We don't know the working directory that will be used in any given
# invocation of the venv scripts; so we deal with working_directory inside the scripts
# themselves by absolutifying all relevant paths at runtime.
complete_pex_env = pex_environment.in_sandbox(working_directory=None)
venv_dir = complete_pex_env.pex_root / venv_rel_dir
return cls(complete_pex_env=complete_pex_env, pex=pex, venv_dir=venv_dir)
def _create_venv_script(
self,
bash: BashBinary,
*,
script_path: PurePath,
venv_executable: PurePath,
) -> VenvScript:
env_vars = (
f"{name}={shlex.quote(value)}"
for name, value in self.complete_pex_env.environment_dict(
python_configured=True
).items()
)
target_venv_executable = shlex.quote(str(venv_executable))
venv_dir = shlex.quote(str(self.venv_dir))
execute_pex_args = " ".join(
f"$(ensure_absolute {shlex.quote(arg)})"
for arg in self.complete_pex_env.create_argv(self.pex.name, python=self.pex.python)
)
script = dedent(
f"""\
#!{bash.path}
set -euo pipefail
# N.B.: We convert all sandbox root relative paths to absolute paths so this script
# works when run with a cwd set elsewhere.
# N.B.: This relies on BASH_SOURCE which has been available since bash-3.0, released in
# 2004. In turn, our use of BASH_SOURCE relies on the fact that this script is executed
# by the engine via its absolute path.
ABS_SANDBOX_ROOT="${{BASH_SOURCE%/*}}"
function ensure_absolute() {{
local value0="$1"
shift
if [ "${{value0:0:1}}" == "/" ]; then
echo | |
custom radius",
default = False)
fit = bpy.props.EnumProperty(name = "Method",
items = (("best", "Best fit", "Non-linear least squares"),
("inside", "Fit inside","Only move vertices towards the center")),
description = "Method used for fitting a circle to the vertices",
default = 'best')
flatten = bpy.props.BoolProperty(name = "Flatten",
description = "Flatten the circle, instead of projecting it on the " \
"mesh",
default = True)
influence = bpy.props.FloatProperty(name = "Influence",
description = "Force of the tool",
default = 100.0,
min = 0.0,
max = 100.0,
precision = 1,
subtype = 'PERCENTAGE')
radius = bpy.props.FloatProperty(name = "Radius",
description = "Custom radius for circle",
default = 1.0,
min = 0.0,
soft_max = 1000.0)
regular = bpy.props.BoolProperty(name = "Regular",
description = "Distribute vertices at constant distances along the " \
"circle",
default = True)
@classmethod
def poll(cls, context):
ob = context.active_object
return(ob and ob.type == 'MESH' and context.mode == 'EDIT_MESH')
def draw(self, context):
layout = self.layout
col = layout.column()
col.prop(self, "fit")
col.separator()
col.prop(self, "flatten")
row = col.row(align=True)
row.prop(self, "custom_radius")
row_right = row.row(align=True)
row_right.active = self.custom_radius
row_right.prop(self, "radius", text="")
col.prop(self, "regular")
col.separator()
col.prop(self, "influence")
def invoke(self, context, event):
# load custom settings
settings_load(self)
return self.execute(context)
    def execute(self, context):
        """Fit a circle to each selected loop and move its vertices onto it."""
        # initialise
        global_undo, object, mesh = initialise()
        settings_write(self)
        # check cache to see if we can save time
        cached, single_loops, loops, derived, mapping = cache_read("Circle",
            object, mesh, False, False)
        if cached:
            derived, mesh_mod = get_derived_mesh(object, mesh, context.scene)
        else:
            # find loops
            derived, mesh_mod, single_vertices, single_loops, loops = \
                circle_get_input(object, mesh, context.scene)
            mapping = get_mapping(derived, mesh, mesh_mod, single_vertices,
                False, loops)
            single_loops, loops = circle_check_loops(single_loops, loops,
                mapping, mesh_mod)
        # saving cache for faster execution next time
        if not cached:
            cache_write("Circle", object, mesh, False, False, single_loops,
                loops, derived, mapping)
        move = []
        for i, loop in enumerate(loops):
            # best fitting flat plane
            com, normal = calculate_plane(mesh_mod, loop)
            # if circular, shift loop so we get a good starting vertex
            if loop[1]:
                loop = circle_shift_loop(mesh_mod, loop, com)
            # flatten vertices on plane
            locs_2d, p, q = circle_3d_to_2d(mesh_mod, loop, com, normal)
            # calculate circle
            if self.fit == 'best':
                x0, y0, r = circle_calculate_best_fit(locs_2d)
            else: # self.fit == 'inside'
                x0, y0, r = circle_calculate_min_fit(locs_2d)
            # radius override
            if self.custom_radius:
                r = self.radius / p.length
            # calculate positions on circle
            if self.regular:
                new_locs_2d = circle_project_regular(locs_2d[:], x0, y0, r)
            else:
                new_locs_2d = circle_project_non_regular(locs_2d[:], x0, y0, r)
            # take influence into account
            locs_2d = circle_influence_locs(locs_2d, new_locs_2d,
                self.influence)
            # calculate 3d positions of the created 2d input
            move.append(circle_calculate_verts(self.flatten, mesh_mod,
                locs_2d, com, p, q, normal))
            # flatten single input vertices on plane defined by loop
            if self.flatten and single_loops:
                move.append(circle_flatten_singles(mesh_mod, com, p, q,
                    normal, single_loops[i]))
        # move vertices to new locations
        # (influence was already blended in via circle_influence_locs above,
        # hence -1 instead of self.influence here)
        move_verts(mesh, mapping, move, -1)
        # cleaning up
        if derived:
            bpy.context.blend_data.meshes.remove(mesh_mod)
        terminate(global_undo)
        return{'FINISHED'}
# curve operator
class Curve(bpy.types.Operator):
    """Interpolate a smooth curve through each selected vertex loop."""
    bl_idname = "mesh.looptools_curve"
    bl_label = "Curve"
    bl_description = "Turn a loop into a smooth curve"
    bl_options = {'REGISTER', 'UNDO'}
    boundaries = bpy.props.BoolProperty(name = "Boundaries",
        description = "Limit the tool to work within the boundaries of the "\
            "selected vertices",
        default = False)
    influence = bpy.props.FloatProperty(name = "Influence",
        description = "Force of the tool",
        default = 100.0,
        min = 0.0,
        max = 100.0,
        precision = 1,
        subtype = 'PERCENTAGE')
    interpolation = bpy.props.EnumProperty(name = "Interpolation",
        items = (("cubic", "Cubic", "Natural cubic spline, smooth results"),
            ("linear", "Linear", "Simple and fast linear algorithm")),
        description = "Algorithm used for interpolation",
        default = 'cubic')
    regular = bpy.props.BoolProperty(name = "Regular",
        # Bug fix: the continuation previously read "...along the" + "curve",
        # which rendered as "along thecurve" in the tooltip.
        description = "Distribute vertices at constant distances along the " \
            "curve",
        default = True)
    restriction = bpy.props.EnumProperty(name = "Restriction",
        items = (("none", "None", "No restrictions on vertex movement"),
            ("extrude", "Extrude only","Only allow extrusions (no "\
                "indentations)"),
            ("indent", "Indent only", "Only allow indentation (no "\
                "extrusions)")),
        description = "Restrictions on how the vertices can be moved",
        default = 'none')

    @classmethod
    def poll(cls, context):
        """Available only for a mesh object while in edit mode."""
        ob = context.active_object
        return(ob and ob.type == 'MESH' and context.mode == 'EDIT_MESH')

    def draw(self, context):
        """Lay out the Curve options in the operator panel."""
        layout = self.layout
        col = layout.column()
        col.prop(self, "interpolation")
        col.prop(self, "restriction")
        col.prop(self, "boundaries")
        col.prop(self, "regular")
        col.separator()
        col.prop(self, "influence")

    def invoke(self, context, event):
        """Restore previously saved tool settings, then run the tool."""
        # load custom settings
        settings_load(self)
        return self.execute(context)

    def execute(self, context):
        """Fit an interpolated curve through each loop and move its vertices."""
        # initialise
        global_undo, object, mesh = initialise()
        settings_write(self)
        # check cache to see if we can save time
        cached, single_loops, loops, derived, mapping = cache_read("Curve",
            object, mesh, False, self.boundaries)
        if cached:
            derived, mesh_mod = get_derived_mesh(object, mesh, context.scene)
        else:
            # find loops
            derived, mesh_mod, loops = curve_get_input(object, mesh,
                self.boundaries, context.scene)
            mapping = get_mapping(derived, mesh, mesh_mod, False, True, loops)
            loops = check_loops(loops, mapping, mesh_mod)
        verts_selected = [v.index for v in mesh_mod.vertices if v.select \
            and not v.hide]
        # saving cache for faster execution next time
        if not cached:
            cache_write("Curve", object, mesh, False, self.boundaries, False,
                loops, derived, mapping)
        move = []
        for loop in loops:
            knots, points = curve_calculate_knots(loop, verts_selected)
            pknots = curve_project_knots(mesh_mod, verts_selected, knots,
                points, loop[1])
            tknots, tpoints = curve_calculate_t(mesh_mod, knots, points,
                pknots, self.regular, loop[1])
            splines = calculate_splines(self.interpolation, mesh_mod,
                tknots, knots)
            move.append(curve_calculate_vertices(mesh_mod, knots, tknots,
                points, tpoints, splines, self.interpolation,
                self.restriction))
        # move vertices to new locations
        move_verts(mesh, mapping, move, self.influence)
        # cleaning up
        if derived:
            bpy.context.blend_data.meshes.remove(mesh_mod)
        terminate(global_undo)
        return{'FINISHED'}
# flatten operator
class Flatten(bpy.types.Operator):
    """Project the selected vertices onto a single plane."""
    bl_idname = "mesh.looptools_flatten"
    bl_label = "Flatten"
    bl_description = "Flatten vertices on a best-fitting plane"
    bl_options = {'REGISTER', 'UNDO'}
    influence = bpy.props.FloatProperty(name = "Influence",
        description = "Force of the tool",
        default = 100.0,
        min = 0.0,
        max = 100.0,
        precision = 1,
        subtype = 'PERCENTAGE')
    plane = bpy.props.EnumProperty(name = "Plane",
        items = (("best_fit", "Best fit", "Calculate a best fitting plane"),
            ("normal", "Normal", "Derive plane from averaging vertex "\
                "normals"),
            ("view", "View", "Flatten on a plane perpendicular to the "\
                "viewing angle")),
        description = "Plane on which vertices are flattened",
        default = 'best_fit')
    # NOTE: 'restriction' is declared but not yet implemented (see execute).
    restriction = bpy.props.EnumProperty(name = "Restriction",
        items = (("none", "None", "No restrictions on vertex movement"),
            ("bounding_box", "Bounding box", "Vertices are restricted to "\
                "movement inside the bounding box of the selection")),
        description = "Restrictions on how the vertices can be moved",
        default = 'none')

    @classmethod
    def poll(cls, context):
        """Available only for a mesh object while in edit mode."""
        ob = context.active_object
        return(ob and ob.type == 'MESH' and context.mode == 'EDIT_MESH')

    def draw(self, context):
        """Lay out the Flatten options in the operator panel."""
        layout = self.layout
        col = layout.column()
        col.prop(self, "plane")
        # 'restriction' is hidden until it is actually implemented.
        #col.prop(self, "restriction")
        col.separator()
        col.prop(self, "influence")

    def invoke(self, context, event):
        """Restore previously saved tool settings, then run the tool."""
        # load custom settings
        settings_load(self)
        return self.execute(context)

    def execute(self, context):
        """Flatten each virtual loop of selected vertices onto its plane."""
        # initialise
        global_undo, object, mesh = initialise()
        settings_write(self)
        # check cache to see if we can save time
        cached, single_loops, loops, derived, mapping = cache_read("Flatten",
            object, mesh, False, False)
        if not cached:
            # order input into virtual loops
            loops = flatten_get_input(mesh)
            loops = check_loops(loops, mapping, mesh)
            # saving cache for faster execution next time
            # (merged with the loop-finding branch above: both ran only
            # when the cache missed)
            cache_write("Flatten", object, mesh, False, False, False, loops,
                False, False)
        move = []
        for loop in loops:
            # calculate plane and position of vertices on them
            com, normal = calculate_plane(mesh, loop, method=self.plane,
                object=object)
            to_move = flatten_project(mesh, loop, com, normal)
            # The original code branched on self.restriction here, but both
            # branches appended the same result; 'restriction' is not
            # implemented yet, so the dead branch was removed.
            move.append(to_move)
        move_verts(mesh, False, move, self.influence)
        terminate(global_undo)
        return {'FINISHED'}
# relax operator
class Relax(bpy.types.Operator):
bl_idname = "mesh.looptools_relax"
bl_label = "Relax"
bl_description = "Relax the loop, so it is smoother"
bl_options = {'REGISTER', 'UNDO'}
input = bpy.props.EnumProperty(name = "Input",
items = (("all", "Parallel (all)", "Also use non-selected "\
"parallel loops as input"),
("selected", "Selection","Only use selected vertices as input")),
description = "Loops that are relaxed",
default = 'selected')
interpolation = bpy.props.EnumProperty(name = "Interpolation",
items = (("cubic", "Cubic", "Natural cubic spline, smooth results"),
("linear", "Linear", "Simple and fast linear algorithm")),
description = "Algorithm used for interpolation",
default = 'cubic')
iterations = bpy.props.EnumProperty(name = "Iterations",
items = (("1", "1", "One"),
("3", "3", "Three"),
("5", "5", "Five"),
("10", "10", "Ten"),
("25", "25", "Twenty-five")),
description = "Number of times the loop is relaxed",
default = "1")
regular = bpy.props.BoolProperty(name = "Regular",
description = "Distribute vertices at constant distances along the" \
"loop",
default = True)
| |
Creator)
sendMention(msg.to, msg._from, "","Success Invite AllGroup")
else:
sendMention(msg.to, msg._from, "","Limit boss")
elif text.lower() == 'kiinviteme':
if msg._from in admin:
gid = ki.getGroupIdsJoined()
for i in gid:
ki.inviteIntoGroup(i, Creator)
sendMention(msg.to, msg._from, "","Success Invite AllGroup")
else:
sendMention(msg.to, msg._from, "","Limit boss")
elif text.lower() == 'kkinviteme':
if msg._from in admin:
gid = kk.getGroupIdsJoined()
for i in gid:
kk.inviteIntoGroup(i, Creator)
sendMention(msg.to, msg._from, "","Success Invite AllGroup")
else:
sendMention(msg.to, msg._from, "","Limit boss")
elif text.lower() == 'kcinviteme':
if msg._from in admin:
gid = kc.getGroupIdsJoined()
for i in gid:
kc.inviteIntoGroup(i, Creator)
sendMention(msg.to, msg._from, "","Success Invite AllGroup")
else:
sendMention(msg.to, msg._from, "","Limit boss")
elif text.lower() == 'kdinviteme':
if msg._from in admin:
gid = kd.getGroupIdsJoined()
for i in gid:
kd.inviteIntoGroup(i, Creator)
sendMention(msg.to, msg._from, "","Success Invite AllGroup")
else:
sendMention(msg.to, msg._from, "","Limit boss")
elif text.lower() == 'keinviteme':
if msg._from in admin:
gid = ke.getGroupIdsJoined()
for i in gid:
ke.inviteIntoGroup(i, Creator)
sendMention(msg.to, msg._from, "","Success Invite AllGroup")
else:
sendMention(msg.to, msg._from, "","Limit boss")
elif text.lower() == 's1inviteme':
if msg._from in admin:
gid = s1.getGroupIdsJoined()
for i in gid:
s1.inviteIntoGroup(i, Creator)
sendMention(msg.to, msg._from, "","Success Invite AllGroup")
else:
sendMention(msg.to, msg._from, "","Limit boss")
elif text.lower() == 's2inviteme':
if msg._from in admin:
gid = s2.getGroupIdsJoined()
for i in gid:
s2.inviteIntoGroup(i, Creator)
sendMention(msg.to, msg._from, "","Success Invite AllGroup")
else:
sendMention(msg.to, msg._from, "","Limit boss")
elif text.lower() == 's3inviteme':
if msg._from in admin:
gid = s3.getGroupIdsJoined()
for i in gid:
s3.inviteIntoGroup(i, Creator)
sendMention(msg.to, msg._from, "","Success Invite AllGroup")
else:
sendMention(msg.to, msg._from, "","Limit boss")
# CREATOR INVITE OFF
elif "Leave" in msg.text:
if msg._from in Creator:
gid = cl.getGroupIdsJoined()
for i in gid:
cl.sendText(i,"Bot Di Paksa Keluar Oleh Owner!\nAyo left teman2\nAssalamualikum wr wb All Member\nAdd Owner kami")
cl.sendContact(i,"ub3808de9f7df35f57fb366d157f9790a")
cl.leaveGroup(i)
ki.leaveGroup(i)
kk.leaveGroup(i)
kc.leaveGroup(i)
kd.leaveGroup(i)
ke.leaveGroup(i)
s1.leaveGroup(i)
s2.leaveGroup(i)
s3.leaveGroup(i)
ehun.sendText(msg.to,"Bot Success Leave All Group")
elif "Kaptenleave" in msg.text:
if msg._from in Creator:
gid = ehun.getGroupIdsJoined()
for i in gid:
ehun.sendText(i,"Bot Di Paksa Keluar OlehOwner!\nAyo left teman2\nAssalamualikum wr wb All Member\nAdd Owner kami")
ehun.sendContact(i,"ub3808de9f7df35f57fb366d157f9790a")
ehun.leaveGroup(i)
ehun.sendMessage(msg.to,"Sukses boss")
elif text.lower() == "cek":
if msg._from in admin:
try:cl.inviteIntoGroup(msg.to, [mid]);has = "OK"
except:has = "NOT"
try:cl.kickoutFromGroup(msg.to, [mid]);has1 = "OK"
except:has1 = "NOT"
if has == "OK":sil = "🔋██ full 100%"
else:sil = "🔌█▒. Low 0%"
if has1 == "OK":sil1 = "🔋██ full 100%"
else:sil1 = "🔌█▒ Low 0%"
cl.sendMessage(msg.to, "Status:\n\n🔴Kick : {} \n🔴Invite : {}".format(sil1,sil))
try:ki.inviteIntoGroup(msg.to, [Amid]);has = "OK"
except:has = "NOT"
try:ki.kickoutFromGroup(msg.to, [Amid]);has1 = "OK"
except:has1 = "NOT"
if has == "OK":sil = "🔋██ full 100%"
else:sil = "🔌█▒. Low 0%"
if has1 == "OK":sil1 = "🔋██ full 100%"
else:sil1 = "🔌█▒ Low 0%"
ki.sendMessage(msg.to, "Status:\n\n🔴Kick : {} \n🔴Invite : {}".format(sil1,sil))
try:kk.inviteIntoGroup(msg.to, [Bmid]);has = "OK"
except:has = "NOT"
try:kk.kickoutFromGroup(msg.to, [Bmid]);has1 = "OK"
except:has1 = "NOT"
if has == "OK":sil = "🔋██ full 100%"
else:sil = "🔌█▒. Low 0%"
if has1 == "OK":sil1 = "🔋██ full 100%"
else:sil1 = "🔌█▒ Low 0%"
kk.sendMessage(msg.to, "Status:\n\n🔴Kick : {} \n🔴Invite : {}".format(sil1,sil))
try:kc.inviteIntoGroup(msg.to, [Cmid]);has = "OK"
except:has = "NOT"
try:kc.kickoutFromGroup(msg.to, [Cmid]);has1 = "OK"
except:has1 = "NOT"
if has == "OK":sil = "🔋██ full 100%"
else:sil = "🔌█▒. Low 0%"
if has1 == "OK":sil1 = "🔋██ full 100%"
else:sil1 = "🔌█▒ Low 0%"
kc.sendMessage(msg.to, "Status:\n\n🔴Kick : {} \n🔴Invite : {}".format(sil1,sil))
try:kd.inviteIntoGroup(msg.to, [Dmid]);has = "OK"
except:has = "NOT"
try:kd.kickoutFromGroup(msg.to, [Dmid]);has1 = "OK"
except:has1 = "NOT"
if has == "OK":sil = "🔋██ full 100%"
else:sil = "🔌█▒. Low 0%"
if has1 == "OK":sil1 = "🔋██ full 100%"
else:sil1 = "🔌█▒ Low 0%"
kd.sendMessage(msg.to, "Status:\n\n🔴Kick : {} \n🔴Invite : {}".format(sil1,sil))
try:ke.inviteIntoGroup(msg.to, [Emid]);has = "OK"
except:has = "NOT"
try:ke.kickoutFromGroup(msg.to, [Emid]);has1 = "OK"
except:has1 = "NOT"
if has == "OK":sil = "🔋██ full 100%"
else:sil = "🔌█▒. Low 0%"
if has1 == "OK":sil1 = "🔋██ full 100%"
else:sil1 = "🔌█▒ Low 0%"
ke.sendMessage(msg.to, "Status:\n\n🔴Kick : {} \n🔴Invite : {}".format(sil1,sil))
try:s1.inviteIntoGroup(msg.to, [Fmid]);has = "OK"
except:has = "NOT"
try:s1.kickoutFromGroup(msg.to, [Fmid]);has1 = "OK"
except:has1 = "NOT"
if has == "OK":sil = "🔋██ full 100%"
else:sil = "🔌█▒. Low 0%"
if has1 == "OK":sil1 = "🔋██ full 100%"
else:sil1 = "🔌█▒ Low 0%"
s1.sendMessage(msg.to, "Status:\n\n🔴Kick : {} \n🔴Invite : {}".format(sil1,sil))
try:s2.inviteIntoGroup(msg.to, [Gmid]);has = "OK"
except:has = "NOT"
try:s2.kickoutFromGroup(msg.to, [Gmid]);has1 = "OK"
except:has1 = "NOT"
if has == "OK":sil = "🔋██ full 100%"
else:sil = "🔌█▒. Low 0%"
if has1 == "OK":sil1 = "🔋██ full 100%"
else:sil1 = "🔌█▒ Low 0%"
s2.sendMessage(msg.to, "Status:\n\n🔴Kick : {} \n🔴Invite : {}".format(sil1,sil))
try:s3.inviteIntoGroup(msg.to, [Hmid]);has = "OK"
except:has = "NOT"
try:s3.kickoutFromGroup(msg.to, [Hmid]);has1 = "OK"
except:has1 = "NOT"
if has == "OK":sil = "🔋██ full 100%"
else:sil = "🔌█▒. Low 0%"
if has1 == "OK":sil1 = "🔋██ full 100%"
else:sil1 = "🔌█▒ Low 0%"
s3.sendMessage(msg.to, "Status:\n\n🔴Kick : {} \n🔴Invite : {}".format(sil1,sil))
try:ehun.inviteIntoGroup(msg.to, [Imid]);has = "OK"
except:has = "NOT"
try:ehun.kickoutFromGroup(msg.to, [Imid]);has1 = "OK"
except:has1 = "NOT"
if has == "OK":sil = "🔋██ full 100%"
else:sil = "🔌█▒. Low 0%"
if has1 == "OK":sil1 = "🔋██ full 100%"
else:sil1 = "🔌█▒ Low 0%"
ehun.sendMessage(msg.to, "Status:\n\n🔴Kick : {} \n🔴Invite : {}".format(sil1,sil))
elif 'Sampah' in msg.text:
if msg._from in admin:
if msg.toType == 2:
group = ehun.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
ehun.cancelGroupInvitation(msg.to,[_mid])
elif 'Clear invites' in msg.text:
if msg._from in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
cl.cancelGroupInvitation(msg.to,[_mid])
cl.sendText(msg.to,"<NAME>")
elif 'Clean invites' in msg.text:
if msg._from in admin:
if msg.toType == 2:
X = ehun.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
random.choice(ABC).cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
ehun.sendText(msg.to,"No one is inviting。")
else:
ehun.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
ehun.sendText(msg.to,"Can not be used")
else:
ehun.sendText(msg.to,"Can not be used last group")
elif "Ban @" in msg.text:
if msg._from in admin:
if msg.toType == 2:
print("@Ban by mention")
_name = msg.text.replace("Ban @","")
_nametarget = _name.rstrip(' ')
gs = ehun.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ehun.sendText(msg.to,"Not found")
else:
for target in targets:
if target not in Creator:
try:
bl["blacklist"][target] = True
ehun.sendText(msg.to,"Succes BosQ")
except:
ehun.sendText(msg.to,"Error")
else:
ehun.sendText(msg.to,"Creator Detected~")
elif "Unban @" in msg.text:
if msg._from in admin:
if msg.toType == 2:
print("@Unban by mention")
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip(' ')
gs = ehun.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ehun.sendText(msg.to,"Not found")
else:
for target in targets:
try:
del bl["blacklist"][target]
ehun.sendText(msg.to,"Succes BosQ")
except:
ehun.sendText(msg.to,"Succes BosQ")
elif text.lower() == 'banlist':
if bl["blacklist"] == {}:
ehun.sendText(msg.to,"Tidak Ada")
else:
mc = ""
for mi_d in bl["blacklist"]:
mc += "->" +ehun.getContact(mi_d).displayName + "\n"
ehun.sendText(msg.to,"===[Blacklist User]===\n"+mc)
elif text.lower() == 'kill':
if msg._from in admin:
if msg.toType == 2:
group = ehun.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in bl["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
ehun.sendText(msg.to,"Fuck You")
pass
for jj in matched_list:
try:
klist = [cl,ki,kk,kc,kd,ke,s1,s2,s3]
tim = random.choice(klist)
tim.kickoutFromGroup(msg.to,[jj])
print(msg.to,[jj])
except:
pass
elif text.lower() == 'clear':
if msg._from in admin:
bl["blacklist"] = {}
ehun.sendText(msg.to,"ヽ( ^ω^)ノ└ ❉Unbanned All")
elif text.lower() == 'memlist':
if msg._from in admin:
kontak = ehun.getGroup(msg.to)
group = kontak.members
num=1
msgs="═════════List Member═�����═══════-"
for ids in group:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n═════════List Member═════════\n\nTotal Members : %i" % len(group)
ehun.sendText(msg.to, msgs)
# MAU KOPI
elif text.lower() == 'glist':
if msg._from in admin:
ehun.sendText(msg.to, "Tunggu Sebentar. . .")
gid = ehun.getGroupIdsJoined()
h = ""
jml = 0
for i in gid:
h += "╠➩" + "%s\n" % (ehun.getGroup(i).name +" ~> ["+str(len(ehun.getGroup(i).members))+"]")
jml += 1
ehun.sendText(msg.to,"╔═════════════════════════\n║ ☆☞ LIST GROUPS☜☆\n╠═════════════════════════\n" + h + "╠═════════════════════════" + "\n║ Total Groups =" +" ["+str(len(gid))+"]\n╚═════════════════════════")
elif text.lower() == 'adminlist':
if admin == []:
ehun.sendText(msg.to,"The stafflist is empty")
else:
ehun.sendText(msg.to,"Tunggu...")
mc = "||Admin Ehun Bot||\n=====================\n"
for mi_d in admin:
mc += "••>" +ehun.getContact(mi_d).displayName + "\n"
ehun.sendText(msg.to,mc)
print("[Command]Stafflist executed")
elif text.lower() == 'sticker on':
if msg._from in admin:
wait["sticker"] = True
ehun.sendText(msg.to,"Sticker activ")
elif text.lower() == 'sticker off':
if msg._from in admin:
wait["sticker"] = False
ehun.sendText(msg.to,"Sticker non activ")
elif text.lower() == 'k on':
if | |
box. """
@staticmethod
def box_type_default():
box_type = BoxType.objects \
.filter(box_type_code__istartswith='ev') \
.first()
if box_type:
return box_type
box_type = BoxType.objects.first()
if box_type:
return box_type
return None
box_type_help_text = 'Type of box with this number.'
box_type = models.ForeignKey(
BoxType,
on_delete=models.PROTECT,
verbose_name='Type of Box',
help_text=box_type_help_text,
)
""" Type of box with this number. """
location_help_text = 'Location of box'
location = models.ForeignKey(
"Location",
null=True,
blank=True,
on_delete=models.SET_NULL,
help_text=location_help_text
)
"""Location of box"""
product_help_text = 'Product contained in this box, if filled.'
product = models.ForeignKey(
Product,
on_delete=models.PROTECT,
verbose_name='product',
null=True,
blank=True,
help_text=product_help_text,
)
""" Product contained in this box, if filled. """
exp_year_help_text = 'Year the product expires, if filled.'
exp_year = models.IntegerField(
'Year Product Expires',
null=True,
blank=True,
help_text=exp_year_help_text,
)
""" Year the product expires, if filled. """
exp_month_start_help_text = (
'Optional starting month range of when the product expires, if filled.'
)
exp_month_start = models.IntegerField(
'Expiration Start Month (Optional)',
null=True,
blank=True,
validators=MONTH_VALIDATORS,
help_text=exp_month_start_help_text
)
"""
Optional starting month range of when the product expires, if filled.
"""
exp_month_end_help_text = (
'Optional ending month range of when the product expires, if filled.'
)
exp_month_end = models.IntegerField(
'Expiration End Month (Optional)',
null=True,
blank=True,
validators=MONTH_VALIDATORS,
help_text=exp_month_end_help_text,
)
""" Optional emding month range of when the product expires, if filled. """
date_filled_help_text = 'Approximate date box was filled, if filled.'
date_filled = models.DateTimeField(
'Date Box Filled',
null=True,
blank=True,
help_text=date_filled_help_text,
)
""" Approximate date box was filled, if filled. """
quantity_help_text = (
'Approximate or default number of items in the box, if filled.'
)
quantity = models.IntegerField(
'Quantity in Box',
null=True,
blank=True,
help_text=quantity_help_text,
)
""" Approximate or default number of items in the box, if filled. """
def is_filled(self):
if self.product:
return True
return False
# define a default display of Box
def __str__(self):
""" Default way to display this box record. """
display = (
f'{self.box_number} '
f'({self.box_type.box_type_code}/'
f'{self.quantity}) '
)
if self.product:
display += (
f'{self.product.prod_name} '
f'exp: {self.exp_year} '
)
if self.exp_month_start or self.exp_month_end:
display += (
f'({self.exp_month_start:02}-{self.exp_month_end:02}) '
)
if self.date_filled:
display += (
f'filled: {self.date_filled.year}/'
f'{self.date_filled.month:02}/'
f'{self.date_filled.day:02} '
f'at {self.location.loc_code}'
)
return display
def get_absolute_url(self):
return reverse(
'fpiweb:box_details',
kwargs={'pk': self.pk},
)
@staticmethod
def select_location(queryset):
"""Since we're probably going to have a lot of Box queries
where we also want to pull in location data"""
return queryset.select_related(
'location__loc_row',
'location__loc_bin',
'location__loc_tier',
)
class MovePallet(models.Model):
    # NOTE(review): this model appears to duplicate the Pallet model defined
    # below (same fields, same Meta, same verbose_name_plural) -- confirm
    # which of the two is canonical.

    class Meta:
        ordering = ('name',)
        app_label = 'fpiweb'
        verbose_name_plural = 'Pallets'

    # Pallet Status Names
    FILL: str = 'Fill'
    MERGE: str = 'Merge'
    MOVE: str = "Move"

    # (db value, human-readable label) pairs for pallet_status.
    PALLET_STATUS_CHOICES = (
        (FILL, 'Fill pallet for new location'),
        (MERGE, 'Merging boxes on pallet'),
        (MOVE, 'Moving boxes to new location'),
    )

    id_help_text = 'Internal record identifier for a pallet.'
    id = models.AutoField(
        'Internal Pallet ID',
        primary_key=True,
        help_text=id_help_text,
    )
    """ Internal record identifier for a pallet. """
    name_help_text = "Name of pallet"
    name = models.CharField(
        'Name',
        unique=True,
        max_length=200,
        help_text=name_help_text,
    )
    """ Name of pallet. """
    # Nullable: deleting a Location leaves the pallet unplaced (SET_NULL).
    location = models.ForeignKey(
        "Location",
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        help_text="Pallet Location",
    )
    pallet_status_help_text = "Current status of pallet."
    pallet_status = models.CharField(
        'Pallet Status',
        max_length=15,
        choices=PALLET_STATUS_CHOICES,
        help_text=pallet_status_help_text,
    )
    """ Current status of pallet """

    def __str__(self) -> str:
        """ Display the information about this pallet. """
        # NOTE(review): when the status is blank this interpolates the empty
        # status and appends "Default : Fill" -- confirm intended wording.
        if len(self.pallet_status) < 1:
            display = f'Pallet for {self.name} - ' \
                f'status: {self.pallet_status} Default : Fill'
        else:
            display = f'Pallet for {self.name} - ' \
                f'status: {self.pallet_status} '
        return display
class Pallet(models.Model):
    """
    Temporary file to build up a list of boxes on a pallet.
    """

    class Meta:
        ordering = ('name',)
        app_label = 'fpiweb'
        verbose_name_plural = 'Pallets'

    # Pallet Status Names
    FILL: str = 'Fill'
    MERGE: str = 'Merge'
    MOVE: str = 'Move'

    PALLET_STATUS_CHOICES = (
        (FILL, 'Fill pallet for new location'),
        (MERGE, 'Merging boxes on pallet'),
        (MOVE, 'Moving boxes to new location'),
    )

    id_help_text = 'Internal record identifier for a pallet.'
    id = models.AutoField(
        'Internal Pallet ID',
        primary_key=True,
        help_text=id_help_text,
    )
    """ Internal record identifier for a pallet. """

    name_help_text = "Name of pallet"
    name = models.CharField(
        'Name',
        unique=True,
        max_length=200,
        help_text=name_help_text,
    )
    """ Name of pallet. """

    location = models.ForeignKey(
        "Location",
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        help_text="Pallet Location",
    )

    pallet_status_help_text = "Current status of pallet."
    pallet_status = models.CharField(
        'Pallet Status',
        max_length=15,
        choices=PALLET_STATUS_CHOICES,
        help_text=pallet_status_help_text,
    )
    """ Current status of pallet """

    def __str__(self) -> str:
        """ Display the information about this pallet. """
        # Truthiness check instead of len() < 1: handles an unset (None)
        # status without raising TypeError, as well as the empty string.
        if not self.pallet_status:
            display = f'Pallet for {self.name} - ' \
                      f'status: {self.pallet_status} Default : Fill'
        else:
            display = f'Pallet for {self.name} - ' \
                      f'status: {self.pallet_status} '
        return display
class PalletBox(models.Model):
    """
    Temporary file to hold the individual boxes for a pallet. The goal of
    this is to ensure that either a Box record has product, expiration, and
    location or it has no product, no expiration, and no location.
    """

    class Meta:
        # Django requires ordering to be a tuple or list; the previous
        # empty string fails the models.E014 system check. An empty
        # tuple preserves the intent of "no default ordering".
        ordering = ()
        app_label = 'fpiweb'
        verbose_name_plural = 'Pallet Boxes'

    # Pallet Box Status Names
    NEW: str = 'New'
    ORIGINAL: str = 'Original'
    MOVE: str = 'Move'

    PALLET_BOX_STATUS_CHOICES = (
        (NEW, 'New box added'),
        (ORIGINAL, 'Box already here'),
        (MOVE, 'Box being moved'),
    )

    id_help_text = 'Internal record identifier for a pallet box.'
    id = models.AutoField(
        'Internal Pallet Box ID',
        primary_key=True,
        help_text=id_help_text,
    )
    """ Internal record identifier for a pallet box. """

    box_number = models.CharField(
        'Visible Box Number',
        max_length=Box.box_number_max_length,
        null=True,
        blank=True,
        help_text=Box.box_number_help_text,
    )
    """ Number printed in the label on the box. """

    pallet_help_text = 'Internal record identifier for a pallet.'
    pallet = models.ForeignKey(
        Pallet,
        related_name='boxes',
        on_delete=models.PROTECT,
        help_text=pallet_help_text,
    )

    box_help_text = 'Internal record identifier for a box.'
    box = models.ForeignKey(
        Box,
        null=True,
        blank=True,
        on_delete=models.PROTECT,
        help_text=box_help_text,
    )

    product_help_text = 'Product contained in this box, if filled.'
    product = models.ForeignKey(
        Product,
        null=True,
        blank=True,
        on_delete=models.PROTECT,
        verbose_name='product',
        help_text=product_help_text,
    )
    """ Product contained in this box, if filled. """

    exp_year_help_text = 'Year the product expires, if filled.'
    exp_year = models.IntegerField(
        'Year Product Expires',
        null=True,
        blank=True,
        help_text=exp_year_help_text,
    )
    """ Year the product expires, if filled. """

    exp_month_start_help_text = (
        'Optional starting month range of when the product expires, if filled.'
    )
    exp_month_start = models.IntegerField(
        'Expiration Start Month (Optional)',
        null=True,
        blank=True,
        help_text=exp_month_start_help_text
    )
    """
    Optional starting month range of when the product expires, if filled.
    """

    exp_month_end_help_text = (
        'Optional ending month range of when the product expires, if filled.'
    )
    exp_month_end = models.IntegerField(
        'Expiration End Month (Optional)',
        null=True,
        blank=True,
        help_text=exp_month_end_help_text,
    )
    """ Optional ending month range of when the product expires, if filled. """

    box_status_help_text = 'Box on pallet status.'
    box_status = models.CharField(
        'Box Status',
        max_length=15,
        choices=PALLET_BOX_STATUS_CHOICES,
        help_text=box_status_help_text,
    )
    """ Box on pallet status """

    def __str__(self) -> str:
        """ default way to display a pallet box """
        # Space before 'contains' added: the original adjacent f-strings
        # ran the pallet name and 'contains' together.
        display = f'{self.box_number} ({self.pallet}) ' \
                  f'contains {self.product} ' \
                  f'({self.exp_year}'
        if self.exp_month_start or self.exp_month_end:
            display += f'/{self.exp_month_start}/{self.exp_month_end}'
        display += f'), status: {self.box_status}'
        return display
class Activity(models.Model):
"""
Activity (history) from the past.
"""
class Meta:
ordering = ['-date_consumed', 'box_number']
app_label = 'fpiweb'
verbose_name_plural = 'Activities'
# Adjustment Reasons
FILL_EMPTIED: str = 'Fill Emptied'
MOVE_ADDED: str = 'Move Added'
CONSUME_ADDED: str = 'Consume Added'
CONSUME_EMPTIED: str = 'Consume Emptied'
ADJUSTMENT_CODE_CHOICES: list = (
(FILL_EMPTIED, 'Fill emptied previous contents'),
(MOVE_ADDED, 'Move added box'),
(CONSUME_ADDED, 'Consume added box'),
(CONSUME_EMPTIED, 'Consume emptied previous contents')
)
id_help_text = 'Internal record identifier for an activity.'
id = models.AutoField(
'Internal Activity ID',
primary_key=True,
help_text=id_help_text,
)
""" Internal record identifier for an activity. """
box_number_help_text = 'Box number on box at time of consumption.'
box_number = models.CharField(
'Visible Box Number',
max_length=8,
help_text=box_number_help_text,
)
""" Box number on box at time of consumption. """
box_type_help_text = 'Box type holding consumed product.'
box_type = models.CharField(
'Box Type Code',
max_length=10,
help_text=box_type_help_text,
)
""" Box type holding consumed product. """
loc_row_help_text = 'Row box was in at the time product was consumed.'
loc_row = models.CharField(
'Row Location',
max_length=2,
help_text=loc_row_help_text,
)
""" Row box was in at the time product was consumed. """
loc_bin_help_text = 'Bin box was in at the time product was consumed.'
loc_bin = models.CharField(
'Bin Location',
max_length=2,
help_text=loc_bin_help_text,
)
""" Bin box was in at | |
'C6H14O8P',
'g3pi': 'C9H18O11P',
'g3pi_e': 'C9H18O11P',
'g3pi_p': 'C9H18O11P',
'g3ps': 'C6H13NO8P',
'g3ps_e': 'C6H13NO8P',
'g3ps_p': 'C6H13NO8P',
'g6p': 'C6H11O9P',
'g6p_e': 'C6H11O9P',
'g6p_p': 'C6H11O9P',
'gagicolipa': 'C157H271N2O84P4',
'gal': 'C6H12O6',
'gal1p': 'C6H11O9P',
'gal1p_e': 'C6H11O9P',
'gal1p_p': 'C6H11O9P',
'gal_bD_e': 'C6H12O6',
'gal_bD_p': 'C6H12O6',
'gal_e': 'C6H12O6',
'gal_p': 'C6H12O6',
'galct_D': 'C6H8O8',
'galct_D_e': 'C6H8O8',
'galct_D_p': 'C6H8O8',
'galctn_D': 'C6H11O7',
'galctn_D_e': 'C6H11O7',
'galctn_D_p': 'C6H11O7',
'galctn_L': 'C6H11O7',
'galctn_L_e': 'C6H11O7',
'galctn_L_p': 'C6H11O7',
'galt1p': 'C6H13O9P',
'galt_e': 'C6H14O6',
'galt_p': 'C6H14O6',
'galur': 'C6H9O7',
'galur_e': 'C6H9O7',
'galur_p': 'C6H9O7',
'gam1p': 'C6H13NO8P',
'gam6p': 'C6H13NO8P',
'gam6p_e': 'C6H13NO8P',
'gam6p_p': 'C6H13NO8P',
'gam_e': 'C6H14NO5',
'gam_p': 'C6H14NO5',
'gar': 'C7H14N2O8P',
'garagund': 'C77H125N1O22P2',
'gbbtn': 'C7H15NO2',
'gbbtn_e': 'C7H15NO2',
'gbbtn_p': 'C7H15NO2',
'gcald': 'C2H4O2',
'gdp': 'C10H12N5O11P2',
'gdp_e': 'C10H12N5O11P2',
'gdp_p': 'C10H12N5O11P2',
'gdpddman': 'C16H21N5O15P2',
'gdpfuc': 'C16H23N5O15P2',
'gdpmann': 'C16H23N5O16P2',
'gdpofuc': 'C16H21N5O15P2',
'gdptp': 'C10H11N5O20P5',
'gfgaragund': 'C83H135N1O27P2',
'gg4abut': 'C9H15O5N2',
'ggagicolipa': 'C163H281N2O89P4',
'ggbutal': 'C9H16O4N2',
'gggagicolipa': 'C169H291N2O94P4',
'ggptrc': 'C9H20O3N3',
'ghb': 'C4H7O3',
'gicolipa': 'C151H261N2O79P4',
'glc_D': 'C6H12O6',
'glc_D_e': 'C6H12O6',
'glc_D_p': 'C6H12O6',
'glcn': 'C6H11O7',
'glcn_e': 'C6H11O7',
'glcn_p': 'C6H11O7',
'glcr': 'C6H8O8',
'glcr_e': 'C6H8O8',
'glcr_p': 'C6H8O8',
'glcur': 'C6H9O7',
'glcur1p_e': 'C6H8O10P',
'glcur1p_p': 'C6H8O10P',
'glcur_e': 'C6H9O7',
'glcur_p': 'C6H9O7',
'gln_L': 'C5H10N2O3',
'gln_L_e': 'C5H10N2O3',
'gln_L_p': 'C5H10N2O3',
'glntrna': 'C5H9N2O2R',
'glu1sa': 'C5H9NO3',
'glu5p': 'C5H8NO7P',
'glu5sa': 'C5H9NO3',
'glu_D': 'C5H8NO4',
'glu_L': 'C5H8NO4',
'glu_L_e': 'C5H8NO4',
'glu_L_p': 'C5H8NO4',
'glucys': 'C8H13N2O5S',
'glutrna': 'C5H7NO3R',
'glx': 'C2H1O3',
'gly': 'C2H5NO2',
'gly_e': 'C2H5NO2',
'gly_p': 'C2H5NO2',
'glyald': 'C3H6O3',
'glyald_e': 'C3H6O3',
'glyald_p': 'C3H6O3',
'glyb': 'C5H11NO2',
'glyb_e': 'C5H11NO2',
'glyb_p': 'C5H11NO2',
'glyc': 'C3H8O3',
'glyc2p': 'C3H7O6P',
'glyc2p_e': 'C3H7O6P',
'glyc2p_p': 'C3H7O6P',
'glyc3p': 'C3H7O6P',
'glyc3p_e': 'C3H7O6P',
'glyc3p_p': 'C3H7O6P',
'glyc_e': 'C3H8O3',
'glyc_p': 'C3H8O3',
'glyc_R': 'C3H5O4',
'glyc_R_e': 'C3H5O4',
'glyc_R_p': 'C3H5O4',
'glyclt': 'C2H3O3',
'glyclt_e': 'C2H3O3',
'glyclt_p': 'C2H3O3',
'glycogen': 'C6H10O5',
'glytrna': 'C2H4NOR',
'gmeACP': 'C17H29N2O10PRS',
'gmhep17bp': 'C7H12O13P2',
'gmhep1p': 'C7H13O10P',
'gmhep7p': 'C7H13O10P',
'gmp': 'C10H12N5O8P',
'gmp_e': 'C10H12N5O8P',
'gmp_p': 'C10H12N5O8P',
'gp4g': 'C20H24N10O21P4',
'grdp': 'C10H17O7P2',
'grxox': 'X',
'grxrd': 'XH2',
'gslnt': 'C10H16N3O6SSe',
'gsn': 'C10H13N5O5',
'gsn_e': 'C10H13N5O5',
'gsn_p': 'C10H13N5O5',
'gthox': 'C20H30N6O12S2',
'gthox_e': 'C20H30N6O12S2',
'gthox_p': 'C20H30N6O12S2',
'gthrd': 'C10H16N3O6S',
'gthrd_e': 'C10H16N3O6S',
'gthrd_p': 'C10H16N3O6S',
'gtp': 'C10H12N5O14P3',
'gtp_e': 'C10H12N5O14P3',
'gtp_p': 'C10H12N5O14P3',
'gtspmd': 'C17H36N6O5S',
'gua': 'C5H5N5O',
'gua_e': 'C5H5N5O',
'gua_p': 'C5H5N5O',
'h': 'H',
'h2': 'H2',
'h2_e': 'H2',
'h2_p': 'H2',
'h2mb4p': 'C5H9O8P2',
'h2o': 'H2O',
'h2o2': 'H2O2',
'h2o2_e': 'H2O2',
'h2o2_p': 'H2O2',
'h2o_e': 'H2O',
'h2o_p': 'H2O',
'h2s': 'H2S',
'h2s_e': 'H2S',
'h2s_p': 'H2S',
'h_e': 'H',
'h_p': 'H',
'hacolipa_e': 'C192H333N2O101P4',
'halipa_e': 'C126H226N2O40P2',
'hco3': 'CHO3',
'hcys_L': 'C4H9NO2S',
'hdca': 'C16H31O2',
'hdca_e': 'C16H31O2',
'hdca_p': 'C16H31O2',
'hdcap': 'C16H32O5P',
'hdcea': 'C16H29O2',
'hdcea_e': 'C16H29O2',
'hdcea_p': 'C16H29O2',
'hdceap': 'C16H30O5P',
'hdcoa': 'C37H60N7O17P3S',
'hdd2coa': 'C37H60N7O17P3S',
'hdeACP': 'C27H49N2O8PRS',
'hemeO': 'C49H56FeN4O5',
'hexACP': 'C17H31N2O8PRS',
'hg2': 'Hg',
'hg2_e': 'Hg',
'hg2_p': 'Hg',
'hgmeACP': 'C17H29N2O11PRS',
'hhlipa': 'C124H220N2O51P2',
'his_L': 'C6H9N3O2',
'his_L_e': 'C6H9N3O2',
'his_L_p': 'C6H9N3O2',
'hisp': 'C6H11N3O4P',
'histd': 'C6H12N3O',
'histrna': 'C6H8N3OR',
'hkndd': 'C9H8O6',
'hkntd': 'C9H6O6',
'hlipa': 'C117H208N2O45P2',
'hmbil': 'C40H38N4O17',
'hmgth': 'C11H18N3O7S',
'hom_L': 'C4H9NO3',
'hom_L_e': 'C4H9NO3',
'hom_L_p': 'C4H9NO3',
'hphhlipa': 'C131H231N2O60P3',
'hpmeACP': 'C19H33N2O11PRS',
'hpyr': 'C3H3O4',
'hqn': 'C6H6O2',
'hx2coa': 'C27H40N7O17P3S',
'hxa': 'C6H11O2',
'hxa_e': 'C6H11O2',
'hxa_p': 'C6H11O2',
'hxan': 'C5H4N4O',
'hxan_e': 'C5H4N4O',
'hxan_p': 'C5H4N4O',
'hxcoa': 'C27H42N7O17P3S',
'iasp': 'C4H3NO4',
'ichor': 'C10H8O6',
'icit': 'C6H5O7',
'icolipa': 'C145H251N2O74P4',
'idon_L': 'C6H11O7',
'idon_L_e': 'C6H11O7',
'idon_L_p': 'C6H11O7',
'idp': 'C10H11N4O11P2',
'ile_L': 'C6H13NO2',
'ile_L_e': 'C6H13NO2',
'ile_L_p': 'C6H13NO2',
'iletrna': 'C6H12NOR',
'imacp': 'C6H7N2O5P',
'imp': 'C10H11N4O8P',
'imp_e': 'C10H11N4O8P',
'imp_p': 'C10H11N4O8P',
'indole': 'C8H7N',
'indole_e': 'C8H7N',
'indole_p': 'C8H7N',
'inost': 'C6H12O6',
'inost_e': 'C6H12O6',
'inost_p': 'C6H12O6',
'ins': 'C10H12N4O5',
'ins_e': 'C10H12N4O5',
'ins_p': 'C10H12N4O5',
'ipdp': 'C5H9O7P2',
'iscs': 'HSR',
'iscssh': 'HS2R',
'iscu': 'H8O2S6R',
'iscu_2fe2s': 'H4O2S8Fe2R',
'iscu_2fe2s2': 'O2S10Fe4R',
'iscu_4fe4s': 'H4O2S10Fe4R',
'isetac': 'C2H5O4S',
'isetac_e': 'C2H5O4S',
'isetac_p': 'C2H5O4S',
'itp': 'C10H11N4O14P3',
'k': 'K',
'k_e': 'K',
'k_p': 'K',
'kdo': 'C8H13O8',
'kdo2lipid4': 'C84H148N2O37P2',
'kdo2lipid4_e': 'C84H148N2O37P2',
'kdo2lipid4_p': 'C84H148N2O37P2',
'kdo2lipid4L': 'C96H170N2O38P2',
'kdo2lipid4p': 'C100H176N2O38P2',
'kdo8p': 'C8H12O11P',
'kdolipid4': 'C76H137N2O30P2',
'kphphhlipa': 'C139H241N2O70P4',
'lac_D': 'C3H5O3',
'lac_D_e': 'C3H5O3',
'lac_D_p': 'C3H5O3',
'lac_L': 'C3H5O3',
'lac_L_e': 'C3H5O3',
'lac_L_p': 'C3H5O3',
'LalaDglu': 'C8H13N2O5',
'LalaDglu_e': 'C8H13N2O5',
'LalaDglu_p': 'C8H13N2O5',
'LalaDgluMdap': 'C15H25N4O8',
'LalaDgluMdap_e': 'C15H25N4O8',
'LalaDgluMdap_p': 'C15H25N4O8',
'LalaDgluMdapDala': 'C18H30N5O9',
'LalaDgluMdapDala_e': 'C18H30N5O9',
'LalaDgluMdapDala_p': 'C18H30N5O9',
'LalaLglu': 'C8H13N2O5',
'LalaLglu_e': 'C8H13N2O5',
'LalaLglu_p': 'C8H13N2O5',
'lald_D': 'C3H6O2',
'lald_L': 'C3H6O2',
'lcts': 'C12H22O11',
'lcts_e': 'C12H22O11',
'lcts_p': 'C12H22O11',
'leu_L': 'C6H13NO2',
'leu_L_e': 'C6H13NO2',
'leu_L_p': 'C6H13NO2',
'leutrna': 'C6H12NOR',
'lgt_S': 'C13H20N3O8S',
'lipa': 'C110H196N2O39P2',
'lipa_cold_e': 'C114H202N2O39P2',
'lipa_e': 'C110H196N2O39P2',
'lipa_p': 'C110H196N2O39P2',
'lipaold': 'C114H202N2O39P2',
'lipaold_e': 'C114H202N2O39P2',
'lipaold_p': 'C114H202N2O39P2',
'lipidA': 'C68H126N2O23P2',
'lipidAds': 'C68H127N2O20P',
'lipidX': 'C34H64NO12P',
'lipoamp': 'C18H25N5O8PS2',
'lipoate': 'C8H14O2S2',
'lipoate_e': 'C8H14O2S2',
'lipoate_p': 'C8H14O2S2',
'lipopb': 'C8H13OS2',
'lpp_p': 'XC16H30O1',
'lys_L': 'C6H15N2O2',
'lys_L_e': 'C6H15N2O2',
'lys_L_p': 'C6H15N2O2',
'lystrna': 'C6H14N2OR',
'lyx_L': 'C5H10O5',
'lyx_L_e': 'C5H10O5',
'lyx_L_p': 'C5H10O5',
'mal_D': 'C4H4O5',
'mal_D_e': 'C4H4O5',
'mal_D_p': 'C4H4O5',
'mal_L': 'C4H4O5',
'mal_L_e': 'C4H4O5',
'mal_L_p': 'C4H4O5',
'malACP': 'C14H22N2O10PRS',
'malcoa': 'C24H33N7O19P3S',
'malcoame': 'C25H36N7O19P3S',
'malt': 'C12H22O11',
'malt6p': 'C12H21O14P',
'malt_e': 'C12H22O11',
'malt_p': 'C12H22O11',
'malthp': 'C42H72O36',
'malthx': 'C36H62O31',
'malthx_e': 'C36H62O31',
'malthx_p': 'C36H62O31',
'maltpt': 'C30H52O26',
'maltpt_e': 'C30H52O26',
'maltpt_p': 'C30H52O26',
'malttr': 'C18H32O16',
'malttr_e': 'C18H32O16',
'malttr_p': 'C18H32O16',
'maltttr': 'C24H42O21',
'maltttr_e': 'C24H42O21',
'maltttr_p': 'C24H42O21',
'man': 'C6H12O6',
'man1p': 'C6H11O9P',
'man6p': 'C6H11O9P',
'man6p_e': 'C6H11O9P',
'man6p_p': 'C6H11O9P',
'man6pglyc': 'C9H14O12P',
'man_e': 'C6H12O6',
'man_p': 'C6H12O6',
'mana': 'C6H11O7',
'manglyc_e': 'C9H15O9',
'manglyc_p': 'C9H15O9',
'mdhdhf': 'C5H8O4',
'melib': 'C12H22O11',
'melib_e': 'C12H22O11',
'melib_p': 'C12H22O11',
'meoh': 'CH4O1',
'meoh_e': 'CH4O1',
'meoh_p': 'CH4O1',
'mercppyr': 'C3H3O3S',
'met_D': 'C5H10NO2S',
'met_D_e': 'C5H10NO2S',
'met_D_p': 'C5H10NO2S',
'met_L': 'C5H11NO2S',
'met_L_e': 'C5H11NO2S',
'met_L_p': 'C5H11NO2S',
'methf': 'C20H20N7O6',
'metsox_R_L': 'C5H11NO3S',
'metsox_R_L_e': 'C5H11NO3S',
'metsox_R_L_p': 'C5H11NO3S',
'metsox_S_L': 'C5H11NO3S',
'metsox_S_L_e': 'C5H11NO3S',
'metsox_S_L_p': 'C5H11NO3S',
'mettrna': 'C5H10NOSR',
'mg2': 'Mg',
'mg2_e': 'Mg',
'mg2_p': 'Mg',
'mi1p_D': 'C6H11O9P',
'micit': 'C7H7O7',
'mincyc_e': 'C23H27N3O7',
'mincyc_p': 'C23H27N3O7',
'minohp_e': 'C6H6O24P6',
'minohp_p': 'C6H6O24P6',
'mlthf': 'C20H21N7O6',
'mmcoa_S': 'C25H35N7O19P3S',
'mmet': 'C6H14NO2S',
'mmet_e': 'C6H14NO2S',
'mmet_p': 'C6H14NO2S',
'mn2': 'Mn',
'mn2_e': 'Mn',
'mn2_p': 'Mn',
'mnl1p': 'C6H13O9P',
'mnl_e': 'C6H14O6',
'mnl_p': 'C6H14O6',
'moadamp': 'C11H12N5O8P1X',
'moadcoo': 'C1O2X',
'moadcosh': 'C1H1O1S1X',
'mobd': 'MoO4',
'mobd_e': 'MoO4',
'mobd_p': 'MoO4',
'moco': 'C10H10N5O8PS2Mo',
'mococdp': 'C19H22N8O15P2S2Mo',
'mocogdp': 'C20H22N10O15P2S2Mo',
'mpt': 'C10H10N5O6PS2Cu',
'mptamp': 'C20H22N10O12P2S2Cu',
'mql8': 'C51H74O2',
'mqn8': 'C51H72O2',
'msa': 'C3H3O3',
'mso3': 'CH3O3S',
'mso3_e': 'CH3O3S',
'mso3_p': 'CH3O3S',
'mthgxl': 'C3H4O2',
'mththf': 'C5H10O5',
'murein3p3p_p': 'C68H104N12O38',
'murein3px3p_p': 'C68H102N12O37',
'murein3px4p_p': 'C71H107N13O38',
'murein4p3p_p': 'C71H109N13O39',
'murein4p4p_p': 'C74H114N14O40',
'murein4px4p4p_p': 'C111H169N21O59',
'murein4px4p_p': 'C74H112N14O39',
'murein4px4px4p_p': 'C111H167N21O58',
'murein5p3p_p': 'C74H114N14O40',
'murein5p4p_p': 'C77H119N15O41',
'murein5p5p5p_p': 'C120H186N24O63',
'murein5p5p_p': 'C80H124N16O42',
'murein5px3p_p': 'C74H112N14O39',
'murein5px4p_p': 'C77H117N15O40',
'murein5px4px4p_p': 'C114H172N22O59',
'myrsACP': 'C25H47N2O8PRS',
'N1aspmd': 'C9H23N3O',
'n2o': 'N2O',
'n2o_e': 'N2O',
'n2o_p': 'N2O',
'n8aspmd': 'C9H23N3O',
'na1': 'Na',
'na15dap': 'C8H24N3',
'na1_e': 'Na',
'na1_p': 'Na',
'nac': 'C6H4NO2',
'nac_e': 'C6H4NO2',
'nac_p': 'C6H4NO2',
'nad': 'C21H26N7O14P2',
'nadh': 'C21H27N7O14P2',
'nadp': 'C21H25N7O17P3',
'nadph': 'C21H26N7O17P3',
'ncam': 'C6H6N2O',
'nh4': 'H4N',
'nh4_e': 'H4N',
'nh4_p': 'H4N',
'ni2': 'Ni',
'ni2_e': 'Ni',
'ni2_p': 'Ni',
'nicrnt': 'C11H12NO9P',
'nmn': 'C11H14N2O8P',
'nmn_e': 'C11H14N2O8P',
'nmn_p': 'C11H14N2O8P',
'Nmtrp': 'C12H14N2O2',
'no': 'NO',
'no2': 'NO2',
'no2_e': 'NO2',
'no2_p': 'NO2',
'no3': 'NO3',
'no3_e': 'NO3',
'no3_p': 'NO3',
'no_e': 'NO',
'no_p': 'NO',
'novbcn_e': 'C31H36N2O11',
'novbcn_p': 'C31H36N2O11',
'o16a2und_p': 'C123H200N2O57P2',
'o16a3und_p': 'C157H255N3O82P2',
'o16a4colipa_e': 'C312H523N6O200P4',
'o16a4colipa_p': 'C312H523N6O200P4',
'o16a4und_p': 'C191H310N4O107P2',
'o16aund': 'C89H145N1O32P2',
'o16aund_p': 'C89H145N1O32P2',
'o2': 'O2',
'o2_e': 'O2',
'o2_p': 'O2',
'o2s': 'O2',
'o2s_e': 'O2',
'o2s_p': 'O2',
'oaa': 'C4H2O5',
'oc2coa': 'C29H44N7O17P3S',
'ocACP': 'C19H35N2O8PRS',
'occoa': 'C29H46N7O17P3S',
'ocdca': 'C18H35O2',
'ocdca_e': 'C18H35O2',
'ocdca_p': 'C18H35O2',
'ocdcaACP': 'C29H55N2O8PRS',
'ocdcap': 'C18H36O5P',
'ocdcea': 'C18H33O2',
'ocdcea_e': 'C18H33O2',
'ocdcea_p': 'C18H33O2',
'ocdceap': 'C18H34O5P',
'octa': 'C8H15O2',
'octa_e': 'C8H15O2',
'octa_p': 'C8H15O2',
'octapb': 'C8H15O',
'octdp': 'C40H65O7P2',
'octeACP': 'C29H53N2O8PRS',
'od2coa': 'C39H64N7O17P3S',
'odecoa': 'C39H64N7O17P3S',
'ogmeACP': 'C17H27N2O11PRS',
'ohpb': 'C4H4O8P',
'op4en': 'C5H5O3',
'opmeACP': 'C19H31N2O11PRS',
'orn': 'C5H13N2O2',
'orn_e': 'C5H13N2O2',
'orn_p': 'C5H13N2O2',
'orot': 'C5H3N2O4',
'orot5p': 'C10H10N2O11P',
'orot_e': 'C5H3N2O4',
'orot_p': 'C5H3N2O4',
'oxa': 'C2O4',
'oxadpcoa': 'C27H37N7O20P3S',
'oxalcoa': 'C23H31N7O19P3S',
'oxam': 'C2H2NO3',
'oxur': 'C3H3N2O4',
'pa120': 'C27H51O8P1',
'pa120_p': 'C27H51O8P1',
'pa140': 'C31H59O8P1',
'pa140_p': 'C31H59O8P1',
'pa141': 'C31H55O8P1',
'pa141_p': 'C31H55O8P1',
'pa160': 'C35H67O8P1',
'pa160_p': 'C35H67O8P1',
'pa161': 'C35H63O8P1',
'pa161_p': 'C35H63O8P1',
'pa180': 'C39H75O8P1',
'pa180_p': 'C39H75O8P1',
'pa181': 'C39H71O8P1',
'pa181_p': 'C39H71O8P1',
'pac': 'C8H7O2',
'pacald': 'C8H8O',
'pacald_e': 'C8H8O',
'pacald_p': 'C8H8O',
'palmACP': 'C27H51N2O8PRS',
'pan4p': 'C11H21N2O7PS',
'pant_R': 'C6H11O4',
'pap': 'C10H11N5O10P2',
'paps': 'C10H11N5O13P2S',
'pdx5p': 'C8H10NO6P',
'pe120': 'C29H58N1O8P1',
'pe120_p': 'C29H58N1O8P1',
'pe140': 'C33H66N1O8P1',
'pe140_p': 'C33H66N1O8P1',
'pe141': 'C33H62N1O8P1',
'pe141_p': 'C33H62N1O8P1',
'pe160': 'C37H74N1O8P1',
'pe160_p': 'C37H74N1O8P1',
'pe161': 'C37H70N1O8P1',
'pe161_p': 'C37H70N1O8P1',
'pe180': 'C41H82N1O8P1',
'pe180_p': 'C41H82N1O8P1',
'pe181': 'C41H78N1O8P1',
'pe181_p': 'C41H78N1O8P1',
'peamn_e': 'C8H12N',
'peamn_p': 'C8H12N',
'pep': 'C3H2O6P',
'pg120': 'C30H58O10P1',
'pg120_p': 'C30H58O10P1',
'pg140': 'C34H66O10P1',
'pg140_p': 'C34H66O10P1',
'pg141': 'C34H62O10P1',
'pg141_p': 'C34H62O10P1',
'pg160': 'C38H74O10P1',
'pg160_p': 'C38H74O10P1',
'pg161': 'C38H70O10P1',
'pg161_p': 'C38H70O10P1',
'pg180': 'C42H82O10P1',
'pg180_p': 'C42H82O10P1',
'pg181': 'C42H78O10P1',
'pg181_p': 'C42H78O10P1',
'pgp120': 'C30H57O13P2',
'pgp120_p': 'C30H57O13P2',
'pgp140': 'C34H65O13P2',
'pgp140_p': 'C34H65O13P2',
'pgp141': 'C34H61O13P2',
'pgp141_p': 'C34H61O13P2',
'pgp160': 'C38H73O13P2',
'pgp160_p': 'C38H73O13P2',
'pgp161': 'C38H69O13P2',
'pgp161_p': 'C38H69O13P2',
'pgp180': 'C42H81O13P2',
'pgp180_p': 'C42H81O13P2',
'pgp181': 'C42H77O13P2',
'pgp181_p': 'C42H77O13P2',
'phaccoa': 'C29H38N7O17P3S',
'phe_L': 'C9H11NO2',
'phe_L_e': 'C9H11NO2',
'phe_L_p': 'C9H11NO2',
'pheme': 'C34H30FeN4O4',
'pheme_e': 'C34H30FeN4O4',
'pheme_p': 'C34H30FeN4O4',
'phetrna': 'C9H10NOR',
'phhlipa': 'C124H219N2O54P3',
'phom': 'C4H8NO6P',
'phphhlipa': 'C131H230N2O63P4',
'phpyr': 'C9H7O3',
'phthr': 'C4H8NO7P',
'pi': 'HO4P',
'pi_e': 'HO4P',
'pi_p': 'HO4P',
'pimACP': 'C18H31N2O10PRS',
'pmeACP': 'C19H33N2O10PRS',
'pmtcoa': 'C37H62N7O17P3S',
'pnto_R': 'C9H16NO5',
'pnto_R_e': 'C9H16NO5',
'pnto_R_p': 'C9H16NO5',
'poaac': 'C3H5NO3',
'ppa': 'C3H5O2',
'ppa_e': 'C3H5O2',
'ppa_p': 'C3H5O2',
'ppal': 'C3H6O',
'ppal_e': 'C3H6O',
'ppal_p': 'C3H6O',
'ppap': 'C3H5O5P',
'ppbng': 'C10H13N2O4',
'ppcoa': 'C24H36N7O17P3S',
'ppgpp': 'C10H11N5O17P4',
'pphn': 'C10H8O6',
'ppi': 'HO7P2',
'ppp9': 'C34H32N4O4',
'pppg9': 'C34H38N4O4',
'pppi': 'HO10P3',
'pppn': 'C9H9O2',
'pppn_e': 'C9H9O2',
'pppn_p': 'C9H9O2',
'ppt_e': 'HO3P',
'ppt_p': 'HO3P',
'pram': 'C5H11NO7P',
'pran': 'C12H13NO9P',
'prbamp': 'C15H19N5O14P2',
'prbatp': 'C15H19N5O20P4',
'preq0': 'C7H5N5O',
'preq1': 'C7H10N5O',
'prfp': 'C15H21N5O15P2',
'prlp': 'C15H21N5O15P2',
'pro_L': 'C5H9NO2',
'pro_L_e': 'C5H9NO2',
'pro_L_p': 'C5H9NO2',
'progly': 'C7H12N2O3',
'progly_e': 'C7H12N2O3',
'progly_p': 'C7H12N2O3',
'protrna': 'C5H8NOR',
'prpp': 'C5H8O14P3',
'ps120': 'C30H57N1O10P1',
'ps140': 'C34H65N1O10P1',
'ps141': | |
from torch.cuda.amp import GradScaler
from utils.loss_accumulator import LossAccumulator
from torch.nn import Module
import logging
from trainer.losses import create_loss
import torch
from collections import OrderedDict
from trainer.inject import create_injector
from utils.util import recursively_detach, opt_get
logger = logging.getLogger('base')
# Defines the expected API for a single training step
class ConfigurableStep(Module):
def __init__(self, opt_step, env):
super(ConfigurableStep, self).__init__()
self.step_opt = opt_step
self.env = env
self.opt = env['opt']
self.gen_outputs = opt_step['generator_outputs']
self.loss_accumulator = LossAccumulator()
self.optimizers = None
self.scaler = GradScaler(enabled=self.opt['fp16'])
self.grads_generated = False
self.min_total_loss = opt_step['min_total_loss'] if 'min_total_loss' in opt_step.keys() else -999999999
self.clip_grad_eps = opt_get(opt_step, ['clip_grad_eps'], None)
# This is a half-measure that can be used between anomaly_detection and running a potentially problematic
# trainer bare. With this turned on, the optimizer will not step() if a nan grad is detected. If a model trips
# this warning 10 times in a row, the training session is aborted and the model state is saved. This has a
# noticeable affect on training speed, but nowhere near as bad as anomaly_detection.
self.check_grads_for_nan = opt_get(opt_step, ['check_grads_for_nan'], False)
self.nan_counter = 0
self.injectors = []
if 'injectors' in self.step_opt.keys():
injector_names = []
for inj_name, injector in self.step_opt['injectors'].items():
assert inj_name not in injector_names # Repeated names are always an error case.
injector_names.append(inj_name)
self.injectors.append(create_injector(injector, env))
losses = []
self.weights = {}
if 'losses' in self.step_opt.keys():
for loss_name, loss in self.step_opt['losses'].items():
assert loss_name not in self.weights.keys() # Repeated names are always an error case.
losses.append((loss_name, create_loss(loss, env)))
self.weights[loss_name] = loss['weight']
self.losses = OrderedDict(losses)
def get_network_for_name(self, name):
return self.env['generators'][name] if name in self.env['generators'].keys() \
else self.env['discriminators'][name]
# Subclasses should override this to define individual optimizers. They should all go into self.optimizers.
# This default implementation defines a single optimizer for all Generator parameters.
# Must be called after networks are initialized and wrapped.
def define_optimizers(self):
opt_configs = [opt_get(self.step_opt, ['optimizer_params'], None)]
self.optimizers = []
if opt_configs[0] is None:
return
training = self.step_opt['training']
training_net = self.get_network_for_name(training)
nets = [training_net]
training = [training]
for net_name, net, opt_config in zip(training, nets, opt_configs):
# Configs can organize parameters by-group and specify different learning rates for each group. This only
# works in the model specifically annotates which parameters belong in which group using PARAM_GROUP.
optim_params = {'default': {'params': [], 'lr': opt_config['lr']}}
if opt_config is not None and 'param_groups' in opt_config.keys():
for k, pg in opt_config['param_groups'].items():
optim_params[k] = {'params': [], 'lr': pg['lr']}
import torch.nn as nn
norm_modules = (nn.BatchNorm2d, nn.InstanceNorm2d, nn.BatchNorm1d, nn.InstanceNorm1d,
nn.BatchNorm3d, nn.InstanceNorm3d, nn.GroupNorm, nn.LayerNorm)
emb_modules = (nn.Embedding, nn.EmbeddingBag)
param_names_notweights = set()
all_param_names = set()
param_map = {}
for mn, m in net.named_modules():
for k, v in m.named_parameters():
v.is_bias = k.endswith(".bias")
v.is_weight = k.endswith(".weight")
v.is_norm = isinstance(m, norm_modules)
v.is_emb = isinstance(m, emb_modules)
fpn = '%s.%s' % (mn, k) if mn else k # full param name
all_param_names.add(fpn)
param_map[fpn] = v
if v.is_bias or v.is_norm or v.is_emb:
param_names_notweights.add(fpn)
# Some models can specify some parameters to be in different groups.
param_group = "default"
if hasattr(v, 'PARAM_GROUP'):
if v.PARAM_GROUP in optim_params.keys():
param_group = v.PARAM_GROUP
else:
logger.warning(f'Model specifies a custom param group {v.PARAM_GROUP} which is not configured. '
f'The same LR will be used for all parameters.')
if v.requires_grad:
optim_params[param_group]['params'].append(v)
else:
if self.env['rank'] <= 0:
logger.warning('Params [{:s}] will not optimize.'.format(k))
params_notweights = [param_map[k] for k in sorted(list(param_names_notweights))]
params_weights = [param_map[k] for k in sorted(list(all_param_names ^ param_names_notweights))]
if 'optimizer' not in self.step_opt.keys() or self.step_opt['optimizer'] == 'adam':
opt = torch.optim.Adam(list(optim_params.values()), lr=opt_config['lr'],
weight_decay=opt_config['weight_decay'],
betas=(opt_config['beta1'], opt_config['beta2']))
elif self.step_opt['optimizer'] == 'adamw':
groups = [
{ 'params': params_weights, 'weight_decay': opt_get(opt_config, ['weight_decay'], 0) },
{ 'params': params_notweights, 'weight_decay': 0 }
]
opt = torch.optim.AdamW(groups, lr=opt_config['lr'],
weight_decay=opt_get(opt_config, ['weight_decay'], 1e-2),
betas=(opt_get(opt_config, ['beta1'], .9), opt_get(opt_config, ['beta2'], .999)))
elif self.step_opt['optimizer'] == 'lars':
from trainer.optimizers.larc import LARC
from trainer.optimizers.sgd import SGDNoBiasMomentum
optSGD = SGDNoBiasMomentum(list(optim_params.values()), lr=opt_config['lr'], momentum=opt_config['momentum'],
weight_decay=opt_config['weight_decay'])
opt = LARC(optSGD, trust_coefficient=opt_config['lars_coefficient'])
elif self.step_opt['optimizer'] == 'sgd':
from torch.optim import SGD
opt = SGD(list(optim_params.values()), lr=opt_config['lr'], momentum=opt_config['momentum'], weight_decay=opt_config['weight_decay'])
opt._config = opt_config # This is a bit seedy, but we will need these configs later.
opt._config['network'] = net_name
self.optimizers.append(opt)
# Returns all optimizers used in this step.
def get_optimizers(self):
assert self.optimizers is not None
return self.optimizers
# Returns optimizers which are opting in for default LR scheduling.
def get_optimizers_with_default_scheduler(self):
assert self.optimizers is not None
return self.optimizers
# Returns the names of the networks this step will train. Other networks will be frozen.
def get_networks_trained(self):
if isinstance(self.step_opt['training'], list):
return self.step_opt['training']
else:
return [self.step_opt['training']]
def get_training_network_name(self):
if isinstance(self.step_opt['training'], list):
return self.step_opt['training'][0]
else:
return self.step_opt['training']
    # Performs all forward and backward passes for this step given an input state. All input states are lists of
    # chunked tensors. Use grad_accum_step to dereference these steps. Should return a dict of tensors that later
    # steps might use. These tensors are automatically detached and accumulated into chunks.
    def do_forward_backward(self, state, grad_accum_step, amp_loss_id, train=True, no_ddp_sync=False, loss_accumulator=None):
        """
        Run this step's injectors and losses on one gradient-accumulation
        chunk, then backprop the weighted total loss.

        :param state: dict mapping keys to lists of chunked tensors;
            grad_accum_step selects the chunk used here.
        :param grad_accum_step: index of the accumulation chunk to process.
        :param amp_loss_id: id stashed in env for losses that call backward()
            internally.
        :param train: when False, eval-tagged injectors run and no backward
            pass is performed.
        :param no_ddp_sync: run injectors under the network's no_sync() when
            it has one (DDP gradient-sync suppression).
        :param loss_accumulator: optional override for self.loss_accumulator.
        :return: dict of state values created by this step, detached.
        """
        local_state = {}  # <-- Will store the entire local state to be passed to injectors & losses.
        new_state = {}  # <-- Will store state values created by this step for returning to ExtensibleTrainer.
        for k, v in state.items():
            local_state[k] = v[grad_accum_step]
        local_state['train_nets'] = str(self.get_networks_trained())
        loss_accumulator = self.loss_accumulator if loss_accumulator is None else loss_accumulator

        # Some losses compute backward() internally. Accommodate this by stashing the amp_loss_id in env.
        self.env['amp_loss_id'] = amp_loss_id
        self.env['current_step_optimizers'] = self.optimizers
        self.env['training'] = train

        # Inject in any extra dependencies.
        for inj in self.injectors:
            # Don't do injections tagged with eval unless we are not in train mode.
            if train and 'eval' in inj.opt.keys() and inj.opt['eval']:
                continue
            # Likewise, don't do injections tagged with train unless we are not in eval.
            if not train and 'train' in inj.opt.keys() and inj.opt['train']:
                continue
            # Don't do injections tagged with 'after' or 'before' when we are out of spec.
            if 'after' in inj.opt.keys() and self.env['step'] < inj.opt['after'] or \
               'before' in inj.opt.keys() and self.env['step'] > inj.opt['before'] or \
               'every' in inj.opt.keys() and self.env['step'] % inj.opt['every'] != 0:
                continue
            # 'no_accum' injectors only fire on the first accumulation chunk.
            if 'no_accum' in inj.opt.keys() and grad_accum_step > 0:
                continue
            training_net = self.get_network_for_name(self.step_opt['training'])
            if no_ddp_sync and hasattr(training_net, 'no_sync'):
                with training_net.no_sync():
                    injected = inj(local_state)
            elif opt_get(inj.opt, ['no_grad'], False):
                with torch.no_grad():
                    injected = inj(local_state)
            else:
                injected = inj(local_state)
            local_state.update(injected)
            new_state.update(injected)

        if len(self.losses) > 0:
            # Finally, compute the losses.
            total_loss = 0
            for loss_name, loss in self.losses.items():
                # Some losses only activate after a set number of steps. For example, proto-discriminator losses can
                # be very disruptive to a generator.
                if 'after' in loss.opt.keys() and loss.opt['after'] > self.env['step'] or \
                   'before' in loss.opt.keys() and self.env['step'] > loss.opt['before'] or \
                   'every' in loss.opt.keys() and self.env['step'] % loss.opt['every'] != 0:
                    continue
                if loss.is_stateful():
                    l, lstate = loss(self.get_network_for_name(self.step_opt['training']), local_state)
                    local_state.update(lstate)
                    new_state.update(lstate)
                else:
                    l = loss(self.get_network_for_name(self.step_opt['training']), local_state)
                total_loss += l * self.weights[loss_name]
                # Record metrics.
                if isinstance(l, torch.Tensor):
                    loss_accumulator.add_loss(loss_name, l)
                for n, v in loss.extra_metrics():
                    loss_accumulator.add_loss("%s_%s" % (loss_name, n), v)
                loss.clear_metrics()

            # In some cases, the loss could not be set (e.g. all losses have 'after')
            if train and isinstance(total_loss, torch.Tensor):
                loss_accumulator.add_loss("%s_total" % (self.get_training_network_name(),), total_loss)
                reset_required = total_loss < self.min_total_loss
                # Scale the loss down by the accumulation factor.
                total_loss = total_loss / self.env['mega_batch_factor']

                # Get dem grads!
                self.scaler.scale(total_loss).backward()

                if reset_required:
                    # You might be scratching your head at this. Why would you zero grad as opposed to not doing a
                    # backwards? Because DDP uses the backward() pass as a synchronization point and there is not a good
                    # way to simply bypass backward. If you want a more efficient way to specify a min_loss, use or
                    # implement it at the loss level.
                    self.get_network_for_name(self.step_opt['training']).zero_grad()
                    loss_accumulator.increment_metric("%s_skipped_steps" % (self.get_training_network_name(),))

                self.grads_generated = True
        # Detach all state variables. Within the step, gradients can flow. Once these variables leave the step
        # we must release the gradients.
        new_state = recursively_detach(new_state)
        return new_state
# Performs the optimizer step after all gradient accumulation is completed. Default implementation simply steps()
# all self.optimizers.
def do_step(self, step):
if not self.grads_generated:
return
self.grads_generated = False
for opt in self.optimizers:
# Optimizers can be opted out in the early stages of training.
after = | |
<reponame>SobolGaming/Trivianator
import json
import re
from django.db import models
from django.core.exceptions import ValidationError, ImproperlyConfigured
from django.core.validators import MaxValueValidator, validate_comma_separated_integer_list
from django.utils.timezone import now
from django.conf import settings
from model_utils.managers import InheritanceManager
from django.utils.translation import ugettext as _
from .validators import archive_file_validator
from django.contrib.auth.models import User
# Create your models here.
# Choices for a question's type: one correct answer vs. several.
QUESTION_TYPES = (
    ('single_choice', _("Single correct answer")),
    ('multi_choice', _("Multiple correct answers")),
)

# Choices controlling how a question's answers are ordered when displayed.
ANSWER_ORDER_OPTIONS = (
    ('none', _('None')),
    ('content', _('Content')),
    ('random', _('Random')),
)

# Choices for Quiz.answers_reveal_option: when correct answers are revealed.
ANSWER_REVEAL_OPTIONS = (
    (1, _("After each question")),
    (2, _("At end of quiz")),
    (3, _("Never")),
)
class CategoryManager(models.Manager):
    """Manager providing a helper that creates slug-normalized categories."""

    def new_category(self, category):
        """Create and return a Category with a normalized name.

        Runs of whitespace are collapsed to single hyphens and the result is
        lower-cased, e.g. "My  Topic" -> "my-topic".
        """
        # Raw string: '\s' in a plain literal is an invalid escape sequence
        # (warns on modern Pythons).
        normalized = re.sub(r'\s+', '-', category).lower()
        # Manager.create() already INSERTs the row; the original extra
        # save() issued a redundant second query.
        return self.create(category=normalized)
class Category(models.Model):
    """A named grouping used to organize quizzes and questions."""

    # Nullable + unique: at most one unnamed category can exist.
    category = models.CharField(
        verbose_name=_("Category"),
        max_length=250,
        blank=True,
        unique=True,
        null=True,
    )

    objects = CategoryManager()

    class Meta:
        verbose_name = _("Category")
        verbose_name_plural = _("Categories")

    def __str__(self):
        return self.category
class Quiz(models.Model):
    """A quiz: a slug-addressable, optionally timed set of questions.

    Supports random question order, per-attempt question limits, answer
    reveal policies, single-attempt enforcement, competitive time windows
    and leaderboard display options.
    """

    title = models.CharField(
        verbose_name=_("Title"),
        max_length=60, blank=False)
    url = models.SlugField(
        max_length=60, blank=False,
        help_text=_("a user friendly url"),
        verbose_name=_("user friendly url"))
    category = models.ForeignKey(
        Category, null=True, blank=True,
        verbose_name=_("Category"), on_delete=models.CASCADE)
    random_order = models.BooleanField(
        blank=False, default=False,
        verbose_name=_("Random Order"),
        help_text=_("Display the questions in a random order or as they are set?"))
    max_questions = models.PositiveIntegerField(
        blank=True, null=True, verbose_name=_("Max Questions"),
        help_text=_("Number of questions to be answered on each attempt."))
    answers_reveal_option = models.PositiveSmallIntegerField(
        blank=False, default=1, choices = ANSWER_REVEAL_OPTIONS,
        help_text=_("Determines when correct answers are revealed for each question."),
        verbose_name=_("Answers Reveal Option"))
    saved = models.BooleanField(
        blank=True, default=True,
        help_text=_("If yes, the result of each attempt by a user will be saved."),
        verbose_name=_("Saved"))
    single_attempt = models.BooleanField(
        blank=False, default=False,
        help_text=_("If yes, only one attempt by a user will be permitted."
                    " Non users cannot sit this quiz."),
        verbose_name=_("Single Attempt"))
    draft = models.BooleanField(
        blank=True, default=False,
        verbose_name=_("Draft"),
        help_text=_("If yes, the quiz is not displayed in the quiz list and can only be"
                    " taken by users who can edit quizzes."))
    timer = models.PositiveSmallIntegerField(blank=True, default=0,
        verbose_name=_("Quiz Timer"),
        help_text=_("If > 0, amount of seconds allowed to complete the quiz."))
    show_leaderboards = models.BooleanField(blank=True, default=True,
        verbose_name=_("Show Leaderboards"),
        help_text=_("Boolean if leaderboards should be displayed after quiz completion."))
    competitive = models.BooleanField(blank=True, default=False,
        verbose_name=_("Competitive"),
        help_text=_("Boolean if this quiz is competitive. If 'True' it disables "
                    "displaying results of the quiz or of the leaderboards, although "
                    "leaderboard data is collected. Requires 'StartTime' and 'EndTime' "
                    "to be specified."))
    start_time = models.DateTimeField(blank=True, null=True, default=None,
        verbose_name=_("StartTime"),
        help_text=_("Start DateTime of the quiz."))
    end_time = models.DateTimeField(blank=True, null=True, default=None,
        verbose_name=_("EndTime"),
        help_text=_("End DateTime of the quiz."))
    message = models.TextField(null=True, blank=True,
        help_text=_("Message to Display prior to taking quiz."),
        verbose_name=_("Quiz Message."))
    num_display = models.PositiveSmallIntegerField(null = False, blank=True, default= 30,
        help_text=_("The Number of users to display in Leaderboard."),
        verbose_name=_("Number of users to Display"))

    def save(self, force_insert=False, force_update=False, *args, **kwargs):
        """Slug-normalize ``url`` before persisting.

        Whitespace runs become hyphens, the slug is lower-cased and stripped
        of everything that is not alphanumeric or a hyphen.  A single-attempt
        quiz always has its sittings saved so the attempt can be detected.
        """
        # Raw string: '\s' in a plain literal is an invalid escape sequence
        # (warns on modern Pythons).
        self.url = re.sub(r'\s+', '-', self.url).lower()
        self.url = ''.join(letter for letter in self.url if
                           letter.isalnum() or letter == '-')
        if self.single_attempt is True:
            self.saved = True
        super(Quiz, self).save(force_insert, force_update, *args, **kwargs)

    class Meta:
        verbose_name = _("Quiz")
        verbose_name_plural = _("Quizzes")

    def __str__(self):
        return self.title

    def get_questions(self):
        """Return all of this quiz's questions as their concrete subclasses."""
        return self.question_set.all().select_subclasses()

    @property
    def get_max_score(self):
        """Maximum achievable score: one point per question."""
        return self.get_questions().count()

    def anon_score_id(self):
        """Session key under which an anonymous user's score is stored."""
        return str(self.id) + "_score"

    def anon_q_list(self):
        """Session key for an anonymous user's remaining-question list."""
        return str(self.id) + "_q_list"

    def anon_q_data(self):
        """Session key for an anonymous user's per-question data."""
        return str(self.id) + "_data"

    @property
    def get_leaderboard(self):
        """Leaderboard rows, best score first, capped at ``num_display`` if > 0."""
        if self.num_display > 0:
            return Leaderboard.objects.filter(quiz=self).order_by('-score', 'completion_time')[:self.num_display]
        return Leaderboard.objects.filter(quiz=self).order_by('-score', 'completion_time')

    @property
    def get_leaderboard_count(self):
        """Total number of leaderboard entries for this quiz."""
        return Leaderboard.objects.filter(quiz=self).count()

    @property
    def end_time_expired(self):
        """True once a competitive quiz's window has closed.

        Non-competitive quizzes always report True.
        NOTE(review): assumes ``end_time`` is set whenever ``competitive`` is
        True; a competitive quiz with ``end_time=None`` raises TypeError
        here — confirm this is validated upstream.
        """
        if self.competitive:
            return now() >= self.end_time
        return True

    @property
    def get_time_until_start(self):
        """Seconds until a competitive quiz opens; 0 if open or non-competitive."""
        if self.competitive:
            if self.start_time > now():
                return (self.start_time - now()).seconds
            return 0
        return 0

    def get_quiz_sit_info(self, user):
        """Return ``(percent_correct, sitting)`` for *user*'s best attempt.

        When several sittings exist, keeps the first sitting of a
        saved/competitive/single-attempt quiz (or, failing that, the
        highest-scoring one) and deletes every other sitting.  Returns
        ``(None, None)`` if the user never sat this quiz.
        """
        try:
            sitting = Sitting.objects.get(quiz=self, user=user)
            return sitting.get_percent_correct, sitting
        except Sitting.MultipleObjectsReturned:
            sittings = Sitting.objects.filter(quiz=self, user=user)
            best_sit = (None, 0)
            # check first for saved/competitive/single-attempt quizzes
            delete_list = []
            for sit in sittings:
                if (sit.quiz.saved is True or sit.quiz.competitive is True or sit.quiz.single_attempt is True) and best_sit[0] is None:
                    best_sit = (sit, sit.get_percent_correct)
                else:
                    delete_list.append(sit)
            if best_sit[0] is not None:
                for entry in delete_list:
                    entry.delete()
            else:
                # check next for best attempt from priors
                for sit in sittings:
                    if best_sit[0] is None:
                        best_sit = (sit, sit.get_percent_correct)
                    elif sit.get_percent_correct > best_sit[1]:
                        best_sit[0].delete()
                        best_sit = (sit, sit.get_percent_correct)
                    elif sit.get_percent_correct <= best_sit[1]:
                        sit.delete()
            return best_sit[1], best_sit[0]
        except Sitting.DoesNotExist:
            return None, None
# progress manager
class ProgressManager(models.Manager):
    """Manager providing a helper that creates an empty Progress record."""

    def new_progress(self, user):
        """Create and return a Progress row for *user* with no scores yet.

        Bug fix: the original called ``create(user=user, score="")`` but
        Progress has no ``score`` field (it is ``serialized_performance``),
        which raised TypeError on every call.  The redundant extra save()
        after create() is also dropped — create() already persists the row.
        """
        return self.create(user=user, serialized_performance="")
class Progress(models.Model):
    """
    Progress is used to track an individual signed in user's score on different
    categories across all quizzes.

    Scores live in ``serialized_performance`` as a flat CSV string of
    ``category,score,possible,`` triples.
    """
    user = models.OneToOneField(settings.AUTH_USER_MODEL, verbose_name=_("User"), on_delete=models.CASCADE)
    # CSV of "category,score,possible," triples (see list_all_cat_scores).
    serialized_performance = models.CharField(validators=[validate_comma_separated_integer_list], max_length=2048,
                                              verbose_name=_("Per Category Performance"))
    objects = ProgressManager()

    class Meta:
        verbose_name = _("User Progress")
        verbose_name_plural = _("User Progress Records")

    @property
    def list_all_cat_scores(self):
        """
        Returns a dict in which the key is the category name and the item is
        a list of three integers.
        The first is the number of questions correct,
        the second is the possible best score,
        the third is the percentage correct.
        The dict will have one key for every category that you have defined.
        Categories missing from the serialized string are appended with
        zeroed scores; the record is saved once if anything was added.
        """
        serialized_performance_before = self.serialized_performance
        output = {}
        for cat in Category.objects.all():
            to_find = r"(?:^|,)" + re.escape(cat.category) + r",(\d+),(\d+)"
            # group 1 is score, group 2 is highest possible
            match = re.search(to_find, self.serialized_performance, re.IGNORECASE)
            if match:
                score = int(match.group(1))
                possible = int(match.group(2))
                try:
                    percent = int(round((float(score) / float(possible)) * 100))
                except ZeroDivisionError:
                    # 'possible' stays 0 until the user answers a question.
                    percent = 0
                output[cat.category] = [score, possible, percent]
            else:  # if category has not been added yet, add it.
                self.serialized_performance += cat.category + ",0,0,"
                output[cat.category] = [0, 0, 0]
        if len(self.serialized_performance) > len(serialized_performance_before):
            # If a new category has been added, save changes.
            self.save()
        return output

    def update_score(self, question, score_to_add=0, possible_to_add=0):
        """
        Pass in question object, amount to increase score and max possible.
        Returns an error tuple when the category does not exist or the
        amounts are unusable; otherwise updates and saves in place.
        """
        category_test = Category.objects.filter(category=question.category).exists()
        # NOTE(review): only literal False / 0 values trip this guard; the
        # abs() calls below neutralize negative amounts.
        if any([item is False for item in [category_test,
                                           score_to_add,
                                           possible_to_add,
                                           isinstance(score_to_add, int),
                                           isinstance(possible_to_add, int)]]):
            return _("error"), _("category does not exist or invalid score")
        to_find = re.escape(str(question.category)) + r",(?P<score>\d+),(?P<possible>\d+),"
        match = re.search(to_find, self.serialized_performance, re.IGNORECASE)
        if match:
            updated_score = int(match.group('score')) + abs(score_to_add)
            updated_possible = int(match.group('possible')) +\
                abs(possible_to_add)
            new_score = ",".join(
                [
                    str(question.category),
                    str(updated_score),
                    str(updated_possible), ""
                ])
            # Swap the old triple for the new one.  Replace only the first
            # occurrence (the one re.search matched) so an identical triple
            # elsewhere in the string is left untouched.
            self.serialized_performance = self.serialized_performance.replace(match.group(), new_score, 1)
            self.save()
        else:
            # if not present but existing, add with the points passed in
            self.serialized_performance += ",".join(
                [
                    str(question.category),
                    str(score_to_add),
                    str(possible_to_add),
                    ""
                ])
            self.save()

    def show_saved(self):
        """
        Finds the previous quizzes marked as 'saved'.
        Returns a queryset of complete quizzes.
        """
        return Sitting.objects.filter(user=self.user, complete=True)

    def show_started(self):
        """
        Finds the previous quizzes that have a sitting.
        Returns a queryset of started quizzes.
        """
        return Sitting.objects.filter(user=self.user, complete=False)

    def __str__(self):
        return self.user.username + ' - ' + self.serialized_performance
class SittingManager(models.Manager):
    """Manager that creates or resumes Sitting records for a user/quiz pair."""

    def new_sitting(self, user, quiz):
        """Build and persist a fresh Sitting for *user* on *quiz*.

        Question ids are ordered randomly when the quiz requests it,
        truncated to ``max_questions`` when set, and stored as a trailing-
        comma CSV string.

        Raises:
            ImproperlyConfigured: if the quiz has no questions.
        """
        if quiz.random_order is True:
            queryset = quiz.question_set.all().select_subclasses().order_by('?')
        else:
            queryset = quiz.question_set.all().select_subclasses()
        question_ids = [question.id for question in queryset]
        if len(question_ids) == 0:
            raise ImproperlyConfigured('Question set of the quiz is empty. '
                                       'Please configure questions properly')
        if quiz.max_questions and quiz.max_questions < len(question_ids):
            question_ids = question_ids[:quiz.max_questions]
        csv_ids = ",".join(map(str, question_ids)) + ","
        return self.create(user=user,
                           quiz=quiz,
                           question_order=csv_ids,
                           question_list=csv_ids,
                           incorrect_questions="",
                           current_score=0,
                           complete=False,
                           start=now(),
                           user_answers='{}')

    def user_sitting(self, user, quiz):
        """Return the user's open Sitting, creating one when none exists.

        Returns False when another attempt is forbidden: single-attempt
        quizzes, or competitive quizzes still inside their time window,
        once the user has a completed sitting.
        """
        attempt_locked = quiz.single_attempt or (quiz.competitive and not quiz.end_time_expired)
        if attempt_locked and self.filter(user=user, quiz=quiz, complete=True).exists():
            return False
        try:
            return self.get(user=user, quiz=quiz, complete=False)
        except Sitting.DoesNotExist:
            return self.new_sitting(user, quiz)
        except Sitting.MultipleObjectsReturned:
            # Several unfinished sittings: resume the first one.
            return self.filter(user=user, quiz=quiz, complete=False)[0]
class Sitting(models.Model):
"""
Used to store the progress of logged in users sitting a quiz.
Replaces the session system used by anon users.
Question_order is a list of integer pks of all the questions in the
quiz, in order.
Question_list is a list of integers which represent id's of
the unanswered questions in csv format.
Incorrect_questions is a list in the same format.
Sitting deleted when quiz finished unless quiz.saved is true.
User_answers is a json object in which the question PK is stored
with the answer the user gave.
"""
user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_("User"), on_delete=models.CASCADE)
quiz = models.ForeignKey(Quiz, verbose_name=_("Quiz"), on_delete=models.CASCADE)
question_order = models.CharField(validators=[validate_comma_separated_integer_list],
max_length=1024, verbose_name=_("Question Order"))
question_list = models.CharField(validators=[validate_comma_separated_integer_list],
max_length=1024, verbose_name=_("Question List"))
incorrect_questions = models.CharField(validators=[validate_comma_separated_integer_list],
max_length=1024, blank=True, verbose_name=_("Incorrect questions"))
current_score = models.IntegerField(verbose_name=_("Current Score"))
complete = models.BooleanField(default=False, blank=False,
verbose_name=_("Complete"))
user_answers = models.TextField(blank=True, default='{}',
verbose_name=_("User Answers"))
start = models.DateTimeField(auto_now_add=True, verbose_name=_("Start"))
end = models.DateTimeField(null=True, blank=True, verbose_name=_("End"))
objects = SittingManager()
class Meta:
permissions = (("view_sittings", _("Can see completed quizzes.")),)
def get_first_question(self):
"""
Returns the next question.
| |
[종목명, 단축코드, 확장코드, ETF구분, 상한가, 하한가, 전일가, 주문수량단위, 기준가, 구분, 증권그룹, 기업인수목적회사여부]
result.append(lst)
columns = ['종목명', '단축코드', '확장코드', 'ETF구분', '상한가', '하한가', '전일가', '주문수량단위', '기준가', '구분', '증권그룹', '기업인수목적회사여부']
df = DataFrame(data=result, columns=columns)
if self.parent != None:
self.parent.OnReceiveData(szTrCode, [df])
# Stock screening (t1833)
class t1833(XAQuery):
    """Stock screening query (t1833): runs a saved search-condition file."""

    def Query(self, 종목검색파일=''):
        """Request the screening service using the given condition file."""
        self.ActiveX.LoadFromResFile(self.RESFILE)
        self.ActiveX.SetFieldData(self.INBLOCK, "dummy", 0, "")
        self.ActiveX.RequestService(self.MYNAME, 종목검색파일)

    def OnReceiveData(self, szTrCode):
        """Collect matched stocks into a DataFrame and forward to parent."""
        # The loop keeps the last record's match count; OUTBLOCK presumably
        # holds a single record — confirm against the RES file.
        nCount = self.ActiveX.GetBlockCount(self.OUTBLOCK)
        for i in range(nCount):
            검색종목수 = int(self.ActiveX.GetFieldData(self.OUTBLOCK, "JongCnt", i).strip())
        result = []
        nCount = self.ActiveX.GetBlockCount(self.OUTBLOCK1)
        for i in range(nCount):
            종목코드 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "shcode", i).strip()
            종목명 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "hname", i).strip()
            전일대비구분 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "sign", i).strip()
            연속봉수 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "signcnt", i).strip())
            현재가 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "close", i).strip())
            전일대비 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "change", i).strip())
            등락율 = float(self.ActiveX.GetFieldData(self.OUTBLOCK1, "diff", i).strip())
            거래량 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "volume", i).strip())
            lst = [종목코드, 종목명, 전일대비구분, 연속봉수, 현재가, 전일대비, 등락율, 거래량]
            result.append(lst)
        columns = ['종목코드', '종목명', '전일대비구분', '연속봉수', '현재가', '전일대비', '등락율', '거래량']
        df = DataFrame(data=result, columns=columns)
        # Identity check is the correct None comparison idiom.
        if self.parent is not None:
            self.parent.OnReceiveData(szTrCode, [검색종목수,df])
# e-stock search (t1857)
class t1857(XAQuery):
    """Server-side condition search (t1857) with optional real-time alerts."""

    def Query(self, 실시간구분,종목검색구분,종목검색입력값):
        """Request a condition search; clears previous out-blocks first."""
        self.실시간키 = ''
        self.ActiveX.LoadFromResFile(self.RESFILE)
        self.ActiveX.ClearBlockdata(self.OUTBLOCK)
        self.ActiveX.ClearBlockdata(self.OUTBLOCK1)
        self.ActiveX.SetFieldData(self.INBLOCK, "sRealFlag", 0, 실시간구분)
        self.ActiveX.SetFieldData(self.INBLOCK, "sSearchFlag", 0, 종목검색구분)
        self.ActiveX.SetFieldData(self.INBLOCK, "query_index", 0, 종목검색입력값)
        self.ActiveX.RequestService(self.MYNAME, "")

    def OnReceiveData(self, szTrCode):
        """Parse search results into a DataFrame and forward to parent."""
        try:
            검색종목수 = int(self.ActiveX.GetFieldData(self.OUTBLOCK, "result_count", 0).strip())
            포착시간 = self.ActiveX.GetFieldData(self.OUTBLOCK, "result_time", 0).strip()
            실시간키 = self.ActiveX.GetFieldData(self.OUTBLOCK, "AlertNum", 0).strip()
            # Remember the alert key so RemoveService can unregister later.
            self.실시간키 = 실시간키
            result = []
            for i in range(검색종목수):
                종목코드 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "shcode", i).strip()
                종목명 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "hname", i).strip()
                현재가 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "price", i).strip())
                전일대비구분 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "sign", i).strip()
                전일대비 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "change", i).strip())
                등락율 = float(self.ActiveX.GetFieldData(self.OUTBLOCK1, "diff", i).strip())
                거래량 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "volume", i).strip())
                종목상태 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "JobFlag", i).strip()
                lst = [종목코드, 종목명, 현재가, 전일대비구분, 전일대비, 등락율, 거래량, 종목상태]
                result.append(lst)
            columns = ['종목코드', '종목명', '현재가', '전일대비구분', '전일대비', '등락율', '거래량', '종목상태']
            df = DataFrame(data=result, columns=columns)
            # NOTE(review): self.식별자 is assumed to be set by the owning
            # window before this callback fires — confirm.
            if self.parent is not None:
                self.parent.OnReceiveData(szTrCode, [self.식별자, 검색종목수,포착시간,실시간키,df])
        except Exception:
            # Best-effort: malformed or empty fields are ignored.
            # NOTE(review): consider logging here instead of swallowing.
            pass

    def OnReceiveSearchRealData(self, szTrCode):
        """Real-time alert callback: forward one matched stock to parent."""
        result = dict()
        result['종목코드'] = self.ActiveX.GetFieldSearchRealData(self.OUTBLOCK1, "shcode").strip()
        result['종목명'] = self.ActiveX.GetFieldSearchRealData(self.OUTBLOCK1, "hname").strip()
        result['현재가'] = int(self.ActiveX.GetFieldSearchRealData(self.OUTBLOCK1, "price").strip())
        result['전일대비구분'] = self.ActiveX.GetFieldSearchRealData(self.OUTBLOCK1, "sign").strip()
        result['전일대비'] = int(self.ActiveX.GetFieldSearchRealData(self.OUTBLOCK1, "change").strip())
        result['등락율'] = float(self.ActiveX.GetFieldSearchRealData(self.OUTBLOCK1, "diff").strip())
        result['거래량'] = int(self.ActiveX.GetFieldSearchRealData(self.OUTBLOCK1, "volume").strip())
        result['종목상태'] = self.ActiveX.GetFieldSearchRealData(self.OUTBLOCK1, "JobFlag").strip()
        if self.parent is not None:
            self.parent.OnReceiveSearchRealData(szTrCode, [self.식별자, result])

    def RemoveService(self):
        """Unregister the real-time search alert, if one was registered."""
        if self.실시간키 != '':
            self.ActiveX.RemoveService(self.MYNAME, self.실시간키)
# Saved search-condition list query (API use), t1866
class t1866(XAQuery):
    """Query the user's saved server-side search conditions (t1866)."""

    def Query(self, 로그인ID, 조회구분, 그룹명, 연속여부, 연속키):
        """Request the saved-condition list for the given user/group."""
        self.ActiveX.LoadFromResFile(self.RESFILE)
        self.ActiveX.SetFieldData(self.INBLOCK, "user_id", 0, 로그인ID)
        self.ActiveX.SetFieldData(self.INBLOCK, "gb", 0, 조회구분)
        self.ActiveX.SetFieldData(self.INBLOCK, "group_name", 0, 그룹명)
        self.ActiveX.SetFieldData(self.INBLOCK, "cont", 0, 연속여부)
        self.ActiveX.SetFieldData(self.INBLOCK, "cont_key", 0, 연속키)
        self.ActiveX.Request(0)

    def OnReceiveData(self, szTrCode):
        """Parse the saved-condition list and forward it to parent.

        Bug fix: the header fields were read with an undefined loop index
        ``i`` (NameError on every callback); the header block is a single
        record, so index 0 is used.
        """
        저장조건수 = self.ActiveX.GetFieldData(self.OUTBLOCK, "result_count", 0).strip()
        연속여부 = self.ActiveX.GetFieldData(self.OUTBLOCK, "cont", 0).strip()
        연속키 = self.ActiveX.GetFieldData(self.OUTBLOCK, "cont_key", 0).strip()
        result = []
        nCount = self.ActiveX.GetBlockCount(self.OUTBLOCK1)
        for i in range(nCount):
            # NOTE(review): the variable/field pairing (hname/shcode/expcode)
            # looks copy-pasted from a price query — confirm against the
            # t1866 RES file.
            서버저장인덱스 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "hname", i).strip()
            그룹명 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "shcode", i).strip()
            조건저장명 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "expcode", i).strip()
            lst = [서버저장인덱스, 그룹명, 조건저장명]
            result.append(lst)
        columns = ['서버저장인덱스', '그룹명', '조건저장명']
        df = DataFrame(data=result, columns=columns)
        if self.parent is not None:
            self.parent.OnReceiveData(szTrCode, [저장조건수, 연속여부, 연속키, df])
# News body (t3102)
class t3102(XAQuery):
    """Fetch the full text of one news article by its news number (t3102)."""

    def Query(self, 뉴스번호):
        """Request the news body for *뉴스번호*.

        Bug fix: the original signature was copy-pasted from t1866
        (로그인ID, 조회구분, ...) while the body referenced an undefined
        ``뉴스번호``, raising NameError on every call — so no working caller
        could exist.  The parameter list now matches the field actually sent.
        """
        self.ActiveX.LoadFromResFile(self.RESFILE)
        self.ActiveX.SetFieldData(self.INBLOCK, "sNewsno", 0, 뉴스번호)
        self.ActiveX.Request(0)

    def OnReceiveData(self, szTrCode):
        """Assemble the article body lines and forward them to parent.

        Bug fix: the symbol and title fields were read with an undefined or
        stale loop index ``i``; both blocks hold a single record for those
        fields, so index 0 is used.
        """
        뉴스종목 = self.ActiveX.GetFieldData(self.OUTBLOCK, "sJongcode", 0).strip()
        뉴스본문 = []
        nCount = self.ActiveX.GetBlockCount(self.OUTBLOCK1)
        for i in range(nCount):
            body = self.ActiveX.GetFieldData(self.OUTBLOCK1, "sBody", i).strip()
            뉴스본문.append(body)
        뉴스타이틀 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "sTitle", 0).strip()
        if self.parent is not None:
            self.parent.OnReceiveData(szTrCode, [뉴스종목, 뉴스본문, 뉴스타이틀])
# Chart data query
class ChartIndex(XAQuery):
    # Chart/indicator data query ("ChartIndex" service): requests OHLCV
    # candles plus up to five computed indicator values for one symbol,
    # optionally registering for real-time chart updates.
    def Query(self, 지표ID='',지표명='',지표조건설정='',시장구분='',주기구분='',단축코드='',요청건수='500',단위='',시작일자='',종료일자='',수정주가반영여부='',갭보정여부='',실시간데이터수신자동등록여부='0'):
        """Send the ChartIndex request built from the given filter fields."""
        self.ActiveX.LoadFromResFile(self.RESFILE)
        self.ActiveX.SetFieldData(self.INBLOCK, "indexid", 0, 지표ID)
        self.ActiveX.SetFieldData(self.INBLOCK, "indexname", 0, 지표명)
        self.ActiveX.SetFieldData(self.INBLOCK, "indexparam", 0, 지표조건설정)
        self.ActiveX.SetFieldData(self.INBLOCK, "market", 0, 시장구분)
        self.ActiveX.SetFieldData(self.INBLOCK, "period", 0, 주기구분)
        self.ActiveX.SetFieldData(self.INBLOCK, "shcode", 0, 단축코드)
        self.ActiveX.SetFieldData(self.INBLOCK, "qrycnt", 0, 요청건수)
        self.ActiveX.SetFieldData(self.INBLOCK, "ncnt", 0, 단위)
        self.ActiveX.SetFieldData(self.INBLOCK, "sdate", 0, 시작일자)
        self.ActiveX.SetFieldData(self.INBLOCK, "edate", 0, 종료일자)
        self.ActiveX.SetFieldData(self.INBLOCK, "Isamend", 0, 수정주가반영여부)
        self.ActiveX.SetFieldData(self.INBLOCK, "Isgab", 0, 갭보정여부)
        self.ActiveX.SetFieldData(self.INBLOCK, "IsReal", 0, 실시간데이터수신자동등록여부)
        self.ActiveX.RequestService("ChartIndex", "")
    def RemoveService(self):
        """Unregister the real-time chart service for the last-queried index.

        Best-effort: failures (e.g. no prior successful query) are ignored.
        """
        try:
            지표ID = self.ActiveX.GetFieldData(self.OUTBLOCK, "indexid", 0).strip()
            self.ActiveX.RemoveService("ChartIndex", 지표ID)
        except Exception as e:
            pass
    def OnReceiveData(self, szTrCode):
        """Parse candle/indicator rows into a DataFrame and forward to parent."""
        # NOTE(review): this loop reuses index `i`, which the row loop below
        # clobbers; OUTBLOCK presumably holds a single header record —
        # confirm against the RES file.
        nCount = self.ActiveX.GetBlockCount(self.OUTBLOCK)
        for i in range(nCount):
            지표ID = int(self.ActiveX.GetFieldData(self.OUTBLOCK, "indexid", i).strip())
            레코드갯수 = int(self.ActiveX.GetFieldData(self.OUTBLOCK, "rec_cnt", i).strip())
            유효데이터컬럼갯수 = int(self.ActiveX.GetFieldData(self.OUTBLOCK, "validdata_cnt", i).strip())
        result = []
        nCount = self.ActiveX.GetBlockCount(self.OUTBLOCK1)
        for i in range(nCount):
            일자 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "date", i).strip()
            시간 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "time", i).strip()
            시가 = float(self.ActiveX.GetFieldData(self.OUTBLOCK1, "open", i).strip())
            고가 = float(self.ActiveX.GetFieldData(self.OUTBLOCK1, "high", i).strip())
            저가 = float(self.ActiveX.GetFieldData(self.OUTBLOCK1, "low", i).strip())
            종가 = float(self.ActiveX.GetFieldData(self.OUTBLOCK1, "close", i).strip())
            거래량 = float(self.ActiveX.GetFieldData(self.OUTBLOCK1, "volume", i).strip())
            지표값1 = float(self.ActiveX.GetFieldData(self.OUTBLOCK1, "value1", i).strip())
            지표값2 = float(self.ActiveX.GetFieldData(self.OUTBLOCK1, "value2", i).strip())
            지표값3 = float(self.ActiveX.GetFieldData(self.OUTBLOCK1, "value3", i).strip())
            지표값4 = float(self.ActiveX.GetFieldData(self.OUTBLOCK1, "value4", i).strip())
            지표값5 = float(self.ActiveX.GetFieldData(self.OUTBLOCK1, "value5", i).strip())
            위치 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "pos", i).strip())
            lst = [일자, 시간, 시가, 고가, 저가, 종가, 거래량, 지표값1, 지표값2, 지표값3, 지표값4, 지표값5, 위치]
            result.append(lst)
        columns = ['일자', '시간', '시가', '고가', '저가', '종가', '거래량', '지표값1', '지표값2', '지표값3', '지표값4', '지표값5', '위치']
        df = DataFrame(data=result, columns=columns)
        # NOTE(review): self.식별자 is assumed to be set by the owning window
        # before this callback fires — confirm.
        if self.parent != None:
            self.parent.OnReceiveData(szTrCode, [self.식별자, 지표ID,레코드갯수,유효데이터컬럼갯수,df])
    def OnReceiveChartRealData(self, szTrCode):
        """Real-time candle update callback: forward one row (dict) to parent."""
        지표ID = self.ActiveX.GetFieldChartRealData(self.OUTBLOCK, "indexid").strip()
        레코드갯수 = self.ActiveX.GetFieldChartRealData(self.OUTBLOCK, "rec_cnt").strip()
        유효데이터컬럼갯수 = self.ActiveX.GetFieldChartRealData(self.OUTBLOCK, "validdata_cnt").strip()
        result = dict()
        result['일자'] = self.ActiveX.GetFieldChartRealData(self.OUTBLOCK1, "date").strip()
        result['시간'] = self.ActiveX.GetFieldChartRealData(self.OUTBLOCK1, "time").strip()
        result['시가'] = float(self.ActiveX.GetFieldChartRealData(self.OUTBLOCK1, "open").strip())
        result['고가'] = float(self.ActiveX.GetFieldChartRealData(self.OUTBLOCK1, "high").strip())
        result['저가'] = float(self.ActiveX.GetFieldChartRealData(self.OUTBLOCK1, "low").strip())
        result['종가'] = float(self.ActiveX.GetFieldChartRealData(self.OUTBLOCK1, "close").strip())
        result['거래량'] = float(self.ActiveX.GetFieldChartRealData(self.OUTBLOCK1, "volume").strip())
        result['지표값1'] = float(self.ActiveX.GetFieldChartRealData(self.OUTBLOCK1, "value1").strip())
        result['지표값2'] = float(self.ActiveX.GetFieldChartRealData(self.OUTBLOCK1, "value2").strip())
        result['지표값3'] = float(self.ActiveX.GetFieldChartRealData(self.OUTBLOCK1, "value3").strip())
        result['지표값4'] = float(self.ActiveX.GetFieldChartRealData(self.OUTBLOCK1, "value4").strip())
        result['지표값5'] = float(self.ActiveX.GetFieldChartRealData(self.OUTBLOCK1, "value5").strip())
        result['위치'] = int(self.ActiveX.GetFieldChartRealData(self.OUTBLOCK1, "pos").strip())
        if self.parent != None:
            self.parent.OnReceiveChartRealData(szTrCode, [self.식별자, 지표ID,레코드갯수,유효데이터컬럼갯수,result])
##----------------------------------------------------------------------------------------------------------------------
# Futures/options regular order, CFOAT00100
class CFOAT00100(XAQuery):
    # Places a regular futures/options order and forwards the echoed order
    # fields plus the account summary to the owning window.
    def Query(self, 계좌번호, 비밀번호, 선물옵션종목번호, 매매구분, 선물옵션호가유형코드, 주문가격, 주문수량):
        """Submit the futures/options order request.

        For market orders (price-type code '03') the price field is sent
        empty.
        """
        # NOTE(review): these two attributes are initialized but never
        # assigned in this class's visible code — presumably filled by an
        # order-result callback elsewhere; confirm.
        self.주문결과코드 = ''
        self.주문결과메세지 = ''
        if 선물옵션호가유형코드 == '03':
            주문가격 = ''
        self.ActiveX.LoadFromResFile(self.RESFILE)
        self.ActiveX.SetFieldData(self.INBLOCK1, "AcntNo", 0, 계좌번호)
        self.ActiveX.SetFieldData(self.INBLOCK1, "Pwd", 0, 비밀번호)
        self.ActiveX.SetFieldData(self.INBLOCK1, "FnoIsuNo", 0, 선물옵션종목번호)
        self.ActiveX.SetFieldData(self.INBLOCK1, "BnsTpCode", 0, 매매구분)
        self.ActiveX.SetFieldData(self.INBLOCK1, "FnoOrdprcPtnCode", 0, 선물옵션호가유형코드)
        self.ActiveX.SetFieldData(self.INBLOCK1, "OrdPrc", 0, 주문가격)
        self.ActiveX.SetFieldData(self.INBLOCK1, "OrdQty", 0, 주문수량)
        self.ActiveX.Request(0)
    def OnReceiveData(self, szTrCode):
        """Parse the order echo (OUTBLOCK1) and account summary (OUTBLOCK2)
        into two DataFrames and forward both to the parent."""
        result = []
        nCount = self.ActiveX.GetBlockCount(self.OUTBLOCK1)
        for i in range(nCount):
            레코드갯수 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "RecCnt", i).strip())
            주문시장코드 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "OrdMktCode", i).strip()
            계좌번호 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "AcntNo", i).strip()
            비밀번호 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "Pwd", i).strip()
            선물옵션종목번호 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "FnoIsuNo", i).strip()
            매매구분 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "BnsTpCode", i).strip()
            선물옵션주문유형코드 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "FnoOrdPtnCode", i).strip()
            선물옵션호가유형코드 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "FnoOrdprcPtnCode", i).strip()
            선물옵션거래유형코드 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "FnoTrdPtnCode", i).strip()
            주문가격 = float(self.ActiveX.GetFieldData(self.OUTBLOCK1, "OrdPrc", i).strip())
            주문수량 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "OrdQty", i).strip())
            통신매체코드 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "CommdaCode", i).strip()
            협의매매완료시각 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "DscusBnsCmpltTime", i).strip()
            그룹ID = self.ActiveX.GetFieldData(self.OUTBLOCK1, "GrpId", i).strip()
            주문일련번호 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "OrdSeqno", i).strip())
            포트폴리오번호 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "PtflNo", i).strip())
            바스켓번호 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "BskNo", i).strip())
            트렌치번호 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "TrchNo", i).strip())
            항목번호 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "ItemNo", i).strip())
            운용지시번호 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "OpDrtnNo", i).strip()
            관리사원번호 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "MgempNo", i).strip()
            펀드ID = self.ActiveX.GetFieldData(self.OUTBLOCK1, "FundId", i).strip()
            펀드주문번호 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "FundOrdNo", i).strip())
            lst = [레코드갯수,주문시장코드,계좌번호,비밀번호,선물옵션종목번호,매매구분,선물옵션주문유형코드,선물옵션호가유형코드,선물옵션거래유형코드,주문가격,주문수량,통신매체코드,협의매매완료시각,그룹ID,주문일련번호,포트폴리오번호,바스켓번호,트렌치번호,항목번호,운용지시번호,관리사원번호,펀드ID,펀드주문번호]
            result.append(lst)
        columns = ['레코드갯수','주문시장코드','계좌번호','비밀번호','선물옵션종목번호','매매구분','선물옵션주문유형코드','선물옵션호가유형코드','선물옵션거래유형코드','주문가격','주문수량','통신매체코드','협의매매완료시각','그룹ID','주문일련번호','포트폴리오번호','바스켓번호','트렌치번호','항목번호','운용지시번호','관리사원번호','펀드ID','펀드주문번호']
        df = DataFrame(data=result, columns=columns)
        result = []
        nCount = self.ActiveX.GetBlockCount(self.OUTBLOCK2)
        for i in range(nCount):
            레코드갯수 = int(self.ActiveX.GetFieldData(self.OUTBLOCK2, "RecCnt", i).strip())
            주문번호 = int(self.ActiveX.GetFieldData(self.OUTBLOCK2, "OrdNo", i).strip())
            지점명 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "BrnNm", i).strip()
            계좌명 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "AcntNm", i).strip()
            종목명 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "IsuNm", i).strip()
            주문가능금액 = int(self.ActiveX.GetFieldData(self.OUTBLOCK2, "OrdAbleAmt", i).strip())
            현금주문가능금액 = int(self.ActiveX.GetFieldData(self.OUTBLOCK2, "MnyOrdAbleAmt", i).strip())
            주문증거금 = int(self.ActiveX.GetFieldData(self.OUTBLOCK2, "OrdMgn", i).strip())
            현금주문증거금 = int(self.ActiveX.GetFieldData(self.OUTBLOCK2, "MnyOrdMgn", i).strip())
            주문가능수량 = int(self.ActiveX.GetFieldData(self.OUTBLOCK2, "OrdAbleQty", i).strip())
            lst = [레코드갯수,주문번호,지점명,계좌명,종목명,주문가능금액,현금주문가능금액,주문증거금,현금주문증거금,주문가능수량]
            result.append(lst)
        columns = ['레코드갯수','주문번호','지점명','계좌명','종목명','주문가능금액','현금주문가능금액','주문증거금','현금주문증거금','주문가능수량']
        df1 = DataFrame(data=result, columns=columns)
        if self.parent != None:
            self.parent.OnReceiveData(szTrCode, [df, df1])
# 선물옵션차트(틱/n틱)(t8414)
class t8414(XAQuery):
# def Query(self, 단축코드='201N7302', 단위='15', 요청건수='2000', 조회영업일수='0', 시작일자='20180629', 시작시간='', 종료일자='20180629', 종료시간='', 연속일자='', 연속시간='', 압축여부='N', 연속조회=False):
def Query(self, 단축코드,단위='1',요청건수='2000',조회영업일수='0',시작일자='',시작시간='',종료일자='',종료시간='',연속일자='',연속시간='',압축여부='N', 연속조회=False):
if 연속조회 == False:
self.ActiveX.LoadFromResFile(self.RESFILE)
self.ActiveX.SetFieldData(self.INBLOCK, "shcode", 0, 단축코드)
self.ActiveX.SetFieldData(self.INBLOCK, "ncnt", 0, 단위)
self.ActiveX.SetFieldData(self.INBLOCK, "qrycnt", 0, 요청건수)
self.ActiveX.SetFieldData(self.INBLOCK, "nday", 0, 조회영업일수)
self.ActiveX.SetFieldData(self.INBLOCK, "sdate", 0, 시작일자)
self.ActiveX.SetFieldData(self.INBLOCK, "stime", 0, 시작시간)
self.ActiveX.SetFieldData(self.INBLOCK, "edate", 0, 종료일자)
self.ActiveX.SetFieldData(self.INBLOCK, "etime", 0, 종료시간)
self.ActiveX.SetFieldData(self.INBLOCK, "cts_date", 0, 연속일자)
self.ActiveX.SetFieldData(self.INBLOCK, "cts_time", 0, 연속시간)
self.ActiveX.SetFieldData(self.INBLOCK, "comp_yn", 0, 압축여부)
self.ActiveX.Request(0)
else:
self.ActiveX.SetFieldData(self.INBLOCK, "cts_date", 0, 연속일자)
self.ActiveX.SetFieldData(self.INBLOCK, "cts_time", 0, 연속시간)
err_code = self.ActiveX.Request(True) # 연속조회인경우만 True
if err_code < 0:
클래스이름 = self.__class__.__name__
함수이름 = inspect.currentframe().f_code.co_name
print("%s-%s " % (클래스이름, 함수이름), "error... {0}".format(err_code))
def OnReceiveData(self, szTrCode):
block = dict()
nCount = self.ActiveX.GetBlockCount(self.OUTBLOCK)
for i in range(nCount):
block['단축코드'] = self.ActiveX.GetFieldData(self.OUTBLOCK, "shcode", i).strip()
block['전일시가'] = float(self.ActiveX.GetFieldData(self.OUTBLOCK, "jisiga", i).strip())
block['전일고가'] = float(self.ActiveX.GetFieldData(self.OUTBLOCK, "jihigh", i).strip())
block['전일저가'] = float(self.ActiveX.GetFieldData(self.OUTBLOCK, "jilow", i).strip())
block['전일종가'] = float(self.ActiveX.GetFieldData(self.OUTBLOCK, "jiclose", i).strip())
block['전일거래량'] = int(self.ActiveX.GetFieldData(self.OUTBLOCK, "jivolume", i).strip())
block['당일시가'] = float(self.ActiveX.GetFieldData(self.OUTBLOCK, "disiga", i).strip())
block['당일고가'] = float(self.ActiveX.GetFieldData(self.OUTBLOCK, "dihigh", i).strip())
block['당일저가'] = float(self.ActiveX.GetFieldData(self.OUTBLOCK, "dilow", i).strip())
block['당일종가'] = float(self.ActiveX.GetFieldData(self.OUTBLOCK, "diclose", i).strip())
block['상한가'] = float(self.ActiveX.GetFieldData(self.OUTBLOCK, "highend", i).strip())
block['하한가'] = float(self.ActiveX.GetFieldData(self.OUTBLOCK, "lowend", i).strip())
block['연속일자'] = self.ActiveX.GetFieldData(self.OUTBLOCK, "cts_date", i).strip()
block['연속시간'] = self.ActiveX.GetFieldData(self.OUTBLOCK, "cts_time", i).strip()
block['장시작시간'] = self.ActiveX.GetFieldData(self.OUTBLOCK, "s_time", i).strip()
block['장종료시간'] = self.ActiveX.GetFieldData(self.OUTBLOCK, "e_time", i).strip()
block['동시호가처리시간'] = self.ActiveX.GetFieldData(self.OUTBLOCK, "dshmin", i).strip()
block['레코드카운트'] = int(self.ActiveX.GetFieldData(self.OUTBLOCK, "rec_count", i).strip())
result = []
nCount = self.ActiveX.GetBlockCount(self.OUTBLOCK1)
for i in range(nCount):
날짜 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "date", i).strip()
시간 = | |
# a_i^{(1)} = x_i
# Compute z values at second layer
# zSup2 (i.e., z^{(2)}) is the matrix of z values at layer 2
# zSup2 = W^{(1)} x + b^{(1)}
zSup2 = W1.dot(data) + numpy.tile(b1, (m, 1)).transpose()
# Compute activations at second layer by mapping z^{(2)} to sigmoid(z^{(2)})
aSup2 = sigmoid(zSup2)
#Compute z at third layer, z^{(3)}
zSup3 = W2.dot(aSup2) + numpy.tile(b2, (m, 1)).transpose()
# z at third layer is the total weighted sum of inputs to unit i in layer 3,
# hypothesis = activation at the third layer: hypothesis = f(z^{(3)})
hypothesis = sigmoid(zSup3)
##########################################################################################################################################
# COMPUTE COST
# Now add sparsity (computed from activations to the output layer):
rhoHat = numpy.sum(aSup2, axis=1)/m
# Turn rho_ into matrix for vectorized computation
rho = numpy.tile(rho_, hidden_size)
# to sum over i and j in summation loops, we can use numpy.sum of W1 and W2 and then add the two summations together to account for the outermost summation to sum over all layers - 1
# now with sparsity implemented: beta_ parameter determines amount of penalty applied relative to the regular cost function (smaller beta = less penalty)
# Extra penalty term to the optimization objective penalizes rhoHat for deviating significantly from rho
cost = numpy.sum((hypothesis - data) ** 2.) / (2. * m) + (lambda_ / 2.) * ( numpy.sum(W1 **2) + numpy.sum(W2 ** 2) ) + beta_ * numpy.sum(rho * numpy.log(rho / rhoHat) + ((1 - rho) * numpy.log((1 - rho) / (1 - rhoHat))))
#TRIED WITH numpy.linalg.norm() and found it to be twice as slow as above implementation of cost:
#start = time.clock()
#for n in range(40000):
# costNorm = (1./(2. * m)) * numpy.linalg.norm(numpy.dstack((hypothesis, data)))**2
#print time.clock() - start
# 5.894494
#Compared to:
#for n in range(40000):
# costNorm = (1./(2. * m)) * numpy.linalg.norm(numpy.dstack((hypothesis, data)))**2
#print time.clock() - start
#2.99788
##########################################################################################################################################
# BACK PROPOGATION
# Compute deltas:
#\delta^{(3)}, i.e. output layer
deltaSup3 = -1. * (data - hypothesis) * sigmoidPrime(zSup3)
#\delta^{(2)}, i.e. hidden layer
# Use numpy.tile to vectorize computation by tiling out m training examples
deltaSup2 = (numpy.dot(W2.transpose(), deltaSup3) + beta_ * (numpy.tile((-1. * rho / rhoHat) + ( (1 - rho) / (1 - rhoHat) ), (m, 1)).transpose()) ) * sigmoidPrime(zSup2)
##########################################################################################################################################
# Compute gradients:
# working "backwards" from output to input
grad_WSup2 = ((1.0/m) * numpy.dot(deltaSup3, aSup2.transpose())) + (lambda_ * W2)
#or with numpy.outer:
#Onabla_WSup2 = numpy.outer(deltaSup3, aSup2)
# ^ dont think this is right
grad_WSup1 = ((1.0/m) * numpy.dot(deltaSup2, data.transpose())) + lambda_ * W1
grad_WSup1_2 = deltaSup2.dot(data.transpose()) / m + lambda_ * W1
grad_bSup2 = (1.0/m) * numpy.sum(deltaSup3, axis = 1)
grad_bSup1 = (1.0/m) * numpy.sum(deltaSup2, axis = 1)
grad = numpy.concatenate((numpy.reshape(grad_WSup1,W1.size), numpy.reshape(grad_WSup2,W2.size), numpy.reshape(grad_bSup1,b1.size), numpy.reshape(grad_bSup2,b2.size)))
return cost, grad
# -------------------------------------------------------------------------
def autoencoder_feedforward(theta, visible_size, hidden_size, data):
    """
    Run a trained autoencoder forward and return the output-layer activations.

    :param theta: flat parameter vector laid out as [W1, W2, b1, b2], where
        W1 = weights of layer 1 (input to hidden),
        W2 = weights of layer 2 (hidden to output),
        b1 = layer 1 bias weights (to hidden layer),
        b2 = layer 2 bias weights (to output layer)
    :param visible_size: number of nodes in the visible layers (input and output)
    :param hidden_size: number of nodes in the hidden layer
    :param data: input data matrix; each column is one image patch
    :return: output_activations matrix in the same format as data: column j
        is the reconstruction (output-layer activation) of data column j
    """
    # Unpack the flat theta vector.  W1 and W2 contain the same number of
    # entries (hidden_size * visible_size), so compute that block size once
    # instead of repeating the index arithmetic for every slice.
    w_size = hidden_size * visible_size
    W1 = numpy.reshape(theta[0:w_size], (hidden_size, visible_size))
    W2 = numpy.reshape(theta[w_size:2 * w_size], (visible_size, hidden_size))
    b1 = theta[2 * w_size:2 * w_size + hidden_size]
    b2 = theta[2 * w_size + hidden_size:2 * w_size + hidden_size + visible_size]
    # Feed forward.  Adding b[:, newaxis] broadcasts the bias across all m
    # data columns without materialising the (size, m) temporary that the
    # original numpy.tile(...).transpose() call allocated.
    # Hidden-layer activations: a^{(2)} = f(W^{(1)} x + b^{(1)})
    aSup2 = sigmoid(W1.dot(data) + b1[:, numpy.newaxis])
    # Output-layer activations: a^{(3)} = f(W^{(2)} a^{(2)} + b^{(2)})
    output_activations = sigmoid(W2.dot(aSup2) + b2[:, numpy.newaxis])
    return output_activations
# -------------------------------------------------------------------------
def save_model(theta, visible_size, hidden_size, filepath, **params):
    """
    Persist an autoencoder model to disk.

    Writes theta as CSV to '<filepath>_theta.csv' and the keyword parameters
    (augmented with the layer sizes) to '<filepath>_params.txt'.
    """
    numpy.savetxt(filepath + '_theta.csv', theta, delimiter=',')
    params['visible_size'] = visible_size
    params['hidden_size'] = hidden_size
    # Append mode: successive saves accumulate parameter records in the file.
    with open(filepath + '_params.txt', 'a') as param_file:
        param_file.write('{0}'.format(params))
# -------------------------------------------------------------------------
def plot_and_save_results(theta, visible_size, hidden_size, root_filepath=None,
train_patches=None, test_patches=None, show_p=False,
**params):
"""
This is a helper function to streamline saving the results of an autoencoder.
The visible_size and hidden_size provide the information needed to retrieve
the autoencoder parameters (w1, w2, b1, b2) from theta.
This function does the following:
(1) Saves the parameters theta, visible_size and hidden_size as a text file
called '<root_filepath>_model.txt'
(2) Extracts the layer 1 (input-to-hidden) weights and plots them as an image
and saves the image to file '<root_filepath>_weights.png'
(3) [optional] train_patches are intended to be a set of patches that were
used during training of the autoencoder. Typically these will be the first
100 patches of the MNIST data set.
If provided, the patches will be given as input to the autoencoder in
order to generate output 'decoded' activations that are then plotted as
patches in an image. The image is saved to '<root_filepath>_train_decode.png'
(4) [optional] test_patches are intended to be a different set of patches
that were *not* used during training. This permits inspecting | |
0:
self.ListSize = 0
return self.ListSize
if self.ItemSize == 0:
self.ListSize = self.GetInterOffset(len(self.RawDataList) - 1) + len(self.RawDataList[len(self.RawDataList)-1])
else:
for Datas in self.RawDataList:
if type(Datas) in (list, tuple):
self.ListSize += len(Datas) * self.ItemSize
else:
self.ListSize += self.ItemSize
return self.ListSize
## DbSkuHeadTableItemList
#
# The class holds the Sku header value table
#
class DbSkuHeadTableItemList (DbItemList):
    """Holds the SKU header value table; each record is a pair of UINT32s."""
    def __init__(self, ItemSize, DataList=None, RawDataList=None):
        # Substitute fresh lists only when the caller passed None, so a
        # caller-supplied (possibly empty) list object is kept as-is.
        DbItemList.__init__(self, ItemSize,
                            [] if DataList is None else DataList,
                            [] if RawDataList is None else RawDataList)
    def PackData(self):
        # Pack every (value0, value1) pair as two little-endian UINT32s.
        Parts = []
        for Pair in self.RawDataList:
            Parts.append(pack("=LL",
                              GetIntegerValue(Pair[0]),
                              GetIntegerValue(Pair[1])))
        return ''.join(Parts)
## DbSizeTableItemList
#
# The class holds the size table
#
class DbSizeTableItemList (DbItemList):
    """Holds the size table: (maximum length, [current lengths]) records."""
    def __init__(self, ItemSize, DataList=None, RawDataList=None):
        # Substitute fresh lists only when the caller passed None, so a
        # caller-supplied (possibly empty) list object is kept as-is.
        DbItemList.__init__(self, ItemSize,
                            [] if DataList is None else DataList,
                            [] if RawDataList is None else RawDataList)
    def GetListSize(self):
        # One slot for the maximum length plus one per current-length entry,
        # each ItemSize bytes wide.
        return self.ItemSize * sum(1 + len(Data[1]) for Data in self.RawDataList)
    def PackData(self):
        # Pack the maximum length followed by every current length, all as
        # little-endian UINT16s.
        Chunks = []
        for MaxLen, CurLens in self.RawDataList:
            Chunks.append(pack("=H", GetIntegerValue(MaxLen)))
            for CurLen in CurLens:
                Chunks.append(pack("=H", GetIntegerValue(CurLen)))
        return ''.join(Chunks)
## DbStringItemList
#
# The class holds the string table
#
class DbStringItemList (DbComItemList):
    """Holds the string table; each string is padded out to a declared length."""
    def __init__(self, ItemSize, DataList=None, RawDataList=None, LenList=None):
        if RawDataList is None:
            RawDataList = []
        if LenList is None:
            LenList = []
        assert(len(RawDataList) == len(LenList))
        # NOTE: the DataList argument is ignored by design — it is rebuilt
        # here by zero-padding every raw string up to its declared length.
        DataList = []
        for Index in xrange(len(RawDataList)):
            Len = LenList[Index]
            RawDatas = RawDataList[Index]
            assert(Len >= len(RawDatas))
            DataList.append(list(RawDatas) + [0] * (Len - len(RawDatas)))
        self.LenList = LenList
        DbComItemList.__init__(self, ItemSize, DataList, RawDataList)
    def GetInterOffset(self, Index):
        # Strings are stored back-to-back, so the offset of entry Index is
        # the sum of the declared lengths of everything before it.
        assert(Index < len(self.LenList))
        return sum(self.LenList[ItemIndex] for ItemIndex in xrange(Index))
    def GetListSize(self):
        # Cached: total size = offset of the last string + its declared length.
        if not self.ListSize:
            if len(self.LenList) == 0:
                self.ListSize = 0
            else:
                self.ListSize = (self.GetInterOffset(len(self.LenList) - 1)
                                 + self.LenList[-1])
        return self.ListSize
    def PackData(self):
        # Pack the padded DataList through the common item packer.
        self.RawDataList = self.DataList
        return DbComItemList.PackData(self)
## Find the index in two list where the item matches the key separately
#
# @param Key1 The key used to search the List1
# @param List1 The list that Key1 will be searched
# @param Key2 The key used to search the List2
# @param List2 The list that Key2 will be searched
#
# @retval Index The position inside the list where list1[Index] == Key1 and list2[Index] == Key2
#
def GetMatchedIndex(Key1, List1, Key2, List2):
    """
    Find the first position where List1 holds Key1 and List2 holds Key2.

    @param Key1   Key searched for in List1
    @param List1  List scanned for Key1
    @param Key2   Key searched for in List2
    @param List2  List checked (at the same index) for Key2

    @retval Index  Position with List1[Index] == Key1 and List2[Index] == Key2,
                   or -1 when no such position exists.
    """
    Pos = 0
    Length = len(List1)
    while Pos < Length:
        # list.index raises ValueError if Key1 is absent from the remainder,
        # matching the original behaviour.
        Found = List1.index(Key1, Pos)
        if List2[Found] == Key2:
            return Found
        # Key2 did not match here; resume the scan just past this hit.
        Pos = Found + 1
    return -1
## Get the integer value from string like "14U" or integer like 2
#
# @param Input The object that may be either a integer value or a string
#
# @retval Value The integer value that the input represents
#
def GetIntegerValue(Input):
    """
    Return the integer value of Input, which may already be an integer or a
    C-style numeric token such as "14U", "0x10" or "2ULL".
    """
    if type(Input) in (int, long):
        return Input
    Token = Input
    # Strip C integer suffixes.  The checks run strictly in this sequence,
    # mirroring the original chain of if-statements (e.g. "2ULL" is stripped
    # by the "ULL" branch, not by "U" then "LL").
    for Suffix in ("U", "ULL", "LL"):
        if Token.endswith(Suffix):
            Token = Token[:-len(Suffix)]
    if Token.startswith(("0x", "0X")):
        return int(Token, 16)
    if Token == '':
        # An empty string (e.g. bare "U") counts as zero.
        return 0
    return int(Token)
## convert StringArray like {0x36, 0x00, 0x34, 0x00, 0x21, 0x00, 0x36, 0x00, 0x34, 0x00, 0x00, 0x00}
# to List like [0x36, 0x00, 0x34, 0x00, 0x21, 0x00, 0x36, 0x00, 0x34, 0x00, 0x00, 0x00]
#
# @param StringArray A string array like {0x36, 0x00, 0x34, 0x00, 0x21, 0x00, 0x36, 0x00, 0x34, 0x00, 0x00, 0x00}
#
# @retval A list object of integer items
#
def StringArrayToList(StringArray):
    """
    Convert a C array initializer string like
    "{0x36, 0x00, 0x34, 0x00}" into a Python list of integers
    [0x36, 0x00, 0x34, 0x00].

    @param StringArray  A brace-delimited string of comma-separated integer
                        literals.

    @retval A list object of integer items.
    """
    import ast
    # ast.literal_eval only accepts literals, unlike eval() which would
    # execute arbitrary expressions embedded in a malformed input string.
    return ast.literal_eval('[' + StringArray[1:-1] + ']')
## Convert TokenType String like "PCD_DATUM_TYPE_UINT32 | PCD_TYPE_HII" to TokenType value
#
# @param TokenType A TokenType string like "PCD_DATUM_TYPE_UINT32 | PCD_TYPE_HII"
#
# @retval A integer representation of the TokenType
#
def GetTokenTypeValue(TokenType):
    """
    Convert a TokenType string like "PCD_DATUM_TYPE_UINT32 | PCD_TYPE_HII"
    to the corresponding integer TokenType value.

    @param TokenType  An expression over the PCD_* names below, typically
                      names OR-ed together with '|'.

    @retval An integer representation of the TokenType.
    """
    TokenTypeDict = {
        "PCD_TYPE_SHIFT":28,
        "PCD_TYPE_DATA":(0x0 << 28),
        "PCD_TYPE_HII":(0x8 << 28),
        "PCD_TYPE_VPD":(0x4 << 28),
        "PCD_TYPE_SKU_ENABLED":(0x2 << 28),
        "PCD_TYPE_STRING":(0x1 << 28),
        "PCD_DATUM_TYPE_SHIFT":24,
        "PCD_DATUM_TYPE_POINTER":(0x0 << 24),
        "PCD_DATUM_TYPE_UINT8":(0x1 << 24),
        "PCD_DATUM_TYPE_UINT16":(0x2 << 24),
        "PCD_DATUM_TYPE_UINT32":(0x4 << 24),
        "PCD_DATUM_TYPE_UINT64":(0x8 << 24),
        "PCD_DATUM_TYPE_SHIFT2":20,
        "PCD_DATUM_TYPE_UINT8_BOOLEAN":(0x1 << 20 | 0x1 << 24),
        }
    # Evaluate the expression with builtins blocked, so the string can only
    # reference the PCD_* names above.  Identical result for every valid
    # token-type expression; hardening over the previous bare eval().
    return eval(TokenType, {"__builtins__": {}}, TokenTypeDict)
## construct the external Pcd database using data from Dict
#
# @param Dict A dictionary contains Pcd related tables
#
# @retval Buffer A byte stream of the Pcd database
#
def BuildExDataBase(Dict):
# init Db items
InitValueUint64 = Dict['INIT_DB_VALUE_UINT64']
DbInitValueUint64 = DbComItemList(8, RawDataList = InitValueUint64)
VardefValueUint64 = Dict['VARDEF_DB_VALUE_UINT64']
DbVardefValueUint64 = DbItemList(8, RawDataList = VardefValueUint64)
InitValueUint32 = Dict['INIT_DB_VALUE_UINT32']
DbInitValueUint32 = DbComItemList(4, RawDataList = InitValueUint32)
VardefValueUint32 = Dict['VARDEF_DB_VALUE_UINT32']
DbVardefValueUint32 = DbItemList(4, RawDataList = VardefValueUint32)
VpdHeadValue = Dict['VPD_DB_VALUE']
DbVpdHeadValue = DbComItemList(4, RawDataList = VpdHeadValue)
ExMapTable = zip(Dict['EXMAPPING_TABLE_EXTOKEN'], Dict['EXMAPPING_TABLE_LOCAL_TOKEN'], Dict['EXMAPPING_TABLE_GUID_INDEX'])
DbExMapTable = DbExMapTblItemList(8, RawDataList = ExMapTable)
LocalTokenNumberTable = Dict['LOCAL_TOKEN_NUMBER_DB_VALUE']
DbLocalTokenNumberTable = DbItemList(4, RawDataList = LocalTokenNumberTable)
GuidTable = Dict['GUID_STRUCTURE']
DbGuidTable = DbItemList(16, RawDataList = GuidTable)
StringHeadValue = Dict['STRING_DB_VALUE']
# DbItemList to DbStringHeadTableItemList
DbStringHeadValue = DbStringHeadTableItemList(4, RawDataList = StringHeadValue)
VariableTable = Dict['VARIABLE_DB_VALUE']
DbVariableTable = DbVariableTableItemList(12, RawDataList = VariableTable)
NumberOfSkuEnabledPcd = GetIntegerValue(Dict['SKU_HEAD_SIZE'])
Dict['SKUHEAD_TABLE_VALUE'] = [(0,0) for i in xrange(NumberOfSkuEnabledPcd)]
SkuTable = Dict['SKUHEAD_TABLE_VALUE'] # Generated later
DbSkuTable = DbSkuHeadTableItemList(8, RawDataList = SkuTable)
Dict['STRING_TABLE_DB_VALUE'] = [StringArrayToList(x) for x in Dict['STRING_TABLE_VALUE']]
StringTableValue = Dict['STRING_TABLE_DB_VALUE']
# When calculating the offset, use StringTableLen instead of StringTableValue, as a string's maximum length may differ from its actual length
StringTableLen = Dict['STRING_TABLE_LENGTH']
DbStringTableLen = DbStringItemList(0, RawDataList = StringTableValue, LenList = StringTableLen)
PcdTokenTable = Dict['PCD_TOKENSPACE']
PcdTokenLen = Dict['PCD_TOKENSPACE_LENGTH']
PcdTokenTableValue = [StringArrayToList(x) for x in Dict['PCD_TOKENSPACE']]
DbPcdTokenTable = DbStringItemList(0, RawDataList = PcdTokenTableValue, LenList = PcdTokenLen)
PcdCNameTable = Dict['PCD_CNAME']
PcdCNameLen = Dict['PCD_CNAME_LENGTH']
PcdCNameTableValue = [StringArrayToList(x) for x in Dict['PCD_CNAME']]
DbPcdCNameTable = DbStringItemList(0, RawDataList = PcdCNameTableValue, LenList = PcdCNameLen)
PcdNameOffsetTable = Dict['PCD_NAME_OFFSET']
DbPcdNameOffsetTable = DbItemList(4,RawDataList = PcdNameOffsetTable)
SizeTableValue = zip(Dict['SIZE_TABLE_MAXIMUM_LENGTH'], Dict['SIZE_TABLE_CURRENT_LENGTH'])
DbSizeTableValue = DbSizeTableItemList(2, RawDataList = SizeTableValue)
InitValueUint16 = Dict['INIT_DB_VALUE_UINT16']
DbInitValueUint16 = DbComItemList(2, RawDataList = InitValueUint16)
VardefValueUint16 = Dict['VARDEF_DB_VALUE_UINT16']
DbVardefValueUint16 = DbItemList(2, RawDataList = VardefValueUint16)
InitValueUint8 = Dict['INIT_DB_VALUE_UINT8']
DbInitValueUint8 = DbComItemList(1, RawDataList = InitValueUint8)
VardefValueUint8 = Dict['VARDEF_DB_VALUE_UINT8']
DbVardefValueUint8 = DbItemList(1, RawDataList = VardefValueUint8)
InitValueBoolean = Dict['INIT_DB_VALUE_BOOLEAN']
DbInitValueBoolean = DbComItemList(1, RawDataList = InitValueBoolean)
VardefValueBoolean = Dict['VARDEF_DB_VALUE_BOOLEAN']
DbVardefValueBoolean = DbItemList(1, RawDataList = VardefValueBoolean)
SkuidValue = Dict['SKUID_VALUE']
DbSkuidValue = DbItemList(1, RawDataList = SkuidValue)
SkuIndexValue = Dict['SKU_INDEX_VALUE']
DbSkuIndexValue = DbItemList(0,RawDataList = SkuIndexValue)
# Unit Db Items
UnInitValueUint64 = Dict['UNINIT_GUID_DECL_UINT64']
DbUnInitValueUint64 = DbItemList(8, RawDataList = UnInitValueUint64)
UnInitValueUint32 = Dict['UNINIT_GUID_DECL_UINT32']
DbUnInitValueUint32 = DbItemList(4, RawDataList = UnInitValueUint32)
UnInitValueUint16 = Dict['UNINIT_GUID_DECL_UINT16']
DbUnInitValueUint16 = DbItemList(2, RawDataList = UnInitValueUint16)
UnInitValueUint8 = Dict['UNINIT_GUID_DECL_UINT8']
DbUnInitValueUint8 = DbItemList(1, RawDataList = UnInitValueUint8)
UnInitValueBoolean = Dict['UNINIT_GUID_DECL_BOOLEAN']
DbUnInitValueBoolean = DbItemList(1, RawDataList = UnInitValueBoolean)
PcdTokenNumberMap = Dict['PCD_ORDER_TOKEN_NUMBER_MAP']
DbNameTotle = ["InitValueUint64", "VardefValueUint64", "InitValueUint32", "VardefValueUint32", "VpdHeadValue", "ExMapTable",
"LocalTokenNumberTable", "GuidTable", "StringHeadValue", "PcdNameOffsetTable","VariableTable","SkuTable", "StringTableLen", "PcdTokenTable", "PcdCNameTable",
"SizeTableValue", "InitValueUint16", "VardefValueUint16", "InitValueUint8", "VardefValueUint8", "InitValueBoolean",
"VardefValueBoolean", "SkuidValue", "SkuIndexValue","UnInitValueUint64", "UnInitValueUint32", "UnInitValueUint16", "UnInitValueUint8", "UnInitValueBoolean"]
DbTotal = [InitValueUint64, VardefValueUint64, InitValueUint32, VardefValueUint32, VpdHeadValue, ExMapTable,
LocalTokenNumberTable, GuidTable, StringHeadValue, PcdNameOffsetTable,VariableTable,SkuTable, StringTableLen, PcdTokenTable,PcdCNameTable,
SizeTableValue, InitValueUint16, VardefValueUint16,InitValueUint8, VardefValueUint8, InitValueBoolean,
VardefValueBoolean, SkuidValue, SkuIndexValue, UnInitValueUint64, UnInitValueUint32, UnInitValueUint16, UnInitValueUint8, UnInitValueBoolean]
DbItemTotal = [DbInitValueUint64, DbVardefValueUint64, DbInitValueUint32, DbVardefValueUint32, DbVpdHeadValue, DbExMapTable,
DbLocalTokenNumberTable, DbGuidTable, DbStringHeadValue, DbPcdNameOffsetTable,DbVariableTable,DbSkuTable, DbStringTableLen, DbPcdTokenTable, DbPcdCNameTable,
DbSizeTableValue, DbInitValueUint16, DbVardefValueUint16,DbInitValueUint8, DbVardefValueUint8, DbInitValueBoolean,
DbVardefValueBoolean, DbSkuidValue, DbSkuIndexValue, DbUnInitValueUint64, DbUnInitValueUint32, DbUnInitValueUint16, DbUnInitValueUint8, DbUnInitValueBoolean]
# SkuidValue is the last table in the init table items
InitTableNum = DbTotal.index(SkuidValue) + 1 + 1 # +1 is for SkuIndexValue table
# The FixedHeader length of the PCD_DATABASE_INIT, from Signature to Pad
FixedHeaderLen = 64
# Get offset of SkuId table in the database
SkuIdTableOffset = FixedHeaderLen
for DbIndex in xrange(len(DbTotal)):
if DbTotal[DbIndex] is SkuidValue:
break
SkuIdTableOffset += DbItemTotal[DbIndex].GetListSize()
# Get offset of SkuValue table in the database
SkuTableOffset = FixedHeaderLen
for DbIndex in xrange(len(DbTotal)):
if DbTotal[DbIndex] is SkuTable:
break
SkuTableOffset += DbItemTotal[DbIndex].GetListSize()
PcdTokenTableDbOffset = FixedHeaderLen
for DbIndex in xrange(len(DbTotal)):
if DbTotal[DbIndex] is PcdTokenTable:
break
PcdTokenTableDbOffset += DbItemTotal[DbIndex].GetListSize()
PcdCNameTableDbOffset = FixedHeaderLen
for DbIndex in xrange(len(DbTotal)):
if DbTotal[DbIndex] is PcdCNameTable:
break
PcdCNameTableDbOffset += DbItemTotal[DbIndex].GetListSize()
# Fix up the LocalTokenNumberTable, SkuHeader table
SkuHeaderIndex = 0
if len(Dict['SKU_INDEX_VALUE']) > 0:
SkuIndexIndexTable = [(0) for i in xrange(len(Dict['SKU_INDEX_VALUE']))]
SkuIndexIndexTable[0] = 0 #Dict['SKU_INDEX_VALUE'][0][0]
for i in range(1,len(Dict['SKU_INDEX_VALUE'])):
SkuIndexIndexTable[i] = SkuIndexIndexTable[i-1]+Dict['SKU_INDEX_VALUE'][i-1][0] + 1
for (LocalTokenNumberTableIndex, (Offset, Table)) in enumerate(LocalTokenNumberTable):
DbIndex = 0
DbOffset = FixedHeaderLen
for DbIndex in xrange(len(DbTotal)):
if DbTotal[DbIndex] is Table:
DbOffset += | |
PlotSnapList, simulation_norm,
mean_galaxy_sfr, std_galaxy_sfr,
mean_galaxy_ssfr, std_galaxy_ssfr,
N_galaxy, model_tags, output_tag):
"""
Plots the specific star formation rate (sSFR) as a function of stellar mass.
Parallel compatible.
Accepts 3D arrays of the sSFR binned into Stellar Mass bins.
Mass units log(Msun).
Parameters
---------
SnapList : Nested array, SnapList[model_number0] = [snapshot0_model0, ..., snapshotN_model0], with length equal to the number of models.
Snapshots for each model.
simulation_norm : array with length equal to the number of models.
Denotes which simulation each model uses.
0 : MySim
1 : Mini-Millennium
2 : Tiamat (down to z = 5)
3 : Extended Tiamat (down to z = 1.6ish).
4 : Britton's Simulation
5 : Kali
mean_galaxy_ssfr, std_galaxy_ssfr, N_galaxy_ssfr : Nested 3-dimensional array,
mean_galaxy_sfr[model_number0][snapshot0] = [bin0_meanssfr, ..., binN_meanssfr],
with length equal to the number of models.
Mean/Standard deviation for sSFR in each stellar mass bin, for each [model_number] and [snapshot_number].
N_galaxy_fesc is the number of galaxies placed into each mass bin.
model_tags : array of strings with length equal to the number of models.
Strings that contain the tag for each model. Will be placed on the plot.
output_tag : string
Name of the file that will be generated.
Returns
-------
No returns.
Generates and saves the plot (named via output_tag).
Units
-----
Mass units are 1e10 Msun (no h).
"""
def adjust_sfr_plot(ax):
    """Style the mean-SFR vs stellar-mass axis: labels, limits, ticks, legend."""
    ax.set_xlabel(r'$\log_{10}\ M_*\ [M_{\odot}]$',
                  size = PlotScripts.global_fontsize)
    ax.set_ylabel(r'$\mathbf{\langle \mathrm{SFR}\rangle_{M_*}\:[M_\odot\mathrm{yr}^{-1}]}$',
                  size = PlotScripts.global_labelsize)
    ax.set_xlim([4.8, 10])
    ax.set_ylim([-3, 2])
    ax.xaxis.set_minor_locator(mtick.MultipleLocator(0.25))
    ax.yaxis.set_minor_locator(mtick.MultipleLocator(0.25))
    ax.tick_params(which = 'both', direction='in', width =
                   PlotScripts.global_tickwidth)
    ax.tick_params(which = 'major', length = PlotScripts.global_ticklength)
    ax.tick_params(which = 'minor', length = PlotScripts.global_ticklength-2)
    for axis in ['top','bottom','left','right']: # Adjust axis thickness.
        ax.spines[axis].set_linewidth(PlotScripts.global_axiswidth)
    # Bold integer x tick labels.
    # NOTE(review): assumes the major x ticks fall exactly at 6..10 so that
    # these 5 labels line up with them — confirm against the default locator.
    tick_locs = np.arange(6.0, 11.0)
    ax.set_xticklabels([r"$\mathbf{%d}$" % x for x in tick_locs],
                       fontsize = PlotScripts.global_fontsize)
    #tick_locs = np.arange(0.0, 0.80, 0.10)
    #ax.set_yticklabels([r"$\mathbf{%.2f}$" % x for x in tick_locs],
    #                   fontsize = PlotScripts.global_fontsize)
    # Debug output: dump the y tick labels and their locations to stdout.
    labels = ax.yaxis.get_ticklabels()
    locs = ax.yaxis.get_ticklocs()
    for label, loc in zip(labels, locs):
        print("{0} {1}".format(label, loc))
    leg = ax.legend(loc="upper right", numpoints=1, labelspacing=0.1)
    leg.draw_frame(False) # Don't want a box frame
    for t in leg.get_texts(): # Reduce the size of the text
        t.set_fontsize('medium')
def adjust_ssfr_plot(ax):
    """Style the mean-sSFR vs stellar-mass axis: labels, limits, ticks, legend."""
    ax.set_xlabel(r'$\log_{10}\ M_*\ [M_{\odot}]$',
                  size = PlotScripts.global_fontsize)
    # NOTE(review): the y-label's '[' for the units is never closed — looks
    # like a typo in the LaTeX string; confirm intended rendering.
    ax.set_ylabel(r'$\mathbf{\langle\mathrm{sSFR}\rangle_{M_*}\:[\mathrm{yr^{-1}}}$',
                  size = PlotScripts.global_labelsize)
    ax.set_xlim([4.8, 10])
    ax.set_ylim([-9, -4])
    ax.xaxis.set_minor_locator(mtick.MultipleLocator(0.25))
    ax.yaxis.set_minor_locator(mtick.MultipleLocator(0.1))
    ax.tick_params(which = 'both', direction='in', width =
                   PlotScripts.global_tickwidth)
    ax.tick_params(which = 'major', length = PlotScripts.global_ticklength)
    ax.tick_params(which = 'minor', length = PlotScripts.global_ticklength-2)
    for axis in ['top','bottom','left','right']: # Adjust axis thickness.
        ax.spines[axis].set_linewidth(PlotScripts.global_axiswidth)
    # Bold integer x tick labels.
    # NOTE(review): assumes the major x ticks fall exactly at 6..10 so that
    # these 5 labels line up with them — confirm against the default locator.
    tick_locs = np.arange(6.0, 11.0)
    ax.set_xticklabels([r"$\mathbf{%d}$" % x for x in tick_locs],
                       fontsize = PlotScripts.global_fontsize)
    #tick_locs = np.arange(0.0, 0.80, 0.10)
    #ax.set_yticklabels([r"$\mathbf{%.2f}$" % x for x in tick_locs],
    #                   fontsize = PlotScripts.global_fontsize)
    # Debug output: dump the y tick labels and their locations to stdout.
    labels = ax.yaxis.get_ticklabels()
    locs = ax.yaxis.get_ticklocs()
    for label, loc in zip(labels, locs):
        print("{0} {1}".format(label, loc))
    leg = ax.legend(loc="upper right", numpoints=1, labelspacing=0.1)
    leg.draw_frame(False) # Don't want a box frame
    for t in leg.get_texts(): # Reduce the size of the text
        t.set_fontsize('medium')
print("Plotting sSFR as a function of stellar mass.")
## Array initialization ##
master_mean_sfr_stellar, master_std_sfr_stellar, master_N_sfr_stellar, master_bin_middle_stellar = \
collect_across_tasks(mean_galaxy_sfr, std_galaxy_sfr, N_galaxy,
SnapList, PlotSnapList, True, m_gal_low, m_gal_high)
master_mean_ssfr_stellar, master_std_ssfr_stellar, master_N_ssfr_stellar, master_bin_middle_stellar = \
collect_across_tasks(mean_galaxy_ssfr, std_galaxy_ssfr, N_galaxy,
SnapList, PlotSnapList, True, m_gal_low, m_gal_high)
if rank == 0:
fig = plt.figure()
ax1 = fig.add_subplot(111)
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
for model_number in range(0, len(SnapList)):
## Normalization for each model. ##
if (simulation_norm[model_number] == 0):
AllVars.Set_Params_Mysim()
elif (simulation_norm[model_number] == 1):
AllVars.Set_Params_MiniMill()
elif (simulation_norm[model_number] == 2):
AllVars.Set_Params_Tiamat()
elif (simulation_norm[model_number] == 3):
AllVars.Set_Params_Tiamat_extended()
elif (simulation_norm[model_number] == 4):
AllVars.Set_Params_Britton()
elif(simulation_norm[model_number] == 5):
AllVars.Set_Params_Kali()
plot_count = 0
for snapshot_idx in range(0, len(SnapList[model_number])):
if (SnapList[model_number][snapshot_idx] == PlotSnapList[model_number][plot_count]):
if (model_number == 0):
label = r"$\mathbf{z = " + \
str(int(round(AllVars.SnapZ[SnapList[model_number][snapshot_idx]]))) +\
"}$"
else:
label = ""
## Plots as a function of stellar mass ##
ax1.plot(master_bin_middle_stellar[model_number][snapshot_idx],
master_mean_sfr_stellar[model_number][snapshot_idx],
color = PlotScripts.colors[plot_count],
ls = PlotScripts.linestyles[model_number],
rasterized = True, label = label,
lw = PlotScripts.global_linewidth)
ax2.plot(master_bin_middle_stellar[model_number][snapshot_idx],
master_mean_ssfr_stellar[model_number][snapshot_idx],
color = PlotScripts.colors[plot_count],
ls = PlotScripts.linestyles[model_number],
rasterized = True, label = label,
lw = PlotScripts.global_linewidth)
plot_count += 1
if (plot_count == len(PlotSnapList[model_number])):
break
#for model_number in range(0, len(SnapList)): # Just plot some garbage to get the legend labels correct.
#ax1.plot(np.nan, np.nan, color = 'k', linestyle = PlotScripts.linestyles[model_number], rasterized = True, label = model_tags[model_number], linewidth = PlotScripts.global_linewidth)
#ax3.plot(np.nan, np.nan, color = 'k', linestyle = PlotScripts.linestyles[model_number], rasterized = True, label = model_tags[model_number], linewidth = PlotScripts.global_linewidth)
## Stellar Mass plots ##
adjust_sfr_plot(ax1)
adjust_ssfr_plot(ax2)
## Output ##
outputFile = "./{0}SFR{1}".format(output_tag, output_format)
fig.savefig(outputFile, bbox_inches='tight') # Save the figure
print('Saved file to {0}'.format(outputFile))
outputFile = "./{0}sSFR{1}".format(output_tag, output_format)
fig2.savefig(outputFile, bbox_inches='tight') # Save the figure
print('Saved file to {0}'.format(outputFile))
plt.close(fig)
##
##
def plot_fej_Ngamma(SnapList, PlotSnapList, simulation_norm,
                    mean_Ngamma_fej, std_Ngamma_fej,
                    N_fej, model_tags, output_tag):
    """
    Plot the mean number of ionizing photons (log10 N_gamma, scaled by 1e50)
    binned as a function of the ejected fraction f_ej, one curve per model
    and plotted snapshot.  Only rank 0 draws and saves the figure.

    NOTE(review): relies on module-level names not visible here —
    rank, collect_across_tasks, fej_low/fej_high/fej_bin_width,
    output_format, PlotScripts, AllVars, np, plt — confirm they are
    defined at module scope.
    """
    def adjust_plot(ax):
        """Style the f_ej axis: labels, limits, ticks and legend."""
        ax.set_xlabel(r'$\mathbf{f_\mathrm{ej}}$',
                      size = PlotScripts.global_fontsize)
        ax.set_ylabel(r'$\mathbf{\log_{10}\langle N_\gamma\rangle_{f_{ej}}}$',
                      size = PlotScripts.global_labelsize)
        ax.set_xlim([0.0, 1.0])
        #ax.set_ylim([0.05, 0.45])
        #ax.axhline(0.35, 0, 100, color ='k', linewidth = PlotScripts.global_linewidth, linestyle = '-.')
        #ax.text(9.1, 0.37, r"$f_\mathrm{esc} = 0.35$", color = 'k',
        #        size = PlotScripts.global_fontsize)
        ax.xaxis.set_minor_locator(mtick.MultipleLocator(0.10))
        #ax.yaxis.set_minor_locator(mtick.MultipleLocator(0.05))
        ax.tick_params(which = 'both', direction='in', width =
                       PlotScripts.global_tickwidth)
        ax.tick_params(which = 'major', length = PlotScripts.global_ticklength)
        ax.tick_params(which = 'minor', length = PlotScripts.global_ticklength-2)
        for axis in ['top','bottom','left','right']: # Adjust axis thickness.
            ax.spines[axis].set_linewidth(PlotScripts.global_axiswidth)
        #tick_locs = np.arange(6.0, 11.0)
        #ax.set_xticklabels([r"$\mathbf{%d}$" % x for x in tick_locs],
        #                   fontsize = PlotScripts.global_fontsize)
        #tick_locs = np.arange(0.0, 0.80, 0.10)
        #ax.set_yticklabels([r"$\mathbf{%.2f}$" % x for x in tick_locs],
        #                   fontsize = PlotScripts.global_fontsize)
        # Debug output: dump the x tick labels and their locations to stdout.
        labels = ax.xaxis.get_ticklabels()
        locs = ax.xaxis.get_ticklocs()
        for label, loc in zip(labels, locs):
            print("{0} {1}".format(label, loc))
        leg = ax.legend(loc="upper right", numpoints=1, labelspacing=0.1)
        leg.draw_frame(False) # Don't want a box frame
        for t in leg.get_texts(): # Reduce the size of the text
            t.set_fontsize('medium')
    ## Array initialization ##
    # Gather the per-task binned statistics onto rank 0.
    master_mean_Ngamma_fej, master_std_Ngamma_fej, master_N_Ngamma_fej, master_bin_middle_fej = \
        collect_across_tasks(mean_Ngamma_fej, std_Ngamma_fej, N_fej,
                             SnapList, PlotSnapList, True, fej_low, fej_high,
                             fej_bin_width)
    if rank == 0:
        fig = plt.figure()
        ax1 = fig.add_subplot(111)
        # Twin axis created for the (currently commented-out) count curve.
        ax2 = ax1.twinx()
        for model_number in range(0, len(SnapList)):
            ## Normalization for each model. ##
            if (simulation_norm[model_number] == 0):
                AllVars.Set_Params_Mysim()
            elif (simulation_norm[model_number] == 1):
                AllVars.Set_Params_MiniMill()
            elif (simulation_norm[model_number] == 2):
                AllVars.Set_Params_Tiamat()
            elif (simulation_norm[model_number] == 3):
                AllVars.Set_Params_Tiamat_extended()
            elif (simulation_norm[model_number] == 4):
                AllVars.Set_Params_Britton()
            elif(simulation_norm[model_number] == 5):
                AllVars.Set_Params_Kali()
            plot_count = 0
            # Walk the model's snapshots, drawing only those in PlotSnapList.
            for snapshot_idx in range(0, len(SnapList[model_number])):
                if (SnapList[model_number][snapshot_idx] == PlotSnapList[model_number][plot_count]):
                    label = model_tags[model_number]
                    # Mask bins with fewer than 4 galaxies so they are not plotted.
                    w = np.where((master_N_Ngamma_fej[model_number][snapshot_idx] < 4))[0] # If there are no galaxies in the bin we don't want to plot.
                    master_mean_Ngamma_fej[model_number][snapshot_idx][w] = np.nan
                    # Mean N_gamma is stored in units of 1e50 photons; rescale
                    # before taking log10.
                    ax1.plot(master_bin_middle_fej[model_number][snapshot_idx],
                             np.log10(master_mean_Ngamma_fej[model_number][snapshot_idx]*1.0e50),
                             color = PlotScripts.colors[plot_count],
                             ls = PlotScripts.linestyles[model_number],
                             rasterized = True, label = label,
                             lw = PlotScripts.global_linewidth)
                    #ax1.plot(master_bin_middle_fej[model_number][snapshot_idx],
                    #         np.log10(master_mean_Ngamma_fej[model_number][snapshot_idx]*1.0e50
                    #                  * master_N_Ngamma_fej[model_number][snapshot_idx]),
                    #         color = PlotScripts.colors[plot_count],
                    #         ls = PlotScripts.linestyles[model_number],
                    #         rasterized = True, label = label,
                    #lw = PlotScripts.global_linewidth)
                    '''
                    ax2.plot(master_bin_middle_fej[model_number][snapshot_idx],
                             np.log10(master_N_Ngamma_fej[model_number][snapshot_idx]),
                             color = PlotScripts.colors[plot_count],
                             ls = PlotScripts.linestyles[model_number],
                             rasterized = True, label = label,
                             lw = PlotScripts.global_linewidth)
                    '''
                    plot_count += 1
                    # Stop once every requested snapshot has been drawn.
                    if (plot_count == len(PlotSnapList[model_number])):
                        break
        adjust_plot(ax1)
        leg = ax1.legend(loc="upper center", numpoints=1, labelspacing=0.1)
        leg.draw_frame(False) # Don't want a box frame
        for t in leg.get_texts(): # Reduce the size of the text
            t.set_fontsize('medium')
        plt.tight_layout()
        ## Output ##
        outputFile = './%s%s' %(output_tag, output_format)
        fig.savefig(outputFile, bbox_inches='tight') # Save the figure
        print('Saved file to {0}'.format(outputFile))
        plt.close(fig)
def plot_ejectedfraction(SnapList, PlotSnapList, simulation_norm, mean_mvir_ejected,
std_mvir_ejected, N_ejected, mean_ejected_z,
std_ejected_z, N_z, model_tags, output_tag):
'''
Plots the ejected fraction as a function of the halo mass.
Parallel compatible.
Accepts a 3D array of the ejected fraction so we can plot for multiple models and redshifts.
Parameters
---------
SnapList : Nested array, SnapList[model_number0] = [snapshot0_model0, ..., snapshotN_model0], with length equal to the number of models.
Snapshots for each model.
mean_mvir_ejected, std_mvir_ejected, N_ejected : Nested 3-dimensional array, mean_mvir_ejected[model_number0][snapshot0] = [bin0_meanejected, ..., binN_meanejected], with length equal to the number of models.
Mean/Standard deviation for the escape fraction binned into Halo | |
from __future__ import division
from itertools import chain
from numpy import inf
from span.chunking.utils.fixed_step import fixed_step_chunker
import random
def fragment_list(tot_len, n_frags):
    """
    Helper used to troubleshoot list_chunker.

    Randomly fragments range(tot_len) into consecutive pieces by drawing
    n_frags random cut points; empty pieces are dropped.

    Args:
        tot_len: total number of elements covered by the fragments
        n_frags: number of random cut points to draw

    Returns: a list of non-empty consecutive ranges whose concatenation
        is exactly range(tot_len)
    """
    cut_points = sorted(random.sample(range(tot_len), n_frags))
    # each fragment runs from one boundary to the next
    bounds = [0] + cut_points + [tot_len]
    pieces = [range(lo, hi) for lo, hi in zip(bounds, bounds[1:])]
    return [piece for piece in pieces if len(piece) > 0]
def list_frag(list_to_frag):
    """
    Randomly partition a list into consecutive non-empty sublists.

    Args:
        list_to_frag: any list

    Returns: a random partition of list_to_frag in the form of a list of
        sublists whose concatenation is exactly list_to_frag. An empty
        input yields [].
    """
    total = len(list_to_frag)
    if total == 0:
        # BUG FIX: random.randint(1, 0) raised ValueError on empty input;
        # the only partition of an empty list is the empty one.
        return []
    n_frags = random.randint(1, total)
    indices = random.sample(range(total), n_frags)
    indices.sort()
    start = 0
    frag_list = []
    for cut in indices:
        frag_list.append(list_to_frag[start:cut])
        start = cut
    # the last fragment runs from the final cut point to the end
    frag_list.append(list_to_frag[indices[-1]:])
    return [frag for frag in frag_list if len(frag) > 0]
def rand_sub(list_of_list, list_objects):
    """
    Replace, in place, every element of each sublist with an object chosen
    at random from list_objects, then return the mutated outer list.

    The RNG is reseeded with a fixed value, so the substitution is
    reproducible for identical inputs.
    """
    random.seed(8128)
    for sublist in list_of_list:
        for pos in range(len(sublist)):
            sublist[pos] = random.choice(list_objects)
    return list_of_list
def list_chunker(list_it, chk_size, chk_step=None, start_at=0, stop_at=None, return_tail=False):
"""
a function to get (an iterator of) segments (bt, tt) of chunks from the iterator of lists, of the form
list_it = [[list_1], [list_2], ...] where the list_1, list_2...may have different lengths
:param list_it: iterator of lists, for example [[1,2], [3,4,5], [6], ...]
:param chk_size: length of the chunks
:param chk_step: step between chunks
:param start_at: value from the iterator at which we begin building the chunks (inclusive)
:param stop_at: last value from the iterator included in the chunks (exclusive)
:param return_tail: if set to false, only the chunks with max element less than stop_at are yielded
if set to true, any chunks with min value no more than stop_at are returned but they contain values no more
than stop_at
:return: an iterator of the chunks
1) If stop_at is not None and return_tail is False:
will return all full chunks with maximum element index less than stop_at
or until the iterator is exhausted. Only full chunks are returned here.
2) If stop_at is not None and return_tail is True:
will return all full chunks as above along with possibly cut off chunks
containing one term whose index is stop_at-1 or one (last) term which is the
last element of it
3) If stop_at is None and return_tail is False:
will return all full chunks with maximum element index less or equal to the last
element of it
4) If stop_at is None and return_tail is True:
will return all full chunks with maximum element index less or equal to the last
element of it plus cut off chunks whose maximum term index is the last term of it
# the next two examples show the difference between return_tail = True or False
>>> list_it = fragment_list(15, 5)
>>> f = lambda it: list_chunker(it, chk_size=3, chk_step=1, start_at=0, stop_at=None, return_tail=True)
>>> A = list(f(list_it)); B = list(f(iter(list_it))); # trying the function on it (a list) and iter(it) (and iterator)
>>> assert A == B # list_it and iter(list_it) should give the same thing!
>>> A # and that thing is:
[[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9], [8, 9, 10], [9, 10, 11], [10, 11, 12], [11, 12, 13], [12, 13, 14], [13, 14], [14]]
>>> list_it = fragment_list(15, 5)
>>> f = lambda it: list_chunker(it, chk_size=3, chk_step=1, start_at=0, stop_at=None, return_tail=False)
>>> A = list(f(list_it)); B = list(f(iter(list_it))); # trying the function on it (a list) and iter(it) (and iterator)
>>> assert A == B # list_it and iter(list_it) should give the same thing!
>>> A # and that thing is:
[[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9], [8, 9, 10], [9, 10, 11], [10, 11, 12], [11, 12, 13], [12, 13, 14]]
# start_at is set to 2: we remove the first two terms (index 0 and index 1)
# stop_at is set to 5: we remove any terms of index 5 or more
# return_tail is true: we yield the partial chunks as well
>>> list_it = fragment_list(15, 5)
>>> f = lambda it: list_chunker(it, chk_size=3, chk_step=1, start_at=2, stop_at=5, return_tail=True)
>>> A = list(f(list_it)); B = list(f(iter(list_it))); # trying the function on it (a list) and iter(it) (and iterator)
>>> assert A == B # list_it and iter(list_it) should give the same thing!
>>> A # and that thing is:
[[2, 3, 4], [3, 4], [4]]
# same as above with return_tail=False: no partial chunk yielded
>>> list_it = fragment_list(15, 5)
>>> f = lambda it: list_chunker(it, chk_size=3, chk_step=1, start_at=2, stop_at=5, return_tail=False)
>>> A = list(f(list_it)); B = list(f(iter(list_it))); # trying the function on it (a list) and iter(it) (and iterator)
>>> assert A == B # list_it and iter(list_it) should give the same thing!
>>> A # and that thing is:
[[2, 3, 4]]
# chk_step > chk_size in the next 4 examples
>>> list_it = fragment_list(15, 5)
>>> f = lambda it: list_chunker(it, chk_size=3, chk_step=6, start_at=5, stop_at=15, return_tail=False)
>>> A = list(f(list_it)); B = list(f(iter(list_it))); # trying the function on it (a list) and iter(it) (and iterator)
>>> assert A == B # list_it and iter(list_it) should give the same thing!
>>> A # and that thing is:
[[5, 6, 7], [11, 12, 13]]
>>> list_it = fragment_list(15, 5)
>>> f = lambda it: list_chunker(it, chk_size=3, chk_step=4, start_at=2, stop_at=15, return_tail=True)
>>> A = list(f(list_it)); B = list(f(iter(list_it))); # trying the function on it (a list) and iter(it) (and iterator)
>>> assert A == B # list_it and iter(list_it) should give the same thing!
>>> A # and that thing is:
[[2, 3, 4], [6, 7, 8], [10, 11, 12], [14]]
>>> list_it = [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [10, 11], [12, 13, 14, 15, 16], [17], [18], [19]]
>>> f = lambda it: list_chunker(it, chk_size=3, chk_step=6, start_at=5, stop_at=None, return_tail=True)
>>> A = list(f(list_it)); B = list(f(iter(list_it))); # trying the function on it (a list) and iter(it) (and iterator)
>>> assert A == B # list_it and iter(list_it) should give the same thing!
>>> A # and that thing is:
[[5, 6, 7], [11, 12, 13], [17, 18, 19]]
>>> list_it = [[0, 1], [2], [3, 4, 5], [6, 7, 8], [9, 10], [11], [12, 13, 14], [15, 16, 17, 18, 19]]
>>> f = lambda it: list_chunker(it, chk_size=3, chk_step=6, start_at=5, stop_at=None, return_tail=True)
>>> A = list(f(list_it)); B = list(f(iter(list_it))); # trying the function on it (a list) and iter(it) (and iterator)
>>> assert A == B # list_it and iter(list_it) should give the same thing!
>>> A # and that thing is:
[[5, 6, 7], [11, 12, 13], [17, 18, 19]]
"""
if chk_step is None:
chk_step = chk_size
if stop_at is not None:
assert isinstance(stop_at, int), 'stop_at should be an integer'
# setting the start_at to the first element of the iterator by default
if start_at is None:
start_at = 0
if hasattr(list_it, '__getslice__'):
# flatten the list and use fast_chunker
list_it = list(chain.from_iterable(list_it))
for x in fixed_step_chunker(list_it, chk_size, chk_step, start_at, stop_at, return_tail):
yield x
else:
# we set stop_at to be infinity by default
if stop_at is None:
stop_at = inf
# in that case, nothing to return
if stop_at - start_at < chk_size and not return_tail:
return
# getting the first list
buff = list_it.next()
# consuming list_it until we reach start_at
if start_at is not None:
position = 0
while position < start_at:
if position + len(buff) < start_at:
position += len(buff)
buff = list_it.next()
else:
buff = buff[start_at - position:]
position = start_at
break
| |
= True
break
if not found:
res.append({
# 轮次
'round_financing_id': round_financing_and_foundation_entity.round_financing_id.id,
'round_financing_name': round_financing_and_foundation_entity.round_financing_id.name,
'foundation_names': [{
'foundation_id': round_financing_and_foundation_entity.foundation_id.id,
'foundation_name': round_financing_and_foundation_entity.foundation_id.name,
'meta_sub_project_id': meta_sub_pro.id,
}],
})
count += 1
return res
    # Called over RPC: passes detailed info to the front end for display and interaction
def rpc_get_info(self, **kwargs):
kwargs['prev_or_post_investment'] = True
return self._get_info(**kwargs)
    # Post-investment operations: same flow as pre-investment, different flag
def rpc_get_post_info(self, **kwargs):
kwargs['prev_or_post_investment'] = False
return self._get_info(**kwargs)
    # Pre-investment: add sub-stage(s)
def rpc_new_tache_prev(self, **kwargs):
meta_sub_project_id = kwargs['meta_sub_project_id']
sub_tache_ids = kwargs['sub_tache_ids']
if len(sub_tache_ids) > 1:
return self.new_four_sub_tache(
meta_sub_project_id=meta_sub_project_id,
sub_tache_ids=sub_tache_ids,)
elif len(sub_tache_ids) == 1:
return self.new_sub_tache(
meta_sub_project_id=meta_sub_project_id,
sub_tache_id=sub_tache_ids[0],)
else:
pass
    # Add a single sub-stage
def new_sub_tache(self, **kwargs):
meta_sub_project_id = kwargs['meta_sub_project_id']
current_sub_tache_id = kwargs['sub_tache_ids']
meta_sub_project_entity = self.meta_sub_project_ids.browse(meta_sub_project_id)
sub_tache_entity = meta_sub_project_entity.sub_tache_ids.browse(current_sub_tache_id)
current_tache_entity = sub_tache_entity.tache_id
# 获取当前子环节所有的兄弟环节
brother_sub_tache_entities = meta_sub_project_entity.sub_tache_ids & current_tache_entity.tache_status_ids
# brother_sub_tache_entities = brother_sub_tache_entities
index = len(brother_sub_tache_entities) + 1
is_last = True
for sub_tache_e in meta_sub_project_entity.sub_tache_ids:
if sub_tache_e.parent_id == sub_tache_entity:
is_last = False
# 新增子环节
new_sub_tache_entity = brother_sub_tache_entities.create({
'name': brother_sub_tache_entities[0].name + ' ' + str(index),
'meta_sub_project_id': brother_sub_tache_entities[-1].meta_sub_project_id.id,
'tache_id': brother_sub_tache_entities[-1].tache_id.id,
'order_parent_id': brother_sub_tache_entities[-1].id,
'parent_id': brother_sub_tache_entities[0].id,
'is_unlocked': True,
})
sub_tache_e.write({
'order_parent_id': new_sub_tache_entity.id,
})
break
if is_last:
# index = brother_sub_tache_entities[-1].index + 1
brother_sub_tache_entities.create({
'name': brother_sub_tache_entities[0].name + ' ' + str(index),
'meta_sub_project_id': brother_sub_tache_entities[-1].meta_sub_project_id.id,
'tache_id': brother_sub_tache_entities[-1].tache_id.id,
'order_parent_id': brother_sub_tache_entities[-1].id,
'parent_id': brother_sub_tache_entities[-1].id,
'is_unlocked': True,
})
return self.rpc_get_info(meta_project_id=meta_sub_project_id)
# return self._get_info()
# is_last = True
# for sub_tache_e in meta_sub_project_entity.sub_tache_ids:
# if sub_tache_e.parent_id == sub_tache_entity:
# # 如果数据已经解锁的话,向前端报错,不能有这样的情况产生
# if sub_tache_e.is_unlocked:
# raise UserError(u'被依赖的环节已经接解锁!!!')
# # index = brother_sub_tache_entities[-1].index + 1
#
#
#
# for sub_tache_e in meta_sub_project_entity.sub_tache_ids:
#
# if sub_tache_e.order_parent_id == brother_sub_tache_entities[-1]:
# is_last = False
# # 如果数据已经解锁的话,向前端报错,不能有这样的情况产生
# # if sub_tache_e.is_unlocked:
# # raise UserError(u'依赖的环节已经接解锁!!!')
# index = brother_sub_tache_entities[-1].index + 1
#
# # 新增子环节
# new_sub_tache_entity = brother_sub_tache_entities.create({
# 'name': brother_sub_tache_entities[0].name + ' ' + str(index),
# 'meta_sub_project_id': sub_tache_entity.meta_sub_project_id.id,
# # 'tache_id': sub_tache_entity.tache_id.id,
# 'order_parent_id': brother_sub_tache_entities[-1].id,
# 'parent_id': brother_sub_tache_entities[-1].id,
# 'index': index,
# })
#
# sub_tache_e.write({
# 'parent_id': new_sub_tache_entity.id,
# })
#
# # 还需要新增子审批实体
#
# new_sub_tache_entity.sub_pro_approval_flow_settings_ids.create({
# 'sub_project_tache_id': new_sub_tache_entity.id,
# 'meta_sub_project_id': meta_sub_project_entity.id,
# # 理论上主环节中只有一份主审批流实体
# 'approval_flow_settings_id': new_sub_tache_entity.tache_id.approval_flow_settings_ids.id,
# # 默认就指向第一个位置!!!
# 'current_approval_flow_node_id': new_sub_tache_entity.tache_id.approval_flow_settings_ids.
# approval_flow_setting_node_ids.sorted('order')[0].id,
# })
#
#
#
#
#
#
# # 每次都需要调用这个方法
# # meta_sub_project_entity.sub_tache_ids.set_depency_order_by_sub_tache()
#
# break
#
# if is_last:
# index = brother_sub_tache_entities[-1].index + 1
# brother_sub_tache_entities.create({
# 'name': brother_sub_tache_entities[-1].name + ' ' + str(index),
# 'meta_sub_project_id': sub_tache_entity.meta_sub_project_id.id,
# 'tache_id': sub_tache_entity.tache_id.id,
# 'parent_id': sub_tache_entity.id,
# 'index': index,
# })
#
# # meta_sub_project_entity.sub_tache_ids.set_depency_order_by_sub_tache()
#
    def new_four_sub_tache(self, **kwargs):
        """Append one new sub-stage to each of several stage groups at once.

        kwargs:
            meta_sub_project_id: id of the meta sub-project to modify
            sub_tache_ids: ids of sub-stages (one per stage group) whose
                groups each receive a new sibling

        The freshly created sub-stages are chained to each other (created
        order preserved, dependencies written back-to-front) and the chain is
        hooked after the last existing sibling of the final group.
        Returns the refreshed front-end info (rpc_get_info).
        """
        meta_sub_project_id = kwargs['meta_sub_project_id']
        sub_tache_ids = kwargs['sub_tache_ids']
        meta_sub_project_entity = self.meta_sub_project_ids.browse(meta_sub_project_id)
        to_do_list = []
        # Entity the whole new chain will depend on (filled in the loop below).
        current_last_sub_tache_entity = None
        for current_sub_tache_id in sub_tache_ids:
            sub_tache_entity = meta_sub_project_entity.sub_tache_ids.browse(current_sub_tache_id)
            current_tache_entity = sub_tache_entity.tache_id
            # All sibling sub-stages belonging to the same stage group.
            brother_sub_tache_entities = meta_sub_project_entity.sub_tache_ids & current_tache_entity.tache_status_ids
            # Remember the last sibling of the final group: it anchors the new chain.
            if current_sub_tache_id == sub_tache_ids[-1]:
                current_last_sub_tache_entity = brother_sub_tache_entities[-1]
            # brother_sub_tache_entities = brother_sub_tache_entities
            index = len(brother_sub_tache_entities) + 1
            new_sub_tache_entity = brother_sub_tache_entities.create({
                'name': brother_sub_tache_entities[0].name + ' ' + str(index),
                'meta_sub_project_id': brother_sub_tache_entities[-1].meta_sub_project_id.id,
                'tache_id': brother_sub_tache_entities[-1].tache_id.id,
                # 'order_parent_id': brother_sub_tache_entities[-1].id,
                # 'parent_id': brother_sub_tache_entities[0].id,
                # 'is_unlocked': True,
            })
            # sub_tache_e.write({
            #     'order_parent_id': new_sub_tache_entity.id,
            # })
            to_do_list.append(new_sub_tache_entity)
        # The first created sub-stage is unlocked by default.
        to_do_list[0].write({
            'is_unlocked': True,
        })
        # Hide the "add more" control on the last created sub-stage.
        to_do_list[-1].write({
            'once_or_more': False,
        })
        # Reverse the list so dependencies can be written back-to-front.
        revered_to_list = to_do_list[::-1]
        # Chain the new sub-stages: each one depends on its predecessor.
        for i, sub_tache_entity in enumerate(revered_to_list[:-1]):
            sub_tache_entity.write({
                'parent_id': revered_to_list[i+1].id,
                'order_parent_id': revered_to_list[i+1].id,
            })
        # Anchor the head of the chain on the last pre-existing sibling.
        to_do_list[0].write({
            'parent_id': current_last_sub_tache_entity.id,
            'order_parent_id': current_last_sub_tache_entity.id,
        })
        # Re-point any pre-existing sub-stage that depended on the anchor
        # so it now follows the tail of the new chain.
        remain_tachetities = set(meta_sub_project_entity.sub_tache_ids) - set(to_do_list)
        for sub_tache_e in remain_tachetities:
            if sub_tache_e.order_parent_id == current_last_sub_tache_entity:
                sub_tache_e.write({
                    'order_parent_id': to_do_list[-1].id,
                })
        return self.rpc_get_info(meta_project_id=meta_sub_project_id)
    # Post-investment: add sub-stage(s)
def rpc_new_tache_post(self, **kwargs):
meta_sub_project_id = kwargs['meta_sub_project_id']
sub_tache_ids = kwargs['sub_tache_ids']
if len(sub_tache_ids) > 1:
return self.new_four_sub_tache_post(
meta_sub_project_id=meta_sub_project_id,
sub_tache_ids=sub_tache_ids,)
elif len(sub_tache_ids) == 1:
return self.new_sub_tache_post(
meta_sub_project_id=meta_sub_project_id,
sub_tache_id=sub_tache_ids[0],)
else:
pass
    # Add a single sub-stage (post-investment)
def new_sub_tache_post(self, **kwargs):
meta_sub_project_id = kwargs['meta_sub_project_id']
current_sub_tache_id = kwargs['sub_tache_ids']
meta_sub_project_entity = self.meta_sub_project_ids.browse(meta_sub_project_id)
sub_tache_entity = meta_sub_project_entity.sub_tache_ids.browse(current_sub_tache_id)
current_tache_entity = sub_tache_entity.tache_id
# 获取当前子环节所有的兄弟环节
brother_sub_tache_entities = meta_sub_project_entity.sub_tache_ids & current_tache_entity.tache_status_ids
# brother_sub_tache_entities = brother_sub_tache_entities
index = len(brother_sub_tache_entities) + 1
is_last = True
for sub_tache_e in meta_sub_project_entity.sub_tache_ids:
if sub_tache_e.parent_id == sub_tache_entity:
is_last = False
# 新增子环节
new_sub_tache_entity = brother_sub_tache_entities.create({
'name': brother_sub_tache_entities[0].name + ' ' + str(index),
'meta_sub_project_id': brother_sub_tache_entities[-1].meta_sub_project_id.id,
'tache_id': brother_sub_tache_entities[-1].tache_id.id,
'order_parent_id': brother_sub_tache_entities[-1].id,
'parent_id': brother_sub_tache_entities[0].id,
'is_unlocked': True,
})
sub_tache_e.write({
'order_parent_id': new_sub_tache_entity.id,
})
break
if is_last:
# index = brother_sub_tache_entities[-1].index + 1
brother_sub_tache_entities.create({
'name': brother_sub_tache_entities[0].name + ' ' + str(index),
'meta_sub_project_id': brother_sub_tache_entities[-1].meta_sub_project_id.id,
'tache_id': brother_sub_tache_entities[-1].tache_id.id,
'order_parent_id': brother_sub_tache_entities[-1].id,
'parent_id': brother_sub_tache_entities[-1].id,
'is_unlocked': True,
})
return self.rpc_get_post_info(meta_project_id=meta_sub_project_id)
    def new_four_sub_tache_post(self, **kwargs):
        """Append one new sub-stage to each of several stage groups at once
        (post-investment mirror of new_four_sub_tache).

        kwargs:
            meta_sub_project_id: id of the meta sub-project to modify
            sub_tache_ids: ids of sub-stages (one per stage group) whose
                groups each receive a new sibling

        Returns the refreshed front-end info (rpc_get_post_info).
        """
        meta_sub_project_id = kwargs['meta_sub_project_id']
        sub_tache_ids = kwargs['sub_tache_ids']
        meta_sub_project_entity = self.meta_sub_project_ids.browse(meta_sub_project_id)
        to_do_list = []
        # Entity the whole new chain will depend on (filled in the loop below).
        current_last_sub_tache_entity = None
        for current_sub_tache_id in sub_tache_ids:
            sub_tache_entity = meta_sub_project_entity.sub_tache_ids.browse(current_sub_tache_id)
            current_tache_entity = sub_tache_entity.tache_id
            # All sibling sub-stages belonging to the same stage group.
            brother_sub_tache_entities = meta_sub_project_entity.sub_tache_ids & current_tache_entity.tache_status_ids
            # Remember the last sibling of the final group: it anchors the new chain.
            if current_sub_tache_id == sub_tache_ids[-1]:
                current_last_sub_tache_entity = brother_sub_tache_entities[-1]
            # brother_sub_tache_entities = brother_sub_tache_entities
            index = len(brother_sub_tache_entities) + 1
            new_sub_tache_entity = brother_sub_tache_entities.create({
                'name': brother_sub_tache_entities[0].name + ' ' + str(index),
                'meta_sub_project_id': brother_sub_tache_entities[-1].meta_sub_project_id.id,
                'tache_id': brother_sub_tache_entities[-1].tache_id.id,
                # 'order_parent_id': brother_sub_tache_entities[-1].id,
                # 'parent_id': brother_sub_tache_entities[0].id,
                # 'is_unlocked': True,
            })
            # sub_tache_e.write({
            #     'order_parent_id': new_sub_tache_entity.id,
            # })
            to_do_list.append(new_sub_tache_entity)
        # The first created sub-stage is unlocked by default.
        to_do_list[0].write({
            'is_unlocked': True,
        })
        # Hide the "add more" control on the last created sub-stage.
        to_do_list[-1].write({
            'once_or_more': False,
        })
        # Reverse the list so dependencies can be written back-to-front.
        revered_to_list = to_do_list[::-1]
        # Chain the new sub-stages: each one depends on its predecessor.
        for i, sub_tache_entity in enumerate(revered_to_list[:-1]):
            sub_tache_entity.write({
                'parent_id': revered_to_list[i+1].id,
                'order_parent_id': revered_to_list[i+1].id,
            })
        # Anchor the head of the chain on the last pre-existing sibling.
        to_do_list[0].write({
            'parent_id': current_last_sub_tache_entity.id,
            'order_parent_id': current_last_sub_tache_entity.id,
        })
        # Re-point any pre-existing sub-stage that depended on the anchor
        # so it now follows the tail of the new chain.
        remain_tachetities = set(meta_sub_project_entity.sub_tache_ids) - set(to_do_list)
        for sub_tache_e in remain_tachetities:
            if sub_tache_e.order_parent_id == current_last_sub_tache_entity:
                sub_tache_e.write({
                    'order_parent_id': to_do_list[-1].id,
                })
        return self.rpc_get_post_info(meta_project_id=meta_sub_project_id)
    # Approval flow construction.
    # Fetch sub approval-flow record info
def rpc_get_approval_flow_info(self, **kwargs):
tache_info = kwargs.get('tache')
meta_sub_project_id = tache_info['meta_sub_project_id']
sub_approval_flow_settings_id = tache_info['sub_approval_flow_settings_id']
meta_sub_project_entity = self.meta_sub_project_ids.browse(meta_sub_project_id)
sub_approval_flow_settings_entity = meta_sub_project_entity.sub_approval_flow_settings_ids.browse(sub_approval_flow_settings_id)
return sub_approval_flow_settings_entity.get_all_sub_aproval_flow_settings_records()
    # Save sub approval-flow info
    def rpc_save_approval_flow_info(self, **kwargs):
        """RPC entry point: persist one approval record on a sub approval flow.

        kwargs:
            tache: dict with 'meta_sub_project_id' and 'sub_approval_flow_settings_id'
            prev_or_post_investment: bool forwarded to _get_info
            approval_flow_settings_record: dict with the record to save; the
                approver and the approval role are filled in server-side below.

        Returns the refreshed front-end info (_get_info).
        """
        tache_info = kwargs.get('tache')
        meta_sub_project_id = tache_info['meta_sub_project_id']
        sub_approval_flow_settings_id = tache_info['sub_approval_flow_settings_id']
        meta_sub_project_entity = self.meta_sub_project_ids.browse(meta_sub_project_id)
        sub_approval_flow_settings_entity = meta_sub_project_entity.sub_approval_flow_settings_ids.browse(sub_approval_flow_settings_id)
        prev_or_post_investment = kwargs['prev_or_post_investment']
        approval_flow_settings_record_info = kwargs.get('approval_flow_settings_record')
        # In theory the current user has exactly one employee record: the approver.
        approval_flow_settings_record_info['approval_person_id'] = self.env.user.employee_ids[0].id
        # Approval role comes from the current node of the flow.
        approval_flow_settings_record_info['approval_role_id'] = sub_approval_flow_settings_entity.current_approval_flow_node_id.operation_role_id.id
        # Update the approval node through the current sub-stage.
        sub_approval_flow_settings_entity.save_approval_flow_info(approval_flow_settings_record_info)
        # if sub_approval_flow_settings_entity.is_success():
        #
        #     # trigger the next sub-stage
        #     current_sub_tache_entity = meta_sub_project_entity.sub_tache_ids.browse(tache_info['sub_tache_id'])
        #
        #     for sub_tache_entity in meta_sub_project_entity.get_sub_taches():
        #         if sub_tache_entity.parent_id == current_sub_tache_entity:
        #             sub_tache_entity.write({
        #                 'is_unlocked': True,
        #                 # 'status': 2,
        #             })
        #
        #
        #             # while triggering the next sub-stage, also trigger its matching sub approval node
        #
        #             sub_approval_flow_settings_entity_next = sub_tache_entity.sub_pro_approval_flow_settings_ids
        #
        #             sub_approval_flow_settings_entity_next.write({
        #                 'status': 2,
        #             })
        #
        #             break
        return self._get_info(meta_project_id=meta_sub_project_id, prev_or_post_investment=prev_or_post_investment)
    # Fetch permission configuration data
def rpc_get_permission_configuration(self):
res = []
default_is_full = True
for meta_sub_pro_entity in self.meta_sub_project_ids:
tmp = {}
tmp['meta_sub_pro_id'] = meta_sub_pro_entity.id
name1 = meta_sub_pro_entity.round_financing_and_Foundation_ids[0].round_financing_id.name
name1 = name1 if name1 else u'暂无轮次'
name2 = meta_sub_pro_entity.round_financing_and_Foundation_ids[0].foundation_id.name
name2 = name2 if name2 else u'暂无基金'
tmp['foundation_for_rund_financing_info'] = name1 + '-' + name2
tmp['approval_role_infos'] = []
approval_role_employee_rel_repr = meta_sub_pro_entity.sub_meta_pro_approval_settings_role_rel[0] if meta_sub_pro_entity.sub_meta_pro_approval_settings_role_rel else meta_sub_pro_entity.sub_meta_pro_approval_settings_role_rel
approval_role_ids = approval_role_employee_rel_repr.approval_role_id.search([])
employee_ids = approval_role_employee_rel_repr.employee_id.search([])
default_is_full = True
for approval_role_entity in approval_role_ids:
tmp2 = {}
tmp2['approval_role_id'] = approval_role_entity.id
tmp2['approval_role_name'] = approval_role_entity.name
tmp2['employee_infos'] = [{'employee_id': approval_employee_rel.employee_id.id, 'name': approval_employee_rel.employee_id.name_related}
for approval_employee_rel in meta_sub_pro_entity.sub_meta_pro_approval_settings_role_rel if approval_employee_rel.approval_role_id == approval_role_entity]
if not tmp2['employee_infos']:
default_is_full = False
tmp['approval_role_infos'].append(tmp2)
tmp['default_is_full'] = default_is_full
default_is_full = default_is_full and tmp['default_is_full']
res.append(tmp)
return {
'meta_sub_project_infos': res,
'default_is_full': default_is_full,
'is_admin': self.env.user.id == SUPERUSER_ID,
'employee_infos': [{'employee_id': employee_entity.id, 'name': employee_entity.name_related} for employee_entity in employee_ids]
}
    # Save permission configuration data
    def rpc_save_permission_configuration(self, **kwargs):
        """RPC entry point: persist the role/employee permission configuration
        sent by the front end, one payload per meta sub-project.

        kwargs['meta_sub_project_infos'] is a list of dicts, each carrying
        'meta_sub_pro_id' and 'approval_role_infos'. Returns the refreshed
        configuration (rpc_get_permission_configuration).
        """
        meta_sub_project_infos = kwargs.get('meta_sub_project_infos')
        for meta_sub_project_info in meta_sub_project_infos:
            meta_sub_project_info[u'is_current_exists'] = False  # front-end copies may pre-fill payloads we do not need afterwards; mark all as unseen first
            # meta_sub_project_entity = self.meta_sub_project_ids.browse(meta_sub_project_id)
            # self._save_permission_configuration(meta_sub_project_entity, meta_sub_project_info)
        # Apply each payload to its matching sub-project and flag it as seen.
        for meta_sub_project_entity in self.meta_sub_project_ids:
            for meta_sub_project_info in meta_sub_project_infos:
                if meta_sub_project_info[u'meta_sub_pro_id'] == meta_sub_project_entity.id:
                    self._save_permission_configuration(meta_sub_project_entity, meta_sub_project_info)
                    meta_sub_project_info[u'is_current_exists'] = True
                    break
        # Delete configuration data of sub-projects that were removed earlier.
        # NOTE(review): this unlink branch appears unreachable -- any payload
        # matching an entity here was already flagged is_current_exists=True
        # in the loop above. The intent (removing entities with NO matching
        # payload) may require a different condition; confirm before relying
        # on this cleanup.
        for meta_sub_project_entity in self.meta_sub_project_ids:
            for meta_sub_project_info in meta_sub_project_infos:
                if meta_sub_project_info[u'meta_sub_pro_id'] == meta_sub_project_entity.id:
                    if meta_sub_project_info[u'is_current_exists'] == False:
                        # self._save_permission_configuration(meta_sub_project_entity, meta_sub_project_info)
                        meta_sub_project_entity.unlink()
                        break
        #
        return self.rpc_get_permission_configuration()
def _save_permission_configuration(self, meta_sub_project_entity, meta_sub_project_info):
current_rel_entities = meta_sub_project_entity.sub_meta_pro_approval_settings_role_rel
current_rel_info_ids = set((rel.approval_role_id.id, rel.employee_id.id) for rel in current_rel_entities)
target_rel_info_ids = set((approval_role_info['approval_role_id'], employee_info['employee_id'])
for approval_role_info in meta_sub_project_info['approval_role_infos']
for employee_info in approval_role_info['employee_infos'])
todoremove_ids = current_rel_info_ids - target_rel_info_ids
todoadd_ids = target_rel_info_ids - current_rel_info_ids
# tuple_id ( approval_role_id, employee_id)
res = current_rel_entities.filtered(lambda rel: (rel.approval_role_id.id, rel.employee_id.id) in todoremove_ids)
res.unlink()
# for tuple_id in todoremove_ids:
#
# for rel_entity in current_rel_entities:
# if rel_entity.approval_role_id.id == tuple_id[0] and rel_entity.employee_id.id == tuple_id[1]:
# rel_entity.unlink()
# break
for tuple_id in todoadd_ids:
current_rel_entities.create({
'meta_sub_project_id': meta_sub_project_entity.id,
'approval_role_id': tuple_id[0],
'employee_id': tuple_id[1],
})
    # Copy an existing configuration across the sub-projects of all main projects
def rpc_copy_all_permission_configuration(self):
project_entities = self.search([])
res = []
for project_entity in project_entities:
for meta_pro_entity in project_entity.meta_sub_project_ids:
round_financing_and_Foundation_entity = meta_pro_entity.round_financing_and_Foundation_ids[0]
round_financing_name = round_financing_and_Foundation_entity.round_financing_id.name if round_financing_and_Foundation_entity.round_financing_id \
else u'暂无轮次'
foundation_name = round_financing_and_Foundation_entity.foundation_id.name if round_financing_and_Foundation_entity.foundation_id \
else u'暂无基金'
sub_project_entity = meta_pro_entity.sub_project_ids
sub_project_name = sub_project_entity.name if sub_project_entity else u'暂无子工程'
res.append({
'meta_sub_project_id': meta_pro_entity.id,
'sub_project_name': sub_project_name,
'round_financing_name': round_financing_name,
'foundation_name': foundation_name,
})
return res
def rpc_copy_permission_configuration(self, **kwargs):
current_meta_sub_pro_id = kwargs['current_meta_sub_pro_id']
copy_meta_sub_pro_id = kwargs['copy_meta_sub_pro_id']
# current_meta_sub_pro_entity = self.meta_sub_project_ids.browse(current_meta_sub_pro_id)
copy_meta_sub_pro_entity = self.meta_sub_project_ids.browse(copy_meta_sub_pro_id)
# return | |
# Repository: PeterJackNaylor/segmentation_net
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""segnet package file tf_record
Segmentation_base_class -> SegmentationInput -> SegmentationCompile ->
SegmentationSummaries -> Segmentation_model_utils -> Segmentation_train
"""
from datetime import datetime
from tqdm import trange
from ..net_utils import ScoreRecorder
from .segmentation_model_utils import *
def verbose_range(beg, end, word, verbose, verbose_thresh):
    """Return an iterable over [beg, end), optionally wrapped in a tqdm bar.

    If verbose, a tqdm trange estimates the remaining time while iterating.

    Args:
        beg: integer, first value of the iteration (included)
        end: integer, end of the iteration (excluded)
        word: string, description shown on the progress bar
        verbose: integer, verbosity level (usually supplied by the caller object)
        verbose_thresh: integer, a progress bar is shown when verbose > verbose_thresh

    Returns:
        A trange (progress-bar range) when verbose exceeds the threshold,
        otherwise a plain range over the same values, either way printing
        (or not) progress to stdout while being iterated.
    """
    if verbose > verbose_thresh:
        return trange(beg, end, desc=word)
    return range(beg, end)
class SegmentationTrain(SegmentationModelUtils):
    # Training mixin: wires the record queues, optimisation ops and score
    # recording onto the model built by the SegmentationModelUtils chain.
    def train(self, train_record, test_record=None,
              learning_rate=0.001, lr_procedure="1epoch",
              weight_decay=0.0005, batch_size=1,
              decay_ema=0.9999, k=0.96, n_epochs=10,
              early_stopping=3, loss_func=tf.nn.l2_loss,
              save_weights=True, new_log=None,
              num_parallele_batch=8, restore=False,
              track_variable="loss", track_training=False,
              tensorboard=True, save_best=True, return_best=False,
              decode=tf.float32):
        """ Trains the model on train record, optionally you can monitor
        the training by evaluating the test record
        Args:
            train_record: string, path to a tensorflow record file for training.
            test_record: string or None, if given, the model will be evaluated on
                         the test data at every epoch.
            learning_rate: float (default: 0.001) Initial learning rate for the
                           gradient descent update.
            lr_procedure : string (default: 10epoch) Will perform learning rate
                           decay every 10 epochs.
            weight_decay : float (default: 0.0005) Initial value given to the weight
                           decay, the loss is computed:
                           loss = loss + weight_decay * sum(loss_func(W)) where W are
                           training parameters of the model.
            batch_size : integer (default: 1) Size of batch to be fed at each
                         iterations.
            decay_ema : float (default: 0) if 0: ignored
                        exponential moving average decay parameter to apply to weights
                        over time for more robust convergence.
            k : float (default: 0.96) value by which the learning rate decays every
                update.
            n_epochs : integer (default: 10) number of epochs to perform
            early_stopping : integer, if 0 or None ignored, else the model will stop
                             training if the tracked variable doesn't go in the right
                             direction in under early_stopping epochs.
            loss_func : tensorflow function (default: l2_loss) to apply on the weights
                        for the weight decay in the loss function.
            save_weights : bool (default: True) If to store the weights
            new_log : string (default: None) if to save the model in a different folder
                      than the one from which the variables were restored.
            num_parallele_batch : integer (default: 8) number of workers to use to
                                  perform parallel computing.
            restore : bool (default: False) if to restore from the new_log given.
            track_variable : str (default: loss) which variable to track in order to
                             perform early stopping.
            track_training : bool (default: False) if to track track_variable on the
                             training data or on the test data.
            tensorboard : bool (default: True) if to monitor the model via tensorboard.
            save_best : bool (default: True) if to save the best model as last weights
                        in case of early stopping or if there is a better possible model
                        with respect to the test set.
            return_best : bool (default: True) if to return the best model in case of early
                          stopping or if there is a better possible model with respect to
                          the test set.
            decode: tensorflow function (default: tf.float32) how to decode the bytes in
                    the tensorflow records for the input rgb data.
        Returns:
            A python dictionary recapping the training and if present the test history.
        """
        # One epoch = number of whole batches in the record (floored, at least 1).
        steps_in_epoch = max(ut.record_size(train_record) // batch_size, 1)
        test_steps = ut.record_size(test_record) //batch_size if test_record is not None else None
        max_steps = steps_in_epoch * n_epochs
        self.tensorboard = tensorboard
        if new_log is None:
            new_log = self.log
        else:
            check_or_create(new_log)
        stop_early = early_stopping is not None and early_stopping != 0
        if not stop_early:
            early_stopping = 0
        # NOTE(review): the saver is only created when early_stopping is neither
        # 0 nor 3 — with the default early_stopping=3 this branch is skipped,
        # yet self.saver is used below (ScoreRecorder, checkpointing). Confirm
        # self.saver is initialised elsewhere for the default configuration.
        if early_stopping not in [0, 3]:
            ## this saver is to ensure that we can restore to the best weights at the end
            self.saver = self.saver_object(keep=early_stopping + 1,
                                           log=new_log,
                                           restore=restore)
        # Records per-epoch metrics and decides when to stop early / which
        # checkpoint is the best one.
        self.score_recorder = ScoreRecorder(self.saver, self.sess,
                                            new_log, stop_early=stop_early,
                                            lag=early_stopping)
        # Learning-rate decay is only scheduled when both a decay factor k and
        # a decay period (lr_procedure) are supplied; otherwise the rate is fixed.
        if not (k == 0 or k is None or lr_procedure is None or lr_procedure == ""):
            with tf.name_scope('learning_rate_scheduler'):
                lrs = self.learning_rate_scheduler(learning_rate, k, lr_procedure,
                                                   steps_in_epoch)
                if self.verbose:
                    msg = "learning_rate_scheduler added \
                           with initial_value = {}, k = {} \
                           and decrease every = {}"
                    tqdm.write(msg.format(learning_rate, k, lr_procedure))
            self.learning_rate = lrs
        else:
            lrs = learning_rate
            if self.verbose:
                tqdm.write("Learning_rate fixed to :{}".format(lrs))
        if self.tensorboard:
            sw, ms, stw, mts = self.setup_summary(new_log, test_record)
            self.summary_writer = sw
            self.merged_summaries = ms
            if test_record:
                self.summary_test_writer = stw
                self.merged_summaries_test = mts
            if self.verbose:
                tqdm.write("summaries added")
        # Weight decay is folded directly into the loss tensor before the
        # optimiser op is built, so the optimiser sees the regularised loss.
        if weight_decay != 0:
            with tf.name_scope('regularization'):
                self.loss = self.regularize_model(self.loss, loss_func, weight_decay)
                if self.verbose:
                    tqdm.write('regularization weight decay added: {}'.format(weight_decay))
        with tf.name_scope('optimization'):
            opt = self.optimization(lrs, self.loss, self.training_variables)
        if decay_ema != 0 and decay_ema is not None:
            with tf.name_scope('exponential_moving_average'):
                training_op = self.exponential_moving_average(opt,
                                                              self.training_variables,
                                                              decay_ema)
                if self.verbose:
                    tqdm.write("Exponential moving average added to prediction")
        else:
            training_op = opt
        with tf.name_scope('input_from_queue'):
            image_out, anno_out = self.setup_queues(train_record, test_record,
                                                    batch_size, num_parallele_batch,
                                                    decode=decode)
        # To plug in the queue to the main graph
        # with tf.control_dependencies([image_out, anno_out]):
        with tf.name_scope('queue_assigning'):
            # Control the dependency to allow the flow through the data queues.
            # validate_shape=False because the queue output shape can differ
            # from the placeholder variables' initial shape.
            assign_rgb_to_queue = tf.assign(self.rgb_v, image_out,
                                            validate_shape=False)
            assign_lbl_to_queue = tf.assign(self.lbl_v, anno_out,
                                            validate_shape=False)
            assign_to_variable = [assign_rgb_to_queue, assign_lbl_to_queue]
            to_control = tf.tuple(assign_to_variable, control_inputs=[image_out, anno_out])
        # 'blank' pulls one batch through the assigns without a training step;
        # 'train_op' does the same but also runs the optimiser update.
        blank = tf.tuple([self.is_training], name=None, control_inputs=to_control)
        train_op = tf.tuple([training_op], name=None, control_inputs=to_control)
        self.init_uninit([])
        begin_iter = 0
        begin_epoch = begin_iter // steps_in_epoch
        last_epoch = begin_epoch + n_epochs
        last_iter = max_steps + begin_iter
        range_ = verbose_range(begin_iter, last_iter, "training ",
                               self.verbose, 0)
        self.sess.run(blank)
        for step in range_:
            self.sess.run(train_op)
            # NOTE(review): this mixes the step counter with begin_epoch (an
            # epoch index); with begin_iter = 0 both offsets are 0 and the test
            # reduces to (step + 1) % steps_in_epoch == 0 — confirm intent if
            # begin_iter ever becomes nonzero.
            if (step - begin_epoch + 1) % steps_in_epoch == 0 and (step - begin_epoch) != 0:
                # If we are at the end of an epoch
                epoch_number = step // steps_in_epoch
                if self.verbose:
                    i = datetime.now()
                    msg = i.strftime('[%Y/%m/%d %H:%M:%S]: ')
                    msg += ' Epoch {} / {}'.format(epoch_number + 1, last_epoch)
                    tqdm.write(msg)
                if save_weights:
                    self.saver.save(self.sess, new_log + '/' + "model.ckpt",
                                    global_step=epoch_number + 1)
                dic_train_record = self.infer_train_step(epoch_number, control=to_control)
                self.score_recorder.diggest(epoch_number, dic_train_record)
                if test_record:
                    # Flip is_training off for evaluation, then back on; each
                    # 'blank' run also advances the queue by one batch.
                    self.sess.run(blank, feed_dict={self.is_training:False})
                    dic_test_record = self.infer_test_set(epoch_number, test_steps,
                                                          during_training=True, control=to_control)
                    self.sess.run(blank, feed_dict={self.is_training:True})
                    self.score_recorder.diggest(epoch_number, dic_test_record, train=False)
                    # NOTE(review): early stopping is only evaluated when a
                    # test_record is provided, even if track_training is True.
                    if self.score_recorder.stop(track_variable, train_set=track_training):
                        if self.verbose > 0:
                            tqdm.write('stopping early')
                        break
        if save_best:
            self.score_recorder.save_best(track_variable, save_weights, train_set=track_training)
        if return_best:
            # actually works when save best
            tqdm.write("restore_best NOT IMPLEMENTED")
        return self.score_recorder.all_tables()
# ttt1, ttt2 = self.sess.run([test, self.conv1])
# ttt1, ttt2 = self.sess.run([test, self.conv1])
# import matplotlib.pylab as plt
# f, axes = plt.subplots(nrows=9, ncols=ttt1[0].shape[0])
# for i in range(ttt1[0].shape[0]):
# for j in range(8):
# axes[j, i].imshow(ttt2[i,:,:,j].astype('uint8'))
# axes[-1, i].imshow(ttt1[0][i,:,:].astype('uint8'))
# ttt1, ttt2 = self.sess.run([test, self.conv1])
# ttt1, ttt2 = self.sess.run([test, self.conv1])
# fig, axes2 = plt.subplots(nrows=9, ncols=ttt1[0].shape[0])
# for i in range(ttt1[0].shape[0]):
# for j in range(8):
# axes2[j, i].imshow(ttt2[i,:,:,j].astype('uint8'))
# axes2[-1, i].imshow(ttt1[0][i,:,:].astype('uint8'))
# plt.show()
# import pdb; pdb.set_trace()
# size = self.sess.run([warm_up, warm_up2])
# tqdm.write(str(size[0]))
# size = self.sess.run([warm_up, warm_up2])
# tqdm.write(str(size[0]))
# self.sess.run(warm)
# a, c, d, b, e, ff, ff3, ff2, ff1 = self.sess.run([test, self.probability, self.predictions, self.rgb_ph, self.lbl_ph, self.logit, self.conv3, self.conv2, self.conv1]) # self.label_int,
# import matplotlib.pylab as plt
# f, axes = plt.subplots(nrows=4, ncols=c.shape[0]);
# if b.shape[1] == c.shape[1]:
# dis = 0
# else:
# dis = 92
# if c.shape[0] == 1:
# axes[0].imshow(c[0,:,:,0])
# if dis== 0:
# axes[1].imshow(b[0,:,:].astype('uint8'))
# else:
# axes[1].imshow(b[0,dis:-dis,dis:-dis].astype('uint8'))
# axes[2].imshow(d[0,:,:])
# axes[3].imshow(e[0,:,:,0])
# #axes[4].imshow(entry[0,:,:,0])
# for j in range(5):
# axes[j].axis('off')
# else:
# for i in range(c.shape[0]):
# axes[0, i].imshow(c[i,:,:,0])
# if dis== 0:
# axes[1, i].imshow(b[i,:,:].astype('uint8'))
# else:
# axes[1, i].imshow(b[i,dis:-dis,dis:-dis].astype('uint8'))
# axes[2, i].imshow(d[i,:,:])
# axes[3, i].imshow(e[i,:,:,0])
# #axes[4, i].imshow(entry[i,:,:,0])
# for j in range(4):
# axes[j, i].axis('off')
# plt.savefig("train/train_{}.png".format(step))
# f, axes = plt.subplots(nrows=2, ncols=c.shape[0]);
# if c.shape[0] == 1:
# for j in | |
is sex-stratified).
"""
file_base = 'pananc_31063_pcgc_covariates'
file_base = file_base + ('_nosexcovar' if sex_specific else '')
suffix = '.tsv'
if dictout:
out = {anc: anc + '_' + file_base + suffix for anc in ancestries}
else:
out = [anc + '_' + file_base + suffix for anc in ancestries]
return out
def get_annot_split_names(ancestries, dictout, n_annot=25, suffix_incl=True):
    """ Returns names of annotation files.
    Parameters
    ----------
    ancestries: `list`
        List of ancestries to include
    dictout: `bool`
        If the output should be a dictionary indexed on ancestries.
    n_annot: `int`
        Number of LDMS annotation bins encoded in the file name (default 25).
    suffix_incl: `bool`
        If True, append the '.txt.bgz' extension to each name.
    """
    stem = f'pananc_31063_pcgc_{str(n_annot)}LDMS_annotations'
    ext = '.txt.bgz' if suffix_incl else ''
    if dictout:
        return {anc: anc + '_' + stem + ext for anc in ancestries}
    return [anc + '_' + stem + ext for anc in ancestries]
def get_result_log_split_names(ancestries, phenotype_id, dictout, suffix=''):
    """ Returns names of the raw log to output from the RHEmc run.
    Parameters
    ----------
    ancestries: `list`
        List of ancestries to include
    phenotype_id: `str`
        Phenotype identifier embedded in each file name.
    dictout: `bool`
        If the output should be a dictionary indexed on ancestries.
    suffix: `str`
        Appended to the end of the filename before the extension.
    """
    tail = '_' + phenotype_id + '_result' + f'{suffix}.log'
    if dictout:
        return {anc: anc + tail for anc in ancestries}
    return [anc + tail for anc in ancestries]
def parse_ancestries(args):
    """Split the comma-separated ``ancestries`` CLI argument into a list."""
    ancestry_string = args.ancestries
    return ancestry_string.split(',')
def construct_iter_suffix(iter):
    """Return ``'-iter<N>'`` for a numbered iteration, or ``''`` when iter is None."""
    if iter is None:
        return ''
    return '-iter' + str(iter)
def print_pop_Ns(mt):
    """ Enumerates the number of individuals per ancestry represented in the genotype table.
    Parameters
    ----------
    mt: :obj: `MatrixTable`
        Hail MatrixTable with a per-column `pop` field.
    Returns
    -------
    None
    """
    dict_anc = mt.aggregate_cols(hl.agg.counter(mt.pop))
    # Idiom fix: the original used a throwaway list comprehension purely for
    # its print side effects; a plain loop states the intent directly.
    for anc, count in dict_anc.items():
        print(anc + ': ' + str(count))
def convert_pheno_id_to_potential_saige(pheno_id: 'Union[hl.StringExpression, str]'):
    """ Convert phenotype ID to string for matching to Saige.

    Sex labels ('both_sexes', 'females', 'males') are stripped from the
    hyphen-delimited ID unless any component is 'phecode', in which case the
    ID is kept as-is. The result always gets a '.tsv' extension. Accepts
    either a plain Python string or a Hail StringExpression (handled with
    Hail expression ops in the latter case).

    Note: the annotation is quoted (lazy) so defining this function does not
    require `hl`/`Union` to be importable at definition time.
    """
    sexes = ['both_sexes', 'females', 'males']
    exclude = 'phecode'
    split_pheno = pheno_id.split('-')
    # Bug fix: use isinstance() rather than `type(x) == str` — the exact-type
    # comparison breaks for str subclasses and is non-idiomatic.
    if isinstance(pheno_id, str):
        if exclude in split_pheno:
            newstr = pheno_id
        else:
            newstr = '-'.join([x for x in split_pheno if x not in sexes])
    else:
        hljoin = lambda x: hl.str('-').join(x)
        newstr = hl.if_else(split_pheno.contains(hl.literal(exclude)),
                            hljoin(split_pheno),
                            hljoin(hl.filter(lambda x: ~hl.literal(sexes).contains(x), split_pheno)))
    return newstr + '.tsv'
def run_phenotype_job(b, phenotype_id, ancestries, use_saige, checkpoint,
                      phenoscript, n_threads, random):
    """ Runs phenotype file creation jobs.

    Builds one Batch job that runs `phenoscript` to produce per-ancestry
    phenotype files, copies its log and per-ancestry outputs into job
    resources, and writes them out under `path_pheno`.

    Returns
    -------
    (b, j, map_dependencies): the batch, the created job, and a dict of
    per-ancestry job resource files downstream jobs can depend on.
    """
    j = b.new_job(name=phenotype_id+'_create_pheno')
    j.image(IMAGE)
    j.cpu(n_threads)
    filename = get_pheno_filename(phenotype_id, enable_suffix=False)
    filename_map = get_pheno_split_names(ancestries, dictout=True, phenotype_id=phenotype_id, enable_suffix=True)
    filename_compat = get_pheno_filename(compatiblify_phenotype_id(phenotype_id), enable_suffix=False)
    filename_map_compat = get_pheno_split_names(ancestries, dictout=True,
                                                phenotype_id=compatiblify_phenotype_id(phenotype_id),
                                                enable_suffix=True)
    ancestry = ','.join(ancestries)
    checkpoint_val = '--checkpoint' if checkpoint else ''
    saige_pull = '--pull-from-saige' if use_saige else ''
    random_val = '--random' if random else ''
    #write this file get_pheno_filename(args.phenotype_id, enable_suffix=False)
    # Bug fix: bash array literals are whitespace-delimited — with the previous
    # '( a, b )' form the commas became part of the element names.
    anc_array = '( ' + ' '.join(ancestries) + ' )'
    map_dependencies = {anc: j[anc] for anc in ancestries}
    # Bug fix: the expansion must be double-quoted; the previous single quotes
    # suppressed expansion, so the loop ran once over the literal string
    # '${ancestry_array[@]}' instead of once per ancestry.
    command = f"""
    ancestry_array={anc_array}
    for i in "${{ancestry_array[@]}}"
    do
    mkdir $i
    done
    python3 {phenoscript} --phenotype-id '{phenotype_id}' --ancestries {ancestry} \
        --logging --override-check {checkpoint_val} {saige_pull} {random_val}
    cp '{filename_compat + '.log'}' {j.log}
    """
    for anc in ancestries:
        command+=f"""
        cp '{filename_map_compat[anc]}' {map_dependencies[anc]}
        """
    j.command(command)
    b.write_output(j.log, path_pheno + 'log/' + filename + '.log')
    for anc in ancestries:
        b.write_output(map_dependencies[anc], path_pheno + filename_map[anc])
    return b, j, map_dependencies
def run_rhemc_job(b, phenotype_id, ancestry, phenotype_file, read_previous: bool, parser,
                  use_fuse: bool, memsize: int, storage, jackknife_blocks: int,
                  random_vectors: int, nbins: int, suffix: str, iter, sex_specific):
    """ Runs a single-ancestry RHEmc heritability estimation job.

    If `read_previous` is True, only re-parses an existing raw RHEmc log from
    `path_results`. Otherwise it localizes genotype/covariate/annotation/
    phenotype files (optionally via gcsfuse for genotypes), runs RHEmc, parses
    the output, and writes the raw log back to `path_results`.

    Returns
    -------
    (b, j, j.parsed_out): the batch, the job, and the parsed-result resource.
    """
    # Localize all files (and pass phenotype files in from previous step)
    # This will require GCSFUSE for the annotation, covariate, and genotype files
    # Run RHEmc
    # Collect results into a table
    iter_suffix = construct_iter_suffix(iter)
    j = b.new_job(name=ancestry + '_' + phenotype_id + '_RHEmc' + iter_suffix)
    j.image(IMAGE)
    log_out = get_result_log_split_names([ancestry], phenotype_id, dictout=True, suffix=suffix)[ancestry] + iter_suffix
    if read_previous:
        log_file = b.read_input(path_results + 'raw/' + log_out)
        command = f"""
        python3 {parser} \
            --file {log_file} \
            --pheno '{phenotype_id}{iter_suffix}' \
            --ancestry {ancestry} \
            --out {j.parsed_out}
        """
        j.command(command)
    else:
        # Bug fix: this previously tested `anc == 'EUR'` — `anc` is not defined
        # in this function, so any read_previous=False run raised NameError.
        if ancestry == 'EUR':
            # EUR is by far the largest cohort; pin a high-memory machine type.
            j._machine_type = 'n1-highmem-16'
        else:
            j.memory(str(memsize) + 'G')
        j.storage(str(storage) + 'G')
        genotype_name = get_geno_split_names([ancestry], dictout=True, plinkprefix=True)[ancestry]
        covariate_name = get_covar_split_names([ancestry], dictout=True, sex_specific=sex_specific)[ancestry]
        annot_name = get_annot_split_names([ancestry], dictout=True, n_annot=nbins)[ancestry]
        covarpath = b.read_input(path_covar + covariate_name)
        annotpath = b.read_input(path_annot + annot_name)
        if use_fuse:
            genopath = f'/{fusebucket}/genos/{genotype_name}'
            j.gcsfuse(fusebucket, '/'+fusebucket, read_only=True)
        else:
            genopath = b.read_input_group(**{x: path_geno + genotype_name + '.' + x
                                             for x in ['bed', 'bim', 'fam']})
        if True: # TODO update this to include input flags
            command_rhe = f"""
            /RHE-mc/build/RHEmc \
                -g {genopath} \
                -c covar.tsv \
                -p pheno.tsv \
                -annot annot.txt \
                -k {random_vectors} \
                -jn {jackknife_blocks} \
                -o {j.h2out}
            """
        else:
            # single component RHE reg either runs out of memory or
            # doesn't work. Gets floating point error:
            # 0x00005571c3c03e86 in genotype::get_observed_pj(unsigned char const*) ()
            command_rhe = f"""
            /RHE-reg/build/RHE_reg \
                -g {genopath} \
                -c covar.tsv \
                -p pheno.tsv \
                -b {random_vectors}
            """
        # NOTE: annot.bgz is copied but gunzip reads {annotpath} directly; the
        # copy is redundant but harmless, kept for parity with the pheno step.
        command = f"""
        cp {phenotype_file} pheno.bgz
        gunzip -c pheno.bgz > pheno.tsv
        cp {annotpath} annot.bgz
        gunzip -c {annotpath} > annot.txt
        cat {covarpath} > covar.tsv
        {command_rhe}
        python3 {parser} \
            --file {j.h2out} \
            --pheno '{phenotype_id}{iter_suffix}' \
            --ancestry {ancestry} \
            --out {j.parsed_out}
        """
        j.command(command)
        b.write_output(j.h2out, path_results + 'raw/' + log_out)
    return b, j, j.parsed_out
def run_ancestry_sink(b, phenotype_id, ancestry_jobs, concatter):
    """ Runs ancestry sink, producing tables for final concatenation.

    Collects the parsed per-ancestry result files for one phenotype and
    concatenates them into a single table resource (`tab_out`).
    """
    sink_job = b.new_job(name='ancestry_sink_' + phenotype_id)
    sink_job.image(IMAGE)
    result_files = [res_file for _, (_, res_file) in ancestry_jobs.items()]
    tables = ','.join(result_files)
    sink_job.command(f"""
    python {concatter} --tables {tables} --out {sink_job.tab_out}
    """)
    return sink_job
def run_final_sink(b, ancestry_sinks, concatter, nlen, suffix='', output_file='final_results', path_results=path_results):
    """ Runs final sink to collect and concatenate all results.
    Implements interim sinks of size nlen, and then has one final sink.
    This is to workaround the issue with the submitted script being too long
    if we allow the final sink size to become unbounded.
    """
    interim_sinks = []
    get_interim_id = lambda idx: math.floor(idx/nlen)
    for this_id in range(0, get_interim_id(len(ancestry_sinks))+1):
        jobs_in_sink = [v for idx, v in enumerate(ancestry_sinks) if get_interim_id(idx) == this_id]
        # Bug fix: when len(ancestry_sinks) is an exact multiple of nlen the
        # last id has no jobs; previously this produced an interim sink with an
        # empty --tables argument (a malformed concat command). Skip it.
        if not jobs_in_sink:
            continue
        interim_sink = b.new_job('interim_sink_' + str(this_id))
        interim_sink.image(IMAGE)
        interim_tables = ",".join([j.tab_out for j in jobs_in_sink])
        command_interim = f"""
        python {concatter} --tables {interim_tables} --out {interim_sink.tab_out}
        """
        interim_sink.command(command_interim)
        interim_sinks.append(interim_sink)
    if len(interim_sinks) == 1:
        # A single interim sink already holds everything — reuse it directly.
        final_sink = interim_sinks[0]
    else:
        final_sink = b.new_job('final_sink')
        final_sink.image(IMAGE)
        final_tables = ",".join([j.tab_out for j in interim_sinks])
        command_fin = f"""
        python {concatter} --tables {final_tables} --out {final_sink.tab_out}
        """
        final_sink.command(command_fin)
    b.write_output(final_sink.tab_out, f'{path_results}{output_file}{suffix}.tsv')
    return final_sink
def get_num_bins(args):
    """ Gets the number of bins that will be used in this analysis.

    Total bins = (number of MAF bins: 2 or 5) x (number of LD bins).
    """
    maf_bins = 2 if args.maf_bins_2 else 5
    return maf_bins * args.num_ld_bins
def get_num_iter(args):
    """ Determines if iterations per phenotype must be performed (only enabled for testing).
    If so, formats iterations properly for use.
    """
    n_iter = args.n_iter
    if n_iter is None:
        return [None]
    return range(n_iter)
def _get_pheno_manifest_path_internal():
    """Return the GCS path of the phenotype manifest under the module-level ``loc``."""
    return 'gs://ukb-diverse-pops/' + loc + '/phenotype_manifest.tsv.bgz'
def _import_manifest(use_tsv_manifest=True):
    """Import the phenotype manifest as a Hail Table and add derived columns.

    With use_tsv_manifest=True the flat TSV manifest is imported directly;
    otherwise the full variant-results MatrixTable's column table is used and
    its per-population `pheno_data` arrays are flattened into one column per
    (field, population) pair. In both cases `phenotype_id` (if absent),
    `pheno_file`, `saige_file` and `pop_split` are annotated, and the table is
    cached before returning.
    """
    if use_tsv_manifest:
        # NOTE(review): this path assumes the TSV already carries a `pops`
        # column (used by `pop_split` below) — confirm the manifest schema.
        manifest = hl.import_table(_get_pheno_manifest_path_internal())
    else:
        manifest = hl.read_matrix_table(get_variant_results_path('full')).cols()
        annotate_dict = {}
        annotate_dict.update({'pops': hl.str(',').join(manifest.pheno_data.pop),
                      'num_pops': hl.len(manifest.pheno_data.pop)})
        # Flatten per-pop arrays into scalar columns, one per (field, pop);
        # populations absent from pheno_data get missing values.
        for field in ['n_cases','n_controls','heritability']:
            for pop in ['AFR','AMR','CSA','EAS','EUR','MID']:
                new_field = field if field!='heritability' else 'saige_heritability' # new field name (only applicable to saige heritability)
                idx = manifest.pheno_data.pop.index(pop)
                field_expr = manifest.pheno_data[field]
                annotate_dict.update({f'{new_field}_{pop}': hl.if_else(hl.is_nan(idx),
                                                       hl.missing(field_expr[0].dtype),
                                                       field_expr[idx])})
        manifest = manifest.annotate(**annotate_dict)
        manifest = manifest.drop(manifest.pheno_data)
    if 'phenotype_id' not in list(manifest.row):
        manifest = manifest.annotate(phenotype_id = construct_phenotype_id(manifest))
    manifest = manifest.annotate(pheno_file = get_pheno_filename(manifest.phenotype_id),
                                 saige_file = convert_pheno_id_to_potential_saige(manifest.phenotype_id),
                                 pop_split = manifest.pops.split(','))
    return manifest.cache()
def _make_phenotype_dict(manifest, ancestries, n_include=None, random_phenos=None, suffix='', specific_pheno=None):
""" Internal function to construct a phenotype dictionary. Used to iterate
through phenotypes during pipeline construction.
Parameters
----------
manifest: `Hail Table`
Imported manifest table. This function relies on modifications made to this table
in `_import_manifest`.
ancestries: `list`
n_include: `int` or None
This is primarily used for testing. If a non-None value is provided,
this function will trim the dictionary to include n_include items from each
trait type.
random_phenos: `int` or None
If not none, then will queue up a bunch of random phenotype jobs. Each of these will
be run on all phenotypes.
suffix: `str`
Appended to the end of the results file to search for.
specific_pheno: `list` or None
A list of formatted phenotypes to subset the manifest to. If any are not found, an error will be thrown.
"""
if random_phenos is not None:
dct_out = {}
for i in range(1, random_phenos+1):
pheno_id = RANDOMPREF(i)
| |
275: 'podoon',
276: 'life_time_fitness',
277: 'falco_e_motors', # Falco eMotors Inc.
5759: 'actigraphcorp',
},
),
'mesg_count': FieldType(
name='mesg_count',
base_type=BASE_TYPES[0x00], # enum
values={
0: 'num_per_file',
1: 'max_per_file',
2: 'max_per_file_type',
},
),
'mesg_num': FieldType(
name='mesg_num',
base_type=BASE_TYPES[0x84], # uint16
values={
0: 'file_id',
1: 'capabilities',
2: 'device_settings',
3: 'user_profile',
4: 'hrm_profile',
5: 'sdm_profile',
6: 'bike_profile',
7: 'zones_target',
8: 'hr_zone',
9: 'power_zone',
10: 'met_zone',
12: 'sport',
15: 'goal',
18: 'session',
19: 'lap',
20: 'record',
21: 'event',
23: 'device_info',
26: 'workout',
27: 'workout_step',
28: 'schedule',
30: 'weight_scale',
31: 'course',
32: 'course_point',
33: 'totals',
34: 'activity',
35: 'software',
37: 'file_capabilities',
38: 'mesg_capabilities',
39: 'field_capabilities',
49: 'file_creator',
51: 'blood_pressure',
53: 'speed_zone',
55: 'monitoring',
72: 'training_file',
78: 'hrv',
80: 'ant_rx',
81: 'ant_tx',
82: 'ant_channel_id',
101: 'length',
103: 'monitoring_info',
105: 'pad',
106: 'slave_device',
127: 'connectivity',
128: 'weather_conditions',
129: 'weather_alert',
131: 'cadence_zone',
132: 'hr',
142: 'segment_lap',
145: 'memo_glob',
148: 'segment_id',
149: 'segment_leaderboard_entry',
150: 'segment_point',
151: 'segment_file',
158: 'workout_session',
159: 'watchface_settings',
160: 'gps_metadata',
161: 'camera_event',
162: 'timestamp_correlation',
164: 'gyroscope_data',
165: 'accelerometer_data',
167: 'three_d_sensor_calibration',
169: 'video_frame',
174: 'obdii_data',
177: 'nmea_sentence',
178: 'aviation_attitude',
184: 'video',
185: 'video_title',
186: 'video_description',
187: 'video_clip',
188: 'ohr_settings',
200: 'exd_screen_configuration',
201: 'exd_data_field_configuration',
202: 'exd_data_concept_configuration',
206: 'field_description',
207: 'developer_data_id',
208: 'magnetometer_data',
},
),
'message_index': FieldType(
name='message_index',
base_type=BASE_TYPES[0x84], # uint16
values={
0x0FFF: 'mask', # index
0x7000: 'reserved', # reserved (default 0)
0x8000: 'selected', # message is selected if set
},
),
'power_phase_type': FieldType(
name='power_phase_type',
base_type=BASE_TYPES[0x00], # enum
values={
0: 'power_phase_start_angle',
1: 'power_phase_end_angle',
2: 'power_phase_arc_length',
3: 'power_phase_center',
},
),
'pwr_zone_calc': FieldType(
name='pwr_zone_calc',
base_type=BASE_TYPES[0x00], # enum
values={
0: 'custom',
1: 'percent_ftp',
},
),
'rider_position_type': FieldType(
name='rider_position_type',
base_type=BASE_TYPES[0x00], # enum
values={
0: 'seated',
1: 'standing',
2: 'transition_to_seated',
3: 'transition_to_standing',
},
),
'schedule': FieldType(
name='schedule',
base_type=BASE_TYPES[0x00], # enum
values={
0: 'workout',
1: 'course',
},
),
'segment_delete_status': FieldType(
name='segment_delete_status',
base_type=BASE_TYPES[0x00], # enum
values={
0: 'do_not_delete',
1: 'delete_one',
2: 'delete_all',
},
),
'segment_lap_status': FieldType(
name='segment_lap_status',
base_type=BASE_TYPES[0x00], # enum
values={
0: 'end',
1: 'fail',
},
),
'segment_leaderboard_type': FieldType(
name='segment_leaderboard_type',
base_type=BASE_TYPES[0x00], # enum
values={
0: 'overall',
1: 'personal_best',
2: 'connections',
3: 'group',
4: 'challenger',
5: 'kom',
6: 'qom',
7: 'pr',
8: 'goal',
9: 'rival',
10: 'club_leader',
},
),
'segment_selection_type': FieldType(
name='segment_selection_type',
base_type=BASE_TYPES[0x00], # enum
values={
0: 'starred',
1: 'suggested',
},
),
'sensor_type': FieldType(
name='sensor_type',
base_type=BASE_TYPES[0x00], # enum
values={
0: 'accelerometer',
1: 'gyroscope',
2: 'compass', # Magnetometer
},
),
'session_trigger': FieldType(
name='session_trigger',
base_type=BASE_TYPES[0x00], # enum
values={
0: 'activity_end',
1: 'manual', # User changed sport.
2: 'auto_multi_sport', # Auto multi-sport feature is enabled and user pressed lap button to advance session.
3: 'fitness_equipment', # Auto sport change caused by user linking to fitness equipment.
},
),
'side': FieldType(
name='side',
base_type=BASE_TYPES[0x00], # enum
values={
0: 'right',
1: 'left',
},
),
'source_type': FieldType(
name='source_type',
base_type=BASE_TYPES[0x00], # enum
values={
0: 'ant', # External device connected with ANT
1: 'antplus', # External device connected with ANT+
2: 'bluetooth', # External device connected with BT
3: 'bluetooth_low_energy', # External device connected with BLE
4: 'wifi', # External device connected with Wifi
5: 'local', # Onboard device
},
),
'sport': FieldType(
name='sport',
base_type=BASE_TYPES[0x00], # enum
values={
0: 'generic',
1: 'running',
2: 'cycling',
3: 'transition', # Mulitsport transition
4: 'fitness_equipment',
5: 'swimming',
6: 'basketball',
7: 'soccer',
8: 'tennis',
9: 'american_football',
10: 'training',
11: 'walking',
12: 'cross_country_skiing',
13: 'alpine_skiing',
14: 'snowboarding',
15: 'rowing',
16: 'mountaineering',
17: 'hiking',
18: 'multisport',
19: 'paddling',
20: 'flying',
21: 'e_biking',
22: 'motorcycling',
23: 'boating',
24: 'driving',
25: 'golf',
26: 'hang_gliding',
27: 'horseback_riding',
28: 'hunting',
29: 'fishing',
30: 'inline_skating',
31: 'rock_climbing',
32: 'sailing',
33: 'ice_skating',
34: 'sky_diving',
35: 'snowshoeing',
36: 'snowmobiling',
37: 'stand_up_paddleboarding',
38: 'surfing',
39: 'wakeboarding',
40: 'water_skiing',
41: 'kayaking',
42: 'rafting',
43: 'windsurfing',
44: 'kitesurfing',
45: 'tactical',
46: 'jumpmaster',
47: 'boxing',
48: 'floor_climbing',
254: 'all', # All is for goals only to include all sports.
},
),
'sport_bits_0': FieldType( # Bit field corresponding to sport enum type (1 << sport).
name='sport_bits_0',
base_type=BASE_TYPES[0x0A], # uint8z
values={
0x01: 'generic',
0x02: 'running',
0x04: 'cycling',
0x08: 'transition', # Mulitsport transition
0x10: 'fitness_equipment',
0x20: 'swimming',
0x40: 'basketball',
0x80: 'soccer',
},
),
'sport_bits_1': FieldType( # Bit field corresponding to sport enum type (1 << (sport-8)).
name='sport_bits_1',
base_type=BASE_TYPES[0x0A], # uint8z
values={
0x01: 'tennis',
0x02: 'american_football',
0x04: 'training',
0x08: 'walking',
0x10: 'cross_country_skiing',
0x20: 'alpine_skiing',
0x40: 'snowboarding',
0x80: 'rowing',
},
),
'sport_bits_2': FieldType( # Bit field corresponding to sport enum type (1 << (sport-16)).
name='sport_bits_2',
base_type=BASE_TYPES[0x0A], # uint8z
values={
0x01: 'mountaineering',
0x02: 'hiking',
0x04: 'multisport',
0x08: 'paddling',
0x10: 'flying',
0x20: 'e_biking',
0x40: 'motorcycling',
0x80: 'boating',
},
),
'sport_bits_3': FieldType( # Bit field corresponding to sport enum type (1 << (sport-24)).
name='sport_bits_3',
base_type=BASE_TYPES[0x0A], # uint8z
values={
0x01: 'driving',
0x02: 'golf',
0x04: 'hang_gliding',
0x08: 'horseback_riding',
0x10: 'hunting',
0x20: 'fishing',
0x40: 'inline_skating',
0x80: 'rock_climbing',
},
),
'sport_bits_4': FieldType( # Bit field corresponding to sport enum type (1 << (sport-32)).
name='sport_bits_4',
base_type=BASE_TYPES[0x0A], # uint8z
values={
0x01: 'sailing',
0x02: 'ice_skating',
0x04: 'sky_diving',
0x08: 'snowshoeing',
0x10: 'snowmobiling',
0x20: 'stand_up_paddleboarding',
0x40: 'surfing',
0x80: 'wakeboarding',
},
),
'sport_bits_5': FieldType( # Bit field corresponding to sport enum type (1 << (sport-40)).
name='sport_bits_5',
base_type=BASE_TYPES[0x0A], # uint8z
values={
0x01: 'water_skiing',
0x02: 'kayaking',
0x04: 'rafting',
0x08: 'windsurfing',
0x10: 'kitesurfing',
0x20: 'tactical',
0x40: 'jumpmaster',
0x80: 'boxing',
},
),
'sport_bits_6': FieldType( # Bit field corresponding to sport enum type (1 << (sport-48)).
name='sport_bits_6',
base_type=BASE_TYPES[0x0A], # uint8z
values={
0x01: 'floor_climbing',
},
),
'sport_event': FieldType(
name='sport_event',
base_type=BASE_TYPES[0x00], # enum
values={
0: 'uncategorized',
1: 'geocaching',
2: 'fitness',
3: 'recreation',
4: 'race',
5: 'special_event',
6: 'training',
7: 'transportation',
8: 'touring',
},
),
'stroke_type': FieldType(
name='stroke_type',
base_type=BASE_TYPES[0x00], # enum
values={
0: 'no_event',
1: 'other', # stroke was detected but cannot be identified
2: 'serve',
3: 'forehand',
4: 'backhand',
5: 'smash',
},
),
'sub_sport': FieldType(
name='sub_sport',
base_type=BASE_TYPES[0x00], # enum
values={
0: 'generic',
1: 'treadmill', # Run/Fitness Equipment
2: 'street', # Run
3: 'trail', # Run
4: 'track', # Run
5: 'spin', # Cycling
6: 'indoor_cycling', # Cycling/Fitness Equipment
7: 'road', # Cycling
8: 'mountain', # Cycling
9: 'downhill', # Cycling
10: 'recumbent', # Cycling
11: 'cyclocross', # Cycling
12: 'hand_cycling', # Cycling
13: 'track_cycling', # Cycling
14: 'indoor_rowing', # Fitness Equipment
15: 'elliptical', # Fitness Equipment
16: 'stair_climbing', # Fitness Equipment
17: 'lap_swimming', # Swimming
18: 'open_water', # Swimming
19: 'flexibility_training', # Training
20: 'strength_training', # Training
21: 'warm_up', # Tennis
22: 'match', # Tennis
23: 'exercise', # Tennis
24: 'challenge', # Tennis
25: 'indoor_skiing', # Fitness Equipment
26: 'cardio_training', # Training
27: 'indoor_walking', # Walking/Fitness Equipment
28: 'e_bike_fitness', # E-Biking
29: 'bmx', # Cycling
30: 'casual_walking', # Walking
31: 'speed_walking', # Walking
32: 'bike_to_run_transition', # Transition
33: 'run_to_bike_transition', # Transition
34: 'swim_to_bike_transition', # Transition
35: 'atv', # Motorcycling
36: 'motocross', # Motorcycling
37: 'backcountry', # Alpine Skiing/Snowboarding
38: 'resort', # Alpine Skiing/Snowboarding
39: 'rc_drone', # Flying
40: 'wingsuit', # Flying
41: 'whitewater', # Kayaking/Rafting
42: 'skate_skiing', # Cross Country Skiing
43: 'yoga', # Training
44: 'pilates', # Training
45: 'indoor_running', # Run
46: 'gravel_cycling', # Cycling
47: 'e_bike_mountain', # Cycling
48: 'commuting', # Cycling
49: 'mixed_surface', # Cycling
50: 'navigate',
51: 'track_me',
52: 'map',
254: 'all',
},
),
'supported_exd_screen_layouts': FieldType(
name='supported_exd_screen_layouts',
base_type=BASE_TYPES[0x8C], # uint32z
values={
0x00000001: 'full_screen',
0x00000002: 'half_vertical',
0x00000004: 'half_horizontal',
0x00000008: 'half_vertical_right_split',
0x00000010: 'half_horizontal_bottom_split',
0x00000020: 'full_quarter_split',
0x00000040: 'half_vertical_left_split',
0x00000080: 'half_horizontal_top_split',
},
),
'swim_stroke': FieldType(
name='swim_stroke',
base_type=BASE_TYPES[0x00], # enum
values={
0: 'freestyle',
1: 'backstroke',
2: 'breaststroke',
3: 'butterfly',
4: 'drill',
5: 'mixed',
6: 'im', # IM is a mixed interval containing the same number of lengths for each of: Butterfly, Backstroke, Breaststroke, Freestyle, swam in that order.
},
),
'switch': FieldType(
name='switch',
base_type=BASE_TYPES[0x00], # enum
values={
0: 'off',
1: 'on',
2: 'auto',
},
),
'time_into_day': FieldType( # number of seconds into the day since 00:00:00 UTC
name='time_into_day',
base_type=BASE_TYPES[0x86], # uint32
),
'time_mode': FieldType(
name='time_mode',
base_type=BASE_TYPES[0x00], # enum
values={
0: 'hour12',
1: 'hour24', # Does not use a leading zero and has a colon
2: 'military', # Uses a leading zero and does not have a colon
3: 'hour_12_with_seconds',
4: 'hour_24_with_seconds',
5: 'utc',
},
),
'time_zone': FieldType(
name='time_zone',
base_type=BASE_TYPES[0x00], # enum
values={
0: | |
# Normal-exponential using out-of-band probes
# normex: negative control probes
# noob: ‘out-of-band’ Infinium I probes
# Lib
import logging
import numpy as np
import pandas as pd
from statsmodels import robust
from scipy.stats import norm, lognorm
# App
from ..models import ControlType, ArrayType
from ..models.sketchy_probes import qualityMask450, qualityMaskEPIC, qualityMaskEPICPLUS, qualityMaskmouse
__all__ = ['preprocess_noob']
LOGGER = logging.getLogger(__name__)
def preprocess_noob(container, offset=15, pval_probes_df=None, quality_mask_df=None, nonlinear_dye_correction=True, debug=False, unit_test_oob=False): # v1.4.5+
    """ NOOB pythonized copy of https://github.com/zwdzwd/sesame/blob/master/R/background_correction.R
    - The function takes a SigSet and returns a modified SigSet with the background subtracted.
    - Background is modelled in a normal distribution and true signal in an exponential distribution.
    - The Norm-Exp deconvolution is parameterized using Out-Of-Band (oob) probes.
    - includes snps, but not control probes yet
    - output should replace the container instead of returning debug dataframes
    - II RED and II GREEN both have data, but manifest doesn't have a way to track this, so function tracks it.
    - keep IlmnID as index for meth/unmeth snps, and convert fg_green

    Parameters:
        container: SigSet/SampleDataContainer exposing ibG/ibR (in-band) and oobG/oobR (out-of-band) intensities.
        offset (int): constant added to corrected signals; 15 matches sesame's NOOB output when the dye step is omitted.
        pval_probes_df: optional poobah DataFrame; probes with poobah_pval > container.poobah_sig are excluded from the background fit.
        quality_mask_df: optional quality-mask DataFrame; probes flagged 0 are excluded from the background fit.
        nonlinear_dye_correction: True defers to sesame's nonlinear dye-bias step downstream; False applies the minfi
            linear red/green factor here; None skips both (debugging / benchmarking only).
        unit_test_oob: if True, return the intermediate oob/noob dataframes instead of updating the container.
    """
    if debug:
        print(f"DEBUG NOOB {debug} nonlinear_dye_correction={nonlinear_dye_correction}, pval_probes_df={pval_probes_df.shape if isinstance(pval_probes_df,pd.DataFrame) else 'None'}, quality_mask_df={quality_mask_df.shape if isinstance(quality_mask_df,pd.DataFrame) else 'None'}")
    # stack- need one long list of values, regardless of Meth/Unmeth
    ibG = pd.concat([
        container.ibG.reset_index().rename(columns={'Meth': 'mean_value'}).assign(used='M'),
        container.ibG.reset_index().rename(columns={'Unmeth': 'mean_value'}).assign(used='U')
    ])
    ibG = ibG[ ~ibG['mean_value'].isna() ].drop(columns=['Meth','Unmeth'])
    ibR = pd.concat([
        container.ibR.reset_index().rename(columns={'Meth': 'mean_value'}).assign(used='M'),
        container.ibR.reset_index().rename(columns={'Unmeth': 'mean_value'}).assign(used='U')
    ])
    ibR = ibR[ ~ibR['mean_value'].isna() ].drop(columns=['Meth','Unmeth'])
    # out-of-band is Green-Unmeth and Red-Meth
    # exclude probes that failed poobah or the quality mask from the background model
    pval = pval_probes_df.loc[ pval_probes_df['poobah_pval'] > container.poobah_sig ].index if isinstance(pval_probes_df, pd.DataFrame) else []
    qmask = quality_mask_df.loc[ quality_mask_df['quality_mask'] == 0 ].index if isinstance(quality_mask_df, pd.DataFrame) else []
    # the ignored errors here should only be from probes that are both pval failures and qmask failures.
    Rmeth = list(container.oobR['Meth'].drop(index=pval, errors='ignore').drop(index=qmask, errors='ignore'))
    Runmeth = list(container.oobR['Unmeth'].drop(index=pval, errors='ignore').drop(index=qmask, errors='ignore'))
    oobR = pd.DataFrame( Rmeth + Runmeth, columns=['mean_value'])
    Gmeth = list(container.oobG['Meth'].drop(index=pval, errors='ignore').drop(index=qmask, errors='ignore'))
    Gunmeth = list(container.oobG['Unmeth'].drop(index=pval, errors='ignore').drop(index=qmask, errors='ignore'))
    oobG = pd.DataFrame( Gmeth + Gunmeth, columns=['mean_value'])
    debug_warnings = ""
    if oobR['mean_value'].isna().sum() > 0:
        # BUGFIX: this branch previously reported oobG's NaN count/label while dropping rows from oobR.
        debug_warnings += f" NOOB: oobR had {oobR['mean_value'].isna().sum()} NaNs"
        oobR = oobR.dropna()
    if oobG['mean_value'].isna().sum() > 0:
        debug_warnings += f" NOOB: oobG had {oobG['mean_value'].isna().sum()} NaNs"
        oobG = oobG.dropna()
    if ibG['mean_value'].isna().sum() > 0 or ibR['mean_value'].isna().sum() > 0:
        raise ValueError("ibG or ibR is missing probe intensities. need to filter them out.")
    if debug:
        print(f"ibG {len(ibG)} ibR {len(ibR)} oobG {len(oobG)} oobR {len(oobR)} | {debug_warnings}")
    # set minimum intensity to 1
    ibG_affected = len(ibG.loc[ ibG['mean_value'] < 1 ].index)
    ibR_affected = len(ibR.loc[ ibR['mean_value'] < 1 ].index)
    ibG.loc[ ibG['mean_value'] < 1, 'mean_value'] = 1
    ibR.loc[ ibR['mean_value'] < 1, 'mean_value'] = 1
    oobG_affected = len(oobG[ oobG['mean_value'] < 1])
    oobR_affected = len(oobR[ oobR['mean_value'] < 1])
    oobG.loc[ oobG.mean_value < 1, 'mean_value'] = 1
    oobR.loc[ oobR.mean_value < 1, 'mean_value'] = 1
    if debug:
        # BUGFIX: the condition previously tested ibR_affected twice, so a green-only clip was never reported.
        if ibR_affected > 0 or ibG_affected > 0:
            print(f"ib: Set {ibR_affected} red and {ibG_affected} green to 1.0 ({len(ibR[ ibR['mean_value'] == 1 ].index)}, {len(ibG[ ibG['mean_value'] == 1 ].index)})")
        if oobG_affected > 0 or oobR_affected > 0:
            print(f"oob: Set {oobR_affected} red and {oobG_affected} green to 1.0 ({len(oobR[ oobR['mean_value'] == 1 ].index)}, {len(oobG[ oobG['mean_value'] == 1 ].index)})")
    # do background correction in each channel; returns "normalized in-band signal"
    ibG_nl, params_green = normexp_bg_corrected(ibG, oobG, offset, sample_name=container.sample.name)
    ibR_nl, params_red = normexp_bg_corrected(ibR, oobR, offset, sample_name=container.sample.name)
    noob_green = ibG_nl.round({'bg_corrected':0})
    noob_red = ibR_nl.round({'bg_corrected':0})
    if unit_test_oob:
        return {
            'oobR': oobR,
            'oobG': oobG,
            'noob_green': noob_green,
            'noob_red': noob_red,
        }
    # by default, this last step is omitted for sesame
    if nonlinear_dye_correction == True:
        # update() expects noob_red/green to have IlmnIDs in index, and contain bg_corrected for ALL probes.
        container.update_probe_means(noob_green, noob_red)
    elif nonlinear_dye_correction == False:
        # this "linear" method may be anologous to the ratio quantile normalization described in Nature: https://www.nature.com/articles/s41598-020-72664-6
        normexp_bg_correct_control(container.ctrl_green, params_green)
        normexp_bg_correct_control(container.ctrl_red, params_red)
        mask_green = container.ctrl_green['Control_Type'].isin(ControlType.normalization_green())
        mask_red = container.ctrl_red['Control_Type'].isin(ControlType.normalization_red())
        avg_green = container.ctrl_green[mask_green]['bg_corrected'].mean()
        avg_red = container.ctrl_red[mask_red]['bg_corrected'].mean()
        rg_ratios = avg_red / avg_green
        red_factor = 1 / rg_ratios
        container.update_probe_means(noob_green, noob_red, red_factor)
        container._SigSet__minfi_noob = True
    elif nonlinear_dye_correction is None:
        if debug:
            LOGGER.info("skipping linear/nonlinear dye-bias correction step")
        # skips the minfi-linear step and won't trigger the sesame nonlinear dye bias step downstream, if you REALLY want it uncorrected. Mostly for debugging / benchmarking.
        container.update_probe_means(noob_green, noob_red)
class BackgroundCorrectionParams():
    """Fitted normal-exponential parameters consumed by apply_bg_correction.

    bg_mean / bg_mad: location and spread of the background component.
    mean_signal: "alpha" in the sesame code (see normexp_bg_corrected).
    offset: constant added to every corrected intensity.

    note: default offset was 15. In v1.3.3 (Jan 2020) I kept 15, after finding
    this made results match sesame's NOOB output exactly, if dye step ommitted.
    The offset is specified in the preprocess_noob() function.
    """
    __slots__ = ('bg_mean', 'bg_mad', 'mean_signal', 'offset')

    def __init__(self, bg_mean, bg_mad, mean_signal, offset):
        self.bg_mean, self.bg_mad, self.mean_signal, self.offset = (
            bg_mean, bg_mad, mean_signal, offset)
def normexp_bg_corrected(fg_probes, ctrl_probes, offset, sample_name=None):
    """Normal-exponential background correction for one channel
    (analogous to sesame's backgroundCorrectionNoobCh1).

    Adds a 'bg_corrected' column (rounded to 1 decimal) to fg_probes in place
    and returns (fg_probes, params). A flat-intensity sample gets a constant
    1.0 correction and placeholder params instead of a fit.
    """
    intensities = fg_probes['mean_value']
    # a flat signal makes the robust fit meaningless; bail out with placeholders
    if intensities.min() == intensities.max():
        LOGGER.error(f"{sample_name}: min and max intensity are same. Sample probably bad.")
        fallback = BackgroundCorrectionParams(bg_mean=1.0, bg_mad=1.0, mean_signal=1.0, offset=15)
        fg_probes['bg_corrected'] = 1.0
        return fg_probes, fallback
    fg_center, _fg_spread = huber(intensities)
    bg_center, bg_spread = huber(ctrl_probes['mean_value'])
    # "alpha" in sesame function; floored at 10
    alpha = np.maximum(fg_center - bg_center, 10)
    params = BackgroundCorrectionParams(bg_center, bg_spread, alpha, offset)
    fg_probes['bg_corrected'] = apply_bg_correction(intensities, params)
    fg_probes['bg_corrected'] = fg_probes['bg_corrected'].round(1)
    return fg_probes, params
def normexp_bg_correct_control(control_probes, params):
    """Apply the fitted background correction to control probes
    (xcs controls for preprocessNoob).

    Mutates control_probes by adding a 'bg_corrected' column; returns it.
    """
    control_probes['bg_corrected'] = apply_bg_correction(control_probes['mean_value'], params)
    return control_probes
def apply_bg_correction(mean_values, params):
    """Normal-exponential deconvolution of observed intensities.

    note: this function won't work with float16 in practice (underflow);
    limits use to float32.

    Mirrors sesame:
        signal <- mu.sf + sigma2 * exp(
            dnorm(0, mean = mu.sf, sd = sigma, log = TRUE) -
            pnorm(0, mean = mu.sf, sd = sigma, lower.tail = FALSE, log.p = TRUE))
    """
    if not isinstance(params, BackgroundCorrectionParams):
        raise ValueError('params is not a BackgroundCorrectionParams instance')
    np.seterr(under='ignore') # 'raise to explore fixing underflow warning here'
    mu = params.bg_mean
    sigma = params.bg_mad
    alpha = params.mean_signal
    mu_sf = mean_values - mu - (sigma ** 2) / alpha
    # frozen scipy normal, vectorized over mu_sf; logpdf/logsf at 0 match
    # R's dnorm(..., log=TRUE) and pnorm(..., lower.tail=FALSE, log.p=TRUE)
    gaussian = norm(mu_sf, sigma)
    signal = mu_sf + (sigma ** 2) * np.exp(gaussian.logpdf(0) - gaussian.logsf(0))
    # sesame: "Limit of numerical accuracy reached with very low intensity or very high background:
    # setting adjusted intensities to small value"
    signal = np.maximum(signal, 1e-6)
    return signal + params.offset
def huber(vector):
    """Huber location estimate, designed to mirror the MASS huber function in R.

    Parameters
    ----------
    vector: list
        list of float values

    Returns
    -------
    local_median: float
        the converged location estimate (mu)
    mad_scale: float
        the MAD scale estimate (s); held fixed between iterations
    """
    n = len(vector)
    winsor_k = 1.5           # winsorizing constant
    tol = 1.0e-6             # relative convergence tolerance
    mad_scale = robust.mad(vector)
    center = np.median(vector)
    # degenerate input: both location and scale are zero, nothing to iterate
    if not (center or mad_scale):
        return center, mad_scale
    while True:
        # winsorize the data around the current center, then re-average
        clipped = np.clip(vector, center - winsor_k * mad_scale,
                          center + winsor_k * mad_scale)
        candidate = sum(clipped) / n
        if abs(center - candidate) < tol * mad_scale:
            return center, mad_scale
        center = candidate
def _apply_sesame_quality_mask(data_container):
""" adapted from sesame's qualityMask function, which is applied just after poobah
to remove probes Wanding thinks are sketchy.
OUTPUT: this pandas DataFrame will have NaNs for probes to be excluded and 0.0 for probes to be retained. NaNs converted to 1.0 in final processing output.
SESAME:
masked <- sesameDataGet(paste0(sset@platform, '.probeInfo'))$mask
to use TCGA masking, only applies to HM450
"""
if data_container.array_type not in (
# ArrayType.ILLUMINA_27K,
ArrayType.ILLUMINA_450K,
ArrayType.ILLUMINA_EPIC,
ArrayType.ILLUMINA_EPIC_PLUS,
ArrayType.ILLUMINA_MOUSE):
LOGGER.info(f"Quality masking is not supported for {data_container.array_type}.")
return
# load set of probes to remove from local file
if data_container.array_type == ArrayType.ILLUMINA_450K:
probes = qualityMask450
elif data_container.array_type == ArrayType.ILLUMINA_EPIC:
probes = qualityMaskEPIC
elif data_container.array_type == ArrayType.ILLUMINA_EPIC_PLUS:
# this is a bit of | |
<filename>rbs_m2/solver/solver.py
import argparse
import csv
import re
import sys, os
import struct
from attr import dataclass
from abc import abstractmethod
from typing import Iterator, List, Tuple, Set
#from skyfield.sgp4lib import EarthSatellite
from skyfield.api import load, Topos, EarthSatellite
from randomize_challenge import RandomChallengeFactory, Groundstation
from orbit import SatelliteObserver
from antenna_control import AntennaController, AntennaDeterminator
import socket
NUM_CHALLENGE_SATS = 3  # the server asks for exactly three satellite names
OBSERVATION_INTERVAL = 1.0  # seconds; presumably the observation cadence -- unused in this file's visible code, TODO confirm
AZIMUTH_TOLERANCE = 1.0  # base matching tolerance for azimuth comparisons (degrees)
ELEVATION_TOLERANCE = 1.0  # base matching tolerance for elevation comparisons (degrees)
MISMATCHES_ALLOWED = 5  # a candidate satellite is rejected after this many angle mismatches
THRESHOLD_DB = -60  # samples below this power are clamped down to VERY_LOW_DB
VERY_LOW_DB = -120  # sentinel "no signal" power level
#SAMPLE_HZ = 2048000
SAMPLE_HZ = 50 * 2048  # decimated capture sample rate (full rate kept above for reference)
TIMESTEP = 1.0/SAMPLE_HZ  # seconds per sample
PEAK_GROUP_THRESHOLD = 750 # max data points between start peak and end peak
# depends on sample rate and duty cycle range
MIN_DUTY_CYCLE = 0.05  # PWM duty-cycle range used to rescale measured fractions
MAX_DUTY_CYCLE = 0.35
# CLI argument specs; presumably each entry is expanded as
# parser.add_argument(*args, **kwargs) by the caller (parser setup is not in
# this block) -- verify against the argparse wiring.
ARGS = [
    {
        'args': ('--tle-file',),
        'kwargs': {
            'type': str,
            'help': "Local path to TLE file of satellite orbit parameters (e.g. http://celestrak.com/NORAD/elements/noaa.txt)",
            'required': True
        }
    },
    {
        'args': ('--motor-data',),
        'kwargs': {
            'type': str,
            'help': "Local path to CSV file containing data for a motor. Three should be supplied",
            'required': True,
            # 'append' collects repeated --motor-data flags; main() asserts exactly three
            'action': 'append'
        }
    },
]
# Regex (used with re.match) for the 8-line challenge banner the server sends.
# Groups: 1=latitude, 2=longitude, 3=start time (unix seconds GMT),
# 4=observation length, 5-7=challenge file names. Backslashes are doubled so
# each regex escape survives string-literal processing while "\n" stays a
# real newline; the resulting string value is unchanged.
CHALLENGE_FORMAT = ("Track-a-sat RF side channel detector\n"
                    "Groundstation location: latitude (-?[0-9]{1,3}\\.[0-9]{1,7}), longitude (-?[0-9]{1,3}\\.[0-9]{1,7})\n"
                    "Observation start time GMT: ([0-9]{10}\\.?[0-9]{0,8})\n"
                    "Observation length \\(seconds\\): ([0-9\\.]+)\n"
                    "Challenge file 1: (.+)\n"
                    "Challenge file 2: (.+)\n"
                    "Challenge file 3: (.+)\n"
                    "Please submit 3 satellite names as listed in active.txt \\(one per line, followed by a blank line\\)\n")
@dataclass
class Challenge:
    """Groundstation location and observation window parsed from the server banner."""
    latitude: float  # groundstation latitude, decimal degrees
    longitude: float  # groundstation longitude, decimal degrees
    start_time: float  # observation start, unix seconds (GMT per the banner text)
    observation_seconds: float  # observation window length in seconds
class ChallengeData(object):
    """Abstract source of RF power samples for one challenge capture."""
    # NOTE(review): @abstractmethod is inert without abc.ABCMeta/ABC on the class,
    # so instantiation is NOT blocked; calling the method just raises NotImplementedError.
    @abstractmethod
    def iterate_db_data(self):
        # type: () -> Iterator[Tuple[float, float, float]]  # FileChallengeData yields (time, az_db, el_db)
        raise NotImplementedError
class FileChallengeData(ChallengeData):
    """Challenge power readings backed by a raw binary capture file.

    The file is a packed stream of little-endian float32 pairs, one
    (azimuth dB, elevation dB) record per sample (TIMESTEP seconds apart).
    """

    def __init__(self, csv_data_path: str):
        self.csv_data_path = csv_data_path

    def iterate_db_data(self):
        """Yield (time_seconds, az_db, el_db) per sample.

        Values below THRESHOLD_DB are clamped to VERY_LOW_DB so only the
        significant peaks remain visible downstream.
        """
        record_fmt = "<ff"  # one interleaved (az, el) float32 pair per record
        record_size = 8
        with open(self.csv_data_path, 'rb') as stream:
            sample_index = 0
            while True:
                raw = stream.read(record_size)
                if len(raw) < record_size:
                    break  # EOF (or a trailing partial record)
                az_db, el_db = struct.unpack_from(record_fmt, raw)
                if az_db < THRESHOLD_DB:
                    az_db = VERY_LOW_DB
                if el_db < THRESHOLD_DB:
                    el_db = VERY_LOW_DB
                yield (sample_index * TIMESTEP, az_db, el_db)
                sample_index += 1
def main(args):
    """
    Solve the track-a-sat challenge over a socket connection:
    1. Identify peak pairs
    2. Find distance between each peak pair
    3. Find distance between peak pairs
    4. Calculate PWM: duty cycle values over time
    5. Calculate antenna motion from PWM
    6. Identify satellite given antenna location, time, and motion
    then submit the three satellite names back to the server.
    :return: None (exits with status 1 if fewer than NUM_CHALLENGE_SATS are found)
    """
    #random_challenge_factory = RandomChallengeFactory(args.tle_file, args.groundstation_file)
    #challenge = random_challenge_factory.create_random_challenge(args.seed)
    Host = os.getenv("HOST", "localhost")
    Port = int(os.getenv("PORT", 31337))
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((Host, Port))
    fsock = sock.makefile('rw')
    # optional ticket handshake: consume the prompt line, then send the ticket
    Ticket = os.getenv("TICKET", "")
    if len(Ticket):
        fsock.readline()
        fsock.write(Ticket + "\n")
        fsock.flush()
        print("Sent Ticket: " + Ticket)
    challenge = read_challenge_from_fsock(fsock, True)
    # presumably the dict comprehension dedupes load.tle()'s multiple keys down
    # to one entry per satellite name -- TODO confirm against skyfield docs
    _satellites = load.tle(args.tle_file)
    _sats_by_name = {sat.name: sat for sat in _satellites.values()}
    satellites = [sat for sat in _sats_by_name.values()]
    assert len(args.motor_data) == NUM_CHALLENGE_SATS, "Three paths to motor data files are required!"
    # holds satellite NAMES (strings appended below), not EarthSatellite objects
    calculated_satellite_targets: List[str] = []
    DIR = os.getenv("DIR", "/mnt")
    for motor_data in args.motor_data:
        duty_cycle_over_time = get_duty_cycle_over_time(DIR + "/" + motor_data)
        sat_target = get_satellite_target(
            satellites,
            duty_cycle_over_time,
            Groundstation(challenge.latitude, challenge.longitude),
            challenge.start_time
        )
        calculated_satellite_targets.append(sat_target.name)
    if len(calculated_satellite_targets) != NUM_CHALLENGE_SATS:
        print("Did not calculate {} sats! {}".format(NUM_CHALLENGE_SATS, len(calculated_satellite_targets)), file=sys.stderr)
        exit(1)
    # submit one name per line, terminated by a blank line (per the banner text)
    for sat_name in calculated_satellite_targets:
        print("Calculated sat: {}".format(sat_name), file=sys.stderr)
        fsock.write(sat_name + "\n")
        fsock.flush()
    fsock.write("\n")
    fsock.flush()
    result = fsock.readline()
    print("got back: {}".format(result), file=sys.stderr)
    #expected_sat_targets = set([sat.name for sat in challenge.satellites])
    #assert expected_sat_targets == calculated_satellite_targets
def read_challenge_from_fsock(fsock, verbose: bool) -> Challenge:
    """Read the 8-line challenge banner from the server and parse it.

    :param fsock: file-like socket wrapper supporting readline()
    :param verbose: echo the raw banner to stderr when True
    :return: the parsed Challenge
    :raises ValueError: if the banner does not match CHALLENGE_FORMAT
        (previously this surfaced as an opaque AttributeError on a None match)
    """
    lines = [fsock.readline() for _ in range(8)]
    challenge = "".join(lines)
    if verbose:
        print("Received challenge description: ", file=sys.stderr)
        print(challenge, file=sys.stderr)
    match = re.match(CHALLENGE_FORMAT, challenge)
    if match is None:
        raise ValueError("challenge text did not match the expected format")
    # groups 5-7 (challenge file names) are intentionally unused: motor data
    # paths come from the --motor-data CLI arguments instead
    return Challenge(
        latitude=float(match.group(1)),
        longitude=float(match.group(2)),
        start_time=float(match.group(3)),
        observation_seconds=float(match.group(4)),
    )
def get_duty_cycle_over_time(motor_data_path):
    # type: (str) -> List[Tuple[float, float, float]]
    """Recover the servo duty-cycle timeline for one motor capture.

    Reads the binary capture, derives per-pair duty cycles for the azimuth
    and elevation channels, and emits the first observation plus one entry
    for every change in either duty cycle.

    :param motor_data_path: path to the raw motor capture file
    :return: list of (timestamp, az duty, el duty) tuples
    """
    challenge_data = FileChallengeData(motor_data_path)
    timestamps = []
    azimuth_data = []
    elevation_data = []
    print("Reading data...", file=sys.stderr)
    for stamp, az_db, el_db in challenge_data.iterate_db_data():
        timestamps.append(stamp)
        azimuth_data.append(az_db)
        elevation_data.append(el_db)
    print("Calculating azimuth...", file=sys.stderr)
    az_cycles = get_duty_cycles(azimuth_data)
    print("Calculating elevation...", file=sys.stderr)
    el_cycles = get_duty_cycles(elevation_data)
    assert len(az_cycles) == len(el_cycles)
    result = [(timestamps[az_cycles[0][0]], az_cycles[0][1], el_cycles[0][1])]
    for i in range(1, len(az_cycles)):
        # both channels' observations should land at roughly the same sample index
        assert int(round(az_cycles[i][0], -5)) == int(round(el_cycles[i][0], -5))
        # keep only the moments where either duty cycle actually changed
        if az_cycles[i][1] != az_cycles[i - 1][1] or \
                el_cycles[i][1] != el_cycles[i - 1][1]:
            result.append((timestamps[az_cycles[i][0]], az_cycles[i][1], el_cycles[i][1]))
    return result
def get_duty_cycles(db_data: List[float]) -> List[Tuple[int, int]]:
    """Convert one channel's power trace into servo duty-cycle observations.

    :param db_data: power readings (dB) for one motor channel
    :return: list of (sample index of the pair's start peak, integer duty cycle)
    """
    peak_pairs = detect_peaks(db_data)
    fractions = calculate_duty_cycle_fractions(peak_pairs)
    # translate 0..1 fractions into the simulated servo's raw duty-cycle units
    reference = AntennaController(motion_restrict=False, simulate=True)
    base_duty = reference.azimuth_servo._min_duty
    duty_span = reference.azimuth_servo._duty_range
    return [(peak_pairs[idx][0], int(base_duty + fraction * duty_span))
            for idx, fraction in enumerate(fractions)]
def detect_peaks(db_data: List[float], group_threshold=None) -> List[List[int]]:
    """Find local maxima in a power trace and group them into start/end peak pairs.

    :param db_data: power readings (dB)
    :param group_threshold: max sample distance between two peaks of the same
        pair; defaults to module-level PEAK_GROUP_THRESHOLD (backward compatible)
    :return: list of [start_index, end_index] pairs
    """
    if group_threshold is None:
        group_threshold = PEAK_GROUP_THRESHOLD
    # peak detection: indexes strictly greater than both neighbors
    peak_indexes = []
    for i in range(1, len(db_data) - 1):
        if db_data[i] > db_data[i + 1] and db_data[i] > db_data[i - 1]:
            peak_indexes.append(i)
    # looking at the graph, the peaks come in pairs: group consecutive peaks
    # closer together than the threshold
    final_peak_groups = []
    temp_group = []
    for t in peak_indexes:
        if temp_group and (t - temp_group[-1]) >= group_threshold:
            # flush the finished group; it must be a pair to count
            assert (len(temp_group) <= 2), "bad group: {}".format(temp_group)
            if len(temp_group) == 2:
                final_peak_groups.append(temp_group)
            temp_group = [t]
        else:
            temp_group.append(t)
    # BUGFIX: the original never flushed the trailing group, silently dropping
    # the last peak pair of every trace; flush it like the others.
    assert (len(temp_group) <= 2), "bad group: {}".format(temp_group)
    if len(temp_group) == 2:
        final_peak_groups.append(temp_group)
    return final_peak_groups
def calculate_duty_cycle_fractions(peak_pairs: List[List[float]], min_duty_cycle=None, max_duty_cycle=None) -> List[float]:
    """Turn peak pairs into PWM duty-cycle fractions scaled to the configured range.

    Duty cycle i = (width of pair i) / (distance from pair i's start to pair
    i+1's start); the final pair has no successor and yields no fraction.

    :param peak_pairs: [start_index, end_index] pairs from detect_peaks
    :param min_duty_cycle: lower bound of the raw duty-cycle range; defaults to
        module-level MIN_DUTY_CYCLE (backward compatible)
    :param max_duty_cycle: upper bound; defaults to module-level MAX_DUTY_CYCLE
    :return: duty-cycle fractions rescaled so [min, max] maps onto [0, 1]
    """
    if min_duty_cycle is None:
        min_duty_cycle = MIN_DUTY_CYCLE
    if max_duty_cycle is None:
        max_duty_cycle = MAX_DUTY_CYCLE
    # stat 1: on-time = distance between the two peaks of a pair
    time_diff = [pair[1] - pair[0] for pair in peak_pairs]
    # stat 2: period = distance between successive pair starts
    inter_time_diff = [peak_pairs[i][0] - peak_pairs[i - 1][0] for i in
                       range(1, len(peak_pairs))]
    # duty cycle = on-time / period, rescaled to the configured range
    duty_cycle_range = max_duty_cycle - min_duty_cycle
    duty_cycle = [time_diff[i] / float(inter_time_diff[i]) for i in range(len(inter_time_diff))]
    return [(value - min_duty_cycle) / duty_cycle_range for value in duty_cycle]
class WrongSatellite(ValueError):
    """Raised while replaying motor data to reject the current candidate satellite."""
    pass
def azimuth_tolerance(current_azimuth: float, previous_azimuth: float, base_tolerance=None) -> float:
    """Adaptive matching tolerance for azimuth comparisons.

    A fast-moving pass (large change since the previous sample) gets a
    tolerance proportional to that change; a change near the base tolerance
    gets a 1.5x margin; otherwise the base tolerance applies.

    :param current_azimuth: azimuth at the current observation
    :param previous_azimuth: azimuth one second earlier
    :param base_tolerance: override for the base tolerance; defaults to the
        module-level AZIMUTH_TOLERANCE (backward compatible)
    :return: tolerance to use for this comparison
    """
    if base_tolerance is None:
        base_tolerance = AZIMUTH_TOLERANCE
    difference = abs(current_azimuth - previous_azimuth)
    if difference < base_tolerance:
        if difference > 0.75 * base_tolerance:
            return 1.5 * base_tolerance
        return base_tolerance
    return 1.5 * difference
def elevation_tolerance(current_elevation: float, previous_elevation: float, base_tolerance=None) -> float:
    """Adaptive matching tolerance for elevation comparisons.

    Same scheme as azimuth_tolerance: large inter-sample changes widen the
    tolerance proportionally; near-threshold changes get a 1.5x margin.

    :param current_elevation: elevation at the current observation
    :param previous_elevation: elevation one second earlier
    :param base_tolerance: override for the base tolerance; defaults to the
        module-level ELEVATION_TOLERANCE (backward compatible)
    :return: tolerance to use for this comparison
    """
    if base_tolerance is None:
        base_tolerance = ELEVATION_TOLERANCE
    difference = abs(current_elevation - previous_elevation)
    if difference < base_tolerance:
        if difference > 0.75 * base_tolerance:
            return 1.5 * base_tolerance
        return base_tolerance
    return 1.5 * difference
def get_satellite_target(
satellites: List[EarthSatellite],
duty_cycle_over_time: List[Tuple[float, float, float]],
groundstation_location: Groundstation,
observation_time: float,
) -> EarthSatellite:
#start = time.time()
#antenna = AntennaController(motion_restrict=False, simulate=True)
correct_antenna = AntennaDeterminator()
groundstation_loc = Topos(groundstation_location.latitude, groundstation_location.longitude)
targets = []
#min_duty = antenna.azimuth_servo._min_duty
#max_duty = antenna.azimuth_servo._min_duty + antenna.azimuth_servo._duty_range
for candidate in satellites:
print("\rTrying {} \t\t".format(candidate.name), file=sys.stderr, end ="", flush=True)
observer = SatelliteObserver(groundstation_loc, candidate)
mismatches = 0
try:
for t in range(len(duty_cycle_over_time)):
correct_position = duty_cycle_over_time[t]
obs_time = observation_time + correct_position[0]
if not observer.above_horizon(obs_time):
raise WrongSatellite()
altitude, azimuth, distance = observer.altAzDist_at(obs_time)
#antenna.set_elevation(altitude)
#antenna.set_azimuth(azimuth)
correct_antenna.set_duty_cycle(int(correct_position[1]), int(correct_position[2]))
correct_azimuth, correct_elevation = correct_antenna.get_angles()
#(az_duty, el_duty) = antenna.get_duty_cycles()
previous_elevation, previous_azimuth, _ = observer.altAzDist_at(obs_time - 1)
current_azimuth_tolerance = azimuth_tolerance(azimuth, previous_azimuth)
current_elevation_tolerance = elevation_tolerance(altitude, previous_elevation)
if abs(azimuth - correct_azimuth) > current_azimuth_tolerance:
# make sure that if we get a mismatch, then both measurements are not near 0/360 degrees
if azimuth > current_azimuth_tolerance and azimuth < 360 - current_azimuth_tolerance:
mismatches += 1
if mismatches > MISMATCHES_ALLOWED:
raise WrongSatellite()
continue
if correct_azimuth > current_azimuth_tolerance and correct_azimuth < 360 - current_azimuth_tolerance:
mismatches += 1
if mismatches > MISMATCHES_ALLOWED:
raise WrongSatellite()
continue
if abs(altitude - correct_elevation) > current_elevation_tolerance:
mismatches += 1
if mismatches > MISMATCHES_ALLOWED:
raise WrongSatellite()
continue
targets.append(candidate)
except WrongSatellite:
| |
(go_trials, nogo_trials) = (0, 0)
for lever in lever_on:
if eventcode[lever + 1] in ('LightOn1', 'LightOn2'):
nogo_trials += 1
else:
go_trials += 1
return go_trials, nogo_trials
def num_switch_trials(eventcode):
    """
    :param eventcode: list of event codes from operant conditioning file
    :return: number of large and small rewards in the switch task
    """
    large_rewards = eventcode.count('LargeReward')
    small_rewards = eventcode.count('SmallReward')
    return large_rewards, small_rewards
def bin_by_time(timecode, eventcode, bin_length, counted_event):
    """Count occurrences of counted_event within consecutive time bins.

    :param timecode: list of time codes from operant conditioning file
    :param eventcode: list of event codes from operant conditioning file
    :param bin_length: length of time in seconds to split the session into
    :param counted_event: event that is counted in each bin, in list format
    :return: a list of counts of the specified event for each bin
    """
    event_indices = get_events_indices(eventcode, counted_event)
    session_end = timecode[-1]
    # a partial trailing bin still counts as a full bin
    num_bins = int(session_end // bin_length)
    if session_end % bin_length != 0:
        num_bins += 1
    counts = [0] * num_bins
    for bin_idx in range(num_bins):
        lower = bin_idx * bin_length
        upper = (bin_idx + 1) * bin_length
        last_bin = (bin_idx + 1 == num_bins)
        for event_idx in event_indices:
            event_time = timecode[event_idx]
            # the final bin is open-ended so events at/after its lower edge are kept
            if (last_bin and event_time >= lower) or \
                    (not last_bin and lower <= event_time < upper):
                counts[bin_idx] += 1
    return counts
def lever_press_lat_gng(timecode, eventcode, lever_on, lever_press):
    """Mean latency (seconds) from lever presentation to lever press.

    Trials with no press contribute nothing to the mean.

    :param timecode: list of times (in seconds) when events occurred
    :param eventcode: list of events that happened in a session
    :param lever_on: event name for lever presentation
    :param lever_press: event name for lever press
    :return: mean latency rounded to 3 decimals, or 0 when no presses occurred
    """
    trial_bounds = get_events_indices(eventcode, [lever_on, 'EndSession'])
    latencies = []
    # each trial window runs from one lever presentation to the next (or EndSession)
    for start, end in zip(trial_bounds, trial_bounds[1:]):
        trial_events = eventcode[start:end]
        if lever_press in trial_events:
            press_offset = trial_events.index(lever_press)
            latencies.append(round(timecode[start + press_offset] - timecode[start], 2))
    if latencies:
        return round(statistics.mean(latencies), 3)
    return 0
def RVI_gng_weird(timecode, eventcode, lever_on, lever_press, cue_length):
    """Mean press latency plus a count of "incorrect" (late-press) trials.

    Trials with no press at all get a latency equal to cue_length and are
    therefore KEPT in the mean (not counted incorrect); only presses strictly
    later than cue_length are counted incorrect and excluded from the mean.

    :param timecode: list of times (in seconds) when events occurred
    :param eventcode: list of events that happened in a session
    :param lever_on: event name for lever presentation
    :param lever_press: event name for lever press
    :param cue_length: cue duration in seconds; latency assigned to no-press trials
    :return: (mean latency rounded to 3 decimals or 0, number of incorrect trials)
    """
    trial_bounds = get_events_indices(eventcode, [lever_on, 'EndSession'])
    raw_latencies = []
    for start, end in zip(trial_bounds, trial_bounds[1:]):
        trial_events = eventcode[start:end]
        if lever_press in trial_events:
            press_offset = trial_events.index(lever_press)
            raw_latencies.append(round(timecode[start + press_offset] - timecode[start], 2))
        else:
            raw_latencies.append(cue_length)
    incorrect_trials = 0
    kept_latencies = []
    for latency in raw_latencies:
        if latency > cue_length:
            incorrect_trials += 1
        else:
            kept_latencies.append(latency)
    if kept_latencies:
        return round(statistics.mean(kept_latencies), 3), incorrect_trials
    return 0, incorrect_trials
def RVI_nogo_latency(timecode, eventcode, lever_on, cue_length):
    """
    :param timecode: list of times (in seconds) when events occurred
    :param eventcode: list of events that happened in a session
    :param lever_on: event name or list for lever presentation
    :param cue_length: cue duration in seconds; also the latency assigned to no-press trials
    :return: the mean latency to press the lever in seconds (0 if no latencies recorded)
    """
    # trial windows run from each lever presentation to the next (or EndSession)
    lever_on = get_events_indices(eventcode, [lever_on, 'EndSession'])
    press_latency = []
    for i in range(len(lever_on) - 1):
        lever_on_idx = lever_on[i]
        # NOTE(review): the left lever is checked before the right, so when both
        # presses occur in a trial the right press is ignored even if it happened
        # first -- confirm this precedence is intended.
        if 'LPressOn' in eventcode[lever_on_idx:lever_on[i + 1]]:
            lever_press_idx = eventcode[lever_on_idx:lever_on[i + 1]].index('LPressOn')
            if timecode[lever_on_idx + lever_press_idx] - timecode[lever_on_idx] < cue_length:
                press_latency += [round(timecode[lever_on_idx + lever_press_idx] - timecode[lever_on_idx], 2)]
        elif 'RPressOn' in eventcode[lever_on_idx:lever_on[i + 1]]:
            lever_press_idx = eventcode[lever_on_idx:lever_on[i + 1]].index('RPressOn')
            if timecode[lever_on_idx + lever_press_idx] - timecode[lever_on_idx] < cue_length:
                press_latency += [round(timecode[lever_on_idx + lever_press_idx] - timecode[lever_on_idx], 2)]
        else:
            # NOTE(review): a trial whose press came slower than cue_length adds
            # nothing, while a trial with no press at all contributes cue_length --
            # confirm this asymmetry is intended.
            press_latency += [cue_length]
    if len(press_latency) > 0:
        return round(statistics.mean(press_latency), 3)
    else:
        return 0
def lever_press_latency_Switch(timecode, eventcode):
    """Mean latency (seconds) of the first 10 lever presses in a switch session.

    :param timecode: list of times (in seconds) when events occurred
    :param eventcode: list of events that happened in a session
    :return: mean latency rounded to 3 decimals, or 0 when no presses occurred
    """
    trial_bounds = get_events_indices(eventcode, ['LLeverOn', 'RLeverOn', 'EndSession'])
    latencies = []
    for start, end in zip(trial_bounds, trial_bounds[1:]):
        if len(latencies) >= 10:
            break  # only the first 10 latencies are used
        trial_events = eventcode[start:end]
        # left press takes precedence when both levers were pressed in a trial
        for press in ('LPressOn', 'RPressOn'):
            if press in trial_events:
                press_offset = trial_events.index(press)
                latencies.append(round(timecode[start + press_offset] - timecode[start], 2))
                break
    if latencies:
        return round(statistics.mean(latencies), 3)
    return 0
def response_rate_across_cue_iti(timecode, eventcode, code_on, code_off, counted_behavior):
    """
    Second-by-second response rates during the cue versus the preceding ITI.

    :param timecode: list of times (in seconds) when events occurred
    :param eventcode: list of events that happened in a session
    :param code_on: event name marking cue onset
    :param code_off: event name marking cue offset
    :param counted_behavior: event name counted within each 1-second window
    :return: 3 lists of mean per-trial counts per second: cue, iti, and cue-minus-iti
    """
    cue_on = get_events_indices(eventcode, [code_on])
    cue_off = get_events_indices(eventcode, [code_off])
    # if the session ended mid-cue there is one fewer offset; use EndSession as the final offset
    if len(cue_on) != len(cue_off):
        cue_off += get_events_indices(eventcode, ['EndSession'])
    # each ITI window runs from the previous cue's offset (or StartSession) to the next onset
    iti_on = get_events_indices(eventcode, [code_off, 'StartSession'])
    # cue duration taken from the 7th trial (index 6) -- assumes at least 7 cues
    # of constant length; TODO confirm against the task design
    cue_length_sec = int(timecode[cue_off[6]] - timecode[cue_on[6]])
    all_cue_length_poke_rates = [0] * cue_length_sec
    all_iti_length_poke_rates = [0] * cue_length_sec
    for i in range(len(cue_on)):
        cue_on_idx = cue_on[i]
        cue_off_idx = cue_off[i]
        iti_on_idx = iti_on[i]
        cue_length_poke_rates = []
        iti_length_poke_rates = []
        for y in range(int(cue_length_sec)):
            pokes = 0
            iti_pokes = 0
            # count behaviors landing in second y of this cue
            for x in range(cue_on_idx, cue_off_idx):
                if eventcode[x] == counted_behavior and (timecode[cue_on_idx] + y) <= timecode[x] < (
                        timecode[cue_on_idx] + y + 1):
                    pokes += 1
                else:
                    pokes += 0
            cue_length_poke_rates += [pokes]
            # count behaviors in the matching ITI second, aligned backwards from cue onset
            for t in range(iti_on_idx, cue_on_idx):
                if eventcode[t] == counted_behavior and (timecode[cue_on_idx] - (cue_length_sec - y)) \
                        <= timecode[t] < (timecode[cue_on_idx] - (cue_length_sec - (y + 1))):
                    iti_pokes += 1
                else:
                    iti_pokes += 0
            iti_length_poke_rates += [iti_pokes]
        # accumulate per-second totals across trials
        all_cue_length_poke_rates = [cue_length_poke_rates[i] + all_cue_length_poke_rates[i] for i in
                                     range(len(all_cue_length_poke_rates))]
        all_iti_length_poke_rates = [iti_length_poke_rates[i] + all_iti_length_poke_rates[i] for i in
                                     range(len(all_iti_length_poke_rates))]
    # convert totals to mean counts per trial
    all_cue_length_poke_rates = [all_cue_length_poke_rates[i] / len(cue_on) for i in
                                 range(len(all_cue_length_poke_rates))]
    all_iti_length_poke_rates = [all_iti_length_poke_rates[i] / len(cue_on) for i in
                                 range(len(all_iti_length_poke_rates))]
    subtracted_poke_rates = [all_cue_length_poke_rates[i] - all_iti_length_poke_rates[i] for i in
                             range(len(all_cue_length_poke_rates))]
    return all_cue_length_poke_rates, all_iti_length_poke_rates, subtracted_poke_rates
def duration_across_cue_iti(timecode, eventcode, code_on, code_off, counted_behavior_on, counted_behavior_off):
"""
:param timecode: list of times (in seconds) when events occurred
:param eventcode: list of events that happened in a session
:param code_on: event name for lever presentation
:param code_off: event name for lever press
:param counted_behavior_on: event name marking the start of the counted behavior (e.g. poke in)
:param counted_behavior_off: event name marking the end of the counted behavior (e.g. poke out)
:return: 3 lists of cue, iti, and the subtracted responding across seconds
"""
cue_on = get_events_indices(eventcode, [code_on])
cue_off = get_events_indices(eventcode, [code_off])
poke_on = get_events_indices(eventcode, [counted_behavior_on])
poke_off = get_events_indices(eventcode, [counted_behavior_off])
if len(cue_on) != len(cue_off):
cue_off += get_events_indices(eventcode, ['EndSession'])
cue_length_sec = int(timecode[cue_off[6]] - timecode[cue_on[6]])
all_cue_length_poke_dur = [0] * int(cue_length_sec)
all_iti_length_poke_dur = [0] * int(cue_length_sec)
for i in range(len(cue_on)):
cue_on_idx = cue_on[i]
cue_off_idx = cue_off[i]
cue_length_poke_dur = []
iti_length_poke_dur = []
for y in range(int(cue_length_sec)):
poke_dur = 0
iti_poke_dur = 0
for c in range(len(poke_off)):
# pokes that span whole seconds
if timecode[poke_on[c]] < (timecode[cue_on_idx] + y) and timecode[poke_off[c]] > \
(timecode[cue_on_idx] + y + 1):
poke_dur += 1
break
# pokes contained within a second
elif (timecode[cue_on_idx] + y) <= timecode[poke_on[c]] < timecode[poke_off[c]] \
< (timecode[cue_on_idx] + y + 1):
poke_dur += timecode[poke_off[c]] - timecode[poke_on[c]]
# pokes that start in a second of a cue
elif (timecode[cue_on_idx] + y) <= timecode[poke_on[c]] < (timecode[cue_on_idx] + y + 1) \
< timecode[poke_off[c]]:
poke_dur += ((timecode[cue_on_idx] + y + 1) - timecode[poke_on[c]])
# pokes that end in a second of a cue
elif timecode[poke_on[c]] < (timecode[cue_on_idx] + y) <= timecode[poke_off[c]] \
< (timecode[cue_on_idx] + y + 1):
poke_dur += (timecode[poke_off[c]] - (timecode[cue_on_idx] + y))
# pokes not occurring in the cue
else:
poke_dur += 0
cue_length_poke_dur += [round(poke_dur, 3)]
for d in range(len(poke_off)):
# pokes that span whole seconds
if timecode[poke_on[d]] < (timecode[cue_on_idx] - (cue_length_sec - y)) and timecode[poke_off[d]] \
> (timecode[cue_on_idx] - (cue_length_sec - (y + 1))):
iti_poke_dur += 1
break
# pokes contained within a second
elif (timecode[cue_on_idx] - (cue_length_sec - y)) <= timecode[poke_on[d]] < timecode[poke_off[d]] \
< (timecode[cue_on_idx] - (cue_length_sec - (y + 1))):
iti_poke_dur += (timecode[poke_off[d]] - timecode[poke_on[d]])
# pokes that start in a second of an ITI
elif (timecode[cue_on_idx] - (cue_length_sec - y)) <= timecode[poke_on[d]] \
< (timecode[cue_on_idx] - (cue_length_sec - (y + 1))) < timecode[poke_off[d]]:
iti_poke_dur += ((timecode[cue_on_idx] - (cue_length_sec - (y + 1))) - | |
# MIT license
# <EMAIL>
import OpenGL
from OpenGL.GL import *
from OpenGL.GLU import *
from .math import *
from .tool import *
from .qt import *
from . import gl
import time
import sys
class Camera(object):
    """Perspective camera orbiting a pivot point.

    The camera pose is ``frame`` (a Rigid3 rigid transform); ``pivot`` is the
    world-space point that rotations and zooms are centered on.  Mouse
    interaction is implemented as coroutines that receive Qt mouse events
    through ``send()`` (see the ``@coroutine`` decorator from .tool).
    """

    def __init__(self, owner):
        # owner widget: queried for width()/height() and the GL context
        self.owner = owner

        # frame/pivot
        self.frame = Rigid3()
        self.frame.center[2] = 1
        self.pivot = vec(0, 0, 0)

        # frustum
        self.znear = 0.1
        self.zfar = 100.0
        self.vfov = 60.0

        # width / height
        self.ratio = 1.0

        self.rotation_sensitivity = 1
        self.translation_sensitivity = 1
        self.zoom_sensitivity = 1

        # spin/slide: last inter-frame camera delta, used for inertia
        self.dframe = Rigid3()
        self.damping = 1e-2
        self.stop_velocity = 1e-3

    @property
    def projection(self):
        """Perspective projection matrix (QMatrix4x4) for the current frustum."""
        res = QtGui.QMatrix4x4()
        res.setToIdentity()
        res.perspective(self.vfov, self.ratio, self.znear, self.zfar)
        return res

    @property
    def modelview(self):
        """Modelview matrix: the inverse camera frame as a QMatrix4x4."""
        res = QtGui.QMatrix4x4()
        inv = self.frame.inv()

        res.setToIdentity()
        res.translate(*inv.center)

        axis, angle = inv.orient.axis_angle()
        if axis is not None:
            # QMatrix4x4.rotate expects degrees; assumes `deg` == pi/180 so
            # angle / deg converts radians -> degrees — TODO confirm in .math
            res.rotate(angle / deg, *axis)
        return res

    def pixel_coords(self, x, y):
        '''pixel -> viewport coords'''
        # normalized [0, 1] coordinates, y flipped (GL origin is bottom-left)
        rx = float(x) / float(self.owner.width())
        ry = float(self.owner.height() - 1.0 - y) / float(self.owner.height())
        return rx, ry

    def unproject(self, Pinv, x, y, z=0):
        '''unproject viewport coordinates'''
        # map [0, 1] viewport coords to [-1, 1] clip coords, then through the
        # inverse projection, with perspective divide
        d = 2.0 * QtGui.QVector4D(x, y, z, 1.0) - QtGui.QVector4D(1.0, 1.0, 1.0, 1.0)
        res = Pinv.map(d)
        return vec(res.x(), res.y(), res.z()) / res.w()

    def pixel_depth(self, px, py):
        '''read depth under pixel, or None'''
        self.owner.makeCurrent()
        read = glReadPixels(px, self.owner.height() - 1 - py,
                            1, 1,
                            GL_DEPTH_COMPONENT, GL_FLOAT)
        res = read[0][0]
        # depth == 1.0 means nothing was drawn under the pixel
        return res if res < 1.0 else None

    def point_under_pixel(self, x, y):
        '''point under pixel or None'''
        z = self.pixel_depth(x, y)
        if z is None:
            return None
        x, y = self.pixel_coords(x, y)
        Pinv, ok = self.projection.inverted()
        return self.unproject(Pinv, x, y, z)

    @property
    def pivot_distance(self):
        # distance from camera center to the orbit pivot
        return norm(self.frame.center - self.pivot)

    @coroutine
    def mouse_translate(self, start):
        '''translate camera from mouse move events'''
        start_pos = start.pos()
        start_frame = Rigid3()
        start_frame[:] = self.frame

        Pinv, ok = self.projection.inverted()
        assert ok

        # NOTE(review): this reduces to z = 0, i.e. the near plane in
        # normalized depth — kept as written for clarity of derivation
        z = self.znear
        z = (z - self.znear) / (self.zfar - self.znear)

        sx, sy = self.pixel_coords(start_pos.x(), start_pos.y())
        s = self.unproject(Pinv, sx, sy, z)

        while True:
            ev = yield
            ex, ey = self.pixel_coords(ev.pos().x(), ev.pos().y())
            e = self.unproject(Pinv, ex, ey, z)

            d = e - s
            # scale translation with distance to the pivot
            scale = self.pivot_distance

            f = Rigid3()
            f.center = scale * 10 * self.translation_sensitivity * d

            next_frame = start_frame * f.inv()
            # remember the per-event delta for slide inertia
            self.dframe = self.frame.inv() * next_frame
            self.frame = next_frame

    def axis_align(self):
        '''align camera with nearest axis'''
        # snap the two non-dominant coordinates of the camera center onto the pivot
        pos = np.abs(self.frame.center - self.pivot).tolist()
        index = pos.index(max(pos))
        self.frame.center = [self.pivot[i] if i != index else self.frame.center[i]
                             for i in range(3)]

        local_pivot = self.frame.inv()(self.pivot)

        # look at pivot
        q = Quaternion.from_vectors(-ez, local_pivot)
        self.frame.orient = self.frame.orient * q

        # pick axis closest to camera up axis
        cam_up = self.frame.orient(ey)

        proj = np.abs(cam_up).tolist()
        index = proj.index(max(proj))

        up = vec(0, 0, 0)
        up[index] = proj[index] / cam_up[index]  # to get sign right

        q = Quaternion.from_vectors(ey, self.frame.orient.inv()(up))
        self.frame.orient = self.frame.orient * q

    def lookat(self, target, **kwargs):
        '''make camera point at target

        Keyword args:
            up: preferred world up direction (default ey).
        '''
        local_target = self.frame.inv()(target)
        q = Quaternion.from_vectors(-ez, local_target)
        self.frame.orient = self.frame.orient * q

        # project q.inv() onto geodesic around z
        up = kwargs.get('up', ey)
        qinv = Quaternion.from_vectors(ey, self.frame.orient.inv()(up))

        if math.fabs(q.real) < Quaternion.epsilon:
            return

        # gnomonic projection
        pqinv = qinv / q.real

        r = Quaternion()
        r.imag[2] = pqinv.imag[2]
        r /= norm(r)

        self.frame.orient = self.frame.orient * r

    # sign convention for wheel zoom (flip to invert the scroll direction)
    wheel_direction = 1.0

    @coroutine
    def mouse_zoom(self, start=None):
        '''adjust zoom from mouse

        With start=None events are wheel events; otherwise vertical drag
        relative to `start` drives the zoom.
        '''
        if start is not None:
            last = start.pos()
        while True:
            ev = yield
            if start is None:
                degrees = self.wheel_direction * float(wheel_angle(ev)) / 256.0
            else:
                # dx = ev.pos().x() - start.x()
                dy = ev.pos().y() - last.y()
                last = ev.pos()
                degrees = - self.wheel_direction * (dy / 256.0)

            u = self.frame.inv()(self.pivot)
            dist = norm(u)
            view = u / dist

            delta = (self.zoom_sensitivity * degrees) * dist
            # make sure we dont zoom closer than znear
            delta = min(delta, dist - self.znear)

            f = Rigid3()
            f.center[:] = view * delta
            self.frame = self.frame * f

    @coroutine
    def mouse_rotate(self, start):
        '''rotate camera from mouse move events'''
        start = start.pos()
        start_frame = Rigid3()
        start_frame[:] = self.frame

        Pinv, ok = self.projection.inverted()

        sx, sy = self.pixel_coords(start.x(), start.y())
        s = start_frame(self.unproject(Pinv, sx, sy))

        while True:
            ev = yield
            # NOTE(review): ex/ey shadow the global basis vectors from .math here
            ex, ey = self.pixel_coords(ev.pos().x(), ev.pos().y())
            e = start_frame(self.unproject(Pinv, ex, ey))

            # rotation bringing e onto s, taken about the pivot
            f = Rigid3()
            f.orient = Quaternion.from_vectors(e - self.pivot,
                                               s - self.pivot)
            scale = norm(self.frame.center - self.pivot)
            f.orient = Quaternion.exp(scale * 16 * self.rotation_sensitivity * f.orient.log())

            t = Rigid3()
            t.center = self.pivot

            g = t * f * t.inv()

            next_frame = g * start_frame
            # remember the per-event delta for spin inertia
            self.dframe = next_frame * self.frame.inv()
            self.frame[:] = next_frame

    @coroutine
    def mouse_drag(self, start):
        """Forward the dragged world-space point (at the depth of the initial
        click) to owner.drag on every mouse move event."""
        start_pos = start.pos()
        start_frame = Rigid3()
        start_frame[:] = self.frame

        Pinv, ok = self.projection.inverted()

        z = self.pixel_depth(start_pos.x(), start_pos.y())

        sx, sy = self.pixel_coords(start_pos.x(), start_pos.y())
        # NOTE(review): s is computed but unused below
        s = start_frame(self.unproject(Pinv, sx, sy, z))

        while True:
            ev = yield
            ex, ey = self.pixel_coords(ev.pos().x(), ev.pos().y())
            e = start_frame(self.unproject(Pinv, ex, ey, z))
            self.owner.drag(e)

    @coroutine
    def spin(self):
        '''rotate camera around pivot (damped) on each call to next'''
        delta = Rigid3()
        delta[:] = self.dframe
        while True:
            yield
            # 1% damping
            factor = 1.0 - self.damping
            vel = factor * delta.log()
            # stop once the residual velocity is negligible
            if norm(vel) < self.stop_velocity:
                break
            delta = Rigid3.Deriv.exp(vel)
            # left-multiplication: motion expressed in the world frame
            self.frame[:] = delta * self.frame

    @coroutine
    def slide(self):
        '''translate camera with constant local direction (damped) on each call to next'''
        delta = Rigid3()
        delta[:] = self.dframe
        while True:
            yield
            factor = 1.0 - self.damping
            vel = factor * delta.log()
            if norm(vel) < self.stop_velocity:
                break
            delta = Rigid3.Deriv.exp(vel)
            # right-multiplication: motion expressed in the camera frame
            self.frame[:] = self.frame * delta
def draw_axis():
    """Draw a white sphere at the origin plus red/green/blue arrows along x/y/z."""
    glColor(1, 1, 1)
    gl.sphere(radius=0.025)
    for color, axis in (((1, 0.2, 0.2), ex),
                        ((0.2, 1, 0.2), ey),
                        ((0.2, 0.2, 1), ez)):
        glColor(*color)
        with gl.lookat(axis):
            gl.arrow()
class Viewer(QtWidgets.QOpenGLWidget):
alt_button = QtCore.Qt.CTRL if sys.platform == 'darwin' else QtCore.Qt.ALT
    def __init__(self, parent=None):
        """Create the viewer, its camera, and the animation/update timers."""
        super(Viewer, self).__init__(parent)

        self.camera = Camera(self)

        # coroutine handlers currently consuming mouse events / draw ticks
        self.mouse_move_handler = None
        self.mouse_wheel_handler = self.camera.mouse_zoom()
        self.draw_handler = None

        self.setWindowTitle('Viewer')

        # display flags
        # TODO make these properties and emit update_needed ?
        self.show_axis = True
        self.show_grid = False

        # animation: a repeating timer that steps animate() then repaints
        self.animation = QtCore.QTimer()

        def on_timeout():
            self.animate()
            self.update()

        connect(self.animation, 'timeout()', on_timeout)
        self.fps = 60

        # a timer to post updateGL events
        self.update_timer = QtCore.QTimer()
        self.update_timer.setSingleShot(True)
        self.update_timer.setInterval(0)

        connect(self.update_timer, 'timeout()', self.update)
        # self.connect(self.update_timer, QtCore.SIGNAL("timeout()"), self.update)

        # a nice signal to control it
        self.update_needed.connect(self.update_timer.start)
update_needed = signal()
@property
def fps(self):
return 1.0 / (self.animation.interval() / 1000.0)
@fps.setter
def fps(self, value):
interval = (1.0 / value) * 1000.0
self.animation.setInterval(interval)
    def minimumSizeHint(self):
        """Smallest sensible size for the GL viewport (Qt layout hook)."""
        return QtCore.QSize(100, 300)
def sizeHint(self):
rec = QApplication.desktop().screenGeometry()
# widget height is half screen height
factor = 1.0 / 2.0
height = factor * rec.height()
# 4/3 form factor
ratio = 4.0 / 3.0
width = ratio * height
return QtCore.QSize(width, height)
    def resizeGL(self, w, h):
        """Qt resize hook: update the GL viewport and the camera aspect ratio."""
        glViewport(0, 0, w, h)
        # guard against h == 0 to avoid division by zero
        self.camera.ratio = float(w) / float(h if h != 0 else 1.0)
def init(self): pass
    @property
    def context(self):
        """The current QOpenGLContext (valid while the GL context is current)."""
        return QtGui.QOpenGLContext.currentContext()
def initializeGL(self):
bg = QtGui.QColor.fromCmykF(0.39, 0.39, 0.0, 0.0).darker()
ctx = self.context
gl = ctx.functions()
gl.glClearColor(bg.redF(), bg.greenF(), bg.blueF(), bg.alphaF())
self.resizeGL(self.width(), self.height())
# some reasonable defaults
glEnable(GL_DEPTH_TEST)
glEnable(GL_LIGHTING)
glEnable(GL_LIGHT0)
glEnable(GL_COLOR_MATERIAL)
glColorMaterial(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE)
glEnable(GL_NORMALIZE)
self.init()
# def update(self):
# if not self.animation.isActive():
# QtWidgets.QOpenGLWidget.update(self)
    def mouseMoveEvent(self, e):
        """Forward move events to the active mouse coroutine, then repaint."""
        if self.mouse_move_handler:
            self.mouse_move_handler.send(e)
            self.update()
def select(self, p): pass
def drag(self, p): pass
    def mousePressEvent(self, e):
        """Dispatch mouse presses to camera coroutines.

        Left: rotate (shift-left on geometry: select + drag).
        Right: translate (shift-right on geometry: re-target the pivot).
        Middle: drag-zoom.  Any press cancels spin/slide inertia.
        """
        # stop any pending inertia handler
        self.draw_handler = None
        self.camera.dframe = Rigid3()

        if e.button() == QtCore.Qt.LeftButton:
            if e.modifiers() == QtCore.Qt.SHIFT:
                p = self.camera.point_under_pixel(e.pos().x(), e.pos().y())
                if p is not None:
                    self.select(self.camera.frame(p))
                    self.mouse_move_handler = self.camera.mouse_drag(e)
            else:
                self.mouse_move_handler = self.camera.mouse_rotate(e)
                self.update()

        if e.button() == QtCore.Qt.RightButton:
            if e.modifiers() == QtCore.Qt.SHIFT:
                p = self.camera.point_under_pixel(e.pos().x(), e.pos().y())
                if p is not None:
                    # re-center the orbit pivot on the clicked point
                    self.camera.pivot = self.camera.frame(p)
                    self.update()
            else:
                self.mouse_move_handler = self.camera.mouse_translate(e)
                self.update()

        if e.button() == QtCore.Qt.MiddleButton:
            self.mouse_move_handler = self.camera.mouse_zoom(e)
            self.update()
    def mouseDoubleClickEvent(self, e):
        """Double left-click snaps the camera to the nearest world axis."""
        if e.button() == QtCore.Qt.LeftButton:
            self.camera.axis_align()
            self.update()
def animate(self): pass
def reset(self): pass
def toggle_fullscreen(self):
if self.isFullScreen():
self.showNormal()
else:
self.showFullScreen()
def on_keypress(self, key): pass
    def keyPressEvent(self, e):
        """Keyboard shortcuts.

        Return toggles the animation timer (alt+Return toggles fullscreen),
        Escape closes, Backspace calls reset(), 'a' toggles the axis display.
        The key text is always forwarded to on_keypress().
        """
        if e.key() == QtCore.Qt.Key_Return:
            if e.modifiers() == Viewer.alt_button:
                self.toggle_fullscreen()
            else:
                if self.animation.isActive():
                    self.animation.stop()
                else:
                    self.animation.start()

        if e.key() == QtCore.Qt.Key_Escape:
            self.close()

        if e.key() == QtCore.Qt.Key_Backspace:
            self.reset()

        if e.text() == 'a':
            self.show_axis = not self.show_axis
            self.update()

        self.on_keypress(e.text())
spin_threshold = 0.05
slide_threshold = 0.04
def mouseReleaseEvent(self, e):
self.mouse_move_handler = None
if e.button() == QtCore.Qt.LeftButton:
if norm(self.camera.dframe.log()) > self.spin_threshold:
self.draw_handler = self.camera.spin()
self.update()
if e.button() == QtCore.Qt.RightButton:
if norm(self.camera.dframe.log()) / (1.0 + self.camera.pivot_distance) > self.slide_threshold:
self.draw_handler | |
<= 27:
return 1
else:
if f5 <= 26:
if f6 <= 20:
if f6 <= 5:
if f4 <= 4:
return 3
else:
return 4
else:
return 3
else:
return 3
else:
return 3
else:
if f4 <= 17:
if f4 <= 8:
if f4 <= 7:
if f5 <= 27:
if f1 <= 28:
return 1
else:
return 6
else:
return 5
else:
if f5 <= 33:
return 4
else:
return 5
else:
if f5 <= 24:
if f6 <= 19:
if f6 <= 16:
if f4 <= 13:
if f5 <= 23:
return 5
else:
return 3
else:
if f1 <= 27:
return 1
else:
if f6 <= 6:
return 4
else:
if f6 <= 8:
return 6
else:
return 4
else:
if f5 <= 23:
if f1 <= 27:
return 1
else:
return 5
else:
return 5
else:
if f6 <= 22:
if f6 <= 21:
if f1 <= 27:
return 1
else:
if f4 <= 9:
return 3
else:
return 5
else:
if f3 <= 2:
return 1
else:
return 6
else:
if f4 <= 13:
if f4 <= 10:
return 4
else:
return 5
else:
return 4
else:
if f4 <= 16:
if f6 <= 15:
if f4 <= 11:
return 4
else:
if f1 <= 27:
return 1
else:
return 5
else:
if f6 <= 33:
if f1 <= 27:
return 1
else:
if f6 <= 17:
return 4
else:
if f4 <= 11:
if f5 <= 31:
return 4
else:
return 5
else:
return 4
else:
if f4 <= 15:
return 4
else:
return 6
else:
if f5 <= 30:
return 4
else:
if f6 <= 20:
if f1 <= 27:
return 1
else:
return 5
else:
if f7 <= 21:
return 6
else:
return 7
else:
if f1 <= 27:
return 1
else:
if f3 <= 7:
return 5
else:
return 3
else:
if f5 <= 30:
if f4 <= 23:
if f8 <= 22:
if f5 <= 25:
if f3 <= 17:
if f6 <= 20:
if f6 <= 17:
if f5 <= 23:
if f6 <= 4:
return 5
else:
return 6
else:
return 4
else:
return 5
else:
if f5 <= 23:
return 5
else:
return 4
else:
if f6 <= 26:
if f6 <= 9:
if f7 <= 1:
return 6
else:
return 7
else:
if f6 <= 19:
return 5
else:
return 7
else:
return 6
else:
return 4
else:
if f9 <= 31:
if f6 <= 17:
if f6 <= 14:
return 4
else:
return 9
else:
if f6 <= 19:
if f7 <= 5:
return 6
else:
return 5
else:
return 4
else:
return 8
else:
if f4 <= 26:
if f1 <= 27:
return 1
else:
return 3
else:
if f4 <= 31:
return 4
else:
return 3
else:
if f5 <= 32:
if f5 <= 31:
if f6 <= 20:
if f3 <= 9:
return 5
else:
return 1
else:
if f7 <= 21:
return 6
else:
return 7
else:
if f6 <= 33:
if f6 <= 18:
if f6 <= 14:
if f6 <= 3:
return 5
else:
if f6 <= 8:
return 3
else:
return 5
else:
return 6
else:
return 5
else:
return 6
else:
if f3 <= 29:
if f4 <= 26:
if f6 <= 14:
return 5
else:
return 4
else:
if f3 <= 12:
return 5
else:
return 3
else:
if f6 <= 22:
if f6 <= 16:
if f6 <= 8:
return 5
else:
if f7 <= 1:
return 5
else:
if f7 <= 3:
return 6
else:
if f7 <= 15:
return 5
else:
return 6
else:
if f6 <= 21:
return 5
else:
return 6
else:
if f7 <= 21:
return 6
else:
if f7 <= 22:
return 7
else:
return 6
else:
if f2 <= 23:
if f6 <= 22:
if f3 <= 5:
if f4 <= 3:
if f5 <= 23:
if f5 <= 1:
if f4 <= 2:
return 1
else:
return 4
else:
if f5 <= 16:
if f5 <= 10:
if f5 <= 9:
return 2
else:
return 3
else:
return 2
else:
if f5 <= 20:
return 1
else:
if f4 <= 2:
return 2
else:
return 3
else:
if f1 <= 9:
return 1
else:
if f1 <= 27:
if f1 <= 11:
if f4 <= 2:
return 2
else:
if f5 <= 31:
if f6 <= 9:
return 4
else:
if f6 <= 13:
if f7 <= 10:
return 4
else:
if f7 <= 20:
return 5
else:
return 4
else:
return 4
else:
return 4
else:
if f1 <= 21:
if f1 <= 15:
return 1
else:
return 0
else:
return 1
else:
if f4 <= 2:
return 2
else:
if f5 <= 25:
if f6 <= 9:
return 4
else:
if f6 <= 12:
if f7 <= 10:
return 4
else:
if f7 <= 20:
return 5
else:
return 4
else:
return 4
else:
return 4
else:
if f1 <= 29:
if f1 <= 11:
if f1 <= 9:
if f3 <= 1:
return 1
else:
if f1 <= 4:
return 1
else:
return 2
else:
if f4 <= 27:
if f4 <= 22:
if f4 <= 15:
if f3 <= 1:
return 2
else:
if f4 <= 9:
return 4
else:
return 2
else:
if f4 <= 18:
return 2
else:
if f4 <= 20:
if f4 <= 19:
return 4
else:
return 2
else:
return 2
else:
if f5 <= 20:
if f5 <= 12:
return 2
else:
if f4 <= 23:
return 4
else:
return 2
else:
return 2
else:
return 2
else:
return 1
else:
if f4 <= 27:
if f4 <= 9:
if f4 <= 7:
return 2
else:
return 3
else:
if f5 <= 20:
if f4 <= 21:
return 2
else:
if f5 <= 12:
return 2
else:
if f4 <= 23:
return 4
else:
return 2
else:
return 2
else:
if f5 <= 13:
if f5 <= 11:
return 3
else:
return 2
else:
return 3
else:
if f1 <= 30:
if f3 <= 14:
if f1 <= 11:
if f1 <= 5:
return 1
else:
if f4 <= 21:
if f4 <= 1:
if f6 <= 11:
if f6 <= 1:
if f5 <= 3:
return 5
else:
return 3
else:
return 3
else:
return 3
else:
if f3 <= 11:
if f3 <= 7:
if f1 <= 7:
if f4 <= 17:
if f4 <= 9:
if f4 <= 5:
if f6 <= 20:
return 3
else:
return 5
else:
return 4
else:
return 1
else:
return 3
else:
return 1
else:
if f3 <= 10:
if f3 <= 8:
if f4 <= 14:
return 3
else:
return 4
else:
return 2
else:
if f4 <= 15:
if f4 <= 9:
return 3
else:
return 4
else:
return 3
else:
if f3 <= 12:
if f1 <= 9:
return 1
else:
return 2
else:
return 3
else:
if f4 <= 23:
if f5 <= 21:
if f3 <= 9:
if f5 <= 7:
return 5
else:
return 1
else:
if f3 <= 11:
return 4
else:
if f5 <= 3:
return 2
else:
return 4
else:
if f5 <= 23:
if f3 <= 9:
return 3
else:
return 5
else:
return 4
else:
if f3 <= 7:
if f4 <= 27:
return 1
else:
if f1 <= 9:
return 4
else:
return 1
else:
if f4 <= 24:
if f5 <= 9:
return 3
else:
if f5 <= 25:
if f5 <= 12:
if f6 <= 10:
return 3
else:
return 4
else:
if f5 <= 16:
if f7 <= 24:
return 3
else:
if f7 <= 25:
return 4
else:
return 3
| |
return default
return "Default"
row = cloud_utils.lower_key(db.get_row_dict("tblEntities", {"id": dbid}, order="ORDER BY id LIMIT 1"))
if row:
return row["name"]
else:
return "None"
## "external_address_type": row["externalipaddresstype"],
# "external_address": row["externalipaddress"],
# "external_address_nat": _get_entity_name(db, row["externaladdressnatentityid"], default="none"),
def _network_services_common(db, dbid, row):
    """Build the params/policy dict shared by all network service serializers.

    :param db: database handle (used only for the default-gateway lookup)
    :param dbid: entity row id (unused here, kept for serializer signature parity)
    :param row: entity row dict (lower-cased column keys)
    :return: dict with "params", "uuid" and "policy"; {} on error (the old
             implicit None return crashed callers doing j.update(...))
    """
    try:
        # clamp: the max instance count may never be below the initial count
        # NOTE(review): resetting begininstancescount to 1 in that case looks
        # suspicious — confirm intent
        if row["maxinstancescount"] < row["begininstancescount"]:
            row["maxinstancescount"] = row["begininstancescount"]
            row["begininstancescount"] = 1
        return {"params": {"availability_option": row["highavailabilityoptions"],
                           "default_gateway": _get_entity_name(db, row["defaultgatewayentityid"], default="default"),
                           "qos": row["qos"],
                           "throughput": row["throughputinc"],
                           "begin_instances_count": row["begininstancescount"],
                           "max_instances_count": row["maxinstancescount"],
                           "northbound": row["northbound_port"]
                           },
                "uuid": row['uniqueid'],
                "policy": {"sla": row["servicelevelagreement"], "sla_policy": row["servicelevelagreementpolicy"]
                           }
                }
    except Exception:
        cloud_utils.log_exception(sys.exc_info())
        return {}
def _network_services_autoscale(db, dbid, row):
try:
return {"autoscale": {"throughput_enabled": row["dynopbandwidth"], "throughput_red": row["throughtput_red"],
"throughput_green": row["throughput_green"],
"compute_enabled": row["dynopcpu"], "compute_red": row["cpu_red"],
"compute_green": row["cpu_green"],
"ram_enabled": row["dynopram"], "ram_red": row["ram_red"], "ram_green": row["ram_green"],
"cooldown_add": row["cooldown_up"], "cooldown_remove": row["cooldown_down"]}}
except:
cloud_utils.log_exception(sys.exc_info())
def _network_services_interfaces(db, dbid, svc):
    """Collect the interface descriptions of a network service.

    :param db: database handle
    :param dbid: service entity row id
    :param svc: service row (unused here, kept for signature parity)
    :return: {"interfaces": [...]} when ports exist, {} otherwise or on error
             (the old implicit None return crashed callers doing j.update(...))
    """
    try:
        interfaces = []
        for row in cloud_utils.network_service_ports(db, dbid):
            interfaces.append({"subnet": _get_entity_name(db, row["destinationserviceentityid"], default="unknown"),
                               "name": row["name"],
                               "interface_type": row["interface_type"],
                               "params":
                                   {"guaranteed_bandwidth": row["guarbandwidth"],
                                    "maximum_bandwidth": row["maxbandwidth"],
                                    "maximum_iops": row["maxiops"],
                                    "guaranteed_iops": row["guariops"],
                                    "securityzone": row["securityzone"],
                                    "qos": row["qos"],
                                    "mtu": row["mtu"]
                                    }
                               })
        if interfaces:
            return {"interfaces": interfaces}
        else:
            return {}
    except Exception:
        cloud_utils.log_exception(sys.exc_info())
        return {}
def provision_network_service_ports(db, port_dbid):
    """Collect all entity groups attached to a service port (NAT services,
    virtual networks, plus every group listed in entity_constants.port_groups).

    :param db: database handle
    :param port_dbid: service port row id
    :return: dict keyed by group type; {} on error (previously returned None,
             which crashed callers doing j.update(...))
    """
    try:
        groups = {}
        grp = []
        for item in cloud_utils.entity_attach(db, port_dbid, entitytype="nat_network_service"):
            nat = {"nat_service": item["name"], "type": item["ipaddresstype"],
                   "priority": item["attachedsortsequenceid"]}
            # static NAT additionally carries its fixed address
            if item["ipaddresstype"].lower() == "static":
                nat["static_ip"] = item["staticipaddress"]
            grp.append(nat)
        if len(grp) > 0:
            groups["nat"] = grp
        grp = []
        for item in cloud_utils.entity_attach(db, port_dbid, entitytype="virtual_network"):
            nat = {"network": item["name"]}
            grp.append(nat)
        if len(grp) > 0:
            groups["virtual_networks"] = grp
        # generic, table-driven groups
        for group in entity_constants.port_groups:
            grp = []
            for item in cloud_utils.entity_attach(db, port_dbid, entitytype=group["name"]):
                grp.append({group["item"]: item["name"], "priority": item["attachedsortsequenceid"]})
            if len(grp) > 0:
                groups[group["type"]] = grp
        return groups
    except Exception:
        cloud_utils.log_exception(sys.exc_info())
        return {}
# extra serializer key list for NAT entities (empty: common entity keys only)
nat_keys = []
def _service_port(db, dbid, row, keys, **kwargs):
    """Assemble the provisioning dict for a single service port."""
    result = provision_network_service_ports(db, dbid)
    result.update({
        "subnet": row["name"],
        "name": row["name"],
        "interface_type": row["interface_type"],
        "params": {
            "guaranteed_bandwidth": row["guarbandwidth"],
            "maximum_bandwidth": row["maxbandwidth"],
            "maximum_iops": row["maxiops"],
            "guaranteed_iops": row["guariops"],
            "securityzone": row["securityzone"],
            "qos": row["qos"],
            "mtu": row["mtu"],
        },
    })
    return result, None
def _nat(db, dbid, row, keys, **kwargs):
    """Provisioning dict for a NAT network service."""
    result = _entity(db, dbid, row, entity_keys)
    for part in (_network_services_common(db, dbid, row),
                 _network_services_autoscale(db, dbid, row),
                 {"pat_mode": row["nat_pat_mode"],
                  "nat_address_type": row["nat_address_type"],
                  "nat_static_address": row["nat_static_address"]},
                 _network_services_interfaces(db, dbid, row)):
        result.update(part)
    return result, None
def _post_rest_get_function_nat(db, dbid, rest_me, rest=None):
    """Post-REST-GET hook for NAT services; currently a no-op placeholder."""
    pass
# extra serializer key list for load-balancer entities (empty: defaults only)
lbs_keys = []
def _lbs(db, dbid, row, keys, **kwargs):
    """Provisioning dict for a load-balancer service (LBS)."""
    result = _entity(db, dbid, row, entity_keys)
    for part in (_network_services_common(db, dbid, row),
                 _network_services_autoscale(db, dbid, row),
                 _network_services_interfaces(db, dbid, row),
                 {"lbs_mode": row["lbs_mode"]}):
        result.update(part)
    return result, None
def _post_rest_get_function_lbs(db, dbid, rest_me, rest=None):
    """Post-REST-GET hook for LBS services; currently a no-op placeholder."""
    pass
# extra serializer key list for firewall entities (empty: defaults only)
fws_keys = []
def _fws(db, dbid, row, keys, **kwargs):
    """Provisioning dict for a firewall service (FWS)."""
    result = _entity(db, dbid, row, entity_keys)
    for part in (_network_services_common(db, dbid, row),
                 _network_services_autoscale(db, dbid, row),
                 _network_services_interfaces(db, dbid, row)):
        result.update(part)
    return result, None
def _post_rest_get_function_fws(db, dbid, rest_me, rest=None):
    """Post-REST-GET hook for FWS services; currently a no-op placeholder."""
    pass
# extra serializer key list for router entities (empty: defaults only)
rts_keys = []
def _rts(db, dbid, row, keys, **kwargs):
    """Provisioning dict for a router service (RTS)."""
    result = _entity(db, dbid, row, entity_keys)
    for part in (_network_services_common(db, dbid, row),
                 _network_services_autoscale(db, dbid, row),
                 _network_services_interfaces(db, dbid, row)):
        result.update(part)
    return result, None
def _post_rest_get_function_rts(db, dbid, rest_me, rest=None):
    """Post-REST-GET hook for RTS services; currently a no-op placeholder."""
    pass
# extra serializer key list for VPN entities (empty: defaults only)
vpn_keys = []
def _vpn(db, dbid, row, keys, **kwargs):
    """Provisioning dict for a VPN service (no autoscale block)."""
    result = _entity(db, dbid, row, entity_keys)
    for part in (_network_services_common(db, dbid, row),
                 _network_services_interfaces(db, dbid, row)):
        result.update(part)
    return result, None
def _post_rest_get_function_vpn(db, dbid, rest_me, rest=None):
    """Post-REST-GET hook for VPN services; currently a no-op placeholder."""
    pass
# extra serializer key list for NMS entities (empty: defaults only)
nms_keys = []
def _nms(db, dbid, row, keys, **kwargs):
    """Provisioning dict for a network monitoring service (NMS).

    Walks the tap interfaces of the NMS and reports, for each tap entity,
    the pair of services sitting on either side of it as "north:south".
    """
    j = _entity(db, dbid, row, entity_keys)
    j.update(_network_services_common(db, dbid, row))
    service_pairs = []
    # renamed loop variable: the original `int` shadowed the builtin
    for iface in cloud_utils.get_next_service_interface(db, dbid):
        if iface["interfacetype"] != "tap":
            continue
        # the tap entity sits on the other end of the interface
        if iface["beginserviceentityid"] == dbid:
            tap_dbid = iface["endserviceentityid"]
        else:
            tap_dbid = iface["beginserviceentityid"]
        tap = entity_utils.read_partial_entity(db, tap_dbid)
        if not tap or tap["entitytype"] != "tap_network_service":
            continue
        north_service = south_service = None
        for intc in cloud_utils.get_next_service_interface(db, tap_dbid):
            if intc["interfacetype"] == "tap":
                continue
            # skip interfaces pointing back at the NMS itself
            # (bug fix: the second test previously re-checked beginserviceentityid)
            if intc["beginserviceentityid"] == dbid or intc["endserviceentityid"] == dbid:
                continue
            if intc["beginserviceentityid"] == tap_dbid:
                svc_dbid = intc["endserviceentityid"]
            else:
                svc_dbid = intc["beginserviceentityid"]
            # first hit is the north service, second the south service
            if not north_service:
                north_service = entity_utils.read_partial_entity(db, svc_dbid)
            else:
                south_service = entity_utils.read_partial_entity(db, svc_dbid)
        if not north_service or not south_service:
            continue
        service_pairs.append({"services": north_service["name"] + ":" + south_service["name"]})
    j.update({"service_pairs": service_pairs})
    return j, None
def _post_rest_get_function_nms(db, dbid, rest_me, rest=None):
    """Post-REST-GET hook for NMS services; currently a no-op placeholder."""
    pass
# extra serializer key list for compute entities (empty: defaults only)
compute_keys = []
def _compute(db, dbid, row, keys, **kwargs):
    """Provisioning dict for a compute service, including SSH keys, user
    data and any attached serverfarm names.

    :return: (dict, None); ({}, None) on error, matching the project's
             serializer convention.
    """
    try:
        j = _entity(db, dbid, row, entity_keys)
        entity_utils.add_ssh_keys(db, dbid, j)
        add_user_data(db, dbid, j)
        j.update(_network_services_common(db, dbid, row))
        j.update(_network_services_interfaces(db, dbid, row))
        # attached serverfarm names, if any
        farms = [item["name"]
                 for item in cloud_utils.entity_attach(db, dbid, entitytype="serverfarm")]
        if farms:
            j.update({"serverfarm": farms})
        return j, None
    except Exception:
        cloud_utils.log_exception(sys.exc_info())
        return {}, None
def _post_rest_get_function_compute(db, dbid, rest_me, rest=None):
    """Post-REST-GET hook for compute services; currently a no-op placeholder."""
    pass
# extra serializer key list for storage entities (empty: defaults only)
storage_keys = []
def _storage(db, dbid, row, keys, **kwargs):
    """Provisioning dict for a storage service."""
    result = _entity(db, dbid, row, entity_keys)
    for part in (_network_services_common(db, dbid, row),
                 _network_services_interfaces(db, dbid, row)):
        result.update(part)
    return result, None
def _post_rest_get_function_storage(db, dbid, rest_me, rest=None):
    """Post-REST-GET hook for storage services; currently a no-op placeholder."""
    pass
# extra serializer key list for IPS entities (empty: defaults only)
ips_keys = []
def _ips(db, dbid, row, keys, **kwargs):
    """Provisioning dict for an intrusion prevention service (IPS)."""
    result = _entity(db, dbid, row, entity_keys)
    for part in (_network_services_common(db, dbid, row),
                 _network_services_interfaces(db, dbid, row)):
        result.update(part)
    return result, None
def _tap_service_pre_db_create(db, options, mode=None, parent_row=None):
if not parent_row:
return "Invalid parent entity id"
current_interface = entity_utils.read_full_entity(db, options.get("network_interface", 0))
if not current_interface:
return "Invalid interface row id"
north_service = entity_utils.read_full_entity(db, current_interface.get("beginserviceentityid", 0))
if not north_service:
return "Invalid interface row id - no begin service row id"
if north_service["entitytype"] == "tap_network_service":
return "Invalid interface row id - already a tapped interface"
south_service = entity_utils.read_full_entity(db, current_interface.get("endserviceentityid", 0))
if not south_service:
return "Invalid interface row id - no begin service row id"
if south_service["entitytype"] == "tap_network_service":
return "Invalid interface row id - already a tapped interface"
def _tap_service_post_db_create(db, dbid, options, mode=None, **kwargs):
    """Splice a newly created tap service into an existing service interface.

    The original interface A--B becomes A--tap--B: a new interface row is
    created for the tap->B leg, two service ports are added on the tap, and
    the existing interface/port rows are repointed at the tap.  Only runs
    when mode == "create".
    """
    try:
        if not mode or mode != "create":
            return 0
        current_interface = entity_utils.read_full_entity(db, options.get("network_interface", 0))
        # create the new interface row (tap -> south service leg)
        new_interface = {}
        new_interface["entitytype"] = "network_interface"
        new_interface["name"] = entity_utils.create_entity_name(db, new_interface["entitytype"])
        new_interface["parententityid"] = current_interface["parententityid"]
        new_interface_dbid = cloud_utils.update_or_insert(db, "tblEntities", new_interface, None,
                                                          child_table=entities[new_interface["entitytype"]].child_table)
        new_interface = entity_utils.read_full_entity(db, new_interface_dbid)
        if not current_interface or not new_interface:
            return
        # NOTE(review): north_port/south_port are read but never used below
        north_port = entity_utils.read_full_entity(db, current_interface.get("beginserviceportid", 0))
        south_port = entity_utils.read_full_entity(db, current_interface.get("endserviceportid", 0))
        north_service = entity_utils.read_full_entity(db, current_interface.get("beginserviceentityid", 0))
        # add north port for tap
        config = {}
        config["name"] = north_service["name"]
        config.update({"destinationserviceentityid": north_service["id"],
                       "finaldestinationserviceentityid": north_service["id"],
                       "serviceinterfaceentityid": options.get("network_interface", 0),
                       "entitytype": "service_port",
                       "interfaceportindex": current_interface["interfaceindex"],
                       "parententityid": dbid})
        north_pid = cloud_utils.update_or_insert(db, "tblEntities", config, None, child_table="tblServicePorts")
        south_service = entity_utils.read_full_entity(db, current_interface.get("endserviceentityid", 0))
        # add south port for tap
        config = {}
        config["name"] = south_service["name"]
        config.update({"destinationserviceentityid": south_service["id"],
                       "finaldestinationserviceentityid": south_service["id"],
                       "serviceinterfaceentityid": new_interface["id"],
                       "entitytype": "service_port",
                       "interfaceportindex": new_interface["interfaceindex"],
                       "parententityid": dbid})
        south_pid = cloud_utils.update_or_insert(db, "tblEntities", config, None, child_table="tblServicePorts")
        # Update name only in the north port -- while keeping the destination service id
        config = {"name": options["name"]}
        cloud_utils.update_or_insert(db, "tblEntities", config, {"id": current_interface["beginserviceportid"]})
        # update port with new name and new interface id for south service -- while keeping the destination service id
        config = {"name": options["name"], "serviceinterfaceentityid": new_interface["id"]}
        cloud_utils.update_or_insert(db, "tblEntities", config, {"id": current_interface["endserviceportid"]},
                                     child_table="tblServicePorts")
        # rewire: the new interface runs tap (south port) -> old end service
        new_interface["beginserviceentityid"] = dbid
        new_interface["beginserviceportid"] = south_pid
        new_interface["entitystatus"] = current_interface["entitystatus"]
        new_interface["endserviceentityid"] = current_interface["endserviceentityid"]
        new_interface["endserviceportid"] = current_interface["endserviceportid"]
        # the current interface now terminates at the tap (north port)
        current_interface["endserviceentityid"] = dbid
        current_interface["endserviceportid"] = north_pid
        cloud_utils.update_or_insert(db, "tblEntities", current_interface, {"id": current_interface["id"]},
                                     child_table="tblServicesInterfaces")
        cloud_utils.update_or_insert(db, "tblEntities", new_interface, {"id": new_interface["id"]},
                                     child_table="tblServicesInterfaces")
    except:
        cloud_utils.log_exception(sys.exc_info())
def _post_rest_get_function_ips(db, dbid, rest_me, rest=None):
    """Post-REST-GET hook for IPS services; currently a no-op placeholder."""
    pass
def _service_pre_db_create(db, options, mode=None, **kwargs):
    """Generic pre-create hook for services; no validation required here."""
    pass
def _network_service_post_db_create(db, dbid, options, mode=None, **kwargs):
    """Post-create hook for network services: persist metadata key/values."""
    # remove_and_add_attached_entities(db, dbid, options, mode)
    update_db_metadata_keyvalue(db, dbid, options)
def _port_post_db_create(db, dbid, options, mode=None, **kwargs):
    """Post-create hook for service ports; currently a no-op placeholder."""
    # remove_and_add_attached_entities(db, dbid, options, mode)
    pass
def remove_and_add_attach_to_entities(db, dbid, options, mode=None):
    """Reconcile tblAttachedEntities so that dbid is attached to exactly the
    row ids listed in options["attach_to"]: rows not in the list are deleted,
    missing ones are inserted.  No-op when the key is absent or not a list.
    """
    try:
        if not "attach_to" in options:
            return
        if not isinstance(options["attach_to"], list):
            LOG.critical(
                _("update attach to entities for dbid %s is not a list: %s" % (dbid, str(options["attach_to"]))))
            return
        current_rows = db.get_multiple_row("tblAttachedEntities", "AttachedEntityId = '%s'" % dbid)
        # work on a copy — the original code mutated the caller's options list
        desired_row_ids = list(options["attach_to"])
        for row in current_rows:
            if row["tblEntities"] in desired_row_ids:
                # already attached: keep the row, drop it from the to-add set
                desired_row_ids.remove(row["tblEntities"])
            else:
                db.execute_db("DELETE FROM tblAttachedEntities WHERE id='%s'" % row["id"])
        for row_id in desired_row_ids:
            cloud_utils.insert_db(db, "tblAttachedEntities", {"tblentities": row_id,
                                                              "attachedentityid": dbid,
                                                              "attachedentitytype": options["entitytype"]})
    except Exception:
        cloud_utils.log_exception(sys.exc_info())
def remove_and_add_attached_entities(db, dbid, options, mode=None, entity_row=None):
try:
if not "attached_entities" in options:
return
if not isinstance(options["attached_entities"], list):
LOG.critical(
_("update attached entities for dbid %s is not a list: %s" % (dbid, str(options["attached_entities"]))))
return
for ent in options["attached_entities"]:
if not isinstance(ent, dict):
LOG.critical(_("update attached entities item for dbid %s is not a dict: %s" % (dbid, str(ent))))
return
if "entitytype" in ent:
# this is a special case! externla network may be attached to only one type of network
if "entitytype" in options and options["entitytype"] == "externalnetwork":
net_row = cloud_utils.lower_key(
db.get_row_dict("tblAttachedEntities", {"tblEntities": dbid}, order="ORDER BY id LIMIT 1"))
if net_row and entity_row and net_row["attachedentitytype"] == "virtual_network":
_ext_remove_priors(db, dbid, entity_row, net_row)
if ent["entitytype"] == "virtual_network":
delete_entity = "slice_attached_network"
elif ent["entitytype"] == "slice_attached_network":
delete_entity = "virtual_network"
else:
delete_entity = ent["entitytype"]
db.execute_db("DELETE FROM tblAttachedEntities "
" WHERE ( tblEntities = '%s' AND AttachedEntityType = '%s' ) " % (
dbid, delete_entity))
if ent["entitytype"] != "ssh_user":
while True:
tmp = db.execute_db("SELECT * FROM tblAttachedEntities "
" WHERE ( tblEntities = '%s' AND AttachedEntityType = '%s' ) LIMIT 1" % (
dbid, ent["entitytype"]))
if not tmp:
break
tmp = tmp[0]
db.execute_db("DELETE FROM tblAttachedEntities WHERE (id = '%s' ) " % tmp["id"])
db.execute_db("UPDATE tblEntities | |
<gh_stars>0
import pandas as pd
import os, sys, inspect
import xlsxwriter
from math import floor, log10, isnan
# Add the directory three levels above this file to sys.path so that shared
# modules (e.g. SQL_functions) can be imported when this file is run directly
# rather than installed as a package.
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
parentdir = os.path.dirname(parentdir)
parentdir = os.path.dirname(parentdir)
sys.path.insert(0, parentdir)
class PreGenerateDataManipulation:
"""This file controls the data manipulation processes occurring prior to generating latex files.
Raw data -> Processed data."""
def __init__(self, data_xml_file):
"""The main init function.
1. data_xml_file = the raw xml data file produced by TargetLynx. This must be manually placed in the datafiles
2. folder to be read by Rover. The name of it is passed to init.
3. raw_xml_data_frame = the first DataFrame produced from the xml file. At this point, the data is not
manipulated in any way.
4. percentage_data_frame = changes: analytical concentration converted to percentage concentration.
5. blank_data_frame = changes: only information from blanks.
6. qc_data_frame = changes: only information from standards.
7. samples_and_dilutions_data_frame = changes: only information from samples and their dilutions.
8. best_recovery_qc_data_frame = changes: all the standard data has been analyzed, and the best recoveries for
each analyte have been selected for the new data frame, which consists of one samples' worth of rows.
9. min_value_blank_data_frame = changes: one axis data frame with the minimum value of each analyte from the
blank_data_frame.
10. sample_dilutions_data_frame = changes: only information from dilutions
11. samples_data_frame = changes: samples with out of range values switched out with the appropriate dil."""
print('UV selected')
self.data_xml_file = data_xml_file
self.raw_xml_data_frame = pd.DataFrame()
self.percentage_data_frame = pd.DataFrame()
self.blank_data_frame = pd.DataFrame()
self.qc_data_frame = pd.DataFrame()
self.samples_and_dilutions_data_frame = pd.DataFrame()
self.best_recovery_qc_data_frame = pd.DataFrame()
self.min_value_blank_data_frame = pd.DataFrame()
self.sample_dilutions_data_frame = pd.DataFrame()
self.samples_data_frame = pd.DataFrame()
self.condensed_samples_data_frame = pd.DataFrame()
self.samples_list_data_frame = pd.DataFrame()
self.duplicate_dataframe = pd.DataFrame()
self.unique_sample_id_list = []
# Range check dictionary - these are the high and low values for our curve. If area is smaller than the first
# number, or larger than the second one, it is out of range. If that happens, the value needs to be swapped with
# the corresponding value from the dilution.
self.range_checker_dictionary = {1: [3065, 44880, 'ibuprofen'],
2: [160, 170000, 'CBDV'],
3: [150, 10000, 'CBDVA'],
4: [259, 40000, 'THCV'],
5: [25, 5000, 'CBGVA'], # copying THCV for now
6: [200, 200995, 'CBD'],
7: [200, 100000, 'CBG'],
8: [100, 50000, 'CBDA'],
9: [100, 68050, 'CBN'],
10: [100, 50000, 'CBGA'],
11: [100, 15440, 'THCVA'],
12: [200, 200000, 'd9_THC'],
13: [200, 200000, 'd8_THC'],
14: [250, 25000, 'CBL'],
15: [100, 28000, 'CBC'],
16: [100, 50000, 'CBNA'],
17: [100, 200000, 'THCA'],
18: [50, 5000, 'CBLA'],
19: [50, 6000, 'CBCA']}
# This DataFrame is so we can join the calibration curve data to the samples DataFrame in order to flag the
# samples for being over the curve.
self.over_curve_data = {'id17': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
'max_value': [44880, 170000, 10000, 40000, 5000, 200995, 100000, 50000, 68050,
50000, 15440, 200000, 200000, 25000, 28000, 50000, 200000,
5000, 6000]}
self.over_curve_data_frame = pd.DataFrame(self.over_curve_data, columns=['id17', 'max_value'])
# This is for development - allows me to see the full DataFrame when i print to the console, rather than a
# truncated version. This is useful for debugging purposes and ensuring that methods are working as intended.
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', -1)
    def data_manipulation_controller(self):
        """The main controller function. To run the methods that make up this class, this function is called.

        The call order matters: each step consumes DataFrames produced by the
        previous ones (read file -> percentages -> split -> QC/blank reduction
        -> curve join/flagging -> dilution swap -> GUI/export frames).
        """
        self.collect_data_from_xml_file()
        self.convert_analytical_concentration_to_percentage_concentration()
        self.split_into_blank_qc_and_sample_data_frame()
        self.combine_qc_into_one_data_set_with_highest_recovery_values()
        self.combine_blanks_into_one_data_set_with_lowest_percentage_concentration_values()
        self.join_over_curve_df_to_samples_df()
        self.assign_high_flag_to_sample_data()
        self.split_samples_data_frame_into_dilutions_and_samples()
        self.swap_out_out_of_range_values()
        self.create_list_of_unique_samples()
        self.create_condensed_sample_list_data_frame_for_gui()
        # significant-figure limiting is currently disabled
        # self.limit_sig_figs_in_dataframes()
        self.add_dup_data_to_excel_for_tracking()
def collect_data_from_xml_file(self):
"""Reads the xml data, saves it to a Pandas DataFrame.
1. id15: the id in the batch. First standard/blank/sample is 1, second is 2, etc.
2. sampleid: the sample number, or the name of the standard/blank.
3. id17: the id of the particular analyte for the row.
4. name20: the name of the particular analyte for the row.
5. initamount: the mass, in grams, of the sample.
6. area: the peak area of the analyte from the chromatogram.
7. analconc: the concentration calculated by TargetLynx. This will improve after analyst peak integration.
8. percrecovery: the percentage recovery of ibuprofen.
9. type: Blank, QC, or analyte.
there is a try/except statement that will catch an incorrect path/filename."""
try:
raw_xml_data = pd.read_excel(self.data_xml_file,)
except FileNotFoundError:
print("ERROR: XML FILE NOT FOUND")
print(str(self.data_xml_file) + " cannot be found. Either the path to the xml_data_files folder is " +
"wrong, or the file doesn't exist.")
print("SCRIPT EXITING.")
sys.exit()
self.raw_xml_data_frame = pd.DataFrame(raw_xml_data,
columns=['id15',
'sampleid',
'id17',
'name20',
'initamount',
'area',
'analconc',
'percrecovery',
'type']
)
def convert_analytical_concentration_to_percentage_concentration(self):
"""converts the analytical concentration to a percent concentration. Saves as a new DataFrame."""
self.percentage_data_frame = self.raw_xml_data_frame
self.percentage_data_frame['percentage_concentration'] = self.percentage_data_frame['analconc']/10000
def split_into_blank_qc_and_sample_data_frame(self):
"""splits the percentage DataFrame into blank, qc, and sample DataFrames, based on the 'type' column."""
self.blank_data_frame = self.percentage_data_frame[self.percentage_data_frame.type == "Blank"]
self.qc_data_frame = self.percentage_data_frame[self.percentage_data_frame.type == "QC"]
self.samples_and_dilutions_data_frame = self.percentage_data_frame[self.percentage_data_frame.type == "Analyte"]
def combine_qc_into_one_data_set_with_highest_recovery_values(self):
"""groups the qc_data_frame and creates a new DataFrame with only the best recoveries for each analyte.
This code was taken from the following StackOverflow question: https://stackoverflow.com/questions/31361599.
Specifically, the answer given by <NAME>. Some modifications were made.
first, a new column is created in the qc_data_frame that measures the distance from 100 (%) in absolute terms
for each recovery value. Then, we do the groupby and transformation based on the indexes with the smallest
distance from 100 %. We make our 'best recovery' DataFrame based on that. The final DataFrame that is spat out
has unnecessary columns removed.
Note: almost how we want this function. Initially, we had max recovery. Now, we have closest recovery to 100.
What we want in the end is closest recovery to 100, but not over 100. """
self.qc_data_frame['distance_from_100'] = abs(self.percentage_data_frame['percrecovery']-100)
tem = self.qc_data_frame.groupby(['id17'])['distance_from_100'].transform(min) == self.qc_data_frame['distance_from_100']
self.best_recovery_qc_data_frame = self.qc_data_frame[tem]
self.best_recovery_qc_data_frame.reset_index(drop=True, inplace=True)
self.best_recovery_qc_data_frame = self.best_recovery_qc_data_frame[['id15',
'id17',
'sampleid',
'name20',
'area',
'percrecovery']].copy()
def combine_blanks_into_one_data_set_with_lowest_percentage_concentration_values(self):
""" produces a single axis data frame with one min value for each analyte, with the analytes being identified by
id17 (could also do name20 here). To access the min value for each analyte, use df.iloc[n], with n= row. """
self.min_value_blank_data_frame = self.blank_data_frame.groupby(['name20'])['percentage_concentration'].min()
def join_over_curve_df_to_samples_df(self):
""" joins the upper limits on areas for given analytes (based on calibration curve) to the samples DataFrame.
These values are used to assign flags that indicate whether a given peak area is out of calibration range."""
self.samples_and_dilutions_data_frame = pd.merge(left=self.samples_and_dilutions_data_frame,
right=self.over_curve_data_frame,
how='left',
left_on='id17',
right_on='id17')
def assign_high_flag_to_sample_data(self):
"""assigns a flag to indicate whether a peak area is over the calibration curve range. the over_curve column
will have a blank string if the area is not over, and will say 'over' if the area is over."""
self.samples_and_dilutions_data_frame = self.samples_and_dilutions_data_frame.assign(over_curve='')
self.samples_and_dilutions_data_frame.loc[self.samples_and_dilutions_data_frame['area'] >
self.samples_and_dilutions_data_frame['max_value'],
'over_curve'] = 'over'
def split_samples_data_frame_into_dilutions_and_samples(self):
"""Splits the samples DataFrame into two - one containing the dilutions, and one containing the undiluted
samples. This allows us to swap out the out of calibration range values by joining the two DataFrames together
conditional on the over_curve field. Works by assuming that any sample id with a length larger than 9 is a dil,
(xxxxxx-xx x/xx) and any with a length less than or equal to nine (xxxxxx-xx) is a sample."""
self.sample_dilutions_data_frame = self.samples_and_dilutions_data_frame[self.samples_and_dilutions_data_frame.sampleid.str.len() > 9]
self.samples_data_frame = self.samples_and_dilutions_data_frame[self.samples_and_dilutions_data_frame.sampleid.str.len() <= 9]
    def swap_out_out_of_range_values(self):
        """Replace out-of-range sample values with in-range dilution values.

        For each sample row flagged 'over', find the dilution row with the
        same analyte (id17) whose sampleid contains this sample's sampleid,
        copy its percentage concentration over, and change the flag to
        'dil. Cor.'. If no matching dilution exists, flag the row
        'Out of range, no dil. '. Remaining NaNs are zero-filled at the end.
        """
        for index, row in self.samples_data_frame.iterrows():
            if row['over_curve'] == 'over':
                # matching dilution: same analyte, sampleid contains this sample's id
                dilution = self.sample_dilutions_data_frame.loc[(self.sample_dilutions_data_frame['id17'] == row['id17'])
                                                                & (self.sample_dilutions_data_frame['sampleid'].str.contains(row['sampleid']))]
                try:
                    # NOTE(review): positional iloc[0, 9] assumes column 9 is
                    # 'percentage_concentration' -- fragile if column order
                    # changes upstream; confirm before reordering columns.
                    self.samples_data_frame.loc[index, 'percentage_concentration'] = dilution.iloc[0, 9]
                    #self.samples_data_frame.loc[index, 'over_curve'] = 'Corrected: new area = ' + str(dilution.iloc[0, 5])
                    self.samples_data_frame.loc[index, 'over_curve'] = 'dil. Cor.'
                except IndexError:
                    # no dilution row matched -- leave value, mark as uncorrectable
                    self.samples_data_frame.loc[index, 'over_curve'] = 'Out of range, no dil. '
        self.samples_data_frame.fillna(0, inplace=True)
def create_condensed_sample_list_data_frame_for_gui(self):
"""creates the data frame | |
self.solver.solve(N, self.solver.dynamics_local, x0=x0, u0=u0)
self.tti, self.F, self.f = ret
return ret
def compute_tti(self, x):
"""
Returns time to impact in seconds
"""
return self.dynamics_global.get_time_to_impact(x)[0]
    def run(self, x0):
        """ Execute the controller for a given system
        The controller is given through the gain matrices F.
        x and u are out variables that will contain the state
        and action trajectories

        Returns (t, x, u): time stamps, state trajectory (N+1 x D_s) and
        action trajectory (N x D_a), where N is the predicted number of
        steps until ground impact.
        """
        dt = self.dynamics_global.DT
        framerate = self.dynamics_global.FRAMERATE
        # print some analytical information about example
        t_n = self.compute_tti(x0)
        print "At what time t is ball on ground: %f" % (t_n)
        tdt_n = t_n/dt
        tf = int(round (tdt_n))
        print "After how many steps (according to dt) N is ball on ground: %f" % (tdt_n, )
        # linear extrapolation of x/z position at impact
        # NOTE(review): assumes state layout [x, xdot, ..., z(6), zdot(7), ...] -- confirm
        x_n = map (lambda t: x0[1,0]*t + x0[0,0], (t_n, ))
        print "At what COORDINATE x is ball on ground: %f" % x_n[0]
        z_n = map (lambda t: x0[7,0]*t + x0[6,0], (t_n, ))
        print "At what COORDINATE z is ball on ground: %f" % z_n[0]
        N_example = tf # this will be used to show example
        # logging
        # NOTE(review): D_s and D_a (state/action dims) are module-level
        # names defined outside this view -- confirm.
        x = np.zeros( (N_example+1,D_s) )
        u = np.zeros( (N_example,D_a) )
        t = np.arange(0, x.shape[0]+1, dt)
        t = t[:x.shape[0]]
        x[0, :] = x0.T
        for i in range(1,N_example+1):
            # compute optimal control gains
            # gains are indexed by remaining steps to impact
            tti = self.compute_tti(x[i-1,:])
            step = int(round(tti*framerate))-1
            F = self.solver.F[step]
            f = self.solver.f[step]
            u[i-1,:] = np.dot(F, x[i-1,:].T ).T
            u[i-1,:] += f.reshape(u[i-1].shape)
            # if Flog is None, assume u to be an input variable
            x[i,:] = self.dynamics_global.step(x[i-1:i,:].T, u[i-1:i,:].T, noise=False).reshape((-1,))
            t[i] = i*dt
        return t, x, u,
def build_gain_folder_name(self, output_root=None):
if output_root is None:
output_root = os.path.join(settings_root, self.solver.name)
if self.terminal_velocity > 0.:
output_root += "V%d" % self.terminal_velocity
output_root += "Dim%d" % self.dynamics_global.dim
output_root += "M%.2f" % self.solver.dynamics_local.mass
output_root += "R%.2f" % self.solver.dynamics_local.r
output_root += "C%.2f" % self.solver.dynamics_local.c
framerate = self.dynamics_global.FRAMERATE
fr_folder = "%.1f" % framerate
cur_output_folder = os.path.join(output_root, fr_folder)
return cur_output_folder
@staticmethod
def _read_gains_from_folder(path):
"""
Returns a list of tuples (tti, gains)
"""
g = []
for _p in os.listdir(path):
p = os.path.join(path, _p)
if os.path.splitext(_p)[-1] in [".txt", ".yml", ".pdf"] \
or os.path.isdir(p):
continue
g.append ((float(_p), np.loadtxt(p)))
return g
    def load(self):
        """Load cached gains for this configuration, solving first if absent.

        If the gain folder does not exist yet, solve and export; otherwise
        read the multiplicative gains (Fm/), constant gains (fc/) and the
        cost-info yaml, verify the cost weights match this instance, sort
        everything by time-to-impact and store tti/F/f on self.
        """
        gain_root = self.build_gain_folder_name()
        if not os.path.exists(gain_root):
            print ( "[SOCBallCatching] Solving %s..." % self.solver.name )
            res = self.solve()
            self.export()
            return res
        print ( "[SOCBallCatching] Loading gains from %s" % gain_root)
        # LOAD multiplicative (L) and constant gains (l) from file
        F = []
        f = []
        yml_path = os.path.join(gain_root, "cost_info.yml")
        with open(yml_path, "r") as yf:
            # NOTE(review): yaml.load without an explicit Loader can execute
            # arbitrary objects from untrusted files -- prefer yaml.safe_load
            # for this plain-dict file.
            y = yaml.load(yf)
            # sanity check: cached gains must match this instance's cost weights
            for v in ["terminal_distance", "terminal_velocity", "control_effort"]:
                assert self.__dict__[v] == y[v], \
                    "%s has different values. params:%f != yaml:%f" % (v, self.__dict__[v], y[v])
        for _f in os.listdir(gain_root):
            if _f == "Fm":
                F = self._read_gains_from_folder(os.path.join(gain_root, _f))
            elif _f == "fc":
                f = self._read_gains_from_folder(os.path.join(gain_root, _f))
        if len(f) == 0:
            # NOTE(review): fallback builds an ndarray of zeros, yet below it
            # is sorted/indexed like a list of (tti, gain) tuples -- confirm
            # this path is ever taken with the intended result.
            print ("[LQRStrategy] WARN: f is empty")
            f = np.zeros( (len(F), self.dynamics_global.action_dim ))
        # sort by tti
        F = sorted(F, key=lambda x: x[0])
        f = sorted(f, key=lambda x: x[0])
        self.tti = [ _f[0] for _f in F]
        self.F = np.asarray( [ _f[1] for _f in F] )
        self.f = np.asarray( [ _f[1] for _f in f] )
        return self.tti, self.F, self.f
    def export(self, output_root=None):
        """Write solved gains, system matrices and cost info to disk.

        Recreates the configuration's gain folder from scratch, then writes:
        A/B/C system matrices, one file per step in Fm/ (multiplicative
        gains) and fc/ (constant gains) named by steps-to-go, and a
        cost_info.yml recording the cost weights for later validation.
        """
        cur_output_folder = self.build_gain_folder_name(output_root)
        framerate = self.dynamics_global.FRAMERATE
        # prepare
        import shutil
        try:
            # start from a clean folder; ignore "does not exist yet"
            shutil.rmtree(cur_output_folder)
        except:
            pass # don't care
        os.makedirs(cur_output_folder)
        print "Writing to %s" % cur_output_folder
        # save system matrices
        print "Writing system matrices"
        np.savetxt(os.path.join(cur_output_folder, "A.txt"), self.dynamics_global.Adt)
        np.savetxt(os.path.join(cur_output_folder, "B.txt"), self.dynamics_global.Bdt)
        np.savetxt(os.path.join(cur_output_folder, "C.txt"), self.dynamics_global.Cdt)
        # write matrices
        print ("Writing gain matrices")
        os.makedirs(os.path.join(cur_output_folder, "Fm"))
        os.makedirs(os.path.join(cur_output_folder, "fc"))
        for i, (F, f) in enumerate(zip(self.solver.F, self.solver.f)):
            # NOTE(review): on Python 2 this is true division only if
            # framerate is a float -- confirm FRAMERATE is never an int
            stg = i / framerate # steps to go
            Fpath = os.path.join(cur_output_folder, "Fm", ("%.5f" % stg).zfill(10))
            np.savetxt(Fpath, F)
            fpath = os.path.join(cur_output_folder, "fc", ("%.5f" % stg).zfill(10))
            np.savetxt(fpath, f)
        # write cost function info file
        print ("Writing cost info")
        cost_params = {'terminal_distance': self.terminal_distance,
                       'terminal_velocity': self.terminal_velocity,
                       'control_effort': self.control_effort}
        with open(os.path.join(cur_output_folder, "cost_info.yml"), 'w') as outfile:
            outfile.write(yaml.dump(cost_params, default_flow_style=True))
    def analyze(self, output_root):
        """Check gain symmetry across x/z and plot gains vs. time to impact.

        Verifies that position gains for x and z are symmetric (hard
        requirement: returns False otherwise) and reports asymmetries in
        velocity/acceleration/constant gains, then saves two PDF plots and
        the gain traces as text files under *output_root*.
        """
        tti = self.solver.tti
        F = self.solver.F
        f = self.solver.f
        # per-step traces of individual gain entries: [tti, gain] rows
        ux = np.zeros((len(F), 2))
        udotx = np.zeros((len(F), 2))
        udotx_agent = np.zeros((len(F), 2))
        uddotx = np.zeros((len(F), 2))
        uddotx_agent = np.zeros((len(F), 2))
        uz = np.zeros((len(F), 2))
        udotz = np.zeros((len(F), 2))
        udotz_agent = np.zeros((len(F), 2))
        uddotz = np.zeros((len(F), 2))
        uddotz_agent = np.zeros((len(F), 2))
        u_const = np.zeros((len(f), 3))
        yp = np.zeros((len(F), 2))
        yw = np.zeros((len(F), 2))
        xdot_symmetric=True
        xddot_symmetric=True
        uconst_symmetric=True
        i=0
        for name, M, m in sorted(zip(tti, F, f), key=lambda x: x[0]):
            #print M[0,0]+M[0,6]
            eps = 1e-10
            # NOTE(review): the hard-coded indices below assume a specific
            # state ordering (ball x/z, agent x/z) -- confirm against the
            # dynamics model before changing.
            #if M[0,0] + M[0,6] < eps and M[1,0] + M[1,6] < eps:
            if M[0,0] + M[0,9] < eps and M[1,6] + M[1,12] < eps:
                pass
            else:
                print "%s x and z are NOT symmetric" % name
                return False
            if M[0,1] + M[0,10] < eps and M[1,7] + M[1,13] < eps:
                pass
            elif xdot_symmetric:
                print "%s xdot and zdot are NOT symmetric" % name
                xdot_symmetric=False
            # isn't that wrong? M[1,11] is the influence of s_x on u_z!
            # but we want s_z on u_z
            if (M[0,2] + M[0,11]) - (M[1,8] + M[1,14]) < eps:
                pass
            elif xddot_symmetric:
                print "%s xddot and zddot are NOT symmetric" % name
                xddot_symmetric=False
            if np.abs(np.abs(m[0]) - np.abs(m[1])) < eps:
                pass
            elif uconst_symmetric:
                print "%s uconst_x and uconst_z are NOT symmetric" % name
                uconst_symmetric=False
            ux[i, 0] = float(name)
            ux[i, 1] = M[0,0]
            udotx[i, 0] = float(name)
            udotx[i, 1] = M[0,1]
            uddotx[i, 0] = float(name)
            uddotx[i, 1] = M[0,2]
            udotx_agent[i, 0] = float(name)
            udotx_agent[i, 1] = M[0,10]
            uddotx_agent[i, 0] = float(name)
            uddotx_agent[i, 1] = M[0,11]
            uz[i, 0] = float(name)
            uz[i, 1] = M[1,6]
            udotz[i, 0] = float(name)
            udotz[i, 1] = M[1,7]
            uddotz[i, 0] = float(name)
            uddotz[i, 1] = M[1,8]
            udotz_agent[i, 0] = float(name)
            udotz_agent[i, 1] = M[1,13]
            uddotz_agent[i, 0] = float(name)
            uddotz_agent[i, 1] = M[1,14]
            yp[i,0] = float(name)
            yp[i,1] = M[0,2]
            yw[i,0] = float(name)
            yw[i,1] = M[0,3]
            u_const[i,0] = float(name)
            u_const[i,1] = m[0]
            u_const[i,2] = m[1]
            i+=1
        # NOTE(review): uses module-level `dynamics` (not self.dynamics_global)
        # -- confirm this is intended.
        dt = dynamics.DT
        print r"dt = %f" % dt
        #print ux[:,1]
        # first figure: raw gains over time to impact
        plt.figure(figsize=(5,3))
        #plt.subplot(2,1,1)
        plt.plot(ux[:,0], ux[:,1], lw=3.0, label="$k_p$")
        if xdot_symmetric:
            plt.plot(udotx[:,0], udotx[:,1], lw=3.0, label=r"$k_d$")
        else:
            plt.plot(udotx[:,0], udotx[:,1], lw=3.0, label=r"$k_d \dot{x}_b$")
            plt.plot(udotx_agent[:,0], udotx_agent[:,1], lw=3.0, label=r"$k_d \dot{x}_a$")
        if xddot_symmetric:
            plt.plot(uddotx[:,0], uddotx[:,1], lw=3.0, label=r"$k_a$")
        else:
            plt.plot(uddotx[:,0], uddotx[:,1], lw=3.0, label=r"$k_a \dot{x}_b$")
            plt.plot(uddotx_agent[:,0], uddotx_agent[:,1], lw=3.0, label=r"$k_a \dot{x}_a$")
        # uconst
        #if np.max(u_const[:,1:]) > eps:
        #    plt.plot(u_const[:,0], np.linalg.norm(u_const[:,1:], axis=1), lw=3.0, label="$k_{c}$")
        #else:
        #    print ("  uconst is below threshold")
        #plt.plot(uz[:,0], uz[:,1], lw=2.0, label="uz")
        plt.xlabel(r"\textrm{time to impact}")
        plt.legend()
        #plt.legend(fontsize=24)
        plt.ylim([0, np.max(ux[:,1])*1.1])
        plt.xlim([0, 5.])
        # NOTE(review): uses module-level `solver` here, not self.solver -- confirm.
        fulltitle = r"%s $\Delta t = %.1f$, " % (solver.name, dt)
        cost_term_dct = {\
            'terminal_distance': r"w_\textrm{dist}",
            'terminal_velocity': r"w_\textrm{vel}",
            'control_effort': r"w_\textrm{ctrl}",
        }
        cost_info = {
            'terminal_distance': self.terminal_distance,
            'terminal_velocity': self.terminal_velocity,
            'control_effort': self.control_effort,
        }
        if cost_info != None:
            cost_term_dct = dict(reversed(cost_term_dct.items()))
            fulltitle += " " + ", ".join([ "$"+cost_term_dct[k] + "=" + str(v)+"$" for k,v in cost_info.iteritems() if float(v) > 0])
        plt.title(fulltitle)
        distfigpath = os.path.join(output_root, "gains_lqr.pdf")
        plt.tight_layout()
        plt.savefig(distfigpath, format='pdf')
        # dump raw traces so they can be re-plotted elsewhere
        np.savetxt(os.path.join(output_root, "_t.txt"), ux[:,0])
        np.savetxt(os.path.join(output_root,"_kp_t.txt"), ux[:,1])
        np.savetxt(os.path.join(output_root,"_kd_t.txt"), udotx[:,1])
        np.savetxt(os.path.join(output_root,"_ka_t.txt"), uddotx[:,1])
        # -----------------
        # now also consider time dependence
        # -----------------
        plt.figure(figsize=(5,3))
        plt.plot(ux[:,0], ux[:,1]*ux[:,0]*ux[:,0], lw=3.0, ls="-", label=r"$t^2 k_p$")
        if xdot_symmetric:
            plt.plot(udotx[:,0], udotx[:,1]*ux[:,0], lw=3.0, ls="-", label=r"$t k_d$")
        else:
            print "NOT SYMMETRIC"
            plt.plot(udotx[:,0], udotx[:,1]*ux[:,0], lw=3.0, ls="-", label=r"$k_d \dot{x}_b$*distance")
            plt.plot(udotx_agent[:,0], udotx_agent[:,1]*ux[:,0], lw=3.0, ls="-", label=r"$k_d \dot{x}_a$*distance")
        # FIXME: ignore acceleration
        #plt.plot(uddotx[:,0], uddotx[:,1]*uddotx[:,0], lw=3.0, label="$t k_{a}$")
        u_norm = u_const[:,1]
        if uconst_symmetric:
            plt.plot(u_const[:,0], u_norm, lw=3.0, label="$k_{c}$")
        else:
            u_norm = np.linalg.norm(u_const[:,1:], axis=1)
            plt.plot(u_const[:,0], u_norm, lw=3.0, label="$\Vert k_{c} \Vert_2$")
        #plt.legend(loc="lower right")#, fontsize=24)
        #plt.legend(loc="upper left", ncol=2)#, fontsize=24)
        plt.legend(loc="upper left", ncol=3)#, fontsize=24)
        plt.title(fulltitle)
        plt.xlabel(r"{time to impact (s)}")
        xlim = [0,5]
        #mx = np.max([np.max(u_norm[:xlim[1]]), np.max( ux[:xlim[1],1]*ux[:xlim[1],0]*ux[:xlim[1],0])])
        #plt.ylim([0, mx*1.1])
        #plt.ylim([0,3.])
        #plt.ylim([0,10.])
        plt.ylim([0,5.])
        plt.xlim(xlim)
        distfigpath = os.path.join(output_root, "gains_t_lqr.pdf")
        print ("Saving "+distfigpath)
        plt.tight_layout()
        plt.savefig(distfigpath, format='pdf')
# ====================================================================
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--solver", type=str.lower, choices=["lqr", "ilqr","ilqr_soccer",], default="lqr", help="solver")
parser.add_argument("--dynamics", type=str.lower, choices=["ideal", "drag",], default="ideal", help="solver")
| |
<filename>src/crl/devutils/tasks.py
from __future__ import print_function
import os
import shutil
import sys
from collections import namedtuple
from contextlib import contextmanager
from invoke import task
from invoke.main import program
from crl.devutils import (
versionhandler,
githandler,
packagehandler,
setuphandler,
changehandler,
devpihandler,
doccreator)
from crl.devutils.runner import run, Failure
from crl.devutils.versionhandler import (
VersionFileNotFound,
MultipleVersionFilesFound,
InvalidVersionValue,
FailedToCreateVersionFile,
FailedToWriteVersion,
FailedToWriteGithash)
from crl.devutils.changehandler import (
ChangeFileNotFound,
MultipleChangeFilesFound,
ChangeFileVersionCheckFailed)
from crl.devutils.packagehandler import (
MismatchOfTagAndVersionfileVersion,
MismatchOfTagAndSetupVersion,
VersionTagInWrongBranch)
from crl.devutils.githandler import UncleanGitRepository
from crl.devutils.doccreator import FailedToCreateDocs
__copyright__ = 'Copyright (C) 2019, Nokia'
MODULEDIR = os.path.dirname(os.path.abspath(__file__))
SETUP_TEMPLATE = """
import os
import imp
from setuptools import setup, find_packages
VERSIONFILE = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'src', 'crl', '{libname}', '_version.py')
def get_version():
return imp.load_source('_version', VERSIONFILE).get_version()
setup(
name='crl.{libname}',
version=get_version(),
author='n/a',
author_email='n/a',
description='n/a',
install_requires=[],
long_description='n/a',
license='n/a',
keywords='n/a',
url='n/a',
packages=find_packages('src'),
package_dir={'': 'src'},
namespace_packages=['crl'],
)
"""
GitRepoItem = namedtuple('GitRepoItem', ['dirname', 'repo', 'version'])
def get_verboserun(verbose):
    """Return a ``run`` wrapper that forces the given verbosity.

    The wrapper forwards all arguments to ``run`` but overrides any
    ``verbose`` keyword with the value captured here.
    """
    def verboserun(cmd, **kwargs):
        merged_kwargs = dict(kwargs, verbose=verbose)
        return run(cmd, **merged_kwargs)
    return verboserun
def create_packagehandler(libname=None,
                          pathtoversionfile=None,
                          verbose=False,
                          novirtualenv=False):
    """Build a fully wired PackageHandler.

    Wires version/setup/change/git handlers (all sharing a verbosity-aware
    runner) into a PackageHandler and attaches a DevpiHandler to it.
    """
    verboserun = get_verboserun(verbose)
    extra_kwargs = {}
    if novirtualenv:
        extra_kwargs['novirtualenv'] = True
    handler = packagehandler.PackageHandler(
        versionhandler=versionhandler.VersionHandler(
            libname=libname,
            pathtoversionfile=pathtoversionfile),
        setuphandler=setuphandler.SetupHandler(run=verboserun),
        changehandler=changehandler.ChangeHandler(run=verboserun),
        githandler=githandler.GitHandler(run=verboserun),
        **extra_kwargs)
    handler.set_devpihandler(devpihandler.DevpiHandler(
        run=verboserun, packagehandler=handler))
    return handler
def create_doccreator(verbose=False, robotdocs_root_folders='robotdocs'):
    """Return a DocCreator using a verbosity-aware runner."""
    runner = get_verboserun(verbose)
    return doccreator.DocCreator(robotdocs_root_folders=robotdocs_root_folders,
                                 run=runner)
def create_devpihandler(verbose=False):
    """Return a DevpiHandler wired with a runner and a PackageHandler."""
    runner = get_verboserun(verbose)
    handler = create_packagehandler(verbose=verbose)
    return devpihandler.DevpiHandler(run=runner, packagehandler=handler)
@task(default=True)
def help():
    """Show help, basically an alias for --help.

    This task can be removed once the fix to this issue is released:
    https://github.com/pyinvoke/invoke/issues/180
    """
    # docstring above is user-visible invoke help text; kept verbatim
    help_cmd = 'invoke -r {moduledir} --help'.format(moduledir=MODULEDIR)
    print(run(help_cmd).stdout)
@task
def tag_release(version, libname=None, push=True, verbose=False,
                pathtoversionfile=None):
    """Tag specified release.
    Updates version using `set_version`, creates tag, and pushes changes.
    Args:
        version: Version to use. See above for supported values and formats.
        libname: Name of the directory under ./src/crl where version
                 file (_version.py) is located. By default the first
                 directory name is used.
        push: Push updated version file and tag to the remote.
        pathtoversionfile: Alternative relative path to version file.
        verbose: Display task execution in more detail.
    """
    # docstring is user-visible invoke help text; kept verbatim
    handler = create_packagehandler(libname=libname,
                                    pathtoversionfile=pathtoversionfile,
                                    verbose=verbose)
    handler.tag_release(version, push=push)
    print('Version: {version}'.format(version=handler.version))
@task
def tag_setup_version(verbose=False):
    """Tag specified release.
    Creates tag of version in setup and pushes changes.
    Args:
        verbose: Display task execution in more detail.
    """
    # docstring is user-visible invoke help text; kept verbatim
    handler = create_packagehandler(verbose=verbose)
    handler.tag_setup_version()
@task
def set_version(version, push=False, libname=None,
                pathtoversionfile=None, verbose=False):
    """Set version in ./src/crl/<libname>/_version.py`.
    Version can have these values:
    - Actual version number to use. See below for supported formats.
    - String 'dev' to update version to latest development version
      (e.g. 2.8 -> 2.8.1.dev, 2.8.1 -> 2.8.2.dev, 2.8a1 -> 2.8.dev) with
      the current date added or updated.
    - String 'keep' to keep using the previously set version.
    Given version must be in one of these PEP-440 compatible formats:
    - Stable version in 'X.Y' or 'X.Y.Z' format (e.g. 2.8, 2.8.6)
    - Pre-releases with 'aN', 'bN' or 'rcN' postfix (e.g. 2.8a1, 2.8.6rc2)
    - Development releases with '.devYYYYMMDD' postfix (e.g. 2.8.6.dev20141001)
      or with '.dev' alone (e.g. 2.8.6.dev) in which case date is added
      automatically.
    Args:
        version: Version to use. See above for supported values and formats.
        push: Commit and push changes to the remote repository.
        libname: Name of the directory under ./src/crl where version
                 file (_version.py) is located. By default the first
                 directory name is used.
        pathtoversionfile: Alternative relative path to version file.
        verbose: display task execution in more detail.
    """
    # docstring is user-visible invoke help text; kept verbatim
    handler = create_packagehandler(libname=libname,
                                    pathtoversionfile=pathtoversionfile,
                                    verbose=verbose)
    handler.update_version(version, push=push)
    print('Version: {version}'.format(version=handler.version))
@task
def clean(remove_dist=True, create_dirs=False):
    """Clean workspace.
    By default deletes 'dist' directory and removes '*.pyc'
    and '$py.class' files.
    Args:
        create_dirs: Re-create 'dist' after removing it.
    """
    # docstring is user-visible invoke help text; kept verbatim
    # NOTE(review): error_handling is not among the visible imports of this
    # module -- presumably defined elsewhere in the file; confirm.
    with error_handling():
        for name in ['dist']:
            removable = os.path.isdir(name) and (name != 'dist' or remove_dist)
            if removable:
                shutil.rmtree(name)
            if create_dirs and not os.path.isdir(name):
                os.mkdir(name)
        # drop compiled python artifacts anywhere under the workspace
        for directory, _, files in os.walk('.'):
            for name in files:
                if name.endswith(('.pyc', '$py.class')):
                    os.remove(os.path.join(directory, name))
@task
def sdist(deploy=False, remove_dist=False):
    """Create source distribution.
    Args:
        deploy: Register and upload sdist to PyPI.
        remove_dist: Control is 'dist' directory initially removed or not.
    """
    # docstring is user-visible invoke help text; kept verbatim
    clean(remove_dist=remove_dist, create_dirs=True)
    sdist_cmd = 'python setup.py sdist'
    if deploy:
        sdist_cmd += ' register upload'
    run(sdist_cmd)
    announce()
def announce():
    """Print the distribution files currently present in 'dist'."""
    print()
    print('Distributions:')
    for dist_file in os.listdir('dist'):
        print(os.path.join('dist', dist_file))
@task
def create_setup(libname=None, add_to_git=True):
    """Create initial setup.py into current directory from library name.
    The module setup will define path to version file
    by joining it with src/crl/libname/_version.py.
    The new file is added and committed by default to git.
    Args:
        libname: Name of the library. If not given first directory under
                 src/crl is used as library name
        add_to_git: If True, add the setup.py to git.
    """
    # docstring is user-visible invoke help text; kept verbatim
    setup_path = get_setup_path()
    if os.path.isfile(setup_path):
        raise Exception('The module setup.py already exists.')
    with open(setup_path, 'w') as setup_file:
        content = SETUP_TEMPLATE.replace(
            '{libname}', libname or
            versionhandler.VersionHandler().get_default_lib())
        setup_file.write(content)
    if add_to_git:
        githandler.GitHandler(run).add(setup_path)
def get_setup_path():
    """Return the absolute path of ``setup.py`` in the current working directory."""
    working_dir = os.getcwd()
    return os.path.join(working_dir, 'setup.py')
@task
def create_index(index, baseindex, additional_index=None,
                 credentials_file=None, verbose=False):
    """
    Create an index with given bases
    Args:
        index: Name of the index to create
        baseindex: URL of devpi PyPI index to be used as base index.
                   Format: http[s]://host:user/indexname.
        additional_index: Other additional indices to use as base.
                          Format: 'user/index1,user/index2'
        credentials_file: /full/path/to/credentials/file with plain text
                          content username:password. In case no
                          credentials_file given, the default devpi clientdir
                          authorization token is used.
        verbose: Display task execution in more detail.
    """
    # docstring is user-visible invoke help text; kept verbatim
    devpi = create_devpihandler(verbose=verbose)
    devpi.create_index(name=index, baseindex=baseindex,
                       otherbase=additional_index,
                       credentials_file=credentials_file)
    print(
        'Successfully created {index} for user {user}'.format(
            index=index, user=devpi.username))
@task
def delete_index(index, credentials_file=None, verbose=False):
    """Delete a devpi index.

    Args:
        index: URL of the devpi PyPI index to delete, in the form
            http[s]://host:user/indexname.
        credentials_file: /full/path/to/credentials/file containing plain
            text username:password. When not given, the default devpi
            clientdir authorization token is used.
        verbose: Display task execution in more detail.
    """
    handler = create_devpihandler(verbose=verbose)
    handler.delete_index(index=index, credentials_file=credentials_file)
    message = 'Successfully deleted {index} for user {user}'
    print(message.format(index=index, user=handler.username))
@task
def test(baseindex, testindex=None, credentials_file=None,
         save_tests_to=None, virtualenv=True,
         pathtoversionfile=None, verbose=False):
    """Upload the current workspace to devpi and run tox tests.

    Args:
        baseindex: URL of the devpi PyPI index used as the base index,
            in the form http[s]://host:user/indexname.
        testindex: Name of the index used for running tests; created if
            it doesn't exist. A temporary index is used when not given.
        credentials_file: /full/path/to/credentials/file containing plain
            text username:password. When not given, the default devpi
            clientdir authorization token is used.
        save_tests_to: Copy the temporary tests directory to this new,
            not yet existing directory.
        virtualenv: Create and run the task in a new temporary virtualenv.
        pathtoversionfile: Alternative relative path to the version file.
        verbose: Display task execution in more detail.
    """
    extra_kwargs = {'novirtualenv': True} if not virtualenv else {}
    handler = create_packagehandler(verbose=verbose,
                                    pathtoversionfile=pathtoversionfile,
                                    **extra_kwargs)
    handler.test(base_index=baseindex, test_index=testindex,
                 credentials_file=credentials_file,
                 save_tests_to=save_tests_to)
@task
def publish(srcindex, destindex, credentials_file=None, tag_if_needed=False,
            tag_branch='master', verbose=False):
    """*DEPRECATED* Publish a version from a given index to another index.

    Args:
        srcindex: URL of the devpi PyPI index holding the new version,
            in the form http[s]://host:user/indexname.
        destindex: URL (short format) of the destination devpi PyPI
            index, as user/indexname.
        credentials_file: /full/path/to/credentials/file containing plain
            text username:password. When not given, the default devpi
            clientdir authorization token is used.
        tag_if_needed: Tag with the package's version if not yet tagged.
        tag_branch: Alternative git branch where the tag must be.
        verbose: Display task execution in more detail.
    """
    handler = create_packagehandler(verbose=verbose)
    published = handler.publish(srcindex=srcindex, destindex=destindex,
                                credentials_file=credentials_file,
                                tag_if_needed=tag_if_needed,
                                tag_branch=tag_branch)
    if published:
        print(
            'Published successfully {version} of {name} to {index}'.format(
                version=handler.version, name=handler.name, index=destindex))
    else:
        print(
            'Skipping. {ver} of {name} already published to {index}'.format(
                ver=handler.version, name=handler.name, index=destindex))
@task
def create_docs(robotdocs_root_folders='robotdocs', verbose=False):
""" Create both Robot Framework and Sphinx documentation.
If 'robotdocsconf.py' exists in root folders then Robot
Framework test libraries and resource files documentation
is generated and integrated with Sphinx documentation.
'robotdocsconf.py' is searched from robotdocs_root_folders
recursively.
Example 1 'robotdocsconf.py' for python library documentation:
module_name = "RunnerExecutor Libraries"
output_file = "RunnerExecutor.rst"
robotdocs = {
'RunnerExecutor.remoterunnerexecutor.RemoteRunnerExecutor': {
'args': ['None', 'None', 'False'],
'docformat': 'robot',
'synopsis': ('Command executor in the remote target shell.')},
'RunnerExecutor.remoterunnerexecutor.SftpTransfer': {
'args': ['False'],
'docformat': 'robot',
'synopsis': ('Command executor in the remote target shell.')}
Example 2 'robotdocsconf.py' for robot resource file documentation:
module_name = "Deployment Helpers"
output_file = "Framework_deployment.rst"
robotdocs = {
'resources/framework/deployment/_deployment_helper.robot': {
'docformat': 'robot'
}
}
Robotdocsconf.py's output_file will be the name of the generated
'sphinxdocs/.........rst' file. A good practice is to name it so that
library's identification is easy. If output_file is missing then
robotdocs.rst will be the file name. Robotdocsconf.py's module_name will
be written to rst file header. Header text will be: 'Robot Framework Test
Libraries' if the module name is missing.
Sphinx documentation is generated according to 'sphinxdocs/conf.py'.
Args:
robotdocs_root_folders: folders list with relative or
absolute path separated by ':',
| |
"""Base implementation of event loop.
The event loop can be broken up into a multiplexer (the part
responsible for notifying us of IO events) and the event loop proper,
which wraps a multiplexer with functionality for scheduling callbacks,
immediately or at a given time in the future.
Whenever a public API takes a callback, subsequent positional
arguments will be passed to the callback if/when it is called. This
avoids the proliferation of trivial lambdas implementing closures.
Keyword arguments for the callback are not supported; this is a
conscious design decision, leaving the door open for keyword arguments
to modify the meaning of the API call itself.
"""
import collections
import concurrent.futures
import heapq
import logging
import socket
import subprocess
import time
import os
import sys
from . import events
from . import futures
from . import tasks
from .log import logger
__all__ = ['BaseEventLoop', 'Server']
# Argument for default thread pool executor creation.
_MAX_WORKERS = 5
class _StopError(BaseException):
    """Raised internally to stop the event loop.

    Derives from BaseException (not Exception) so that generic
    ``except Exception`` handlers in user callbacks cannot swallow it.
    """
def _check_resolved_address(sock, address):
# Ensure that the address is already resolved to avoid the trap of hanging
# the entire event loop when the address requires doing a DNS lookup.
family = sock.family
if family == socket.AF_INET:
host, port = address
elif family == socket.AF_INET6:
host, port = address[:2]
else:
return
type_mask = 0
if hasattr(socket, 'SOCK_NONBLOCK'):
type_mask |= socket.SOCK_NONBLOCK
if hasattr(socket, 'SOCK_CLOEXEC'):
type_mask |= socket.SOCK_CLOEXEC
# Use getaddrinfo(AI_NUMERICHOST) to ensure that the address is
# already resolved.
try:
socket.getaddrinfo(host, port,
family=family,
type=(sock.type & ~type_mask),
proto=sock.proto,
flags=socket.AI_NUMERICHOST)
except socket.gaierror as err:
raise ValueError("address must be resolved (IP address), got %r: %s"
% (address, err))
def _raise_stop_error(*args):
    # Accepts (and ignores) any arguments so it can serve directly as a
    # Future done-callback; always raises _StopError, which run_forever()
    # catches to break out of its loop.
    raise _StopError
class Server(events.AbstractServer):
    """Listening-server handle: tracks attached transports and lets
    callers wait until the server is fully closed via wait_closed()."""
    def __init__(self, loop, sockets):
        self.loop = loop
        # Listening sockets; set to None by close().
        self.sockets = sockets
        # Number of transports currently attached (see attach/detach).
        self.active_count = 0
        # Futures resolved by _wakeup() once fully closed; None afterwards.
        self.waiters = []
    def attach(self, transport):
        """Register a transport; only valid while the server is open."""
        assert self.sockets is not None
        self.active_count += 1
    def detach(self, transport):
        """Unregister a transport; wake waiters if closed and now idle."""
        assert self.active_count > 0
        self.active_count -= 1
        if self.active_count == 0 and self.sockets is None:
            self._wakeup()
    def close(self):
        """Stop serving on all sockets; wake waiters if nothing is attached."""
        sockets = self.sockets
        if sockets is not None:
            self.sockets = None
            for sock in sockets:
                self.loop._stop_serving(sock)
            if self.active_count == 0:
                self._wakeup()
    def _wakeup(self):
        # Resolve every pending wait_closed() future exactly once; waiters
        # is set to None first so no new futures can be appended.
        waiters = self.waiters
        self.waiters = None
        for waiter in waiters:
            if not waiter.done():
                waiter.set_result(waiter)
    @tasks.coroutine
    def wait_closed(self):
        """Coroutine: block until close() has completed and all transports
        have detached. Returns immediately if already fully closed."""
        if self.sockets is None or self.waiters is None:
            return
        waiter = futures.Future(loop=self.loop)
        self.waiters.append(waiter)
        yield from waiter
class BaseEventLoop(events.AbstractEventLoop):
    def __init__(self):
        # True once close() has been called; guards against reuse.
        self._closed = False
        # FIFO of Handles ready to run (see call_soon / _call_soon).
        self._ready = collections.deque()
        # Heap of TimerHandles ordered by scheduled time (see call_at).
        self._scheduled = []
        # Lazily created by run_in_executor(); see set_default_executor().
        self._default_executor = None
        self._internal_fds = 0
        self._running = False
        # Resolution of the monotonic clock backing self.time().
        self._clock_resolution = time.get_clock_info('monotonic').resolution
        self._exception_handler = None
        # Enables the current-loop assertion in call_at/call_soon.
        self._debug = False
def __repr__(self):
return ('<%s running=%s closed=%s debug=%s>'
% (self.__class__.__name__, self.is_running(),
self.is_closed(), self.get_debug()))
    def _make_socket_transport(self, sock, protocol, waiter=None, *,
                               extra=None, server=None):
        """Create socket transport.

        Abstract factory hook; concrete event loop subclasses must override.
        """
        raise NotImplementedError
    def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter, *,
                            server_side=False, server_hostname=None,
                            extra=None, server=None):
        """Create SSL transport.

        Abstract factory hook; concrete event loop subclasses must override.
        """
        raise NotImplementedError
    def _make_datagram_transport(self, sock, protocol,
                                 address=None, extra=None):
        """Create datagram transport.

        Abstract factory hook; concrete event loop subclasses must override.
        """
        raise NotImplementedError
    def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
                                  extra=None):
        """Create read pipe transport.

        Abstract factory hook; concrete event loop subclasses must override.
        """
        raise NotImplementedError
    def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
                                   extra=None):
        """Create write pipe transport.

        Abstract factory hook; concrete event loop subclasses must override.
        """
        raise NotImplementedError
    @tasks.coroutine
    def _make_subprocess_transport(self, protocol, args, shell,
                                   stdin, stdout, stderr, bufsize,
                                   extra=None, **kwargs):
        """Create subprocess transport.

        Abstract coroutine factory hook; concrete subclasses must override.
        """
        raise NotImplementedError
    def _read_from_self(self):
        """Subclass hook — presumably drains the wakeup byte written by
        _write_to_self(); confirm against the selector/proactor subclass."""
        raise NotImplementedError
    def _write_to_self(self):
        """Subclass hook: wake up the loop from another thread.

        Called by call_soon_threadsafe() after queueing a handle.
        """
        raise NotImplementedError
    def _process_events(self, event_list):
        """Process selector events.

        Abstract hook; concrete event loop subclasses must override.
        """
        raise NotImplementedError
def _check_closed(self):
if self._closed:
raise RuntimeError('Event loop is closed')
def run_forever(self):
"""Run until stop() is called."""
self._check_closed()
if self._running:
raise RuntimeError('Event loop is running.')
self._running = True
try:
while True:
try:
self._run_once()
except _StopError:
break
finally:
self._running = False
    def run_until_complete(self, future):
        """Run until the Future is done.
        If the argument is a coroutine, it is wrapped in a Task.
        XXX TBD: It would be disastrous to call run_until_complete()
        with the same coroutine twice -- it would wrap it in two
        different Tasks and that can't be good.
        Return the Future's result, or raise its exception.
        """
        self._check_closed()
        # NOTE(review): 'async' became a reserved keyword in Python 3.7,
        # so this line only parses on the historical Python versions this
        # module targets (modern asyncio renamed it to ensure_future).
        future = tasks.async(future, loop=self)
        # A done future stops the loop so control returns to the caller.
        future.add_done_callback(_raise_stop_error)
        self.run_forever()
        future.remove_done_callback(_raise_stop_error)
        if not future.done():
            # Something else stopped the loop before our future resolved.
            raise RuntimeError('Event loop stopped before Future completed.')
        return future.result()
    def stop(self):
        """Stop running the event loop.
        Every callback scheduled before stop() is called will run.
        Callbacks scheduled after stop() is called won't run now; however,
        they will run if the loop is started again later.
        """
        # Queue the sentinel callback; run_forever() exits when it fires.
        self.call_soon(_raise_stop_error)
def close(self):
"""Close the event loop.
This clears the queues and shuts down the executor,
but does not wait for the executor to finish.
"""
if self._closed:
return
self._closed = True
self._ready.clear()
self._scheduled.clear()
executor = self._default_executor
if executor is not None:
self._default_executor = None
executor.shutdown(wait=False)
    def is_closed(self):
        """Return True if the event loop was closed via close()."""
        return self._closed
    def is_running(self):
        """Return True while run_forever()/run_until_complete() is active."""
        return self._running
    def time(self):
        """Return the time according to the event loop's clock.

        Monotonic, so unaffected by system clock adjustments; only
        differences between calls are meaningful.
        """
        return time.monotonic()
    def call_later(self, delay, callback, *args):
        """Arrange for a callback to be called at a given time.
        Return a Handle: an opaque object with a cancel() method that
        can be used to cancel the call.
        The delay can be an int or float, expressed in seconds. It is
        always a relative time.
        Each callback will be called exactly once. If two callbacks
        are scheduled for exactly the same time, it is undefined which
        will be called first.
        Any positional arguments after the callback will be passed to
        the callback when it is called.
        """
        # Convert the relative delay to an absolute loop time.
        return self.call_at(self.time() + delay, callback, *args)
def call_at(self, when, callback, *args):
"""Like call_later(), but uses an absolute time."""
if tasks.iscoroutinefunction(callback):
raise TypeError("coroutines cannot be used with call_at()")
if self._debug:
self._assert_is_current_event_loop()
timer = events.TimerHandle(when, callback, args, self)
heapq.heappush(self._scheduled, timer)
return timer
    def call_soon(self, callback, *args):
        """Arrange for a callback to be called as soon as possible.
        This operates as a FIFO queue, callbacks are called in the
        order in which they are registered. Each callback will be
        called exactly once.
        Any positional arguments after the callback will be passed to
        the callback when it is called.
        """
        # check_loop=True: in debug mode, verify we're on the current loop.
        return self._call_soon(callback, args, check_loop=True)
def _call_soon(self, callback, args, check_loop):
if tasks.iscoroutinefunction(callback):
raise TypeError("coroutines cannot be used with call_soon()")
if self._debug and check_loop:
self._assert_is_current_event_loop()
handle = events.Handle(callback, args, self)
self._ready.append(handle)
return handle
def _assert_is_current_event_loop(self):
"""Asserts that this event loop is the current event loop.
Non-threadsafe methods of this class make this assumption and will
likely behave incorrectly when the assumption is violated.
Should only be called when (self._debug == True). The caller is
responsible for checking this condition for performance reasons.
"""
if events.get_event_loop() is not self:
raise RuntimeError(
"non-threadsafe operation invoked on an event loop other "
"than the current one")
    def call_soon_threadsafe(self, callback, *args):
        """Like call_soon(), but safe to call from another thread.

        Skips the current-loop debug assertion and wakes the loop via
        _write_to_self() so the queued callback runs promptly.
        """
        handle = self._call_soon(callback, args, check_loop=False)
        self._write_to_self()
        return handle
def run_in_executor(self, executor, callback, *args):
if tasks.iscoroutinefunction(callback):
raise TypeError("coroutines cannot be used with run_in_executor()")
if isinstance(callback, events.Handle):
assert not args
assert not isinstance(callback, events.TimerHandle)
if callback._cancelled:
f = futures.Future(loop=self)
f.set_result(None)
return f
callback, args = callback._callback, callback._args
if executor is None:
executor = self._default_executor
if executor is None:
executor = concurrent.futures.ThreadPoolExecutor(_MAX_WORKERS)
self._default_executor = executor
return futures.wrap_future(executor.submit(callback, *args), loop=self)
    def set_default_executor(self, executor):
        """Set the executor used by run_in_executor() when passed None."""
        self._default_executor = executor
    def getaddrinfo(self, host, port, *,
                    family=0, type=0, proto=0, flags=0):
        """Asynchronous socket.getaddrinfo(): resolves in the default
        executor so the (potentially blocking) DNS lookup never runs on
        the loop thread. Returns a Future."""
        return self.run_in_executor(None, socket.getaddrinfo,
                                    host, port, family, type, proto, flags)
    def getnameinfo(self, sockaddr, flags=0):
        """Asynchronous socket.getnameinfo() via the default executor;
        returns a Future."""
        return self.run_in_executor(None, socket.getnameinfo, sockaddr, flags)
@tasks.coroutine
def create_connection(self, protocol_factory, host=None, port=None, *,
ssl=None, family=0, proto=0, flags=0, sock=None,
local_addr=None, server_hostname=None):
"""XXX"""
if server_hostname is not None and not ssl:
raise ValueError('server_hostname is only meaningful with ssl')
if server_hostname is None and ssl:
# Use host as default for server_hostname. It is an error
# if host is empty or not set, e.g. when an
# already-connected socket was passed or when only a port
# is given. To avoid this error, you can pass
# server_hostname='' -- this will bypass the hostname
# check. (This also means that if host is a numeric
# IP/IPv6 address, we will attempt to verify that exact
# address; this will probably fail, but it is possible to
# create a certificate for a specific IP address, so we
# don't judge it here.)
if not host:
raise ValueError('You must set server_hostname '
'when using ssl without a host')
server_hostname = host
if host is not None or port is not None:
if sock is not | |
# thatsDEM2/array_geometry.py (from the geoboxers/thatsDEM2 repository)
# Original work Copyright (c) 2015, Danish Geodata Agency <<EMAIL>>
# Modified work Copyright (c) 2015-2016, Geoboxers <<EMAIL>>
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
"""
Methods to work easier with ogr geometries and numpy arrays in combination.
Contains some custom geospatial methods acting on numpy arrays.
silyko, June 2016.
"""
import ctypes
import numpy as np
from osgeo import ogr
from thatsDEM2 import shared_libraries as sh
# Py2 to 3
# Compatibility shims: 'basestring' and 'xrange' do not exist on
# Python 3, so alias them to their Python 3 equivalents.
try:
    basestring
except NameError:
    basestring = str
try:
    xrange
except NameError:
    xrange = range
# ctypes array signatures used below when declaring the C library's
# argtypes. Flags: C-contiguous, Owndata, Aligned, (W)riteable.
XY_TYPE = np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags=['C', 'O', 'A', 'W'])
GRID_TYPE = np.ctypeslib.ndpointer(
    dtype=np.float64, ndim=2, flags=['C', 'O', 'A', 'W'])
Z_TYPE = np.ctypeslib.ndpointer(
    dtype=np.float64, ndim=1, flags=['C', 'O', 'A', 'W'])
# BUGFIX: np.bool (a deprecated alias of the builtin bool) was removed in
# numpy 1.24; np.bool_ is the actual boolean scalar type and yields the
# identical dtype on all numpy versions.
MASK_TYPE = np.ctypeslib.ndpointer(
    dtype=np.bool_, ndim=1, flags=['C', 'O', 'A', 'W'])
UINT32_TYPE = np.ctypeslib.ndpointer(
    dtype=np.uint32, ndim=1, flags=['C', 'O', 'A'])
HMAP_TYPE = np.ctypeslib.ndpointer(
    dtype=np.uint32, ndim=2, flags=['C', 'O', 'A'])
UINT8_VOXELS = np.ctypeslib.ndpointer(
    dtype=np.uint8, ndim=3, flags=['C', 'O', 'A', 'W'])
INT32_VOXELS = np.ctypeslib.ndpointer(
    dtype=np.int32, ndim=3, flags=['C', 'O', 'A', 'W'])
INT32_TYPE = np.ctypeslib.ndpointer(
    dtype=np.int32, ndim=1, flags=['C', 'O', 'A', 'W'])
# Load the library using np.ctypeslib
lib = np.ctypeslib.load_library(sh.LIB_FGEOM, sh.LIB_DIR)
##############
# corresponds to
# array_geometry.h
##############
# Declaring argtypes/restype here makes ctypes validate array dtype,
# dimensionality and memory layout on every call into the C library.
# void p_in_buf(double *p_in, char *mout, double *verts, unsigned long np,
# unsigned long nv, double d)
lib.p_in_buf.argtypes = [XY_TYPE, MASK_TYPE, XY_TYPE,
                         ctypes.c_ulong, ctypes.c_ulong, ctypes.c_double]
lib.p_in_buf.restype = None
lib.p_in_poly.argtypes = [XY_TYPE, MASK_TYPE,
                          XY_TYPE, ctypes.c_uint, UINT32_TYPE, ctypes.c_uint]
lib.p_in_poly.restype = ctypes.c_int
lib.get_triangle_geometry.argtypes = [
    XY_TYPE, Z_TYPE, sh.LP_CINT, np.ctypeslib.ndpointer(
        dtype=np.float32, ndim=2, flags=[
            'C', 'O', 'A', 'W']), ctypes.c_int]
lib.get_triangle_geometry.restype = None
lib.get_normals.argtypes = [
    XY_TYPE, Z_TYPE, sh.LP_CINT, np.ctypeslib.ndpointer(
        dtype=np.float64, ndim=2, flags=[
            'C', 'O', 'A', 'W']), ctypes.c_int]
lib.get_normals.restype = None
lib.mark_bd_vertices.argtypes = [
    MASK_TYPE, MASK_TYPE, sh.LP_CINT, MASK_TYPE, ctypes.c_int, ctypes.c_int]
lib.mark_bd_vertices.restype = None
# int fill_spatial_index(int *sorted_flat_indices, int *index, int
# npoints, int max_index)
lib.fill_spatial_index.argtypes = [
    INT32_TYPE, INT32_TYPE, ctypes.c_int, ctypes.c_int]
lib.fill_spatial_index.restype = ctypes.c_int
# Callback signature shared by all point-cloud filters (builtin C filters
# and Python callables wrapped via FILTER_FUNC_TYPE in apply_filter).
FILTER_FUNC_TYPE = ctypes.CFUNCTYPE(ctypes.c_double,
                                    sh.LP_CDOUBLE,
                                    ctypes.c_double,
                                    sh.LP_CINT,
                                    sh.LP_CDOUBLE,
                                    sh.LP_CDOUBLE,
                                    ctypes.c_double,
                                    ctypes.c_double,
                                    ctypes.c_void_p)
lib.apply_filter.argtypes = (
    XY_TYPE,
    sh.LP_CDOUBLE,
    XY_TYPE,
    Z_TYPE,
    Z_TYPE,
    INT32_TYPE,
    Z_TYPE,
    ctypes.c_int,
    FILTER_FUNC_TYPE,
    ctypes.c_double,
    ctypes.c_double,
    ctypes.c_void_p
)
lib.apply_filter.restype = None
# void pc_noise_filter(double *pc_xy, double *pc_z, double *z_out, double filter_rad,
# double zlim, double den_cut, int *spatial_index, double *header, int npoints);
# binning
# void moving_bins(double *z, int *nout, double rad, int n);
lib.moving_bins.argtypes = [Z_TYPE, INT32_TYPE, ctypes.c_double, ctypes.c_int]
lib.moving_bins.restype = None
# a triangle based filter
# void tri_filter_low(double *z, double *zout, int *tri, double cut_off,
# int ntri)
lib.tri_filter_low.argtypes = [Z_TYPE, Z_TYPE,
                               sh.LP_CINT, ctypes.c_double, ctypes.c_int]
lib.tri_filter_low.restype = None
# hmap filler
# void fill_it_up(unsigned char *out, unsigned int *hmap, int rows, int
# cols, int stacks);
lib.fill_it_up.argtypes = [UINT8_VOXELS, HMAP_TYPE] + [ctypes.c_int] * 3
lib.fill_it_up.restype = None
lib.find_floating_voxels.argtypes = [
    INT32_VOXELS, INT32_VOXELS] + [ctypes.c_int] * 4
lib.find_floating_voxels.restype = None
# unsigned long simplify_linestring(double *xy_in, double *xy_out, double dist_tol, unsigned long n_pts)
lib.simplify_linestring.argtypes = [XY_TYPE, XY_TYPE, ctypes.c_double, ctypes.c_ulong]
lib.simplify_linestring.restype = ctypes.c_ulong
# Names of defined filter functions
# (exported C symbols accepted by name in apply_filter below)
LIBRARY_FILTERS = ("mean_filter",
                   "median_filter",
                   "adaptive_gaussian_filter",
                   "min_filter",
                   "max_filter",
                   "var_filter",
                   "idw_filter",
                   "density_filter",
                   "distance_filter",
                   "nearest_filter",
                   "ballcount_filter",
                   "spike_filter",
                   "ray_mean_dist_filter",
                   "mean_3d_filter")
def apply_filter(along_xy, along_z,
                 pc_xy, pc_attr,
                 spatial_index,
                 index_header,
                 filter_func,
                 filter_rad,
                 nd_val,
                 params=None):
    """
    Apply a builtin library filter, or a filter defined by a python function.
    Args:
        along_xy: Numpy array of input points.
        along_z: Numpy array of z values if 3d-filter, else None.
        pc_xy: The points to apply the filter on.
        pc_attr: The values to apply the filter on (z if a geometric filter).
        spatial_index: Pointcloud spatial index (see Pointcloud.sort_spatially)
        index_header: Pointcloud index metadata header.
        filter_func: A name of one of the builtin filters, or a python callable.
        filter_rad: Filter radius (which the filter function will use as needed).
        nd_val: No data value.
        params: Optional additional parameters. MUST be a ctypes.c_void_p pointer if not None.
    Returns:
        1d array of filtered values
    """
    out = np.zeros(along_xy.shape[0], dtype=np.float64)
    if callable(filter_func):
        # Wrap the Python callable as a C callback of the filter signature.
        func = FILTER_FUNC_TYPE(filter_func)
    else:
        if not isinstance(filter_func, basestring):
            raise ValueError("filter_func must be a name (string) or a callable.")
        if filter_func not in LIBRARY_FILTERS:
            raise ValueError("No builtin filter called " + filter_func)
        # Re-wrap the exported C symbol's address as a FILTER_FUNC_TYPE
        # pointer so it can be passed where a callback is expected.
        addr = ctypes.cast(getattr(lib, filter_func), ctypes.c_void_p).value
        func = FILTER_FUNC_TYPE(addr)
    if along_z is not None:
        # If using a 3d filter - construct pointer
        assert along_z.shape[0] == along_xy.shape[0]
        pz = along_z.ctypes.data_as(sh.LP_CDOUBLE)
    else:
        # NULL z pointer — presumably signals a 2d filter to the C side;
        # confirm against array_geometry.h.
        pz = None
    if params is not None and not isinstance(params, ctypes.c_void_p):
        raise ValueError("params must be None or a ctypes.c_void_p pointer!")
    lib.apply_filter(along_xy, pz, pc_xy, pc_attr, out, spatial_index,
                     index_header, along_xy.shape[0], func, filter_rad,
                     nd_val, params)
    return out
def binary_fill_gaps(M):
    """
    Fill small gaps between elements in a binary mask

    Args:
        M: 2d binary mask (numpy array).
    Returns:
        New mask of the same shape with small gaps filled by the C library.
    """
    # NOTE(review): lib.binary_fill_gaps has no argtypes declared in this
    # module chunk, so ctypes performs no array validation here — confirm
    # the expected dtype/layout against array_geometry.h.
    N = np.zeros_like(M)
    lib.binary_fill_gaps(M, N, M.shape[0], M.shape[1])
    return N
def line_intersection(l1, l2):
    """
    Test whether two 2d line segments l1, l2 intersect.
    Args:
        l1: Numpy array of shape (2, 2) — segment endpoints.
        l2: Numpy array of shape (2, 2) — segment endpoints.
    Returns:
        (point, lc1, lc2): intersection point and the line coordinates on
        l1 and l2, when the segments are NOT colinear and do intersect.
        Otherwise (None, None, None).
    """
    d1 = l1[1] - l1[0]
    d2 = l2[0] - l2[1]
    d3 = l2[0] - l1[0]
    m = np.column_stack((d1, d2, d3))
    denom = np.linalg.det(m[:, (0, 1)])
    if abs(denom) < 1e-10:
        # Parallel or colinear: no unique intersection.
        # TODO: handle the colinear-overlap case explicitly.
        return None, None, None
    det_02 = np.linalg.det(m[:, (0, 2)])
    det_12 = np.linalg.det(m[:, (1, 2)])
    t1 = -det_12 / denom
    t2 = det_02 / denom
    # Both parameters must lie within [0, 1] for the segments to touch.
    if 0 <= t1 <= 1 and 0 <= t2 <= 1:
        return l1[0] + t1 * d1, t1, t2
    return None, None, None
def simplify_linestring(xy, dtol):
    """
    Simplify a 2D-linestring (xy)
    Args:
        xy: numpy array of shape (n,2) and dtype float64
        dtol: Distance tolerance.
    Returns:
        Simplified xy array.
    """
    n_in = xy.shape[0]
    if n_in < 3:
        # Two points or fewer: nothing to simplify, return input unchanged.
        return xy
    buffer_out = np.zeros_like(xy)
    n_kept = lib.simplify_linestring(xy, buffer_out, dtol, n_in)
    return buffer_out[:n_kept].copy()
def moving_bins(z, rad):
    """
    Count points within a bin of size 2*rad around each point.
    Corresponds to a 'moving' histogram, or a 1d 'count filter'.

    Returns:
        (sorted z values, per-point counts) as numpy arrays.
    """
    # Input is sorted here — callers need not pre-sort.
    sorted_z = np.sort(z).astype(np.float64)
    counts = np.zeros(sorted_z.shape, dtype=np.int32)
    lib.moving_bins(sorted_z, counts, rad, sorted_z.shape[0])
    return sorted_z, counts
def tri_filter_low(z, tri, ntri, cut_off):
    """
    Triangulation based filtering of input z.
    Tests dz along each edge and replaces the high point with the low
    point whenever dz exceeds cut_off — flattens steep triangles that
    connect e.g. a water point to a vegetation point on a tree.

    Returns:
        Filtered copy of z (input is left untouched).
    """
    filtered = np.copy(z)
    lib.tri_filter_low(z, filtered, tri, cut_off, ntri)
    return filtered
def ogrpoints2array(ogr_geoms):
    """
    Convert a sequence of OGR point geometries to an (n, 3) numpy array.
    Slow interface: one GetPoint() call per geometry.

    Args:
        ogr_geoms: Sequence of OGR point geometries (anything exposing
            a GetPoint() returning an (x, y, z) tuple).
    Returns:
        numpy array of shape (len(ogr_geoms), 3), dtype float64.
    """
    out = np.empty((len(ogr_geoms), 3), dtype=np.float64)
    # enumerate() instead of indexing via xrange(len(...)) — clearer and
    # drops the dependency on the py2 compatibility shim.
    for i, geom in enumerate(ogr_geoms):
        out[i, :] = geom.GetPoint()
    return out
def ogrmultipoint2array(ogr_geom, flatten=False):
    """
    Convert an OGR multipoint geometry to a numpy (n, 3) array,
    or (n, 2) when flatten is True.
    """
    geom_type = ogr_geom.GetGeometryType()
    assert geom_type in (ogr.wkbMultiPoint, ogr.wkbMultiPoint25D)
    n_points = ogr_geom.GetGeometryCount()
    out = np.zeros((n_points, 3), dtype=np.float64)
    for idx in range(n_points):
        out[idx] = ogr_geom.GetGeometryRef(idx).GetPoint()
    if flatten:
        # Drop the z column; copy() keeps the result C-contiguous.
        out = out[:, 0:2].copy()
    return out
def ogrgeom2array(ogr_geom, flatten=True):
    """
    OGR geometry to numpy array dispatcher.
    Routes the geometry to the appropriate converter by geometry type.
    """
    geom_type = ogr_geom.GetGeometryType()
    if geom_type in (ogr.wkbLineString, ogr.wkbLineString25D):
        return ogrline2array(ogr_geom, flatten)
    if geom_type in (ogr.wkbPolygon, ogr.wkbPolygon25D):
        return ogrpoly2array(ogr_geom, flatten)
    if geom_type in (ogr.wkbMultiPoint, ogr.wkbMultiPoint25D):
        return ogrmultipoint2array(ogr_geom, flatten)
    raise Exception("Unsupported geometry type: %s" %
                    ogr_geom.GetGeometryName())
def ogrpoly2array(ogr_poly, flatten=True):
    """
    Convert an OGR polygon geometry to a list of numpy vertex arrays.
    Element 0 is the outer ring; subsequent elements bound holes.
    Will not handle 'holes in holes'.
    """
    rings = []
    for ring_idx in range(ogr_poly.GetGeometryCount()):
        vertices = np.asarray(ogr_poly.GetGeometryRef(ring_idx).GetPoints())
        if flatten and vertices.shape[1] > 2:
            # Drop the z column; copy() keeps the slice C-contiguous.
            vertices = vertices[:, 0:2].copy()
        rings.append(vertices)
    return rings
def ogrline2array(ogr_line, flatten=True):
"""
Convert a OGR linestring geometry to a numpy array (of vertices).
"""
t = ogr_line.GetGeometryType()
assert(t == ogr.wkbLineString or t == ogr.wkbLineString25D)
pts = ogr_line.GetPoints()
# for an incompatible geometry | |
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import skimage.io as io
import skimage.filters as flt
#%matplotlib inline
# since we can't use imports
import numpy as np
import scipy.ndimage.filters as flt
import warnings
from sklearn.mixture import GaussianMixture as GM
from scipy import interpolate, ndimage
from tqdm import tqdm
import skimage.measure as measure
from joblib import Parallel, delayed
from skimage.io import imread, imsave
import skimage as sk
import os
def anisodiff(img,niter=1,kappa=50,gamma=0.1,step=(1.,1.),sigma=0, option=1,ploton=False):
    """
    Anisotropic (Perona-Malik) diffusion.

    Usage:
    imgout = anisodiff(im, niter, kappa, gamma, option)

    Arguments:
            img    - input image (2D; RGB input is averaged to grayscale)
            niter  - number of iterations
            kappa  - conduction coefficient 20-100 ?
            gamma  - max value of .25 for stability
            step   - tuple, the distance between adjacent pixels in (y,x)
            sigma  - if > 0, Gaussian-smooth the gradients with this width
                     before computing the conduction coefficients
            option - 1 Perona Malik diffusion equation No 1
                     2 Perona Malik diffusion equation No 2
            ploton - if True, the image will be plotted on every iteration

    Returns:
            imgout - diffused image.

    kappa controls conduction as a function of gradient. If kappa is low
    small intensity gradients are able to block conduction and hence
    diffusion across step edges. A large value reduces the influence of
    intensity gradients on conduction.

    gamma controls speed of diffusion (you usually want it at a maximum
    of 0.25). step is used to scale the gradients in case the spacing
    between adjacent pixels differs in the x and y axes.

    Diffusion equation 1 favours high contrast edges over low contrast
    ones; diffusion equation 2 favours wide regions over smaller ones.

    Reference:
    P. Perona and J. Malik.
    Scale-space and edge detection using anisotropic diffusion.
    IEEE Transactions on Pattern Analysis and Machine Intelligence,
    12(7):629-639, July 1990.
    """
    # ...you could always diffuse each color channel independently if you
    # really want
    print("performing 2D anisotropic filtering...")
    if img.ndim == 3:
        warnings.warn("Only grayscale images allowed, converting to 2D matrix")
        img = img.mean(2)

    # initialize output array
    img = img.astype('float32')
    imgout = img.copy()

    # initialize some internal variables
    deltaS = np.zeros_like(imgout)
    deltaE = deltaS.copy()
    NS = deltaS.copy()
    EW = deltaS.copy()
    gS = np.ones_like(imgout)
    gE = gS.copy()

    # create the plot figure, if requested
    if ploton:
        import pylab as pl

        fig = pl.figure(figsize=(20,5.5),num="Anisotropic diffusion")
        ax1,ax2 = fig.add_subplot(1,2,1),fig.add_subplot(1,2,2)
        ax1.imshow(img,interpolation='nearest')
        ih = ax2.imshow(imgout,interpolation='nearest',animated=True)
        ax1.set_title("Original image")
        ax2.set_title("Iteration 0")
        fig.canvas.draw()

    # BUGFIX: the original iterated over np.arange(1, niter), i.e. only
    # niter - 1 iterations; iterate exactly niter times as documented.
    for ii in tqdm(np.arange(niter)):
        # calculate the diffs (one-sided differences along each axis)
        deltaS[:-1,: ] = np.diff(imgout,axis=0)
        deltaE[:,:-1] = np.diff(imgout,axis=1)

        # optionally regularize the gradients before computing conduction
        if 0<sigma:
            deltaSf=flt.gaussian_filter(deltaS,sigma)
            deltaEf=flt.gaussian_filter(deltaE,sigma)
        else:
            deltaSf=deltaS
            deltaEf=deltaE

        # conduction gradients (only need to compute one per dim!)
        if option == 1:
            gS = np.exp(-(deltaSf/kappa)**2.)/step[0]
            gE = np.exp(-(deltaEf/kappa)**2.)/step[1]
        elif option == 2:
            gS = 1./(1.+(deltaSf/kappa)**2.)/step[0]
            gE = 1./(1.+(deltaEf/kappa)**2.)/step[1]

        # update matrices
        E = gE*deltaE
        S = gS*deltaS

        # subtract a copy that has been shifted 'North/West' by one
        # pixel. don't ask questions. just do it. trust me.
        NS[:] = S
        EW[:] = E
        NS[1:,:] -= S[:-1,:]
        EW[:,1:] -= E[:,:-1]

        # update the image
        imgout += gamma*(NS+EW)

        if ploton:
            iterstring = "Iteration %i" %(ii+1)
            ih.set_data(imgout)
            ax2.set_title(iterstring)
            fig.canvas.draw()

    return imgout
def anisodiff3(stack,niter=1,kappa=50,gamma=0.1,step=(1.,1.,1.), sigma=0, option=1,ploton=False):
"""
3D Anisotropic diffusion.
Usage:
stackout = anisodiff(stack, niter, kappa, gamma, option)
Arguments:
stack - input stack
niter - number of iterations
kappa - conduction coefficient 20-100 ?
gamma - max value of .25 for stability
step - tuple, the distance between adjacent pixels in (z,y,x)
option - 1 Perona Malik diffusion equation No 1
2 Perona Malik diffusion equation No 2
ploton - if True, the middle z-plane will be plotted on every
iteration
Returns:
stackout - diffused stack.
kappa controls conduction as a function of gradient. If kappa is low
small intensity gradients are able to block conduction and hence diffusion
across step edges. A large value reduces the influence of intensity
gradients on conduction.
gamma controls speed of diffusion (you usually want it at a maximum of
0.25)
step is used to scale the gradients in case the spacing between adjacent
pixels differs in the x,y and/or z axes
Diffusion equation 1 favours high contrast edges over low contrast ones.
Diffusion equation 2 favours wide regions over smaller ones.
Reference:
<NAME> and <NAME>.
Scale-space and edge detection using ansotropic diffusion.
IEEE Transactions on Pattern Analysis and Machine Intelligence,
12(7):629-639, July 1990.
Original MATLAB code by <NAME>
School of Computer Science & Software Engineering
The University of Western Australia
pk @ csse uwa edu au
<http://www.csse.uwa.edu.au>
Translated to Python and optimised by <NAME>
Department of Pharmacology
University of Oxford
<<EMAIL>>
June 2000 original version.
March 2002 corrected diffusion eqn No 2.
July 2012 translated to Python
"""
# ...you could always diffuse each color channel independently if you
# really want
print("performing 3D anisotropic filtering...")
if stack.ndim == 4:
warnings.warn("Only grayscale stacks allowed, converting to 3D matrix")
stack = stack.mean(3)
# initialize output array
stack = stack.astype('float32')
stackout = stack.copy()
# initialize some internal variables
deltaS = np.zeros_like(stackout)
deltaE = deltaS.copy()
deltaD = deltaS.copy()
NS = deltaS.copy()
EW = deltaS.copy()
UD = deltaS.copy()
gS = np.ones_like(stackout)
gE = gS.copy()
gD = gS.copy()
# create the plot figure, if requested
if ploton:
import pylab as pl
from time import sleep
showplane = stack.shape[0]//2
fig = pl.figure(figsize=(20,5.5),num="Anisotropic diffusion")
ax1,ax2 = fig.add_subplot(1,2,1),fig.add_subplot(1,2,2)
ax1.imshow(stack[showplane,...].squeeze(),interpolation='nearest')
ih = ax2.imshow(stackout[showplane,...].squeeze(),interpolation='nearest',animated=True)
ax1.set_title("Original stack (Z = %i)" %showplane)
ax2.set_title("Iteration 0")
fig.canvas.draw()
for ii in tqdm(np.arange(1,niter)):
# calculate the diffs
deltaD[:-1,: ,: ] = np.diff(stackout,axis=0)
deltaS[: ,:-1,: ] = np.diff(stackout,axis=1)
deltaE[: ,: ,:-1] = np.diff(stackout,axis=2)
if 0<sigma:
deltaDf=flt.gaussian_filter(deltaD,sigma)
deltaSf=flt.gaussian_filter(deltaS,sigma)
deltaEf=flt.gaussian_filter(deltaE,sigma)
else:
deltaDf=deltaD
deltaSf=deltaS
deltaEf=deltaE
# conduction gradients (only need to compute one per dim!)
if option == 1:
gD = np.exp(-(deltaD/kappa)**2.)/step[0]
gS = np.exp(-(deltaS/kappa)**2.)/step[1]
gE = np.exp(-(deltaE/kappa)**2.)/step[2]
elif option == 2:
gD = 1./(1.+(deltaD/kappa)**2.)/step[0]
gS = 1./(1.+(deltaS/kappa)**2.)/step[1]
gE = 1./(1.+(deltaE/kappa)**2.)/step[2]
# update matrices
D = gD*deltaD
E = gE*deltaE
S = gS*deltaS
# subtract a copy that has been shifted 'Up/North/West' by one
# pixel. Don't ask questions — just do it. Trust me.
UD[:] = D
NS[:] = S
EW[:] = E
UD[1:,: ,: ] -= D[:-1,: ,: ]
NS[: ,1:,: ] -= S[: ,:-1,: ]
EW[: ,: ,1:] -= E[: ,: ,:-1]
# update the image
stackout += gamma*(UD+NS+EW)
if ploton:
iterstring = "Iteration %i" %(ii+1)
ih.set_data(stackout[showplane,...].squeeze())
ax2.set_title(iterstring)
fig.canvas.draw()
# sleep(0.01)
return stackout
def get_phase_frac(im, phase_val, ax):
    """Return the fraction of voxels equal to *phase_val*.

    :param im: 2D or 3D image array; 3D axes are assumed ordered (z, y, x)
        (matching the diffusion code above) -- a 2D image is promoted to a
        one-slice 3D stack
    :param phase_val: grayscale value identifying the phase of interest
    :param ax: 'x', 'y' or 'z' for a per-position profile along that axis,
        or None for the single global fraction
    :return: 1D array of fractions along *ax*, or a scalar when ax is None
    :raises ValueError: if *ax* is not one of 'x', 'y', 'z' or None
    """
    dim = im.shape
    if len(dim) == 2:
        # promote a single 2D slice to a one-slice 3D stack
        im = im.reshape(-1, dim[0], dim[1])
        dim = im.shape
    mask = np.where(im == phase_val, 1, 0)
    if ax == 'x':
        # collapse z then y; each x entry averages dim[0]*dim[1] voxels
        # (fix: previously divided by dim[0]*dim[2], so a uniform phase did
        # not yield a fraction of 1.0 unless the y and x extents were equal)
        result = np.sum(mask, axis=0).sum(axis=0) / (dim[0] * dim[1])
    elif ax == 'y':
        # collapse z then x; each y entry averages dim[0]*dim[2] voxels
        # (fix: previously divided by dim[0]*dim[1])
        result = np.sum(mask, axis=0).sum(axis=1) / (dim[0] * dim[2])
    elif ax == 'z':
        # collapse y then x; each z entry averages dim[1]*dim[2] voxels
        result = np.sum(mask, axis=1).sum(axis=1) / (dim[1] * dim[2])
    elif ax is None:
        result = np.mean(mask)
    else:
        # previously fell through and raised UnboundLocalError on `result`
        raise ValueError("ax must be one of 'x', 'y', 'z' or None, got %r" % (ax,))
    return result
def get_saturation(im, air_phase_val, sat_phase_val, ax):
    """Saturation profile: saturating-phase fraction relative to pore space.

    Pore space is the union of the air phase and the saturating phase, so
    the result is sat / (sat + air), evaluated along *ax* (or globally when
    ax is None) via :func:`get_phase_frac`.
    """
    saturated = get_phase_frac(im, phase_val=sat_phase_val, ax=ax)
    air = get_phase_frac(im, phase_val=air_phase_val, ax=ax)
    return saturated / (saturated + air)
def get_z_aixs_profile(im_stack, agg_func=None, phase_val=None):
    """Aggregate each z-slice of a 3D stack into a single value.

    :param im_stack: 3D array ordered (z, y, x)
    :param agg_func: aggregation callable accepting (array, axis=...);
        defaults to np.mean
    :param phase_val: if given, the stack is first binarised to 1 where a
        voxel equals this value and 0 elsewhere
    :return: 1D array with one aggregated value per slice
    """
    if phase_val is not None:
        im_stack = np.where(im_stack == phase_val, 1, 0)
    aggregate = np.mean if agg_func is None else agg_func
    n_slices = im_stack.shape[0]
    flattened = np.reshape(im_stack, (n_slices, -1))
    return aggregate(flattened, axis=1)
def norm_stack(im_stack, normalizer=None, how=None):
shape = im_stack.shape
if normalizer is None:
normalizer = im_stack[0]
if how is None:
how = "ratio"
if how == "ratio":
im_normed = im_stack / normalizer
elif how == "diff":
im_normed = im_stack - normalizer
# im_normed_f = []
# sigma=10
# im_normed_f.extend(Parallel(n_jobs=5)(delayed(gauss_filter)(slc, sigma) for slc in im_normed))
im_normed_f = np.zeros_like(im_stack, dtype=np.float32)
for slc_idx in np.arange(shape[0]):
slc = im_normed[slc_idx]
im_normed_f[slc_idx, :, :] | |
"fair_share:TAGS TIME %s"%(time.time()-start_t, ))
start_t=time.time()
Trace.trace(self.trace_level+4, 'fair_share:postponed rqs %s'%(self.postponed_requests))
if len(tags) > 1:
for key in tags:
if self.pending_work.get_sg(key) in self.postponed_requests.sg_list.keys():
pass # request for this SG is already in postponed list, no nee to process
else:
if not key in self.checked_keys:
self.checked_keys.append(key)
if key != check_key:
Trace.trace(self.trace_level+4, "fair_share: key %s"%(key,))
Trace.trace(100, "fair_share:keys TIME %s"%(time.time()-start_t, ))
return key
return None
def process_read_request(self, request, requestor):
    """
    Process read request.

    :type request: :obj:`manage_queue.Request`
    :arg request: request to process
    :type requestor: :obj:`dict`
    :arg requestor: mover ticket
    :rtype: :obj:`tuple` (:obj:`manage_queue.Request` - request or :obj:`None`,
       :obj:`str` - key to check next or :obj:`None`)
    """
    self.continue_scan = 0 # disable "scan" of pending queue
    rq = request
    Trace.trace(self.trace_level+4,"process_read_request %s"%(rq))
    # fair-share bookkeeping may suggest another pending-queue key to
    # inspect next; a non-None key also re-enables the scan
    key_to_check = self.fair_share(rq)
    if key_to_check:
        self.continue_scan = 1
    mover = requestor.get('mover', None)
    label = rq.ticket["fc"]["external_label"]
    # a tape volume can serve only one mover at a time; disk movers are exempt
    if self.is_vol_busy(rq.ticket["fc"]["external_label"], mover) and self.mover_type(requestor) != 'DiskMover':
        rq.ticket["reject_reason"] = ("VOL_BUSY",rq.ticket["fc"]["external_label"])
        self.continue_scan = 1
        Trace.trace(self.trace_level+4,"process_read_request: VOL_BUSY %s"%(rq.ticket["fc"]["external_label"]))
        return rq, key_to_check
    # otherwise we have found a volume that has read work pending
    Trace.trace(self.trace_level+4,"process_read_request: found volume %s"%(rq.ticket,))
    # ok passed criteria.
    ## check if there are any discipline restrictions
    host_busy = False
    if self.process_for_bound_vol:
        # a volume is already mounted for this mover: apply the stricter
        # host-busy check for mounted volumes
        host_busy = self.client_host_busy_for_mounted(self.process_for_bound_vol,
                                                      self.current_volume_info['volume_family'],
                                                      rq.ticket)
    else:
        host_busy = self.client_host_busy(rq.ticket)
    if host_busy:
        self.continue_scan = 1
        sg, key_to_check = self.request_key(rq)
        #return None, None
        return None, key_to_check
    # Check the presence of current_location field; fall back to the file
    # clerk location cookie when the volume clerk part does not carry it
    if not rq.ticket["vc"].has_key('current_location'):
        try:
            rq.ticket["vc"]['current_location'] = rq.ticket['fc']['location_cookie']
        except KeyError:
            Trace.log(e_errors.ERROR,"process_read_request loc cookie missing %s" %
                      (rq.ticket,))
            raise KeyError
    # request has passed about all the criterias
    # check if it passes the fair share criteria
    # temporarily store selected request to use it in case
    # when other request(s) based on fair share criteria
    # for some other reason(s) do not get selected
    # in any case if request SG limit is 0 and temporarily stored rq. SG limit is not,
    # do not update temporarily stored rq.
    # legacy encp ticket
    if not rq.ticket['vc'].has_key('volume_family'):
        rq.ticket['vc']['volume_family'] = rq.ticket['vc']['file_family']
    rq_sg = volume_family.extract_storage_group(rq.ticket['vc']['volume_family'])
    if (rq.ticket.get('ignore_fair_share', None)):
        # do not count this request against fair share
        # this is an automigration request
        sg_limit = 0
    else:
        sg_limit = self.get_sg_limit(rq_sg)
    self.postponed_requests.put(rq)
    Trace.trace(self.trace_level+4, 'process_read_request:postponed rqs %s'%(self.postponed_requests))
    if self.tmp_rq:
        #tmp_rq_sg = volume_family.extract_storage_group(self.tmp_rq.ticket['vc']['volume_family'])
        #tmp_sg_limit = self.get_sg_limit(tmp_rq_sg)
        if sg_limit != 0: # replace tmp_rq if rq SG limit is not 0
            # replace tmp_rq based on priority
            if rq.pri > self.tmp_rq.pri:
                self.tmp_rq = rq
    else: self.tmp_rq = rq
    Trace.trace(self.trace_level+4,'process_read_request:tmp_rq %s rq %s key %s'%(self.tmp_rq, rq, key_to_check))
    if self.process_for_bound_vol and (rq.ticket["fc"]["external_label"] == self.process_for_bound_vol):
        # do not continue scan if we have a bound volume.
        self.continue_scan = 0
    Trace.trace(self.trace_level+4, "process_read_request: returning %s %s"%(rq, key_to_check))
    return rq, key_to_check
def process_write_request(self, request, requestor, last_work=None, would_preempt=False):
"""
Process write request.
:type request: :obj:`manage_queue.Request`
:arg request: request to process
:type requestor: :obj:`dict`
:arg requestor: mover ticket
:type last_work: :obj:`str`
:arg last_work: last work completed by requestor
:type would_preempt: :obj:`bool`
:arg would_preempt: may this request preempt mounted on requestor volume?
:rtype: :obj:`tuple` (:obj:`manage_queue.Request` - request or :obj:`None`,
:obj:`str` - key to check next or :obj:`None`)
"""
self.continue_scan = 0 # disable "scan" of pending queue
rq = request
Trace.trace(self.trace_level+4, "process_write_request: %s"%(rq,))
key_to_check = self.fair_share(rq) # check this volume label or FF
Trace.trace(self.trace_level+4, "process_write_request: exceeded rqs %s"%(self.sg_exceeded,))
Trace.trace(self.trace_level+4,"process_write_request: key %s process for bound %s"%(key_to_check, self.process_for_bound_vol))
if key_to_check:
self.continue_scan = 1
if self.process_for_bound_vol and (key_to_check != self.process_for_bound_vol):
Trace.trace(self.trace_level+4, "process_write_request: got here")
#return rq, key_to_check
vol_family = rq.ticket["vc"]["volume_family"]
if self.mover_type(requestor) != 'DiskMover':
if not self.write_vf_list.has_key(vol_family):
vol_veto_list, wr_en = self.busy_volumes(vol_family)
Trace.trace(self.trace_level+4,"process_write_request: vol veto list:%s, width:%d ff width %s"%\
(vol_veto_list, wr_en, rq.ticket["vc"]["file_family_width"]))
self.write_vf_list[vol_family] = {'vol_veto_list':vol_veto_list, 'wr_en': wr_en}
else:
vol_veto_list = self.write_vf_list[vol_family]['vol_veto_list']
wr_en = self.write_vf_list[vol_family]['wr_en']
# only so many volumes can be written to at one time
permitted = rq.ticket["vc"]["file_family_width"]
if self.process_for_bound_vol: # allow one more for bound to avoid dismounts
# but check if this is a HiPri request and it will require dismount of currently
# mounted volume
permitted = permitted + (not would_preempt)
if self.process_for_bound_vol in vol_veto_list:
# check if request can go to this volume
ret = self.is_vol_available(rq.work,
self.process_for_bound_vol,
rq.ticket['vc']['volume_family'],
rq.ticket['wrapper'].get('size_bytes', 0L),
rq.ticket['vc']['address'],
mover = requestor.get('mover'))
if ret['status'][0] == e_errors.OK:
# permit one more write request to avoid
# tape dismount
permitted = permitted+1
Trace.trace(self.trace_level+4,
"process_write_request: self.process_for_bound_vol %s permitted %s"%
(self.process_for_bound_vol, permitted))
if wr_en >= permitted:
if self.process_for_bound_vol and self.process_for_bound_vol in vol_veto_list:
# Check if there are volumes in bound or dismount state
# in veto list and if yes (they are not active),
# allow this request go to avoid dismount of the current volume.
# Volume can be in dismount state for a long period and if we skip request
# for the current volume just for this reason the current volume also gets dismounted.
# Do not check if volume bound for the current request does not
# belong to volume family of selected request.
for vol in vol_veto_list:
if vol != self.process_for_bound_vol:
volume_state = self.volumes_at_movers.get_vol_state(vol)
if volume_state in ("HAVE_BOUND", "DISMOUNT_WAIT") and last_work == "WRITE":
permitted = permitted + 1
break
if wr_en >= permitted:
rq.ticket["reject_reason"] = ("VOLS_IN_WORK","")
if self.process_for_bound_vol:
self.continue_scan = 0 # do not continue scan for bound volume
else:
self.continue_scan = 1
#return rq, key_to_check
return None, key_to_check
else:
## check if there are any discipline restrictions
host_busy = None
if self.process_for_bound_vol:
#current_volume_info = self.current_volume_info
host_busy = self.client_host_busy_for_mounted(self.process_for_bound_vol,
self.current_volume_info['volume_family'],
request.ticket)
else:
host_busy = self.client_host_busy(rq.ticket)
if host_busy:
sg, key_to_check = self.request_key(rq)
self.continue_scan = 1
return None, key_to_check # continue with key_to_ckeck
if not self.process_for_bound_vol and rq.ticket["vc"]["file_family_width"] > 1:
# check if there is a potentially available tape at bound movers
# and if yes skip request so that it will be picked by bound mover
# this is done to avoid a single stream bouncing between different tapes
# if FF width is more than 1
Trace.trace(self.trace_level+4,'process_write_request: veto %s, wr_en %s'%(vol_veto_list, wr_en))
movers = self.volumes_at_movers.get_active_movers()
found_mover = 0
Trace.trace(self.trace_level+4, 'process_write_request: movers %s'%(movers,))
for mover in movers:
Trace.trace(self.trace_level+40, "process_write_request: mover %s state %s time %s"%(mover['mover'], mover['state'],mover['time_in_state']))
if mover['state'] == 'HAVE_BOUND' and mover['external_label'] in vol_veto_list:
found_mover = 1
break
if found_mover:
# if the number of write requests for a given file family more than the
# file family width then let it go.
Trace.trace(self.trace_level+40, "process_write_request: pending work families %s"%(self.pending_work.families,))
if (self.pending_work.families.has_key(rq.ticket["vc"]["file_family"])) and \
(self.pending_work.families[rq.ticket["vc"]["file_family"]] > rq.ticket["vc"]["file_family_width"]):
#len(matching_movers) == 1:
Trace.trace(self.trace_level+40, "process_write_request: will let this request go to idle mover")
else:
# check if file will fit to the volume at mover
fsize = rq.ticket['wrapper'].get('size_bytes', 0L)
ret = self.is_vol_available(rq.work, mover['external_label'],
rq.ticket['vc']['volume_family'],
fsize, rq.ticket['vc']['address'],
mover = requestor.get('mover'))
Trace.trace(self.trace_level+40, "process_write_request: check_write_volume returned %s"%(ret,))
if (rq.work == "write_to_hsm" and
(ret['status'][0] == e_errors.VOL_SET_TO_FULL or
ret['status'][0] == e_errors.NOSPACE or
ret['status'][0] == 'full' or
ret['status'][0] == 'readonly')):
Trace.trace(self.trace_level+40, "process_write_request: will let this request go to idle mover")
else:
Trace.trace(self.trace_level+40, 'process_write_request: will wait with this request go to %s'%
(mover,))
self.continue_scan = 1
return rq, key_to_check # this request might go to the mover
else:
# disk mover
vol_veto_list = []
host_busy = self.client_host_busy(rq.ticket)
if host_busy:
sg, key_to_check = self.request_key(rq)
self.continue_scan = 1
return None, key_to_check # continue with key_to_ckeck
Trace.trace(self.trace_level+4,"process_write_request: request next write volume for %s" % (vol_family,))
# before assigning volume check if it is bound for the current family
bound_vol = self.process_for_bound_vol
# for bound volumes check what was priority of the last request
if bound_vol and requestor["current_priority"][1] < 0:
# last prority was regular
if rq.adminpri > -1: # HIRI
if bound_vol not in vol_veto_list:
bound_vol = None # this will allow preemption of regular priority requests
else:
# Case when completed request was regular priority read request from this file family
# but the file in the next request can not be written to this volume
if would_preempt:
bound_vol = None # this will allow preemption of regular priority requests
if bound_vol not in vol_veto_list:
# width not exceeded, ask volume clerk for a new volume.
Trace.trace(self.trace_level+4,"process_write_request for %s" % (rq.ticket,))
self.set_vcc(rq.ticket['vc']['address'])
start_t=time.time()
v = self.next_write_volume(rq.ticket["vc"]["library"],
rq.ticket["wrapper"]["size_bytes"]+self.min_file_size,
vol_family,
vol_veto_list,
first_found=0,
mover=requestor)
Trace.trace(100, "process_write_request: next_write_volume, time | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated Tue Oct 10 00:42:20 2017 by generateDS.py version 2.28b.
# Python 2.7.12 (default, Nov 19 2016, 06:48:10) [GCC 5.4.0 20160609]
#
# Command line options:
# ('--no-process-includes', '')
# ('-o', 'esociallib/v2_04/evtAltContratual.py')
#
# Command line arguments:
# schemas/v2_04/evtAltContratual.xsd
#
# Command line:
# /usr/local/bin/generateDS --no-process-includes -o "esociallib/v2_04/evtAltContratual.py" schemas/v2_04/evtAltContratual.xsd
#
# Current working directory (os.getcwd()):
# esociallib
#
import sys
import re as re_
import base64
import datetime as datetime_
import warnings as warnings_
try:
from lxml import etree as etree_
except ImportError:
from xml.etree import ElementTree as etree_
# When True, the generated export/build methods check simpleType
# restrictions (patterns, enumerations) on values.
Validate_simpletypes_ = True
# Text base type used for isinstance() checks; unifies Python 2 and 3.
if sys.version_info.major == 2:
    BaseStrType_ = basestring
else:
    BaseStrType_ = str
def parsexml_(infile, parser=None, **kwargs):
    """Parse *infile* and return the resulting ElementTree document.

    When no parser is supplied, prefer lxml's ElementTree-compatible
    parser (which, e.g., ignores comments); fall back to the plain
    XMLParser when running on xml.etree instead of lxml.
    """
    if parser is None:
        try:
            parser = etree_.ETCompatXMLParser()
        except AttributeError:
            # xml.etree has no ETCompatXMLParser
            parser = etree_.XMLParser()
    return etree_.parse(infile, parser=parser, **kwargs)
#
# Namespace prefix definition table (and other attributes, too)
#
# The module generatedsnamespaces, if it is importable, must contain
# a dictionary named GeneratedsNamespaceDefs. This Python dictionary
# should map element type names (strings) to XML schema namespace prefix
# definitions. The export method for any class for which there is
# a namespace prefix definition, will export that definition in the
# XML representation of that element. See the export method of
# any generated element type class for a example of the use of this
# table.
# A sample table is:
#
# # File: generatedsnamespaces.py
#
# GenerateDSNamespaceDefs = {
# "ElementtypeA": "http://www.xxx.com/namespaceA",
# "ElementtypeB": "http://www.xxx.com/namespaceB",
# }
#
try:
from generatedsnamespaces import GenerateDSNamespaceDefs as GenerateDSNamespaceDefs_
except ImportError:
GenerateDSNamespaceDefs_ = {}
#
# The root super-class for element type classes
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError as exp:
class GeneratedsSuper(object):
    """Fallback superclass for generated element type classes.

    Supplies the gds_* format/validate/parse helpers that classes emitted
    by generateDS.py call.  This definition is used only when a
    project-local ``generatedssuper`` module cannot be imported.
    """
    # Matches a trailing timezone offset such as "+05:30" or "-14:00".
    tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')
    class _FixedOffsetTZ(datetime_.tzinfo):
        # Minimal fixed-offset tzinfo (offset given in minutes).
        def __init__(self, offset, name):
            self.__offset = datetime_.timedelta(minutes=offset)
            self.__name = name
        def utcoffset(self, dt):
            return self.__offset
        def tzname(self, dt):
            return self.__name
        def dst(self, dt):
            # A fixed offset carries no DST information.
            return None
    # --- simple scalar formatting/validation helpers -----------------
    # format_* turn Python values into XML text; validate_* sanity-check
    # parsed text (NOTE: raise_parse_error is defined elsewhere in the
    # generated module).
    def gds_format_string(self, input_data, input_name=''):
        return input_data
    def gds_validate_string(self, input_data, node=None, input_name=''):
        if not input_data:
            return ''
        else:
            return input_data
    def gds_format_base64(self, input_data, input_name=''):
        return base64.b64encode(input_data)
    def gds_validate_base64(self, input_data, node=None, input_name=''):
        return input_data
    def gds_format_integer(self, input_data, input_name=''):
        return '%d' % input_data
    def gds_validate_integer(self, input_data, node=None, input_name=''):
        return input_data
    def gds_format_integer_list(self, input_data, input_name=''):
        # list values are serialized space-separated
        return '%s' % ' '.join(input_data)
    def gds_validate_integer_list(
            self, input_data, node=None, input_name=''):
        values = input_data.split()
        for value in values:
            try:
                int(value)
            except (TypeError, ValueError):
                raise_parse_error(node, 'Requires sequence of integers')
        return values
    def gds_format_float(self, input_data, input_name=''):
        # trim trailing zeros from the fixed-point representation
        return ('%.15f' % input_data).rstrip('0')
    def gds_validate_float(self, input_data, node=None, input_name=''):
        return input_data
    def gds_format_float_list(self, input_data, input_name=''):
        return '%s' % ' '.join(input_data)
    def gds_validate_float_list(
            self, input_data, node=None, input_name=''):
        values = input_data.split()
        for value in values:
            try:
                float(value)
            except (TypeError, ValueError):
                raise_parse_error(node, 'Requires sequence of floats')
        return values
    def gds_format_double(self, input_data, input_name=''):
        return '%e' % input_data
    def gds_validate_double(self, input_data, node=None, input_name=''):
        return input_data
    def gds_format_double_list(self, input_data, input_name=''):
        return '%s' % ' '.join(input_data)
    def gds_validate_double_list(
            self, input_data, node=None, input_name=''):
        values = input_data.split()
        for value in values:
            try:
                float(value)
            except (TypeError, ValueError):
                raise_parse_error(node, 'Requires sequence of doubles')
        return values
    def gds_format_boolean(self, input_data, input_name=''):
        return ('%s' % input_data).lower()
    def gds_validate_boolean(self, input_data, node=None, input_name=''):
        return input_data
    def gds_format_boolean_list(self, input_data, input_name=''):
        return '%s' % ' '.join(input_data)
    def gds_validate_boolean_list(
            self, input_data, node=None, input_name=''):
        values = input_data.split()
        for value in values:
            if value not in ('true', '1', 'false', '0', ):
                raise_parse_error(
                    node,
                    'Requires sequence of booleans '
                    '("true", "1", "false", "0")')
        return values
    # --- date/time formatting and parsing ----------------------------
    def gds_validate_datetime(self, input_data, node=None, input_name=''):
        return input_data
    def gds_format_datetime(self, input_data, input_name=''):
        # ISO-8601 with optional fractional seconds and UTC offset
        if input_data.microsecond == 0:
            _svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % (
                input_data.year,
                input_data.month,
                input_data.day,
                input_data.hour,
                input_data.minute,
                input_data.second,
            )
        else:
            _svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % (
                input_data.year,
                input_data.month,
                input_data.day,
                input_data.hour,
                input_data.minute,
                input_data.second,
                ('%f' % (float(input_data.microsecond) / 1000000))[2:],
            )
        if input_data.tzinfo is not None:
            tzoff = input_data.tzinfo.utcoffset(input_data)
            if tzoff is not None:
                total_seconds = tzoff.seconds + (86400 * tzoff.days)
                if total_seconds == 0:
                    _svalue += 'Z'
                else:
                    if total_seconds < 0:
                        _svalue += '-'
                        total_seconds *= -1
                    else:
                        _svalue += '+'
                    hours = total_seconds // 3600
                    minutes = (total_seconds - (hours * 3600)) // 60
                    _svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
        return _svalue
    @classmethod
    def gds_parse_datetime(cls, input_data):
        # strip a trailing 'Z' or '+HH:MM'/'-HH:MM' offset into a tzinfo,
        # then parse the remainder with strptime
        tz = None
        if input_data[-1] == 'Z':
            tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
            input_data = input_data[:-1]
        else:
            results = GeneratedsSuper.tzoff_pattern.search(input_data)
            if results is not None:
                tzoff_parts = results.group(2).split(':')
                tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
                if results.group(1) == '-':
                    tzoff *= -1
                tz = GeneratedsSuper._FixedOffsetTZ(
                    tzoff, results.group(0))
                input_data = input_data[:-6]
        time_parts = input_data.split('.')
        if len(time_parts) > 1:
            # normalize fractional seconds to microseconds for %f
            micro_seconds = int(float('0.' + time_parts[1]) * 1000000)
            input_data = '%s.%s' % (time_parts[0], micro_seconds, )
            dt = datetime_.datetime.strptime(
                input_data, '%Y-%m-%dT%H:%M:%S.%f')
        else:
            dt = datetime_.datetime.strptime(
                input_data, '%Y-%m-%dT%H:%M:%S')
        dt = dt.replace(tzinfo=tz)
        return dt
    def gds_validate_date(self, input_data, node=None, input_name=''):
        return input_data
    def gds_format_date(self, input_data, input_name=''):
        _svalue = '%04d-%02d-%02d' % (
            input_data.year,
            input_data.month,
            input_data.day,
        )
        try:
            # datetime.date has no tzinfo; AttributeError is expected then
            if input_data.tzinfo is not None:
                tzoff = input_data.tzinfo.utcoffset(input_data)
                if tzoff is not None:
                    total_seconds = tzoff.seconds + (86400 * tzoff.days)
                    if total_seconds == 0:
                        _svalue += 'Z'
                    else:
                        if total_seconds < 0:
                            _svalue += '-'
                            total_seconds *= -1
                        else:
                            _svalue += '+'
                        hours = total_seconds // 3600
                        minutes = (total_seconds - (hours * 3600)) // 60
                        _svalue += '{0:02d}:{1:02d}'.format(
                            hours, minutes)
        except AttributeError:
            pass
        return _svalue
    @classmethod
    def gds_parse_date(cls, input_data):
        tz = None
        if input_data[-1] == 'Z':
            tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
            input_data = input_data[:-1]
        else:
            results = GeneratedsSuper.tzoff_pattern.search(input_data)
            if results is not None:
                tzoff_parts = results.group(2).split(':')
                tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
                if results.group(1) == '-':
                    tzoff *= -1
                tz = GeneratedsSuper._FixedOffsetTZ(
                    tzoff, results.group(0))
                input_data = input_data[:-6]
        dt = datetime_.datetime.strptime(input_data, '%Y-%m-%d')
        dt = dt.replace(tzinfo=tz)
        return dt.date()
    def gds_validate_time(self, input_data, node=None, input_name=''):
        return input_data
    def gds_format_time(self, input_data, input_name=''):
        if input_data.microsecond == 0:
            _svalue = '%02d:%02d:%02d' % (
                input_data.hour,
                input_data.minute,
                input_data.second,
            )
        else:
            _svalue = '%02d:%02d:%02d.%s' % (
                input_data.hour,
                input_data.minute,
                input_data.second,
                ('%f' % (float(input_data.microsecond) / 1000000))[2:],
            )
        if input_data.tzinfo is not None:
            tzoff = input_data.tzinfo.utcoffset(input_data)
            if tzoff is not None:
                total_seconds = tzoff.seconds + (86400 * tzoff.days)
                if total_seconds == 0:
                    _svalue += 'Z'
                else:
                    if total_seconds < 0:
                        _svalue += '-'
                        total_seconds *= -1
                    else:
                        _svalue += '+'
                    hours = total_seconds // 3600
                    minutes = (total_seconds - (hours * 3600)) // 60
                    _svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
        return _svalue
    def gds_validate_simple_patterns(self, patterns, target):
        # patterns is a list of lists of strings/patterns. We should:
        # - AND the outer elements
        # - OR the inner elements
        found1 = True
        for patterns1 in patterns:
            found2 = False
            for patterns2 in patterns1:
                if re_.search(patterns2, target) is not None:
                    found2 = True
                    break
            if not found2:
                found1 = False
                break
        return found1
    @classmethod
    def gds_parse_time(cls, input_data):
        tz = None
        if input_data[-1] == 'Z':
            tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
            input_data = input_data[:-1]
        else:
            results = GeneratedsSuper.tzoff_pattern.search(input_data)
            if results is not None:
                tzoff_parts = results.group(2).split(':')
                tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
                if results.group(1) == '-':
                    tzoff *= -1
                tz = GeneratedsSuper._FixedOffsetTZ(
                    tzoff, results.group(0))
                input_data = input_data[:-6]
        if len(input_data.split('.')) > 1:
            dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f')
        else:
            dt = datetime_.datetime.strptime(input_data, '%H:%M:%S')
        dt = dt.replace(tzinfo=tz)
        return dt.time()
    def gds_str_lower(self, instring):
        return instring.lower()
    # --- tree navigation helpers -------------------------------------
    def get_path_(self, node):
        # build a '/'-joined path of tags from the root down to *node*
        path_list = []
        self.get_path_list_(node, path_list)
        path_list.reverse()
        path = '/'.join(path_list)
        return path
    # Strips the '{namespace}' prefix from a qualified tag name.
    Tag_strip_pattern_ = re_.compile(r'\{.*\}')
    def get_path_list_(self, node, path_list):
        if node is None:
            return
        tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
        if tag:
            path_list.append(tag)
        # NOTE(review): getparent() is an lxml-only API; this helper
        # presumably is not reached on the xml.etree fallback -- confirm
        self.get_path_list_(node.getparent(), path_list)
    def get_class_obj_(self, node, default_class=None):
        # honor an xsi:type attribute by looking up the named class in
        # this module's globals; otherwise keep default_class
        class_obj1 = default_class
        if 'xsi' in node.nsmap:
            classname = node.get('{%s}type' % node.nsmap['xsi'])
            if classname is not None:
                names = classname.split(':')
                if len(names) == 2:
                    classname = names[1]
                class_obj2 = globals().get(classname)
                if class_obj2 is not None:
                    class_obj1 = class_obj2
        return class_obj1
    def gds_build_any(self, node, type_name=None):
        # hook for xs:any content; the generated default keeps nothing
        return None
    @classmethod
    def gds_reverse_node_mapping(cls, mapping):
        # NOTE(review): dict.iteritems() exists only on Python 2; this
        # classmethod would raise AttributeError on Python 3 -- confirm
        # whether it is ever called there.
        return dict(((v, k) for k, v in mapping.iteritems()))
    @staticmethod
    def gds_encode(instring):
        # ExternalEncoding is defined elsewhere in the generated module
        if sys.version_info.major == 2:
            return instring.encode(ExternalEncoding)
        else:
            return instring
    @staticmethod
    def convert_unicode(instring):
        # quote_xml is defined elsewhere in the generated module
        if isinstance(instring, str):
            result = quote_xml(instring)
        elif sys.version_info.major == 2 and isinstance(instring, unicode):
            result = quote_xml(instring).encode('utf8')
        else:
            result = GeneratedsSuper.gds_encode(str(instring))
        return result
    def __eq__(self, other):
        # structural equality: same concrete type and same attributes
        if type(self) != type(other):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not self.__eq__(other)
def getSubclassFromModule_(module, class_):
'''Get the | |
# repo: codegrande/hammer
import boto3
import json
import logging
import configparser
import re
from functools import lru_cache
from datetime import datetime, timedelta, timezone
class Config(object):
    """
    Basic class to deal with hammer configuration.

    It reads local json/ini files (and DynamoDB for service credentials)
    to construct the configuration object used across the application.
    """
    def __init__(self,
                 configFile="config.json",
                 configIniFile="config.ini",
                 whitelistFile="whitelist.json",
                 fixnowFile="fixnow.json",
                 ticketOwnersFile="ticket_owners.json"):
        """
        :param configFile: local path to configuration file in json format
        :param configIniFile: local path to configuration file in ini format (is used in r&r EC2, built from EC2 userdata)
        :param whitelistFile: local path to whitelist file in json format
        :param fixnowFile: local path to fixnow file in json format
        :param ticketOwnersFile: local path to file with default ticket owners by bu/account in json format
        """
        # main config is mandatory; whitelist/fixnow degrade to empty dicts
        self._config = self.json_load_from_file(configFile)
        self._config['whitelist'] = self.json_load_from_file(whitelistFile, default={})
        self._config['fixnow'] = self.json_load_from_file(fixnowFile, default={})
        self.local = LocalConfig(configIniFile)
        self.owners = OwnersConfig(self.json_load_from_file(ticketOwnersFile, default={}))
        self.cronjobs = self._config.get('cronjobs', {})
        self.csv = BaseConfig(self._config, 'csv')
        self.slack_channel = self._config.get("default_channel", None)
        self.aws = AWSConfig(self._config)
        # security group issue config
        self.sg = ModuleConfig(self._config, "secgrp_unrestricted_access")
        # CloudTrails issue config
        self.cloudtrails = ModuleConfig(self._config, "cloudtrails")
        # S3 policy issue config
        self.s3policy = ModuleConfig(self._config, "s3_bucket_policy")
        # S3 ACL issue config
        self.s3acl = ModuleConfig(self._config, "s3_bucket_acl")
        # IAM inactive keys issue config
        self.iamUserInactiveKeys = IAMUserInactiveKeysConfig(self._config, "user_inactivekeys")
        # IAM keys rotation issue config
        self.iamUserKeysRotation = IAMUserKeysRotationConfig(self._config, "user_keysrotation")
        # EBS unencrypted volume issue config
        self.ebsVolume = ModuleConfig(self._config, "ebs_unencrypted_volume")
        # EBS public snapshot issue config
        self.ebsSnapshot = ModuleConfig(self._config, "ebs_public_snapshot")
        # RDS public snapshot issue config
        self.rdsSnapshot = ModuleConfig(self._config, "rds_public_snapshot")
        self.bu_list = self._config.get("bu_list", [])
        jira_config = self._config.get('jira', {})
        # credentials to access JIRA
        jira_config["credentials"] = self.json_load_from_ddb(self._config["credentials"]["ddb.table_name"],
                                                             self.aws.region,
                                                             "jira")
        self.jira = JiraConfig(jira_config)
        slack_config = self._config.get('slack', {})
        # credentials to access Slack
        slack_config["credentials"] = self.json_load_from_ddb(self._config["credentials"]["ddb.table_name"],
                                                              self.aws.region,
                                                              "slack")
        # Slack configuration
        self.slack = SlackConfig(slack_config)

    def get_bu_by_name(self, name):
        """
        Guess BU value from the issue name.

        :param name: string with issue name to check BU in
        :return: string with BU value or None
        """
        # first configured BU whose tag appears as a substring wins
        for bu in self.bu_list:
            if bu in name:
                return bu
        return None

    @property
    def modules(self):
        """
        :return: list with issue configuration modules
        """
        return [value for value in vars(self).values() if isinstance(value, ModuleConfig)]

    @property
    def now(self):
        """ :return: current timezone-aware UTC datetime """
        return datetime.now(timezone.utc)

    def json_load_from_file(self, filename, default=None):
        """
        Loads json from config file to dictionary.

        :param filename: file name to load config from
        :param default: default value in case if file was not found/failed to parse
        :return: dict with config file content or default value

        .. note:: can raise exception if file can't be loaded/parsed and default is not set
        """
        try:
            with open(filename, "rb") as fh:
                return json.loads(fh.read())
        except Exception as err:
            if default is not None:
                return default
            # no fallback configured - report the failing file and re-raise
            # (fix: the message previously logged a literal "(unknown)"
            # instead of the file name, making failures hard to diagnose)
            logging.error(f"can't get config from '{filename}'\n{err}")
            raise

    def json_load_from_ddb(self, table, region, key):
        """
        Loads json from DDB table.

        :param table: str, DDB table name to search config in
        :param region: str, DDB table region
        :param key: 'service' key to get config from
        :return: dict with config content from DDB

        .. note:: returns empty dict in case of errors
        """
        try:
            dynamodb = boto3.resource('dynamodb', region_name=region)
            # keep the table *name* untouched so the except branch logs it
            # (fix: `table` was previously rebound to the Table resource,
            # so the warning printed the object instead of the name)
            ddb_table = dynamodb.Table(table)
            response = ddb_table.get_item(Key={'service': key})
            return response.get("Item", {}).get("credentials", {})
        except Exception as err:
            logging.warning(f"Failed to load json from DDB '{table}' with '{key}' key\n{err}")
            return {}

    @property
    def source(self):
        """ :return: pretty formatted content of main config.json """
        return json.dumps(self._config, indent=4)
class OwnersConfig(object):
    """
    Helps to look for JIRA ticket owner and parent ticket in config file (ticket_owners.json).
    """
    def __init__(self, owners):
        """
        :param owners: content of `ticket_owners.json`
        """
        self.owners = owners

    def ticket_prop(self, prop, bu=None, product=None, account=None):
        """
        :param prop: property to search for
        :param bu: `business unit` tag
        :param product: `product` tag
        :param account: AWS account Id
        :return: ticket property based on business unit, product, AWS account or None
        """
        # lookups are ordered from most to least specific
        bu_section = self.owners.get("bu", {}).get(bu, {})
        in_product = bu_section.get("product", {}).get(product, {}).get(prop, None)
        if in_product:
            logging.debug(f"Found '{prop}' in bu/product: {in_product}")
            return in_product
        in_bu = bu_section.get(prop, None)
        if in_bu:
            logging.debug(f"Found '{prop}' in bu: {in_bu}")
            return in_bu
        in_account = self.owners.get("account", {}).get(account, {}).get(prop, None)
        if in_account:
            logging.debug(f"Found '{prop}' in account: {in_account}")
            return in_account
        default = self.owners.get(prop, None)
        logging.debug(f"Failed to find '{prop}', returning default: {default}")
        return default

    def ticket_owner(self, bu=None, product=None, account=None):
        """
        :param bu: `business unit` tag
        :param product: `product` tag
        :param account: AWS account Id
        :return: ticket owner based on business unit, product, AWS account or None
        """
        return self.ticket_prop("jira_owner", bu, product, account)

    def slack_owner(self, bu=None, product=None, account=None):
        """
        :param bu: `business unit` tag
        :param product: `product` tag
        :param account: AWS account Id
        :return: list with slack owners based on business unit, product, AWS account
        """
        owner = self.ticket_prop("slack_owner", bu, product, account)
        if owner is None:
            return []
        if isinstance(owner, str):
            return [owner]
        if isinstance(owner, list):
            # copy the configured list - callers may mutate the result
            return list(owner)
        return owner

    def ticket_parent(self, bu=None, product=None, account=None):
        """
        :param bu: `business unit` tag
        :param product: `product` tag
        :param account: AWS account Id
        :return: parent ticket Id based on business unit, product, AWS account or None
        """
        return self.ticket_prop("jira_parent_ticket", bu, product, account)

    def ticket_project(self, bu=None, product=None, account=None):
        """
        :param bu: `business unit` tag
        :param product: `product` tag
        :param account: AWS account Id
        :return: JIRA project name based on business unit, product, AWS account or None
        """
        return self.ticket_prop("jira_project", bu, product, account)
class JiraConfig(object):
    """ Base class for JIRA """
    def __init__(self, config):
        self._config = config

    @property
    def enabled(self):
        """ :return: boolean, if Jira integration should be used """
        return self._config.get("enabled", False)

    @property
    def text_field_character_limit(self):
        """ :return: configured character limit for Jira text fields (defaults to 0) """
        return self._config.get("text_field_character_limit", 0)

    def __getattr__(self, key):
        """ Search for any attribute in config, if not found - raise exception """
        try:
            return self._config[key]
        except KeyError:
            raise AttributeError(f"section 'jira' has no option '{key}'")
class SlackConfig(object):
    """ Base class for Slack logging """
    def __init__(self, config):
        self._config = config
        # default channel to log
        self.default_channel = self._config.get("default_channel", None)
        # channels to log matched messages to based on RE patterns (pre-compile them for faster search)
        self.channels = {
            name: [re.compile(expr) for expr in exprs]
            for name, exprs in self._config["channels"].items()
        }
        # messages matching any of these patterns are dropped entirely
        self.ignore = [re.compile(expr) for expr in self._config.get("ignore", [])]

    def find_channel(self, msg):
        """
        Find a Slack channel to send message to based on message body
        :param msg: message body to match
        :return: string with channel name or None
        """
        # ignore messages based on patterns from config
        if any(rx.search(msg) for rx in self.ignore):
            return None
        # find channel to log message to based on message content
        for name, exprs in self.channels.items():
            if any(rx.search(msg) for rx in exprs):
                return name
        return self.default_channel

    @property
    def notify_default_owner(self):
        """ :return: boolean, whether the default owner should also be notified """
        return self._config.get('notify_default_owner', True)

    @property
    def enabled(self):
        """ :return: boolean, if Slack integration should be used """
        return self._config.get('enabled', False)

    @property
    def api_token(self):
        """ :return: Slack API token from credentials, empty string when not configured """
        return self._config.get("credentials", {}).get("api_token", "")
class LocalConfig(object):
    """ r&r EC2 local config in ini format. Assumes plain structure without sections, only options """
    def __init__(self, inifile):
        # parser always exists so attribute lookups stay safe even when loading fails
        self.cfg = configparser.ConfigParser()
        try:
            with open(inifile, "rt") as fh:
                raw = fh.read()
        except Exception:
            # best effort: a missing/unreadable file just leaves the parser empty
            return
        # the file has no section headers - prepend a synthetic one for configparser
        self.cfg.read_string(f"[default]\n{raw}")

    def __getattr__(self, key):
        """ Search for any attribute in config, if not found - return None """
        try:
            return self.cfg.get("default", key)
        except configparser.NoSectionError:
            return None
        except configparser.NoOptionError:
            logging.warning(f"Local config has no option '{key}'")
            return None
class BaseConfig(object):
    """ Base class for configuration file sections """
    def __init__(self, config, section):
        # name of the current section
        self.section = section
        # part of config dict for given section
        self._config = config[section]

    def __getattr__(self, key):
        """ Search for any attribute in config, if not found - raise exception """
        try:
            return self._config[key]
        except KeyError:
            raise AttributeError(f"section '{self.section}' has no option '{key}'")
class AWSConfig(BaseConfig):
""" represents AWS configuration part in config.json """
def __init__(self, config):
super().__init__(config, "aws")
@property
@lru_cache()
def regions(self):
"""
:return: list of AWS regions to check based on regions in main account or hardcoded list in config
.. note:: auto detection of available AWS regions works only if "regions" key is not present in "aws" config section.
"""
# for testing purposes allow regions overriding
if "regions" in self._config:
return self._config["regions"]
# TODO: that is not the 100% right way | |
import os
import numpy as np
import logging
import h5py
from ..builder_utils import mpi_rank, mpi_size, barrier
from .edge_props_table import EdgeTypesTableMPI
logger = logging.getLogger(__name__)
class EdgesCollatorSingular(object):
    """Used to collect all the edges data-tables created and stored in the EdgeTypesTable to simplify the process
    of saving into a SONATA edges file. All the actual edges may be stored across diffrent edge-type-tables/mpi-ranks
    and needs to be merged together (and possibly sorted) before writing to HDF5 file.
    """
    def __init__(self, edge_types_tables, network_name):
        """
        :param edge_types_tables: list of edge-types tables holding the built per-edge-type data
        :param network_name: str, name of the network the edges belong to
        """
        self._edge_types_tables = edge_types_tables
        self._network_name = network_name
        self._model_groups_md = {}  # group_id -> {'prop_names', 'prop_type', 'prop_size'}
        self._group_ids_lu = {}  # edge-type hash-key -> assigned group_id
        self._grp_id_itr = 0  # next group_id to hand out

        self.n_total_edges = sum(et.n_edges for et in edge_types_tables)
        self.assign_groups()

        self.can_sort = True
        # flat per-edge columns, populated by process()
        self.source_ids = None
        self.target_ids = None
        self.edge_type_ids = None
        self.edge_group_ids = None
        self.edge_group_index = None
        self._prop_data = {}  # group_id -> {prop_name: flat ndarray}

    def process(self):
        """Concatenate all edge-types tables into flat in-memory arrays, one entry per edge."""
        logger.debug('Processing and collating {:,} edges.'.format(self.n_total_edges))

        self.source_ids = np.zeros(self.n_total_edges, dtype=np.uint)
        self.target_ids = np.zeros(self.n_total_edges, dtype=np.uint)
        self.edge_type_ids = np.zeros(self.n_total_edges, dtype=np.uint32)
        self.edge_group_ids = np.zeros(self.n_total_edges, dtype=np.uint32)
        self.edge_group_index = np.zeros(self.n_total_edges, dtype=np.uint32)
        self._prop_data = {
            g_id: {
                n: np.zeros(g_md['prop_size'], dtype=t) for n, t in zip(g_md['prop_names'], g_md['prop_type'])
            } for g_id, g_md in self._model_groups_md.items()
        }

        idx_beg = 0
        group_idx = {g_id: 0 for g_id in self._model_groups_md.keys()}  # per-group write offset
        for et in self._edge_types_tables:
            idx_end = idx_beg + et.n_edges

            src_trg_ids = et.edge_type_node_ids
            self.source_ids[idx_beg:idx_end] = src_trg_ids[:, 0]
            self.target_ids[idx_beg:idx_end] = src_trg_ids[:, 1]
            self.edge_type_ids[idx_beg:idx_end] = et.edge_type_id
            self.edge_group_ids[idx_beg:idx_end] = et.edge_group_id

            group_idx_beg = group_idx[et.edge_group_id]
            group_idx_end = group_idx_beg + et.n_edges
            self.edge_group_index[idx_beg:idx_end] = np.arange(group_idx_beg, group_idx_end, dtype=np.uint32)
            for pname, pdata in self._prop_data[et.edge_group_id].items():
                pdata[group_idx_beg:group_idx_end] = et.get_property_value(pname)

            idx_beg = idx_end
            group_idx[et.edge_group_id] = group_idx_end
            # raw table data is no longer needed once copied into the flat arrays
            et.free_data()

    @property
    def group_ids(self):
        """ :return: list of model-group ids present in the collated data """
        return list(self._prop_data.keys())

    def get_group_metadata(self, group_id):
        """ :return: list of {'name', 'type', 'dim'} dicts describing the given model group """
        grp_md = self._model_groups_md[group_id]
        grp_dim = (grp_md['prop_size'], )
        return [{'name': n, 'type': t, 'dim': grp_dim} for n, t in zip(grp_md['prop_names'], grp_md['prop_type'])]

    def assign_groups(self):
        """Assign an edge_group_id to every edge-types table; tables with the same property
        names/types (same hash_key) share a group."""
        for et in self._edge_types_tables:
            # Assign each edge type a group_id based on the edge-type properties. When two edge-types tables use the
            # same model properties (in the hdf5) they should be put into the same group
            edge_types_hash = et.hash_key
            if edge_types_hash not in self._group_ids_lu:
                self._group_ids_lu[edge_types_hash] = self._grp_id_itr

                group_metadata = et.get_property_metatadata()
                self._model_groups_md[self._grp_id_itr] = {
                    'prop_names': [p['name'] for p in group_metadata],
                    'prop_type': [p['dtype'] for p in group_metadata],
                    'prop_size': 0
                }
                self._grp_id_itr += 1

            group_id = self._group_ids_lu[edge_types_hash]
            et.edge_group_id = group_id

            # number of rows in each model group
            self._model_groups_md[group_id]['prop_size'] += et.n_edges

    def itr_chunks(self):
        """Yield (chunk_id, idx_beg, idx_end); the in-memory collator keeps everything in one chunk."""
        chunk_id = 0
        idx_beg = 0
        idx_end = self.n_total_edges
        yield chunk_id, idx_beg, idx_end

    def get_target_node_ids(self, chunk_id):
        return self.target_ids

    def get_source_node_ids(self, chunk_id):
        return self.source_ids

    def get_edge_type_ids(self, chunk_id):
        return self.edge_type_ids

    def get_edge_group_ids(self, chunk_id):
        return self.edge_group_ids

    def get_edge_group_indices(self, chunk_id):
        return self.edge_group_index

    def get_group_data(self, chunk_id):
        """ :return: list of (group_id, prop_name, idx_beg, idx_end) tuples covering all group data """
        ret_val = []
        for group_id in self._prop_data.keys():
            for group_name in self._prop_data[group_id].keys():
                idx_end = len(self._prop_data[group_id][group_name])
                ret_val.append((group_id, group_name, 0, idx_end))
        return ret_val

    def get_group_property(self, group_name, group_id, chunk_id):
        return self._prop_data[group_id][group_name]

    def sort(self, sort_by, sort_group_properties=True):
        """In memory sort of the dataset
        :param sort_by: sonata column to sort by; one of 'target_node_id', 'source_node_id',
            'edge_type_id', 'edge_group_id'
        :param sort_group_properties: if True, also reorder each group's property arrays so the
            edge_group_index column stays monotonic within each group
        """
        # Find the edges hdf5 column to sort by
        if sort_by == 'target_node_id':
            sort_ds = self.target_ids
        elif sort_by == 'source_node_id':
            sort_ds = self.source_ids
        elif sort_by == 'edge_type_id':
            sort_ds = self.edge_type_ids
        elif sort_by == 'edge_group_id':
            sort_ds = self.edge_group_ids
        else:
            logger.warning('Unable to sort dataset, unrecognized column {}.'.format(sort_by))
            return

        # check if dataset is already sorted (ascending, matching np.argsort order).
        # BUGFIX: the original used `np.all(np.diff(sort_ds) <= 0)`, which detects a *descending*
        # dataset and skipped sorting exactly when it was needed. An element-wise comparison is
        # used instead of np.diff because these columns are unsigned and diff wraps around.
        if np.all(sort_ds[:-1] <= sort_ds[1:]):
            return

        # Find order of arguments of sorted arrays, and sort main columns
        sort_idx = np.argsort(sort_ds)
        self.source_ids = self.source_ids[sort_idx]
        self.target_ids = self.target_ids[sort_idx]
        self.edge_type_ids = self.edge_type_ids[sort_idx]
        self.edge_group_ids = self.edge_group_ids[sort_idx]
        self.edge_group_index = self.edge_group_index[sort_idx]

        if sort_group_properties:
            # For sorting group properties, so the "edge_group_index" column is sorted (wrt each edge_group_id). Note
            # that it is not strictly necessary to sort the group properties, as sonata edge_group_index keeps the
            # reference, but doing the sorting may improve memory/efficency during setting up a simulation
            for grp_id, grp_props in self._prop_data.items():
                # Filter out edge_group_index array for each group_id, get the new order and apply to each property.
                grp_id_filter = np.argwhere(self.edge_group_ids == grp_id).flatten()
                updated_order = self.edge_group_index[grp_id_filter]
                for prop_name, prop_data in grp_props.items():
                    grp_props[prop_name] = prop_data[updated_order]

                # reorder the edge_group_index (for values with corresponding group_id)
                self.edge_group_index[grp_id_filter] = np.arange(0, len(grp_id_filter), dtype=np.uint32)
class EdgesCollatorMPI(object):
"""For collecting all the different edge-types tables to make writing edges and iterating over the entire network
easier. Similar to above but for when edge-rules data are split across multiple MPI ranks/processors. Can also
be utlized for single core building when the network is too big to store in memory at once.
TODO: Consider saving tables to memory on each rank, and using MPI Gather/Send.
"""
def __init__(self, edge_types_tables, network_name):
self._edge_types_tables = edge_types_tables
self._network_name = network_name
self.n_total_edges = 0 # total number of edges across all ranks
self.n_local_edges = 0 # total number of edges for just those edge-types saved on current rank
self._edges_by_rank = {r: 0 for r in range(mpi_size)} # number of edges per .edge_types_table*h5 file
self._rank_offsets = [0]
self._model_groups_md = {} # keep track of all the edge-types metdata/properties across all ranks
self._group_ids = set() # keep track of all group_ids
self._group_offsets = {}
self._proc_fhandles = {} # reference to open readable hdf5 handles.
self.can_sort = False
def process(self):
barrier()
# Iterate through all the temp hdf5 edge-type files on the disk and gather information about all the
# different edge-types and their properties.
# NOTE: Assumes that each .edge_type_table*h5 file contains unique edge_type_ids
for rank in range(mpi_size):
rank_edge_table_path = EdgeTypesTableMPI.get_tmp_table_path(rank=rank, name=self._network_name)
if os.path.exists(rank_edge_table_path): # possible .tmp file doesn't exist
with h5py.File(rank_edge_table_path, 'r') as edge_table_h5:
for et_id, et_grp in edge_table_h5['unprocessed'][self._network_name].items():
et_size = et_grp.attrs['size']
self.n_total_edges += et_size # et_grp.attrs['size']
self.n_local_edges += et_size if rank == mpi_rank else 0
edge_type_id = int(et_id)
self._edges_by_rank[rank] += et_size
self._rank_offsets.append(self._rank_offsets[-1] + et_size)
# Save metadata about the edge-type-id
group_hash_key = et_grp.attrs['hash_key']
self._model_groups_md[edge_type_id] = {
'hash_key': group_hash_key,
'size': et_size,
'rank': rank,
'properties': []
}
et_props = [(n, d) for n, d in et_grp.items() if n not in ['source_node_id', 'target_node_id']]
for pname, pdata in et_props:
self._model_groups_md[edge_type_id]['properties'].append({
'name': pname,
'type': pdata.dtype
})
# Assign the group_ids for each edge-type. If two or more edge-types contain the same property name+types they
# should be assigned to the same group_id. Must also take care to unify group_id's across multiple
# .edge_type_table*h5 files
group_hashes = set([mg['hash_key'] for mg in self._model_groups_md.values()])
ordred_group_hashes = list(group_hashes)
ordred_group_hashes.sort() # should ensure order will be uniform across all MPI ranks, maybe use AllGather?
grp_id_map = {} # keep track of what hash keys match what group_ids
grp_id_itr = 0
for edge_type_id, mg in self._model_groups_md.items():
hash_key = mg['hash_key']
if hash_key not in grp_id_map:
# assign hash to the next group_id
grp_id_map[hash_key] = grp_id_itr
self._group_ids.add(grp_id_itr)
grp_id_itr += 1
mg['edge_group_id'] = grp_id_map[hash_key]
# For model-group, figure out where the offset for that group occurs on each rank. This is so we can align
# edge_group_index across multiple mpi ranks below.
group_rank_sizes = {group_id: np.zeros(mpi_size+1, dtype=np.uint32) for group_id in self._group_ids}
for edge_type_id, mg in self._model_groups_md.items():
rank = mg['rank']
group_id = mg['edge_group_id']
group_rank_sizes[group_id][rank+1] += mg['size']
group_rank_offsets = {}
for group_id, group_sizes in group_rank_sizes.items():
group_rank_offsets[group_id] = np.cumsum(group_sizes)
# collect info on local edge-group-properties to simplify things when building /processed data on rank
local_group_props = {}
et_count = 0
for edge_type_id, mg_grp in self._model_groups_md.items():
if mg_grp['rank'] != mpi_rank:
continue
group_id = mg_grp['edge_group_id']
if group_id not in local_group_props:
local_group_props[group_id] = {
'size': 0,
'pnames': [p['name'] for p in mg_grp['properties']],
'ptypes': [p['type'] for p in mg_grp['properties']]
}
local_group_props[group_id]['size'] += mg_grp['size']
et_count += 1
# Try to take all the edge-types-tables on the current rank and combine them into one "processed" table (and
# multiple model groups). There will be a penalty for doing more disk writing, and doing this part is not
# strictly necessary. But for very large and complicated network it will make the process more parallizable
# (since the writing is only done on one rank).
unprocessed_h5_path = EdgeTypesTableMPI.get_tmp_table_path(rank=mpi_rank, name=self._network_name)
if os.path.exists(unprocessed_h5_path):
unprocessed_h5 = h5py.File(unprocessed_h5_path.format(mpi_rank), 'r')
else:
# It is possible a .edge_types_table.<rank>.h5 file doesn't exist because there are no edges on that rank.
# As such hack together a fake hdf5 format with /unprocessed/network_name/ path but no data.
unprocessed_h5 = {
'unprocessed': {self._network_name: {}}
}
with h5py.File('.edge_types_table.processed.{}.h5'.format(mpi_rank), 'w') as local_h5:
# WARNING: Trying to process the data back | |
# file: compute_xlmetrics.py
#!/usr/bin/env python
import errno
import os
import sys
import subprocess
import json
import argparse
def mkdir_p(path):
    """ mkdir -p
        Create *path* and any missing parents; an already-existing
        directory is not an error (mirrors the shell's ``mkdir -p``).
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        # re-raise unless the path already exists as a directory
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
def listdir_full_path(directory):
    """ like os.listdir(), but returns full paths and skips subdirectories

    :param directory: directory to list
    :return: generator of absolute paths of the non-directory entries
    """
    for f in os.listdir(directory):
        # BUGFIX: the original tested os.path.isdir(f) with the bare basename,
        # which is resolved against the CWD - join with the directory first.
        full = os.path.join(directory, f)
        if not os.path.isdir(full):
            yield os.path.abspath(full)
def get_dimensions(image, classname):
    """ given a source image, return dimensions

    For raw ``.yuv`` input the dimensions are parsed out of the file name
    (expected pattern: ``..._<width>x<height>_..._<depth>bit...``); for all
    other inputs ImageMagick's ``identify`` is invoked.

    :param image: path to the source image
    :param classname: image-class label; 'classE_exr' passes an explicit -size hint
    :return: (width, height, depth) tuple of strings

    NOTE(review): if ``identify`` fails, the error is printed but width/height/depth
    remain unbound and the final return raises NameError - confirm this path cannot
    happen in practice.
    """
    start, ext = os.path.splitext(image)
    if ext == '.yuv':
        # raw YUV carries no header - everything is parsed from the file name
        bitdepth = "8"  # NOTE(review): unused; 'depth' parsed below is what gets returned
        res_split = start.split('x')
        # width = last '_'-separated token before the first 'x'
        width_split = res_split[0].split('_')
        width = width_split[-1]
        height_split = res_split[-1].split('_')
        # bit depth is the 1-2 digits immediately before the "bit" marker
        m = res_split[-1].find("bit")
        if res_split[-1][m - 2] == "_":
            depth = res_split[-1][m - 1]
        else:
            depth = res_split[-1][m - 2:m]
        height = height_split[0]
    elif classname == "classE_exr":
        # EXR: identify needs an explicit -size hint pulled from the file name
        size = os.path.basename(image).split('_')[2]
        try:
            dimension_cmd = ["identify", '-size', size, '-format', '%w,%h,%z', image]
            width, height, depth = subprocess.check_output(dimension_cmd).split(",")
        except subprocess.CalledProcessError as e:
            print dimension_cmd, e.output
    else:
        # everything else: let identify read the header directly
        try:
            dimension_cmd = ["identify", '-format', '%w,%h,%z', image]
            width, height, depth = subprocess.check_output(dimension_cmd).split(",")
        except subprocess.CalledProcessError as e:
            print dimension_cmd, e.output
    return width, height, depth
def compute_vmaf(ref_image, dist_image, width, height, pix_fmt):
    """ given a pair of reference and distorted images:
        use the ffmpeg libvmaf filter to compute vmaf, vif, ssim, and ms_ssim.

    :param ref_image: path to the reference image
    :param dist_image: path to the distorted image
    :param width: frame width, passed to ffmpeg's -s:v
    :param height: frame height, passed to ffmpeg's -s:v
    :param pix_fmt: pixel format (currently unused by this function)
    :return: dict with 'vmaf', 'vif' (scale 0), 'ssim' and 'ms_ssim' scores taken
        from the first frame of the libvmaf JSON log

    NOTE(review): fixed /tmp log path means concurrent runs would clobber each
    other's stats; on ffmpeg failure the error is printed and a stale log may
    be parsed instead.
    """
    log_path = '/tmp/stats.json'
    # distorted stream is input 0, reference is input 1 (libvmaf convention)
    cmd = ['ffmpeg', '-s:v', '%s,%s' % (width, height), '-i', dist_image,
           '-s:v', '%s,%s' % (width, height), '-i', ref_image,
           '-lavfi', 'libvmaf=ssim=true:ms_ssim=true:log_fmt=json:log_path=' + log_path,
           '-f', 'null', '-'
           ]
    try:
        print "\033[92m[VMAF]\033[0m " + dist_image
        subprocess.check_output(" ".join(cmd), stderr=subprocess.STDOUT, shell=True)
    except subprocess.CalledProcessError as e:
        print "\033[91m[ERROR]\033[0m " + " ".join(cmd) + "\n" + e.output
    # only the first frame's metrics are reported (single-image comparison)
    vmaf_log = json.load(open(log_path))
    vmaf_dict = dict()
    vmaf_dict["vmaf"] = vmaf_log["frames"][0]["metrics"]["vmaf"]
    vmaf_dict["vif"] = vmaf_log["frames"][0]["metrics"]["vif_scale0"]
    vmaf_dict["ssim"] = vmaf_log["frames"][0]["metrics"]["ssim"]
    vmaf_dict["ms_ssim"] = vmaf_log["frames"][0]["metrics"]["ms_ssim"]
    return vmaf_dict
def compute_psnr(ref_image, dist_image, width, height):
""" given a pair of reference and distorted images:
use the ffmpeg psnr filter to compute psnr and mse for each channel.
"""
log_path = '/tmp/stats.log'
cmd = ['ffmpeg', '-s:v', '%s,%s' % (width, height), '-i', dist_image,
'-s:v', '%s,%s' % (width, height), '-i', ref_image,
'-lavfi', 'psnr=stats_file=' + log_path,
'-f', 'null', '-'
]
try:
print "\033[92m[PSNR]\033[0m " + dist_image
subprocess.check_output(" ".join(cmd), stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
print "\033[91m[ERROR]\033[0m " + e.output
psnr_dict = dict()
psnr_log = open(log_path).read()
for stat in psnr_log.rstrip().split(" "):
key, value = stat.split(":")
if key is not "n" and not 'mse' in key:
psnr_dict[key] = float(value)
return psnr_dict
def compute_metrics(ref_image, dist_image, encoded_image, bpp_target, codec, width, height, pix_fmt):
    """ given a pair of reference and distorted images:
        call vmaf and psnr functions, dump results to a json file.
    """
    vmaf_stats = compute_vmaf(ref_image, dist_image, width, height, pix_fmt)
    psnr_stats = compute_psnr(ref_image, dist_image, width, height)
    # merge both result sets into a single dict (psnr wins on key collisions)
    combined = dict(vmaf_stats)
    combined.update(psnr_stats)
    return combined
def compute_metrics_SDR(ref_image, dist_image, encoded_image, bpp_target, codec, width, height, pix_fmt, depth):
    """ given a pair of reference and distorted images:
        call vmaf and psnr functions, dump results to a json file.

    Runs HDRTools' HDRMetrics for PSNR/SSIM/MS-SSIM; for 8-bit content it
    additionally runs ffmpeg's libvmaf for vmaf/vif.

    :param depth: bit depth as a *string* (compared against '8' below)
    :return: dict of metric name -> value

    NOTE(review): encoded_image, bpp_target, codec and pix_fmt are unused here -
    presumably kept for signature parity with the other compute_metrics_* helpers.
    """
    refname, ref_pix_fmt = os.path.basename(ref_image).split(".")  # NOTE(review): both unused
    dist_pix_fmt = os.path.basename(dist_image).split(".")[-1]  # NOTE(review): unused
    logfile = '/tmp/stats.log'
    HDRConvert_dir = '/tools/HDRTools-0.18-dev/bin/HDRConvert'  # NOTE(review): unused here
    ppm_to_yuv_cfg = 'convert_configs/HDRConvertPPMToYCbCr444fr.cfg'  # NOTE(review): unused here
    chroma_fmt = 3
    HDRMetrics_dir = '/tools/HDRTools-0.18-dev/bin/HDRMetrics'
    HDRMetrics_config = 'convert_configs/HDRMetrics.cfg'
    try:
        # HDRMetrics writes its report to stdout; '>' works because the joined
        # command is executed through a shell (shell=True below)
        cmd = [HDRMetrics_dir, '-f', HDRMetrics_config, '-p', 'Input0File=%s' % ref_image, '-p',
               'Input0Width=%s' % width,
               '-p', 'Input0Height=%s' % height, '-p', 'Input0ChromaFormat=%d' % chroma_fmt, '-p',
               'Input0BitDepthCmp0=%s'
               % depth, '-p', 'Input0BitDepthCmp1=%s' % depth, '-p', 'Input0BitDepthCmp2=%s' % depth, '-p',
               'Input1File=%s' % dist_image, '-p', 'Input1Width=%s' % width, '-p', 'Input1Height=%s' % height, '-p',
               'Input1ChromaFormat=%d' % chroma_fmt, '-p', 'Input1BitDepthCmp0=%s' % depth, '-p',
               'Input1BitDepthCmp1=%s' % depth, '-p', 'Input1BitDepthCmp2=%s' % depth, '-p', 'LogFile=%s' % logfile,
               '-p', 'TFPSNRDistortion=0', '-p', 'EnablePSNR=1', '-p', 'EnableSSIM=1', '-p', 'EnableMSSSIM=1',
               '-p', 'Input1ColorPrimaries=4', '-p', 'Input0ColorPrimaries=4', '-p', 'Input0ColorSpace=0', '-p',
               'Input1ColorSpace=0', '>', '/tmp/statsHDRTools_SDRmetrics.json']
        subprocess.check_output(' '.join(cmd), stderr=subprocess.STDOUT, shell=True)
    except subprocess.CalledProcessError as e:
        print cmd, e.output
        raise e
    objective_dict = dict()
    with open('/tmp/statsHDRTools_SDRmetrics.json', 'r') as f:
        for line in f:
            # NOTE(review): '000000' looks like the per-frame stats row for frame 0 -
            # confirm against the HDRMetrics report format
            if '000000' in line:
                metriclist = line.split()
                objective_dict["psnr-y"] = metriclist[1]
                if 'classB' not in ref_image:
                    # weighted average: presumably (6*Y + Cb + Cr) / 8 - verify column order
                    objective_dict["psnr-avg"] = (6 * float(metriclist[1]) + float(metriclist[2]) + float(
                        metriclist[3])) / 8.0
                objective_dict["ms_ssim"] = metriclist[4]
                objective_dict["ssim"] = metriclist[7]
    # vmaf is only defined/computed for 8-bit content (depth is a string here)
    if depth == '8':
        log_path = '/tmp/stats.json'
        cmd = ['ffmpeg', '-s:v', '%s,%s' % (width, height), '-i', dist_image,
               '-s:v', '%s,%s' % (width, height), '-i', ref_image,
               '-lavfi', 'libvmaf=log_fmt=json:log_path=' + log_path,
               '-f', 'null', '-'
               ]
        try:
            print "\033[92m[VMAF]\033[0m " + dist_image
            subprocess.check_output(" ".join(cmd), stderr=subprocess.STDOUT, shell=True)
        except subprocess.CalledProcessError as e:
            print "\033[91m[ERROR]\033[0m " + " ".join(cmd) + "\n" + e.output
        vmaf_log = json.load(open(log_path))
        vmaf_dict = dict()
        vmaf_dict["vmaf"] = vmaf_log["frames"][0]["metrics"]["vmaf"]
        vmaf_dict["vif"] = vmaf_log["frames"][0]["metrics"]["vif_scale0"]
        stats = vmaf_dict.copy()
        stats.update(objective_dict)
    else:
        stats = objective_dict
    return stats
def compute_metrics_HDR(ref_image, dist_image, encoded_image, bpp_target, codec, width, height, pix_fmt, depth):
""" given a pair of reference and distorted images:
call vmaf and psnr functions, dump results to a json file.
"""
ref_pix_fmt = os.path.basename(ref_image).split(".")[-1]
dist_pix_fmt = os.path.basename(dist_image).split(".")[-1]
HDRConvert_dir = '/tools/HDRTools-0.18-dev/bin/HDRConvert'
ppm_to_exr_cfg = 'convert_configs/HDRConvertPPMToEXR.cfg'
yuv_to_exr_cfg = 'convert_configs/HDRConvertYCbCrToBT2020EXR.cfg'
logfile = '/tmp/stats.log'
primary = '1'
if dist_pix_fmt == 'ppm':
exr_dir = os.path.join('objective_images', 'PPM_EXR')
exr_dest = os.path.join(exr_dir, os.path.basename(dist_image) + '.exr')
if not os.path.isfile(exr_dest):
print "\033[92m[EXR]\033[0m " + exr_dest
mkdir_p(exr_dir)
try:
cmd = [HDRConvert_dir, '-f', ppm_to_exr_cfg, '-p', 'SourceFile=%s' % dist_image,
'-p',
'SourceWidth=%s' % width,
'-p', 'SourceHeight=%s' % height, '-p', 'SourceBitDepthCmp0=%s' % depth, '-p',
'SourceBitDepthCmp1=%s'
% depth, '-p', 'SourceBitDepthCmp2=%s' % depth, '-p', 'SourceColorPrimaries=%s' % primary, '-p',
'OutputFile=%s' % exr_dest, '-p', 'OutputWidth=%s' % width, '-p', 'OutputHeight=%s' % height,
'-p',
'OutputBitDepthCmp0=%s' % depth, '-p', 'OutputBitDepthCmp1=%s' % depth, '-p',
'OutputBitDepthCmp2=%s'
% depth, '-p', 'OutputColorPrimaries=%s' % primary]
subprocess.check_output(' '.join(cmd), stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
print cmd, e.output
raise e
else:
print "\033[92m[EXR OK]\033[0m " + exr_dest
dist_image = exr_dest
chroma_fmt = 3
if ref_pix_fmt == 'ppm':
exr_dir = os.path.join('objective_images', 'PPM_EXR')
exr_dest = os.path.join(exr_dir, os.path.basename(ref_image) + '.exr')
if not os.path.isfile(exr_dest):
print "\033[92m[EXR]\033[0m " + exr_dest
mkdir_p(exr_dir)
try:
cmd = [HDRConvert_dir, '-f', ppm_to_exr_cfg, '-p', 'SourceFile=%s' % ref_image,
'-p',
'SourceWidth=%s' % width,
'-p', 'SourceHeight=%s' % height, '-p', 'SourceBitDepthCmp0=%s' % depth, '-p',
'SourceBitDepthCmp1=%s'
% depth, '-p', 'SourceBitDepthCmp2=%s' % depth, '-p', 'SourceColorPrimaries=%s' % primary, '-p',
'OutputFile=%s' % exr_dest, '-p', 'OutputWidth=%s' % width, '-p', 'OutputHeight=%s' % height,
'-p',
'OutputBitDepthCmp0=%s' % depth, '-p', 'OutputBitDepthCmp1=%s' % depth, '-p',
'OutputBitDepthCmp2=%s'
% depth, '-p', 'OutputColorPrimaries=%s' % primary]
subprocess.check_output(' '.join(cmd), stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
print cmd, e.output
raise e
else:
print "\033[92m[EXR OK]\033[0m " + exr_dest
ref_image = exr_dest
chroma_fmt = 3
if dist_pix_fmt == 'yuv':
exr_dir = os.path.join('objective_images', 'YUV_EXR')
exr_dest = os.path.join(exr_dir, os.path.basename(dist_image) + '.exr')
if not os.path.isfile(exr_dest):
print "\033[92m[EXR]\033[0m " + exr_dest
mkdir_p(exr_dir)
try:
cmd = [HDRConvert_dir, '-f', yuv_to_exr_cfg, '-p', 'SourceFile=%s' % dist_image,
'-p',
'SourceWidth=%s' % width,
'-p', 'SourceHeight=%s' % height, '-p', 'SourceBitDepthCmp0=%s' % depth, '-p',
'SourceBitDepthCmp1=%s'
% depth, '-p', 'SourceBitDepthCmp2=%s' % depth, '-p', 'SourceColorPrimaries=%s' % primary, '-p',
'OutputFile=%s' % exr_dest, '-p', 'OutputWidth=%s' % width, '-p', 'OutputHeight=%s' % height,
'-p',
'OutputBitDepthCmp0=%s' % depth, '-p', 'OutputBitDepthCmp1=%s' % depth, '-p',
'OutputBitDepthCmp2=%s'
% depth, '-p', 'OutputColorPrimaries=%s' % primary]
subprocess.check_output(' '.join(cmd), stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
print cmd, e.output
raise e
else:
print "\033[92m[EXR OK]\033[0m " + exr_dest
dist_image = exr_dest
chroma_fmt = 3
if dist_pix_fmt == 'yuv':
exr_dir = os.path.join('objective_images', 'YUV_EXR')
exr_dest = os.path.join(exr_dir, os.path.basename(ref_image) + '.exr')
if not os.path.isfile(exr_dest):
print "\033[92m[EXR]\033[0m " + exr_dest
mkdir_p(exr_dir)
try:
cmd = [HDRConvert_dir, '-f', yuv_to_exr_cfg, '-p', 'SourceFile=%s' % ref_image,
'-p',
'SourceWidth=%s' % width,
'-p', 'SourceHeight=%s' % height, '-p', 'SourceBitDepthCmp0=%s' % depth, '-p',
'SourceBitDepthCmp1=%s'
% depth, '-p', 'SourceBitDepthCmp2=%s' % depth, '-p', 'SourceColorPrimaries=%s' % primary, '-p',
'OutputFile=%s' % exr_dest, '-p', 'OutputWidth=%s' % width, '-p', 'OutputHeight=%s' % height,
'-p',
'OutputBitDepthCmp0=%s' % depth, '-p', 'OutputBitDepthCmp1=%s' % depth, '-p',
'OutputBitDepthCmp2=%s'
% depth, '-p', 'OutputColorPrimaries=%s' % primary]
subprocess.check_output(' '.join(cmd), stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
print cmd, e.output
raise e
else:
print "\033[92m[EXR OK]\033[0m " + exr_dest
ref_image = exr_dest
chroma_fmt = 3
HDRMetrics_dir = '/tools/HDRTools-0.18-dev/bin/HDRMetrics'
HDRMetrics_config = HDRMetrics_dir + '/HDRMetrics_config'
try:
cmd = [HDRMetrics_dir, '-f', HDRMetrics_config, '-p', 'Input0File=%s' % ref_image, '-p',
'Input0Width=%s' % width,
'-p', 'Input0Height=%s' % height, '-p', 'Input0ChromaFormat=%d' % chroma_fmt, '-p', 'Input0ColorSpace=1',
'-p',
'Input0BitDepthCmp0=%s'
% depth, '-p', 'Input0BitDepthCmp1=%s' % depth, '-p', 'Input0BitDepthCmp2=%s' % depth, '-p',
'Input1ColorSpace=1', '-p',
'Input1File=%s' % dist_image, '-p', 'Input1Width=%s' % width, '-p', 'Input1Height=%s' % height, '-p',
'Input1ChromaFormat=%d' % chroma_fmt, '-p', 'Input1BitDepthCmp0=%s' % depth, '-p',
'Input1BitDepthCmp1=%s' % depth, '-p', 'Input1BitDepthCmp2=%s' % depth, '-p', 'LogFile=%s' % logfile,
'-p', 'Input0ColorPrimaries=1', '-p', 'Input1ColorPrimaries=1', '-p', '-p', 'TFPSNRDistortion=1', '-p',
'EnableTFPSNR=1', '-p', 'EnableTFMSSSIM=1',
'>', '/tmp/statsHDRTools.json']
subprocess.check_output(' '.join(cmd), stderr=subprocess.STDOUT, shell=True)
print(' '.join(cmd))
except subprocess.CalledProcessError as e:
print cmd, e.output
| |
'v_10'), ('v_10', 't_2'), ('t_2', 'v_10'), ('v_7', 't_3'), ('c_1', 't_3'), ('v_8', 't_4')},
'>': {('c_1', 'v_5'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_3', 'v_10'), ('t_2', 'v_7'), ('c_1', 'v_7'), ('t_4', 'v_8'), ('c_1', 'v_6'), ('t_4', 'v_10'),
('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_4', 'v_5'),
('t_4', 'v_6'), ('t_4', 'v_7'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_8'), ('t_3', 'v_6'),
('t_2', 'v_10'), ('t_4', 'v_9'), ('c_1', 'v_10')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_4', 't_2', 'v_9', 'v_8', 'c_1', 'v_10'}, 'City': {'c_1'},
'Town': {'t_3', 't_4', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_10'},
'Road': {('t_2', 'c_1'), ('t_4', 'c_1'), ('t_3', 'c_1'), ('t_3', 'v_7'), ('v_9', 't_4'), ('v_5', 't_2'),
('c_1', 't_2'), ('c_1', 't_4'), ('v_10', 't_3'), ('t_2', 'v_5'), ('t_4', 'v_6'), ('v_6', 't_4'),
('t_3', 'v_10'), ('v_10', 't_2'), ('t_2', 'v_10'), ('v_7', 't_3'), ('c_1', 't_3'), ('t_4', 'v_9')},
'>': {('c_1', 'v_5'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_3', 'v_10'), ('t_2', 'v_7'), ('c_1', 'v_7'), ('t_4', 'v_8'), ('c_1', 'v_6'), ('t_4', 'v_10'),
('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_4', 'v_5'),
('t_4', 'v_6'), ('t_4', 'v_7'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_8'), ('t_3', 'v_6'),
('t_2', 'v_10'), ('t_4', 'v_9'), ('c_1', 'v_10')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_4', 't_2', 'v_9', 'v_8', 'c_1', 'v_10'}, 'City': {'c_1'},
'Town': {'t_3', 't_4', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_10'},
'Road': {('t_2', 'c_1'), ('t_4', 'c_1'), ('t_3', 'c_1'), ('t_3', 'v_7'), ('v_9', 't_4'), ('v_5', 't_2'),
('t_4', 'v_8'), ('c_1', 't_2'), ('c_1', 't_4'), ('v_10', 't_3'), ('t_2', 'v_5'), ('t_3', 'v_10'),
('v_10', 't_2'), ('t_2', 'v_10'), ('v_7', 't_3'), ('c_1', 't_3'), ('t_4', 'v_9'), ('v_8', 't_4')},
'>': {('c_1', 'v_5'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_3', 'v_10'), ('t_2', 'v_7'), ('c_1', 'v_7'), ('t_4', 'v_8'), ('c_1', 'v_6'), ('t_4', 'v_10'),
('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_4', 'v_5'),
('t_4', 'v_6'), ('t_4', 'v_7'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_8'), ('t_3', 'v_6'),
('t_2', 'v_10'), ('t_4', 'v_9'), ('c_1', 'v_10')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_4', 't_2', 'v_9', 'v_8', 'c_1', 'v_10'}, 'City': {'c_1'},
'Town': {'t_3', 't_4', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_10'},
'Road': {('t_2', 'c_1'), ('t_4', 'c_1'), ('t_3', 'c_1'), ('t_3', 'v_8'), ('v_5', 't_2'), ('c_1', 't_2'),
('c_1', 't_4'), ('v_10', 't_3'), ('t_2', 'v_5'), ('t_4', 'v_6'), ('v_6', 't_4'), ('t_3', 'v_10'),
('v_10', 't_2'), ('t_2', 'v_10'), ('t_4', 'v_10'), ('v_10', 't_4'), ('v_8', 't_3'), ('c_1', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_3', 'v_10'), ('t_2', 'v_7'), ('c_1', 'v_7'), ('t_4', 'v_8'), ('c_1', 'v_6'), ('t_4', 'v_10'),
('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_4', 'v_5'),
('t_4', 'v_6'), ('t_4', 'v_7'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_8'), ('t_3', 'v_6'),
('t_2', 'v_10'), ('t_4', 'v_9'), ('c_1', 'v_10')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_4', 't_2', 'v_9', 'v_8', 'c_1', 'v_10'}, 'City': {'c_1'},
'Town': {'t_3', 't_4', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_10'},
'Road': {('t_2', 'c_1'), ('t_4', 'c_1'), ('t_3', 'c_1'), ('t_3', 'v_8'), ('v_5', 't_2'), ('c_1', 't_2'),
('c_1', 't_4'), ('v_10', 't_3'), ('t_2', 'v_5'), ('t_3', 'v_10'), ('v_10', 't_2'), ('t_2', 'v_10'),
('t_4', 'v_10'), ('v_10', 't_4'), ('v_7', 't_4'), ('v_8', 't_3'), ('t_4', 'v_7'), ('c_1', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_3', 'v_10'), ('t_2', 'v_7'), ('c_1', 'v_7'), ('t_4', 'v_8'), ('c_1', 'v_6'), ('t_4', 'v_10'),
('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_4', 'v_5'),
('t_4', 'v_6'), ('t_4', 'v_7'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_8'), ('t_3', 'v_6'),
('t_2', 'v_10'), ('t_4', 'v_9'), ('c_1', 'v_10')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_4', 't_2', 'v_9', 'v_8', 'c_1', 'v_10'}, 'City': {'c_1'},
'Town': {'t_3', 't_4', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_10'},
'Road': {('t_2', 'c_1'), ('t_4', 'c_1'), ('t_3', 'c_1'), ('v_9', 't_4'), ('t_3', 'v_8'), ('v_5', 't_2'),
('c_1', 't_2'), ('c_1', 't_4'), ('v_10', 't_3'), ('t_2', 'v_5'), ('t_3', 'v_10'), ('v_10', 't_2'),
('t_2', 'v_10'), ('t_4', 'v_10'), ('v_10', 't_4'), ('v_8', 't_3'), ('c_1', 't_3'), ('t_4', 'v_9')},
'>': {('c_1', 'v_5'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_3', 'v_10'), ('t_2', 'v_7'), ('c_1', 'v_7'), ('t_4', 'v_8'), ('c_1', 'v_6'), ('t_4', 'v_10'),
('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_4', 'v_5'),
('t_4', 'v_6'), ('t_4', 'v_7'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_8'), ('t_3', 'v_6'),
('t_2', 'v_10'), ('t_4', 'v_9'), ('c_1', 'v_10')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_4', 't_2', 'v_9', 'v_8', 'c_1', 'v_10'}, 'City': {'c_1'},
'Town': {'t_3', 't_4', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_10'},
'Road': {('t_2', 'c_1'), ('t_4', 'c_1'), ('t_3', 'c_1'), ('t_3', 'v_8'), ('v_5', 't_2'), ('c_1', 't_2'),
('c_1', 't_4'), ('v_10', 't_3'), ('t_2', 'v_5'), ('t_4', 'v_6'), ('v_6', 't_4'), ('t_3', 'v_10'),
('v_10', 't_2'), ('t_2', 'v_10'), ('v_7', 't_4'), ('v_8', 't_3'), ('t_4', 'v_7'), ('c_1', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_3', 'v_10'), ('t_2', 'v_7'), ('c_1', 'v_7'), ('t_4', 'v_8'), ('c_1', 'v_6'), ('t_4', 'v_10'),
('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_4', 'v_5'),
('t_4', 'v_6'), ('t_4', 'v_7'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_8'), ('t_3', 'v_6'),
('t_2', 'v_10'), ('t_4', 'v_9'), ('c_1', 'v_10')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_4', 't_2', 'v_9', 'v_8', 'c_1', 'v_10'}, 'City': {'c_1'},
'Town': {'t_3', 't_4', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_10'},
'Road': {('t_2', 'c_1'), ('t_4', 'c_1'), ('t_3', 'c_1'), ('v_9', 't_4'), ('t_3', 'v_8'), ('v_5', 't_2'),
('c_1', 't_2'), ('c_1', 't_4'), ('v_10', 't_3'), ('t_2', 'v_5'), ('t_4', 'v_6'), ('v_6', 't_4'),
('t_3', 'v_10'), ('v_10', 't_2'), ('t_2', 'v_10'), ('v_8', 't_3'), ('c_1', 't_3'), ('t_4', 'v_9')},
'>': {('c_1', 'v_5'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_3', 'v_10'), ('t_2', 'v_7'), ('c_1', 'v_7'), ('t_4', 'v_8'), ('c_1', 'v_6'), ('t_4', 'v_10'),
('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_4', 'v_5'),
('t_4', 'v_6'), ('t_4', 'v_7'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_8'), ('t_3', 'v_6'),
('t_2', 'v_10'), ('t_4', 'v_9'), ('c_1', 'v_10')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_4', 't_2', 'v_9', 'v_8', 'c_1', 'v_10'}, 'City': {'c_1'},
'Town': {'t_3', 't_4', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_10'},
'Road': {('t_2', 'c_1'), ('t_4', 'c_1'), ('t_3', 'c_1'), ('v_9', 't_4'), ('t_3', 'v_8'), ('v_5', 't_2'),
('c_1', 't_2'), ('c_1', 't_4'), ('v_10', 't_3'), ('t_2', 'v_5'), ('t_3', 'v_10'), ('v_10', 't_2'),
('t_2', 'v_10'), ('v_7', 't_4'), ('v_8', 't_3'), ('t_4', 'v_7'), ('c_1', 't_3'), ('t_4', 'v_9')},
'>': {('c_1', 'v_5'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_3', 'v_10'), ('t_2', 'v_7'), ('c_1', 'v_7'), ('t_4', 'v_8'), ('c_1', 'v_6'), ('t_4', 'v_10'),
('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_4', 'v_5'),
('t_4', 'v_6'), ('t_4', 'v_7'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_8'), ('t_3', 'v_6'),
('t_2', 'v_10'), ('t_4', 'v_9'), ('c_1', 'v_10')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_4', 't_2', 'v_9', 'v_8', 'c_1', 'v_10'}, 'City': {'c_1'},
'Town': {'t_3', 't_4', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_10'},
'Road': {('t_2', 'c_1'), ('t_4', 'c_1'), ('t_3', 'c_1'), ('v_5', 't_2'), ('t_3', 'v_9'), ('c_1', 't_2'),
('c_1', 't_4'), ('v_10', 't_3'), ('t_2', 'v_5'), ('t_4', 'v_6'), ('v_6', 't_4'), ('t_3', 'v_10'),
('v_10', 't_2'), ('t_2', 'v_10'), ('t_4', 'v_10'), ('v_10', 't_4'), ('v_9', 't_3'), ('c_1', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_3', 'v_10'), ('t_2', 'v_7'), ('c_1', 'v_7'), ('t_4', 'v_8'), ('c_1', 'v_6'), ('t_4', 'v_10'),
('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_4', 'v_5'),
('t_4', 'v_6'), ('t_4', 'v_7'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_8'), ('t_3', 'v_6'),
('t_2', 'v_10'), ('t_4', 'v_9'), ('c_1', 'v_10')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_4', 't_2', 'v_9', 'v_8', 'c_1', 'v_10'}, 'City': {'c_1'},
'Town': {'t_3', 't_4', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_10'},
'Road': {('t_2', 'c_1'), ('t_4', 'c_1'), ('t_3', 'c_1'), ('v_5', 't_2'), ('t_3', 'v_9'), ('c_1', 't_2'),
('c_1', 't_4'), ('v_10', 't_3'), ('t_2', 'v_5'), ('t_3', 'v_10'), ('v_10', 't_2'), ('t_2', 'v_10'),
('t_4', 'v_10'), ('v_10', 't_4'), ('v_7', 't_4'), ('v_9', 't_3'), ('t_4', 'v_7'), ('c_1', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('c_1', 't_4'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_3', 'v_10'), ('t_2', 'v_7'), ('c_1', 'v_7'), ('t_4', 'v_8'), ('c_1', 'v_6'), ('t_4', 'v_10'),
('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_4', 'v_5'),
('t_4', 'v_6'), ('t_4', 'v_7'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_8'), ('t_3', 'v_6'),
('t_2', 'v_10'), ('t_4', 'v_9'), ('c_1', 'v_10')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_4', 't_2', 'v_9', 'v_8', 'c_1', 'v_10'}, 'City': {'c_1'},
'Town': {'t_3', 't_4', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_10'},
'Road': {('t_2', 'c_1'), ('t_4', 'c_1'), ('t_3', 'c_1'), ('v_5', 't_2'), ('t_3', 'v_9'), ('t_4', 'v_8'),
('c_1', 't_2'), ('c_1', 't_4'), ('v_10', 't_3'), ('t_2', 'v_5'), ('t_3', 'v_10'), ('v_10', 't_2'),
('t_2', 'v_10'), ('t_4', 'v_10'), ('v_10', | |
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from __future__ import absolute_import, print_function
import itertools
from functools import partial
from twisted.internet.defer import inlineCallbacks
from twisted.internet.interfaces import IStreamClientEndpoint
from twisted.internet.endpoints import UNIXClientEndpoint
from twisted.internet.endpoints import TCP4ClientEndpoint
from twisted.internet.error import ReactorNotRunning
try:
_TLS = True
from twisted.internet.endpoints import SSL4ClientEndpoint
from twisted.internet.ssl import optionsForClientTLS, CertificateOptions
from twisted.internet.interfaces import IOpenSSLClientConnectionCreator
from OpenSSL import SSL
except ImportError as e:
_TLS = False
if 'OpenSSL' not in str(e):
raise
import txaio
from autobahn.twisted.websocket import WampWebSocketClientFactory
from autobahn.twisted.rawsocket import WampRawSocketClientFactory
from autobahn.wamp import component
from autobahn.twisted.util import sleep
from autobahn.twisted.wamp import ApplicationSession
from autobahn.wamp.exception import ApplicationError
# Public names exported by `from ... import *`.
# BUG FIX: the original read `('Component')`, which is just a parenthesized
# string (so star-import would iterate its characters); a one-element tuple
# requires the trailing comma.
__all__ = ('Component',)
def _is_ssl_error(e):
    """
    Internal helper.

    Report whether ``e`` is an ``OpenSSL.SSL.Error``. When the TLS/SSL
    libraries could not be imported at module load time (``_TLS`` is
    False), this always answers False without touching ``SSL``.
    """
    # Short-circuit keeps SSL unreferenced when the import failed.
    return _TLS and isinstance(e, SSL.Error)
def _unique_list(seq):
"""
Return a list with unique elements from sequence, preserving order.
"""
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)]
def _create_transport_serializer(serializer_id):
if serializer_id in [u'msgpack', u'mgspack.batched']:
# try MsgPack WAMP serializer
try:
from autobahn.wamp.serializer import MsgPackSerializer
except ImportError:
pass
else:
if serializer_id == u'mgspack.batched':
return MsgPackSerializer(batched=True)
else:
return MsgPackSerializer()
if serializer_id in [u'json', u'json.batched']:
# try JSON WAMP serializer
try:
from autobahn.wamp.serializer import JsonSerializer
except ImportError:
pass
else:
if serializer_id == u'json.batched':
return JsonSerializer(batched=True)
else:
return JsonSerializer()
raise RuntimeError('could not create serializer for "{}"'.format(serializer_id))
def _create_transport_serializers(transport):
"""
Create a list of serializers to use with a WAMP protocol factory.
"""
serializers = []
for serializer_id in transport.serializers:
if serializer_id == u'msgpack':
# try MsgPack WAMP serializer
try:
from autobahn.wamp.serializer import MsgPackSerializer
except ImportError:
pass
else:
serializers.append(MsgPackSerializer(batched=True))
serializers.append(MsgPackSerializer())
elif serializer_id == u'json':
# try JSON WAMP serializer
try:
from autobahn.wamp.serializer import JsonSerializer
except ImportError:
pass
else:
serializers.append(JsonSerializer(batched=True))
serializers.append(JsonSerializer())
else:
raise RuntimeError(
"Unknown serializer '{}'".format(serializer_id)
)
return serializers
def _create_transport_factory(reactor, transport, session_factory):
    """
    Create a WAMP-over-XXX transport factory.

    Dispatches on ``transport.type``: 'websocket' or 'rawsocket'.
    """
    if transport.type == 'websocket':
        # FIXME: forward WebSocket options
        return WampWebSocketClientFactory(
            session_factory,
            url=transport.url,
            serializers=_create_transport_serializers(transport),
        )

    if transport.type == 'rawsocket':
        # FIXME: forward RawSocket options
        return WampRawSocketClientFactory(
            session_factory,
            serializer=_create_transport_serializer(transport.serializer),
        )

    assert(False), 'should not arrive here'
def _create_transport_endpoint(reactor, endpoint_config):
    """
    Create a Twisted client endpoint for a WAMP-over-XXX transport.

    :param reactor: the Twisted reactor the endpoint attaches to
    :param endpoint_config: either an object already providing
        ``IStreamClientEndpoint`` (used as-is), or a dict describing a
        ``'tcp'`` endpoint (keys: ``host``, ``port``, optional ``version``,
        ``timeout``, ``tls``) or a ``'unix'`` endpoint (keys: ``path``,
        optional ``timeout``)
    :returns: an ``IStreamClientEndpoint`` provider
    :raises RuntimeError: for unsupported or malformed TLS configuration,
        or TLS/IPv6 combinations that are not implemented
    """
    if IStreamClientEndpoint.providedBy(endpoint_config):
        # caller supplied a ready-made endpoint; adapt and use directly
        endpoint = IStreamClientEndpoint(endpoint_config)
    else:
        # create a connecting TCP socket
        if endpoint_config['type'] == 'tcp':
            version = int(endpoint_config.get('version', 4))
            host = str(endpoint_config['host'])
            port = int(endpoint_config['port'])
            timeout = int(endpoint_config.get('timeout', 10))  # in seconds
            tls = endpoint_config.get('tls', None)

            # create a TLS enabled connecting TCP socket
            if tls:
                if not _TLS:
                    raise RuntimeError('TLS configured in transport, but TLS support is not installed (eg OpenSSL?)')

                # FIXME: create TLS context from configuration
                if IOpenSSLClientConnectionCreator.providedBy(tls):
                    # eg created from twisted.internet.ssl.optionsForClientTLS()
                    context = IOpenSSLClientConnectionCreator(tls)
                elif isinstance(tls, CertificateOptions):
                    context = tls
                elif tls is True:
                    # bare "tls: true" -> default client options verifying
                    # the certificate against the connect hostname
                    context = optionsForClientTLS(host)
                else:
                    raise RuntimeError('unknown type {} for "tls" configuration in transport'.format(type(tls)))

                if version == 4:
                    endpoint = SSL4ClientEndpoint(reactor, host, port, context, timeout=timeout)
                elif version == 6:
                    # there is no SSL6ClientEndpoint!
                    raise RuntimeError('TLS on IPv6 not implemented')
                else:
                    assert(False), 'should not arrive here'

            # create a non-TLS connecting TCP socket
            else:
                if version == 4:
                    endpoint = TCP4ClientEndpoint(reactor, host, port, timeout=timeout)
                elif version == 6:
                    # TCP6ClientEndpoint only exists in newer Twisted;
                    # import lazily and translate the failure
                    try:
                        from twisted.internet.endpoints import TCP6ClientEndpoint
                    except ImportError:
                        raise RuntimeError('IPv6 is not supported (please upgrade Twisted)')
                    endpoint = TCP6ClientEndpoint(reactor, host, port, timeout=timeout)
                else:
                    assert(False), 'should not arrive here'

        # create a connecting Unix domain socket
        elif endpoint_config['type'] == 'unix':
            path = endpoint_config['path']
            timeout = int(endpoint_config.get('timeout', 10))  # in seconds
            endpoint = UNIXClientEndpoint(reactor, path, timeout=timeout)

        else:
            assert(False), 'should not arrive here'

    return endpoint
class Component(component.Component):
    """
    A component establishes a transport and attached a session
    to a realm using the transport for communication.

    The transports a component tries to use can be configured,
    as well as the auto-reconnect strategy.
    """

    log = txaio.make_logger()

    session_factory = ApplicationSession
    """
    The factory of the session we will instantiate.
    """

    def _check_native_endpoint(self, endpoint):
        # Validate a user-supplied 'endpoint' configuration: it must be an
        # IStreamClientEndpoint provider, or a dict whose optional 'tls'
        # entry is a dict/bool, an IOpenSSLClientConnectionCreator provider,
        # or a CertificateOptions instance. A dict without 'tls' passes here;
        # deeper structural checks happen when the endpoint is created.
        if IStreamClientEndpoint.providedBy(endpoint):
            pass
        elif isinstance(endpoint, dict):
            if 'tls' in endpoint:
                tls = endpoint['tls']
                if isinstance(tls, (dict, bool)):
                    pass
                elif IOpenSSLClientConnectionCreator.providedBy(tls):
                    pass
                elif isinstance(tls, CertificateOptions):
                    pass
                else:
                    raise ValueError(
                        "'tls' configuration must be a dict, CertificateOptions or"
                        " IOpenSSLClientConnectionCreator provider"
                    )
        else:
            raise ValueError(
                "'endpoint' configuration must be a dict or IStreamClientEndpoint"
                " provider"
            )

    def _connect_transport(self, reactor, transport, session_factory):
        """
        Create and connect a WAMP-over-XXX transport.

        Returns a Deferred from the endpoint's ``connect()``.
        """
        transport_factory = _create_transport_factory(reactor, transport, session_factory)
        transport_endpoint = _create_transport_endpoint(reactor, transport.endpoint)
        return transport_endpoint.connect(transport_factory)

    # XXX think: is it okay to use inlineCallbacks (in this
    # twisted-only file) even though we're using txaio?
    @inlineCallbacks
    def start(self, reactor=None):
        """
        This starts the Component, which means it will start connecting
        (and re-connecting) to its configured transports. A Component
        runs until it is "done", which means one of:

        - There was a "main" function defined, and it completed successfully;
        - Something called ``.leave()`` on our session, and we left successfully;
        - ``.stop()`` was called, and completed successfully;
        - none of our transports were able to connect successfully (failure);

        :returns: a Deferred that fires (with ``None``) when we are
            "done" or with a Failure if something went wrong.
        """
        if reactor is None:
            self.log.warn("Using default reactor")
            from twisted.internet import reactor

        yield self.fire('start', reactor, self)

        # transports to try again and again ..
        transport_gen = itertools.cycle(self._transports)

        reconnect = True

        self.log.debug('Entering re-connect loop')
        while reconnect:
            # cycle through all transports forever ..
            transport = next(transport_gen)

            # only actually try to connect using the transport,
            # if the transport hasn't reached max. connect count
            if transport.can_reconnect():
                delay = transport.next_delay()
                self.log.debug(
                    'trying transport {transport_idx} using connect delay {transport_delay}',
                    transport_idx=transport.idx,
                    transport_delay=delay,
                )
                yield sleep(delay)
                try:
                    transport.connect_attempts += 1
                    yield self._connect_once(reactor, transport)
                    # NOTE(review): attribute spelled "sucesses" -- presumably
                    # matches the transport tracking code elsewhere; do not
                    # "fix" here without renaming the counterpart.
                    transport.connect_sucesses += 1
                except Exception as e:
                    transport.connect_failures += 1

                    f = txaio.create_failure()
                    self.log.error(u'component failed: {error}', error=txaio.failure_message(f))
                    self.log.debug(u'{tb}', tb=txaio.failure_format_traceback(f))
                    # If this is a "fatal error" that will never work,
                    # we bail out now
                    if isinstance(e, ApplicationError):
                        if e.error in [u'wamp.error.no_such_realm']:
                            reconnect = False
                            self.log.error(u"Fatal error, not reconnecting")
                        raise
                        # self.log.error(u"{error}: {message}", error=e.error, message=e.message)
                    elif _is_ssl_error(e):
                        # Quoting pyOpenSSL docs: "Whenever
                        # [SSL.Error] is raised directly, it has a
                        # list of error messages from the OpenSSL
                        # error queue, where each item is a tuple
                        # (lib, function, reason). Here lib, function
                        # and reason are all strings, describing where
                        # and what the problem is. See err(3) for more
                        # information."
                        for (lib, fn, reason) in e.args[0]:
                            self.log.error(u"TLS failure: {reason}", reason=reason)
                        self.log.error(u"Marking this transport as failed")
                        transport.failed()
                    else:
                        f = txaio.create_failure()
                        self.log.error(
                            u'Connection failed: {error}',
                            error=txaio.failure_message(f),
                        )
                        # some types of errors should probably have
                        # stacktraces logged immediately at error
                        # level, e.g. SyntaxError?
                        self.log.debug(u'{tb}', tb=txaio.failure_format_traceback(f))
                        raise
                else:
                    # connect succeeded and the session ran to completion:
                    # leave the re-connect loop
                    reconnect = False
            else:
                # check if there is any transport left we can use
                # to connect
                if not self._can_reconnect():
                    self.log.info("No remaining transports to try")
                    reconnect = False
def _run(reactor, components):
if isinstance(components, Component):
components = [components]
if type(components) != list:
raise ValueError(
'"components" must be a list of Component objects - encountered'
' {0}'.format(type(components))
)
for c in components:
if | |
# -*- coding: utf-8 -*-
"""Classes for interacting with models."""
import logging
from datetime import datetime
from ._api_object import ApiObject
from urllib.parse import urlencode
from .error import NotFoundError, ResponseError
from typing import Union
from time import time as t
from time import sleep
class Models:
"""The `Models` object.
This object is used to retreive information about models from the API.
Note:
This class should not be instantiated directly but rather accessed through the `models`
attribute of an `ApiClient` instance.
"""
_base_route = '/models'
def __init__(self, api_client):
"""Creates a `Models` instance.
Args:
api_client (ApiClient): An `ApiClient` instance.
"""
self._api_client = api_client
self.logger = logging.getLogger(__name__)
def get_model_processing_details(self, model, version):
"""
Checks to see if a model with a certain id and version is active, and if it is, will return the model
details for that particular model.
Args:
model: model id, or `Model` instance
version: semantic version of previously specified model
Returns:
model: The model details for the model with the id and version specified or None if there are no active
models with these parameters
"""
model_id = Model._coerce_identifier(model)
# TODO: this was moved from the models api to the resources api, perhaps it should go in a different module?
endpoint = "/resources/processing/models"
result = self._api_client.http.get(endpoint)
for model in result:
if model["identifier"] == model_id and model["version"] == version:
return model
return None
def get_minimum_engines(self) -> int:
"""Obtains the total amount of processing engines set as the minimum processing capacity across all models."""
route = f"{self._base_route}/processing-engines"
raw_result = self._api_client.http.get(route)
minimum_engines_sum = int(raw_result["minimumProcessingEnginesSum"])
self.logger.info(f"The sum of minimum processing engines is: {minimum_engines_sum}")
return minimum_engines_sum
    def update_processing_engines(
        self, model, version: str, min_engines: int, max_engines: int, timeout: int = 0, poll_rate: int = 5
    ):
        """
        Updates the minimum and maximum processing engines for a specific model identifier and version.

        Args:
            model: model id, or `Model` instance
            version: semantic version of previously specified model
            min_engines: minimum number of processing engines allowed for this model and version
            max_engines: maximum number of processing engines allowed for this model and version
            timeout: time in seconds to wait until processing engine is spun up. 0 means return immediately, None means
                block and wait forever
            poll_rate: If timeout is nonzero, this value will determine the rate at which the state of the cluster
                is checked

        Raises:
            ValueError: if min_engines exceeds max_engines, or both PATCH attempts fail
            ForbiddenError: Occurs if the current API client does not have the appropriate entitlements in order
                to update processing engines
        """
        if not max_engines >= min_engines:
            raise ValueError("Your min_engines value may not exceed the max_engines value")
        model_id = Model._coerce_identifier(model)
        base_request_body = {
            "minimumParallelCapacity": min_engines,
            "maximumParallelCapacity": max_engines
        }
        base_endpoint = f"{self._base_route}/{model_id}/versions/{version}"
        error_message = None
        # First try PATCHing the version resource directly; if the server
        # rejects that, fall back to the dedicated /processing sub-resource.
        try:
            result = self._api_client.http.patch(base_endpoint, json_data={"processing": base_request_body})
            self.logger.info(
                f"Updated processing engines for Model {model_id} {version}: \n{result['processing']}"
            )
        except ResponseError as e:
            error_message = e.message
            self.logger.error(f"Direct try failed {error_message}, second try")
            try:
                result = self._api_client.http.patch(f"{base_endpoint}/processing", json_data=base_request_body)
                self.logger.info(
                    f"Updated processing engines for Model {model_id} {version}: \n{result['processing']}"
                )
            except ResponseError as e:
                # both endpoints failed -- surface the last error message
                error_message = e.message
                self.logger.error(error_message)
                raise ValueError(error_message)
        if timeout == 0:
            # fire-and-forget: caller did not ask us to wait for spin-up
            return
        else:
            assert timeout is None or timeout > 0, \
                "Timeout must either be an integer >= 0 or None if you wish to indicate no timeout"
            start_time = t()
            # Poll until at least min_engines report ready, or the timeout
            # elapses (timeout=None polls forever).
            while True:
                current_time = t() - start_time
                if timeout is not None and current_time > timeout:
                    self.logger.warning(
                        f"Timeout of {timeout} seconds reached while waiting for processing engines to initialize."
                    )
                    return
                model_details = self.get_model_processing_details(model_id, version)
                if model_details is not None:  # This means the model with the id and version is now visible
                    engines_ready = sum([engine["ready"] for engine in model_details["engines"]])
                    if engines_ready >= min_engines:
                        self.logger.info(f"{engines_ready} engines are ready.")
                        return
                sleep(poll_rate)
def get(self, model):
"""Gets a `Model` instance.
Args:
model (Union[str, Model]): The model identifier or a `Model` instance.
Returns:
Model: The `Model` instance.
Raises:
ApiError: A subclass of ApiError will be raised if the API returns an error status,
or the client is unable to connect.
"""
modelId = Model._coerce_identifier(model)
self.logger.debug("getting model %s", model)
json_obj = self._api_client.http.get('{}/{}'.format(self._base_route, modelId))
return Model(json_obj, self._api_client)
def get_by_name(self, name):
"""Gets a `Model` instance by name.
Args:
name (str): The model name.
Returns:
Model: The `Model` instance.
Raises:
ApiError: A subclass of ApiError will be raised if the API returns an error status,
or the client is unable to connect.
"""
params = {'name': name}
models = self.get_models(**params)
if models is not None and len(models) > 0:
return self.get(models[0])
else:
raise NotFoundError("Model {} not found".format(name), self._base_route, None)
def get_related(self, model):
"""Gets a list of all the models associated with the model provided, together with the model’s details.
Returns:
List[Model]: A list of `Model` instances.
Raises:
ApiError: A subclass of ApiError will be raised if the API returns an error status,
or the client is unable to connect.
"""
self.logger.debug("getting models related to model %s", model)
identifier = Model._coerce_identifier(model)
json_list = self._api_client.http.get('{}/{}/related-models'.format(self._base_route, identifier))
return list(Model(json_obj, self._api_client) for json_obj in json_list)
def get_versions(self, model):
"""Gets a list of all the versions associated with the model provided.
Returns:
List[ModelVersion]: A list of `Version` instances.
Raises:
ApiError: A subclass of ApiError will be raised if the API returns an error status,
or the client is unable to connect.
"""
self.logger.debug("getting versions related to model %s", model)
identifier = Model._coerce_identifier(model)
json_list = self._api_client.http.get('{}/{}/versions'.format(self._base_route, identifier))
return list(ModelVersion(json_obj, self._api_client) for json_obj in json_list)
def get_version(self, model, version):
"""Gets a versions associated with the model provided.
Returns:
ModelVersion: A instance of `ModelVersion` instances.
Raises:
ApiError: A subclass of ApiError will be raised if the API returns an error status,
or the client is unable to connect.
"""
self.logger.debug("getting version model %s version %s", model, version)
modelId = Model._coerce_identifier(model)
versionId = ModelVersion._coerce_identifier(version)
json_obj = self._api_client.http.get('{}/{}/versions/{}'.format(self._base_route, modelId, versionId))
return ModelVersion(json_obj, self._api_client)
def get_version_input_sample(self, model, version):
"""Gets the input sample associated with the model and version provided.
Returns:
String: A json string with the input sample
Raises:
ApiError: A subclass of ApiError will be raised if the API returns an error status,
or the client is unable to connect.
"""
self.logger.debug("getting input sample: model %s version %s", model, version)
modelId = Model._coerce_identifier(model)
versionId = ModelVersion._coerce_identifier(version)
json_obj = self._api_client.http.get('{}/{}/versions/{}/sample-input'.format(self._base_route, modelId, versionId))
return json_obj
def get_version_output_sample(self, model, version):
"""Gets the output sample associated with the model and version provided.
Returns:
String: A json string with the output sample
Raises:
ApiError: A subclass of ApiError will be raised if the API returns an error status,
or the client is unable to connect.
"""
self.logger.debug("getting output sample: model %s version %s", model, version)
modelId = Model._coerce_identifier(model)
versionId = ModelVersion._coerce_identifier(version)
json_obj = self._api_client.http.get('{}/{}/versions/{}/sample-output'.format(self._base_route, modelId, versionId))
return json_obj
def get_all(self):
"""Gets a list of all `Model` instances.
Returns:
List[Model]: A list of `Model` instances.
Raises:
ApiError: A subclass of ApiError will be raised if the API returns an error status,
or the client is unable to connect.
"""
self.logger.debug("getting all models")
return self.get_models()
def get_models(self, model_id=None, author=None, created_by_email=None, name=None, description=None,
is_active=None, is_expired=None, is_recommended=None, last_active_date_time=None,
expiration_date_time=None, sort_by=None, direction=None, page=None, per_page=1000):
"""Gets a list of `Model` instances within a set of parameters.
Args:
model_id (Optional[str]): Identifier of the model
author (Optional[str]): authoring company
created_by_email (Optional[str]): creator email
name (Optional[str]): name of the model
description (Optional[str]): description of the model
is_active (Optional[boolean, str]): availability of the model in the marketplace
is_expired (Optional[boolean, str]): expiration status
is_recommended (Optional[boolean, str]): recommended status
last_active_date_time (Optional[datetime, str]): latest use date
expiration_date_time (Optional[datetime, str]): expiration date
sort_by (Optional[str]): attribute name to sort results
direction (Optional[str]): Direction of the sorting algorithm (asc, desc)
page (Optional[float]): The page number for which results are being returned
per_page (Optional[float]): The number of models returned by page
Returns:
List[Model]: A list of `Model` instances.
Raises:
ApiError: A subclass of ApiError will be raised if the API returns an error status,
or the client is unable to connect.
"""
if model_id is not None and not isinstance(model_id, str):
raise TypeError("the model_id param should be a string")
if author is not None and not isinstance(author, str):
raise TypeError("the author param should be | |
<filename>src/cnlpt/cnlp_data.py
import os
from os.path import basename, dirname
import time
import logging
from filelock import FileLock
from typing import Callable, Dict, Optional, List, Union, Tuple
import numpy as np
import torch
from torch.utils.data.dataset import Dataset
from transformers.data.processors.utils import DataProcessor, InputExample
from transformers.tokenization_utils import PreTrainedTokenizer
from dataclasses import dataclass, field
from enum import Enum
from .cnlp_processors import cnlp_processors, cnlp_output_modes, classification, tagging, relex, mtl
special_tokens = ['<e>', '</e>', '<a1>', '</a1>', '<a2>', '</a2>', '<cr>', '<neg>']
logger = logging.getLogger(__name__)
def list_field(default=None, metadata=None):
    """Build a dataclass field whose default is produced by a factory returning *default*."""
    def _factory():
        return default
    return field(default_factory=_factory, metadata=metadata)
class Split(Enum):
    """Enumeration of dataset split names; each member's value is the split's string name."""
    train = "train"
    dev = "dev"
    test = "test"
@dataclass(frozen=True)
class InputFeatures:
    """
    A single set of features of data.
    Property names are the same names as the corresponding inputs to a model.

    Args:
        input_ids: Indices of input sequence tokens in the vocabulary.
        attention_mask: Mask to avoid performing attention on padding token indices.
            Mask values selected in ``[0, 1]``:
            Usually ``1`` for tokens that are NOT MASKED, ``0`` for MASKED (padded) tokens.
        token_type_ids: (Optional) Segment token indices to indicate first and second
            portions of the inputs. Only some models use them.
        event_tokens: (Optional) per-token indicator list -- presumably marking
            tokens inside <e>...</e> event spans; TODO confirm against the
            feature-construction code.
        label: (Optional) Label corresponding to the input. Int for classification problems,
            float for regression problems, a list of ints for tagging, or a list of
            (start, end, category) tuples for relation extraction.
    """

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    event_tokens: Optional[List[int]] = None
    label: List[Optional[Union[int, float, List[int], List[Tuple[int]]]]] = None

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        # BUG FIX: the module only imported `dataclass`/`field` from
        # dataclasses and never imported `json`, so the original
        # `json.dumps(dataclasses.asdict(self))` raised NameError at runtime.
        # Import locally to keep the method self-contained.
        import dataclasses
        import json
        return json.dumps(dataclasses.asdict(self)) + "\n"
def cnlp_convert_examples_to_features(
    examples: List[InputExample],
    tokenizer: PreTrainedTokenizer,
    max_length: Optional[int] = None,
    task=None,
    label_list=None,
    output_mode=None,
    token_classify=False,
    inference=False,
):
    """Encode raw InputExamples into model-ready InputFeatures.

    Fixes over the previous revision: the three bare ``except:`` clauses are
    narrowed to the exceptions the guarded calls actually raise (``KeyError``
    for the label_map lookup, ``ValueError`` for ``list.index``), and the
    deprecated ``logging.warn`` call on the root logger is replaced with
    ``logger.warning`` on the module logger.

    Args:
        examples: the examples to encode.
        tokenizer: tokenizer producing the word-piece encodings.
        max_length: maximum sequence length; defaults to ``tokenizer.max_len``.
        task: cnlp task name used to look up processor, labels, and output mode.
        label_list: explicit label list; taken from the task processor if None.
        output_mode: one of classification / regression / tagging / relex / mtl.
        token_classify: unused here; kept for interface compatibility.
        inference: when True, features carry no labels.

    Returns:
        A list of InputFeatures, one per example.
    """
    event_start_ind = tokenizer.convert_tokens_to_ids('<e>')
    event_end_ind = tokenizer.convert_tokens_to_ids('</e>')
    if max_length is None:
        max_length = tokenizer.max_len
    if task is not None:
        processor = cnlp_processors[task]()
        if label_list is None:
            label_list = processor.get_labels()
            logger.info("Using label list %s for task %s" % (label_list, task))
        if output_mode is None:
            output_mode = cnlp_output_modes[task]
            logger.info("Using output mode %s for task %s" % (output_mode, task))
    label_map = {label: i for i, label in enumerate(label_list)}

    def label_from_example(example: InputExample) -> Union[int, float, None]:
        """Map one example's raw label(s) to indices per the output mode."""
        if example.label is None:
            # No gold label supplied; the feature keeps None and downstream
            # code will not compare against it.
            return None
        if output_mode == classification:
            try:
                return label_map[example.label]
            except KeyError as e:
                # Narrowed from a bare except: a dict lookup only raises KeyError.
                logger.error('Error with example %s' % (example.guid))
                raise Exception() from e
        elif output_mode == "regression":
            return float(example.label)
        elif output_mode == tagging:
            return [label_map[label] for label in example.label]
        elif output_mode == relex:
            # Unknown relation categories fall back to label index 0.
            return [(int(start_token), int(end_token), label_map.get(category, 0))
                    for (start_token, end_token, category) in example.label]
        elif output_mode == mtl:
            return [label_map[x] for x in example.label]
        raise KeyError(output_mode)

    labels = [label_from_example(example) for example in examples]
    if examples[0].text_b is None:
        sentences = [example.text_a.split(' ') for example in examples]
    else:
        sentences = [(example.text_a, example.text_b) for example in examples]
    batch_encoding = tokenizer(
        sentences,
        max_length=max_length,
        padding="max_length",
        truncation=True,
        is_split_into_words=True,
    )
    roberta_based = tokenizer.cls_token == '<s>'
    if not roberta_based:
        assert tokenizer.cls_token == '[CLS]', 'This tokenizer does not seem to be based on BERT or Roberta -- this will cause errors with the dataset encoding.'
    # Align word-level labels with word pieces: pieces that do not begin a word
    # receive the ignore index (-100) so the loss skips them.
    if not inference:
        encoded_labels = []
        if output_mode == tagging:
            for sent_ind, sent in enumerate(sentences):
                sent_labels = []
                ## FIXME -- this is stupid and won't work outside the roberta encoding
                for wp_ind, wp in enumerate(batch_encoding[sent_ind].tokens):
                    if ((roberta_based and (wp.startswith('Ġ') or wp in special_tokens)) or
                        (not roberta_based and not wp.startswith('[') and (not wp.startswith('##') or wp in special_tokens))):
                        sent_labels.append(labels[sent_ind].pop(0))
                    else:
                        sent_labels.append(-100)
                encoded_labels.append(np.array(sent_labels))
            labels = encoded_labels
        elif output_mode == relex:
            # Build an N' x N' (word-piece length) matrix per sentence: -100
            # (mask) by default, 0 for every word-initial pair, then overwrite
            # with the actual relation labels.
            out_of_bounds = 0
            num_relations = 0
            for sent_ind, sent in enumerate(sentences):
                num_relations += len(labels[sent_ind])
                wpi_to_tokeni = {}
                tokeni_to_wpi = {}
                sent_labels = np.zeros((max_length, max_length)) - 100
                wps = batch_encoding[sent_ind].tokens
                ## FIXME -- this is stupid and won't work outside the roberta encoding
                for wp_ind, wp in enumerate(wps):
                    if wp.startswith('Ġ') or wp in special_tokens:
                        key = wp_ind
                        val = len(wpi_to_tokeni)
                        wpi_to_tokeni[key] = val
                        tokeni_to_wpi[val] = key
                # make every label between pairs a 0 to start:
                for wpi in wpi_to_tokeni.keys():
                    for wpi2 in wpi_to_tokeni.keys():
                        # Leave the diagonal at -100: a token cannot be in a
                        # relation with itself, and training on such similar
                        # token pairs could hurt learning.
                        if wpi != wpi2:
                            sent_labels[wpi, wpi2] = 0.0
                for label in labels[sent_ind]:
                    if not label[0] in tokeni_to_wpi or not label[1] in tokeni_to_wpi:
                        # An argument was truncated away; skip this relation.
                        out_of_bounds += 1
                        continue
                    wpi1 = tokeni_to_wpi[label[0]]
                    wpi2 = tokeni_to_wpi[label[1]]
                    sent_labels[wpi1][wpi2] = label[2]
                encoded_labels.append(sent_labels)
            labels = encoded_labels
            if out_of_bounds > 0:
                logger.warning('During relation processing, there were %d relations (out of %d total relations) where at least one argument was truncated so the relation could not be trained/predicted.' % (out_of_bounds, num_relations))
    features = []
    for i in range(len(examples)):
        inputs = {k: batch_encoding[k][i] for k in batch_encoding}
        # list.index raises ValueError when the marker token is absent.
        try:
            event_start = inputs['input_ids'].index(event_start_ind)
        except ValueError:
            event_start = -1
        try:
            event_end = inputs['input_ids'].index(event_end_ind)
        except ValueError:
            event_end = len(inputs['input_ids']) - 1
        # event_tokens: 0/1 mask over the <e>...</e> span; when no event
        # markers are present the whole sequence is marked.
        inputs['event_tokens'] = [0] * len(inputs['input_ids'])
        if event_start >= 0:
            inputs['event_tokens'] = [0] * event_start + [1] * (event_end - event_start + 1) + [0] * (len(inputs['input_ids']) - event_end - 1)
        else:
            inputs['event_tokens'] = [1] * len(inputs['input_ids'])
        if inference:
            label = None
        else:
            label = [labels[i]]
        feature = InputFeatures(**inputs, label=label)
        features.append(feature)
    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info("guid: %s" % (example.guid))
        logger.info("features: %s" % features[i])
    return features
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    Using `HfArgumentParser` we can turn this class
    into argparse arguments to be able to specify them on
    the command line.
    """
    # One directory per task, in the same order as task_name; each holds the
    # .tsv (or other) data files for that task.
    data_dir: List[str] = field(
        metadata={"help": "The input data dirs. A space-separated list of directories that should contain the .tsv files (or other data files) for the task. Should be presented in the same order as the task names."}
    )
    # NOTE(review): the factory returns None although the annotation says
    # List[str]. HfArgumentParser builds the CLI from this annotation, so it
    # is left unchanged here -- confirm before tightening to Optional[List[str]].
    task_name: List[str] = field(default_factory=lambda: None, metadata={"help": "A space-separated list of tasks to train on: " + ", ".join(cnlp_processors.keys())})
    # field(
    # metadata={"help": "A space-separated list of tasks to train on: " + ", ".join(cnlp_processors.keys())})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        },
    )
    # When False, previously cached encoded datasets are reused.
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    weight_classes: bool = field(
        default=False, metadata={"help": "A flag that indicates whether class-specific loss should be used. This can be useful in cases with severe class imbalance. The formula for a weight of a class is the count of that class divided the count of the rarest class."}
    )
class ClinicalNlpDataset(Dataset):
""" Copy-pasted from GlueDataset with glue task-specific code changed
moved into here to be self-contained
"""
args: DataTrainingArguments
output_mode: List[str]
features: List[InputFeatures]
def __init__(
self,
args: DataTrainingArguments,
tokenizer: PreTrainedTokenizer,
limit_length: Optional[int] = None,
mode: Union[str, Split] = Split.train,
cache_dir: Optional[str] = None,
):
self.args = args
self.processors = []
self.output_mode = []
self.class_weights = []
for task in args.task_name:
self.processors.append(cnlp_processors[task]())
self.output_mode.append(cnlp_output_modes[task])
if self.output_mode[-1] == mtl:
for subtask in range(self.processors[-1].get_num_tasks()):
self.class_weights.append(None)
else:
self.class_weights.append(None)
self.features = None
if isinstance(mode, str):
try:
mode = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name")
# Load data features from cache or dataset file
self.label_lists = [processor.get_labels() for processor in self.processors]
for task_ind,data_dir in enumerate(args.data_dir):
datadir = dirname(data_dir) if data_dir[-1] == '/' else data_dir
domain = basename(datadir)
dataconfig = basename(dirname(datadir))
cached_features_file = os.path.join(
cache_dir if cache_dir is not None else data_dir,
"cached_{}-{}_{}_{}_{}".format(
dataconfig, domain, mode.value, tokenizer.__class__.__name__, str(args.max_seq_length),
),
)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lock_path = cached_features_file + ".lock"
with FileLock(lock_path):
if os.path.exists(cached_features_file) and not args.overwrite_cache:
start = time.time()
features | |
entity=None, removed=None, **unknown_fields):
'''
entity : Any
removed : bool
'''
entity_ = entity
removed_ = removed
# Validate arguments against known Juju API types.
if removed_ is not None and not isinstance(removed_, bool):
raise Exception("Expected removed_ to be a bool, received: {}".format(type(removed_)))
self.entity = entity_
self.removed = removed_
self.unknown_fields = unknown_fields
class DeployerConnectionValues(Type):
    """Juju wire type: connection values reported to the deployer."""
    _toSchema = {'api_addresses': 'api-addresses'}
    _toPy = {'api-addresses': 'api_addresses'}

    def __init__(self, api_addresses=None, **unknown_fields):
        '''
        api_addresses : typing.Sequence[str]
        '''
        # Validate the argument against the known Juju API type before storing.
        if api_addresses is not None and not isinstance(api_addresses, (bytes, str, list)):
            raise Exception("Expected api_addresses_ to be a Sequence, received: {}".format(type(api_addresses)))
        self.api_addresses = api_addresses
        self.unknown_fields = unknown_fields
class DestroyApplicationInfo(Type):
    """Juju wire type: entities affected by destroying an application."""
    _toSchema = {'destroyed_storage': 'destroyed-storage', 'destroyed_units': 'destroyed-units', 'detached_storage': 'detached-storage'}
    _toPy = {'destroyed-storage': 'destroyed_storage', 'destroyed-units': 'destroyed_units', 'detached-storage': 'detached_storage'}

    def __init__(self, destroyed_storage=None, destroyed_units=None, detached_storage=None, **unknown_fields):
        '''
        destroyed_storage : typing.Sequence[~Entity]
        destroyed_units : typing.Sequence[~Entity]
        detached_storage : typing.Sequence[~Entity]
        '''
        # Deserialize each wire-format entry into an Entity, then validate.
        converted = {
            'destroyed_storage_': [Entity.from_json(item) for item in (destroyed_storage or [])],
            'destroyed_units_': [Entity.from_json(item) for item in (destroyed_units or [])],
            'detached_storage_': [Entity.from_json(item) for item in (detached_storage or [])],
        }
        for name, value in converted.items():
            if value is not None and not isinstance(value, (bytes, str, list)):
                raise Exception("Expected {} to be a Sequence, received: {}".format(name, type(value)))
        self.destroyed_storage = converted['destroyed_storage_']
        self.destroyed_units = converted['destroyed_units_']
        self.detached_storage = converted['detached_storage_']
        self.unknown_fields = unknown_fields
class DestroyApplicationOffers(Type):
    """Juju wire type: request to destroy a set of application offers."""
    _toSchema = {'force': 'force', 'offer_urls': 'offer-urls'}
    _toPy = {'force': 'force', 'offer-urls': 'offer_urls'}

    def __init__(self, force=None, offer_urls=None, **unknown_fields):
        '''
        force : bool
        offer_urls : typing.Sequence[str]
        '''
        # Validate each argument against its expected Juju API type.
        if force is not None and not isinstance(force, bool):
            raise Exception("Expected force_ to be a bool, received: {}".format(type(force)))
        if offer_urls is not None and not isinstance(offer_urls, (bytes, str, list)):
            raise Exception("Expected offer_urls_ to be a Sequence, received: {}".format(type(offer_urls)))
        self.force = force
        self.offer_urls = offer_urls
        self.unknown_fields = unknown_fields
class DestroyApplicationParams(Type):
    """Juju wire type: parameters for destroying one application."""
    _toSchema = {'application_tag': 'application-tag', 'destroy_storage': 'destroy-storage', 'force': 'force', 'max_wait': 'max-wait'}
    _toPy = {'application-tag': 'application_tag', 'destroy-storage': 'destroy_storage', 'force': 'force', 'max-wait': 'max_wait'}

    def __init__(self, application_tag=None, destroy_storage=None, force=None, max_wait=None, **unknown_fields):
        '''
        application_tag : str
        destroy_storage : bool
        force : bool
        max_wait : int
        '''
        # Validate every argument against its expected Juju API type.
        checks = (
            ('application_tag_', application_tag, (bytes, str), 'str'),
            ('destroy_storage_', destroy_storage, bool, 'bool'),
            ('force_', force, bool, 'bool'),
            ('max_wait_', max_wait, int, 'int'),
        )
        for name, value, expected, label in checks:
            if value is not None and not isinstance(value, expected):
                raise Exception("Expected {} to be a {}, received: {}".format(name, label, type(value)))
        self.application_tag = application_tag
        self.destroy_storage = destroy_storage
        self.force = force
        self.max_wait = max_wait
        self.unknown_fields = unknown_fields
class DestroyApplicationResult(Type):
    """Juju wire type: outcome of one destroy-application request."""
    _toSchema = {'error': 'error', 'info': 'info'}
    _toPy = {'error': 'error', 'info': 'info'}

    def __init__(self, error=None, info=None, **unknown_fields):
        '''
        error : Error
        info : DestroyApplicationInfo
        '''
        # Promote raw JSON payloads into typed wrappers; falsy inputs become None.
        error = Error.from_json(error) if error else None
        info = DestroyApplicationInfo.from_json(info) if info else None
        if error is not None and not isinstance(error, (dict, Error)):
            raise Exception("Expected error_ to be a Error, received: {}".format(type(error)))
        if info is not None and not isinstance(info, (dict, DestroyApplicationInfo)):
            raise Exception("Expected info_ to be a DestroyApplicationInfo, received: {}".format(type(info)))
        self.error = error
        self.info = info
        self.unknown_fields = unknown_fields
class DestroyApplicationResults(Type):
    """Juju wire type: batch of destroy-application outcomes."""
    _toSchema = {'results': 'results'}
    _toPy = {'results': 'results'}

    def __init__(self, results=None, **unknown_fields):
        '''
        results : typing.Sequence[~DestroyApplicationResult]
        '''
        # Deserialize each entry, then validate the resulting sequence.
        results = [DestroyApplicationResult.from_json(item) for item in (results or [])]
        if results is not None and not isinstance(results, (bytes, str, list)):
            raise Exception("Expected results_ to be a Sequence, received: {}".format(type(results)))
        self.results = results
        self.unknown_fields = unknown_fields
class DestroyApplicationUnits(Type):
    """Juju wire type: request to destroy a set of units by name."""
    _toSchema = {'unit_names': 'unit-names'}
    _toPy = {'unit-names': 'unit_names'}

    def __init__(self, unit_names=None, **unknown_fields):
        '''
        unit_names : typing.Sequence[str]
        '''
        # Validate the argument against the known Juju API type before storing.
        if unit_names is not None and not isinstance(unit_names, (bytes, str, list)):
            raise Exception("Expected unit_names_ to be a Sequence, received: {}".format(type(unit_names)))
        self.unit_names = unit_names
        self.unknown_fields = unknown_fields
class DestroyApplicationsParams(Type):
    """Juju wire type: batch of destroy-application parameter sets."""
    _toSchema = {'applications': 'applications'}
    _toPy = {'applications': 'applications'}

    def __init__(self, applications=None, **unknown_fields):
        '''
        applications : typing.Sequence[~DestroyApplicationParams]
        '''
        # Deserialize each entry, then validate the resulting sequence.
        applications = [DestroyApplicationParams.from_json(item) for item in (applications or [])]
        if applications is not None and not isinstance(applications, (bytes, str, list)):
            raise Exception("Expected applications_ to be a Sequence, received: {}".format(type(applications)))
        self.applications = applications
        self.unknown_fields = unknown_fields
class DestroyConsumedApplicationParams(Type):
    """Juju wire type: parameters for destroying one consumed application."""
    _toSchema = {'application_tag': 'application-tag'}
    _toPy = {'application-tag': 'application_tag'}

    def __init__(self, application_tag=None, **unknown_fields):
        '''
        application_tag : str
        '''
        # Validate the argument against the known Juju API type before storing.
        if application_tag is not None and not isinstance(application_tag, (bytes, str)):
            raise Exception("Expected application_tag_ to be a str, received: {}".format(type(application_tag)))
        self.application_tag = application_tag
        self.unknown_fields = unknown_fields
class DestroyConsumedApplicationsParams(Type):
    """Juju wire type: batch of destroy-consumed-application parameter sets."""
    _toSchema = {'applications': 'applications'}
    _toPy = {'applications': 'applications'}

    def __init__(self, applications=None, **unknown_fields):
        '''
        applications : typing.Sequence[~DestroyConsumedApplicationParams]
        '''
        # Deserialize each entry, then validate the resulting sequence.
        applications = [DestroyConsumedApplicationParams.from_json(item) for item in (applications or [])]
        if applications is not None and not isinstance(applications, (bytes, str, list)):
            raise Exception("Expected applications_ to be a Sequence, received: {}".format(type(applications)))
        self.applications = applications
        self.unknown_fields = unknown_fields
class DestroyControllerArgs(Type):
    """Juju wire type: flags controlling controller destruction."""
    _toSchema = {'destroy_models': 'destroy-models', 'destroy_storage': 'destroy-storage'}
    _toPy = {'destroy-models': 'destroy_models', 'destroy-storage': 'destroy_storage'}

    def __init__(self, destroy_models=None, destroy_storage=None, **unknown_fields):
        '''
        destroy_models : bool
        destroy_storage : bool
        '''
        # Validate each flag against its expected Juju API type.
        for name, value in (('destroy_models_', destroy_models),
                            ('destroy_storage_', destroy_storage)):
            if value is not None and not isinstance(value, bool):
                raise Exception("Expected {} to be a bool, received: {}".format(name, type(value)))
        self.destroy_models = destroy_models
        self.destroy_storage = destroy_storage
        self.unknown_fields = unknown_fields
class DestroyMachineInfo(Type):
    """Juju wire type: entities affected by destroying a machine."""
    _toSchema = {'destroyed_storage': 'destroyed-storage', 'destroyed_units': 'destroyed-units', 'detached_storage': 'detached-storage'}
    _toPy = {'destroyed-storage': 'destroyed_storage', 'destroyed-units': 'destroyed_units', 'detached-storage': 'detached_storage'}

    def __init__(self, destroyed_storage=None, destroyed_units=None, detached_storage=None, **unknown_fields):
        '''
        destroyed_storage : typing.Sequence[~Entity]
        destroyed_units : typing.Sequence[~Entity]
        detached_storage : typing.Sequence[~Entity]
        '''
        # Deserialize each wire-format entry into an Entity, then validate.
        converted = {
            'destroyed_storage_': [Entity.from_json(item) for item in (destroyed_storage or [])],
            'destroyed_units_': [Entity.from_json(item) for item in (destroyed_units or [])],
            'detached_storage_': [Entity.from_json(item) for item in (detached_storage or [])],
        }
        for name, value in converted.items():
            if value is not None and not isinstance(value, (bytes, str, list)):
                raise Exception("Expected {} to be a Sequence, received: {}".format(name, type(value)))
        self.destroyed_storage = converted['destroyed_storage_']
        self.destroyed_units = converted['destroyed_units_']
        self.detached_storage = converted['detached_storage_']
        self.unknown_fields = unknown_fields
class DestroyMachineResult(Type):
    """Juju wire type: outcome of one destroy-machine request."""
    _toSchema = {'error': 'error', 'info': 'info'}
    _toPy = {'error': 'error', 'info': 'info'}

    def __init__(self, error=None, info=None, **unknown_fields):
        '''
        error : Error
        info : DestroyMachineInfo
        '''
        # Promote raw JSON payloads into typed wrappers; falsy inputs become None.
        error = Error.from_json(error) if error else None
        info = DestroyMachineInfo.from_json(info) if info else None
        if error is not None and not isinstance(error, (dict, Error)):
            raise Exception("Expected error_ to be a Error, received: {}".format(type(error)))
        if info is not None and not isinstance(info, (dict, DestroyMachineInfo)):
            raise Exception("Expected info_ to be a DestroyMachineInfo, received: {}".format(type(info)))
        self.error = error
        self.info = info
        self.unknown_fields = unknown_fields
class DestroyMachineResults(Type):
    """Juju wire type: batch of destroy-machine outcomes."""
    _toSchema = {'results': 'results'}
    _toPy = {'results': 'results'}

    def __init__(self, results=None, **unknown_fields):
        '''
        results : typing.Sequence[~DestroyMachineResult]
        '''
        # Deserialize each entry, then validate the resulting sequence.
        results = [DestroyMachineResult.from_json(item) for item in (results or [])]
        if results is not None and not isinstance(results, (bytes, str, list)):
            raise Exception("Expected results_ to be a Sequence, received: {}".format(type(results)))
        self.results = results
        self.unknown_fields = unknown_fields
class DestroyMachines(Type):
_toSchema = {'force': 'force', 'machine_names': 'machine-names'}
_toPy = {'force': 'force', 'machine-names': 'machine_names'}
def __init__(self, force=None, machine_names=None, **unknown_fields):
'''
force : bool
machine_names : typing.Sequence[str]
'''
| |
may have been deprecated
such as 'crs_transform'.
Returns:
An unstarted Task that exports the image to Drive.
"""
config = _capture_parameters(locals(), ['image'])
config = _prepare_image_export_config(image, config,
Task.ExportDestination.ASSET)
return _create_export_task(config, Task.Type.EXPORT_IMAGE)
# Disable argument usage check; arguments are accessed using locals().
# pylint: disable=unused-argument
@staticmethod
def toCloudStorage(image, description='myExportImageTask',
                   bucket=None, fileNamePrefix=None,
                   dimensions=None, region=None, scale=None,
                   crs=None, crsTransform=None, maxPixels=None,
                   shardSize=None, fileDimensions=None,
                   skipEmptyTiles=None, fileFormat=None, formatOptions=None,
                   **kwargs):
    """Creates a task to export an EE Image to Google Cloud Storage.
    Args:
      image: The image to be exported.
      description: Human-readable name of the task.
      bucket: The name of a Cloud Storage bucket for the export.
      fileNamePrefix: Cloud Storage object name prefix for the export.
          Defaults to the name of the task.
      dimensions: The dimensions of the exported image. Takes either a
          single positive integer as the maximum dimension or "WIDTHxHEIGHT"
          where WIDTH and HEIGHT are each positive integers.
      region: The lon,lat coordinates for a LinearRing or Polygon
          specifying the region to export. Can be specified as a nested
          lists of numbers or a serialized string. Defaults to the image's
          region.
      scale: The resolution in meters per pixel. Defaults to the
          native resolution of the image asset unless a crsTransform
          is specified.
      crs: The coordinate reference system of the exported image's
          projection. Defaults to the image's default projection.
      crsTransform: A comma-separated string of 6 numbers describing
          the affine transform of the coordinate reference system of the
          exported image's projection, in the order: xScale, xShearing,
          xTranslation, yShearing, yScale and yTranslation. Defaults to
          the image's native CRS transform.
      maxPixels: The maximum allowed number of pixels in the exported
          image. The task will fail if the exported region covers more
          pixels in the specified projection. Defaults to 100,000,000.
      shardSize: Size in pixels of the shards in which this image will be
          computed. Defaults to 256.
      fileDimensions: The dimensions in pixels of each image file, if the
          image is too large to fit in a single file. May specify a
          single number to indicate a square shape, or a tuple of two
          dimensions to indicate (width,height). Note that the image will
          still be clipped to the overall image dimensions. Must be a
          multiple of shardSize.
      skipEmptyTiles: If true, skip writing empty (i.e. fully-masked)
          image tiles. Defaults to false.
      fileFormat: The string file format to which the image is exported.
          Currently only 'GeoTIFF' and 'TFRecord' are supported, defaults to
          'GeoTIFF'.
      formatOptions: A dictionary of string keys to format specific options.
      **kwargs: Holds other keyword arguments that may have been deprecated
          such as 'crs_transform'.
    Returns:
      An unstarted Task that exports the image to Google Cloud Storage.
    """
    # NOTE: locals() must be captured before any new local is assigned --
    # _capture_parameters reads the current frame's locals as the parameter set.
    config = _capture_parameters(locals(), ['image'])
    config = _prepare_image_export_config(image, config,
                                          Task.ExportDestination.GCS)
    return _create_export_task(config, Task.Type.EXPORT_IMAGE)
@staticmethod
def toDrive(image, description='myExportImageTask', folder=None,
            fileNamePrefix=None, dimensions=None, region=None,
            scale=None, crs=None, crsTransform=None,
            maxPixels=None, shardSize=None, fileDimensions=None,
            skipEmptyTiles=None, fileFormat=None, formatOptions=None,
            **kwargs):
    """Creates a task to export an EE Image to Drive.
    Args:
      image: The image to be exported.
      description: Human-readable name of the task.
      folder: The name of a unique folder in your Drive account to
          export into. Defaults to the root of the drive.
      fileNamePrefix: The Google Drive filename for the export.
          Defaults to the name of the task.
      dimensions: The dimensions of the exported image. Takes either a
          single positive integer as the maximum dimension or "WIDTHxHEIGHT"
          where WIDTH and HEIGHT are each positive integers.
      region: The lon,lat coordinates for a LinearRing or Polygon
          specifying the region to export. Can be specified as a nested
          lists of numbers or a serialized string. Defaults to the image's
          region.
      scale: The resolution in meters per pixel. Defaults to the
          native resolution of the image asset unless a crsTransform
          is specified.
      crs: The coordinate reference system of the exported image's
          projection. Defaults to the image's default projection.
      crsTransform: A comma-separated string of 6 numbers describing
          the affine transform of the coordinate reference system of the
          exported image's projection, in the order: xScale, xShearing,
          xTranslation, yShearing, yScale and yTranslation. Defaults to
          the image's native CRS transform.
      maxPixels: The maximum allowed number of pixels in the exported
          image. The task will fail if the exported region covers more
          pixels in the specified projection. Defaults to 100,000,000.
      shardSize: Size in pixels of the shards in which this image will be
          computed. Defaults to 256.
      fileDimensions: The dimensions in pixels of each image file, if the
          image is too large to fit in a single file. May specify a
          single number to indicate a square shape, or a tuple of two
          dimensions to indicate (width,height). Note that the image will
          still be clipped to the overall image dimensions. Must be a
          multiple of shardSize.
      skipEmptyTiles: If true, skip writing empty (i.e. fully-masked)
          image tiles. Defaults to false.
      fileFormat: The string file format to which the image is exported.
          Currently only 'GeoTIFF' and 'TFRecord' are supported, defaults to
          'GeoTIFF'.
      formatOptions: A dictionary of string keys to format specific options.
      **kwargs: Holds other keyword arguments that may have been deprecated
          such as 'crs_transform', 'driveFolder', and 'driveFileNamePrefix'.
    Returns:
      An unstarted Task that exports the image to Drive.
    """
    # NOTE: locals() must be captured before any new local is assigned --
    # _capture_parameters reads the current frame's locals as the parameter set.
    config = _capture_parameters(locals(), ['image'])
    config = _prepare_image_export_config(image, config,
                                          Task.ExportDestination.DRIVE)
    return _create_export_task(config, Task.Type.EXPORT_IMAGE)
# pylint: enable=unused-argument
class map(object):
    """A class with a static method to start map export tasks."""

    def __init__(self):
        """Forbids class instantiation."""
        raise AssertionError('This class cannot be instantiated.')

    # Disable argument usage check; arguments are accessed using locals().
    # pylint: disable=unused-argument
    @staticmethod
    def toCloudStorage(image, description='myExportMapTask', bucket=None,
                       fileFormat=None, path=None, writePublicTiles=None,
                       maxZoom=None, scale=None, minZoom=None,
                       region=None, skipEmptyTiles=None, mapsApiKey=None,
                       **kwargs):
        """Creates a task to export an Image as a pyramid of map tiles.
        Exports a rectangular pyramid of map tiles for use with web map
        viewers. The map tiles will be accompanied by a reference
        index.html file that displays them using the Google Maps API,
        and an earth.html file for opening the map on Google Earth.
        Args:
          image: The image to export as tiles.
          description: Human-readable name of the task.
          bucket: The destination bucket to write to.
          fileFormat: The map tiles' file format, one of 'auto', 'png',
              or 'jpeg'. Defaults to 'auto', which means that opaque tiles
              will be encoded as 'jpg' and tiles with transparency will be
              encoded as 'png'.
          path: The string used as the output's path. A trailing '/'
              is optional. Defaults to the task's description.
          writePublicTiles: Whether to write public tiles instead of using the
              bucket's default object ACL. Defaults to True and requires the
              invoker to be an OWNER of bucket.
          maxZoom: The maximum zoom level of the map tiles to export.
          scale: The max image resolution in meters per pixel, as an alternative
              to 'maxZoom'. The scale will be converted to the most appropriate
              maximum zoom level at the equator.
          minZoom: The optional minimum zoom level of the map tiles to export.
          region: The lon,lat coordinates for a LinearRing or Polygon
              specifying the region to export. Can be specified as a nested
              lists of numbers or a serialized string. Map tiles will be
              produced in the rectangular region containing this geometry.
              Defaults to the image's region.
          skipEmptyTiles: If true, skip writing empty (i.e. fully-transparent)
              map tiles. Defaults to false.
          mapsApiKey: Used in index.html to initialize the Google Maps API. This
              removes the "development purposes only" message from the map.
          **kwargs: Holds other keyword arguments that may have been deprecated
              such as 'crs_transform'.
        Returns:
          An unstarted Task that exports the image to Google Cloud Storage.
        """
        # NOTE: locals() must be captured before any new local is assigned --
        # _capture_parameters reads the current frame's locals as the parameters.
        config = _capture_parameters(locals(), ['image'])
        config = _prepare_map_export_config(image, config)
        return _create_export_task(config, Task.Type.EXPORT_MAP)
    # pylint: enable=unused-argument
class table(object):
"""A class with static methods to start table export tasks."""
def __init__(self):
"""Forbids class instantiation."""
raise AssertionError('This class cannot be instantiated.')
def __new__(cls, collection, description='myExportTableTask', config=None):
"""Export an EE FeatureCollection as a table.
The exported table will reside in Google Drive or Cloud Storage.
Args:
collection: The feature collection to be exported.
description: Human-readable name of the task.
config: A dictionary that will be copied | |
<filename>yapypy/extended_python/grammar.py<gh_stars>0
RBNF = r"""
NEWLINE := ''
ENDMARKER := ''
NAME := ''
INDENT := ''
DEDENT := ''
NUMBER := ''
STRING := ''
single_input ::= it=NEWLINE | seq=simple_stmt | it=compound_stmt NEWLINE
file_input ::= (NEWLINE | seqs<<stmt)* [ENDMARKER] -> mod=Module(sum(seqs or [], [])); fix_missing_locations(mod); return mod
eval_input ::= it=testlist NEWLINE* ENDMARKER -> Expression(it)
# the restrictions of decorator syntax are released here for the sake of convenience.
decorator ::= '@' exp=test NEWLINE -> exp
decorated ::= decorators=decorator+ it=(classdef | funcdef | async_funcdef) -> it.decorator_list = list(decorators); return it
async_funcdef ::= mark='async'
'def' name=NAME args=parameters ['->' ret=test] ':' body=suite -> def_rewrite(mark, name, args, ret, body, is_async=True)
funcdef ::= mark='def' name=NAME args=parameters ['->' ret=test] ':' body=suite -> def_rewrite(mark, name, args, ret, body)
parameters ::= '(' [args=typedargslist] ')' -> args if args else arguments([], None, [], [], None, [])
lam_args ::= [args=varargslist] -> args if args else arguments([], None, [], [], None, [])
default_fp ::= '=' expr=test -> expr
kw_default_fp ::= ['=' expr=test] -> expr
tfpdef ::= name=NAME [':' annotation=test] -> arg(name.value, annotation, **loc @ name)
vfpdef ::= name=NAME -> arg(name.value, None, **loc @ name)
typedargslist ::= args << tfpdef [defaults<<default_fp] (',' args<<tfpdef [defaults<<default_fp])* [',' [
'*' [vararg=tfpdef] (',' kwonlyargs<<tfpdef kw_defaults<<kw_default_fp)* [',' ['**' kwarg=tfpdef [',']]]
| '**' kwarg=tfpdef [',']]]
| '*' [vararg=tfpdef] (',' kwonlyargs<<tfpdef kw_defaults<<kw_default_fp)* [',' ['**' kwarg=tfpdef [',']]]
| '**' kwarg=tfpdef [',']
-> arguments(args or [], vararg, kwonlyargs or [], kw_defaults or [], kwarg, defaults or [])
varargslist ::= args << vfpdef [defaults<<default_fp] (',' args<<vfpdef [defaults<<default_fp])* [',' [
'*' [vararg=vfpdef] (',' kwonlyargs<<vfpdef kw_defaults<<kw_default_fp)* [',' ['**' kwarg=vfpdef [',']]]
| '**' kwarg=vfpdef [',']]]
| '*' [vararg=vfpdef] (','kwonlyargs<<vfpdef kw_defaults<<kw_default_fp)* [',' ['**' kwargs=vfpdef [',']]]
| '**' kwargs=vfpdef [',']
-> arguments(args or [], vararg, kwonlyargs or [], kw_defaults or [], kwarg, defaults or [])
stmt ::= seq=simple_stmt | it=compound_stmt -> [it] if it else seq
simple_stmt ::= seq<<small_stmt (';' seq<<small_stmt)* [';'] NEWLINE -> seq
small_stmt ::= it=(expr_stmt | del_stmt | pass_stmt | flow_stmt | # ------------------------------
import_stmt | global_stmt | nonlocal_stmt | assert_stmt) -> it
expr_stmt ::= lhs=testlist_star_expr (ann=annassign | aug=augassign aug_exp=(yield_expr|testlist) | # ------------------------------
('=' rhs<<(yield_expr|testlist_star_expr))*) -> expr_stmt_rewrite(lhs, ann, aug, aug_exp, rhs)
annassign ::= ':' anno=test ['=' value=test] -> (anno, value)
testlist_star_expr ::= seq<<(test|star_expr) (',' seq<<(test|star_expr))* [force_tuple=','] -> Tuple(seq, Load()) if len(seq) > 1 or force_tuple else seq[0]
augassign ::= it=('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' | # ------------------------------
'<<=' | '>>=' | '**=' | '//=') -> augassign_rewrite(it)
# For normal and annotated assignments, additional restrictions enforced by the interpreter -------------------------------
del_stmt ::= mark='del' lst=exprlist -> Delete([as_del(lst)], **loc @ mark)
pass_stmt ::= mark='pass' -> Pass(**loc @ mark)
flow_stmt ::= it=(break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt) -> it
break_stmt ::= mark='break' -> Break(**loc @ mark)
continue_stmt ::= mark='continue' -> Continue(**loc @ mark)
return_stmt ::= mark='return' [value=testlist_star_expr] -> Return(value, **loc @ mark)
yield_stmt ::= exp=yield_expr -> Expr(exp)
raise_stmt ::= mark='raise' [exc=test ['from' cause=test]] -> Raise(exc, cause, **loc @ mark)
import_stmt ::= it=(import_name | import_from) -> it
import_name ::= mark='import' names=dotted_as_names -> Import(names, **loc @ mark)
# note below::= the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS --------------------------------
import_level::= (_1='.' | '...') -> 1 if _1 else 3
wild ::= '*' -> [alias(name='*', asname=None)]
import_from ::= (mark='from' (levels=import_level* module=dotted_name | levels=import_level+) # ------------------------------
'import' (wild=wild | '(' names=import_as_names ')' | names=import_as_names)) -> ImportFrom(module or '', wild or names, sum(levels or []), **loc @ mark)
NAMESTR ::= n=NAME -> n.value
import_as_name ::= name=NAMESTR ['as' asname=NAMESTR] -> alias(name, asname)
dotted_as_name ::= name=dotted_name ['as' asname=NAMESTR] -> alias(name, asname)
import_as_names::= seq<<import_as_name (',' seq<<import_as_name)* [','] -> seq
dotted_as_names::= seq<<dotted_as_name (',' seq<<dotted_as_name)* -> seq
dotted_name ::= xs=(NAME ('.' NAME)*) -> ''.join(c.value for c in xs)
global_stmt ::= mark='global' names<<NAMESTR (',' name<<NAMESTR)* -> Global(names, **loc @ mark)
nonlocal_stmt ::= mark='nonlocal' names<<NAMESTR (',' name<<NAMESTR)* -> Nonlocal(names, **loc @ mark)
assert_stmt ::= mark='assert' test=test [',' msg=test] -> Assert(test, msg, **loc @ mark)
compound_stmt ::= it=(if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated # ------------------------------
| async_stmt) -> it
async_stmt ::= it=(async_funcdef | async_with_stmt | async_for_stmt) -> it
if_stmt ::= marks<<'if' tests<<test ':' # ------------------------------
bodies<<suite # ------------------------------
(marks<<'elif' tests<<test ':' bodies<<suite)* # ------------------------------
['else' ':' orelse=suite] -> if_stmt_rewrite(marks, tests, bodies, orelse)
while_stmt ::= 'while' test=test ':' body=suite ['else' ':' orelse=suite] -> while_stmt_rewrite(test, body, orelse)
async_for_stmt ::= 'async' 'for' target=exprlist 'in' iter=testlist ':' body=suite ['else' ':' orelse=suite] -> for_stmt_rewrite(target, iter, body, orelse, is_async=True)
for_stmt ::= 'for' target=exprlist 'in' iter=testlist ':' body=suite ['else' ':' orelse=suite] -> for_stmt_rewrite(target, iter, body, orelse)
try_stmt ::= (mark='try' ':' # ---------------------------
body=suite # ---------------------------
((excs<<except_clause ':' rescues<<suite)+ # ---------------------------
['else' ':' orelse=suite] # ---------------------------
['finally' ':' final=suite] | # ---------------------------
'finally' ':' final=suite)) -> try_stmt_rewrite(mark, body, excs, rescues, orelse, final)
async_with_stmt::= mark='async' 'with' items<<with_item (',' items<<with_item)* ':' body=suite -> with_stmt_rewrite(mark, items, body, is_async=True)
with_stmt ::= mark='with' items<<with_item (',' items<<with_item)* ':' body=suite -> with_stmt_rewrite(mark, items, body)
with_item ::= context_expr=test ['as' optional_vars=expr] -> withitem(context_expr, as_store(optional_vars))
except_clause ::= 'except' [type=test ['as' name=NAMESTR]] -> (type, name)
suite ::= seqs<<simple_stmt | NEWLINE INDENT (seqs<<stmt)+ DEDENT -> sum(seqs, [])
test ::= it=(ifexp| lambdef) -> it
ifexp ::= body=or_test ['if' test=or_test 'else' orelse=test] -> IfExp(test, body, orelse) if orelse else body
test_nocond ::= it=(or_test | lambdef_nocond) -> it
lambdef ::= m='lambda' args=lam_args ':' body=test -> Lambda(args, body)
lambdef_nocond ::= m='lambda' args=lam_args ':' body=test_nocond -> Lambda(args, body)
or_test ::= head=and_test ('or' tail<<and_test)* -> BoolOp(Or(), [head, *tail]) if tail else head
and_test ::= head=not_test ('and' tail<<not_test)* -> BoolOp(And(), [head, *tail]) if tail else head
not_test ::= mark='not' expr=not_test | comp=comparison -> UnaryOp(Not(), expr, **loc @ mark) if mark else comp
comparison ::= left=expr (ops<<comp_op comparators<<expr)* -> Compare(left, ops, comparators) if ops else left
comp_op ::= op=('<'|'>'|'=='|'>='|'<='|'<>'|'!='
|'in'|'not' 'in'|'is'|'is' 'not') -> comp_op_rewrite(op)
star_expr ::= mark='*' expr=expr -> Starred(expr, Load(), **loc @ mark)
expr_tr ::= op='|' expr=xor_expr -> (op, expr)
expr ::= head=xor_expr tail=expr_tr* -> expr_rewrite(head, tail)
xor_expr_tr ::= op='^' expr=and_expr -> (op, expr)
xor_expr ::= head=and_expr tail=xor_expr_tr* -> xor_expr_rewrite(head, tail)
and_expr_tr ::= op = '&' expr=shift_expr -> (op, expr)
and_expr ::= head=shift_expr tail=and_expr_tr* -> and_expr_rewrite(head, tail)
shift_expr_tr ::= op=('<<'|'>>') expr=arith_expr -> (op, expr)
shift_expr ::= head=arith_expr tail=shift_expr_tr* -> shift_expr_rewrite(head, tail)
arith_expr_tr ::= op=('+'|'-') expr=term -> (op, expr)
arith_expr ::= head=term tail=arith_expr_tr* -> arith_expr_rewrite(head, tail)
term_tr ::= op=('*'|'@'|'/'|'%'|'//') expr=factor -> (op, expr)
term ::= head=factor tail=term_tr* -> term_rewrite(head, tail)
factor ::= mark=('+'|'-'|'~') factor=factor | power=power -> factor_rewrite(mark, factor, power)
power ::= atom_expr=atom_expr ['**' factor=factor] -> BinOp(atom_expr, Pow(), factor) if factor else atom_expr
atom_expr ::= [a='await'] atom=atom trailers=trailer* -> atom_expr_rewrite(a, atom, trailers)
atom ::= (is_gen ='(' [yield_expr=yield_expr|comp=testlist_comp] ')' |
is_list='[' [comp=testlist_comp] ']' |
head='{' [dict=dictorsetmaker] is_dict='}' |
name=NAME |
number=NUMBER |
strs=STRING+ |
ellipsis='...' |
namedc='None' |
namedc='True' |
namedc='False')
-> atom_rewrite(loc, name, number, strs, namedc, ellipsis, dict, is_dict, is_gen, is_list, comp, yield_expr)
testlist_comp ::= values<<(test|star_expr) ( comp=comp_for | (',' values<<(test|star_expr))* [force_tuple=','] )
->
def app(is_tuple=None, is_list=None):
if is_list and comp:
return ListComp(*values, comp)
elif is_list:
return List(values, Load())
elif comp:
return GeneratorExp(*values, comp)
else:
return values[0] if len(values) is 1 and not force_tuple else Tuple(values, Load())
app
# `ExtSlice` is ignored here. We don't need this optimization for this project.
trailer ::= arglist=arglist | mark='[' subscr=subscriptlist ']' | mark='.' attr=NAMESTR
-> (lambda value: Subscript(value, subscr, Load(), **loc @ mark)) if subscr is not None else\
(lambda value: Call(value, *split_args_helper(arglist))) if arglist is not None else\
(lambda value: Attribute(value, attr, Load(), **loc @ mark))
# `Index` will be deprecated in Python3.8.
# See https://github.com/python/cpython/pull/9605#issuecomment-425381990
subscriptlist ::= head=subscript (',' tail << subscript)* [',']
-> Index(head if not tail else Tuple([head, *tail], Load()))
subscript3 ::= [lower=test] subscr=[':' [upper=test] [':' [step=test]]] -> Slice(lower, upper, step) if subscr else lower
subscript ::= it=(subscript3 | test) -> it
exprlist ::= seq << (expr|star_expr) (',' seq << (expr|star_expr))* [force_tuple=','] -> Tuple(seq, Load()) if force_tuple or len(seq) > 1 else seq[0]
testlist ::= seq << test (',' seq << test)* [force_tuple=','] -> Tuple(seq, Load()) if force_tuple or len(seq) > 1 else seq[0]
dict_unpack_s ::= '**' -> None
dictorsetmaker ::= (((keys<<test ':' values<<test | keys<<dict_unpack_s values<<expr)
(comp=comp_for | (',' (keys<<test ':' values<<test | keys<<dict_unpack_s values<<expr))* [','])) |
(values<<(test | star_expr)
(comp=comp_for | (',' values<<(test | star_expr))* [','])) )
-> if not comp: return ExDict(keys, values, | |
#coding: utf-8
import subprocess, sys, os
import threading, time, datetime
import logging, argparse
import shutil
from inter.apkcookpy.lib.apk import APKCook
import xml.etree.ElementTree as ET
logging.basicConfig(level = logging.INFO, format='%(asctime)s - %(levelname)s [%(filename)s:%(lineno)d]: %(message)s')
def execShellDaemon(cmd):
    """Launch *cmd* through the shell and return immediately.

    Returns the ``subprocess.Popen`` handle; stdout/stderr are captured
    via pipes so the caller can poll or terminate the process later.
    """
    proc = subprocess.Popen(
        cmd,
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    return proc
def execShell(cmd, t=120):
    """Run *cmd* synchronously through the shell with a *t*-second timeout.

    Returns a dict: key ``'d'`` holds decoded stdout when the command
    exited with status 0; otherwise key ``'e'`` holds decoded stderr
    (or the string ``'timeout'``).  Presence of ``'d'`` only means the
    command itself succeeded — callers still have to inspect the output.
    Output is decoded as utf-8 first, falling back to gbk.
    """
    def _decode(raw):
        # utf-8 first; gbk as a fallback for Chinese-locale devices.
        try:
            return raw.decode('utf-8')
        except Exception:
            return raw.decode('gbk')

    result = {}
    try:
        proc = subprocess.run(cmd, shell=True, timeout=t,
                              stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if proc.returncode == 0:
            result['d'] = _decode(proc.stdout)
        else:
            result['e'] = _decode(proc.stderr)
    except subprocess.TimeoutExpired:
        result['e'] = 'timeout'
    except Exception as e:
        logging.error('subprocess ' + str(e))
    return result
def getPkgList(pkg):
    """Resolve *pkg* into a list of package names.

    *pkg* is either a path to a text file with one package per line, or a
    comma-separated string of package names.  Entries are whitespace-stripped
    and empty entries are dropped.  Returns [] when *pkg* is empty/None or
    the file cannot be read.

    Bug fix: the original never initialised ``pkgs`` when *pkg* was falsy
    (and not a file), raising UnboundLocalError at the final loop; it also
    let whitespace-only entries through as empty-string package names.
    """
    pkgs = []
    if os.path.isfile(pkg):
        try:
            with open(pkg, 'r') as f:
                pkgs = f.read().split('\n')
        except Exception:
            # Unreadable file: treat as an empty package list (best effort,
            # matching the original's silent-failure behaviour).
            return []
    elif pkg:
        pkgs = pkg.split(',')
    # Strip whitespace and drop entries that end up empty.
    return [s for s in (p.strip() for p in pkgs) if s]
def getChildNode(node):
    """Collect the attribute dicts of all interactable UI nodes.

    A node counts as interactable when it is clickable, long-clickable,
    scrollable, or is an ``android.widget.EditText`` widget.  Traversal
    is pre-order: the node itself first, then its children recursively.
    """
    cls = node.get('class')
    interactable = (
        node.get('clickable') == 'true'
        or node.get('long-clickable') == 'true'
        or node.get('scrollable') == 'true'
        or (cls and cls.startswith('android.widget.EditText'))
    )
    found = [node.attrib] if interactable else []
    for child in node:
        found.extend(getChildNode(child))
    return found
def parseUIDump(dumpfile):
    """Parse a uiautomator dump file and return the attribute dicts of all
    interactable (clickable / long-clickable / scrollable / EditText) nodes,
    in pre-order.
    """
    def _walk(node):
        # Same predicate as getChildNode, inlined so the traversal is local.
        cls = node.get('class')
        hit = (node.get('clickable') == 'true'
               or node.get('long-clickable') == 'true'
               or node.get('scrollable') == 'true'
               or (cls and cls.startswith('android.widget.EditText')))
        found = [node.attrib] if hit else []
        for child in node:
            found.extend(_walk(child))
        return found

    return _walk(ET.parse(dumpfile).getroot())
class AMonkey(object):
    """Drives ad-hoc fuzzing of Android apps over adb.

    For each target package it pulls the AndroidManifest.xml from the
    device, starts every activity/service/receiver, performs simple UI
    taps/typing, and captures logcat output to /sdcard/monkeylogs while
    a watchdog thread force-stops the app after a timeout.
    """
    def __init__(self, did):
        # did: adb device id; '' / None selects the only attached device.
        self._adb = 'adb'
        self._frida = 'frida -U '
        self._did = did
        self._devicepkg = []
        # Directory layout: <script dir>/apps, <script dir>/apps/tmp and
        # <script dir>/inter (the latter holds the busybox binary).
        self._curdir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '')
        self._dirapps = os.path.join(self._curdir, 'apps', '')
        self._dirappstmp = os.path.join(self._dirapps, 'tmp', '')
        self._dirinter = os.path.join(self._curdir, 'inter', '')
        self._androidver = ''
        # Packages that must never be fuzzed (system / tooling apps).
        self._blacklist = [
            'com.android.settings',
            'com.topjohnwu.magisk',
            'com.speedsoftware.rootexplorer',
            'org.proxydroid',
            'android'
        ]
        self._init()
    def _init(self):
        """Verify the device, cache its package list and create work dirs."""
        if not self.checkOnline(self._did):
            sys.exit()
        if self._did:
            # Pin every later adb invocation to the chosen device.
            self._adb = 'adb -s '+self._did+' '
        self._devicepkg = self.getDevicePkgs()
        try:
            os.mkdir(self._dirapps)
        except:
            pass  # already exists
        try:
            os.mkdir(self._dirappstmp)
        except:
            pass  # already exists
        # On-device folders for logcat captures and pulled manifests.
        cmd = self._adb + ' shell "mkdir /sdcard/monkeylogs"'
        ret = execShell(cmd)
        cmd = self._adb + ' shell "mkdir /sdcard/monkeyxmls"'
        ret = execShell(cmd)
    def checkOnline(self, deviceid=''):
        """Return True when *deviceid* (or, if empty, exactly one device) is attached."""
        devices = execShell('adb devices -l').get('d').split('\n')
        # Keep only the lines that describe an online device.
        ret = [d for d in devices if d.find('device ') != -1]
        dids = [d.split()[0] for d in ret]
        if deviceid:
            if deviceid in dids:
                return True
            else:
                logging.error('Device id error')
                logging.error(execShell('adb devices -l').get('d'))
                return False
        else:
            if len(dids) == 0:
                logging.error('No device')
                return False
            elif len(dids) == 1:
                return True
            elif len(dids) > 1:
                logging.error('More than one device, please set -s deviceid')
                return False
    def timeoutKIll(self, pkg, t):
        """Force-stop *pkg* once a second for *t* seconds (run as a daemon thread)."""
        for i in range(t):
            time.sleep(1)
            cmd = self._adb + ' shell "am force-stop '+pkg+' " '
            execShell(cmd)
    def getDevicePkgs(self):
        """Return the list of package names installed on the device."""
        ret = execShell(self._adb + ' shell pm list packages')
        pkgs = []
        if 'e' not in ret.keys():
            dt = ret.get('d').split('\n')
            for p in dt:
                p = p.strip()
                if p:
                    # Each line looks like 'package:<name>'.
                    pkgs.append(p.split(':')[1])
        else:
            logging.error(ret.get('e'))
        return pkgs
    def pullXml(self, p):
        """Extract package *p*'s AndroidManifest.xml from the device.

        Uses on-device busybox to unzip the manifest out of the installed
        apk, pulls it into the local apps dir, and returns the local path
        (ending in '.xml'), or None on failure.
        """
        logging.info('==pull xml')
        if not self.setupBusybox():
            logging.error('busybox error')
            return
        sp = self._dirapps+p
        cmd = self._adb + ' shell "pm path '+p+'"'
        ret = execShell(cmd)
        if 'd' in ret.keys() and ret.get('d'):
            # multiple returns?  (pm path can print several apk paths; use the first)
            apkpath = ret.get('d').split('\n')[0].split(':')[1]
            cmd = self._adb + ' shell "/data/local/tmp/busybox unzip -p '+apkpath+' AndroidManifest.xml > /sdcard/monkeyxmls/'+p+'"'
            ret = execShell(cmd)
            cmd = self._adb + ' shell ls /sdcard/monkeyxmls/'+p
            ret = execShell(cmd)
            if 'No such file' in str(ret) :
                logging.error('xml unzip error')
                return
            cmd = self._adb + ' pull /sdcard/monkeyxmls/'+p+' '+sp
            ret = execShell(cmd)
            if 'd' in ret.keys():
                shutil.move(sp, sp+'.xml')
                return sp+'.xml'
            else:
                logging.error('pull error'+ret.get('e')+apkpath)
        else:
            logging.error('device has no '+p)
    def setupBusybox(self):
        """Ensure busybox exists at /data/local/tmp on the device; push it if missing."""
        cmd = self._adb + ' shell ls /data/local/tmp/busybox'
        ret = execShell(cmd)
        if 'No such file' in str(ret) :
            busybox = self._dirinter+'busybox'
            if not os.path.isfile(busybox):
                logging.error('please put busybox in dir "inter")')
                return False
            cmd = self._adb + ' push '+busybox+' /data/local/tmp/busybox'
            ret = execShell(cmd)
            if 'd' in ret.keys():
                logging.info('push busybox success')
                cmd = cmd = self._adb + ' shell "chmod +x /data/local/tmp/busybox" '
                ret = execShell(cmd)
                return True
            else:
                return False
        return True
    def killMonkey(self):
        """Kill any lingering com.android.commands.monkey processes (requires su)."""
        logging.info('Clean monkey')
        cmd = self._adb + ' shell "ps -A | grep com.android.commands.monkey" '
        ret = execShell(cmd)
        if 'd' in ret.keys():
            data = ret.get('d').split('\n')
            for d in data:
                tmp = d.split()
                # Expected ps row: 9 columns, pid in column 1, name in column 8.
                if len(tmp) == 9 and tmp[8] == 'com.android.commands.monkey':
                    cmd = self._adb + ' shell "su -c \' kill -9 '+tmp[1]+'\' "'
                    ret = execShell(cmd)
                    if 'e' in ret.keys():
                        logging.info(ret.get('e'))
        logging.info('Clean monkey done')
    def getCurActivity(self):
        """Return the name of the activity currently on top (via dumpsys).

        NOTE(review): when dumpsys produces no output this returns the raw
        result dict from execShell instead of a string — callers only use
        `in` checks, which then raise/behave oddly; confirm before relying
        on the return type.
        """
        cmd = self._adb + ' shell "dumpsys activity top | grep ACTIVITY "'
        ret = execShell(cmd)
        out = ret.get('d')
        if out:
            out = out.split('\n')
            out = out[-2]
            out = out.split()[1]
            ret = out
            # 'pkg/.Act' -> 'pkg.Act'; otherwise keep the part after the '/'.
            if '/.' in out:
                ret = ret.replace('/', '')
            else:
                ret = ret.split('/')[1].strip()
        return ret
    def UIClick(self, p, a):
        """Tap/type on the interactable widgets of activity *a* in package *p*.

        Dumps the UI hierarchy with uiautomator and pulls it locally.
        NOTE(review): the parse call is commented out, so `clicks` is always
        empty and the loop below is currently dead code.
        """
        if p not in self.getCurActivity():
            return
        #some phone may not work
        cmd = self._adb + ' shell "/system/bin/uiautomator dump /sdcard/window_dump.xml "'
        ret = execShell(cmd)
        curdir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '')
        dumpfile = curdir+'/apps/tmp/uidump.xml'
        cmd = self._adb + ' pull /sdcard/window_dump.xml '+dumpfile
        ret = execShell(cmd)
        # clicks = parseUIDump(dumpfile)
        clicks = []
        for c in clicks:
            if p not in self.getCurActivity():
                break
            if c.get('class') and c.get('class').startswith('android.widget.EditText'):
                # 'bounds' looks like '[x1,y1][x2,y2]'; take the top-left
                # corner and nudge 3px inwards so the tap lands inside.
                xy = c.get('bounds')
                xy = xy.split('][')[0]
                xy = xy.lstrip('[')
                x,y = xy.split(',')
                x = int(x) + 3
                y = int(y) + 3
                cmd = self._adb + ' shell "input tap {} {}"'.format(x, y)
                ret = execShell(cmd)
                cmd = self._adb + ' shell "input text tex{}{}"'.format(x, y)
                ret = execShell(cmd)
                logging.info('input '+c.get('resource-id'))
            elif c.get('clickable') == 'true':
                xy = c.get('bounds')
                xy = xy.split('][')[0]
                xy = xy.lstrip('[')
                x,y = xy.split(',')
                x = int(x) + 3
                y = int(y) + 3
                logging.info('click ({},{}) {}'.format(x, y, c.get('resource-id')))
                cmd = self._adb + ' shell "input tap {} {}"'.format(x, y)
                ret = execShell(cmd)
                time.sleep(1)
                if a not in self.getCurActivity():
                    # keyevent 4 == BACK: return to the activity under test.
                    cmd = self._adb + ' shell "input keyevent 4"'
                    ret = execShell(cmd)
        return clicks
    def monkey(self, pkg):
        """Fuzz every package named by *pkg* (list file or comma-separated string).

        For each installed, non-blacklisted package: obtain its manifest,
        start each activity (with UI clicks), each service and each
        receiver, while logcat is captured on the device and a watchdog
        thread force-stops the app after `timeout` seconds.
        """
        pkgs = getPkgList(pkg)
        for p in pkgs:
            if p in self._blacklist:
                continue
            if p not in self._devicepkg:
                logging.error(p+' not installed')
                continue
            # Check the device is still online.
            if not self.checkOnline(self._did):
                logging.error('Device offline')
                return
            logging.info('=='+p)
            try:
                # Prepare the manifest file (reuse a previously pulled copy).
                sp = self._dirapps+p
                if os.path.isfile(sp+'.xml') and os.stat(sp+'.xml').st_size > 0:
                    apkcook = APKCook(sp+'.xml', True)
                else:
                    xmlpath = self.pullXml(p)
                    if xmlpath:
                        apkcook = APKCook(xmlpath, True)
                    else:
                        logging.error('xml error'+p)
                        return
                activity = apkcook.show('ma').split(',')
                if len(activity) < 2:
                    logging.info('maybe encrypted')
                #timeout kill
                timeout = 220
                timeoutThread = threading.Thread(target=self.timeoutKIll, args=(p, timeout), daemon=True)
                timeoutThread.start()
                cmd = self._adb + ' shell "rm /sdcard/monkeylogs/'+p+'.log"'
                ret = execShell(cmd)
                cmd = self._adb + ' shell "logcat -c"'
                ret = execShell(cmd)
                cmd = self._adb + ' shell "logcat > /sdcard/monkeylogs/'+p+'.log.log"'
                logcatdameon = execShellDaemon(cmd)
                UIcomponent = []
                for a in activity:
                    if not a:
                        continue
                    logging.info(a)
                    cmd = self._adb + ' shell "am start -n '+p+'/'+a+'"'
                    #timeout not working, because connected to pipe??
                    execShell(cmd)
                    #monkey click
                    # cmd = self._adb + ' shell "monkey -p '+p+' -vvv --throttle 100 --pct-syskeys 0 --ignore-crashes 133 >> /sdcard/monkeylogs/'+p+'.log " '
                    # execShell(cmd, 40)
                    #uiautomator dump
                    time.sleep(1)
                    self.UIClick(p, a)
                    if not timeoutThread.is_alive():
                        # Watchdog expired mid-package: restart it for the
                        # remaining components.
                        timeoutThread = threading.Thread(target=self.timeoutKIll, args=(p, timeout), daemon=True)
                        timeoutThread.start()
                service = apkcook.show('ms').split(',')
                for s in service:
                    if not s:
                        continue
                    logging.info(s)
                    cmd = self._adb + ' shell "am start-service '+p+'/'+s+' " '
                    execShell(cmd, 40)
                    time.sleep(1)
                receiver = apkcook.show('mr').split(',')
                for s in receiver:
                    if not s:
                        continue
                    logging.info(s)
                    cmd = self._adb + ' shell "am broadcast '+p+'/'+s+' " '
                    execShell(cmd, 40)
                    time.sleep(1)
                if logcatdameon.poll():
                    logcatdameon.terminate()
            except KeyboardInterrupt:
                cmd = self._adb + ' shell "am force-stop '+p+' " '
                ret = execShell(cmd)
                raise KeyboardInterrupt
            except Exception as e:
                import traceback
                traceback.print_exc()
                logging.error(str(e))
                # cmd = self._adb + ' shell "am force-stop '+p+' " '
                # ret = execShell(cmd)
            time.sleep(0.2)
def getExposed(pkg):
    """Print the exposed components of an .apk or AndroidManifest.xml file.

    Accepts either a full apk or a (text or binary) manifest; logs a usage
    hint when *pkg* is neither.
    """
    from inter.apkcookpy.lib.apk import APKCook
    is_file = os.path.isfile(pkg)
    if is_file and '.apk' in pkg:
        # Full apk package.
        try:
            APKCook(pkg).show()
        except Exception as e:
            logging.error(e)
        return
    if is_file and '.xml' in pkg:
        # Try the plain-text manifest form first ...
        try:
            APKCook(pkg, True, True).show()
        except:
            # ... then fall back to the binary manifest format.
            try:
                APKCook(pkg, True).show()
            except Exception as e:
                logging.error(e)
        return
    logging.error("python3 amonkey.py -e test.xml|test.apk")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Android Monkey', formatter_class=argparse.RawDescriptionHelpFormatter,
epilog='''
python3 amonkey.py -p com.xiaomi.music[,com.xiaomi.youpin]
python3 amonkey.py -p plist.txt
python3 amonkey.py -e test.xml|test.apk
''')
parser.add_argument("-p", "--pkg", type=str, help="app/applist")
parser.add_argument("-e", "--exposed", type=str, help="exposed component")
parser.add_argument("-s", "--did", type=str, help="device ID")
args = parser.parse_args()
pkg = args.pkg
exposed = args.exposed
did = args.did
try:
if pkg:
amonkey = AMonkey(did)
amonkey.monkey(pkg)
elif exposed:
| |
<filename>pypsexec/scmr.py
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, <NAME> (@jborean93) <<EMAIL>>
# MIT License (see LICENSE or https://opensource.org/licenses/MIT)
import logging
import struct
import uuid
from collections import (
OrderedDict,
)
from smbprotocol.connection import (
NtStatus,
)
from smbprotocol.exceptions import (
SMBResponseException,
)
from smbprotocol.ioctl import (
CtlCode,
IOCTLFlags,
SMB2IOCTLRequest,
SMB2IOCTLResponse,
)
from smbprotocol.open import (
CreateDisposition,
CreateOptions,
FilePipePrinterAccessMask,
ImpersonationLevel,
Open,
ShareAccess,
)
from smbprotocol.structure import (
IntField,
EnumField,
FlagField,
Structure,
)
from smbprotocol.tree import (
TreeConnect,
)
from pypsexec.exceptions import (
PypsexecException,
SCMRException,
)
from pypsexec.rpc import (
BindAckPDU,
BindPDU,
ContextElement,
DataRepresentationFormat,
PDUException,
parse_pdu,
PFlags,
RequestPDU,
ResponsePDU,
SyntaxIdElement,
)
log = logging.getLogger(__name__)
class ControlCode(object):
    """Control codes that can be sent to a service via RControlService.

    [MS-SCMR]: https://msdn.microsoft.com/en-us/library/cc245921.aspx
    """
    SERVICE_CONTROL_STOP = 0x00000001
    SERVICE_CONTROL_PAUSE = 0x00000002
    SERVICE_CONTROL_CONTINUE = 0x00000003
    SERVICE_CONTROL_INTERROGATE = 0x00000004
    SERVICE_CONTROL_PARAMCHANGE = 0x00000006
    SERVICE_CONTROL_NETBINDADD = 0x00000007
    SERVICE_CONTROL_NETBINDREMOVE = 0x00000008
    SERVICE_CONTROL_NETBINDENABLE = 0x00000009
    SERVICE_CONTROL_NETBINDDISABLE = 0x0000000A
class DesiredAccess(object):
    """Access rights requested when opening SCM and service handles.

    [MS-SCMR]: https://msdn.microsoft.com/en-us/library/cc245853.aspx
    """
    # Generic (standard) rights
    DELETE = 0x00010000
    READ_CONTROL = 0x00020000
    WRITE_DAC = 0x00040000
    WRITE_OWNER = 0x00080000
    # Service-specific rights
    SERVICE_QUERY_CONFIG = 0x00000001
    SERVICE_CHANGE_CONFIG = 0x00000002
    SERVICE_QUERY_STATUS = 0x00000004
    SERVICE_ENUMERATE_DEPENDENTS = 0x00000008
    SERVICE_START = 0x00000010
    SERVICE_STOP = 0x00000020
    SERVICE_PAUSE_CONTINUE = 0x00000040
    SERVICE_INTERROGATE = 0x00000080
    SERVICE_USER_DEFINED_CONTROL = 0x00000100
    SERVICE_SET_STATUS = 0x00008000
    SERVICE_ALL_ACCESS = 0x000F01FF
    # Service Control Manager rights
    SC_MANAGER_CONNECT = 0x00000001
    SC_MANAGER_CREATE_SERVICE = 0x00000002
    SC_MANAGER_ENUMERATE_SERVICE = 0x00000004
    SC_MANAGER_LOCK = 0x00000008
    SC_MANAGER_QUERY_LOCK_STATUS = 0x00000010
    SC_MANAGER_MODIFY_BOOT_CONFIG = 0x00000020
class ServiceType(object):
    """
    Flags for the dwServiceType field of a service's configuration.
    https://msdn.microsoft.com/en-us/library/cc245925.aspx
    dwServiceType
    flags
    """
    SERVICE_KERNEL_DRIVER = 0x00000001  # driver service
    SERVICE_FILE_SYSTEM_DRIVER = 0x00000002  # file system driver service
    SERVICE_WIN32_OWN_PROCESS = 0x00000010  # runs in its own process
    SERVICE_WIN32_SHARE_PROCESS = 0x00000020  # shares a process with other services
    SERVICE_INTERACTIVE_PROCESS = 0x00000100  # can interact with the desktop
class StartType(object):
    """
    Enum values for the dwStartType field: when the service is started.
    https://msdn.microsoft.com/en-us/library/cc245925.aspx
    dwStartType
    enum
    """
    SERVICE_BOOT_START = 0x00000000  # loaded by the boot loader (drivers)
    SERVICE_SYSTEM_START = 0x00000001  # started during kernel initialization
    SERVICE_AUTO_START = 0x00000002  # started automatically at system start-up
    SERVICE_DEMAND_START = 0x00000003  # started on demand (e.g. StartService)
    SERVICE_DISABLED = 0x00000004  # cannot be started
class ErrorControl(object):
    """
    Enum values for the dwErrorControl field: the severity of a failure
    to start the service.
    https://msdn.microsoft.com/en-us/library/cc245925.aspx
    dwErrorControl
    enum
    """
    SERVICE_ERROR_IGNORE = 0x00000000
    SERVICE_ERROR_NORMAL = 0x00000001
    SERVICE_ERROR_SEVERE = 0x00000002
    SERVICE_ERROR_CRITICAL = 0x00000003
class CurrentState(object):
    """Enum values for dwCurrentState: the service's current run state.

    [MS-SCMR]: https://msdn.microsoft.com/en-us/library/cc245911.aspx
    """
    SERVICE_STOPPED = 0x00000001
    SERVICE_START_PENDING = 0x00000002
    SERVICE_STOP_PENDING = 0x00000003
    SERVICE_RUNNING = 0x00000004
    SERVICE_CONTINUE_PENDING = 0x00000005
    SERVICE_PAUSE_PENDING = 0x00000006
    SERVICE_PAUSED = 0x00000007
class ControlsAccepted(object):
    """Flags for dwControlsAccepted: control codes the service handles.

    [MS-SCMR]: https://msdn.microsoft.com/en-us/library/cc245911.aspx
    """
    SERVICE_ACCEPT_STOP = 0x00000001
    SERVICE_ACCEPT_PAUSE_CONTINUE = 0x00000002
    SERVICE_ACCEPT_SHUTDOWN = 0x00000004
    SERVICE_ACCEPT_PARAMCHANGE = 0x00000008
    SERVICE_ACCEPT_HARDWAREPROFILECHANGE = 0x00000020
    SERVICE_ACCEPT_POWEREVENT = 0x00000040
    SERVICE_ACCEPT_SESSIONCHANGE = 0x00000080
    SERVICE_ACCEPT_PRESHUTDOWN = 0x00000100
    SERVICE_ACCEPT_TIMECHANGE = 0x00000200
    SERVICE_ACCEPT_TRIGGEREVENT = 0x00000400
class EnumServiceState(object):
    """
    Selector for which service records to enumerate.
    https://msdn.microsoft.com/en-us/library/cc245933.aspx
    dwServiceState
    Specifies the service records to enumerate
    """
    SERVICE_ACTIVE = 0x00000001
    SERVICE_INACTIVE = 0x00000002
    SERVICE_STATE_ALL = 0x00000003
class ScmrReturnValues(object):
    """Win32 error codes that SCMR RPC requests can return, ordered by value.

    The two *_NOTIFY_CHANGED sentinels are SCMR-specific success markers.
    """
    ERROR_SUCCESS = 0
    ERROR_FILE_NOT_FOUND = 2
    ERROR_PATH_NOT_FOUND = 3
    ERROR_ACCESS_DENIED = 5
    ERROR_INVALID_HANDLE = 6
    ERROR_INVALID_DATA = 13
    ERROR_INVALID_PARAMETER = 87
    ERROR_INVALID_NAME = 123
    ERROR_MORE_DATA = 234
    ERROR_DEPENDENT_SERVICES_RUNNING = 1051
    ERROR_INVALID_SERVICE_CONTROL = 1052
    ERROR_SERVICE_REQUEST_TIMEOUT = 1053
    ERROR_SERVICE_NO_THREAD = 1054
    ERROR_SERVICE_DATABASE_LOCKED = 1055
    ERROR_SERVICE_ALREADY_RUNNING = 1056
    ERROR_INVALID_SERVICE_ACCOUNT = 1057
    ERROR_SERVICE_DISABLED = 1058
    ERROR_CIRCULAR_DEPENDENCY = 1059
    ERROR_SERVICE_DOES_NOT_EXIST = 1060
    ERROR_SERVICE_CANNOT_ACCEPT_CTRL = 1061
    ERROR_SERVICE_NOT_ACTIVE = 1062
    ERROR_DATABASE_DOES_NOT_EXIST = 1065
    ERROR_SERVICE_DEPENDENCY_FAIL = 1068
    ERROR_SERVICE_LOGON_FAILED = 1069
    ERROR_SERVICE_MARKED_FOR_DELETE = 1072
    ERROR_SERVICE_EXISTS = 1073
    ERROR_SERVICE_DEPENDENCY_DELETED = 1075
    ERROR_DUPLICATE_SERVICE_NAME = 1078
    ERROR_SHUTDOWN_IN_PROGRESS = 1115
    # SCMR notification success sentinels
    ERROR_SUCCESS_NOTIFY_CHANGED = 0xFE75FFFF
    ERROR_SUCCESS_LAST_NOTIFY_CHANGED = 0xFD75FFFF
class ServiceStatus(Structure):
    """[MS-SCMR] 2.2.47 SERVICE_STATUS

    Describes a service: its type, current state, the controls it accepts
    and its exit/progress counters.
    https://msdn.microsoft.com/en-us/library/cc245911.aspx
    """
    def __init__(self):
        field_defs = (
            ('service_type',
             FlagField(size=4, flag_type=ServiceType, flag_strict=False)),
            ('current_state',
             EnumField(size=4, enum_type=CurrentState)),
            ('controls_accepted',
             FlagField(size=4, flag_type=ControlsAccepted, flag_strict=False)),
            ('win32_exit_code', IntField(size=4)),
            ('service_specified_exit_code', IntField(size=4)),
            ('check_point', IntField(size=4)),
            ('wait_hint', IntField(size=4)),
        )
        self.fields = OrderedDict(field_defs)
        super(ServiceStatus, self).__init__()
class Service(object):
    def __init__(self, name, smb_session):
        """
        Higher-level interface over SCMR to manage Windows services. This is
        customised for the PAExec service to really just be used in that
        scenario.
        :param name: The name of the service
        :param smb_session: A connected SMB Session that can be used to connect
            to the IPC$ tree.
        """
        self.name = name
        self.smb_session = smb_session
        # Handle to the service itself (opened lazily by _open_service).
        self._handle = None
        # SCMRApi binding and the SCM handle it returns (set in open()).
        self._scmr = None
        self._scmr_handle = None
    def open(self):
        """Bind to the SCMR endpoint and open a Service Control Manager handle.

        Safe to call twice; a second call is a no-op while a binding exists.
        """
        if self._scmr:
            log.debug("Handle for SCMR on %s is already open"
                      % self.smb_session.connection.server_name)
            return
        # connect to the SCMR Endpoint
        log.info("Opening handle for SCMR on %s"
                 % self.smb_session.connection.server_name)
        self._scmr = SCMRApi(self.smb_session)
        self._scmr.open()
        self._scmr_handle = self._scmr.open_sc_manager_w(
            self.smb_session.connection.server_name,
            None,
            DesiredAccess.SC_MANAGER_CONNECT |
            DesiredAccess.SC_MANAGER_CREATE_SERVICE |
            DesiredAccess.SC_MANAGER_ENUMERATE_SERVICE
        )
    def close(self):
        """Release the service handle, the SCM handle and the SCMR binding.

        Handles are closed in reverse order of acquisition; each step is
        skipped when that handle was never opened.
        """
        if self._handle:
            log.info("Closing Service handle for service %s" % self.name)
            self._scmr.close_service_handle_w(self._handle)
            self._handle = None
        if self._scmr_handle:
            log.info("Closing SCMR handle")
            self._scmr.close_service_handle_w(self._scmr_handle)
            self._scmr_handle = None
        if self._scmr:
            log.info("Closing bindings for SCMR")
            self._scmr.close()
            self._scmr = None
    def start(self):
        """Start the service; already-running is treated as success.

        :raises PypsexecException: when the service does not exist.
        :raises SCMRException: on any SCMR error other than
            ERROR_SERVICE_ALREADY_RUNNING.
        """
        self._open_service()
        if self._handle is None:
            raise PypsexecException("Cannot start service %s as it does not "
                                    "exist" % self.name)
        try:
            self._scmr.start_service_w(self._handle)
        except SCMRException as exc:
            if exc.return_code != \
                    ScmrReturnValues.ERROR_SERVICE_ALREADY_RUNNING:
                raise exc
    def stop(self):
        """Stop the service; already-stopped is treated as success.

        :raises PypsexecException: when the service does not exist.
        :raises SCMRException: on any SCMR error other than
            ERROR_SERVICE_NOT_ACTIVE.
        """
        self._open_service()
        if self._handle is None:
            raise PypsexecException("Cannot stop service %s as it does not "
                                    "exist" % self.name)
        try:
            self._scmr.control_service(self._handle,
                                       ControlCode.SERVICE_CONTROL_STOP)
        except SCMRException as exc:
            if exc.return_code != ScmrReturnValues.ERROR_SERVICE_NOT_ACTIVE:
                raise exc
    def create(self, path):
        """Create the service pointing at the executable *path*.

        A no-op when the service already exists.  The service is created
        as a demand-start, own-process Win32 service.
        """
        self._open_service()
        if self._handle:
            return
        self._handle = self._scmr.create_service_w(
            self._scmr_handle,
            self.name,
            self.name,
            DesiredAccess.SERVICE_QUERY_STATUS | DesiredAccess.SERVICE_START |
            DesiredAccess.SERVICE_STOP | DesiredAccess.DELETE,
            ServiceType.SERVICE_WIN32_OWN_PROCESS,
            StartType.SERVICE_DEMAND_START,
            ErrorControl.SERVICE_ERROR_NORMAL,
            path,
            None,
            0,
            None,
            None,
            None
        )[1]
    def delete(self):
        """Stop (if needed) and delete the service; no-op when it does not exist."""
        self._open_service()
        if self._handle is None:
            return
        self.stop()
        self._scmr.delete_service(self._handle)
    def _open_service(self):
        """Open (and cache) a handle to the service; leaves it None if absent.

        :raises SCMRException: on any error other than
            ERROR_SERVICE_DOES_NOT_EXIST.
        """
        if self._handle:
            return self._handle
        # connect to the desired service in question
        desired_access = DesiredAccess.SERVICE_QUERY_STATUS | \
            DesiredAccess.SERVICE_START | \
            DesiredAccess.SERVICE_STOP | \
            DesiredAccess.DELETE
        try:
            log.info("Opening handle for Service %s" % self.name)
            self._handle = self._scmr.open_service_w(self._scmr_handle,
                                                     self.name,
                                                     desired_access)
        except SCMRException as exc:
            if exc.return_code != \
                    ScmrReturnValues.ERROR_SERVICE_DOES_NOT_EXIST:
                raise exc
            else:
                log.debug("Could not open handle for service %s as it did "
                          "not exist" % self.name)
class SCMRApi(object):
def __init__(self, smb_session):
# connect to the IPC tree and open a handle at svcctl
self.tree = TreeConnect(smb_session, r"\\%s\IPC$"
% smb_session.connection.server_name)
self.handle = Open(self.tree, "svcctl")
self.call_id = 0
    def open(self):
        """Connect to the svcctl pipe and bind it to SCMR over DCE/RPC.

        Opens a handle to the ``svcctl`` named pipe on the IPC$ tree and
        sends a BindPDU proposing the NDR transfer syntax plus the
        bind-time feature-negotiation pseudo syntax.  Raises PDUException
        if the server does not answer with a BindAckPDU.
        """
        log.debug("Connecting to SMB Tree %s for SCMR" % self.tree.share_name)
        self.tree.connect()

        log.debug("Opening handle to svcctl pipe")
        self.handle.create(ImpersonationLevel.Impersonation,
                           FilePipePrinterAccessMask.GENERIC_READ |
                           FilePipePrinterAccessMask.GENERIC_WRITE,
                           0,
                           ShareAccess.FILE_SHARE_READ |
                           ShareAccess.FILE_SHARE_WRITE |
                           ShareAccess.FILE_SHARE_DELETE,
                           CreateDisposition.FILE_OPEN,
                           CreateOptions.FILE_NON_DIRECTORY_FILE)

        # we need to bind svcctl to SCManagerW over DCE/RPC
        # The whole bind fits in one fragment.
        bind = BindPDU()
        bind['pfx_flags'].set_flag(PFlags.PFC_FIRST_FRAG)
        bind['pfx_flags'].set_flag(PFlags.PFC_LAST_FRAG)
        bind['packed_drep'] = DataRepresentationFormat()
        bind['call_id'] = self.call_id
        self.call_id += 1

        # Context 0: SCMR abstract syntax with the standard NDR transfer
        # syntax.
        context_ndr = ContextElement()
        context_ndr['context_id'] = 0
        context_ndr['abstract_syntax'] = SyntaxIdElement()
        context_ndr['abstract_syntax']['uuid'] = \
            uuid.UUID("367ABB81-9844-35F1-AD32-98F038001003")
        context_ndr['abstract_syntax']['version'] = 2

        # https://msdn.microsoft.com/en-us/library/cc243843.aspx
        ndr_syntax = SyntaxIdElement()
        ndr_syntax['uuid'] = uuid.UUID("8a885d04-1ceb-11c9-9fe8-08002b104860")
        ndr_syntax['version'] = 2
        context_ndr['transfer_syntaxes'] = [
            ndr_syntax
        ]

        # Context 1: same abstract syntax with the bind-time feature
        # negotiation bitmask encoded as a pseudo transfer syntax.
        context_bind = ContextElement()
        context_bind['context_id'] = 1
        context_bind['abstract_syntax'] = SyntaxIdElement()
        context_bind['abstract_syntax']['uuid'] = \
            uuid.UUID("367ABB81-9844-35F1-AD32-98F038001003")
        context_bind['abstract_syntax']['version'] = 2

        # https://msdn.microsoft.com/en-us/library/cc243715.aspx
        # uuid prefix = 6CB71C2C-9812-4540
        # uuid prefix bytes = b'\x2c\x1c\xb7\x6c\x12\x98\x40\x45'
        # BindTimeFeatureNegotiateBitmask
        # https://msdn.microsoft.com/en-us/library/cc243884.aspx
        # SecurityContextMultiplexingSupported = 0x01
        # KeepConnectionOnOrphanSupported = 0x02
        # version number is 1
        bind_syntax = SyntaxIdElement()
        bind_syntax['uuid'] = b'\x2c\x1c\xb7\x6c\x12\x98\x40\x45' \
                              b'\x03\x00\x00\x00\x00\x00\x00\x00'
        bind_syntax['version'] = 1
        context_bind['transfer_syntaxes'] = [
            bind_syntax
        ]

        bind['context_elems'] = [
            context_ndr,
            context_bind
        ]
        bind_data = bind.pack()

        log.info("Sending bind request to svcctl")
        log.debug(str(bind))
        self.handle.write(bind_data)

        log.info("Receiving bind result for svcctl")
        bind_data = self.handle.read(0, 1024)
        bind_result = parse_pdu(bind_data)
        log.debug(str(bind_result))
        if not isinstance(bind_result, BindAckPDU):
            raise PDUException("Expecting BindAckPDU for initial bind result "
                               "but got: %s" % str(bind_result))
    def close(self):
        """Close the svcctl pipe handle and disconnect from the IPC$ tree."""
        log.info("Closing bind to svcctl")
        self.handle.close(False)
        self.tree.disconnect()
# SCMR Functions below
def close_service_handle_w(self, handle):
# https://msdn.microsoft.com/en-us/library/cc245920.aspx
opnum = 0
res = self._invoke("RCloseServiceHandleW", opnum, handle)
handle = res[:20]
return_code = struct.unpack("<i", res[20:])[0]
self._parse_error(return_code, "RCloseServiceHandleW")
return handle
def control_service(self, service_handle, control_code):
# https://msdn.microsoft.com/en-us/library/cc245921.aspx
opnum = 1
data = service_handle
data += struct.pack("<i", control_code)
res = self._invoke("RControlService", opnum, data)
return_code = struct.unpack("<i", res[-4:])[0]
self._parse_error(return_code, "RControlService")
service_status = ServiceStatus()
service_status.unpack(res[:-4])
return service_status
def delete_service(self, service_handle):
# https://msdn.microsoft.com/en-us/library/cc245926.aspx
opnum = 2
res = self._invoke("RDeleteService", opnum, service_handle)
return_code = struct.unpack("<i", res)[0]
self._parse_error(return_code, "RDeleteService")
    def query_service_status(self, service_handle):
        """Invoke RQueryServiceStatus and return the ServiceStatus struct.

        :param service_handle: open handle (bytes) to the target service
        :return: unpacked ServiceStatus
        """
        # https://msdn.microsoft.com/en-us/library/cc245952.aspx
        opnum = 6

        res = self._invoke("RQueryServiceStatus", opnum, service_handle)
        # Trailing 4 bytes are the return code; the rest is the status body.
        return_code = struct.unpack("<i", res[-4:])[0]
        self._parse_error(return_code, "RQueryServiceStatus")
        service_status = ServiceStatus()
        service_status.unpack(res[:-4])
        return service_status
def enum_services_status_w(self, server_handle, service_type,
service_state):
"""
Enumerates the services based on the criteria selected
:param server_handle: A handle to SCMR
:param service_type: ServiceType flags to filter by service type
:param service_state: EnumServiceState enum value
:return: List dictionaries with the following entries
service_name: The service name of the service
display_name: The display name of the service
service_status: ServiceStatus structure of the service
"""
# https://msdn.microsoft.com/en-us/library/cc245933.aspx
opnum = 14
# sent 0 bytes on the buffer size for the 1st request to get the
# buffer size that is required
req_data = server_handle
req_data += struct.pack("<i", service_type)
req_data += struct.pack("<i", service_state)
req_data += struct.pack("<i", 0)
req_data += b"\x00\x00\x00\x00"
res = self._invoke("REnumServicesStatusW", opnum, req_data)
# now send another request with the total buffer size sent
buffer_size = struct.unpack("<i", res[4:8])[0]
req_data = server_handle
req_data += struct.pack("<i", service_type)
req_data += struct.pack("<i", service_state)
req_data += res[4:8]
req_data += b"\x00\x00\x00\x00"
try:
res = self._invoke("REnumServicesStatusW", opnum, req_data)
data = res
except SMBResponseException as exc:
if exc.status | |
# Repository: vaMuchenje/Template-Python
#
# The Template-Python distribution is Copyright (C) <NAME> 2007-2008,
# derived from the Perl Template Toolkit Copyright (C) 1996-2007 Andy
# Wardley. All Rights Reserved.
#
# The file "LICENSE" at the top level of this source distribution describes
# the terms under which this file may be distributed.
#
import re
from template import util
from template.vmethods import VMethods
"""
template.stash.Stash - Magical storage for template variables
SYNOPSIS
import template.stash
stash = template.stash.Stash(vars)
# get variable values
value = stash.get(variable)
value = stash.get([compound, ...])
# set variable value
stash.set(variable, value);
stash.set([compound, ...], value)
# default variable value
stash.set(variable, value, 1)
stash.set([compound, ...], $value, 1)
# set variable values en masse
stash.update(new_vars)
# methods for (de-)localising variables
stash = stash.clone(new_vars)
stash = stash.declone()
DESCRIPTION
The template.stash.Stash module defines a class which is used to store
variable values for the runtime use of the template processor.
Variable values are stored internally in a dictionary and are
accessible via the get() and set() methods.
Variables may reference dictionaries, lists, functions and objects as
well as simple values. The stash automatically performs the right
magic when dealing with variables, calling code or object methods,
indexing into lists, dictionaries, etc.
The stash has clone() and declone() methods which are used by the
template processor to make temporary copies of the stash for
localising changes made to variables.
PUBLIC METHODS
__init__(params)
The constructor initializes a new template.stash.Stash object.
stash = template.stash.Stash()
A dictionary may be passed to provide variables and values which
should be used to initialise the stash.
stash = template.stash.Stash({ 'var1': 'value1',
'var2': 'value2' })
get(variable)
The get() method retrieves the variable named by the first parameter.
value = stash.get('var1')
Dotted compound variables can be retrieved by specifying the variable
elements by list. Each node in the variable occupies two entries in
the list. The first gives the name of the variable element, the
second is a list of arguments for that element, or 0 if none.
[% foo.bar(10).baz(20) %]
stash.get([ 'foo', 0, 'bar', [ 10 ], 'baz', [ 20 ] ])
set(variable, value, default)
The set() method sets the variable name in the first parameter to the
value specified in the second.
stash.set('var1', 'value1')
If the third parameter evaluates to a true value, the variable is
set only if it did not have a true value before.
stash.set('var2', 'default_value', 1)
Dotted compound variables may be specified as per get() above.
[% foo.bar = 30 %]
stash.set([ 'foo', 0, 'bar', 0 ], 30)
The magical variable 'IMPORT' can be specified whose corresponding
value should be a dictionary. The contents of the dictionary are
copied (i.e. imported) into the current namespace.
# foo.bar = baz, foo.wiz = waz
stash.set('foo', { 'bar': 'baz', 'wiz': 'waz' })
# import 'foo' into main namespace: bar = baz, wiz = waz
stash.set('IMPORT', stash.get('foo'))
clone(params)
The clone() method creates and returns a new Stash object which
represents a localised copy of the parent stash. Variables can be
freely updated in the cloned stash and when declone() is called, the
original stash is returned with all its members intact and in the same
state as they were before clone() was called.
For convenience, a dictionary of parameters may be passed into clone()
which is used to update any simple variable (i.e. those that don't
contain any namespace elements like 'foo' and 'bar' but not 'foo.bar')
variables while cloning the stash. For adding and updating complex
variables, the set() method should be used after calling clone().
This will correctly resolve and/or create any necessary namespace
hashes.
A cloned stash maintains a reference to the stash that it was copied
from in its '__parent' member.
declone()
The declone() method returns the '__parent' reference and can be used
to restore the state of a stash as described above.
"""
class Stash:
"""Definition of an object class which stores and manages access to
variables for the Template Toolkit.
"""
# Regular expression that identifies "private" stash entries.
PRIVATE = r"^[_.]"
# Dictionary of root operations.
ROOT_OPS = VMethods.ROOT
# Dictionary of scalar operations.
SCALAR_OPS = VMethods.TEXT
# Dictionary of list operations.
LIST_OPS = VMethods.LIST
# Dictionary of hash operations.
HASH_OPS = VMethods.HASH
# Mapping of names to ops dictionaries, see define_vmethod method.
OPS = { "scalar": SCALAR_OPS,
"item": SCALAR_OPS,
"list": LIST_OPS,
"array": LIST_OPS,
"hash": HASH_OPS }
def __init__(self, params=None):
params = params or {}
self.__contents = {"global": {}}
self.__contents.update(params)
self.__contents.update(self.ROOT_OPS)
self.__parent = None
self.__debug = bool(params.get("_DEBUG"))
def __getitem__(self, key):
"""Provides direct, container-like read access to the stash contents."""
return self.__contents.get(key)
def __setitem__(self, key, value):
"""Provides direct, container-like write access to the stash contents."""
self.__contents[key] = value
    def clone(self, params=None):
        """Creates a copy of the current stash object to effect
        localisation of variables.

        The new stash is blessed into the same class as the parent (which
        may be a derived class) and has a '__parent' member added which
        contains a reference to the parent stash that created it (self).
        This member is used in a successive declone() method call to
        return the reference to the parent.

        A parameter may be provided which should be a dictionary of
        variable/values which should be defined in the new stash.  The
        update() method is called to define these new variables in the
        cloned stash.

        Returns the cloned Stash.
        """
        params = params or {}
        # A dict-valued 'import' parameter is pulled out and merged via the
        # HASH_OPS import vmethod after the plain copy below; any non-dict
        # 'import' value is left in params untouched.
        import_ = params.get("import")
        if isinstance(import_, dict):
            del params["import"]
        else:
            import_ = None
        # NOTE(review): the docstring promises the clone is the same class
        # as the parent (including subclasses), but Stash() is hard-coded
        # here -- a derived stash decays to plain Stash on clone.  Confirm
        # intent before changing (subclass __init__ signatures may differ).
        clone = Stash()
        # Shallow copy: nested containers are shared with the parent.
        clone.__contents.update(self.__contents)
        clone.__contents.update(params)
        clone.__debug = self.__debug
        clone.__parent = self
        if import_:
            self.HASH_OPS["import"](clone, import_)
        return clone
def declone(self):
"""Returns a reference to the PARENT stash.
When called in the following manner:
stash = stash.declone()
the reference count on the current stash will drop to 0 and be "freed"
and the caller will be left with a reference to the parent. This
contains the state of the stash before it was cloned.
"""
return self.__parent or self
    def get(self, ident, args=None):
        """Returns the value for an variable stored in the stash.

        The variable may be specified as a simple string, e.g. 'foo', or
        as an array reference representing compound variables.  In the
        latter case, each pair of successive elements in the list
        represent a node in the compound variable.  The first is the
        variable name, the second a list of arguments or 0 if undefined.
        So, the compound variable [% foo.bar('foo').baz %] would be
        represented as the list [ 'foo', 0, 'bar', ['foo'], 'baz', 0 ].

        Returns the value of the identifier or an empty string if
        undefined.
        """
        ident = util.unscalar(ident)
        root = self
        if isinstance(ident, str) and ident.find(".") != -1:
            # Normalise a dotted string like "foo.bar" into the flat
            # [name, 0, name, 0, ...] form; any "(...)" suffix is stripped
            # (arguments are not parsed from strings).
            ident = [y for x in ident.split(".")
                         for y in (re.sub(r"\(.*$", "", x), 0)]
        if isinstance(ident, (list, tuple)):
            # Walk the dotted chain, descending one (name, args) node at a
            # time; stop at the first undefined element.
            for a, b in util.chop(ident, 2):
                result = self.__dotop(root, a, b)
                if result is not None:
                    root = result
                else:
                    break
        else:
            result = self.__dotop(root, ident, args)

        if result is None:
            # Delegate to the undefined() hook (may raise or supply default).
            result = self.undefined(ident, args)
        return util.PerlScalar(result)
def set(self, ident, value, default=False):
"""Updates the value for a variable in the stash.
The first parameter should be the variable name or list, as per
get(). The second parameter should be the intended value for the
variable. The third, optional parameter is a flag which may be
set to indicate 'default' mode. When set true, the variable will
only be updated if it is currently undefined or has a false value.
The magical 'IMPORT' variable identifier may be used to indicate
that value is a dictionary whose values should be imported.
Returns the value set, or an empty string if not set (e.g. default
mode). In the case of IMPORT, returns the number of items
imported from the hash.
"""
root = self
ident = util.unscalar(ident)
value = util.unscalar(value)
# ELEMENT: {
if isinstance(ident, str) and ident.find(".") >= 0:
ident = [y for x in ident.split(".")
for y in (re.sub(r"\(.*$", "", x), 0)]
if isinstance(ident, (list, tuple)):
chopped = list(util.chop(ident, 2))
for i in range(len(chopped)-1):
x, y = chopped[i]
result = self.__dotop(root, x, y, True)
if result is None:
# last ELEMENT
return ""
else:
root = result
result = self.__assign(root, chopped[-1][0], chopped[-1][1],
value, default)
else:
result = self.__assign(root, ident, 0, value, default)
if result is None:
return | |
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.10
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
# SWIG shim: Python 3 builds bound methods through the C extension helper,
# Python 2 through the (long-gone) 'new' module.
if version_info >= (3,0,0):
    new_instancemethod = lambda func, inst, cls: _IntCurvesFace.SWIG_PyInstanceMethod_New(func)
else:
    from new import instancemethod as new_instancemethod

if version_info >= (2,6,0):
    def swig_import_helper():
        # Locate the compiled _IntCurvesFace extension next to this module.
        # NOTE(review): relies on the deprecated 'imp' module (removed in
        # Python 3.12); this file is SWIG-generated -- regenerate with a
        # newer SWIG rather than hand-patching.
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_IntCurvesFace', [dirname(__file__)])
        except ImportError:
            # Fall back to a regular sys.path import.
            import _IntCurvesFace
            return _IntCurvesFace
        if fp is not None:
            try:
                _mod = imp.load_module('_IntCurvesFace', fp, pathname, description)
            finally:
                # Always release the file handle opened by find_module.
                fp.close()
            return _mod
    _IntCurvesFace = swig_import_helper()
    del swig_import_helper
else:
    import _IntCurvesFace
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
    """Set attribute *name* on a SWIG proxy through the setter table.

    With static=1 (default) unknown attributes raise AttributeError; with
    static=0 they are stored in the instance __dict__ instead.
    """
    if (name == "thisown"): return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            # Store the low-level C pointer object directly.
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name,None)
    if method: return method(self,value)
    if (not static):
        self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
    """Dynamic-mode attribute setter for SWIG proxies.

    Delegates to ``_swig_setattr_nondynamic`` with static=0 so that new
    attributes may be added to the instance.
    """
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self,class_type,name):
    """Look up attribute *name* on a SWIG proxy through the getter table."""
    if (name == "thisown"): return self.this.own()
    method = class_type.__swig_getmethods__.get(name,None)
    if method: return method(self)
    raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
def _swig_setattr_nondynamic_method(set):
    """Wrap *set* so only existing attributes (or 'this'/'thisown') may
    be assigned; assigning a brand-new name raises AttributeError."""
    def set_attr(self,name,value):
        if (name == "thisown"): return self.this.own(value)
        if hasattr(self,name) or (name == "this"):
            set(self,name,value)
        else:
            raise AttributeError("You cannot add attributes to %s" % self)
    return set_attr
class SwigPyIterator(object):
    """Abstract SWIG wrapper over the C++ iterator protocol.

    Instances are only ever created by the C extension; the Python-level
    constructor is deliberately disabled.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    __swig_destroy__ = _IntCurvesFace.delete_SwigPyIterator
    def __iter__(self): return self
SwigPyIterator.value = new_instancemethod(_IntCurvesFace.SwigPyIterator_value,None,SwigPyIterator)
SwigPyIterator.incr = new_instancemethod(_IntCurvesFace.SwigPyIterator_incr,None,SwigPyIterator)
SwigPyIterator.decr = new_instancemethod(_IntCurvesFace.SwigPyIterator_decr,None,SwigPyIterator)
SwigPyIterator.distance = new_instancemethod(_IntCurvesFace.SwigPyIterator_distance,None,SwigPyIterator)
SwigPyIterator.equal = new_instancemethod(_IntCurvesFace.SwigPyIterator_equal,None,SwigPyIterator)
SwigPyIterator.copy = new_instancemethod(_IntCurvesFace.SwigPyIterator_copy,None,SwigPyIterator)
SwigPyIterator.next = new_instancemethod(_IntCurvesFace.SwigPyIterator_next,None,SwigPyIterator)
SwigPyIterator.__next__ = new_instancemethod(_IntCurvesFace.SwigPyIterator___next__,None,SwigPyIterator)
SwigPyIterator.previous = new_instancemethod(_IntCurvesFace.SwigPyIterator_previous,None,SwigPyIterator)
SwigPyIterator.advance = new_instancemethod(_IntCurvesFace.SwigPyIterator_advance,None,SwigPyIterator)
SwigPyIterator.__eq__ = new_instancemethod(_IntCurvesFace.SwigPyIterator___eq__,None,SwigPyIterator)
SwigPyIterator.__ne__ = new_instancemethod(_IntCurvesFace.SwigPyIterator___ne__,None,SwigPyIterator)
SwigPyIterator.__iadd__ = new_instancemethod(_IntCurvesFace.SwigPyIterator___iadd__,None,SwigPyIterator)
SwigPyIterator.__isub__ = new_instancemethod(_IntCurvesFace.SwigPyIterator___isub__,None,SwigPyIterator)
SwigPyIterator.__add__ = new_instancemethod(_IntCurvesFace.SwigPyIterator___add__,None,SwigPyIterator)
SwigPyIterator.__sub__ = new_instancemethod(_IntCurvesFace.SwigPyIterator___sub__,None,SwigPyIterator)
SwigPyIterator_swigregister = _IntCurvesFace.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
import OCC.TopoDS
import OCC.MMgt
import OCC.Standard
import OCC.TCollection
import OCC.TopLoc
import OCC.gp
import OCC.TopAbs
import OCC.Adaptor3d
import OCC.GeomAbs
import OCC.TColStd
import OCC.Geom
import OCC.TColgp
import OCC.Adaptor2d
import OCC.Geom2d
import OCC.math
import OCC.IntCurveSurface
import OCC.Intf
import OCC.Bnd
import OCC.IntSurf
def register_handle(handle, base_object):
    """
    Inserts the handle into the base object to
    prevent memory corruption in certain cases.

    Best-effort: objects lacking ``IsKind`` (or failing the kind check)
    are simply left untouched.
    """
    try:
        if base_object.IsKind("Standard_Transient"):
            base_object.thisHandle = handle
            base_object.thisown = False
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # still propagate; deliberate swallow otherwise.
        pass
class IntCurvesFace_Intersector(object):
    """SWIG proxy for OCC's IntCurvesFace_Intersector.

    Computes intersections between a line (or an Adaptor3d curve) and a
    single loaded face.  All methods are thin delegations to the compiled
    ``_IntCurvesFace`` extension.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        * Load a Face. The Tolerance <Tol> is used to determine if the first point of the segment is near the face. In that case, the parameter of the intersection point on the line can be a negative value (greater than -Tol).

        :param F:
        :type F: TopoDS_Face &
        :param aTol:
        :type aTol: float
        :rtype: None
        """
        _IntCurvesFace.IntCurvesFace_Intersector_swiginit(self,_IntCurvesFace.new_IntCurvesFace_Intersector(*args))
    def Perform(self, *args):
        """
        * Perform the intersection between the segment L and the loaded face. PInf is the smallest parameter on the line PSup is the highest parameter on the line For an infinite line PInf and PSup can be +/- RealLast.

        :param L:
        :type L: gp_Lin
        :param PInf:
        :type PInf: float
        :param PSup:
        :type PSup: float
        :rtype: None

        * same method for a HCurve from Adaptor3d. PInf and PSup can also be - and + INF.

        :param HCu:
        :type HCu: Handle_Adaptor3d_HCurve &
        :param PInf:
        :type PInf: float
        :param PSup:
        :type PSup: float
        :rtype: None
        """
        return _IntCurvesFace.IntCurvesFace_Intersector_Perform(self, *args)
    def SurfaceType(self, *args):
        """
        * Return the surface type

        :rtype: GeomAbs_SurfaceType
        """
        return _IntCurvesFace.IntCurvesFace_Intersector_SurfaceType(self, *args)
    def IsDone(self, *args):
        """
        * True is returned when the intersection have been computed.

        :rtype: bool
        """
        return _IntCurvesFace.IntCurvesFace_Intersector_IsDone(self, *args)
    def NbPnt(self, *args):
        """
        * Number of intersection points found by the last Perform().

        :rtype: int
        """
        return _IntCurvesFace.IntCurvesFace_Intersector_NbPnt(self, *args)
    def UParameter(self, *args):
        """
        * Returns the U parameter of the ith intersection point on the surface.

        :param I:
        :type I: int
        :rtype: float
        """
        return _IntCurvesFace.IntCurvesFace_Intersector_UParameter(self, *args)
    def VParameter(self, *args):
        """
        * Returns the V parameter of the ith intersection point on the surface.

        :param I:
        :type I: int
        :rtype: float
        """
        return _IntCurvesFace.IntCurvesFace_Intersector_VParameter(self, *args)
    def WParameter(self, *args):
        """
        * Returns the parameter of the ith intersection point on the line.

        :param I:
        :type I: int
        :rtype: float
        """
        return _IntCurvesFace.IntCurvesFace_Intersector_WParameter(self, *args)
    def Pnt(self, *args):
        """
        * Returns the geometric point of the ith intersection between the line and the surface.

        :param I:
        :type I: int
        :rtype: gp_Pnt
        """
        return _IntCurvesFace.IntCurvesFace_Intersector_Pnt(self, *args)
    def Transition(self, *args):
        """
        * Returns the ith transition of the line on the surface.

        :param I:
        :type I: int
        :rtype: IntCurveSurface_TransitionOnCurve
        """
        return _IntCurvesFace.IntCurvesFace_Intersector_Transition(self, *args)
    def State(self, *args):
        """
        * Returns the ith state of the point on the face. The values can be either TopAbs_IN ( the point is in the face) or TopAbs_ON ( the point is on a boundary of the face).

        :param I:
        :type I: int
        :rtype: TopAbs_State
        """
        return _IntCurvesFace.IntCurvesFace_Intersector_State(self, *args)
    def Face(self, *args):
        """
        * Returns the significant face used to determine the intersection.

        :rtype: TopoDS_Face
        """
        return _IntCurvesFace.IntCurvesFace_Intersector_Face(self, *args)
    def ClassifyUVPoint(self, *args):
        """
        :param Puv:
        :type Puv: gp_Pnt2d
        :rtype: TopAbs_State
        """
        return _IntCurvesFace.IntCurvesFace_Intersector_ClassifyUVPoint(self, *args)
    def Bounding(self, *args):
        """
        :rtype: Bnd_Box
        """
        return _IntCurvesFace.IntCurvesFace_Intersector_Bounding(self, *args)
    def Destroy(self, *args):
        """
        :rtype: None
        """
        return _IntCurvesFace.IntCurvesFace_Intersector_Destroy(self, *args)
    __swig_destroy__ = _IntCurvesFace.delete_IntCurvesFace_Intersector
IntCurvesFace_Intersector.Perform = new_instancemethod(_IntCurvesFace.IntCurvesFace_Intersector_Perform,None,IntCurvesFace_Intersector)
IntCurvesFace_Intersector.SurfaceType = new_instancemethod(_IntCurvesFace.IntCurvesFace_Intersector_SurfaceType,None,IntCurvesFace_Intersector)
IntCurvesFace_Intersector.IsDone = new_instancemethod(_IntCurvesFace.IntCurvesFace_Intersector_IsDone,None,IntCurvesFace_Intersector)
IntCurvesFace_Intersector.NbPnt = new_instancemethod(_IntCurvesFace.IntCurvesFace_Intersector_NbPnt,None,IntCurvesFace_Intersector)
IntCurvesFace_Intersector.UParameter = new_instancemethod(_IntCurvesFace.IntCurvesFace_Intersector_UParameter,None,IntCurvesFace_Intersector)
IntCurvesFace_Intersector.VParameter = new_instancemethod(_IntCurvesFace.IntCurvesFace_Intersector_VParameter,None,IntCurvesFace_Intersector)
IntCurvesFace_Intersector.WParameter = new_instancemethod(_IntCurvesFace.IntCurvesFace_Intersector_WParameter,None,IntCurvesFace_Intersector)
IntCurvesFace_Intersector.Pnt = new_instancemethod(_IntCurvesFace.IntCurvesFace_Intersector_Pnt,None,IntCurvesFace_Intersector)
IntCurvesFace_Intersector.Transition = new_instancemethod(_IntCurvesFace.IntCurvesFace_Intersector_Transition,None,IntCurvesFace_Intersector)
IntCurvesFace_Intersector.State = new_instancemethod(_IntCurvesFace.IntCurvesFace_Intersector_State,None,IntCurvesFace_Intersector)
IntCurvesFace_Intersector.Face = new_instancemethod(_IntCurvesFace.IntCurvesFace_Intersector_Face,None,IntCurvesFace_Intersector)
IntCurvesFace_Intersector.ClassifyUVPoint = new_instancemethod(_IntCurvesFace.IntCurvesFace_Intersector_ClassifyUVPoint,None,IntCurvesFace_Intersector)
IntCurvesFace_Intersector.Bounding = new_instancemethod(_IntCurvesFace.IntCurvesFace_Intersector_Bounding,None,IntCurvesFace_Intersector)
IntCurvesFace_Intersector.Destroy = new_instancemethod(_IntCurvesFace.IntCurvesFace_Intersector_Destroy,None,IntCurvesFace_Intersector)
IntCurvesFace_Intersector_swigregister = _IntCurvesFace.IntCurvesFace_Intersector_swigregister
IntCurvesFace_Intersector_swigregister(IntCurvesFace_Intersector)
class IntCurvesFace_ShapeIntersector(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
    def __init__(self, *args):
        """
        Construct an empty shape intersector; call Load() before Perform().

        :rtype: None
        """
        _IntCurvesFace.IntCurvesFace_ShapeIntersector_swiginit(self,_IntCurvesFace.new_IntCurvesFace_ShapeIntersector(*args))
    def Load(self, *args):
        """
        Load the shape to intersect and the tolerance to use.

        :param Sh:
        :type Sh: TopoDS_Shape &
        :param Tol:
        :type Tol: float
        :rtype: None
        """
        return _IntCurvesFace.IntCurvesFace_ShapeIntersector_Load(self, *args)
    def PerformNearest(self, *args):
        """
        * Perform the intersection between the segment L and the loaded shape. PInf is the smallest parameter on the line PSup is the highest parameter on the line For an infinite line PInf and PSup can be +/- RealLast.

        :param L:
        :type L: gp_Lin
        :param PInf:
        :type PInf: float
        :param PSup:
        :type PSup: float
        :rtype: None
        """
        return _IntCurvesFace.IntCurvesFace_ShapeIntersector_PerformNearest(self, *args)
    def Perform(self, *args):
        """
        * Perform the intersection between the segment L and the loaded shape. PInf is the smallest parameter on the line PSup is the highest parameter on the line For an infinite line PInf and PSup can be +/- RealLast.

        :param L:
        :type L: gp_Lin
        :param PInf:
        :type PInf: float
        :param PSup:
        :type PSup: float
        :rtype: None

        * same method for a HCurve from Adaptor3d. PInf and PSup can also be - and + INF.

        :param HCu:
        :type HCu: Handle_Adaptor3d_HCurve &
        :param PInf:
        :type PInf: float
        :param PSup:
        :type PSup: float
        :rtype: None
        """
        return _IntCurvesFace.IntCurvesFace_ShapeIntersector_Perform(self, *args)
    def IsDone(self, *args):
        """
        * True is returned when the intersection have been computed.

        :rtype: bool
        """
        # Thin delegation to the compiled OCC wrapper.
        return _IntCurvesFace.IntCurvesFace_ShapeIntersector_IsDone(self, *args)
    def NbPnt(self, *args):
        """
        * Number of intersection points found by the last Perform().

        :rtype: int
        """
        return _IntCurvesFace.IntCurvesFace_ShapeIntersector_NbPnt(self, *args)
    def UParameter(self, *args):
        """
        * Returns the U parameter of the ith intersection point on the surface.

        :param I:
        :type I: int
        :rtype: float
        """
        # Thin delegation to the compiled OCC wrapper.
        return _IntCurvesFace.IntCurvesFace_ShapeIntersector_UParameter(self, *args)
    def VParameter(self, *args):
        """
        * Returns the V parameter of the ith intersection point on the surface.

        :param I:
        :type I: int
        :rtype: float
        """
        # Thin delegation to the compiled OCC wrapper.
        return _IntCurvesFace.IntCurvesFace_ShapeIntersector_VParameter(self, *args)
    def WParameter(self, *args):
        """
        * Returns the parameter of the ith intersection point on the line.

        :param I:
        :type I: int
        :rtype: float
        """
        # Thin delegation to the compiled OCC wrapper.
        return _IntCurvesFace.IntCurvesFace_ShapeIntersector_WParameter(self, *args)
    def Pnt(self, *args):
        """
        * Returns the geometric point of the ith intersection between the line and the surface.

        :param I:
        :type I: int
        :rtype: gp_Pnt
        """
        # Thin delegation to the compiled OCC wrapper.
        return _IntCurvesFace.IntCurvesFace_ShapeIntersector_Pnt(self, *args)
    def Transition(self, *args):
        """
        * Returns the ith transition of the line on the surface.

        :param I:
        :type I: int
        :rtype: IntCurveSurface_TransitionOnCurve
        """
        # Thin delegation to the compiled OCC wrapper.
        return _IntCurvesFace.IntCurvesFace_ShapeIntersector_Transition(self, *args)
def State(self, *args):
"""
* Returns the ith state of the point on the face. The values can be either TopAbs_IN ( the point is in the face) or TopAbs_ON ( the point is on a boudary of the face).
:param I:
:type I: int
:rtype: TopAbs_State
| |
the level's random effects
covariance
Returns
-------
Jf: dict of array_like
For each level contains the derivative of the cholesky parameters
with respect to the covariance
Notes
-----
Function evaluates the derivative of the cholesky parameterization
with respect to the lower triangular components of the covariance
"""
Jf = {}
for key in self.levels:
L = L_dict[key]
E = self.elim_mats[key]
N = self.symm_mats[key]
I = self.iden_mats[key]
Jf[key] = E.dot(N.dot(np.kron(L, I))).dot(E.T)
return Jf
def loglike_c(self, theta_chol, reml=True, use_sw=False):
"""
Parameters
----------
theta_chol: array_like
The cholesky parameterization of the components
Returns
-------
loglike: scalar
Log likelihood of the model
"""
theta = inverse_transform_theta(theta_chol.copy(), self.dims, self.indices)
return self.loglike(theta, reml, use_sw)
def gradient_c(self, theta_chol, reml=True, use_sw=False):
"""
Parameters
----------
theta_chol: array_like
The cholesky parameterization of the components
Returns
-------
gradient: array_like
The gradient of the log likelihood with respect to the covariance
parameterization
"""
theta = inverse_transform_theta(theta_chol.copy(), self.dims, self.indices)
return self.gradient(theta, reml, use_sw)
def hessian_c(self, theta_chol, reml=True):
"""
Parameters
----------
theta_chol: array_like
The cholesky parameterization of the components
Returns
-------
hessian: array_like
The hessian of the log likelihood with respect to the covariance
parameterization
"""
theta = inverse_transform_theta(theta_chol.copy(), self.dims, self.indices)
return self.hessian(theta, reml)
    def gradient_chol(self, theta_chol, reml=True, use_sw=False):
        """
        Parameters
        ----------
        theta_chol: array_like
            The cholesky parameterization of the components

        Returns
        -------
        gradient: array_like
            The gradient of the log likelihood with respect to the cholesky
            parameterization

        Notes
        -----
        Chain rule: grad_chol = grad_cov . J, where J is the Jacobian of
        the covariance parameters with respect to the Cholesky parameters,
        block diagonal over the random-effect levels.
        """
        L_dict = self.update_chol(theta_chol)
        Jf_dict = self.dg_dchol(L_dict)
        Jg = self.gradient_c(theta_chol, reml, use_sw)
        # One Jacobian block per level ...
        Jf = sp.linalg.block_diag(*Jf_dict.values())
        # ... plus one extra row/column for the final residual parameter,
        # whose exp() factor suggests it is log-parameterized -- TODO confirm
        # against transform_theta.
        Jf = np.pad(Jf, [[0, 1]])
        Jf[-1, -1] = np.exp(theta_chol[-1])
        return Jg.dot(Jf)
    def hessian_chol(self, theta_chol, reml=True):
        """
        Parameters
        ----------
        theta_chol: array_like
            The cholesky parameterization of the components

        Returns
        -------
        hessian: array_like
            The hessian of the log likelihood with respect to the cholesky
            parameterization

        Notes
        -----
        Change-of-variables for second derivatives:
        H_chol = J^T H_cov J + sum_i g_i * Hess(f_i), where J is the
        first-order Jacobian and Hess(f_i) the second derivatives of the
        covariance parameters w.r.t. the Cholesky parameters.
        """
        L_dict = self.update_chol(theta_chol)
        Jf_dict = self.dg_dchol(L_dict)
        Hq = self.hessian_c(theta_chol, reml)
        Jg = self.gradient_c(theta_chol, reml)
        Hf = self.d2g_dchol
        # First-order Jacobian, padded for the residual parameter as in
        # gradient_chol.
        Jf = sp.linalg.block_diag(*Jf_dict.values())
        Jf = np.pad(Jf, [[0, 1]])
        Jf[-1, -1] = np.exp(theta_chol[-1])
        # First term of the chain rule.
        A = Jf.T.dot(Hq).dot(Jf)
        # Second term: contract the gradient with each level's second
        # derivative tensor and scatter into the matching index block.
        B = np.zeros_like(Hq)
        for key in self.levels:
            ix = self.indices['theta'][key]
            Jg_i = Jg[ix]
            Hf_i = Hf[key]
            C = np.einsum('i,ijk->jk', Jg_i, Hf_i)
            B[ix, ix[:, None]] += C
        # Residual parameter: d2/dx2 exp(x) = exp(x).
        B[-1, -1] = Jg[-1] * np.exp(theta_chol[-1])
        H = A + B
        return H
    def _compute_effects(self, theta=None):
        """
        Parameters
        ----------
        theta : ndarray, optional
            Model parameters in the covariance form

        Returns
        -------
        beta : ndarray
            Fixed effects estimated at theta.
        XtVinvX_inv : ndarray
            Fixed effects covariance matrix.
        u : ndarray
            Random effect estimate at theta.

        Notes
        -----
        Solves Henderson's mixed model equations for (beta, u), then forms
        the fixed-effects covariance via the Woodbury-style identity
        X'V^-1 X = X'R^-1 X - X'R^-1 Z (G^-1 + Z'R^-1 Z)^-1 Z'R^-1 X.
        """
        theta = self.theta if theta is None else theta
        Ginv = self.update_gmat(theta, inverse=True)
        # Mixed model equation coefficient matrix; the last row/col (dropped
        # below) presumably carries the y block -- TODO confirm update_mme.
        M = self.update_mme(Ginv, theta[-1])
        # Right-hand side [X Z]'y scaled by the residual variance theta[-1].
        XZy = self.XZ.T.dot(self.y) / theta[-1]
        chol_fac = cholesky(M[:-1, :-1].tocsc())
        betau = chol_fac.solve_A(XZy)
        # betau stacks the fixed effects first, then the random effects.
        u = betau[self.X.shape[1]:].reshape(-1)
        beta = betau[:self.X.shape[1]].reshape(-1)

        Rinv = self.R / theta[-1]
        RZ = Rinv.dot(self.Zs)
        Q = Ginv + self.Zs.T.dot(RZ)
        # M is reused here as the inverse middle factor (not the MME matrix).
        M = cholesky(Q).inv()
        XtRinvX = self.X.T.dot(Rinv.dot(self.X))
        # NOTE(review): uses self.Z here but self.Zs above -- presumably the
        # same design in different storage; confirm they are consistent.
        XtRinvZ = self.X.T.dot(Rinv.dot(self.Z))
        XtVinvX = XtRinvX - XtRinvZ.dot(M.dot(XtRinvZ.T))
        XtVinvX_inv = np.linalg.inv(XtVinvX)
        return beta, XtVinvX_inv, u
def _optimize(self, reml=True, use_grad=True, use_hess=False, approx_hess=False,
              opt_kws=None):
    """
    Minimize the (restricted) negative log likelihood in the cholesky
    parameterization.

    Parameters
    ----------
    reml : bool, optional
        If true, the REML criterion is optimized. The default is True.
    use_grad : bool, optional
        If true, the analytic gradient is used during optimization.
        The default is True.
    use_hess : bool, optional
        If true, the analytic hessian is used during optimization.
        The default is False.
    approx_hess: bool, optional
        If true, uses the gradient to approximate the hessian
    opt_kws : dict, optional
        Dictionary of options to use in scipy.optimize.minimize.
        The default is None, meaning only the built-in defaults are used.

    Returns
    -------
    theta : ndarray
        Optimum in the covariance parameterization.
    theta_chol : ndarray
        Optimum in the cholesky parameterization.
    optimizer : OptimizeResult
        The scipy.optimize result object.
    """
    # Merge user options over defaults into a fresh dict. The previous
    # implementation used a mutable default argument (opt_kws={}) and wrote
    # the defaults back into it, mutating the caller's dict and leaking
    # state between calls that relied on the shared default.
    default_opt_kws = dict(verbose=0, gtol=1e-6, xtol=1e-6)
    opt_kws = {**default_opt_kws, **(opt_kws or {})}
    if use_grad:
        if use_hess:
            hess = self.hessian_chol
        elif approx_hess:
            # Hessian approximated via central differences of the gradient.
            hess = lambda x, reml: so_gc_cd(self.gradient_chol, x, args=(reml,))
        else:
            hess = None
        optimizer = sp.optimize.minimize(self.loglike_c, self.theta, args=(reml,),
                                         jac=self.gradient_chol, hess=hess,
                                         options=opt_kws, bounds=self.bounds,
                                         method='trust-constr')
    else:
        # Fully numerical first and second derivatives of the objective.
        jac = lambda x, reml: fo_fc_cd(self.loglike_c, x, args=(reml,))
        hess = lambda x, reml: so_fc_cd(self.loglike_c, x, args=(reml,))
        optimizer = sp.optimize.minimize(self.loglike_c, self.theta, args=(reml,),
                                         jac=jac, hess=hess, bounds=self.bounds,
                                         method='trust-constr', options=opt_kws)
    theta_chol = optimizer.x
    # Map the optimum back into the covariance parameterization.
    theta = inverse_transform_theta(theta_chol.copy(), self.dims, self.indices)
    return theta, theta_chol, optimizer
def _post_fit(self, theta, theta_chol, optimizer, reml=True,
              use_grad=True, analytic_se=False):
    """
    Store parameter estimates, standard errors and fit statistics on self.

    Parameters
    ----------
    theta : ndarray
        Covariance-form parameters at the optimum.
    theta_chol : ndarray
        Cholesky-form parameters at the optimum.
    optimizer : OptimizeResult
        Result object returned by scipy.optimize.minimize.
    reml : bool, optional
        Whether the REML criterion was optimized. The default is True.
    use_grad : bool, optional
        If true and analytic_se is False, the gradient is used in the
        numerical approximation of the hessian. The default is True.
    analytic_se : bool, optional
        If true, then the hessian is used to compute standard errors.
        The default is False.

    Returns
    -------
    None.
    """
    beta, XtWX_inv, u = self._compute_effects(theta)
    params = np.concatenate([beta, theta])
    # Per-level random-effect covariance and correlation matrices.
    re_covs, re_corrs = {}, {}
    for key, value in self.dims.items():
        re_covs[key] = invech(theta[self.indices['theta'][key]].copy())
        C = re_covs[key]
        v = np.diag(np.sqrt(1/np.diag(C)))  # D^{-1/2} scaling to correlations
        re_corrs[key] = v.dot(C).dot(v)
    if analytic_se:
        Htheta = self.hessian(theta)
    elif use_grad:
        # Second-order central differences of the analytic gradient.
        Htheta = so_gc_cd(self.gradient, theta)
    else:
        # Fully numerical hessian of the objective.
        Htheta = so_fc_cd(self.loglike, theta)
    self.theta, self.beta, self.u, self.params = theta, beta, u, params
    self.Hinv_beta = XtWX_inv
    # Divide by 2: the objective is on the deviance (-2 log L) scale,
    # consistent with llf = ll / -2 below.
    self.Hinv_theta = np.linalg.pinv(Htheta/2.0)
    self.se_beta = np.sqrt(np.diag(XtWX_inv))
    self.se_theta = np.sqrt(np.diag(self.Hinv_theta))
    self.se_params = np.concatenate([self.se_beta, self.se_theta])
    self.optimizer = optimizer
    self.theta_chol = theta_chol
    if reml:
        self.llconst = (self.X.shape[0] - self.X.shape[1])*np.log(2*np.pi)
    else:
        self.llconst = self.X.shape[0] * np.log(2*np.pi)
    self.lltheta = self.optimizer.fun
    self.ll = (self.llconst + self.lltheta)
    self.llf = self.ll / -2.0
    self.re_covs = re_covs
    self.re_corrs = re_corrs
    # Effective sample size and parameter count differ under REML vs ML.
    if reml:
        n = self.X.shape[0] - self.X.shape[1]
        d = len(self.theta)
    else:
        n = self.X.shape[0]
        d = self.X.shape[1] + len(self.theta)
    # Information criteria, computed on the deviance scale.
    self.AIC = self.ll + 2.0 * d
    self.AICC = self.ll + 2 * d * n / (n-d-1)
    self.BIC = self.ll + d * np.log(n)
    self.CAIC = self.ll + d * (np.log(n) + 1)
    sumstats = np.array([self.ll, self.llf, self.AIC, self.AICC,
                         self.BIC, self.CAIC])
    self.sumstats = pd.DataFrame(sumstats, index=['ll', 'llf', 'AIC', 'AICC',
                                                  'BIC', 'CAIC'], columns=['value'])
def predict(self, X=None, Z=None):
    """
    Evaluate model predictions X @ beta + Z @ u.

    Parameters
    ----------
    X : ndarray, optional
        Model matrix for fixed effects. Defaults to the training matrix.
    Z : ndarray, optional
        Model matrix for random effects. Defaults to the training matrix.

    Returns
    -------
    yhat : ndarray
        Model predictions evaluated at X and Z.
    """
    design_fixed = self.X if X is None else X
    design_random = self.Z if Z is None else Z
    yhat = design_fixed.dot(self.beta) + design_random.dot(self.u)
    return yhat
def fit(self, reml=True, use_grad=True, use_hess=False, approx_hess=False,
        analytic_se=False, adjusted_pvals=True, opt_kws={}):
    """
    Fit the model and assemble the coefficient table ``self.res``.

    Parameters
    ----------
    reml : bool, optional
        If true, the REML criterion is optimized. The default is True.
    use_grad : bool, optional
        If true, the analytic gradient is used during optimization.
        The default is True.
    use_hess : bool, optional
        If true, the analytic hessian is used during optimization.
        The default is False.
    approx_hess: bool, optional
        If true, uses the gradient to approximate the hessian
    analytic_se : bool, optional
        If true, then the hessian is used to compute standard errors.
        The default is False.
    adjusted_pvals : bool, optional
        If true, fixed-effect tests are replaced with F-based statistics
        using approximate denominator degrees of freedom.
    opt_kws : dict, optional
        Dictionary of options to use in scipy.optimize.minimize.
        The default is {}.

    Returns
    -------
    None.
    """
    theta, theta_chol, optimizer = self._optimize(reml, use_grad, use_hess,
                                                  approx_hess, opt_kws)
    self._post_fit(theta, theta_chol, optimizer, reml, use_grad,
                   analytic_se)
    # Parameter labels: fixed effects, then the upper triangle of each
    # level's covariance matrix, then the residual variance.
    param_names = list(self.fe_vars)
    for level in self.levels:
        for i, j in list(zip(*np.triu_indices(self.dims[level]['n_vars']))):
            param_names.append(f"{level}:G[{i}][{j}]")
    param_names.append("resid_cov")
    self.param_names = param_names
    res = np.vstack((self.params, self.se_params)).T
    res = pd.DataFrame(res, index=param_names, columns=['estimate', 'SE'])
    res['t'] = res['estimate'] / res['SE']
    # NOTE(review): sf(|t|) yields a one-sided p-value; a two-sided Wald
    # test would use 2 * sf(|t|) -- confirm intent.
    res['p'] = sp.stats.t(self.X.shape[0]-self.X.shape[1]).sf(np.abs(res['t']))
    res['degfree'] = self.X.shape[0] - self.X.shape[1]
    if adjusted_pvals:
        # Replace the fixed-effect rows using per-coefficient contrasts and
        # approximate denominator degrees of freedom from approx_degfree.
        L = np.eye(self.X.shape[1])
        L_list = [L[[i]] for i in range(self.X.shape[1])]
        adj_table = pd.DataFrame(self.approx_degfree(L_list), index=self.fe_vars)
        res.loc[self.fe_vars, 't'] = adj_table['F']**0.5
        res.loc[self.fe_vars, 'degfree'] = adj_table['df2']
        res.loc[self.fe_vars, 'p'] = adj_table['p']
    self.res = res
def _restricted_ll_grad(self, theta_chol_f, free_ix, theta_chol_r, reml=True):
    """
    Log likelihood and gradient with the free parameters substituted into
    the restricted parameter vector.

    Note: ``theta_chol_r`` is modified in place at the free indices.
    """
    theta_chol_r[free_ix] = theta_chol_f
    # Pass separate copies so callees cannot alias the working vector.
    ll = self.loglike_c(theta_chol_r.copy(), reml)
    grad_full = self.gradient_chol(theta_chol_r.copy(), reml)
    return ll, grad_full[free_ix]
def profile(self, n_points=40, tb=3):
theta = self.theta.copy()
free_ix = np.ones_like(theta).astype(bool)
reparam = VarCorrReparam(self.dims, self.indices)
rmodel = RestrictedModel(self, reparam)
tau = reparam.transform(theta)
n_theta = len(theta)
llmax = self.loglike(self.theta.copy())
H = so_gc_cd(vcrepara_grad, tau, args=(self.gradient, reparam,))
se = np.diag(np.linalg.inv(H/2.0))**0.5
thetas, zetas = np.zeros((n_theta*n_points, n_theta)), np.zeros(n_theta*n_points)
k = 0
pbar = tqdm.tqdm(total=n_theta*n_points, smoothing=0.001)
for i in range(n_theta):
free_ix[i] = False
t_mle = tau[i]
tau_r = tau.copy()
if self.bounds[i][0]==0:
lb = np.maximum(0.01, t_mle-tb*se[i])
else:
| |
<reponame>msobrevillac/sockeye
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Implements data iterators and I/O related functions for sequence-to-sequence models.
"""
import bisect
import gzip
import logging
import math
import pickle
import random
from collections import OrderedDict
from typing import Any, Dict, Iterator, Iterable, List, NamedTuple, Optional, Tuple
import math
import mxnet as mx
import numpy as np
from sockeye.utils import check_condition
from . import config
from . import constants as C
logger = logging.getLogger(__name__)
def define_buckets(max_seq_len: int, step=10) -> List[int]:
    """
    Returns a list of integers defining bucket boundaries.

    Buckets grow in increments of ``step`` until the last one covers
    ``max_seq_len``; that final bucket is then clamped to ``max_seq_len``
    exactly (so the last gap may be smaller than ``step``).

    :param max_seq_len: Maximum bucket size.
    :param step: Distance between buckets.
    :return: List of bucket sizes.
    """
    boundaries = list(range(step, max_seq_len + step, step))
    boundaries[-1] = max_seq_len
    return boundaries
def define_parallel_buckets(max_seq_len_source: int,
                            max_seq_len_target: int,
                            bucket_width: int = 10,
                            length_ratio: float = 1.0) -> List[Tuple[int, int]]:
    """
    Returns (source, target) bucket pairs up to (max_seq_len_source, max_seq_len_target).

    The longer side advances in steps of ``bucket_width`` while the shorter
    side's step is scaled by the average target/source length ratio.  If one
    side reaches its maximum before the other, its remaining buckets repeat
    that maximum.  Every bucket size is at least 2 (a BOS symbol is added to
    the target side) and duplicate pairs are dropped while preserving order.

    :param max_seq_len_source: Maximum source bucket size.
    :param max_seq_len_target: Maximum target bucket size.
    :param bucket_width: Width of buckets on longer side.
    :param length_ratio: Length ratio of data (target/source).
    """
    if length_ratio >= 1.0:
        # target side is longer -> scale down the source step
        step_source = max(1, int(bucket_width / length_ratio))
        step_target = bucket_width
    else:
        # source side is longer -> scale down the target step
        step_source = bucket_width
        step_target = max(1, int(bucket_width * length_ratio))

    def _boundaries(max_len: int, step: int) -> List[int]:
        # Same policy as define_buckets: step, 2*step, ..., capped at max_len.
        sizes = list(range(step, max_len + step, step))
        sizes[-1] = max_len
        return sizes

    side_source = _boundaries(max_seq_len_source, step_source)
    side_target = _boundaries(max_seq_len_target, step_target)
    # Pad the shorter list by repeating its final (maximal) bucket.
    while len(side_source) < len(side_target):
        side_source.append(side_source[-1])
    while len(side_target) < len(side_source):
        side_target.append(side_target[-1])
    # Enforce the minimum bucket size of 2 and deduplicate, keeping order.
    pairs = [(max(2, src), max(2, tgt)) for src, tgt in zip(side_source, side_target)]
    return list(OrderedDict.fromkeys(pairs))
def get_bucket(seq_len: int, buckets: List[int]) -> Optional[int]:
    """
    Returns the smallest bucket that fits a sequence of the given length,
    or None if no bucket is large enough.

    :param seq_len: Sequence length.
    :param buckets: List of buckets.
    :return: Chosen bucket.
    """
    bucket_idx = bisect.bisect_left(buckets, seq_len)
    return buckets[bucket_idx] if bucket_idx < len(buckets) else None
def read_parallel_corpus(data_source: str,
                         data_target: str,
                         data_source_graphs: str,
                         vocab_source: Dict[str, int],
                         vocab_target: Dict[str, int],
                         vocab_edges: Dict[str, int]) -> Tuple[List[List[int]],
                                                               List[List[int]],
                                                               List[Any]]:
    """
    Loads source sentences, target sentences and source graphs, making sure
    they all have the same number of entries.

    :param data_source: Path to source training data.
    :param data_target: Path to target training data.
    :param data_source_graphs: Path to graphs for source training data.
    :param vocab_source: Source vocabulary.
    :param vocab_target: Target vocabulary.
    :param vocab_edges: Graph edges vocabulary.
    :return: Tuple of (source sentences, target sentences, source graphs).
    """
    # BOS is only added on the target side.
    source_sentences = read_sentences(data_source, vocab_source, add_bos=False)
    source_graphs = read_graphs(data_source_graphs, vocab_edges)
    target_sentences = read_sentences(data_target, vocab_target, add_bos=True)
    check_condition(len(source_sentences) == len(target_sentences),
                    "Number of source sentences does not match number of target sentences")
    check_condition(len(source_sentences) == len(source_graphs),
                    "Number of source sentences does not match number of source graphs")
    return source_sentences, target_sentences, source_graphs
def length_statistics(source_sentences: List[List[Any]], target_sentences: List[List[int]]) -> Tuple[float, float]:
    """
    Returns mean and standard deviation of target-to-source length ratios of parallel corpus.

    Both arguments are lists of corpora, each corpus being a list of
    (tokenized) sentences; ratios are computed sentence-by-sentence across
    all corpora.

    :param source_sentences: Source sentences (per corpus).
    :param target_sentences: Target sentences (per corpus).
    :return: Mean and standard deviation of length ratios.
    """
    length_ratios = np.array([len(t) / float(len(s))
                              for ts, ss in zip(target_sentences, source_sentences)
                              for t, s in zip(ts, ss)])
    # np.asscalar was deprecated and removed in NumPy 1.23; float() is the
    # supported way to extract a Python scalar.
    mean = float(np.mean(length_ratios))
    std = float(np.std(length_ratios))
    return mean, std
def get_training_data_iters(source: List[str], target: List[str], source_graphs: List[str],
                            validation_source: str, validation_target: str,
                            val_source_graphs: str,
                            vocab_source: Dict[str, int],
                            vocab_target: Dict[str, int],
                            vocab_edge: Dict[str, int],
                            vocab_source_path: Optional[str],
                            vocab_target_path: Optional[str],
                            vocab_edge_path: Optional[str],
                            batch_size: int,
                            batch_by_words: bool,
                            batch_num_devices: int,
                            fill_up: str,
                            max_seq_len_source: int,
                            max_seq_len_target: int,
                            bucketing: bool,
                            bucket_width: int,
                            temperature: float) -> Tuple['ParallelBucketSentenceIter',
                                                         'ParallelBucketSentenceIter',
                                                         'DataConfig']:
    """
    Returns data iterators for training and validation data.

    :param source: Path to source training data.
    :param target: Path to target training data.
    :param source_graphs: Path to source training graphs.
    :param validation_source: Path to source validation data.
    :param validation_target: Path to target validation data.
    :param val_source_graphs: Path to source validation graphs.
    :param vocab_source: Source vocabulary.
    :param vocab_target: Target vocabulary.
    :param vocab_edge: Graph edges vocabulary.
    :param vocab_source_path: Path to source vocabulary.
    :param vocab_target_path: Path to target vocabulary.
    :param vocab_edge_path: Path to metadata vocabulary.
    :param batch_size: Batch size.
    :param batch_by_words: Size batches by words rather than sentences.
    :param batch_num_devices: Number of devices batches will be parallelized across.
    :param fill_up: Fill-up strategy for buckets.
    :param max_seq_len_source: Maximum source sequence length.
    :param max_seq_len_target: Maximum target sequence length.
    :param bucketing: Whether to use bucketing.
    :param bucket_width: Size of buckets.
    :param temperature: Temperature forwarded to the bucket iterators.
    :return: Tuple of (training data iterator, validation data iterator, data config).
    """
    logger.info("Creating train data iterator")
    # The training side may consist of several parallel corpora; read each.
    train_source_sentences = []
    train_target_sentences = []
    train_source_graphs = []
    for src, tgt, src_graphs in zip(source, target, source_graphs):
        (train_src_sentences,
         train_tgt_sentences,
         train_src_graphs) = read_parallel_corpus(src,
                                                  tgt,
                                                  src_graphs,
                                                  vocab_source,
                                                  vocab_target,
                                                  vocab_edge)
        train_source_sentences.append(train_src_sentences)
        train_target_sentences.append(train_tgt_sentences)
        train_source_graphs.append(train_src_graphs)
    # Longest sentence per side that still fits the configured maximum.
    max_observed_source_len = max((len(s) for train_src_stn in train_source_sentences for s in train_src_stn
                                   if len(s) <= max_seq_len_source), default=0)
    max_observed_target_len = max((len(t) for train_tgt_stn in train_target_sentences for t in train_tgt_stn
                                   if len(t) <= max_seq_len_target), default=0)
    lr_mean, lr_std = length_statistics(train_source_sentences, train_target_sentences)
    logger.info("Mean training target/source length ratio: %.2f (+-%.2f)", lr_mean, lr_std)
    # define buckets
    buckets = define_parallel_buckets(max_seq_len_source,
                                      max_seq_len_target,
                                      bucket_width,
                                      lr_mean) if bucketing else [
        (max_seq_len_source, max_seq_len_target)]
    train_iter = ParallelBucketSentenceIter(train_source_sentences,
                                            train_target_sentences,
                                            train_source_graphs,
                                            buckets,
                                            batch_size,
                                            batch_by_words,
                                            batch_num_devices,
                                            vocab_target[C.EOS_SYMBOL],
                                            C.PAD_ID,
                                            vocab_target[C.UNK_SYMBOL],
                                            vocab_edge['d'],
                                            bucket_batch_sizes=None,
                                            fill_up=fill_up,
                                            temperature=temperature)
    logger.info("Creating validation data iterator")
    val_iter = None
    (val_source_sentences,
     val_target_sentences,
     val_src_graphs) = read_parallel_corpus(validation_source,
                                            validation_target,
                                            val_source_graphs,
                                            vocab_source,
                                            vocab_target,
                                            vocab_edge)
    # Validation reuses the training bucket batch sizes for comparability.
    val_iter = ParallelBucketSentenceIter([val_source_sentences],
                                          [val_target_sentences],
                                          [val_src_graphs],
                                          buckets,
                                          batch_size,
                                          batch_by_words,
                                          batch_num_devices,
                                          vocab_target[C.EOS_SYMBOL],
                                          C.PAD_ID,
                                          vocab_target[C.UNK_SYMBOL],
                                          vocab_edge['d'],
                                          bucket_batch_sizes=train_iter.bucket_batch_sizes,
                                          fill_up=fill_up,
                                          temperature=temperature)
    config_data = DataConfig(source, target, source_graphs,
                             validation_source, validation_target,
                             val_source_graphs,
                             vocab_source_path, vocab_target_path,
                             vocab_edge_path,
                             lr_mean, lr_std, max_observed_source_len, max_observed_target_len)
    return train_iter, val_iter, config_data
class DataConfig(config.Config):
    """
    Stores data paths from training.
    """

    def __init__(self,
                 source: str,
                 target: str,
                 source_graphs: str,
                 validation_source: str,
                 validation_target: str,
                 val_source_graphs: str,
                 vocab_source: Optional[str],
                 vocab_target: Optional[str],
                 vocab_edge: Optional[str],
                 length_ratio_mean: float = C.TARGET_MAX_LENGTH_FACTOR,
                 length_ratio_std: float = 0.0,
                 max_observed_source_seq_len: Optional[int] = None,
                 max_observed_target_seq_len: Optional[int] = None) -> None:
        """
        :param source: Path to source training data.
        :param target: Path to target training data.
        :param source_graphs: Path to source training graphs.
        :param validation_source: Path to source validation data.
        :param validation_target: Path to target validation data.
        :param val_source_graphs: Path to source validation graphs.
        :param vocab_source: Path to source vocabulary, if saved.
        :param vocab_target: Path to target vocabulary, if saved.
        :param vocab_edge: Path to edge vocabulary, if saved.
        :param length_ratio_mean: Mean target/source length ratio.
        :param length_ratio_std: Standard deviation of the length ratio.
        :param max_observed_source_seq_len: Longest observed source length.
        :param max_observed_target_seq_len: Longest observed target length.
        """
        super().__init__()
        self.source = source
        self.target = target
        self.source_graphs = source_graphs
        self.validation_source = validation_source
        self.validation_target = validation_target
        self.val_source_graphs = val_source_graphs
        self.vocab_source = vocab_source
        self.vocab_target = vocab_target
        self.vocab_edge = vocab_edge
        self.length_ratio_mean = length_ratio_mean
        self.length_ratio_std = length_ratio_std
        self.max_observed_source_seq_len = max_observed_source_seq_len
        self.max_observed_target_seq_len = max_observed_target_seq_len
def smart_open(filename: str, mode="rt", ftype="auto", errors='replace'):
    """
    Returns a file descriptor for filename opened with UTF-8 encoding.

    With ftype "auto" the file is treated as gzip iff its name ends in .gz;
    ftype "gzip"/"gz" forces gzip.  Encoding error handling defaults to
    "replace".

    :param filename: The filename to open.
    :param mode: Reader mode.
    :param ftype: File type. If 'auto' checks filename suffix for gz to try gzip.open
    :param errors: Encoding error handling during reading. Defaults to 'replace'
    :return: File descriptor
    """
    use_gzip = ftype in ('gzip', 'gz') or (ftype == 'auto' and filename.endswith(".gz"))
    opener = gzip.open if use_gzip else open
    return opener(filename, mode=mode, encoding='utf-8', errors=errors)
def read_content(path: str, limit=None) -> Iterator[List[str]]:
"""
Returns a list of tokens | |
table table-striped table-bordered "
"table-condensed"))
@property
def name(self):
    """Name of the datasource: the underlying table's name."""
    return self.table_name
@property
def table_link(self):
    """HTML anchor linking to this table's explore view."""
    return '<a href="{obj.explore_url}">{obj.table_name}</a>'.format(obj=self)
@property
def metrics_combo(self):
    """(metric_name, label) pairs for UI dropdowns, sorted by label.

    The label falls back to the metric name when no verbose name is set.
    """
    combo = [(m.metric_name, m.verbose_name or m.metric_name)
             for m in self.metrics]
    combo.sort(key=lambda pair: pair[1])
    return combo
@property
def sql_url(self):
    """URL of the SQL editor pre-filtered to this table."""
    return self.database.sql_url + "?table_name=" + str(self.table_name)
@property
def sql_link(self):
    """HTML anchor pointing at the SQL editor for this table."""
    return '<a href="{}">SQL</a>'.format(self.sql_url)
def query(  # sqla
        self, groupby, metrics,
        granularity,
        from_dttm, to_dttm,
        filter=None,  # noqa
        is_timeseries=True,
        timeseries_limit=15, row_limit=None,
        inner_from_dttm=None, inner_to_dttm=None,
        extras=None,
        columns=None):
    """Querying any sqla table from this common interface.

    Builds a SQLAlchemy query from the requested groupby columns, metrics,
    filters and time bounds; when a timeseries limit is set, joins against
    an inner sub-query that restricts results to the top groups.  Executes
    the query and returns a QueryResult wrapping the dataframe, the
    elapsed time and the formatted SQL.
    """
    # For backward compatibility
    if granularity not in self.dttm_cols:
        granularity = self.main_dttm_col
    cols = {col.column_name: col for col in self.columns}
    qry_start_dttm = datetime.now()
    # ``filter`` defaults to None but is iterated unconditionally below;
    # normalize so the method can be called without any filters.
    filter = filter or []
    if not granularity and is_timeseries:
        raise Exception(_(
            "Datetime column not provided as part table configuration "
            "and is required by this type of chart"))
    metrics_exprs = [
        m.sqla_col
        for m in self.metrics if m.metric_name in metrics]
    if metrics:
        main_metric_expr = [
            m.sqla_col for m in self.metrics
            if m.metric_name == metrics[0]][0]
    else:
        # No metric requested: order/limit on a plain row count instead.
        main_metric_expr = literal_column("COUNT(*)").label("ccount")
    select_exprs = []
    groupby_exprs = []
    if groupby:
        select_exprs = []
        inner_select_exprs = []
        inner_groupby_exprs = []
        for s in groupby:
            col = cols[s]
            outer = col.sqla_col
            # Inner aliases carry a '__' suffix so the outer query can
            # join back on them.
            inner = col.sqla_col.label(col.column_name + '__')
            groupby_exprs.append(outer)
            select_exprs.append(outer)
            inner_groupby_exprs.append(inner)
            inner_select_exprs.append(inner)
    elif columns:
        for s in columns:
            select_exprs.append(cols[s].sqla_col)
        metrics_exprs = []
    if granularity:
        dttm_expr = cols[granularity].sqla_col.label('timestamp')
        timestamp = dttm_expr
        # Transforming time grain into an expression based on configuration
        time_grain_sqla = extras.get('time_grain_sqla')
        if time_grain_sqla:
            udf = self.database.grains_dict().get(time_grain_sqla, '{col}')
            timestamp_grain = literal_column(
                udf.function.format(col=dttm_expr)).label('timestamp')
        else:
            timestamp_grain = timestamp
        if is_timeseries:
            select_exprs += [timestamp_grain]
            groupby_exprs += [timestamp_grain]
        time_filter = [
            timestamp >= text(self.database.dttm_converter(from_dttm)),
            timestamp <= text(self.database.dttm_converter(to_dttm)),
        ]
        # The inner (top-series) query may use a wider time range.
        inner_time_filter = copy(time_filter)
        if inner_from_dttm:
            inner_time_filter[0] = timestamp >= text(
                self.database.dttm_converter(inner_from_dttm))
        if inner_to_dttm:
            inner_time_filter[1] = timestamp <= text(
                self.database.dttm_converter(inner_to_dttm))
    else:
        inner_time_filter = []
    select_exprs += metrics_exprs
    qry = select(select_exprs)
    tbl = table(self.table_name)
    if self.schema:
        tbl.schema = self.schema
    if not columns:
        qry = qry.group_by(*groupby_exprs)
    where_clause_and = []
    having_clause_and = []
    for col, op, eq in filter:
        col_obj = cols[col]
        if op in ('in', 'not in'):
            values = eq.split(",")
            cond = col_obj.sqla_col.in_(values)
            if op == 'not in':
                cond = ~cond
            where_clause_and.append(cond)
    # Free-form where/having snippets from the request.
    if extras and 'where' in extras:
        where_clause_and += [text(extras['where'])]
    if extras and 'having' in extras:
        having_clause_and += [text(extras['having'])]
    if granularity:
        qry = qry.where(and_(*(time_filter + where_clause_and)))
    else:
        qry = qry.where(and_(*where_clause_and))
    qry = qry.having(and_(*having_clause_and))
    if groupby:
        qry = qry.order_by(desc(main_metric_expr))
    qry = qry.limit(row_limit)
    if timeseries_limit and groupby:
        # Sub-query selecting the top `timeseries_limit` groups; the outer
        # query joins on the '__'-aliased groupby columns so only those
        # series survive.
        subq = select(inner_select_exprs)
        subq = subq.select_from(tbl)
        subq = subq.where(and_(*(where_clause_and + inner_time_filter)))
        subq = subq.group_by(*inner_groupby_exprs)
        subq = subq.order_by(desc(main_metric_expr))
        subq = subq.limit(timeseries_limit)
        on_clause = []
        for i, gb in enumerate(groupby):
            on_clause.append(
                groupby_exprs[i] == column(gb + '__'))
        tbl = tbl.join(subq.alias(), and_(*on_clause))
    qry = qry.select_from(tbl)
    engine = self.database.get_sqla_engine()
    sql = "{}".format(
        qry.compile(
            engine, compile_kwargs={"literal_binds": True},),
    )
    # (removed leftover debug print of the generated SQL)
    df = pd.read_sql_query(
        sql=sql,
        con=engine
    )
    sql = sqlparse.format(sql, reindent=True)
    return QueryResult(
        df=df, duration=datetime.now() - qry_start_dttm, query=sql)
def fetch_metadata(self):
    """Fetches the metadata for the table and merges it in.

    Reflects the physical table's columns, creates or updates the
    corresponding TableColumn rows, derives default aggregate metrics
    (sum/max/min/count_distinct) from the column types, and persists
    everything to the metadata database.
    """
    try:
        table = self.database.get_table(self.table_name, schema=self.schema)
    except Exception as e:
        flasher(str(e))
        flasher(
            "Table doesn't seem to exist in the specified database, "
            "couldn't fetch column information", "danger")
        return
    TC = TableColumn  # noqa shortcut to class
    M = SqlMetric  # noqa
    metrics = []
    any_date_col = None
    for col in table.columns:
        try:
            datatype = str(col.type)
        except Exception as e:
            # Some dialects raise when stringifying exotic column types.
            datatype = "UNKNOWN"
        dbcol = (
            db.session
            .query(TC)
            .filter(TC.table == self)
            .filter(TC.column_name == col.name)
            .first()
        )
        db.session.flush()
        if not dbcol:
            dbcol = TableColumn(column_name=col.name)
        # Infer UI capability flags from the string form of the type.
        num_types = ('DOUBLE', 'FLOAT', 'INT', 'BIGINT', 'LONG')
        date_types = ('DATE', 'TIME')
        str_types = ('VARCHAR', 'STRING')
        datatype = str(datatype).upper()
        if any([t in datatype for t in str_types]):
            dbcol.groupby = True
            dbcol.filterable = True
        elif any([t in datatype for t in num_types]):
            dbcol.sum = True
        elif any([t in datatype for t in date_types]):
            dbcol.is_dttm = True
        db.session.merge(self)
        self.columns.append(dbcol)
        # Remember the first date-like column as a fallback datetime column.
        if not any_date_col and 'date' in datatype.lower():
            any_date_col = col.name
        # Quote the column name for the target dialect before embedding it
        # into metric SQL expressions.
        quoted = "{}".format(
            column(dbcol.column_name).compile(dialect=db.engine.dialect))
        if dbcol.sum:
            metrics.append(M(
                metric_name='sum__' + dbcol.column_name,
                verbose_name='sum__' + dbcol.column_name,
                metric_type='sum',
                expression="SUM({})".format(quoted)
            ))
        if dbcol.max:
            metrics.append(M(
                metric_name='max__' + dbcol.column_name,
                verbose_name='max__' + dbcol.column_name,
                metric_type='max',
                expression="MAX({})".format(quoted)
            ))
        if dbcol.min:
            metrics.append(M(
                metric_name='min__' + dbcol.column_name,
                verbose_name='min__' + dbcol.column_name,
                metric_type='min',
                expression="MIN({})".format(quoted)
            ))
        if dbcol.count_distinct:
            metrics.append(M(
                metric_name='count_distinct__' + dbcol.column_name,
                verbose_name='count_distinct__' + dbcol.column_name,
                metric_type='count_distinct',
                expression="COUNT(DISTINCT {})".format(quoted)
            ))
        dbcol.type = datatype
        db.session.merge(self)
        db.session.commit()
    # A plain row count metric is always available.
    metrics.append(M(
        metric_name='count',
        verbose_name='COUNT(*)',
        metric_type='count',
        expression="COUNT(*)"
    ))
    for metric in metrics:
        # Only insert metrics that don't already exist for this table.
        m = (
            db.session.query(M)
            .filter(M.metric_name == metric.metric_name)
            .filter(M.table_id == self.id)
            .first()
        )
        metric.table_id = self.id
        if not m:
            db.session.add(metric)
        db.session.commit()
    if not self.main_dttm_col:
        self.main_dttm_col = any_date_col
class SqlMetric(Model, AuditMixinNullable):

    """ORM object for metrics, each table can have multiple metrics"""

    __tablename__ = 'sql_metrics'
    id = Column(Integer, primary_key=True)
    metric_name = Column(String(512))
    verbose_name = Column(String(1024))
    metric_type = Column(String(32))  # e.g. sum, max, min, count_distinct
    table_id = Column(Integer, ForeignKey('tables.id'))
    table = relationship(
        'SqlaTable', backref='metrics', foreign_keys=[table_id])
    expression = Column(Text)  # SQL expression defining the metric
    description = Column(Text)

    @property
    def sqla_col(self):
        """SQLAlchemy expression for this metric, labeled with its name."""
        name = self.metric_name
        return literal_column(self.expression).label(name)
class TableColumn(Model, AuditMixinNullable):

    """ORM object for table columns, each table can have multiple columns"""

    __tablename__ = 'table_columns'
    id = Column(Integer, primary_key=True)
    table_id = Column(Integer, ForeignKey('tables.id'))
    table = relationship(
        'SqlaTable', backref='columns', foreign_keys=[table_id])
    column_name = Column(String(255))
    verbose_name = Column(String(1024))
    is_dttm = Column(Boolean, default=False)  # usable as a datetime axis
    is_active = Column(Boolean, default=True)
    type = Column(String(32), default='')
    # Flags controlling how the column may be used in the explore UI.
    groupby = Column(Boolean, default=False)
    count_distinct = Column(Boolean, default=False)
    sum = Column(Boolean, default=False)
    max = Column(Boolean, default=False)
    min = Column(Boolean, default=False)
    filterable = Column(Boolean, default=False)
    expression = Column(Text, default='')  # optional SQL overriding column_name
    description = Column(Text, default='')

    def __repr__(self):
        return self.column_name

    @property
    def isnum(self):
        """True if the column's type string looks numeric."""
        types = ('LONG', 'DOUBLE', 'FLOAT', 'BIGINT', 'INT')
        return any([t in self.type.upper() for t in types])

    @property
    def sqla_col(self):
        """SQLAlchemy expression for this column, labeled with its name."""
        name = self.column_name
        if not self.expression:
            col = column(self.column_name).label(name)
        else:
            col = literal_column(self.expression).label(name)
        return col
class DruidCluster(Model, AuditMixinNullable):

    """ORM object referencing the Druid clusters"""

    __tablename__ = 'clusters'
    id = Column(Integer, primary_key=True)
    cluster_name = Column(String(250), unique=True)
    coordinator_host = Column(String(255))
    coordinator_port = Column(Integer)
    coordinator_endpoint = Column(
        String(255), default='druid/coordinator/v1/metadata')
    broker_host = Column(String(255))
    broker_port = Column(Integer)
    broker_endpoint = Column(String(255), default='druid/v2')
    metadata_last_refreshed = Column(DateTime)

    def __repr__(self):
        return self.cluster_name

    def get_pydruid_client(self):
        """Return a PyDruid client pointed at this cluster's broker."""
        cli = PyDruid(
            "http://{0}:{1}/".format(self.broker_host, self.broker_port),
            self.broker_endpoint)
        return cli

    def get_datasources(self):
        """Fetch the list of datasource names from the coordinator API."""
        endpoint = (
            "http://{obj.coordinator_host}:{obj.coordinator_port}/"
            "{obj.coordinator_endpoint}/datasources"
        ).format(obj=self)
        return json.loads(requests.get(endpoint).text)

    def refresh_datasources(self):
        """Sync every non-blacklisted datasource of this cluster to the db."""
        for datasource in self.get_datasources():
            if datasource not in config.get('DRUID_DATA_SOURCE_BLACKLIST'):
                DruidDatasource.sync_to_db(datasource, self)
class DruidDatasource(Model, AuditMixinNullable, Queryable):
"""ORM object referencing Druid datasources (tables)"""
type = "druid"
baselink = "druiddatasourcemodelview"
__tablename__ = 'datasources'
id = Column(Integer, primary_key=True)
datasource_name = Column(String(255), unique=True)
is_featured = Column(Boolean, default=False)
is_hidden = Column(Boolean, default=False)
description = Column(Text)
default_endpoint = Column(Text)
user_id = Column(Integer, ForeignKey('ab_user.id'))
owner = relationship('User', backref='datasources', foreign_keys=[user_id])
cluster_name = Column(
String(250), ForeignKey('clusters.cluster_name'))
cluster = relationship(
'DruidCluster', backref='datasources', foreign_keys=[cluster_name])
offset = Column(Integer, default=0)
cache_timeout = Column(Integer)
@property
def metrics_combo(self):
    """(metric_name, verbose_name) pairs sorted by verbose name."""
    pairs = [(m.metric_name, m.verbose_name) for m in self.metrics]
    pairs.sort(key=lambda pair: pair[1])
    return pairs
@property
def name(self):
    """Name of the datasource, used by the generic datasource interface."""
    return self.datasource_name
@property
def perm(self):
    """Permission string: [cluster].[datasource](id:N)."""
    return (
        "[{obj.cluster_name}].[{obj.datasource_name}]"
        "(id:{obj.id})").format(obj=self)
@property
def link(self):
    """HTML anchor pointing at this datasource's page."""
    # The original used .format(**locals()), a fragile idiom that exposes
    # every local name to the template; bind the object explicitly instead.
    return (
        '<a href="{obj.url}">'
        '{obj.datasource_name}</a>').format(obj=self)
@property
def full_name(self):
    """Fully qualified name: [cluster].[datasource]."""
    return (
        "[{obj.cluster_name}]."
        "[{obj.datasource_name}]").format(obj=self)
def __repr__(self):
    # Datasources display by name in logs and the admin UI.
    return self.datasource_name
@property
def datasource_link(self):
    """HTML anchor linking to the explore view for this datasource."""
    url = "/caravel/explore/{obj.type}/{obj.id}/".format(obj=self)
    return '<a href="{url}">{obj.datasource_name}</a>'.format(
        url=url, obj=self)
def get_metric_obj(self, metric_name):
    """Return the json definition of the metric with the given name.

    Raises IndexError if no metric matches, as before.
    """
    matches = [m.json_obj for m in self.metrics
               if m.metric_name == metric_name]
    return matches[0]
def latest_metadata(self):
    """Returns segment metadata from the latest segment.

    Returns the column metadata dict of the most recent segment, or None
    when the datasource has no data or no segment metadata is available.
    """
    client = self.cluster.get_pydruid_client()
    results = client.time_boundary(datasource=self.datasource_name)
    if not results:
        return
    max_time = results[0]['result']['maxTime']
    max_time = parse(max_time)
    # Query segmentMetadata for 7 days back. However, due to a bug,
    # we need to set this interval to more than 1 day ago to exclude
    # realtime segments, which triggered a bug (fixed in druid 0.8.2).
    # https://groups.google.com/forum/#!topic/druid-user/gVCqqspHqOQ
    intervals = (max_time - timedelta(days=7)).isoformat() + '/'
    intervals += (max_time - timedelta(days=1)).isoformat()
    segment_metadata = client.segment_metadata(
        datasource=self.datasource_name,
        intervals=intervals)
    if segment_metadata:
        return segment_metadata[-1]['columns']
def generate_metrics(self):
    """Generate the default metrics for each of this datasource's columns."""
    for col in self.columns:
        col.generate_metrics()
@classmethod
def sync_to_db(cls, name, cluster):
"""Fetches metadata for that datasource and merges the Caravel db"""
print("Syncing Druid datasource [{}]".format(name))
session = get_session()
datasource = session.query(cls).filter_by(datasource_name=name).first()
if not datasource:
datasource = cls(datasource_name=name)
session.add(datasource)
flasher("Adding new datasource [{}]".format(name), "success")
else:
flasher("Refreshing datasource [{}]".format(name), "info")
session.flush()
datasource.cluster = cluster
cols = datasource.latest_metadata()
if not cols:
return
for col in cols:
col_obj = (
session
.query(DruidColumn)
.filter_by(datasource_name=name, column_name=col)
.first()
)
datatype = cols[col]['type']
if not col_obj:
col_obj = DruidColumn(datasource_name=name, column_name=col)
session.add(col_obj)
if datatype == "STRING":
col_obj.groupby = True
col_obj.filterable = True
if col_obj:
col_obj.type = cols[col]['type']
session.flush()
col_obj.datasource = | |
<reponame>piyengar/jigsaw-severity<gh_stars>0
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import os # operating system library
import gc # Garbage Collector - module provides the ability to disable the collector, tune the collection frequency, and set debugging options
import copy # The assignment operation does not copy the object, it only creates a reference to the object.
import time # time library
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader # DataLoader and other utility functions for convenience
from sklearn.model_selection import StratifiedKFold, KFold, train_test_split # Stratified K-Folds cross-validator.
# Util
from tqdm import tqdm
from collections import defaultdict # Usually, a Python dictionary throws a KeyError if you try to get an item with a key that is not currently in the dictionary.
# Huggingface imports
from transformers import AutoTokenizer, AutoModel, AdamW
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
# Walk the read-only Kaggle input directory and print every attached data
# file, so the notebook log records which datasets were available.
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ### Config
# Add configurations to this dict
# In[2]:
# Central notebook configuration: model, tokenizer, and training hyperparameters.
CONFIG = {
    "seed": 666,
    "epochs": 1000,
    "model_name": "distilbert-base-uncased",
    "device" : torch.device("cuda" if torch.cuda.is_available() else "cpu"),
    "max_length": 128,  # tokenizer truncation/padding length
    "n_accumulate": 1, # Gradient accumulation , set > 1 to use
    "n_fold": 10, # The number of folds to split the data in, pick one for validation. We are only doing this for val split for now
    "learning_rate": 1e-5,
    "weight_decay": 1e-6,
    "train_batch_size": 16,
    "valid_batch_size": 64,
    "is_dev_run": False,  # True = short-circuit loops for a quick smoke run
    "margin": 0.0
}
CONFIG['model_path'] = f"{CONFIG['model_name']}"
# NOTE: downloads (or reads from cache) the tokenizer at import time.
CONFIG['tokenizer'] = AutoTokenizer.from_pretrained(CONFIG['model_path'])
# In[3]:
# Sets the seed of the entire notebook so results are the same every time we run.
# This is for REPRODUCIBILITY
def set_seed(seed=42):
    """Seed every RNG used by the notebook for reproducibility.

    Args:
        seed: Seed applied to Python's ``random``, numpy and torch (CPU + CUDA).
    """
    import random  # stdlib RNG; several libraries (e.g. samplers) use it
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # seed every visible GPU, not just current
    # When running on the CuDNN backend, two further options must be set
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # NOTE(review): PYTHONHASHSEED only affects str hashing if it is set before
    # the interpreter starts; setting it here can only influence subprocesses.
    os.environ['PYTHONHASHSEED'] = str(seed)
# Apply the global seed from the notebook config.
set_seed(CONFIG['seed'])
# ### Datasets
# Load datasets into dataframes. We can use multiple datasets in addition to the data provided to this competetion. We use:
# - jigsaw-toxic-severity-rating (val & sub)
# - jigsaw-toxic-comment-classification (train)
# - ...Add more here
# #### jigsaw-toxic-comment-classification
# In[4]:
# Load the toxic-comment-classification training set (one row per comment,
# six binary toxicity flags).
df_cc = pd.read_csv("../input/jigsaw-toxic-comment-classification-challenge/train.csv")
print(df_cc.shape)
# In[5]:
df_cc.columns
# Combine the different toxicity flags into a single value
# In[6]:
# apply a weight to each kind of toxicity, can we learn this weight??
tox_weight = np.array([1, 1, 1, 1, 1, 1])
# multiply the flags with the weight
df_cc['y'] = ((df_cc[['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']] * tox_weight).sum(axis=1))
Y = np.array(df_cc[['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']])
df_cc['y'] = df_cc['y'] / np.sum(tox_weight)  # Normalize to [0, 1]
# FIX: use one combined boolean mask. The original chained selection
# df[m1][m2] reindexes the second mask against a filtered frame and emits a
# pandas UserWarning ("Boolean Series key will be reindexed").
print(df_cc[(df_cc.y > 0) & (df_cc.y < 0.8)].sample(5, random_state=4))
print(f"max = {df_cc.y.max()}, min = {df_cc.y.min()}")
# In[7]:
Y.shape
# Retain only text and y
# In[8]:
df_cc = df_cc.rename(columns={'comment_text': 'text'})
df_cc.head()
# ### Kfold split for randomly selecting validation set
# We only pick the last fold for val right now, but we can explore kfold ensemlbing later as well
# In[9]:
# Deterministic KFold assignment: tag each row with a fold id so one fold can
# later be held out as validation data.
skf = KFold(n_splits=CONFIG['n_fold'], shuffle=True, random_state=CONFIG['seed']) # set the parameters for splitting our dataframe into data for training and testing
splits = skf.split(df_cc)
for fold, (_, val_) in enumerate(splits): # dataframe splitting
    df_cc.loc[val_ , "kfold"] = int(fold)
df_cc["kfold"] = df_cc["kfold"].astype(int) # add one more column of fold number to the original dataframe
df_cc.head() # display the first 5 rows of the dataframe table
# #### jigsaw-toxic-severity-rating
# Validation and Submission datasets
# In[10]:
# Pairwise validation data: each row holds a (less_toxic, more_toxic) pair.
df_val = pd.read_csv("../input/jigsaw-toxic-severity-rating/validation_data.csv")
print(df_val.shape)
df_val.head()
# ### Kfold split for randomly selecting validation set
# We only pick the last fold for val right now, but we can explore kfold ensembling later as well
# In[11]:
# Same deterministic fold-tagging as for the classification dataframe above.
skf = KFold(n_splits=CONFIG['n_fold'], shuffle=True, random_state=CONFIG['seed']) # set the parameters for splitting our dataframe into data for training and testing
splits = skf.split(df_val)
for fold, (_, val_) in enumerate(splits): # dataframe splitting
    df_val.loc[val_ , "kfold"] = int(fold)
df_val["kfold"] = df_val["kfold"].astype(int) # add one more column of fold number to the original dataframe
df_val.head() # display the first 5 rows of the dataframe table
# In[12]:
# Comments to score for the competition submission (no labels).
df_sub = pd.read_csv("../input/jigsaw-toxic-severity-rating/comments_to_score.csv")
print(df_sub.shape)
df_sub.head()
# ## Dataset class
# In[13]:
class SeverityDataset(Dataset):
    """Token-encodes comment text and (optionally) its 6 toxicity targets.

    Each item is a dict with 'ids' and 'mask' long tensors of length
    ``max_length``, plus a float 'target' vector when ``load_target`` is True.
    """

    def __init__(self, df, tokenizer: AutoTokenizer, max_length, load_target=True):
        self.load_target = load_target
        self.df = df
        self.max_len = max_length
        self.tokenizer = tokenizer
        self.text = df['text'].values
        if self.load_target:
            # One row per comment, columns in fixed flag order.
            self.target = np.array(df[['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']])

    def __len__(self):
        return len(self.df)

    def __getitem__(self, index):
        encoded = self.tokenizer.encode_plus(
            self.text[index],
            truncation=True,
            add_special_tokens=True,
            max_length=self.max_len,
            padding='max_length'
        )
        sample = {
            'ids': torch.tensor(encoded['input_ids'], dtype=torch.long),
            'mask': torch.tensor(encoded['attention_mask'], dtype=torch.long),
        }
        if self.load_target:
            sample['target'] = torch.tensor(self.target[index], dtype=torch.float)
        return sample
### TEST
# Smoke-test the dataset: encode and print the first sample.
temp_ds = SeverityDataset(df_cc, CONFIG['tokenizer'], CONFIG['max_length'])
print(temp_ds[0])
# del temp_ds
# #### Test dataset class
# In[14]:
class ContrastiveDataset(Dataset):
    """Encodes (more_toxic, less_toxic) comment pairs for ranking training.

    Each item holds the token ids and attention masks of both comments plus a
    constant ranking target of 1 (the 'more_toxic' side must score higher).
    """

    def __init__(self, df, tokenizer: AutoTokenizer, max_length):
        self.df = df
        self.max_len = max_length
        self.tokenizer = tokenizer
        self.more_toxic = df['more_toxic'].values
        self.less_toxic = df['less_toxic'].values

    def __len__(self):
        return len(self.df)

    def _encode(self, text):
        # Shared tokenizer invocation for both sides of a pair.
        return self.tokenizer.encode_plus(
            text,
            truncation=True,
            add_special_tokens=True,
            max_length=self.max_len,
            padding='max_length'
        )

    def __getitem__(self, index):
        more = self._encode(self.more_toxic[index])
        less = self._encode(self.less_toxic[index])
        # Severity scores live in a unit range, so the difference target for
        # every (more, less) pair is fixed at 1.
        return {
            'more_toxic_ids': torch.tensor(more['input_ids'], dtype=torch.long),
            'more_toxic_mask': torch.tensor(more['attention_mask'], dtype=torch.long),
            'less_toxic_ids': torch.tensor(less['input_ids'], dtype=torch.long),
            'less_toxic_mask': torch.tensor(less['attention_mask'], dtype=torch.long),
            'target': torch.tensor(1, dtype=torch.long)
        }
### TEST
# Smoke-test the pairwise dataset: encode and print the first pair.
temp_ds = ContrastiveDataset(df_val, CONFIG['tokenizer'], CONFIG['max_length'])
print(temp_ds[0])
del temp_ds
# ### Define model for pretraining
# In[15]:
class SeverityModel(nn.Module):
    """Transformer encoder with a 6-way linear head (one logit per toxicity flag)."""
    def __init__(self, model_path): # initialization with the pretrained checkpoint path
        # set the class attributes
        super().__init__()
        self.model = AutoModel.from_pretrained(model_path)
        self.drop = nn.Dropout(p=0.2)
        self.fc = nn.Linear(768, 6)  # 768 = distilbert hidden size, 6 toxicity flags
    def forward(self, ids, mask):
        # out[0] is the last hidden state, shape (batch, seq_len, hidden).
        # NOTE(review): no pooling is applied, so the head emits one 6-vector
        # per token rather than per comment — confirm this is intentional.
        out = self.model(input_ids=ids,attention_mask=mask,
                         output_hidden_states=False)
        out = self.drop(out[0])
        outputs = self.fc(out)
        return outputs # returns the obtained values
# In[50]:
class JigsawModel(nn.Module):
    """Encoder + 300-unit projection; in 'contrastive' mode a final scalar head.

    mode: 'severity' returns the 300-dim projection; 'contrastive' adds a
    1-dim scoring head for margin-ranking training.
    """
    def __init__(self, model_path, mode='severity'): # model_path: pretrained checkpoint; mode selects the head
        # set the class attributes
        super(JigsawModel, self).__init__()
        self.mode = mode
        self.model = AutoModel.from_pretrained(model_path)
        self.drop = nn.Dropout(p=0.5)
        self.fc = nn.Linear(768, 300)
        self.fc2 = nn.Linear(300, 1)
    def forward(self, ids, mask):
        # out[0] is the last hidden state (batch, seq_len, 768).
        # NOTE(review): the mean-pooling line is commented out, so outputs stay
        # per-token — confirm downstream loss expects that shape.
        out = self.model(input_ids=ids,attention_mask=mask,
                         output_hidden_states=False)
        out = self.drop(out[0])
        #out = out.mean(1)
        #print(out.shape)
        out = self.fc(out).relu()
        #print(out.shape)
        if self.mode == 'contrastive':
            out = self.fc2(out)
        return out # returns the obtained values
class MultiModel(nn.Module):
    """Encoder with two heads sharing a 300-unit trunk: 6 flag logits + 1 score.

    Returns a (flags, score) tuple so classification and severity objectives
    can be trained jointly.
    """
    def __init__(self, model_path, mode='severity'): # model_path: pretrained checkpoint; mode kept for interface parity
        # set the class attributes
        super(MultiModel, self).__init__()
        self.mode = mode  # NOTE(review): stored but unused in forward()
        self.model = AutoModel.from_pretrained(model_path)
        self.drop = nn.Dropout(p=0.2)
        self.fc3 = nn.Linear(768, 300)   # shared trunk
        self.fc = nn.Linear(300, 6)      # per-flag head
        self.fc2 = nn.Linear(300, 1)     # severity head
    def forward(self, ids, mask):
        out = self.model(input_ids=ids,attention_mask=mask,
                         output_hidden_states=False)
        out = self.drop(out[0])
        # relu applied to the hidden states before and after the trunk.
        out = self.fc3(out.relu()).relu()
        return (self.fc(out), self.fc2(out)) # returns the obtained values
# ## Severity
# ## Train
# Here we train on the classification dataset as we already have a severity score as target.
# In[17]:
def train_severity_one_epoch(model, criterion, optimizer, scheduler, dataloader, device, epoch):
    # one epoch training function
    # Runs one pass over `dataloader`, with optional gradient accumulation
    # (CONFIG['n_accumulate']); returns the running mean loss for the epoch.
    model.train()
    dataset_size = 0
    running_loss = 0.0
    epoch_loss = 0.0
    bar = tqdm(enumerate(dataloader), total=len(dataloader))
    for step, data in bar:
        ids = data['ids'].to(device, dtype = torch.long)
        mask = data['mask'].to(device, dtype = torch.long)
        # NOTE(review): SeverityDataset emits float targets; casting to long
        # truncates them to integers here — confirm the criterion expects long.
        targets = data['target'].to(device, dtype=torch.long)
        batch_size = ids.size(0)
        outputs = model(ids, mask)
        loss = criterion(outputs, targets)
        # Scale so the accumulated gradient matches a full-batch gradient.
        loss = loss / CONFIG['n_accumulate']
        loss.backward()
        # Step/zero only every n_accumulate mini-batches.
        if (step + 1) % CONFIG['n_accumulate'] == 0:
            optimizer.step()
            # zero the parameter gradients
            optimizer.zero_grad()
            if scheduler is not None:
                scheduler.step()
        running_loss += (loss.item() * batch_size)
        dataset_size += batch_size
        epoch_loss = running_loss / dataset_size
        bar.set_postfix(Epoch=epoch, Train_Loss=epoch_loss,
                        LR=optimizer.param_groups[0]['lr'])
        if CONFIG['is_dev_run'] and step > 5:
            # Break after a few steps in dev mode
            break
    gc.collect()
    return epoch_loss # returns the result of the training function for one epoch
# ### Validate
# In[18]:
@torch.no_grad()
def valid_severity_one_epoch(model, criterion, optimizer, dataloader, device, epoch): # one epoch check function
model.eval()
dataset_size = 0
running_loss = 0.0
epoch_loss = 0.0
| |
<gh_stars>10-100
# -*- coding: utf-8 -*-
# Copyright 2022, SERTIT-ICube - France, https://sertit.unistra.fr/
# This file is part of eoreader project
# https://github.com/sertit/eoreader
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Class for custom products """
import logging
from datetime import datetime
from pathlib import Path
from typing import Union
import geopandas as gpd
import numpy as np
import rasterio
from cloudpathlib import CloudPath
from lxml import etree
from lxml.builder import E
from rasterio import crs
from rasterio.enums import Resampling
from sertit import files, misc, rasters, vectors
from sertit.rasters import XDS_TYPE
from eoreader import cache, cached_property, utils
from eoreader.bands import (
BandNames,
OpticalBands,
SarBands,
is_clouds,
is_dem,
is_index,
is_sat_band,
to_band,
)
from eoreader.exceptions import InvalidBandError, InvalidProductError, InvalidTypeError
from eoreader.products.product import Product, SensorType
from eoreader.reader import Platform
from eoreader.utils import DATETIME_FMT, EOREADER_NAME
LOGGER = logging.getLogger(EOREADER_NAME)
# -- CUSTOM FIELDS --
# Keyword names accepted by CustomProduct(**kwargs); parsed in _pre_init.
NAME = "name"
SENSOR_TYPE = "sensor_type"
ACQ_DATETIME = "acquisition_datetime"
BAND_MAP = "band_map"
PLATFORM = "platform"
DEF_RES = "default_resolution"
PROD_TYPE = "product_type"
SUN_AZ = "sun_azimuth"
SUN_ZEN = "sun_zenith"
# -- CUSTOM
CUSTOM = "CUSTOM"  # sentinel used as default platform and product type
class CustomProduct(Product):
"""Custom products"""
    def __init__(
        self,
        product_path: Union[str, CloudPath, Path],
        archive_path: Union[str, CloudPath, Path] = None,
        output_path: Union[str, CloudPath, Path] = None,
        remove_tmp: bool = False,
        **kwargs,
    ) -> None:
        """Create a custom product from a stacked raster file.

        kwargs must contain at least BAND_MAP and SENSOR_TYPE (see _pre_init).
        """
        # Declared before super().__init__ because the base init chain calls
        # _pre_init, which may populate them from kwargs.
        self.sun_az = None
        """Sun mean angles (azimuth)"""
        self.sun_zen = None
        """Sun mean angles (zenith)"""
        # Initialization from the super class
        super().__init__(product_path, archive_path, output_path, remove_tmp, **kwargs)
def _pre_init(self, **kwargs) -> None:
"""
Function used to pre_init the products
(setting needs_extraction and so on)
"""
self.needs_extraction = False
# -- Parse the kwargs
misc.check_mandatory_keys(kwargs, [BAND_MAP, SENSOR_TYPE])
# Sensor type
self.sensor_type = SensorType.convert_from(kwargs[SENSOR_TYPE])[0]
self.band_names = (
OpticalBands() if self.sensor_type == SensorType.OPTICAL else SarBands()
)
# Band map
band_names = kwargs[BAND_MAP] # Shouldn't be empty
assert isinstance(band_names, dict)
band_names = {to_band(key)[0]: val for key, val in band_names.items()}
assert [is_sat_band(band) for band in band_names.keys()]
self.band_names.map_bands(band_names)
# Test on the product
with rasterio.open(str(self.get_default_band_path())) as ds:
assert (
len(band_names) == ds.count
), f"You should specify {ds.count} bands in band_map, not {len(band_names)} !"
# Datetime
self.datetime = kwargs.get(ACQ_DATETIME, datetime.now())
if isinstance(self.datetime, str):
try:
self.datetime = datetime.fromisoformat(self.datetime)
except ValueError:
self.datetime = datetime.strptime(self.datetime, "%Y%m%dT%H%M%S")
assert isinstance(self.datetime, datetime)
# Sun angles
self.sun_az = kwargs.get(SUN_AZ, None)
self.sun_zen = kwargs.get(SUN_ZEN, None)
# Others
self.name = kwargs.get(NAME, files.get_filename(self.path))
self.platform = Platform.convert_from(kwargs.get(PLATFORM, CUSTOM))[0]
self.resolution = kwargs.get(DEF_RES, None)
self.product_type = kwargs.get(PROD_TYPE, CUSTOM)
def _post_init(self, **kwargs) -> None:
"""
Function used to post_init the products
(setting product-type, band names and so on)
"""
pass
def _get_name(self) -> str:
"""
Set product real name from metadata
Returns:
str: True name of the product (from metadata)
"""
return self.name
def get_datetime(self, as_datetime: bool = False) -> str:
"""
Set product real name from metadata
Returns:
str: True name of the product (from metadata)
"""
if as_datetime:
date = self.datetime
else:
date = self.datetime.strftime(DATETIME_FMT)
return date
def _get_platform(self) -> Platform:
return self.platform
def _set_resolution(self) -> float:
"""
Set product default resolution (in meters)
"""
if self.resolution is None:
with rasterio.open(str(self.get_default_band_path())) as ds:
return ds.res[0]
def _set_product_type(self) -> None:
"""Set products type"""
pass
def get_default_band(self) -> BandNames:
"""
Get default band: the first one of the stack
Returns:
str: Default band
"""
return list(self.band_names.keys())[0]
def get_default_band_path(self, **kwargs) -> Union[CloudPath, Path]:
"""
Get default band path: the stack path.
Args:
kwargs: Additional arguments
Returns:
Union[CloudPath, Path]: Default band path
"""
return self.path
    @cached_property
    def extent(self) -> gpd.GeoDataFrame:
        """
        Get UTM extent of stack.

        Returns:
            gpd.GeoDataFrame: Footprint in UTM
        """
        # Get extent, reprojected to the product CRS (guaranteed projected by
        # the `crs` property).
        return rasters.get_extent(self.get_default_band_path()).to_crs(self.crs)
    @cached_property
    def footprint(self) -> gpd.GeoDataFrame:
        """
        Get UTM footprint of the products (without nodata, *in french == emprise utile*)

        .. code-block:: python

            >>> from eoreader.reader import Reader
            >>> path = r"S2A_MSIL1C_20200824T110631_N0209_R137_T30TTK_20200824T150432.SAFE.zip"
            >>> prod = Reader().open(path)
            >>> prod.footprint
               index                                           geometry
            0      0  POLYGON ((199980.000 4500000.000, 199980.000 4...

        Returns:
            gpd.GeoDataFrame: Footprint as a GeoDataFrame
        """
        # Vectorize the valid-data mask of the stack (nodata excluded).
        return rasters.get_footprint(
            self.get_default_band_path()
        )  # Processed by SNAP: the nodata is set
@cached_property
def crs(self) -> crs.CRS:
"""
Get UTM projection of stack.
Returns:
crs.CRS: CRS object
"""
with rasterio.open(str(self.path)) as ds:
def_crs = ds.crs
if def_crs.is_projected:
pass
else:
extent_wgs84 = rasters.get_extent(self.get_default_band_path())
# Get upper-left corner and deduce UTM proj from it
crs_str = vectors.corresponding_utm_projection(
extent_wgs84.bounds.minx, extent_wgs84.bounds.maxy
)
raise InvalidProductError(
"Only stacks with projected CRS can be processed! "
f"Please reproject it to the corresponding UTM projection ({crs_str})!"
)
return def_crs
def get_band_paths(
self, band_list: list, resolution: float = None, **kwargs
) -> dict:
"""
Get the stack path for each asked band
Args:
band_list (list): List of the wanted bands
resolution (float): Band resolution
kwargs: Other arguments used to load bands
Returns:
dict: Dictionary containing the path of each queried band
"""
band_paths = {}
for band in band_list:
band_paths[band] = self.path
return band_paths
    def get_existing_band_paths(self) -> dict:
        """
        Get the stack path.

        Returns:
            dict: Dictionary containing the path of every orthorectified bands
        """
        # NOTE(review): despite the declared dict return, this returns the raw
        # stack path — confirm callers expect a path here.
        return self.path
def get_existing_bands(self) -> list:
"""
Get the bands of the stack.
Returns:
list: List of existing bands in the products
"""
return [name for name, nb in self.band_names.items() if nb]
    # unused band_name (compatibility reasons)
    # pylint: disable=W0613
    def _read_band(
        self,
        path: Union[CloudPath, Path],
        band: BandNames = None,
        resolution: Union[tuple, list, float] = None,
        size: Union[list, tuple] = None,
        **kwargs,
    ) -> XDS_TYPE:
        """
        Read band from disk.

        .. WARNING::
            Invalid pixels are not managed here

        Args:
            path (Union[CloudPath, Path]): Band path
            band (BandNames): Band to read
            resolution (Union[tuple, list, float]): Resolution of the wanted band, in dataset resolution unit (X, Y)
            size (Union[tuple, list]): Size of the array (width, height). Not used if resolution is provided.
            kwargs: Other arguments used to load bands

        Returns:
            XDS_TYPE: Band xarray
        """
        # Select only the stack layer mapped to this band; resample bilinearly
        # and normalise the dtype to float32.
        return utils.read(
            path,
            resolution=resolution,
            size=size,
            resampling=Resampling.bilinear,
            indexes=[self.band_names[band]],
            **kwargs,
        ).astype(np.float32)
    def _load_bands(
        self,
        bands: Union[list, BandNames],
        resolution: float = None,
        size: Union[list, tuple] = None,
        **kwargs,
    ) -> dict:
        """
        Load bands as numpy arrays with the same resolution (and same metadata).

        Args:
            bands (list, BandNames): List of the wanted bands
            resolution (float): Band resolution in meters
            size (Union[tuple, list]): Size of the array (width, height). Not used if resolution is provided.
            kwargs: Other arguments used to load bands

        Returns:
            dict: Dictionary {band_name, band_xarray}
        """
        # Return empty if no band are specified
        if not bands:
            return {}
        # Get band paths (accept a single band as well as a list)
        if not isinstance(bands, list):
            bands = [bands]
        if resolution is None and size is not None:
            # Derive a resolution from the requested pixel size
            resolution = self._resolution_from_size(size)
        band_paths = self.get_band_paths(bands, resolution)
        # Open bands and get array (resampled if needed)
        band_arrays = {}
        for band_name, band_path in band_paths.items():
            band_arrays[band_name] = self._read_band(
                band_path, band=band_name, resolution=resolution, size=size, **kwargs
            )
        return band_arrays
    def _load(
        self,
        bands: list,
        resolution: float = None,
        size: Union[list, tuple] = None,
        **kwargs,
    ) -> dict:
        """
        Core function loading bands.

        Dispatches each requested band: satellite bands are read from the
        stack, DEM bands are delegated to _load_dem, indices and clouds are
        rejected for custom products.

        Args:
            bands (list): Band list
            resolution (float): Resolution of the band, in meters
            size (Union[tuple, list]): Size of the array (width, height). Not used if resolution is provided.
            kwargs: Other arguments used to load bands

        Returns:
            Dictionary {band_name, band_xarray}
        """
        band_list = []
        dem_list = []
        for band in bands:
            if is_index(band):
                raise NotImplementedError(
                    "For now, no index is implemented for SAR data."
                )
            elif is_sat_band(band):
                if not self.has_band(band):
                    raise InvalidBandError(
                        f"{band} cannot be retrieved from {self.condensed_name}"
                    )
                else:
                    band_list.append(band)
            elif is_dem(band):
                dem_list.append(band)
            elif is_clouds(band):
                raise NotImplementedError(
                    f"Clouds cannot be retrieved from custom data ({self.condensed_name})."
                )
            else:
                raise InvalidTypeError(f"{band} is neither a band nor an index !")
        # Check if DEM is set and exists
        if dem_list:
            self._check_dem_path()
        # Load bands
        bands = self._load_bands(band_list, resolution=resolution, size=size, **kwargs)
        # Add DEM
        bands.update(
            self._load_dem(dem_list, resolution=resolution, size=size, **kwargs)
        )
        return bands
def _compute_hillshade(
self,
dem_path: str = "",
resolution: Union[float, tuple] = None,
size: Union[list, tuple] = None,
resampling: Resampling = Resampling.bilinear,
) -> | |
broker._waitForStable()
return self.schemaCache.getClasses(packageName)
def getSchema(self, classKey):
""" Get the schema for a QMF class """
for broker in self.brokers:
broker._waitForStable()
return self.schemaCache.getSchema(classKey)
    def bindPackage(self, packageName):
        """ Filter object and event callbacks to only those elements of the
        specified package. Also filters newPackage and newClass callbacks to the
        given package. Only valid if userBindings is True.
        """
        if not self.userBindings:
            raise Exception("userBindings option must be set for this Session.")
        if not self.rcvObjects and not self.rcvEvents:
            raise Exception("Session needs to be configured to receive events or objects.")
        # V1 routing keys carry dotted package names; V2 keys replace '.' with '_'.
        v1keys = ["console.obj.*.*.%s.#" % packageName, "console.event.*.*.%s.#" % packageName]
        v2keys = ["agent.ind.data.%s.#" % packageName.replace(".", "_"),
                  "agent.ind.event.%s.#" % packageName.replace(".", "_"),]
        # Record the filters so indications arriving later can be matched.
        if (packageName, None) not in self.class_filter:
            self.class_filter.append((packageName, None))
        if (packageName, None) not in self.event_filter:
            self.event_filter.append((packageName, None))
        # Remember the keys so newly added brokers get the same bindings.
        self.v1BindingKeyList.extend(v1keys)
        self.v2BindingKeyList.extend(v2keys)
        # Apply the bindings immediately on every connected broker.
        for broker in self.brokers:
            if broker.isConnected():
                for v1key in v1keys:
                    broker.amqpSession.exchange_bind(exchange="qpid.management", queue=broker.topicName, binding_key=v1key)
                if broker.brokerSupportsV2:
                    for v2key in v2keys:
                        # data indications should arrive on the unsolicited indication queue
                        broker.amqpSession.exchange_bind(exchange="qmf.default.topic", queue=broker.v2_topic_queue_ui, binding_key=v2key)
    def bindClass(self, pname, cname=None):
        """ Filter object callbacks to only those objects of the specified package
        and optional class. Will also filter newPackage/newClass callbacks to the
        specified package and class. Only valid if userBindings is True and
        rcvObjects is True.
        """
        if not self.userBindings:
            raise Exception("userBindings option must be set for this Session.")
        if not self.rcvObjects:
            raise Exception("Session needs to be configured with rcvObjects=True.")
        # Build one V1 and one V2 binding key; cname=None binds the whole package.
        if cname is not None:
            v1key = "console.obj.*.*.%s.%s.#" % (pname, cname)
            v2key = "agent.ind.data.%s.%s.#" % (pname.replace(".", "_"), cname.replace(".", "_"))
        else:
            v1key = "console.obj.*.*.%s.#" % pname
            v2key = "agent.ind.data.%s.#" % pname.replace(".", "_")
        # Remember keys for brokers added later, and record the class filter.
        self.v1BindingKeyList.append(v1key)
        self.v2BindingKeyList.append(v2key)
        if (pname, cname) not in self.class_filter:
            self.class_filter.append((pname, cname))
        # Apply the binding immediately on every connected broker.
        for broker in self.brokers:
            if broker.isConnected():
                broker.amqpSession.exchange_bind(exchange="qpid.management", queue=broker.topicName, binding_key=v1key)
                if broker.brokerSupportsV2:
                    # data indications should arrive on the unsolicited indication queue
                    broker.amqpSession.exchange_bind(exchange="qmf.default.topic", queue=broker.v2_topic_queue_ui, binding_key=v2key)
def bindClassKey(self, classKey):
""" Filter object callbacks to only those objects of the specified
class. Will also filter newPackage/newClass callbacks to the specified
package and class. Only valid if userBindings is True and rcvObjects is
True.
"""
pname = classKey.getPackageName()
cname = classKey.getClassName()
self.bindClass(pname, cname)
    def bindEvent(self, pname, ename=None):
        """ Filter event callbacks only from a particular class by package and
        event name, or all events in a package if ename=None. Will also filter
        newPackage/newClass callbacks to the specified package and class. Only
        valid if userBindings is True and rcvEvents is True.
        """
        if not self.userBindings:
            raise Exception("userBindings option must be set for this Session.")
        if not self.rcvEvents:
            raise Exception("Session needs to be configured with rcvEvents=True.")
        # Build one V1 and one V2 binding key; ename=None binds all package events.
        if ename is not None:
            v1key = "console.event.*.*.%s.%s.#" % (pname, ename)
            v2key = "agent.ind.event.%s.%s.#" % (pname.replace(".", "_"), ename.replace(".", "_"))
        else:
            v1key = "console.event.*.*.%s.#" % pname
            v2key = "agent.ind.event.%s.#" % pname.replace(".", "_")
        # Remember keys for brokers added later, and record the event filter.
        self.v1BindingKeyList.append(v1key)
        self.v2BindingKeyList.append(v2key)
        if (pname, ename) not in self.event_filter:
            self.event_filter.append((pname, ename))
        # Apply the binding immediately on every connected broker.
        for broker in self.brokers:
            if broker.isConnected():
                broker.amqpSession.exchange_bind(exchange="qpid.management", queue=broker.topicName, binding_key=v1key)
                if broker.brokerSupportsV2:
                    # event indications should arrive on the unsolicited indication queue
                    broker.amqpSession.exchange_bind(exchange="qmf.default.topic", queue=broker.v2_topic_queue_ui, binding_key=v2key)
def bindEventKey(self, eventKey):
""" Filter event callbacks only from a particular class key. Will also
filter newPackage/newClass callbacks to the specified package and
class. Only valid if userBindings is True and rcvEvents is True.
"""
pname = eventKey.getPackageName()
ename = eventKey.getClassName()
self.bindEvent(pname, ename)
    def bindAgent(self, vendor=None, product=None, instance=None, label=None):
        """ Receive heartbeats, newAgent and delAgent callbacks only for those
        agent(s) that match the passed identification criteria:
        V2 agents: vendor, optionally product and instance strings
        V1 agents: the label string.
        Only valid if userBindings is True.
        """
        if not self.userBindings:
            raise Exception("Session not configured for binding specific agents.")
        if vendor is None and label is None:
            raise Exception("Must specify at least a vendor (V2 agents)"
                            " or label (V1 agents).")
        if vendor:  # V2 agent identification
            # V2 heartbeat keys replace '.' with '_' in vendor/product.
            if product is not None:
                v2key = "agent.ind.heartbeat.%s.%s.#" % (vendor.replace(".", "_"), product.replace(".", "_"))
            else:
                v2key = "agent.ind.heartbeat.%s.#" % vendor.replace(".", "_")
            self.v2BindingKeyList.append(v2key)
            # allow wildcards - only add filter if a non-wildcarded component is given
            if vendor == "*":
                vendor = None
            if product == "*":
                product = None
            if instance == "*":
                instance = None
            if vendor or product or instance:
                if (vendor, product, instance) not in self.agent_filter:
                    self.agent_filter.append((vendor, product, instance))
            # Apply the binding immediately on every connected V2 broker.
            for broker in self.brokers:
                if broker.isConnected():
                    if broker.brokerSupportsV2:
                        # heartbeats should arrive on the heartbeat queue
                        broker.amqpSession.exchange_bind(exchange="qmf.default.topic",
                                                         queue=broker.v2_topic_queue_hb,
                                                         binding_key=v2key)
        elif label != "*":  # non-wildcard V1 agent label
            # V1 format heartbeats do not have any agent identifier in the routing
            # key, so we cannot filter them by bindings.
            if label not in self.agent_filter:
                self.agent_filter.append(label)
def getAgents(self, broker=None):
""" Get a list of currently known agents """
brokerList = []
if broker == None:
for b in self.brokers:
brokerList.append(b)
else:
brokerList.append(broker)
for b in brokerList:
b._waitForStable()
agentList = []
for b in brokerList:
for a in b.getAgents():
agentList.append(a)
return agentList
def makeObject(self, classKey, **kwargs):
""" Create a new, unmanaged object of the schema indicated by classKey """
schema = self.getSchema(classKey)
if schema == None:
raise Exception("Schema not found for classKey")
return Object(None, schema, None, True, True, kwargs)
    def getObjects(self, **kwargs):
        """ Get a list of objects from QMF agents.
        All arguments are passed by name(keyword).

        The class for queried objects may be specified in one of the following ways:

        _schema = <schema>      - supply a schema object returned from getSchema.
        _key = <key>            - supply a classKey from the list returned by getClasses.
        _class = <name>         - supply a class name as a string.  If the class name exists
                                  in multiple packages, a _package argument may also be supplied.
        _objectId = <id>        - get the object referenced by the object-id

        If objects should be obtained from only one agent, use the following argument.
        Otherwise, the query will go to all agents.

        _agent = <agent>        - supply an agent from the list returned by getAgents.

        If the get query is to be restricted to one broker (as opposed to all connected brokers),
        add the following argument:

        _broker = <broker>      - supply a broker as returned by addBroker.

        The default timeout for this synchronous operation is 60 seconds.  To change the timeout,
        use the following argument:

        _timeout = <time in seconds>

        If additional arguments are supplied, they are used as property selectors.  For example,
        if the argument name="test" is supplied, only objects whose "name" property is "test"
        will be returned in the result.
        """
        # Resolve which brokers to query: a single explicit one, or all.
        if "_broker" in kwargs:
            brokerList = []
            brokerList.append(kwargs["_broker"])
        else:
            brokerList = self.brokers
        for broker in brokerList:
            broker._waitForStable()
            # Prime the agent list by querying the broker's own 'agent'
            # objects first (unless that is exactly what was asked for).
            if broker.isConnected():
                if "_package" not in kwargs or "_class" not in kwargs or \
                      kwargs["_package"] != "org.apache.qpid.broker" or \
                      kwargs["_class"] != "agent":
                    self.getObjects(_package = "org.apache.qpid.broker", _class = "agent",
                                    _agent = broker.getAgent(1,0))
        # Build the list of agents to query.
        agentList = []
        if "_agent" in kwargs:
            # Explicit agent: must belong to one of the selected brokers.
            agent = kwargs["_agent"]
            if agent.broker not in brokerList:
                raise Exception("Supplied agent is not accessible through the supplied broker")
            if agent.broker.isConnected():
                agentList.append(agent)
        else:
            if "_objectId" in kwargs:
                # Narrow to agents whose broker/agent banks match the object-id.
                oid = kwargs["_objectId"]
                for broker in brokerList:
                    for agent in broker.getAgents():
                        if agent.getBrokerBank() == oid.getBrokerBank() and agent.getAgentBank() == oid.getAgentBank():
                            agentList.append(agent)
            else:
                for broker in brokerList:
                    for agent in broker.getAgents():
                        if agent.broker.isConnected():
                            agentList.append(agent)
        if len(agentList) == 0:
            return []
        #
        # We now have a list of agents to query, start the queries and gather the results.
        #
        request = SessionGetRequest(len(agentList))
        for agent in agentList:
            agent.getObjects(request, **kwargs)
        timeout = 60
        if '_timeout' in kwargs:
            timeout = kwargs['_timeout']
        # Block until all agents reply or the timeout expires.
        request.wait(timeout)
        return request.result
def addEventFilter(self, **kwargs):
"""Filter unsolicited events based on package and event name.
QMF v2 also can filter on vendor, product, and severity values.
By default, a console receives unsolicted events by binding to:
qpid.management/console.event.# (v1)
qmf.default.topic/agent.ind.event.# (v2)
A V1 event filter binding uses the pattern:
qpid.management/console.event.*.*[.<package>[.<event>]].#
A V2 event filter binding uses the pattern:
qmf.default.topic/agent.ind.event.<Vendor|*>.<Product|*>.<severity|*>.<package|*>.<event|*>.#
"""
package = kwargs.get("package", "*")
event = kwargs.get("event", "*")
vendor = kwargs.get("vendor", "*")
product = kwargs.get("product", "*")
severity = kwargs.get("severity", "*")
if package == "*" and event != "*":
raise Exception("'package' parameter required if 'event' parameter"
" supplied")
# V1 key - can only filter on package (and event)
if package == | |
if "transition_y" in slide.overlay_text else "center"
text = slide.overlay_text["title"].replace(':', r'\:')
# fixed text in the middle
if transition_x == "center":
x = "(main_w/2-text_w/2)"
# scroll from left to right till the middle of the image in half of the duration time
elif transition_x == "left-in" or transition_x == "left-to-center":
x = "'if(lte(x,(main_w/2-text_w/2)),(t-%s)*(main_w/2-text_w/2)/(%s/2),(main_w/2-text_w/2))'" \
% (text_offset, text_duration)
# same but from right to left
elif transition_x == "right-in" or transition_x == "right-to-center":
x = "'if(gte(x,(main_w/2-text_w/2)),main_w-(t-%s)*(main_w/2-text_w/2)/(%s/2),(main_w/2-text_w/2))'" \
% (text_offset, text_duration)
# fixed text in the middle
if transition_y == "center":
y = "(main_h/2-text_h/2)"
# scroll from top to bottom
elif transition_y == "top-to-bottom":
y = "'-text_h + ((main_h+text_h)/%s)*(t-%s)'" % (text_duration, text_offset)
# same but from bottom to top
elif transition_y == "bottom-to-top":
y = "'main_h-(((main_h+text_h)/%s)*(t-%s))'" % (text_duration, text_offset)
filters.append("drawtext=text='%s':line_spacing=20:fontsize=%s: "
"fontcolor=%s:y=%s:x=%s:borderw=1%s%s:enable='between(t,%s,%s)'"
% (text,
font_size,
font_color,
y,
x,
font,
font_file,
text_offset,
text_offset + text_duration
)
)
# if isinstance(slide, ImageSlide):
# slide.slide_duration_min = slide.slide_duration_min + duration
# Time
filters.append("setpts=PTS-STARTPTS")
# while scaling the SAR is changed as well, so we need to reset it here:
# see https://trac.ffmpeg.org/ticket/1079#comment:2
# see https://ffmpeg.org/ffmpeg-filters.html#scale-1
# The scale filter forces the output display aspect ratio to be the same
# of the input, by changing the output sample aspect ratio.
filters.append("setsar=1")
# add transparency for possible fade-in/fade-out
filters.append("format=rgba")
# split video in start, main, end sections
# get fade in duration from previous slides fade duration
fade_in_end = self.getSlideFadeOutDuration(i - 1, True) if i > 0 else 0
fade_out_start = self.getSlideFadeOutPosition(i, True)
splits = []
if fade_in_end > 0:
splits.append("start")
if fade_out_start < slide.getFrames():
splits.append("end")
if fade_out_start > fade_in_end:
splits.append("main")
slide.splits = splits
if self.config["generate_temp"]:
for step in splits:
tempfilters = filters[:]
if step == "start":
tempfilters.append(
"trim=start_frame=%s:end_frame=%s,setpts=PTS-STARTPTS" % (0, fade_in_end))
if step == "main":
tempfilters.append(
"trim=start_frame=%s:end_frame=%s,setpts=PTS-STARTPTS" % (fade_in_end, fade_out_start))
if step == "end":
tempfilters.append(
"trim=start_frame=%s:end_frame=%s,setpts=PTS-STARTPTS" % (fade_out_start, slide.getFrames()))
file = slide.tempfile if isinstance(slide, ImageSlide) else slide.file
self.queue.addItem([file], tempfilters, "%s_%s" % (i, step))
else:
filters.append("split=%s" % (len(splits)))
filter_chains.append("[%s:v]" % (i) + ", ".join(filters) + "".join(["[v%sout-%s]" % (i, s) for s in splits]))
# prevent buffer overflow with fifo:
# https://trac.ffmpeg.org/ticket/4950#comment:1
# https://superuser.com/a/1135202
# https://superuser.com/a/1148850
# https://stackoverflow.com/a/40746988
# https://stackoverflow.com/a/51978577
if "start" in splits:
filter_chains.append("[v%sout-start]fifo,trim=start_frame=%s:end_frame=%s,"
"setpts=PTS-STARTPTS[v%sstart]" % (i, 0, fade_in_end, i))
if "main" in splits:
filter_chains.append("[v%sout-main]fifo,trim=start_frame=%s:end_frame=%s,"
"setpts=PTS-STARTPTS[v%smain]" % (i, fade_in_end, fade_out_start, i))
if "end" in splits:
filter_chains.append("[v%sout-end]fifo,trim=start_frame=%s:end_frame=%s,setpts=PTS-STARTPTS[v%send]" % (
i, fade_out_start, slide.getFrames(), i))
# Concat videos
videos = []
for i, slide in enumerate(self.getSlides()):
if "start" in slide.splits:
if self.config["generate_temp"]:
end = "[v0]"
start = "[v1]"
transition = ""
else:
end = "[v%send]" % (i - 1)
start = "[v%sstart]" % (i)
transition = "[v%strans]" % (i)
filter, _ = self.getTransition(i - 1, end, start, transition)
if filter is not None:
if self.config["generate_temp"]:
# temporary transition video
tempvideo_end = "%s%s_%s.mp4" % (self.tempFileFullPrefix, i - 1, "end")
tempvideo_start = "%s%s_%s.mp4" % (self.tempFileFullPrefix, i, "start")
filter = "[0:v]format=rgba[v0];[1:v]format=rgba[v1];%s, setsar=1" % (filter)
trans_slide = self.getSlides()[i - 1]
output = self.queue.addItem(
[tempvideo_end, tempvideo_start], filter, "%s_trans_%s" % (i, trans_slide.transition))
self.tempInputFiles.append(output)
else:
filter_chains.append(filter)
videos.append(transition)
else:
if self.config["generate_temp"]:
self.tempInputFiles.append("%s%s_%s.mp4" % (self.tempFileFullPrefix, i - 1, "end"))
self.tempInputFiles.append("%s%s_%s.mp4" % (self.tempFileFullPrefix, i, "start"))
else:
videos.append("[v%send]" % (i - 1))
videos.append("[v%sstart]" % (i))
# append video between transitions
if "main" in slide.splits:
if self.config["generate_temp"]:
self.tempInputFiles.append("%s%s_%s.mp4" % (self.tempFileFullPrefix, i, "main"))
else:
videos.append("[v%smain]" % (i))
# on the last slide the end needs to be added (if available)
# if "end" in slide.splits and i == len(self.getSlides())-1:
# videos.append("[v%send]" %(i))
# use input files instead of filter outputs
if self.config["generate_temp"]:
count = 0
while len(self.tempInputFiles) > self.reduceVariable:
files = self.tempInputFiles
self.tempInputFiles = []
temp = []
for k, video in enumerate(files):
temp.append(video)
if len(temp) >= self.reduceVariable:
filter_names = ["[%s]" % (i) for i in range(len(temp))]
filter = "%s concat=n=%s" % ("".join(filter_names), len(filter_names))
output = self.queue.addItem(temp, filter, "%s_%s_combine" % (count, k))
# add concated video
self.tempInputFiles.append(output)
temp = []
# add remaining files
self.tempInputFiles.extend(temp)
count = count + 1
videos = ["[%s:v]" % (i) for i in range(len(self.tempInputFiles))]
subtitles = ""
# Burn subtitles to last element
if burnSubtitles and self.hasSubtitles():
subtitles = ",subtitles=%s" % (srtFilename)
filter_chains.append("%s concat=n=%s:v=1:a=0%s,format=yuv420p[out]" % ("".join(videos), len(videos), subtitles))
return filter_chains
###################################
# Audio #
###################################
def getBackgroundTracks(self):
return self.background_tracks
def hasAudio(self):
return len(self.background_tracks) > 0 or len([video for video in self.getVideos() if video.has_audio]) > 0
def getMusicFadeOutDuration(self, idx):
# first and last slide should fade the total music in/out
if idx < 0 or idx == len(self.getSlides()) - 1:
slide = self.getSlides()[idx]
return slide.getDuration()
return self.getSlideFadeOutDuration(idx, False)
def getVideoAudioDuration(self):
return sum([slide.getDuration() for slide in self.getVideos() if slide.has_audio])
def getAudioDuration(self):
return sum([audio.duration for audio in self.getBackgroundTracks()])
def getAudioFilterChains(self):
logger.debug("get Audio Filter Chains")
offset = len(self.tempInputFiles)
filter_chains = []
# audio from video slides
audio_tracks = []
for i, slide in enumerate(self.getSlides()):
if isinstance(slide, VideoSlide) and slide.has_audio:
audio_tracks.append("[a%s]" % (i))
filters = []
audio_filter = slide.getAudioFilter()
if audio_filter:
filters.append(audio_filter)
# Fade music in filter
if slide.fade_duration > 0:
filters.append("afade=t=in:st=0:d=%s" % (self.getSlideFadeOutDuration(i - 1, False)))
filters.append("afade=t=out:st=%s:d=%s" % (self.getSlideFadeOutPosition(i, False),
self.getSlideFadeOutDuration(i, False)))
filters.append("adelay=%s|%s" % (int(self.getOffset(i, False) * 1000),
int(self.getOffset(i, False) * 1000)))
input_number = i
# append video with sound to input list
if self.config["generate_temp"]:
input_number = offset
self.tempInputFiles.append(slide.file)
offset = offset + 1
filter_chains.append("[%s:a] %s [a%s]" % (input_number, ",".join(filters), i))
# background-tracks
music_input_offset = len(self.getSlides()) if not self.config["generate_temp"] else len(self.tempInputFiles)
background_audio = ["[%s:a]" % (i + music_input_offset) for i, track in enumerate(self.background_tracks)]
if len(background_audio) > 0:
# extract background audio sections between videos
background_sections = []
# is it starting with a video or an image?
section_start_slide = None if isinstance(self.getSlides()[0], VideoSlide) and slide.has_audio else 0
for i, slide in enumerate(self.getSlides()):
# is it a video and we have a start value => end of this section
if isinstance(slide, VideoSlide) and slide.has_audio and section_start_slide is not None:
background_sections.append({"start": self.getOffset(section_start_slide, False),
"fade_in": self.getMusicFadeOutDuration(section_start_slide - 1),
"end": self.getOffset(i, False),
"fade_out": self.getMusicFadeOutDuration(i)})
section_start_slide = None
# is it a image but the previous one was a video => start new section
if isinstance(slide, ImageSlide) and section_start_slide is None:
section_start_slide = i
# the last section is ending with an image => end of section is end generated video
if section_start_slide is not None:
background_sections.append({"start": self.getOffset(section_start_slide, False),
"fade_in": self.getMusicFadeOutDuration(section_start_slide - 1),
"end": self.getTotalDuration() - self.getMusicFadeOutDuration(i),
"fade_out": self.getMusicFadeOutDuration(i)})
if len(background_sections) > 0:
# merge background tracks
filter_chains.append("%s concat=n=%s:v=0:a=1[background_audio]" % ("".join(background_audio),
len(self.background_tracks)))
# split the background tracks in the necessary sections
filter_chains.append("[background_audio]asplit=%s %s" % (len(background_sections), "".join(
["[b%s]" % (i) for i, section in enumerate(background_sections)])))
# fade background sections in/out
for i, section in enumerate(background_sections):
audio_tracks.append("[b%sf]" % (i))
filter_chains.append("[b%s]afade=t=in:st=%s:d=%s,afade=t=out:st=%s:d=%s[b%sf]" % (
i, section["start"], section["fade_in"], section["end"], section["fade_out"], i))
else:
logger.debug("no background section")
else:
logger.debug("no background music")
# video audio and background sections should be merged
if len(audio_tracks) > 0:
filter_chains.append("%s amix=inputs=%s[aout]" % ("".join(audio_tracks), len(audio_tracks)))
else:
logger.debug("no audio track")
return filter_chains
def getTimestampsFromAudio(self):
logger.debug("get Timestamps from Audio Files")
timestamps = []
offset = 0
for track in self.getBackgroundTracks():
# add beginning of track
timestamps.append(0 + offset)
# get timestamps of track
timestamps = timestamps + [float(timestamp) + offset for timestamp in track.getTimestamps(self.config["aubio"])]
# next track has the offsets after the current
offset = offset + track.duration
logger.debug("Timestamps: %s", timestamps)
return timestamps
def adjustDurationsFromAudio(self):
logger.debug("adjust slide durations")
timestamps = self.getTimestampsFromAudio()
logger.debug("Slide durations (before): %s", [slide.getDuration() for slide in self.getSlides()])
# change slide durations
timestamp_idx = 0
for i, slide in enumerate(self.getSlides()):
if not slide.has_audio and not isinstance(slide, VideoSlide) and timestamp_idx < len(timestamps):
slide_start = self.getOffset(i, False)
# find the next timestamp after the slide starts
# and skip timestamps until the minimum duration is reached
no_result = False
while ((slide_start >= (timestamps[timestamp_idx])
or (timestamps[timestamp_idx] - slide_start))
< slide.slide_duration_min):
# is the music long enough?
if (timestamp_idx + 1) < len(timestamps):
timestamp_idx = timestamp_idx + 1
else:
no_result = True
break
if not no_result:
duration = timestamps[timestamp_idx] - slide_start
# the next timestamp is earlier than the initial slide duration
# and after | |
"""
mfcln module. Contains the ModflowCln class. Note that the user can access
the ModflowCln class as `flopy.modflow.ModflowCln`.
Compatible with USG-Transport Version 1.7.0, which can be downloaded from
https://www.gsi-net.com/en/software/free-software/modflow-usg.html
Additional information for this MODFLOW package can be found at the `Online
MODFLOW Guide
<NAME>., 2021; USG-Transport Version 1.7.0: The Block-Centered Transport
Process for MODFLOW-USG, GSI Environmental, March, 2021
<NAME>, <NAME>., <NAME>., <NAME>, and Hughes,
J.D., 2013, MODFLOW–USG version 1: An unstructured grid version of MODFLOW
for simulating groundwater flow and tightly coupled processes using a control
volume finite-difference formulation: U.S. Geological Survey Techniques and
Methods, book 6, chap. A45, 66 p.
"""
import sys
import numpy as np
from ..pakbase import Package
from ..utils import Util2d,read1d
class ModflowCln(Package):
"""
Connected Linear Network class
Parameters
----------
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to which
this package will be added.
ncln : int
is a flag or the number of CLN segments. If NCLN = 0, this flag
indicates that the CLN domain connectivity is input in a general IA-JA
manner as is used for the GWF Process.If NCLN > 0, linear CLN segments
(for instance multi-aquifer wells) or simple CLN networks are simulated
and NCLN is the total number of CLN segments in the domain.
iclnnds : int
is a flag or number of CLN-nodes simulated in the model. Multiple
CLN-nodes constitute a segment.If ICLNNDS < 0, the CLN-nodes are
ordered in a sequential manner from the first CLN node to the last
CLN node. Therefore, only linear CLN segments are simulated since a
CLN segment does not share any of its nodes with another CLN segment.
If ICLNNDS > 0, CLN networks can be simulated and ICLNNDS is
the total number of CLN-nodes simulated by the model (NCLNNDS). CLN
nodes can be shared among CLN segments and therefore, the CLN-nodal
connectivity for the network is also required as input.
nndcln : list of int
is the number of CLN-nodes that are associated with each CLN segment.
Only read if NCLN > 0. If ICLNNDS < 0, sum of nndcln is the total number
of CLN-nodes (NCLNNDS)
clncon : list of list
are the CLN-node numbers associated with each CLN segment. Only read
if NCLN > 0 and ICLNNDS > 0. It is read NCLN times, once for each CLN
segment. The number of entries for each line is the number of CLN
cells (NNDCLN) associated with each CLN segment
nja_cln : int
is the total number of connections of the CLN domain. NJA_CLN is used
to dimension the sparse matrix in a compressed row storage format.
iac_cln : list of int
is a matrix indicating the number of connections plus 1 for each CLN
node to another CLN node. Note that the IAC_CLN array is only supplied
for the CLN cells; the IAC_CLN array is internally expanded to include
other domains if present in a simulation. sum(IAC)=NJAG
ja_cln : list of list
is a list of CLN cell number (n) followed by its connecting CLN cell
numbers (m) for each of the m CLN cells connected to CLN cell n. This
list is sequentially provided for the first to the last CLN cell.
Note that the cell and its connections are only supplied for the CLN
cells and their connections to the other CLN cells using the local CLN
cell numbers.
node_prop : matrix
[IFNO IFTYP IFDIR FLENG FELEV FANGLE IFLIN ICCWADI X1 Y1 Z1 X2 Y2 Z2]
is a table of the node properties. Total rows equal the total number
of CLN-nodes (NCLNNDS). The first 6 fields is required for running
model. Rest of fields have default value of 0.
nclngwc : int
is the number of CLN to porous-medium grid-block connections present
in the model. A CLN node need not be connected to any groundwater node.
Conversely, a CLN node may be connected to multiple groundwater nodes,
or multiple CLN nodes may be connected to the same porous medium mode.
cln_gwc : matrix
unstructured: [IFNOD IGWNOD IFCON FSKIN FLENGW FANISO ICGWADI]
structured: [IFNOD IGWLAY IGWROW IGWFCOL IFCON FSKIN FLENGW FANISO
ICGWADI]
is a table define connections between CLN nodes and groundwater cells.
Total rows of the table equals nclngwc.
nconduityp : int
is the number of circular conduit-geometry types.
cln_circ :
[ICONDUITYP FRAD CONDUITK TCOND TTHK TCFLUID TCONV]
is a table define the circular conduit properties. Total rows of the
table equals nconduityp. Last 4 fields only needed for heat transport
simulation.
ibound : 1-D array
is the boundary array for CLN-nodes. Length equal NCLNNDS
strt : 1-D array
is initial head at the beginning of the simulation in CLN nodes.
Length equal NCLNNDS
transient : bool
if there is transient IBOUND for each stress period
printiaja : bool
whether to print IA_CLN and JA_CLN to listing file
nrectyp : int
is the number of rectangular conduit-geometry types.
cln_rect : rectangular fracture properties
[IRECTYP FLENGTH FHEIGHT CONDUITK TCOND TTHK TCFLUID TCONV]
is read for each rectangular conduit. Total rows of the table equals
nrectyp. Last 4 fields only needed for heat transport simulation.
BHE : bool
is a flag indicating that BHE details are also included in a heat transport
model. Specifically, the thermal conductance and BHE tube thickness are
included in transfer of heat between groundwater and CLN cells along with
the heat conductivity of the BHE fluid and the convective heat transfer
coefficient.
grav : float
is the gravitational acceleration constant in model simulation units.
The value of the constant follows the keyword GRAVITY. Note that the
constant value is 9.81 m/s2 in SI units; 32.2 ft/s2 in fps units.
visk : float
is the kinematic viscosity of water in model simulation units [L2/T].
The value of kinematic viscosity follows the keyword VISCOSITY. Note
that the constant value is 1.787 x 10-6 m2/s in SI units;
1.924 x 10-5 ft2/s in fps units.
extension : list of strings
(default is ['cln','clncb','clnhd','clndd','clnib','clncn','clnmb']).
unitnumber : list of int
File unit number for the package and the output files.
(default is [71, 0, 0, 0, 0, 0, 0] ).
filenames : list of str
Filenames to use for the package and the output files. If filenames
= None the package name will be created using the model name and package
extensions.
Attributes
----------
Methods
-------
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow(version='mfusg', exe_name='mfusg.exe')
>>> node_prop = [[1,1,0,10.0,-110.0,1.57,0,0],[2,1,0,10.0,-130.0,1.57,0,0]]
>>> cln_gwc = [[1,1,50,50,0,0,10.0,1.0,0],[2,2,50,50,0,0,10.0,1.0,0]]
>>> cln = flopy.modflow.ModflowCln(ml, ncln=1, iclnnds=-1, nndcln=2,
nclngwc = 2, node_prop =node_prop, cln_gwc =cln_gwc)
"""
def __init__(
self,
model,
ncln=None, # number of CLNs
iclnnds=None, # number of nodes
nndcln=None, # number of nodes in each CLN segments
clncon=None, # node IDs in each CLN segments
nja_cln=None, # total number of node-node connections (NJAG)
iac_cln=None, # number of connections for each node (sum(IAC)=NJAG
ja_cln=None, # node connections
node_prop=None, # node properties
nclngwc=None, # number of CLN-GW connections
cln_gwc=None, # CLN-GW connections
nconduityp=1, # number of circular conduit types
cln_circ=[[1,10.0,3.23e10]], #circular conduit properties
ibound=1, # boundary condition types
strt=1.0, # initial head in CLN cells
transient=False, # OPTIONS: transient IBOUND for each stress period
printiaja=False, # OPTIONS: print IA_CLN and JA_CLN to listing file
nrectyp=0, # OPTIONS2: number of rectangular fracture types
cln_rect=None, # rectangular fracture properties
BHE=False, # OPTIONS2: borehole heat exchanger (BHE)
grav=None, # OPTIONS2: gravitational acceleration constant
visk=None, # OPTIONS2: kinematic viscosity of water
extension=['cln', 'clncb', 'clnhd', 'clndd','clnib', 'clncn', 'clnmb'],
unitnumber=None,
filenames=None,
):
if model.version != "mfusg":
err = "Error: model version must be mfusg to use CLN package"
raise Exception(err)
# set default | |
k_hop: Number of layers for gcnii.
lambda_l: The hyperparameter of lambda in the paper.
alpha: The hyperparameter of alpha in the paper.
dropout: Feature dropout rate.
"""
def __init__(self,
hidden_size,
activation=None,
lambda_l=0.5,
alpha=0.2,
k_hop=10,
dropout=0.6):
super(GCNII, self).__init__()
self.hidden_size = hidden_size
self.activation = activation
self.lambda_l = lambda_l
self.alpha = alpha
self.k_hop = k_hop
self.dropout = dropout
self.drop_fn = nn.Dropout(dropout)
self.mlps = nn.LayerList()
for _ in range(k_hop):
self.mlps.append(nn.Linear(hidden_size, hidden_size))
if isinstance(activation, str):
activation = getattr(F, activation)
self.activation = activation
def forward(self, graph, feature, norm=None):
"""
Args:
graph: `pgl.Graph` instance.
feature: A tensor with shape (num_nodes, input_size)
norm: (default None). If :code:`norm` is not None, then the feature will be normalized by given norm. If :code:`norm` is None, then we use `lapacian degree norm`.
Return:
A tensor with shape (num_nodes, output_size)
"""
if norm is None:
norm = GF.degree_norm(graph)
h0 = feature
for i in range(self.k_hop):
beta_i = np.log(1.0 * self.lambda_l / (i + 1) + 1)
feature = self.drop_fn(feature)
feature = feature * norm
feature = graph.send_recv(feature)
feature = feature * norm
feature = self.alpha * h0 + (1 - self.alpha) * feature
feature_transed = self.mlps[i](feature)
feature = beta_i * feature_transed + (1 - beta_i) * feature
if self.activation is not None:
feature = self.activation(feature)
return feature
class TransformerConv(nn.Layer):
    """Implementation of TransformerConv from UniMP

    This is an implementation of the paper Unified Message Passing Model for Semi-Supervised Classification
    (https://arxiv.org/abs/2009.03509).

    Args:
        input_size: The size of the inputs.
        hidden_size: The hidden size per attention head.
        activation: (default None) The activation for the output.
        num_heads: (default 4) The head number in transformerconv.
        feat_drop: (default 0.6) Dropout rate for feature.
        attn_drop: (default 0.6) Dropout rate for attention.
        concat: (default True) Whether to concat output heads or average them.
        skip_feat: (default True) Whether to add a skip connect from input to output.
        gate: (default False) Whether to use a gate function in skip connect.
        layer_norm: (default True) Whether to apply layer norm on output
    """
    def __init__(self,
                 input_size,
                 hidden_size,
                 num_heads=4,
                 feat_drop=0.6,
                 attn_drop=0.6,
                 concat=True,
                 skip_feat=True,
                 gate=False,
                 layer_norm=True,
                 activation='relu'):
        super(TransformerConv, self).__init__()
        self.hidden_size = hidden_size
        self.num_heads = num_heads
        self.feat_drop = feat_drop
        self.attn_drop = attn_drop
        self.concat = concat
        # query/key/value projections; all heads are produced by one Linear
        # and reshaped to (num_nodes, num_heads, hidden_size) in forward()
        self.q = nn.Linear(input_size, num_heads * hidden_size)
        self.k = nn.Linear(input_size, num_heads * hidden_size)
        self.v = nn.Linear(input_size, num_heads * hidden_size)
        self.feat_dropout = nn.Dropout(p=feat_drop)
        self.attn_dropout = nn.Dropout(p=attn_drop)
        if skip_feat:
            # skip projection must match the output width, which depends on
            # whether heads are concatenated or averaged
            if concat:
                self.skip_feat = nn.Linear(input_size, num_heads * hidden_size)
            else:
                self.skip_feat = nn.Linear(input_size, hidden_size)
        else:
            self.skip_feat = None
        if gate:
            # gate input is [skip, output, skip - output] concatenated
            if concat:
                self.gate = nn.Linear(3 * num_heads * hidden_size, 1)
            else:
                self.gate = nn.Linear(3 * hidden_size, 1)
        else:
            self.gate = None
        if layer_norm:
            if self.concat:
                self.layer_norm = nn.LayerNorm(num_heads * hidden_size)
            else:
                self.layer_norm = nn.LayerNorm(hidden_size)
        else:
            self.layer_norm = None
        if isinstance(activation, str):
            # resolve an activation given by name to the functional op
            activation = getattr(F, activation)
        self.activation = activation
    def send_attention(self, src_feat, dst_feat, edge_feat):
        """Message function: compute per-edge attention logits and values."""
        if "edge_feat" in edge_feat:
            # edge features enter both the attention score and the message value
            alpha = dst_feat["q"] * (src_feat["k"] + edge_feat['edge_feat'])
            src_feat["v"] = src_feat["v"] + edge_feat["edge_feat"]
        else:
            alpha = dst_feat["q"] * src_feat["k"]
        # per-head dot product: sum over the hidden dimension
        alpha = paddle.sum(alpha, axis=-1)
        return {"alpha": alpha, "v": src_feat["v"]}
    def reduce_attention(self, msg):
        """Reduce function: softmax-normalized weighted sum of neighbor values."""
        # softmax over all incoming edges of each destination node
        alpha = msg.reduce_softmax(msg["alpha"])
        alpha = paddle.reshape(alpha, [-1, self.num_heads, 1])
        if self.attn_drop > 1e-15:
            alpha = self.attn_dropout(alpha)
        feature = msg["v"]
        feature = feature * alpha
        if self.concat:
            feature = paddle.reshape(feature,
                                     [-1, self.num_heads * self.hidden_size])
        else:
            # average heads instead of concatenating them
            feature = paddle.mean(feature, axis=1)
        feature = msg.reduce(feature, pool_type="sum")
        return feature
    def send_recv(self, graph, q, k, v, edge_feat):
        """Run attention message passing; q/k/v are (num_nodes, num_heads, hidden_size)."""
        # scaled dot-product attention
        q = q / (self.hidden_size**0.5)
        if edge_feat is not None:
            msg = graph.send(
                self.send_attention,
                src_feat={'k': k,
                          'v': v},
                dst_feat={'q': q},
                edge_feat={'edge_feat': edge_feat})
        else:
            msg = graph.send(
                self.send_attention,
                src_feat={'k': k,
                          'v': v},
                dst_feat={'q': q})
        output = graph.recv(reduce_func=self.reduce_attention, msg=msg)
        return output
    def forward(self, graph, feature, edge_feat=None):
        """Apply multi-head graph attention; returns per-node output features.

        edge_feat, when given, is projected per-head and added to keys and
        values (assumes its last dim equals num_heads * hidden_size — confirm
        against callers).
        """
        if self.feat_drop > 1e-5:
            feature = self.feat_dropout(feature)
        q = self.q(feature)
        k = self.k(feature)
        v = self.v(feature)
        q = paddle.reshape(q, [-1, self.num_heads, self.hidden_size])
        k = paddle.reshape(k, [-1, self.num_heads, self.hidden_size])
        v = paddle.reshape(v, [-1, self.num_heads, self.hidden_size])
        if edge_feat is not None:
            if self.feat_drop > 1e-5:
                edge_feat = self.feat_dropout(edge_feat)
            edge_feat = paddle.reshape(edge_feat,
                                       [-1, self.num_heads, self.hidden_size])
        output = self.send_recv(graph, q, k, v, edge_feat=edge_feat)
        if self.skip_feat is not None:
            skip_feat = self.skip_feat(feature)
            if self.gate is not None:
                # learned convex combination of skip connection and attention output
                gate = F.sigmoid(
                    self.gate(
                        paddle.concat(
                            [skip_feat, output, skip_feat - output], axis=-1)))
                output = gate * skip_feat + (1 - gate) * output
            else:
                output = skip_feat + output
        if self.layer_norm is not None:
            output = self.layer_norm(output)
        if self.activation is not None:
            output = self.activation(output)
        return output
class GINConv(nn.Layer):
    """Graph Isomorphism Network (GIN) convolution layer.

    Implements the aggregation from "How Powerful are Graph Neural Networks?"
    (https://arxiv.org/pdf/1810.00826.pdf): the node feature, scaled by
    (1 + epsilon), is summed with the aggregated neighbor features and passed
    through a two-layer MLP with layer normalization on the hidden layer.

    Args:
        input_size: The size of input.
        output_size: The size of output.
        activation: The activation for the output.
        init_eps: float, optional
            Initial :math:`\epsilon` value, default is 0.
        train_eps: bool, optional
            if True, :math:`\epsilon` will be a learnable parameter.
    """
    def __init__(self,
                 input_size,
                 output_size,
                 activation=None,
                 init_eps=0.0,
                 train_eps=False):
        super(GINConv, self).__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.linear1 = nn.Linear(input_size, output_size, bias_attr=True)
        self.linear2 = nn.Linear(output_size, output_size, bias_attr=True)
        self.layer_norm = nn.LayerNorm(output_size)
        if train_eps:
            # learnable epsilon, stored as a 1x1 tensor so it broadcasts
            self.epsilon = self.create_parameter(
                shape=[1, 1],
                dtype='float32',
                default_initializer=nn.initializer.Constant(value=init_eps))
        else:
            self.epsilon = init_eps
        if isinstance(activation, str):
            activation = getattr(F, activation)
        self.activation = activation
    def forward(self, graph, feature):
        """
        Args:
            graph: `pgl.Graph` instance.
            feature: A tensor with shape (num_nodes, input_size)

        Return:
            A tensor with shape (num_nodes, output_size)
        """
        # sum-aggregate neighbor features, then mix in the (1+eps)-scaled self feature
        neighbour_sum = graph.send_recv(feature, reduce_func="sum")
        hidden = neighbour_sum + feature * (self.epsilon + 1.0)
        hidden = self.linear1(hidden)
        hidden = self.layer_norm(hidden)
        if self.activation is not None:
            hidden = self.activation(hidden)
        return self.linear2(hidden)
class RGCNConv(nn.Layer):
    """Implementation of Relational Graph Convolutional Networks (R-GCN)

    This is an implementation of the paper
    Modeling Relational Data with Graph Convolutional Networks
    (http://arxiv.org/abs/1703.06103).

    Args:
        in_dim: The input dimension.
        out_dim: The output dimension.
        etypes: A list of edge types of the heterogeneous graph.
        num_bases: int, number of basis decomposition. Details can be found in the paper.
    """
    def __init__(self, in_dim, out_dim, etypes, num_bases=0):
        super(RGCNConv, self).__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.etypes = etypes
        self.num_rels = len(self.etypes)
        self.num_bases = num_bases
        # basis decomposition is only meaningful for 0 < num_bases < num_rels;
        # otherwise fall back to one full weight matrix per relation
        if self.num_bases <= 0 or self.num_bases >= self.num_rels:
            self.num_bases = self.num_rels
        self.weight = self.create_parameter(
            shape=[self.num_bases, self.in_dim, self.out_dim])
        if self.num_bases < self.num_rels:
            # per-relation coefficients over the shared bases
            self.w_comp = self.create_parameter(
                shape=[self.num_rels, self.num_bases])
    def forward(self, graph, feat):
        """
        Args:
            graph: `pgl.HeterGraph` instance or a dictionary of `pgl.Graph` with their edge type.
            feat: A tensor with shape (num_nodes, in_dim)

        Return:
            A tensor with shape (num_nodes, out_dim).
        """
        if self.num_bases < self.num_rels:
            # combine bases into per-relation weights:
            # [num_rels, num_bases] x [num_bases, in_dim, out_dim] -> [num_rels, in_dim, out_dim]
            weight = paddle.transpose(self.weight, perm=[1, 0, 2])
            weight = paddle.matmul(self.w_comp, weight)
            # [num_rels, in_dim, out_dim]
            weight = paddle.transpose(weight, perm=[1, 0, 2])
        else:
            weight = self.weight
        def send_func(src_feat, dst_feat, edge_feat):
            return src_feat
        def recv_func(msg):
            return msg.reduce_mean(msg["h"])
        feat_list = []
        for idx, etype in enumerate(self.etypes):
            # BUGFIX: integer indexing already drops the relation axis, giving
            # [in_dim, out_dim]. The previous `weight[idx, :, :].squeeze()`
            # also collapsed in_dim/out_dim whenever either equalled 1,
            # breaking the matmul below.
            w = weight[idx]
            h = paddle.matmul(feat, w)
            msg = graph[etype].send(send_func, src_feat={"h": h})
            h = graph[etype].recv(recv_func, msg)
            feat_list.append(h)
        # mean-aggregate per relation, then sum over relations
        h = paddle.stack(feat_list, axis=0)
        h = paddle.sum(h, axis=0)
        return h
class SGCConv(nn.Layer):
"""Implementation of simplified graph convolutional neural networks (SGC)
This is an implementation of the paper Simplifying Graph Convolutional Networks
(https://arxiv.org/pdf/1902.07153.pdf).
Args:
input_size: The size of the inputs.
output_size: The size of outputs
k_hop: K Steps for Propagation
activation: The activation for the output.
cached: If :code:`cached` is True, then the graph convolution will be pre-computed and stored.
"""
    def __init__(self,
                 input_size,
                 output_size,
                 k_hop=2,
                 cached=True,
                 activation=None,
                 bias=False):
        """Build the SGC layer: one linear projection plus k-hop propagation state."""
        super(SGCConv, self).__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.k_hop = k_hop
        # SGC collapses all per-hop weight matrices into a single linear map;
        # the Linear itself carries no bias (see explicit bias parameter below)
        self.linear = nn.Linear(input_size, output_size, bias_attr=False)
        if bias:
            # NOTE(review): self.bias only exists when bias=True — presumably
            # added to the output in forward(); confirm there before relying on it
            self.bias = self.create_parameter(
                shape=[output_size], is_bias=True)
        self.cached = cached
        # holds the k-hop propagated features once computed (when cached=True)
        self.cached_output = None
        if isinstance(activation, str):
            # resolve an activation given by name to the functional op
            activation = getattr(F, activation)
        self.activation = activation
def forward(self, graph, feature):
"""
Args:
graph: `pgl.Graph` instance.
feature: A tensor with shape (num_nodes, input_size)
Return:
A tensor with shape (num_nodes, output_size)
"""
if self.cached:
if self.cached_output is None:
norm = GF.degree_norm(graph)
for hop in range(self.k_hop):
feature = feature * norm
feature = graph.send_recv(feature, "sum")
feature = feature * norm
self.cached_output = feature
else:
feature = self.cached_output
else:
norm = GF.degree_norm(graph)
for hop in range(self.k_hop):
feature = feature * norm
feature = | |
<reponame>hbrunie/cctbx_project
from __future__ import absolute_import, division, print_function
from iotbx.pdb.atom_selection import selection_string_from_selection
from scitbx.array_family import flex
from mmtbx.ncs import ncs_search
from libtbx.utils import Sorry, null_out
import libtbx.phil
import iotbx.pdb.hierarchy
from mmtbx.ncs import ncs
from mmtbx.ncs.ncs_restraints_group_list import class_ncs_restraints_group_list, \
NCS_restraint_group
from scitbx import matrix
import sys
from iotbx.pdb.utils import all_chain_ids
from time import time
from six.moves import cStringIO as StringIO
from six.moves import zip
ncs_search_options = """\
ncs_search
.short_caption = Search options
.style = box
.help = Set of parameters for NCS search procedure. Some of them also used \
for filtering user-supplied ncs_group.
{
enabled = False
.type = bool
.help = Enable NCS restraints or constraints in refinement (in some cases \
may be switched on inside refinement program).
.short_caption = Use NCS
.style = noauto bold
exclude_selection = "element H or element D or water"
.type = atom_selection
.help = Atoms selected by this selection will be excluded from the model \
before any NCS search and/or filtering procedures. There is no way \
atoms defined by this selection will be in NCS.
.expert_level = 2
chain_similarity_threshold = 0.85
.type=float
.short_caption = Sequence alignment threshold
.help='''Threshold for sequence similarity between matching chains.
A smaller value may cause more chains to be grouped together and can lower
the number of common residues'''
.expert_level = 0
chain_max_rmsd = 2.
.type = float
.short_caption = Max RMSD between matching chains
.help = '''limit of rms difference between chains to be considered
as copies'''
.expert_level = 0
residue_match_radius = 4.0
.type = float
.help = Maximum allowed distance difference between pairs of matching \
atoms of two residues
.expert_level = 0
try_shortcuts = False
.type = bool
.help = Try very quick check to speed up the search when chains are identical. \
If failed, regular search will be performed automatically.
.expert_level = 0
minimum_number_of_atoms_in_copy = 3
.type = int
.help = Do not create ncs groups where master and copies would contain \
less than specified amount of atoms
.expert_level = 3
}
"""
# parameters for manual specification of NCS - ASU mapping
ncs_group_phil_str = '''\
ncs_group
.multiple = True
.short_caption = NCS group definition
.help = The definition of one NCS group. Note, that almost always in \
refinement programs they will be checked and filtered if needed.
.style = auto_align
.expert_level=0
{
reference = None
.type = str
.short_caption = Reference selection
.help = 'Residue selection string for the complete master NCS copy'
.expert_level=0
selection = None
.type = str
.short_caption = NCS related selection
.help = 'Residue selection string for each NCS copy location in ASU'
.multiple = True
.expert_level=0
}
'''
ncs_group_master_phil = libtbx.phil.parse(ncs_group_phil_str)
class input(object):
    def __init__(self,
            hierarchy=None,
            # XXX warning, ncs_phil_groups can be changed inside...
            ncs_phil_groups = None,
            params = None,
            log=None,
            ):
        """
        Build NCS groups either from user-supplied phil definitions or,
        if those are absent or fail validation, by searching the hierarchy.

        TODO:
        1. Transfer get_ncs_info_as_spec() to ncs/ncs.py:ncs

        Select method to build ncs_group_object
        order of implementation:
        1) ncs_phil_groups - user-supplied definitions are filtered
        2) hierarchy only - Performing NCS search

        Args:
        -----
        hierarchy: iotbx.pdb.hierarchy to analyse (may be None).
        ncs_phil_groups: iotbx.phil.parse(ncs_group_phil_str).extract().ncs_group
        params: ncs_search phil scope; defaults are used when None.
        log: output stream; sys.stdout when None.
        chain_max_rmsd (float): limit of rms difference between chains to be considered
          as copies
        chain_similarity_threshold (float): min similarity between matching chains
        residue_match_radius (float): max allow distance difference between pairs of matching
          atoms of two residues
        """
        self.number_of_ncs_groups = 0 # consider removing/replacing with function
        # Final (validated or freshly searched) groups end up here.
        self.ncs_restraints_group_list = class_ncs_restraints_group_list()
        # keep hierarchy for writing (To have a source of atoms labels)
        self.hierarchy = hierarchy
        # residues common to NCS copies. Used for .spec representation
        self.common_res_dict = {}
        # Collect messages, recommendation and errors
        self.messages = '' # Not used outside...
        self.old_i_seqs = None
        self.original_hierarchy = None
        self.truncated_hierarchy = None
        self.truncated_h_asc = None
        self.chains_info = None
        # NOTE(review): `extension` appears unused in this method — confirm.
        extension = ''
        # set search parameters
        self.params = params
        if self.params is None:
            self.params = input.get_default_params().ncs_search
        #
        if log is None:
            self.log = sys.stdout
        else:
            self.log = log
        if hierarchy:
            # for a in hierarchy.atoms():
            #   print "oo", a.i_seq, a.id_str()
            # print "====="
            # Normalise i_seqs before copying so old/new mappings are consistent.
            hierarchy.atoms().reset_i_seq()
            self.original_hierarchy = hierarchy.deep_copy()
            self.original_hierarchy.reset_atom_i_seqs()
            if self.params.exclude_selection is not None:
                # Drop excluded atoms (H/D/water by default) before the search.
                # pdb_hierarchy_inp.hierarchy.write_pdb_file("in_ncs_pre_before.pdb")
                cache = hierarchy.atom_selection_cache()
                sel = cache.selection("not (%s)" % self.params.exclude_selection)
                self.truncated_hierarchy = hierarchy.select(sel)
            else:
                # this could be to save iseqs but I'm not sure
                self.truncated_hierarchy = hierarchy.select(flex.size_t_range(hierarchy.atoms_size()))
            # Map from truncated-hierarchy atoms back to original i_seqs.
            self.old_i_seqs = self.truncated_hierarchy.atoms().extract_i_seq()
            # print "self.old_i_seqs", list(self.old_i_seqs)
            # self.truncated_hierarchy.atoms().reset_i_seq()
            self.truncated_hierarchy.reset_atom_i_seqs()
            self.truncated_h_asc = self.truncated_hierarchy.atom_selection_cache()
            # self.truncated_hierarchy.write_pdb_file("in_ncs_pre_after.pdb")
            self.chains_info = ncs_search.get_chains_info(self.truncated_hierarchy)
            # Nothing left after exclusion — nothing to search for.
            if self.truncated_hierarchy.atoms_size() == 0:
                return
            #
            # print "ncs_groups before validation", ncs_phil_groups
            validated_ncs_phil_groups = None
            validated_ncs_phil_groups = self.validate_ncs_phil_groups(
                pdb_h = self.truncated_hierarchy,
                ncs_phil_groups = ncs_phil_groups,
                asc = self.truncated_h_asc)
            if validated_ncs_phil_groups is None:
                # print "Last chance, building from hierarchy"
                # No usable user definitions: fall back to automatic search.
                self.build_ncs_obj_from_pdb_asu(
                    pdb_h=self.truncated_hierarchy,
                    asc=self.truncated_h_asc)
            # error handling
            if self.ncs_restraints_group_list.get_n_groups() == 0:
                print('========== WARNING! ============\n', file=self.log)
                print('  No NCS relation were found !!!\n', file=self.log)
                print('================================\n', file=self.log)
            if self.messages != '':
                print(self.messages, file=self.log)
@staticmethod
def get_default_params():
"""
Get parsed parameters (in form of Python objects). Use this function to
avoid importing ncs_search phil strings and think about how to
parse it. Does not need the instance of class (staticmethod).
Then modify what needed to be modified and init this class normally.
"""
import iotbx.phil
return iotbx.phil.parse(
input_string=ncs_search_options,
process_includes=True).extract()
def pdb_h_into_chain(self, pdb_h, ch_id="A"):
new_chain = iotbx.pdb.hierarchy.chain(id=ch_id)
n_res_groups = 0
for chain in pdb_h.only_model().chains():
n_res_groups += chain.residue_groups_size()
new_chain.pre_allocate_residue_groups(
number_of_additional_residue_groups=n_res_groups)
new_resseq = 1
for chain in pdb_h.only_model().chains():
for rg in chain.residue_groups():
new_rg = rg.detached_copy()
new_rg.resseq = new_resseq
original_iseqs = rg.atoms().extract_i_seq()
for atom, orig_iseq in zip(new_rg.atoms(), original_iseqs):
atom.tmp = orig_iseq
new_resseq += 1
new_chain.append_residue_group(residue_group=new_rg)
return new_chain
def validate_ncs_phil_groups(self, pdb_h, ncs_phil_groups, asc):
"""
Note that the result of this procedure is corrected ncs_phil_groups.
These groups will be later submitted to build_ncs_obj_from_phil
procedure. This is sub-optimal and should be changed because
everything is already processed here and ready to build proper
NCS_restraint_group object.
add filtered groups in self.ncs_restraints_group_list
"""
def show_particular_ncs_group(ncs_gr):
p_obj = ncs_group_master_phil.extract()
p_obj.ncs_group[0].reference = ncs_gr.reference
p_obj.ncs_group[0].selection = ncs_gr.selection
to_show = ncs_group_master_phil.format(python_object=p_obj)
to_show.show(out=self.log)
def show_empty_selection_error_message(ng, where="reference"):
print(" Missing or corrupted %s field:" % where, file=self.log)
print(" !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!", file=self.log)
print(" _ALL_ user-supplied groups will be ignored", file=self.log)
print(" !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!", file=self.log)
show_particular_ncs_group(ng)
# Massage NCS groups
# return ncs_phil_groups
validated_ncs_groups = []
if ncs_phil_groups is None:
return None
if(ncs_phil_groups is not None and len(ncs_phil_groups)==0):
# print "exiting here"
ncs_phil_groups=None
return None
if (ncs_phil_groups is not None and
len(ncs_phil_groups)==1 and
ncs_phil_groups[0].reference is None and
len(ncs_phil_groups[0].selection) == 1 and
ncs_phil_groups[0].selection[0] is None):
# This is empty ncs_group definition somehow creeped into here.
# Not a big deal.
return None
if(ncs_phil_groups is not None):
print("Validating user-supplied NCS groups...", file=self.log)
empty_cntr = 0
for ng in ncs_phil_groups:
if ng.reference is None or len(ng.reference.strip())==0:
show_empty_selection_error_message(ng, where="reference")
empty_cntr += 1
for s in ng.selection:
if s is None or len(s.strip())==0:
show_empty_selection_error_message(ng, where="selection")
empty_cntr += 1
if(empty_cntr>0):
print(" !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!", file=self.log)
print(" _ALL_ user-supplied groups are ignored.", file=self.log)
print(" !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!", file=self.log)
ncs_phil_groups=None
return None
# Verify NCS selections
msg="Empty selection in NCS group definition: %s"
for ncs_group in ncs_phil_groups:
print(" Validating:", file=self.log)
show_particular_ncs_group(ncs_group)
selection_list = []
# first, check for selections producing 0 atoms
user_original_reference_iselection = None
user_original_copies_iselections = []
n_atoms_in_user_ncs = 0
s_string = ncs_group.reference
if s_string is not None:
sel = asc.iselection(s_string)
selection_list.append(s_string)
n_atoms_in_user_ncs = sel.size()
if(n_atoms_in_user_ncs==0):
raise Sorry(msg%s_string)
user_original_reference_iselection = sel
for s_string in ncs_group.selection:
if(s_string is not None):
sel = asc.iselection(s_string)
selection_list.append(s_string)
n_copy = sel.size()
if(n_copy==0):
raise Sorry(msg%s_string)
user_original_copies_iselections.append(sel)
#
# The idea for user's groups is to pick them one by one,
# select only reference and selections from the model,
# If there are multiple chains in ref or selection -
# combine them in one chain,
# save atom original i_seq in atom.tmp
# run searching procedure for the resulting hierarchy
# if the user's selections were more or less OK - there should be
# one group, get atom.tmp values for the selected atoms and using
# original hierarchy convert them into string selections when needed.
# If multiple groups produced - use them, most likely the user
# provided something really wrong.
# Need to pay some attention to what came out | |
from __future__ import print_function
import torch
import torch.nn as nn
import torch.utils.data
from torch.autograd import Variable
import torch.nn.functional as F
import math
import numpy as np
import pdb
import kornia
class residualBlock(nn.Module):
    """Two-conv residual block with LeakyReLU(0.1) and optional downsample."""
    expansion = 1

    def __init__(self, in_channels, n_filters, stride=1, downsample=None, dilation=1, with_bn=True):
        super(residualBlock, self).__init__()
        # A dilated 3x3 conv needs padding == dilation to keep spatial size.
        padding = dilation if dilation > 1 else 1
        self.convbnrelu1 = conv2DBatchNormRelu(in_channels, n_filters, 3, stride, padding,
                                               dilation=dilation, with_bn=with_bn)
        self.convbn2 = conv2DBatchNorm(n_filters, n_filters, 3, 1, 1, with_bn=with_bn)
        self.downsample = downsample
        self.relu = nn.LeakyReLU(0.1, inplace=True)

    def forward(self, x):
        # Shortcut path: identity unless a downsample module was supplied.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.convbn2(self.convbnrelu1(x))
        out += shortcut
        return self.relu(out)
def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
    """Convenience stack: Conv2d (with bias) -> BatchNorm2d -> LeakyReLU(0.1)."""
    layers = [
        nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                  stride=stride, padding=padding, dilation=dilation, bias=True),
        nn.BatchNorm2d(out_planes),
        nn.LeakyReLU(0.1, inplace=True),
    ]
    return nn.Sequential(*layers)
class conv2DBatchNorm(nn.Module):
    """Conv2d optionally followed by BatchNorm2d; conv bias only when BN is off."""

    def __init__(self, in_channels, n_filters, k_size, stride, padding, dilation=1, with_bn=True):
        super(conv2DBatchNorm, self).__init__()
        # Clamp non-dilated cases to dilation=1, mirroring the two original branches.
        eff_dilation = dilation if dilation > 1 else 1
        conv_mod = nn.Conv2d(int(in_channels), int(n_filters), kernel_size=k_size,
                             padding=padding, stride=stride, bias=not with_bn,
                             dilation=eff_dilation)
        modules = [conv_mod]
        if with_bn:
            modules.append(nn.BatchNorm2d(int(n_filters)))
        self.cb_unit = nn.Sequential(*modules)

    def forward(self, inputs):
        return self.cb_unit(inputs)
class conv2DBatchNormRelu(nn.Module):
    """Conv2d -> (optional BatchNorm2d) -> LeakyReLU(0.1); bias only without BN."""

    def __init__(self, in_channels, n_filters, k_size, stride, padding, dilation=1, with_bn=True):
        super(conv2DBatchNormRelu, self).__init__()
        layers = [nn.Conv2d(int(in_channels), int(n_filters), kernel_size=k_size,
                            padding=padding, stride=stride, bias=not with_bn,
                            # non-dilated cases collapse to dilation=1, as before
                            dilation=dilation if dilation > 1 else 1)]
        if with_bn:
            layers.append(nn.BatchNorm2d(int(n_filters)))
        layers.append(nn.LeakyReLU(0.1, inplace=True))
        self.cbr_unit = nn.Sequential(*layers)

    def forward(self, inputs):
        return self.cbr_unit(inputs)
class pyramidPooling(nn.Module):
    """Spatial pyramid pooling: average-pool the input at several scales,
    run each pooled map through a 1x1 conv branch, upsample back, and
    average the branches into the input (residually)."""

    def __init__(self, in_channels, with_bn=True, levels=4):
        super(pyramidPooling, self).__init__()
        self.levels = levels
        # One 1x1 conv branch per pyramid level (channel count preserved).
        self.paths = [conv2DBatchNormRelu(in_channels, in_channels, 1, 1, 0, with_bn=with_bn)
                      for _ in range(levels)]
        self.path_module_list = nn.ModuleList(self.paths)
        self.relu = nn.LeakyReLU(0.1, inplace=True)

    def forward(self, x):
        h, w = x.shape[2:]
        # Pooling window == stride per level; built fine-to-coarse then
        # reversed so the coarsest level comes first.
        sizes = [(int(h / p), int(w / p))
                 for p in np.linspace(1, min(h, w) // 2, self.levels, dtype=int)][::-1]
        pp_sum = x
        for size, module in zip(sizes, self.path_module_list):
            out = F.avg_pool2d(x, size, stride=size, padding=0)
            out = module(out)
            out = F.upsample(out, size=(h, w), mode='bilinear')
            pp_sum = pp_sum + 1. / self.levels * out
        return self.relu(pp_sum / 2.)
class pspnet(nn.Module):
    """
    Modified PSPNet. https://github.com/meetshah1995/pytorch-semseg/blob/master/ptsemseg/models/pspnet.py

    Encoder/decoder feature pyramid: a stride-2 conv stem, four stride-2
    residual stages, pyramid pooling at the coarsest level, then
    upsample-and-fuse ("iconv") stages back up to 1/4 resolution.
    forward() returns five feature maps at 1/64, 1/32, 1/16, 1/8 and 1/4
    of the input resolution, optionally passed through 1x1 projection
    heads when is_proj=True.
    """
    def __init__(self, is_proj=True,groups=1):
        super(pspnet, self).__init__()
        self.inplanes = 32
        # Whether to attach the 1x1 projection heads to each output scale.
        self.is_proj = is_proj
        # Encoder
        self.convbnrelu1_1 = conv2DBatchNormRelu(in_channels=3, k_size=3, n_filters=16,
                                                 padding=1, stride=2)
        self.convbnrelu1_2 = conv2DBatchNormRelu(in_channels=16, k_size=3, n_filters=16,
                                                 padding=1, stride=1)
        self.convbnrelu1_3 = conv2DBatchNormRelu(in_channels=16, k_size=3, n_filters=32,
                                                 padding=1, stride=1)
        # Vanilla Residual Blocks
        self.res_block3 = self._make_layer(residualBlock,64,1,stride=2)
        self.res_block5 = self._make_layer(residualBlock,128,1,stride=2)
        self.res_block6 = self._make_layer(residualBlock,128,1,stride=2)
        self.res_block7 = self._make_layer(residualBlock,128,1,stride=2)
        self.pyramid_pooling = pyramidPooling(128, levels=3)
        # Iconvs
        # NOTE: forward() uses only the conv half (upconvX[1]); the
        # upsampling is done explicitly with F.upsample to exact sizes.
        self.upconv6 = nn.Sequential(nn.Upsample(scale_factor=2),
                                     conv2DBatchNormRelu(in_channels=128, k_size=3, n_filters=64,
                                                         padding=1, stride=1))
        self.iconv5 = conv2DBatchNormRelu(in_channels=192, k_size=3, n_filters=128,
                                          padding=1, stride=1)
        self.upconv5 = nn.Sequential(nn.Upsample(scale_factor=2),
                                     conv2DBatchNormRelu(in_channels=128, k_size=3, n_filters=64,
                                                         padding=1, stride=1))
        self.iconv4 = conv2DBatchNormRelu(in_channels=192, k_size=3, n_filters=128,
                                          padding=1, stride=1)
        self.upconv4 = nn.Sequential(nn.Upsample(scale_factor=2),
                                     conv2DBatchNormRelu(in_channels=128, k_size=3, n_filters=64,
                                                         padding=1, stride=1))
        self.iconv3 = conv2DBatchNormRelu(in_channels=128, k_size=3, n_filters=64,
                                          padding=1, stride=1)
        self.upconv3 = nn.Sequential(nn.Upsample(scale_factor=2),
                                     conv2DBatchNormRelu(in_channels=64, k_size=3, n_filters=32,
                                                         padding=1, stride=1))
        self.iconv2 = conv2DBatchNormRelu(in_channels=64, k_size=3, n_filters=64,
                                          padding=1, stride=1)
        if self.is_proj:
            # 1x1 projection heads (channels reduced by `groups`).
            self.proj6 = conv2DBatchNormRelu(in_channels=128,k_size=1,n_filters=128//groups, padding=0,stride=1)
            self.proj5 = conv2DBatchNormRelu(in_channels=128,k_size=1,n_filters=128//groups, padding=0,stride=1)
            self.proj4 = conv2DBatchNormRelu(in_channels=128,k_size=1,n_filters=128//groups, padding=0,stride=1)
            self.proj3 = conv2DBatchNormRelu(in_channels=64, k_size=1,n_filters=64//groups, padding=0,stride=1)
            self.proj2 = conv2DBatchNormRelu(in_channels=64, k_size=1,n_filters=64//groups, padding=0,stride=1)
        # Kaiming-style init for all conv weights; zero biases where present.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if hasattr(m.bias,'data'):
                    m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1):
        # Build one residual stage of `blocks` units; the first unit may
        # downsample through a 1x1 conv + BN projection shortcut.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, planes * block.expansion,
                                                 kernel_size=1, stride=stride, bias=False),
                                       nn.BatchNorm2d(planes * block.expansion),)
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        # H, W -> H/2, W/2
        conv1 = self.convbnrelu1_1(x)
        conv1 = self.convbnrelu1_2(conv1)
        conv1 = self.convbnrelu1_3(conv1)
        ## H/2, W/2 -> H/4, W/4
        pool1 = F.max_pool2d(conv1, 3, 2, 1)
        # H/4, W/4 -> H/16, W/16
        rconv3 = self.res_block3(pool1)
        conv4 = self.res_block5(rconv3)
        conv5 = self.res_block6(conv4)
        conv6 = self.res_block7(conv5)
        conv6 = self.pyramid_pooling(conv6)
        # Decoder: upsample the coarse map to the next-finer skip's size,
        # conv it, concat with the skip connection, fuse with an iconv.
        conv6x = F.upsample(conv6, [conv5.size()[2],conv5.size()[3]],mode='bilinear')
        concat5 = torch.cat((conv5,self.upconv6[1](conv6x)),dim=1)
        conv5 = self.iconv5(concat5)
        conv5x = F.upsample(conv5, [conv4.size()[2],conv4.size()[3]],mode='bilinear')
        concat4 = torch.cat((conv4,self.upconv5[1](conv5x)),dim=1)
        conv4 = self.iconv4(concat4)
        conv4x = F.upsample(conv4, [rconv3.size()[2],rconv3.size()[3]],mode='bilinear')
        concat3 = torch.cat((rconv3,self.upconv4[1](conv4x)),dim=1)
        conv3 = self.iconv3(concat3)
        conv3x = F.upsample(conv3, [pool1.size()[2],pool1.size()[3]],mode='bilinear')
        concat2 = torch.cat((pool1,self.upconv3[1](conv3x)),dim=1)
        conv2 = self.iconv2(concat2)
        if self.is_proj:
            proj6 = self.proj6(conv6)
            proj5 = self.proj5(conv5)
            proj4 = self.proj4(conv4)
            proj3 = self.proj3(conv3)
            proj2 = self.proj2(conv2)
            return proj6,proj5,proj4,proj3,proj2
        else:
            return conv6, conv5, conv4, conv3, conv2
class pspnet_s(nn.Module):
    """
    Modified PSPNet. https://github.com/meetshah1995/pytorch-semseg/blob/master/ptsemseg/models/pspnet.py

    Shallower variant of `pspnet`: identical encoder, but the decoder stops
    at 1/8 resolution (the 1/4-scale upconv3/iconv2 stage is commented out),
    so forward() returns four feature maps (1/64, 1/32, 1/16, 1/8) instead
    of five.
    """
    def __init__(self, is_proj=True,groups=1):
        super(pspnet_s, self).__init__()
        self.inplanes = 32
        # Whether to attach the 1x1 projection heads to each output scale.
        self.is_proj = is_proj
        # Encoder
        self.convbnrelu1_1 = conv2DBatchNormRelu(in_channels=3, k_size=3, n_filters=16,
                                                 padding=1, stride=2)
        self.convbnrelu1_2 = conv2DBatchNormRelu(in_channels=16, k_size=3, n_filters=16,
                                                 padding=1, stride=1)
        self.convbnrelu1_3 = conv2DBatchNormRelu(in_channels=16, k_size=3, n_filters=32,
                                                 padding=1, stride=1)
        # Vanilla Residual Blocks
        self.res_block3 = self._make_layer(residualBlock,64,1,stride=2)
        self.res_block5 = self._make_layer(residualBlock,128,1,stride=2)
        self.res_block6 = self._make_layer(residualBlock,128,1,stride=2)
        self.res_block7 = self._make_layer(residualBlock,128,1,stride=2)
        self.pyramid_pooling = pyramidPooling(128, levels=3)
        # Iconvs (decoder); forward() uses only the conv half (upconvX[1]).
        self.upconv6 = nn.Sequential(nn.Upsample(scale_factor=2),
                                     conv2DBatchNormRelu(in_channels=128, k_size=3, n_filters=64,
                                                         padding=1, stride=1))
        self.iconv5 = conv2DBatchNormRelu(in_channels=192, k_size=3, n_filters=128,
                                          padding=1, stride=1)
        self.upconv5 = nn.Sequential(nn.Upsample(scale_factor=2),
                                     conv2DBatchNormRelu(in_channels=128, k_size=3, n_filters=64,
                                                         padding=1, stride=1))
        self.iconv4 = conv2DBatchNormRelu(in_channels=192, k_size=3, n_filters=128,
                                          padding=1, stride=1)
        self.upconv4 = nn.Sequential(nn.Upsample(scale_factor=2),
                                     conv2DBatchNormRelu(in_channels=128, k_size=3, n_filters=64,
                                                         padding=1, stride=1))
        self.iconv3 = conv2DBatchNormRelu(in_channels=128, k_size=3, n_filters=64,
                                          padding=1, stride=1)
        #self.upconv3 = nn.Sequential(nn.Upsample(scale_factor=2),
        #                             conv2DBatchNormRelu(in_channels=64, k_size=3, n_filters=32,
        #                                                 padding=1, stride=1))
        #self.iconv2 = conv2DBatchNormRelu(in_channels=64, k_size=3, n_filters=64,
        #                                  padding=1, stride=1)
        if self.is_proj:
            # 1x1 projection heads (channels reduced by `groups`).
            self.proj6 = conv2DBatchNormRelu(in_channels=128,k_size=1,n_filters=128//groups, padding=0,stride=1)
            self.proj5 = conv2DBatchNormRelu(in_channels=128,k_size=1,n_filters=128//groups, padding=0,stride=1)
            self.proj4 = conv2DBatchNormRelu(in_channels=128,k_size=1,n_filters=128//groups, padding=0,stride=1)
            self.proj3 = conv2DBatchNormRelu(in_channels=64, k_size=1,n_filters=64//groups, padding=0,stride=1)
            #self.proj2 = conv2DBatchNormRelu(in_channels=64, k_size=1,n_filters=64//groups, padding=0,stride=1)
        # Kaiming-style init for all conv weights; zero biases where present.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if hasattr(m.bias,'data'):
                    m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1):
        # Build one residual stage of `blocks` units; the first unit may
        # downsample through a 1x1 conv + BN projection shortcut.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, planes * block.expansion,
                                                 kernel_size=1, stride=stride, bias=False),
                                       nn.BatchNorm2d(planes * block.expansion),)
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        # H, W -> H/2, W/2
        conv1 = self.convbnrelu1_1(x)
        conv1 = self.convbnrelu1_2(conv1)
        conv1 = self.convbnrelu1_3(conv1)
        ## H/2, W/2 -> H/4, W/4
        pool1 = F.max_pool2d(conv1, 3, 2, 1)
        # H/4, W/4 -> H/16, W/16
        rconv3 = self.res_block3(pool1)
        conv4 = self.res_block5(rconv3)
        conv5 = self.res_block6(conv4)
        conv6 = self.res_block7(conv5)
        conv6 = self.pyramid_pooling(conv6)
        # Decoder: upsample, conv, concat with skip, fuse with iconv.
        conv6x = F.upsample(conv6, [conv5.size()[2],conv5.size()[3]],mode='bilinear')
        concat5 = torch.cat((conv5,self.upconv6[1](conv6x)),dim=1)
        conv5 = self.iconv5(concat5)
        conv5x = F.upsample(conv5, [conv4.size()[2],conv4.size()[3]],mode='bilinear')
        concat4 = torch.cat((conv4,self.upconv5[1](conv5x)),dim=1)
        conv4 = self.iconv4(concat4)
        conv4x = F.upsample(conv4, [rconv3.size()[2],rconv3.size()[3]],mode='bilinear')
        concat3 = torch.cat((rconv3,self.upconv4[1](conv4x)),dim=1)
        conv3 = self.iconv3(concat3)
        #conv3x = F.upsample(conv3, [pool1.size()[2],pool1.size()[3]],mode='bilinear')
        #concat2 = torch.cat((pool1,self.upconv3[1](conv3x)),dim=1)
        #conv2 = self.iconv2(concat2)
        if self.is_proj:
            proj6 = self.proj6(conv6)
            proj5 = self.proj5(conv5)
            proj4 = self.proj4(conv4)
            proj3 = self.proj3(conv3)
        #    proj2 = self.proj2(conv2)
        #    return proj6,proj5,proj4,proj3,proj2
            return proj6,proj5,proj4,proj3
        else:
        #    return conv6, conv5, conv4, conv3, conv2
            return conv6, conv5, conv4, conv3
class bfmodule(nn.Module):
    def __init__(self, inplanes, outplanes):
        """Hourglass refinement module: project the input to 64 channels,
        encode through four stride-2 residual stages, pyramid-pool at the
        coarsest scale, then decode back up; `outplanes`-channel prediction
        heads (proj6..proj3, iconv2) exist for every scale."""
        super(bfmodule, self).__init__()
        # 1x1 projection of the incoming feature map down to 64 channels.
        self.proj = conv2DBatchNormRelu(in_channels=inplanes,k_size=1,n_filters=64,padding=0,stride=1)
        self.inplanes = 64
        # Vanilla Residual Blocks (each halves resolution)
        self.res_block3 = self._make_layer(residualBlock,64,1,stride=2)
        self.res_block5 = self._make_layer(residualBlock,64,1,stride=2)
        self.res_block6 = self._make_layer(residualBlock,64,1,stride=2)
        self.res_block7 = self._make_layer(residualBlock,128,1,stride=2)
        self.pyramid_pooling = pyramidPooling(128, levels=3)
        # Iconvs: upconvX convs the upsampled coarse map, iconvX fuses it
        # with the corresponding skip connection.
        self.upconv6 = conv2DBatchNormRelu(in_channels=128, k_size=3, n_filters=64,
                                           padding=1, stride=1)
        self.upconv5 = conv2DBatchNormRelu(in_channels=64, k_size=3, n_filters=32,
                                           padding=1, stride=1)
        self.upconv4 = conv2DBatchNormRelu(in_channels=64, k_size=3, n_filters=32,
                                           padding=1, stride=1)
        self.upconv3 = conv2DBatchNormRelu(in_channels=64, k_size=3, n_filters=32,
                                           padding=1, stride=1)
        self.iconv5 = conv2DBatchNormRelu(in_channels=128, k_size=3, n_filters=64,
                                          padding=1, stride=1)
        self.iconv4 = conv2DBatchNormRelu(in_channels=96, k_size=3, n_filters=64,
                                          padding=1, stride=1)
        self.iconv3 = conv2DBatchNormRelu(in_channels=96, k_size=3, n_filters=64,
                                          padding=1, stride=1)
        self.iconv2 = nn.Sequential(conv2DBatchNormRelu(in_channels=96, k_size=3, n_filters=64,
                                                        padding=1, stride=1),
                                    nn.Conv2d(64, outplanes,kernel_size=3, stride=1, padding=1, bias=True))
        # Per-scale prediction heads.
        self.proj6 = nn.Conv2d(128, outplanes,kernel_size=3, stride=1, padding=1, bias=True)
        self.proj5 = nn.Conv2d(64, outplanes,kernel_size=3, stride=1, padding=1, bias=True)
        self.proj4 = nn.Conv2d(64, outplanes,kernel_size=3, stride=1, padding=1, bias=True)
        self.proj3 = nn.Conv2d(64, outplanes,kernel_size=3, stride=1, padding=1, bias=True)
        # Kaiming-style init for conv weights; zero biases where present.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if hasattr(m.bias,'data'):
                    m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
proj = self.proj(x) # 4x
rconv3 = self.res_block3(proj) #8x
conv4 = self.res_block5(rconv3) #16x
conv5 = self.res_block6(conv4) #32x
conv6 = self.res_block7(conv5) #64x
conv6 = self.pyramid_pooling(conv6) #64x
pred6 = self.proj6(conv6)
conv6u = F.upsample(conv6, [conv5.size()[2],conv5.size()[3]], mode='bilinear')
concat5 = torch.cat((conv5,self.upconv6(conv6u)),dim=1)
conv5 = self.iconv5(concat5) #32x
pred5 = self.proj5(conv5)
| |
#Python 2.7.9 (default, Apr 5 2015, 22:21:35)
# full env in environment.yml
import sys, os
'''
This is a full aggregation of the Pulsar Hunters project, including user weighting.
Note it's quite a simple project - basically one Yes/No question - and there is gold-standard data, so the weighting is relatively straightforward and the aggregation is just determining a single fraction for each subject.
For an example of an aggregation of a much more complex question tree, check out scripts for Galaxy Zoo. The user weighting in that project is also completely different.
Hopefully this is well-enough commented below to be useful for others.
--BDS
'''
# file with raw classifications (csv) needed
# put this way up here so if there are no inputs we exit quickly before even trying to load everything else
#
# NOTE: outfile_default must be defined *before* the usage message below
# references it; previously it was only assigned further down the file, so a
# missing-argument run raised NameError instead of printing the usage text.
outfile_default = 'pulsar_aggregations.csv'
try:
    classfile_in = sys.argv[1]
except IndexError:
    # no input file given: print usage and bail out
    #classfile_in = 'pulsar-hunters-classifications_first500k.csv'
    # just a shout-out to whoever changed Panoptes so that the export filenames
    # are human-readable instead of their previous format. Thank you
    #classfile_in = 'data/2e3d12a2-56ca-4d1f-930a-9ecc7fd39885.csv'
    print("\nUsage: %s classifications_infile [weight_class aggregations_outfile]" % sys.argv[0])
    print(" classifications_infile is a Zooniverse (Panoptes) classifications data export CSV.")
    print(" weight_class is 1 if you want to calculate and apply user weightings, 0 otherwise.")
    print(" aggregations_outfile is the name of the file you want written. If you don't specify,")
    print(" the filename is %s by default." % outfile_default)
    sys.exit(0)
import numpy as np # using 1.10.1
import pandas as pd # using 0.13.1
#import datetime
#import dateutil.parser
import json
############ Define files and settings below ##############
# default outfile
outfile_default = 'pulsar_aggregations.csv'
# prefix for the ranked-subjects output file
# (presumably a date string gets appended where this is used -- not shown here)
rankfile_stem = 'subjects_ranked_by_weighted_class_asof_'
# file with tags left in Talk, for value-added columns below
talk_export_file = "helperfiles/project-764-tags_2016-01-15.json"
# file with master list between Zooniverse metadata image filename (no source coords) and
# original filename with source coords and additional info
# also I get to have a variable that uses "filename" twice where each means a different thing
# a filename for a file full of filenames #alliterationbiyotch
filename_master_list_filename = "helperfiles/HTRU-N_sets_keys.csv"
# this is a list of possible matches to known pulsars that was done after the fact so they
# are flagged as "cand" in the database instead of "known" etc.
poss_match_file = 'helperfiles/PossibleMatches.csv'
# later we will select on tags by the project team and possibly weight them differently
# note I've included the moderators and myself (though I didn't tag anything).
# Also note it's possible to do this in a more general fashion using a file with project users and roles
# However, hard-coding seemed the thing to do given our time constraints (and the fact that I don't think
# you can currently export the user role file from the project builder)
project_team = 'bretonr jocelynbb spindizzy Simon_Rookyard <NAME>_ilie jamesy23 <NAME> walkcr <NAME> benjamin_shaw bhaswati djchampion jwbmartin bstappers ElisabethB Capella05 vrooje'.split()
# define the active workflow - we will ignore all classifications not on this workflow
# we could make this an input but let's not get too fancy for a specific case.
# for beta test
#active_workflow_id = 1099
#active_workflow_major = 6
# for live project
active_workflow_id = 1224
active_workflow_major = 4
# do we want sum(weighted vote count) = sum(raw vote count)?
normalise_weights = True
# do we want to write an extra file with just classification counts and usernames
# (and a random color column, for treemaps)?
counts_out = True
counts_out_file = 'class_counts_colors.csv'
############ Set the other inputs now ###############
# Whether to compute and apply user weights (1) or not (0).
# Narrow exception types: the old bare `except:` also swallowed
# KeyboardInterrupt/SystemExit and hid genuinely bad input.
try:
    apply_weight = int(sys.argv[2])
except (IndexError, ValueError):
    # argument missing or not an integer: default to unweighted
    apply_weight = 0
# Output filename, optional third argument.
try:
    outfile = sys.argv[3]
except IndexError:
    outfile = outfile_default
#################################################################################
#################################################################################
#################################################################################
# This is the function that actually does the aggregating
def aggregate_class(grp):
    """Aggregate one subject's classifications into vote fractions.

    Parameters
    ----------
    grp : DataFrame (or groupby group) with one row per classification and
        columns 'pulsar_classification', 'count', 'weight',
        'subject_type', 'filename'.

    Returns
    -------
    pd.Series with, per answer, the raw fraction (p_<answer>) and the
    weighted fraction (p_<answer>_weight), plus total raw/weighted counts,
    the subject type and the filename. Answers nobody gave come back NaN.
    """
    # translate the group to a dataframe because FML if I don't (some indexing etc is different)
    thegrp = pd.DataFrame(grp)
    # aggregating is a matter of grouping by different answers and summing the counts/weights
    byans = thegrp.groupby('pulsar_classification')
    ans_ct_tot = byans['count'].aggregate('sum')
    ans_wt_tot = byans['weight'].aggregate('sum')
    # we want fractions eventually, so we need denominators
    count_tot = np.sum(ans_ct_tot)  # we could also do len(thegrp)
    weight_tot = np.sum(ans_wt_tot)
    # raw and weighted vote fractions per answer, plus the counts
    class_agg = {}
    class_agg['count_unweighted'] = count_tot
    class_agg['count_weighted'] = weight_tot
    class_agg['subject_type'] = thegrp.subject_type.unique()[0]
    class_agg['filename'] = thegrp.filename.unique()[0]
    # iterate the grouped index directly -- it is exactly the set of answers
    # present, so no second pass over the raw column is needed
    for a in ans_ct_tot.index:
        # don't be that jerk who labels things with "p0" or otherwise useless internal indices.
        # Use the text of the response next to this answer choice in the project builder (but strip spaces)
        raw_frac_label = ('p_'+a).replace(' ', '_')
        wt_frac_label = ('p_'+a+'_weight').replace(' ', '_')
        class_agg[raw_frac_label] = ans_ct_tot[a]/float(count_tot)
        class_agg[wt_frac_label] = ans_wt_tot[a]/float(weight_tot)
    # oops, this is hard-coded so that there's Yes and No as answers - sorry to those trying to generalise
    col_order = ["filename", "p_Yes", "p_No", "p_Yes_weight", "p_No_weight",
                 "count_unweighted", "count_weighted", "subject_type"]
    # reindex (not []) so a unanimous subject -- where one of p_Yes/p_No was
    # never computed -- yields NaN for the missing fraction instead of a
    # KeyError from label-indexing with absent labels.
    return pd.Series(class_agg).reindex(col_order)
#################################################################################
#################################################################################
#################################################################################
# The new weighting assignment function allows the user to choose between different weighting schemes
# though note the one in this function is not preferred for reasons explained below.
def assign_weight_old(seed):
    """Map a classifier's gold-standard seed score to a vote weight.

    The weight is 1.0025**seed clamped to [0.05, 3.0]; a seed of
    exactly 0 yields the neutral weight 1.0.
    """
    # keep the two seed cases separate because we might want to use a
    # different base for each
    if seed < 0.:
        floor = 0.05
        return max(floor, 1.0025 ** seed)
    if seed > 0:
        ceiling = 3.0
        return min(ceiling, 1.0025 ** seed)
    # seed == 0: no information (or a perfectly balanced record).
    return 1.0
# assigns a weight based on a seed parameter
# The weight is assigned using the seed as an exponent and the number below as the base.
# The number is just slightly offset from 1 so that it takes many classifications for
# a user's potential weight to cap out at the max weight (3) or bottom out at the min (0.05).
# Currently there are 641 "known" pulsars in the DB so the base of 1.0025 is largely based on that.
# Update: there are now about 5,000 simulated pulsars in the subject set as well, and they have a
# much higher retirement limit, so that more people will have classified them and we have more info.
# Note I'd rather this did a proper analysis with a confusion matrix etc but under a time crunch
# we went with something simpler.
def assign_weight(q, which_weight):
# the floor weight for the case of which_weight == 2
# i.e. someone who has seed = 0 will have this
# seed = 0 could either be equal numbers right & wrong, OR that we don't have any information
c0 = 0.5
seed = q[1].seed
n_gs = q[1].n_gs
# Two possible weighting schemes:
# which_weight == 1: w = 1.0025^(seed), bounded between 0.05 and 3.0
# which_weight == 2: w = (1 + log n_gs)^(seed/n_gs), bounded between 0.05 and 3.0
#
# Weighting Scheme 1:
# this is an okay weighting scheme, but it doesn't account for the fact that someone might be prolific
# but not a very good classifier, and those classifiers shouldn't have a high weight.
# Example: Bob does 10000 gold-standard classifications and gets 5100 right, 4900 wrong.
# In this weighting scheme, Bob's weighting seed is +100, which means a weight of 1.0025^100 = 1.3,
# despite the fact that Bob's classifications are consistent with random within 1%.
# The weighting below this one would take the weight based on 100/10000, which is much better.
if which_weight == 1:
# keep the two seed cases separate because we might want to use a different base for each
if seed < 0.:
return max([0.05, pow(1.0025, seed)])
elif seed > 0:
return min([3.0, pow(1.0025, seed)])
else:
return 1.0
elif which_weight == 2:
if n_gs < 1: # don't divide by or take the log of 0
# also if they didn't do any gold-standard classifications assume they have the default weight
return c0
else:
# note the max of 3 is unlikely to be reached, but someone could hit the floor.
return min([3.0, max([0.05, c0*pow((1.0 + np.log10(n_gs)), (float(seed)/float(n_gs)))])])
else:
# unweighted - so maybe don't even enter this function if | |
<gh_stars>1-10
'''
Class for transforming a lexed list of tokens into a parse tree
for a WoW macro.
'''
import re
import sys
from sys import exit
# Macro modules
from macro.exceptions import *
from macro.logger import logger
from macro.util import clean_macro
from macro.lex.lexer import GLOBAL_MACRO_TOKENIZER, MacroCommandTokenizer
from macro.lex.token import MacroToken
from macro.lex.ids import *
# The parser class. Evaluates the tokens parsed out of a macro to
# form a parse tree.
class MacroParser:
'''MacroParser
    Simple parser for the wow macro language. Uses recursive
    forward-looking parsing.
    Here is the EBNF definition (although I'm pretty sure this is
    incomplete):
command = "/", command-verb, [ {command-object, ";" } command-object] ]
command-verb = ? any secure command word ?
command-object = { condition } parameters
parameters = ? anything which may be passed to the command word ?
condition = "[" condition-phrase { "," condition-phrase } "]"
condition-phrase = [ "no" ], option-word, [ ":" option-argument { "/" option-argument } ]
    option-argument  = ? any one-word option, such as "shift", "ctrl", "target", "1", "2" ?
'''
# Constructor.
    def __init__(self, lexer_obj=None, macro=None, debug=False):
        '''Set up the parser, optionally lexing+parsing *macro* right away.

        lexer_obj -- a MacroCommandTokenizer to use; defaults to the
                     shared GLOBAL_MACRO_TOKENIZER instance.
        macro     -- if given, immediately run lex_and_parse_macro on it.
        debug     -- enables verbose debug logging (also pushed into the
                     global lexer when that one is used).

        Raises ConfigError when lexer_obj is not a MacroCommandTokenizer.
        '''
        self.DEBUG = debug
        # Save the lexer object
        if lexer_obj is None:
            self.__tokenizer = GLOBAL_MACRO_TOKENIZER
            self.__tokenizer.DEBUG = debug
        elif isinstance(lexer_obj, MacroCommandTokenizer):
            self.__tokenizer = lexer_obj
        else:
            raise ConfigError("Requires a valid lexer object!")
        # The token currently under consideration by the parser.
        self.__current_token = None
        # Parse the macro if we got one.
        if macro is not None: self.lex_and_parse_macro(macro)
# Lex the macro with the global lexer instance.
# This is just a shortcut method.
def lex_macro(self, macro_input_line, index):
self.macro_line = clean_macro(macro_input_line)
if self.macro_line is None or self.macro_line.isspace() or len(self.macro_line) < 1:
raise UserInputError("Macro input blank. Require valid macro.")
if self.DEBUG: logger.debug("About to lex: %s" % (self.macro_line))
self.__tokenizer.reset(self.macro_line, index)
if self.DEBUG: logger.debug("Tokens:\n%s" % str(self.__tokenizer))
# Kick off into recursive top-down parser for a SINGLE MACRO
# COMMAND. For a given macro line, gets the first token, and then
# calls into the recursive parser. This is the public interface
# for the parser.
def parse_macro(self):
if not self.__tokenizer.ready:
raise ConfigError("Parser called without lexer ready.")
if self.DEBUG: logger.debug("About to parse: %s" % (self.macro_line))
# Advance to the first token
self.__current_token = self.__tokenizer.next()
# Kick off the recursive top-down descent.
return self.__handle_command()
# One-stop parsing solution
    def lex_and_parse_macro(self, macro_input_line, index=0):
        '''Lex *macro_input_line* then parse it; returns the parse tree.'''
        self.lex_macro(macro_input_line, index)
        return self.parse_macro()
# Accessors to the tokenizer.
def mark_token_error(self, id, index=NULL_TOKEN_ID):
self.__tokenizer[id].error_index = index
def mark_tokens_useless(self, start, end=-1, tok_type=None):
# Define helper to remove repeated code.
# If tok_type filter is specified, check it.
def mark_toks(t, t_f=None):
if t_f:
if t.is_type(t_f):
t.strike = True
t.js = False
else:
t.strike = True
t.js = False
# If end is < 0 we mark everything from start on.
if end < 0:
for t in self.__tokenizer[start:]:
mark_toks(t, tok_type)
else:
for t in self.__tokenizer[start:end]:
mark_toks(t, tok_type)
def get_command_html(self):
if self.__tokenizer.ready:
return self.__tokenizer.get_command_html()
return ''
def get_command_str(self):
if self.__tokenizer.ready:
return self.__tokenizer.get_command_str()
return ''
def get_useless_tokens(self):
if self.__tokenizer.ready:
return self.__tokenizer.get_useless_tokens()
return ''
    def get_tokens(self):
        '''Return the full token list from the underlying tokenizer.'''
        return self.__tokenizer.get_tokens()
# Convienience method to get a default target object. If no target
# was specified, save a "fake" target that just refers to the
# current target. This makes interpretation MUCH easier later.
def get_default_target(self, target):
''' Create a target tuple from a string target name. '''
result = self.__tokenizer.get_default_target(target)
if not result:
return (None, None, [])
# Mark that these tokens were created while
# parsing, and did not come from the original
# lexed set.
for r in result:
r.added = True
r.js = False
return (result[0], result[1], result[2:])
# Given a parameter structure, create and return a target
# structure. Input is a list if (toggle, param) tuples.
# Only the first tuple in the list is considered.
    def get_target_from_param(self, param):
        ''' Create a target tuple from a param list.

        *param* is a list of (toggle, param-token) tuples; only the
        first tuple is considered.  Returns (target, gets, [target
        tokens]), or (None, None, None) for an empty param list.

        Raises ParseErrorUnexpectedToken when the target word requires
        a numeric argument that was not supplied.
        '''
        if not param: return None,None,None,
        t,p = param[0]
        # [target, gets, tar, [args], [targetof...]]
        result = self.__tokenizer.get_default_target(p.data)
        # If the target command requires an argument, check for it.
        if result[2].attrs.req_num_arg and len(result) < 4:
            raise ParseErrorUnexpectedToken(None,
                                            MacroToken(TARGET_OBJ),
                                            result[2])
        # Adjust the positions and ids of the targets so they line up
        # with the original parameter token they were derived from.
        tgt_list = []
        for tgt in result[2:]:
            tgt.start = tgt.start + p.start
            tgt.end = tgt.end + p.end
            tgt.token_id = p.token_id
            tgt_list.append(tgt)
        # Return target tuple
        return (result[0], result[1], tgt_list)
# Use a parameter as a target unit. Check for
# any illegal targeting (right now, just totems)
# Returns a list of tokens for rendering. Raises
# an exception on error.
def use_param_as_target(self, verb, param):
if not param: return []
for t,p in param:
words = p.data.split()
if len(words) > 1 and any([w.lower() == 'totem' for w in words]):
raise ParseErrorTargetTotem(verb,
p)
return verb.attrs.param_function(param)
#
# Private Methods
#
# Advance token stream.
def __consume_token(self, expected_type, exception=True):
'''
Check the current token and consume if it is the type
requested. Tosses an exception if there is no match
unless exception is set to False.
'''
# Get the next token.
if (self.__current_token is None) or \
(not self.__current_token.is_type(expected_type)):
# If we're not silent, throw an exception
if exception:
err = "Expected: %s, received: %s" % (expected_type,
self.__current_token.token_type)
if self.DEBUG: logger.debug(err)
raise ParseErrorUnexpectedToken(None,
err,
self.__current_token)
else:
return None
# Consume the token by setting the current_token to the next one.
consumed_token = self.__current_token
if len(self.__tokenizer) > 0:
self.__current_token = self.__tokenizer.next()
else:
# If we're at the end of a macro, assign a null token.
self.__current_token = MacroToken(END_OF_COMMAND)
if self.DEBUG: logger.debug("Popping token: %s" % (consumed_token))
if self.DEBUG: logger.debug("Current token: %s" % (self.__current_token))
return consumed_token
# command = "/", command-verb, [ {command-object, ";" } command-object] ]
def __handle_command(self):
'''
Handles a complete macro command.
Returns:
(command_verb, [command_objects])
'''
if self.DEBUG: logger.debug(" self.__handle_command")
# The command must start with a command verb.
command_verb = self.__handle_command_verb()
# Next come any number of command objects.
# This is really what a macro command is: a list of objects.
# Every command object gets its own copy of the command,
# as every command object contains different param
objects = [self.__handle_command_object(command_verb)]
while self.__current_token.is_type(ELSE):
objects.append(self.__handle_command_object(command_verb,
self.__consume_token(ELSE)))
return (command_verb, objects)
# command-object = { condition } parameters
def __handle_command_object(self, command_verb, else_token=None):
'''
Handles a command object, which is the parameter and any
conditions pertaining to it being passed to the verb.
Because the target for the command is set in the conditions,
each condition is grouped with a target.
Returns:
(else_token, [(target, condition)..], modifier, parameter)
Where else_token is a token or None, target and parameter are
lists or None, and condition is a tuple of (if, [phrases],
endif) or None.
'''
if self.DEBUG: logger.debug(" self.__handle_command_object")
# If the command is not secure (doesn't accept options), then
# everything is a parameter.
if command_verb.attrs.secure:
target_and_condition_tuples = []
# Apparently its legal to repeat the verb before
# every command object--this worked in my tests.
# So, attempt to consume another set of verb, modifer(=None).
same_verb = self.__handle_command_verb(req=False)
if same_verb is not None:
if same_verb.data != command_verb.data:
# Verb must be the same as the original verb, or this
# macro fails.
raise ParseErrorMultipleVerbs(command_verb,
same_verb)
# Flag the extra verb as meaningless.
same_verb.strike = True
# If there are no conditions, then a modifier can follow
# the command. Attempt to parse it here so we can throw
# an accurate exception if its put before conditions.
modifier = None
if self.__current_token.is_type(MODIFIER):
modifier = self.__handle_command_modifer(command_verb)
# Empty command objects are still valid command objects.
if self.__current_token.is_type(IF):
# If we have a modifier already, oops.
if modifier:
raise ParseErrorResetBeforeConditions(command_verb,
modifier[0],
None)
# If there is a condition, take care of it. There can
# be multiple conditions. Multiple conditions are
# intrinisically an OR.
while self.__current_token.is_type(IF):
# Each condition can specify both a conjunction
# of tests AND set the target the tests are run
# against. Further, that target is passed to
# the command along with the | |
#!/usr/bin/env python
# coding=utf-8
# author=<NAME>
"""SFTPClone tests."""
# Simply launch me by using nosetests and I'll do the magic.
# I require paramiko
# Python 2.7 backward compatibility
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import functools
import logging
import os
import random
import select
import socket
import threading
import unicodedata
from os.path import join
from shutil import rmtree, copy
from stat import S_ISDIR
import paramiko
from nose import with_setup
from nose.tools import assert_raises, raises, eq_
from sftpclone.sftpclone import SFTPClone, main, parse_username_password_hostname, get_ssh_agent_keys
from sftpclone.t.stub_sftp import StubServer, StubSFTPServer
from sftpclone.t.utils import t_path, list_files, file_tree, \
suppress_logging, capture_sys_output, override_env_variables, override_ssh_auth_env
try: # Python >= 3.3
import unittest.mock as mock
except ImportError:
import mock # Python 2, external module.
# Directory layout used by the stub SFTP server and the tests.
REMOTE_ROOT = t_path("server_root")
REMOTE_FOLDER = "server_folder"
REMOTE_PATH = join(REMOTE_ROOT, REMOTE_FOLDER)
LOCAL_FOLDER_NAME = "local_folder"
LOCAL_FOLDER = t_path(LOCAL_FOLDER_NAME)
# Shortcut: NFKD Unicode normalization.
_u = functools.partial(unicodedata.normalize, "NFKD")
# Set to request shutdown of the SFTP server thread (see teardown_module).
event = threading.Event()
# attach existing loggers (use --nologcapture option to see output)
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
def _start_sftp_server():
    """Start the SFTP local server.

    Listens on localhost:2222 and serves incoming connections with the
    stub paramiko SFTP server until the module-level ``event`` is set.
    Intended to run in a background thread (see setup_module).
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.setblocking(False)
    sock.bind(('localhost', 2222))
    sock.listen(10)
    reads = {sock}
    others = set()
    while not event.is_set():
        # 1-second select timeout so the shutdown event is polled regularly.
        ready_to_read, _, _ = select.select(reads, others, others, 1)
        if sock in ready_to_read:
            client_socket, address = sock.accept()
            # Each connection gets its own transport with the stub SFTP
            # subsystem bound to it.
            ts = paramiko.Transport(client_socket)
            host_key = paramiko.RSAKey.from_private_key_file(t_path('server_id_rsa'))
            ts.add_server_key(host_key)
            server = StubServer()
            ts.set_subsystem_handler('sftp', paramiko.SFTPServer, StubSFTPServer)
            ts.start_server(server=server)
    sock.close()
def setup_module():
    """Setup in a new thread the SFTP local server."""
    os.mkdir(REMOTE_ROOT)
    server_thread = threading.Thread(target=_start_sftp_server, name="server")
    server_thread.start()
def teardown_module():
    """
    Stop the SFTP server by setting its event.
    Clean remaining directories (in case of failures).
    """
    event.set()
    # Remove children before the root; ignore anything already gone.
    for directory in (REMOTE_PATH, LOCAL_FOLDER, REMOTE_ROOT):
        rmtree(directory, ignore_errors=True)
def setup_test():
    """Create the needed directories."""
    for directory in (REMOTE_PATH, LOCAL_FOLDER):
        os.mkdir(directory)
setup_test.__test__ = False
def teardown_test():
    """Clean the created directories."""
    logging.info(list_files(LOCAL_FOLDER))
    logging.info(list_files(REMOTE_PATH))
    for directory in (REMOTE_PATH, LOCAL_FOLDER):
        rmtree(directory, ignore_errors=True)
teardown_test.__test__ = False
def _sync(
        password=False, fix=False,
        exclude=None, ssh_agent=False,
        delete=True, identity_files=None,
):
    """Launch sync and do basic comparison of dir trees.

    Connects to the stub server on 127.0.0.1:2222, either with the test
    key pair (default) or with username/password when *password* is set.
    """
    if not password:
        # BUG FIX: the host literal here was the corrupted string
        # '12172.16.31.10'; the stub server (and every other test in
        # this module) uses 127.0.0.1 on port 2222.
        remote = 'test@127.0.0.1:' + '/' + REMOTE_FOLDER
    else:
        remote = 'test:secret@127.0.0.1:' + '/' + REMOTE_FOLDER
    if identity_files is None:
        identity_files = [t_path("id_rsa")]
    SFTPClone(
        LOCAL_FOLDER,
        remote,
        port=2222,
        fix_symlinks=fix,
        identity_files=identity_files,
        exclude_file=exclude,
        ssh_agent=ssh_agent,
        delete=delete
    ).run()
    # With exclusions or delete disabled the trees legitimately differ.
    if not exclude and delete:
        # Check the directory trees
        assert file_tree(LOCAL_FOLDER)[LOCAL_FOLDER_NAME] == file_tree(REMOTE_PATH)[REMOTE_FOLDER]
_sync.__test__ = False
def _sync_argv(argv):
    """Launch the module's main with given argv and check the result."""
    argv.append("-o")  # allow unknown hosts
    main(argv)
    local_tree = file_tree(LOCAL_FOLDER)[LOCAL_FOLDER_NAME]
    remote_tree = file_tree(REMOTE_PATH)[REMOTE_FOLDER]
    assert local_tree == remote_tree
_sync_argv.__test__ = False
def test_get_ssh_agent_keys():
    """Test getting SSH keys from the SSH agent."""
    logger = logging.getLogger('_')
    # BUG FIX: addHandler needs a handler *instance*; passing the class
    # object (logging.NullHandler) breaks any logging via this logger.
    logger.addHandler(logging.NullHandler())
    # Non-empty key tuples must be returned verbatim along with the agent.
    truth = {('A', 'B', 'C'), ('K',)}
    for keys in truth:
        with mock.patch('paramiko.agent.Agent', autospec=paramiko.agent.Agent) as mocked_agent:
            mocked_agent.return_value.get_keys.return_value = keys
            agent, agent_keys = get_ssh_agent_keys(logger)
            assert agent is mocked_agent.return_value
            assert agent_keys == keys
    # An agent holding no keys must yield agent_keys is None.
    with mock.patch('paramiko.agent.Agent', autospec=paramiko.agent.Agent) as mocked_agent:
        keys = []
        mocked_agent.return_value.get_keys.return_value = keys
        agent, agent_keys = get_ssh_agent_keys(logger)
        assert agent is mocked_agent.return_value
        assert agent_keys is None
    # An SSHException while talking to the agent must yield falsy results.
    with mock.patch('paramiko.agent.Agent', autospec=paramiko.agent.Agent) as mocked_agent:
        def _raise_paramiko_exception():
            raise paramiko.SSHException
        mocked_agent.return_value.get_keys.side_effect = _raise_paramiko_exception
        agent, agent_keys = get_ssh_agent_keys(logger)
        assert not agent
        assert not agent_keys
def test_parse_username_password_hostname():
    """Test parsing remote url from command line."""
    cases = {
        'foo:bar@bis:/test': ('foo', 'bar', 'bis', '/test'),
        'foo@bis:/test': ('foo', None, 'bis', '/test'),
        'bis:/test': (None, None, 'bis', '/test'),
        'a@b@bis:/test': ('a@b', None, 'bis', '/test'),
        'a@b:password@bis:/test': ('a@b', 'password', 'bis', '/test'),
    }
    for url, expected in cases.items():
        eq_(parse_username_password_hostname(url), expected)
    # Remote strings without a path component must be rejected.
    for bad_url in ('bis', 'bis:', '', ':'):
        assert_raises(AssertionError, parse_username_password_hostname, bad_url)
@with_setup(setup_test, teardown_test)
def test_cli_args():
    """Test CLI arguments."""
    # Suppress STDERR
    with capture_sys_output():
        # No remote at all (or only a local dir) must exit with usage.
        assert_raises(SystemExit, _sync_argv, [])
        assert_raises(SystemExit, _sync_argv, [LOCAL_FOLDER])
    # No username in the URL: relies on environment (overridden here).
    with override_env_variables():
        _sync_argv(
            [LOCAL_FOLDER,
             '127.0.0.1:' + '/' + REMOTE_FOLDER,
             '-f',
             '-k', t_path("id_rsa"),
             '-p', "2222",
             '-d',
             '-c', '/dev/null'
             ],
        )
    # Explicit username, key-based auth.
    _sync_argv(
        [LOCAL_FOLDER,
         'test@127.0.0.1:' + '/' + REMOTE_FOLDER,
         '-f',
         '-k', t_path("id_rsa"),
         '-p', "2222",
         '-d',
         '-c', '/dev/null'
         ],
    )
    # Username:password auth.
    _sync_argv(
        [LOCAL_FOLDER,
         'test:secret@127.0.0.1:' + '/' + REMOTE_FOLDER,
         '-p', "2222",
         '-d'
         ],
    )
    # Password auth with an explicit known_hosts file.
    _sync_argv(
        [LOCAL_FOLDER,
         'test:secret@127.0.0.1:' + '/' + REMOTE_FOLDER,
         '-p', "2222",
         '-n', t_path("known_hosts")
         ],
    )
    # Host alias resolved through an ssh config file.
    _sync_argv(
        [LOCAL_FOLDER,
         'backup:' + '/' + REMOTE_FOLDER,
         '-c', t_path("config"),
         # hard to insert relative path in cfg, so we have to cheat
         '-k', t_path("id_rsa"),
         '-d'
         ],
    )
@with_setup(setup_test, teardown_test)
def test_remote_tilde_home():
    """Test tilde expansion on remote end."""
    # Noise files on both sides so the final comparison is non-trivial.
    for name in ("bar", "bis"):
        os.open(join(LOCAL_FOLDER, name), os.O_CREAT)
        os.open(join(REMOTE_PATH, name), os.O_CREAT)
    sync = SFTPClone(
        LOCAL_FOLDER,
        remote_url='test@127.0.0.1:' + '~' + REMOTE_FOLDER,
        port=2222,
        identity_files=[t_path("id_rsa"), ]
    )
    sync.run()
    assert file_tree(LOCAL_FOLDER)[LOCAL_FOLDER_NAME] == file_tree(REMOTE_PATH)[REMOTE_FOLDER]
@with_setup(setup_test, teardown_test)
@raises(SystemExit)
def test_ssh_agent_failure():
    """Test ssh_agent failure with bad keys (default)."""
    # Keep the expected failure quiet: no logs, no stdout/stderr noise.
    with suppress_logging(), capture_sys_output():
        _sync(ssh_agent=True, identity_files=[])
@with_setup(setup_test, teardown_test)
def test_no_ssh_agent():
    """Test without a running SSH agent."""
    # Clearing the SSH auth env simulates "no agent"; keep logs quiet.
    with suppress_logging(), override_ssh_auth_env():
        _sync(ssh_agent=True)
@with_setup(setup_test, teardown_test)
def test_relative_link_to_inner_dir():
    """Test creation of a relative link to a subnode of the tree.

    dovecot.sieve -> sieve/filtri.sieve
    sieve/
        filtri.sieve
    """
    # Local root without any trailing slash (rstrip is a no-op otherwise).
    local_no_slash = LOCAL_FOLDER.rstrip("/")
    os.mkdir(join(LOCAL_FOLDER, "sieve"))
    source = join(LOCAL_FOLDER, "sieve", "filtri.sieve")
    os.open(source, os.O_CREAT)
    relative_source = source[len(local_no_slash) + 1:]
    os.symlink(relative_source, join(LOCAL_FOLDER, "dovecot.sieve"))
    _sync()
    # The remote link must keep the same relative target.
    eq_(relative_source, os.readlink(join(REMOTE_PATH, "dovecot.sieve")))
@with_setup(setup_test, teardown_test)
def test_already_relative_link_to_inner_dir():
    """Test creation of a relative link (that already exists) to a subnode of the tree.

    Locally:
        dovecot.sieve -> sieve/filtri.sieve
        sieve/
            filtri.sieve
    while on remote there is a stale link:
        dovecot.sieve -> foo
    """
    # Local root without any trailing slash (rstrip is a no-op otherwise).
    local_no_slash = LOCAL_FOLDER.rstrip("/")
    os.mkdir(join(LOCAL_FOLDER, "sieve"))
    source = join(LOCAL_FOLDER, "sieve", "filtri.sieve")
    os.open(source, os.O_CREAT)
    relative_source = source[len(local_no_slash) + 1:]
    os.symlink(relative_source, join(LOCAL_FOLDER, "dovecot.sieve"))
    # Pre-existing (wrong) link on the remote side must get replaced.
    os.symlink("foo", join(REMOTE_PATH, "dovecot.sieve"))
    _sync()
    eq_(relative_source, os.readlink(join(REMOTE_PATH, "dovecot.sieve")))
@with_setup(setup_test, teardown_test)
def test_exclude():
    """Test pattern exclusion handling."""
    excluded = {"foofolder", "foo", "foofile"}
    os.mkdir(join(LOCAL_FOLDER, "foofolder"))
    for name in ("file_one", "file_two", "foo", "foofile"):
        os.open(join(LOCAL_FOLDER, name), os.O_CREAT)
    _sync(exclude=t_path("exclude"))
    # None of the excluded names may have reached the remote side.
    assert not set(os.listdir(REMOTE_PATH)) & excluded
@with_setup(setup_test, teardown_test)
def test_inner_exclude():
    """Test pattern exclusion (with recursion) handling."""
    inner = join(LOCAL_FOLDER, "bar", "inner")
    os.mkdir(join(LOCAL_FOLDER, "bar"))
    os.mkdir(inner)
    os.open(join(LOCAL_FOLDER, "bar", "file_one"), os.O_CREAT)
    os.open(join(inner, "foo"), os.O_CREAT)
    os.open(join(inner, "bar"), os.O_CREAT)
    _sync(exclude=t_path("exclude"))
    # "foo" is filtered out even inside nested directories.
    eq_(set(os.listdir(join(REMOTE_PATH, "bar"))), {"file_one", "inner"})
    eq_(set(os.listdir(join(REMOTE_PATH, "bar", "inner"))), {"bar"})
@with_setup(setup_test, teardown_test)
def test_local_relative_link():
    """Test relative links creation/update (cases C/D)."""
    old_cwd = os.getcwd()
    os.chdir(LOCAL_FOLDER)  # relative links!
    # BUG FIX: restore the cwd even when the sync or an assertion fails,
    # otherwise one failure leaves the process chdir'd into a directory
    # the teardown removes, poisoning the rest of the suite.
    try:
        inside_symlinks = {
            "3": "afile",
            "5": "inner/foo"
        }
        outside_symlinks = {
            "4": "../foo"
        }
        for link_name, source in inside_symlinks.items():
            os.symlink(source, link_name)
        for link_name, source in outside_symlinks.items():
            os.symlink(source, link_name)
        normal_files = ("bar", "bis")  # just to add noise
        for f in normal_files:
            os.open(f, os.O_CREAT)
            os.open(join(REMOTE_PATH, f), os.O_CREAT)
        _sync()
        # Both inner and outer relative targets must survive verbatim.
        for link_name, source in inside_symlinks.items():
            assert os.readlink(join(REMOTE_PATH, link_name)) == source
        for link_name, source in outside_symlinks.items():
            assert os.readlink(join(REMOTE_PATH, link_name)) == source
    finally:
        os.chdir(old_cwd)
@with_setup(setup_test, teardown_test)
def test_local_absolute_link():
    """Test absolute links creation/update (cases A/B)."""
    inside_symlinks = {
        "3": "afile",  # case A
    }
    outside_symlinks = {
        "4": "/dev/null"  # case B
    }
    # otherwise absolute links will fail!
    os.mkdir(join(REMOTE_ROOT, "dev"))
    for link_name, source in inside_symlinks.items():
        os.symlink(join(LOCAL_FOLDER, source), join(LOCAL_FOLDER, link_name))
    for link_name, source in outside_symlinks.items():
        os.symlink(source, join(LOCAL_FOLDER, link_name))
    _sync(fix=True)
    # Inside links are re-rooted under REMOTE_PATH ...
    for link_name, source in inside_symlinks.items():
        eq_(os.readlink(join(REMOTE_PATH, link_name)), join(REMOTE_PATH, source))
    # ... outside links are re-rooted under REMOTE_ROOT.
    for link_name, source in outside_symlinks.items():
        eq_(os.readlink(join(REMOTE_PATH, link_name))[len(REMOTE_ROOT):], source)
@with_setup(setup_test, teardown_test)
def test_orphaned_remote_symlink():
    """Test deletion of orphaned remote links (not existing in local folder)."""
    # Same plain file on both sides, plus a remote-only link to it.
    for folder in (REMOTE_PATH, LOCAL_FOLDER):
        os.open(join(folder, "file"), os.O_CREAT)
    os.symlink(join(REMOTE_PATH, "file"), join(REMOTE_PATH, "link"))
    # _sync's internal tree comparison fails if the orphan link survives.
    _sync(fix=True)
@with_setup(setup_test, teardown_test)
def test_directory_upload():
    """Test upload/creation of whole directory trees."""
    # add some dirs to both the local/remote directories
    local_dirs = {str(f) for f in range(8)}
    remote_dirs = set(random.sample(local_dirs, 3))
    # NOTE(review): os.open(..., os.O_CREAT) creates a *file* on the
    # remote side where a directory of the same name exists locally --
    # the sync must replace it with a directory (asserted below).
    spurious_dir = join(
        REMOTE_PATH, random.choice(tuple(local_dirs - remote_dirs)))
    os.open(spurious_dir, os.O_CREAT)
    for f in local_dirs:
        os.mkdir(join(LOCAL_FOLDER, f))
    for f in remote_dirs:
        os.mkdir(join(REMOTE_PATH, f))
    # Locally different is folder, but remotely is a file
    f = "different"
    remote_dirs |= {f}
    os.open(join(REMOTE_PATH, f), os.O_CREAT)
    local_dirs |= {f}
    os.mkdir(join(LOCAL_FOLDER, f))
    # Populate a couple of local dirs so recursive upload is exercised.
    full_dirs = set(random.sample(local_dirs, 2))
    for f in full_dirs:
        for i in range(random.randint(1, 10)):
            os.open(join(LOCAL_FOLDER, f, str(i)), os.O_CREAT)
    _sync()
    # The spurious remote file must have become a directory ...
    assert S_ISDIR(os.stat(spurious_dir).st_mode)
    # ... and populated dirs must match exactly on both sides.
    for d in full_dirs:
        assert os.listdir(join(LOCAL_FOLDER, d)) == os.listdir(
            join(REMOTE_PATH, d))
@with_setup(setup_test, teardown_test)
def test_file_upload():
"""
Test upload/creation of files.
Upload files present in the local directory but not in the remote one.
"""
# add some file to both the local/remote directories
local_files = {str(f) for f in range(5)}
remote_files = set(random.sample(local_files, 3))
for f in local_files:
os.open(join(LOCAL_FOLDER, f), os.O_CREAT)
for f in remote_files:
os.open(join(REMOTE_PATH, f), os.O_CREAT)
local_files |= {"5"}
with open(join(LOCAL_FOLDER, "5"), 'w') as f:
print("This is the local file.", file=f)
remote_files |= {"5"}
with open(join(REMOTE_PATH, "5"), 'w') as f:
print("This is the remote file.", file=f)
local_files |= {"6"}
lf = join(LOCAL_FOLDER, "6")
with open(lf, 'w') as f:
print("This is another file.", file=f)
remote_files |= {"6"}
copy(lf, join(REMOTE_PATH, "6"))
local_files |= {"permissions"}
lf = join(LOCAL_FOLDER, "permissions")
os.open(lf, os.O_CREAT)
# Sync and check that missing files | |
'''expand the INCA Ecoli model to account for additional metabolites'''
query = stage02_isotopomer_query()
# get the xml model
cobra_model_sbml = ''
cobra_model_sbml = query.get_row_modelID_dataStage02IsotopomerModels(model_id_I);
# load the model
if cobra_model_sbml:
if cobra_model_sbml['file_type'] == 'sbml':
with open('data/cobra_model_tmp.xml','wb') as file:
file.write(cobra_model_sbml['model_file']);
file.close()
cobra_model = None;
cobra_model = create_cobra_model_from_sbml_file('data/cobra_model_tmp.xml', print_time=True);
elif cobra_model_sbml['file_type'] == 'json':
with open('data/cobra_model_tmp.json','wb') as file:
file.write(cobra_model_sbml['model_file']);
file.close()
cobra_model = None;
cobra_model = load_json_model('data/cobra_model_tmp.json');
else:
print('file_type not supported')
#get the atomMapping_reactions
atomMappingReactions = query.get_rows_mappingID_dataStage02IsotopomerAtomMappingReactions(mapping_id_I);
#change the mapping_id
for cnt,row in enumerate(atomMappingReactions):
atomMappingReactions[cnt]['mapping_id']=mapping_id_O;
#add in glucose transporters and intracellular glc
#get metabolites not in the model
met_row = {}
met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014',"atp_c");
atp = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
atp.charge = met_row['charge']
met_row = {}
met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014',"glc_DASH_D_c");
glc_c = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
glc_c.charge = met_row['charge']
met_row = {}
met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014',"glc_DASH_D_e");
glc_e = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'e')
glc_e.charge = met_row['charge']
glcext = Metabolite('glc_DASH_D_e.ext',met_row['formula'],met_row['met_name'],'e')
glcext.charge = met_row['charge']
glcpre = Metabolite('glc_DASH_D_e.pre',met_row['formula'],met_row['met_name'],'e')
glcpre.charge = met_row['charge']
#get metabolites in the model
pep = cobra_model.metabolites.get_by_id('pep_c')
pyr = cobra_model.metabolites.get_by_id('pyr_c')
g6p = cobra_model.metabolites.get_by_id('g6p_c')
#make EX_glc_LPAREN_e_RPAREN_
rxn_mets = {};
rxn_mets[glcext] = -1;
rxn_mets[glc_e] = 1;
rxn = Reaction('EX_glc_LPAREN_e_RPAREN_');
cobra_model.remove_reactions(['EX_glc_LPAREN_e_RPAREN_']);
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
cobra_model.repair();
#append the new atom mappings
row_tmp = {};
row_tmp['mapping_id']=mapping_id_O;
row_tmp['rxn_id']='EX_glc_LPAREN_e_RPAREN_';
row_tmp['rxn_description']='';
row_tmp['rxn_equation']='';
row_tmp['reactants_stoichiometry_tracked']=[-1]
row_tmp['products_stoichiometry_tracked']=[1]
row_tmp['reactants_ids_tracked']=['glc_DASH_D_e.ext']
row_tmp['products_ids_tracked']=['glc_DASH_D_e']
row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
row_tmp['reactants_mapping']=['abcdef']
row_tmp['products_mapping']=['abcdef']
row_tmp['used_']=True
row_tmp['comment_']='added'
atomMappingReactions.append(row_tmp);
##make EX_glc_LPAREN_e_RPAREN__pre
#rxn_mets = {};
#rxn_mets[glcpre] = -1;
#rxn_mets[glc_e] = 1;
#rxn = Reaction('EX_glc_LPAREN_e_RPAREN__pre');
#cobra_model.remove_reactions(['v60']);
#rxn.add_metabolites(rxn_mets);
#cobra_model.add_reactions([rxn]);
#cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
#cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
#cobra_model.repair();
##append the new atom mappings
#row_tmp = {};
#row_tmp['mapping_id']=mapping_id_O;
#row_tmp['rxn_id']='EX_glc_LPAREN_e_RPAREN__pre';
#row_tmp['rxn_description']='';
#row_tmp['rxn_equation']='';
#row_tmp['reactants_stoichiometry_tracked']=[-1]
#row_tmp['products_stoichiometry_tracked']=[1]
#row_tmp['reactants_ids_tracked']=['glc_DASH_D_e.pre']
#row_tmp['products_ids_tracked']=['glc_DASH_D_e']
#row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
#row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
#row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
#row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
#row_tmp['reactants_mapping']=['abcdef']
#row_tmp['products_mapping']=['abcdef']
#row_tmp['used_']=True
#row_tmp['comment_']='added'
#atomMappingReactions.append(row_tmp);
#make GLCptspp "glc_DASH_D_p + pep_c --> g6p_c + pyr_c"
rxn_mets = {};
rxn_mets[glc_e] = -1;
rxn_mets[pep] = -1;
rxn_mets[g6p] = 1;
rxn_mets[pyr] = 1;
rxn = Reaction('GLCptspp');
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
cobra_model.repair();
#append the new atom mappings
row_tmp = {};
row_tmp['mapping_id']=mapping_id_O;
row_tmp['rxn_id']='GLCptspp';
row_tmp['rxn_description']='';
row_tmp['rxn_equation']='';
row_tmp['reactants_stoichiometry_tracked']=[-1,-1]
row_tmp['products_stoichiometry_tracked']=[1,1]
row_tmp['reactants_ids_tracked']=['glc_DASH_D_e','pep_c']
row_tmp['products_ids_tracked']=['g6p_c','pyr_c']
row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"],["C", "C", "C"]]
row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"],["C", "C", "C"]]
row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5],[0, 1, 2]]
row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5],[0, 1, 2]]
row_tmp['reactants_mapping']=['abcdef','ghi']
row_tmp['products_mapping']=['abcdef','ghi']
row_tmp['used_']=True
row_tmp['comment_']='added'
atomMappingReactions.append(row_tmp);
#make GLCt2pp "glc_DASH_D_p + h_p --> glc_DASH_D_c + h_c"
rxn_mets = {};
rxn_mets[glc_e] = -1;
rxn_mets[glc_c] = 1;
rxn = Reaction('GLCt2pp');
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000.0;
cobra_model.repair();
#append the new atom mappings
row_tmp = {};
row_tmp['mapping_id']=mapping_id_O;
row_tmp['rxn_id']='GLCt2pp';
row_tmp['rxn_description']='';
row_tmp['rxn_equation']='';
row_tmp['reactants_stoichiometry_tracked']=[-1]
row_tmp['products_stoichiometry_tracked']=[1]
row_tmp['reactants_ids_tracked']=['glc_DASH_D_e']
row_tmp['products_ids_tracked']=['glc_DASH_D_c']
row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
row_tmp['reactants_mapping']=['abcdef']
row_tmp['products_mapping']=['abcdef']
row_tmp['used_']=True
row_tmp['comment_']='added'
atomMappingReactions.append(row_tmp);
#make HEX1 "atp_c + glc_DASH_D_c --> g6p_c + h_c + adp_c"
rxn_mets = {};
rxn_mets[glc_c] = -1;
rxn_mets[atp] = -1;
rxn_mets[g6p] = 1;
rxn = Reaction('HEX1');
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000.0;
cobra_model.repair();
#append the new atom mappings
row_tmp = {};
row_tmp['mapping_id']=mapping_id_O;
row_tmp['rxn_id']='HEX1';
row_tmp['rxn_description']='';
row_tmp['rxn_equation']='';
row_tmp['reactants_stoichiometry_tracked']=[-1]
row_tmp['products_stoichiometry_tracked']=[1]
row_tmp['reactants_ids_tracked']=['glc_DASH_D_c']
row_tmp['products_ids_tracked']=['g6p_c']
row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
row_tmp['reactants_mapping']=['abcdef']
row_tmp['products_mapping']=['abcdef']
row_tmp['used_']=True
row_tmp['comment_']='added'
atomMappingReactions.append(row_tmp);
# add in PRPPS phosphoribosylpyrophosphate synthetase atp[c] + r5p[c] <=> amp[c] + h[c] + prpp[c]
#get metabolites not in the model
met_row = {}
met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014',"prpp_c");
prpp = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
prpp.charge = met_row['charge']
r5p = cobra_model.metabolites.get_by_id('r5p_c')
# expand the model
rxn_mets = {};
rxn_mets[r5p] = -1;
rxn_mets[atp] = -1;
rxn_mets[prpp] = 1;
rxn = Reaction('PRPPS');
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.repair();
# add in rxn mapping
row={};
row['mapping_id']=mapping_id_O;
row['rxn_id']='PRPPS';
row['rxn_description']='';
row['rxn_equation']='';
row['reactants_stoichiometry_tracked']=[-1]
row['products_stoichiometry_tracked']=[1]
row['reactants_ids_tracked']=['r5p_c']
row['products_ids_tracked']=['prpp_c']
row['reactants_elements_tracked']=[["C", "C", "C", "C", "C"]]
row['products_elements_tracked']=[["C", "C", "C", "C", "C"]]
row['reactants_positions_tracked']=[[0, 1, 2, 3, 4]]
row['products_positions_tracked']=[[0, 1, 2, 3, 4]]
row['reactants_mapping']=['abcde']
row['products_mapping']=['abcde']
row['used_']=True
row['comment_']='added'
atomMappingReactions.append(row)
##expand the model
#acon = Metabolite('acon_DASH_C_c','C6H3O6','cis-Aconitate','c');
#cit = cobra_model.metabolites.get_by_id('cit_c')
#icit = cobra_model.metabolites.get_by_id('icit_c')
#e4p = cobra_model.metabolites.get_by_id('e4p_c')
#phe = cobra_model.metabolites.get_by_id('phe_DASH_L_c')
his = cobra_model.metabolites.get_by_id('his_DASH_L_c')
#phpyr = Metabolite('phpyr_c','C9H7O3','Phenylpyruvate','c');
# update selected reactions to account for new metabolites
for rxn,row in enumerate(atomMappingReactions):
if row['rxn_id'] == 'HisSYN':
# split HisSYN to add in prpp
cobra_model.reactions.get_by_id(row['rxn_id']).subtract_metabolites({atp:-1,r5p:-1})
cobra_model.reactions.get_by_id(row['rxn_id']).add_metabolites({prpp:-1})
# Update the mapping_ids
atomMappingReactions[rxn]['reactants_ids_tracked']=[r.replace('r5p_c','prpp_c') for r in atomMappingReactions[rxn]['reactants_ids_tracked']]
# write the model to a temporary file
save_json_model(cobra_model,'data/cobra_model_tmp.json')
# add the model information to the database
io = stage02_isotopomer_io()
dataStage02IsotopomerModelRxns_data = [];
dataStage02IsotopomerModelMets_data = [];
dataStage02IsotopomerModels_data,\
dataStage02IsotopomerModelRxns_data,\
dataStage02IsotopomerModelMets_data = io._parse_model_json(model_id_O, date_I, 'data/cobra_model_tmp.json')
io.add_data_stage02_isotopomer_modelMetabolites(dataStage02IsotopomerModelMets_data);
io.add_data_stage02_isotopomer_modelReactions(dataStage02IsotopomerModelRxns_data);
io.add_data_stage02_isotopomer_models(dataStage02IsotopomerModels_data);
#add atomMappingReactions to the database
io.add_data_stage02_isotopomer_atomMappingReactions(atomMappingReactions);
# expand atomMappingReactions
imm = stage02_isotopomer_metaboliteMapping()
irm = stage02_isotopomer_reactionMapping()
mappingUtilities = stage02_isotopomer_mappingUtilities()
# make atomMappingMetabolites
mappingUtilities.make_missingMetaboliteMappings(experiment_id_I,model_id_I=[model_id_O],
mapping_id_rxns_I=[mapping_id_O],
mapping_id_mets_I=[],
mapping_id_new_I=mapping_id_O);
# update symmetric metabolites
imm.get_metaboliteMapping(mapping_id_O,'succ_c')
imm.make_symmetric()
imm.update_metaboliteMapping()
imm.clear_metaboliteMapping()
imm.get_metaboliteMapping(mapping_id_O,'fum_c')
imm.make_symmetric()
imm.update_metaboliteMapping()
imm.clear_metaboliteMapping()
imm.get_metaboliteMapping(mapping_id_O,'26dap_DASH_M_c')
imm.make_symmetric()
imm.update_metaboliteMapping()
imm.clear_metaboliteMapping()
#analysis functions
def load_isotopomer_matlab(self,matlab_data,isotopomer_data=None):
'''Load 13CFlux isotopomer simulation data from matlab file'''
# load measured isotopomers from MATLAB file into numpy array
# load names and calculated isotopomers from MATLAB file into numpy array
names = scipy.io.loadmat(matlab_data)['output']['names'][0][0];
calculated_ave = scipy.io.loadmat(matlab_data)['output']['ave'][0][0];
calculated_stdev = scipy.io.loadmat(matlab_data)['output']['stdev'][0][0];
# load residuals from MATLAB file into numpy array
residuals = scipy.io.loadmat(matlab_data)['residuals'];
if isotopomer_data:
measured_dict = json.load(open(isotopomer_data,'r'));
measured_names = [];
measured_ave = [];
measured_stdev = [];
# extract data to lists
for frag,data in measured_dict['fragments'].items():
for name in data['data_names']:
measured_names.append(name);
for ave in data['data_ave']:
measured_ave.append(ave);
for stdev in data['data_stdev']:
measured_stdev.append(stdev);
# convert lists to dict
measured_dict = {};
for i,name in enumerate(measured_names):
measured_dict[name]={'measured_ave':measured_ave[i],
'measured_stdev':measured_stdev[i]};
# match measured names to calculated names
measured_ave = [];
measured_stdev = [];
residuals = [];
for i,name in enumerate(names):
if name[0][0] in measured_dict:
measured_ave.append(measured_dict[name[0][0]]['measured_ave']);
measured_stdev.append(measured_dict[name[0][0]]['measured_stdev']);
residuals.append(measured_dict[name[0][0]]['measured_ave']-calculated_ave[i][0]);
else:
measured_ave.append(None);
measured_stdev.append(None);
residuals.append(None);
else:
measured_ave_tmp = scipy.io.loadmat(matlab_data)['toCompare'];
measured_ave = [];
for d in measured_ave_tmp:
measured_ave.append(d[0]);
measured_stdev = numpy.zeros(len(measured_ave));
# combine into a dictionary
isotopomer = {};
for i in range(len(names)):
isotopomer[names[i][0][0]] = {'measured_ave':measured_ave[i], #TODO: extract out by fragment names
'measured_stdev':measured_stdev[i],
'calculated_ave':calculated_ave[i][0],
'calculated_stdev':calculated_stdev[i][0],
'residuals':residuals[i]};
return isotopomer;
def load_confidenceIntervals_matlab(self,matlab_data,cobra_model_matlab,cobra_model_name):
'''Load confidence intervals from matlab file'''
# load confidence intervals from MATLAB file into numpy array
cimin_h5py = h5py.File(matlab_data)['ci']['minv'][0];
cimax_h5py = h5py.File(matlab_data)['ci']['maxv'][0];
cimin = numpy.array(cimin_h5py);
cimax = numpy.array(cimax_h5py);
# load cobramodel
rxns = scipy.io.loadmat(cobra_model_matlab)[cobra_model_name]['rxns'][0][0]
# combine cimin, cimax, and rxns into dictionary
ci = {};
for i in range(len(cimin)):
ci[rxns[i][0][0]] = {'minv':cimin[i],'maxv':cimax[i]};
return ci;
def compare_isotopomers_calculated(self,isotopomer_1, isotopomer_2):
'''compare two calculated isotopomer distributions'''
# extract into lists
absDif_list = [];
ssr_1_list = [];
ssr_2_list = [];
bestFit_list = [];
frag_list = [];
ssr_1 = 0.0; # sum of squared residuals (threshold of 10e1, Antoniewicz poster, co-culture, Met Eng X)
ssr_2 = 0.0;
measured_1_list = [];
measured_2_list = [];
calculatedAve_1_list = [];
calculatedAve_2_list = [];
measuredStdev_1_list = [];
measuredStdev_2_list = [];
for frag,data in isotopomer_1.items():
absDif = 0.0;
sr_1 = 0.0;
sr_2 = 0.0;
bestFit = None;
absDif = fabs(isotopomer_1[frag]['calculated_ave'] - isotopomer_2[frag]['calculated_ave']);
sr_1 = pow(isotopomer_1[frag]['calculated_ave']-isotopomer_1[frag]['measured_ave'],2);
sr_2 = pow(isotopomer_2[frag]['calculated_ave']-isotopomer_2[frag]['measured_ave'],2);
if sr_1>sr_2: bestFit = '2';
elif sr_1<sr_2: bestFit = '1';
elif sr_1==sr_2: bestFit = None;
absDif_list.append(absDif);
ssr_1_list.append(sr_1);
ssr_2_list.append(sr_2);
bestFit_list.append(bestFit);
frag_list.append(frag);
ssr_1 += sr_1;
ssr_2 += sr_2;
measured_1_list.append(isotopomer_1[frag]['measured_ave'])
measured_2_list.append(isotopomer_2[frag]['measured_ave'])
calculatedAve_1_list.append(isotopomer_1[frag]['calculated_ave']);
calculatedAve_2_list.append(isotopomer_2[frag]['calculated_ave']);
measuredStdev_1_list.append(isotopomer_1[frag]['measured_stdev']);
measuredStdev_2_list.append(isotopomer_2[frag]['measured_stdev']);
# calculate the correlation coefficient
# 1. between measured vs. calculated (1 and 2)
# 2. between calculated 1 vs. calculated 2
r_measuredVsCalculated_1 = None;
r_measuredVsCalculated_2 = None;
r_measured1VsMeasured2 = None;
p_measuredVsCalculated_1 = None;
p_measuredVsCalculated_2 = None;
p_measured1VsMeasured2 = None;
r_measuredVsCalculated_1, p_measuredVsCalculated_1 = scipy.stats.pearsonr(measured_1_list,calculatedAve_1_list);
r_measuredVsCalculated_2, p_measuredVsCalculated_2 = scipy.stats.pearsonr(measured_2_list,calculatedAve_2_list);
r_measured1VsMeasured2, p_measured1VsMeasured2 = scipy.stats.pearsonr(calculatedAve_1_list,calculatedAve_2_list);
# wrap stats into a dictionary
isotopomer_comparison_stats = {};
isotopomer_comparison_stats = dict(list(zip(('r_measuredVsCalculated_1', 'p_measuredVsCalculated_1',
'r_measuredVsCalculated_2', 'p_measuredVsCalculated_2',
'r_measured1VsMeasured2', 'p_measured1VsMeasured2',
'ssr_1,ssr_2'),
(r_measuredVsCalculated_1, p_measuredVsCalculated_1,
r_measuredVsCalculated_2, p_measuredVsCalculated_2,
r_measured1VsMeasured2, p_measured1VsMeasured2,
ssr_1,ssr_2))));
## zip, sort, unzip # does not appear to sort correctly!
#zipped = zip(absDif_list,ssr_1_list,ssr_2_list,bestFit_list,frag_list,
# measured_1_list,measured_2_list,calculatedAve_1_list,calculatedAve_2_list,
# measuredStdev_1_list,measuredStdev_2_list);
#zipped.sort();
#zipped.reverse();
#absDif_list,ssr_1_list,sst_2_list,bestFit_list,frag_list,\
# measured_1_list,measured_2_list,calculatedAve_1_list,calculatedAve_2_list,\
# measuredStdev_1_list,measuredStdev_2_list = zip(*zipped);
# restructure into a list of dictionaries for easy parsing or data base viewing
isotopomer_comparison = [];
for i in range(len(absDif_list)):
isotopomer_comparison.append({'isotopomer_absDif':absDif_list[i],
'isotopomer_1_sr':ssr_1_list[i],
'isotopomer_2_sr':ssr_2_list[i],
'bestFit':bestFit_list[i],
'frag':frag_list[i],
'measured_1_ave':measured_1_list[i],
'measured_2_ave':measured_2_list[i],
'measured_1_stdev':measuredStdev_1_list[i],
'measured_2_stdev':measuredStdev_2_list[i],
'calculated_1_ave':calculatedAve_1_list[i],
'calculated_2_ave':calculatedAve_2_list[i]});
return isotopomer_comparison,isotopomer_comparison_stats;
def compare_ci_calculated(self,ci_1,ci_2):
'''compare 2 | |
#!/usr/bin/env python
#
# Copyright 2020 Autodesk
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import unittest
from maya import cmds
from AL import usdmaya
from pxr import Usd, UsdUtils, Tf
import fixturesUtils
class CubeGenerator(usdmaya.TranslatorBase):
    """Minimal translator (no update support) that spawns polyCubes for a prim.

    All counters live on the class so the tests can inspect how many times each
    translator callback fired; resetState()/getState() manage that bookkeeping.
    """
    initializeCount = 0
    preTearDownCount = 0
    tearDownCount = 0
    postImportCount = 0
    importObjectCount = 0
    updateCount = 0
    importObjectMObjects = []

    # Names of the integer counters handled uniformly below.
    _COUNTER_NAMES = ("initializeCount", "preTearDownCount", "tearDownCount",
                      "postImportCount", "importObjectCount", "updateCount")

    @classmethod
    def resetState(cls):
        """Zero every counter and forget previously imported MObjects."""
        for counter in cls._COUNTER_NAMES:
            setattr(cls, counter, 0)
        cls.importObjectMObjects = []

    @classmethod
    def getState(cls):
        """Return a snapshot of all counters plus the recorded MObjects."""
        snapshot = {counter: getattr(cls, counter) for counter in cls._COUNTER_NAMES}
        snapshot["importObjectMObjects"] = cls.importObjectMObjects
        return snapshot

    def initialize(self):
        return True

    def preTearDown(self, prim):
        type(self).preTearDownCount += 1
        return True

    def tearDown(self, path):
        type(self).tearDownCount += 1
        self.removeItems(path)
        return True

    def canExport(self, mayaObjectName):
        return usdmaya.ExportFlag.kNotSupported

    def needsTransformParent(self):
        return True

    def supportsUpdate(self):
        return False

    def importableByDefault(self):
        return True

    def exportObject(self, stage, path, usdPath, params):
        return

    def postImport(self, prim):
        return True

    def getTranslatedType(self):
        return Tf.Type.Unknown

    def importObject(self, prim, parent=None):
        """Create `numCubes` polyCubes under `parent`, tracking their DG nodes."""
        cls = type(self)
        cls.importObjectCount += 1
        countAttr = prim.GetAttribute("numCubes")
        cubeTotal = countAttr.Get() if countAttr.IsValid() else 0
        historyNodes = []
        for _ in range(cubeTotal):
            shapeTransform, creator = cmds.polyCube()
            historyNodes.append(creator)
            cmds.parent(shapeTransform, parent)
        # Register only the DG (history) nodes with the translator context; the
        # DAG nodes parented above are removed automatically by AL_USDMaya when
        # the parent transform is deleted.
        for creator in historyNodes:
            self.insertItem(prim, creator)
        cls.importObjectMObjects = self.context().getMObjectsPath(prim)
        return True

    def update(self, prim):
        type(self).updateCount += 1
        return True
class DeleteParentNodeOnPostImport(usdmaya.TranslatorBase):
    '''
    Translator that deletes the parent node on postImport.

    Used to verify that a translator destroying its own parent transform does
    not crash Maya. Counters and the last-seen parent are class-level so the
    tests can assert on them.
    '''
    nbImport = 0
    nbPostImport = 0
    nbPreTeardown = 0
    parentNode = None
    def initialize(self):
        return True
    def preTearDown(self, prim):
        self.__class__.nbPreTeardown += 1
        return True
    def tearDown(self, path):
        # Fix: pass the prim path through to removeItems(); the argument was
        # previously omitted, inconsistent with every other translator in this
        # file (e.g. CubeGenerator.tearDown), so tracked items were not removed
        # for the torn-down prim.
        self.removeItems(path)
        return True
    def needsTransformParent(self):
        return True
    def importableByDefault(self):
        return True
    def postImport(self, prim):
        """Delete the transform that was created as this prim's parent."""
        self.__class__.nbPostImport += 1
        if self.__class__.parentNode:
            cmds.delete(self.__class__.parentNode)
        # re-create the node
        # print 'CCCCCCCCCCCCc', cmds.createNode('AL_usdmaya_Transform', name='rig', parent='|bobo|root|peter01')
        return True
    def getTranslatedType(self):
        return Tf.Type.Unknown
    def importObject(self, prim, parent=None):
        # Remember the parent so postImport can delete it.
        self.__class__.nbImport += 1
        self.__class__.parentNode = parent
        return True
    def supportsUpdate(self):
        return False
    def update(self, prim):
        return True
    def canExport(self, mayaObjectName):
        return usdmaya.ExportFlag.kNotSupported
    def exportObject(self, stage, path, usdPath, params):
        return
class UpdateableTranslator(usdmaya.TranslatorBase):
    """Translator that supports update() and records every callback it receives.

    Each callback appends an entry like "import /root/peter01/rig" to
    self.actions, letting tests assert exactly which callbacks ran.
    """

    def _record(self, event, prim):
        # Keep an ordered trace of translator callbacks for test assertions.
        self.actions.append(event + ' ' + str(prim.GetPath()))

    def initialize(self):
        self.actions = []
        return True

    def getTranslatedType(self):
        return Tf.Type.Unknown

    def needsTransformParent(self):
        return True

    def supportsUpdate(self):
        return True

    def importableByDefault(self):
        return True

    def importObject(self, prim, parent=None):
        self._record('import', prim)
        return True

    def postImport(self, prim):
        self._record('postImport', prim)
        return True

    def generateUniqueKey(self, prim):
        # The prim path itself serves as the unique key.
        return str(prim.GetPath())

    def update(self, prim):
        self._record('update', prim)
        return True

    def preTearDown(self, prim):
        return True

    def tearDown(self, path):
        return True

    def canExport(self, mayaObjectName):
        return usdmaya.ExportFlag.kNotSupported

    def exportObject(self, stage, path, usdPath, params):
        return
class TestPythonTranslators(unittest.TestCase):
    """Integration tests for Python translator callbacks driven by an AL_USDMaya proxy shape.

    Each test registers a translator, opens a USD stage from the shared
    test_data directory, imports it via AL_usdmaya_ProxyShapeImport, then
    mutates the stage (variant switches, prim deactivation) and asserts which
    translator callbacks fired and which Maya nodes exist.
    """
    @classmethod
    def setUpClass(cls):
        """Resolve the shared test-data directory and make it importable."""
        # Setup for test output
        inputPath = fixturesUtils.setUpClass(__file__, loadPlugin=False)
        cls._testDataDir = os.path.join(inputPath, '../test_data/')
        # test_data also holds python modules (e.g. examplecubetranslator)
        # that individual tests import below.
        sys.path.append(cls._testDataDir)
    @classmethod
    def tearDownClass(cls):
        fixturesUtils.tearDownClass(unloadPlugin=False)
    def setUp(self):
        """Start each test from an empty scene with the plugin loaded."""
        cmds.file(force=True, new=True)
        cmds.loadPlugin("AL_USDMayaPlugin", quiet=True)
        self.assertTrue(cmds.pluginInfo("AL_USDMayaPlugin", query=True, loaded=True))
    def tearDown(self):
        """Reset translator state and shared caches so tests stay independent."""
        CubeGenerator.resetState()
        UsdUtils.StageCache.Get().Clear()
        usdmaya.TranslatorBase.clearTranslators()
    def test_registration(self):
        """Translators can be registered and cleared; duplicates are permitted."""
        import examplecubetranslator #This registers the translator
        self.assertTrue(len(usdmaya.TranslatorBase.getPythonTranslators())==1)
        usdmaya.TranslatorBase.registerTranslator(examplecubetranslator.BoxNodeTranslator())
        self.assertTrue(len(usdmaya.TranslatorBase.getPythonTranslators())==2) #at the moment we allow duplicates
        usdmaya.TranslatorBase.clearTranslators()
        self.assertTrue(len(usdmaya.TranslatorBase.getPythonTranslators())==0) #check empty
    def test_import(self):
        """importObject() runs once on proxy import and records 5 cube nodes."""
        usdmaya.TranslatorBase.registerTranslator(CubeGenerator(), 'beast_rig')
        stage = Usd.Stage.Open(self._testDataDir + "inactivetest.usda")
        prim = stage.GetPrimAtPath('/root/peter01')
        vs = prim.GetVariantSet("cubes")
        vs.SetVariantSelection("fiveCubes")
        stageCache = UsdUtils.StageCache.Get()
        stageCache.Insert(stage)
        stageId = stageCache.GetId(stage)
        cmds.AL_usdmaya_ProxyShapeImport(stageId=stageId.ToLongInt(), name='bobo')
        self.assertTrue(CubeGenerator.getState()["importObjectCount"]==1)
        self.assertTrue(len(CubeGenerator.importObjectMObjects)==5)
        prim = stage.GetPrimAtPath('/root/peter01/rig')
        # self.assertTrue(usdmaya.TranslatorBase.generateTranslatorId(prim)=="assettype:beast_rig")
    def test_variantSwitch_that_removes_prim_and_create_new_one(self):
        """Switching to a variant with a different prim tears down the old one and imports the new one."""
        usdmaya.TranslatorBase.registerTranslator(CubeGenerator(), 'beast_rig')
        stage = Usd.Stage.Open(self._testDataDir + "inactivetest.usda")
        prim = stage.GetPrimAtPath('/root/peter01')
        vs = prim.GetVariantSet("cubes")
        vs.SetVariantSelection("fiveCubes")
        stageCache = UsdUtils.StageCache.Get()
        stageCache.Insert(stage)
        stageId = stageCache.GetId(stage)
        cmds.AL_usdmaya_ProxyShapeImport(stageId=stageId.ToLongInt(), name='bobo')
        self.assertEqual(CubeGenerator.getState()["importObjectCount"],1)
        self.assertEqual(len(CubeGenerator.importObjectMObjects),5)
        self.assertTrue(cmds.objExists('|bobo|root|peter01|rig'))
        '''
        Variant switch that leads to another prim being created.
        '''
        try:
            vs.SetVariantSelection("sixCubesRig2")
        except RecursionError:
            # Force RecursionErrors to fail the test instead of erroring them
            self.fail("Raised RecursionError unexpectedly!")
        self.assertEqual(CubeGenerator.getState()["tearDownCount"],1)
        self.assertEqual(CubeGenerator.getState()["importObjectCount"],2)
        self.assertEqual(CubeGenerator.getState()["updateCount"],0)
        self.assertEqual(len(CubeGenerator.importObjectMObjects),6)
        # old parent transform is gone; the new variant's rig2 replaced it
        self.assertFalse(cmds.objExists('|bobo|root|peter01|rig'))
        self.assertTrue(cmds.objExists('|bobo|root|peter01|rig2'))
    def test_variantSwitch_that_removes_prim_runs_teardown(self):
        """Switching to an empty variant runs tearDown and deletes the nodes."""
        usdmaya.TranslatorBase.registerTranslator(CubeGenerator(), 'beast_rig')
        stage = Usd.Stage.Open(self._testDataDir + "inactivetest.usda")
        prim = stage.GetPrimAtPath('/root/peter01')
        vs = prim.GetVariantSet("cubes")
        vs.SetVariantSelection("fiveCubes")
        stageCache = UsdUtils.StageCache.Get()
        stageCache.Insert(stage)
        stageId = stageCache.GetId(stage)
        cmds.AL_usdmaya_ProxyShapeImport(stageId=stageId.ToLongInt(), name='bobo')
        self.assertEqual(CubeGenerator.getState()["importObjectCount"],1)
        self.assertTrue(cmds.objExists('|bobo|root|peter01|rig'))
        '''
        Test that we can swap in another empty variant and our content gets deleted
        '''
        vs.SetVariantSelection("noCubes")
        self.assertEqual(CubeGenerator.getState()["tearDownCount"], 1)
        self.assertEqual(CubeGenerator.getState()["importObjectCount"], 1)
        self.assertFalse(cmds.objExists('|bobo|root|peter01|rig'))
    def test_variantSwitch_that_keeps_existing_prim_runs_teardown_and_import(self):
        """Switching variants while the prim persists still runs tearDown + a fresh import (no update)."""
        usdmaya.TranslatorBase.registerTranslator(CubeGenerator(), 'beast_rig')
        stage = Usd.Stage.Open(self._testDataDir + "inactivetest.usda")
        prim = stage.GetPrimAtPath('/root/peter01')
        vs = prim.GetVariantSet("cubes")
        vs.SetVariantSelection("fiveCubes")
        stageCache = UsdUtils.StageCache.Get()
        stageCache.Insert(stage)
        stageId = stageCache.GetId(stage)
        cmds.AL_usdmaya_ProxyShapeImport(stageId=stageId.ToLongInt(), name='bobo')
        self.assertEqual(CubeGenerator.getState()["importObjectCount"],1)
        self.assertEqual(len(CubeGenerator.importObjectMObjects),5)
        self.assertTrue(cmds.objExists('|bobo|root|peter01|rig'))
        # check the number of items under the parent. We should have 6 created nodes
        parentPath = '|bobo|root|peter01|rig'
        self.assertTrue(cmds.objExists(parentPath))
        nbItems = cmds.listRelatives(parentPath)
        self.assertEqual(5, len(nbItems))
        '''
        Variant switch that leads to same prim still existing.
        '''
        try:
            vs.SetVariantSelection("sixCubesRig")
        except RecursionError:
            # Force RecursionErrors to fail the test instead of erroring them
            self.fail("Raised RecursionError unexpectedly!")
        self.assertEqual(CubeGenerator.getState()["tearDownCount"],1)
        self.assertEqual(CubeGenerator.getState()["importObjectCount"],2)
        self.assertEqual(CubeGenerator.getState()["updateCount"],0)
        self.assertEqual(len(CubeGenerator.importObjectMObjects),6)
        # check the number of items under the parent. We should have 6 created nodes
        # and not 11 as teardown is supposed to have run and removed the old ones.
        self.assertTrue(cmds.objExists(parentPath))
        nbItems = cmds.listRelatives(parentPath)
        self.assertEqual(6, len(nbItems))
    def test_set_inactive_prim_removes_parent_transform(self):
        """Deactivating a translated prim tears it down and removes its parent transform."""
        usdmaya.TranslatorBase.registerTranslator(CubeGenerator(), 'beast_rig')
        stage = Usd.Stage.Open(self._testDataDir + "inactivetest.usda")
        prim = stage.GetPrimAtPath('/root/peter01')
        vs = prim.GetVariantSet("cubes")
        vs.SetVariantSelection("fiveCubes")
        stageCache = UsdUtils.StageCache.Get()
        stageCache.Insert(stage)
        stageId = stageCache.GetId(stage)
        cmds.AL_usdmaya_ProxyShapeImport(stageId=stageId.ToLongInt(), name='bobo')
        self.assertEqual(CubeGenerator.getState()["importObjectCount"],1)
        self.assertEqual(len(CubeGenerator.importObjectMObjects),5)
        self.assertTrue(cmds.objExists('|bobo|root|peter01|rig'))
        p = stage.GetPrimAtPath('/root/peter01/rig')
        p.SetActive(False)
        self.assertEqual(CubeGenerator.getState()["tearDownCount"],1)
        self.assertEqual(CubeGenerator.getState()["importObjectCount"],1)
        self.assertEqual(CubeGenerator.getState()["updateCount"],0)
        self.assertFalse(cmds.objExists('|bobo|root|peter01|rig'))
    def test_variantSwitch_listener_from_different_stage(self):
        """Test listener only responds to changes made to layers found in proxy shape owned stages."""
        usdmaya.TranslatorBase.registerTranslator(CubeGenerator(), 'beast_rig')
        # Make a dummy stage that mimics prim path found in test data
        otherHandle = os.path.abspath(type(self).__name__ + ".usda")
        # Scope
        if True:
            stage = Usd.Stage.CreateInMemory()
            stage.DefinePrim("/root/peter01")
            stage.Export(otherHandle)
        # Open both stages
        testStage = Usd.Stage.Open(self._testDataDir + "inactivetest.usda")
        otherStage = Usd.Stage.Open(otherHandle)
        # Cache
        stageCache = UsdUtils.StageCache.Get()
        stageCache.Insert(testStage)
        stageCache.Insert(otherStage)
        stageId = stageCache.GetId(testStage)
        # Import legit test data
        cmds.AL_usdmaya_ProxyShapeImport(stageId=stageId.ToLongInt(), name='bobo')
        # Make sure both paths are valid
        self.assertTrue(testStage.GetPrimAtPath("/root/peter01"))
        self.assertTrue(otherStage.GetPrimAtPath("/root/peter01"))
        # Modify stage that isn't loaded by AL_USDMaya
        prim = otherStage.GetPrimAtPath("/root/peter01")
        prim.SetActive(False)
        # Ensure stage on proxy wasn't modified
        self.assertEqual(CubeGenerator.getState()["tearDownCount"], 0)
    # this test is in progress... I cannot make it fail currently but
    # the motion translator in unicorn is definitely crashing Maya
    # if needsTransformParent() returns True.
    def test_deletion_of_parent_node_by_translator_does_not_crash_Maya(self):
        """A translator deleting its own parent transform must not crash Maya."""
        usdmaya.TranslatorBase.registerTranslator(DeleteParentNodeOnPostImport(), 'beast_rig')
        stage = Usd.Stage.Open(self._testDataDir + "inactivetest.usda")
        prim = stage.GetPrimAtPath('/root/peter01')
        vs = prim.GetVariantSet("cubes")
        vs.SetVariantSelection("fiveCubes")
        stageCache = UsdUtils.StageCache.Get()
        stageCache.Insert(stage)
        stageId = stageCache.GetId(stage)
        cmds.AL_usdmaya_ProxyShapeImport(stageId=stageId.ToLongInt(), name='bobo')
        self.assertEqual(DeleteParentNodeOnPostImport.nbImport, 1)
        self.assertEqual(DeleteParentNodeOnPostImport.parentNode, '|bobo|root|peter01|rig')
        self.assertEqual(DeleteParentNodeOnPostImport.nbPostImport, 1)
        self.assertEqual(DeleteParentNodeOnPostImport.nbPreTeardown, 0)
        # the switch itself is the assertion: it must complete without crashing
        vs.SetVariantSelection("noCubes")
    def test_import_and_update_consistency(self):
        '''
        test consistency when called via TranslatePrim, or triggered via onObjectsChanged
        '''
        updateableTranslator = UpdateableTranslator()
        usdmaya.TranslatorBase.registerTranslator(updateableTranslator, 'test')
        stage = Usd.Stage.Open(self._testDataDir + "translator_update_postimport.usda")
        stageCache = UsdUtils.StageCache.Get()
        stageCache.Insert(stage)
        stageId = stageCache.GetId(stage)
        shapeName = 'updateProxyShape'
        cmds.AL_usdmaya_ProxyShapeImport(stageId=stageId.ToLongInt(), name=shapeName)
        # Verify if the methods have been called
        self.assertTrue("import /root/peter01/rig" in updateableTranslator.actions)
        self.assertTrue("postImport /root/peter01/rig" in updateableTranslator.actions)
        # "update()" method should not be called
        self.assertFalse("update /root/peter01/rig" in updateableTranslator.actions)
        updateableTranslator.actions = []
        cmds.AL_usdmaya_TranslatePrim(up="/root/peter01/rig", fi=True, proxy=shapeName)
        # "update()" should have been called
        self.assertTrue("update /root/peter01/rig" in updateableTranslator.actions)
class TestTranslatorUniqueKey(usdmaya.TranslatorBase):
    """Translator used to exercise generateUniqueKey().

    primHashValues maps a prim path string to the key to return for that prim
    (missing entries yield None, i.e. "no key"); _supportsUpdate toggles
    whether changes are routed through update(). Counters here are
    per-instance, unlike CubeGenerator's class-level ones.
    """

    def __init__(self, *args, **kwargs):
        super(TestTranslatorUniqueKey, self).__init__(*args, **kwargs)
        self._supportsUpdate = False
        self.resetCounters()
        self.primHashValues = dict()

    def initialize(self):
        return True

    def resetCounters(self):
        # Zero all per-instance callback counters.
        for counter in ("preTearDownCount", "tearDownCount", "importObjectCount",
                        "postImportCount", "updateCount"):
            setattr(self, counter, 0)

    def setSupportsUpdate(self, state):
        self._supportsUpdate = state

    def generateUniqueKey(self, prim):
        return self.primHashValues.get(str(prim.GetPath()))

    def preTearDown(self, prim):
        self.preTearDownCount += 1
        return True

    def tearDown(self, path):
        self.tearDownCount += 1
        self.removeItems(path)
        return True

    def canExport(self, mayaObjectName):
        return usdmaya.ExportFlag.kNotSupported

    def needsTransformParent(self):
        return True

    def supportsUpdate(self):
        return self._supportsUpdate

    def importableByDefault(self):
        return True

    def exportObject(self, stage, path, usdPath, params):
        return

    def postImport(self, prim):
        self.postImportCount += 1
        return True

    def getTranslatedType(self):
        return Tf.Type.Unknown

    def importObject(self, prim, parent=None):
        """Create `numCubes` polyCubes under `parent`, tracking their DG nodes."""
        self.importObjectCount += 1
        sizeAttr = prim.GetAttribute("numCubes")
        total = sizeAttr.Get() if sizeAttr.IsValid() else 0
        constructionNodes = []
        for _ in range(total):
            xform, historyNode = cmds.polyCube()
            constructionNodes.append(historyNode)
            cmds.parent(xform, parent)
        # Track only the DG nodes; the DAG nodes parented above are removed by
        # AL_USDMaya automatically when the parent transform is deleted.
        for historyNode in constructionNodes:
            self.insertItem(prim, historyNode)
        return True

    def update(self, prim):
        self.updateCount += 1
        return True
class TestPythonTranslatorsUniqueKey(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Setup for test output
inputPath = | |
# repo: dbernaciak/bayes-np
import scipy.stats as st
import numpy as np
import numba as nb
import matplotlib.pyplot as plt
from tqdm.auto import tqdm
import asyncio
import nest_asyncio
from nb_stats import *
nest_asyncio.apply()
class GaussianMixture:
    """Two-dimensional Gaussian mixture sampler.

    sd  -- scalar standard deviation (expanded on the first draw into one
           diagonal covariance matrix per component) or an array of
           per-component covariance matrices
    rho -- component weights; normalised internally
    mu  -- per-component means; defaults to the four corners (+/-3, +/-3)

    After rvs(), self.z holds the component label drawn for each sample.
    """

    def __init__(self, sd, rho, mu=None):
        self.sd = sd
        self.rho = np.asarray(rho) / sum(rho)
        self.mu = np.array([[3, 3], [-3, 3], [3, -3], [-3, -3]]) if mu is None else mu
        self.z = None

    def rvs(self, size):
        """Draw `size` samples; returns an (size, 2) array of data points."""
        # pick a mixture component for every sample
        components = np.arange(0, len(self.rho), 1)
        labels = np.random.choice(components, size=size, replace=True, p=self.rho)
        self.z = labels
        # lazily expand a scalar sd into per-component diagonal covariances
        if isinstance(self.sd, (float, int)):
            self.sd = np.array([np.array([[self.sd, 0], [0, self.sd]])
                                for _ in range(len(self.rho))])
        # draw each point from its component's bivariate normal
        draws = [st.multivariate_normal(self.mu[labels[i], :], self.sd[labels[i]]).rvs(size=1)
                 for i in range(len(labels))]
        return np.array(draws)
class DPMM:
    """Dirichlet-process mixture model: thin wrapper around the Gibbs samplers.

    Holds the data and, after run_mcmc(), the sampler outputs: cluster
    assignments, membership probabilities, alpha draws, and cluster
    means/covariances.
    """

    def __init__(self, data):
        self.data = data
        self.z = None              # array of cluster indices
        self.probs = None          # probabilities of each point belonging to a cluster
        self.alphas = None         # Gibbs-sampler draws for alpha
        self.cluster_means = None
        self.cluster_cov = None

    def run_mcmc(self, specification, iter, **kwargs):
        """Dispatch to the sampler named by `specification` and store its outputs."""
        if specification == "normal":
            sampler = run_mcmc
        elif specification == "normal_invwishart":
            sampler = run_mcmc_mvn_normal_invwishart
        else:
            raise NotImplementedError
        (self.z, self.probs, self.alphas,
         self.cluster_means, self.cluster_cov) = sampler(self.data, max_iter=iter, **kwargs)
@nb.jit(nopython=True, fastmath=True)
def run_mcmc(data: np.ndarray, alpha: float = None, max_iter: int = 1000, sd: float = 1, sig0: float = 3.0, a_gamma: float =None, b_gamma: float =None):
    """
    Fast implementation of the algorithm 3 of Neal (2000) with extension to hyperparameter inference for alpha

    :param data: (ndata, 2) array of observations
    :param alpha: DP concentration parameter; if None it is inferred under a Gamma hyperprior
    :param max_iter: number of Gibbs sweeps
    :param sd: known per-cluster standard deviation (isotropic likelihood)
    :param sig0: prior standard deviation on cluster means
    :param a_gamma: shape of the Gamma hyperprior on alpha (default 2)
    :param b_gamma: rate of the Gamma hyperprior on alpha (default 4)
    :return: tuple (z, final_loc_probs, alphas, cluster_means, cluster_cov) where
             z is the final cluster assignment per point, final_loc_probs the
             per-point membership probabilities of the last sweep, alphas the
             trace of the concentration parameter, and cluster_means/cluster_cov
             the posterior predictive parameters from the last sweep
    """
    # Defaults
    infer_alpha = True if alpha is None else False
    alpha = 0.01 if alpha is None else alpha
    a_gamma = 2 if a_gamma is None else a_gamma
    b_gamma = 4 if b_gamma is None else b_gamma
    final_loc_probs = {}
    data_dim = data.shape[1] # dimension of the data points
    sig = np.eye(data_dim) * sd ** 2 # cluster-specific covariance matrix
    sig0 = np.eye(data_dim) * sig0 ** 2 # prior covariance matrix TODO: fix me
    prec = np.linalg.inv(sig)
    prec0 = np.linalg.inv(sig0)
    mu0 = np.array([0.0, 0.0]) # prior mean on cluster parameters
    ndata = int(data.shape[0]) # number of data points
    z = np.zeros(data.shape[0], dtype=np.int64) # initial cluster assignments
    counts = np.array([data.shape[0]])
    n_clusters = len(counts)
    pi_choice = np.random.uniform(0.0, 1.0, size=max_iter) # for alpha inference
    cluster_means = None
    cluster_cov = None
    alphas = np.empty(max_iter)
    for it in range(max_iter):
        for n in range(ndata):
            # Remove point n from its cluster; if that empties cluster c,
            # relabel the last cluster into slot c to keep labels compact.
            c = int(z[n])
            counts[c] -= 1
            if counts[c] == 0:
                counts[c] = counts[n_clusters - 1]
                loc_c = np.argwhere(z == n_clusters - 1).ravel()
                for loc in loc_c:
                    z[loc] = c
                counts = np.delete(counts, n_clusters - 1)
                n_clusters -= 1
            z[n] = -1 # ensures z[n] doesn't get counted as a cluster
            log_weights = np.empty(n_clusters + 1)
            cluster_means = np.empty((n_clusters, 2))
            cluster_cov = np.empty((n_clusters, 2, 2))
            # find the unnormalized log probabilities
            # for each existing cluster
            for c in range(n_clusters):
                c_precision = prec0 + counts[c] * prec # BDA3 3.5 MULTIVARIATE NORMAL MODEL WITH KNOWN VARIANCE
                c_sig = np.linalg.inv(c_precision) # np.eye(data_dim) * 1 / np.diag(c_precision) #
                loc_z = np.where(z == int(c))[0]
                if len(loc_z) > 1:
                    sum_data = np.sum(data[z == c, :], axis=0)
                else:
                    sum_data = data[z == c, :].ravel()
                c_mean = c_sig @ (prec @ sum_data.T + prec0 @ mu0.T) # BDA3 3.5 MULTIVARIATE NORMAL MODEL WITH KNOWN VARIANCE
                log_weights[c] = np.log(counts[c]) + normal_logpdf(data[n, :], c_mean, c_sig + sig) # (3.7)
                if (it == max_iter - 1) and (n == ndata - 1):
                    cluster_means[c, :] = c_mean
                    cluster_cov[c, :, :] = c_sig + sig
            # find the unnormalized log probability
            # for the "new" cluster
            log_weights[n_clusters] = np.log(alpha) + normal_logpdf(data[n, :], mu0, sig0 + sig) # (3.7)
            # transform unnormalized log probabilities
            # into probabilities (max-shifted for numerical stability)
            max_weight = np.max(log_weights)
            log_weights = log_weights - max_weight
            loc_probs = np.exp(log_weights)
            loc_probs = loc_probs / sum(loc_probs)
            # sample which cluster this point should belong to
            newz = np.argwhere(np.random.multinomial(1, loc_probs, size=1).ravel() == 1)[0][0]
            # newz = np.random.choice(np.arange(0, n_clusters + 1, 1), 1, replace=True, p=loc_probs)
            # if necessary, instantiate a new cluster
            if newz == n_clusters:
                counts = np.append(counts, 0)
                n_clusters += 1
            z[n] = newz
            # update the cluster counts
            counts[newz] += 1
            if it == max_iter - 1:
                final_loc_probs[n] = loc_probs
        alphas[it] = alpha
        if infer_alpha:
            # Escobar & West (1995) auxiliary-variable update for alpha.
            z_temp = np.random.beta(alpha + 1, ndata)
            pi1 = a_gamma + n_clusters - 1
            pi2 = ndata * (b_gamma - np.log(z_temp))
            pi_prob = pi1 / (pi1 + pi2)
            if pi_prob >= pi_choice[it]:
                alpha = np.random.gamma(a_gamma + n_clusters, 1 / (b_gamma - np.log(z_temp)))
            else:
                alpha = np.random.gamma(a_gamma + n_clusters - 1, 1 / (b_gamma - np.log(z_temp)))
    return z, final_loc_probs, alphas, cluster_means, cluster_cov
@nb.jit(nopython=True, fastmath=True)
def run_mcmc_mvn_normal_invwishart(data: np.ndarray, alpha: float = None, max_iter: int = 1000, sig0: float = 3.0, kappa_0=0.01):
"""
Fast implementation of the algorithm 3 of Neal (2000) with extension to hyperparameter inference for alpha
:param data:
:param alpha:
:param max_iter:
:param sd:
:param sig0: prior covariance
:return:
"""
nu_0 = 2
final_loc_probs = {}
data_dim = data.shape[1] # dimension of the data points
# sig = np.eye(data_dim) * sig0 ** 2 / kappa_0 # cluster-specific covariance matrix
sig0 = np.eye(data_dim) * sig0 ** 2 # prior covariance matrix
mu0 = np.array([0.0, 0.0]) # prior mean on cluster parameters
ndata = int(data.shape[0]) # number of data points
z = np.zeros(data.shape[0], dtype=np.int64) # initial cluster assignments
counts = np.array([data.shape[0]])
n_clusters = len(counts)
pi_choice = np.random.uniform(0.0, 1.0, size=max_iter) # for alpha inference
infer_alpha = True if alpha is None else False
alpha = 0.01 if alpha is None else alpha
a_gamma = 2
b_gamma = 4
cluster_means = None
cluster_cov = None
alphas = np.empty(max_iter)
for it in range(max_iter):
for n in range(ndata):
c = int(z[n])
counts[c] -= 1
if counts[c] == 0:
counts[c] = counts[n_clusters - 1]
loc_c = np.argwhere(z == n_clusters - 1).ravel()
for loc in loc_c:
z[loc] = c
counts = np.delete(counts, n_clusters - 1)
n_clusters -= 1
z[n] = -1 # ensures z[n] doesn't get counted as a cluster
log_weights = np.empty(n_clusters + 1)
cluster_means = np.empty((n_clusters, 2))
cluster_cov = np.empty((n_clusters, 2, 2))
# find the unnormalized log probabilities
# for each existing cluster
for c in range(n_clusters):
# BDA3 3.6 BDA3 3.6 MULTIVARIATE NORMAL MODEL WITH UN KNOWN MEAN AND VARIANCE
data_c = data[z == c, :]
loc_z = np.where(z == int(c))[0]
if len(loc_z) > 1:
c_bar = np.sum(data[z == c, :], axis=0) / len(loc_z)
else:
c_bar = data[z == c, :].ravel()
temp = np.empty((len(loc_z), data_dim, data_dim))
for i in range(len(loc_z)):
temp[i, :, :] = np.outer((data_c[i, :] - c_bar), (data_c[i, :] - c_bar))
s = np.sum(temp, axis=0)
n_c = counts[c]
c_sig = sig0 + s + (kappa_0 * n_c / (kappa_0 + n_c)) * np.outer((c_bar - mu0), (c_bar - mu0))
kappa_n = kappa_0 + n_c
nu_n = nu_0 + n_c
c_sig = c_sig * (kappa_n + 1) / (kappa_n * (nu_n - 2 + 1))
# BDA3 3.6 MULTIVARIATE NORMAL MODEL WITH UN KNOWN MEAN AND VARIANCE
c_mean = (kappa_0 * mu0 + n_c * c_bar) / (kappa_0 + n_c)
log_weights[c] = np.log(counts[c]) + t_logpdf(data[n, :], c_mean, c_sig, nu_n - 1)
if (it == max_iter - 1) and (n == ndata - 1):
cluster_means[c, :] = c_mean
cluster_cov[c, :, :] = c_sig
# find the unnormalized log probability
# for the "new" cluster
log_weights[n_clusters] = np.log(alpha) + t_logpdf(data[n, :], mu0, (kappa_0 + 1) / (kappa_0 * (nu_0 - 2 + 1)) * sig0, nu_0)
# transform unnormalized log probabilities
# into probabilities
max_weight = np.max(log_weights)
log_weights = log_weights - max_weight
loc_probs = np.exp(log_weights)
loc_probs = loc_probs / sum(loc_probs)
# sample | |
a zero form pressed.
    def plot(self, axis):
        '''
        Finalises the plotting of the 0-form as a contour plot.
        Uses the attributes of the object as set originally and as customised
        with methods.
        parameters:
        -------------
        axis - matplotlib axis that 0-form will be plotted on
        '''
        # Extract L from the x and y grids
        Lx = 0.5*(self.xg[0, -1] - self.xg[0, 0])
        Ly = 0.5*(self.yg[-1, 0] - self.yg[0, 0])
        x0 = self.xg[0, 0] + Lx
        y0 = self.yg[0, 0] + Ly
        # reset axis limits (pad by 1/delta_factor of the half-width)
        ax_Lx = Lx + Lx/self.delta_factor
        ax_Ly = Ly + Ly/self.delta_factor
        axis.set_xlim(-ax_Lx + x0, ax_Lx + x0)
        axis.set_ylim(-ax_Ly + y0, ax_Ly + y0)
        # check requests as to density of lines
        if self.denser != 1:
            if self.form_0_str == None:
                # This cannot be done if a string has not been supplied
                # ERROR
                raise TypeError('Error: You need to supply the 0-form equation to do this, look at \'give_eqn\' method')
            else:
                # get the supplied form as a string
                zero_form_str = str(simplify(self.form_0_str))
                # set up denser grids for contours
                contour_x, contour_y = np.linspace(self.xg[0,0] , self.xg[0,-1] , self.pt_den_x*self.denser), np.linspace(self.yg[0,0] , self.yg[-1,0], self.pt_den_y*self.denser)
                contour_x_grid, contour_y_grid = np.meshgrid(contour_x, contour_y)
                # format the given string; the replacement target names must
                # match the local grid variables used in eval() below
                zero_form_str = zero_form_str.replace('x', 'contour_x_grid')
                zero_form_str = zero_form_str.replace('y', 'contour_y_grid')
                # evaluate bearing in mind constants (no grid in expression)
                if zero_form_str.find('contour_x_grid') & zero_form_str.find('contour_y_grid') == -1:
                    form_0_contour = eval(zero_form_str)*np.ones(np.shape(contour_x_grid))
                else:
                    form_0_contour = eval(zero_form_str)
                form_0 = form_0_contour
                xg = contour_x_grid
                yg = contour_y_grid
        else:
            form_0 = self.form_0
            xg = self.xg
            yg = self.yg
        # set all insignificant values to zero:
        form_0[np.abs(form_0) < 1e-15] = 0
        # deal with singularities that appear on evaluated points
        isnan_arr = np.isnan(form_0)
        for i in range(len(xg[0, :])):
            for j in range(len(yg[:, 0])):
                # set to zero points that are not defined or inf
                if isnan_arr[j, i] or abs(form_0[j, i]) == np.inf or abs(form_0[j, i]) > 1e15:
                    # colour this region as a red dot, not square to
                    # not confuse with high mag 2-forms in stacks. or worse, in
                    # blocks
                    circ = patch.Circle((xg[j, i], yg[j, i]), Lx*0.05/3, color='red')
                    axis.add_patch(circ)
                    form_0[j, i] = 0
        if self.logarithmic_scale_bool:
            # sign-preserving log scaling: f -> sign(f) * log10(|f| + 1)
            mag1 = np.abs(form_0) + 1
            form_0_norm = form_0/(mag1)
            logmag = np.log10(mag1)
            form_0 = form_0_norm*logmag
        else:
            pass
        CS = axis.contour(xg, yg, form_0, levels=self.lines, cmap=self.cmap)
        axis.clabel(CS, inline=self.inline_bool, fontsize=self.fontsize)
    # define a method to compute the exterior derivative
    def ext_d(self):
        '''
        Takes in no argument
        computes the exterior derivative analytically (via sympy) and
        returns it as a 1-form object.
        Requires the 0-form equation to have been supplied (give_eqn).
        Returns 1 form object
        '''
        # first make sure that the string has been supplied
        if self.form_0_str == None:
            # ERROR
            raise TypeError('Error: You need to supply the 0-form equation to do this, look at \'give_eqn\' method')
        else:
            # can compute the exterior derivative:
            form_0_str = str(simplify(self.form_0_str))
            # from this, need derivatives so set it as a SymPy object
            sympy_expr_form_0 = parse_expr(form_0_str, evaluate=False)
            # set up an array of coordinates that need to be used (in standard order)
            coords = ['x', 'y']
            # from these, find the derivatives
            form_1_x_str = str(diff(sympy_expr_form_0, coords[0]))
            form_1_y_str = str(diff(sympy_expr_form_0, coords[1]))
            # need to supply these unformatted, so save copies:
            form_1_x_unformated, form_1_y_unformated = form_1_x_str*1, form_1_y_str*1
            # from these strings, get the numerical 1-form:
            # NOTE(review): str.replace('x', ...) also rewrites the 'x' inside
            # function names such as 'exp' — verify supplied equations avoid
            # such symbols, or switch to a word-boundary regex.
            form_1_x_str = form_1_x_str.replace('x', '(self.xg)')
            form_1_x_str = form_1_x_str.replace('y', '(self.yg)')
            form_1_y_str = form_1_y_str.replace('x', '(self.xg)')
            form_1_y_str = form_1_y_str.replace('y', '(self.yg)')
            # constants must be broadcast over the grid explicitly
            if form_1_x_str.find('x') & form_1_x_str.find('y') == -1:
                form_1_x_str = '(' + str(form_1_x_str) + ')* np.ones(np.shape(self.xg))'
            if form_1_y_str.find('x') & form_1_y_str.find('y') == -1:
                form_1_y_str = '(' + str(form_1_y_str) + ')* np.ones(np.shape(self.yg))'
            form_1_x = eval(form_1_x_str)
            form_1_y = eval(form_1_y_str)
            # supply these to the 1-form object function and return object
            result_1_form = form_1(self.xg, self.yg, form_1_x, form_1_y, form_1_x_unformated, form_1_y_unformated)
            return result_1_form
# deifne a method to complete the exterior derivative numerically
def num_ext_d(self, edge_order=1):
'''
Takes in 1 argument:
-- edge_order: determines order same as in numpy gradient {1 or 2}
Return 1 object - 1-form
computes the exterior derivative numerically only
The equations do not need to be given
If given, they do not get passed onto the 1-form object anyway
NUMERICAL ONLY
'''
# from numpy gradient, get the gradient array
fy, fx = np.gradient(self.form_0, edge_order=edge_order)
# supply these to the 1-form object function
result_1_form = form_1(self.xg, self.yg, fx, fy)
# return the new object to user
return result_1_form
# deinfe a method for Hodge of a 0-form
def num_hodge(self):
'''
Takes in no arguments
It calulates the Hodge on R^2 by the standard definition:
1* = (dx^dy)
Does so numerically via instance provided arrays
IF equations were given, this method will lose them
returns a 2-form
'''
# check if equations have been given:
# if they have, doing it only numerically would create
# a mismatch, avoid that
if self.form_0_str != None:
print('Warning: You supplied equations, doing it numerically only will lose these')
# now complete the process numerically
# pass these in to the object to create a new one and return
new_object = form_2(self.xg, self.yg, self.form_0) # N.B no equations to supply
return new_object
    def hodge(self):
        '''
        Takes in no arguments
        It calculates the Hodge dual on R^2 by the standard definition:
        1* = (dx^dy)
        Does so analytically via instance provided equations;
        changes the equations AND the numerical answers.
        returns a 2-form
        '''
        # can only be done if equations have been given, check:
        if self.form_0_str != None:
            # some equations are there, compute the Hodge on these:
            # get numerical solutions, evaluated on local strings
            # to relate parameter to the self grids and keep strings, because
            # need to supply these unformatted:
            form_2_str_unformated = self.form_0_str + ''
            string_2_form = self.form_0_str # to be formated
            # from these strings, get the numerical 2-form:
            # NOTE(review): replace('x', ...) also rewrites 'x' inside names
            # like 'exp' — verify equations avoid such symbols.
            string_2_form = string_2_form.replace('x', '(self.xg)')
            string_2_form = string_2_form.replace('y', '(self.yg)')
            # a constant expression must be broadcast over the grid
            if string_2_form.find('x') & string_2_form.find('y') == -1:
                string_2_form = '(' + str(string_2_form) + ')* np.ones(np.shape(self.xg))'
            # evaluated numerically
            form_2_result = eval(string_2_form)
            # create and return object
            new_object = form_2(self.xg, self.yg, form_2_result, form_2_eq=form_2_str_unformated)
            return new_object
        else:
            # ERROR
            raise TypeError('You need to supply the 2-form equation to do this, look at \'give_eqn\' method')
# define a fucntion to compute a wedge product
def wedge(self, form_second, degree=0, keep_object=False):
'''
Parameters:
----------------
form_second - the form to wedge the 0-form with.
Can be supplied as a DFormPy instance, a tuple of equations,
or a single string equation depending on what form is to be
wedged.
To wedge with 1-form, supply 1-form instance, or tuple of
component equations as strings in terms of x and y.
To wedge with 0-form or 2-form, supply corresponding
instances or a single equation. When using equations,
to distinguish between them, provide parmater 'degree'.
degree - default is 0. Only used when a single string is supplied
as form_second, to distinguish betwen 0-form and 2-form
for 0-form, degree=0, for 2-form, degree=2.
Determines what form is to be wegded with the
given 0-form.
keep_object - bool -default=False - Only needed when 0-form /\ 0-form
If False, a new object is created
as a result of the wedge. If True, the 0-form acted on
is modified to be the result of the wedge.
To do so here, strings for the form must be supplied.
Computes the Wedge product using strings, ANALYTICALLY
Returns:
--------------
Wedged with 0-form returns a 0-form object if keep_object is False
(default), and returns nothing when it is True
Wedged with a 1-form, returns a 1-form instance
Wedged with a 2-form, returns a 2-form instance
'''
# test if equations were given first:
if self.form_0_str is None:
raise ValueError('Error: You need to supply the 0-form equation to do this, look at \'give_eqn\' method')
# set up variable to store order of supplied form, initially assume 1-form
order = 0
# get needed second obejct strings dep. | |
'blocking':
self.mode = 'blocking'
curses.flushinp()
return self.__poll_waiting_blocking(0, flag, indent)
# change verbosity
if k == ord('v'):
self.verbose_mode += 1
if self.verbose_mode > 3:
self.verbose_mode = 1
do_refresh = True
# change duration mode
if k == ord('T'):
self.duration_mode += 1
if self.duration_mode > 3:
self.duration_mode = 1
do_refresh = True
# turnoff/on colors
if k == ord('C'):
if self.color is True:
self.set_nocolor()
else:
self.set_color()
do_refresh = True
# sorts
if k == ord('t') and self.sort != 't':
self.sort = 't'
known = True
if k == ord('+') and self.refresh_time < 3:
self.refresh_time += 1
if k == ord('-') and self.refresh_time > 1:
self.refresh_time -= 1
if k == ord('h'):
self.__help_window()
do_refresh = True
# Refresh
if k == ord('R'):
known = True
if k == curses.KEY_RESIZE and self.uibuffer is not None and \
'procs' in self.uibuffer:
do_refresh = True
if do_refresh is True and self.uibuffer is not None and \
'procs' in self.uibuffer:
self.check_window_size()
self.refresh_window(
self.uibuffer['procs'],
self.uibuffer['extras'],
self.uibuffer['flag'],
self.uibuffer['indent'],
self.uibuffer['io'],
self.uibuffer['tps'],
self.uibuffer['active_connections'],
self.uibuffer['size_ev'],
self.uibuffer['total_size'])
curses.flushinp()
t_end = time.time()
if k > -1 and \
not known and \
(t_end - t_start) < (self.refresh_time * interval):
return self.__poll_waiting_blocking(
((self.refresh_time * interval) -\
(t_end - t_start))/self.refresh_time,
flag,
indent,
process,
disp_proc)
# poll postgresql activity
if self.mode == 'waiting':
queries = self.data.pg_get_waiting(self.duration_mode)
else:
queries = self.data.pg_get_blocking(self.duration_mode)
new_procs = {}
for query in queries:
new_procs[query['pid']] = query
new_procs[query['pid']]['duration'] = \
self.data.get_duration(query['duration'])
# return processes sorted by query/transaction/backend duration
disp_procs = sorted(
queries,
key=lambda q: q['duration'],
reverse=True)
return (disp_procs, new_procs)
    def print_string(self, lineno, colno, word, color = 0):
        # Public wrapper over the name-mangled __print_string helper.
        return self.__print_string(lineno, colno, word, color)
    def __print_string(self, lineno, colno, word, color = 0):
        """
        Print a string at position (lineno, colno) and returns its length.

        curses raises an error when drawing outside the visible window
        (e.g. the bottom-right cell); that is deliberately ignored.
        """
        try:
            self.win.addstr(lineno, colno, word, color)
        except curses.error:
            # Writing past the window edge is harmless; skip silently.
            pass
        return len(word)
def __add_blank(self, line, offset = 0):
"""
Complete string with white spaces from the end of string to the end of line.
"""
line += " " * (self.maxx - (len(line) + offset))
return line
def get_indent(self, flag):
"""
Returns identation for Query column.
"""
indent = ''
res = [0] * self.max_ncol
for _, val in PGTOP_COLS[self.mode].items():
if val['mandatory'] or \
(not val['mandatory'] and val['flag'] & flag):
res[int(val['n'])] = val
for val in res:
if val is not 0:
if val['name'] is not 'Query':
indent += val['template_h'] % ' '
return indent
def __print_cols_header(self, flag):
"""
Print columns headers
"""
line = ''
disp = ''
xpos = 0
res = [0] * self.max_ncol
color = self.__get_color(C_GREEN)
for _, val in PGTOP_COLS[self.mode].items():
if val['mandatory'] or \
(not val['mandatory'] and val['flag'] & flag):
res[int(val['n'])] = val
for val in res:
if val is not 0:
disp = val['template_h'] % val['name']
if ((val['name'] == "CPU%" and self.sort == 'c') or
(val['name'] == "MEM%" and self.sort == 'm') or
(val['name'] == "READ/s" and self.sort == 'r') or
(val['name'] == "WRITE/s" and self.sort == 'w') or
(val['name'] == "TIME+" and self.sort == 't')):
color_highlight = self.__get_color(C_CYAN)
else:
color_highlight = color
if val['name'] == "Query":
disp += " " * (self.maxx - (len(line) + len(disp)))
line += disp
self.__print_string(
self.lineno,
xpos,
disp,
color_highlight|curses.A_REVERSE)
xpos += len(disp)
self.lineno += 1
def __print_header(self, pg_version, hostname, user, host, \
port, database, ios, tps, active_connections, size_ev, total_size):
"""
Print window header
"""
self.lineno = 0
colno = 0
version = " %s" % (pg_version)
colno = self.__print_string(
self.lineno,
colno,
version)
colno += self.__print_string(
self.lineno,
colno,
" - ")
colno += self.__print_string(
self.lineno,
colno,
hostname,
curses.A_BOLD)
colno += self.__print_string(
self.lineno,
colno,
" - ")
colno += self.__print_string(
self.lineno,
colno,
user,
self.__get_color(C_CYAN))
colno += self.__print_string(
self.lineno,
colno,
"@")
colno += self.__print_string(
self.lineno,
colno,
host,
self.__get_color(C_CYAN))
colno += self.__print_string(
self.lineno,
colno,
":")
colno += self.__print_string(
self.lineno,
colno,
port,
self.__get_color(C_CYAN))
colno += self.__print_string(
self.lineno,
colno,
"/")
colno += self.__print_string(
self.lineno,
colno,
database,
self.__get_color(C_CYAN))
colno += self.__print_string(
self.lineno,
colno,
" - Ref.: %ss" % (self.refresh_time,))
if self.options.minduration > 0:
colno += self.__print_string(
self.lineno,
colno,
" - Min. duration: %ss" % (self.options.minduration,))
colno = 0
self.lineno += 1
colno += self.__print_string(
self.lineno,
colno,
" Size: ")
colno += self.__print_string(
self.lineno,
colno,
"%8s" % (bytes2human(total_size),),)
colno += self.__print_string(
self.lineno,
colno,
" - %9s/s" % (bytes2human(size_ev),),)
colno += self.__print_string(
self.lineno,
colno,
" | TPS: ")
colno += self.__print_string(
self.lineno,
colno,
"%11s" % (tps,),
self.__get_color(C_GREEN)|curses.A_BOLD)
colno += self.__print_string(
self.lineno,
colno,
" | Active Connections: ")
colno += self.__print_string(
self.lineno,
colno,
"%11s" % (active_connections,),
self.__get_color(C_GREEN)|curses.A_BOLD)
colno += self.__print_string(
self.lineno,
colno,
" | Duration mode: ")
colno += self.__print_string(
self.lineno,
colno,
"%11s" % (self.data.get_duration_mode_name(self.duration_mode),),
self.__get_color(C_GREEN)|curses.A_BOLD)
# If not local connection, don't get and display system informations
if not self.is_local:
return
# Get memory & swap usage
(mem_used_per, mem_used, mem_total, swap_used_per, \
swap_used, swap_total) = self.data.get_mem_swap()
# Get load average
(av1, av2, av3) = self.data.get_load_average()
self.lineno += 1
line = " Mem.: %6s0%% - %9s/%-8s" % \
(mem_used_per, bytes2human(mem_used), \
bytes2human(mem_total))
colno_io = self.__print_string(self.lineno, 0, line)
if (int(ios['read_count'])+int(ios['write_count'])) > self.max_iops:
self.max_iops = (int(ios['read_count'])+int(ios['write_count']))
line_io = " | IO Max: %8s/s" % (self.max_iops,)
colno = self.__print_string(self.lineno, colno_io, line_io)
# swap usage
line = " Swap: %6s0%% - %9s/%-8s" % \
(swap_used_per, bytes2human(swap_used), \
bytes2human(swap_total))
self.lineno += 1
colno = self.__print_string(self.lineno, 0, line)
line_io = " | Read : %10s/s - %6s/s" % \
(bytes2human(ios['read_bytes']), int(ios['read_count']),)
colno = self.__print_string(self.lineno, colno_io, line_io)
# load average, uptime
line = " Load: %.2f %.2f %.2f" % (av1, av2, av3)
self.lineno += 1
colno = self.__print_string(self.lineno, 0, line)
line_io = " | Write: %10s/s - %6s/s" % \
(bytes2human(ios['write_bytes']), int(ios['write_count']),)
colno = self.__print_string(self.lineno, colno_io, line_io)
def __help_window(self,):
"""
Display help window
"""
self.win.erase()
self.lineno = 0
text = "pg_activity %s - (c) 2012-2019 <NAME>" % \
(self.version)
self.__print_string(
self.lineno,
0,
text,
self.__get_color(C_GREEN)|curses.A_BOLD)
self.lineno += 1
text = "Released under PostgreSQL License."
self.__print_string(
self.lineno,
0,
text)
self.lineno += 2
self.__display_help_key(
self.lineno,
00,
"Up/Down",
"scroll process list")
self.__display_help_key(
self.lineno,
45,
" C",
"activate/deactivate colors")
self.lineno += 1
self.__display_help_key(
self.lineno,
00,
" Space",
"pause")
self.__display_help_key(
self.lineno,
45,
" r",
"sort by READ/s desc. (activities)")
self.lineno += 1
self.__display_help_key(
self.lineno,
00,
" v",
"change display mode")
self.__display_help_key(
self.lineno,
45,
" w",
"sort by WRITE/s desc. (activities)")
self.lineno += 1
self.__display_help_key(
self.lineno,
00,
" q",
"quit")
self.__display_help_key(
self.lineno,
45,
" c",
"sort by CPU% desc. (activities)")
self.lineno += 1
self.__display_help_key(
self.lineno,
00,
" +",
"increase refresh time (max:5s)")
self.__display_help_key(
self.lineno,
45,
" m",
"sort by MEM% desc. (activities)")
self.lineno += 1
self.__display_help_key(
self.lineno,
00,
" -",
"decrease refresh time (min:0.5s)")
self.__display_help_key(
self.lineno,
45,
" t",
"sort by TIME+ desc. (activities)")
self.lineno += 1
self.__display_help_key(
self.lineno,
00,
" R",
"force refresh")
self.__display_help_key(
self.lineno,
45,
" T",
"change duration mode")
self.lineno += 1
self.__display_help_key(
self.lineno,
00,
" D",
"force refresh database size")
self.lineno += 1
self.__print_string(
self.lineno,
0,
"Mode")
self.lineno += 1
self.__display_help_key(
self.lineno,
00,
" F1/1",
"running queries")
self.lineno += 1
self.__display_help_key(
self.lineno,
00,
" F2/2",
"waiting queries")
self.lineno += 1
self.__display_help_key(
self.lineno,
00,
" F3/3",
"blocking queries")
self.lineno += 2
self.__print_string(
self.lineno,
0,
"Press any key to exit.")
self.win.timeout(-1)
try:
self.win.getch()
except KeyboardInterrupt as err:
raise err
def __display_help_key(self, lineno, colno, key, help_msg):
"""
Display help key
"""
pos1 = self.__print_string(
lineno,
colno,
key,
self.__get_color(C_CYAN)|curses.A_BOLD)
pos2 = self.__print_string(
lineno,
colno + pos1,
": %s" % (help_msg,))
return (colno + pos1 + pos2)
def refresh_window(self, procs, extras, flag, indent, ios, \
tps, active_connections, size_ev, total_size):
"""
Refresh the window
"""
self.lines = []
(pg_version, hostname, user, host, port, dbname) = extras
self.win.erase()
self.__print_header(
pg_version,
hostname,
user,
host,
port,
dbname,
ios,
tps,
active_connections,
size_ev,
total_size)
self.lineno += 2
line_trunc = self.lineno
self.__current_position()
self.__print_cols_header(flag)
for proc in procs:
try:
self.__refresh_line(proc, flag, indent, 'default')
line_trunc += 1
self.lines.append(line_trunc)
except curses.error:
break
for line in range(self.lineno, (self.maxy-1)):
self.__print_string(line, 0, self.__add_blank(" "))
self.__change_mode_interactive()
def __scroll_window(self, procs, flag, indent, offset = 0):
"""
Scroll the window
"""
self.lineno = (self.start_line + 2)
pos = 0
for proc in procs:
| |
# File: pinkfish/statistics.py
"""
statistics
---------
Calculate trading statistics
"""
# Use future imports for python 3.0 forward compatibility
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
# Other imports
import pandas as pd
import numpy as np
import operator
import math
from datetime import datetime
from dateutil.relativedelta import relativedelta
from numpy.lib.stride_tricks import as_strided
#####################################################################
# CONSTANTS
TRADING_DAYS_PER_YEAR = 252
TRADING_DAYS_PER_MONTH = 20
TRADING_DAYS_PER_WEEK = 5
#####################################################################
# HELPER FUNCTIONS
def _difference_in_years(start, end):
""" calculate the number of years between two dates """
diff = end - start
diff_in_years = (diff.days + diff.seconds/86400)/365.2425
return diff_in_years
def _get_trade_bars(ts, tlog, op):
l = []
for row in tlog.itertuples():
if op(row.pl_cash, 0):
l.append(len(ts[row.entry_date:row.exit_date].index))
return l
def currency(amount):
    """Format a cash amount as a dollar string, e.g. -12.5 -> '-$12.50'."""
    sign, magnitude = ('', amount) if amount >= 0 else ('-', -amount)
    return '{}${:,.2f}'.format(sign, magnitude)
#####################################################################
# OVERALL RESULTS
def beginning_balance(capital):
return capital
def ending_balance(dbal):
return dbal.iloc[-1]['close']
def total_net_profit(tlog):
return tlog.iloc[-1]['cumul_total']
def gross_profit(tlog):
return tlog[tlog['pl_cash'] > 0].sum()['pl_cash']
def gross_loss(tlog):
return tlog[tlog['pl_cash'] < 0].sum()['pl_cash']
def profit_factor(tlog):
if gross_profit(tlog) == 0: return 0
if gross_loss(tlog) == 0: return 1000
return gross_profit(tlog) / gross_loss(tlog) * -1
def return_on_initial_capital(tlog, capital):
return total_net_profit(tlog) / capital * 100
def _cagr(B, A, n):
""" calculate compound annual growth rate """
return (math.pow(B / A, 1 / n) - 1) * 100
def annual_return_rate(end_balance, capital, start, end):
B = end_balance
A = capital
n = _difference_in_years(start, end)
return _cagr(B, A, n)
def trading_period(start, end):
diff = relativedelta(end, start)
return '{} years {} months {} days'.format(diff.years, diff.months, diff.days)
def _total_days_in_market(dbal):
n = (dbal['shares'] > 0).sum()
if dbal.iloc[-2]['shares'] > 0:
n += 1
return n
def pct_time_in_market(dbal):
return _total_days_in_market(dbal) / len(dbal) * 100
#####################################################################
# SUMS
def total_num_trades(tlog):
    """Number of trades in the log."""
    return len(tlog.index)
def trades_per_year(tlog, start, end):
    """Average trade count per calendar year of the backtest."""
    span = relativedelta(end, start)
    years = span.years + span.months/12 + span.days/365
    return total_num_trades(tlog) / years
def num_winning_trades(tlog):
    """Count of trades with positive cash P&L."""
    return (tlog['pl_cash'] > 0).sum()
def num_losing_trades(tlog):
    """Count of trades with negative cash P&L."""
    return (tlog['pl_cash'] < 0).sum()
def num_even_trades(tlog):
    """Count of breakeven trades."""
    return (tlog['pl_cash'] == 0).sum()
def pct_profitable_trades(tlog):
    """Winning trades as a percentage of all trades (0 when log is empty)."""
    trades = total_num_trades(tlog)
    if trades == 0:
        return 0
    return num_winning_trades(tlog) / trades * 100
#####################################################################
# CASH PROFITS AND LOSSES
def avg_profit_per_trade(tlog):
    """Mean cash profit across all trades (0 when log is empty)."""
    trades = total_num_trades(tlog)
    return total_net_profit(tlog) / trades if trades else 0
def avg_profit_per_winning_trade(tlog):
    """Mean cash profit across winning trades (0 when none)."""
    winners = num_winning_trades(tlog)
    return gross_profit(tlog) / winners if winners else 0
def avg_loss_per_losing_trade(tlog):
    """Mean cash loss across losing trades (0 when none)."""
    losers = num_losing_trades(tlog)
    return gross_loss(tlog) / losers if losers else 0
def ratio_avg_profit_win_loss(tlog):
    """Average win over |average loss|; 0 with no wins, 1000 with no losses."""
    avg_win = avg_profit_per_winning_trade(tlog)
    avg_loss = avg_loss_per_losing_trade(tlog)
    if avg_win == 0:
        return 0
    if avg_loss == 0:
        return 1000
    return avg_win / avg_loss * -1
def largest_profit_winning_trade(tlog):
    """Biggest single-trade cash profit (0 when no winners)."""
    if num_winning_trades(tlog) == 0:
        return 0
    return tlog.loc[tlog['pl_cash'] > 0, 'pl_cash'].max()
def largest_loss_losing_trade(tlog):
    """Biggest single-trade cash loss (0 when no losers)."""
    if num_losing_trades(tlog) == 0:
        return 0
    return tlog.loc[tlog['pl_cash'] < 0, 'pl_cash'].min()
#####################################################################
# POINTS
def num_winning_points(tlog):
    """Sum of points gained over winning trades (0 when none)."""
    if num_winning_trades(tlog) == 0:
        return 0
    return tlog.loc[tlog['pl_points'] > 0, 'pl_points'].sum()
def num_losing_points(tlog):
    """Sum of points lost over losing trades (0 when none)."""
    if num_losing_trades(tlog) == 0:
        return 0
    return tlog.loc[tlog['pl_points'] < 0, 'pl_points'].sum()
def total_net_points(tlog):
    """Net points across all trades."""
    return num_winning_points(tlog) + num_losing_points(tlog)
def avg_points(tlog):
    """Mean points per trade (0 when log is empty)."""
    if total_num_trades(tlog) == 0:
        return 0
    return tlog['pl_points'].sum() / len(tlog.index)
def largest_points_winning_trade(tlog):
    """Largest points gain on a single trade (0 when no winners)."""
    if num_winning_trades(tlog) == 0:
        return 0
    return tlog.loc[tlog['pl_points'] > 0, 'pl_points'].max()
def largest_points_losing_trade(tlog):
    """Largest points loss on a single trade (0 when no losers)."""
    if num_losing_trades(tlog) == 0:
        return 0
    return tlog.loc[tlog['pl_points'] < 0, 'pl_points'].min()
def avg_pct_gain_per_trade(tlog):
    """Mean percentage gain per trade relative to entry price."""
    if total_num_trades(tlog) == 0:
        return 0
    pct = tlog['pl_points'] / tlog['entry_price']
    return np.average(pct) * 100
def largest_pct_winning_trade(tlog):
    """Largest single-trade percentage gain (0 when no winners)."""
    if num_winning_trades(tlog) == 0:
        return 0
    winners = tlog[tlog['pl_points'] > 0]
    return (winners['pl_points'] / winners['entry_price']).max() * 100
def largest_pct_losing_trade(tlog):
    """Largest single-trade percentage loss (0 when no losers)."""
    if num_losing_trades(tlog) == 0:
        return 0
    losers = tlog[tlog['pl_points'] < 0]
    return (losers['pl_points'] / losers['entry_price']).min() * 100
#####################################################################
# STREAKS
def _subsequence(s, c):
"""
Takes as parameter list like object s and returns the length of the longest
subsequence of s constituted only by consecutive character 'c's.
Example: If the string passed as parameter is "001000111100", and c is '0',
then the longest subsequence of only '0's has length 3.
"""
count = 0 # current length of the sequence of zeros
maxlen = 0 # temporary value of the maximum length
for bit in s:
if bit == c: # we have read a new '0'
count += 1 # update the length of the current sequence
if count > maxlen: # if necessary, update the temporary maximum
maxlen = count
else: # we have read a 1
count = 0 # reset the length of the current sequence
return maxlen
def max_consecutive_winning_trades(tlog):
if num_winning_trades(tlog) == 0: return 0
return _subsequence(tlog['pl_cash'] > 0, True)
def max_consecutive_losing_trades(tlog):
if num_losing_trades(tlog) == 0: return 0
return _subsequence(tlog['pl_cash'] > 0, False)
def avg_bars_winning_trades(ts, tlog):
    """Average number of bars held across winning trades; 0 if there are no winners."""
    if num_winning_trades(tlog) == 0:
        return 0
    bars = _get_trade_bars(ts, tlog, operator.gt)
    return np.average(bars)
def avg_bars_losing_trades(ts, tlog):
    """Average number of bars held across losing trades; 0 if there are no losers."""
    if num_losing_trades(tlog) == 0:
        return 0
    bars = _get_trade_bars(ts, tlog, operator.lt)
    return np.average(bars)
#####################################################################
# DRAWDOWN AND RUNUP
def max_closed_out_drawdown(close):
    """Maximum closed-out drawdown of a close-price series.

    Each close is compared only against the running peak so far (single
    O(N) pass).  Returns a Series with: max (drawdown %), peak, trough,
    start_date, end_date and recovery_date ('Not Recovered Yet' when the
    peak was never regained).
    """
    peaks = pd.Series(close).expanding(min_periods=1).max()
    drawdown_pct = (close - peaks) / peaks * 100
    worst_idx = drawdown_pct.idxmin()
    dd = pd.Series()
    dd['max'] = min(0, drawdown_pct.min())
    dd['peak'] = peaks[worst_idx]
    dd['trough'] = close[worst_idx]
    dd['start_date'] = close[close == dd['peak']].index[0].strftime('%Y-%m-%d')
    dd['end_date'] = worst_idx.strftime('%Y-%m-%d')
    # First close after the trough that exceeds the peak marks recovery.
    after = close[close.index > worst_idx]
    recovered = after > dd['peak']
    if recovered.any():
        dd['recovery_date'] = after[recovered].index[0].strftime('%Y-%m-%d')
    else:
        dd['recovery_date'] = 'Not Recovered Yet'
    return dd
def max_intra_day_drawdown(high, low):
    """Maximum intra-day drawdown: running peak of `high` versus subsequent `low`.

    Single pass against the running peak (O(N)).  Returns a Series with:
    max (drawdown %), peak, trough, start_date, end_date and recovery_date.
    """
    running_max = pd.Series(high).expanding(min_periods=1).max()
    cur_dd = (low - running_max) / running_max * 100
    dd_max = min(0, cur_dd.min())
    idx = cur_dd.idxmin()
    dd = pd.Series()
    dd['max'] = dd_max
    dd['peak'] = running_max[idx]
    dd['trough'] = low[idx]
    dd['start_date'] = high[high == dd['peak']].index[0].strftime('%Y-%m-%d')
    dd['end_date'] = idx.strftime('%Y-%m-%d')
    high = high[high.index > idx]
    rd_mask = high > dd['peak']
    if rd_mask.any():
        dd['recovery_date'] = high[rd_mask].index[0].strftime('%Y-%m-%d')
    else:
        # Bug fix: this key was previously omitted when the drawdown had not
        # recovered, so callers indexing dd['recovery_date'] would KeyError.
        # Mirror max_closed_out_drawdown's sentinel for consistency.
        dd['recovery_date'] = 'Not Recovered Yet'
    return dd
def _windowed_view(x, window_size):
"""Create a 2d windowed view of a 1d array.
`x` must be a 1d numpy array.
`numpy.lib.stride_tricks.as_strided` is used to create the view.
The data is not copied.
Example:
>>> x = np.array([1, 2, 3, 4, 5, 6])
>>> _windowed_view(x, 3)
array([[1, 2, 3],
[2, 3, 4],
[3, 4, 5],
[4, 5, 6]])
"""
y = as_strided(x, shape=(x.size - window_size + 1, window_size),
strides=(x.strides[0], x.strides[0]))
return y
def rolling_max_dd(ser, period, min_periods=1):
    """Compute the rolling maximum drawdown (%) of Series `ser`.

    `min_periods` should satisfy ``1 <= min_periods <= period + 1``.  The
    front of the series is padded with its first value so that every
    position receives a full window.  Returns a Series aligned to
    `ser.index` carrying the same name.
    """
    window_size = period + 1
    values = ser.values
    if min_periods < window_size:
        padding = np.full(window_size - min_periods, values[0])
        values = np.concatenate((padding, values))
    windows = _windowed_view(values, window_size)
    peaks = np.maximum.accumulate(windows, axis=1)
    drawdowns = (windows - peaks) / peaks * 100
    return pd.Series(data=drawdowns.min(axis=1), index=ser.index, name=ser.name)
def rolling_max_ru(ser, period, min_periods=1):
    """Compute the rolling maximum run-up (%) of Series `ser`.

    `min_periods` should satisfy ``1 <= min_periods <= period + 1``.  The
    front of the series is padded with its first value so that every
    position receives a full window.  Returns a Series aligned to
    `ser.index` carrying the same name.
    """
    window_size = period + 1
    values = ser.values
    if min_periods < window_size:
        padding = np.full(window_size - min_periods, values[0])
        values = np.concatenate((padding, values))
    windows = _windowed_view(values, window_size)
    troughs = np.minimum.accumulate(windows, axis=1)
    runups = (windows - troughs) / troughs * 100
    return pd.Series(data=runups.max(axis=1), index=ser.index, name=ser.name)
#####################################################################
# PERCENT CHANGE - used to compute several stastics
def pct_change(close, period):
    """Percentage change of `close` looking `period` bars ahead; the NaN tail is dropped."""
    future = close.shift(-period)
    return ((future - close) / close * 100).dropna()
#####################################################################
# RATIOS
def sharpe_ratio(rets, risk_free=0.00, period=TRADING_DAYS_PER_YEAR):
    """Annualized Sharpe ratio computed from daily returns.

    rets: 1d numpy array or fund list of daily returns (centered on 0)
    risk_free: risk-free return, default 0%
    period: annualization factor (trading days per year by default)
    """
    sigma = np.std(rets, axis=0)
    mu = np.mean(rets, axis=0)
    return (mu * period - risk_free) / (sigma * np.sqrt(period))
def sortino_ratio(rets, risk_free=0.00, period=TRADING_DAYS_PER_YEAR):
    """Annualized Sortino ratio: like Sharpe, but the deviation is computed
    from negative returns only.

    rets: 1d numpy array or fund list of daily returns (centered on 0)
    risk_free: risk-free return, default 0%
    """
    mu = np.mean(rets, axis=0)
    downside = rets[rets < 0]
    sigma = np.std(downside, axis=0)
    return (mu * period - risk_free) / (sigma * np.sqrt(period))
#####################################################################
# STATS - this is the primary call used to generate the results
def stats(ts, tlog, dbal, start, end, capital):
"""
Compute trading stats
Parameters
----------
ts : Dataframe
Time series of security prices (date, high, low, close, volume,
adj_close)
tlog : Dataframe
Trade log (entry_date, entry_price, long_short, qty,
exit_date, exit_price, pl_points, pl_cash, cumul_total)
dbal : Dataframe
Daily Balance (date, high, low, close)
start : datetime
date of first buy
end : datetime
date of last sell
capital : float
starting capital
Examples
--------
Returns
-------
stats : Series of stats
"""
stats = pd.Series()
# OVERALL RESULTS
stats['start'] | |
# Part of the JBEI Quantitative Metabolic Modeling Library (JQMM)
# Copyright (c) 2016, The Regents of the University of California.
# For licensing details see "license.txt" and "legal.txt".
"""
All functions to obtain the Toya data and a test suite.
This module effectively reproduces all figures in Garcia Martin et al 2015
"""
import unittest, os, copy, shelve, sys, traceback, re
import matplotlib, numpy, difflib
import matplotlib.pyplot as plt
import core, FluxModels, predictions, ReactionNetworks, enhancedLists
from pylab import figure, title, savefig, plot, xlabel, ylabel
from IPython.display import SVG
import utilities as utils
### Needed paths
qmodeldir = os.environ['QUANTMODELPATH']
basedir = qmodeldir+'/data/tests/Toya2010/2S/'
testdir = basedir + '2SpaperTest/'
###
# Strains for which data is available in
strains = ['wt5h','wt6h','wt7h','pyk5h','pyk6h','pyk7h','pgi16h','pgi21h','pgi23h']
# Dictionary that maps the base strain to make predictions from (i.e. pyk5h MOMA predictions are based of wt5h)
strainBase = {'pyk5h':'wt5h','pyk6h':'wt6h','pyk7h':'wt7h','pgi16h':'wt5h','pgi21h':'wt6h','pgi23h':'wt7h'}
class generalTest(unittest.TestCase):
    """Base test class providing comparison helpers shared by all other tests."""
    def compareFileLists(self,files1,files2):
        """
        Compares lists of files to check whether they are identical or not.
        files1 and files2 are expected to be lists containing files to be compared:
            files1 = [file1, file2, file3, .... etc]
        Files in the list can be given in the form of filenames or tuples of the form (filename, filestring).
        Alternatively, the inputs can be dicts:
            files1 = {'filename1':file1, 'filename2':file2, 'filename3':file3... etc}
        Fails the test if the two sets of file names differ, or if any file's
        content differs (reported as a unified diff).
        """
        # Put file lists in form of dictionaries for easier manipulation if needed
        dicts = []
        for thing in [files1,files2]:
            # if list convert to dict (values become lists of lines)
            if type(thing) is list:
                fileList = thing
                dict_= {}
                for file_ in fileList:
                    (name,string) = utils.file2tuple(file_)
                    dict_[name]=string.splitlines()
            # if dict keep as it is
            # NOTE(review): dict inputs are assumed to already map names to
            # line lists (not raw strings) — confirm against callers.
            elif type(thing) is dict:
                dict_ = thing
            else: # unknown input error
                print files1
                print files2
                raise Exception('Inputs must be in list or dict type')
            dicts.append(dict_)
        dict1 = dicts[0]
        dict2 = dicts[1]
        # See if we have the same file names on each list
        names1 = set(dict1.keys())
        names2 = set(dict2.keys())
        errmsg1 = "File list is different: \n"+str(names1)+" vs \n"+str(names2)
        self.assertEqual(names1,names2,msg=errmsg1)
        # Make comparison of file content
        for fileName in dict1:
            diff = difflib.unified_diff(dict1[fileName],dict2[fileName],n=10) # TODO: figure out how to get a better output out of the diff
            diffOut = '\n'.join(list(diff))
            errmsg2 = "New version of files "+fileName+" is different\n"+"diff:\n"+diffOut
            self.assertEqual(diffOut,'',msg=errmsg2)
    def compareFluxDict(self,dict1,dict2,delta,elements='all',errMsg=''):
        """
        Compares two FLUX dictionaries of the type (e.g.):
            fluxDict['PGI'] = flux class instance
        -)delta is the relative change permitted
        -)Elements decides which elements to compare (lo, best or hi) from the ranged number in core
        -)errMsg is the additional string to add to the error messages
        NOTE(review): the errMsg parameter is currently never used in the body.
        """
        # See if we have the same fluxes on each dict
        errmsg1 = "Fluxes are different:\n"+str(dict1.keys())+"\n"+str(dict2.keys())
        self.assertEqual(set(dict1.keys()),set(dict2.keys()),msg=errmsg1)
        noChangeTotal = True
        errmsg = '\n'
        for name in dict1.keys():
            fluxRF = dict1[name]
            fluxN = dict2[name]
            # Test all values; `changed` is a helper defined elsewhere in this
            # module — presumably a relative-difference test against delta.
            bestChanged = changed(fluxN.net.best,fluxRF.net.best,delta)
            loChanged = changed(fluxN.net.lo,fluxRF.net.lo,delta)
            hiChanged = changed(fluxN.net.hi,fluxRF.net.hi,delta)
            # NOTE(review): an `elements` value outside {'all','lohi','best'}
            # leaves noChange unset (NameError on first iteration).
            if elements == 'all':
                noChange = (not bestChanged) and (not loChanged) and (not hiChanged)
            elif elements == 'lohi':
                noChange = (not loChanged) and (not hiChanged)
            elif elements == 'best':
                noChange = (not bestChanged)
            noChangeTotal = noChangeTotal and noChange
            if not noChange:
                errmsg = errmsg + 'Flux '+name+' changed from: \n'+str(fluxRF.net)+' to: \n'+ str(fluxN.net)+'\n'
        self.assertTrue(noChangeTotal,msg=errmsg)
    def testFull(self):
        "If not defined, defined as testQuick by default"
        self.testQuick()
class testInputFiles(generalTest): # TODO: change this to testInputFiles
    """ Testing production of SBML files used as input for everything else """
    def setUp(self):
        # Getting reference files in dictionary form"
        # NOTE(review): the bare except below hides all setUp failures; tests
        # will then fail later with a missing self.refFiles attribute.
        try:
            files = {}
            for strain in strains:
                fileName = 'EciJR904TKs'+strain+'TS.xml'
                files[fileName] = utils.file2tuple(testdir+'XML/'+fileName)
            self.refFiles = files
        except:
            e = sys.exc_info()[0]
            print 'Problems loading stored data in testFiles: \n'+str(e)
    def testStrain(self,strain):
        """ Tests a single strain by comparing with reference stored files"""
        # getSBMLfile is defined elsewhere in this module.
        newFile = getSBMLfile(strain)
        refFile = self.refFiles['EciJR904TKs'+strain+'TS.xml']
        self.compareFileLists([refFile],[newFile])
    def testQuick(self):
        """ Only tests the first of the strains """
        self.testStrain(strains[0])
    def testFull(self):
        """ Tests all of the strains """
        for strain in strains:
            self.testStrain(strain)
    def refreshSavedFiles(self):
        """ Refresh the files used as reference for the comparison """
        # NOTE(review): here getSBMLfile is unpacked as a (name, string)
        # tuple, whereas testStrain uses its result as a single file — verify.
        # Uses the Python 2 file() builtin; the handle is never closed.
        for strain in strains:
            fileName,fileString = getSBMLfile(strain)
            outFile = file(testdir+'XML/'+fileName,'w')
            outFile.write(fileString)
    def tearDown(self):
        pass
class testOptimizationFiles(generalTest):
    """ Testing input files for TS optimization problem """
    def setUp(self):
        # Getting reference files in dictionary form"
        # NOTE(review): the bare except hides all setUp failures.
        try:
            files = {}
            for strain in strains:
                files[strain] = readFiles(testdir+strain+'/')
            self.refFiles = files
        except:
            e = sys.exc_info()[0]
            print 'Problems loading stored data in testFiles: \n'+str(e)
    def testStrain(self,strain):
        """ Tests a single strain by comparing with reference stored files"""
        # getTSmodel and filterFiles are defined elsewhere in this module;
        # filterFiles returning None drops a file from the reference list.
        TSmodel = getTSmodel(strain)
        newFiles = [filterFiles(x) for x in TSmodel.reactionNetwork.getTwoS13CMFAfiles()]
        refFiles = [filterFiles(y) for y in sorted(self.refFiles[strain]) if filterFiles(y)!=None]
        self.compareFileLists(sorted(refFiles),sorted(newFiles))
    def testQuick(self):
        """ Only tests the first of the strains """
        self.testStrain(strains[0])
    def testFull(self):
        """ Tests all of the strains """
        for strain in strains:
            self.testStrain(strain)
    def refreshSavedFiles(self):
        """ Refresh the files used as reference for the comparison """
        # Uses the Python 2 file() builtin; handles are never closed.
        for strain in strains:
            TSmodel = getTSmodel(strain)
            files = TSmodel.reactionNetwork.getTwoS13CMFAfiles()
            for file_ in files:
                fileName,fileString = file_
                outFile = file(testdir+strain+'/'+fileName,'w')
                outFile.write(fileString)
    def tearDown(self):
        pass
class testTSFluxCalculation(generalTest):
    """ Testing flux profiles obtained from TS 13C MFA """
    def setUp(self):
        """ Getting reference files in dictionary form """
        # Reference results are read from per-strain shelve databases.
        self.TSresults = {}
        self.fluxDicts = {}
        for strain in strains:
            try:
                db = shelve.open(testdir+strain+'/'+'TSresults'+strain)
                self.TSresults[strain] = db['TSresult']
                self.fluxDicts[strain] = db['fluxDict']
                db.close()
            except:
                print 'Problems loading stored data in testTSFluxCalculation for strain %s:\n%s' % (strain, traceback.format_exc())
    def testStrain(self,strain,saved=False):
        """ Tests a single strain
        -) saved decides whether to used saved solutions (if True) or calculate them anew (if False)
        """
        print '\nTesting strain: '+str(strain)+'\n'
        # Names for fluxes to be tested
        fluxNames = ['G6PDH2r','GAPD','CS','FUM','ICDHyr','ACODA','ACKr','PDH']
        fluxNames.extend(['THD2','ICDHyr','G6PDH2r','GND','MTHFD','GLUDy','KARA1i','C160SN','C181SN','ASAD'])
        fluxNames.extend(['MDH','PGCD','AKGDH','NADH6'])
        fluxNames.extend(['EX_h2o_e_','EX_o2_e_','EX_co2_e_','EX_nh4_e_','EX_h_e_','EX_glc_e_','EX_ac_e_','EX_pi_e_','EX_urea_e_'])
        fluxNames.extend(['EX_so4_e_','EX_glyclt_e_','EX_acald_e_','EX_fum_e_'])
        # Avoid repetitions
        fluxNames = sorted(list(set(fluxNames)))
        # Obtain results (getTSresults is defined elsewhere in this module)
        TSresult, TSmodel = getTSresults(strain,saved=saved)
        # Reference results
        RFresults = self.TSresults
        RFresult = RFresults[strain]
        fluxDictN = TSresult.reactionNetwork.reactionList.getReactionSubSet(rxnNames=fluxNames).getFluxDictionary(rangeComp='all') # new results
        fluxDictRF = RFresult.reactionNetwork.reactionList.getReactionSubSet(rxnNames=fluxNames).getFluxDictionary(rangeComp='all') # reference results
        # Flux comparison: allow 10% relative variation
        delta = 0.10
        errMsgAdd = '\n in strain '+strain
        self.compareFluxDict(fluxDictRF,fluxDictN,delta,elements='lohi',errMsg=errMsgAdd) # Comparing only lowest and highest flux values
    def testQuick(self):
        """ Only tests the first of the strains """
        self.testStrain(strains[0])
    def testFull(self):
        """ Test all strains """
        AllCasesWork = True
        errmsg = ''
        # Run tests and compile all failures by catching the exceptions
        failures = []
        for strain in strains:
            try:
                self.testStrain(strain)
            except AssertionError as e:
                failures.append("\n"+"Strain:"+str(strain)+"\n"+str(e)+"\n")
        # Raise failure if any fail but give report on all of them
        if failures:
            errmsg = "\n".join([str(x) for x in failures])
            AllCasesWork = False
        self.assertTrue(AllCasesWork,msg=errmsg)
    def refreshSavedFiles(self):
        """ Refresh the files used for the comparison """
        for strain in strains:
            results, model = getTSresults(strain)
            fluxDict = results.reactionNetwork.reactionList.getFluxDictionary(rangeComp='all')
            TSresult = results
            #Eliminate resultsDict because it is huge
            if hasattr(TSresult,'resultsDict'):
                delattr(TSresult,'resultsDict')
            db = shelve.open(testdir+strain+'/'+'TSresults'+strain)
            db['TSresult'] = TSresult
            db['fluxDict'] = fluxDict
            db.close()
    def tearDown(self):
        pass
class testELVA(generalTest):
""" Tests ELVA (External Labeling Variability Analysis) results """
    def setUp(self):
        """ Getting reference files in dictionary form """
        # Reference label-component dictionaries are read from a shelve db.
        # NOTE(review): the bare except hides all setUp failures; later tests
        # would then fail on missing labelCompDict attributes.
        try:
            db = shelve.open(testdir+'ELVArefs')
            self.labelCompDict1 = db['labelCompDict1']
            self.labelCompDict2 = db['labelCompDict2']
            db.close()
        except:
            e = sys.exc_info()[0]
            print 'Problems loading stored data in testELVA: \n'+str(e)
    def getResults(self,saved=False):
        """ Results to use for the comparison.

        Runs the ELVA analysis for the wt5h strain with both the new and old
        reaction sets, and returns the two labeling-component dictionaries.
        -) saved: reuse stored TS results (True) or recompute them (False)
        """
        strain = 'wt5h'
        ### Obtain ELVA results used for the comparison
        # ELVA with new reaction set (getTSresults is defined elsewhere)
        TSresult, TSmodel = getTSresults(strain,saved=saved)
        resultELVA = TSmodel.ELVA(TSresult,procString='proc')
        # ELVA with old reaction set
        TSresultOld, TSmodelOld = getTSresults(strain,oldVersion=True)
        resultELVAOld = TSmodelOld.ELVA(TSresultOld,procString='proc')
        ### Obtain labeling component dictionaries to be compared
        # Only the last element of each unpacked tuple is used below.
        x1,Dx1,y1,DyU1,DyL1,labelComp1,labelCompDict1 = resultELVA.getxDXyDyInfo()
        x2,Dx2,y2,DyU2,DyL2,labelComp2,labelCompDict2 = resultELVAOld.getxDXyDyInfo()
        return labelCompDict1,labelCompDict2
def compLabelCompDict(self,dict1,dict2,errAdd):
"""
Compares labelCompDicts. Assuming dict1 to be reference
-) errAdd is the extra string added at the beginning of the error string
"""
# Acceptable relative variation fraction
delta = 0.1 | |
FileName in L:
MSG.append (" - " + FileName);
MSG.append ("-"*80);
self.__PRJ.lp (MSG);
allsentences = [] ;
for FileAddress in L:
Sentences , Root , LOCAL_TotalCorpusRootAnalysisResult = self.__PRJ.Preprocessor.ProcessCorpus (FileAddress,DuplicationRemovalPolicy,SkipSentences);
allsentences.extend (Sentences);
self.__DATA[Set]["allsentences"] = allsentences
self.__GOLD[Set] = {"positives":{}, "negatives":{}}
for sentence in self.__DATA[Set]["allsentences"]:
for pair in sentence["PAIRS"]:
if pair["POSITIVE"]:
self.__GOLD[Set]["positives"][pair["ID"]] = pair
else:
self.__GOLD[Set]["negatives"][pair["ID"]] = pair
MSG = ["-"*30+"CORPUS STATISTICS"+"-"*30]
for Set in ["TrainingSet","DevelopmentSet","TestSet"]:
total_positives = len (self.__GOLD[Set]["positives"])
total_negatives = len (self.__GOLD[Set]["negatives"])
positive_types = {}
for p_id in self.__GOLD[Set]["positives"]:
tp = self.__GOLD[Set]["positives"][p_id]["CLASS_TP"]
if not tp in positive_types:
positive_types[tp]=1
else:
positive_types[tp]+=1
MSG.append ("Set :" + Set + "\n" + "-"*20)
MSG.append ("\t positives: " + str(total_positives))
MSG.append ("\t negatives: " + str(total_negatives))
MSG.append ("\t total : " + str(total_positives+total_negatives))
MSG.append ("\t ----------------")
MSG.append ("\t positives:")
for tp in positive_types:
MSG.append ("\t\t- " + tp + ": " + str(positive_types[tp]))
self.__PRJ.lp (MSG)
def __FindTopKShortestPaths (self, key, FeatureGenerationParams):
if not key in self.__DATA.keys():
self.__PRJ.PROGRAM_Halt ("Invalid key. Key should be in " + str(self.__DATA.keys()))
import datetime
start = datetime.datetime.now()
self.__DATA[key]["allsentences"] = self.__PRJ.FeatureGenerator.CalculateAndAddTopKSDPs_Parallel (self.__DATA[key]["allsentences"], FeatureGenerationParams,key)
end = datetime.datetime.now()
self.__PRJ.lp ("Finding Top-K Shortest Path Finished for " + key + ". Processing time: " + str(end - start).split(".")[0])
def __GenerateFeatures (self, key, FeatureGenerationParams, AddFeatureToDictIfNotExists,useOnlyXTopPathsForFeatureGeneration=None):
if not key in self.__DATA.keys():
self.__PRJ.PROGRAM_Halt ("Invalid key. Key should be in " + str(self.__DATA.keys()))
self.__PRJ.lp (["Running Feature Extraction for :" + key, "Allowed to add to Feature Mapping Dictionary:"+str(AddFeatureToDictIfNotExists)])
self.__PRJ.FeatureGenerator.ExtractFeatures (self.__DATA[key]["allsentences"], FeatureGenerationParams, AddFeatureToDictIfNotExists,useOnlyXTopPathsForFeatureGeneration)
def __RestShortestPathsAndFeatures (self):
self.__PRJ.FeatureMappingDictionary.reset_dictionary()
self.__PRJ.lp ("Reseting all top-k-paths and all extracted features in sentences.")
for key in self.__DATA:
if self.__DATA[key].has_key("allsentences"):
for sentence in self.__DATA[key]["allsentences"]:
for pair in sentence["PAIRS"]:
if pair.has_key("TOPKP"):
del pair["TOPKP"]
if pair.has_key("TOPKP_Features"):
del pair["TOPKP_Features"]
if pair.has_key("SP_Features"):
del pair["SP_Features"]
def __ResetFeatures (self):
self.__PRJ.FeatureMappingDictionary.reset_dictionary()
self.__PRJ.POSTagsEmbeddings.reset_dictionary()
self.__PRJ.DPTypesEmbeddings.reset_dictionary()
self.__PRJ.lp ("Reseting all extracted features in sentences.")
for key in self.__DATA:
if self.__DATA[key].has_key("allsentences"):
for sentence in self.__DATA[key]["allsentences"]:
for pair in sentence["PAIRS"]:
if pair.has_key("TOPKP_Features"):
del pair["TOPKP_Features"]
if pair.has_key("SP_Features"):
del pair["SP_Features"]
    def __GenerateSVMMatrices (self,key, FeatureGenerationParams, returnLabels=False):
        """Build the sparse SVM design matrix for one data split.

        Walks every (sentence, pair) in self.__DATA[key]["allsentences"],
        keeping only pairs that carry both "TOPKP" and "TOPKP_Features".
        Returns (X, Y, SENTENCE_INDEX, PAIR_TRACKING):
          X              : csr_matrix of binary features, optionally h-stacked
                           with one-hot entity-type/order columns
          Y              : int8 label vector, or None when returnLabels=False
          SENTENCE_INDEX : sentence index for each kept example
          PAIR_TRACKING  : tuples used to write predictions back into XML
        """
        if not key in self.__DATA.keys():
            self.__PRJ.PROGRAM_Halt ("Invalid key. Key should be in " + str(self.__DATA.keys()))
        from scipy.sparse import csr_matrix
        import SharedFunctions as SF
        all_sentences = self.__DATA[key]["allsentences"]
        pos_neg_dict, class_tp_dict, total_example_count = SF.CalculateHowManyRelationsWithShortestPathInDataset (all_sentences)
        if total_example_count < 1:
            self.__PRJ.PROGRAM_Halt (key + " has no examples!")
        # NOTE(review): arrays are sized by total_example_count but filled only
        # for seen_pair_count kept pairs; trailing slots stay zero — confirm
        # callers rely on X's explicit shape rather than these arrays' length.
        SENTENCE_INDEX = np.zeros (total_example_count, dtype=np.int32) #for pairs .. so, if sentence one has 3 examples and sentence two has four --> 1112222
        PAIR_TRACKING = []
        if returnLabels:
            # NOTE(review): int8 limits multiclass label indices to 127.
            Y = np.zeros (total_example_count, dtype=np.int8)
        else:
            Y = None
        seen_pair_count = 0
        if len(self.__PRJ.Configs["OneHotEncodingForValidEnityTypesForRelations"]) > 1: #there are at least two entity types, so it worths to make clear which is which for each example
            feature_entity_type = np.zeros ((total_example_count,2*len(self.__PRJ.Configs["OneHotEncodingForValidEnityTypesForRelations"])), dtype=np.int64)
        indptr = [0]
        indices = []
        for Sentence_Index , S in enumerate (all_sentences):
            for pair in S["PAIRS"]:
                if (not pair.has_key("TOPKP")) or (pair["TOPKP"] == None):
                    continue
                if (not pair.has_key("TOPKP_Features")) or (pair["TOPKP_Features"] == None): #an example might have pair["TOPKP_Features"] = [] in a very rare and very very unlikely condition
                    continue
                #1-Track: for later writing-back the prediction results into XML
                e1_type = S["ENTITIES"][pair["E1"]]["TYPE"];
                e2_type = S["ENTITIES"][pair["E2"]]["TYPE"];
                _e1tp = e1_type.capitalize()
                _e2tp = e2_type.capitalize()
                PAIR_TRACKING.append ( (S["ID"] , pair["ID"] , pair["E1"] , pair["E2"], _e1tp , _e2tp) )
                #2-TOPKP_Features
                """
                the following code-snippet shows howo to efficiently create sparce matrix for SVM ...
                X = [[1,3,5],[0,2,4],[7,8,9],[4],[1],[],[],[1,9]]
                indptr = [0]
                indices = []
                for x in X:
                    indices.extend(x)
                    indptr.append(len(indices))
                print csr_matrix(([1]*len(indices), indices, indptr) ,dtype=int).toarray()
                """
                indices.extend (pair["TOPKP_Features"])
                indptr.append(len(indices))
                #3-entity type and their order features ...
                if len(self.__PRJ.Configs["OneHotEncodingForValidEnityTypesForRelations"]) > 1: #there are at least two entity types
                    #What Policy has been used to generate SDP.
                    SDP_DIRECTION = FeatureGenerationParams.SDPDirection
                    if SDP_DIRECTION == FeGen.TSDP_TSVM_SDPFeatureGenerationDirection.from_1stOccurring_to_2nd:
                        #first column_index, always type of first occuring entity
                        e1_bgn = S["ENTITIES"][pair["E1"]]["HEADOFFSET"][0]
                        e2_bgn = S["ENTITIES"][pair["E2"]]["HEADOFFSET"][0]
                        # Tie on head offsets: fall back to original offsets.
                        if e1_bgn == e2_bgn:
                            e1_bgn = S["ENTITIES"][pair["E1"]]["ORIGOFFSETS"][0][0]
                            e2_bgn = S["ENTITIES"][pair["E2"]]["ORIGOFFSETS"][0][0]
                        if e1_bgn < e2_bgn: #CRITICAL
                            e1_tp = S["ENTITIES"][pair["E1"]]["TYPE"].lower()
                            e2_tp = S["ENTITIES"][pair["E2"]]["TYPE"].lower()
                        else:
                            e1_tp = S["ENTITIES"][pair["E2"]]["TYPE"].lower()
                            e2_tp = S["ENTITIES"][pair["E1"]]["TYPE"].lower()
                    elif SDP_DIRECTION == FeGen.TSDP_TSVM_SDPFeatureGenerationDirection.from_2ndOccurring_to_1st:
                        #first column_index, always type of second occuring entity
                        e1_bgn = S["ENTITIES"][pair["E1"]]["HEADOFFSET"][0]
                        e2_bgn = S["ENTITIES"][pair["E2"]]["HEADOFFSET"][0]
                        if e1_bgn == e2_bgn:
                            e1_bgn = S["ENTITIES"][pair["E1"]]["ORIGOFFSETS"][0][0]
                            e2_bgn = S["ENTITIES"][pair["E2"]]["ORIGOFFSETS"][0][0]
                        if e1_bgn > e2_bgn: #CRITICAL
                            e1_tp = S["ENTITIES"][pair["E1"]]["TYPE"].lower()
                            e2_tp = S["ENTITIES"][pair["E2"]]["TYPE"].lower()
                        else:
                            e1_tp = S["ENTITIES"][pair["E2"]]["TYPE"].lower()
                            e2_tp = S["ENTITIES"][pair["E1"]]["TYPE"].lower()
                    elif SDP_DIRECTION == FeGen.TSDP_TSVM_SDPFeatureGenerationDirection.from_e1value_to_e2value:
                        e1_tp = S["ENTITIES"][pair["E1"]]["TYPE"].lower()
                        e2_tp = S["ENTITIES"][pair["E2"]]["TYPE"].lower()
                    elif SDP_DIRECTION == FeGen.TSDP_TSVM_SDPFeatureGenerationDirection.from_e2value_to_e1value:
                        e1_tp = S["ENTITIES"][pair["E2"]]["TYPE"].lower()
                        e2_tp = S["ENTITIES"][pair["E1"]]["TYPE"].lower()
                    else:
                        self.__PRJ.PROGRAM_Halt ("SDP_DIRECTION METHOD : " + SDP_DIRECTION + " IS NOT IMPLEMENTED YET!");
                    # One-hot columns: [entity-type of first slot | entity-type of second slot]
                    ETP1_COLUMN_IDX = self.__PRJ.Configs["OneHotEncodingForValidEnityTypesForRelations"][e1_tp]
                    ETP2_COLUMN_IDX = len(self.__PRJ.Configs["OneHotEncodingForValidEnityTypesForRelations"]) + self.__PRJ.Configs["OneHotEncodingForValidEnityTypesForRelations"][e2_tp]
                    feature_entity_type[seen_pair_count, ETP1_COLUMN_IDX] = 1 ;
                    feature_entity_type[seen_pair_count, ETP2_COLUMN_IDX] = 1 ;
                #4-Label:
                if returnLabels:
                    if self.__PRJ.Configs["ClassificationType"]== "binary":
                        Y[seen_pair_count] = 1 if (pair["POSITIVE"]==True) else 0 ;
                    else: #MULTICLASS:
                        #Positive/Negative:
                        if pair["POSITIVE"]==False:
                            Y [seen_pair_count]=0;#index zero is always for negative class(es)
                        else:
                            class_label = pair["CLASS_TP"]
                            OneHotIndex = self.__PRJ.Configs["OneHotEncodingForMultiClass"][class_label]
                            Y[seen_pair_count] = OneHotIndex
                #5-Increament index ...
                SENTENCE_INDEX[seen_pair_count] = Sentence_Index;
                seen_pair_count+=1 ;
        #<<<CRITICAL>>> giving shape is critical, because devel/test might have lower features ...
        X = csr_matrix(([1]*len(indices), indices, indptr), shape = (seen_pair_count, self.__PRJ.FeatureMappingDictionary.return_dictionary_length()), dtype=np.int8)
        if len(self.__PRJ.Configs["OneHotEncodingForValidEnityTypesForRelations"]) > 1: #there are at least two entity types
            from scipy.sparse import hstack
            X = hstack ( [X, csr_matrix(feature_entity_type)] )
        return X, Y, SENTENCE_INDEX , PAIR_TRACKING
    def RunPipeline(self,DataPreparationSteps, FeatureGenerationParams, useOnlyXTopPathsForFeatureGeneration=None,TestSetPredictionFileName=None):
        """End-to-end pipeline: data prep, feature extraction, C optimization
        on the development set, and (optionally) test-set prediction.

        DataPreparationSteps keys drive optional stages: "load_gold_and_data",
        "preprocess", "findshortestpaths", "save_gold_and_data".
        Returns (feature_count, devel_metric[, test_metric]).
        """
        # NOTE(review): str("MachineLearningTasks") logs the literal string —
        # presumably a variable was intended here.
        self.__PRJ.lp (["*"*80,"Running pipeline:","-"*17,"DataPreparationSteps:"+str(DataPreparationSteps),"MachineLearningTasks:"+str("MachineLearningTasks"),"*"*80]);
        if "load_gold_and_data" in DataPreparationSteps and DataPreparationSteps["load_gold_and_data"] <> None:
            self.__load_save_GOLD_DATA ("load", fileaddress=DataPreparationSteps["load_gold_and_data"])
        if "preprocess" in DataPreparationSteps and DataPreparationSteps["preprocess"]==True:
            self.__PreprocessAllFiles();
            if self.__RE_PARAMS.WriteBackProcessedTrainingSet:
                self.__WriteBackProcessedGOLD ("TrainingSet")
            if self.__RE_PARAMS.WriteBackProcessedDevelopmentSet:
                self.__WriteBackProcessedGOLD ("DevelopmentSet")
            if self.__RE_PARAMS.WriteBackProcessedTestSet:
                self.__WriteBackProcessedGOLD ("TestSet")
        if "findshortestpaths" in DataPreparationSteps and DataPreparationSteps["findshortestpaths"]==True:
            self.__RestShortestPathsAndFeatures ()
            for key in self.__DATA:
                self.__FindTopKShortestPaths (key, FeatureGenerationParams)
        if "save_gold_and_data" in DataPreparationSteps and DataPreparationSteps["save_gold_and_data"] <> None:
            self.__load_save_GOLD_DATA ("save", fileaddress=DataPreparationSteps["save_gold_and_data"])
        # Log per-split gold vs. machine-learning-example counts.
        MSG = ["MACHINE LEARNING EXAMPLES STATISTICS:", "-"*130]
        MSG.append (" "*20+SF.NVLL("Positives",30)+SF.NVLL("Negatives",30)+SF.NVLL("Total",30))
        for key in self.__DATA:
            all_sentences = self.__DATA[key]["allsentences"]
            pos_neg_dict, class_tp_dict, total_example_count = SF.CalculateHowManyRelationsWithTOPKPaths (all_sentences)
            MSG.append ( SF.NVLR (key+" GOLD", 20) + SF.NVLL (str(len(self.__GOLD[key]["positives"])),30) + SF.NVLL (str(len(self.__GOLD[key]["negatives"])),30) + SF.NVLL (str(len(self.__GOLD[key]["positives"])+len(self.__GOLD[key]["negatives"])),30) )
            MSG.append ( SF.NVLR (key+" MLEx", 20) + SF.NVLL (str(pos_neg_dict["Positives"]),30) + SF.NVLL (str(pos_neg_dict["Negatives"]),30) + SF.NVLL (str(total_example_count),30))
            MSG.append ("-"*130)
        self.__PRJ.lp (MSG)
        #train on train, optimize on devel-set, get best C_value
        #<<<CRITICAL>>> Train can add to feature dictionary, devel cannot
        self.__ResetFeatures()
        self.__GenerateFeatures ("TrainingSet" , FeatureGenerationParams, True ,useOnlyXTopPathsForFeatureGeneration)
        self.__GenerateFeatures ("DevelopmentSet", FeatureGenerationParams, False ,useOnlyXTopPathsForFeatureGeneration)
        self.__PRJ.lp ("Total Extracted Features:" + str(self.__PRJ.FeatureMappingDictionary.return_dictionary_length()))
        train_x , train_y , train_sentence_index, train_pair_tracking = self.__GenerateSVMMatrices("TrainingSet" ,FeatureGenerationParams, returnLabels=True)
        devel_x , devel_y , devel_sentence_index, devel_pair_tracking = self.__GenerateSVMMatrices("DevelopmentSet", FeatureGenerationParams, returnLabels=True)
        devel_Best_C_value , devel_Best_Metric_Value = self.__lowlevel_OptimizeOnDevel_Parallel (train_x,train_y,devel_x,devel_y,devel_pair_tracking)
        if self.__RE_PARAMS.PredictTestSet:
            #train on train+devel (using Best_C_value), predict test...
            #<<<CRITICAL>>> Train and Devel can add to feature dictionary, Test cannot
            self.__ResetFeatures()
            self.__GenerateFeatures ("TrainingSet" , FeatureGenerationParams, True ,useOnlyXTopPathsForFeatureGeneration)
            self.__GenerateFeatures ("DevelopmentSet", FeatureGenerationParams, True ,useOnlyXTopPathsForFeatureGeneration)
            self.__GenerateFeatures ("TestSet" , FeatureGenerationParams, False ,useOnlyXTopPathsForFeatureGeneration)
            self.__PRJ.lp ("Total Extracted Features:" + str(self.__PRJ.FeatureMappingDictionary.return_dictionary_length()))
            train_x , train_y , train_sentence_index, train_pair_tracking = self.__GenerateSVMMatrices("TrainingSet" , FeatureGenerationParams, returnLabels=True)
            devel_x , devel_y , devel_sentence_index, devel_pair_tracking = self.__GenerateSVMMatrices("DevelopmentSet", FeatureGenerationParams, returnLabels=True)
            test_x , test_y , test_sentence_index , test_pair_tracking = self.__GenerateSVMMatrices("TestSet" , FeatureGenerationParams, returnLabels=self.__RE_PARAMS.EvaluateTestSet)
            from scipy.sparse import vstack
            TRAIN_X = vstack ( [train_x, devel_x] )
            TRAIN_Y = np.hstack ( [train_y, devel_y] )
            from sklearn.svm import SVC
            self.__PRJ.lp ("Training on TrainingSet+DevelopmentSet using found Best_C_Value")
            clf = SVC(C=devel_Best_C_value, kernel='linear', class_weight=self.__RE_PARAMS.Classification_class_weights)
            clf.fit (TRAIN_X,TRAIN_Y)
            self.__PRJ.lp ("Predicting TestSet")
            y_pred = clf.predict (test_x)
            if self.__RE_PARAMS.EvaluateTestSet:
                y_true = test_y
                RES = self.__lowelevel_Evaluate_WithGold (y_true, y_pred,test_pair_tracking,"TestSet")
                MSG = ["Test Set Prediction Results:" , "-"*30]
                for key in sorted(RES.keys()):
                    MSG.append (SF.NVLR (key,25) + ": " + str(RES[key]))
                MSG.append ("-"*80)
                MSG.append (self.__RE_PARAMS.Classification_OptimizationMetric + " : " + str(RES[self.__RE_PARAMS.Classification_OptimizationMetric]))
                test_Best_Metric_Value = RES[self.__RE_PARAMS.Classification_OptimizationMetric]
                self.__PRJ.lp(MSG)
            if self.__RE_PARAMS.WriteBackTestSetPredictions:
                GOLD_XML_FileAddress = self.__RE_PARAMS.TestSet_Files_Lists[0]
                OutputFileAddress = self.__RE_PARAMS.TestSetPredictionOutputFolder + TestSetPredictionFileName
                self.__lowelevel_WriteBackSetPredictionResults (y_pred, test_pair_tracking, GOLD_XML_FileAddress, OutputFileAddress)
            if self.__RE_PARAMS.EvaluateTestSet:
                return self.__PRJ.FeatureMappingDictionary.return_dictionary_length(), devel_Best_Metric_Value , test_Best_Metric_Value
            else:
                return self.__PRJ.FeatureMappingDictionary.return_dictionary_length(), devel_Best_Metric_Value
def __lowlevel_OptimizeOnDevel_Parallel (self,train_x,train_y,devel_x,devel_y,devel_pair_tracking):
import multiprocessing
#decide number of parallel processes:
MAX_CORE_COUNT = multiprocessing.cpu_count()
NumberOfParallelProcesses = | |
= os.stat(fname)
os.chmod(fname, st.st_mode | stat.S_IEXEC)
if stage:
param = stage.replace('-', '_')
option = "stage_" + param
dict_ = vars(self.args)
if option in dict_:
if dict_[option]:
print("*"*20)
print("Executing ", fname)
print("*"*20)
os.system("./" + fname)
pass
    def build_nuitkas(self):
        """Generate per-utility Nuitka build shell scripts from the configured
        nuitka profiles, plus a top-level "40-build-nuitkas" driver script
        that runs them all.  No compilation happens here — only script
        generation via self.lines2sh.
        """
        # if not self.nuitkas:
        #     return
        if not self.nuitka_profiles:
            return
        tmpdir = os.path.join(self.curdir, "tmp/ta")
        tmpdir = os.path.relpath(tmpdir)
        bfiles = []
        #First pass
        module2build = {}
        standalone2build = []
        referenced_modules = set()
        for np_name, np_ in self.nuitka_profiles.profiles.items():
            for target_ in np_.builds or []:
                srcname = target_.utility
                outputname = target_.utility
                nflags = np_.get_flags(tmpdir, target_)
                target_dir = os.path.join(tmpdir, outputname + '.dist')
                target_dir_ = os.path.relpath(target_dir, start=self.curdir)
                src_dir = os.path.relpath(self.src_dir, start=self.curdir)
                src = os.path.join(src_dir, target_.folder, target_.utility) + '.py'
                flags_ = ''
                if 'flags' in target_:
                    flags_ = target_.flags
                lines = []
                # Prefer ccache-wrapped compilers inside the generated script.
                lines.append("""
export PATH="/usr/lib64/ccache:$PATH"
""" % vars(self))
                build_name = 'build_' + srcname
                lines.append(fR"""
time nice -19 pipenv run python3 -m nuitka  {nflags} {flags_} {src}  2>&1 > {build_name}.log
#time nice -19 pipenv run python3 -m nuitka --recompile-c-only {nflags} {flags_} {src}  2>&1 > {build_name}.log
#time nice -19 pipenv run python3 -m nuitka --generate-c-only {nflags} {flags_} {src}  2>&1 > {build_name}.log
pipenv run python3 -m pip freeze > {target_dir_}/{build_name}-pip-freeze.txt
""" )
                self.fs.folders.append(target_dir)
                # Optional rename when the profile requests a different output name.
                if "outputname" in target_:
                    srcname = target_.outputname
                    if srcname != outputname:
                        lines.append(R"""
mv %(target_dir_)s/%(outputname)s %(target_dir_)s/%(srcname)s
""" % vars())
                # if "modules" in target_:
                #     force_modules = []
                #     if 'force_modules' in target_:
                #         force_modules = target_.force_modules
                #     for it in target_.modules + force_modules:
                #         mdir_ = None
                #         try:
                #             mdir_ = dir4module(it)
                #             mdir__ = os.path.relpath(mdir_)
                #             if len(mdir__)<len(mdir_):
                #                 mdir_ = mdir__
                #         except:
                #             pass
                #         try:
                #             mdir_ = module2build[it].folder
                #         except:
                #             pass
                #         if mdir_:
                #             lines.append(R"""
                # rsync -rav --exclude=*.py --exclude=*.pyc --exclude=__pycache__ --prune-empty-dirs %(mdir_)s %(target_dir_)s/
                # """ % vars())
                #     force_modules = []
                #     for it in target_.modules:
                #         lines.append(R"""
                # rsync -av --include=*.so --include=*.bin --exclude=* %(tmpdir_)s/modules/%(it)s/ %(target_dir_)s/.
                # rsync -rav %(tmpdir_)s/modules/%(it)s/%(it)s.dist/ %(target_dir_)s/.
                # """ % vars())
                self.lines2sh(build_name, lines, None)
                bfiles.append(build_name)
        # Driver script that invokes every generated per-utility build script.
        lines = []
        for b_ in bfiles:
            lines.append("./" + b_ + '.sh')
        self.lines2sh("40-build-nuitkas", lines, "build-nuitka")
        pass
def build_go(self):
    """
    Generate one shell build script per Go project, plus a master
    "42-build-go" script that runs them all.  Mirrors build_nuitkas(),
    but each script does `go mod download` followed by a static
    (CGO-disabled, internal-linkmode) `go build`.
    """
    if not self.gp:
        return
    tmpdir = os.path.join(self.curdir, "tmp/ta")
    bfiles = []  # generated per-project build script names
    #First pass
    # NOTE(review): these three appear unused here — likely copied from
    # build_nuitkas(); confirm before removing.
    module2build = {}
    standalone2build = []
    referenced_modules = set()
    for td_ in self.gp.projects():
        git_url, git_branch, path_to_dir_, _ = self.explode_pp_node(td_)
        # reset cwd each iteration; chdir into the project if it exists
        os.chdir(self.curdir)
        if os.path.exists(path_to_dir_):
            os.chdir(path_to_dir_)
        # NOTE(review): indentation was mangled in transit — the rest of
        # this loop body may have been intended to sit under the
        # existence check above; confirm against the original file.
        path_to_dir__ = os.path.relpath(path_to_dir_, start=self.curdir)
        outputname = os.path.split(path_to_dir_)[-1]
        target_dir = os.path.join(tmpdir, outputname + '.build')
        target_dir_ = os.path.relpath(target_dir, start=path_to_dir_)
        lines = []
        build_name = 'build_' + outputname
        lines.append(fR"""
pushd {path_to_dir_}
go mod download
CGO_ENABLED=0 go build -ldflags="-linkmode=internal -r" -o {target_dir_}/{outputname} 2>&1 >{outputname}.log
popd
""" )
        self.fs.folders.append(target_dir)
        self.lines2sh(build_name, lines, None)
        bfiles.append(build_name)
    # master script invoking every per-project build
    lines = []
    for b_ in bfiles:
        lines.append("./" + b_ + '.sh')
    self.lines2sh("42-build-go", lines, "build-go")
    pass
def mycopy(self, src, dst):
    '''
    Adaptive copy of *src* into the environment sub-tree at *dst*:
    symlinks are relativized and stay symlinks instead of being
    materialized as regular files.

    Skips: existing destinations (unless self.overwrite_mode),
    __pycache__ entries, and /etc/environment.
    Raises the original exception after logging the failing pair.
    '''
    if os.path.exists(dst) and not self.overwrite_mode:
        return
    if '__pycache__' in src:
        return
    try:
        if src in ["/etc/environment"]:
            return
        if os.path.islink(src):
            linkto = os.readlink(src)
            dir_, file_ = os.path.split(linkto)
            dirdst_, filedst_ = os.path.split(dst)
            dirsrc_, filesrc_ = os.path.split(src)
            if not dir_ or dirsrc_ == dir_:
                # Link target lives in the same directory (or the link is
                # already relative): re-create it as a purely relative
                # symlink next to dst.
                if not os.path.exists(dst):
                    os.symlink(file_, dst)
            # NOTE(review): symlinks pointing outside their own directory
            # fall through without being copied — confirm this is intended.
        else:
            # regular files/dirs: preserve metadata, never follow links
            shutil.copy2(src, dst, follow_symlinks=False)
    except Exception:
        print('Cannot copy ', src, dst)
        # bare `raise` preserves the original traceback
        # (the previous `raise ex` re-pointed it at the handler line)
        raise
def should_copy(self, f):
    '''
    Decide whether host file *f* should be copied into the portable
    environment.

    We want the real interpreter plus files under /lib(64) and
    /usr/lib(64); everything under /var and the like is irrelevant for a
    portable Python.  Locale data is dropped, and .build-id entries are
    excluded because they are symlinks back to binaries/shared libraries
    we do not want to store.
    '''
    if wtf(f):
        return False
    if "__pycache__" in f:
        return False
    if f == "":
        return False
    # if self.br.is_need_exclude(f):
    #     return False
    if self.br.is_needed(f):
        return True
    # The dynamic loader needs special preparation to be relocatable, so
    # it is handled elsewhere — never plain-copied.
    if f.startswith("/lib64/ld-linux"):
        return False
    parts = list(pathlib.PurePath(f).parts)
    el = parts.pop(0)
    if el != "/":
        raise RuntimeError("unexpected path: not absolute! {}".format(f))
    # strip optional /usr and /local prefixes so /lib, /usr/lib and
    # /usr/local/lib are all judged by the same top-level component
    if len(parts) > 0 and parts[0] == "usr":
        parts.pop(0)
    if len(parts) > 0 and parts[0] == "local":
        parts.pop(0)
    if not parts:
        return False
    if not self.args.debug:
        # NOTE(review): `parts != ['bin', 'bash', 'sbin']` compares the whole
        # remaining path against a literal 3-element list (almost never
        # equal); the intent looks like a top-level dir whitelist — confirm
        # before changing the logic.
        if (parts[0] not in ["lib", "lib64"]) and (parts != ['bin', 'bash', 'sbin']):
            return False
    parts.pop(0)
    if len(parts) > 0 and (parts[0] == "locale" or parts[0] == ".build-id"):
        return False
    # whatever survived the filters above — take it.
    return True
def rpm_update_time(self):
    """
    Return the modification time (as a ctime string) of the first RPM
    database file found on this host, or None when no RPM DB exists.
    Used to key dependency caches to the current package state.
    """
    import time
    candidates = ("/var/lib/rpm/Packages", "/var/lib/rpm/rpmdb.sqlite")
    dbpath = next((p for p in candidates if os.path.exists(p)), None)
    if dbpath is None:
        return None
    return str(time.ctime(os.path.getmtime(dbpath)))
def dependencies(self, package_list, local=True):
    '''
    Generate the list of recursive RPM dependencies for the given package
    list.  Results are cached on disk, keyed by an MD5 of the RPM DB
    mtime plus the expanded package list, so repeated runs are cheap.
    Also writes deps.txt / selected-packages.txt diagnostics and an HTML
    package table.
    '''
    pl_ = self.packages2list(package_list)
    # cache key: RPM DB mtime + expanded package names
    package_list_md5 = hashlib.md5((self.rpm_update_time() + '\n' + '\n'.join(pl_)).encode('utf-8')).hexdigest()
    cache_filename = 'cache_' + package_list_md5 + '.list'
    if os.path.exists(cache_filename):
        with open(cache_filename, 'r', encoding='utf-8') as lf:
            ls_ = lf.read()
            list_ = ls_.split(',')
            return list_
    # strips a leading "epoch:" prefix from a package NEVRA
    repoch = re.compile("\d+\:")
    def remove_epoch(package):
        package_ = repoch.sub('', package)
        return package_
    options_ = [
        # Filter packages by the 64-bit architecture (or 32-bit, if we ever
        # build there), although right now 64-bit is hardcoded almost
        # everywhere.
        '--archlist=noarch,{machine}'.format(machine=os.uname().machine),
        '--resolve',
        '--requires',
        '--recursive'
    ]
    if local:
        options_ += [
            '--cacheonly',
            '--installed',
        ]
    if 1:  # NOTE(review): always-true toggle, presumably kept for debugging
        # res = subprocess.check_output(['repoquery'] + options_ + ['--tree', '--whatrequires'] + package_list, universal_newlines=True)
        res = ''
        # repoquery occasionally crashes; retry up to 3 times
        for try_ in range(3):
            try:
                res = subprocess.check_output(['repoquery', '-y'] + options_ + pl_, universal_newlines=True)
                break
            except subprocess.CalledProcessError:
                # died with <Signals.SIGSEGV: 11>.
                time.sleep(2)
        # res = subprocess.check_output(['repoquery'] + options_ + ['--output', 'dot-tree'] + package_list, universal_newlines=True)
        with open(os.path.join(self.start_dir, 'deps.txt'), 'w', encoding='utf-8') as lf:
            lf.write('\n -'.join(pl_))
            lf.write('\n----------------\n')
            lf.write(res)
    # second query produces the actual dependency list that gets filtered
    output = subprocess.check_output(['repoquery'] + options_ + pl_, universal_newlines=True).splitlines()
    output = [remove_epoch(x) for x in output if self.ps.is_package_needed(x)]
    packages_ = output + pl_
    with open(os.path.join(self.start_dir, 'selected-packages.txt'), 'w', encoding='utf-8') as lf:
        lf.write('\n- '.join(packages_))
    # reduce NEVRA strings to bare package names for the report table
    packages_set_ = set()
    for package_ in packages_:
        purepackage = package_.split('.', 1)[0]
        if len(purepackage) < len(package_):
            purepackage = purepackage.rsplit('-', 1)[0]
        packages_set_.add(purepackage)
    rows_ = []
    for package_ in sorted(packages_set_):
        res_ = list(self.installed_packages.filter(name=package_))
        if len(res_)==0:
            continue
        name_ = res_[0].name
        version_ = res_[0].version
        wtf = 1
        rows_.append([name_, version_])
        pass
    write_doc_table('doc-rpm-packages.htm', ['Packages', 'Version'], rows_)
    # populate the cache for next time
    with open(cache_filename, 'w', encoding='utf-8') as lf:
        lf.write(','.join(packages_))
    return packages_
def generate_file_list_from_pips(self, pips):
    '''
    For the given list of PIP packages, return the files belonging to
    those packages that we actually want to copy.
    '''
    import pkg_resources
    # version pins like "foo==1.2" reduce to the bare project name
    wanted = [spec.split('==')[0] for spec in pips]
    collected = []
    for dist in pkg_resources.working_set:
        if dist.key not in wanted:
            continue
        if not dist.has_metadata('RECORD'):
            continue
        # RECORD lines are "relative/path,hash,size" — keep the path only
        record_lines = dist.get_metadata_lines('RECORD')
        collected.extend(
            os.path.join(dist.location, line.split(',')[0])
            for line in record_lines
        )
    return [path for path in collected if self.should_copy(path)]
def generate_file_list_from_packages(self, packages):
'''
Для заданного списка RPM-файлов, возвращаем список файлов в этих пакетах, которые нужны нам.
'''
package_list_md5 = hashlib.md5((self.rpm_update_time() + '\n' + '\n'.join(packages)).encode('utf-8')).hexdigest()
cache_filename = 'cachefilelist_' + package_list_md5 + '.list'
if os.path.exists(cache_filename):
with open(cache_filename, 'r', encoding='utf-8') as lf:
ls_ = lf.read()
list_ = ls_.split('\n')
return list_
exclusions = []
for package_ in packages:
exclusions += subprocess.check_output(['rpm', '-qd', package_], universal_newlines=True).splitlines()
# we don't want to use --list the first time: For one, we want to be able to filter
# out some packages with files
# we don't want to copy
# Second, repoquery --list do not include the actual package files when used with --resolve
# and --recursive (only its dependencies').
# So we need a separate step in which all packages are added together.
# for package_ in packages:
# # if 'postgresql12-server' == package_:
# # wttt=1
# # TODO: Somehow parallelize repoquery running
# for try_ in range(3):
# try:
# files = subprocess.check_output(['repoquery',
# '-y',
# '--installed',
# '--archlist=x86_64,noarch'
# '--cacheonly',
# '--list' ] + [package_], universal_newlines=True).splitlines()
# break
# except:
# pass
# for file in files:
# if 'i686' in file:
# assert(True)
candidates = subprocess.check_output(['repoquery',
'-y',
'--installed',
'--archlist=x86_64,noarch',
'--cacheonly',
'--list' ] + packages, universal_newlines=True).splitlines()
# candidates = | |
"HAK": {
"symbol": "HAK",
"name": "Shaka",
"type": "ERC20",
"address": "0x93a7174dafd31d13400cD9fa01f4e5B5BAa00D39",
"ens_address": "",
"decimals": 18,
"website": "https://www.friendsfingers.com",
"logo": {
"src": "https://www.friendsfingers.com/img/brand-resources/shaka_logo_white.png",
"width": "600px",
"height": "600px",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "https://medium.com/friendsfingers",
"chat": "",
"facebook": "https://www.facebook.com/friendsfingers",
"forum": "",
"github": "https://github.com/friendsfingers",
"gitter": "",
"instagram": "https://www.instagram.com/friendsfingers",
"linkedin": "https://www.linkedin.com/company/friendsfingers",
"reddit": "https://www.reddit.com/user/friendsfingers",
"slack": "",
"telegram": "https://t.me/friendsfingers",
"twitter": "https://twitter.com/friendsfingers",
"youtube": ""
}
},
"KIN": {
"symbol": "KIN",
"address": "0x818Fc6C2Ec5986bc6E2CBf00939d90556aB12ce5",
"decimals": 18,
"name": "Kin Foundation",
"ens_address": "",
"website": "https://kin.kik.com",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "https://medium.com/kinfoundation",
"chat": "",
"facebook": "",
"forum": "",
"github": "https://github.com/kikinteractive/kin-token",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/KinFoundation",
"slack": "https://kinfoundation.slack.com",
"telegram": "",
"twitter": "https://twitter.com/@kin_foundation",
"youtube": ""
}
},
"DXT": {
"symbol": "DXT",
"name": "Datawallet",
"type": "ERC20",
"address": "0x8dB54ca569D3019A2ba126D03C37c44b5eF81EF6",
"ens_address": "",
"decimals": 8,
"website": "https://datawallet.com",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/datawallethq",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/DataWalletHQ",
"youtube": ""
}
},
"EWO": {
"symbol": "EWO",
"address": "0x444997b7e7fC830E20089afea3078cd518fCF2A2",
"decimals": 18,
"name": "<NAME>",
"ens_address": "",
"website": "https://www.ewoplace.com/",
"logo": {
"src": "https://www.ewoplace.com/images/EWO-Token-Icon-256px-min.png",
"width": "256",
"height": "256",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "https://github.com/ewoplace",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "",
"youtube": ""
}
},
"OMX": {
"symbol": "OMX",
"name": "Shivom",
"type": "ERC20",
"address": "0xB5DBC6D3cf380079dF3b27135664b6BCF45D1869",
"ens_address": "",
"decimals": 8,
"website": "https://shivom.io",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "https://medium.com/@projectshivom",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/Shivom",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/projectshivom",
"youtube": ""
}
},
"WINGS": {
"symbol": "WINGS",
"address": "0x667088b212ce3d06a1b553a7221E1fD19000d9aF",
"decimals": 18,
"name": "WINGS",
"ens_address": "",
"website": "https://wings.ai",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": "https://hi.wings.ai"
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://reddit.com/r/WingsDAO",
"slack": "",
"telegram": "https://t.me/wingschat",
"twitter": "https://twitter.com/wingsplatform",
"youtube": ""
}
},
"ELEC": {
"symbol": "ELEC",
"name": "Electrify.Asia",
"type": "ERC20",
"address": "0xD49ff13661451313cA1553fd6954BD1d9b6E02b9",
"ens_address": "",
"decimals": 18,
"website": "https://electrify.asia",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "https://medium.com/electrifyasia",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/electrify",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/electrifyasia",
"youtube": ""
}
},
"NPX": {
"symbol": "NPX",
"name": "NaPoleonX",
"type": "ERC20",
"address": "0x28b5E12CcE51f15594B0b91d5b5AdaA70F684a02",
"ens_address": "",
"decimals": 2,
"website": "https://napoleonx.ai",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "https://medium.com/@napoleonx.ai",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/NapoleonX",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/NapoleonXai",
"youtube": ""
}
},
"STQ": {
"symbol": "STQ",
"address": "0x5c3a228510D246b78a3765C20221Cbf3082b44a4",
"decimals": 18,
"name": "Storiqa",
"ens_address": "",
"website": "https://storiqa.com",
"logo": {
"src": "https://s2.coinmarketcap.com/static/img/coins/32x32/2541.png",
"width": "32",
"height": "32",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "https://medium.com/@storiqa",
"chat": "https://t.me/storiqa_en",
"facebook": "https://www.facebook.com/storiqa/",
"forum": "https://bitcointalk.org/index.php?topic=2233274",
"github": "https://github.com/Storiqa/",
"gitter": "",
"instagram": "https://www.instagram.com/storiqa/",
"linkedin": "https://ru.linkedin.com/company/storiqa",
"reddit": "https://www.reddit.com/r/Storiqa/",
"slack": "",
"telegram": "https://t.me/storiqa_en",
"twitter": "https://twitter.com/storiqa",
"youtube": "https://www.youtube.com/channel/UCU_VW6azYd0cXFACzofUy5w"
}
},
"TRCN": {
"symbol": "TRCN",
"address": "0x566Fd7999B1Fc3988022bD38507A48F0bCf22c77",
"decimals": 18,
"name": "The Real Coin",
"ens_address": "",
"website": "http://www.therealcoinz.com",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "https://t.me/joinchat/AAAAAE5H5N1SoT0lYvhBXA",
"twitter": "https://mobile.twitter.com/OfficialTRCoin",
"youtube": ""
}
},
"DEPO": {
"symbol": "DEPO",
"address": "0x7cF271966F36343Bf0150F25E5364f7961c58201",
"decimals": 0,
"name": "CRYPTODEPOZIT",
"ens_address": "",
"website": "https://aridika.com",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "",
"youtube": ""
}
},
"QRG": {
"symbol": "QRG",
"address": "0xFFAA5ffc455d9131f8A2713A741fD1960330508B",
"decimals": 18,
"name": "QRG",
"ens_address": "",
"website": "http://qrg-stamps.com/",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "",
"youtube": ""
}
},
"BBC": {
"symbol": "BBC",
"name": "TraDove B2BCoin",
"type": "ERC20",
"address": "0xe7D3e4413E29ae35B0893140F4500965c74365e5",
"ens_address": "",
"decimals": 18,
"website": "http://bbcoin.tradove.com",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/tradove",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/TraDove_B2BCoin",
"youtube": ""
}
},
"ARB": {
"symbol": "ARB",
"name": "ARBITRAGE",
"type": "ERC20",
"address": "0xaFBeC4D65BC7b116d85107FD05d912491029Bf46",
"ens_address": "",
"decimals": 18,
"website": "",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "",
"youtube": ""
}
},
"INV": {
"symbol": "INV",
"name": "Invacio",
"type": "ERC20",
"address": "0xEcE83617Db208Ad255Ad4f45Daf81E25137535bb",
"ens_address": "",
"decimals": 8,
"website": "https://www.invacio.com",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/INVACIO_Analytica",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/Invacio",
"youtube": ""
}
},
"WILD": {
"symbol": "WILD",
"address": "0xD3C00772B24D997A812249ca637a921e81357701",
"decimals": 18,
"name": "<NAME>",
"ens_address": "",
"website": "http://www.wildcrypto.com",
"logo": {
"src": "http://wildtoken.com/wp-content/uploads/2017/12/WildCrypto-Logo-Only-copy-300x275.png",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "https://www.facebook.com/WildCrypto",
"forum": "",
"github": "https://github.com/WildCryptoICO/Wild-Crypto-Token",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/user/WildCrypto",
"slack": "",
"telegram": "https://t.me/joinchat/GJ80yE8A1_ZuwubD_jExjg",
"twitter": "https://twitter.com/WildCrypto",
"youtube": "https://www.youtube.com/channel/UCY0-r0TNdZ95abuydyTC19Q"
}
},
"IVY": {
"symbol": "IVY",
"name": "IvyKoin Public Network Tokens",
"type": "ERC20",
"address": "0xA4eA687A2A7F29cF2dc66B39c68e4411C0D00C49",
"ens_address": "",
"decimals": 18,
"website": "https://www.ivykoin.com",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "https://medium.com/@ivykoin",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/ivykoin",
"youtube": ""
}
},
"LCS": {
"symbol": "LCS",
"name": "LocalCoinSwap",
"type": "ERC20",
"address": "0xAA19961b6B858D9F18a115f25aa1D98ABc1fdBA8",
"ens_address": "",
"decimals": 18,
"website": "https://www.localcoinswap.com",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "https://medium.com/localcoinswap",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/LocalCoinSwap",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/Localcoinswap_",
"youtube": ""
}
},
"RVL": {
"symbol": "RVL",
"address": "0x41f615E24fAbd2b097a320E9E6c1f448cb40521c",
"decimals": 18,
"name": "RVL",
"ens_address": "",
"website": "https://www.r-evolutioncoin.com",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "",
"youtube": ""
}
},
"METM": {
"symbol": "METM",
"name": "MetaMorph",
"type": "ERC20",
"address": "0xFEF3884b603C33EF8eD4183346E093A173C94da6",
"ens_address": "",
"decimals": 18,
"website": "https://metamorph.pro",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "https://medium.com/@metamorphpro",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/MetaMorphPro",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/MetaMorphPro",
"youtube": ""
}
},
"BLUE": {
"symbol": "BLUE",
"address": "0x539EfE69bCDd21a83eFD9122571a64CC25e0282b",
"decimals": 8,
"name": "Ethereum Blue",
"ens_address": "",
"website": "https://blueprotocol.com/",
| |
"""
auralib module containing plotting functions and related stuff...
Author: <NAME>
Created: 20-Jan-2017
Last Mod: 20-Aug-2016
"""
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.widgets import MultiCursor
import numpy as np
def t2xy(d1, d2, d3, norm=True):
    """
    Map ternary components (d1, d2, d3) to Cartesian (x, y) inside the
    unit triangle with corners (0,0), (1,0) and (0.5, 0.866).

    When norm is True each sample is first scaled so d1+d2+d3 == 1.
    Returns the pair of x and y arrays.
    """
    corner1 = np.array([0, 0])
    corner2 = np.array([1, 0])
    corner3 = np.array([0.5, 0.866])
    d1, d2, d3 = np.array(d1), np.array(d2), np.array(d3)
    if norm:
        total = np.sum(np.vstack([d1, d2, d3]), axis=0)
        d1, d2, d3 = d1 / total, d2 / total, d3 / total
    # barycentric combination of the three corner positions
    px = d1 * corner1[0] + d2 * corner2[0] + d3 * corner3[0]
    py = d1 * corner1[1] + d2 * corner2[1] + d3 * corner3[1]
    return px, py
def tern(ax, lbls=['C1', 'C2', 'C3']):
    """
    Turn *ax* into a ternary diagram: white triangular background, corner
    labels from *lbls*, and dotted gridlines every 0.1 with tick labels.
    """
    # corner points of the triangular axes
    corner1 = np.array([0, 0])
    corner2 = np.array([1, 0])
    corner3 = np.array([0.5, 0.866])
    # white triangle behind everything, then the triangle outline
    bg = mpatches.Polygon(np.vstack([corner1, corner2, corner3]), closed=True,
                          fc='white', ec=None, zorder=1)
    ax.add_patch(bg)
    ax.plot([0, 1, 0.5, 0], [0, 0, 0.866, 0], 'k', lw=1.5, zorder=5)
    # corner labels
    ax.text(corner1[0], corner1[1], lbls[0], ha='right', va='top', fontsize=14)
    ax.text(corner2[0], corner2[1], lbls[1], ha='left', va='top', fontsize=14)
    ax.text(corner3[0], corner3[1], lbls[2], ha='center', va='bottom', fontsize=14)
    # dotted gridlines, one family per component, every 0.1
    for frac in np.arange(0.1, 1, 0.1):
        lvl = [frac, frac]
        span = 1.0 - frac
        # constant-C1 gridline
        gx, gy = t2xy(lvl, [0, span], [span, 0])
        ax.plot(gx, gy, ':', lw=1.0, color=u'0.4', zorder=2)
        ax.text(gx[-1]+0.01, gy[-1]-0.03, '%.2f' % frac, ha='center', va='center', rotation=-60.0)
        # constant-C2 gridline
        gx, gy = t2xy([0, span], lvl, [span, 0])
        ax.plot(gx, gy, ':', lw=1.0, color=u'0.4', zorder=2)
        ax.text(gx[0]+0.005, gy[0]+0.03, '%.2f' % frac, ha='left', va='center', rotation=60.0)
        # constant-C3 gridline
        gx, gy = t2xy([0, span], [span, 0], lvl)
        ax.plot(gx, gy, ':', lw=1.0, color=u'0.4', zorder=2)
        ax.text(gx[-1]-0.01, gy[0], '%.2f' % frac, ha='right', va='center')
    # frame the triangle and hide the rectangular axes
    ax.set_xlim([-0.1, 1.1])
    ax.set_ylim([-0.1, 0.966])
    ax.set_aspect('equal')
    ax.set_axis_off()
def tern_scatter(ax, d1, d2, d3, s=25, color=None, marker=None, cmap=None,
                 lw=None, ec=None, alpha=1.0, label=None):
    """
    Scatter-plot ternary data (d1, d2, d3) on an axis prepared with
    tern().  Returns the PathCollection from ax.scatter().
    """
    # ternary -> Cartesian, then draw above the grid (zorder=10)
    x, y = t2xy(d1, d2, d3)
    return ax.scatter(x, y, s=s, c=color, marker=marker, cmap=cmap, lw=lw,
                      edgecolor=ec, alpha=alpha, label=label, zorder=10)
def tern_line(ax, d1, d2, d3, c=None, lw=None, ls=None, label=None):
    """
    Draw a line through ternary-coordinate points on an axis prepared
    with tern().  Returns the list of Line2D handles from ax.plot().
    """
    # ternary -> Cartesian, then draw above the grid (zorder=10)
    x, y = t2xy(d1, d2, d3)
    return ax.plot(x, y, c=c, lw=lw, ls=ls, label=label, zorder=10)
def plot_blocky(ax, data, zdata, linespec='b-', lw=1):
    """
    Convenience function for plotting a blocky (stair-step) log.
    Ensure that the zdata log has 1 more sample than the data log.
    """
    n = len(data)
    # vertical segments: one constant-value riser per sample
    for i in range(n):
        ax.plot([data[i]] * 2, [zdata[i], zdata[i + 1]], linespec, lw=lw)
    # horizontal connectors between consecutive samples
    for i in range(1, n):
        ax.plot([data[i - 1], data[i]], [zdata[i]] * 2, linespec, lw=lw)
def radar(ax, data, data_names, lw=1.0, color='k', ls='-', marker=None, label=None):
    """
    Produce a radar (spider) plot.

    ax         handle of a polar axis to draw into
    data       1D array or list of data values, one per spoke
    data_names 1D list of spoke names
    """
    nvals = len(data)
    # close the polygon by repeating the first value at the end, so the
    # outline and fill wrap around cleanly
    values = np.array(data).tolist()
    values.append(values[0])
    # spoke angles evenly dividing the full circle, likewise closed
    angles = [k / float(nvals) * 2 * np.pi for k in range(nvals)]
    angles += angles[:1]
    # one labelled spoke per variable
    plt.sca(ax)
    plt.xticks(angles[:-1], data_names)
    # radial tick labels
    ax.set_rlabel_position(0)
    plt.yticks(color="grey", size=8)
    # outline then translucent fill
    ax.plot(angles, values, lw=lw, color=color, ls=ls, marker=marker, label=label)
    ax.fill(angles, values, color=color, alpha=0.1)
    ax.grid(True, ls=':')
def plot_filled_logs(ax, logs, z, fill_colors, labels, lw=1.0, alpha=0.3):
    """
    Plot a stack of logs with fill colors between them.  Designed to show
    a series of mineral-fraction or fluid-saturation logs.
    """
    import matplotlib as mpl
    nlogs = len(logs)
    # darken each fill color for the separating curves so they stand out
    # against their own fill
    line_colors = [np.array(mpl.colors.to_rgb(c)) * 0.3 for c in fill_colors]
    # pass 1: cumulative fills
    base = 0
    for idx in range(nlogs):
        top = logs[idx] + base
        ax.fill_betweenx(z, top, base, where=top >= base,
                         facecolor=fill_colors[idx], alpha=alpha,
                         label=labels[idx])
        base = base + logs[idx]
    # pass 2: boundary curves to visually separate the filled areas
    base = 0
    for idx in range(nlogs):
        top = logs[idx] + base
        ax.plot(top, z, c=line_colors[idx], lw=lw, alpha=alpha)
        base = base + logs[idx]
def format_log_axes(axes, ylabel):
    """
    Format a series of axes displaying simple log curves: x labels/ticks
    on top, dotted grid, y label on the outer tracks only (right-hand
    track gets its ticks/label on the right side).

    BUG FIX: the original ended with a stray trailing backslash that
    spliced this function's last statement onto the following `def`,
    producing a module-level SyntaxError.
    """
    for ax in axes:
        ax.xaxis.set_ticks_position('top')
        ax.xaxis.set_label_position('top')
        ax.grid(True, ls=':')
    # interior tracks share the y axis, so hide their tick labels
    for ax in axes[1:-1]:
        plt.setp(ax.get_yticklabels(), visible=False)
    axes[0].set_ylabel(ylabel)
    axes[-1].set_ylabel(ylabel)
    axes[-1].yaxis.set_ticks_position('right')
    axes[-1].yaxis.set_label_position('right')
def make_wellview(ntrack=5, figsize=(5, 5)):
    """
    Create a blank multi-track well viewer: a title strip across the top,
    a log-header panel per track, and a log-data panel per track.

    Returns a dict with keys:
        fig   - the matplotlib figure (figure number 1 is reused/cleared)
        axd   - list of data axes, one per track (shared y with track 0)
        axh   - header axes above each data axis (x shared with their
                data axis, y shared among headers)
        axttl - title axis spanning all tracks
        curs  - MultiCursor over the data axes (kept alive via the dict)
    """
    fig = plt.figure(num=1, figsize=figsize)
    fig.clf()
    nrow = 30        # virtual grid rows for subplot2grid
    ncol = ntrack
    ttl = 0          # grid row of the title strip
    hdr = 1          # grid row where the header panels start
    dat = 4          # grid row where the data panels start
    axd = [plt.subplot2grid((nrow, ncol), (dat, 0), rowspan=nrow-3)]
    axh = [plt.subplot2grid((nrow, ncol), (hdr, 0), rowspan=3, sharex=axd[0])]
    for i in range(1, ncol):
        # NOTE(review): rowspan here is nrow-2 while track 0 above uses
        # nrow-3, making later tracks one grid row taller — confirm intended.
        axd.append(plt.subplot2grid((nrow, ncol), (dat, i), rowspan=nrow-2, sharey=axd[0]))
        axh.append(plt.subplot2grid((nrow, ncol), (hdr, i), rowspan=3, sharex=axd[i], sharey=axh[0]))
    axttl = plt.subplot2grid((nrow, ncol), (ttl, 0), colspan=ncol)
    for ax in fig.get_axes():
        ax.tick_params(which='both', direction='in')
    # horizontal-only cursor tracking depth across all data tracks
    curs = MultiCursor(fig.canvas, axd, vertOn=False, horizOn=True,
                       lw=1, c=u'0.3')
    wview = {'fig': fig, 'axd': axd, 'axh': axh, 'axttl': axttl, 'curs': curs}
    return wview
def plot_log(wview, tracknum, aura_log, fmtstr='%.1f', numlogs=1, logpos=1, xscale='normal'):
    """
    Plot a log in a Well Viewer created with aura.plot.make_wellview().

    wview     dict returned by make_wellview()
    tracknum  index of the track (column) to plot into
    aura_log  aura.well.AuraLog() object; uses .data, .zref, .c, .lw,
              .fs, .name, .units and .plt_range
    fmtstr    format for the min/max annotations in the track header
    numlogs   total number of logs in this track (sizes the header rows)
    logpos    vertical slot of this log's header row
    xscale    'normal' or 'log' for the track's x axis
    """
    axd = wview['axd']
    axh = wview['axh']
    if xscale=='log':
        axd[tracknum].set_xscale('log')
        axh[tracknum].set_xscale('log')
    axd[tracknum].plot(aura_log.data, aura_log.zref,
                       color=aura_log.c, lw=aura_log.lw)
    axd[tracknum].set_xlim(aura_log.plt_range)
    xlim = axd[tracknum].get_xlim()
    data_range = np.abs(xlim[1] - xlim[0])
    # header annotation positions: flush to the limits on a log axis,
    # inset by 2% of the range on a linear axis
    if xscale == 'log':
        data_lim_offset0 = xlim[0]
        data_lim_offset1 = xlim[1]
    else:
        data_lim_offset0 = data_range*0.02 + xlim[0]
        data_lim_offset1 = -data_range*0.02 + xlim[1]
    axh[tracknum].plot([data_lim_offset0, data_lim_offset1], (logpos, logpos),
                       c=aura_log.c, lw=aura_log.lw)
    bbox = dict(fc='white', ec='white', alpha=1.0, pad=0.5)
    axh[tracknum].text(data_lim_offset0, logpos, fmtstr % xlim[0],
                       va='top', ha='left', color=aura_log.c,
                       bbox=bbox, fontsize=aura_log.fs)
    axh[tracknum].text(data_lim_offset1, logpos, fmtstr % xlim[1],
                       va='top', ha='right', color=aura_log.c,
                       bbox=bbox, fontsize=aura_log.fs)
    if xscale=='log':
        # BUG FIX: the log-name label must sit at the visual middle of a
        # log axis, i.e. the geometric mean of the limits.  The previous
        # np.sqrt(np.cumsum(xlim)) produced a 2-element array, which is
        # not a valid scalar text position.
        xpos_logname = np.sqrt(xlim[0] * xlim[1])
    else:
        xpos_logname = np.mean(xlim)
    if len(aura_log.units)>0:
        axh[tracknum].text(xpos_logname, logpos, aura_log.name+' ('+aura_log.units+')',
                           va='bottom', ha='center', color=aura_log.c,
                           fontsize=aura_log.fs)
    else:
        axh[tracknum].text(xpos_logname, logpos, aura_log.name,
                           va='bottom', ha='center', color=aura_log.c,
                           fontsize=aura_log.fs)
    axh[tracknum].set_ylim([0, numlogs+1])
    axh[tracknum].set_xlim(xlim)
def format_wellview(wview, ylabel='Depth', title_text='Title', ylim='none'):
    """
    Final cosmetic pass over an aura well-viewer figure: grids, tick
    placement, header/title panel styling.  Call once all well data are
    plotted.
    """
    fig = wview['fig']
    axd = wview['axd']
    axh = wview['axh']
    axttl = wview['axttl']
    # depth increases downwards
    axd[0].invert_yaxis()
    if ylim != 'none':
        axd[0].set_ylim(ylim)
    last = len(axd)
    for pos, (data_ax, hdr_ax) in enumerate(zip(axd, axh), start=1):
        data_ax.grid(True, which='major', ls=':', lw=0.5)
        data_ax.grid(True, which='minor', ls=':', lw=0.5)
        data_ax.minorticks_on()
        data_ax.xaxis.set_ticks_position('top')
        data_ax.xaxis.set_label_position('top')
        for lbl in data_ax.get_xticklabels():
            lbl.set_visible(False)
        # headers: plain white boxes, no ticks or grid
        hdr_ax.set_facecolor('white')
        hdr_ax.set_frame_on(True)
        hdr_ax.grid(False)
        hdr_ax.xaxis.set_visible(False)
        hdr_ax.yaxis.set_visible(False)
        data_ax.yaxis.set_ticks_position('both')
        # y label on the outermost tracks only; right track labelled right
        if pos == 1:
            data_ax.set_ylabel(ylabel)
        elif pos == last:
            data_ax.set_ylabel(ylabel)
            data_ax.yaxis.set_ticks_position('right')
            data_ax.yaxis.set_label_position('right')
        else:
            data_ax.tick_params(labelright=True)
            for lbl in data_ax.get_yticklabels():
                lbl.set_visible(False)
    # title strip styling
    axttl.set_facecolor('#fcfcc4')
    axttl.set_frame_on(True)
    axttl.grid(False)
    axttl.xaxis.set_visible(False)
    axttl.yaxis.set_visible(False)
    axttl.text(0.5, 0.5, title_text, ha='center', va='center', weight='normal')
    axttl.set_xlim([0, 1])
    axttl.set_ylim([0, 1])
    fig.tight_layout(w_pad=0.00, h_pad=0.00)
def add_van_krevelan_template(ax, lw=2, fs=14, c='k'):
T1 = np.array([[ 3.89993785, 97.72755495],
[ 4.27284027, 140.94126413],
[ 3.89993785, 182.6648454 ],
[ 3.89993785, 227.36868248],
[ 3.89993785, 260.15149634],
[ 3.89993785, 291.44418229],
[ 4.27284027, 324.22699615],
[ 4.27284027, 376.38147274],
[ 4.27284027, 419.59518192],
[ 4.27284027, 456.84837949],
[ 5.39154755, 489.63119334],
[ 5.01864512, 514.96336769],
[ 6.13735239, 559.66720477],
[ 7.25605966, 601.39078604],
[ 7.62896209, 629.70321619],
[ 8.00186451, 659.50577425],
[ 10.61218148, 701.22935552],
[ 12.84959602, 735.50229728],
[ | |
'Not Found':
logger.debug('**%s %s already deleted.' % (kind, name))
else:
logger.error(e)
raise e
def create_and_start_vm_from_iso(params):
    """
    Create a VM with virt-install from an ISO, then start it.

    The '--network' argument is stripped from the virt-install command line
    and applied afterwards through the network operations queue.  On any
    failure the half-created VM is destroyed (best effort) and the original
    error is re-raised.
    """
    metadata_name = _get_param('--name', params)
    network_config = _get_param('--network', params)
    try:
        # Enable periodic memory statistics collection for the new domain.
        operation_queue = ['virsh dommemstat --period %s --domain %s --config --live' % (str(5), metadata_name)]
        config_dict = _network_config_parser(network_config)
        operation_queue.extend( _get_network_operations_queue("createAndStartVMFromISO", config_dict, metadata_name))
        # Networking is handled by the queue above, not by virt-install.
        _set_param('--network', 'none', params)
        cmd = _unpackCmd('virt-install', params)
        runCmd(cmd)
        if is_vm_exists(metadata_name) and not is_vm_active(metadata_name):
            create(metadata_name)
        _runOperationQueue(operation_queue)
    except Exception as e:
        # Best-effort cleanup of the partially created VM.
        try:
            if is_vm_exists(metadata_name) and is_vm_active(metadata_name):
                destroy(metadata_name)
            time.sleep(0.5)
        except Exception:
            # Narrowed from a bare except so SystemExit/KeyboardInterrupt
            # are no longer swallowed; cleanup stays best-effort.
            logger.error('Oops! ', exc_info=1)
        raise e
def delete_vm(params):
    """Shut down a domain if it is running, then undefine it."""
    result = {'spec': {}}
    domain = _get_param('--domain', params)
    if is_vm_exists(domain) and is_vm_active(domain):
        destroy(domain)
    undefine_cmd = _unpackCmd('virsh undefine', params)
    try:
        runCmd(undefine_cmd)
    except Exception as e:
        if is_vm_exists(domain):
            raise BadRequest('Delete VM %s failed! Error: %s' % (domain, e))
        logger.debug('VM %s has already been deleted.' % domain)
    print(result)
def plug_nic(params):
    """Hot-plug a NIC into a domain via the network operations queue."""
    cmd_key = 'plugNIC'
    domain = _get_param('--domain', params)
    raw_config = _get_params(params)
    logger.debug(raw_config)
    parsed_config = _network_config_parser_json(cmd_key, raw_config)
    logger.debug(parsed_config)
    _runOperationQueue(_get_network_operations_queue(cmd_key, parsed_config, domain))
def unplug_nic(params):
    """Hot-unplug a NIC from a domain via the network operations queue."""
    cmd_key = 'unplugNIC'
    domain = _get_param('--domain', params)
    raw_config = _get_params(params)
    logger.debug(raw_config)
    parsed_config = _network_config_parser_json(cmd_key, raw_config)
    logger.debug(parsed_config)
    _runOperationQueue(_get_network_operations_queue(cmd_key, parsed_config, domain))
def plug_disk(params):
    """Hot-plug a disk into a domain via the disk operations queue."""
    cmd_key = 'plugDisk'
    domain = _get_param('--domain', params)
    raw_config = _get_params(params)
    logger.debug(raw_config)
    parsed_config = _disk_config_parser_json(cmd_key, raw_config)
    logger.debug(parsed_config)
    _runOperationQueue(_get_disk_operations_queue(cmd_key, parsed_config, domain))
def unplug_disk(params):
    """Hot-unplug a disk from a domain via the disk operations queue."""
    cmd_key = 'unplugDisk'
    domain = _get_param('--domain', params)
    raw_config = _get_params(params)
    logger.debug(raw_config)
    parsed_config = _disk_config_parser_json(cmd_key, raw_config)
    logger.debug(parsed_config)
    _runOperationQueue(_get_disk_operations_queue(cmd_key, parsed_config, domain))
def set_boot_order(params):
    """Change a domain's boot order via the redefine-VM operations queue."""
    cmd_key = 'setBootOrder'
    domain = _get_param('--domain', params)
    config = _get_params(params)
    logger.debug(config)
    _runOperationQueue(_get_redefine_vm_operations_queue(cmd_key, config, domain))
def set_vnc_password(params):
    """Set a domain's VNC password via the graphic operations queue."""
    cmd_key = 'setVncPassword'
    domain = _get_param('--domain', params)
    config = _get_params(params)
    logger.debug(config)
    _runOperationQueue(_get_graphic_operations_queue(cmd_key, config, domain))
def unset_vnc_password(params):
    """Remove a domain's VNC password via the graphic operations queue."""
    cmd_key = 'unsetVncPassword'
    domain = _get_param('--domain', params)
    config = _get_params(params)
    logger.debug(config)
    _runOperationQueue(_get_graphic_operations_queue(cmd_key, config, domain))
def set_guest_password(params):
    """Set a guest OS user password via the VM password operations queue."""
    cmd_key = 'setGuestPassword'
    domain = _get_param('--domain', params)
    config = _get_params(params)
    logger.debug(config)
    _runOperationQueue(_get_vm_password_operations_queue(cmd_key, config, domain))
def dumpxml(params):
    """Print a kube-style JSON description of a domain's XML definition."""
    name = _get_param('--name', params)
    skeleton = {'spec': {'domain': {}, 'nodeName': HOSTNAME, 'status': {}}}
    domain_xml = get_xml(name)
    power_state = vm_state(name).get(name)
    domain_json = toKubeJson(xmlToJson(domain_xml))
    domain_json = updateDomain(loads(domain_json))
    domain_json = updateJsonRemoveLifecycle(skeleton, domain_json)
    print(addPowerStatusMessage(domain_json, power_state, 'The VM is %s' % power_state))
def dump_l3_network_info(params):
    """Print a kube-style JSON description of an L3 network."""
    name = _get_param('--name', params)
    result = {'spec': {'data': get_l3_network_info(name), 'nodeName': HOSTNAME,
                       'type': 'l3network', 'status': {}}}
    print(addPowerStatusMessage(result, 'Ready', 'The resource is ready.'))
def dump_l3_address_info(params):
    """Print a kube-style JSON description of an L3 address set."""
    name = _get_param('--name', params)
    result = {'spec': {'data': get_address_set_info(name), 'nodeName': HOSTNAME,
                       'type': 'l3address', 'status': {}}}
    print(addPowerStatusMessage(result, 'Ready', 'The resource is ready.'))
def dump_l2_network_info(params):
    """Print a kube-style JSON description of an L2 network."""
    name = _get_param('--name', params)
    result = {'spec': {'data': get_l2_network_info(name), 'nodeName': HOSTNAME,
                       'type': 'l2network', 'status': {}}}
    print(addPowerStatusMessage(result, 'Ready', 'The resource is ready.'))
def delete_network():
    """Print an empty kube-style spec acknowledging network deletion."""
    print({'spec': {}})
def _runOperationQueue(operation_queue, interval=1):
    """Execute each queued shell operation in order, sleeping `interval`
    seconds between operations."""
    for op in operation_queue:
        logger.debug(op)
        # NOTE(review): unbind-swport is run with runCmd's second argument
        # set to False — presumably best-effort; confirm runCmd's flag
        # semantics against its definition.
        if 'kubeovn-adm unbind-swport' in op:
            runCmd(op, False)
        else:
            runCmd(op)
        time.sleep(interval)
def _unpackCmd(cmd, params):
    """Join a base command and its parameters into a single shell string."""
    pieces = ['%s' % cmd] + ['%s' % p for p in params]
    full_cmd = ' '.join(pieces)
    logger.debug(full_cmd)
    return full_cmd
def _createNICXml(metadata_name, data):
    '''
    Write a NIC <interface> XML file into the VM devices directory and
    return its path.

    :param metadata_name: VM name, used in the generated file name.
    :param data: dict of NIC settings.  Recognised keys become XML child
        elements; 'inbound'/'outbound' are grouped under <bandwidth>.
        A 'mac' entry must be present (it is part of the file name).
    :raises BadRequest: if the XML file cannot be written.
    '''
    doc = Document()
    root = doc.createElement('interface')
    root.setAttribute('type', 'bridge')
    doc.appendChild(root)
    # Child element name -> XML attribute carrying the value (replaces the
    # former copy-pasted elif chain).
    attr_for_key = {
        'mac': 'address',
        'source': 'bridge',
        'virtualport': 'type',
        'model': 'type',
        'target': 'dev',
    }
    bandwidth = {}
    for k, v in data.items():
        if k in attr_for_key:
            node = doc.createElement(k)
            node.setAttribute(attr_for_key[k], v)
            root.appendChild(node)
        elif k in ('inbound', 'outbound'):
            bandwidth[k] = v
    if bandwidth:
        node_bandwidth = doc.createElement('bandwidth')
        for k, v in bandwidth.items():
            sub_node = doc.createElement(k)
            sub_node.setAttribute('average', v)
            node_bandwidth.appendChild(sub_node)
        root.appendChild(node_bandwidth)
    # Create the devices directory on first use.
    if not os.path.exists(constants.KUBEVMM_VM_DEVICES_DIR):
        os.makedirs(constants.KUBEVMM_VM_DEVICES_DIR, 0o711)
    file_path = '%s/%s-nic-%s.xml' % (constants.KUBEVMM_VM_DEVICES_DIR, metadata_name, data.get('mac').replace(':', ''))
    try:
        with open(file_path, 'w') as f:
            f.write(doc.toprettyxml(indent='\t'))
    except Exception:
        # Narrowed from a bare except: don't swallow SystemExit/KeyboardInterrupt.
        raise BadRequest('Execute plugNIC error: cannot create NIC XML file \'%s\'' % file_path)
    return file_path
def _createDiskXml(metadata_name, data):
    '''
    Write a <disk> device XML file into the VM devices directory and
    return its path.

    :param metadata_name: VM name, used in the generated file name.
    :param data: dict of disk settings.  'driver'/'subdriver' configure the
        <driver> element; the *-bytes-sec / *-iops-sec keys are grouped
        under <iotune>; 'mode' becomes a bare element named after its value
        (e.g. <readonly/>); 'target' must be present (part of the file name).
    :raises BadRequest: if the XML file cannot be written.
    '''
    doc = Document()
    root = doc.createElement('disk')
    root.setAttribute('type', 'file')
    root.setAttribute('device', data.get('type') if data.get('type') else 'disk')
    doc.appendChild(root)
    driver = {}
    iotune = {}
    iotune_keys = ('read-bytes-sec', 'write-bytes-sec', 'read-iops-sec', 'write-iops-sec')
    for k, v in data.items():
        if k in ('driver', 'subdriver'):
            driver[k] = v
        elif k == 'source':
            node = doc.createElement(k)
            node.setAttribute('file', v)
            root.appendChild(node)
        elif k == 'mode':
            # Access mode is encoded as an element named after the value.
            node = doc.createElement(v)
            root.appendChild(node)
        elif k == 'target':
            node = doc.createElement(k)
            node.setAttribute('dev', v)
            root.appendChild(node)
        elif k in iotune_keys:
            iotune[k] = v
    if driver:
        node = doc.createElement('driver')
        node.setAttribute('name', driver.get('driver') if driver.get('driver') else 'qemu')
        node.setAttribute('type', driver.get('subdriver') if driver.get('subdriver') else 'qcow2')
        root.appendChild(node)
    if iotune:
        vm_iotune = doc.createElement('iotune')
        for k, v in iotune.items():
            sub_node = doc.createElement(k)
            sub_node.appendChild(doc.createTextNode(v))
            vm_iotune.appendChild(sub_node)
        root.appendChild(vm_iotune)
    # Create the devices directory on first use.
    if not os.path.exists(constants.KUBEVMM_VM_DEVICES_DIR):
        os.makedirs(constants.KUBEVMM_VM_DEVICES_DIR, 0o711)
    file_path = '%s/%s-disk-%s.xml' % (constants.KUBEVMM_VM_DEVICES_DIR, metadata_name, data.get('target'))
    try:
        with open(file_path, 'w') as f:
            f.write(doc.toprettyxml(indent='\t'))
    except Exception:
        # Narrowed from a bare except: don't swallow SystemExit/KeyboardInterrupt.
        raise BadRequest('Execute plugDisk error: cannot create disk XML file \'%s\'' % file_path)
    return file_path
def _createGraphicXml(metadata_name, data, unset_vnc_password=False):
    '''
    Write a VNC <graphics> XML file into the VM devices directory and
    return its path.

    :param metadata_name: VM name, used in the generated file name.
    :param data: dict that may carry a 'password' entry.
    :param unset_vnc_password: when True, omit the passwd attribute so the
        VNC password is removed.
    :raises BadRequest: if the XML file cannot be written.
    '''
    doc = Document()
    root = doc.createElement('graphics')
    root.setAttribute('type', 'vnc')
    if not unset_vnc_password and data.get('password'):
        root.setAttribute('passwd', data.get('password'))
    doc.appendChild(root)
    node = doc.createElement('listen')
    node.setAttribute('type', 'address')
    node.setAttribute('address', '0.0.0.0')
    root.appendChild(node)
    # Create the devices directory on first use.
    if not os.path.exists(constants.KUBEVMM_VM_DEVICES_DIR):
        os.makedirs(constants.KUBEVMM_VM_DEVICES_DIR, 0o711)
    file_path = '%s/%s-graphic.xml' % (constants.KUBEVMM_VM_DEVICES_DIR, metadata_name)
    try:
        with open(file_path, 'w') as f:
            f.write(doc.toprettyxml(indent='\t'))
    except Exception:
        # Bug fix: the message previously said "plugDisk ... disk XML file",
        # copy-pasted from _createDiskXml; also narrowed from a bare except.
        raise BadRequest('Execute VNC password operation error: cannot create graphic XML file \'%s\'' % file_path)
    return file_path
# def _validate_network_params(data):
# if data:
# for key in data.keys():
# if key not in ['type', 'source', 'inbound', 'outbound', 'mac', 'ip', 'switch']:
# return False
# else:
# return False
# return True
def _network_config_parser(data):
retv = {}
if data:
split_it = data.split(',')
for i in split_it:
i = i.strip()
if i.find('=') != -1:
(k, v) = i.split('=')
retv[k] = v
if retv:
net_type = retv.get('type')
if not net_type:
raise BadRequest('Network config error: no "type" parameter.')
else:
if net_type not in ['bridge', 'l2bridge', 'l3bridge']:
raise BadRequest('Network config error: unsupported network "type" %s.' % retv['type'])
source = retv.get('source')
if not source:
raise BadRequest('Network config error: no "source" parameter.')
if 'mac' not in retv.keys():
retv['mac'] = randomMAC()
'''
Add default params.
'''
if net_type in ['l2bridge', 'l3bridge']:
retv['virtualport'] = 'openvswitch'
retv['model'] = 'virtio'
retv['target'] = 'fe%s' % (retv['mac'].replace(':', '')[2:])
else:
raise BadRequest('Network config error: no parameters or in wrong format, plz check it!')
return retv
def _network_config_parser_json(the_cmd_key, data):
    '''
    Parse a JSON-style NIC config dict; the 'source' value itself holds a
    comma-separated "key=value" list which is merged into the result.

    For unplugNIC only a 'mac' is required.  Otherwise 'type' (one of
    bridge/l2bridge/l3bridge) and 'source' are required; a missing MAC is
    generated and OVS-backed bridges get default virtualport/model/target
    entries.

    :raises BadRequest: on missing/invalid keys or empty input.
    '''
    retv = {}
    if data:
        retv = data.copy()
        if _isUnplugNIC(the_cmd_key):
            # Unplug only needs the MAC that identifies the NIC.
            if not retv.get('mac'):
                raise BadRequest('Network config error: no "mac" parameter.')
            return retv
        source = data.get('source')
        if not source:
            raise BadRequest('Network config error: no "source" parameter.')
        for item in source.split(','):
            item = item.strip()
            if '=' in item:
                # Split on the first '=' only, so values may themselves
                # contain '=' (the old split('=') raised ValueError here).
                k, v = item.split('=', 1)
                retv[k] = v
    if retv:
        net_type = retv.get('type')
        if not net_type:
            raise BadRequest('Network config error: no "type" parameter.')
        else:
            if net_type not in ['bridge', 'l2bridge', 'l3bridge']:
                raise BadRequest('Network config error: unsupported network "type" %s.' % retv['type'])
        if 'mac' not in retv.keys():
            retv['mac'] = randomMAC()
        # Add default params for OVS-backed bridge types.
        if net_type in ['l2bridge', 'l3bridge']:
            retv['virtualport'] = 'openvswitch'
            retv['model'] = 'virtio'
            retv['target'] = 'fe%s' % (retv['mac'].replace(':', '')[2:])
    else:
        raise BadRequest('Network config error: no parameters or in wrong format, plz check it!')
    return retv
def _disk_config_parser_json(the_cmd_key, data):
    '''Validate a JSON-style disk config dict and return a copy of it.'''
    if not data:
        raise BadRequest('Disk config error: no parameters or in wrong format, plz check it!')
    retv = data.copy()
    if _isUnplugDisk(the_cmd_key):
        # Unplug only needs the target device name.
        if not retv.get('target'):
            raise BadRequest('Disk config error: no "target" parameter.')
        return retv
    if not data.get('source'):
        raise BadRequest('Disk config error: no "source" parameter.')
    if not retv.get('target'):
        raise BadRequest('Disk config error: no "target" parameter.')
    return retv
def _get_network_operations_queue(the_cmd_key, config_dict, metadata_name):
if _isInstallVMFromISO(the_cmd_key) or _isInstallVMFromImage(the_cmd_key) or _isPlugNIC(the_cmd_key):
if _isPlugNIC(the_cmd_key):
args = ''
if config_dict.get('live'):
args = args + '--live '
| |
op):
bk = self.rtyper.annotator.bookkeeper
funcdesc, = s_pbc.descriptions
args = simple_args(args_s)
with bk.at_position(None):
graph = funcdesc.get_graph(args, op)
llfn = self.rtyper.getcallable(graph)
return inputconst(typeOf(llfn), llfn)
class __extend__(pairtype(FunctionRepr, FunctionRepr)):
    def convert_from_to((r_fpbc1, r_fpbc2), v, llops):
        # Same repr class on both sides: the value passes through unchanged.
        return v
class __extend__(pairtype(FunctionRepr, FunctionsPBCRepr)):
    def convert_from_to((r_fpbc1, r_fpbc2), v, llops):
        # Build a constant in the target PBC repr from the source's
        # (annotator-known) constant function.
        return inputconst(r_fpbc2, r_fpbc1.s_pbc.const)
class __extend__(pairtype(FunctionsPBCRepr, FunctionRepr)):
    def convert_from_to((r_fpbc1, r_fpbc2), v, llops):
        # Converting to a single-function repr needs no runtime value:
        # emit a Void constant.
        return inputconst(Void, None)
class __extend__(pairtype(FunctionsPBCRepr, FunctionsPBCRepr)):
    def convert_from_to((r_fpbc1, r_fpbc2), v, llops):
        # this check makes sense because both source and dest repr are FunctionsPBCRepr
        if r_fpbc1.lowleveltype == r_fpbc2.lowleveltype:
            return v
        # Different low-level function pointer types: no direct conversion.
        return NotImplemented
class SmallFunctionSetPBCRepr(FunctionReprBase):
    """Repr for a small set of functions, encoded at runtime as a one-byte
    Char index into a prebuilt table of function pointers, instead of a
    function pointer itself."""
    def __init__(self, rtyper, s_pbc):
        FunctionReprBase.__init__(self, rtyper, s_pbc)
        llct = get_concrete_calltable(self.rtyper, self.callfamily)
        # The compact Char encoding relies on a single call-table row.
        assert len(llct.uniquerows) == 1
        self.lowleveltype = Char
        # Pointer-based repr used to fill the prebuilt pointer table.
        self.pointer_repr = FunctionsPBCRepr(rtyper, s_pbc)
        self._conversion_tables = {}
        self._compression_function = None
        self._dispatch_cache = {}
    def _setup_repr(self):
        if self.s_pbc.subset_of:
            # Subset of another small set: share its descriptions and
            # pointer table so indices agree between the two reprs.
            assert self.s_pbc.can_be_None == self.s_pbc.subset_of.can_be_None
            r = self.rtyper.getrepr(self.s_pbc.subset_of)
            if r is not self:
                r.setup()
                self.descriptions = r.descriptions
                self.c_pointer_table = r.c_pointer_table
                return
        self.descriptions = list(self.s_pbc.descriptions)
        if self.s_pbc.can_be_None:
            # Index 0 is reserved for the None case.
            self.descriptions.insert(0, None)
        POINTER_TABLE = Array(self.pointer_repr.lowleveltype,
                              hints={'nolength': True})
        pointer_table = malloc(POINTER_TABLE, len(self.descriptions),
                               immortal=True)
        for i, desc in enumerate(self.descriptions):
            if desc is not None:
                pointer_table[i] = self.pointer_repr.convert_desc(desc)
            else:
                pointer_table[i] = self.pointer_repr.convert_const(None)
        self.c_pointer_table = inputconst(Ptr(POINTER_TABLE), pointer_table)
    def convert_desc(self, funcdesc):
        # A function is represented by its Char-encoded table index.
        return chr(self.descriptions.index(funcdesc))
    def convert_const(self, value):
        if isinstance(value, types.MethodType) and value.im_self is None:
            value = value.im_func # unbound method -> bare function
        if value is None:
            assert self.descriptions[0] is None
            return chr(0)
        funcdesc = self.rtyper.annotator.bookkeeper.getdesc(value)
        return self.convert_desc(funcdesc)
    def special_uninitialized_value(self):
        # Sentinel Char marking a not-yet-initialized value.
        return chr(0xFF)
    def dispatcher(self, shape, index, argtypes, resulttype):
        # Return (and cache) a constant pointer to a dispatcher graph,
        # one per (shape, index, argtypes, resulttype) combination.
        key = shape, index, tuple(argtypes), resulttype
        if key in self._dispatch_cache:
            return self._dispatch_cache[key]
        graph = self.make_dispatcher(shape, index, argtypes, resulttype)
        self.rtyper.annotator.translator.graphs.append(graph)
        ll_ret = getfunctionptr(graph)
        c_ret = self._dispatch_cache[key] = inputconst(typeOf(ll_ret), ll_ret)
        return c_ret
    def make_dispatcher(self, shape, index, argtypes, resulttype):
        # Build a flow graph that switches on the Char index and
        # direct-calls the corresponding concrete function.
        inputargs = [varoftype(t) for t in [Char] + argtypes]
        startblock = Block(inputargs)
        startblock.exitswitch = inputargs[0]
        graph = FunctionGraph("dispatcher", startblock, varoftype(resulttype))
        row_of_graphs = self.callfamily.calltables[shape][index]
        links = []
        descs = list(self.s_pbc.descriptions)
        if self.s_pbc.can_be_None:
            descs.insert(0, None)
        for desc in descs:
            if desc is None:
                continue
            args_v = [varoftype(t) for t in argtypes]
            b = Block(args_v)
            llfn = self.rtyper.getcallable(row_of_graphs[desc])
            v_fn = inputconst(typeOf(llfn), llfn)
            v_result = varoftype(resulttype)
            b.operations.append(
                SpaceOperation("direct_call", [v_fn] + args_v, v_result))
            b.closeblock(Link([v_result], graph.returnblock))
            i = self.descriptions.index(desc)
            links.append(Link(inputargs[1:], b, chr(i)))
            links[-1].llexitcase = chr(i)
        startblock.closeblock(*links)
        return graph
    def call(self, hop):
        # Rtype a call: route through the generated dispatcher instead of
        # an indirect call on a function pointer.
        bk = self.rtyper.annotator.bookkeeper
        args = hop.spaceop.build_args(hop.args_s[1:])
        s_pbc = hop.args_s[0]   # possibly more precise than self.s_pbc
        descs = list(s_pbc.descriptions)
        shape, index = self.callfamily.find_row(bk, descs, args, hop.spaceop)
        row_of_graphs = self.callfamily.calltables[shape][index]
        anygraph = row_of_graphs.itervalues().next()  # pick any witness
        vlist = [hop.inputarg(self, arg=0)]
        vlist += callparse.callparse(self.rtyper, anygraph, hop)
        rresult = callparse.getrresult(self.rtyper, anygraph)
        hop.exception_is_here()
        v_dispatcher = self.dispatcher(shape, index,
                [v.concretetype for v in vlist[1:]], rresult.lowleveltype)
        v_result = hop.genop('direct_call', [v_dispatcher] + vlist,
                             resulttype=rresult)
        return hop.llops.convertvar(v_result, rresult, hop.r_result)
    def rtype_bool(self, hop):
        if not self.s_pbc.can_be_None:
            # Cannot be None, so the truth value is constantly True.
            return inputconst(Bool, True)
        else:
            v1, = hop.inputargs(self)
            # None is encoded as chr(0); anything else is a real function.
            return hop.genop('char_ne', [v1, inputconst(Char, '\000')],
                             resulttype=Bool)
class __extend__(pairtype(SmallFunctionSetPBCRepr, FunctionRepr)):
    def convert_from_to((r_set, r_ptr), v, llops):
        # Target is a single known function: no runtime data needed (Void).
        return inputconst(Void, None)
class __extend__(pairtype(SmallFunctionSetPBCRepr, FunctionsPBCRepr)):
    def convert_from_to((r_set, r_ptr), v, llops):
        # Expand the Char index back into a real function pointer by
        # loading it from the prebuilt pointer table.
        assert v.concretetype is Char
        v_int = llops.genop('cast_char_to_int', [v], resulttype=Signed)
        return llops.genop('getarrayitem', [r_set.c_pointer_table, v_int],
                           resulttype=r_ptr.lowleveltype)
def compression_function(r_set):
    # Build (and cache on the repr) a low-level helper that maps a function
    # pointer back to its Char index in r_set's pointer table.
    if r_set._compression_function is None:
        table = []
        for i, p in enumerate(r_set.c_pointer_table.value):
            table.append((chr(i), p))
        last_c, last_p = table[-1]
        unroll_table = unrolling_iterable(table[:-1])
        def ll_compress(fnptr):
            # Linear search, unrolled at translation time; the final entry
            # is asserted rather than compared.
            for c, p in unroll_table:
                if fnptr == p:
                    return c
            else:
                ll_assert(fnptr == last_p, "unexpected function pointer")
                return last_c
        r_set._compression_function = ll_compress
    return r_set._compression_function
class __extend__(pairtype(FunctionRepr, SmallFunctionSetPBCRepr)):
    def convert_from_to((r_ptr, r_set), v, llops):
        # A single known function converts to its constant Char index.
        desc, = r_ptr.s_pbc.descriptions
        return inputconst(Char, r_set.convert_desc(desc))
class __extend__(pairtype(FunctionsPBCRepr, SmallFunctionSetPBCRepr)):
    def convert_from_to((r_ptr, r_set), v, llops):
        # Compress a function pointer into its Char index at runtime.
        ll_compress = compression_function(r_set)
        return llops.gendirectcall(ll_compress, v)
class __extend__(pairtype(FunctionReprBase, FunctionReprBase)):
    def rtype_is_((robj1, robj2), hop):
        # Identity test between two function PBCs: constant-fold when the
        # annotator already knows the answer, otherwise convert both sides
        # to a common repr and compare (Char index or function pointer).
        if hop.s_result.is_constant():
            return inputconst(Bool, hop.s_result.const)
        s_pbc = annmodel.unionof(robj1.s_pbc, robj2.s_pbc)
        r_pbc = hop.rtyper.getrepr(s_pbc)
        v1, v2 = hop.inputargs(r_pbc, r_pbc)
        assert v1.concretetype == v2.concretetype
        if v1.concretetype == Char:
            return hop.genop('char_eq', [v1, v2], resulttype=Bool)
        elif isinstance(v1.concretetype, Ptr):
            return hop.genop('ptr_eq', [v1, v2], resulttype=Bool)
        else:
            raise TyperError("unknown type %r" % (v1.concretetype,))
def conversion_table(r_from, r_to):
    # Build (and cache) a Char->Char table translating indices of r_from's
    # function set into indices of r_to's set.  Returns None when the
    # mapping is the identity, meaning no runtime conversion is needed.
    if r_to in r_from._conversion_tables:
        return r_from._conversion_tables[r_to]
    else:
        t = malloc(Array(Char, hints={'nolength': True}),
                   len(r_from.descriptions), immortal=True)
        l = []
        for i, d in enumerate(r_from.descriptions):
            if d in r_to.descriptions:
                j = r_to.descriptions.index(d)
                l.append(j)
                t[i] = chr(j)
            else:
                l.append(None)
        if l == range(len(r_from.descriptions)):
            # Identity mapping: signal "no table needed" with None.
            r = None
        else:
            r = inputconst(Ptr(Array(Char, hints={'nolength': True})), t)
        r_from._conversion_tables[r_to] = r
        return r
class __extend__(pairtype(SmallFunctionSetPBCRepr, SmallFunctionSetPBCRepr)):
    def convert_from_to((r_from, r_to), v, llops):
        # Remap the Char index through the conversion table; a None table
        # means the sets agree and the value passes through unchanged.
        c_table = conversion_table(r_from, r_to)
        if c_table:
            assert v.concretetype is Char
            v_int = llops.genop('cast_char_to_int', [v],
                                resulttype=Signed)
            return llops.genop('getarrayitem', [c_table, v_int],
                               resulttype=Char)
        else:
            return v
def getFrozenPBCRepr(rtyper, s_pbc):
    # Choose the repr for a set of frozen (non-callable) PBCs:
    # - exactly one desc and no None -> Void-based SingleFrozenPBCRepr;
    # - all descs share an attribute access set -> one struct-based
    #   MultipleFrozenPBCRepr per access set (cached in rtyper.pbc_reprs);
    # - otherwise an address-based repr supporting only 'is' comparison.
    descs = list(s_pbc.descriptions)
    assert len(descs) >= 1
    if len(descs) == 1 and not s_pbc.can_be_None:
        return SingleFrozenPBCRepr(descs[0])
    else:
        access = descs[0].queryattrfamily()
        for desc in descs[1:]:
            access1 = desc.queryattrfamily()
            if access1 is not access:
                # No common access set: fall back to the shared
                # "unrelated" repr.
                try:
                    return rtyper.pbc_reprs['unrelated']
                except KeyError:
                    result = MultipleUnrelatedFrozenPBCRepr(rtyper)
                    rtyper.pbc_reprs['unrelated'] = result
                    return result
        try:
            return rtyper.pbc_reprs[access]
        except KeyError:
            result = MultipleFrozenPBCRepr(rtyper, access)
            rtyper.pbc_reprs[access] = result
            rtyper.add_pendingsetup(result)
            return result
class SingleFrozenPBCRepr(Repr):
    """Representation selected for a single non-callable pre-built constant."""
    # The value is fully known at translation time, so nothing is stored.
    lowleveltype = Void
    def __init__(self, frozendesc):
        self.frozendesc = frozendesc
    def rtype_getattr(_, hop):
        # All attributes of a single frozen PBC are translation-time
        # constants.
        if not hop.s_result.is_constant():
            raise TyperError("getattr on a constant PBC returns a non-constant")
        return hop.inputconst(hop.r_result, hop.s_result.const)
    def convert_desc(self, frozendesc):
        assert frozendesc is self.frozendesc
        return object() # lowleveltype is Void
    def convert_const(self, value):
        return None
    def getstr(self):
        return str(self.frozendesc)
    getstr._annspecialcase_ = 'specialize:memo'
    def ll_str(self, x):
        return self.getstr()
class MultipleFrozenPBCReprBase(CanBeNull, Repr):
    # Common base for frozen-PBC reprs: convert a prebuilt constant by
    # looking up its frozen descriptor; None maps to the null instance.
    def convert_const(self, pbc):
        if pbc is None:
            return self.null_instance()
        frozendesc = self.rtyper.annotator.bookkeeper.getdesc(pbc)
        return self.convert_desc(frozendesc)
class MultipleUnrelatedFrozenPBCRepr(MultipleFrozenPBCReprBase):
    """For a SomePBC of frozen PBCs that have no common access set.
    The only possible operation on such a thing is comparison with 'is'."""
    lowleveltype = llmemory.Address
    # Placeholder struct type used when a desc's own repr stores nothing.
    EMPTY = Struct('pbc', hints={'immutable': True, 'mu_ptr_as_ref': True})
    def __init__(self, rtyper):
        self.rtyper = rtyper
        self.converted_pbc_cache = {}
    def convert_desc(self, frozendesc):
        # Cache one fake address per descriptor so 'is' comparisons are
        # stable across conversions.
        try:
            return self.converted_pbc_cache[frozendesc]
        except KeyError:
            r = self.rtyper.getrepr(annmodel.SomePBC([frozendesc]))
            if r.lowleveltype is Void:
                # must create a new empty structure, as a placeholder
                pbc = self.create_instance()
            else:
                pbc = r.convert_desc(frozendesc)
            convpbc = self.convert_pbc(pbc)
            self.converted_pbc_cache[frozendesc] = convpbc
            return convpbc
    def convert_pbc(self, pbcptr):
        return llmemory.fakeaddress(pbcptr)
    def create_instance(self):
        return malloc(self.EMPTY, immortal=True)
    def null_instance(self):
        return llmemory.Address._defl()
    def rtype_getattr(_, hop):
        # Attribute reads must already be constant-folded by the annotator.
        if not hop.s_result.is_constant():
            raise TyperError("getattr on a constant PBC returns a non-constant")
        return hop.inputconst(hop.r_result, hop.s_result.const)
class __extend__(pairtype(MultipleUnrelatedFrozenPBCRepr,
                          MultipleUnrelatedFrozenPBCRepr),
                 pairtype(MultipleUnrelatedFrozenPBCRepr,
                          SingleFrozenPBCRepr),
                 pairtype(SingleFrozenPBCRepr,
                          MultipleUnrelatedFrozenPBCRepr)):
    def rtype_is_((robj1, robj2), hop):
        # Compare as raw addresses; pick whichever side is the
        # address-based repr and convert both operands to it.
        if isinstance(robj1, MultipleUnrelatedFrozenPBCRepr):
            r = robj1
        else:
            r = robj2
        vlist = hop.inputargs(r, r)
        return hop.genop('adr_eq', vlist, resulttype=Bool)
class MultipleFrozenPBCRepr(MultipleFrozenPBCReprBase):
    """For a SomePBC of frozen PBCs with a common attribute access set."""
    def __init__(self, rtyper, access_set):
        self.rtyper = rtyper
        self.access_set = access_set
        # The struct type is only filled in later by _setup_repr().
        self.pbc_type = ForwardReference()
        self.lowleveltype = Ptr(self.pbc_type)
        self.pbc_cache = {}
    def _setup_repr(self):
        llfields = self._setup_repr_fields()
        kwds = {'hints': {'immutable': True, 'mu_ptr_as_ref': True}}
        self.pbc_type.become(Struct('pbc', *llfields, **kwds))
    def create_instance(self):
        return malloc(self.pbc_type, immortal=True)
    def null_instance(self):
        return nullptr(self.pbc_type)
    def getfield(self, vpbc, attr, llops):
        # Read one (mangled) attribute field out of the pbc struct.
        mangled_name, r_value = self.fieldmap[attr]
        cmangledname = inputconst(Void, mangled_name)
        return llops.genop('getfield', [vpbc, cmangledname],
                           resulttype=r_value)
    def _setup_repr_fields(self):
        # One struct field per attribute in the access set, sorted by name;
        # also records the attr -> (mangled_name, repr) mapping.
        fields = []
        self.fieldmap = {}
        if self.access_set is not None:
            attrlist = self.access_set.attrs.keys()
            attrlist.sort()
            for attr in attrlist:
                s_value = self.access_set.attrs[attr]
                r_value = self.rtyper.getrepr(s_value)
                mangled_name = mangle('pbc', attr)
                fields.append((mangled_name, r_value.lowleveltype))
                self.fieldmap[attr] = mangled_name, r_value
        return fields
    def convert_desc(self, frozendesc):
        if (self.access_set is not None and
            frozendesc not in self.access_set.descs):
            raise TyperError("not found in PBC access set: %r" % (frozendesc,))
        try:
            return self.pbc_cache[frozendesc]
        except KeyError:
            self.setup()
            result = self.create_instance()
            # Cache before filling the fields so recursive references to
            # this same desc resolve to the instance being built.
            self.pbc_cache[frozendesc] = result
            for attr, (mangled_name, r_value) in self.fieldmap.items():
                if r_value.lowleveltype is Void:
                    continue
                try:
                    thisattrvalue = frozendesc.attrcache[attr]
                except KeyError:
                    if frozendesc.warn_missing_attribute(attr):
                        warning("Desc %r has no attribute %r" % (frozendesc, attr))
                    continue
                llvalue = r_value.convert_const(thisattrvalue)
                setattr(result, mangled_name, llvalue)
            return result
    def rtype_getattr(self, hop):
        if hop.s_result.is_constant():
            return hop.inputconst(hop.r_result, hop.s_result.const)
        attr = hop.args_s[1].const
        vpbc, vattr = hop.inputargs(self, Void)
        v_res = self.getfield(vpbc, attr, hop.llops)
        mangled_name, r_res = self.fieldmap[attr]
        return hop.llops.convertvar(v_res, r_res, hop.r_result)
class __extend__(pairtype(MultipleFrozenPBCRepr,
                          MultipleUnrelatedFrozenPBCRepr)):
    def convert_from_to((robj1, robj2), v, llops):
        # Struct pointer -> raw address for the identity-only repr.
        return llops.genop('cast_ptr_to_adr', [v], resulttype=llmemory.Address)
class __extend__(pairtype(MultipleFrozenPBCRepr, MultipleFrozenPBCRepr)):
    def convert_from_to((r_pbc1, r_pbc2), v, llops):
        # Only reprs built from the same access set share a struct layout.
        if r_pbc1.access_set == r_pbc2.access_set:
            return v
        return NotImplemented
class __extend__(pairtype(SingleFrozenPBCRepr, MultipleFrozenPBCRepr)):
    def convert_from_to((r_pbc1, r_pbc2), v, llops):
        # A single frozen desc becomes a constant struct pointer, provided
        # it belongs to the target repr's access set.
        frozendesc1 = r_pbc1.frozendesc
        access = frozendesc1.queryattrfamily()
        if access is r_pbc2.access_set:
            value = r_pbc2.convert_desc(frozendesc1)
            lltype = r_pbc2.lowleveltype
            return Constant(value, lltype)
        return NotImplemented
class __extend__(pairtype(MultipleFrozenPBCReprBase,
                          SingleFrozenPBCRepr)):
    def convert_from_to((r_pbc1, r_pbc2), v, llops):
        # Target is a single known desc: a Void constant carrying the
        # descriptor is all that is needed.
        return inputconst(Void, r_pbc2.frozendesc)
class MethodOfFrozenPBCRepr(Repr):
"""Representation selected for a PBC of method object(s) of frozen PBCs.
It assumes that all methods are the same function bound to different PBCs.
The low-level representation can then be a pointer to that PBC."""
def __init__(self, rtyper, | |
# Repository: tlapusan/DecisionTreeStructure
import logging
import sys
import graphviz
import numpy as np
import pygraphviz as pgv
from matplotlib import pyplot as plt
from sklearn import tree as sklearn_tree
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier
logging.basicConfig(level=logging.INFO, format="%(levelname)s - %(message)s")
# TODO: change plot labels to display entropy or gini; maybe rename "leaf" to "leaves"?
# TODO make more clear how to set show_leaf_predictions parameters
class DecisionTreeStructure:
"""A visual interpretation of decision tree structures.
It contains two types of visualisations :
- visualisations related to leaf nodes
- visualisations about tree predictions
Attributes
----------
tree : sklearn.tree.DecisionTreeClassifier
The tree to investigate
train_dataset: pandas.core.frame.DataFrame
The dataset the tree was trained on
features: array of strings
The list of features names used to train the tree
target: str
The name of target variable
node_count : int
The number of nodes from the tree
children_left : array of int, shape[node_count]
children_left[i] holds the node id of the left child node of node i.
For leaves, children_left[i] == TREE_LEAF
children_right : array of int, shape[node_count]
children_right[i] holds the node id of the right child node of node i.
For leaves, children_right[i] == TREE_LEAF
feature : array of int, shape[node_count]
feature[i] holds the feature index used for split at node i
threshold : array of double, shape[node_count]
threshold[i] holds the split threshold for node i
impurity : array of double, shape[node_count]
impurity[i] holds the impurity (ex. the value of splitting criterion) for node i
n_node_samples : array of int, shape[node_count]
n_node_samples[i] holds the number of training examples reaching the node i
weighted_n_node_samples : array of int, shape[node_count]
weighted_n_node_samples[i] holds the weighted number of training examples reaching the node i
value : array of double, shape [node_count, n_outputs, max_n_classes]
value[i] holds the prediction value for node i
is_leaf : array of bool, shape[node_count]
is_leaf[i] holds true or false, depending if node i is a leaf or split node
split_node_samples: array of int, shape[node_count]
split_node_samples[i] holds training samples reaching the node i
"""
    def __init__(self, tree, train_dataset, features, target):
        """Initialize necessary information about the tree.
        :param tree: sklearn.tree.DecisionTreeClassifier
            The tree to investigate
        :param train_dataset: pandas.core.frame.DataFrame
            The training dataset the tree was trained on
        :param features: array of strings
            The list of features names used to train the tree
        :param target: str
            The name of target variable
        """
        self.tree = tree
        # Reset the index; presumably so row positions line up with
        # sklearn's sample indices — TODO confirm against the callers.
        self.train_dataset = train_dataset.reset_index(drop=True)
        self.features = features
        self.target = target
        # Cache the raw sklearn tree_ arrays for direct access.
        self.node_count = tree.tree_.node_count
        self.children_left = tree.tree_.children_left
        self.children_right = tree.tree_.children_right
        self.feature = tree.tree_.feature
        self.threshold = tree.tree_.threshold
        self.impurity = tree.tree_.impurity
        self.n_node_samples = tree.tree_.n_node_samples
        self.weighted_n_node_samples = tree.tree_.weighted_n_node_samples
        self.value = tree.tree_.value
        # Both filled lazily (see _calculate_leaf_nodes for is_leaf).
        self.is_leaf = []
        self.split_node_samples = {}
def _calculate_leaf_nodes(self):
"""Used to calculate the node type.
The array is_leaf[index] will be True in case the node with id=index is a leaf,
or False if the node is a split node.
"""
if len(self.is_leaf) == 0:
self.is_leaf = np.zeros(shape=self.node_count, dtype=bool)
stack = [(0)] # seed is the root node id and its parent depth
while len(stack) > 0:
node_id = stack.pop()
# If we have a test node
if self.children_left[node_id] != self.children_right[node_id]:
stack.append((self.children_left[node_id]))
stack.append((self.children_right[node_id]))
else:
self.is_leaf[node_id] = True
def show_decision_tree_structure(self, rotate=True):
"""Show decision tree structure as a binary tree.
It is just an utility method for graphviz functionality to render a decision tree structure.
:return: graphviz.files.Source
"""
dot_data = sklearn_tree.export_graphviz(self.tree, out_file=None, feature_names=self.features,
filled=True, rotate=rotate, node_ids=True)
return graphviz.Source(dot_data)
def show_features_importance(self, barh=False, max_feature_to_display=None, figsize=(20, 10)):
"""Visual representation of features importance for DecisionTree.
Features are ordered descending by their importance using a bar plot visualisation.
:param max_feature_to_display: int
Maximum number of features to display. This is useful in case we have hundreds of features and the
plot become incomprehensible.
:param barh: boolean
True if we want to display feature importance into a bath plot, false otherwise
:param figsize: tuple
the size (x, y) of the plot (default is (20, 10))
:return: None
"""
feature_importances, feature_names = zip(
*sorted(list(zip(self.tree.feature_importances_, self.features)), key=lambda tup: tup[0],
reverse=True))
if max_feature_to_display is not None:
feature_names = feature_names[:max_feature_to_display]
feature_importances = feature_importances[:max_feature_to_display]
plt.figure(figsize=figsize)
if barh:
plt.barh(feature_names, feature_importances)
else:
plt.bar(feature_names, feature_importances)
plt.xlabel("feature name", fontsize=20)
plt.ylabel("feature importance", fontsize=20)
plt.grid()
plt.show()
def _get_node_path_info(self, node_id, sample, is_weighted):
self._calculate_leaf_nodes()
sample_value = round(sample[self.feature[node_id]], 2)
if sample_value <= self.threshold[node_id]:
threshold_sign = "<="
else:
threshold_sign = ">"
newline = "\n"
if isinstance(self.tree, DecisionTreeClassifier):
# I created bellow local variables because of reformat code issues(when the whole statement is in return)
split_value = self.features[self.feature[node_id]] + '(' + str(
sample[self.feature[node_id]]) + ') ' + threshold_sign + ' ' + str(
round(self.threshold[node_id], 2)) + newline if not self.is_leaf[node_id] else ''
weighted_sample_value = 'weighted sample ' + str(
round(self.weighted_n_node_samples[node_id], 1)) + newline if is_weighted else ''
return f"Node {node_id} \n" \
f"{split_value}" \
f"samples {self.n_node_samples[node_id]} \n" \
f"{weighted_sample_value}" \
f"values {self.value[node_id][0]}, \n" \
f"impurity {round(self.impurity[node_id], 2)}"
elif isinstance(self.tree, DecisionTreeRegressor):
split_value = self.features[self.feature[node_id]] + '(' + str(
sample[self.feature[node_id]]) + ') ' + threshold_sign + ' ' + str(
round(self.threshold[node_id], 2)) + newline if not self.is_leaf[node_id] else ''
weighted_sample_value = 'weighted sample ' + str(
round(self.weighted_n_node_samples[node_id], 1)) + newline if is_weighted else ''
return f"Node {node_id} \n" \
f"{split_value}" \
f"samples {self.n_node_samples[node_id]} \n" \
f"{weighted_sample_value}" \
f"prediction {self.value[node_id][0][0]}, \n" \
f"{self.tree.criterion.upper()} {round(self.impurity[node_id], 2)}"
    def show_decision_tree_prediction_path(self, sample, is_weighted=False):
        """Visual interpretation of prediction path.
        Show only the prediction path from a decision tree, instead of the whole tree.
        It helps to easily understand and follow the prediction path.
        The blue nodes are the nodes from the prediction path; the black nodes
        are the siblings of the blue nodes.
        This kind of visualisation is very useful for debugging and understanding tree predictions.
        Also it is useful to explain to non technical people the reason behind tree predictions.
        :param is_weighted: boolean
            Whether or not to include weighted number of training samples reaching node i.
        :param sample: array of double, shape[features]
            The array of features values
        :return: graphviz.files.Source
        """
        logging.info(f"Make a prediction for sample {sample}")
        # decision_path returns a sparse indicator matrix; the slice below
        # extracts the node ids visited by the single sample (row 0).
        node_indicator = self.tree.decision_path([sample])
        decision_node_path = node_indicator.indices[node_indicator.indptr[0]:
                                                    node_indicator.indptr[1]]
        logging.info(f"decision path {decision_node_path}")
        g_tree = pgv.AGraph(strict=False, directed=True)
        g_tree.layout(prog='dot')
        # NOTE(review): decision_node_path[i + 1] below is safe because a
        # decision path always ends at a leaf, whose children are -1, so the
        # i + 1 access is never reached for the last path node — confirm.
        for i in range(0, len(decision_node_path)):
            node_id = decision_node_path[i]
            node_label = self._get_node_path_info(node_id, sample, is_weighted)
            logging.debug(f"adding node id {node_id} with label {node_label}")
            # Path nodes are drawn in blue.
            g_tree.add_node(node_id, color="blue", label=node_label, fontsize=10, center=True, shape="ellipse")
            # check if node_id is not a leaf
            if self.children_left[node_id] != -1:
                g_tree.add_edge(node_id, self.children_left[node_id])
                # check if children_left[node_id] is not from the path and plot the node with black (neighbor node)
                if self.children_left[node_id] != decision_node_path[i + 1]:
                    left_node_id = self.children_left[node_id]
                    g_tree.add_node(left_node_id, label=self._get_node_path_info(left_node_id, sample, is_weighted),
                                    fontsize=10,
                                    center=True, shape="ellipse")
            # check if node_id is not a leaf
            if self.children_right[node_id] != -1:
                g_tree.add_edge(node_id, self.children_right[node_id])
                # check if children_right[node_id] is not from the path and plot the node with black (neighbor node)
                if self.children_right[node_id] != decision_node_path[i + 1]:
                    right_node_id = self.children_right[node_id]
                    g_tree.add_node(right_node_id, label=self._get_node_path_info(right_node_id, sample, is_weighted),
                                    fontsize=10,
                                    center=True, shape="ellipse")
        return graphviz.Source(g_tree.string())
def _calculate_split_node_samples(self, dataset_training):
decision_paths = self.tree.decision_path(dataset_training[self.features]).toarray()
logging.info(f"decision paths {decision_paths} ")
for index in dataset_training.index.values:
decision_node_path = np.nonzero(decision_paths[index])[0]
for node_id in decision_node_path:
try:
self.split_node_samples[node_id].append(index)
except KeyError as ex:
self.split_node_samples[node_id] = [index]
def get_node_samples(self, node_id, return_type="plain"):
"""Create a dataframe containing all training samples reaching node_id.
:param return_type: str
Specify different types of outputs:
'plain' to return all samples from the training set as a dataframe
'describe' to generate statistics that summarize the samples
'describe_by_class' to generate statistics that summarize th samples, but by class target variable
:param node_id: int
The id of node_id
:return: pandas.DataFrame
"""
if len(self.split_node_samples) == 0:
self._calculate_split_node_samples(self.train_dataset)
# print(self.split_node_samples)
output = self.train_dataset.iloc[self.split_node_samples[node_id]][self.features + [self.target]]. \
sort_values(by=self.target)
if return_type == "plain":
return output
elif return_type == "describe":
return output.describe()
elif return_type == "describe_by_class":
return output.groupby(self.target).describe().transpose()
def show_leaf_samples_distribution(self, min_samples=0, max_samples=sys.maxsize, bins=10, figsize=None):
""" Visualize distribution of leaves samples.
:param bins: int
Number of bins of histograms
:param figsize: tuple of int
The figure size to be displayed
"""
self._calculate_leaf_nodes()
if figsize:
plt.figure(figsize=figsize)
plt.hist([self.n_node_samples[i] for i in range(0, self.node_count) if
self.is_leaf[i] and self.n_node_samples[i] >= min_samples and self.n_node_samples[i] <= max_samples],
bins=bins)
plt.xlabel("leaf sample", fontsize=20)
plt.ylabel("leaf count", fontsize=20)
def show_leaf_samples(self, figsize=(10, 5), display_type="plot"):
"""Show number of training samples from each leaf.
If display_type = 'plot' it will show leaves samples using | |
# <gh_stars>1-10
# -*- coding: utf-8 -*-
import sys, os
import subprocess
import datetime, time
import argparse
import cairo
from math import pi, radians, sin, cos, asin, sqrt
from multiprocessing import Process, Queue
# NOTE(review): these two module-level names are never read again in the
# visible code (args.gpxfile / args.outputfolder are used instead); they look
# like dead leftovers — confirm before removing.
gpxfile = None
output = None
# Command-line interface: mandatory input GPX file, optional output folder.
parser = argparse.ArgumentParser()
parser.add_argument("-g", "--gpx", dest="gpxfile",
                    help="the input GPX file to convert into images",
                    required=True)
parser.add_argument("-o", "--output",
                    dest="outputfolder", default='./images',
                    help="the ouput folder, images will be created inside")
args = parser.parse_args()
## Convert the GPX file to unicsv rows with gpsbabel, interpolating
## course and speed along the track.
p = subprocess.Popen(['gpsbabel', '-t', '-i', 'gpx', '-f', args.gpxfile, '-o', 'unicsv', '-x', 'track,course,speed', '-F', '/dev/stdout'],
                     stdout=subprocess.PIPE,
                     stderr=subprocess.PIPE)
out, err = p.communicate()
# Bug fix: communicate() returns bytes on Python 3, so out.split('\n') raised
# TypeError (str pattern on bytes).  Decode before splitting; on Python 2 the
# decode is a harmless str -> unicode conversion.
out = out.decode('utf-8', errors='replace').split('\n')
out = out[1:]  ## the first line is the header (No,Latitude,Longitude,Altitude,Speed,Course,Date,Time)
## Collect the speed for every second of the track.
## The GPS does not record every second, so gaps are filled with averages.
datas = [] # each entry: {'datetime', 'speed', 'lat', 'lon', 'elevation'}
average_speed = 0
track_length = 0
total_time = 0
# Previous (1) / current (2) point state used while walking the CSV rows.
t1 = None
t2 = None
v1 = None
v2 = None
lat1 = None
lat2 = None
lon1 = None
lon2 = None
elevation1 = None
elevation2 = None
# Running min/max of every measured quantity (None until the first row).
vmax = None
vmin = None
tmin = None
tmax = None
latmin = None
latmax = None
lonmin = None
lonmax = None
elevationmin = None
elevationmax = None
for line in out:
    line = line.replace('\n', '').replace('\r', '')
    split = line.split(',')
    # A valid unicsv row has exactly 8 columns; skip anything else.
    if not len(split) == 8:
        continue
    speed = float(split[4]) * 3.6 ## convert the speed to km/h
    t = datetime.datetime.strptime('%s %s' % (split[6], split[7]) , "%Y/%m/%d %H:%M:%S")
    lat = float(split[1])
    lon = float(split[2])
    elevation = float(split[3])
    # NOTE(review): the `if not <var>` tests below also match a legitimate
    # value of 0 (e.g. speed 0 km/h) and re-seed the min/max in that case —
    # confirm whether `is None` was intended.
    if not vmax or vmax < speed:
        vmax = speed
    if not vmin or vmin > speed:
        vmin = speed
    if not tmax or tmax < t:
        tmax = t
    if not tmin or tmin > t:
        tmin = t
    if not latmax or latmax < lat:
        latmax = lat
    if not latmin or latmin > lat:
        latmin = lat
    if not lonmax or lonmax < lon:
        lonmax = lon
    if not lonmin or lonmin > lon:
        lonmin = lon
    if not elevationmax or elevationmax < elevation:
        elevationmax = elevation
    if not elevationmin or elevationmin > elevation:
        elevationmin = elevation
    if not t1:
        # Very first point: just record it.
        t1 = t
        v1 = speed
        lon1 = lon
        lat1 = lat
        elevation1 = elevation
        datas.append( {'datetime': t, 'speed': speed, 'lon': lon, 'lat': lat, 'elevation': elevation } )
    else:
        t2 = t
        v2 = speed
        lon2 = lon
        lat2 = lat
        elevation2 = elevation
        ## Fill in the seconds missing between the previous and current points.
        start = int(time.mktime(t1.timetuple())) + 1
        for i in range(start, int(time.mktime(t2.timetuple()))):
            _t = t1 + datetime.timedelta(seconds=i-start+1)
            _v = (v1 + v2) / 2.
            _lat = (lat1 + lat2) / 2.
            _lon = (lon1 + lon2) / 2.
            _elevation = (elevation1 + elevation2) / 2.
            datas.append( {'datetime': _t, 'speed': _v, 'lon': _lon, 'lat': _lat, 'elevation': _elevation} ) ## a weighted average would be more precise here; with many missing seconds the simple average is rough
        datas.append({'datetime': t, 'speed': speed, 'lon': lon, 'lat': lat, 'elevation': elevation})
        # Shift the window: the current point becomes the previous one.
        t1 = t2
        t2 = None
        v1 = v2
        v2 = None
        lon1 = lon2
        lon2 = None
        lat1 = lat2
        lat2 = None
        elevation1 = elevation2
        elevation2 = None
# Total elapsed time between the first and last track points.
total_time = datas[-1]['datetime'] - datas[0]['datetime']
# Round the maximum elevation up to the next multiple of 100 so the elevation
# graph gets a clean upper bound.  (The original comment said 500, but the
# code has always rounded to a multiple of 100.)
elevationmax = int(elevationmax + 1)
while elevationmax % 100 > 0:
    elevationmax += 1
## Cumulative elevation gain (d+) and loss (d-) along the track.
elevationgain = 0
elevationloss = 0
elevation_prev = None
for item in datas:
    if elevation_prev is None:
        # Bug fix: the original test `if not elevation_prev` also matched a
        # legitimate elevation of 0.0 (sea level), silently restarting the
        # accumulation at every zero-elevation sample.
        elevation_prev = item['elevation']
    else:
        if elevation_prev < item['elevation']:
            elevationgain += item['elevation'] - elevation_prev
        else:
            elevationloss += elevation_prev - item['elevation']
        elevation_prev = item['elevation']
# Output frame dimensions in pixels.
WIDTH = 800
HEIGHT = 260
def calc_distance(lat1, lon1, lat2, lon2):
    """Great-circle distance in kilometres between two points.

    Uses the haversine formula on a spherical Earth of radius 6371 km.

    :param lat1, lon1: first point in decimal degrees
    :param lat2, lon2: second point in decimal degrees
    :return: distance in kilometres (float)
    """
    phi1 = radians(lat1)
    phi2 = radians(lat2)
    dphi = phi2 - phi1
    dlambda = radians(lon2) - radians(lon1)
    # haversine of the central angle
    h = sin(dphi / 2) ** 2 + cos(phi1) * cos(phi2) * sin(dlambda / 2) ** 2
    return 6371 * (2 * asin(sqrt(h)))
#################TRACK##################
# Circular "track" widget: a disc of MODULE_DIMENSION_TRACK pixels showing
# the whole path coloured by speed.
MODULE_DIMENSION_TRACK = 225
TRACK_OFFSET_X = MODULE_DIMENSION_TRACK/2
TRACK_OFFSET_Y = MODULE_DIMENSION_TRACK/2
radius = MODULE_DIMENSION_TRACK/2
## Geographic extent of the track, in km, along each axis; the larger of the
## two drives the GPS-degrees -> screen-pixels scale factor.
latdiff = calc_distance(latmin, lonmin, latmax, lonmin)
londiff = calc_distance(latmin, lonmin, latmin, lonmax)
maxdiff = max(latdiff, londiff)
## Scale so the track fits inside the disc (the 1.2 factor leaves a margin).
scale = radius*1.2 / maxdiff
## Height in pixels of the drawn map, used to flip the y axis when plotting.
## (Previously recomputed calc_distance(latmin, lonmin, latmax, lonmin),
## which is exactly latdiff.)
trackHeight = latdiff * scale
def build_track(item, ctx):
    """Draw the circular track widget: the whole path coloured by speed, plus
    a blue dot at the current point *item*.

    :param item: dict with at least 'lat' and 'lon' for the current sample
    :param ctx: cairo drawing context
    """
    x_start = None
    y_start = None
    # Background
    ctx.set_source_rgba(0, 0, 0, 0.3)
    ctx.arc (TRACK_OFFSET_X, TRACK_OFFSET_Y, radius, 0, 2*pi)
    ctx.fill()
    # Border
    ctx.set_line_width(2)
    ctx.set_source_rgb(1,1,1)
    ctx.arc (TRACK_OFFSET_X, TRACK_OFFSET_Y, radius, 0, 2*pi)
    ctx.stroke()
    for data in datas:
        # Project the GPS point into widget pixels (y grows downwards).
        dist = calc_distance(latmin, lonmin, data['lat'], lonmin)
        y = trackHeight - (dist * scale) + MODULE_DIMENSION_TRACK/4
        dist = calc_distance(latmin, lonmin, latmin, data['lon'])
        x = dist * scale + MODULE_DIMENSION_TRACK/4
        if x_start:
            # NOTE(review): assumes data['speed_color'] (an RGB tuple in
            # 0-255) was attached to every sample outside this function.
            ctx.set_source_rgb(data['speed_color'][0] / 255., data['speed_color'][1] / 255., data['speed_color'][2] / 255.)
            ctx.set_line_width(3)
            ctx.move_to(x_start, y_start)
            ctx.line_to(x, y)
            ctx.stroke()
            ctx.fill()
        x_start = x
        y_start = y
    ## Draw the current position marker.
    dist = calc_distance(latmin, lonmin, item['lat'], lonmin)
    y = trackHeight - (dist * scale) + MODULE_DIMENSION_TRACK/4
    dist = calc_distance(latmin, lonmin, latmin, item['lon'])
    # Bug fix: this line used MODULE_DIMENSION (the info-widget constant,
    # defined later in the file) instead of MODULE_DIMENSION_TRACK; both
    # happen to be 225 today, but the offset must follow this widget's own
    # dimension.
    x = dist * scale + MODULE_DIMENSION_TRACK/4
    ctx.set_source_rgb(0/255., 0/255., 255/255.)
    ctx.arc(x, y, 5, 0.0, 2.0 * pi)
    ctx.fill()
#################INFO##################
# Text panel showing distance / average speed / elevation gain.
MODULE_DIMENSION = 225
INFO_OFFSET_X = 210
INFO_OFFSET_Y = 10
INFO_WIDTH = 150
# The info panel is never shorter than the standard module height.
INFO_HEIGHT = max(90, MODULE_DIMENSION)
def build_info(item, ctx):
    """Draw the translucent info panel: total distance, average speed and
    cumulative elevation gain, stacked vertically.

    :param item: current sample (unused; kept for the common builder signature)
    :param ctx: cairo drawing context
    """
    # Panel background
    ctx.set_source_rgba(1, 1, 1, 0.8)
    ctx.rectangle (INFO_OFFSET_X, INFO_OFFSET_Y - 5, INFO_WIDTH, INFO_HEIGHT + 10)
    ctx.fill()
    # One font setup serves all three lines (the original repeated it
    # verbatim before each line).
    ctx.set_source_rgb(0.0, 0.0, 0.0)
    ctx.select_font_face("Sans",
        cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_BOLD)
    ctx.set_font_size(20)
    lines = ['%0.2f km' % track_length,
             '%0.2f km/h' % average_speed,
             '%0.0f m d+' % elevationgain]
    # Each line sits 15px plus the accumulated text heights below the top,
    # exactly reproducing the original hand-unrolled positions.
    total_height = 0
    for index, text in enumerate(lines):
        x_bearing, y_bearing, width, height = ctx.text_extents(text)[:4]
        total_height += height
        ctx.move_to(INFO_OFFSET_X + 5, INFO_OFFSET_Y + 5 + 15 * index + total_height)
        ctx.show_text(text)
#################SPEED##################
# Circular speed-gauge widget: a disc centred in its own module square.
MODULE_DIMENSION_SPEED = 225
SPEED_OFFSET_X = MODULE_DIMENSION_SPEED/2
SPEED_OFFSET_Y = MODULE_DIMENSION_SPEED/2
def build_speed(item, ctx):
    """Draw the circular speed gauge, filled proportionally to item['speed'].

    :param item: dict with 'speed' (km/h) and 'speed_color' (0-255 RGB tuple)
    :param ctx: cairo drawing context
    """
    # Map speed onto an arc sweep: 0..vmax becomes 100..369 degrees (260
    # possible values; +100 because the minimum sweep angle is 100).
    sweep_deg = int(269 * (item['speed']/vmax)) + 100
    end_angle = sweep_deg * (pi/180.0)
    start_angle = (180 - sweep_deg) * (pi/180.0)
    gauge_radius = MODULE_DIMENSION_SPEED/2
    # Background disc
    ctx.set_source_rgba(0, 0, 0, 0.3)
    ctx.arc(SPEED_OFFSET_X, SPEED_OFFSET_Y, gauge_radius, 0, 2*pi)
    ctx.fill()
    # White border
    ctx.set_line_width(3)
    ctx.set_source_rgb(1, 1, 1)
    ctx.new_sub_path()
    ctx.arc(SPEED_OFFSET_X, SPEED_OFFSET_Y, gauge_radius, 0, 2*pi)
    ctx.close_path()
    ctx.stroke()
    # Coloured pie slice proportional to the current speed.
    ctx.new_path()
    ctx.set_source_rgba(item['speed_color'][0] / 255., item['speed_color'][1] / 255., item['speed_color'][2] / 255., 0.7)
    ctx.arc(SPEED_OFFSET_X, SPEED_OFFSET_Y, gauge_radius, start_angle, end_angle)
    ctx.close_path()
    ctx.fill()
def label1_speed(item, ctx, labelSpeed):
    """Print the numeric speed value centred in the gauge.

    :param item: dict with 'speed' (km/h)
    :param ctx: cairo drawing context
    :param labelSpeed: unused; kept for the common label signature
    :return: the (x_bearing, y_bearing, width, height) extents of the text
    """
    ctx.set_source_rgb(1,1,1)
    ctx.select_font_face("Sans", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_BOLD)
    ctx.set_font_size(38)
    text = '%0.1f' % item['speed']
    extents = ctx.text_extents(text)[:4]
    x_bearing, y_bearing, width, height = extents
    # Centre the text on the gauge middle, compensating for the bearings.
    ctx.move_to(SPEED_OFFSET_X-x_bearing-width/2, SPEED_OFFSET_Y-y_bearing-height/2)
    ctx.show_text(text)
    return extents
def label2_speed(item, ctx, labelSpeed):
    """Print the 'km/h' unit just right of the value drawn by label1_speed.

    :param item: current sample (unused here)
    :param ctx: cairo drawing context
    :param labelSpeed: extents tuple returned by label1_speed
    """
    tabulation = 10
    ctx.set_source_rgb(1,1,1)
    ctx.select_font_face("Sans", cairo.FONT_SLANT_NORMAL)
    ctx.set_font_size(18)
    # Position at the right edge of the numeric label, plus a small gap.
    unit_x = (SPEED_OFFSET_X-labelSpeed[0]-labelSpeed[2]/2) + labelSpeed[2] + tabulation
    unit_y = SPEED_OFFSET_Y-labelSpeed[1]-labelSpeed[3]/2
    ctx.move_to(unit_x, unit_y)
    ctx.show_text('km/h')
#################ELEVATION##################
# Rounded-rectangle elevation-profile widget.
MODULE_ELEVATION_WIDTH = 675
MODULE_ELEVATION_HEIGHT = 225
# Corner radius of the rounded rectangle.
RADIUS = MODULE_ELEVATION_HEIGHT / 10
ELEVATION_OFFSET_X = 20
ELEVATION_OFFSET_Y = 10
# Inner drawing area, inset by the offset on each side.
ELEVATION_WIDTH = MODULE_ELEVATION_WIDTH - 2 * ELEVATION_OFFSET_X
ELEVATION_HEIGHT = MODULE_ELEVATION_HEIGHT - 2 * ELEVATION_OFFSET_Y
def build_elevation(item, ctx):
# Background
ctx.set_source_rgba(0, 0, 0, 0.3)
ctx.new_sub_path ()
ctx.arc (MODULE_ELEVATION_WIDTH - RADIUS, RADIUS, RADIUS, -pi/2, 0 )
ctx.arc (MODULE_ELEVATION_WIDTH - RADIUS, MODULE_ELEVATION_HEIGHT - RADIUS, RADIUS, 0, pi/2 )
ctx.arc (RADIUS, MODULE_ELEVATION_HEIGHT - RADIUS, RADIUS, pi/2, pi )
ctx.arc (RADIUS, RADIUS, RADIUS, pi, 3*pi/2 )
ctx.close_path ()
ctx.fill()
# Border
ctx.set_line_width(3)
ctx.set_source_rgb(1,1,1)
ctx.arc (MODULE_ELEVATION_WIDTH - RADIUS, RADIUS, RADIUS, -pi/2, 0 )
ctx.arc (MODULE_ELEVATION_WIDTH - RADIUS, MODULE_ELEVATION_HEIGHT - RADIUS, RADIUS, 0, pi/2 )
ctx.arc (RADIUS, MODULE_ELEVATION_HEIGHT - RADIUS, RADIUS, pi/2, pi )
ctx.arc (RADIUS, RADIUS, RADIUS, pi, 3*pi/2 )
ctx.close_path ()
ctx.stroke()
#on doit afficher total_time.total_seconds() sur MODULE_ELEVATION_WIDTH pixels
##on calcul le coef de mise à l'echelle, puis pour chaque pixel on affiche le dénivelé au temps correspondant
scale_x = total_time.total_seconds() / float(ELEVATION_WIDTH)
for px in range(0, ELEVATION_WIDTH):
d = datas[0]['datetime'] + | |
# <reponame>alvarolgn/toolium
# -*- coding: utf-8 -*-
"""
Copyright 2022 Telefónica Investigación y Desarrollo, S.A.U.
This file is part of Toolium.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import time
from selenium.common.exceptions import NoSuchElementException, TimeoutException, StaleElementReferenceException
from selenium.webdriver.support.ui import WebDriverWait
class WaitUtils(object):
def __init__(self, driver_wrapper=None):
"""Initialize WaitUtils instance
:param driver_wrapper: driver wrapper instance
"""
from toolium.driver_wrappers_pool import DriverWrappersPool
self.driver_wrapper = driver_wrapper if driver_wrapper else DriverWrappersPool.get_default_wrapper()
# Configure logger
self.logger = logging.getLogger(__name__)
def get_implicitly_wait(self):
"""Read implicitly timeout from configuration properties"""
return self.driver_wrapper.config.get_optional('Driver', 'implicitly_wait')
def set_implicitly_wait(self):
"""Read implicitly timeout from configuration properties and configure driver implicitly wait"""
implicitly_wait = self.get_implicitly_wait()
if implicitly_wait:
self.driver_wrapper.driver.implicitly_wait(implicitly_wait)
def get_explicitly_wait(self):
"""Read explicitly timeout from configuration properties
:returns: configured explicitly timeout (default timeout 10 seconds)
"""
return int(self.driver_wrapper.config.get_optional('Driver', 'explicitly_wait', '10'))
def _expected_condition_find_element(self, element):
"""Tries to find the element, but does not thrown an exception if the element is not found
:param element: PageElement or element locator as a tuple (locator_type, locator_value) to be found
:returns: the web element if it has been found or False
:rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
"""
from toolium.pageelements.page_element import PageElement
web_element = False
try:
if isinstance(element, PageElement):
# Use _find_web_element() instead of web_element to avoid logging error message
element._web_element = None
element._find_web_element()
web_element = element._web_element
elif isinstance(element, tuple):
web_element = self.driver_wrapper.driver.find_element(*element)
except NoSuchElementException:
pass
return web_element
def _expected_condition_find_element_visible(self, element):
"""Tries to find the element and checks that it is visible, but does not thrown an exception if the element is
not found
:param element: PageElement or element locator as a tuple (locator_type, locator_value) to be found
:returns: the web element if it is visible or False
:rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
"""
web_element = self._expected_condition_find_element(element)
try:
return web_element if web_element and web_element.is_displayed() else False
except StaleElementReferenceException:
return False
def _expected_condition_find_element_not_visible(self, element):
"""Tries to find the element and checks that it is visible, but does not thrown an exception if the element is
not found
:param element: PageElement or element locator as a tuple (locator_type, locator_value) to be found
:returns: True if the web element is not found or it is not visible
"""
web_element = self._expected_condition_find_element(element)
try:
return True if not web_element or not web_element.is_displayed() else False
except StaleElementReferenceException:
return False
def _expected_condition_find_first_element(self, elements):
"""Try to find sequentially the elements of the list and return the first element found
:param elements: list of PageElements or element locators as a tuple (locator_type, locator_value) to be found
sequentially
:returns: first element found or None
:rtype: toolium.pageelements.PageElement or tuple
"""
from toolium.pageelements.page_element import PageElement
element_found = None
for element in elements:
try:
if isinstance(element, PageElement):
element._web_element = None
element._find_web_element()
else:
self.driver_wrapper.driver.find_element(*element)
element_found = element
break
except (NoSuchElementException, TypeError):
pass
return element_found
def _expected_condition_find_element_clickable(self, element):
"""Tries to find the element and checks that it is clickable, but does not thrown an exception if the element
is not found
:param element: PageElement or element locator as a tuple (locator_type, locator_value) to be found
:returns: the web element if it is clickable or False
:rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
"""
web_element = self._expected_condition_find_element_visible(element)
try:
return web_element if web_element and web_element.is_enabled() else False
except StaleElementReferenceException:
return False
def _expected_condition_find_element_stopped(self, element_times):
"""Tries to find the element and checks that it has stopped moving, but does not thrown an exception if the
element is not found
:param element_times: Tuple with 2 items where:
[0] element: PageElement or element locator as a tuple (locator_type, locator_value) to be found
[1] times: number of iterations checking the element's location that must be the same for all of them
in order to considering the element has stopped
:returns: the web element if it is clickable or False
:rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
"""
element, times = element_times
web_element = self._expected_condition_find_element(element)
try:
locations_list = [tuple(web_element.location.values()) for i in range(int(times)) if not time.sleep(0.001)]
return web_element if set(locations_list) == set(locations_list[-1:]) else False
except StaleElementReferenceException:
return False
def _expected_condition_find_element_containing_text(self, element_text_pair):
"""Tries to find the element and checks that it contains the specified text, but does not thrown an exception if
the element is not found
:param element_text_pair: Tuple with 2 items where:
[0] element: PageElement or element locator as a tuple (locator_type, locator_value) to be found
[1] text: text to be contained into the element
:returns: the web element if it contains the text or False
:rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
"""
element, text = element_text_pair
web_element = self._expected_condition_find_element(element)
try:
return web_element if web_element and text in web_element.text else False
except StaleElementReferenceException:
return False
def _expected_condition_find_element_not_containing_text(self, element_text_pair):
"""Tries to find the element and checks that it does not contain the specified text,
but does not thrown an exception if the element is found
:param element_text_pair: Tuple with 2 items where:
[0] element: PageElement or element locator as a tuple (locator_type, locator_value) to be found
[1] text: text to not be contained into the element
:returns: the web element if it does not contain the text or False
:rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
"""
element, text = element_text_pair
web_element = self._expected_condition_find_element(element)
try:
return web_element if web_element and text not in web_element.text else False
except StaleElementReferenceException:
return False
def _expected_condition_value_in_element_attribute(self, element_attribute_value):
"""Tries to find the element and checks that it contains the requested attribute with the expected value,
but does not thrown an exception if the element is not found
:param element_attribute_value: Tuple with 3 items where:
[0] element: PageElement or element locator as a tuple (locator_type, locator_value) to be found
[1] attribute: element's attribute where to check its value
[2] value: expected value for the element's attribute
:returns: the web element if it contains the expected value for the requested attribute or False
:rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
"""
element, attribute, value = element_attribute_value
web_element = self._expected_condition_find_element(element)
try:
return web_element if web_element and web_element.get_attribute(attribute) == value else False
except StaleElementReferenceException:
return False
def _expected_condition_ajax_request_completed(self, element):
"""Load all ajax request
:returns: the ajax request is completed
"""
return self.driver_wrapper.driver.execute_script("return jQuery.active == 0")
    def _wait_until(self, condition_method, condition_input, timeout=None):
        """
        Common method to wait until condition met

        :param condition_method: method to check the condition
        :param condition_input: parameter that will be passed to the condition method
        :param timeout: max time to wait
        :returns: condition method response
        :raises selenium.common.exceptions.TimeoutException: if the condition
            is still unmet when the timeout expires
        """
        # Remove implicitly wait timeout so the explicit polling below is not
        # slowed down by the driver-level implicit wait on each find call.
        implicitly_wait = self.get_implicitly_wait()
        if implicitly_wait != 0:
            self.driver_wrapper.driver.implicitly_wait(0)
        try:
            # Get explicitly wait timeout
            timeout = timeout if timeout else self.get_explicitly_wait()
            # Wait for condition
            condition_response = WebDriverWait(self.driver_wrapper.driver, timeout).until(
                lambda s: condition_method(condition_input))
        finally:
            # Restore implicitly wait timeout from properties, even when the
            # wait above times out or the condition raises.
            if implicitly_wait != 0:
                self.set_implicitly_wait()
        return condition_response
def wait_until_element_present(self, element, timeout=None):
"""Search element and wait until it is found
:param element: PageElement or element locator as a tuple (locator_type, locator_value) to be found
:param timeout: max time to wait
:returns: the web element if it is present
:rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
:raises TimeoutException: If the element is not found after the timeout
"""
return self._wait_until(self._expected_condition_find_element, element, timeout)
def wait_until_element_visible(self, element, timeout=None):
"""Search element and wait until it is visible
:param element: PageElement or element locator as a tuple (locator_type, locator_value) to be found
:param timeout: max time to wait
:returns: the web element if it is visible
:rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
:raises TimeoutException: If the element is still not visible after the timeout
"""
return self._wait_until(self._expected_condition_find_element_visible, element, timeout)
def wait_until_element_not_visible(self, element, timeout=None):
"""Search element and wait until it is not visible
:param element: PageElement or element locator as a tuple (locator_type, locator_value) to be found
:param timeout: max time to wait
:returns: the web element if it exists but is not visible
:rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
:raises TimeoutException: If the element is still visible after | |
#!/usr/bin/python3
# First choice pack and unpack into sqlite
# <NAME> 2021
try:
import sys
except:
print("Please install the sys module")
print("\tit should be part of the standard python3 distribution")
raise
try:
import struct
except:
print("Please install the struct module")
print("\tit should be part of the standard python3 distribution")
raise
if __name__ == "__main__":
try:
import signal
except:
print("Please install the signal module")
print("\tit should be part of the standard python3 distribution")
raise
try:
import argparse # for parsing the command line
except:
print("Please install the argparse module")
print("\tit should be part of the standard python3 distribution")
raise
try:
import textwrap
except:
print("Please install the textwrap module")
print("\tit should be part of the standard python3 distribution")
raise
import common
# Size in bytes of one First Choice file block.
BLOCKSIZE = 128
# Command-line flag states; presumably overwritten by the argument parser
# when run as a script — TODO confirm against the argparse wiring below.
ArgVerbose = 0
ArgFields = 0
ArgData = 0
ArgBlocks = 0
class ScreenLoc:
    """Track a cursor position on a 78-column screen while laying out fields.

    Coordinates are 1-based; writing text advances x along the line, and each
    newline moves to the next row and resets x to column 1.
    """

    def __init__(self):
        # Current cursor position.
        self._x, self._y = 1, 1
        # Position right after the most recent field's label colon.
        self._fx, self._fy = 1, 1
        # Length, in screen cells, of the most recent field's value area.
        self._flength = 0

    def _string(self, text):
        """Advance the cursor as if *text* had been written at it."""
        if text is None:
            text = ''
        lines = text.split('\n')
        if len(lines) > 1:
            # Multi-line text: drop to a new row, restart at column 1.
            self._y += len(lines) - 1
            self._x = 1
        self._x += len(lines[-1])

    def field(self, text, ftext, text2):
        """Lay out leading text, a 'label:' marker and the field body."""
        self._string(text)
        self._string(ftext + ':')
        self._fx, self._fy = self._x, self._y
        self._string(text2)
        # Every extra row contributes a full 78-column line to the length.
        self._flength = self._x - self._fx + 78 * (self._y - self._fy)

    @property
    def location(self):
        return self._x, self._y

    @property
    def flocation(self):
        return self._fx, self._fy

    @property
    def flength(self):
        return self._flength
class DataField(textwrap.TextWrapper):
    """Wrap user text into the fixed on-screen template of one form field.

    Extends textwrap.TextWrapper with a per-field line-length "template"
    derived from the field's length and (x, y) start position on a 78-column
    screen.  Presumably this mirrors how First Choice (.FOL) stores field
    data — TODO confirm against the file-format notes.
    """
    # Most recently constructed DataField.  Used to retro-fit the previous
    # field once a newer one exists (only the last field is "final").
    prior = None
    # one per field
    def __init__( self, field_dict):
        # convert text into FOL format data field
        # :param field_dict: dict with 'length' (total field size in cells)
        #     and 'location' ((startx, starty), 1-based screen position).
        #     NOTE(review): starty is unpacked but never used here.
        length = field_dict['length']
        startx, starty = field_dict['location']
        # compute line lengths of template
        # First line runs from startx to column 78 inclusive, unless the
        # whole field fits on that line.
        first = 79 - startx
        if first > length:
            first = length
            self.template = [first]
            self.midtemplate = [first]
        else:
            # Remaining cells fill full 78-column rows plus a partial last row.
            # midtemplate always keeps the (possibly zero-length) last row;
            # template drops it when it is empty.
            last = (length - first) % 78
            self.midtemplate = [first] + [78 for x in range( (length-first) // 78)] + [last]
            self.template = [first] + [78 for x in range( (length-first) // 78)] + [last for x in [1] if last > 0]
        # Easy way to tell last object created
        prior = type(self).prior
        if prior is not None:
            prior._final = False
            prior.template = prior.midtemplate
        type(self).prior = self
        self._final = True
        super().__init__(width=78, replace_whitespace=True, drop_whitespace=True, initial_indent=' '*(startx-1) )
    def PadLines( self, stringlist ):
        # Force stringlist to exactly as many lines as the template,
        # truncating extras or appending empty lines in place.
        t = len(self.template)
        s = len(stringlist)
        if t < s:
            del stringlist[t:]
        else:
            stringlist += ['']*(t-s)
    def LastLine( self, stringlist ):
        # Clip the last line to the template's last-row capacity.
        if len(stringlist[-1]) > self.template[-1]:
            stringlist[-1] = stringlist[-1][:self.template[-1]]
    def SpaceOut( self, stringlist ):
        # last field only
        # Pad a non-empty last line with spaces up to the row capacity.
        l = len(stringlist[-1])
        if l > 0 :
            stringlist[-1] += ' '*(self.template[-1]-l)
    def FitOld( self, stringlist ):
        # Return True when the already-split lines fit the template as-is,
        # so no re-wrap is needed.
        if max([len(s) for s in stringlist]) > 78:
            return False
        if not self._final and len(stringlist) > len(self.template):
            return False
        for l,t in zip(stringlist,self.template):
            if len(l) > t:
                return False
        return True
    def Parse( self, inputstring ):
        # Normalize *inputstring* and fit it into this field's template,
        # returning the newline-joined screen text.
        # clean up input
        #strip ends
        ss = inputstring.strip()
        #strip trailing space on each line
        while ss.count(' \n')>0:
            ss=ss.replace(' \n','\n')
        # add initial space
        ss = ' '+ss
        # split lines
        sl = (ss).split('\n')
        if not self.FitOld(sl):
            sl = self.wrap(ss)
        # get rid of fake indent that takes place of field name
        sl[0] = ' '+sl[0].lstrip()
        if not self._final:
            self.PadLines( sl )
            self.LastLine( sl )
        else:
            # Final field: leave trailing room instead of a fixed template.
            sl += ['\n','\n']
            self.SpaceOut( sl )
        return '\n'.join(sl)
def hexdump(block):
    """Hex-dump `block` one BLOCKSIZE (128-byte) line at a time."""
    # Iterative form of the original tail recursion: emit full 128-byte
    # lines, then the final (possibly empty) remainder.
    while len(block) > 128:
        hexdumpall(block[:128])
        block = block[128:]
    hexdumpall(block)
def hexdumpall(block):
    """Print one line of space-separated hex bytes, compressing a run of
    trailing zero bytes into '00 * N'."""
    data = bytes(block)
    stripped = data.rstrip(b'\x00')
    trailing = len(data) - len(stripped)
    out = ''.join('%02x ' % byte for byte in stripped)
    if trailing > 0:
        out += '{:02x} * {}'.format(0x00, trailing)
    sys.stdout.write(out)
    sys.stdout.write('\n')
class HtmlState:
    """Base class for nested HTML formatting tags.

    Subclasses define `tag` (the element name, e.g. 'b') and `next_tag`,
    which chains the classes into a fixed nesting priority
    (Bold > Italic > Underline > Sup > Sub).  On/Off emit opening/closing
    tags into an HtmlString `hs`; lower-priority open tags are temporarily
    paused and resumed so the emitted HTML stays properly nested.
    """
    next_tag = None
    @classmethod
    def Set( cls, hs, state ):
        # state == 0 closes the tag; any other value opens it.
        if state == 0:
            cls.Off(hs)
        else:
            cls.On(hs)
    @classmethod
    def On( cls, hs ):
        if not hs.state( cls ):
            # need to change state
            # turn off lower temporarily
            if cls.next_tag is not None:
                cls.next_tag.Pause(hs)
            # change state
            hs.append( "<"+cls.tag+">" )
            hs.On( cls )
            # restore lower
            if cls.next_tag is not None:
                cls.next_tag.Resume(hs)
    @classmethod
    def Off( cls, hs ):
        if hs.state( cls ):
            # need to change state
            # turn off lower temporarily
            if cls.next_tag is not None:
                cls.next_tag.Pause(hs)
            # change state
            hs.append("</"+cls.tag+">")
            hs.Off( cls )
            # restore lower
            if cls.next_tag is not None:
                cls.next_tag.Resume(hs)
    @classmethod
    def Pause( cls, hs ):
        # Emit closing tags for me and everything below me, WITHOUT
        # changing the recorded state (so Resume can reopen them).
        # first pause lower
        if cls.next_tag is not None:
            cls.next_tag.Pause( hs )
        # now pause me
        if hs.state( cls ):
            hs.append("</"+cls.tag+">")
    @classmethod
    def Resume( cls, hs ):
        # Reopen every tag that Pause temporarily closed.
        # restore my state
        if hs.state( cls ):
            hs.append( "<"+cls.tag+">" )
        # restore all lower states
        if cls.next_tag is not None:
            cls.next_tag.Resume( hs )
# Concrete formatting tags.  `next_tag` chains them into the fixed nesting
# priority used by HtmlState: Bold > Italic > Underline > Sup > Sub.
class Sub(HtmlState):
    tag = 'sub'
    next_tag = None
class Sup(HtmlState):
    tag = 'sup'
    next_tag = Sub
class Underline(HtmlState):
    tag = 'u'
    next_tag = Sup
class Italic(HtmlState):
    tag = 'i'
    next_tag = Underline
class Bold(HtmlState):
    tag = 'b'
    next_tag = Italic
class Close(HtmlState):
    # Weird class to clean up states: Pause emits closing tags for every
    # open formatting tag, then reset() makes the closes permanent.
    next_tag = Bold
    @classmethod
    def All( cls, hs ):
        if cls.next_tag is not None:
            # temporarily set states off
            cls.next_tag.Pause( hs )
        # make permanent
        hs.reset()
class HtmlString:
    """Accumulates UTF-8 HTML output bytes while tracking which formatting
    tags (Bold/Italic/Underline/Sup/Sub) are currently open."""
    def __init__( self ):
        self._string = bytearray(b'')
        self.reset()
    def reset( self ):
        # All formatting states start (or are forced) off.
        self._state = dict.fromkeys((Bold, Italic, Underline, Sup, Sub), False)
    @property
    def string( self ):
        return self._string
    @string.setter
    def string( self, s ):
        self._string = s
    def state( self, cls ):
        """Is the given tag class currently open?"""
        return self._state[ cls ]
    def On( self, cls ):
        self._state[ cls ] = True
    def Off( self, cls ):
        self._state[ cls ] = False
    def append( self, s ):
        # Accept bytes, str (UTF-8 encoded) or a single integer byte value.
        if isinstance(s, bytes):
            self._string += s
        elif isinstance(s, str):
            self._string += s.encode('utf-8')
        else:
            self._string.append(s)
class TextField:
# Convert First-choice style text to plain text and HTML
# First choice uses a unique encoding
html_esc = {
ord('<') : '<',
ord('>') : '>',
ord('&') : '&',
ord(' ') : ' ',
}
field_types = {
1:' ', # general
2:'N', # numeric
3:'D', # date
4:'T', # time
5:'Y', # boolian
}
def __init__( self, string ):
self.parsed = None
_text = ['']
_ftext = ['']
_text2 = ['']
_html = HtmlString()
_html2 = HtmlString()
_fhtml = HtmlString()
_fieldtype = ' '
postftext = False
try:
_length = struct.unpack_from('>H',string)[0]
raw = string[2:]
except:
#print("Bad string input")
raw = None
return
#print('raw',len(raw),raw)
#hexdump(raw)
length_count = 0
array_count = 0
while length_count < _length:
#print(_length,array_count,length_count)
c = raw[array_count]
array_count += 1
length_count += 1
if c < 0x80:
Close.All(_html)
Close.All(_fhtml)
if c == 0x0d:
if postftext:
_text2[0] += "\n"
_html2.append('<br />')
else:
_text[0] += "\n"
_html.append('<br />')
length_count += 1
else:
if postftext:
self.AddChar( c, _text2, _html2 )
else:
self.AddChar( c, _text, _html )
else: # C >= 0x80
c &= 0x7F # peel off first bit
d = raw[array_count]
# Background or field
array_count += 1
length_count += 1
if d >= 0xd0 and d <= 0xdf:
# background text or field
# needs 3rd byte
e = raw[array_count]
array_count += 1
length_count += 1
if e & 0x01 == 1:
# background
if postftext:
self.BoIlUn( d, _html2 )
self.SupSub( e, _html2 )
self.AddChar( c, _text2, _html2 )
else:
self.BoIlUn( d, _html )
self.SupSub( e, _html )
self.AddChar( c, _text, _html )
else: # e is even
# field
Close.All(_html)
self.BoIlUn( d, _fhtml )
self.SupSub( e, _fhtml )
self.AddChar( c, _ftext, _fhtml )
postftext = True
elif d >= 0x90 and d <= 0x9f:
# Field Name
Close.All(_html)
self.BoIlUn( d, _fhtml )
if c in type(self).field_types:
_fieldtype = type(self).field_types[c]
Close.All(_fhtml)
else:
self.AddChar( c, _ftext, _fhtml )
postftext = True
elif d >= 0x81 and d <= 0x8f:
# Regular text
if postftext:
self.BoIlUn( d, _html2 )
self.AddChar( c, _text2, _html2 )
else:
self.BoIlUn( d, _html )
self.AddChar( | |
<reponame>wangg12/torchsample
import os.path as osp
import random
import math
import numpy as np
from collections import Sequence
import torch
import mmcv
import torch as th
import torch.nn.functional as F
from ..utils import th_random_choice
class Compose(object):
    """Chain several transforms into a single callable applied in order."""
    def __init__(self, transforms):
        """Composes (chains) several transforms together into a single
        transform.
        Arguments
        ---------
        transforms : a list of transforms
            transforms will be applied sequentially
        """
        self.transforms = transforms
    def __call__(self, *inputs):
        current = inputs
        for tform in self.transforms:
            # A transform may return a single value or a list/tuple;
            # normalize so it can always be star-unpacked.
            if not isinstance(current, (list, tuple)):
                current = [current]
            current = tform(*current)
        return current
class RandomChoiceCompose(object):
    """Randomly choose to apply one transform from a collection of transforms.
    e.g. to randomly apply EITHER 0-1 or -1-1 normalization to an input:
        >>> transform = RandomChoiceCompose([RangeNormalize(0,1),
                                             RangeNormalize(-1,1)])
        >>> x_norm = transform(x) # only one of the two normalizations is applied
    """
    def __init__(self, transforms):
        self.transforms = transforms
    def __call__(self, *inputs):
        # Pick exactly one transform uniformly at random and apply it.
        chosen = random.choice(self.transforms)
        return chosen(*inputs)
def to_tensor(data):
    """Convert objects of various python types to :obj:`torch.Tensor`.
    Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
    :class:`Sequence`, :class:`int` and :class:`float`.
    """
    # Dispatch in the same priority order as before: tensor passthrough,
    # then ndarray, sequence, int, float.
    if isinstance(data, torch.Tensor):
        return data
    if isinstance(data, np.ndarray):
        return torch.from_numpy(data)
    if isinstance(data, Sequence) and not mmcv.is_str(data):
        return torch.tensor(data)
    if isinstance(data, int):
        return torch.LongTensor([data])
    if isinstance(data, float):
        return torch.FloatTensor([data])
    raise TypeError("type {} cannot be converted to tensor.".format(type(data)))
class ToTensor(object):
    """Converts each input to a torch.Tensor via `to_tensor`."""
    def __call__(self, *inputs):
        # BUG FIX: the original returned `outputs if idx > 1 else outputs[0]`,
        # which silently dropped the second input whenever exactly two inputs
        # were passed (idx == 1 there).  Decide on the input count instead.
        outputs = [to_tensor(_input) for _input in inputs]
        return outputs if len(outputs) > 1 else outputs[0]
class ToCuda(object):
    """Moves an autograd.Variable to the GPU."""
    def __init__(self, device=0):
        """Moves an autograd.Variable to the GPU.
        Arguments
        ---------
        device : integer
            which GPU device to put the input(s) on
        """
        self.device = device
    def __call__(self, *inputs):
        # BUG FIX: the original returned `outputs if idx > 1 else outputs[0]`,
        # silently dropping the second input when exactly two were passed.
        outputs = [_input.cuda(self.device) for _input in inputs]
        return outputs if len(outputs) > 1 else outputs[0]
class ToFile(object):
    """Saves an image to file. Useful as a pass-through transform when wanting
    to observe how augmentation affects the data.
    NOTE: Only supports saving to Numpy currently
    """
    def __init__(self, root, ext='.npy', fmt='CHW'):
        """Saves an image to file. Useful as a pass-through transform when
        wanting to observe how augmentation affects the data.
        NOTE: Only supports saving to Numpy currently
        Arguments
        ---------
        root : string
            path to main directory in which images will be saved
        ext : string
            file extension: '.npy', '.pth', '.png' or '.jpg'
        fmt : string
            'CHW' transposes tensors to HWC before image export;
            any other value saves the array as-is
        """
        if root.startswith('~'):
            # expand '~' to the user's home directory
            root = osp.expanduser(root)
        self.root = root
        self.fmt = fmt
        self.ext = ext
        # incremented once per __call__; used in the generated file names
        self.counter = 0
    def __call__(self, *inputs):
        # Writes img_<call#>_<input#><ext> for every input, then passes
        # the inputs through unchanged.
        for idx, _input in enumerate(inputs):
            fpath = osp.join(self.root, 'img_{}_{}{}'.format(self.counter, idx, self.ext))
            if self.ext == '.npy':
                np.save(fpath, _input.cpu().numpy())
            elif self.ext == '.pth':
                th.save(_input, fpath)
            elif self.ext in ['.png', '.jpg']:
                if self.fmt == 'CHW':
                    img_save = _input.cpu().numpy().transpose((1, 2, 0))
                else:
                    img_save = _input.cpu().numpy()
                mmcv.imwrite(img_save, fpath)
            else:
                raise NotImplementedError('not supported file extension {}'.format(self.ext))
        self.counter += 1
        return inputs
class ChannelsLast(object):
    """Transposes a tensor so that the channel dim is last `HWC` and `DHWC` are
    aliases for this transform."""
    def __init__(self, safe_check=False):
        """Transposes a tensor so that the channel dim is last `HWC` and `DHWC`
        are aliases for this transform.
        Arguments
        ---------
        safe_check : boolean
            if true, will check if channels are already last and, if so,
            will just return the inputs
        """
        self.safe_check = safe_check
    def __call__(self, *inputs):
        ndim = inputs[0].dim()
        if self.safe_check:
            # heuristic: channels assumed already last when the last dim is
            # smaller than the first
            if inputs[0].size(-1) < inputs[0].size(0):
                return inputs
        # move dim 0 (channels) to the end
        plist = list(range(1, ndim)) + [0]
        # BUG FIX: the original returned `outputs if idx > 1 else outputs[0]`,
        # which silently dropped the second input when exactly two were passed.
        outputs = [_input.permute(*plist) for _input in inputs]
        return outputs if len(outputs) > 1 else outputs[0]
HWC = ChannelsLast
DHWC = ChannelsLast
class ChannelsFirst(object):
    """Transposes a tensor so that the channel dim is first.
    `CHW` and `CDHW` are aliases for this transform.
    """
    def __init__(self, safe_check=False):
        """Transposes a tensor so that the channel dim is first. `CHW` and
        `CDHW` are aliases for this transform.
        Arguments
        ---------
        safe_check : boolean
            if true, will check if channels are already first and, if so,
            will just return the inputs
        """
        self.safe_check = safe_check
    def __call__(self, *inputs):
        ndim = inputs[0].dim()
        if self.safe_check:
            # heuristic: channels assumed already first when the first dim is
            # smaller than the last
            if inputs[0].size(0) < inputs[0].size(-1):
                return inputs
        # move the last dim (channels) to the front
        plist = [ndim-1] + list(range(0, ndim - 1))
        # BUG FIX: the original returned `outputs if idx > 1 else outputs[0]`,
        # which silently dropped the second input when exactly two were passed.
        outputs = [_input.permute(*plist) for _input in inputs]
        return outputs if len(outputs) > 1 else outputs[0]
CHW = ChannelsFirst
CDHW = ChannelsFirst
class TypeCast(object):
    """Cast a torch.Tensor to a different type."""
    # string alias -> tensor-type constructor (was duplicated inline twice)
    _DTYPE_MAP = {
        'byte': th.ByteTensor,
        'double': th.DoubleTensor,
        'float': th.FloatTensor,
        'int': th.IntTensor,
        'long': th.LongTensor,
        'short': th.ShortTensor,
    }
    def __init__(self, dtype='float'):
        """Cast a torch.Tensor to a different type.
        Arguments
        ---------
        dtype : string or torch.*Tensor literal or list of such
            data type to which input(s) will be cast.
            If list, it should be the same length as inputs.
        """
        if isinstance(dtype, (list, tuple)):
            self.dtype = [self._resolve(dt) for dt in dtype]
        else:
            self.dtype = self._resolve(dtype)
    @classmethod
    def _resolve(cls, dt):
        # Translate a known string alias; unknown strings and non-string
        # dtype objects pass through unchanged (same as the original).
        if isinstance(dt, str):
            return cls._DTYPE_MAP.get(dt, dt)
        return dt
    def __call__(self, *inputs):
        if not isinstance(self.dtype, (tuple, list)):
            dtypes = [self.dtype] * len(inputs)
        else:
            dtypes = self.dtype
        # BUG FIX: the original returned `outputs if idx > 1 else outputs[0]`,
        # which silently dropped the second input when exactly two were passed.
        outputs = [_input.type(dt) for _input, dt in zip(inputs, dtypes)]
        return outputs if len(outputs) > 1 else outputs[0]
class AddChannel(object):
    """Adds a dummy channel to an image.
    This will make an image of size (28, 28) to now be of size (1, 28,
    28), for example.
    """
    def __init__(self, axis=0):
        """Adds a dummy channel to an image, also known as expanding an axis or
        unsqueezing a dim.
        Arguments
        ---------
        axis : integer
            dimension to be expanded to singleton
        """
        self.axis = axis
    def __call__(self, *inputs):
        # BUG FIX: the original returned `outputs if idx > 1 else outputs[0]`,
        # which silently dropped the second input when exactly two were passed.
        outputs = [_input.unsqueeze(self.axis) for _input in inputs]
        return outputs if len(outputs) > 1 else outputs[0]
ExpandAxis = AddChannel
Unsqueeze = AddChannel
class Transpose(object):
    """Swaps two dimensions of every input tensor."""
    def __init__(self, dim1, dim2):
        """Swaps two dimensions of a tensor.
        Arguments
        ---------
        dim1 : integer
            first dim to switch
        dim2 : integer
            second dim to switch
        """
        self.dim1 = dim1
        self.dim2 = dim2
    def __call__(self, *inputs):
        # BUG FIX: the original returned `outputs if idx > 1 else outputs[0]`,
        # which silently dropped the second input when exactly two were passed.
        outputs = [th.transpose(_input, self.dim1, self.dim2) for _input in inputs]
        return outputs if len(outputs) > 1 else outputs[0]
class RangeNormalize(object):
"""Given min_val: (R, G, B) and max_val: (R,G,B), will normalize each
channel of the th.*Tensor to the provided min and max values.
Works by calculating :
a = (max'-min')/(max-min)
b = max' - a * max
new_value = a * value + b
where min' & max' are given values,
and min & max are observed min/max for each channel
Arguments
---------
min_range : float or integer
Min value to which tensors will be normalized
max_range : float or integer
Max value to which tensors will be normalized
fixed_min : float or integer
Give this value if every sample has the same min (max) and
you know for sure what it is. For instance, if you
have an image then you know the min value will be 0 and the
max value will be 255. Otherwise, the min/max value will be
calculated for each individual sample and this will decrease
speed. Dont use this if each sample has a different min/max.
fixed_max :float or integer
See above
Example:
>>> x = th.rand(3,5,5)
>>> rn = RangeNormalize((0,0,10),(1,1,11))
>>> x_norm = rn(x)
Also works with just one value for min/max:
>>> x = th.rand(3,5,5)
>>> rn = RangeNormalize(0,1)
>>> x_norm = rn(x)
"""
def __init__(self, min_val, max_val):
"""Normalize a tensor between a min and max value.
Arguments
---------
min_val : float
lower bound of normalized tensor
max_val : float
upper bound of normalized tensor
"""
self.min_val = min_val
self.max_val = max_val
def __call__(self, *inputs):
outputs = []
for idx, _input in enumerate(inputs):
_min_val = _input.min()
_max_val = _input.max()
a = (self.max_val - self.min_val) / (_max_val-_min_val)
b = self.max_val - a*_max_val
_input = _input.mul(a).add(b)
outputs.append(_input)
return outputs if idx > 1 | |
"""A set of stream oriented parsers for http requests and responses, inline
with the current draft recommendations from the http working group.
http://tools.ietf.org/html/draft-ietf-httpbis-p1-messaging-17
Unlike other libraries, this is for clients, servers and proxies.
Missing:
comma parsing/header folding
"""
import re
import zlib
class ParseError(StandardError):
    """Baseclass for all http parsing errors"""
    # NOTE(review): StandardError exists only in Python 2; this module is
    # Python 2 code throughout (str()/bytearray mixing below) -- confirm
    # before running under Python 3.
    pass
from hanzo.httptools.semantics import Codes, Methods
NEWLINES = ('\r\n', '\n')
class HTTPMessage(object):
    """A stream based parser for http like messages.

    Incremental: push data in with feed()/feed_fd(), then inspect the
    result.  The parser moves through modes
    'start' -> 'headers' -> 'body' -> 'end' (or 'incomplete' if the
    stream closes early).  All input is retained in self.buffer;
    self.body_chunks records (offset, length) spans of the decoded body
    within that buffer.
    """
    CONTENT_TYPE = "application/http"
    def __init__(self, header):
        # header: a RequestHeader/ResponseHeader that accumulates the
        # parsed start line, headers and trailers.
        self.buffer = bytearray()
        self.offset = 0
        self.header = header
        self.body_chunks = []
        self.mode = 'start'
        self.body_reader = None
    @property
    def url(self):
        return self.header.url
    @property
    def scheme(self):
        return self.header.scheme
    @property
    def method(self):
        return self.header.method
    @property
    def host(self):
        return self.header.host
    @property
    def port(self):
        return self.header.port
    def feed_fd(self, fd):
        """Drive the parser from file-like `fd` until the message ends;
        returns any unconsumed trailing text ('' when none)."""
        while True:
            length, terminator = self.feed_predict()
            if length == 0:
                return ''
            elif terminator == '\r\n':
                # NOTE(review): standard Python file objects spell this
                # readline(); readLine() only works on a custom fd -- confirm.
                text = fd.readLine()
            elif length < 0:
                text = fd.read()
            elif length > 0:
                text = fd.read(length)
            unread = self.feed(text)
            if unread:
                return unread
    def feed_predict(self):
        """returns size, terminator request for input. size is 0 means end. """
        if self.mode == 'start':
            return None, '\r\n'
        elif self.mode == 'headers':
            return None, '\r\n'
        elif self.mode == 'body':
            if self.body_reader is not None:
                return self.body_reader.feed_predict()
            else:
                # connection close
                return -1, None
        if self.mode == 'end':
            return 0, None
        if self.mode == 'incomplete':
            return 0, None
    def feed(self, text):
        """Push more text from the input stream into the parser."""
        if text and self.mode == 'start':
            text = self.feed_start(text)
        if text and self.mode == 'headers':
            text = self.feed_headers(text)
        if self.mode == 'body':
            # Headers just finished: pick the body-framing strategy
            # (chunked, fixed length, or read-until-close).
            if not self.header.has_body():
                self.mode = 'end'
            else:
                if self.header.body_is_chunked():
                    self.body_reader = ChunkReader()
                else:
                    length = self.header.body_length()
                    if length >= 0:
                        self.body_reader = LengthReader(length)
                        self.body_chunks = [(self.offset, length)]
                        if length == 0:
                            self.mode = 'end'
                    else:
                        # no framing info: body runs until connection close
                        self.body_chunks = [(self.offset, 0)]
                        self.body_reader = None
        if text and self.mode == 'body':
            if self.body_reader is not None:
                #print >> sys.stderr, 'feeding', text[:50]
                text = self.body_reader.feed(self, text)
            else:
                # read-until-close: everything is body; grow the single chunk
                ((offset, length),) = self.body_chunks
                self.buffer.extend(text)
                self.offset = len(self.buffer)
                self.body_chunks = ((offset, length + len(text)),)
                text = ''
        return text
    def close(self):
        """Mark the end of the input stream and finish parsing."""
        if (self.body_reader is None and self.mode == 'body'):
            # read-until-close body: connection close terminates it cleanly
            self.mode = 'end'
        elif self.mode != 'end':
            if self.body_chunks:
                # check for incomplete in body_chunks
                offset, length = self.body_chunks.pop()
                position = len(self.buffer)
                length = min(length, position - offset)
                self.body_chunks.append((offset, length))
            self.mode = 'incomplete'
    def headers_complete(self):
        """Check whether the input stream has finished supplying headers."""
        return self.mode in ('end', 'body')
    def complete(self):
        """Checks whether the input stream is at the end, i.e. if the parser
        is expecting no more input."""
        return self.mode == 'end'
    def feed_line(self, text):
        """Feed text into the buffer, returning the first line found (if found
        yet)"""
        self.buffer.extend(text)
        pos = self.buffer.find('\n', self.offset)
        if pos > -1:
            pos += 1
            # split: everything after the newline is handed back as unread
            text = str(self.buffer[pos:])
            del self.buffer[pos:]
            line = str(self.buffer[self.offset:])
            self.offset = len(self.buffer)
        else:
            line = None
            text = ''
        return line, text
    def feed_length(self, text, remaining):
        """Feed (at most remaining bytes) text to buffer, returning
        leftovers."""
        body, text = text[:remaining], text[remaining:]
        remaining -= len(body)
        self.buffer.extend(body)
        self.offset = len(self.buffer)
        return remaining, text
    def feed_start(self, text):
        """Feed text to the parser while it is in the 'start' state."""
        line, text = self.feed_line(text)
        if line is not None:
            # blank lines before the start line are skipped
            if line not in NEWLINES:
                self.header.set_start_line(line)
                self.mode = 'headers'
        return text
    def feed_headers(self, text):
        """Feed text to the parser while it is in the 'headers'
        state."""
        while text:
            line, text = self.feed_line(text)
            if line is not None:
                self.header.add_header_line(line)
                if line in NEWLINES:
                    # blank line terminates the header section
                    self.mode = 'body'
                    break
        return text
    def get_message(self):
        """Returns the contents of the input buffer."""
        return str(self.buffer)
    def get_decoded_message(self):
        """Return the input stream reconstructed from the parsed
        data."""
        buf = bytearray()
        self.write_decoded_message(buf)
        return str(buf)
    def write_message(self, buf):
        #TODO: No idea what this does, looks broken
        self.header.write(buf)
        buf.extend('\r\n')
        self.write_body(buf)
    def write_decoded_message(self, buf):
        """Writes the parsed data to the buffer passed."""
        self.header.write_decoded(buf)
        if self.header.has_body():
            length = sum(l for o, l in self.body_chunks)
            buf.extend('Content-Length: %d\r\n' % length)
        body = self.get_body()
        if self.header.encoding and body:
            # try plain zlib first, then gzip framing; if both fail,
            # keep the body compressed and re-emit the encoding header
            try:
                body = zlib.decompress(body)
            except zlib.error:
                try:
                    body = zlib.decompress(body, 16 + zlib.MAX_WBITS)
                except zlib.error:
                    encoding_header = "Content-Encoding: %s\r\n" \
                            % self.header.encoding
                    buf.extend(encoding_header)
        buf.extend('\r\n')
        buf.extend(body)
    def get_body(self):
        """Returns the body of the HTTP message."""
        buf = bytearray()
        self.write_body(buf)
        return str(buf)
    def write_body(self, buf):
        """Writes the body of the HTTP message to the passed
        buffer."""
        for offset, length in self.body_chunks:
            buf.extend(self.buffer[offset:offset + length])
class ChunkReader(object):
    """Reads the body of a HTTP message with chunked encoding.

    Cooperates with an HTTPMessage `parser`: it borrows the parser's
    feed_line/feed_length helpers and appends (offset, length) spans to
    parser.body_chunks.  Modes: 'start' (expect a chunk-size line) ->
    'chunk' (expect chunk data) -> ... -> 'trailer' -> 'end'.
    """
    def __init__(self):
        self.mode = "start"
        self.remaining = 0
    def feed_predict(self):
        # Mirror of HTTPMessage.feed_predict: (size, terminator) wanted next.
        if self.mode == 'start':
            return None, '\r\n'
        elif self.mode == 'chunk':
            if self.remaining == 0:
                return None, '\r\n'
            else:
                return self.remaining, None
        elif self.mode == 'trailer':
            return None, '\r\n'
        elif self.mode == 'end':
            return 0, None
    def feed_start(self, parser, text):
        """Feed text into the ChunkReader when the mode is 'start'."""
        line, text = parser.feed_line(text)
        offset = len(parser.buffer)
        if line is not None:
            # chunk-size line: hex size, optional ';extensions' ignored
            chunk = int(line.split(';', 1)[0], 16)
            parser.body_chunks.append((offset, chunk))
            self.remaining = chunk
            if chunk == 0:
                # zero-size chunk terminates the body; trailers follow
                self.mode = 'trailer'
            else:
                self.mode = 'chunk'
        return text
    def feed_chunk(self, parser, text):
        """Feed text into the ChunkReader when the mode is 'chunk'."""
        if self.remaining > 0:
            self.remaining, text = parser.feed_length(text, self.remaining)
        if self.remaining == 0:
            # consume the CRLF that follows each chunk's data
            end_of_chunk, text = parser.feed_line(text)
            if end_of_chunk:
                self.mode = 'start'
        return text
    def feed_trailer(self, parser, text):
        """Feed text into the ChunkReader when the mode is
        'trailer'."""
        line, text = parser.feed_line(text)
        if line is not None:
            parser.header.add_trailer_line(line)
            if line in NEWLINES:
                # blank line ends the trailer section
                self.mode = 'end'
        return text
    def feed(self, parser, text):
        """Feed text into the ChunkReader."""
        while text:
            if self.mode == 'start':
                text = self.feed_start(parser, text)
            if text and self.mode == 'chunk':
                text = self.feed_chunk(parser, text)
            if text and self.mode == 'trailer':
                text = self.feed_trailer(parser, text)
            if self.mode == 'end':
                parser.mode = 'end'
                break
        return text
class LengthReader(object):
    """Reads a message body framed by a fixed Content-Length."""
    def __init__(self, length):
        # number of body bytes still expected from the stream
        self.remaining = length
    def feed_predict(self):
        # (size, terminator) wanted next; no terminator, just bytes.
        return self.remaining, None
    def feed(self, parser, text):
        # Consume up to `remaining` bytes into the parser's buffer; the
        # message is complete once the full length has been seen.
        if self.remaining > 0:
            self.remaining, text = parser.feed_length(text, self.remaining)
        if self.remaining <= 0:
            parser.mode = 'end'
        return text
class HTTPHeader(object):
    """Accumulates a parsed HTTP start line, headers and trailers.

    Subclasses (request/response) supply has_body()/set_start_line().
    Body framing is summarized in self.mode: 'close' (until connection
    close), 'length' (Content-Length) or 'chunked'.
    """
    # headers rewritten/dropped when re-emitting a decoded message
    STRIP_HEADERS = [n.lower() for n in ('Content-Length', 'Transfer-Encoding', 'Content-Encoding',
                                         'TE', 'Expect', 'Trailer')]
    def __init__(self, ignore_headers):
        self.headers = []
        self.keep_alive = False
        self.mode = 'close'
        self.content_length = None
        self.encoding = None
        self.trailers = []
        self.expect_continue = False
        self.ignore_headers = set(x.lower() for x in ignore_headers)
    def has_body(self):
        # overridden by subclasses
        pass
    def set_start_line(self, line):
        # overridden by subclasses
        pass
    def write_decoded(self, buf):
        # Emit the start line plus headers, stripping framing headers when
        # a body will be re-emitted with a fresh Content-Length.
        self.write_decoded_start(buf)
        strip_headers = self.STRIP_HEADERS if self.has_body() else ()
        self.write_headers(buf, strip_headers)
    def write_decoded_start(self, buf):
        # overridden by subclasses
        pass
    def write_headers(self, buf, strip_headers=()):
        for k, v in self.headers:
            if k.lower() not in strip_headers:
                buf.extend('%s: %s\r\n' % (k, v))
        for k, v in self.trailers:
            if k.lower() not in strip_headers:
                buf.extend('%s: %s\r\n' % (k, v))
    def add_trailer_line(self, line):
        # Leading whitespace means a folded continuation of the previous
        # trailer; a bare newline is the section terminator.
        if line.startswith(' ') or line.startswith('\t'):
            k, v = self.trailers.pop()
            line = line.strip()
            v = "%s %s" % (v, line)
            self.trailers.append((k, v))
        elif line in NEWLINES:
            pass
        else:
            name, value = line.split(':', 1)
            name = name.strip()
            value = value.strip()
            self.trailers.append((name, value))
    def add_header(self, name, value):
        self.headers.append((name, value))
    def add_header_line(self, line):
        # Folded continuation lines extend the previous header; the blank
        # line at the end of the section triggers interpretation of all
        # collected headers (framing, keep-alive, encoding, expect).
        if line.startswith(' ') or line.startswith('\t'):
            k, v = self.headers.pop()
            line = line.strip()
            v = "%s %s" % (v, line)
            self.add_header(k, v)
        elif line in NEWLINES:
            for name, value in self.headers:
                name = name.lower()
                value = value.lower()
                # todo handle multiple instances
                # of these headers
                if name in self.ignore_headers:
                    #print >> sys.stderr, 'ignore', name
                    pass
                elif name == 'expect':
                    if '100-continue' in value:
                        self.expect_continue = True
                elif name == 'content-length':
                    if self.mode == 'close':
                        self.content_length = int(value)
                        self.mode = 'length'
                elif name == 'transfer-encoding':
                    # chunked takes precedence over any content-length
                    if 'chunked' in value:
                        self.mode = 'chunked'
                elif name == 'content-encoding':
                    self.encoding = value
                elif name == 'connection':
                    if 'keep-alive' in value:
                        self.keep_alive = True
                    elif 'close' in value:
                        self.keep_alive = False
        else:
            #print line
            name, value = line.split(':', 1)
            name = name.strip()
            value = value.strip()
            self.add_header(name, value)
    def body_is_chunked(self):
        return self.mode == 'chunked'
    def body_length(self):
        # None (implicit) when framing is 'close' or 'chunked'
        if self.mode == 'length':
            return self.content_length
# Splits an absolute http(s) URL into scheme, authority, host, optional
# port and path groups (case-insensitive).
url_rx = re.compile(
    '(?P<scheme>https?)://(?P<authority>(?P<host>[^:/]+)(?::(?P<port>\d+))?)'
    '(?P<path>.*)',
    re.I)
class RequestHeader(HTTPHeader):
def __init__(self, ignore_headers=()):
HTTPHeader.__init__(self, ignore_headers=ignore_headers)
self.method | |
import collections
import json
import logging
import os
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForSequenceClassification
current_script_path = __file__
project_root = os.path.realpath(
os.path.join(current_script_path, os.path.pardir, os.path.pardir, os.path.pardir, os.path.pardir, os.path.pardir))
from serif.model.document_model import DocumentModel
from serif.model.event_mention_model import EventMentionModel
from serif.model.impl.a2t_adapter.a2t_example_filters import class_name_to_class as a2t_filter_class_name_to_class
from serif.model.relation_mention_model import RelationMentionModel
from serif.theory.enumerated_type import Tense, Modality
from serif.theory.event_mention import EventMention
from serif.theory.mention import Mention
from serif.model.impl.a2t_adapter.utils import modify_or_add_event_mention, modify_or_add_mention
logger = logging.getLogger(__name__)
def get_valid_slide_window_intervals(min_val, max_val, focus_val, slide_window_size):
    """Enumerate every window of `slide_window_size` consecutive positions that
    lies within [min_val, max_val] and contains `focus_val`.

    Returns a set of (start, end) inclusive intervals.  If no full-size
    window fits but focus_val is in range, the whole [min_val, max_val]
    interval is returned as a fallback.
    """
    windows = {
        (start, start + slide_window_size - 1)
        for start in range(max(min_val, focus_val - slide_window_size + 1), focus_val + 1)
        if start + slide_window_size - 1 <= max_val
    }
    if not windows and min_val <= focus_val <= max_val:
        windows.add((min_val, max_val))
    return windows
class GranularSpecialEventArgGoldenGenerator(object):
    """Builds gold (event_mention, mention, sentence_text, role) tuples from a
    document's granular events, pairing each event mention with the argument
    mentions that occur in the same sentence."""
    def generate(self, serif_doc):
        ret = list()
        for granular_event in serif_doc.event_set or ():
            sent_to_ems = dict()          # sent_no -> event mentions of this event
            sent_to_ms = dict()           # sent_no -> argument mentions
            m_to_event_arg_role = dict()  # mention -> roles it fills
            for event_arg in granular_event.arguments:
                if event_arg.entity is not None:
                    for mention in event_arg.entity.mentions:
                        sent_to_ms.setdefault(mention.sentence.sent_no, set()).add(mention)
                        m_to_event_arg_role.setdefault(mention, set()).add(event_arg.role)
                if event_arg.event_mention is not None:
                    sent_to_ems.setdefault(event_arg.event_mention.sentence.sent_no, set()).add(event_arg.event_mention)
            # one example per (event mention, same-sentence mention, role)
            for sent_id, ems in sent_to_ems.items():
                for em in ems:
                    for m in sent_to_ms.get(sent_id, ()):
                        for arg_role in m_to_event_arg_role.get(m, ()):
                            ret.append((em, m,
                                        serif_doc.get_original_text_substring(em.sentence.start_char,
                                                                              em.sentence.end_char),
                                        arg_role))
        return ret
def graunlar_eventarg_golden_example_matcher(generated_examples, golden_examples):
    """For each golden example (event_mention, mention, ...), collect the
    generated examples that share the same event mention but link a
    DIFFERENT mention.  Returns a list of [golden_example, other_examples]."""
    # event_mention -> mentions it was paired with
    linkable = dict()
    # (event_mention, mention) -> generated examples for that pair
    pair_to_examples = dict()
    for example in generated_examples:
        linkable.setdefault(example[0], set()).add(example[1])
        pair_to_examples.setdefault((example[0], example[1]), set()).add(example)
    matched = list()
    for gold in golden_examples:
        event_mention, mention = gold[0], gold[1]
        others = list()
        for candidate in linkable.get(event_mention, ()):
            if candidate is not mention:
                others.extend(pair_to_examples.get((event_mention, candidate), ()))
        matched.append([gold, others])
    return matched
class GenericUnaryExampleGenerator(object):
    """Generates (theory, passage) pairs: every Mention/EventMention combined
    with each sliding window of sentences (size `slide_window_size`) that
    contains its sentence."""
    def __init__(self, allowed_elem_types, slide_window_size=2):
        # allowed_elem_types: subset of {"Mention", "EventMention"}
        self.allowed_elem_types = set(allowed_elem_types)
        self.slide_window_size = slide_window_size
    def generate(self, serif_doc):
        ret = set()
        for sentence in serif_doc.sentences:
            focus_theory_set = set()
            if "Mention" in self.allowed_elem_types:
                focus_theory_set.update(sentence.mention_set or ())
            if "EventMention" in self.allowed_elem_types:
                focus_theory_set.update(sentence.event_mention_set or ())
            for mention in focus_theory_set:
                # pair the theory with every window covering its sentence
                for start_slide_window, end_slide_window in get_valid_slide_window_intervals(0, len(
                        serif_doc.sentences) - 1, sentence.sent_no, self.slide_window_size):
                    original_passage = serif_doc.get_original_text_substring(
                        serif_doc.sentences[start_slide_window].start_char,
                        serif_doc.sentences[end_slide_window].end_char)
                    ret.add((mention, original_passage))
        return ret
class GenericBinaryExampleGenerator(object):
    """Generates (left_theory, right_theory, passage) triples for every
    ordered pair of allowed theories whose sentences share at least one
    sliding window of `slide_window_size` sentences."""
    def __init__(self, left_elem_allow_types, right_elem_allow_types, slide_window_size=2):
        # each: subset of {"Mention", "EventMention"}
        self.left_elem_allow_types = set(left_elem_allow_types)
        self.right_elem_allow_types = set(right_elem_allow_types)
        self.slide_window_size = slide_window_size
    def generate(self, serif_doc):
        ret = set()
        all_possible_lefts = set()
        all_possible_rights = set()
        # collect candidate theories for each side over the whole document
        for serif_sentence in serif_doc.sentences:
            left_focus_set = set()
            if "Mention" in self.left_elem_allow_types:
                left_focus_set.update(serif_sentence.mention_set or ())
            if "EventMention" in self.left_elem_allow_types:
                left_focus_set.update(serif_sentence.event_mention_set or ())
            all_possible_lefts.update(left_focus_set)
            right_focus_set = set()
            if "Mention" in self.right_elem_allow_types:
                right_focus_set.update(serif_sentence.mention_set or ())
            if "EventMention" in self.right_elem_allow_types:
                right_focus_set.update(serif_sentence.event_mention_set or ())
            all_possible_rights.update(right_focus_set)
        for left_elem in all_possible_lefts:
            for right_elem in all_possible_rights:
                if left_elem == right_elem:
                    continue
                left_sent = left_elem.sentence
                right_sent = right_elem.sentence
                left_windows = get_valid_slide_window_intervals(0, len(serif_doc.sentences) - 1, left_sent.sent_no,
                                                                self.slide_window_size)
                right_windows = get_valid_slide_window_intervals(0, len(serif_doc.sentences) - 1, right_sent.sent_no,
                                                                 self.slide_window_size)
                # only windows that cover BOTH sentences produce an example
                windows_intersect = set(left_windows)
                windows_intersect = windows_intersect.intersection(set(right_windows))
                for start_slide_window, end_slide_window in windows_intersect:
                    ret.add((left_elem, right_elem, serif_doc.get_original_text_substring(
                        serif_doc.sentences[start_slide_window].start_char,
                        serif_doc.sentences[end_slide_window].end_char)))
        return ret
def ner_write_back(example_label_confidence_tuple):
    # Write predicted NER labels back onto each mention's sentence.
    # Each tuple: (mention, predicted_label, confidence, debug_info).
    for mention, label, confidence, debug_info in example_label_confidence_tuple:
        serif_sentence = mention.sentence
        modify_or_add_mention(serif_sentence, mention.start_token, mention.end_token, label, confidence, debug_info)
def entity_mention_entity_mention_relation_write_back(example_label_confidence_tuple):
    """Materialize predicted mention-mention relations as serif RelMentions.

    Each tuple: (left_mention, right_mention, label, confidence, debug_info).
    """
    for left_mention, right_mention, label, confidence, debug_info in example_label_confidence_tuple:
        serif_doc = left_mention.document
        # lazily create the document-level relation mention set
        if serif_doc.rel_mention_set is None:
            serif_doc.add_new_rel_mention_set()
        RelationMentionModel.add_new_relation_mention(
            serif_doc.rel_mention_set, label, left_mention, right_mention,
            Tense.Unspecified, Modality.Other, score=confidence,
            pattern=json.dumps(list(debug_info)), model="Ask2Transformers")
def event_mention_write_back(example_label_confidence_tuple):
    # Write predicted event-mention labels back onto the owning sentence.
    # Each tuple: (event_mention, predicted_label, confidence, debug_info).
    for event_mention, label, confidence, debug_info in example_label_confidence_tuple:
        serif_sentence = event_mention.sentence
        modify_or_add_event_mention(serif_sentence, event_mention.start_token, event_mention.end_token, label,
                                    confidence, debug_info)
def event_mention_arg_write_back(example_label_confidence_tuple):
    """Serialize predicted event arguments onto their event mentions.

    Each item is ``(event_mention, argument_theory, role_label, confidence,
    debug_info)``; the argument is attached to the event mention with the
    prediction's debug info stored as the pattern string.
    """
    for event_mention, argument_theory, role_label, confidence, debug_info in example_label_confidence_tuple:
        EventMentionModel.add_new_event_mention_argument(
            event_mention, role_label, argument_theory, confidence,
            model="Ask2Transformers", pattern=json.dumps(list(debug_info)))
def init_input_constraints(input_constraints_list):
    """Instantiate constraint filters from a list of config dicts.

    Each entry must have a ``"name"`` key (looked up in
    ``a2t_filter_class_name_to_class``) and an ``"args"`` dict passed to the
    filter's constructor. Returns the list of filter instances in order.
    """
    return [
        a2t_filter_class_name_to_class[constraint["name"]](**constraint["args"])
        for constraint in input_constraints_list
    ]
def get_original_text(serif_elem):
    """Return the surface text of a Mention or EventMention.

    Raises NotImplementedError (with the offending type name) for any other
    element type.
    """
    # Both supported types expose the same ``.text`` attribute.
    if isinstance(serif_elem, (Mention, EventMention)):
        return serif_elem.text
    raise NotImplementedError(type(serif_elem).__name__)
class A2TPipeline(object):
    """One Ask2Transformers zero-shot extraction stage.

    Holds an example generator, an ontology (type -> NLI templates plus
    input-constraint filters), and a serializer callback that writes
    accepted predictions back onto the Serif document. The components are
    assigned by the caller after construction (see ``A2TDriver.parse_template``).
    """
    def __init__(self):
        # Produces candidate examples (tuples ending in a context string) from a serif_doc.
        self.example_generator = None
        # Constraint filters applied to every ontology type that opts in.
        self.global_input_constraints = []
        # Mapping ontology type -> {"templates": set, "input_constraints": list};
        # populated by parse_ontology().
        self.ontology = None
        # Callback that writes (elem..., label, confidence, debug) tuples back to the doc.
        self.serializer = None
        # Optional generator of gold examples, used by generate_golden_examples_and_candidates().
        self.golden_example_generator = None
        # Callable matching candidate examples against gold examples.
        self.match_heuristic_examples = None
    def parse_ontology(self, ontology_dict_root):
        """Build ``self.ontology`` from a config dict.

        Each entry supplies ``templates`` (NLI hypothesis templates),
        ``use_global_input_constraints`` (bool), and optional local
        ``input_constraints`` (instantiated via init_input_constraints and
        appended after the global ones).
        """
        self.ontology = dict()
        # self.ontology["O"] = {
        #     "templates": [],
        #     "input_constraints": self.global_input_constraints
        # }
        for ontology_type, ontology_properties in ontology_dict_root.items():
            templates = set(ontology_properties["templates"])
            use_global_input_constraints = ontology_properties["use_global_input_constraints"]
            local_input_constraints = ontology_properties.get("input_constraints", list())
            self.ontology[ontology_type] = {
                "templates": templates,
                "input_constraints": (list(
                    self.global_input_constraints) if use_global_input_constraints is True else list()) + init_input_constraints(
                    local_input_constraints)
            }
    def a2t_predict(self, example_to_ontology_name, model, tokenizer):
        """Score examples against ontology templates with an NLI model.

        For every (example, template) pair, builds a premise/hypothesis string
        ``"<context> <sep> <filled template>."`` and takes the model's
        entailment probability. An example is assigned the best-scoring
        (>= 0.5) template whose ontology type it was proposed for.

        Returns a list of tuples: ``(elem, label, score, debug)`` for unary
        examples and ``(left, right, label, score, debug)`` for binary ones.
        """
        # Modified from inference.predict
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        _n_rel = len(self.ontology.keys()) + 1 # 1 is "O"
        # Map ontology type -> its set of hypothesis templates.
        template_mapping = dict()
        for ontology_name, ontology_config in self.ontology.items():
            template_mapping[ontology_name] = ontology_config["templates"]
        # Reverse map: template string -> all ontology types that use it.
        _template_mapping_reverse = collections.defaultdict(list)
        for ontology_name, templates in template_mapping.items():
            for v in templates:
                _template_mapping_reverse[v].append(ontology_name)
        _labels = list(_template_mapping_reverse.keys())
        _target_labels = list(self.ontology.keys())
        if "O" not in self.ontology.keys():
            _target_labels = ["O"] + _target_labels
        _new_labels2id = {t: i for i, t in enumerate(_labels)}
        _mapping = collections.defaultdict(list)
        for ontology_name, templates in template_mapping.items():
            _mapping[ontology_name].extend([_new_labels2id[v] for v in templates])
        def idx2label(idx):
            return _target_labels[idx]
        # NOTE(review): _n_rel, _mapping and _idx2label are computed but never
        # read again in this method — likely leftovers from inference.predict.
        _idx2label = np.vectorize(idx2label)
        # Templates an example is actually eligible for (union over its proposed types).
        example_to_valid_templates = dict()
        for example, ontology_names in example_to_ontology_name.items():
            for ontology_name in ontology_names:
                example_to_valid_templates.setdefault(example, set()).update(template_mapping[ontology_name])
        example_in_order = list(example_to_ontology_name.keys())
        hypotheses = list()
        # valid_example = np.zeros((len(example_in_order), len(_target_labels)))
        # Build one NLI input per (example, template) pair; example[-1] is the
        # context window string produced by the example generator.
        for x, example in enumerate(example_in_order):
            # for ontology_idx, ontology_name in enumerate(_target_labels):
            #     if ontology_name in example_to_ontology_name[example] or ontology_name == "O":
            #         valid_example[x, ontology_idx] = 1.0
            for y, label_template in enumerate(_labels):
                # Templates use {X}/{Y} placeholders; convert to positional str.format slots.
                escaped_label_template = label_template.replace("{X}", "{0}").replace("{Y}", "{1}")
                formatted_question = ""
                if len(example) == 2:
                    # Unary example: (element, context_text).
                    formatted_question = escaped_label_template.format(get_original_text(example[0]))
                elif len(example) == 3:
                    # Binary example: (left, right, context_text).
                    formatted_question = escaped_label_template.format(get_original_text(example[0]),
                                                                       get_original_text(example[1]))
                else:
                    raise ValueError()
                hypotheses.append("{} {} {}.".format(example[-1], tokenizer.sep_token, formatted_question))
        batch_size = 128
        # Locate the "entailment" output index from the model config.
        ent_position = -1
        for ontology_name, templates in model.config.label2id.items():
            if ontology_name.lower() == 'entailment':
                ent_position = templates
        if ent_position == -1:
            raise ValueError("Entailment label position not found on model configuration.")
        hypotheses = tokenizer(hypotheses, return_tensors='pt', padding=True).input_ids
        dataset = TensorDataset(hypotheses)
        data_loader = DataLoader(
            dataset,
            batch_size=batch_size
        )
        outputs = []
        with torch.no_grad():
            for (data,) in tqdm(data_loader, total=(len(dataset) // batch_size) + (len(dataset) % batch_size != 0)):
                data = data.to(device)
                output = model(data)[0].detach().cpu().numpy()
                outputs.append(output)
        outputs = np.vstack(outputs)
        # Softmax over NLI classes, then keep only the entailment probability
        # and reshape to (num_examples, num_templates).
        outputs = np.exp(outputs) / np.exp(outputs).sum(-1, keepdims=True)
        outputs = outputs[..., ent_position].reshape(len(example_in_order), -1)
        example_template_probs = list()
        for x, template_scores in enumerate(outputs):
            example_template_probs.append([])
            current_en = example_template_probs[-1]
            for y, score in enumerate(template_scores):
                current_en.append((_labels[y], float(score)))
        positive_cnt = 0
        negative_cnt = 0
        result = list()
        # Pick, per example, the best template above the 0.5 entailment threshold
        # among the templates/types the example was actually proposed for.
        for example, predictions in zip(example_in_order, example_template_probs):
            best_ontology_type = None
            best_template = None
            best_score = -1.0
            debug_infos = list()
            for template, score in predictions:
                if template not in example_to_valid_templates[example]:
                    continue
                if score < 0.5:
                    continue
                for pred_label in _template_mapping_reverse[template]:
                    if pred_label == "O":
                        continue
                    if pred_label not in example_to_ontology_name[example]:
                        continue
                    debug_infos.append((pred_label, template, score))
                    if score > best_score:
                        best_score = score
                        best_template = template
                        best_ontology_type = pred_label
            # Keep all passing (label, template, score) triples, best first, for debugging.
            debug_infos = tuple(sorted(debug_infos, key=lambda x: x[2], reverse=True))
            if best_ontology_type is not None:
                positive_cnt += 1
                if len(example) == 2:
                    slot_0 = example[0]
                    result.append((slot_0, best_ontology_type, best_score, debug_infos))
                elif len(example) == 3:
                    slot_0 = example[0]
                    slot_1 = example[1]
                    result.append((slot_0, slot_1, best_ontology_type, best_score, debug_infos))
            else:
                negative_cnt += 1
        logger.info("Positive rate {}".format(
            0.0 if positive_cnt + negative_cnt == 0 else positive_cnt / (positive_cnt + negative_cnt)))
        return result
    def generate_decode_examples_shared(self, serif_doc):
        """Generate candidate examples and map each to its eligible ontology types.

        Runs the stage's example generator, applies every ontology type's
        input-constraint filters, and drops examples that only qualify for "O".
        Returns ``{example: set(ontology_type)}``.
        """
        examples = self.example_generator.generate(serif_doc)
        logger.info("Generated {} examples".format(len(examples)))
        ontology_name_to_examples = dict()
        example_to_ontology_names = dict()
        for ontology_name, model_config in self.ontology.items():
            filtered_examples = set(examples)
            for input_constraint_filter in model_config["input_constraints"]:
                filtered_examples = set(filter(input_constraint_filter.filter, filtered_examples))
            ontology_name_to_examples[ontology_name] = filtered_examples
            logger.info("Under {} we have {} examples".format(ontology_name, len(filtered_examples)))
            for example in filtered_examples:
                example_to_ontology_names.setdefault(example, set()).add(ontology_name)
        # Filter O only examples
        filtered_example_to_ontology_names = dict()
        for example, ontology_names in example_to_ontology_names.items():
            if len(ontology_names.difference({"O"})) > 0:
                filtered_example_to_ontology_names[example] = ontology_names
        return filtered_example_to_ontology_names
    def decode(self, serif_doc, model, tokenizer):
        """Run the full stage on one document: generate, predict, serialize."""
        if len(self.ontology) < 1:
            return
        filtered_example_to_ontology_names = self.generate_decode_examples_shared(serif_doc)
        if len(filtered_example_to_ontology_names) > 0:
            elem_predict_confidence_tuple = self.a2t_predict(filtered_example_to_ontology_names, model, tokenizer)
            self.serializer(elem_predict_confidence_tuple)
    def generate_golden_examples_and_candidates(self, serif_doc):
        """Match generated candidate examples against gold examples.

        Delegates the matching policy to ``self.match_heuristic_examples``.
        """
        filtered_example_to_ontology_names = self.generate_decode_examples_shared(serif_doc)
        golden_examples = self.golden_example_generator.generate(serif_doc)
        return self.match_heuristic_examples(set(filtered_example_to_ontology_names.keys()), golden_examples)
class A2TDriver(DocumentModel):
def __init__(self, **kwargs):
super(A2TDriver, self).__init__(**kwargs)
self.current_model_name = "ynie/roberta-large-snli_mnli_fever_anli_R1_R2_R3-nli"
self.current_model = None
self.current_tokenizer = None
    def load_model(self):
        """Load the NLI model and tokenizer named by ``self.current_model_name``.

        Fetches an ``AutoModelForSequenceClassification`` and matching
        ``AutoTokenizer`` from HuggingFace and stores them on
        ``self.current_model`` / ``self.current_tokenizer``. When CUDA is
        available the model is moved to the GPU, cast to fp16 and switched to
        eval mode.

        NOTE(review): ``.half().eval()`` is only applied on the CUDA path, so a
        CPU-loaded model keeps its default (training) mode — confirm this is
        intended before relying on CPU inference.
        """
        logger.info("Loading model {}".format(self.current_model_name))
        model = AutoModelForSequenceClassification.from_pretrained(self.current_model_name)
        tokenizer = AutoTokenizer.from_pretrained(self.current_model_name)
        if torch.cuda.is_available():
            device = torch.device("cuda")
            model.to(device).half().eval()
        logger.info("Finished loading model {}".format(self.current_model_name))
        self.current_model = model
        self.current_tokenizer = tokenizer
def unload_model(self):
if self.current_model is not None:
del self.current_model
self.current_model = None
if self.current_tokenizer is not None:
del self.current_tokenizer
self.current_tokenizer = None
torch.cuda.empty_cache()
def parse_template(self, template_dict):
self.template_dict = template_dict
self.ner_pipeline = None
self.entity_relation_pipeline = None
self.event_mention_pipeline = None
self.event_mention_arg_pipeline = None
# NER
if "entity_mention" in template_dict["stages_to_run"]:
self.ner_pipeline = A2TPipeline()
self.ner_pipeline.example_generator = GenericUnaryExampleGenerator({"Mention"})
self.ner_pipeline.global_input_constraints = list()
self.ner_pipeline.global_input_constraints.extend(
init_input_constraints(template_dict["entity_mention"]["input_constraints"]))
self.ner_pipeline.parse_ontology(template_dict["entity_mention"]["ontology"])
self.ner_pipeline.serializer = ner_write_back
# entity_mention_entity_mention_relation
if "entity_mention_relation" in template_dict["stages_to_run"]:
self.entity_relation_pipeline = A2TPipeline()
self.entity_relation_pipeline.example_generator = GenericBinaryExampleGenerator({"Mention"}, {"Mention"})
self.entity_relation_pipeline.global_input_constraints = list()
self.entity_relation_pipeline.global_input_constraints.extend(
init_input_constraints(template_dict["entity_mention_relation"]["input_constraints"]))
self.entity_relation_pipeline.parse_ontology(template_dict["entity_mention_relation"]["ontology"])
self.entity_relation_pipeline.serializer = entity_mention_entity_mention_relation_write_back
# event_mention
if "event_mention" in template_dict["stages_to_run"]:
self.event_mention_pipeline = A2TPipeline()
self.event_mention_pipeline.example_generator = GenericUnaryExampleGenerator({"EventMention"})
self.event_mention_pipeline.global_input_constraints | |
# 100,15,300003
tem=1
for j in range(sequence_length[0]-1):
tem*=prob_old[j][input[0][j+1]]
tem*=prob_old[j+1][option.dict_size+1]
prob_old_prob=tem
if sim!=None:
similarity_old=similarity(input, input_original,sta_vec,\
id2sen, emb_word, option, similaritymodel)[0]
prob_old_prob=prob_old_prob*similarity_old
else:
similarity_old=-1
V_new = math.log(max(np.power(prob_candidate_prob,1.0/sequence_length_candidate[0]),1e-200))
V_old = math.log(max(np.power(prob_old_prob, 1.0/sequence_length),1e-200))
alphat = min(1,math.exp(min((V_new-V_old)/temperature,200)))
if choose_action([alphat, 1-alphat])==0 and input_candidate[prob_candidate_ind][ind]<option.dict_size:
input=input_candidate[prob_candidate_ind:prob_candidate_ind+1]
sequence_length+=1
pos+=1
# sta_vec.insert(ind, 0.0)
# del(sta_vec[-1])
print('ind, action,oldprob,vold, vnew, alpha,simold, simnew', ind, action,prob_old_prob,V_old,\
V_new,alphat,similarity_old,similarity_new)
print('Temperature:{:3.3f}: '.format(temperature)+' '.join(id2sen(input[0])))
elif action==2: # word delete
if sequence_length[0]<=2 or ind==0:
pos += 1
continue
if tfflag:
prob_old=run_epoch(session, mtest_forward, input, sequence_length,\
mode='use')[0]
else:
prob_old= output_p(input, forwardmodel) #15,K
tem=1
for j in range(sequence_length[0]-1):
tem*=prob_old[j][input[0][j+1]]
tem*=prob_old[j+1][option.dict_size+1]
prob_old_prob=tem
if sim!=None:
similarity_old=similarity(input, input_original,sta_vec,\
id2sen, emb_word, option, similaritymodel)[0]
prob_old_prob=prob_old_prob*similarity_old
else:
similarity_old=-1
input_candidate, sequence_length_candidate=generate_candidate_input_calibrated(input,\
sequence_length, ind, None, option.search_size, option,\
mode=action,calibrated_set=calibrated_set)
# delete sentence
if tfflag:
prob_new=run_epoch(session, mtest_forward, input_candidate,\
sequence_length_candidate,mode='use')[0]
else:
prob_new = output_p(input_candidate, forwardmodel)
tem=1
for j in range(sequence_length_candidate[0]-1):
tem*=prob_new[j][input_candidate[0][j+1]]
tem*=prob_new[j+1][option.dict_size+1]
prob_new_prob=tem
if sim!=None:
similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
id2sen, emb_word, option, similaritymodel)[0]
prob_new_prob=prob_new_prob*similarity_candidate
#alpha is acceptance ratio of current proposal
if input[0] in input_candidate:
for candidate_ind in range(len(input_candidate)):
if input[0] in input_candidate[candidate_ind: candidate_ind+1]:
break
pass
V_new = math.log(max(np.power(prob_new_prob,1.0/sequence_length_candidate[0]),1e-200))
V_old = math.log(max(np.power(prob_old_prob, 1.0/sequence_length),1e-200))
alphat = min(1,math.exp((V_new-V_old)/temperature))
else:
alphat=0
if choose_action([alphat, 1-alphat])==0:
calibrated_set.append(input[0][ind])
input=np.concatenate([input[:,:ind+1], input[:,ind+2:], input[:,:1]*0+option.dict_size+1], axis=1)
sequence_length-=1
# del(sta_vec[ind])
# sta_vec.append(0)
pos -= 1
print('oldprob,vold, vnew, alpha,simold, simnew',prob_old_prob,V_old,\
V_new,alphat,similarity_old,similarity_candidate)
print('Temperature:{:3.3f}: '.format(temperature)+' '.join(id2sen(input[0])))
pos += 1
generateset.append(id2sen(input[0]))
appendtext(id2sen(input[0]), option.save_path)
return generateset
def simulatedAnnealing_std(option, dataclass,forwardmodel, backwardmodel, sim_mode = 'keyword'):
    """Unsupervised paraphrase generation by simulated annealing.

    For each input sentence, repeatedly proposes a local edit at a cycling
    position — word replacement (action 0), insertion (action 1) or deletion
    (action 2) — scores old vs. new sentence with forward/backward language
    models (optionally multiplied by a keyword/semantic similarity term
    selected via ``sim_mode``), and accepts the proposal with Metropolis
    probability ``min(1, exp((V_new - V_old) / temperature))`` under a
    linearly decreasing temperature schedule. The final sentence per input is
    appended to the returned list and written to ``option.save_path``.

    NOTE(review): ``tfflag`` is hard-coded True, so ``forwardmodel`` /
    ``backwardmodel`` (the non-TF ``output_p`` path) are effectively unused
    here.
    """
    tfflag = True
    # Build TF1 forward/backward PTB language models and restore their checkpoints.
    if tfflag:
        with tf.name_scope("forward_train"):
            with tf.variable_scope("forward", reuse=None):
                m_forward = PTBModel(is_training=True,option=option)
        with tf.name_scope("forward_test"):
            with tf.variable_scope("forward", reuse=True):
                mtest_forward = PTBModel(is_training=False,option=option)
        var=tf.trainable_variables()
        var_forward=[x for x in var if x.name.startswith('forward')]
        saver_forward=tf.train.Saver(var_forward, max_to_keep=1)
        with tf.name_scope("backward_train"):
            with tf.variable_scope("backward", reuse=None):
                m_backward = PTBModel(is_training=True,option=option)
        with tf.name_scope("backward_test"):
            with tf.variable_scope("backward", reuse=True):
                mtest_backward = PTBModel(is_training=False, option=option)
        var=tf.trainable_variables()
        var_backward=[x for x in var if x.name.startswith('backward')]
        saver_backward=tf.train.Saver(var_backward, max_to_keep=1)
    init = tf.global_variables_initializer()
    session = tf.Session()
    session.run(init)
    saver_forward.restore(session, option.forward_save_path)
    saver_backward.restore(session, option.backward_save_path)
    # Pick the similarity function (and, for BERT variants, its model) by sim_mode.
    similaritymodel = None
    if sim_mode == 'keyword':
        similarity = similarity_keyword
    elif sim_mode =='keyword-bleu':
        similarity = similarity_keyword_bleu
    elif sim_mode =='keyword-bert':
        similaritymodel = BertEncoding()
        similarity = similarity_keyword_bert
    elif sim_mode =='keyword-bert-bleu':
        similaritymodel = BertEncoding()
        similarity = similarity_keyword_bert_bleu
    elif sim_mode =='semantic':
        similaritymodel = BertSimilarity()
        similarity = similarity_semantic
    elif sim_mode =='semantic-bleu':
        similaritymodel = BertSimilarity()
        similarity = similarity_semantic_bleu
    elif sim_mode =='semantic-keyword':
        similaritymodel = BertSimilarity()
        similarity = similarity_semantic_keyword
    # Load pre-trained word embeddings (pickled with latin1 encoding).
    fileobj = open(option.emb_path,'r')
    emb_word,emb_id=pkl.load(StrToBytes(fileobj), encoding='latin1')
    fileobj.close()
    sim=option.sim
    sta_vec=list(np.zeros([option.num_steps-1]))
    use_data, sta_vec_list = read_data_use(option, dataclass.sen2id)
    id2sen = dataclass.id2sen
    generateset = []
    # Linearly decreasing temperature schedule: C/100 * (sample_time+1 .. 2).
    C = 0.05
    temperatures = C*(1.0/100)*np.array(list(range(option.sample_time+1,1,-1)))
    print(temperatures)
    for sen_id in range(use_data.length):
        sta_vec=sta_vec_list[sen_id]
        input, sequence_length, _=use_data(1, sen_id)
        input_original=input[0]
        sta_vec_original = [x for x in sta_vec]
        # Mark rare (but in-vocabulary) words as keywords to preserve.
        for i in range(1,option.num_steps):
            if input[0][i]>option.rare_since and input[0][i]<option.dict_size:
                sta_vec[i-1]=1
        pos=0
        print(' '.join(id2sen(input[0])))
        print(sta_vec)
        # Candidate replacement vocabulary, seeded with the sentence's own words.
        calibrated_set = [x for x in input[0]]
        for iter in range(option.sample_time):
            temperature = temperatures[iter]
            print(temperature)
            # Edit position cycles through the sentence; action sampled from option.action_prob.
            ind=pos%(sequence_length[0]-1)
            action=choose_action(option.action_prob)
            calibrated_set = list(set(calibrated_set))
            if action==0: # word replacement (action: 0)
                # Score the current sentence: product of per-token LM probabilities
                # (plus the end-of-sentence token), optionally times similarity.
                if tfflag:
                    prob_old=run_epoch(session, mtest_forward, input, sequence_length,\
                            mode='use')[0]
                else:
                    prob_old= output_p(input, forwardmodel) #15,K
                tem=1
                for j in range(sequence_length[0]-1):
                    tem*=prob_old[j][input[0][j+1]]
                tem*=prob_old[j+1][option.dict_size+1]
                prob_old_prob=tem
                if sim!=None:
                    similarity_old=similarity(input, input_original, sta_vec, id2sen, emb_word,
                            option, similaritymodel)[0]
                    prob_old_prob*=similarity_old
                else:
                    similarity_old=-1
                # Propose replacements: rank candidate words by forward*backward LM
                # probability at the edit position.
                input_forward, input_backward, sequence_length_forward, sequence_length_backward =\
                        cut_from_point(input, sequence_length, ind, option, mode=action)
                if tfflag:
                    prob_forward=run_epoch(session, mtest_forward, input_forward, sequence_length_forward, mode='use')[0, ind%(sequence_length[0]-1),:]
                    prob_backward=run_epoch(session, mtest_backward, input_backward, sequence_length_backward, mode='use')[0, sequence_length[0]-1-ind%(sequence_length[0]-1),:]
                else:
                    prob_forward = output_p(input_forward, forwardmodel)[ind%(sequence_length[0]-1),:]
                    prob_backward = output_p(input_backward,backwardmodel)[
                            sequence_length[0]-1-ind%(sequence_length[0]-1),:]
                prob_mul=(prob_forward*prob_backward)
                input_candidate, sequence_length_candidate=generate_candidate_input_calibrated(input,\
                        sequence_length, ind, prob_mul, option.search_size, option, mode=action,\
                        calibrated_set=calibrated_set)
                # Score each candidate sentence with the forward LM.
                if tfflag:
                    prob_candidate_pre=run_epoch(session, mtest_forward, input_candidate,\
                            sequence_length_candidate,mode='use')
                else:
                    prob_candidate_pre = output_p(input_candidate, forwardmodel) # 100,15,300003
                prob_candidate=[]
                for i in range(len(input_candidate)):
                    tem=1
                    for j in range(sequence_length[0]-1):
                        tem*=prob_candidate_pre[i][j][input_candidate[i][j+1]]
                    tem*=prob_candidate_pre[i][j+1][option.dict_size+1]
                    prob_candidate.append(tem)
                prob_candidate=np.array(prob_candidate)
                if sim!=None:
                    similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
                            id2sen, emb_word, option, similaritymodel)
                    prob_candidate=prob_candidate*similarity_candidate
                # Sample one candidate proportionally to its (similarity-weighted) score.
                prob_candidate_norm=normalize(prob_candidate)
                prob_candidate_ind=sample_from_candidate(prob_candidate_norm)
                prob_candidate_prob=prob_candidate[prob_candidate_ind]
                # Length-normalized log-probabilities for the Metropolis ratio.
                # NOTE(review): both exponents here divide by the array
                # ``sequence_length`` whereas the insert branch below uses the
                # scalar ``sequence_length_candidate[0]`` — possibly intended to
                # be ``sequence_length[0]``; confirm before changing.
                V_new = math.log(max(np.power(prob_candidate_prob,1.0/sequence_length),1e-200))
                V_old = math.log(max(np.power(prob_old_prob, 1.0/sequence_length),1e-200))
                alphat = min(1,math.exp(min((V_new-V_old)/temperature,100)))
                # Accept only in-vocabulary replacements; remember the replaced
                # word in calibrated_set when the sentence actually changed.
                if choose_action([alphat, 1-alphat])==0 and input_candidate[prob_candidate_ind][ind]<option.dict_size:
                    input1=input_candidate[prob_candidate_ind:prob_candidate_ind+1]
                    if np.sum(input1[0])==np.sum(input[0]):
                        pass
                    else:
                        calibrated_set.append(input[0][ind])
                    input= input1
                    print('ind, action,oldprob,vold, vnew, alpha,simold, simnew', ind, action,prob_old_prob,V_old,\
                            V_new,alphat,similarity_old,similarity_candidate[prob_candidate_ind])
                print('Temperature:{:3.3f}: '.format(temperature)+' '.join(id2sen(input[0])))
            elif action==1: # word insert
                # Cannot insert beyond the fixed maximum sequence length.
                if sequence_length[0]>=option.num_steps:
                    pos += 1
                    continue
                    # break
                # Propose insertions ranked by forward*backward LM probability.
                input_forward, input_backward, sequence_length_forward, sequence_length_backward =\
                        cut_from_point(input, sequence_length, ind, option, mode=action)
                if tfflag:
                    prob_forward=run_epoch(session, mtest_forward, input_forward, sequence_length_forward, mode='use')[0, ind%(sequence_length[0]-1),:]
                    prob_backward=run_epoch(session, mtest_backward, input_backward, sequence_length_backward, mode='use')[0, sequence_length[0]-1-ind%(sequence_length[0]-1),:]
                else:
                    prob_forward = output_p(input_forward, forwardmodel)[ind%(sequence_length[0]-1),:]
                    prob_backward = output_p(input_backward,backwardmodel)[
                            sequence_length[0]-1-ind%(sequence_length[0]-1),:]
                prob_mul=(prob_forward*prob_backward)
                input_candidate, sequence_length_candidate=generate_candidate_input_calibrated(input,\
                        sequence_length, ind, prob_mul, option.search_size, option, mode=action,\
                        calibrated_set=calibrated_set)
                if tfflag:
                    prob_candidate_pre=run_epoch(session, mtest_forward, input_candidate,\
                            sequence_length_candidate,mode='use')
                else:
                    prob_candidate_pre = output_p(input_candidate, forwardmodel) # 100,15,300003
                prob_candidate=[]
                #for i in range(option.search_size):
                for i in range(len(input_candidate)):
                    tem=1
                    for j in range(sequence_length_candidate[0]-1):
                        tem*=prob_candidate_pre[i][j][input_candidate[i][j+1]]
                    tem*=prob_candidate_pre[i][j+1][option.dict_size+1]
                    prob_candidate.append(tem)
                prob_candidate=np.array(prob_candidate)
                if sim!=None:
                    similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
                            id2sen, emb_word, option, similaritymodel)
                    prob_candidate=prob_candidate*similarity_candidate
                prob_candidate_norm=normalize(prob_candidate)
                prob_candidate_ind=sample_from_candidate(prob_candidate_norm)
                prob_candidate_prob=prob_candidate[prob_candidate_ind]
                similarity_new = similarity_candidate[prob_candidate_ind]
                # Score the current (pre-insert) sentence for the acceptance ratio.
                if tfflag:
                    prob_old=run_epoch(session, mtest_forward, input,\
                            sequence_length,mode='use')[0]
                else:
                    prob_old = output_p(input, forwardmodel) # 100,15,300003
                tem=1
                for j in range(sequence_length[0]-1):
                    tem*=prob_old[j][input[0][j+1]]
                tem*=prob_old[j+1][option.dict_size+1]
                prob_old_prob=tem
                if sim!=None:
                    similarity_old=similarity(input, input_original,sta_vec,\
                            id2sen, emb_word, option, similaritymodel)[0]
                    prob_old_prob=prob_old_prob*similarity_old
                else:
                    similarity_old=-1
                V_new = math.log(max(np.power(prob_candidate_prob,1.0/sequence_length_candidate[0]),1e-200))
                V_old = math.log(max(np.power(prob_old_prob, 1.0/sequence_length),1e-200))
                alphat = min(1,math.exp(min((V_new-V_old)/temperature,200)))
                # On acceptance the sentence grows by one token and the edit
                # position advances past the inserted word.
                if choose_action([alphat, 1-alphat])==0 and input_candidate[prob_candidate_ind][ind]<option.dict_size:
                    input=input_candidate[prob_candidate_ind:prob_candidate_ind+1]
                    sequence_length+=1
                    pos+=1
                    # sta_vec.insert(ind, 0.0)
                    # del(sta_vec[-1])
                    print('ind, action,oldprob,vold, vnew, alpha,simold, simnew', ind, action,prob_old_prob,V_old,\
                            V_new,alphat,similarity_old,similarity_new)
                print('Temperature:{:3.3f}: '.format(temperature)+' '.join(id2sen(input[0])))
            elif action==2: # word delete
                # Keep at least two tokens and never delete at position 0.
                if sequence_length[0]<=2 or ind==0:
                    pos += 1
                    continue
                if tfflag:
                    prob_old=run_epoch(session, mtest_forward, input, sequence_length,\
                            mode='use')[0]
                else:
                    prob_old= output_p(input, forwardmodel) #15,K
                tem=1
                for j in range(sequence_length[0]-1):
                    tem*=prob_old[j][input[0][j+1]]
                tem*=prob_old[j+1][option.dict_size+1]
                prob_old_prob=tem
                if sim!=None:
                    similarity_old=similarity(input, input_original,sta_vec,\
                            id2sen, emb_word, option, similaritymodel)[0]
                    prob_old_prob=prob_old_prob*similarity_old
                else:
                    similarity_old=-1
                # Deletion yields a single candidate (prob_mul is None in this mode).
                input_candidate, sequence_length_candidate=generate_candidate_input_calibrated(input,\
                        sequence_length, ind, None, option.search_size, option,\
                        mode=action,calibrated_set=calibrated_set)
                # delete sentence
                if tfflag:
                    prob_new=run_epoch(session, mtest_forward, input_candidate,\
                            sequence_length_candidate,mode='use')[0]
                else:
                    prob_new = output_p(input_candidate, forwardmodel)
                tem=1
                for j in range(sequence_length_candidate[0]-1):
                    tem*=prob_new[j][input_candidate[0][j+1]]
                tem*=prob_new[j+1][option.dict_size+1]
                prob_new_prob=tem
                if sim!=None:
                    similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
                            id2sen, emb_word, option, similaritymodel)[0]
                    prob_new_prob=prob_new_prob*similarity_candidate
                #alpha is acceptance ratio of current proposal
                # Only consider deletion if the shortened sentence appears in the
                # candidate set; otherwise reject outright (alphat = 0).
                if input[0] in input_candidate:
                    for candidate_ind in range(len(input_candidate)):
                        if input[0] in input_candidate[candidate_ind: candidate_ind+1]:
                            break
                        pass
                    V_new = math.log(max(np.power(prob_new_prob,1.0/sequence_length_candidate[0]),1e-200))
                    V_old = math.log(max(np.power(prob_old_prob, 1.0/sequence_length),1e-200))
                    alphat = min(1,math.exp((V_new-V_old)/temperature))
                else:
                    alphat=0
                # On acceptance: remember the deleted word, splice it out and pad
                # with the end-of-sentence id, shrink the length, step back.
                if choose_action([alphat, 1-alphat])==0:
                    calibrated_set.append(input[0][ind])
                    input=np.concatenate([input[:,:ind+1], input[:,ind+2:], input[:,:1]*0+option.dict_size+1], axis=1)
                    sequence_length-=1
                    # del(sta_vec[ind])
                    # sta_vec.append(0)
                    pos -= 1
                    print('oldprob,vold, vnew, alpha,simold, simnew',prob_old_prob,V_old,\
                            V_new,alphat,similarity_old,similarity_candidate)
                print('Temperature:{:3.3f}: '.format(temperature)+' '.join(id2sen(input[0])))
            pos += 1
        generateset.append(id2sen(input[0]))
        appendtext(id2sen(input[0]), option.save_path)
    return generateset
def simulatedAnnealing_calibrated(option, dataclass,forwardmodel, backwardmodel, sim_mode = 'keyword'):
tfflag = True
if tfflag:
with tf.name_scope("forward_train"):
with tf.variable_scope("forward", reuse=None):
m_forward = PTBModel(is_training=True,option=option)
with tf.name_scope("forward_test"):
with tf.variable_scope("forward", reuse=True):
mtest_forward = PTBModel(is_training=False,option=option)
var=tf.trainable_variables()
var_forward=[x for x in var if x.name.startswith('forward')]
saver_forward=tf.train.Saver(var_forward, max_to_keep=1)
with tf.name_scope("backward_train"):
with tf.variable_scope("backward", reuse=None):
m_backward = PTBModel(is_training=True,option=option)
with tf.name_scope("backward_test"):
with tf.variable_scope("backward", reuse=True):
mtest_backward = PTBModel(is_training=False, option=option)
var=tf.trainable_variables()
var_backward=[x for x in var if x.name.startswith('backward')]
saver_backward=tf.train.Saver(var_backward, max_to_keep=1)
init = tf.global_variables_initializer()
session = tf.Session()
session.run(init)
saver_forward.restore(session, option.forward_save_path)
saver_backward.restore(session, option.backward_save_path)
similaritymodel = None
if sim_mode == 'keyword':
similarity = similarity_keyword
elif sim_mode =='keyword-bleu':
similarity = similarity_keyword_bleu
elif sim_mode =='keyword-bert':
similaritymodel = BertEncoding()
similarity = similarity_keyword_bert
elif sim_mode =='semantic':
similaritymodel = BertSimilarity()
similarity = similarity_semantic
elif sim_mode =='semantic-bleu':
similaritymodel = BertSimilarity()
similarity = similarity_semantic_bleu
elif sim_mode =='semantic-keyword':
similaritymodel = BertSimilarity()
similarity = similarity_semantic_keyword
fileobj = open(option.emb_path,'r')
emb_word,emb_id=pkl.load(StrToBytes(fileobj), encoding='latin1')
fileobj.close()
sim=option.sim
sta_vec=list(np.zeros([option.num_steps-1]))
use_data, sta_vec_list = read_data_use(option, dataclass.sen2id)
id2sen = dataclass.id2sen
generateset = []
C = 2
temperatures = 0.3+ C*(1.0/100)*np.array(list(range(option.sample_time+1,1,-1)))
print(temperatures)
for sen_id in range(use_data.length):
sta_vec=sta_vec_list[sen_id%len(sta_vec)]
input, sequence_length, _=use_data(1, sen_id)
input_original=input[0]
sta_vec_original = [x for x in sta_vec]
for i in range(1,option.num_steps):
if input[0][i]>option.rare_since and input[0][i]<option.dict_size:
sta_vec[i-1]=1
pos=0
print(' '.join(id2sen(input[0])))
print(sta_vec)
calibrated_set = [x for x in input[0]]
for iter in range(option.sample_time):
temperature = temperatures[iter]
ind=pos%(sequence_length[0]-1)
action=choose_action(option.action_prob)
calibrated_set = list(set(calibrated_set))
if action==0: # word replacement (action: 0)
if tfflag:
prob_old=run_epoch(session, mtest_forward, input, sequence_length,\
mode='use')[0]
else:
prob_old= output_p(input, forwardmodel) #15,K
tem=1
for j in range(sequence_length[0]-1):
tem*=prob_old[j][input[0][j+1]]
tem*=prob_old[j+1][option.dict_size+1]
prob_old_prob=tem
if sim!=None:
similarity_old=similarity(input, input_original, sta_vec, id2sen, emb_word,
option, similaritymodel)[0]
prob_old_prob*=similarity_old
else:
similarity_old=-1
input_forward, input_backward, sequence_length_forward, sequence_length_backward =\
cut_from_point(input, sequence_length, ind, option, mode=action)
if tfflag:
prob_forward=run_epoch(session, mtest_forward, input_forward, sequence_length_forward, mode='use')[0, ind%(sequence_length[0]-1),:]
prob_backward=run_epoch(session, mtest_backward, input_backward, sequence_length_backward, mode='use')[0, sequence_length[0]-1-ind%(sequence_length[0]-1),:]
else:
prob_forward = output_p(input_forward, forwardmodel)[ind%(sequence_length[0]-1),:]
prob_backward = output_p(input_backward,backwardmodel)[
sequence_length[0]-1-ind%(sequence_length[0]-1),:]
prob_mul=(prob_forward*prob_backward)
input_candidate, sequence_length_candidate=generate_candidate_input_calibrated(input,\
sequence_length, ind, prob_mul, option.search_size, option, mode=action,\
calibrated_set=calibrated_set)
if tfflag:
prob_candidate_pre=run_epoch(session, mtest_forward, input_candidate,\
sequence_length_candidate,mode='use')
else:
prob_candidate_pre = output_p(input_candidate, forwardmodel) # 100,15,300003
prob_candidate=[]
for i in range(len(input_candidate)):
tem=1
for j in range(sequence_length[0]-1):
tem*=prob_candidate_pre[i][j][input_candidate[i][j+1]]
tem*=prob_candidate_pre[i][j+1][option.dict_size+1]
prob_candidate.append(tem)
prob_candidate=np.array(prob_candidate)
if sim!=None:
similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
id2sen, emb_word, option, similaritymodel)
prob_candidate=prob_candidate*similarity_candidate
prob_candidate_norm=normalize(prob_candidate)
prob_candidate_ind=sample_from_candidate(prob_candidate_norm)
prob_candidate_prob=prob_candidate[prob_candidate_ind]
V_new = math.log(max(prob_candidate_prob,1e-200))
V_old = math.log(max(prob_old_prob,1e-200))
alphat = min(1,math.exp(min((V_new-V_old)/temperature,100)))
if choose_action([alphat, 1-alphat])==0 and input_candidate[prob_candidate_ind][ind]<option.dict_size:
input1=input_candidate[prob_candidate_ind:prob_candidate_ind+1]
if np.sum(input1[0])==np.sum(input[0]):
pass
else:
calibrated_set.append(input[0][ind])
input= input1
print('ind, action,oldprob,vold, vnew, alpha,simold, simnew', ind, action,prob_old_prob,V_old,\
V_new,alphat,0,0)
print('Temperature:{:3.3f}: '.format(temperature)+' '.join(id2sen(input[0])))
elif action==1: # word insert
if sequence_length[0]>=option.num_steps:
pos += 1
break
input_forward, input_backward, sequence_length_forward, sequence_length_backward =\
cut_from_point(input, sequence_length, ind, option, mode=action)
if tfflag:
prob_forward=run_epoch(session, mtest_forward, | |
50, "all"),
("Fcst", "T", "SCALAR", 6, 9, 70, "all"),
("Fcst", "T", "SCALAR", 9, 12, 70, "all"),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 47, ["AboveElev"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 52, ["BelowElev"]),
],
"checkStrings": ["Highs 47 to 52"],
},
{
"name": "Temp_25-30",
"commentary": """
MaxT -- 25-30
A specific range of five degrees (5 to 10 degrees in mountainous areas)
Lows 20 to 25
Highs 47 to 52
""",
"productType": "Phrase_Test_Local",
"createGrids": [
("Fcst", "T", "SCALAR", 0, 3, 70, "all"),
("Fcst", "T", "SCALAR", 3, 6, 50, "all"),
("Fcst", "T", "SCALAR", 6, 9, 70, "all"),
("Fcst", "T", "SCALAR", 9, 12, 70, "all"),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 25, ["AboveElev"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 30, ["BelowElev"]),
],
"checkStrings": ["Highs 25 to 30"],
},
{
"name": "Temp_103-108",
"commentary": """
MaxT -- 103 - 108
A specific range of five degrees (5 to 10 degrees in mountainous areas)
Lows 20 to 25
Highs 47 to 52
""",
"productType": "Phrase_Test_Local",
"createGrids": [
("Fcst", "T", "SCALAR", 0, 3, 70, "all"),
("Fcst", "T", "SCALAR", 3, 6, 50, "all"),
("Fcst", "T", "SCALAR", 6, 9, 70, "all"),
("Fcst", "T", "SCALAR", 9, 12, 70, "all"),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 103, ["AboveElev"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 108, ["BelowElev"]),
],
"checkStrings": ["Highs 103 to 108"],
},
{
"name": "Temp_100-105",
"commentary": """
MaxT -- 100 - 105
""",
"productType": "Phrase_Test_Local",
"createGrids": [
("Fcst", "T", "SCALAR", 0, 3, 70, "all"),
("Fcst", "T", "SCALAR", 3, 6, 50, "all"),
("Fcst", "T", "SCALAR", 6, 9, 70, "all"),
("Fcst", "T", "SCALAR", 9, 12, 70, "all"),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 100, ["AboveElev"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 105, ["BelowElev"]),
],
"checkStrings": ["Highs 100 to 105"],
},
{
"name": "Temp_98-102",
"commentary": """
MaxT -- 98 - 102
""",
"productType": "Phrase_Test_Local",
"createGrids": [
("Fcst", "T", "SCALAR", 0, 3, 70, "all"),
("Fcst", "T", "SCALAR", 3, 6, 50, "all"),
("Fcst", "T", "SCALAR", 6, 9, 70, "all"),
("Fcst", "T", "SCALAR", 9, 12, 70, "all"),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 98, ["AboveElev"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 102, ["BelowElev"]),
],
"checkStrings": ["Highs around 100"],
},
{
"name": "Temp_98-103",
"commentary": """
MaxT -- 98 - 103
""",
"productType": "Phrase_Test_Local",
"createGrids": [
("Fcst", "T", "SCALAR", 0, 3, 70, "all"),
("Fcst", "T", "SCALAR", 3, 6, 50, "all"),
("Fcst", "T", "SCALAR", 6, 9, 70, "all"),
("Fcst", "T", "SCALAR", 9, 12, 70, "all"),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 98, ["AboveElev"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 103, ["BelowElev"]),
],
"checkStrings": ["Highs 98 to 103"],
},
# Crossing LOWER, MID, UPPER boundaries
{
"name": "Temp_50-54",
"commentary": """
MaxT -- 50-54
""",
"productType": "Phrase_Test_Local",
"createGrids": [
("Fcst", "T", "SCALAR", 0, 3, 70, "all"),
("Fcst", "T", "SCALAR", 3, 6, 50, "all"),
("Fcst", "T", "SCALAR", 6, 9, 70, "all"),
("Fcst", "T", "SCALAR", 9, 12, 70, "all"),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 50, ["AboveElev"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 54, ["BelowElev"]),
],
"checkStrings": ["Highs in the lower 50s"],
},
{
"name": "Temp_53-54",
"commentary": """
MaxT -- 53-54
""",
"productType": "Phrase_Test_Local",
"createGrids": [
("Fcst", "T", "SCALAR", 0, 3, 70, "all"),
("Fcst", "T", "SCALAR", 3, 6, 50, "all"),
("Fcst", "T", "SCALAR", 6, 9, 70, "all"),
("Fcst", "T", "SCALAR", 9, 12, 70, "all"),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 53, ["AboveElev"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 54, ["BelowElev"]),
],
"checkStrings": ["Highs in the mid 50s"],
},
{
"name": "Temp_54-57",
"commentary": """
MaxT -- 54-57
""",
"productType": "Phrase_Test_Local",
"createGrids": [
("Fcst", "T", "SCALAR", 0, 3, 70, "all"),
("Fcst", "T", "SCALAR", 3, 6, 50, "all"),
("Fcst", "T", "SCALAR", 6, 9, 70, "all"),
("Fcst", "T", "SCALAR", 9, 12, 70, "all"),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 54, ["AboveElev"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 57, ["BelowElev"]),
],
"checkStrings": ["Highs in the mid 50s"],
},
{
"name": "Temp_56-58",
"commentary": """
MaxT -- 56-58
""",
"productType": "Phrase_Test_Local",
"createGrids": [
("Fcst", "T", "SCALAR", 0, 3, 70, "all"),
("Fcst", "T", "SCALAR", 3, 6, 50, "all"),
("Fcst", "T", "SCALAR", 6, 9, 70, "all"),
("Fcst", "T", "SCALAR", 9, 12, 70, "all"),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 56, ["AboveElev"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 58, ["BelowElev"]),
],
"checkStrings": ["Highs in the upper 50s"],
},
{
"name": "Temp_-2-2",
"commentary": """
MaxT -- -2 - 2
Implied range terminology. NEAR zero is also permitted.
""",
"productType": "Phrase_Test_Local",
"createGrids": [
("Fcst", "T", "SCALAR", 0, 3, 70, "all"),
("Fcst", "T", "SCALAR", 3, 6, 50, "all"),
("Fcst", "T", "SCALAR", 6, 9, 70, "all"),
("Fcst", "T", "SCALAR", 9, 12, 70, "all"),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", -2, ["AboveElev"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 2, ["BelowElev"]),
],
"checkStrings": ["Highs near zero"],
},
{
"name": "Temp_2-3",
"commentary": """
MaxT -- 2-3
""",
"productType": "Phrase_Test_Local",
"createGrids": [
("Fcst", "T", "SCALAR", 0, 3, 70, "all"),
("Fcst", "T", "SCALAR", 3, 6, 50, "all"),
("Fcst", "T", "SCALAR", 6, 9, 70, "all"),
("Fcst", "T", "SCALAR", 9, 12, 70, "all"),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 2, ["AboveElev"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 3, ["BelowElev"]),
],
"checkStrings": ["Highs around 3"],
},
{
"name": "Temp_4-6",
"commentary": """
MaxT -- 4-6
""",
"productType": "Phrase_Test_Local",
"createGrids": [
("Fcst", "T", "SCALAR", 0, 3, 70, "all"),
("Fcst", "T", "SCALAR", 3, 6, 50, "all"),
("Fcst", "T", "SCALAR", 6, 9, 70, "all"),
("Fcst", "T", "SCALAR", 9, 12, 70, "all"),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 4, ["AboveElev"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 6, ["BelowElev"]),
],
"checkStrings": ["Highs around 5"],
},
{
"name": "Temp_6-9",
"commentary": """
MaxT -- 6-9
""",
"productType": "Phrase_Test_Local",
"createGrids": [
("Fcst", "T", "SCALAR", 0, 3, 70, "all"),
("Fcst", "T", "SCALAR", 3, 6, 50, "all"),
("Fcst", "T", "SCALAR", 6, 9, 70, "all"),
("Fcst", "T", "SCALAR", 9, 12, 70, "all"),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 6, ["AboveElev"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 9, ["BelowElev"]),
],
"checkStrings": ["Highs around 8"],
},
{
"name": "Temp_12-14",
"commentary": """
MaxT -- 12-14
""",
"productType": "Phrase_Test_Local",
"createGrids": [
("Fcst", "T", "SCALAR", 0, 3, 70, "all"),
("Fcst", "T", "SCALAR", 3, 6, 50, "all"),
("Fcst", "T", "SCALAR", 6, 9, 70, "all"),
("Fcst", "T", "SCALAR", 9, 12, 70, "all"),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 12, ["AboveElev"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 14, ["BelowElev"]),
],
"checkStrings": ["Highs around 13"],
},
{
"name": "Temp_10-14",
"commentary": """
MaxT -- 10-14
""",
"productType": "Phrase_Test_Local",
"createGrids": [
("Fcst", "T", "SCALAR", 0, 3, 70, "all"),
("Fcst", "T", "SCALAR", 3, 6, 50, "all"),
("Fcst", "T", "SCALAR", 6, 9, 70, "all"),
("Fcst", "T", "SCALAR", 9, 12, 70, "all"),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 10, ["AboveElev"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 14, ["BelowElev"]),
],
"checkStrings": ["Highs around 12"],
},
{
"name": "Temp_11-14",
"commentary": """
MaxT -- 11-14
""",
"productType": "Phrase_Test_Local",
"createGrids": [
("Fcst", "T", "SCALAR", 0, 3, 70, "all"),
("Fcst", "T", "SCALAR", 3, 6, 50, "all"),
("Fcst", "T", "SCALAR", 6, 9, 70, "all"),
("Fcst", "T", "SCALAR", 9, 12, 70, "all"),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 11, ["AboveElev"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 14, ["BelowElev"]),
],
"checkStrings": ["Highs around 13"],
},
{
"name": "Temp_11-16",
"commentary": """
MaxT -- 11-16
""",
"productType": "Phrase_Test_Local",
"createGrids": [
("Fcst", "T", "SCALAR", 0, 3, 70, "all"),
("Fcst", "T", "SCALAR", 3, 6, 50, "all"),
("Fcst", "T", "SCALAR", 6, 9, 70, "all"),
("Fcst", "T", "SCALAR", 9, 12, 70, "all"),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 11, ["AboveElev"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 16, ["BelowElev"]),
],
"checkStrings": ["Highs 11 to 16"],
},
{
"name": "Temp_-5-5",
"commentary": """
MaxT -- -5 - 5
""",
"productType": "Phrase_Test_Local",
"createGrids": [
("Fcst", "T", "SCALAR", 0, 3, 70, "all"),
("Fcst", "T", "SCALAR", 3, 6, 50, "all"),
("Fcst", "T", "SCALAR", 6, 9, 70, "all"),
("Fcst", "T", "SCALAR", 9, 12, 70, "all"),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", -5, ["AboveElev"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 5, ["BelowElev"]),
],
"checkStrings": ["Highs 5 below to 5 above zero"],
},
{
"name": "Temp_-5--10",
"commentary": """
MaxT -- -5 - -10
""",
"productType": "Phrase_Test_Local",
"createGrids": [
("Fcst", "T", "SCALAR", 0, 3, 70, "all"),
("Fcst", "T", "SCALAR", 3, 6, 50, "all"),
("Fcst", "T", "SCALAR", 6, 9, | |
{'name': '土默特右旗', 'pid': 96, 'zipcode': 14100},
1430: {'name': '昆都仑区', 'pid': 96, 'zipcode': 14000},
1431: {'name': '白云鄂博矿区', 'pid': 96, 'zipcode': 14000},
1432: {'name': '石拐区', 'pid': 96, 'zipcode': 14000},
1433: {'name': '达尔罕茂明安联合旗', 'pid': 96, 'zipcode': 14500},
1434: {'name': '青山区', 'pid': 96, 'zipcode': 14000},
1435: {'name': '乌达区', 'pid': 97, 'zipcode': 16000},
1436: {'name': '海勃湾区', 'pid': 97, 'zipcode': 16000},
1437: {'name': '海南区', 'pid': 97, 'zipcode': 16000},
1438: {'name': '元宝山区', 'pid': 98, 'zipcode': 24000},
1439: {'name': '克什克腾旗', 'pid': 98, 'zipcode': 25350},
1440: {'name': '喀喇沁旗', 'pid': 98, 'zipcode': 24400},
1441: {'name': '宁城县', 'pid': 98, 'zipcode': 24000},
1442: {'name': '巴林右旗', 'pid': 98, 'zipcode': 25150},
1443: {'name': '巴林左旗', 'pid': 98, 'zipcode': 25450},
1444: {'name': '敖汉旗', 'pid': 98, 'zipcode': 24300},
1445: {'name': '松山区', 'pid': 98, 'zipcode': 24000},
1446: {'name': '林西县', 'pid': 98, 'zipcode': 24000},
1447: {'name': '红山区', 'pid': 98, 'zipcode': 24000},
1448: {'name': '翁牛特旗', 'pid': 98, 'zipcode': 24500},
1449: {'name': '阿鲁科尔沁旗', 'pid': 98, 'zipcode': 25500},
1450: {'name': '奈曼旗', 'pid': 99, 'zipcode': 28300},
1451: {'name': '库伦旗', 'pid': 99, 'zipcode': 28200},
1452: {'name': '开鲁县', 'pid': 99, 'zipcode': 28000},
1453: {'name': '扎鲁特旗', 'pid': 99, 'zipcode': 29100},
1454: {'name': '科尔沁区', 'pid': 99, 'zipcode': 28000},
1455: {'name': '科尔沁左翼中旗', 'pid': 99, 'zipcode': 29300},
1456: {'name': '科尔沁左翼后旗', 'pid': 99, 'zipcode': 28100},
1457: {'name': '霍林郭勒市', 'pid': 99, 'zipcode': 28000},
1458: {'name': '东胜区', 'pid': 100, 'zipcode': 17000},
1459: {'name': '乌审旗', 'pid': 100, 'zipcode': 17300},
1460: {'name': '伊金霍洛旗', 'pid': 100, 'zipcode': 17200},
1461: {'name': '准格尔旗', 'pid': 100, 'zipcode': 17100},
1462: {'name': '杭锦旗', 'pid': 100, 'zipcode': 17400},
1463: {'name': '达拉特旗', 'pid': 100, 'zipcode': 14300},
1464: {'name': '鄂东胜区', 'pid': 100, 'zipcode': 17000},
1465: {'name': '鄂托克前旗', 'pid': 100, 'zipcode': 16200},
1466: {'name': '鄂托克旗', 'pid': 100, 'zipcode': 16100},
1467: {'name': '扎兰屯市', 'pid': 101, 'zipcode': 21000},
1468: {'name': '新巴尔虎右旗', 'pid': 101, 'zipcode': 21300},
1469: {'name': '新巴尔虎左旗', 'pid': 101, 'zipcode': 21200},
1470: {'name': '根河市', 'pid': 101, 'zipcode': 21000},
1471: {'name': '海拉尔区', 'pid': 101, 'zipcode': 21000},
1472: {'name': '满洲里市', 'pid': 101, 'zipcode': 21000},
1473: {'name': '牙克石市', 'pid': 101, 'zipcode': 21000},
1474: {'name': '莫力达瓦达斡尔族自治旗', 'pid': 101, 'zipcode': 21000},
1475: {'name': '鄂伦春自治旗', 'pid': 101, 'zipcode': 21000},
1476: {'name': '鄂温克族自治旗', 'pid': 101, 'zipcode': 21000},
1477: {'name': '阿荣旗', 'pid': 101, 'zipcode': 162750},
1478: {'name': '陈巴尔虎旗', 'pid': 101, 'zipcode': 21500},
1479: {'name': '额尔古纳市', 'pid': 101, 'zipcode': 21000},
1480: {'name': '临河区', 'pid': 102, 'zipcode': 15000},
1481: {'name': '乌拉特中旗', 'pid': 102, 'zipcode': 15300},
1482: {'name': '乌拉特前旗', 'pid': 102, 'zipcode': 14400},
1483: {'name': '乌拉特后旗', 'pid': 102, 'zipcode': 15500},
1484: {'name': '五原县', 'pid': 102, 'zipcode': 15000},
1485: {'name': '杭锦后旗', 'pid': 102, 'zipcode': 15400},
1486: {'name': '磴口县', 'pid': 102, 'zipcode': 15000},
1487: {'name': '丰镇市', 'pid': 103, 'zipcode': 12000},
1488: {'name': '兴和县', 'pid': 103, 'zipcode': 12000},
1489: {'name': '凉城县', 'pid': 103, 'zipcode': 12000},
1490: {'name': '化德县', 'pid': 103, 'zipcode': 12000},
1491: {'name': '卓资县', 'pid': 103, 'zipcode': 12000},
1492: {'name': '商都县', 'pid': 103, 'zipcode': 12000},
1493: {'name': '四子王旗', 'pid': 103, 'zipcode': 11800},
1494: {'name': '察哈尔右翼中旗', 'pid': 103, 'zipcode': 13500},
1495: {'name': '察哈尔右翼前旗', 'pid': 103, 'zipcode': 12200},
1496: {'name': '察哈尔右翼后旗', 'pid': 103, 'zipcode': 12400},
1497: {'name': '集宁区', 'pid': 103, 'zipcode': 12000},
1498: {'name': '乌兰浩特市', 'pid': 104, 'zipcode': 137400},
1499: {'name': '扎赉特旗', 'pid': 104, 'zipcode': 137600},
1500: {'name': '科尔沁右翼中旗', 'pid': 104, 'zipcode': 29400},
1501: {'name': '科尔沁右翼前旗', 'pid': 104, 'zipcode': 137400},
1502: {'name': '突泉县', 'pid': 104, 'zipcode': 137400},
1503: {'name': '阿尔山市', 'pid': 104, 'zipcode': 137400},
1504: {'name': '东乌珠穆沁旗', 'pid': 105, 'zipcode': 26300},
1505: {'name': '二连浩特市', 'pid': 105, 'zipcode': 26000},
1506: {'name': '多伦县', 'pid': 105, 'zipcode': 26000},
1507: {'name': '太仆寺旗', 'pid': 105, 'zipcode': 27000},
1508: {'name': '正蓝旗', 'pid': 105, 'zipcode': 27200},
1509: {'name': '正镶白旗', 'pid': 105, 'zipcode': 13800},
1510: {'name': '苏尼特右旗', 'pid': 105, 'zipcode': 11200},
1511: {'name': '苏尼特左旗', 'pid': 105, 'zipcode': 11300},
1512: {'name': '西乌珠穆沁旗', 'pid': 105, 'zipcode': 26200},
1513: {'name': '锡林浩特市', 'pid': 105, 'zipcode': 26000},
1514: {'name': '镶黄旗', 'pid': 105, 'zipcode': 13250},
1515: {'name': '阿巴嘎旗', 'pid': 105, 'zipcode': 11400},
1516: {'name': '阿拉善右旗', 'pid': 106, 'zipcode': 737300},
1517: {'name': '阿拉善左旗', 'pid': 106, 'zipcode': 750300},
1518: {'name': '额济纳旗', 'pid': 106, 'zipcode': 735400},
1519: {'name': '东陵区', 'pid': 107, 'zipcode': 110000},
1520: {'name': '于洪区', 'pid': 107, 'zipcode': 110000},
1521: {'name': '和平区', 'pid': 107, 'zipcode': 110000},
1522: {'name': '大东区', 'pid': 107, 'zipcode': 110000},
1523: {'name': '康平县', 'pid': 107, 'zipcode': 110000},
1524: {'name': '新民市', 'pid': 107, 'zipcode': 110000},
1525: {'name': '沈北新区', 'pid': 107, 'zipcode': 110000},
1526: {'name': '沈河区', 'pid': 107, 'zipcode': 110000},
1527: {'name': '法库县', 'pid': 107, 'zipcode': 110000},
1528: {'name': '皇姑区', 'pid': 107, 'zipcode': 110000},
1529: {'name': '苏家屯区', 'pid': 107, 'zipcode': 110000},
1530: {'name': '辽中县', 'pid': 107, 'zipcode': 110000},
1531: {'name': '铁西区', 'pid': 107, 'zipcode': 110000},
1532: {'name': '中山区', 'pid': 108, 'zipcode': 116000},
1533: {'name': '庄河市', 'pid': 108, 'zipcode': 116000},
1534: {'name': '旅顺口区', 'pid': 108, 'zipcode': 116000},
1535: {'name': '普兰店市', 'pid': 108, 'zipcode': 116000},
1536: {'name': '沙河口区', 'pid': 108, 'zipcode': 116000},
1537: {'name': '瓦房店市', 'pid': 108, 'zipcode': 116000},
1538: {'name': '甘井子区', 'pid': 108, 'zipcode': 116000},
1539: {'name': '西岗区', 'pid': 108, 'zipcode': 116000},
1540: {'name': '金州区', 'pid': 108, 'zipcode': 116000},
1541: {'name': '长海县', 'pid': 108, 'zipcode': 116000},
1542: {'name': '千山区', 'pid': 109, 'zipcode': 114000},
1543: {'name': '台安县', 'pid': 109, 'zipcode': 114000},
1544: {'name': '岫岩满族自治县', 'pid': 109, 'zipcode': 114300},
1545: {'name': '海城市', 'pid': 109, 'zipcode': 114000},
1546: {'name': '立山区', 'pid': 109, 'zipcode': 114000},
1547: {'name': '铁东区', 'pid': 109, 'zipcode': 114000},
1548: {'name': '铁西区', 'pid': 109, 'zipcode': 114000},
1549: {'name': '东洲区', 'pid': 110, 'zipcode': 113100},
1550: {'name': '抚顺县', 'pid': 110, 'zipcode': 113100},
1551: {'name': '新宾满族自治县', 'pid': 110, 'zipcode': 113200},
1552: {'name': '新抚区', 'pid': 110, 'zipcode': 113100},
1553: {'name': '望花区', 'pid': 110, 'zipcode': 113100},
1554: {'name': '清原满族自治县', 'pid': 110, 'zipcode': 113300},
1555: {'name': '顺城区', 'pid': 110, 'zipcode': 113100},
1556: {'name': '南芬区', 'pid': 111, 'zipcode': 117100},
1557: {'name': '平山区', 'pid': 111, 'zipcode': 117100},
1558: {'name': '明山区', 'pid': 111, 'zipcode': 117100},
1559: {'name': '本溪满族自治县', 'pid': 111, 'zipcode': 117100},
1560: {'name': '桓仁满族自治县', 'pid': 111, 'zipcode': 117200},
1561: {'name': '溪湖区', 'pid': 111, 'zipcode': 117100},
1562: {'name': '东港市', 'pid': 112, 'zipcode': 118000},
1563: {'name': '元宝区', 'pid': 112, 'zipcode': 118000},
1564: {'name': '凤城市', 'pid': 112, 'zipcode': 118000},
1565: {'name': '宽甸满族自治县', 'pid': 112, 'zipcode': 118200},
1566: {'name': '振兴区', 'pid': 112, 'zipcode': 118000},
1567: {'name': '振安区', 'pid': 112, 'zipcode': 118000},
1568: {'name': '义县', 'pid': 113, 'zipcode': 121000},
1569: {'name': '凌河区', 'pid': 113, 'zipcode': 121000},
1570: {'name': '凌海市', 'pid': 113, 'zipcode': 121000},
1571: {'name': '北镇市', 'pid': 113, 'zipcode': 121000},
1572: {'name': '古塔区', 'pid': 113, 'zipcode': 121000},
1573: {'name': '太和区', 'pid': 113, 'zipcode': 121000},
1574: {'name': '黑山县', 'pid': 113, 'zipcode': 121000},
1575: {'name': '大石桥市', 'pid': 114, 'zipcode': 115000},
1576: {'name': '盖州市', 'pid': 114, 'zipcode': 115000},
1577: {'name': '站前区', 'pid': 114, 'zipcode': 115000},
1578: {'name': '老边区', 'pid': 114, 'zipcode': 115000},
1579: {'name': '西市区', 'pid': 114, 'zipcode': 115000},
1580: {'name': '鲅鱼圈区', 'pid': 114, 'zipcode': 115000},
1581: {'name': '太平区', 'pid': 115, 'zipcode': 123100},
1582: {'name': '彰武县', 'pid': 115, 'zipcode': 123100},
1583: {'name': '新邱区', 'pid': 115, 'zipcode': 123100},
1584: {'name': '海州区', 'pid': 115, 'zipcode': 123100},
1585: {'name': '清河门区', 'pid': 115, 'zipcode': 123100},
1586: {'name': '细河区', 'pid': 115, 'zipcode': 123100},
1587: {'name': '蒙古族自治县', 'pid': 115, 'zipcode': 123100},
1588: {'name': '太子河区', 'pid': 116, 'zipcode': 111200},
1589: {'name': '宏伟区', 'pid': 116, 'zipcode': 111200},
1590: {'name': '弓长岭区', 'pid': 116, 'zipcode': 111200},
1591: {'name': '文圣区', 'pid': 116, 'zipcode': 111200},
1592: {'name': '灯塔市', 'pid': 116, 'zipcode': 111200},
1593: {'name': '白塔区', 'pid': 116, 'zipcode': 111200},
1594: {'name': '辽阳县', 'pid': 116, 'zipcode': 111200},
1595: {'name': '兴隆台区', 'pid': 117, 'zipcode': 124000},
1596: {'name': '双台子区', 'pid': 117, 'zipcode': 124000},
1597: {'name': '大洼区', 'pid': 117, 'zipcode': 124000},
1598: {'name': '盘山县', 'pid': 117, 'zipcode': 124000},
1599: {'name': '开原市', 'pid': 118, 'zipcode': 112600},
1600: {'name': '昌图县', 'pid': 118, 'zipcode': 112600},
1601: {'name': '清河区', 'pid': 118, 'zipcode': 112600},
1602: {'name': '西丰县', 'pid': 118, 'zipcode': 112600},
1603: {'name': '调兵山市', 'pid': 118, 'zipcode': 112600},
1604: {'name': '铁岭县', 'pid': 118, 'zipcode': 112600},
1605: {'name': '银州区', 'pid': 118, 'zipcode': 112600},
1606: {'name': '凌源市', 'pid': 119, 'zipcode': 122000},
1607: {'name': '北票市', 'pid': 119, 'zipcode': 122000},
1608: {'name': '双塔区', 'pid': 119, 'zipcode': 122000},
1609: {'name': '喀喇沁左翼蒙古族自治县', 'pid': 119, 'zipcode': 122000},
1610: {'name': '建平县', 'pid': 119, 'zipcode': 122000},
1611: {'name': '朝阳县', 'pid': 119, 'zipcode': 122000},
1612: {'name': '龙城区', 'pid': 119, 'zipcode': 122000},
1613: {'name': '兴城市', 'pid': 120, 'zipcode': 125000},
1614: {'name': '南票区', 'pid': 120, 'zipcode': 125000},
1615: {'name': '建昌县', 'pid': 120, 'zipcode': 125000},
1616: {'name': '绥中县', 'pid': 120, 'zipcode': 125000},
1617: {'name': '连山区', 'pid': 120, 'zipcode': 125000},
1618: {'name': '龙港区', 'pid': 120, 'zipcode': 125000},
1619: {'name': '九台市', 'pid': 121, 'zipcode': 130000},
1620: {'name': '二道区', 'pid': 121, 'zipcode': 130000},
1621: {'name': '农安县', 'pid': 121, 'zipcode': 130000},
1622: {'name': '南关区', 'pid': 121, 'zipcode': 130000},
1623: {'name': '双阳区', 'pid': 121, 'zipcode': 130000},
1624: {'name': '宽城区', 'pid': 121, 'zipcode': 130000},
1625: {'name': '德惠市', 'pid': 121, 'zipcode': 130000},
1626: {'name': '朝阳区', 'pid': 121, 'zipcode': 130000},
1627: {'name': '榆树市', 'pid': 121, 'zipcode': 130000},
1628: {'name': '绿园区', 'pid': 121, 'zipcode': 130000},
1629: {'name': '丰满区', 'pid': 122, 'zipcode': 132000},
1630: {'name': '昌邑区', 'pid': 122, 'zipcode': 132000},
1631: {'name': '桦甸市', 'pid': 122, 'zipcode': 132000},
1632: {'name': '永吉县', 'pid': 122, 'zipcode': 132000},
1633: {'name': '磐石市', 'pid': 122, 'zipcode': 132000},
1634: {'name': '舒兰市', 'pid': 122, 'zipcode': 132000},
1635: {'name': '船营区', 'pid': 122, 'zipcode': 132000},
1636: {'name': '蛟河市', 'pid': 122, 'zipcode': 132000},
1637: {'name': '龙潭区', 'pid': 122, 'zipcode': 132000},
1638: {'name': '伊通满族自治县', 'pid': 123, 'zipcode': 130700},
1639: {'name': '公主岭市', 'pid': 123, 'zipcode': 136000},
1640: {'name': '双辽市', 'pid': 123, 'zipcode': 136000},
1641: {'name': '梨树县', 'pid': 123, 'zipcode': 136000},
1642: {'name': '铁东区', | |
= m.get('enabled')
if m.get('remark') is not None:
self.remark = m.get('remark')
return self
class UpdateAlertStrategyResponse(TeaModel):
    """Response for updating an alert strategy.

    Carries only the gateway's standard result envelope: a trace id plus
    a result code/message pair.
    """

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; 'OK' generally means the call succeeded.
        self.result_code = result_code
        # Textual description of any error.
        self.result_msg = result_msg

    def validate(self):
        """No required fields; nothing to validate."""
        pass

    def to_map(self):
        """Serialize the non-None fields into a plain dict."""
        result = dict()
        for key in ('req_msg_id', 'result_code', 'result_msg'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate fields from *m*, skipping absent keys; return self."""
        m = m or dict()
        for key in ('req_msg_id', 'result_code', 'result_msg'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class PagequeryAlertStrategyRequest(TeaModel):
    """Paged query request for alert strategies."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        page_index: int = None,
        page_size: int = None,
        tenant_name: str = None,
        scene: str = None,
    ):
        # OAuth authorization token.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Page number; server default is 1.
        self.page_index = page_index
        # Page size; server default is 10.
        self.page_size = page_size
        # Tenant name.
        self.tenant_name = tenant_name
        # Scene code.
        self.scene = scene

    def validate(self):
        """Both paging fields are mandatory."""
        for required in ('page_index', 'page_size'):
            self.validate_required(getattr(self, required), required)

    def to_map(self):
        """Serialize the non-None fields into a plain dict."""
        result = dict()
        for key in (
            'auth_token',
            'product_instance_id',
            'page_index',
            'page_size',
            'tenant_name',
            'scene',
        ):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate fields from *m*, skipping absent keys; return self."""
        m = m or dict()
        for key in (
            'auth_token',
            'product_instance_id',
            'page_index',
            'page_size',
            'tenant_name',
            'scene',
        ):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class PagequeryAlertStrategyResponse(TeaModel):
    """Response envelope for the paged alert-strategy query."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        data: AlertStrategyPageResponse = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; 'OK' generally means the call succeeded.
        self.result_code = result_code
        # Textual description of any error.
        self.result_msg = result_msg
        # Paged result payload.
        self.data = data

    def validate(self):
        """Delegate validation to the nested page payload, if present."""
        if self.data:
            self.data.validate()

    def to_map(self):
        """Serialize non-None fields; the nested model serializes itself."""
        result = dict()
        for key in ('req_msg_id', 'result_code', 'result_msg'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        if self.data is not None:
            result['data'] = self.data.to_map()
        return result

    def from_map(self, m: dict = None):
        """Populate fields from *m*; 'data' is rebuilt via its model class."""
        m = m or dict()
        for key in ('req_msg_id', 'result_code', 'result_msg'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        if m.get('data') is not None:
            self.data = AlertStrategyPageResponse().from_map(m['data'])
        return self
class SyncLabelTransferrawRequest(TeaModel):
    """Synchronous request to push raw label-transfer records on chain."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        scene: str = None,
        collect_label_raw_content_list: List[CollectLabelRawContent] = None,
        nonce: str = None,
    ):
        # OAuth authorization token.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Scene code.
        self.scene = scene
        # Collected raw label records.
        self.collect_label_raw_content_list = collect_label_raw_content_list
        # Business serial number.
        self.nonce = nonce

    def validate(self):
        """scene, the record list and nonce are mandatory; each record
        validates itself."""
        self.validate_required(self.scene, 'scene')
        self.validate_required(self.collect_label_raw_content_list,
                               'collect_label_raw_content_list')
        for item in self.collect_label_raw_content_list or []:
            if item:
                item.validate()
        self.validate_required(self.nonce, 'nonce')

    def to_map(self):
        """Serialize to a dict; the record-list key is always emitted
        (possibly empty), matching the generated wire contract."""
        result = dict()
        for key in ('auth_token', 'product_instance_id', 'scene'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        result['collect_label_raw_content_list'] = []
        for item in self.collect_label_raw_content_list or []:
            result['collect_label_raw_content_list'].append(
                item.to_map() if item else None)
        if self.nonce is not None:
            result['nonce'] = self.nonce
        return result

    def from_map(self, m: dict = None):
        """Rebuild from a dict; record entries are rehydrated as
        CollectLabelRawContent models. Returns self."""
        m = m or dict()
        for key in ('auth_token', 'product_instance_id', 'scene'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        self.collect_label_raw_content_list = []
        for raw in m.get('collect_label_raw_content_list') or []:
            self.collect_label_raw_content_list.append(
                CollectLabelRawContent().from_map(raw))
        if m.get('nonce') is not None:
            self.nonce = m.get('nonce')
        return self
class SyncLabelTransferrawResponse(TeaModel):
    """Response of the synchronous raw label-transfer push.

    Carries the standard result envelope plus the per-record on-chain
    hash results.
    """

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        result_list: List[LabelChainResult] = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; 'OK' generally means the call succeeded.
        self.result_code = result_code
        # Textual description of any error.
        self.result_msg = result_msg
        # On-chain hash result for each pushed label record.
        self.result_list = result_list

    def validate(self):
        """Validate each nested result entry, if any."""
        for entry in self.result_list or []:
            if entry:
                entry.validate()

    def to_map(self):
        """Serialize to a dict; the 'result_list' key is always emitted."""
        result = dict()
        for key in ('req_msg_id', 'result_code', 'result_msg'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        result['result_list'] = []
        for entry in self.result_list or []:
            result['result_list'].append(entry.to_map() if entry else None)
        return result

    def from_map(self, m: dict = None):
        """Rebuild from a dict; entries become LabelChainResult models."""
        m = m or dict()
        for key in ('req_msg_id', 'result_code', 'result_msg'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        self.result_list = []
        for raw in m.get('result_list') or []:
            self.result_list.append(LabelChainResult().from_map(raw))
        return self
class SendLabelTransferrawonasyncRequest(TeaModel):
    """Asynchronous request to push raw label-transfer records on chain."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        scene: str = None,
        collect_label_raw_content_list: List[CollectLabelRawContent] = None,
        nonce: str = None,
        response_period: int = None,
    ):
        # OAuth authorization token.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Scene code.
        self.scene = scene
        # Collected raw label-transfer records.
        self.collect_label_raw_content_list = collect_label_raw_content_list
        # Business serial number, used to guard against replays.
        self.nonce = nonce
        # Latest time (in days, max 30) the caller may fetch the async
        # result; after response_period days the result is unavailable.
        self.response_period = response_period

    def validate(self):
        """scene, the record list, nonce and response_period are all
        mandatory; each record validates itself."""
        self.validate_required(self.scene, 'scene')
        self.validate_required(self.collect_label_raw_content_list,
                               'collect_label_raw_content_list')
        for item in self.collect_label_raw_content_list or []:
            if item:
                item.validate()
        self.validate_required(self.nonce, 'nonce')
        self.validate_required(self.response_period, 'response_period')

    def to_map(self):
        """Serialize to a dict; the record-list key is always emitted."""
        result = dict()
        for key in ('auth_token', 'product_instance_id', 'scene'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        result['collect_label_raw_content_list'] = []
        for item in self.collect_label_raw_content_list or []:
            result['collect_label_raw_content_list'].append(
                item.to_map() if item else None)
        for key in ('nonce', 'response_period'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Rebuild from a dict; record entries are rehydrated as
        CollectLabelRawContent models. Returns self."""
        m = m or dict()
        for key in ('auth_token', 'product_instance_id', 'scene'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        self.collect_label_raw_content_list = []
        for raw in m.get('collect_label_raw_content_list') or []:
            self.collect_label_raw_content_list.append(
                CollectLabelRawContent().from_map(raw))
        for key in ('nonce', 'response_period'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class SendLabelTransferrawonasyncResponse(TeaModel):
    """Response of the asynchronous raw label-transfer push.

    Returns the standard result envelope plus the request id later used
    to poll for the asynchronous result.
    """

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        request_id: str = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; 'OK' generally means the call succeeded.
        self.result_code = result_code
        # Textual description of any error.
        self.result_msg = result_msg
        # Unique identifier of this API request.
        self.request_id = request_id

    def validate(self):
        """No required fields; nothing to validate."""
        pass

    def to_map(self):
        """Serialize the non-None fields into a plain dict."""
        result = dict()
        for key in ('req_msg_id', 'result_code', 'result_msg', 'request_id'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate fields from *m*, skipping absent keys; return self."""
        m = m or dict()
        for key in ('req_msg_id', 'result_code', 'result_msg', 'request_id'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class QueryDockedDataRequest(TeaModel):
def __init__(
self,
auth_token: str = None,
product_instance_id: str = None,
docked_method: str = None,
scene: str = None,
key: List[str] = None,
):
# OAuth模式下的授权token
self.auth_token = auth_token
self.product_instance_id = product_instance_id
# 1. 已对接的接口名 ;
# 2. docked_method可通过 实例化SDK中的Request模型后获取,例如:
# String dockedMethod = new CreateDeviceDatamodelRequest().getMethod();
self.docked_method = docked_method
# 关键key为chainDeviceId 时不填
self.scene = scene
# 1. 接口中的关键key ,例如 deviceId ;
# 2. key为chainDeviceId时,scene字段不填
#
self.key = key
def validate(self):
self.validate_required(self.docked_method, 'docked_method')
def to_map(self):
result = dict()
if self.auth_token is not None:
result['auth_token'] = self.auth_token
if self.product_instance_id is not None:
result['product_instance_id'] = self.product_instance_id
if self.docked_method is not None:
result['docked_method'] = self.docked_method
if self.scene is not None:
result['scene'] = self.scene
if self.key is not None:
result['key'] = self.key
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('auth_token') is not None:
self.auth_token = m.get('auth_token')
if m.get('product_instance_id') is not None:
self.product_instance_id = m.get('product_instance_id')
if m.get('docked_method') is not None:
self.docked_method = m.get('docked_method')
if m.get('scene') is not None:
self.scene = m.get('scene')
if m.get('key') | |
# Copyright 2019 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import itertools
import collections.abc as abc
from numbers import Number
import numpy as np
from dimod.decorators import vartype_argument
from dimod.sampleset import as_samples
from dimod.utilities import iter_safe_relabels
from dimod.vartypes import Vartype
__all__ = ['BinaryPolynomial']
def asfrozenset(term):
    """Return *term* as a :class:`frozenset`, avoiding a copy when possible."""
    if isinstance(term, frozenset):
        return term
    return frozenset(term)
class BinaryPolynomial(abc.MutableMapping):
"""A polynomial with binary variables and real-valued coefficients.
Args:
poly (mapping/iterable):
Polynomial as a mapping of form {term: bias, ...}, where `term` is
a collection of variables and `bias` the associated bias. It can also
be an iterable of 2-tuples (term, bias).
vartype (:class:`.Vartype`/str/set):
Variable type for the binary quadratic model. Accepted input values:
* :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``
* :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``
Attributes:
degree (int):
The degree of the polynomial.
variables (set):
The variables.
vartype (:class:`.Vartype`):
One of :class:`.Vartype.SPIN` or :class:`.Vartype.BINARY`.
Examples:
Binary polynomials can be constructed in many different ways. The
following are all equivalent
>>> poly = dimod.BinaryPolynomial({'a': -1, 'ab': 1}, dimod.SPIN)
>>> poly = dimod.BinaryPolynomial({('a',): -1, ('a', 'b'): 1}, dimod.SPIN)
>>> poly = dimod.BinaryPolynomial([('a', -1), (('a', 'b'), 1)], dimod.SPIN)
>>> poly = dimod.BinaryPolynomial({'a': -1, 'ab': .5, 'ba': .5}, dimod.SPIN)
    Binary polynomials act as mutable mappings, but the terms can be accessed with
any sequence.
>>> poly = dimod.BinaryPolynomial({'a': -1, 'ab': 1}, dimod.BINARY)
>>> poly['ab']
1
>>> poly['ba']
1
>>> poly[{'a', 'b'}]
1
>>> poly[('a', 'b')]
1
>>> poly['cd'] = 4
>>> poly['dc']
4
"""
    @vartype_argument('vartype')
    def __init__(self, poly, vartype):
        # Accept either a mapping {term: bias} or an iterable of
        # (term, bias) 2-tuples; normalize to an item iterable.
        if isinstance(poly, abc.Mapping):
            poly = poly.items()

        # we need to aggregate the repeated terms
        self._terms = terms = {}
        for term, bias in poly:
            fsterm = asfrozenset(term)

            # when SPIN-valued, s^2 == 1, so we need to handle that case
            # in BINARY, x^2 == x
            if len(fsterm) < len(term) and vartype is Vartype.SPIN:
                # keep only variables appearing an odd number of times; pairs
                # of identical spin variables multiply to +1 and drop out
                new = set()
                term = tuple(term)  # make sure it has .count
                for v in fsterm:
                    if term.count(v) % 2:
                        new.add(v)
                fsterm = frozenset(new)

            # repeated terms accumulate their biases
            if fsterm in terms:
                terms[fsterm] += bias
            else:
                terms[fsterm] = bias

        self.vartype = vartype
def __contains__(self, term):
return asfrozenset(term) in self._terms
def __delitem__(self, term):
del self._terms[asfrozenset(term)]
    def __eq__(self, other):
        """Equality tolerant of zero biases and of raw polynomial inputs.

        *other* need not be a BinaryPolynomial; anything the constructor
        accepts is coerced first, using this polynomial's vartype.
        """
        if not isinstance(other, BinaryPolynomial):
            try:
                other = type(self)(other, self.vartype)
            except Exception:
                # not a polynomial
                return False
        self_terms = self._terms
        other_terms = other._terms
        # terms with a zero bias count as absent, so compare in both
        # directions, skipping zero-bias entries on each side
        return (
            self.vartype == other.vartype
            and all(
                (not bias or other_terms.get(term, 0.) == bias)
                for term, bias in self.items()
            )
            and all(
                (not bias or self_terms.get(term, 0.) == bias)
                for term, bias in other.items()
            )
        )
def __ne__(self, other):
return not (self == other)
def __getitem__(self, term):
return self._terms[asfrozenset(term)]
def __iter__(self):
return iter(self._terms)
    def __len__(self):
        """Number of distinct terms in the polynomial."""
        return len(self._terms)
def __setitem__(self, term, bias):
self._terms[asfrozenset(term)] = bias
def __repr__(self):
return '{!s}({!r}, {!r})'.format(self.__class__.__name__,
self._terms, self.vartype.name)
@property
def variables(self):
"""Variables of the polynomial."""
return set().union(*self._terms)
@property
def degree(self):
"""Degree of the polynomial."""
if len(self) == 0:
return 0
return max(map(len, self._terms))
def copy(self):
"""Create a shallow copy."""
return type(self)(self, self.vartype)
def energy(self, sample_like, dtype=float):
"""The energy of the given sample.
Args:
sample_like (samples_like):
A raw sample. `sample_like` is an extension of
NumPy's array_like structure. See :func:`.as_samples`.
dtype (:class:`numpy.dtype`, optional):
The data type of the returned energies. Defaults to float.
Returns:
The energy.
"""
energy, = self.energies(sample_like, dtype=dtype)
return energy
def energies(self, samples_like, dtype=float):
"""The energies of the given samples.
Args:
samples_like (samples_like):
A collection of raw samples. `samples_like` is an extension of
NumPy's array_like structure. See :func:`.as_samples`.
dtype (:class:`numpy.dtype`, optional):
The data type of the returned energies. Defaults to float.
Returns:
:obj:`numpy.ndarray`: The energies.
"""
samples, labels = as_samples(samples_like)
if labels:
idx, label = zip(*enumerate(labels))
labeldict = dict(zip(label, idx))
else:
labeldict = {}
num_samples = samples.shape[0]
energies = np.zeros(num_samples, dtype=dtype)
for term, bias in self.items():
if len(term) == 0:
energies += bias
else:
energies += np.prod([samples[:, labeldict[v]] for v in term], axis=0) * bias
return energies
    def relabel_variables(self, mapping, inplace=True):
        """Relabel variables of a binary polynomial as specified by mapping.

        Args:
            mapping (dict):
                Dict mapping current variable labels to new ones. If an
                incomplete mapping is provided, unmapped variables retain their
                current labels.

            inplace (bool, optional, default=True):
                If True, the binary polynomial is updated in-place; otherwise, a
                new binary polynomial is returned.

        Returns:
            :class:`.BinaryPolynomial`: A binary polynomial with the variables
            relabeled. If `inplace` is set to True, returns itself.

        """
        if not inplace:
            return self.copy().relabel_variables(mapping, inplace=True)
        # iter_safe_relabels splits the mapping into sub-mappings that can be
        # applied sequentially without intermediate label collisions
        for submap in iter_safe_relabels(mapping, self.variables):
            # iterate over a snapshot since terms are added/removed below
            for oldterm, bias in list(self.items()):
                newterm = frozenset((submap.get(v, v) for v in oldterm))

                if newterm != oldterm:
                    self[newterm] = bias
                    del self[oldterm]

        return self
    def normalize(self, bias_range=1, poly_range=None, ignored_terms=None):
        """Normalizes the biases of the binary polynomial such that they fall in
        the provided range(s).

        If `poly_range` is provided, then `bias_range` will be treated as
        the range for the linear biases and `poly_range` will be used for
        the range of the other biases.

        Args:
            bias_range (number/pair):
                Value/range by which to normalize the all the biases, or if
                `poly_range` is provided, just the linear biases.

            poly_range (number/pair, optional):
                Value/range by which to normalize the higher order biases.

            ignored_terms (iterable, optional):
                Biases associated with these terms are not scaled.

        """
        def parse_range(r):
            # a scalar r stands for the symmetric range [-|r|, |r|]
            if isinstance(r, Number):
                return -abs(r), abs(r)
            return r

        if ignored_terms is None:
            ignored_terms = set()
        else:
            ignored_terms = {asfrozenset(term) for term in ignored_terms}

        if poly_range is None:
            linear_range, poly_range = bias_range, bias_range
        else:
            linear_range = bias_range

        lin_range, poly_range = map(parse_range, (linear_range, poly_range))

        # determine the current ranges for linear, higherorder
        lmin = lmax = 0
        pmin = pmax = 0
        for term, bias in self.items():
            if term in ignored_terms:
                # we don't use the ignored terms to calculate the scaling
                continue
            if len(term) == 1:
                lmin = min(bias, lmin)
                lmax = max(bias, lmax)
            elif len(term) > 1:
                pmin = min(bias, pmin)
                pmax = max(bias, pmax)

        # the largest ratio of an observed extreme to its allowed extreme;
        # dividing all biases by it brings them inside their target ranges.
        # NOTE(review): a zero range bound (e.g. bias_range=0) raises
        # ZeroDivisionError here -- confirm callers never pass 0.
        inv_scalar = max(lmin / lin_range[0], lmax / lin_range[1],
                         pmin / poly_range[0], pmax / poly_range[1])

        if inv_scalar != 0:
            self.scale(1 / inv_scalar, ignored_terms=ignored_terms)
def scale(self, scalar, ignored_terms=None):
"""Multiply the polynomial by the given scalar.
Args:
scalar (number):
Value to multiply the polynomial by.
ignored_terms (iterable, optional):
Biases associated with these terms are not scaled.
"""
if ignored_terms is None:
ignored_terms = set()
else:
ignored_terms = {asfrozenset(term) for term in ignored_terms}
for term in self:
if term not in ignored_terms:
self[term] *= scalar
@classmethod
def from_hising(cls, h, J, offset=None):
"""Construct a binary polynomial from a higher-order Ising problem.
Args:
h (dict):
The linear biases.
J (dict):
The higher-order biases.
offset (optional, default=0.0):
Constant offset applied to the model.
Returns:
:obj:`.BinaryPolynomial`
Examples:
>>> poly = dimod.BinaryPolynomial.from_hising({'a': 2}, {'ab': -1}, 0)
>>> poly.degree
2
"""
poly = {(k,): v for k, v in h.items()}
poly.update(J)
if offset is not None:
poly[frozenset([])] = offset
return cls(poly, Vartype.SPIN)
def to_hising(self):
"""Construct a higher-order Ising problem from a binary polynomial.
Returns:
tuple: A 3-tuple of the form (`h`, `J`, `offset`) where `h` includes
the linear biases, `J` has the higher-order biases and `offset` is
the linear offset.
Examples:
>>> poly = dimod.BinaryPolynomial({'a': -1, 'ab': 1, 'abc': -1}, dimod.SPIN)
>>> h, J, off = poly.to_hising()
>>> h
{'a': -1}
"""
if self.vartype is Vartype.BINARY:
return self.to_spin().to_hising()
h = {}
J = {}
offset = 0
for term, bias in self.items():
if len(term) == 0:
offset += bias
elif len(term) == 1:
v, = term
h[v] = bias
else:
J[tuple(term)] = bias
return h, J, offset
@classmethod
def from_hubo(cls, H, offset=None):
"""Construct a binary polynomial from a higher-order unconstrained
binary optimization (HUBO) problem.
Args:
H (dict):
Coefficients | |
#<NAME> mouseExpMan, that big old file with classes for all the data I'm going to collect
#lol how this is all actually metadata... >_<
#sqlite3 and/or sqlalchemy backed by sqlite; frankly, I'm going to use sqlite one way or another, and I think sqlalchemy is a better fit for my needs
#FIXME: this is old, should it be here or in the database file to provide a thin layer between the database itself and the functions I want to call from the RIG as opposed to any analysis code?
import numpy as np
from scipy import interpolate as interp
from time import gmtime, strftime
from datetime import datetime,date,time #this may end up being more useful since it auto time deltas
class dataObject:
    """Record-like object that logs every attribute change as a timestamped transaction."""

    def __init__(self):
        # Ordered log of (utc_timestamp, target, value) tuples; a list, NOT a
        # dict, because transaction order matters.
        # BUG FIX: the original assigned the ``list`` type itself
        # (``self.transactions=list``) instead of a new list instance.
        self.transactions = []
        # we can then use indexing to get only the transactions of a specific target
        # in theory if all targets were classes they could store their own transactions...

    def addData(self, target, value):
        """Set attribute *target* to *value*, log the change, return self."""
        # BUG FIX: list.append takes one argument; the original passed three.
        self.transactions.append((datetime.utcnow(), target, value))
        setattr(self, target, value)
        return self

    def listTransactions(self):
        """Print the transaction log; returns self for chaining."""
        print(self.transactions)
        return self
class timeObj:
    """Base class for things/events that have a start time when the class is initiated and a stop time at some indeterminate point in the future, kept in GMT"""

    def __init__(self):
        # FIXME use datetime objects
        self.startGMT = gmtime()
        self.stopGMT = None

    def stopTime(self):
        """set stopGMT when finished with the object"""
        now = gmtime()
        self.stopGMT = now
        return now
    #every transaction should have a time associated with it...
#BUT we dont really want to store transactions
class experiment(timeObj):  # this may not exist...
    """Base experiment class; should work for all ephys experiment types but
    may need modification of the save method.

    NOTE(review): the original body contained SyntaxErrors (an empty ``try``
    block and a dangling ``abfMan.`` expression) and referenced an undefined
    ``expFile``; this is the minimal syntactically valid reconstruction of the
    apparent intent.
    """

    def __init__(self, npc, expName, expNum):
        # may also need npCalib; the data path needs to be dealt with
        # explicitly SOMEWHERE
        # FIXME WE DO NOT NEED TO LOAD all the PREVIOUS data into memory when
        # we are COLLECTING -- at least in theory, even if there are 10k cells
        super().__init__()
        self.fileMan = fileMan()  # FIXME figure out how to do this properly
        # build the file name
        expFileName = '{}{}'.format(expName, expNum)  # FIXME
        try:
            # ideally we would always open an existing exp file and add to it;
            # may need a custom load function, or pickle
            np.load(expFileName)
        except IOError:
            # no existing file: start a fresh experiment
            pass
        self.expName = expName
        # every experiment needs a rig! probably using this to streamline data
        # acquisition HERE rather than in kbControl
        self.npc = npc
        # probably don't actually want to do this; want unique identifiers
        # that know about other experiments
        self.expNum = expNum
        # these represent the PAST, so why keep them if data is written to
        # disk after every addition?
        self.mouseList = []  # ALTERNATELY: DICTIONARIES? nah, don't need keys
        self.sliceList = []
        self.cellList = []  # ^^^ the above all know about each other...
        self.stimList = []  # stimList does NOT?
        self.mouse = None
        self.slc = None  # in some crazy world you could have more than one...
        self.cells = []

    def addMouse(self, mouseObj):
        # FIXME: placeholder -- the original's try/except isinstance-style
        # check was a no-op that swallowed all errors
        pass

    def addSlice(self, sliceObj):
        pass

    def addCell(self, cellObj):
        # this is where we want to automatically calculate stimLocations?
        pass

    def addStim(self, stimObj):
        pass

    def addAbf(self, abfObj=None):  # this is the data, everything else is the metadata
        """This is what we call when we run an experiment, EVERYTHING must be linked to it"""
        # BUG FIX: the original contained a bare ``abfMan.`` (SyntaxError);
        # hook up an abf manager here once one exists.
        pass

    def saveData(self):
        """call this thing after every addition but ALWAYS keep the previous 2 versions"""
        # for some reason this is raring up to all be saved in a single file...
        pass
class fileMan:
    """Owns the on-disk locations for metadata, abf data and calibration data."""
    import pickle

    def __init__(self, datPath, abfPath, caliPath):
        # BUG FIX: the original declared the third parameter as
        # ``self.caliPath``, which is a SyntaxError.
        self.datPath = datPath  # path to the metadata; may change with file xtn
        self.abfPath = abfPath
        # may get rid of this and store the calibration data somewhere else
        self.caliPath = caliPath

    def saveDat(self):
        """Persist the metadata (pickle for now)."""
        # Pickle this and don't worry about it!  When it comes time for data
        # analysis, convert everything (abf files plus attached metadata) in
        # one big pass instead of continual fiddling -- but beware of
        # tampering with older variables and of data corruption.
        # TODO: automatically save to two locations (ecc? PAR2 files?)
        pass

    def saveCali(self):
        """Persist the calibration data."""
        pass

    def formatDat(self):
        """format the data for saving, probably just want to save everything?"""
        pass

    def invFormatDat(self):
        """read the formatted data back in"""
        pass
class abfObj:
    """Thin handle around a single abf data file.

    NOTE: may want an abfMan instead, to manage all the abf files and decide
    which comes next; this may be overkill since all we really need is a path
    and a filename.  On the other hand, this is the natural object to tie data
    to, since the abf file is what all the metadata here is used to interpret
    -- other ways of viewing and organizing the data are a... bonus?
    """

    def __init__(self, abfFile):
        self.abfFile = abfFile
class stimObj:
    """Class for stimulation objects."""  # FIXME could apply to light or to an electrode

    def __init__(self, pos, intensity):
        # duration is not stored here because it lives in the abf files (FOR NOW!)
        self.pos = pos
        # unfortunately this will probably have to be entered manually :/
        self.intensity = intensity
class rigObj:
    """Describes the rig: which headstages exist and their clamp mode."""

    def __init__(self, hsTup, isVcList):
        if len(isVcList) != len(hsTup):
            # BUG FIX: the original only printed a warning and left the
            # instance without any attributes, causing AttributeErrors later.
            raise ValueError('hsTup and isVcList must be same length')
        self.hsTup = hsTup  # a tuple listing the headstages 0,1,2,3
        # a list of bools where 1 means that hs is currently in voltage clamp
        self.isVCList = isVcList
        # assuming that this is started when there are no cells
        self.hsHasCell = np.zeros_like(hsTup)

    def gotCell(self, hs):
        """Mark headstage *hs* as currently holding a cell."""
        try:
            self.hsHasCell[hs] = 1
        except IndexError:
            # BUG FIX: out-of-range indexing raises IndexError, not the
            # AttributeError the original caught (so it never triggered).
            print('You don\'t have that many headstages! Spend more money!')
class litterObj:
    """Metadata for a single litter of mice."""

    def __init__(self, litName, paired, dob, dam, sire, numMice, dow=None,
                 numSaced=0, cagecard=None):
        self.litName = litName
        self.paired = paired
        self.dob = dob  # FTLOG use ISO8601 WITH NO DASHES
        if dow:
            self.dow = dow  # date of weaning; use this to pop reminders up!
            self.weaned = True
        else:
            # NOTE(review): assumes dob is numeric (days) -- confirm once
            # dates become real datetime objects
            self.dow = dob + 21
            self.weaned = False
        # BUG FIX: the original assigned undefined names damGeno/sireGeno
        # (NameError); store the dam and sire actually passed in.
        self.dam = dam
        self.sire = sire
        self.numMice = numMice
        self.numSaced = numSaced
        self.cagecard = cagecard  # BUG FIX: parameter was silently dropped

    def weanLitter(self):
        """Mark the litter as weaned."""
        self.weaned = True

    def sackMouse(self):
        """Record that one mouse from the litter was sacrificed."""
        # BUG FIX: the original incremented a nonexistent ``numUsed``.
        self.numSaced += 1
class mouseObj(timeObj):
    """Base class for all mouse objects."""

    # FIXME: need a way to assign unique identifiers; mouse numbers are no good
    def __init__(self,mouseNum=None,litterObj=None,genotype=None,dod=None,earTag=None,tattoo=None):
        super().__init__()
        # ideally some day this will be integrated with the litter object,
        # BUT TODAY IS NOT THAT DAY!
        self.litter=litterObj
        # do not auto-populate: only add mice that have REAL DATA associated
        # with them -- UNLESS per-litter utilization becomes interesting, in
        # which case that can be stored elsewhere (or via the gdata api/cloud)
        self.mouseNum=mouseNum
        self.earTag=earTag
        self.tattoo=tattoo
        # FIXME: may want a genotype class since there's so much data on it
        self.genotype=genotype
        # date of death; the litter holds DOB info, so no worries there
        self.dod=dod
        # NOTE: unique identifiers for objects? ARGH
    def age(self):
        # BUG(review): ``self.dob`` is never set on this class (DOB lives on
        # the litter), so this raises AttributeError -- confirm intended source
        today=123114 #probably should be a date FIXME
        return today-self.dob
    def __str__(self):
        # NOTE(review): ``formatTime`` is not defined in this module's visible
        # scope -- confirm it exists elsewhere in the file
        return 'Mouse data:\n litter\t\t{}\n mouse\t\t{}\n mouse start\t{}\n mouse stop\t{}\n'\
            .format(self.litter, self.mouseNum, formatTime(self.startGMT), formatTime(self.stopGMT))
class damObj(mouseObj):
    """A dam (mother) mouse; currently identical to mouseObj."""
    def __init__(self):
        super().__init__()
class sireObj(mouseObj):
    """A sire (father) mouse; currently identical to mouseObj."""
    def __init__(self):
        super().__init__()
class slcMouseObj(mouseObj):
    """A class for mice that are used for slicing"""
    # how do we deal with conversion of a mouse in a litter to a slice mouse?
    # most logical way is to only convert mice that already exist; e.g. a
    # mouse that has been genotyped can be created under an experiment
    # litters need not have a mouse object for every member -- just increment
    # numSaced whenever one is killed
    # the objective is to not have to hunt for all the data pertaining to a
    # cell, so a BUNCH of state needs to be saved here with the mouse
    # FIXME: init the new slcMouseObj using the old liveMouseObj (or just
    # mouseObj) and del the old one
    def __init__(self):
        super().__init__()
        # these are all potential data points, some of which complicate data
        # collection but could be used for seeing how things are going
        self.weight=None     # body weight at sacrifice -- TODO confirm units
        self.uLkx=None       # NOTE(review): meaning unclear from source -- confirm
        self.tOut=None       # time out (of cage/anesthesia?) -- TODO confirm
        self.tBOut=None      # NOTE(review): meaning unclear from source -- confirm
        self.tDone=None      # time slicing finished -- TODO confirm
        self.numSlices=None  # number of slices obtained
        self.epHemi=None     # hemisphere used for ephys -- TODO confirm
class scepMouseObj(mouseObj):
"""A class for mice that are used for | |
= image_file_reader.read_frame(frame_index)
if frame_number > int(getattr(metadata, 'NumberOfFrames', '1')):
raise ValueError(
f'Provided frame number {frame_number} exceeds number '
'of available frames.'
)
if not transfer_syntax_uid.is_encapsulated:
pixels = frame
else:
if reencoding_media_type is None:
pixels = frame
elif reencoding_media_type == 'image/jp2':
image_type = 'jpeg2000'
image_kwargs = {'irreversible': False}
array = image_file_reader.decode_frame(frame_index, frame)
image = Image.fromarray(array)
with io.BytesIO() as fp:
image.save(
fp,
image_type,
**image_kwargs # type: ignore
)
pixels = fp.getvalue()
else:
raise ValueError(
'Cannot re-encode frames using media type '
f'"{reencoding_media_type}".'
)
yield pixels
    def retrieve_instance_frames(
        self,
        study_instance_uid: str,
        series_instance_uid: str,
        sop_instance_uid: str,
        frame_numbers: List[int],
        media_types: Optional[Tuple[Union[str, Tuple[str, str]], ...]] = None
    ) -> List[bytes]:
        """Retrieve one or more frames of an image instance.

        Parameters
        ----------
        study_instance_uid: str
            Study Instance UID
        series_instance_uid: str
            Series Instance UID
        sop_instance_uid: str
            SOP Instance UID
        frame_numbers: List[int]
            Frame numbers (one-based)
        media_types: Union[Tuple[Union[str, Tuple[str, str]], ...], None], optional
            Acceptable media types and optionally the UIDs of the
            corresponding transfer syntaxes

        Returns
        -------
        List[bytes]
            Frames

        Raises
        ------
        ValueError
            When no frame numbers are provided, a frame number exceeds the
            number of available frames, or re-encoding to the requested
            media type is unsupported

        """  # noqa: E501
        logger.info(
            f'retrieve frames of instance "{sop_instance_uid}" of '
            f'series "{series_instance_uid}" of study "{study_instance_uid}"'
        )
        file_path = self._get_instance_file_path(
            study_instance_uid,
            series_instance_uid,
            sop_instance_uid,
        )
        if len(frame_numbers) == 0:
            raise ValueError('At least one frame number must be provided.')
        image_file_reader = self._get_image_file_reader(file_path)
        metadata = image_file_reader.metadata
        transfer_syntax_uid = image_file_reader.transfer_syntax_uid
        # Determine whether (and to what) frames must be re-encoded to satisfy
        # the requested media types; None means "return as stored".
        reencoding_media_type = self._check_media_types_for_instance_frames(
            transfer_syntax_uid,
            media_types
        )
        # Validate all requested frame numbers before reading any pixel data.
        frame_indices = []
        for frame_number in frame_numbers:
            if frame_number > int(getattr(metadata, 'NumberOfFrames', '1')):
                raise ValueError(
                    f'Provided frame number {frame_number} exceeds number '
                    'of available frames.'
                )
            # frame numbers are one-based; frame indices are zero-based
            frame_index = frame_number - 1
            frame_indices.append(frame_index)
        reencoded_frames = []
        for frame_index in frame_indices:
            frame = image_file_reader.read_frame(frame_index)
            if not transfer_syntax_uid.is_encapsulated:
                # native (uncompressed) pixel data is returned unchanged
                reencoded_frame = frame
            else:
                if reencoding_media_type is None:
                    reencoded_frame = frame
                elif reencoding_media_type == 'image/jp2':
                    # losslessly transcode the decoded frame to JPEG 2000
                    image_type = 'jpeg2000'
                    image_kwargs = {'irreversible': False}
                    array = image_file_reader.decode_frame(frame_index, frame)
                    image = Image.fromarray(array)
                    with io.BytesIO() as fp:
                        image.save(
                            fp,
                            image_type,
                            **image_kwargs  # type: ignore
                        )
                        reencoded_frame = fp.getvalue()
                else:
                    raise ValueError(
                        'Cannot re-encode frames using media type '
                        f'"{reencoding_media_type}".'
                    )
            reencoded_frames.append(reencoded_frame)
        return reencoded_frames
def retrieve_instance_frames_rendered(
self,
study_instance_uid: str,
series_instance_uid: str,
sop_instance_uid: str,
frame_numbers: List[int],
media_types: Optional[Tuple[Union[str, Tuple[str, str]], ...]] = None,
params: Optional[Dict[str, str]] = None,
) -> bytes:
"""Retrieve server-side rendered frames of an image instance.
Parameters
----------
study_instance_uid: str
Study Instance UID
series_instance_uid: str
Series Instance UID
sop_instance_uid: str
SOP Instance UID
frame_numbers: List[int]
Frame numbers
media_types: Union[Tuple[Union[str, Tuple[str, str]], ...], None], optional
Acceptable media types and optionally the UIDs of the
corresponding transfer syntaxes
params: Union[Dict[str, str], None], optional
Additional query parameters
Returns
-------
bytes
Rendered representation of frames
""" # noqa: E501
logger.info(
f'retrieve rendered frames of instance "{sop_instance_uid}" of '
f'series "{series_instance_uid}" of study "{study_instance_uid}"'
)
if len(frame_numbers) == 0:
raise ValueError('A frame number must be provided.')
elif len(frame_numbers) > 1:
raise ValueError(
'Only rendering of a single frame is supported for now.'
)
frame_number = frame_numbers[0]
file_path = self._get_instance_file_path(
study_instance_uid,
series_instance_uid,
sop_instance_uid,
)
image_file_reader = self._get_image_file_reader(file_path)
frame_index = frame_number - 1
frame = image_file_reader.read_frame(frame_index)
metadata = image_file_reader.metadata
transfer_syntax_uid = image_file_reader.transfer_syntax_uid
if frame_number > int(getattr(metadata, 'NumberOfFrames', '1')):
raise ValueError(
'Provided frame number exceeds number of frames.'
)
codec_name, codec_kwargs = self._get_image_codec_parameters(
metadata=metadata,
transfer_syntax_uid=transfer_syntax_uid,
media_types=media_types,
params=params
)
if codec_name is None:
pixels = frame
else:
array = image_file_reader.decode_frame(frame_index, frame)
image = Image.fromarray(array)
with io.BytesIO() as fp:
image.save(fp, codec_name, **codec_kwargs)
fp.seek(0)
pixels = fp.read()
return pixels
def _get_image_codec_parameters(
self,
metadata: Dataset,
transfer_syntax_uid: str,
media_types: Optional[Tuple[Union[str, Tuple[str, str]], ...]] = None,
params: Optional[Dict[str, str]] = None,
) -> Tuple[Optional[str], Dict[str, Any]]:
if media_types is not None:
acceptable_media_types = list(set([
m[0]
if isinstance(m, tuple)
else m
for m in media_types
]))
are_media_types_valid = all(
m.startswith('image')
for m in acceptable_media_types
)
if not are_media_types_valid:
raise ValueError(
'Compressed instance frames can only be retrieved in '
'rendered format using media type "image".'
)
if 'image/png' in acceptable_media_types:
image_type = 'png'
elif 'image/jp2' in acceptable_media_types:
if transfer_syntax_uid == '1.2.840.10008.1.2.4.90':
image_type = None
else:
# Lossless recompression
image_type = 'jpeg2000'
elif 'image/jpeg' in acceptable_media_types:
if transfer_syntax_uid == '1.2.840.10008.1.2.4.50':
# Avoid lossy recompression of lossy compressed frames.
image_type = None
else:
# Allow lossy recompression in case of retrieve rendered.
logger.warn(
'frames of instance "{sop_instance_uid}" are lossy '
'recompressed upon retrieval'
)
image_type = 'jpeg'
else:
raise ValueError(
'Cannot retrieve frames of instance in rendered '
'format using any of the acceptable media types: '
'"{}".'.format('", "'.join(acceptable_media_types))
)
else:
if transfer_syntax_uid == '1.2.840.10008.1.2.4.50':
# Avoid lossy recompression of lossy compressed frames.
image_type = None
else:
image_type = 'jpeg'
image_kwargs: Dict[str, Any] = {
# Avoid re-compression when encoding in PNG format
'png': {'compress_level': 0, 'optimize': False},
'jpeg': {'quality': 100, 'optimize': False},
'jpeg2000': {'irreversible': False},
}
if params is not None and image_type is not None:
include_icc_profile = params.get('icc_profile', 'no')
if include_icc_profile == 'yes':
icc_profile = metadata.OpticalPathSequence[0].ICCProfile
image_kwargs[image_type]['icc_profile'] = ImageCmsProfile(
icc_profile
)
elif include_icc_profile == 'srgb':
icc_profile = createProfile('sRGB')
image_kwargs[image_type]['icc_profile'] = ImageCmsProfile(
icc_profile
)
elif include_icc_profile == 'no':
pass
else:
raise ValueError(
f'ICC Profile "{include_icc_profile}" is not supported.'
)
if image_type is None:
return (image_type, {})
return (image_type, image_kwargs[image_type])
@staticmethod
def lookup_keyword(
tag: Union[int, str, Tuple[int, int], BaseTag]
) -> str:
"""Look up the keyword of a DICOM attribute.
Parameters
----------
tag: Union[str, int, Tuple[int, int], pydicom.tag.BaseTag]
Attribute tag (e.g. ``"00080018"``)
Returns
-------
str
Attribute keyword (e.g. ``"SOPInstanceUID"``)
"""
keyword = keyword_for_tag(tag)
if keyword is None:
raise KeyError(f'Could not find a keyword for tag {tag}.')
return keyword
@staticmethod
def lookup_tag(keyword: str) -> str:
"""Look up the tag of a DICOM attribute.
Parameters
----------
keyword: str
Attribute keyword (e.g. ``"SOPInstanceUID"``)
Returns
-------
str
Attribute tag as HEX string (e.g. ``"00080018"``)
"""
tag = tag_for_keyword(keyword)
if tag is None:
raise KeyError(f'Could not find a tag for "{keyword}".')
tag = Tag(tag)
return '{0:04x}{1:04x}'.format(tag.group, tag.element).upper()
def store_instances(
self,
datasets: Sequence[Dataset],
study_instance_uid: Optional[str] = None
) -> Dataset:
"""Store instances.
Parameters
----------
datasets: Sequence[pydicom.dataset.Dataset]
Instances that should be stored
study_instance_uid: Union[str, None], optional
Study Instance UID
Returns
-------
pydicom.dataset.Dataset
Information about status of stored instances
"""
message = 'store instances'
if study_instance_uid is not None:
message += f' of study "{study_instance_uid}"'
logger.info(message)
# We first encode all data sets and temporarily store them in memory
# before inserting the metadata into the database and writing the data
# sets to files on disk. This will allow us to "roll back" in case of
# an error. We may want to consider implementing this in a more
# sophisticated way in case it becomes a performance bottleneck.
studies: Dict[
str,
Tuple[
str,
Optional[str],
Optional[str],
Optional[str],
Optional[str],
Optional[str],
Optional[str],
Optional[str],
Optional[str],
]
] = {}
series: Dict[
str,
Tuple[
str,
str,
str,
Optional[str],
Optional[int],
]
] = {}
instances: Dict[
str,
Tuple[
str,
str,
str,
str,
Optional[int],
Optional[int],
Optional[int],
Optional[int],
Optional[int],
str,
str,
]
] = {}
successes = []
failures = []
for ds in datasets:
logger.info(
f'store instance "{ds.SOPInstanceUID}" '
f'of series "{ds.SeriesInstanceUID}" '
f'of study "{ds.StudyInstanceUID}" '
)
try:
if study_instance_uid is not None:
if ds.StudyInstanceUID != study_instance_uid:
continue
else:
study_instance_uid = ds.StudyInstanceUID
study_metadata = self._extract_study_metadata(ds)
studies[study_instance_uid] = study_metadata
series_metadata = self._extract_series_metadata(ds)
series_instance_uid = ds.SeriesInstanceUID
series[series_instance_uid] = series_metadata
sop_instance_uid = ds.SOPInstanceUID
rel_file_path = '/'.join([
'studies',
study_instance_uid,
'series',
series_instance_uid,
'instances',
sop_instance_uid
])
instance_metadata = self._extract_instance_metadata(
ds,
rel_file_path
)
instances[sop_instance_uid] = instance_metadata
with io.BytesIO() as b:
dcmwrite(b, ds, write_like_original=False)
file_content = b.getvalue()
file_path = self.base_dir.joinpath(rel_file_path)
successes.append((ds, file_path, file_content))
except Exception as error:
logger.error(
f'failed to store instance "{ds.SOPInstanceUID}" '
f'of series "{ds.SeriesInstanceUID}" '
f'of study "{ds.StudyInstanceUID}": {error}'
)
failures.append(ds)
self._insert_into_db(
studies.values(),
series.values(),
instances.values()
)
response = Dataset()
response.RetrieveURL = None
if len(successes) > 0:
response.ReferencedSOPSequence = []
for ds, file_path, file_content in successes:
directory = file_path.parent
directory.mkdir(exist_ok=True, parents=True)
with open(file_path, 'wb') as fp:
fp.write(file_content)
success_item = Dataset()
success_item.ReferencedSOPClassUID = ds.SOPClassUID
success_item.ReferencedSOPInstanceUID = ds.SOPInstanceUID
success_item.RetrieveURL = None
if len(failures) > 0:
response.FailedSOPSequence = []
for ds in failures:
failure_item = Dataset()
failure_item.FailureReason = 272
failure_item.ReferencedSOPClassUID = ds.SOPClassUID
failure_item.ReferencedSOPInstanceUID = ds.SOPInstanceUID
response.FailedSOPSequence.append(failure_item)
return response
def delete_study(self, study_instance_uid: str) -> None:
"""Delete all instances | |
else:
column_args.insert(0, Column(record_key, String, primary_key=True))
elif datatype == 'float':
column_args.insert(0, Column(record_key, Float, primary_key=True))
elif datatype == 'integer':
column_args.insert(0, Column(record_key, Integer, primary_key=True))
else:
raise ValueError('Field "id" in record_schema must be a string, float or integer.')
else:
if datatype == 'boolean':
column_args.append(Column(record_key, Boolean))
elif datatype == 'string':
if max_length:
column_args.append(Column(record_key, String(max_length)))
else:
column_args.append(Column(record_key, String))
elif datatype == 'float':
column_args.append(Column(record_key, Float))
elif datatype == 'integer':
column_args.append(Column(record_key, Integer))
elif datatype == 'list':
column_args.append(Column(record_key, Binary))
return column_args
def _reconstruct_record(self, record_object):
''' a helper method for reconstructing record fields from record object '''
record_details = {}
current_details = record_details
for key, value in self.model.keyMap.items():
record_key = key[1:]
if record_key:
record_value = getattr(record_object, record_key, None)
if record_value != None:
record_segments = record_key.split('.')
for i in range(len(record_segments)):
segment = record_segments[i]
if i + 1 < len(record_segments):
if segment not in record_details.keys():
current_details[segment] = {}
current_details = current_details[segment]
else:
if isinstance(record_value, bytes):
current_details[segment] = pickle.loads(record_value)
else:
current_details[segment] = record_value
current_details = record_details
return record_details
def _compare_columns(self, new_columns, old_columns):
''' a helper method for generating differences between column properties '''
# print(new_columns)
# print(old_columns)
add_columns = {}
remove_columns = {}
rename_columns = {}
retype_columns = {}
resize_columns = {}
for key, value in new_columns.items():
if key not in old_columns.keys():
add_columns[key] = True
if value[2]:
if value[2] in old_columns.keys():
rename_columns[key] = value[2]
del add_columns[key]
else:
if value[1] != old_columns[key][1]:
retype_columns[key] = value[1]
if value[3] != old_columns[key][3]:
resize_columns[key] = value[3]
remove_keys = set(old_columns.keys()) - set(new_columns.keys())
if remove_keys:
for key in list(remove_keys):
remove_columns[key] = True
return add_columns, remove_columns, rename_columns, retype_columns, resize_columns
def _construct_inserts(self, record, new_columns, rename_columns, retype_columns, resize_columns):
''' a helper method for constructing the insert kwargs for a record '''
insert_kwargs = {}
for key, value in new_columns.items():
# retrieve value for key (or from old key name)
if key in rename_columns.keys():
record_value = getattr(record, rename_columns[key], None)
else:
record_value = getattr(record, key, None)
# attempt to convert datatype
if record_value:
if key in retype_columns.keys():
try:
old_list = False
if isinstance(record_value, bytes):
record_value = pickle.loads(record_value)
old_list = True
if retype_columns[key] == 'boolean':
record_value = bool(record_value)
elif retype_columns[key] == 'string':
if old_list:
record_value = ','.join(record_value)
else:
record_value = str(record_value)
elif retype_columns[key] == 'integer':
if old_list:
record_value = int(record_value[0])
else:
record_value = int(record_value)
elif retype_columns[key] == 'float':
if old_list:
record_value = float(record_value[0])
else:
record_value = float(record_value)
elif retype_columns[key] == 'list':
if isinstance(record_value, str):
record_value = pickle.dumps(record_value.split(','))
else:
record_value = pickle.dumps([record_value])
except:
record_value = None
# attempt to resize string data
if key in resize_columns.keys():
max_length = resize_columns[key]
try:
if len(record_value) > max_length:
record_value = record_value[0:max_length]
except:
record_value = None
insert_kwargs[key] = record_value
return insert_kwargs
    def _rebuild_table(self, new_name, old_name, new_columns, old_columns):
        ''' a helper method for rebuilding table (by renaming & migrating)

        renames the live table aside under old_name, recreates it with the
        new schema, copies every record across (converting values on the
        way) and finally drops the renamed table once it is drained.

        :param new_name: string with the table's current (target) name
        :param old_name: string with the temporary name for the prior schema
        :param new_columns: dict of column properties for the new schema
        :param old_columns: dict of column properties for the prior schema
        :return: True on completion
        '''
        # verbosity
        print('Rebuilding %s table in %s database' % (self.table_name, self.database_name), end='', flush=True)
        from sqlalchemy import Table, MetaData
        metadata_object = MetaData()
        # construct old table
        old_table_args = [ old_name, metadata_object ]
        old_column_args = self._construct_columns(old_columns)
        old_table_args.extend(old_column_args)
        old_table = Table(*old_table_args)
        # construct new table
        new_table_args = [ new_name, metadata_object ]
        new_column_args = self._construct_columns(new_columns)
        new_table_args.extend(new_column_args)
        new_table = Table(*new_table_args)
        # determine differences between tables
        add_columns, remove_columns, rename_columns, retype_columns, resize_columns = self._compare_columns(new_columns, old_columns)
        # rename table and recreate table if it doesn't already exist
        # the guard skips the rename when a previous (interrupted) run already
        # left a table behind under old_name, allowing the migration to resume
        # NOTE(review): engine.table_names() is deprecated in SQLAlchemy 1.4+
        table_list = self.engine.table_names()
        if not old_name in table_list:
            self.engine.execute('ALTER TABLE %s RENAME TO %s' % (new_name, old_name))
            new_table.create(self.engine)
        # wait for renamed table to be responsive
        # migrate records from old to new
        list_statement = old_table.select()
        count = 0
        for record in self.session.execute(list_statement).fetchall():
            create_kwargs = self._construct_inserts(record, new_columns, rename_columns, retype_columns, resize_columns)
            insert_statement = new_table.insert().values(**create_kwargs)
            self.session.execute(insert_statement)
            # delete each migrated record so an interrupted run can resume
            delete_statement = old_table.delete(old_table.c.id==record.id)
            self.session.execute(delete_statement)
            # print a progress dot every 10 records
            if not count % 10:
                print('.', end='', flush=True)
            count += 1
        # drop old table
        # only drop once the old table is fully drained
        record_list = self.session.execute(list_statement).first()
        if not record_list:
            self.session.close()
            old_table.drop(self.engine)
            # NOTE(review): self.session appears to be a raw connection
            # (engine.connect()), not an ORM session -- confirm against __init__
            self.session = self.engine.connect()
        # handle verbosity
        print(' done.')
        return True
def exists(self, primary_key):
'''
a method to determine if record exists
:param primary_key: string with primary key of record
:return: boolean to indicate existence of record
'''
select_statement = self.table.select(self.table).where(self.table.c.id==primary_key)
record_object = self.session.execute(select_statement).first()
if record_object:
return True
return False
def list(self, query_criteria=None, order_criteria=None):
'''
a generator method to list records in table which match query criteria
:param query_criteria: dictionary with schema dot-path field names and query qualifiers
:param order_criteria: list of single keypair dictionaries with field names to order by
:return: generator object with string of primary key
an example of how to construct the query_criteria argument:
query_criteria = {
'.path.to.number': {
'min_value': 4.5
},
'.path.to.string': {
'discrete_values': [ 'pond', 'lake', 'stream', 'brook' ]
}
}
NOTE: sql only supports a limited number of query conditions and all list
fields in a record are stored as a blob. this method constructs a
sql query which contains clauses wherever the query conditions can
be translated one-to-one into sql keywords and returns the entire
record of each qualifying record. once sql returns its results, the
remaining query conditions are applied to the record and only those
results which match all conditions are yield by the generator. as
such, depending upon the conditions selected, this method acts more
or less like a SCAN of the entire database. if no sql supported
conditions are provided, the method will look through all records.
native SQL supported conditions
float, integer & strings:
value_exists
equal_to
discrete_values
excluded_values
greater_than
less_than
max_value
min_value
booleans:
value_exists
equal_to
lists:
value_exists
NOTE: the full list of all criteria are found in the reference page for the
jsonmodel module as well as the query-rules.json file included in the
module.
http://collectiveacuity.github.io/jsonModel/reference/#query-criteria
an example of how to construct the order_criteria argument:
order_criteria = [
{ '.path.to.number': 'descend' },
{ '.path.to.string': '' }
]
NOTE: results can be ordered either by ascending or descending values. to
order in ascending order, leave the value for the field empty. any value
for the field key automatically is interpreted as descending order
'''
title = '%s.list' % self.__class__.__name__
from sqlalchemy import desc as order_desc
# validate inputs
if query_criteria:
self.model.query(query_criteria)
else:
query_criteria = {}
if order_criteria:
object_title = '%s(%s=%s)' % (title, 'order_criteria', str(order_criteria))
self.fields.validate(order_criteria, '.order_criteria', object_title)
for i in range(len(order_criteria)):
criterion = order_criteria[i]
for key, value in criterion.items():
criteria_key = key
if key.find('.') != 0:
criteria_key = '.%s' % key
if criteria_key not in self.model.keyMap.keys():
raise ValueError('%s(order_criteria=[...]) item %s key %s does not exist in record_schema.' % (title, i, key))
else:
order_criteria = []
# construct select statement with sql supported conditions
# http://docs.sqlalchemy.org/en/latest/orm/tutorial.html#common-filter-operators
select_object = self.table.select()
for key, value in query_criteria.items():
record_key = key
map_key = key
if key.find('.') == 0:
record_key = key[1:]
else:
map_key = '.%s' % key
if record_key:
if self.item_key.findall(record_key):
pass
else:
test_value = value
if not isinstance(value, dict):
test_value = { 'equal_to': value }
column_object = getattr(self.table.c, record_key)
for k, v in test_value.items():
if k == 'value_exists':
if self.model.keyMap[map_key]['value_datatype'] in ('string', 'number', 'boolean', 'list'):
if v:
select_object = select_object.where(column_object!=None)
else:
select_object = select_object.where(column_object==None)
else:
if self.model.keyMap[map_key]['value_datatype'] in ('string', 'number', 'boolean'):
if k == 'equal_to':
select_object = select_object.where(column_object==v)
elif k == 'discrete_values':
select_object = select_object.where(column_object.in_(v))
elif k == 'excluded_values':
select_object = select_object.where(~column_object.in_(v))
elif k == 'greater_than':
select_object = select_object.where(column_object.__gt__(v))
elif k == 'less_than':
select_object = select_object.where(column_object.__lt__(v))
elif k == 'max_value':
select_object = select_object.where(column_object.__le__(v))
elif k == 'min_value':
select_object = select_object.where(column_object.__ge__(v))
# add order criteria
for criterion in order_criteria:
key, value = next(iter(criterion.items()))
record_key = key
if key.find('.') == 0:
record_key = key[1:]
if record_key:
if self.item_key.findall(record_key):
pass
else:
column_object = getattr(self.table.c, record_key)
if value:
select_object = select_object.order_by(order_desc(column_object))
else:
select_object = select_object.order_by(column_object)
# execute query on database
# print(select_object)
for record in self.session.execute(select_object).fetchall():
record_details = self._reconstruct_record(record)
# filter results with non-sql supported conditions
if query_criteria:
if self.model.query(query_criteria, | |
crime | candy"], gain, "statgems")
else:
desc += " :lollipop:`lollipop`"
sql.add(PlayerID, "lollipop", gain, "inventory")
sql.add(PlayerID, ["Halloween", "Halloween | crime | lollipop"], gain, "statgems")
else:
desc = "{1} {0} :gem:`gems`".format(gain, lang_P.forge_msg(lang, "crime array", None, True))
sql.addGems(PlayerID, gain)
sql.add(PlayerID, ["crime", "crime | gain"], gain, "statgems")
try:
sql.addGems(GF.PlayerID_GetGems, -gain) # Vole l'équivalent du crime au bot
except sqlite3.OperationalError:
pass
desc += GF.gift(PlayerID, lang)
sql.updateComTime(PlayerID, "crime", "gems")
lvl.addxp(PlayerID, 1, "gems")
sql.add(PlayerID, ["crime", "crime"], 1, "statgems")
msg.append("OK")
else:
desc = lang_P.forge_msg(lang, "couldown", [str(GF.couldown_6s)])
msg.append("couldown")
msg.append(lang)
msg.append(desc)
return msg
def gamble(param):
    """**[valeur]** | Avez vous l'ame d'un parieur ?"""
    # Command handler: bet `valeur` gems with a 1-in-4 chance to win back
    # triple the stake. Returns [status, lang, description] for the caller.
    lang = param["lang"]
    PlayerID = param["PlayerID"]
    valeur = param["valeur"]
    msg = []
    valeur = int(valeur)
    gems = sql.valueAtNumber(PlayerID, "gems", "gems")
    if valeur < 0:
        # negative bets are treated as cheating: fine the player up to 100 gems
        desc = lang_P.forge_msg(lang, "DiscordCop Amende")
        sql.add(PlayerID, ["divers", "DiscordCop Amende"], 1, "statgems")
        if gems > 100:
            sql.addGems(PlayerID, -100)
        else:
            sql.addGems(PlayerID, -gems)
        msg.append("anticheat")
        msg.append(lang)
        msg.append(desc)
        return msg
    elif valeur > 0 and gems >= valeur:
        if sql.spam(PlayerID, GF.couldown_8s, "gamble", "gems"):
            # take the stake from the player and credit it to the bot account
            sql.addGems(PlayerID, -valeur)
            sql.addGems(GF.PlayerID_GetGems, int(valeur))
            sql.add(PlayerID, ["gamble", "gamble | perte"], valeur, "statgems")
            if r.randint(0, 3) == 0:
                # win: triple payout, with a 20% tax credited to the bot
                gain = valeur*3
                Taxe = GF.taxe(gain, 0.2)
                try:
                    sql.addGems(GF.PlayerID_GetGems, int(Taxe["taxe"]))
                # FIX: was a bare except; keep the best-effort behavior but
                # stop swallowing KeyboardInterrupt/SystemExit
                except Exception:
                    print("Le bot ne fait pas parti de la DB")
                # original comment claimed the expected value of the gamble is
                # zero (odds are 1/4 for a 3x payout on a 1x stake)
                desc = "{1} {0} :gem:`gems`".format(gain, lang_P.forge_msg(lang, "gamble array", None, True))
                sql.add(PlayerID, ["gamble", "gamble | win"], 1, "statgems")
                sql.addGems(PlayerID, gain)
                sql.add(PlayerID, ["gamble", "gamble | gain"], gain, "statgems")
                # track the player's best single win
                gainmax = sql.valueAtNumber(PlayerID, "gamble | max", "statgems")
                if gain > gainmax:
                    if gainmax == 0:
                        # stat field does not exist yet: create it
                        sql.add(PlayerID, ["gamble", "gamble | max"], gain, "statgems")
                    else:
                        sql.updateField(PlayerID, "gamble | max", gain, "statgems")
                # =====================================
                # Bonus
                # =====================================
                desc += GF.lootbox(PlayerID, lang)
            else:
                desc = lang_P.forge_msg(lang, "gamble", [valeur], False, 0)
            sql.updateComTime(PlayerID, "gamble", "gems")
            lvl.addxp(PlayerID, 1, "gems")
            sql.add(PlayerID, ["gamble", "gamble"], 1, "statgems")
            msg.append("OK")
        else:
            desc = lang_P.forge_msg(lang, "couldown", [str(GF.couldown_8s)])
            msg.append("couldown")
    elif gems < valeur:
        # not enough gems to cover the bet
        desc = lang_P.forge_msg(lang, "gamble", None, False, 4)
        msg.append("NOK")
    else:
        # zero-value bet
        desc = lang_P.forge_msg(lang, "gamble", None, False, 5)
        msg.append("NOK")
    msg.append(lang)
    msg.append(desc)
    return msg
def mine(param):
    """Minez compagnons !!"""
    # Command handler: mine with the best pickaxe owned; yields a random ore
    # plus cobblestone. Returns [status, lang, description] for the caller.
    lang = param["lang"]
    PlayerID = param["PlayerID"]
    msg = []
    nbMax = 0
    desc = ""
    if sql.spam(PlayerID, GF.couldown_6s, "mine", "gems"):
        if GF.testInvTaille(PlayerID):
            # =====================================
            # Detect the best tool owned
            # =====================================
            # NOTE(review): outil/mult stay unbound when no pickaxe is owned;
            # the nbMax != 0 guard below prevents their use in that case
            if sql.valueAtNumber(PlayerID, "diamond_pickaxe", "inventory") >= 1:
                nbMax = 600
                outil = "diamond_pickaxe"
                mult = 2.5
            elif sql.valueAtNumber(PlayerID, "iron_pickaxe", "inventory") >= 1:
                nbMax = 250
                outil = "iron_pickaxe"
                mult = 1.5
            elif sql.valueAtNumber(PlayerID, "pickaxe", "inventory") >= 1:
                nbMax = 100
                outil = "pickaxe"
                mult = 1
            add_item = ""
            if nbMax != 0:
                nbrand = r.randint(1, nbMax)
                # =====================================
                # Tool durability handling
                # =====================================
                Durability = GF.durability(PlayerID, outil)
                if Durability:
                    # the tool broke: report it and stop here
                    desc = lang_P.forge_msg(lang, "mine", [outil, "{idmoji[gem_" + outil + "]}"], False, 0)
                    sql.add(PlayerID, ["mine", "mine | broken | {}".format(outil)], 1, "statgems")
                    msg.append("OK")
                    msg.append(lang)
                    msg.append(desc)
                    return msg
                # =====================================
                # Resolve the loot roll
                # =====================================
                # print(nbrand)
                # better tools (mult > 1) unlock the rarer ores
                if mult > 1:
                    if nbrand <= int(nbMax*(0.01)):
                        add_item = "ruby"
                        nbrand = r.randint(0, 1)
                    elif nbrand <= int(nbMax*(0.05)):
                        add_item = "emerald"
                        nbrand = r.randint(0, 2)
                    elif nbrand <= int(nbMax*(0.10)):
                        add_item = "diamond"
                        nbrand = r.randint(0, 3)
                    elif nbrand <= int(nbMax*(0.25)):
                        add_item = "gold"
                        nbrand = r.randint(0, 5)
                    elif nbrand <= int(nbMax*(0.50)):
                        add_item = "iron"
                        nbrand = r.randint(1, 8)
                    else:
                        nbrand = 0
                else:
                    if nbrand <= int(nbMax*(0.50)):
                        add_item = "iron"
                        nbrand = r.randint(1, 5)
                    else:
                        nbrand = 0
                if nbrand != 0:
                    # credit the ore, its statistics and the result message
                    nbrand = int(nbrand*mult)
                    sql.add(PlayerID, add_item, nbrand, "inventory")
                    sql.add(PlayerID, ["mine", "mine | item | {}".format(add_item)], nbrand, "statgems")
                    desc = lang_P.forge_msg(lang, "mine", [nbrand, add_item, "{idmoji[gem_" + add_item + "]}"], False, 1)
                    # =====================================
                    # Bonus
                    # =====================================
                    desc += GF.lootbox(PlayerID, lang)
                    desc += GF.gift(PlayerID, lang)
                    # cobblestone is always granted on a successful roll
                    nbcobble = r.randint(1, 10)
                    nbcobble = int(nbcobble*mult)
                    sql.add(PlayerID, "cobblestone", nbcobble, "inventory")
                    sql.add(PlayerID, ["mine", "mine | item | cobblestone"], nbcobble, "statgems")
                    desc += lang_P.forge_msg(lang, "mine", [nbcobble, "{idmoji[gem_cobblestone]}"], False, 2)
                    sql.add(PlayerID, ["mine", "mine"], 1, "statgems")
                else:
                    # empty roll: nothing found this time
                    desc = lang_P.forge_msg(lang, "mine", None, False, 3)
            # NOTE(review): unlike dig()/fish() there is no message when no
            # pickaxe is owned (nbMax == 0): desc stays empty but the command
            # still registers as used with status "OK" -- confirm intended
            sql.updateComTime(PlayerID, "mine", "gems")
            lvl.addxp(PlayerID, 1, "gems")
            msg.append("OK")
        else:
            # inventory is full
            desc = lang_P.forge_msg(lang, "WarningMsg", None, False, 2)
            msg.append("NOK")
    else:
        # command still on cooldown
        desc = lang_P.forge_msg(lang, "couldown", [str(GF.couldown_6s)])
        msg.append("couldown")
    msg.append(lang)
    msg.append(desc)
    return msg
def dig(param):
    """Creusons compagnons !!"""
    # Command handler: dig with the best shovel owned; yields random produce.
    # Returns [status, lang, description] for the caller.
    lang = param["lang"]
    PlayerID = param["PlayerID"]
    msg = []
    nbMax = 0
    desc = ""
    if sql.spam(PlayerID, GF.couldown_6s, "dig", "gems"):
        if GF.testInvTaille(PlayerID):
            # =====================================
            # Detect the best tool owned
            # =====================================
            if sql.valueAtNumber(PlayerID, "diamond_shovel", "inventory") >= 1:
                nbMax = 400
                outil = "diamond_shovel"
                mult = 2.5
            elif sql.valueAtNumber(PlayerID, "iron_shovel", "inventory") >= 1:
                nbMax = 200
                outil = "iron_shovel"
                mult = 1.5
            elif sql.valueAtNumber(PlayerID, "shovel", "inventory") >= 1:
                nbMax = 100
                outil = "shovel"
                mult = 1
            add_item = ""
            if nbMax != 0:
                nbrand = r.randint(1, nbMax)
                # =====================================
                # Tool durability handling
                # =====================================
                Durability = GF.durability(PlayerID, outil)
                if Durability:
                    # the tool broke: report it and stop here
                    desc = lang_P.forge_msg(lang, "dig", [outil, "{idmoji[gem_" + outil + "]}"], False, 0)
                    sql.add(PlayerID, ["dig", "dig | broken | {}".format(outil)], 1, "statgems")
                    msg.append("OK")
                    msg.append(lang)
                    msg.append(desc)
                    return msg
                # =====================================
                # Resolve the loot roll
                # =====================================
                # print(nbrand)
                if nbrand <= int(nbMax*(0.25)):
                    add_item = "cacao"
                    nbrand = r.randint(0, 2)
                elif nbrand <= int(nbMax*(0.65)):
                    add_item = "seed"
                    nbrand = r.randint(0, 4)
                elif nbrand <= int(nbMax*(0.95)):
                    add_item = "potato"
                    nbrand = r.randint(1, 4)
                else:
                    nbrand = 0
                if nbrand != 0:
                    # credit the produce, its statistics and the result message
                    nbrand = int(nbrand*mult)
                    sql.add(PlayerID, add_item, nbrand, "inventory")
                    sql.add(PlayerID, ["dig", "dig | item | {}".format(add_item)], nbrand, "statgems")
                    desc = lang_P.forge_msg(lang, "dig", [nbrand, add_item, "{idmoji[gem_" + add_item + "]}"], False, 1)
                    # =====================================
                    # Bonus
                    # =====================================
                    desc += GF.lootbox(PlayerID, lang)
                    desc += GF.gift(PlayerID, lang)
                else:
                    # empty roll: nothing found this time
                    desc = lang_P.forge_msg(lang, "dig", None, False, 2)
                    # NOTE(review): the overall "dig" usage counter is only
                    # incremented on an empty roll here, whereas mine()
                    # increments its counter on success -- confirm intended
                    sql.add(PlayerID, ["dig", "dig"], 1, "statgems")
            else:
                # no shovel owned
                desc = lang_P.forge_msg(lang, "dig", None, False, 3)
            sql.updateComTime(PlayerID, "dig", "gems")
            lvl.addxp(PlayerID, 1, "gems")
            msg.append("OK")
        else:
            # inventory is full
            desc = lang_P.forge_msg(lang, "WarningMsg", None, False, 2)
            msg.append("NOK")
    else:
        # command still on cooldown
        desc = lang_P.forge_msg(lang, "couldown", [str(GF.couldown_6s)])
        msg.append("couldown")
    msg.append(lang)
    msg.append(desc)
    return msg
def fish(param):
    """Péchons compagnons !!"""
    # Command handler: fish with the fishing rod, optionally consuming a
    # fishhook for a multiplier. Returns [status, lang, description].
    lang = param["lang"]
    PlayerID = param["PlayerID"]
    msg = []
    nbMax = 0
    desc = ""
    if sql.spam(PlayerID, GF.couldown_6s, "fish", "gems"):
        if GF.testInvTaille(PlayerID):
            # =====================================
            # Detect the best tool owned
            # =====================================
            if sql.valueAtNumber(PlayerID, "fishingrod", "inventory") >= 1:
                nbMax = 100
                outil = "fishingrod"
                # a fishhook grants a 2x-5x multiplier and is consumed
                nbfishhook = sql.valueAtNumber(PlayerID, "fishhook", "inventory")
                if nbfishhook >= 1:
                    mult = r.randint(-1, 5)
                    if mult < 2:
                        mult = 2
                    sql.add(PlayerID, "fishhook", -1, "inventory")
                    sql.add(PlayerID, ["fish", "fish | fishhook utilisé"], 1, "statgems")
                else:
                    mult = 1
            add_item = ""
            if nbMax != 0:
                nbrand = r.randint(1, nbMax)
                # =====================================
                # Tool durability handling
                # =====================================
                Durability = GF.durability(PlayerID, outil)
                if Durability:
                    # the rod broke: report it and stop here
                    desc = lang_P.forge_msg(lang, "fish", [outil, "{idmoji[gem_" + outil + "]}"], False, 0)
                    sql.add(PlayerID, ["fish", "fish | broken | {}".format(outil)], 1, "statgems")
                    msg.append("OK")
                    msg.append(lang)
                    msg.append(desc)
                    return msg
                # =====================================
                # Resolve the loot roll
                # =====================================
                # print(nbrand)
                if nbrand <= int(nbMax*(0.10)):
                    add_item = "octopus"
                    nbrand = r.randint(0, 1)
                elif nbrand <= int(nbMax*(0.25)):
                    add_item = "blowfish"
                    nbrand = r.randint(0, 3)
                elif nbrand <= int(nbMax*(0.40)):
                    add_item = "tropicalfish"
                    nbrand = r.randint(0, 3)
                elif nbrand <= int(nbMax*(0.90)):
                    # NOTE(review): nbrand keeps its raw roll value (41-90) in
                    # this branch and is used below as the credited quantity,
                    # bypassing the fishhook multiplier -- confirm intended
                    add_item = "fish"
                else:
                    nbrand = 0
                if nbrand != 0 or add_item == "fish":
                    if add_item != "fish":
                        nbrand = int(nbrand*mult)
                    sql.add(PlayerID, add_item, nbrand, "inventory")
                    sql.add(PlayerID, ["fish", "fish | item | {}".format(add_item)], nbrand, "statgems")
                    desc = lang_P.forge_msg(lang, "fish", [nbrand, add_item, "{idmoji[gem_" + add_item + "]}"], False, 1)
                    # a bonus haul of plain fish is always granted on success
                    nb = r.randint(1, 8)
                    nb = int(nb*mult)
                    sql.add(PlayerID, "fish", nb, "inventory")
                    sql.add(PlayerID, ["fish", "fish | item | fish"], nb, "statgems")
                    desc += lang_P.forge_msg(lang, "fish", [nb, "{idmoji[gem_fish]}"], False, 2)
                    # =====================================
                    # Bonus
                    # =====================================
                    desc += GF.lootbox(PlayerID, lang)
                    desc += GF.gift(PlayerID, lang)
                else:
                    # empty roll: refund the fishhook if one was consumed
                    desc = lang_P.forge_msg(lang, "fish", None, False, 3)
                    if mult >= 2:
                        sql.add(PlayerID, "fishhook", 1, "inventory")
                        sql.add(PlayerID, ["fish", "fish | fishhook utilisé"], -1, "statgems")
                sql.add(PlayerID, ["fish", "fish"], 1, "statgems")
            else:
                # no fishing rod owned
                desc = lang_P.forge_msg(lang, "fish", ["{idmoji[fishingrod]}"], False, 4)
            sql.updateComTime(PlayerID, "fish", "gems")
            lvl.addxp(PlayerID, 1, "gems")
            msg.append("OK")
        else:
            # inventory is full
            desc = lang_P.forge_msg(lang, "WarningMsg", None, False, 2)
            msg.append("NOK")
    else:
        # command still on cooldown
        desc = lang_P.forge_msg(lang, "couldown", [str(GF.couldown_6s)])
        msg.append("couldown")
    msg.append(lang)
    msg.append(desc)
    return msg
def slots(param):
"""**[mise]** | La machine à sous, la | |
list
:param size: Number of sample to draw from data
:return:
"""
# Hack to avoid skipping the first value of the parameter list.
# This function is called once when the _algorithm __init__
# has to initialize the parameter names. Because of this, we end up
# losing the first value in the list, which is undesirable
# This check makes sure that the first call results in a dummy value
if self.throwaway_first:
self.throwaway_first = False
return None
if size:
return np.fromiter(self.iterator, dtype=float, count=size)
else:
try:
return next(self.iterator)
except StopIteration:
text = 'Number of repetitions is higher than the number of available parameter sets'
raise IndexError(text)
    def astuple(self):
        # Flatten this parameter to the tuple layout consumed by the sampling
        # framework; the four zeros fill the step/optguess/minbound/maxbound
        # slots (compare Constant.astuple, which fills them with its value).
        # NOTE(review): slot meanings inferred from sibling classes -- confirm.
        return self(), self.name, 0, 0, 0, 0, self.as_int
class Constant(Base):
    """
    A parameter that always yields one fixed scalar value
    """
    __rndargs__ = ('scalar',)

    def __init__(self, *args, **kwargs):
        # The instance itself is handed to Base as the random function, so the
        # sampling machinery ends up calling __call__ and gets the constant.
        super().__init__(self, 'Constant', *args, **kwargs)

    @property
    def value(self):
        """The constant scalar this parameter produces."""
        return self.rndargs[0]

    def __call__(self, size=None):
        """
        Return the constant, or an array filled with it when *size* is given

        :param size: Number of items to draw from parameter
        :return: the scalar value, or a float ndarray of length *size*
        """
        if size:
            return np.full(size, self.value, dtype=float)
        return self.value

    def astuple(self):
        """Tuple representation used by the sampling framework."""
        return self(), self.name, 0, self.value, self.value, self.value, self.as_int
class Normal(Base):
    """
    A parameter whose values are drawn from a normal (Gaussian) distribution
    """
    __rndargs__ = ('mean', 'stddev')

    def __init__(self, *args, **kwargs):
        """
        :name: Name of the parameter
        :mean: center of the normal distribution
        :stddev: variance of the normal distribution
        :step: (optional) step size for algorithms that need one (e.g. mcmc);
            defaults to the median of rndfunc(*rndargs, size=1000)
        :optguess: (optional) starting point for the parameter; defaults to
            quantile(0.5) - quantile(0.4) of rndfunc(*rndargs, size=1000)
        """
        super().__init__(rnd.normal, 'Normal', *args, **kwargs)
class logNormal(Base):
    """
    A parameter whose values are drawn from a log-normal distribution
    """
    __rndargs__ = ('mean', 'sigma')

    def __init__(self, *args, **kwargs):
        """
        :name: Name of the parameter
        :mean: Mean value of the underlying normal distribution
        :sigma: Standard deviation of the underlying normal distribution >0
        :step: (optional) step size for algorithms that need one (e.g. mcmc);
            defaults to the median of rndfunc(*rndargs, size=1000)
        :optguess: (optional) starting point for the parameter; defaults to
            quantile(0.5) - quantile(0.4) of rndfunc(*rndargs, size=1000)
        """
        super().__init__(rnd.lognormal, 'logNormal', *args, **kwargs)
class Chisquare(Base):
    """
    A parameter whose values are drawn from a chi-square distribution
    """
    __rndargs__ = ('dt',)

    def __init__(self, *args, **kwargs):
        """
        :name: Name of the parameter
        :dt: Number of degrees of freedom.
        :step: (optional) step size for algorithms that need one (e.g. mcmc);
            defaults to the median of rndfunc(*rndargs, size=1000)
        :optguess: (optional) starting point for the parameter; defaults to
            quantile(0.5) - quantile(0.4) of rndfunc(*rndargs, size=1000)
        """
        super().__init__(rnd.chisquare, 'Chisquare', *args, **kwargs)
class Exponential(Base):
    """
    A parameter whose values are drawn from an exponential distribution
    """
    __rndargs__ = ('scale',)

    def __init__(self, *args, **kwargs):
        """
        :name: Name of the parameter
        :scale: The scale parameter, beta = 1/lambda.
        :step: (optional) step size for algorithms that need one (e.g. mcmc);
            defaults to the median of rndfunc(*rndargs, size=1000)
        :optguess: (optional) starting point for the parameter; defaults to
            quantile(0.5) - quantile(0.4) of rndfunc(*rndargs, size=1000)
        """
        super().__init__(rnd.exponential, 'Exponential', *args, **kwargs)
class Gamma(Base):
    """
    A parameter whose values are drawn from a gamma distribution
    """
    __rndargs__ = ('shape', 'scale')

    def __init__(self, *args, **kwargs):
        """
        :name: Name of the parameter
        :shape: The shape of the gamma distribution.
        :scale: The scale of the gamma distribution
        :step: (optional) step size for algorithms that need one (e.g. mcmc);
            defaults to the median of rndfunc(*rndargs, size=1000)
        :optguess: (optional) starting point for the parameter; defaults to
            quantile(0.5) - quantile(0.4) of rndfunc(*rndargs, size=1000)
        """
        super().__init__(rnd.gamma, 'Gamma', *args, **kwargs)
class Wald(Base):
    """
    A parameter whose values are drawn from a Wald (inverse Gaussian)
    distribution
    """
    __rndargs__ = ('mean', 'scale')

    def __init__(self, *args, **kwargs):
        """
        :name: Name of the parameter
        :mean: Shape of the distribution.
        :scale: Shape of the distribution.
        :step: (optional) step size for algorithms that need one (e.g. mcmc);
            defaults to the median of rndfunc(*rndargs, size=1000)
        :optguess: (optional) starting point for the parameter; defaults to
            quantile(0.5) - quantile(0.4) of rndfunc(*rndargs, size=1000)
        """
        super().__init__(rnd.wald, 'Wald', *args, **kwargs)
class Weibull(Base):
    """
    A parameter whose values are drawn from a Weibull distribution
    """
    __rndargs__ = ('a',)

    def __init__(self, *args, **kwargs):
        """
        :name: Name of the parameter
        :a: Shape of the distribution.
        :step: (optional) step size for algorithms that need one (e.g. mcmc);
            defaults to the median of rndfunc(*rndargs, size=1000)
        :optguess: (optional) starting point for the parameter; defaults to
            quantile(0.5) - quantile(0.4) of rndfunc(*rndargs, size=1000)
        """
        super().__init__(rnd.weibull, 'Weibull', *args, **kwargs)
class Triangular(Base):
    """
    A parameter whose values are drawn from a triangular distribution
    """
    __rndargs__ = ('left', 'mode', 'right')

    def __init__(self, *args, **kwargs):
        """
        :name: Name of the parameter
        :left: Lower limit of the parameter
        :mode: The value where the peak of the distribution occurs.
        :right: Upper limit, should be larger than `left`.
        :step: (optional) step size for algorithms that need one (e.g. mcmc);
            defaults to the median of rndfunc(*rndargs, size=1000)
        :optguess: (optional) starting point for the parameter; defaults to
            quantile(0.5) - quantile(0.4) of rndfunc(*rndargs, size=1000)
        """
        super().__init__(rnd.triangular, 'Triangular', *args, **kwargs)
class ParameterSet(object):
"""
A Pickable parameter set to use named parameters in a setup
Is not created by a user directly, but in algorithm.
Older versions used a namedtuple, which is not pickable.
An instance of ParameterSet is sent to the users setup.simulate method.
Usage:
ps = ParameterSet(...)
Update values by arguments or keyword arguments
ps(0, 1, 2)
ps(a=1, c=2)
Assess parameter values of this parameter set
ps[0] == ps['a'] == ps.a
A parameter set is a sequence:
list(ps)
Assess the parameter set properties as arrays
[ps.maxbound, ps.minbound, ps.optguess, ps.step, ps.random]
"""
def __init__(self, param_info):
"""
Creates a set of parameters from a parameter info array.
To create the parameter set from a setup use either:
setup = ...
ps = ParameterSet(get_parameters_array(setup))
or you can just use a function for this:
ps = create_set(setup)
:param param_info: A record array containing the properties of the parameters
of this set.
"""
self.__lookup = dict(("p" + x if x.isdigit() else x, i) for i, x in enumerate(param_info['name']))
self.__info = param_info
def __call__(self, *values, **kwargs):
"""
Populates the values ('random') of the parameter set with new data
:param values: Contains the new values or omitted.
If given, the number of values needs to match the number
of parameters
:param kwargs: Can be used to set only single parameter values
:return:
"""
if values:
if len(self.__info) != len(values):
raise ValueError('Given values do are not the same length as the parameter set')
self.__info['random'][:] = values
for k in kwargs:
try:
self.__info['random'][self.__lookup[k]] = kwargs[k]
except KeyError:
raise TypeError('{} is not a parameter of this set'.format(k))
return self
def __len__(self):
return len(self.__info['random'])
def __iter__(self):
return iter(self.__info['random'])
def __getitem__(self, item):
"""
Provides item access
ps[0] == ps['a']
:raises: KeyError, IndexError and TypeError
"""
if type(item) is str:
item = self.__lookup[item]
return self.__info['random'][item]
def __setitem__(self, key, value):
"""
Provides setting of item
ps[0] = 1
ps['a'] = 2
"""
if key in self.__lookup:
key = self.__lookup[key]
self.__info['random'][key] = value
def __getattr__(self, item):
"""
Provides the attribute access like
print(ps.a)
"""
if item.startswith('_'):
raise AttributeError('{} is not a member of this parameter set'.format(item))
elif item in self.__lookup:
return self.__info['random'][self.__lookup[item]]
elif item in self.__info.dtype.names:
return self.__info[item]
else:
raise AttributeError('{} is not a member of this parameter set'.format(item))
def __setattr__(self, key, value):
"""
Provides setting of attributes
| |
# Repository: lauraschachter/pydrograph
__author__ = 'lschachter'
import datetime as dt
import time
from urllib.request import urlopen
import numpy as np
import pandas as pd
from shapely.geometry import Point, Polygon, shape
import pyproj
import gisutils
import urllib3
import xmltodict
import datetime
def make_url(site_code, method, begin_date = '2000-01-01', begin_time = '00:00'):
    ''' This function creates a url to interact with the Army Corps of Engineers webservices to access their rivergages data.
    Currently, the function only supports stage data, as that is what the code is written to pull; however,
    it could be modified to pull in flow, precip, air/water temp, and other parameters as well that are collected by
    the Army Corps of Engineers.

    This function takes in a site code, which is usually alphanumeric such as "rcki2" and should be in a string format.
    The "method" that this function takes in should be one of the five methods that the webservice can interpret:
    getSites, getValues, getVariableInfo, getSiteInfo, and getFValues.
    The begin date is set to pull January 2000, however these can be changed to start at any date and time. The function
    pulls one month of data after the start date and time, as the service seems to only return a month of data regardless of how
    far out the end date is set.
    Dates should be in 'YYYY-MM-DD' format, and time should be in 'HH:MM' 24 format. (data is hourly when available)

    Note: this function performs a network request of its own (a getSiteInfo
    probe) before returning the url.

    Parameters
    ----------
    site_code = str
    method = str
        'getSites',
        'getValues',
        'getVariableInfo',
        'getSiteInfo',
        'getFValues'
    begin_date = str
        'YYYY-MM-DD'
    begin_time = str
        'HH:MM'

    Returns
    -------
    url string
    '''
    begin = begin_date + 'T' + begin_time
    # the service only returns about a month of data, so cap the end date
    # at 31 days after the begin date
    end = datetime.datetime.strptime(begin_date, '%Y-%m-%d')
    end = end + datetime.timedelta(days = 31)
    end = end.strftime('%Y-%m-%dT00:00')
    stuff_at_start = 'https://rivergages.mvr.usace.army.mil/watercontrol/webservices/rest/webserviceWaterML.cfc?method=RGWML&'
    # first build a getSiteInfo url so the site name can be inspected
    method_url = 'meth={}'.format('getSiteInfo')
    stuff_in_middle = '&site={}&location={}&variable=HG&'.format(site_code, site_code)
    dates = 'beginDate={}&endDate={}'.format(begin, end)
    # NOTE: the auth token placeholders must be replaced with real credentials
    stuff_at_end = '&authtoken=<PASSWORD>&authToken=<PASSWORD>'
    url = stuff_at_start + method_url + stuff_in_middle + dates + stuff_at_end
    # probe the site name to detect locks/dams, which report stage under the
    # HT variable instead of HG
    http = urllib3.PoolManager()
    response = http.request('GET', url)
    data = xmltodict.parse(response.data)
    sitename = (data['sitesResponse']['site']['siteInfo']['siteName'])
    words = ['lock', 'Lock', 'dam', 'Dam']
    dam = 0
    for i in words:
        if i in sitename:
            dam = dam + 1
            #print('might be a Dam! [{}]'.format(i))
    # rebuild the url with the requested method (and HT for dam sites)
    method_url2 = 'meth={}'.format(method)
    if dam > 0:
        stuff_in_middle = '&site={}&location={}&variable=HT&'.format(site_code, site_code)
    url = stuff_at_start + method_url2 + stuff_in_middle + dates + stuff_at_end
    return url
def get_data(url):
    ''' Fetch a rivergages webservice URL and return the parsed response.

    Takes a url made by the make_url function, performs the HTTP GET and
    parses the WaterML/XML body with xmltodict.

    Parameters
    ----------
    url = str
        URL built by the make_url function.

    Returns
    -------
    data = ordered dictionary
        The parsed XML response.
    '''
    # Fix: removed the redundant self-assignment ``url = url`` that the
    # original carried over from a copy-paste template.
    http = urllib3.PoolManager()
    response = http.request('GET', url)
    data = xmltodict.parse(response.data)
    return data
def get_site_info(url, data_return = False):
    ''' Print site information: site name, site code, available date range,
    geolocation, and coordinate reference system.

    This function will only run if the url was made using the method
    'getSiteInfo'. With the correct url, this function pulls site and
    variable information from the Army Corps of Engineers webservices.

    Parameters
    ----------
    url = str
    data_return = optional, set to False, True/False

    Returns
    -------
    Prints site information. If data_return is set to True, returns the raw
    data (ordered dictionary).
    '''
    # Fixes vs. original: dropped the redundant ``url = url`` and collapsed
    # the duplicated begin/end date extraction (the list and non-list
    # branches were identical apart from the ``[1]`` index).
    http = urllib3.PoolManager()
    response = http.request('GET', url)
    data = xmltodict.parse(response.data)
    site_info = data['sitesResponse']['site']['siteInfo']
    sitename = site_info['siteName']
    sitecode = site_info['siteCode']['#text']
    geolocation = dict(site_info['geoLocation']['geogLocation'])
    print(sitename)
    print('---------------------------------------------------------------')
    print('Site Code: {}'.format(sitecode))
    print('Location: latitude {}, longitude {}'.format(geolocation['latitude'], geolocation['longitude']))
    print('Coordinate Reference System: {}'.format(geolocation['@srs']))
    catalog = data['sitesResponse']['site']['seriesCatalog']
    # A catalog of length 1 appears to mean "no series published for this
    # site" -- TODO confirm against the WaterML response schema.
    if len(catalog) == 1:
        print('Variable: No variable info available at this site.')
        print('Available Date Range: No date info available at this site.')
        variable_info = None
    else:
        series = catalog['series']
        if isinstance(series, list):
            # Multiple series: the original code always used the second one.
            series = series[1]
        variable_info = dict(series['variable'])
        begin_date = series['variableTimeInterval']['beginDateTime'].replace('T', ' ')
        end_date = series['variableTimeInterval']['endDateTime'].replace('T', ' ')
    if variable_info is not None:
        print('Variable: {}'.format(variable_info['variableName']))
        print('Available Date Range: {} to {}'.format(begin_date, end_date))
    if data_return is True:
        return data
def get_variable_info(url, data_return = False):
    ''' Print information about the variable returned by 'getVariableInfo'
    from the Army Corps of Engineers webservice.

    This function will only run if the url was made using the method
    'getVariableInfo'.

    Parameters
    ----------
    url = str
    data_return = optional, set to False, True/False

    Returns
    -------
    Prints the variable name, variable code, and no-data value.
    If data_return is set to True, returns the raw data (ordered dictionary).
    '''
    # Fixes vs. original: removed the redundant ``url = url``; the docstring
    # previously promised dataframe/dict returns that the function never
    # produced, so those claims were dropped.  variable_units/variable_codes
    # were extracted but never used, so they are gone too.
    http = urllib3.PoolManager()
    response = http.request('GET', url)
    data = xmltodict.parse(response.data)
    variable_info = data['variablesResponse']['variables']
    print('Variable: {}'.format(variable_info['variable']['variableName']))
    print('Variable Code: {}'.format(variable_info['variable']['variableCode']['#text']))
    print('No Data Value: {}'.format(variable_info['variable']['NoDataValue']))
    if data_return is True:
        return data
def get_sites(url, data_return = False):
    ''' Print information about a site returned by 'getSites' from the Army
    Corps of Engineers webservice.

    This function will only run if the url was made using the method
    'getSites'.

    Parameters
    ----------
    url = str
    data_return = optional, set to False, True/False

    Returns
    -------
    Prints the site name, site code, location, and coordinate reference
    system. If data_return is set to True, returns the raw data (ordered
    dictionary).
    '''
    # Fix: removed the redundant ``url = url`` self-assignment.
    http = urllib3.PoolManager()
    response = http.request('GET', url)
    data = xmltodict.parse(response.data)
    site_info = data['sitesResponse']['site']['siteInfo']
    sitename = site_info['siteName']
    sitecode = dict(site_info['siteCode'])
    geolocation = dict(site_info['geoLocation']['geogLocation'])
    print(sitename)
    print('---------------------------------------------------------------')
    print('Site Code: {}, {}'.format(sitecode['#text'], sitecode['@siteID']))
    print('Location: latitude {}, longitude {}'.format(geolocation['latitude'], geolocation['longitude']))
    print('Coordinate Reference System: {}'.format(geolocation['@srs']))
    if data_return is True:
        return data
def get_values(url):
    ''' Retrieve a month of stage data from the US Army Corps of Engineers
    webservices.

    Data is returned in a pandas dataframe with stage and datetime columns.
    This function will only run if the url was made using the method
    'getValues'.

    Parameters
    ----------
    url = str

    Returns
    -------
    df2 - pandas dataframe of datetimes and stage data (as strings and as
    floats) in feet, or None when no values are available.
    '''
    # Fix: removed the redundant ``url = url``; hoisted the repeated deep
    # dictionary lookup into a local.
    http = urllib3.PoolManager()
    response = http.request('GET', url)
    data = xmltodict.parse(response.data)
    values = data['timeSeriesResponse']['timeSeries']['values']
    # A 'values' element with exactly 2 children appears to indicate real
    # observations are present -- TODO confirm against the WaterML schema.
    if len(values) == 2:
        df2 = pd.DataFrame(values['value'])
        df2['datetime'] = pd.to_datetime(df2['@dateTime'], format= '%Y-%m-%dT%H:%M:%S')
        df2 = df2.drop(columns = {'@dateTime'})
        df2 = df2.rename(columns = {'#text' : 'stage (ft)'})
        # NOTE(review): this is a *string* comparison intended to drop
        # negative/sentinel readings; '.5' or ' 1' would also be dropped.
        # Kept as-is to preserve behavior -- verify intent before changing.
        df2 = df2[df2['stage (ft)'] >= '0']
        df2 = df2.assign(stage = df2['stage (ft)'].astype(float))
    else:
        print('No values available at this site in this date range')
        df2 = None
    return df2
def get_fvalues(url):
    ''' Retrieve a month of stage data from the US Army Corps of Engineers
    webservices (forecast-values variant of the url).

    Data is returned in a pandas dataframe with stage and datetime columns.

    Parameters
    ----------
    url = str

    Returns
    -------
    df2 - pandas dataframe of datetimes and stage data (as strings and as
    floats) in feet, or None when no values are available.
    '''
    # Fix: the original body was a byte-for-byte copy of get_values.
    # Delegate so the parsing logic lives in exactly one place; behavior
    # is unchanged.
    return get_values(url)
def pull_year_of_data(site_name, begin_date):
'''This function uses the get_values function to pull one year of data (the Army site only gives a month at a time).
Parameters
----------
site_name = str
begin_date = str 'YYYY-MM-DD'
Returns
-------
data - pandas dataframe of datetimes and stage data (as strings and as floats) in feet
| |
YOLO format and export as files.
If you pass classes, classes.txt will be generated based on it .
If not , classes.txt will be generated based on passed tasks .(Annotations never used in your project will not be exported.)
tasks is a list of tasks. (Required)
classes is a list of annotation values. e.g. ['dog','bird'] (Optional)
output_dir is output directory(default: output/yolo). (Optional)
"""
annos, categories = converters.to_yolo(tasks, classes)
for anno in annos:
file_name = anno["filename"]
basename = utils.get_basename(file_name)
file_path = os.path.join(
output_dir, "annotations", basename + ".txt")
os.makedirs(os.path.dirname(file_path), exist_ok=True)
with open(file_path, 'w', encoding="utf8") as f:
for obj in anno["object"]:
f.write(obj)
f.write("\n")
classes_file_path = os.path.join(output_dir, "classes.txt")
os.makedirs(os.path.dirname(classes_file_path), exist_ok=True)
with open(classes_file_path, 'w', encoding="utf8") as f:
for category in categories:
f.write(category["name"])
f.write("\n")
def export_pascalvoc(self, tasks: list, output_dir: str = os.path.join("output", "pascalvoc")) -> None:
"""
Convert tasks to Pascal VOC format as files.
tasks is a list of tasks. (Required)
output_dir is output directory(default: output/pascalvoc). (Optional)
"""
pascalvoc = converters.to_pascalvoc(tasks)
for voc in pascalvoc:
file_name = voc["annotation"]["filename"]
basename = utils.get_basename(file_name)
file_path = os.path.join(output_dir, basename + ".xml")
os.makedirs(os.path.dirname(file_path), exist_ok=True)
xml = xmltodict.unparse(voc, pretty=True, full_document=False)
with open(file_path, 'w', encoding="utf8") as f:
f.write(xml)
def export_labelme(self, tasks: list, output_dir: str = os.path.join("output", "labelme")) -> None:
"""
Convert tasks to labelme format as files.
tasks is a list of tasks. (Required)
output_dir is output directory(default: output/labelme). (Optional)
"""
labelmes = converters.to_labelme(tasks)
for labelme in labelmes:
file_name = labelme["imagePath"]
basename = utils.get_basename(file_name)
file_path = os.path.join(output_dir, basename + ".json")
os.makedirs(os.path.dirname(file_path), exist_ok=True)
with open(file_path, 'w') as f:
json.dump(labelme, f, indent=4, ensure_ascii=False)
    # Instance / Semantic Segmentation
def export_instance_segmentation(self, tasks: list, output_dir: str = os.path.join("output", "instance_segmentation"), pallete: List[int] = const.COLOR_PALETTE) -> None:
"""
Convert tasks to index color instance segmentation (PNG files).
Supports only bbox, polygon and segmentation annotation types.
Supports up to 57 instances in default colors palette. Check const.COLOR_PALETTE for more details.
tasks is a list of tasks. (Required)
output_dir is output directory(default: output/instance_segmentation). (Optional)
pallete is color palette of index color. Ex: [255, 0, 0, ...] (Optional)
"""
tasks = converters.to_pixel_coordinates(tasks)
for task in tasks:
self.__export_index_color_image(
task=task, output_dir=output_dir, pallete=pallete, is_instance_segmentation=True)
def export_semantic_segmentation(self, tasks: list, output_dir: str = os.path.join("output", "semantic_segmentation"), pallete: List[int] = const.COLOR_PALETTE) -> None:
"""
Convert tasks to index color semantic segmentation (PNG files).
Supports only bbox, polygon and segmentation annotation types.
Check const.COLOR_PALETTE for color pallete.
tasks is a list of tasks. (Required)
output_dir is output directory(default: output/semantic_segmentation). (Optional)
pallete is color palette of index color. Ex: [255, 0, 0, ...] (Optional)
"""
classes = []
for task in tasks:
for annotation in task["annotations"]:
classes.append(annotation["value"])
classes = list(set(classes))
classes.sort()
tasks = converters.to_pixel_coordinates(tasks)
for task in tasks:
self.__export_index_color_image(
task=task, output_dir=output_dir, pallete=pallete, is_instance_segmentation=False, classes=classes)
    def __export_index_color_image(self, task: list, output_dir: str, pallete: List[int], is_instance_segmentation: bool = True, classes: list = []) -> None:
        """
        Render one task's annotations into an index-color PNG.

        Each annotation is filled with an integer label: the running
        annotation index for instance segmentation, or the 1-based position
        of its value in `classes` for semantic segmentation.  The grayscale
        label image is then saved as a palettized ("P" mode) PNG.

        NOTE(review): `classes` uses a mutable default ([]); it is only read
        here, so this is harmless, but callers should always pass it for
        semantic segmentation.
        """
        # Build a (height, width) uint8 label canvas of zeros (background).
        image = Image.new("RGB", (task["width"], task["height"]), 0)
        image = np.array(image)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        index = 1
        for annotation in task["annotations"]:
            # Label value to paint for this annotation.
            color = index if is_instance_segmentation else classes.index(
                annotation["value"]) + 1
            if annotation["type"] == AnnotationType.segmentation.value:
                for region in annotation["points"]:
                    count = 0
                    for points in region:
                        if count == 0:
                            # First ring: the filled outer contour.
                            cv_draw_points = self.__get_cv_draw_points(points)
                            cv2.fillPoly(
                                image, [cv_draw_points], color, lineType=cv2.LINE_8, shift=0)
                        else:
                            # Reverse hollow points for opencv because this points are counter clockwise
                            cv_draw_points = self.__get_cv_draw_points(
                                utils.reverse_points(points))
                            # Holes are filled with 0 (background) to carve
                            # them out of the outer contour.
                            cv2.fillPoly(
                                image, [cv_draw_points], 0, lineType=cv2.LINE_8, shift=0)
                        count += 1
            elif annotation["type"] == AnnotationType.polygon.value:
                cv_draw_points = self.__get_cv_draw_points(
                    annotation["points"])
                cv2.fillPoly(image, [cv_draw_points], color,
                             lineType=cv2.LINE_8, shift=0)
            elif annotation["type"] == AnnotationType.bbox.value:
                cv_draw_points = self.__get_cv_draw_points(
                    annotation["points"])
                cv2.fillPoly(image, [cv_draw_points], color,
                             lineType=cv2.LINE_8, shift=0)
            else:
                # Unsupported annotation types are skipped (index still
                # advances below, matching the original behavior).
                continue
            index += 1
        image_path = os.path.join(
            output_dir, utils.get_basename(task["name"]) + ".png")
        os.makedirs(os.path.dirname(image_path), exist_ok=True)
        # Convert the label image to palettized PNG with the given palette.
        image = Image.fromarray(image)
        image = image.convert('P')
        image.putpalette(pallete)
        image.save(image_path)
    def __get_cv_draw_points(self, points: List[int]) -> List[int]:
        """
        Convert points to pillow draw points. Diagonal points are not supported.

        Input is a flat [x0, y0, x1, y1, ...] list; output is an
        np.array of (x, y) tuples suitable for cv2.fillPoly.

        NOTE(review): the two passes below nudge coordinates by -1 so that
        cv2.fillPoly's inclusive boundary matches the intended pixel extent
        of axis-aligned edges (x is decremented where y increases, y where x
        decreases).  That reading is inferred from the arithmetic, not from
        any spec -- confirm before modifying.
        """
        # Pass 1: adjusted x coordinates (x shifted by -1 on downward edges).
        x_points = []
        x_points.append(points[0])
        x_points.append(points[1])
        for i in range(int(len(points) / 2)):
            if i == 0:
                continue
            x = points[i * 2]
            y = points[i * 2 + 1]
            if y > x_points[(i - 1) * 2 + 1]:
                x_points[(i - 1) * 2] = x_points[(i - 1) * 2] - 1
                x = x - 1
            x_points.append(x)
            x_points.append(y)
        # Pass 2: adjusted y coordinates (y shifted by -1 on leftward edges).
        y_points = []
        y_points.append(points[0])
        y_points.append(points[1])
        for i in range(int(len(points) / 2)):
            if i == 0:
                continue
            x = points[i * 2]
            y = points[i * 2 + 1]
            if x < y_points[(i - 1) * 2]:
                y_points[(i - 1) * 2 + 1] = y_points[(i - 1) * 2 + 1] - 1
                y = y - 1
            y_points.append(x)
            y_points.append(y)
        # Merge: take adjusted x from pass 1 and adjusted y from pass 2.
        new_points = []
        for i in range(int(len(points) / 2)):
            new_points.append(x_points[i * 2])
            new_points.append(y_points[i * 2 + 1])
        # Re-pair the flat list into (x, y) tuples for OpenCV.
        cv_points = []
        for i in range(int(len(new_points) / 2)):
            cv_points.append((new_points[i * 2], new_points[i * 2 + 1]))
        return np.array(cv_points)
# Annotation
def find_annotation(self, annotation_id: str) -> dict:
"""
Find an annotation.
"""
endpoint = "annotations/" + annotation_id
return self.api.get_request(endpoint)
def find_annotation_by_value(self, project: str, value: str) -> dict:
"""
Find an annotation by value.
"""
annotations = self.get_annotations(project=project, value=value)
if not annotations:
return None
return annotations[0]
def get_annotations(
self,
project: str,
value: str = None,
offset: int = None,
limit: int = 10,
) -> list:
"""
Returns a list of annotations.
Returns up to 1000 at a time, to get more, set offset as the starting position to fetch.
project is slug of your project. (Required)
value is an unique identifier of annotation in your project. (Required)
offset is the starting position number to fetch. (Optional)
limit is the max number to fetch. (Optional)
"""
if limit > 1000:
raise FastLabelInvalidException(
"Limit must be less than or equal to 1000.", 422)
endpoint = "annotations"
params = {"project": project}
if value:
params["value"] = value
if offset:
params["offset"] = offset
if limit:
params["limit"] = limit
return self.api.get_request(endpoint, params=params)
def create_annotation(
self,
project: str,
type: str,
value: str,
title: str,
color: str = None,
attributes: list = []
) -> str:
"""
Create an annotation.
project is slug of your project. (Required)
type can be 'bbox', 'polygon', 'keypoint', 'classification', 'line', 'segmentation'. (Required)
value is an unique identifier of annotation in your project. (Required)
title is a display name of value. (Required)
color is hex color code like #ffffff. (Optional)
attributes is a list of attribute. (Optional)
"""
endpoint = "annotations"
payload = {
"project": project,
"type": type,
"value": value,
"title": title,
}
if color:
payload["color"] = color
if attributes:
payload["attributes"] = attributes
return self.api.post_request(endpoint, payload=payload)
def create_classification_annotation(
self,
project: str,
attributes: list
) -> str:
"""
Create a classification annotation.
project is slug of your project. (Required)
attributes is a list of attribute. (Required)
"""
endpoint = "annotations/classification"
payload = {"project": project, "attributes": attributes}
return self.api.post_request(endpoint, payload=payload)
def update_annotation(
self,
annotation_id: str,
value: str = None,
title: str = None,
color: str = None,
attributes: list = []
) -> str:
"""
Update an annotation.
annotation_id is an id of the annotation. (Required)
value is an unique identifier of annotation in your project. (Optional)
title is a display name of value. (Optional)
color is hex color code like #ffffff. (Optional)
attributes is a list of attribute. (Optional)
"""
endpoint = "annotations/" + annotation_id
payload = {}
if value:
payload["value"] = value
if title:
payload["title"] = title
if color:
payload["color"] = color
if attributes:
payload["attributes"] = attributes
return self.api.put_request(endpoint, payload=payload)
def update_classification_annotation(
self,
annotation_id: str,
attributes: list
) -> str:
"""
Update a classification annotation.
annotation_id is an id of the annotation. (Required)
attributes is a list of attribute. (Required)
"""
endpoint = "annotations/classification/" + annotation_id
payload = {"attributes": attributes}
return self.api.put_request(endpoint, payload=payload)
def delete_annotation(self, annotation_id: str) -> None:
"""
Delete an annotation.
"""
endpoint = "annotations/" + annotation_id
self.api.delete_request(endpoint)
# Project
def find_project(self, project_id: str) -> dict:
"""
Find a project.
"""
endpoint = "projects/" + project_id
return self.api.get_request(endpoint)
def find_project_by_slug(self, slug: str) -> dict:
"""
Find a project by slug.
slug is slug of your project. (Required)
"""
projects = self.get_projects(slug=slug)
if not projects:
return | |
# repo: Kochise/canette
#!/usr/bin/env python
# author: d.koch
# coding: utf-8
# naming: pep-0008
# typing: pep-0484
# docstring: pep-0257
# indentation: tabulation
""" canp_node.py
Node
card-chan-NODE-conf
"""
# --- IMPORT ---
# Standard libraries (installed with python)
#import logging
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from datetime import date, timedelta
from typing import Any
#from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
#from typing import Union
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# External libraries (installed with pip, conda, setup.py, ...)
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# Included libraries (this module, local files)
from canp_conf import canp_conf
from canp_conf import CANP_CONF__CPA_RPDO_MP
from canp_conf import CANP_CONF__CPA_TPDO_MP
from canp_conf import CANP_CONF__PDO_RX
from canp_conf import CANP_CONF__PDO_TX
from canp_conf import CANP_CONF__SDO_RX
from canp_conf import CANP_CONF__SDO_TX
from canp_conf import enum_CANP_CONF__TYPE
from canp_conv import canp_conv
from canp_enum import CANP_ENUM__APP_NAME
from canp_enum import CANP_ENUM__BYTE_LITTLE
from canp_enum import CANP_ENUM__HEAD_DATA
from canp_enum import CANP_ENUM__HEAD_LIST
from canp_enum import CANP_ENUM__HEAD_MAIN
#from canp_enum import CANP_ENUM__HEAD_NAME
from canp_enum import CANP_ENUM__NODE_MAX
#from canp_enum import CANP_ENUM__STR_ASLASH
#from canp_enum import CANP_ENUM__STR_DOT
from canp_enum import CANP_ENUM__STR_EMPTY
#from canp_enum import CANP_ENUM__STR_SPACE
from canp_enum import CANP_ENUM__VAL_DEFAULT
from canp_enum import enum_TYPE
from canp_args import canp_args
from canp_logs import canp_logs
# --- GLOBAL ---
# CiA 301 -- default COB-IDs (11-bit, function code + node id offsets).
# Note: NMT/RES1 share 0x000 and SYNC/EMCY share 0x080 by design.
CANP_NODE__COB_NMT = 0x000
CANP_NODE__COB_RES1 = 0x000
CANP_NODE__COB_SYNC = 0x080
CANP_NODE__COB_EMCY = 0x080
CANP_NODE__COB_TIME = 0x100
CANP_NODE__COB_TPDO1 = 0x180
CANP_NODE__COB_RPDO1 = 0x200
CANP_NODE__COB_TPDO2 = 0x280
CANP_NODE__COB_RPDO2 = 0x300
CANP_NODE__COB_TPDO3 = 0x380
CANP_NODE__COB_RPDO3 = 0x400
CANP_NODE__COB_TPDO4 = 0x480
CANP_NODE__COB_RPDO4 = 0x500
CANP_NODE__COB_TSDO = 0x580
CANP_NODE__COB_RSDO = 0x600
CANP_NODE__COB_RES2 = 0x680
CANP_NODE__COB_ECP = 0x700
CANP_NODE__COB_RES3 = 0x780
# Transmit-PDO base COB-IDs, in PDO number order.
list_CANP_NODE__COB_TPDO = [
	CANP_NODE__COB_TPDO1,
	CANP_NODE__COB_TPDO2,
	CANP_NODE__COB_TPDO3,
	CANP_NODE__COB_TPDO4,
	]
# Receive-PDO base COB-IDs, in PDO number order.
list_CANP_NODE__COB_RPDO = [
	CANP_NODE__COB_RPDO1,
	CANP_NODE__COB_RPDO2,
	CANP_NODE__COB_RPDO3,
	CANP_NODE__COB_RPDO4,
	]
# CiA 30x (specific cases)
CANP_NODE__COB_GFC = 0x001 # CiA 302
CANP_NODE__COB_FLYM = 0x071 # CiA 302-2
CANP_NODE__COB_IAI = 0x07F # CiA 302-6
CANP_NODE__COB_DSDO = 0x6E0 # CiA 302-5
CANP_NODE__COB_LSS = 0x7E4 # CiA 305
# CiA 4xx (specific cases)
CANP_NODE__COB_NCP1 = 0x6E1 # CiA 416-1
CANP_NODE__COB_NCP2 = 0x6F0 # CiA 416-1
# Frame indexes (positions within a parsed log record)
CANP_LOG__IDX_TIME = 0
CANP_LOG__IDX_COBID = 1
CANP_LOG__IDX_DATA = 2
# NMT command specifiers
CANP_NODE__NMT_OP = 0x01 # Operational (1)
CANP_NODE__NMT_STOP = 0x02 # Stopped (2)
CANP_NODE__NMT_PREOP = 0x80 # Pre-Op. (128)
CANP_NODE__NMT_RSTNODE = 0x81 # Reset Node (129)
CANP_NODE__NMT_RSTCOMM = 0x82 # Reset Comm. (130)
CANP_NODE__NMT__DLC = 2
# NMT Finite State Machine (FSM)
list_CANP_NODE__NMT_FSM = [
	CANP_NODE__NMT_OP,
	CANP_NODE__NMT_STOP,
	CANP_NODE__NMT_PREOP,
	CANP_NODE__NMT_RSTNODE,
	CANP_NODE__NMT_RSTCOMM,
	]
# Allowed NMT transitions: state -> list of reachable states.
dict_CANP_NODE__NMT_FSM = {
	CANP_NODE__NMT_OP: [CANP_NODE__NMT_STOP, CANP_NODE__NMT_PREOP, CANP_NODE__NMT_RSTNODE, CANP_NODE__NMT_RSTCOMM],
	CANP_NODE__NMT_STOP: [CANP_NODE__NMT_OP, CANP_NODE__NMT_PREOP, CANP_NODE__NMT_RSTNODE, CANP_NODE__NMT_RSTCOMM],
	CANP_NODE__NMT_PREOP: [CANP_NODE__NMT_OP, CANP_NODE__NMT_STOP, CANP_NODE__NMT_RSTNODE, CANP_NODE__NMT_RSTCOMM],
	CANP_NODE__NMT_RSTNODE: [CANP_NODE__NMT_RSTCOMM],
	CANP_NODE__NMT_RSTCOMM: [CANP_NODE__NMT_PREOP],
	}
CANP_NODE__EMCY__DLC = 8
CANP_NODE__TIME__DLC = 8
# SDO server (Tx) command specifiers
CANP_NODE__TSDO_RECVBLK = 0x00 # Receiving '...' (0x00-0x1F )
CANP_NODE__TSDO_RECVX = 0x41 # Receiving 'x bytes'
CANP_NODE__TSDO_RECV4 = 0x43 # Receive '4 bytes'
CANP_NODE__TSDO_RECV3 = 0x47 # Receive '3 bytes'
CANP_NODE__TSDO_RECV2 = 0x4B # Receive '2 bytes'
CANP_NODE__TSDO_RECV1 = 0x4F # Receive '1 byte'
CANP_NODE__TSDO_SENDACK = 0x60 # Send ACK
CANP_NODE__TSDO_SENDERR = 0x80 # Send ERROR
CANP_NODE__TSDO__DLC = 8
list_CANP_NODE__TSDO_CMD = [
	CANP_NODE__TSDO_RECVX,
	CANP_NODE__TSDO_RECV4,
	CANP_NODE__TSDO_RECV3,
	CANP_NODE__TSDO_RECV2,
	CANP_NODE__TSDO_RECV1,
	CANP_NODE__TSDO_SENDACK,
	CANP_NODE__TSDO_SENDERR,
	]
# SDO client (Rx) command specifiers
CANP_NODE__RSDO_SEND4 = 0x23 # Send '4 bytes'
CANP_NODE__RSDO_SEND3 = 0x27 # Send '3 bytes'
CANP_NODE__RSDO_SEND2 = 0x2B # Send '2 bytes'
CANP_NODE__RSDO_SEND1 = 0x2F # Send '1 byte'
CANP_NODE__RSDO_RDOBJ = 0x40 # Read 'object'
CANP_NODE__RSDO_RDTGL0 = 0x60 # Read '...' (toggle 0)
CANP_NODE__RSDO_RDTGL1 = 0x70 # Read '...' (toggle 1)
CANP_NODE__RSDO_SENDABRT = 0x80 # Send ABORT
CANP_NODE__RSDO__DLC = 8
list_CANP_NODE__RSDO_CMD = [
	CANP_NODE__RSDO_SEND4,
	CANP_NODE__RSDO_SEND3,
	CANP_NODE__RSDO_SEND2,
	CANP_NODE__RSDO_SEND1,
	CANP_NODE__RSDO_RDOBJ,
	CANP_NODE__RSDO_RDTGL0,
	CANP_NODE__RSDO_RDTGL1,
	CANP_NODE__RSDO_SENDABRT,
	]
# Error Control Protocol (heartbeat/node guarding) states
CANP_NODE__ECP_BOOT = 0x00 # Bootup
CANP_NODE__ECP_STOP = 0x04 # Stopped
CANP_NODE__ECP_OP = 0x05 # Operational
CANP_NODE__ECP_PREOP = 0x7F # Pre-operational
CANP_NODE__ECP_TOGGLE = 0x80 # Toggle (0x8x : at each Tx)
CANP_NODE__ECP__DLC = 1
list_CANP_NODE__ECP_CMD = [
	CANP_NODE__ECP_BOOT,
	CANP_NODE__ECP_STOP,
	CANP_NODE__ECP_OP,
	CANP_NODE__ECP_PREOP,
	CANP_NODE__ECP_TOGGLE,
	]
# Object dictionary area boundaries
CANP_NODE__INDEX_DTA = 0x0000 # 0x0000 – 0x0FFF Data Types Area (DTA)
# 0x0000 - 0x0000 : reserved
# 0x0001 – 0x025F : Data types
# 0x0260 – 0x0FFF : reserved
CANP_NODE__INDEX_CPA = 0x1000 # 0x1000 – 0x1FFF Communication Profile Area (CPA)
CANP_NODE__INDEX_MSPA = 0x2000 # 0x2000 – 0x5FFF Manufacturer Specific Profile Area (MSPA)
CANP_NODE__INDEX_SDPA = 0x6000 # 0x6000 – 0x9FFF Standardised Device Profile Area (SDPA)
# 0x6000 - 0x67FF Device 1
# 0x6800 - 0x6FFF Device 2 (same as Device 1, but offset)
# 0x7000 - 0x77FF Device 3 (same as Device 1, but offset)
# 0x7800 - 0x7FFF Device 4 (same as Device 1, but offset)
# 0x8000 - 0x87FF Device 5 (same as Device 1, but offset)
# 0x8800 - 0x8FFF Device 6 (same as Device 1, but offset)
# 0x9000 - 0x97FF Device 7 (same as Device 1, but offset)
# 0x9800 - 0x9FFF Device 8 (same as Device 1, but offset)
CANP_NODE__INDEX_SNVA = 0xA000 # 0xA000 – 0xAFFF Standardised Network Variable Area (SNVA)
CANP_NODE__INDEX_SSVA = 0xB000 # 0xB000 – 0xBFFF Standardised System Variable Area (SSVA)
CANP_NODE__INDEX_RES = 0xC000 # 0xC000 – 0xFFFF reserved
# EMCY error_code (0x00xx : 00 = code below) -- high byte of the code.
dict_CANP_NODE__EMCY_SERR: Dict[int, str] = {
	0x00: "no error or reset",
	0x10: "generic error",
	0x20: "current",
	0x21: "current, canopen device input side",
	0x22: "current inside the canopen device",
	0x23: "current, canopen device output side",
	0x30: "voltage",
	0x31: "mains",
	0x32: "voltage inside the canopen device",
	0x33: "output voltage",
	0x40: "temperature",
	0x41: "ambient temperature",
	0x42: "canopen device temperature",
	0x50: "canopen device hardware",
	0x60: "canopen device software",
	0x61: "internal software",
	0x62: "user software",
	0x63: "data set",
	0x70: "additional modules",
	0x80: "monitoring",
	0x81: "communication",
	0x82: "protocol",
	0x90: "external",
	0xF0: "additional functions",
	0xFF: "canopen device specific"
	}
# CANP_NODE__TSDO_SENDERR error_code
# CANP_NODE__RSDO_SENDABRT error_code
dict_CANP_NODE__SDO_SERR: Dict[int, str] = {
	0x05030000: "toggle bit not changed",
	0x05040001: "command specifier unknown",
	0x06010000: "unsupported access",
	0x06010002: "read only entry",
	0x06020000: "object not existing",
	0x06040041: "object cannot be pdo mapped",
	0x06040042: "mapped pdo exceed pdo",
	0x06070012: "parameter length too long",
	0x06070013: "parameter length too short",
	0x06090011: "subindex not existing",
	0x06090031: "value too great",
	0x06090032: "value too small",
	0x08000000: "general error",
	0x08000022: "data cannot be read or stored in this state"
	}
# Configuration files (key = filename)
g_dict_confs: Optional[Dict[str, canp_conf]] = None
# --- CLASS ---
class canp_node:
	""" CAN node

		Accumulates raw CAN frames for one node and decodes them into
		objects, optionally using a configuration object for conversions.
	"""
	# NOTE(review): all of the following are *class* attributes, shared by
	# every instance until an instance shadows them by assignment.
	# Configuration object (for 'raw frame' into 'object' conversion)
	m_cls_cnfs: Optional[canp_conf] = None
	# Frames analysed (key = timestamp)
	m_dict_raws: Optional[Dict[float, Any]] = None
	# Objects stored (key = index, sub-indexes)
	m_dict_objs: Optional[Dict[int, Any]] = None
	# Logger object (child logger under the application logger)
	m_logs = canp_logs.logger(CANP_ENUM__APP_NAME).getChild("node")
	# Current decoding state (date/time stamps, NMT state, SDO transfer
	# index/sub-index/command, and the SDO accumulator)
	m_int_date: int = 0
	m_int_time: int = 0
	m_int_nmt: int = 0x0
	m_int_idx: int = 0
	m_int_sub: int = 0
	m_int_cmd: int = 0
	m_int_acc: int = 0
	m_byte_acc: bytearray = b""
	def __init__(self,
			**i_dict_args: Any
			) -> None:
		""" Constructor

			Forwards any keyword arguments up the MRO unchanged.
			NOTE(review): when the base class is plain ``object``,
			``super().__init__`` rejects keyword arguments -- presumably
			this class is meant to be used in a cooperative multiple
			inheritance chain; confirm before calling with kwargs.
		"""
		super().__init__(**i_dict_args)
def __getitem__(self,
i_int_index: int = -1
) -> Any:
""" Get at (key = object index, if present)
"""
l_any_ret: Any = None
try:
l_any_ret = self.m_dict_objs[i_int_index]
# - except KeyError -
except KeyError:
pass
return l_any_ret
def __len__(self) -> int:
""" Size of (number of objects)
"""
l_int_ret: int = 0
if self.m_dict_objs is not None:
l_int_ret = len(self.m_dict_objs)
return l_int_ret
def obj_list(self,
) -> None:
""" Object list
"""
l_list_ret: list = []
if self.m_dict_objs is not None:
l_list_ret = self.m_dict_objs.keys()
return l_list_ret
	def obj_store(self,
			i_int_idx: int = 0,
			i_int_sub: int = 0,
			i_any_data: Any = CANP_ENUM__STR_EMPTY,
			i_float_time: float = 0.0,
			i_bool_bytes: bool = False
			) -> None:
		""" Object storing

			Stores *i_any_data* under (index, sub-index), keeping both the
			last decoded value and a time-stamped history list.  When
			*i_bool_bytes* is False and a configuration object is present,
			raw byte payloads are first decoded via the configuration.
		"""
		l_any_data: Any = None
		l_any_data = i_any_data
		# Lazily create the nested {index: {sub: {...}}} structure (EAFP).
		if self.m_dict_objs is None:
			self.m_dict_objs = {}
		try:
			# Check index
			self.m_dict_objs[i_int_idx]
		# - except KeyError -
		except KeyError:
			# Create dict
			self.m_dict_objs[i_int_idx] = {}
		try:
			# Check sub-index
			self.m_dict_objs[i_int_idx][i_int_sub]
		# - except KeyError -
		except KeyError:
			# Create dict
			self.m_dict_objs[i_int_idx][i_int_sub] = {}
		try:
			# Check accumulator (special index)
			self.m_dict_objs[i_int_idx][i_int_sub][CANP_ENUM__HEAD_LIST]
		# - except KeyError -
		except KeyError:
			# Create list
			self.m_dict_objs[i_int_idx][i_int_sub][CANP_ENUM__HEAD_LIST] = []
		# i_bool_bytes=True means "keep the payload raw"; otherwise try to
		# decode byte payloads through the configuration object.
		if i_bool_bytes == False:
			if isinstance(i_any_data, bytearray) or isinstance(i_any_data, bytes):
				if self.m_cls_cnfs is not None:
					# Trying to decode using conf
					l_any_data = self.m_cls_cnfs.conv_obj(
						i_int_idx,
						i_int_sub,
						i_any_data)
		# Storing raw data (last value)
		self.m_dict_objs[i_int_idx][i_int_sub][CANP_ENUM__HEAD_DATA] = l_any_data
		# Encapsulate for storage (tuple)
		l_any_data = (i_float_time, l_any_data)
		self.m_dict_objs[i_int_idx][i_int_sub][CANP_ENUM__HEAD_LIST].append(l_any_data)
	def pdo_dispatch(self,
			i_int_pdo: int = 0,
			i_bytes_data: bytearray = b"",
			i_float_time: float = 0.0
			) -> None:
		""" Object dispatching

			Unpacks a PDO payload according to the PDO mapping previously
			stored under *i_int_pdo* (mapping entries encode target index /
			sub-index / bit length) and forwards each extracted field to
			obj_store.  Inconsistencies between the configured mapping and
			the payload are logged, never raised.
		"""
		if i_int_pdo > 0 and isinstance(i_bytes_data, bytearray) and len(i_bytes_data) > 0:
			#l_enum_typ: enum_TYPE = enum_TYPE.VisibleString
			l_any_data: Any = CANP_ENUM__STR_EMPTY
			l_int_data: int = 0
			l_int_mask: int = 0
			l_int_dlc: int = 0
			l_int_idx: int = 0
			l_int_sub: int = 0
			l_int_max: int = 0
			l_int_len: int = 0
			l_str_err: str = CANP_ENUM__STR_EMPTY
			l_str_chk: str = CANP_ENUM__STR_EMPTY
			if self.m_cls_cnfs is not None:
				l_str_err = f"node.pdo_dispatch.pdo.map[{i_int_pdo:#x}]"
				l_str_chk = "(check config file)"
				try:
					# Configuration object
					l_dict_idx = self.m_cls_cnfs.m_dict_obj[i_int_pdo]
					# - except KeyError -
					# ParameterName=
					# SubNumber=
					# ObjectType=
					# ...
					# Check object integrity ---------------------
					# TODO DUPLICATE START : canp_conf.check_obj
					try:
						# Maximum sub-index
						l_int_max = l_dict_idx[CANP_ENUM__VAL_DEFAULT][enum_CANP_CONF__TYPE.SubNumber]
						# - except KeyError -
						if l_int_max > 0:
							l_int_len = len(self.m_dict_objs[i_int_pdo])
							if l_int_max > l_int_len:
								pass
								#self.m_logs.error(f"{l_str_err}.sub[{l_int_max}].map[{l_int_len}].inconsistent {l_str_chk}".rstrip())
							try:
								# Number of mapped objects (variable)
								l_any_data = self.m_dict_objs[i_int_pdo][0][CANP_ENUM__HEAD_DATA]
								# - except KeyError -
								if isinstance(l_any_data, bytearray):
									l_int_map = canp_conv.int_bytes(l_any_data)
								elif isinstance(l_any_data, int):
									l_int_map = l_any_data
								if l_int_map > 0:
									l_int_len -= 1
									if l_int_map < l_int_len:
										pass
										#self.m_logs.error(f"{l_str_err}.max[{l_int_map}].map[{l_int_len}].unmapped {l_str_chk}".rstrip())
									elif l_int_map != l_int_len:
										# NOTE(review): the 'pass' is a no-op,
										# so this mismatch *is* logged.
										pass
										self.m_logs.error(f"{l_str_err}.max[{l_int_map}].map[{l_int_len}].inconsistent {l_str_chk}".rstrip())
									# DUPLICATE END : canp_conf.check_obj
									# Check object integrity ---------------------
									# NOTE(review): the keyword here is named
									# i_bool_bytes but carries the payload --
									# presumably a naming slip in canp_conv;
									# confirm against its signature.
									l_int_data = canp_conv.int_bytes(i_bool_bytes = i_bytes_data)
									l_int_dlc = len(i_bytes_data) * 8
									for l_int_loop in range(1, l_int_map + 1):
										# Each mapped object (1 to 64 bits)
										l_str_map = f"map[{i_int_pdo:#x}].sub[{l_int_loop}]"
										try:
											# Pdo register
											l_dict_pdo = self.m_dict_objs[i_int_pdo][l_int_loop]
											# - except KeyError -
											try:
												# Mapped cobid+len
												l_any_data = l_dict_pdo[CANP_ENUM__HEAD_DATA]
												# - except KeyError -
												if isinstance(l_any_data, bytearray):
													l_int_idx = canp_conv.int_bytes(l_any_data)
												elif isinstance(l_any_data, int):
													l_int_idx = l_any_data
												# Target object: mapping entry packs
												# [index:16][sub:8][bit length:8].
												l_int_len = (l_int_idx >> 0) & 0xFF
												l_int_sub = (l_int_idx >> 8) & 0xFF
												l_int_idx = (l_int_idx >> 16) & 0xFFFF
												if l_int_len > 0:
													# Peel l_int_len bits off the payload (LSB first).
													l_int_mask = (2 ** l_int_len) - 1
													l_any_data = l_int_data & l_int_mask
													l_int_data >>= l_int_len
													l_int_dlc -= l_int_len
													# Byte sized
													if l_int_len >= 8:
														l_int_len //= 8
													else:
														l_int_len = 1
													# Type conversion is done by the store
													l_any_data = bytearray(
														l_any_data.to_bytes(
															l_int_len,
															byteorder = CANP_ENUM__BYTE_LITTLE))
													self.obj_store(
														i_int_idx = l_int_idx,
														i_int_sub = l_int_sub,
														i_any_data = l_any_data,
														i_float_time = i_float_time)
											except KeyError:
												self.m_logs.error(f"{l_str_err}.data.unknown")
										except KeyError:
											self.m_logs.error(f"{l_str_err}.unknown")
									# Leftover/overshoot of payload bits vs mapping.
									if l_int_dlc > 0:
										pass
										#self.m_logs.error(f"{l_str_err}.data.dlc.leftover[{l_int_dlc}] {l_str_chk}".rstrip())
									elif l_int_dlc < 0:
										pass
										self.m_logs.error(f"{l_str_err}.data.dlc.overshoot[{l_int_dlc}] {l_str_chk}".rstrip())
								else:
									self.m_logs.error(f"{l_str_err}.map.zero")
							except KeyError:
								self.m_logs.error(f"{l_str_err}.map.unknown")
						else:
							self.m_logs.error(f"{l_str_err}.max.zero")
					except KeyError:
						self.m_logs.error(f"{l_str_err}.max.unknown")
				except KeyError:
					self.m_logs.error(f"{l_str_err}.unknown")
def conf_load(self,
i_str_file: str = CANP_ENUM__STR_EMPTY,
i_bool_force: bool = False,
i_bool_bytes: bool = False
) -> None:
""" Node configuration
"""
if i_str_file != CANP_ENUM__STR_EMPTY:
l_enum_typ: enum_TYPE = enum_TYPE.VisibleString
l_bool_ok: bool = False
l_any_data: Any = 0
l_int_idx: int = 0
l_int_sub: int = 0
# Access global | |
need_num = 2
break
d_ite.append(pool[j])
need_num = need_num - 1
pai_num = pai_num - 1
if need_num == 0:
combine(qua_two,r_qua[i],d_ite)
remove_pack(pool,d_ite)
d_ite = []
d_qua.append(r_qua[i])
break
else:
if is_pai(pool[j]):
continue
else:
if len(d_ite) == 1 and n_need == 1:
if d_ite[0] == pool[j]:
if sin_num - 2 <= 0:
d_ite = []
need_num = 2
break
d_ite.append(pool[j])
need_num = need_num - 1
sin_num = sin_num - 1
if need_num == 0:
combine(qua_two,r_qua[i],d_ite)
remove_pack(pool,d_ite)
d_ite = []
d_qua.append(r_qua[i])
break
new_len_pool = sum(map(len,pool))
new_len_qua_two = sum(map(len,qua_two))
if new_len_pool != len_pool:
#some qua has got a TWO
remove_pack(t_qua,d_qua)
new_len_qua = sum(map(len,t_qua))
if debug <= 6:
if len_qua + len_pool + len_qua_two != new_len_qua + new_len_pool\
+ new_len_qua_two:
print "len_qua:%d + len_pool:%d + len_qua_two:%d != new_len_qua:%d\
+ new_len_pool:%d + new_len_qua_two:%d"%(len_qua,len_pool,len_qua_two\
,new_len_qua,new_len_pool,new_len_qua_two)
print "numbers inconsistence"
print "step 4 failed combine qua_two",qua_two
return False
if debug <= 6:
print "step 3 finished, qua_two <%d>:"%(sum(map(len,qua_two))),qua_two
print "t_qua:",t_qua
print "pool:",pool
return True
####################end step3_qua_two############################################
def step2_small_plane_sins(plane,stri,pool):
    '''
    Step 2.5: give every *small* plane (a 6-tile triple sequence in stri)
    two single tiles ("sins") taken from pool.

    plane, stri and pool are lists of packs and are mutated in place:
      plane - gains the combined [small plane + two singles] packs
      stri  - loses the triple sequences that were combined
      pool  - loses the singles that were consumed

    Returns True on success or when there is nothing to do, False when
    the total tile count is inconsistent after combining (debug check).

    NOTE(review): depends on module-level debug, is_sin, combine and
    remove_pack defined elsewhere in this file.
    '''
    if debug <= 6:
        print "******step 2.5 ****** small plane plus two sins begin"
        print "plane:",plane
        print "stri:",stri
        print "pool:",pool
    # Nothing to combine without both triple sequences and pool tiles.
    if len(stri) == 0 or len(pool) == 0:
        if debug <= 6:
            print "no seq_tri or sin"
            print "plane:",plane
        return True
    d_stri = [] #triple sequences to delete from stri after the search
    d_sin = [] #singles to be deleted from pool
    r_stri = stri[:] #shallow copy; no length ordering needed for small planes
    #r_stri = sorted(stri, key=lambda s:len(s), reverse=1)
    if debug <= 6:
        print "r_stri:",r_stri
    # Tile counts before combining, for the consistency check below.
    len_stri = sum(map(len,stri))
    len_pool = sum(map(len,pool))
    len_plane = sum(map(len,plane))
    #First let's count the number of sins in pool:
    sin_num = 0;
    for item in pool:
        if is_sin(item):
            sin_num = sin_num + 1
    if sin_num < 2 :
        if debug <= 6:
            print "no sin in pool",pool
            print "plane:",plane
        return True
    need_num = 0
    sin_num = 0
    if debug <= 6:
        print "r_stri",r_stri
        print "pool:",pool
        print "start search"
    for i in range(len(r_stri)):
        # Recount the singles still left in pool on every pass.
        sin_num = 0
        for item in pool:
            if is_sin(item):
                sin_num = sin_num + 1
        if sin_num < 2: # a small plane always needs exactly two singles
            if debug <= 6:
                print "no sin in pool:",pool
            break
        if len(r_stri[i]) == 6: #only consider small plane
            need_num = len(r_stri[i])/3 #integer division: 6 tiles -> 2 singles
            if need_num <= sin_num:
                for k in range(len(pool)):
                    if is_sin(pool[k]):
                        d_sin.append(pool[k])#e.g. d_sin:[[3],[5]]
                        need_num = need_num - 1
                    if need_num == 0: #enough singles found
                        if debug <= 6:
                            print "find enough sins for plane,d_sin",d_sin
                        combine(plane,r_stri[i],d_sin)
                        #delete the consumed singles from pool
                        remove_pack(pool,d_sin)
                        d_sin = []
                        #record this triple sequence; deleted from stri below
                        d_stri.append(r_stri[i])
                        sin_num = 0 #restart count the total sin number
                        break # finish this stri combination and break
        else:
            continue #loop the next less stri
    new_len_pool = sum(map(len,pool))
    new_len_plane = sum(map(len,plane))
    if new_len_pool != len_pool:
        #some triple sequence got singles; drop it from stri now
        remove_pack(stri,d_stri)
    new_len_stri = sum(map(len,stri))
    # NOTE(review): this consistency check (and the False return) only
    # runs when debug <= 6.
    if debug <= 6:
        if len_stri + len_pool + len_plane != new_len_stri + new_len_pool\
                + new_len_plane:
            print "numbers inconsistence"
            print "step 2.5 failed combine plane",plane
            return False
    if debug <= 6:
        print "step2 finished, plane <%d>:"%(sum(map(len,plane))),plane
        print "stri:",stri
        print "pool:",pool
    return True
################end step2_small_plane_sins################################
def step2_plane_singles(plane,stri,pool):
    '''
    Step 2: give every *big* plane (a triple sequence of >= 9 tiles in
    stri) one single tile from pool per 3 tiles, visiting planes in
    decreasing length order.

    plane, stri and pool are lists of packs and are mutated in place:
      plane - gains the combined [big plane + singles] packs
      stri  - loses the triple sequences that were combined
      pool  - loses the singles that were consumed

    Returns True on success or when there is nothing to do, False when
    the total tile count is inconsistent after combining (debug check).

    NOTE(review): depends on module-level debug, is_sin, combine and
    remove_pack defined elsewhere in this file.
    '''
    if debug <= 6:
        print "\n"
        print "******step 2.****** plane with singles begin"
        print "plane:",plane
        print "stri:",stri
        print "pool:",pool
    # Nothing to combine without both triple sequences and pool tiles.
    if len(stri) == 0 or len(pool) == 0:
        if debug <= 6:
            print "no seq_tri or pai"
            print "plane:",plane
        return True
    d_stri = [] #triple sequences to delete from stri after the search
    d_sin = [] #singles to be deleted from pool
    r_stri = stri[:] #reversed order by length of seq_tri
    r_stri = sorted(stri, key=lambda s:len(s), reverse=1)
    if debug <= 6:
        print "r_stri:",r_stri
    # Tile counts before combining, for the consistency check below.
    len_stri = sum(map(len,stri))
    len_pool = sum(map(len,pool))
    len_plane = sum(map(len,plane))
    #First let's count the number of singles in pool:
    sin_num = 0
    for i in range(len(pool)):
        if is_sin(pool[i]):
            sin_num = sin_num + 1
    if sin_num < 3:#smallest big plane (9 tiles) needs 3 singles
        if debug <= 6:
            print "not enough sins in pool",pool
            print "plane:",plane
        return True
    need_num = 0
    sin_num = 0
    if debug <= 6:
        print "r_stri",r_stri
        print "pool:",pool
        print "start search"
    #longest planes grab their singles first
    for i in range(len(r_stri)):
        if len(r_stri[i]) >= 9: #big plane
            sin_num = 0 #recount the singles still left in pool
            for j in range(len(pool)):
                if is_sin(pool[j]):
                    sin_num = sin_num + 1
            if sin_num < 3: # this step only take care big planes, so at least 3 sins
                if debug <= 6:
                    print "no enough sins now,sin_num:",sin_num
                break #not enough singles left for any big plane
            need_num = len(r_stri[i])/3 #integer division: one single per triple
            if need_num <= sin_num:
                for k in range(len(pool)):
                    if is_sin(pool[k]):
                        d_sin.append(pool[k])#d_sin:[[3],[4],[5]]
                        need_num = need_num - 1
                    if need_num == 0: #enough singles found
                        if debug <= 6:
                            print "find enough sins for plane,d_sin",d_sin
                        #Add to plane like [[6,6,6,7,7,7,8,8,8,3,4,5]]
                        combine(plane,r_stri[i],d_sin)
                        #delete the consumed singles from pool
                        remove_pack(pool,d_sin)
                        d_sin = []
                        #record this triple sequence; deleted from stri below
                        d_stri.append(r_stri[i])
                        sin_num = 0 #restart count the total sin number
                        break # finish this stri combination and break
        else:
            continue #loop the next less stri
    new_len_pool = sum(map(len,pool))
    new_len_plane = sum(map(len,plane))
    if new_len_pool != len_pool:
        #some triple sequence got singles; drop it from stri now
        remove_pack(stri,d_stri)
    new_len_stri = sum(map(len,stri))
    # NOTE(review): this consistency check (and the False return) only
    # runs when debug <= 6.
    if debug <= 6:
        if len_stri + len_pool + len_plane != new_len_stri + new_len_pool\
                + new_len_plane:
            print "numbers inconsistence"
            print "step 2 failed combine plane",plane
            return False
    if debug <= 6:
        print "step2 finished, plane <%d>:"%(sum(map(len,plane))),plane
        print "stri:",stri
        print "pool:",pool
    return True
###################end step2_plane_singles#####################3
def step1_plane_pairs(plane,stri,pool):
'''
output:plane,stri,pool
from pool,
planes get pairs in a reversed order of planes' length
'''
if debug <= 6:
print "\n"
print "******step 1.****** plane with pairs begin"
print "stri:",stri
print "pool:",pool
if len(stri) == 0 or len(pool) == 0:
if debug <= 6:
print "no seq_tri or pai"
print "plane:",plane
return True
d_stri = [] #to be deleted from stri which find pairs
d_pai = [] #to be deleted from pool
r_stri = stri[:] #reversed order by length of seq_tri
r_stri = sorted(stri, key=lambda s:len(s), reverse=1)
if debug <= 6:
print "r_stri:",r_stri
len_stri = sum(map(len,stri))
len_pool = sum(map(len,pool))
len_plane = sum(map(len,plane))
#First let's count the number of pairs in pool:
pai_num = 0
for i in range(len(pool)):
if is_pai(pool[i]):
pai_num = pai_num + 1
if pai_num < 2:#333444 need two pairs
if debug <= 6:
print "not enough pairs in pool",pool
print "plane:",plane
return True
need_num = 0
pai_num = 0
if debug <= 6:
print "r_stri",r_stri
print "pool:",pool
#in a reversed ord to get pairs
for i in range(len(r_stri)):
pai_num = 0 #recount the total number
for j in range(len(pool)):
if is_pai(pool[j]):
pai_num = pai_num + 1
if pai_num < 2:
if debug <= 6:
print "no enough pairs now,pai_num:",pai_num
break #no enough pairs to find
need_num = len(r_stri[i])/3 #don't forget /3
if need_num <= pai_num:
for k in range(len(pool)):
if is_pai(pool[k]):
d_pai.append(pool[k])#d_pai:[[3,3],[4,4],[5,5]]
need_num = need_num - 1
if need_num == 0: #enough pairs find
if debug <= 6:
print "find enough pairs for plane,d_pai",d_pai
#Add to plane like [[6,6,6,7,7,7,8,8,8,3,3,4,4,5,5,]]
combine(plane,r_stri[i],d_pai)
#del pai from pool
remove_pack(pool,d_pai)
d_pai = []
#record this seq_str ,to delete outside of the circle
d_stri.append(r_stri[i])
pai_number = 0
break
else:
continue
new_len_pool = sum(map(len,pool))
new_len_plane = sum(map(len,plane))
if new_len_pool != len_pool:
#some seqtri has got pairs
remove_pack(stri,d_stri)
new_len_stri = sum(map(len,stri))
if debug <= 6:
if len_stri + len_pool + len_plane != new_len_stri + new_len_pool\
+ new_len_plane:
print "numbers inconsistence"
print "step 1 failed combine plane",plane
return False
if debug <= 6:
print "step1 finished, plane <%d>:"%(sum(map(len,plane))),plane
print "stri:",stri
print "pool:",pool
return True
################end step1_plane_pairs##################
def show(l):
'''
show the result of l
'''
ss | |
= Var(within=Reals,bounds=(0,500),initialize=0)
# Continuous decision variables m.x1877 ... m.x2304 for the generated model.
# The original generated code declared each variable on its own line; the
# declarations fall into four contiguous (index range, upper bound) groups,
# so they are emitted here by loop.  Attribute names, bounds and initial
# values are identical to the expanded form:
#   x1877-x1884 : bounds (0, 500)
#   x1885-x1886 : bounds (0, None)   (nonnegative, unbounded above)
#   x1887-x1981 : bounds (0, 500)
#   x1982-x2304 : bounds (0, None)
for _first, _last, _upper in ((1877, 1884, 500),
                              (1885, 1886, None),
                              (1887, 1981, 500),
                              (1982, 2304, None)):
    for _idx in range(_first, _last + 1):
        setattr(m, "x%d" % _idx,
                Var(within=Reals, bounds=(0, _upper), initialize=0))
m.x2305 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2306 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2307 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2308 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2309 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2310 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2311 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2312 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2313 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2314 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2315 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2316 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2317 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2318 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2319 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2320 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2321 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2322 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2323 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2324 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2325 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2326 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2327 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2328 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2329 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2330 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2331 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2332 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2333 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2334 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2335 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2336 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2337 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2338 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2339 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2340 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2341 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2342 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2343 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2344 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2345 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2346 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2347 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2348 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2349 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2350 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2351 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2352 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2353 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2354 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2355 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2356 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2357 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2358 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2359 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2360 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2361 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2362 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2363 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2364 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2365 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2366 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2367 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2368 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2369 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2370 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2371 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2372 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2373 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2374 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2375 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2376 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2377 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2378 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2379 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2380 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2381 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2382 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2383 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2384 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2385 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2386 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2387 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2388 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2389 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2390 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2391 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2392 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2393 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2394 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2395 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2396 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2397 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2398 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2399 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2400 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2401 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2402 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2403 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2404 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2405 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2406 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2407 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2408 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2409 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2410 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2411 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2412 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2413 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2414 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2415 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2416 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2417 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2418 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2419 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2420 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2421 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2422 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2423 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2424 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2425 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2426 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2427 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2428 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2429 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2430 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2431 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2432 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2433 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2434 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2435 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2436 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2437 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2438 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2439 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2440 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2441 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2442 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2443 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2444 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2445 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2446 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2447 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2448 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2449 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2450 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2451 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2452 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2453 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2454 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2455 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2456 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2457 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2458 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2459 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2460 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2461 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2462 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2463 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2464 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2465 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2466 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2467 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2468 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2469 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2470 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2471 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2472 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2473 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2474 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2475 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2476 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2477 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2478 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2479 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2480 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2481 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2482 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2483 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2484 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2485 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2486 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2487 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2488 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2489 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2490 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2491 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2492 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2493 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2494 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2495 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2496 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2497 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2498 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2499 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2500 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2501 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2502 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2503 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2504 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2505 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2506 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2507 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2508 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2509 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2510 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2511 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2512 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2513 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2514 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2515 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2516 | |
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created by <NAME> on 08/20/2020
"""
import tkinter as tk
import tkinter.messagebox as messagebox
from tkinter import ttk
from datetime import datetime
import sys
from PIL import ImageTk, Image
import cv2
import database
from win10toast import ToastNotifier
from os import path
class MainDisplay(tk.Frame):
def __init__(self, master = None):
tk.Frame.__init__(self, master)
self.master = master
self.menubar = Menu(self)
self.master.config(menu = self.menubar)
##############################Variables###############################
#Fonts
self.font_title = ('arial', 12, 'bold')
self.font_text = ("arial", 8)
#Dates
self.Start_Date = tk.StringVar()
self.Notification_date = tk.StringVar()
#Personal Info
self.Name = tk.StringVar()
self.Sex = tk.StringVar()
self.Age = tk.StringVar()
self.Status = tk.StringVar()
self.Address = tk.StringVar()
#Measurements
self.Height = tk.StringVar()
self.Weight_Current = tk.StringVar()
self.Weight_Perfect = tk.StringVar()
self.Weight_Changed = tk.StringVar()
self.BMI = tk.StringVar()
self.Leather_tuck = tk.StringVar()
self.Arm_Circu = tk.StringVar()
#Sickness
self.Sick_Prev = tk.StringVar()
self.Sick_Curr = tk.StringVar()
#BioInfo
self.Pressure = tk.StringVar()
self.Pulse = tk.StringVar()
self.Temperature = tk.StringVar()
self.Breath = tk.StringVar()
self.Urin_Exp = tk.StringVar()
#Meditation
self.Cyanosis = tk.StringVar()
self.Elevate = tk.StringVar()
self.Pallor = tk.StringVar()
self.Dryness = tk.StringVar()
self.Edema = tk.StringVar()
self.Hair = tk.StringVar()
#Changes
self.Sleep = tk.StringVar()
self.Constipation = tk.StringVar()
self.Diarrhea = tk.StringVar()
self.Vomiting = tk.StringVar()
self.Urin_Color = tk.StringVar()
self.Urin_Number = tk.StringVar()
#Current_sym
self.Current_sym1 = tk.StringVar()
self.Current_sym2 = tk.StringVar()
self.Current_sym3 = tk.StringVar()
self.Current_sym4 = tk.StringVar()
#Special Cases
##Lab Test
self.Uria = tk.StringVar()
self.Humo = tk.StringVar()
self.Krea = tk.StringVar()
self.Na = tk.StringVar()
self.K = tk.StringVar()
self.Ca = tk.StringVar()
self.WBC = tk.StringVar()
self.Pro = tk.StringVar()
self.Sug = tk.StringVar()
##Extra test
self.Beliro = tk.StringVar()
self.Fe = tk.StringVar()
self.Thyroid = tk.StringVar()
self.Urin_Acid = tk.StringVar()
##Evaluation
self.Evaluation = tk.StringVar()
################################Clock#################################
self.ClockFrame = tk.Frame(self.master, width = 1350, height = 50, bd = 5, relief = "ridge")
self.ClockFrame.pack(side = "bottom", fill = "x", expand = 1, anchor = 's')
self.Time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
self.ClockLabel = tk.Label(self.ClockFrame, text = self.Time, anchor = 'e')
self.ClockLabel.pack(side = 'right')
self.Clock()
##############################Main Frame###############################
self.MainFrame = tk.Frame(self.master, relief = 'ridge')
self.MainFrame.pack(side = "top", fill = "x", expand = 1, anchor = 'n')
self.RightFrame = tk.Frame(self.MainFrame, height = height, width = width, relief = 'ridge')
self.RightFrame.grid(row=0, column=1, sticky = "news")
##############################Buttons Frame###############################
self.ButtonFrame = tk.Frame(self.RightFrame, height = height, width = width, relief = 'ridge')
self.ButtonFrame.grid(row = 0, column = 0, columnspan = 10)
self.Add_Button = tk.Button(self.ButtonFrame, font = self.font_text, text = "Add New", padx = 4, pady = 4, width = 15, command = self.AddData)
self.Add_Button.grid(row = 0, column = 0)
self.Clear_Button = tk.Button(self.ButtonFrame, font = self.font_text, text = "Clear", padx = 4, pady = 4, width = 15, command = self.Clear)
self.Clear_Button.grid(row = 0, column = 1)
self.Delete_Button = tk.Button(self.ButtonFrame, font = self.font_text, text = "Delete", padx = 4, pady = 4, width = 15, command = self.DeletePatiant)
self.Delete_Button.grid(row = 0, column = 2)
self.Update_Button = tk.Button(self.ButtonFrame, font = self.font_text, text = "Update", padx = 4, pady = 4, width = 15, command = self.UpdatePatiant)
self.Update_Button.grid(row = 0, column = 3)
self.Exit_Button = tk.Button(self.ButtonFrame, font = self.font_text, text = "Exit", padx = 4, pady = 4, width = 15, command = self.on_exit)
self.Exit_Button.grid(row = 0, column = 4)
##############################Personal Info###############################
self.Personal_info = tk.Label(self.RightFrame, font = self.font_title, text = "Personal Information", padx = 4, pady = 4)
self.Personal_info.grid(row = 1, column = 0, columnspan = 10, sticky = "w")
self.FullName_Label = tk.Label(self.RightFrame, font = self.font_text, text = "Full Name", padx = 4, pady = 4)
self.FullName_Label.grid(row = 2, column = 0, sticky = "ew")
self.FullName_Entry = tk.Entry(self.RightFrame, font = self.font_text, textvariable = self.Name, width = 20)
self.FullName_Entry.grid(row = 2, column = 1, columnspan = 2, sticky = "ew")
self.Sex_Label = tk.Label(self.RightFrame, font = self.font_text, text = "Sex", padx = 4, pady = 4)
self.Sex_Label.grid(row = 2, column = 2, sticky = "nswe")
self.Sex_Entry = tk.Entry(self.RightFrame, font = self.font_text, textvariable = self.Sex, width = 10)
self.Sex_Entry.grid(row = 2, column = 3, sticky = "ew")
self.Age_Label = tk.Label(self.RightFrame, font = self.font_text, text = "Age", padx = 4, pady = 4)
self.Age_Label.grid(row = 2, column = 4, sticky = "ew")
self.Age_Entry = tk.Entry(self.RightFrame, font = self.font_text, textvariable = self.Age, width = 15)
self.Age_Entry.grid(row = 2, column = 5, sticky = "ew")
self.Status_Label = tk.Label(self.RightFrame, font = self.font_text, text = "Status", padx = 4, pady = 4)
self.Status_Label.grid(row = 2, column = 6, sticky = "ew")
self.Status_Entry = tk.Entry(self.RightFrame, font = self.font_text, textvariable = self.Status, width = 10)
self.Status_Entry.grid(row = 2, column = 7, sticky = "ew")
self.Address_Label = tk.Label(self.RightFrame, font = self.font_text, text = "Address", padx = 4, pady = 4)
self.Address_Label.grid(row = 2, column = 8, sticky = "ew")
self.Address_Entry = tk.Entry(self.RightFrame, font = self.font_text, textvariable = self.Address, width = 15)
self.Address_Entry.grid(row = 2, column = 9, columnspan = 2, sticky = "ew")
##############################Measurement Info###############################
self.Measurement_info = tk.Label(self.RightFrame, font = self.font_title, text = "Measurement Information", padx = 4, pady = 4)
self.Measurement_info.grid(row = 3, column = 0, columnspan = 10, sticky = "w")
self.Height_Label = tk.Label(self.RightFrame, font = self.font_text, text = "Height", padx = 4, pady = 4)
self.Height_Label.grid(row = 4, column = 0, sticky = "ew")
self.Height_Entry = tk.Entry(self.RightFrame, font = self.font_text, textvariable = self.Height, width = 10)
self.Height_Entry.grid(row = 4, column = 1, sticky = "ew")
self.C_Weight_Label = tk.Label(self.RightFrame, font = self.font_text, text = "Current Weight", padx = 4, pady = 4)
self.C_Weight_Label.grid(row = 4, column = 2, sticky = "ew")
self.C_Weight_Entry = tk.Entry(self.RightFrame, font = self.font_text, textvariable = self.Weight_Current, width = 10)
self.C_Weight_Entry.grid(row = 4, column = 3, sticky = "ew")
self.P_Weight_Label = tk.Label(self.RightFrame, font = self.font_text, text = "Perfect Weight", padx = 4, pady = 4)
self.P_Weight_Label.grid(row = 4, column = 4, sticky = "ew")
self.P_Weight_Entry = tk.Entry(self.RightFrame, font = self.font_text, textvariable = self.Weight_Perfect, width = 10)
self.P_Weight_Entry.grid(row = 4, column = 5, sticky = "ew")
self.Changed_Weight_Label = tk.Label(self.RightFrame, font = self.font_text, text = "Changed Weight", padx = 4, pady = 4)
self.Changed_Weight_Label.grid(row = 4, column = 6, sticky = "ew")
self.Changed_Weight_Entry = tk.Entry(self.RightFrame, font = self.font_text, textvariable = self.Weight_Changed, width = 10)
self.Changed_Weight_Entry.grid(row = 4, column = 7, sticky = "ew")
self.BMI_Label = tk.Label(self.RightFrame, font = self.font_text, text = "BMI", padx = 4, pady = 4)
self.BMI_Label.grid(row = 4, column = 8, sticky = "ew")
self.BMI_Entry = tk.Entry(self.RightFrame, font = self.font_text, textvariable = self.BMI, width = 15)
self.BMI_Entry.grid(row = 4, column = 9, sticky = "ew")
self.Leather_tuck_Label = tk.Label(self.RightFrame, font = self.font_text, text = "Leather Tuck", padx = 4, pady = 4)
self.Leather_tuck_Label.grid(row = 5, column = 0, sticky = "ew")
self.Leather_tuck_Entry = tk.Entry(self.RightFrame, font = self.font_text, textvariable = self.Leather_tuck, width = 10)
self.Leather_tuck_Entry.grid(row = 5, column = 1, sticky = "ew")
self.Arm_Circu_Label = tk.Label(self.RightFrame, font = self.font_text, text = "Arm Circumference", padx = 4, pady = 4)
self.Arm_Circu_Label.grid(row = 5, column = 2, sticky = "ew")
self.Arm_Circu_Entry = tk.Entry(self.RightFrame, font = self.font_text, textvariable = self.Arm_Circu, width = 10)
self.Arm_Circu_Entry.grid(row = 5, column = 3, sticky = "ew")
##############################Sickness Info###############################
self.Sickness_info = tk.Label(self.RightFrame, font = self.font_title, text = "Sickness Information", padx = 4, pady = 4)
self.Sickness_info.grid(row = 6, column = 0, columnspan = 10, sticky = "w")
self.Sick_Prev_Label = tk.Label(self.RightFrame, font = self.font_text, text = "Previous Sickness", padx = 4, pady = 4)
self.Sick_Prev_Label.grid(row = 7, column = 0, sticky = "ew")
self.Sick_Prev_Entry = tk.Entry(self.RightFrame, font = self.font_text, textvariable = self.Sick_Prev, width = 10)
self.Sick_Prev_Entry.grid(row = 7, column = 1, columnspan = 9, sticky = "ew")
self.Sick_Curr_Label = tk.Label(self.RightFrame, font = self.font_text, text = "Current Sickness", padx = 4, pady = 4)
self.Sick_Curr_Label.grid(row = 8, column = 0, sticky = "ew")
self.Sick_Curr_Entry = tk.Entry(self.RightFrame, font = self.font_text, textvariable = self.Sick_Curr, width = 10)
self.Sick_Curr_Entry.grid(row = 8, column = 1, columnspan = 9, sticky = "ew")
##############################Vital Signal###############################
self.VitalSignal_info = tk.Label(self.RightFrame, font = self.font_title, text = "Vital Signals", padx = 4, pady = 4)
self.VitalSignal_info.grid(row = 9, column = 0, columnspan = 10, sticky = "w")
self.Pressure_Label = tk.Label(self.RightFrame, font = | |
for batch in to_insert_batches(rows):
res = self.cursor.executemany(stmt, batch)
# new version of crate does not bomb anymore when
# something goes wrong in multi entries
# simply it returns -2 for each row that have an issue
# TODO: improve error handling.
# using batches, we don't need to fail the whole set
# but only failing batches.
if isinstance(res, list):
for i in range(len(res)):
if res[i]['rowcount'] < 0:
raise Exception('An insert failed')
dt = datetime.now() - start_time
time_difference = (dt.days * 24 * 60 * 60 + dt.seconds) \
* 1000 + dt.microseconds / 1000.0
self.logger.debug("Query completed | time={} msec".format(
str(time_difference)))
except Exception as e:
self.sql_error_handler(e)
if not self._should_insert_original_entities(e):
raise
self.logger.exception(
'Failed to insert entities because of below error; ' +
'translator will still try saving original JSON in ' +
f"{table_name}.{ORIGINAL_ENTITY_COL}"
)
self._insert_original_entities_in_failed_batch(
table_name, entities, e)
def _build_insert_params_and_values(
self, col_names: List[str], rows: List[List],
entities: List[dict]) -> (str, str, List[List]):
if self.config.keep_raw_entity():
original_entity_col_index = col_names.index(ORIGINAL_ENTITY_COL)
for i, r in enumerate(rows):
wrapper = self._build_original_data_value(entities[i])
r[original_entity_col_index] = wrapper
col_list = ', '.join(['"{}"'.format(c.lower()) for c in col_names])
placeholders = ','.join(['?'] * len(col_names))
return col_list, placeholders, rows
# NOTE. Brittle code.
# This code, like the rest of the insert workflow implicitly assumes
# 1. col_names[k] <-> rows[k] <-> entities[k]
# 2. original entity column always gets added upfront
# But we never really check anywhere (1) and (2) always hold true,
# so slight changes to the insert workflow could cause nasty bugs...
def _build_original_data_value(self, entity: dict,
insert_error: Exception = None,
failed_batch_id: str = None) -> Any:
value = {
'data': entity
}
if failed_batch_id:
value['failedBatchID'] = failed_batch_id
if insert_error:
value['error'] = repr(insert_error)
return self._to_db_ngsi_structured_value(value)
    @staticmethod
    def _to_db_ngsi_structured_value(data: dict) -> Any:
        # Identity mapping here; presumably backend-specific translators
        # override this to adapt dicts to their SQL driver -- TODO confirm
        # against the concrete subclasses.
        return data
    def _should_insert_original_entities(self,
                                         insert_error: Exception) -> bool:
        """Tell whether, after *insert_error*, the translator should fall
        back to persisting the original entity JSON.

        Abstract hook: must be provided by concrete translators.
        """
        raise NotImplementedError
def _insert_original_entities_in_failed_batch(
self, table_name: str, entities: List[dict],
insert_error: Exception):
cols = f"{ENTITY_ID_COL}, {ENTITY_TYPE_COL}, {self.TIME_INDEX_NAME}" \
+ f", {ORIGINAL_ENTITY_COL}"
stmt = f"insert into {table_name} ({cols}) values (?, ?, ?, ?)"
tix = current_timex()
batch_id = uuid4().hex
rows = [[entity_id(e), entity_type(e), tix,
self._build_original_data_value(e, insert_error, batch_id)]
for e in entities]
self.cursor.executemany(stmt, rows)
def _attr_is_structured(self, a):
if 'value' in a and a['value'] is not None \
and isinstance(a['value'], dict):
self.logger.debug("attribute {} has 'value' attribute of type dict"
.format(a))
return True
return False
# TODO this logic is too simple. looks like this actually only used
# in row 67 of reporter.py and in test_validate_notifivation (i.e
# most probably we can remove
@staticmethod
def is_text(attr_type):
# TODO: verify: same logic in two different places!
# The above kinda reproduces the tests done by the translator,
# we should factor this logic out and keep it in just one place!
return attr_type == NGSI_TEXT or attr_type not in NGSI_TO_SQL
    def _preprocess_values(self, e, original_attrs, col_names,
                           fiware_servicepath):
        """Build the row of DB values for entity *e*, one per column.

        For each column name, picks the entity's id/type/time-index/
        service-path value, or maps the corresponding NGSI attribute to
        its DB representation according to the attribute's NGSI type.
        Attributes that are missing or cannot be cast become None, which
        the insert turns into SQL NULL.

        :param e: the NGSI entity (dict) being translated.
        :param original_attrs: mapping from column name to a pair of
            (original attribute name, NGSI attribute type) -- presumably;
            confirm against the callers that build it.
        :param col_names: the table column names, in insert order.
        :param fiware_servicepath: service path value; falsy values are
            stored as the empty string.
        """
        values = []
        for cn in col_names:
            if cn == 'entity_type':
                values.append(e['type'])
            elif cn == 'entity_id':
                values.append(e['id'])
            elif cn == self.TIME_INDEX_NAME:
                values.append(e[self.TIME_INDEX_NAME])
            elif cn == FIWARE_SERVICEPATH:
                values.append(fiware_servicepath or '')
            else:
                # Normal attributes: dispatch on the declared NGSI type.
                # NOTE: branch order matters -- geo shapes are detected
                # from the value itself before any declared-type check.
                try:
                    attr = original_attrs[cn][0]
                    attr_t = original_attrs[cn][1]
                    if SlfGeometry.is_ngsi_slf_attr(e[attr]):
                        mapped_value = self._ngsi_slf_to_db(e[attr])
                    elif attr_t == NGSI_GEOJSON or attr_t == NGSI_LD_GEOMETRY:
                        mapped_value = self._ngsi_geojson_to_db(e[attr])
                    elif self._is_ngsi_ld_datetime_property(e[attr]):
                        mapped_value = self._ngsi_ld_datetime_to_db(e[attr])
                    elif attr_t == NGSI_TEXT:
                        mapped_value = self._ngsi_text_to_db(e[attr])
                    elif attr_t == NGSI_DATETIME or attr_t == NGSI_ISO8601:
                        mapped_value = self._ngsi_datetime_to_db(e[attr])
                    elif attr_t == "Boolean":
                        mapped_value = self._ngsi_boolean_to_db(e[attr])
                    elif attr_t == "Number":
                        mapped_value = self._ngsi_number_to_db(e[attr])
                    elif attr_t == "Integer":
                        mapped_value = self._ngsi_integer_to_db(e[attr])
                    elif attr_t == 'Relationship':
                        mapped_value = self._ngsi_ld_relationship_to_db(
                            e[attr])
                    elif self._is_ngsi_array(e[attr], attr_t):
                        mapped_value = self._ngsi_array_to_db(e[attr])
                    elif self._is_ngsi_object(e[attr], attr_t):
                        mapped_value = self._ngsi_structured_to_db(e[attr])
                    else:
                        mapped_value = self._ngsi_default_to_db(e[attr])
                    values.append(mapped_value)
                except KeyError:
                    # this entity update does not have a value for the column
                    # so use None which will be inserted as NULL to the db.
                    values.append(None)
                except ValueError:
                    # this value cannot be cast to column type
                    # so use None which will be inserted as NULL to the db.
                    values.append(None)
        return values
@staticmethod
def _is_ngsi_array(attr, attr_t):
return (attr_t == NGSI_STRUCTURED_VALUE
and 'value' in attr and isinstance(attr['value'],
list)) or attr_t == "Array"
@staticmethod
def _is_ngsi_object(attr, attr_t):
return attr_t == NGSI_STRUCTURED_VALUE or (
'value' in attr and isinstance(attr['value'], dict))
@staticmethod
def _is_ngsi_ld_datetime_property(attr):
if 'type' in attr and attr[
'type'] == 'Property' and 'value' in attr and isinstance(
attr['value'], dict) \
and '@type' in attr['value'] and attr['value'][
'@type'] == 'DateTime':
return True
return False
    @staticmethod
    def _ngsi_geojson_to_db(attr):
        """Map a GeoJSON-typed attribute to its DB value.

        Abstract hook: concrete translators must implement it.
        """
        raise NotImplementedError
@staticmethod
def _ngsi_number_to_db(attr):
try:
if isinstance(attr['value'], bool):
return None
elif isinstance(attr['value'], float):
return attr['value']
elif attr['value'] is not None:
return float(attr['value'])
except ValueError:
logging.warning(
'{} cannot be cast to {} replaced with None'.format(
attr.get('value', None), attr.get('type', None)))
return None
else:
logging.warning(
'{} cannot be cast to {} replaced with None'.format(
attr.get('value', None), attr.get('type', None)))
return None
@staticmethod
def _ngsi_datetime_to_db(attr):
if 'value' in attr and SQLTranslator._is_iso_date(attr['value']):
return attr['value']
else:
logging.warning(
'{} cannot be cast to {} replaced with None'.format(
attr.get('value', None), attr.get('type', None)))
return None
@staticmethod
def _ngsi_integer_to_db(attr):
try:
if isinstance(attr['value'], bool):
return None
elif isinstance(attr['value'], int):
return attr['value']
elif attr['value'] is not None:
return int(float(attr['value']))
except ValueError:
logging.warning(
'{} cannot be cast to {} replaced with None'.format(
attr.get('value', None), attr.get('type', None)))
return None
else:
logging.warning(
'{} cannot be cast to {} replaced with None'.format(
attr.get('value', None), attr.get('type', None)))
return None
@staticmethod
def _ngsi_boolean_to_db(attr):
if isinstance(attr['value'], str) and attr['value'].lower() == 'true':
return True
elif isinstance(attr['value'], str) \
and attr['value'].lower() == 'false':
return False
elif isinstance(attr['value'], int) and attr['value'] == 1:
return True
elif isinstance(attr['value'], int) and attr['value'] == 0:
return False
elif isinstance(attr['value'], bool):
return attr['value']
else:
logging.warning(
'{} cannot be cast to {} replaced with None'.format(
attr.get('value', None), attr.get('type', None)))
return None
    @staticmethod
    def _ngsi_slf_to_db(attr):
        """Convert an NGSI simple-location-format attribute (not implemented here)."""
        raise NotImplementedError
    @staticmethod
    def _ngsi_structured_to_db(attr):
        """Convert an NGSI StructuredValue attribute (not implemented here)."""
        raise NotImplementedError
    @staticmethod
    def _ngsi_array_to_db(attr):
        """Convert an NGSI Array attribute (not implemented here)."""
        raise NotImplementedError
@staticmethod
def _ngsi_text_to_db(attr):
if 'value' in attr and attr['value'] is not None:
return str(attr['value'])
logging.warning('{} cannot be cast to {} replaced with None'.format(
attr.get('value', None), attr.get('type', None)))
return None
@staticmethod
def _ngsi_default_to_db(attr):
return attr.get('value', None)
@staticmethod
def _ngsi_ld_datetime_to_db(attr):
if SQLTranslator._is_ngsi_ld_datetime_property(
attr) and SQLTranslator._is_iso_date(attr['value']['@value']):
return attr['value']['@value']
else:
if 'value' in attr:
logging.warning(
'{} cannot be cast to {} replaced with None'.format(
attr['value'].get('@value', None),
attr['value'].get('@type', None)))
else:
logging.warning(
'attribute "value" is missing, cannot perform cast')
return None
@staticmethod
def _ngsi_ld_relationship_to_db(attr):
return attr.get('value', None) or attr.get('object', None)
    def _update_metadata_table(self, table_name, metadata):
        """
        This method creates the METADATA_TABLE_NAME (if not exists), which
        stores, for each table_name (entity type), a translation table (dict)
        mapping the column names (from entity attributes) to the corresponding
        attributes metadata such as original attribute names and NGSI types.
        If such table existed, this method updates it accordingly if required.
        Required means, either there was no metadata for that
        table_name or the new_metadata has new entries not present in
        persisted_metadata.
        :param table_name: unicode
            The name of the table whose metadata will be updated
        :param metadata: dict
            The dict mapping the matedata of each column. See original_attrs.
        :return:
            The set of metadata keys that were not yet persisted (empty when
            nothing had to be written).
        """
        # Ensure the metadata table exists before querying it; a placeholder
        # cache entry prevents repeating the existence check on every call.
        if not self._is_query_in_cache(self.dbCacheName, METADATA_TABLE_NAME):
            self._create_metadata_table()
            self._cache(self.dbCacheName,
                        METADATA_TABLE_NAME,
                        None,
                        self.default_ttl)
        # Bring translation table!
        stmt = "select entity_attrs from {} where table_name = ?".format(
            METADATA_TABLE_NAME)
        # By design, one entry per table_name
        try:
            res = self._execute_query_via_cache(self.dbCacheName,
                                                table_name,
                                                stmt,
                                                [table_name],
                                                self.default_ttl)
            persisted_metadata = res[0][0] if res else {}
        except Exception as e:
            self.sql_error_handler(e)
            # Metadata table still not created
            logging.debug(str(e), exc_info=True)
            # Attempt to re-create metadata table
            self._create_metadata_table()
            persisted_metadata = {}
        # Keys present in the new metadata but not yet persisted.
        diff = metadata.keys() - persisted_metadata.keys()
        if diff:
            # we update using the difference to "not" corrupt the metadata
            # by previous insert
            update = dict((k, metadata[k]) for k in diff if k in metadata)
            persisted_metadata.update(update)
            self._store_metadata(table_name, persisted_metadata)
            self._cache(self.dbCacheName,
                        table_name,
                        [[persisted_metadata]],
                        self.default_ttl)
        return diff
    # TODO: concurrency.
    # This implementation paves
    # the way to lost updates...
    def _store_metadata(self, table_name, persisted_metadata):
        """Persist the metadata dict for table_name (backend-specific)."""
        raise NotImplementedError
def _get_et_table_names(self, fiware_service=None):
"""
Return the names of all the tables representing entity types.
:return: list(unicode)
"""
stmt = "select distinct table_name from {}".format(METADATA_TABLE_NAME)
key = ""
if fiware_service:
key = fiware_service.lower()
where = " where table_name ~* '\"{}{}\"[.].*'"
stmt += where.format(TENANT_PREFIX, key)
else:
| |
85, 13),
(166, 54, 3)],
6: [
(254, 237, 222),
(253, 208, 162),
(253, 174, 107),
(253, 141, 60),
(230, 85, 13),
(166, 54, 3)],
7: [
(254, 237, 222),
(253, 208, 162),
(253, 174, 107),
(253, 141, 60),
(241, 105, 19),
(217, 72, 1),
(140, 45, 4)],
8: [
(255, 245, 235),
(254, 230, 206),
(253, 208, 162),
(253, 174, 107),
(253, 141, 60),
(241, 105, 19),
(217, 72, 1),
(140, 45, 4)],
9: [
(255, 245, 235),
(254, 230, 206),
(253, 208, 162),
(253, 174, 107),
(253, 141, 60),
(241, 105, 19),
(217, 72, 1),
(166, 54, 3),
(127, 39, 4)], "type": "seq", "reverse": True},
"GreenBlue": {
3: [
(229, 245, 249),
(153, 216, 201),
(44, 162, 95)],
4: [
(237, 248, 251),
(178, 226, 226),
(102, 194, 164),
(35, 139, 69)],
5: [
(237, 248, 251),
(178, 226, 226),
(102, 194, 164),
(44, 162, 95),
(0, 109, 44)],
6: [
(237, 248, 251),
(204, 236, 230),
(153, 216, 201),
(102, 194, 164),
(44, 162, 95),
(0, 109, 44)],
7: [
(237, 248, 251),
(204, 236, 230),
(153, 216, 201),
(102, 194, 164),
(65, 174, 118),
(35, 139, 69),
(0, 88, 36)],
8: [
(247, 252, 253),
(229, 245, 249),
(204, 236, 230),
(153, 216, 201),
(102, 194, 164),
(65, 174, 118),
(35, 139, 69),
(0, 88, 36)],
9: [
(247, 252, 253),
(229, 245, 249),
(204, 236, 230),
(153, 216, 201),
(102, 194, 164),
(65, 174, 118),
(35, 139, 69),
(0, 109, 44),
(0, 68, 27)], "type": "seq", "reverse": True},
"BrownOrangeYellow": {
3: [
(255, 247, 188),
(254, 196, 79),
(217, 95, 14)],
4: [
(255, 255, 212),
(254, 217, 142),
(254, 153, 41),
(204, 76, 2)],
5: [
(255, 255, 212),
(254, 217, 142),
(254, 153, 41),
(217, 95, 14),
(153, 52, 4)],
6: [
(255, 255, 212),
(254, 227, 145),
(254, 196, 79),
(254, 153, 41),
(217, 95, 14),
(153, 52, 4)],
7: [
(255, 255, 212),
(254, 227, 145),
(254, 196, 79),
(254, 153, 41),
(236, 112, 20),
(204, 76, 2),
(140, 45, 4)],
8: [
(255, 255, 229),
(255, 247, 188),
(254, 227, 145),
(254, 196, 79),
(254, 153, 41),
(236, 112, 20),
(204, 76, 2),
(140, 45, 4)],
9: [
(255, 255, 229),
(255, 247, 188),
(254, 227, 145),
(254, 196, 79),
(254, 153, 41),
(236, 112, 20),
(204, 76, 2),
(153, 52, 4),
(102, 37, 6)], "type": "seq", "reverse": True},
"GreenYellow": {
3: [
(247, 252, 185),
(173, 221, 142),
(49, 163, 84)],
4: [
(255, 255, 204),
(194, 230, 153),
(120, 198, 121),
(35, 132, 67)],
5: [
(255, 255, 204),
(194, 230, 153),
(120, 198, 121),
(49, 163, 84),
(0, 104, 55)],
6: [
(255, 255, 204),
(217, 240, 163),
(173, 221, 142),
(120, 198, 121),
(49, 163, 84),
(0, 104, 55)],
7: [
(255, 255, 204),
(217, 240, 163),
(173, 221, 142),
(120, 198, 121),
(65, 171, 93),
(35, 132, 67),
(0, 90, 50)],
8: [
(255, 255, 229),
(247, 252, 185),
(217, 240, 163),
(173, 221, 142),
(120, 198, 121),
(65, 171, 93),
(35, 132, 67),
(0, 90, 50)],
9: [
(255, 255, 229),
(247, 252, 185),
(217, 240, 163),
(173, 221, 142),
(120, 198, 121),
(65, 171, 93),
(35, 132, 67),
(0, 104, 55),
(0, 69, 41)], "type": "seq", "reverse": True},
"Reds": {
3: [
(254, 224, 210),
(252, 146, 114),
(222, 45, 38)],
4: [
(254, 229, 217),
(252, 174, 145),
(251, 106, 74),
(203, 24, 29)],
5: [
(254, 229, 217),
(252, 174, 145),
(251, 106, 74),
(222, 45, 38),
(165, 15, 21)],
6: [
(254, 229, 217),
(252, 187, 161),
(252, 146, 114),
(251, 106, 74),
(222, 45, 38),
(165, 15, 21)],
7: [
(254, 229, 217),
(252, 187, 161),
(252, 146, 114),
(251, 106, 74),
(239, 59, 44),
(203, 24, 29),
(153, 0, 13)],
8: [
(255, 245, 240),
(254, 224, 210),
(252, 187, 161),
(252, 146, 114),
(251, 106, 74),
(239, 59, 44),
(203, 24, 29),
(153, 0, 13)],
9: [
(255, 245, 240),
(254, 224, 210),
(252, 187, 161),
(252, 146, 114),
(251, 106, 74),
(239, 59, 44),
(203, 24, 29),
(165, 15, 21),
(103, 0, 13)], "type": "seq", "reverse": True},
"PurpleRed": {
3: [
(253, 224, 221),
(250, 159, 181),
(197, 27, 138)],
4: [
(254, 235, 226),
(251, 180, 185),
(247, 104, 161),
(174, 1, 126)],
5: [
(254, 235, 226),
(251, 180, 185),
(247, 104, 161),
(197, 27, 138),
(122, 1, 119)],
6: [
(254, 235, 226),
(252, 197, 192),
(250, 159, 181),
(247, 104, 161),
(197, 27, 138),
(122, 1, 119)],
7: [
(254, 235, 226),
(252, 197, 192),
(250, 159, 181),
(247, 104, 161),
(221, 52, 151),
(174, 1, 126),
(122, 1, 119)],
8: [
(255, 247, 243),
(253, 224, 221),
(252, 197, 192),
(250, 159, 181),
(247, 104, 161),
(221, 52, 151),
(174, 1, 126),
(122, 1, 119)],
9: [
(255, 247, 243),
(253, 224, 221),
(252, 197, 192),
(250, 159, 181),
(247, 104, 161),
(221, 52, 151),
(174, 1, 126),
(122, 1, 119),
(73, 0, 106)], "type": "seq", "reverse": True},
"Greens": {
3: [
(229, 245, 224),
(161, 217, 155),
(49, 163, 84)],
4: [
(237, 248, 233),
(186, 228, 179),
(116, 196, 118),
(35, 139, 69)],
5: [
(237, 248, 233),
(186, 228, 179),
(116, 196, 118),
(49, 163, 84),
(0, 109, 44)],
6: [
(237, 248, 233),
(199, 233, 192),
(161, 217, 155),
(116, 196, 118),
(49, 163, 84),
(0, 109, 44)],
7: [
(237, 248, 233),
(199, 233, 192),
(161, 217, 155),
(116, 196, 118),
(65, 171, 93),
(35, 139, 69),
(0, 90, 50)],
8: [
(247, 252, 245),
(229, 245, 224),
(199, 233, 192),
(161, 217, 155),
(116, 196, 118),
(65, 171, 93),
(35, 139, 69),
(0, 90, 50)],
9: [
(247, 252, 245),
(229, 245, 224),
(199, 233, 192),
(161, 217, 155),
(116, 196, 118),
(65, 171, 93),
(35, 139, 69),
(0, 109, 44),
(0, 68, 27)], "type": "seq", "reverse": True},
"BlueGreenYellow": {
3: [
(237, 248, 177),
(127, 205, 187),
(44, 127, 184)],
4: [
(255, 255, 204),
(161, 218, 180),
(65, 182, 196),
(34, 94, 168)],
5: [
(255, 255, 204),
(161, 218, 180),
(65, 182, 196),
(44, 127, 184),
(37, 52, 148)],
6: [
(255, 255, 204),
(199, 233, 180),
(127, 205, 187),
(65, 182, 196),
(44, 127, 184),
(37, 52, 148)],
7: [
(255, 255, 204),
(199, 233, 180),
(127, 205, 187),
(65, 182, 196),
(29, 145, 192),
(34, 94, 168),
(12, 44, 132)],
8: [
(255, 255, 217),
(237, 248, 177),
(199, 233, 180),
(127, 205, 187),
(65, 182, 196),
(29, 145, 192),
(34, 94, 168),
(12, 44, 132)],
9: [
(255, 255, 217),
(237, 248, 177),
(199, 233, 180),
(127, 205, 187),
(65, 182, 196),
(29, 145, 192),
(34, 94, 168),
(37, 52, 148),
(8, 29, 88)], "type": "seq", "reverse": True},
"Purples": {
3: [
(239, 237, 245),
(188, 189, 220),
(117, 107, 177)],
4: [
(242, 240, 247),
(203, 201, 226),
(158, 154, 200),
(106, 81, 163)],
5: [
(242, 240, 247),
(203, 201, 226),
(158, 154, 200),
(117, 107, 177),
(84, 39, 143)],
6: [
(242, 240, 247),
(218, 218, 235),
(188, 189, 220),
(158, 154, 200),
(117, 107, 177),
(84, 39, 143)],
7: [
(242, 240, 247),
(218, 218, 235),
(188, 189, 220),
(158, 154, 200),
(128, 125, 186),
(106, 81, 163),
(74, 20, 134)],
8: [
(252, 251, 253),
(239, 237, 245),
(218, 218, 235),
(188, 189, 220),
(158, 154, 200),
(128, 125, 186),
(106, 81, 163),
(74, 20, 134)],
9: [
(252, 251, 253),
(239, 237, 245),
(218, 218, 235),
(188, 189, 220),
(158, 154, 200),
(128, 125, 186),
(106, 81, 163),
(84, 39, 143),
(63, 0, 125)], "type": "seq", "reverse": True},
"BlueGreen": {
3: [
(224, | |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding=utf-8
"""Run relation following over pre-trained corpus index."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import json
import os
import re
import time
from absl import flags
from bert import modeling
from bert import optimization
from bert import tokenization
from language.labs.drkit import evaluate
from language.labs.drkit import input_fns
from language.labs.drkit import model_fns
from language.labs.drkit import search_utils
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
from tensorflow.contrib import memory_stats as contrib_memory_stats
FLAGS = flags.FLAGS
# All flags below are read through the module-level FLAGS object.
## Required parameters
flags.DEFINE_string(
    "bert_config_file", None,
    "The config json file corresponding to the pre-trained BERT model. "
    "This specifies the model architecture.")
flags.DEFINE_string("vocab_file", None,
                    "The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
    "output_dir", None,
    "The output directory where the model checkpoints will be written.")
## Other parameters
# Input data files and task configuration.
flags.DEFINE_string("train_file", None, "JSON for training.")
flags.DEFINE_string("predict_file", None, "JSON for predictions.")
flags.DEFINE_string("test_file", None, "JSON for predictions.")
flags.DEFINE_string("data_type", "onehop",
                    "Whether queries are `onehop` or `twohop`.")
flags.DEFINE_string("model_type", "onehop",
                    "Whether to use `onehop` or `twohop` model.")
flags.DEFINE_string(
    "init_checkpoint", None,
    "Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_string("train_data_dir", None,
                    "Location of entity / mention files for training data.")
flags.DEFINE_string("test_data_dir", None,
                    "Location of entity / mention files for test data.")
flags.DEFINE_integer("num_hops", 2, "Number of hops in rule template.")
flags.DEFINE_integer("max_entity_len", 15,
                     "Maximum number of tokens in an entity name.")
flags.DEFINE_integer(
    "num_mips_neighbors", 15000,
    "Number of nearest neighbor mentions to retrieve for queries in each hop.")
flags.DEFINE_bool(
    "do_lower_case", True,
    "Whether to lower case the input text. Should be True for uncased "
    "models and False for cased models.")
flags.DEFINE_integer(
    "projection_dim", 200, "Number of dimensions to project embeddings to. "
    "Set to None to use full dimensions.")
flags.DEFINE_integer(
    "max_query_length", 30,
    "The maximum number of tokens for the question. Questions longer than "
    "this will be truncated to this length.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_predict", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool("do_test", False, "Whether to run eval on the test set.")
flags.DEFINE_float(
    "subject_mention_probability", 0.0,
    "Fraction of training instances for which we use subject "
    "mentions in the text as opposed to canonical names.")
# Training / optimization hyperparameters.
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("predict_batch_size", 8,
                     "Total batch size for predictions.")
flags.DEFINE_float("learning_rate", 3e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0,
                   "Total number of training epochs to perform.")
flags.DEFINE_float(
    "warmup_proportion", 0.1,
    "Proportion of training to perform linear learning rate warmup for. "
    "E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
                     "How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
                     "How many steps to make in each estimator call.")
flags.DEFINE_string("supervision", "entity",
                    "Type of supervision -- `mention` or `entity`.")
flags.DEFINE_float("entity_score_threshold", 1e-2,
                   "Minimum score of an entity to retrieve sparse neighbors.")
flags.DEFINE_float("softmax_temperature", 2.,
                   "Temperature before computing softmax.")
flags.DEFINE_string(
    "sparse_reduce_fn", "max",
    "Function to aggregate sparse search results for a set of "
    "entities.")
flags.DEFINE_string("sparse_strategy", "dense_first",
                    "How to combine sparse and dense components.")
flags.DEFINE_boolean("intermediate_loss", False,
                     "Compute loss on intermediate layers.")
flags.DEFINE_boolean("light", False, "If true run in light mode.")
flags.DEFINE_string(
    "qry_layers_to_use", "-1",
    "Comma-separated list of layer representations to use as the fixed "
    "query representation.")
flags.DEFINE_string(
    "qry_aggregation_fn", "concat",
    "Aggregation method for combining the outputs of layers specified using "
    "`qry_layers`.")
flags.DEFINE_string(
    "entity_score_aggregation_fn", "max",
    "Aggregation method for combining the mention logits to entities.")
flags.DEFINE_float("question_dropout", 0.2,
                   "Dropout probability for question BiLSTMs.")
flags.DEFINE_integer("question_num_layers", 5,
                     "Number of layers for question BiLSTMs.")
flags.DEFINE_boolean(
    "ensure_answer_sparse", False,
    "If true, ensures answer is among sparse retrieval results"
    "during training.")
flags.DEFINE_boolean(
    "ensure_answer_dense", False,
    "If true, ensures answer is among dense retrieval results "
    "during training.")
flags.DEFINE_boolean(
    "train_with_sparse", True,
    "If true, multiplies logits with sparse retrieval results "
    "during training.")
flags.DEFINE_boolean(
    "predict_with_sparse", True,
    "If true, multiplies logits with sparse retrieval results "
    "during inference.")
flags.DEFINE_boolean("fix_sparse_to_one", True,
                     "If true, sparse search matrix is fixed to {0,1}.")
flags.DEFINE_boolean("l2_normalize_db", False,
                     "If true, pre-trained embeddings are normalized to 1.")
flags.DEFINE_boolean("load_only_bert", False,
                     "To load only BERT variables from init_checkpoint.")
flags.DEFINE_boolean(
    "use_best_ckpt_for_predict", False,
    "If True, loads the best_model checkpoint in model_dir, "
    "instead of the latest one.")
# Runtime, profiling and TPU configuration.
flags.DEFINE_bool("profile_model", False, "Whether to run profiling.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
flags.DEFINE_integer("random_seed", 1, "Random seed for reproducibility.")
flags.DEFINE_string(
    "tpu_name", None,
    "The Cloud TPU to use for training. This should be either the name "
    "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
    "url.")
flags.DEFINE_string(
    "tpu_zone", None,
    "[Optional] GCE zone where the Cloud TPU is located in. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")
flags.DEFINE_string(
    "gcp_project", None,
    "[Optional] Project name for the Cloud TPU-enabled project. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")
flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
    "num_tpu_cores", 8,
    "Only used if `use_tpu` is True. Total number of TPU cores to use.")
flags.DEFINE_bool(
    "verbose_logging", False,
    "If true, all of the warnings related to data processing will be printed. "
    "A number of warnings are expected for a normal SQuAD evaluation.")
class QAConfig(object):
  """Container for the QA model hyperparameters.

  Every constructor argument is stored on the instance under the same
  name; the only transformation applied is parsing `qry_layers_to_use`
  from a comma-separated string into a list of ints.
  """

  def __init__(self, qry_layers_to_use, qry_aggregation_fn, dropout,
               qry_num_layers, projection_dim, num_entities, max_entity_len,
               ensure_answer_sparse, ensure_answer_dense, train_with_sparse,
               predict_with_sparse, fix_sparse_to_one, supervision,
               l2_normalize_db, entity_score_aggregation_fn,
               entity_score_threshold, softmax_temperature, sparse_reduce_fn,
               intermediate_loss, train_batch_size, predict_batch_size, light,
               sparse_strategy, load_only_bert):
    # Query encoder configuration.
    self.qry_layers_to_use = [
        int(layer) for layer in qry_layers_to_use.split(",")
    ]
    self.qry_aggregation_fn = qry_aggregation_fn
    self.dropout = dropout
    self.qry_num_layers = qry_num_layers
    self.projection_dim = projection_dim
    # Entity / index sizes and checkpoint loading behaviour.
    self.num_entities = num_entities
    self.max_entity_len = max_entity_len
    self.load_only_bert = load_only_bert
    # Sparse / dense retrieval behaviour.
    self.ensure_answer_sparse = ensure_answer_sparse
    self.ensure_answer_dense = ensure_answer_dense
    self.train_with_sparse = train_with_sparse
    self.predict_with_sparse = predict_with_sparse
    self.fix_sparse_to_one = fix_sparse_to_one
    self.sparse_reduce_fn = sparse_reduce_fn
    self.sparse_strategy = sparse_strategy
    # Supervision and entity scoring.
    self.supervision = supervision
    self.l2_normalize_db = l2_normalize_db
    self.entity_score_aggregation_fn = entity_score_aggregation_fn
    self.entity_score_threshold = entity_score_threshold
    self.softmax_temperature = softmax_temperature
    self.intermediate_loss = intermediate_loss
    # Batching / runtime.
    self.train_batch_size = train_batch_size
    self.predict_batch_size = predict_batch_size
    self.light = light
class MIPSConfig(object):
  """Configuration for the MIPS (maximum inner product search) index.

  Fix: the docstring was copy-pasted from QAConfig ("Hyperparameters for
  the QA model") and did not describe this class.
  """

  def __init__(self, ckpt_path, ckpt_var_name, num_mentions, emb_size,
               num_neighbors):
    # Checkpoint file and variable holding the pre-computed embeddings.
    self.ckpt_path = ckpt_path
    self.ckpt_var_name = ckpt_var_name
    # Size of the search space and of each embedding vector.
    self.num_mentions = num_mentions
    self.emb_size = emb_size
    # Number of nearest neighbors retrieved per query.
    self.num_neighbors = num_neighbors
def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu,
                     exclude_bert):
  """Creates an optimizer training op, optionally excluding BERT vars.

  Args:
    loss: scalar loss tensor to minimize.
    init_lr: initial learning rate before decay/warmup.
    num_train_steps: total number of training steps, used for linear decay.
    num_warmup_steps: steps of linear warmup; falsy disables warmup.
    use_tpu: if True, wraps the optimizer in a CrossShardOptimizer.
    exclude_bert: if True, variables in the "bert" collection are not trained.

  Returns:
    A training op that applies globally-clipped gradients and increments
    the global step.
  """
  global_step = tf.train.get_or_create_global_step()
  learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)
  # Implements linear decay of the learning rate.
  learning_rate = tf.train.polynomial_decay(
      learning_rate,
      global_step,
      num_train_steps,
      end_learning_rate=0.0,
      power=1.0,
      cycle=False)
  # Implements linear warmup. I.e., if global_step < num_warmup_steps, the
  # learning rate will be `global_step/num_warmup_steps * init_lr`.
  if num_warmup_steps:
    global_steps_int = tf.cast(global_step, tf.int32)
    warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)
    global_steps_float = tf.cast(global_steps_int, tf.float32)
    warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
    warmup_percent_done = global_steps_float / warmup_steps_float
    warmup_learning_rate = init_lr * warmup_percent_done
    # Select warmup vs decayed rate with a 0/1 mask instead of a cond.
    is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
    learning_rate = ((1.0 - is_warmup) * learning_rate +
                     is_warmup * warmup_learning_rate)
  # It is recommended that you use this optimizer for fine tuning, since this
  # is how the model was trained (note that the Adam m/v variables are NOT
  # loaded from init_checkpoint.)
  optimizer = optimization.AdamWeightDecayOptimizer(
      learning_rate=learning_rate,
      weight_decay_rate=0.01,
      beta_1=0.9,
      beta_2=0.999,
      epsilon=1e-6,
      exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
  if use_tpu:
    optimizer = tf.estimator.tpu.CrossShardOptimizer(optimizer)
  tvars = tf.trainable_variables()
  if exclude_bert:
    # Drop everything collected under the "bert" scope from training.
    bert_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "bert")
    tvars = [vv for vv in tvars if vv not in bert_vars]
  tf.logging.info("Training the following variables:")
  for vv in tvars:
    tf.logging.info(vv.name)
  grads = tf.gradients(loss, tvars, colocate_gradients_with_ops=True)
  # This is how the model was pre-trained.
  (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
  train_op = optimizer.apply_gradients(
      zip(grads, tvars), global_step=global_step)
  # AdamWeightDecayOptimizer does not bump the global step itself, so the
  # increment is grouped into the returned op.
  new_global_step = global_step + 1
  train_op = tf.group(train_op, [global_step.assign(new_global_step)])
  return train_op
def get_assignment_map_from_checkpoint(tvars,
                                       init_checkpoint,
                                       load_only_bert=False):
  """Compute the intersection of current variables and checkpoint variables.

  Args:
    tvars: list of trainable variables in the current graph.
    init_checkpoint: path to the checkpoint to initialize from.
    load_only_bert: if True, only variables whose name contains "bert"
      are considered for initialization.

  Returns:
    A tuple (assignment_map, initialized_variable_names) where
    assignment_map maps checkpoint variable names to graph variable names
    and initialized_variable_names marks each initialized variable name
    (and its ":0" tensor name) with 1.
  """
  # Fix: removed the dead `assignment_map = {}` store that was immediately
  # shadowed by the OrderedDict below.
  initialized_variable_names = {}
  name_to_variable = collections.OrderedDict()
  for var in tvars:
    name = var.name
    # Strip the trailing ":0" tensor suffix from the variable name.
    m = re.match("^(.*):\\d+$", name)
    if m is not None:
      name = m.group(1)
    if load_only_bert and ("bert" not in name):
      continue
    name_to_variable[name] = var
  init_vars = tf.train.list_variables(init_checkpoint)
  assignment_map = collections.OrderedDict()
  for name, _ in init_vars:
    if name not in name_to_variable:
      continue
    assignment_map[name] = name
    initialized_variable_names[name] = 1
    initialized_variable_names[name + ":0"] = 1
  return (assignment_map, initialized_variable_names)
def model_fn_builder(bert_config,
qa_config,
mips_config,
init_checkpoint,
e2m_checkpoint,
m2e_checkpoint,
entity_id_checkpoint,
entity_mask_checkpoint,
learning_rate,
num_train_steps,
num_warmup_steps,
use_tpu,
use_one_hot_embeddings,
create_model_fn,
summary_obj=None):
"""Returns `model_fn` closure for TPUEstimator."""
tf.random.set_random_seed(FLAGS.random_seed)
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for | |
# day05/day05_puz1.py
#! /usr/bin/env python
def run_opcode(code_list, programme_input=1):
    """Run the opcode programme contained in code_list.

    Instructions are executed one by one until opcode 99 is reached,
    at which point the (possibly modified) programme and the last output
    value are returned.

    Parameters
    ----------
    code_list : list
        The opcode programme
    programme_input : int
        The input to the programme, default 1

    Returns
    -------
    code_list : list
        The programme after execution
    output : int or None
        The last value emitted by opcode 4, or None if none was emitted

    Raises
    ------
    ValueError
        If an unrecognised opcode is encountered. (The original code
        never advanced opcode_loc in that case and looped forever.)
    """
    # Start reading in the programme at position 0
    opcode_loc = 0
    opcode = None
    output = None
    while opcode != '99':
        # Get and parse the opcode
        code = code_list[opcode_loc]
        opcode, parameter_mode_dict = parse_opcode(code)
        if opcode == '01':
            # Opcode 1: add the two parameter values
            code_list = apply_opcode1(code_list,
                                      opcode_loc,
                                      parameter_mode_dict)
            # Instruction is 4 values long: opcode + 3 parameters
            opcode_loc += 4
        elif opcode == '02':
            # Opcode 2: multiply the two parameter values
            code_list = apply_opcode2(code_list,
                                      opcode_loc,
                                      parameter_mode_dict)
            opcode_loc += 4
        elif opcode == '03':
            # Opcode 3: store the programme input at the given location
            code_list = apply_opcode3(code_list,
                                      opcode_loc,
                                      programme_input=programme_input)
            # Instruction is 2 values long: opcode + 1 parameter
            opcode_loc += 2
        elif opcode == '04':
            # Opcode 4: emit an output value
            code_list, output = apply_opcode4(code_list,
                                              opcode_loc,
                                              parameter_mode_dict)
            # Print the output value to screen
            print(f'Output value: {output}')
            opcode_loc += 2
            # If the output is not 0 then check that it is followed by a 99
            if output != 0:
                check_next_opcode_99(opcode_loc, code_list)
        elif opcode != '99':
            # Unknown opcode: fail loudly instead of spinning forever,
            # since opcode_loc would never advance.
            raise ValueError(f'Unrecognised opcode {opcode} '
                             f'at position {opcode_loc}')
    return code_list, output
def load_computer_data(fname):
    """Read the comma-separated opcode programme from a file.

    Parameters
    ----------
    fname : string
        File provided by advent of code competition

    Returns
    -------
    list
        The programme as a flat list of integers
    """
    # Flatten every line into comma-separated tokens, then cast to int
    # (int() tolerates the trailing newline on the last token of a line).
    with open(fname, 'r') as handle:
        raw_tokens = [tok for line in handle for tok in line.split(',')]
    return [int(tok) for tok in raw_tokens]
def parse_opcode(code):
    """Split a (up to) 5-digit instruction into opcode and parameter modes.

    The two rightmost digits are the instruction; the remaining three
    digits, read right to left, give the mode ('0' position, '1'
    immediate) for parameters 1-3.

    Parameters
    ----------
    code : int
        Instruction as an integer that is up to 5 digits long

    Returns
    -------
    opcode : str
        Two-digit instruction string, e.g. '02'
    parameter_mode_dict : dict
        Maps parameter number (1-3) to its mode character
    """
    padded = str(code).zfill(5)
    opcode = padded[-2:]
    parameter_mode_dict = {1: padded[2], 2: padded[1], 3: padded[0]}
    return opcode, parameter_mode_dict
# Define Python user-defined exceptions
# Adapted from https://www.programiz.com/python-programming/user-defined-exception # noqa
class Error(Exception):
    """Base class for other exceptions"""


class ForbiddenValueError(Error):
    """Raised when the opcode mode is not permitted"""
def apply_opcode1(code_list, opcode_loc, parameter_mode_dict):
    """Apply opcode 1 (addition) at opcode_loc, writing the sum in place.

    The two operands are resolved according to their parameter modes
    (position '0' dereferences, immediate otherwise); their sum is stored
    at the position named by the third parameter, which must be in
    position mode.

    Parameters
    ----------
    code_list : list
        The whole programme
    opcode_loc : int
        The index of the opcode in code_list
    parameter_mode_dict : dict
        Modes for the three parameters following the opcode

    Returns
    -------
    code_list : list
        The whole programme

    Raises
    ------
    ForbiddenValueError
        If the third (destination) parameter is not in position mode
    """
    _, raw1, raw2, destination = code_list[opcode_loc:opcode_loc + 4]

    def resolve(raw_value, mode):
        # Position mode ('0') dereferences; otherwise use the value as-is.
        return code_list[raw_value] if mode == '0' else raw_value

    addend1 = resolve(raw1, parameter_mode_dict[1])
    addend2 = resolve(raw2, parameter_mode_dict[2])
    # The destination parameter must always be in position mode.
    if parameter_mode_dict[3] != '0':
        print('Something has gone wrong! ' +
              'The 3rd parameter should never be anything other than 0')
        raise ForbiddenValueError
    code_list[destination] = addend1 + addend2
    return code_list
def apply_opcode2(code_list, opcode_loc, parameter_mode_dict):
    """Apply opcode 2 (multiplication) at opcode_loc, writing in place.

    The two operands are resolved according to their parameter modes
    (position '0' dereferences, immediate otherwise); their product is
    stored at the position named by the third parameter, which must be in
    position mode.

    Parameters
    ----------
    code_list : list
        The whole programme
    opcode_loc : int
        The index of the opcode in code_list
    parameter_mode_dict : dict
        Modes for the three parameters following the opcode

    Returns
    -------
    code_list : list
        The whole programme

    Raises
    ------
    ForbiddenValueError
        If the third (destination) parameter is not in position mode
    """
    _, raw1, raw2, destination = code_list[opcode_loc:opcode_loc + 4]

    def resolve(raw_value, mode):
        # Position mode ('0') dereferences; otherwise use the value as-is.
        return code_list[raw_value] if mode == '0' else raw_value

    factor1 = resolve(raw1, parameter_mode_dict[1])
    factor2 = resolve(raw2, parameter_mode_dict[2])
    # The destination parameter must always be in position mode.
    if parameter_mode_dict[3] != '0':
        print('Something has gone wrong! ' +
              'The 3rd parameter should never be anything other than 0')
        raise ForbiddenValueError
    code_list[destination] = factor1 * factor2
    return code_list
def apply_opcode3(code_list, opcode_loc, programme_input=1):
    """Apply opcode 3: store the programme input at the named position.

    Parameters
    ----------
    code_list : list
        The whole programme
    opcode_loc : int
        The index of the opcode in code_list
    programme_input : int
        Input value, default 1

    Returns
    -------
    code_list : list
        The whole programme, with the input written in place
    """
    # The single parameter names the destination index.
    destination = code_list[opcode_loc + 1]
    code_list[destination] = programme_input
    return code_list
def apply_opcode4(code_list, opcode_loc, parameter_mode_dict):
    """Apply opcode 4: read an output value via its single parameter.

    Parameters
    ----------
    code_list : list
        The whole programme
    opcode_loc : int
        The index of the opcode in code_list
    parameter_mode_dict : dict
        Mode for the single parameter ('0' position, '1' immediate)

    Returns
    -------
    code_list : list
        The whole programme, unchanged
    output : int
        The value selected by the parameter
    """
    raw_value = code_list[opcode_loc + 1]
    # Position mode dereferences; immediate mode uses the value directly.
    if parameter_mode_dict[1] == '0':
        output = code_list[raw_value]
    else:
        output = raw_value
    return code_list, output
def check_next_opcode_99(opcode_loc, code_list):
# A non-zero output value should only occur *right* before
# the programme |
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.