code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def area(a, b):
resultado = a * b
return resultado
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def area(a, b):
resultado = a * b
return resultado
def main():
num1 = float(input('INTRODUCE LA BASE: '))
num2 = float(input('INTRODUCE LA ALTURA: '))
print('EL AREA DEL RECTANGULO ES: ', area(num1, num2))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def area(a, b):
resultado = a * b
return resultado
def main():
num1 = float(input('INTRODUCE LA BASE: '))
num2 = float(input('INTRODUCE LA ALTURA: '))
print('EL AREA DEL RECTANGULO ES: ', area(num1, num2))
pass
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
def area (a, b):
resultado = a * b
return (resultado)
def main():
#escribe tu código abajo de esta línea
num1 = float(input("INTRODUCE LA BASE: "))
num2 = float(input("INTRODUCE LA ALTURA: "))
print ("EL AREA DEL RECTANGULO ES: ", area (num1, num2))
pass
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "282dbdb3a8d9ed914e8ca5c7fa74d2873920e18c",
"index": 7308,
"step-1": "<mask token>\n",
"step-2": "def area(a, b):\n resultado = a * b\n return resultado\n\n\n<mask token>\n",
"step-3": "def area(a, b):\n resultado = a * b\n return resultado\n\n\ndef main():\n num1 = float(input('INTRODUCE LA BASE: '))\n num2 = float(input('INTRODUCE LA ALTURA: '))\n print('EL AREA DEL RECTANGULO ES: ', area(num1, num2))\n\n\n<mask token>\n",
"step-4": "def area(a, b):\n resultado = a * b\n return resultado\n\n\ndef main():\n num1 = float(input('INTRODUCE LA BASE: '))\n num2 = float(input('INTRODUCE LA ALTURA: '))\n print('EL AREA DEL RECTANGULO ES: ', area(num1, num2))\n\n\npass\nif __name__ == '__main__':\n main()\n",
"step-5": "def area (a, b):\n resultado = a * b \n return (resultado)\n\ndef main():\n #escribe tu código abajo de esta línea\n num1 = float(input(\"INTRODUCE LA BASE: \"))\n num2 = float(input(\"INTRODUCE LA ALTURA: \"))\n\n print (\"EL AREA DEL RECTANGULO ES: \", area (num1, num2))\n\npass\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class TestKOrderStatistic(unittest.TestCase):
def test_find(self):
for a, k, ans in test_case_find:
self.assertEqual(k_order_statistic(a, k), ans)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestKOrderStatistic(unittest.TestCase):
def test_find(self):
for a, k, ans in test_case_find:
self.assertEqual(k_order_statistic(a, k), ans)
def test_values(self):
for a in test_case_value:
self.assertRaises(TypeError, k_order_statistic, (a, random.
randint(0, 10)))
for a, k, ans in test_case_find:
self.assertRaises(TypeError, k_order_statistic, (a, k + len(a)))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
test_case_find = [([0], 0, 0), ([-1, -1, -1, -1], 3, -1), ([-1, -1, -1, -1],
1, -1), ([-1, 0, 3, -10], 3, 3), ([-1, -2, -3, -4, -5], 0, -5), ([1, 2,
3, 4, 5], 1, 2), ([True, False, True], 2, True), ([sys.maxsize], 0, sys
.maxsize), ([True, 10], 1, 10)]
test_case_value = [[], [1, 'a', None, True], ['asd', True]]
class TestKOrderStatistic(unittest.TestCase):
def test_find(self):
for a, k, ans in test_case_find:
self.assertEqual(k_order_statistic(a, k), ans)
def test_values(self):
for a in test_case_value:
self.assertRaises(TypeError, k_order_statistic, (a, random.
randint(0, 10)))
for a, k, ans in test_case_find:
self.assertRaises(TypeError, k_order_statistic, (a, k + len(a)))
<|reserved_special_token_1|>
import sys
import unittest
import random
from k_order_statistic import k_order_statistic
test_case_find = [([0], 0, 0), ([-1, -1, -1, -1], 3, -1), ([-1, -1, -1, -1],
1, -1), ([-1, 0, 3, -10], 3, 3), ([-1, -2, -3, -4, -5], 0, -5), ([1, 2,
3, 4, 5], 1, 2), ([True, False, True], 2, True), ([sys.maxsize], 0, sys
.maxsize), ([True, 10], 1, 10)]
test_case_value = [[], [1, 'a', None, True], ['asd', True]]
class TestKOrderStatistic(unittest.TestCase):
def test_find(self):
for a, k, ans in test_case_find:
self.assertEqual(k_order_statistic(a, k), ans)
def test_values(self):
for a in test_case_value:
self.assertRaises(TypeError, k_order_statistic, (a, random.
randint(0, 10)))
for a, k, ans in test_case_find:
self.assertRaises(TypeError, k_order_statistic, (a, k + len(a)))
|
flexible
|
{
"blob_id": "b93cd5ad957da37b1a4cca1d465a67723110e926",
"index": 2813,
"step-1": "<mask token>\n\n\nclass TestKOrderStatistic(unittest.TestCase):\n\n def test_find(self):\n for a, k, ans in test_case_find:\n self.assertEqual(k_order_statistic(a, k), ans)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestKOrderStatistic(unittest.TestCase):\n\n def test_find(self):\n for a, k, ans in test_case_find:\n self.assertEqual(k_order_statistic(a, k), ans)\n\n def test_values(self):\n for a in test_case_value:\n self.assertRaises(TypeError, k_order_statistic, (a, random.\n randint(0, 10)))\n for a, k, ans in test_case_find:\n self.assertRaises(TypeError, k_order_statistic, (a, k + len(a)))\n",
"step-3": "<mask token>\ntest_case_find = [([0], 0, 0), ([-1, -1, -1, -1], 3, -1), ([-1, -1, -1, -1],\n 1, -1), ([-1, 0, 3, -10], 3, 3), ([-1, -2, -3, -4, -5], 0, -5), ([1, 2,\n 3, 4, 5], 1, 2), ([True, False, True], 2, True), ([sys.maxsize], 0, sys\n .maxsize), ([True, 10], 1, 10)]\ntest_case_value = [[], [1, 'a', None, True], ['asd', True]]\n\n\nclass TestKOrderStatistic(unittest.TestCase):\n\n def test_find(self):\n for a, k, ans in test_case_find:\n self.assertEqual(k_order_statistic(a, k), ans)\n\n def test_values(self):\n for a in test_case_value:\n self.assertRaises(TypeError, k_order_statistic, (a, random.\n randint(0, 10)))\n for a, k, ans in test_case_find:\n self.assertRaises(TypeError, k_order_statistic, (a, k + len(a)))\n",
"step-4": "import sys\nimport unittest\nimport random\nfrom k_order_statistic import k_order_statistic\ntest_case_find = [([0], 0, 0), ([-1, -1, -1, -1], 3, -1), ([-1, -1, -1, -1],\n 1, -1), ([-1, 0, 3, -10], 3, 3), ([-1, -2, -3, -4, -5], 0, -5), ([1, 2,\n 3, 4, 5], 1, 2), ([True, False, True], 2, True), ([sys.maxsize], 0, sys\n .maxsize), ([True, 10], 1, 10)]\ntest_case_value = [[], [1, 'a', None, True], ['asd', True]]\n\n\nclass TestKOrderStatistic(unittest.TestCase):\n\n def test_find(self):\n for a, k, ans in test_case_find:\n self.assertEqual(k_order_statistic(a, k), ans)\n\n def test_values(self):\n for a in test_case_value:\n self.assertRaises(TypeError, k_order_statistic, (a, random.\n randint(0, 10)))\n for a, k, ans in test_case_find:\n self.assertRaises(TypeError, k_order_statistic, (a, k + len(a)))\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
# Named Entity Recognition on Medical Data (BIO Tagging)
# Bio-Word2Vec Embeddings Source and Reference: https://github.com/ncbi-nlp/BioWordVec
import os
import re
import torch
import pickle
from torch import nn
from torch import optim
import torch.nn.functional as F
import numpy as np
import random
from DNC.dnc import DNC_Module # Importing DNC Implementation
class task_NER():
def __init__(self):
self.name = "NER_task_bio"
# Controller Params
self.controller_size = 128
self.controller_layers = 1
# Head Params
self.num_read_heads = 1
self.num_write_heads = 1
# Processor Params
self.num_inputs = 200 # Length of Embeddings
self.num_outputs = 7 # Class size
# Memory Params
self.memory_N = 128
self.memory_M = 128
# Training Params
self.num_batches = -1
self.save_batch = 5 # Saving model after every save_batch number of batches
self.batch_size = 10
self.num_epoch = 4
# Optimizer Params
self.adam_lr = 1e-4
self.adam_betas = (0.9, 0.999)
self.adam_eps = 1e-8
# Handles
self.machine = None
self.loss = None
self.optimizer = None
# Class Dictionaries
self.labelDict = None # Label Dictionary - Labels to Index
self.reverseDict = None # Inverse Label Dictionary - Index to Labels
# File Paths
self.concept_path_train = "../medical_data/train_data/concept" # Path to train concept files
self.text_path_train = "../medical_data/train_data/txt" # Path to train text summaries
self.concept_path_test = "../medical_data/test_data/concept" # Path to test concept files
self.text_path_test = "../medical_data/test_data/txt" # Path to test text summaries
self.save_path = "../medical_data/cleaned_files" # Save path
self.embed_dic_path = "../medical_data/embeddings/bio_embedding_dictionary.dat" # Word2Vec embeddings Dictionary path
self.random_vec = "../medical_data/embeddings/random_vec.dat" # Path to random embedding (Used to create new vectors)
self.model_path = "../saved_models/" # Stores Trained Models
# Miscellaneous
self.padding_symbol = np.full((self.num_inputs), 0.01) # Padding symbol embedding
def get_task_name(self):
return self.name
def init_dnc(self):
self.machine = DNC_Module(self.num_inputs, self.num_outputs, self.controller_size, self.controller_layers, self.num_read_heads, self.num_write_heads, self.memory_N, self.memory_M)
def init_loss(self):
self.loss = nn.CrossEntropyLoss(reduction = 'mean') # Cross Entropy Loss -> Softmax Activation + Cross Entropy Loss
def init_optimizer(self):
self.optimizer = optim.Adam(self.machine.parameters(), lr = self.adam_lr, betas = self.adam_betas, eps = self.adam_eps)
def calc_loss(self, Y_pred, Y):
# Y: dim -> (sequence_len x batch_size)
# Y_pred: dim -> (sequence_len x batch_size x num_outputs)
loss_vec = torch.empty(Y.shape[0], dtype=torch.float32)
for i in range(Y_pred.shape[0]):
loss_vec[i] = self.loss(Y_pred[i], Y[i])
return torch.mean(loss_vec)
def calc_cost(self, Y_pred, Y): # Calculates % Cost
# Y: dim -> (sequence_len x batch_size)
# Y_pred: dim -> (sequence_len x batch_size x sequence_width)
'''
Note:
1). For considering an prediction to be True Positive, prediction must match completely with labels entity (not partially). Else it is False Negative.
2). For considering a prediction to be False Positive, it must be full entity (BIII) and not completely match the label entity.
'''
# Stores correct class labels for each entity type
class_bag = {}
class_bag['problem'] = 0 # Total labels
class_bag['test'] = 0 # Total labels
class_bag['treatment'] = 0 # Total labels
class_bag['problem_cor'] = 0 # Correctly classified labels
class_bag['test_cor'] = 0 # Correctly classified labels
class_bag['treatment_cor'] = 0 # Correctly classified labels
class_bag['problem_fp'] = 0 # False positive classified labels
class_bag['test_fp'] = 0 # False positive classified labels
class_bag['treatment_fp'] = 0 # False positive classified labels
pred_class = np.transpose(F.softmax(Y_pred, dim=2).max(2)[1].numpy()).reshape(-1) # Predicted class. dim -> (sequence_len*batch_size)
Y = np.transpose(Y.numpy()).reshape(-1) # Converting to NumPy Array and linearizing
cor_pred = (Y == pred_class).astype(np.int) # Comparing Prediction and Labels to find correct predictions
class_bag['word_pred_acc'] = np.divide(np.sum(cor_pred), cor_pred.size)*100.0 # % Accuracy of Correctly Predicted Words (Not Entities)
# Getting the beginning index of all the entities
beg_idx = list(np.where(np.in1d(Y, [0, 2, 4]))[0])
# Getting the end index of all the entities (All the Index previous of 'Other'/'Begin' and not equal to 'Other')
target = np.where(np.in1d(Y, [0, 2, 4, 6]))[0] - 1
if target[0] == -1:
target = target[1:]
end_idx = list(target[np.where(Y[target] != 6)[0]])
if Y[-1] != 6:
end_idx.append(Y.size-1)
assert len(beg_idx) == len(end_idx) # Sanity Check
class_bag['total'] = len(beg_idx) # Total number of Entities
# Counting Entities
sum_vec = np.cumsum(cor_pred) # Calculates cumulative summation of predicted vector
for b, e in zip(beg_idx, end_idx):
idx_range = e-b+1 # Entity span
sum_range = sum_vec[e]-sum_vec[b]+1 # Count of entity elements which are predicted correctly
lab = self.reverseDict[Y[b]][2:] # Extracting entity type (Problem, Test or Treatment)
class_bag[lab] = class_bag[lab]+1 # Getting count of each entities
if sum_range == idx_range: # +1 if entity is classified correctly
class_bag[lab+'_cor'] = class_bag[lab+'_cor']+1
# Detecting False Positives
# Getting the beginning index of all the entities in Predicted Results
beg_idx_p = list(np.where(np.in1d(pred_class, [0, 2, 4]))[0])
for b in beg_idx_p:
if cor_pred[b] == 0:
lab = self.reverseDict[pred_class[b]][2:]
class_bag[lab+'_fp'] = class_bag[lab+'_fp']+1
return class_bag
def print_word(self, token_class): # Prints the Class name from Class number
word = self.reverseDict[token_class]
print(word + "\n")
def clip_grads(self): # Clipping gradients for stability
"""Gradient clipping to the range [10, 10]."""
parameters = list(filter(lambda p: p.grad is not None, self.machine.parameters()))
for p in parameters:
p.grad.data.clamp_(-10, 10)
def initialize_labels(self): # Initializing label dictionaries for Labels->IDX and IDX->Labels
self.labelDict = {} # Label Dictionary - Labels to Index
self.reverseDict = {} # Inverse Label Dictionary - Index to Labels
# Using BIEOS labelling scheme
self.labelDict['b-problem'] = 0 # Problem - Beginning
self.labelDict['i-problem'] = 1 # Problem - Inside
self.labelDict['b-test'] = 2 # Test - Beginning
self.labelDict['i-test'] = 3 # Test - Inside
self.labelDict['b-treatment'] = 4 # Treatment - Beginning
self.labelDict['i-treatment'] = 5 # Treatment - Inside
self.labelDict['o'] = 6 # Outside Token
# Making Inverse Label Dictionary
for k in self.labelDict.keys():
self.reverseDict[self.labelDict[k]] = k
# Saving the diictionaries into a file
self.save_data([self.labelDict, self.reverseDict], os.path.join(self.save_path, "label_dicts_bio.dat"))
def parse_concepts(self, file_path): # Parses the concept file to extract concepts and labels
conceptList = [] # Stores all the Concept in the File
f = open(file_path) # Opening and reading a concept file
content = f.readlines() # Reading all the lines in the concept file
f.close() # Closing the concept file
for x in content: # Reading each line in the concept file
dic = {}
# Cleaning and extracting the entities, labels and their positions in the corresponding medical summaries
x = re.sub('\n', ' ', x)
x = re.sub(r'\ +', ' ', x)
x = x.strip().split('||')
temp1, label = x[0].split(' '), x[1].split('=')[1][1:-1]
temp1[0] = temp1[0][3:]
temp1[-3] = temp1[-3][0:-1]
entity = temp1[0:-2]
if len(entity) >= 1:
lab = ['i']*len(entity)
lab[0] = 'b'
lab = [l+"-"+label for l in lab]
else:
print("Data in File: " + file_path + ", not in expected format..")
exit()
noLab = [self.labelDict[l] for l in lab]
sLine, sCol = int(temp1[-2].split(":")[0]), int(temp1[-2].split(":")[1])
eLine, eCol = int(temp1[-1].split(":")[0]), int(temp1[-1].split(":")[1])
'''
# Printing the information
print("------------------------------------------------------------")
print("Entity: " + str(entity))
print("Entity Label: " + label)
print("Labels - BIO form: " + str(lab))
print("Labels Index: " + str(noLab))
print("Start Line: " + str(sLine) + ", Start Column: " + str(sCol))
print("End Line: " + str(eLine) + ", End Column: " + str(eCol))
print("------------------------------------------------------------")
'''
# Storing the information as a dictionary
dic['entity'] = entity # Entity Name (In the form of list of words)
dic['label'] = label # Common Label
dic['BIO_labels'] = lab # List of BIO labels for each word
dic['label_index'] = noLab # Labels in the index form
dic['start_line'] = sLine # Start line of the concept in the corresponding text summaries
dic['start_word_no'] = sCol # Starting word number of the concept in the corresponding start line
dic['end_line'] = eLine # End line of the concept in the corresponding text summaries
dic['end_word_no'] = eCol # Ending word number of the concept in the corresponding end line
# Appending the concept dictionary to the list
conceptList.append(dic)
return conceptList # Returning the all the concepts in the current file in the form of dictionary list
def parse_summary(self, file_path): # Parses the Text summaries
file_lines = [] # Stores the lins of files in the list form
tags = [] # Stores corresponding labels for each word in the file (Default label: 'o' [Outside])
default_label = len(self.labelDict)-1 # default_label is "7" (Corresponding to 'Other' entity)
# counter = 1 # Temporary variable used during print
f = open(file_path) # Opening and reading a concept file
content = f.readlines() # Reading all the lines in the concept file
f.close()
for x in content:
x = re.sub('\n', ' ', x)
x = re.sub(r'\ +', ' ', x)
file_lines.append(x.strip().split(" ")) # Spliting the lines into word list and Appending each of them in the file list
tags.append([default_label]*len(file_lines[-1])) # Assigining the default_label to all the words in a line
'''
# Printing the information
print("------------------------------------------------------------")
print("File Lines No: " + str(counter))
print(file_lines[-1])
print("\nCorresponding labels:")
print(tags[-1])
print("------------------------------------------------------------")
counter += 1
'''
assert len(tags[-1]) == len(file_lines[-1]), "Line length is not matching labels length..." # Sanity Check
return file_lines, tags
def modify_labels(self, conceptList, tags): # Modifies the default labels of each word in text files with the true labels from the concept files
for e in conceptList: # Iterating over all the dictionary elements in the Concept List
if e['start_line'] == e['end_line']: # Checking whether concept is spanning over a single line or multiple line in the summary
tags[e['start_line']-1][e['start_word_no']:e['end_word_no']+1] = e['label_index'][:]
else:
start = e['start_line']
end = e['end_line']
beg = 0
for i in range(start, end+1): # Distributing labels over multiple lines in the text summaries
if i == start:
tags[i-1][e['start_word_no']:] = e['label_index'][0:len(tags[i-1])-e['start_word_no']]
beg = len(tags[i-1])-e['start_word_no']
elif i == end:
tags[i-1][0:e['end_word_no']+1] = e['label_index'][beg:]
else:
tags[i-1][:] = e['label_index'][beg:beg+len(tags[i-1])]
beg = beg+len(tags[i-1])
return tags
def print_data(self, file, file_lines, tags): # Prints the given data
counter = 1
print("\n************ Printing details of the file: " + file + " ************\n")
for x in file_lines:
print("------------------------------------------------------------")
print("File Lines No: " + str(counter))
print(x)
print("\nCorresponding labels:")
print([self.reverseDict[i] for i in tags[counter-1]])
print("\nCorresponding Label Indices:")
print(tags[counter-1])
print("------------------------------------------------------------")
counter += 1
def save_data(self, obj_list, s_path): # Saves the file into the binary file using Pickle
# Note: The 'obj_list' must be a list and none other than that
pickle.dump(tuple(obj_list), open(s_path,'wb'))
def acquire_data(self, task): # Read all the concept files to get concepts and labels, proces them and save them
data = {} # Dictionary to store all the data objects (conceptList, file_lines, tags) each indexed by file name
if task == 'train': # Determining the task type to assign the data path accordingly
t_path = self.text_path_train
c_path = self.concept_path_train
else:
t_path = self.text_path_test
c_path = self.concept_path_test
for f in os.listdir(t_path):
f1 = f.split('.')[0] + ".con"
if os.path.isfile(os.path.join(c_path, f1)):
conceptList = self.parse_concepts(os.path.join(c_path, f1)) # Parsing concepts and labels from the corresponding concept file
file_lines, tags = self.parse_summary(os.path.join(t_path, f)) # Parses the document summaries to get the written notes
tags = self.modify_labels(conceptList, tags) # Modifies he default labels to each word with the true labels from the concept files
data[f1] = [conceptList, file_lines, tags] # Storing each object in dictionary
# self.print_data(f, file_lines, tags) # Printing the details
return data
def structure_data(self, data_dict): # Structures the data in proper trainable form
final_line_list = [] # Stores words of all the files in separate sub-lists
final_tag_list = [] # Stores tags of all the files in separate sub-lists
for k in data_dict.keys(): # Extracting data from each pre-processed file in dictionary
file_lines = data_dict[k][1] # Extracting story
tags = data_dict[k][2] # Extracting corresponding labels
# Creating empty lists
temp1 = []
temp2 = []
# Merging all the lines in file into a single list. Same for corresponding labels
for i in range(len(file_lines)):
temp1.extend(file_lines[i])
temp2.extend(tags[i])
assert len(temp1) == len(temp2), "Word length not matching Label length for story in " + str(k) # Sanity Check
final_line_list.append(temp1)
final_tag_list.append(temp2)
assert len(final_line_list) == len(final_tag_list), "Number of stories not matching number of labels list" # Sanity Check
return final_line_list, final_tag_list
def padding(self, line_list, tag_list): # Pads stories with padding symbol to make them of same length
diff = 0
max_len = 0
outside_class = len(self.labelDict)-1 # Classifying padding symbol as "outside" term
# Calculating Max Summary Length
for i in range(len(line_list)):
if len(line_list[i])>max_len:
max_len = len(line_list[i])
for i in range(len(line_list)):
diff = max_len - len(line_list[i])
line_list[i].extend([self.padding_symbol]*diff)
tag_list[i].extend([outside_class]*diff)
assert (len(line_list[i]) == max_len) and (len(line_list[i]) == len(tag_list[i])), "Padding unsuccessful" # Sanity check
return np.asarray(line_list), np.asarray(tag_list) # Making NumPy array of size (batch_size x story_length x word size) and (batch_size x story_length x 1) respectively
def embed_input(self, line_list): # Converts words to vector embeddings
final_list = [] # Stores embedded words
summary = None # Temp variable
word = None # Temp variable
temp = None # Temp variable
embed_dic = pickle.load(open(self.embed_dic_path, 'rb')) # Loading word2vec dictionary using Pickle
r_embed = pickle.load(open(self.random_vec, 'rb')) # Loading Random embedding
for i in range(len(line_list)): # Iterating over all the summaries
summary = line_list[i]
final_list.append([]) # Reserving space for curent summary
for j in range(len(summary)):
word = summary[j].lower()
if word in embed_dic: # Checking for existence of word in dictionary
final_list[-1].append(embed_dic[word])
else:
temp = r_embed[:] # Copying the values of the list
random.shuffle(temp) # Randomly shuffling the word embedding to make it unique
temp = np.asarray(temp, dtype=np.float32) # Converting to NumPy array
final_list[-1].append(temp)
return final_list
def prepare_data(self, task='train'): # Preparing all the data necessary
line_list, tag_list = None, None
'''
line_list is the list of rows, where each row is a list of all the words in a medical summary
Similar is the case for tag_list, except, it stores labels for each words
'''
if not os.path.exists(self.save_path):
os.mkdir(self.save_path) # Creating a new directory if it does not exist else reading previously saved data
if not os.path.exists(os.path.join(self.save_path, "label_dicts_bio.dat")):
self.initialize_labels() # Initialize label to index dictionaries
else:
self.labelDict, self.reverseDict = pickle.load(open(os.path.join(self.save_path, "label_dicts_bio.dat"), 'rb')) # Loading Label dictionaries
if not os.path.exists(os.path.join(self.save_path, "object_dict_bio_"+str(task)+".dat")):
data_dict = self.acquire_data(task) # Read data from file
line_list, tag_list = self.structure_data(data_dict) # Structures the data into proper form
line_list = self.embed_input(line_list) # Embeds input data (words) into embeddings
self.save_data([line_list, tag_list], os.path.join(self.save_path, "object_dict_bio_"+str(task)+".dat"))
else:
line_list, tag_list = pickle.load(open(os.path.join(self.save_path, "object_dict_bio_"+str(task)+".dat"), 'rb')) # Loading Data dictionary
return line_list, tag_list
def get_data(self, task='train'):
line_list, tag_list = self.prepare_data(task)
# Shuffling stories
story_idx = list(range(0, len(line_list)))
random.shuffle(story_idx)
num_batch = int(len(story_idx)/self.batch_size)
self.num_batches = num_batch
# Out Data
x_out = []
y_out = []
counter = 1
for i in story_idx:
if num_batch<=0:
break
x_out.append(line_list[i])
y_out.append(tag_list[i])
if counter % self.batch_size == 0:
counter = 0
# Padding and converting labels to one hot vectors
x_out_pad, y_out_pad = self.padding(x_out, y_out)
x_out_array = torch.tensor(x_out_pad.swapaxes(0, 1), dtype=torch.float32) # Converting from (batch_size x story_length x word size) to (story_length x batch_size x word size)
y_out_array = torch.tensor(y_out_pad.swapaxes(0, 1), dtype=torch.long) # Converting from (batch_size x story_length x 1) to (story_length x batch_size x 1)
x_out = []
y_out = []
num_batch -= 1
yield (self.num_batches - num_batch), x_out_array, y_out_array
counter += 1
def train_model(self):
# Here, the model is optimized using Cross Entropy Loss.
loss_list = []
seq_length = []
last_batch = 0
# self.load_model(1, 99, 13) # Loading Pre-Trained model to train further
for j in range(self.num_epoch):
for batch_num, X, Y in self.get_data(task='train'):
self.optimizer.zero_grad() # Making old gradients zero before calculating the fresh ones
self.machine.initialization(self.batch_size) # Initializing states
Y_out = torch.empty((X.shape[0], X.shape[1], self.num_outputs), dtype=torch.float32) # dim: (seq_len x batch_size x num_output)
# Feeding the DNC network all the data first and then predicting output
# by giving zero vector as input and previous read states and hidden vector
# and thus training vector this way to give outputs matching the labels
embeddings = self.machine.backward_prediction(X) # Creating embeddings from data for backward calculation
temp_size = X.shape[0]
for i in range(temp_size):
Y_out[i, :, :], _ = self.machine(X[i], embeddings[temp_size-i-1]) # Passing Embeddings from backwards
loss = self.calc_loss(Y_out, Y)
loss.backward()
self.clip_grads()
self.optimizer.step()
class_bag = self.calc_cost(Y_out, Y)
corr = class_bag['problem_cor']+class_bag['test_cor']+class_bag['treatment_cor']
tot = class_bag['total']
loss_list += [loss.item()]
seq_length += [Y.shape[0]]
if (batch_num % self.save_batch) == 0:
self.save_model(j, batch_num)
last_batch = batch_num
print("Epoch: " + str(j) + "/" + str(self.num_epoch) + ", Batch: " + str(batch_num) + "/" + str(self.num_batches) + ", Loss: {0:.2f}, ".format(loss.item()) + \
"Batch Accuracy (Entity Prediction): {0:.2f} %, ".format((float(corr)/float(tot))*100.0) + "Batch Accuracy (Word Prediction): {0:.2f} %".format(class_bag['word_pred_acc']))
self.save_model(j, last_batch)
def test_model(self): # Testing the model
correct = 0
total = 0
result_dict = {}
result_dict['total_problem'] = 0 # Total labels in data
result_dict['total_test'] = 0 # Total labels in data
result_dict['total_treatment'] = 0 # Total labels in data
result_dict['correct_problem'] = 0 # Correctly classified labels
result_dict['correct_test'] = 0 # Correctly classified labels
result_dict['correct_treatment'] = 0 # Correctly classified labels
result_dict['false_positive_problem'] = 0 # False Positive labels
result_dict['false_positive_test'] = 0 # False Positive labels
result_dict['false_positive_treatment'] = 0 # False Positive labels
print("\n")
for batch_num, X, Y in self.get_data(task='test'):
self.machine.initialization(self.batch_size) # Initializing states
Y_out = torch.empty((X.shape[0], X.shape[1], self.num_outputs), dtype=torch.float32) # dim: (seq_len x batch_size x num_output)
# Feeding the DNC network all the data first and then predicting output
# by giving zero vector as input and previous read states and hidden vector
# and thus training vector this way to give outputs matching the labels
embeddings = self.machine.backward_prediction(X) # Creating embeddings from data for backward calculation
temp_size = X.shape[0]
for i in range(temp_size):
Y_out[i, :, :], _ = self.machine(X[i], embeddings[temp_size-i-1])
class_bag = self.calc_cost(Y_out, Y)
corr = class_bag['problem_cor']+class_bag['test_cor']+class_bag['treatment_cor']
tot = class_bag['total']
result_dict['total_problem'] = result_dict['total_problem'] + class_bag['problem']
result_dict['total_test'] = result_dict['total_test'] + class_bag['test']
result_dict['total_treatment'] = result_dict['total_treatment'] + class_bag['treatment']
result_dict['correct_problem'] = result_dict['correct_problem'] + class_bag['problem_cor']
result_dict['correct_test'] = result_dict['correct_test'] + class_bag['test_cor']
result_dict['correct_treatment'] = result_dict['correct_treatment'] + class_bag['treatment_cor']
result_dict['false_positive_problem'] = result_dict['false_positive_problem'] + class_bag['problem_fp']
result_dict['false_positive_test'] = result_dict['false_positive_test'] + class_bag['test_fp']
result_dict['false_positive_treatment'] = result_dict['false_positive_treatment'] + class_bag['treatment_fp']
correct += corr
total += tot
print("Test Example " + str(batch_num) + "/" + str(self.num_batches) + " processed, Batch Accuracy: {0:.2f} %, ".format((float(corr)/float(tot))*100.0) + "Batch Accuracy (Word Prediction): {0:.2f} %".format(class_bag['word_pred_acc']))
result_dict['accuracy'] = (float(correct)/float(total))*100.0
result_dict = self.calc_metrics(result_dict)
print("\nOverall Entity Prediction Accuracy: {0:.2f} %".format(result_dict['accuracy']))
return result_dict
def calc_metrics(self, result_dict): # Calculates Certain Metrices
precision_p = float(result_dict['correct_problem'])/float(result_dict['correct_problem'] + result_dict['false_positive_problem']) # Problem Precision
recall_p = float(result_dict['correct_problem'])/float(result_dict['total_problem']) # Problem Recall
precision_ts = float(result_dict['correct_test'])/float(result_dict['correct_test'] + result_dict['false_positive_test']) # Test Precision
recall_ts = float(result_dict['correct_test'])/float(result_dict['total_test']) # Test Recall
precision_tr = float(result_dict['correct_treatment'])/float(result_dict['correct_treatment'] + result_dict['false_positive_treatment']) # Treatment Precision
recall_tr = float(result_dict['correct_treatment'])/float(result_dict['total_treatment']) # Treatment Recall
f_score_p = 2*precision_p*recall_p/(precision_p+recall_p) # Problem F1 Score
f_score_ts = 2*precision_ts*recall_ts/(precision_ts+recall_ts) # Test F1 Score
f_score_tr = 2*precision_tr*recall_tr/(precision_tr+recall_tr) # Treatment F1 Score
result_dict['problem_precision'] = precision_p
result_dict['problem_recall'] = recall_p
result_dict['problem_f1'] = f_score_p
result_dict['test_precision'] = precision_ts
result_dict['test_recall'] = recall_ts
result_dict['test_f1'] = f_score_ts
result_dict['treatment_precision'] = precision_tr
result_dict['treatment_recall'] = recall_tr
result_dict['treatment_f1'] = f_score_tr
result_dict['macro_average_f1'] = (f_score_p + f_score_ts + f_score_tr)/3.0 # Macro Average F1 Score
# Micro Average F1 Score
correct_sum = result_dict['correct_problem'] + result_dict['correct_test'] + result_dict['correct_treatment']
fp_sum = result_dict['false_positive_problem'] + result_dict['false_positive_test'] + result_dict['false_positive_treatment']
total_sum = result_dict['total_problem'] + result_dict['total_test'] + result_dict['total_treatment']
precision_avg = float(correct_sum)/float(correct_sum + fp_sum)
recall_avg = float(correct_sum)/float(total_sum)
result_dict['micro_average_f1'] = 2*precision_avg*recall_avg/(precision_avg+recall_avg)
return result_dict
def save_model(self, curr_epoch, curr_batch):
# Here 'start_epoch' and 'start_batch' params below are the 'epoch' and 'batch' number from which to start training after next model loading
# Note: It is recommended to start from the 'start_epoch' and not 'start_epoch' + 'start_batch', because batches are formed randomly
if not os.path.exists(os.path.join(self.model_path, self.name)):
os.mkdir(os.path.join(self.model_path, self.name))
state_dic = {'task_name': self.name, 'start_epoch': curr_epoch + 1, 'start_batch': curr_batch + 1, 'state_dict': self.machine.state_dict(), 'optimizer_dic' : self.optimizer.state_dict()}
filename = self.model_path + self.name + "/" + self.name + "_" + str(curr_epoch) + "_" + str(curr_batch) + "_saved_model.pth.tar"
torch.save(state_dic, filename)
def load_model(self, option, epoch, batch):
path = self.model_path + self.name + "/" + self.name + "_" + str(epoch) + "_" + str(batch) + "_saved_model.pth.tar"
if option == 1: # Loading for training
checkpoint = torch.load(path)
self.machine.load_state_dict(checkpoint['state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer_dic'])
else: # Loading for testing
checkpoint = torch.load(path)
self.machine.load_state_dict(checkpoint['state_dict'])
self.machine.eval()
|
normal
|
{
"blob_id": "eb99def75404bc3b674bcb633714009149f2d50d",
"index": 5097,
"step-1": "<mask token>\n\n\nclass task_NER:\n\n def __init__(self):\n self.name = 'NER_task_bio'\n self.controller_size = 128\n self.controller_layers = 1\n self.num_read_heads = 1\n self.num_write_heads = 1\n self.num_inputs = 200\n self.num_outputs = 7\n self.memory_N = 128\n self.memory_M = 128\n self.num_batches = -1\n self.save_batch = 5\n self.batch_size = 10\n self.num_epoch = 4\n self.adam_lr = 0.0001\n self.adam_betas = 0.9, 0.999\n self.adam_eps = 1e-08\n self.machine = None\n self.loss = None\n self.optimizer = None\n self.labelDict = None\n self.reverseDict = None\n self.concept_path_train = '../medical_data/train_data/concept'\n self.text_path_train = '../medical_data/train_data/txt'\n self.concept_path_test = '../medical_data/test_data/concept'\n self.text_path_test = '../medical_data/test_data/txt'\n self.save_path = '../medical_data/cleaned_files'\n self.embed_dic_path = (\n '../medical_data/embeddings/bio_embedding_dictionary.dat')\n self.random_vec = '../medical_data/embeddings/random_vec.dat'\n self.model_path = '../saved_models/'\n self.padding_symbol = np.full(self.num_inputs, 0.01)\n\n def get_task_name(self):\n return self.name\n <mask token>\n\n def init_loss(self):\n self.loss = nn.CrossEntropyLoss(reduction='mean')\n <mask token>\n\n def calc_loss(self, Y_pred, Y):\n loss_vec = torch.empty(Y.shape[0], dtype=torch.float32)\n for i in range(Y_pred.shape[0]):\n loss_vec[i] = self.loss(Y_pred[i], Y[i])\n return torch.mean(loss_vec)\n <mask token>\n\n def print_word(self, token_class):\n word = self.reverseDict[token_class]\n print(word + '\\n')\n <mask token>\n\n def initialize_labels(self):\n self.labelDict = {}\n self.reverseDict = {}\n self.labelDict['b-problem'] = 0\n self.labelDict['i-problem'] = 1\n self.labelDict['b-test'] = 2\n self.labelDict['i-test'] = 3\n self.labelDict['b-treatment'] = 4\n self.labelDict['i-treatment'] = 5\n self.labelDict['o'] = 6\n for k in self.labelDict.keys():\n self.reverseDict[self.labelDict[k]] = k\n 
self.save_data([self.labelDict, self.reverseDict], os.path.join(\n self.save_path, 'label_dicts_bio.dat'))\n <mask token>\n <mask token>\n\n def modify_labels(self, conceptList, tags):\n for e in conceptList:\n if e['start_line'] == e['end_line']:\n tags[e['start_line'] - 1][e['start_word_no']:e[\n 'end_word_no'] + 1] = e['label_index'][:]\n else:\n start = e['start_line']\n end = e['end_line']\n beg = 0\n for i in range(start, end + 1):\n if i == start:\n tags[i - 1][e['start_word_no']:] = e['label_index'][\n 0:len(tags[i - 1]) - e['start_word_no']]\n beg = len(tags[i - 1]) - e['start_word_no']\n elif i == end:\n tags[i - 1][0:e['end_word_no'] + 1] = e['label_index'][\n beg:]\n else:\n tags[i - 1][:] = e['label_index'][beg:beg + len(\n tags[i - 1])]\n beg = beg + len(tags[i - 1])\n return tags\n <mask token>\n\n def save_data(self, obj_list, s_path):\n pickle.dump(tuple(obj_list), open(s_path, 'wb'))\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def get_data(self, task='train'):\n line_list, tag_list = self.prepare_data(task)\n story_idx = list(range(0, len(line_list)))\n random.shuffle(story_idx)\n num_batch = int(len(story_idx) / self.batch_size)\n self.num_batches = num_batch\n x_out = []\n y_out = []\n counter = 1\n for i in story_idx:\n if num_batch <= 0:\n break\n x_out.append(line_list[i])\n y_out.append(tag_list[i])\n if counter % self.batch_size == 0:\n counter = 0\n x_out_pad, y_out_pad = self.padding(x_out, y_out)\n x_out_array = torch.tensor(x_out_pad.swapaxes(0, 1), dtype=\n torch.float32)\n y_out_array = torch.tensor(y_out_pad.swapaxes(0, 1), dtype=\n torch.long)\n x_out = []\n y_out = []\n num_batch -= 1\n yield self.num_batches - num_batch, x_out_array, y_out_array\n counter += 1\n\n def train_model(self):\n loss_list = []\n seq_length = []\n last_batch = 0\n for j in range(self.num_epoch):\n for batch_num, X, Y in self.get_data(task='train'):\n self.optimizer.zero_grad()\n 
self.machine.initialization(self.batch_size)\n Y_out = torch.empty((X.shape[0], X.shape[1], self.\n num_outputs), dtype=torch.float32)\n embeddings = self.machine.backward_prediction(X)\n temp_size = X.shape[0]\n for i in range(temp_size):\n Y_out[i, :, :], _ = self.machine(X[i], embeddings[\n temp_size - i - 1])\n loss = self.calc_loss(Y_out, Y)\n loss.backward()\n self.clip_grads()\n self.optimizer.step()\n class_bag = self.calc_cost(Y_out, Y)\n corr = class_bag['problem_cor'] + class_bag['test_cor'\n ] + class_bag['treatment_cor']\n tot = class_bag['total']\n loss_list += [loss.item()]\n seq_length += [Y.shape[0]]\n if batch_num % self.save_batch == 0:\n self.save_model(j, batch_num)\n last_batch = batch_num\n print('Epoch: ' + str(j) + '/' + str(self.num_epoch) +\n ', Batch: ' + str(batch_num) + '/' + str(self.\n num_batches) + ', Loss: {0:.2f}, '.format(loss.item()) +\n 'Batch Accuracy (Entity Prediction): {0:.2f} %, '.\n format(float(corr) / float(tot) * 100.0) +\n 'Batch Accuracy (Word Prediction): {0:.2f} %'.format(\n class_bag['word_pred_acc']))\n self.save_model(j, last_batch)\n\n def test_model(self):\n correct = 0\n total = 0\n result_dict = {}\n result_dict['total_problem'] = 0\n result_dict['total_test'] = 0\n result_dict['total_treatment'] = 0\n result_dict['correct_problem'] = 0\n result_dict['correct_test'] = 0\n result_dict['correct_treatment'] = 0\n result_dict['false_positive_problem'] = 0\n result_dict['false_positive_test'] = 0\n result_dict['false_positive_treatment'] = 0\n print('\\n')\n for batch_num, X, Y in self.get_data(task='test'):\n self.machine.initialization(self.batch_size)\n Y_out = torch.empty((X.shape[0], X.shape[1], self.num_outputs),\n dtype=torch.float32)\n embeddings = self.machine.backward_prediction(X)\n temp_size = X.shape[0]\n for i in range(temp_size):\n Y_out[i, :, :], _ = self.machine(X[i], embeddings[temp_size -\n i - 1])\n class_bag = self.calc_cost(Y_out, Y)\n corr = class_bag['problem_cor'] + 
class_bag['test_cor'\n ] + class_bag['treatment_cor']\n tot = class_bag['total']\n result_dict['total_problem'] = result_dict['total_problem'\n ] + class_bag['problem']\n result_dict['total_test'] = result_dict['total_test'] + class_bag[\n 'test']\n result_dict['total_treatment'] = result_dict['total_treatment'\n ] + class_bag['treatment']\n result_dict['correct_problem'] = result_dict['correct_problem'\n ] + class_bag['problem_cor']\n result_dict['correct_test'] = result_dict['correct_test'\n ] + class_bag['test_cor']\n result_dict['correct_treatment'] = result_dict['correct_treatment'\n ] + class_bag['treatment_cor']\n result_dict['false_positive_problem'] = result_dict[\n 'false_positive_problem'] + class_bag['problem_fp']\n result_dict['false_positive_test'] = result_dict[\n 'false_positive_test'] + class_bag['test_fp']\n result_dict['false_positive_treatment'] = result_dict[\n 'false_positive_treatment'] + class_bag['treatment_fp']\n correct += corr\n total += tot\n print('Test Example ' + str(batch_num) + '/' + str(self.\n num_batches) + ' processed, Batch Accuracy: {0:.2f} %, '.\n format(float(corr) / float(tot) * 100.0) +\n 'Batch Accuracy (Word Prediction): {0:.2f} %'.format(\n class_bag['word_pred_acc']))\n result_dict['accuracy'] = float(correct) / float(total) * 100.0\n result_dict = self.calc_metrics(result_dict)\n print('\\nOverall Entity Prediction Accuracy: {0:.2f} %'.format(\n result_dict['accuracy']))\n return result_dict\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass task_NER:\n\n def __init__(self):\n self.name = 'NER_task_bio'\n self.controller_size = 128\n self.controller_layers = 1\n self.num_read_heads = 1\n self.num_write_heads = 1\n self.num_inputs = 200\n self.num_outputs = 7\n self.memory_N = 128\n self.memory_M = 128\n self.num_batches = -1\n self.save_batch = 5\n self.batch_size = 10\n self.num_epoch = 4\n self.adam_lr = 0.0001\n self.adam_betas = 0.9, 0.999\n self.adam_eps = 1e-08\n self.machine = None\n self.loss = None\n self.optimizer = None\n self.labelDict = None\n self.reverseDict = None\n self.concept_path_train = '../medical_data/train_data/concept'\n self.text_path_train = '../medical_data/train_data/txt'\n self.concept_path_test = '../medical_data/test_data/concept'\n self.text_path_test = '../medical_data/test_data/txt'\n self.save_path = '../medical_data/cleaned_files'\n self.embed_dic_path = (\n '../medical_data/embeddings/bio_embedding_dictionary.dat')\n self.random_vec = '../medical_data/embeddings/random_vec.dat'\n self.model_path = '../saved_models/'\n self.padding_symbol = np.full(self.num_inputs, 0.01)\n\n def get_task_name(self):\n return self.name\n <mask token>\n\n def init_loss(self):\n self.loss = nn.CrossEntropyLoss(reduction='mean')\n\n def init_optimizer(self):\n self.optimizer = optim.Adam(self.machine.parameters(), lr=self.\n adam_lr, betas=self.adam_betas, eps=self.adam_eps)\n\n def calc_loss(self, Y_pred, Y):\n loss_vec = torch.empty(Y.shape[0], dtype=torch.float32)\n for i in range(Y_pred.shape[0]):\n loss_vec[i] = self.loss(Y_pred[i], Y[i])\n return torch.mean(loss_vec)\n <mask token>\n\n def print_word(self, token_class):\n word = self.reverseDict[token_class]\n print(word + '\\n')\n\n def clip_grads(self):\n \"\"\"Gradient clipping to the range [10, 10].\"\"\"\n parameters = list(filter(lambda p: p.grad is not None, self.machine\n .parameters()))\n for p in parameters:\n p.grad.data.clamp_(-10, 10)\n\n def initialize_labels(self):\n self.labelDict 
= {}\n self.reverseDict = {}\n self.labelDict['b-problem'] = 0\n self.labelDict['i-problem'] = 1\n self.labelDict['b-test'] = 2\n self.labelDict['i-test'] = 3\n self.labelDict['b-treatment'] = 4\n self.labelDict['i-treatment'] = 5\n self.labelDict['o'] = 6\n for k in self.labelDict.keys():\n self.reverseDict[self.labelDict[k]] = k\n self.save_data([self.labelDict, self.reverseDict], os.path.join(\n self.save_path, 'label_dicts_bio.dat'))\n\n def parse_concepts(self, file_path):\n conceptList = []\n f = open(file_path)\n content = f.readlines()\n f.close()\n for x in content:\n dic = {}\n x = re.sub('\\n', ' ', x)\n x = re.sub('\\\\ +', ' ', x)\n x = x.strip().split('||')\n temp1, label = x[0].split(' '), x[1].split('=')[1][1:-1]\n temp1[0] = temp1[0][3:]\n temp1[-3] = temp1[-3][0:-1]\n entity = temp1[0:-2]\n if len(entity) >= 1:\n lab = ['i'] * len(entity)\n lab[0] = 'b'\n lab = [(l + '-' + label) for l in lab]\n else:\n print('Data in File: ' + file_path +\n ', not in expected format..')\n exit()\n noLab = [self.labelDict[l] for l in lab]\n sLine, sCol = int(temp1[-2].split(':')[0]), int(temp1[-2].split\n (':')[1])\n eLine, eCol = int(temp1[-1].split(':')[0]), int(temp1[-1].split\n (':')[1])\n \"\"\"\n # Printing the information\n print(\"------------------------------------------------------------\")\n print(\"Entity: \" + str(entity))\n print(\"Entity Label: \" + label)\n print(\"Labels - BIO form: \" + str(lab))\n print(\"Labels Index: \" + str(noLab))\n print(\"Start Line: \" + str(sLine) + \", Start Column: \" + str(sCol))\n print(\"End Line: \" + str(eLine) + \", End Column: \" + str(eCol))\n print(\"------------------------------------------------------------\")\n \"\"\"\n dic['entity'] = entity\n dic['label'] = label\n dic['BIO_labels'] = lab\n dic['label_index'] = noLab\n dic['start_line'] = sLine\n dic['start_word_no'] = sCol\n dic['end_line'] = eLine\n dic['end_word_no'] = eCol\n conceptList.append(dic)\n return conceptList\n\n def parse_summary(self, 
file_path):\n file_lines = []\n tags = []\n default_label = len(self.labelDict) - 1\n f = open(file_path)\n content = f.readlines()\n f.close()\n for x in content:\n x = re.sub('\\n', ' ', x)\n x = re.sub('\\\\ +', ' ', x)\n file_lines.append(x.strip().split(' '))\n tags.append([default_label] * len(file_lines[-1]))\n \"\"\"\n # Printing the information\n print(\"------------------------------------------------------------\")\n print(\"File Lines No: \" + str(counter))\n print(file_lines[-1])\n print(\"\nCorresponding labels:\")\n print(tags[-1])\n print(\"------------------------------------------------------------\")\n counter += 1\n \"\"\"\n assert len(tags[-1]) == len(file_lines[-1]\n ), 'Line length is not matching labels length...'\n return file_lines, tags\n\n def modify_labels(self, conceptList, tags):\n for e in conceptList:\n if e['start_line'] == e['end_line']:\n tags[e['start_line'] - 1][e['start_word_no']:e[\n 'end_word_no'] + 1] = e['label_index'][:]\n else:\n start = e['start_line']\n end = e['end_line']\n beg = 0\n for i in range(start, end + 1):\n if i == start:\n tags[i - 1][e['start_word_no']:] = e['label_index'][\n 0:len(tags[i - 1]) - e['start_word_no']]\n beg = len(tags[i - 1]) - e['start_word_no']\n elif i == end:\n tags[i - 1][0:e['end_word_no'] + 1] = e['label_index'][\n beg:]\n else:\n tags[i - 1][:] = e['label_index'][beg:beg + len(\n tags[i - 1])]\n beg = beg + len(tags[i - 1])\n return tags\n\n def print_data(self, file, file_lines, tags):\n counter = 1\n print('\\n************ Printing details of the file: ' + file +\n ' ************\\n')\n for x in file_lines:\n print(\n '------------------------------------------------------------')\n print('File Lines No: ' + str(counter))\n print(x)\n print('\\nCorresponding labels:')\n print([self.reverseDict[i] for i in tags[counter - 1]])\n print('\\nCorresponding Label Indices:')\n print(tags[counter - 1])\n print(\n '------------------------------------------------------------')\n counter += 
1\n\n def save_data(self, obj_list, s_path):\n pickle.dump(tuple(obj_list), open(s_path, 'wb'))\n\n def acquire_data(self, task):\n data = {}\n if task == 'train':\n t_path = self.text_path_train\n c_path = self.concept_path_train\n else:\n t_path = self.text_path_test\n c_path = self.concept_path_test\n for f in os.listdir(t_path):\n f1 = f.split('.')[0] + '.con'\n if os.path.isfile(os.path.join(c_path, f1)):\n conceptList = self.parse_concepts(os.path.join(c_path, f1))\n file_lines, tags = self.parse_summary(os.path.join(t_path, f))\n tags = self.modify_labels(conceptList, tags)\n data[f1] = [conceptList, file_lines, tags]\n return data\n <mask token>\n <mask token>\n <mask token>\n\n def prepare_data(self, task='train'):\n line_list, tag_list = None, None\n \"\"\"\n line_list is the list of rows, where each row is a list of all the words in a medical summary\n Similar is the case for tag_list, except, it stores labels for each words\n \"\"\"\n if not os.path.exists(self.save_path):\n os.mkdir(self.save_path)\n if not os.path.exists(os.path.join(self.save_path,\n 'label_dicts_bio.dat')):\n self.initialize_labels()\n else:\n self.labelDict, self.reverseDict = pickle.load(open(os.path.\n join(self.save_path, 'label_dicts_bio.dat'), 'rb'))\n if not os.path.exists(os.path.join(self.save_path, \n 'object_dict_bio_' + str(task) + '.dat')):\n data_dict = self.acquire_data(task)\n line_list, tag_list = self.structure_data(data_dict)\n line_list = self.embed_input(line_list)\n self.save_data([line_list, tag_list], os.path.join(self.\n save_path, 'object_dict_bio_' + str(task) + '.dat'))\n else:\n line_list, tag_list = pickle.load(open(os.path.join(self.\n save_path, 'object_dict_bio_' + str(task) + '.dat'), 'rb'))\n return line_list, tag_list\n\n def get_data(self, task='train'):\n line_list, tag_list = self.prepare_data(task)\n story_idx = list(range(0, len(line_list)))\n random.shuffle(story_idx)\n num_batch = int(len(story_idx) / self.batch_size)\n self.num_batches = 
num_batch\n x_out = []\n y_out = []\n counter = 1\n for i in story_idx:\n if num_batch <= 0:\n break\n x_out.append(line_list[i])\n y_out.append(tag_list[i])\n if counter % self.batch_size == 0:\n counter = 0\n x_out_pad, y_out_pad = self.padding(x_out, y_out)\n x_out_array = torch.tensor(x_out_pad.swapaxes(0, 1), dtype=\n torch.float32)\n y_out_array = torch.tensor(y_out_pad.swapaxes(0, 1), dtype=\n torch.long)\n x_out = []\n y_out = []\n num_batch -= 1\n yield self.num_batches - num_batch, x_out_array, y_out_array\n counter += 1\n\n def train_model(self):\n loss_list = []\n seq_length = []\n last_batch = 0\n for j in range(self.num_epoch):\n for batch_num, X, Y in self.get_data(task='train'):\n self.optimizer.zero_grad()\n self.machine.initialization(self.batch_size)\n Y_out = torch.empty((X.shape[0], X.shape[1], self.\n num_outputs), dtype=torch.float32)\n embeddings = self.machine.backward_prediction(X)\n temp_size = X.shape[0]\n for i in range(temp_size):\n Y_out[i, :, :], _ = self.machine(X[i], embeddings[\n temp_size - i - 1])\n loss = self.calc_loss(Y_out, Y)\n loss.backward()\n self.clip_grads()\n self.optimizer.step()\n class_bag = self.calc_cost(Y_out, Y)\n corr = class_bag['problem_cor'] + class_bag['test_cor'\n ] + class_bag['treatment_cor']\n tot = class_bag['total']\n loss_list += [loss.item()]\n seq_length += [Y.shape[0]]\n if batch_num % self.save_batch == 0:\n self.save_model(j, batch_num)\n last_batch = batch_num\n print('Epoch: ' + str(j) + '/' + str(self.num_epoch) +\n ', Batch: ' + str(batch_num) + '/' + str(self.\n num_batches) + ', Loss: {0:.2f}, '.format(loss.item()) +\n 'Batch Accuracy (Entity Prediction): {0:.2f} %, '.\n format(float(corr) / float(tot) * 100.0) +\n 'Batch Accuracy (Word Prediction): {0:.2f} %'.format(\n class_bag['word_pred_acc']))\n self.save_model(j, last_batch)\n\n def test_model(self):\n correct = 0\n total = 0\n result_dict = {}\n result_dict['total_problem'] = 0\n result_dict['total_test'] = 0\n 
result_dict['total_treatment'] = 0\n result_dict['correct_problem'] = 0\n result_dict['correct_test'] = 0\n result_dict['correct_treatment'] = 0\n result_dict['false_positive_problem'] = 0\n result_dict['false_positive_test'] = 0\n result_dict['false_positive_treatment'] = 0\n print('\\n')\n for batch_num, X, Y in self.get_data(task='test'):\n self.machine.initialization(self.batch_size)\n Y_out = torch.empty((X.shape[0], X.shape[1], self.num_outputs),\n dtype=torch.float32)\n embeddings = self.machine.backward_prediction(X)\n temp_size = X.shape[0]\n for i in range(temp_size):\n Y_out[i, :, :], _ = self.machine(X[i], embeddings[temp_size -\n i - 1])\n class_bag = self.calc_cost(Y_out, Y)\n corr = class_bag['problem_cor'] + class_bag['test_cor'\n ] + class_bag['treatment_cor']\n tot = class_bag['total']\n result_dict['total_problem'] = result_dict['total_problem'\n ] + class_bag['problem']\n result_dict['total_test'] = result_dict['total_test'] + class_bag[\n 'test']\n result_dict['total_treatment'] = result_dict['total_treatment'\n ] + class_bag['treatment']\n result_dict['correct_problem'] = result_dict['correct_problem'\n ] + class_bag['problem_cor']\n result_dict['correct_test'] = result_dict['correct_test'\n ] + class_bag['test_cor']\n result_dict['correct_treatment'] = result_dict['correct_treatment'\n ] + class_bag['treatment_cor']\n result_dict['false_positive_problem'] = result_dict[\n 'false_positive_problem'] + class_bag['problem_fp']\n result_dict['false_positive_test'] = result_dict[\n 'false_positive_test'] + class_bag['test_fp']\n result_dict['false_positive_treatment'] = result_dict[\n 'false_positive_treatment'] + class_bag['treatment_fp']\n correct += corr\n total += tot\n print('Test Example ' + str(batch_num) + '/' + str(self.\n num_batches) + ' processed, Batch Accuracy: {0:.2f} %, '.\n format(float(corr) / float(tot) * 100.0) +\n 'Batch Accuracy (Word Prediction): {0:.2f} %'.format(\n class_bag['word_pred_acc']))\n result_dict['accuracy'] = 
float(correct) / float(total) * 100.0\n result_dict = self.calc_metrics(result_dict)\n print('\\nOverall Entity Prediction Accuracy: {0:.2f} %'.format(\n result_dict['accuracy']))\n return result_dict\n <mask token>\n <mask token>\n\n def load_model(self, option, epoch, batch):\n path = self.model_path + self.name + '/' + self.name + '_' + str(epoch\n ) + '_' + str(batch) + '_saved_model.pth.tar'\n if option == 1:\n checkpoint = torch.load(path)\n self.machine.load_state_dict(checkpoint['state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer_dic'])\n else:\n checkpoint = torch.load(path)\n self.machine.load_state_dict(checkpoint['state_dict'])\n self.machine.eval()\n",
"step-3": "<mask token>\n\n\nclass task_NER:\n\n def __init__(self):\n self.name = 'NER_task_bio'\n self.controller_size = 128\n self.controller_layers = 1\n self.num_read_heads = 1\n self.num_write_heads = 1\n self.num_inputs = 200\n self.num_outputs = 7\n self.memory_N = 128\n self.memory_M = 128\n self.num_batches = -1\n self.save_batch = 5\n self.batch_size = 10\n self.num_epoch = 4\n self.adam_lr = 0.0001\n self.adam_betas = 0.9, 0.999\n self.adam_eps = 1e-08\n self.machine = None\n self.loss = None\n self.optimizer = None\n self.labelDict = None\n self.reverseDict = None\n self.concept_path_train = '../medical_data/train_data/concept'\n self.text_path_train = '../medical_data/train_data/txt'\n self.concept_path_test = '../medical_data/test_data/concept'\n self.text_path_test = '../medical_data/test_data/txt'\n self.save_path = '../medical_data/cleaned_files'\n self.embed_dic_path = (\n '../medical_data/embeddings/bio_embedding_dictionary.dat')\n self.random_vec = '../medical_data/embeddings/random_vec.dat'\n self.model_path = '../saved_models/'\n self.padding_symbol = np.full(self.num_inputs, 0.01)\n\n def get_task_name(self):\n return self.name\n\n def init_dnc(self):\n self.machine = DNC_Module(self.num_inputs, self.num_outputs, self.\n controller_size, self.controller_layers, self.num_read_heads,\n self.num_write_heads, self.memory_N, self.memory_M)\n\n def init_loss(self):\n self.loss = nn.CrossEntropyLoss(reduction='mean')\n\n def init_optimizer(self):\n self.optimizer = optim.Adam(self.machine.parameters(), lr=self.\n adam_lr, betas=self.adam_betas, eps=self.adam_eps)\n\n def calc_loss(self, Y_pred, Y):\n loss_vec = torch.empty(Y.shape[0], dtype=torch.float32)\n for i in range(Y_pred.shape[0]):\n loss_vec[i] = self.loss(Y_pred[i], Y[i])\n return torch.mean(loss_vec)\n\n def calc_cost(self, Y_pred, Y):\n \"\"\"\n Note: \n 1). For considering an prediction to be True Positive, prediction must match completely with labels entity (not partially). 
Else it is False Negative.\n 2). For considering a prediction to be False Positive, it must be full entity (BIII) and not completely match the label entity.\n \"\"\"\n class_bag = {}\n class_bag['problem'] = 0\n class_bag['test'] = 0\n class_bag['treatment'] = 0\n class_bag['problem_cor'] = 0\n class_bag['test_cor'] = 0\n class_bag['treatment_cor'] = 0\n class_bag['problem_fp'] = 0\n class_bag['test_fp'] = 0\n class_bag['treatment_fp'] = 0\n pred_class = np.transpose(F.softmax(Y_pred, dim=2).max(2)[1].numpy()\n ).reshape(-1)\n Y = np.transpose(Y.numpy()).reshape(-1)\n cor_pred = (Y == pred_class).astype(np.int)\n class_bag['word_pred_acc'] = np.divide(np.sum(cor_pred), cor_pred.size\n ) * 100.0\n beg_idx = list(np.where(np.in1d(Y, [0, 2, 4]))[0])\n target = np.where(np.in1d(Y, [0, 2, 4, 6]))[0] - 1\n if target[0] == -1:\n target = target[1:]\n end_idx = list(target[np.where(Y[target] != 6)[0]])\n if Y[-1] != 6:\n end_idx.append(Y.size - 1)\n assert len(beg_idx) == len(end_idx)\n class_bag['total'] = len(beg_idx)\n sum_vec = np.cumsum(cor_pred)\n for b, e in zip(beg_idx, end_idx):\n idx_range = e - b + 1\n sum_range = sum_vec[e] - sum_vec[b] + 1\n lab = self.reverseDict[Y[b]][2:]\n class_bag[lab] = class_bag[lab] + 1\n if sum_range == idx_range:\n class_bag[lab + '_cor'] = class_bag[lab + '_cor'] + 1\n beg_idx_p = list(np.where(np.in1d(pred_class, [0, 2, 4]))[0])\n for b in beg_idx_p:\n if cor_pred[b] == 0:\n lab = self.reverseDict[pred_class[b]][2:]\n class_bag[lab + '_fp'] = class_bag[lab + '_fp'] + 1\n return class_bag\n\n def print_word(self, token_class):\n word = self.reverseDict[token_class]\n print(word + '\\n')\n\n def clip_grads(self):\n \"\"\"Gradient clipping to the range [10, 10].\"\"\"\n parameters = list(filter(lambda p: p.grad is not None, self.machine\n .parameters()))\n for p in parameters:\n p.grad.data.clamp_(-10, 10)\n\n def initialize_labels(self):\n self.labelDict = {}\n self.reverseDict = {}\n self.labelDict['b-problem'] = 0\n 
self.labelDict['i-problem'] = 1\n self.labelDict['b-test'] = 2\n self.labelDict['i-test'] = 3\n self.labelDict['b-treatment'] = 4\n self.labelDict['i-treatment'] = 5\n self.labelDict['o'] = 6\n for k in self.labelDict.keys():\n self.reverseDict[self.labelDict[k]] = k\n self.save_data([self.labelDict, self.reverseDict], os.path.join(\n self.save_path, 'label_dicts_bio.dat'))\n\n def parse_concepts(self, file_path):\n conceptList = []\n f = open(file_path)\n content = f.readlines()\n f.close()\n for x in content:\n dic = {}\n x = re.sub('\\n', ' ', x)\n x = re.sub('\\\\ +', ' ', x)\n x = x.strip().split('||')\n temp1, label = x[0].split(' '), x[1].split('=')[1][1:-1]\n temp1[0] = temp1[0][3:]\n temp1[-3] = temp1[-3][0:-1]\n entity = temp1[0:-2]\n if len(entity) >= 1:\n lab = ['i'] * len(entity)\n lab[0] = 'b'\n lab = [(l + '-' + label) for l in lab]\n else:\n print('Data in File: ' + file_path +\n ', not in expected format..')\n exit()\n noLab = [self.labelDict[l] for l in lab]\n sLine, sCol = int(temp1[-2].split(':')[0]), int(temp1[-2].split\n (':')[1])\n eLine, eCol = int(temp1[-1].split(':')[0]), int(temp1[-1].split\n (':')[1])\n \"\"\"\n # Printing the information\n print(\"------------------------------------------------------------\")\n print(\"Entity: \" + str(entity))\n print(\"Entity Label: \" + label)\n print(\"Labels - BIO form: \" + str(lab))\n print(\"Labels Index: \" + str(noLab))\n print(\"Start Line: \" + str(sLine) + \", Start Column: \" + str(sCol))\n print(\"End Line: \" + str(eLine) + \", End Column: \" + str(eCol))\n print(\"------------------------------------------------------------\")\n \"\"\"\n dic['entity'] = entity\n dic['label'] = label\n dic['BIO_labels'] = lab\n dic['label_index'] = noLab\n dic['start_line'] = sLine\n dic['start_word_no'] = sCol\n dic['end_line'] = eLine\n dic['end_word_no'] = eCol\n conceptList.append(dic)\n return conceptList\n\n def parse_summary(self, file_path):\n file_lines = []\n tags = []\n default_label = 
len(self.labelDict) - 1\n f = open(file_path)\n content = f.readlines()\n f.close()\n for x in content:\n x = re.sub('\\n', ' ', x)\n x = re.sub('\\\\ +', ' ', x)\n file_lines.append(x.strip().split(' '))\n tags.append([default_label] * len(file_lines[-1]))\n \"\"\"\n # Printing the information\n print(\"------------------------------------------------------------\")\n print(\"File Lines No: \" + str(counter))\n print(file_lines[-1])\n print(\"\nCorresponding labels:\")\n print(tags[-1])\n print(\"------------------------------------------------------------\")\n counter += 1\n \"\"\"\n assert len(tags[-1]) == len(file_lines[-1]\n ), 'Line length is not matching labels length...'\n return file_lines, tags\n\n def modify_labels(self, conceptList, tags):\n for e in conceptList:\n if e['start_line'] == e['end_line']:\n tags[e['start_line'] - 1][e['start_word_no']:e[\n 'end_word_no'] + 1] = e['label_index'][:]\n else:\n start = e['start_line']\n end = e['end_line']\n beg = 0\n for i in range(start, end + 1):\n if i == start:\n tags[i - 1][e['start_word_no']:] = e['label_index'][\n 0:len(tags[i - 1]) - e['start_word_no']]\n beg = len(tags[i - 1]) - e['start_word_no']\n elif i == end:\n tags[i - 1][0:e['end_word_no'] + 1] = e['label_index'][\n beg:]\n else:\n tags[i - 1][:] = e['label_index'][beg:beg + len(\n tags[i - 1])]\n beg = beg + len(tags[i - 1])\n return tags\n\n def print_data(self, file, file_lines, tags):\n counter = 1\n print('\\n************ Printing details of the file: ' + file +\n ' ************\\n')\n for x in file_lines:\n print(\n '------------------------------------------------------------')\n print('File Lines No: ' + str(counter))\n print(x)\n print('\\nCorresponding labels:')\n print([self.reverseDict[i] for i in tags[counter - 1]])\n print('\\nCorresponding Label Indices:')\n print(tags[counter - 1])\n print(\n '------------------------------------------------------------')\n counter += 1\n\n def save_data(self, obj_list, s_path):\n 
pickle.dump(tuple(obj_list), open(s_path, 'wb'))\n\n def acquire_data(self, task):\n data = {}\n if task == 'train':\n t_path = self.text_path_train\n c_path = self.concept_path_train\n else:\n t_path = self.text_path_test\n c_path = self.concept_path_test\n for f in os.listdir(t_path):\n f1 = f.split('.')[0] + '.con'\n if os.path.isfile(os.path.join(c_path, f1)):\n conceptList = self.parse_concepts(os.path.join(c_path, f1))\n file_lines, tags = self.parse_summary(os.path.join(t_path, f))\n tags = self.modify_labels(conceptList, tags)\n data[f1] = [conceptList, file_lines, tags]\n return data\n\n def structure_data(self, data_dict):\n final_line_list = []\n final_tag_list = []\n for k in data_dict.keys():\n file_lines = data_dict[k][1]\n tags = data_dict[k][2]\n temp1 = []\n temp2 = []\n for i in range(len(file_lines)):\n temp1.extend(file_lines[i])\n temp2.extend(tags[i])\n assert len(temp1) == len(temp2\n ), 'Word length not matching Label length for story in ' + str(\n k)\n final_line_list.append(temp1)\n final_tag_list.append(temp2)\n assert len(final_line_list) == len(final_tag_list\n ), 'Number of stories not matching number of labels list'\n return final_line_list, final_tag_list\n\n def padding(self, line_list, tag_list):\n diff = 0\n max_len = 0\n outside_class = len(self.labelDict) - 1\n for i in range(len(line_list)):\n if len(line_list[i]) > max_len:\n max_len = len(line_list[i])\n for i in range(len(line_list)):\n diff = max_len - len(line_list[i])\n line_list[i].extend([self.padding_symbol] * diff)\n tag_list[i].extend([outside_class] * diff)\n assert len(line_list[i]) == max_len and len(line_list[i]) == len(\n tag_list[i]), 'Padding unsuccessful'\n return np.asarray(line_list), np.asarray(tag_list)\n <mask token>\n\n def prepare_data(self, task='train'):\n line_list, tag_list = None, None\n \"\"\"\n line_list is the list of rows, where each row is a list of all the words in a medical summary\n Similar is the case for tag_list, except, it stores 
labels for each words\n \"\"\"\n if not os.path.exists(self.save_path):\n os.mkdir(self.save_path)\n if not os.path.exists(os.path.join(self.save_path,\n 'label_dicts_bio.dat')):\n self.initialize_labels()\n else:\n self.labelDict, self.reverseDict = pickle.load(open(os.path.\n join(self.save_path, 'label_dicts_bio.dat'), 'rb'))\n if not os.path.exists(os.path.join(self.save_path, \n 'object_dict_bio_' + str(task) + '.dat')):\n data_dict = self.acquire_data(task)\n line_list, tag_list = self.structure_data(data_dict)\n line_list = self.embed_input(line_list)\n self.save_data([line_list, tag_list], os.path.join(self.\n save_path, 'object_dict_bio_' + str(task) + '.dat'))\n else:\n line_list, tag_list = pickle.load(open(os.path.join(self.\n save_path, 'object_dict_bio_' + str(task) + '.dat'), 'rb'))\n return line_list, tag_list\n\n def get_data(self, task='train'):\n line_list, tag_list = self.prepare_data(task)\n story_idx = list(range(0, len(line_list)))\n random.shuffle(story_idx)\n num_batch = int(len(story_idx) / self.batch_size)\n self.num_batches = num_batch\n x_out = []\n y_out = []\n counter = 1\n for i in story_idx:\n if num_batch <= 0:\n break\n x_out.append(line_list[i])\n y_out.append(tag_list[i])\n if counter % self.batch_size == 0:\n counter = 0\n x_out_pad, y_out_pad = self.padding(x_out, y_out)\n x_out_array = torch.tensor(x_out_pad.swapaxes(0, 1), dtype=\n torch.float32)\n y_out_array = torch.tensor(y_out_pad.swapaxes(0, 1), dtype=\n torch.long)\n x_out = []\n y_out = []\n num_batch -= 1\n yield self.num_batches - num_batch, x_out_array, y_out_array\n counter += 1\n\n def train_model(self):\n loss_list = []\n seq_length = []\n last_batch = 0\n for j in range(self.num_epoch):\n for batch_num, X, Y in self.get_data(task='train'):\n self.optimizer.zero_grad()\n self.machine.initialization(self.batch_size)\n Y_out = torch.empty((X.shape[0], X.shape[1], self.\n num_outputs), dtype=torch.float32)\n embeddings = self.machine.backward_prediction(X)\n 
temp_size = X.shape[0]\n for i in range(temp_size):\n Y_out[i, :, :], _ = self.machine(X[i], embeddings[\n temp_size - i - 1])\n loss = self.calc_loss(Y_out, Y)\n loss.backward()\n self.clip_grads()\n self.optimizer.step()\n class_bag = self.calc_cost(Y_out, Y)\n corr = class_bag['problem_cor'] + class_bag['test_cor'\n ] + class_bag['treatment_cor']\n tot = class_bag['total']\n loss_list += [loss.item()]\n seq_length += [Y.shape[0]]\n if batch_num % self.save_batch == 0:\n self.save_model(j, batch_num)\n last_batch = batch_num\n print('Epoch: ' + str(j) + '/' + str(self.num_epoch) +\n ', Batch: ' + str(batch_num) + '/' + str(self.\n num_batches) + ', Loss: {0:.2f}, '.format(loss.item()) +\n 'Batch Accuracy (Entity Prediction): {0:.2f} %, '.\n format(float(corr) / float(tot) * 100.0) +\n 'Batch Accuracy (Word Prediction): {0:.2f} %'.format(\n class_bag['word_pred_acc']))\n self.save_model(j, last_batch)\n\n def test_model(self):\n correct = 0\n total = 0\n result_dict = {}\n result_dict['total_problem'] = 0\n result_dict['total_test'] = 0\n result_dict['total_treatment'] = 0\n result_dict['correct_problem'] = 0\n result_dict['correct_test'] = 0\n result_dict['correct_treatment'] = 0\n result_dict['false_positive_problem'] = 0\n result_dict['false_positive_test'] = 0\n result_dict['false_positive_treatment'] = 0\n print('\\n')\n for batch_num, X, Y in self.get_data(task='test'):\n self.machine.initialization(self.batch_size)\n Y_out = torch.empty((X.shape[0], X.shape[1], self.num_outputs),\n dtype=torch.float32)\n embeddings = self.machine.backward_prediction(X)\n temp_size = X.shape[0]\n for i in range(temp_size):\n Y_out[i, :, :], _ = self.machine(X[i], embeddings[temp_size -\n i - 1])\n class_bag = self.calc_cost(Y_out, Y)\n corr = class_bag['problem_cor'] + class_bag['test_cor'\n ] + class_bag['treatment_cor']\n tot = class_bag['total']\n result_dict['total_problem'] = result_dict['total_problem'\n ] + class_bag['problem']\n result_dict['total_test'] = 
result_dict['total_test'] + class_bag[\n 'test']\n result_dict['total_treatment'] = result_dict['total_treatment'\n ] + class_bag['treatment']\n result_dict['correct_problem'] = result_dict['correct_problem'\n ] + class_bag['problem_cor']\n result_dict['correct_test'] = result_dict['correct_test'\n ] + class_bag['test_cor']\n result_dict['correct_treatment'] = result_dict['correct_treatment'\n ] + class_bag['treatment_cor']\n result_dict['false_positive_problem'] = result_dict[\n 'false_positive_problem'] + class_bag['problem_fp']\n result_dict['false_positive_test'] = result_dict[\n 'false_positive_test'] + class_bag['test_fp']\n result_dict['false_positive_treatment'] = result_dict[\n 'false_positive_treatment'] + class_bag['treatment_fp']\n correct += corr\n total += tot\n print('Test Example ' + str(batch_num) + '/' + str(self.\n num_batches) + ' processed, Batch Accuracy: {0:.2f} %, '.\n format(float(corr) / float(tot) * 100.0) +\n 'Batch Accuracy (Word Prediction): {0:.2f} %'.format(\n class_bag['word_pred_acc']))\n result_dict['accuracy'] = float(correct) / float(total) * 100.0\n result_dict = self.calc_metrics(result_dict)\n print('\\nOverall Entity Prediction Accuracy: {0:.2f} %'.format(\n result_dict['accuracy']))\n return result_dict\n\n def calc_metrics(self, result_dict):\n precision_p = float(result_dict['correct_problem']) / float(\n result_dict['correct_problem'] + result_dict[\n 'false_positive_problem'])\n recall_p = float(result_dict['correct_problem']) / float(result_dict\n ['total_problem'])\n precision_ts = float(result_dict['correct_test']) / float(\n result_dict['correct_test'] + result_dict['false_positive_test'])\n recall_ts = float(result_dict['correct_test']) / float(result_dict[\n 'total_test'])\n precision_tr = float(result_dict['correct_treatment']) / float(\n result_dict['correct_treatment'] + result_dict[\n 'false_positive_treatment'])\n recall_tr = float(result_dict['correct_treatment']) / float(result_dict\n ['total_treatment'])\n 
f_score_p = 2 * precision_p * recall_p / (precision_p + recall_p)\n f_score_ts = 2 * precision_ts * recall_ts / (precision_ts + recall_ts)\n f_score_tr = 2 * precision_tr * recall_tr / (precision_tr + recall_tr)\n result_dict['problem_precision'] = precision_p\n result_dict['problem_recall'] = recall_p\n result_dict['problem_f1'] = f_score_p\n result_dict['test_precision'] = precision_ts\n result_dict['test_recall'] = recall_ts\n result_dict['test_f1'] = f_score_ts\n result_dict['treatment_precision'] = precision_tr\n result_dict['treatment_recall'] = recall_tr\n result_dict['treatment_f1'] = f_score_tr\n result_dict['macro_average_f1'] = (f_score_p + f_score_ts + f_score_tr\n ) / 3.0\n correct_sum = result_dict['correct_problem'] + result_dict[\n 'correct_test'] + result_dict['correct_treatment']\n fp_sum = result_dict['false_positive_problem'] + result_dict[\n 'false_positive_test'] + result_dict['false_positive_treatment']\n total_sum = result_dict['total_problem'] + result_dict['total_test'\n ] + result_dict['total_treatment']\n precision_avg = float(correct_sum) / float(correct_sum + fp_sum)\n recall_avg = float(correct_sum) / float(total_sum)\n result_dict['micro_average_f1'] = 2 * precision_avg * recall_avg / (\n precision_avg + recall_avg)\n return result_dict\n\n def save_model(self, curr_epoch, curr_batch):\n if not os.path.exists(os.path.join(self.model_path, self.name)):\n os.mkdir(os.path.join(self.model_path, self.name))\n state_dic = {'task_name': self.name, 'start_epoch': curr_epoch + 1,\n 'start_batch': curr_batch + 1, 'state_dict': self.machine.\n state_dict(), 'optimizer_dic': self.optimizer.state_dict()}\n filename = self.model_path + self.name + '/' + self.name + '_' + str(\n curr_epoch) + '_' + str(curr_batch) + '_saved_model.pth.tar'\n torch.save(state_dic, filename)\n\n def load_model(self, option, epoch, batch):\n path = self.model_path + self.name + '/' + self.name + '_' + str(epoch\n ) + '_' + str(batch) + '_saved_model.pth.tar'\n if 
option == 1:\n checkpoint = torch.load(path)\n self.machine.load_state_dict(checkpoint['state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer_dic'])\n else:\n checkpoint = torch.load(path)\n self.machine.load_state_dict(checkpoint['state_dict'])\n self.machine.eval()\n",
"step-4": "<mask token>\n\n\nclass task_NER:\n\n def __init__(self):\n self.name = 'NER_task_bio'\n self.controller_size = 128\n self.controller_layers = 1\n self.num_read_heads = 1\n self.num_write_heads = 1\n self.num_inputs = 200\n self.num_outputs = 7\n self.memory_N = 128\n self.memory_M = 128\n self.num_batches = -1\n self.save_batch = 5\n self.batch_size = 10\n self.num_epoch = 4\n self.adam_lr = 0.0001\n self.adam_betas = 0.9, 0.999\n self.adam_eps = 1e-08\n self.machine = None\n self.loss = None\n self.optimizer = None\n self.labelDict = None\n self.reverseDict = None\n self.concept_path_train = '../medical_data/train_data/concept'\n self.text_path_train = '../medical_data/train_data/txt'\n self.concept_path_test = '../medical_data/test_data/concept'\n self.text_path_test = '../medical_data/test_data/txt'\n self.save_path = '../medical_data/cleaned_files'\n self.embed_dic_path = (\n '../medical_data/embeddings/bio_embedding_dictionary.dat')\n self.random_vec = '../medical_data/embeddings/random_vec.dat'\n self.model_path = '../saved_models/'\n self.padding_symbol = np.full(self.num_inputs, 0.01)\n\n def get_task_name(self):\n return self.name\n\n def init_dnc(self):\n self.machine = DNC_Module(self.num_inputs, self.num_outputs, self.\n controller_size, self.controller_layers, self.num_read_heads,\n self.num_write_heads, self.memory_N, self.memory_M)\n\n def init_loss(self):\n self.loss = nn.CrossEntropyLoss(reduction='mean')\n\n def init_optimizer(self):\n self.optimizer = optim.Adam(self.machine.parameters(), lr=self.\n adam_lr, betas=self.adam_betas, eps=self.adam_eps)\n\n def calc_loss(self, Y_pred, Y):\n loss_vec = torch.empty(Y.shape[0], dtype=torch.float32)\n for i in range(Y_pred.shape[0]):\n loss_vec[i] = self.loss(Y_pred[i], Y[i])\n return torch.mean(loss_vec)\n\n def calc_cost(self, Y_pred, Y):\n \"\"\"\n Note: \n 1). For considering an prediction to be True Positive, prediction must match completely with labels entity (not partially). 
Else it is False Negative.\n 2). For considering a prediction to be False Positive, it must be full entity (BIII) and not completely match the label entity.\n \"\"\"\n class_bag = {}\n class_bag['problem'] = 0\n class_bag['test'] = 0\n class_bag['treatment'] = 0\n class_bag['problem_cor'] = 0\n class_bag['test_cor'] = 0\n class_bag['treatment_cor'] = 0\n class_bag['problem_fp'] = 0\n class_bag['test_fp'] = 0\n class_bag['treatment_fp'] = 0\n pred_class = np.transpose(F.softmax(Y_pred, dim=2).max(2)[1].numpy()\n ).reshape(-1)\n Y = np.transpose(Y.numpy()).reshape(-1)\n cor_pred = (Y == pred_class).astype(np.int)\n class_bag['word_pred_acc'] = np.divide(np.sum(cor_pred), cor_pred.size\n ) * 100.0\n beg_idx = list(np.where(np.in1d(Y, [0, 2, 4]))[0])\n target = np.where(np.in1d(Y, [0, 2, 4, 6]))[0] - 1\n if target[0] == -1:\n target = target[1:]\n end_idx = list(target[np.where(Y[target] != 6)[0]])\n if Y[-1] != 6:\n end_idx.append(Y.size - 1)\n assert len(beg_idx) == len(end_idx)\n class_bag['total'] = len(beg_idx)\n sum_vec = np.cumsum(cor_pred)\n for b, e in zip(beg_idx, end_idx):\n idx_range = e - b + 1\n sum_range = sum_vec[e] - sum_vec[b] + 1\n lab = self.reverseDict[Y[b]][2:]\n class_bag[lab] = class_bag[lab] + 1\n if sum_range == idx_range:\n class_bag[lab + '_cor'] = class_bag[lab + '_cor'] + 1\n beg_idx_p = list(np.where(np.in1d(pred_class, [0, 2, 4]))[0])\n for b in beg_idx_p:\n if cor_pred[b] == 0:\n lab = self.reverseDict[pred_class[b]][2:]\n class_bag[lab + '_fp'] = class_bag[lab + '_fp'] + 1\n return class_bag\n\n def print_word(self, token_class):\n word = self.reverseDict[token_class]\n print(word + '\\n')\n\n def clip_grads(self):\n \"\"\"Gradient clipping to the range [10, 10].\"\"\"\n parameters = list(filter(lambda p: p.grad is not None, self.machine\n .parameters()))\n for p in parameters:\n p.grad.data.clamp_(-10, 10)\n\n def initialize_labels(self):\n self.labelDict = {}\n self.reverseDict = {}\n self.labelDict['b-problem'] = 0\n 
self.labelDict['i-problem'] = 1\n self.labelDict['b-test'] = 2\n self.labelDict['i-test'] = 3\n self.labelDict['b-treatment'] = 4\n self.labelDict['i-treatment'] = 5\n self.labelDict['o'] = 6\n for k in self.labelDict.keys():\n self.reverseDict[self.labelDict[k]] = k\n self.save_data([self.labelDict, self.reverseDict], os.path.join(\n self.save_path, 'label_dicts_bio.dat'))\n\n def parse_concepts(self, file_path):\n conceptList = []\n f = open(file_path)\n content = f.readlines()\n f.close()\n for x in content:\n dic = {}\n x = re.sub('\\n', ' ', x)\n x = re.sub('\\\\ +', ' ', x)\n x = x.strip().split('||')\n temp1, label = x[0].split(' '), x[1].split('=')[1][1:-1]\n temp1[0] = temp1[0][3:]\n temp1[-3] = temp1[-3][0:-1]\n entity = temp1[0:-2]\n if len(entity) >= 1:\n lab = ['i'] * len(entity)\n lab[0] = 'b'\n lab = [(l + '-' + label) for l in lab]\n else:\n print('Data in File: ' + file_path +\n ', not in expected format..')\n exit()\n noLab = [self.labelDict[l] for l in lab]\n sLine, sCol = int(temp1[-2].split(':')[0]), int(temp1[-2].split\n (':')[1])\n eLine, eCol = int(temp1[-1].split(':')[0]), int(temp1[-1].split\n (':')[1])\n \"\"\"\n # Printing the information\n print(\"------------------------------------------------------------\")\n print(\"Entity: \" + str(entity))\n print(\"Entity Label: \" + label)\n print(\"Labels - BIO form: \" + str(lab))\n print(\"Labels Index: \" + str(noLab))\n print(\"Start Line: \" + str(sLine) + \", Start Column: \" + str(sCol))\n print(\"End Line: \" + str(eLine) + \", End Column: \" + str(eCol))\n print(\"------------------------------------------------------------\")\n \"\"\"\n dic['entity'] = entity\n dic['label'] = label\n dic['BIO_labels'] = lab\n dic['label_index'] = noLab\n dic['start_line'] = sLine\n dic['start_word_no'] = sCol\n dic['end_line'] = eLine\n dic['end_word_no'] = eCol\n conceptList.append(dic)\n return conceptList\n\n def parse_summary(self, file_path):\n file_lines = []\n tags = []\n default_label = 
len(self.labelDict) - 1\n f = open(file_path)\n content = f.readlines()\n f.close()\n for x in content:\n x = re.sub('\\n', ' ', x)\n x = re.sub('\\\\ +', ' ', x)\n file_lines.append(x.strip().split(' '))\n tags.append([default_label] * len(file_lines[-1]))\n \"\"\"\n # Printing the information\n print(\"------------------------------------------------------------\")\n print(\"File Lines No: \" + str(counter))\n print(file_lines[-1])\n print(\"\nCorresponding labels:\")\n print(tags[-1])\n print(\"------------------------------------------------------------\")\n counter += 1\n \"\"\"\n assert len(tags[-1]) == len(file_lines[-1]\n ), 'Line length is not matching labels length...'\n return file_lines, tags\n\n def modify_labels(self, conceptList, tags):\n for e in conceptList:\n if e['start_line'] == e['end_line']:\n tags[e['start_line'] - 1][e['start_word_no']:e[\n 'end_word_no'] + 1] = e['label_index'][:]\n else:\n start = e['start_line']\n end = e['end_line']\n beg = 0\n for i in range(start, end + 1):\n if i == start:\n tags[i - 1][e['start_word_no']:] = e['label_index'][\n 0:len(tags[i - 1]) - e['start_word_no']]\n beg = len(tags[i - 1]) - e['start_word_no']\n elif i == end:\n tags[i - 1][0:e['end_word_no'] + 1] = e['label_index'][\n beg:]\n else:\n tags[i - 1][:] = e['label_index'][beg:beg + len(\n tags[i - 1])]\n beg = beg + len(tags[i - 1])\n return tags\n\n def print_data(self, file, file_lines, tags):\n counter = 1\n print('\\n************ Printing details of the file: ' + file +\n ' ************\\n')\n for x in file_lines:\n print(\n '------------------------------------------------------------')\n print('File Lines No: ' + str(counter))\n print(x)\n print('\\nCorresponding labels:')\n print([self.reverseDict[i] for i in tags[counter - 1]])\n print('\\nCorresponding Label Indices:')\n print(tags[counter - 1])\n print(\n '------------------------------------------------------------')\n counter += 1\n\n def save_data(self, obj_list, s_path):\n 
pickle.dump(tuple(obj_list), open(s_path, 'wb'))\n\n def acquire_data(self, task):\n data = {}\n if task == 'train':\n t_path = self.text_path_train\n c_path = self.concept_path_train\n else:\n t_path = self.text_path_test\n c_path = self.concept_path_test\n for f in os.listdir(t_path):\n f1 = f.split('.')[0] + '.con'\n if os.path.isfile(os.path.join(c_path, f1)):\n conceptList = self.parse_concepts(os.path.join(c_path, f1))\n file_lines, tags = self.parse_summary(os.path.join(t_path, f))\n tags = self.modify_labels(conceptList, tags)\n data[f1] = [conceptList, file_lines, tags]\n return data\n\n def structure_data(self, data_dict):\n final_line_list = []\n final_tag_list = []\n for k in data_dict.keys():\n file_lines = data_dict[k][1]\n tags = data_dict[k][2]\n temp1 = []\n temp2 = []\n for i in range(len(file_lines)):\n temp1.extend(file_lines[i])\n temp2.extend(tags[i])\n assert len(temp1) == len(temp2\n ), 'Word length not matching Label length for story in ' + str(\n k)\n final_line_list.append(temp1)\n final_tag_list.append(temp2)\n assert len(final_line_list) == len(final_tag_list\n ), 'Number of stories not matching number of labels list'\n return final_line_list, final_tag_list\n\n def padding(self, line_list, tag_list):\n diff = 0\n max_len = 0\n outside_class = len(self.labelDict) - 1\n for i in range(len(line_list)):\n if len(line_list[i]) > max_len:\n max_len = len(line_list[i])\n for i in range(len(line_list)):\n diff = max_len - len(line_list[i])\n line_list[i].extend([self.padding_symbol] * diff)\n tag_list[i].extend([outside_class] * diff)\n assert len(line_list[i]) == max_len and len(line_list[i]) == len(\n tag_list[i]), 'Padding unsuccessful'\n return np.asarray(line_list), np.asarray(tag_list)\n\n def embed_input(self, line_list):\n final_list = []\n summary = None\n word = None\n temp = None\n embed_dic = pickle.load(open(self.embed_dic_path, 'rb'))\n r_embed = pickle.load(open(self.random_vec, 'rb'))\n for i in range(len(line_list)):\n summary 
= line_list[i]\n final_list.append([])\n for j in range(len(summary)):\n word = summary[j].lower()\n if word in embed_dic:\n final_list[-1].append(embed_dic[word])\n else:\n temp = r_embed[:]\n random.shuffle(temp)\n temp = np.asarray(temp, dtype=np.float32)\n final_list[-1].append(temp)\n return final_list\n\n def prepare_data(self, task='train'):\n line_list, tag_list = None, None\n \"\"\"\n line_list is the list of rows, where each row is a list of all the words in a medical summary\n Similar is the case for tag_list, except, it stores labels for each words\n \"\"\"\n if not os.path.exists(self.save_path):\n os.mkdir(self.save_path)\n if not os.path.exists(os.path.join(self.save_path,\n 'label_dicts_bio.dat')):\n self.initialize_labels()\n else:\n self.labelDict, self.reverseDict = pickle.load(open(os.path.\n join(self.save_path, 'label_dicts_bio.dat'), 'rb'))\n if not os.path.exists(os.path.join(self.save_path, \n 'object_dict_bio_' + str(task) + '.dat')):\n data_dict = self.acquire_data(task)\n line_list, tag_list = self.structure_data(data_dict)\n line_list = self.embed_input(line_list)\n self.save_data([line_list, tag_list], os.path.join(self.\n save_path, 'object_dict_bio_' + str(task) + '.dat'))\n else:\n line_list, tag_list = pickle.load(open(os.path.join(self.\n save_path, 'object_dict_bio_' + str(task) + '.dat'), 'rb'))\n return line_list, tag_list\n\n def get_data(self, task='train'):\n line_list, tag_list = self.prepare_data(task)\n story_idx = list(range(0, len(line_list)))\n random.shuffle(story_idx)\n num_batch = int(len(story_idx) / self.batch_size)\n self.num_batches = num_batch\n x_out = []\n y_out = []\n counter = 1\n for i in story_idx:\n if num_batch <= 0:\n break\n x_out.append(line_list[i])\n y_out.append(tag_list[i])\n if counter % self.batch_size == 0:\n counter = 0\n x_out_pad, y_out_pad = self.padding(x_out, y_out)\n x_out_array = torch.tensor(x_out_pad.swapaxes(0, 1), dtype=\n torch.float32)\n y_out_array = 
torch.tensor(y_out_pad.swapaxes(0, 1), dtype=\n torch.long)\n x_out = []\n y_out = []\n num_batch -= 1\n yield self.num_batches - num_batch, x_out_array, y_out_array\n counter += 1\n\n def train_model(self):\n loss_list = []\n seq_length = []\n last_batch = 0\n for j in range(self.num_epoch):\n for batch_num, X, Y in self.get_data(task='train'):\n self.optimizer.zero_grad()\n self.machine.initialization(self.batch_size)\n Y_out = torch.empty((X.shape[0], X.shape[1], self.\n num_outputs), dtype=torch.float32)\n embeddings = self.machine.backward_prediction(X)\n temp_size = X.shape[0]\n for i in range(temp_size):\n Y_out[i, :, :], _ = self.machine(X[i], embeddings[\n temp_size - i - 1])\n loss = self.calc_loss(Y_out, Y)\n loss.backward()\n self.clip_grads()\n self.optimizer.step()\n class_bag = self.calc_cost(Y_out, Y)\n corr = class_bag['problem_cor'] + class_bag['test_cor'\n ] + class_bag['treatment_cor']\n tot = class_bag['total']\n loss_list += [loss.item()]\n seq_length += [Y.shape[0]]\n if batch_num % self.save_batch == 0:\n self.save_model(j, batch_num)\n last_batch = batch_num\n print('Epoch: ' + str(j) + '/' + str(self.num_epoch) +\n ', Batch: ' + str(batch_num) + '/' + str(self.\n num_batches) + ', Loss: {0:.2f}, '.format(loss.item()) +\n 'Batch Accuracy (Entity Prediction): {0:.2f} %, '.\n format(float(corr) / float(tot) * 100.0) +\n 'Batch Accuracy (Word Prediction): {0:.2f} %'.format(\n class_bag['word_pred_acc']))\n self.save_model(j, last_batch)\n\n def test_model(self):\n correct = 0\n total = 0\n result_dict = {}\n result_dict['total_problem'] = 0\n result_dict['total_test'] = 0\n result_dict['total_treatment'] = 0\n result_dict['correct_problem'] = 0\n result_dict['correct_test'] = 0\n result_dict['correct_treatment'] = 0\n result_dict['false_positive_problem'] = 0\n result_dict['false_positive_test'] = 0\n result_dict['false_positive_treatment'] = 0\n print('\\n')\n for batch_num, X, Y in self.get_data(task='test'):\n 
self.machine.initialization(self.batch_size)\n Y_out = torch.empty((X.shape[0], X.shape[1], self.num_outputs),\n dtype=torch.float32)\n embeddings = self.machine.backward_prediction(X)\n temp_size = X.shape[0]\n for i in range(temp_size):\n Y_out[i, :, :], _ = self.machine(X[i], embeddings[temp_size -\n i - 1])\n class_bag = self.calc_cost(Y_out, Y)\n corr = class_bag['problem_cor'] + class_bag['test_cor'\n ] + class_bag['treatment_cor']\n tot = class_bag['total']\n result_dict['total_problem'] = result_dict['total_problem'\n ] + class_bag['problem']\n result_dict['total_test'] = result_dict['total_test'] + class_bag[\n 'test']\n result_dict['total_treatment'] = result_dict['total_treatment'\n ] + class_bag['treatment']\n result_dict['correct_problem'] = result_dict['correct_problem'\n ] + class_bag['problem_cor']\n result_dict['correct_test'] = result_dict['correct_test'\n ] + class_bag['test_cor']\n result_dict['correct_treatment'] = result_dict['correct_treatment'\n ] + class_bag['treatment_cor']\n result_dict['false_positive_problem'] = result_dict[\n 'false_positive_problem'] + class_bag['problem_fp']\n result_dict['false_positive_test'] = result_dict[\n 'false_positive_test'] + class_bag['test_fp']\n result_dict['false_positive_treatment'] = result_dict[\n 'false_positive_treatment'] + class_bag['treatment_fp']\n correct += corr\n total += tot\n print('Test Example ' + str(batch_num) + '/' + str(self.\n num_batches) + ' processed, Batch Accuracy: {0:.2f} %, '.\n format(float(corr) / float(tot) * 100.0) +\n 'Batch Accuracy (Word Prediction): {0:.2f} %'.format(\n class_bag['word_pred_acc']))\n result_dict['accuracy'] = float(correct) / float(total) * 100.0\n result_dict = self.calc_metrics(result_dict)\n print('\\nOverall Entity Prediction Accuracy: {0:.2f} %'.format(\n result_dict['accuracy']))\n return result_dict\n\n def calc_metrics(self, result_dict):\n precision_p = float(result_dict['correct_problem']) / float(\n result_dict['correct_problem'] + 
result_dict[\n 'false_positive_problem'])\n recall_p = float(result_dict['correct_problem']) / float(result_dict\n ['total_problem'])\n precision_ts = float(result_dict['correct_test']) / float(\n result_dict['correct_test'] + result_dict['false_positive_test'])\n recall_ts = float(result_dict['correct_test']) / float(result_dict[\n 'total_test'])\n precision_tr = float(result_dict['correct_treatment']) / float(\n result_dict['correct_treatment'] + result_dict[\n 'false_positive_treatment'])\n recall_tr = float(result_dict['correct_treatment']) / float(result_dict\n ['total_treatment'])\n f_score_p = 2 * precision_p * recall_p / (precision_p + recall_p)\n f_score_ts = 2 * precision_ts * recall_ts / (precision_ts + recall_ts)\n f_score_tr = 2 * precision_tr * recall_tr / (precision_tr + recall_tr)\n result_dict['problem_precision'] = precision_p\n result_dict['problem_recall'] = recall_p\n result_dict['problem_f1'] = f_score_p\n result_dict['test_precision'] = precision_ts\n result_dict['test_recall'] = recall_ts\n result_dict['test_f1'] = f_score_ts\n result_dict['treatment_precision'] = precision_tr\n result_dict['treatment_recall'] = recall_tr\n result_dict['treatment_f1'] = f_score_tr\n result_dict['macro_average_f1'] = (f_score_p + f_score_ts + f_score_tr\n ) / 3.0\n correct_sum = result_dict['correct_problem'] + result_dict[\n 'correct_test'] + result_dict['correct_treatment']\n fp_sum = result_dict['false_positive_problem'] + result_dict[\n 'false_positive_test'] + result_dict['false_positive_treatment']\n total_sum = result_dict['total_problem'] + result_dict['total_test'\n ] + result_dict['total_treatment']\n precision_avg = float(correct_sum) / float(correct_sum + fp_sum)\n recall_avg = float(correct_sum) / float(total_sum)\n result_dict['micro_average_f1'] = 2 * precision_avg * recall_avg / (\n precision_avg + recall_avg)\n return result_dict\n\n def save_model(self, curr_epoch, curr_batch):\n if not os.path.exists(os.path.join(self.model_path, 
self.name)):\n os.mkdir(os.path.join(self.model_path, self.name))\n state_dic = {'task_name': self.name, 'start_epoch': curr_epoch + 1,\n 'start_batch': curr_batch + 1, 'state_dict': self.machine.\n state_dict(), 'optimizer_dic': self.optimizer.state_dict()}\n filename = self.model_path + self.name + '/' + self.name + '_' + str(\n curr_epoch) + '_' + str(curr_batch) + '_saved_model.pth.tar'\n torch.save(state_dic, filename)\n\n def load_model(self, option, epoch, batch):\n path = self.model_path + self.name + '/' + self.name + '_' + str(epoch\n ) + '_' + str(batch) + '_saved_model.pth.tar'\n if option == 1:\n checkpoint = torch.load(path)\n self.machine.load_state_dict(checkpoint['state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer_dic'])\n else:\n checkpoint = torch.load(path)\n self.machine.load_state_dict(checkpoint['state_dict'])\n self.machine.eval()\n",
"step-5": "# Named Entity Recognition on Medical Data (BIO Tagging)\n# Bio-Word2Vec Embeddings Source and Reference: https://github.com/ncbi-nlp/BioWordVec\n\nimport os\nimport re\nimport torch\nimport pickle\nfrom torch import nn\nfrom torch import optim\nimport torch.nn.functional as F\n\nimport numpy as np\nimport random\n\nfrom DNC.dnc import DNC_Module # Importing DNC Implementation\n\nclass task_NER():\n\n def __init__(self):\n self.name = \"NER_task_bio\"\n\n # Controller Params\n self.controller_size = 128\n self.controller_layers = 1\n\n # Head Params\n self.num_read_heads = 1\n self.num_write_heads = 1\n\n # Processor Params\n self.num_inputs = 200 # Length of Embeddings\n self.num_outputs = 7 # Class size\n\n # Memory Params\n self.memory_N = 128\n self.memory_M = 128\n\n # Training Params\n self.num_batches = -1\n self.save_batch = 5 # Saving model after every save_batch number of batches\n self.batch_size = 10\n self.num_epoch = 4\n\n # Optimizer Params\n self.adam_lr = 1e-4\n self.adam_betas = (0.9, 0.999)\n self.adam_eps = 1e-8\n\n # Handles\n self.machine = None\n self.loss = None\n self.optimizer = None\n\n # Class Dictionaries\n self.labelDict = None # Label Dictionary - Labels to Index\n self.reverseDict = None # Inverse Label Dictionary - Index to Labels\n\n # File Paths\n self.concept_path_train = \"../medical_data/train_data/concept\" # Path to train concept files\n self.text_path_train = \"../medical_data/train_data/txt\" # Path to train text summaries\n self.concept_path_test = \"../medical_data/test_data/concept\" # Path to test concept files\n self.text_path_test = \"../medical_data/test_data/txt\" # Path to test text summaries\n self.save_path = \"../medical_data/cleaned_files\" # Save path\n self.embed_dic_path = \"../medical_data/embeddings/bio_embedding_dictionary.dat\" # Word2Vec embeddings Dictionary path\n self.random_vec = \"../medical_data/embeddings/random_vec.dat\" # Path to random embedding (Used to create new vectors)\n 
self.model_path = \"../saved_models/\" # Stores Trained Models\n\n # Miscellaneous\n self.padding_symbol = np.full((self.num_inputs), 0.01) # Padding symbol embedding\n\n def get_task_name(self):\n return self.name\n\n def init_dnc(self):\n self.machine = DNC_Module(self.num_inputs, self.num_outputs, self.controller_size, self.controller_layers, self.num_read_heads, self.num_write_heads, self.memory_N, self.memory_M)\n\n def init_loss(self):\n self.loss = nn.CrossEntropyLoss(reduction = 'mean') # Cross Entropy Loss -> Softmax Activation + Cross Entropy Loss\n\n def init_optimizer(self):\n self.optimizer = optim.Adam(self.machine.parameters(), lr = self.adam_lr, betas = self.adam_betas, eps = self.adam_eps)\n\n def calc_loss(self, Y_pred, Y):\n # Y: dim -> (sequence_len x batch_size)\n # Y_pred: dim -> (sequence_len x batch_size x num_outputs)\n loss_vec = torch.empty(Y.shape[0], dtype=torch.float32)\n for i in range(Y_pred.shape[0]):\n loss_vec[i] = self.loss(Y_pred[i], Y[i])\n return torch.mean(loss_vec)\n\n def calc_cost(self, Y_pred, Y): # Calculates % Cost\n # Y: dim -> (sequence_len x batch_size)\n # Y_pred: dim -> (sequence_len x batch_size x sequence_width)\n\n '''\n Note: \n 1). For considering an prediction to be True Positive, prediction must match completely with labels entity (not partially). Else it is False Negative.\n 2). 
For considering a prediction to be False Positive, it must be full entity (BIII) and not completely match the label entity.\n '''\n\n # Stores correct class labels for each entity type\n class_bag = {}\n class_bag['problem'] = 0 # Total labels\n class_bag['test'] = 0 # Total labels\n class_bag['treatment'] = 0 # Total labels\n class_bag['problem_cor'] = 0 # Correctly classified labels\n class_bag['test_cor'] = 0 # Correctly classified labels\n class_bag['treatment_cor'] = 0 # Correctly classified labels\n class_bag['problem_fp'] = 0 # False positive classified labels\n class_bag['test_fp'] = 0 # False positive classified labels\n class_bag['treatment_fp'] = 0 # False positive classified labels\n \n pred_class = np.transpose(F.softmax(Y_pred, dim=2).max(2)[1].numpy()).reshape(-1) # Predicted class. dim -> (sequence_len*batch_size)\n Y = np.transpose(Y.numpy()).reshape(-1) # Converting to NumPy Array and linearizing\n cor_pred = (Y == pred_class).astype(np.int) # Comparing Prediction and Labels to find correct predictions\n\n class_bag['word_pred_acc'] = np.divide(np.sum(cor_pred), cor_pred.size)*100.0 # % Accuracy of Correctly Predicted Words (Not Entities)\n\n # Getting the beginning index of all the entities\n beg_idx = list(np.where(np.in1d(Y, [0, 2, 4]))[0])\n\n # Getting the end index of all the entities (All the Index previous of 'Other'/'Begin' and not equal to 'Other')\n target = np.where(np.in1d(Y, [0, 2, 4, 6]))[0] - 1\n if target[0] == -1:\n target = target[1:]\n end_idx = list(target[np.where(Y[target] != 6)[0]])\n if Y[-1] != 6:\n end_idx.append(Y.size-1)\n\n assert len(beg_idx) == len(end_idx) # Sanity Check\n class_bag['total'] = len(beg_idx) # Total number of Entities\n\n # Counting Entities\n sum_vec = np.cumsum(cor_pred) # Calculates cumulative summation of predicted vector\n for b, e in zip(beg_idx, end_idx):\n idx_range = e-b+1 # Entity span\n sum_range = sum_vec[e]-sum_vec[b]+1 # Count of entity elements which are predicted correctly\n\n lab = 
self.reverseDict[Y[b]][2:] # Extracting entity type (Problem, Test or Treatment)\n class_bag[lab] = class_bag[lab]+1 # Getting count of each entities\n \n if sum_range == idx_range: # +1 if entity is classified correctly\n class_bag[lab+'_cor'] = class_bag[lab+'_cor']+1\n\n # Detecting False Positives\n # Getting the beginning index of all the entities in Predicted Results\n beg_idx_p = list(np.where(np.in1d(pred_class, [0, 2, 4]))[0])\n \n for b in beg_idx_p:\n if cor_pred[b] == 0:\n lab = self.reverseDict[pred_class[b]][2:]\n class_bag[lab+'_fp'] = class_bag[lab+'_fp']+1\n\n return class_bag\n \n def print_word(self, token_class): # Prints the Class name from Class number\n word = self.reverseDict[token_class]\n print(word + \"\\n\")\n\n def clip_grads(self): # Clipping gradients for stability\n \"\"\"Gradient clipping to the range [10, 10].\"\"\"\n parameters = list(filter(lambda p: p.grad is not None, self.machine.parameters()))\n for p in parameters:\n p.grad.data.clamp_(-10, 10)\n\n def initialize_labels(self): # Initializing label dictionaries for Labels->IDX and IDX->Labels\n self.labelDict = {} # Label Dictionary - Labels to Index\n self.reverseDict = {} # Inverse Label Dictionary - Index to Labels\n\n # Using BIEOS labelling scheme\n self.labelDict['b-problem'] = 0 # Problem - Beginning \n self.labelDict['i-problem'] = 1 # Problem - Inside\n self.labelDict['b-test'] = 2 # Test - Beginning\n self.labelDict['i-test'] = 3 # Test - Inside\n self.labelDict['b-treatment'] = 4 # Treatment - Beginning\n self.labelDict['i-treatment'] = 5 # Treatment - Inside\n self.labelDict['o'] = 6 # Outside Token\n\n # Making Inverse Label Dictionary\n for k in self.labelDict.keys():\n self.reverseDict[self.labelDict[k]] = k\n\n # Saving the diictionaries into a file\n self.save_data([self.labelDict, self.reverseDict], os.path.join(self.save_path, \"label_dicts_bio.dat\"))\n\n def parse_concepts(self, file_path): # Parses the concept file to extract concepts and labels\n 
conceptList = [] # Stores all the Concept in the File\n\n f = open(file_path) # Opening and reading a concept file\n content = f.readlines() # Reading all the lines in the concept file\n f.close() # Closing the concept file\n\n for x in content: # Reading each line in the concept file\n dic = {}\n\n # Cleaning and extracting the entities, labels and their positions in the corresponding medical summaries\n x = re.sub('\\n', ' ', x)\n x = re.sub(r'\\ +', ' ', x)\n x = x.strip().split('||')\n\n temp1, label = x[0].split(' '), x[1].split('=')[1][1:-1]\n\n temp1[0] = temp1[0][3:]\n temp1[-3] = temp1[-3][0:-1]\n entity = temp1[0:-2]\n\n if len(entity) >= 1:\n lab = ['i']*len(entity)\n lab[0] = 'b'\n lab = [l+\"-\"+label for l in lab]\n else:\n print(\"Data in File: \" + file_path + \", not in expected format..\")\n exit()\n\n noLab = [self.labelDict[l] for l in lab]\n sLine, sCol = int(temp1[-2].split(\":\")[0]), int(temp1[-2].split(\":\")[1])\n eLine, eCol = int(temp1[-1].split(\":\")[0]), int(temp1[-1].split(\":\")[1])\n \n '''\n # Printing the information\n print(\"------------------------------------------------------------\")\n print(\"Entity: \" + str(entity))\n print(\"Entity Label: \" + label)\n print(\"Labels - BIO form: \" + str(lab))\n print(\"Labels Index: \" + str(noLab))\n print(\"Start Line: \" + str(sLine) + \", Start Column: \" + str(sCol))\n print(\"End Line: \" + str(eLine) + \", End Column: \" + str(eCol))\n print(\"------------------------------------------------------------\")\n '''\n\n # Storing the information as a dictionary\n dic['entity'] = entity # Entity Name (In the form of list of words)\n dic['label'] = label # Common Label\n dic['BIO_labels'] = lab # List of BIO labels for each word\n dic['label_index'] = noLab # Labels in the index form\n dic['start_line'] = sLine # Start line of the concept in the corresponding text summaries\n dic['start_word_no'] = sCol # Starting word number of the concept in the corresponding start line\n 
dic['end_line'] = eLine # End line of the concept in the corresponding text summaries\n dic['end_word_no'] = eCol # Ending word number of the concept in the corresponding end line\n\n # Appending the concept dictionary to the list\n conceptList.append(dic)\n\n return conceptList # Returning the all the concepts in the current file in the form of dictionary list\n\n def parse_summary(self, file_path): # Parses the Text summaries\n file_lines = [] # Stores the lins of files in the list form\n tags = [] # Stores corresponding labels for each word in the file (Default label: 'o' [Outside])\n default_label = len(self.labelDict)-1 # default_label is \"7\" (Corresponding to 'Other' entity) \n # counter = 1 # Temporary variable used during print\n\n f = open(file_path) # Opening and reading a concept file\n content = f.readlines() # Reading all the lines in the concept file\n f.close()\n\n for x in content:\n x = re.sub('\\n', ' ', x)\n x = re.sub(r'\\ +', ' ', x)\n file_lines.append(x.strip().split(\" \")) # Spliting the lines into word list and Appending each of them in the file list\n tags.append([default_label]*len(file_lines[-1])) # Assigining the default_label to all the words in a line\n '''\n # Printing the information\n print(\"------------------------------------------------------------\")\n print(\"File Lines No: \" + str(counter))\n print(file_lines[-1])\n print(\"\\nCorresponding labels:\")\n print(tags[-1])\n print(\"------------------------------------------------------------\")\n counter += 1\n '''\n assert len(tags[-1]) == len(file_lines[-1]), \"Line length is not matching labels length...\" # Sanity Check\n return file_lines, tags\n\n def modify_labels(self, conceptList, tags): # Modifies the default labels of each word in text files with the true labels from the concept files\n for e in conceptList: # Iterating over all the dictionary elements in the Concept List\n if e['start_line'] == e['end_line']: # Checking whether concept is spanning over a single 
line or multiple line in the summary\n tags[e['start_line']-1][e['start_word_no']:e['end_word_no']+1] = e['label_index'][:]\n else:\n start = e['start_line']\n end = e['end_line']\n beg = 0\n for i in range(start, end+1): # Distributing labels over multiple lines in the text summaries\n if i == start:\n tags[i-1][e['start_word_no']:] = e['label_index'][0:len(tags[i-1])-e['start_word_no']]\n beg = len(tags[i-1])-e['start_word_no']\n elif i == end:\n tags[i-1][0:e['end_word_no']+1] = e['label_index'][beg:]\n else:\n tags[i-1][:] = e['label_index'][beg:beg+len(tags[i-1])]\n beg = beg+len(tags[i-1])\n return tags\n\n def print_data(self, file, file_lines, tags): # Prints the given data\n counter = 1\n\n print(\"\\n************ Printing details of the file: \" + file + \" ************\\n\")\n for x in file_lines:\n print(\"------------------------------------------------------------\")\n print(\"File Lines No: \" + str(counter))\n print(x)\n print(\"\\nCorresponding labels:\")\n print([self.reverseDict[i] for i in tags[counter-1]])\n print(\"\\nCorresponding Label Indices:\")\n print(tags[counter-1])\n print(\"------------------------------------------------------------\")\n counter += 1\n\n def save_data(self, obj_list, s_path): # Saves the file into the binary file using Pickle\n # Note: The 'obj_list' must be a list and none other than that\n pickle.dump(tuple(obj_list), open(s_path,'wb'))\n\n def acquire_data(self, task): # Read all the concept files to get concepts and labels, proces them and save them\n data = {} # Dictionary to store all the data objects (conceptList, file_lines, tags) each indexed by file name\n\n if task == 'train': # Determining the task type to assign the data path accordingly\n t_path = self.text_path_train\n c_path = self.concept_path_train\n else:\n t_path = self.text_path_test\n c_path = self.concept_path_test\n\n for f in os.listdir(t_path):\n f1 = f.split('.')[0] + \".con\"\n if os.path.isfile(os.path.join(c_path, f1)):\n conceptList = 
self.parse_concepts(os.path.join(c_path, f1)) # Parsing concepts and labels from the corresponding concept file\n file_lines, tags = self.parse_summary(os.path.join(t_path, f)) # Parses the document summaries to get the written notes\n tags = self.modify_labels(conceptList, tags) # Modifies he default labels to each word with the true labels from the concept files\n data[f1] = [conceptList, file_lines, tags] # Storing each object in dictionary\n # self.print_data(f, file_lines, tags) # Printing the details\n return data\n\n def structure_data(self, data_dict): # Structures the data in proper trainable form\n final_line_list = [] # Stores words of all the files in separate sub-lists\n final_tag_list = [] # Stores tags of all the files in separate sub-lists\n\n for k in data_dict.keys(): # Extracting data from each pre-processed file in dictionary\n file_lines = data_dict[k][1] # Extracting story\n tags = data_dict[k][2] # Extracting corresponding labels\n\n # Creating empty lists\n temp1 = []\n temp2 = []\n\n # Merging all the lines in file into a single list. 
Same for corresponding labels\n for i in range(len(file_lines)):\n temp1.extend(file_lines[i])\n temp2.extend(tags[i])\n \n assert len(temp1) == len(temp2), \"Word length not matching Label length for story in \" + str(k) # Sanity Check\n\n final_line_list.append(temp1)\n final_tag_list.append(temp2)\n \n assert len(final_line_list) == len(final_tag_list), \"Number of stories not matching number of labels list\" # Sanity Check\n return final_line_list, final_tag_list\n \n def padding(self, line_list, tag_list): # Pads stories with padding symbol to make them of same length \n diff = 0\n max_len = 0\n outside_class = len(self.labelDict)-1 # Classifying padding symbol as \"outside\" term\n\n # Calculating Max Summary Length\n for i in range(len(line_list)):\n if len(line_list[i])>max_len:\n max_len = len(line_list[i])\n\n for i in range(len(line_list)):\n diff = max_len - len(line_list[i])\n line_list[i].extend([self.padding_symbol]*diff)\n tag_list[i].extend([outside_class]*diff)\n assert (len(line_list[i]) == max_len) and (len(line_list[i]) == len(tag_list[i])), \"Padding unsuccessful\" # Sanity check\n return np.asarray(line_list), np.asarray(tag_list) # Making NumPy array of size (batch_size x story_length x word size) and (batch_size x story_length x 1) respectively\n\n def embed_input(self, line_list): # Converts words to vector embeddings\n final_list = [] # Stores embedded words\n summary = None # Temp variable\n word = None # Temp variable\n temp = None # Temp variable\n\n embed_dic = pickle.load(open(self.embed_dic_path, 'rb')) # Loading word2vec dictionary using Pickle\n r_embed = pickle.load(open(self.random_vec, 'rb')) # Loading Random embedding\n\n for i in range(len(line_list)): # Iterating over all the summaries\n summary = line_list[i]\n final_list.append([]) # Reserving space for curent summary\n\n for j in range(len(summary)):\n word = summary[j].lower()\n if word in embed_dic: # Checking for existence of word in dictionary\n 
final_list[-1].append(embed_dic[word])\n else:\n temp = r_embed[:] # Copying the values of the list\n random.shuffle(temp) # Randomly shuffling the word embedding to make it unique\n temp = np.asarray(temp, dtype=np.float32) # Converting to NumPy array\n final_list[-1].append(temp)\n return final_list\n\n def prepare_data(self, task='train'): # Preparing all the data necessary\n line_list, tag_list = None, None\n\n '''\n line_list is the list of rows, where each row is a list of all the words in a medical summary\n Similar is the case for tag_list, except, it stores labels for each words\n '''\n\n if not os.path.exists(self.save_path):\n os.mkdir(self.save_path) # Creating a new directory if it does not exist else reading previously saved data\n \n if not os.path.exists(os.path.join(self.save_path, \"label_dicts_bio.dat\")):\n self.initialize_labels() # Initialize label to index dictionaries\n else:\n self.labelDict, self.reverseDict = pickle.load(open(os.path.join(self.save_path, \"label_dicts_bio.dat\"), 'rb')) # Loading Label dictionaries\n \n if not os.path.exists(os.path.join(self.save_path, \"object_dict_bio_\"+str(task)+\".dat\")):\n data_dict = self.acquire_data(task) # Read data from file\n line_list, tag_list = self.structure_data(data_dict) # Structures the data into proper form\n line_list = self.embed_input(line_list) # Embeds input data (words) into embeddings\n self.save_data([line_list, tag_list], os.path.join(self.save_path, \"object_dict_bio_\"+str(task)+\".dat\"))\n else:\n line_list, tag_list = pickle.load(open(os.path.join(self.save_path, \"object_dict_bio_\"+str(task)+\".dat\"), 'rb')) # Loading Data dictionary\n return line_list, tag_list\n\n def get_data(self, task='train'):\n line_list, tag_list = self.prepare_data(task)\n\n # Shuffling stories\n story_idx = list(range(0, len(line_list)))\n random.shuffle(story_idx)\n\n num_batch = int(len(story_idx)/self.batch_size)\n self.num_batches = num_batch\n\n # Out Data\n x_out = []\n y_out = []\n 
\n counter = 1\n\n for i in story_idx:\n if num_batch<=0:\n break\n\n x_out.append(line_list[i])\n y_out.append(tag_list[i])\n\n if counter % self.batch_size == 0:\n counter = 0\n \n # Padding and converting labels to one hot vectors\n x_out_pad, y_out_pad = self.padding(x_out, y_out)\n x_out_array = torch.tensor(x_out_pad.swapaxes(0, 1), dtype=torch.float32) # Converting from (batch_size x story_length x word size) to (story_length x batch_size x word size)\n y_out_array = torch.tensor(y_out_pad.swapaxes(0, 1), dtype=torch.long) # Converting from (batch_size x story_length x 1) to (story_length x batch_size x 1)\n\n x_out = []\n y_out = []\n num_batch -= 1\n\n yield (self.num_batches - num_batch), x_out_array, y_out_array\n counter += 1\n\n def train_model(self):\n # Here, the model is optimized using Cross Entropy Loss.\n loss_list = []\n seq_length = []\n last_batch = 0\n\n # self.load_model(1, 99, 13) # Loading Pre-Trained model to train further\n\n for j in range(self.num_epoch):\n for batch_num, X, Y in self.get_data(task='train'):\n self.optimizer.zero_grad() # Making old gradients zero before calculating the fresh ones\n self.machine.initialization(self.batch_size) # Initializing states\n Y_out = torch.empty((X.shape[0], X.shape[1], self.num_outputs), dtype=torch.float32) # dim: (seq_len x batch_size x num_output)\n\n # Feeding the DNC network all the data first and then predicting output\n # by giving zero vector as input and previous read states and hidden vector\n # and thus training vector this way to give outputs matching the labels\n\n embeddings = self.machine.backward_prediction(X) # Creating embeddings from data for backward calculation\n temp_size = X.shape[0]\n\n for i in range(temp_size):\n Y_out[i, :, :], _ = self.machine(X[i], embeddings[temp_size-i-1]) # Passing Embeddings from backwards\n\n loss = self.calc_loss(Y_out, Y)\n loss.backward()\n self.clip_grads()\n self.optimizer.step()\n\n class_bag = self.calc_cost(Y_out, Y)\n\n corr = 
class_bag['problem_cor']+class_bag['test_cor']+class_bag['treatment_cor']\n tot = class_bag['total']\n\n loss_list += [loss.item()]\n seq_length += [Y.shape[0]]\n\n if (batch_num % self.save_batch) == 0:\n self.save_model(j, batch_num)\n\n last_batch = batch_num\n print(\"Epoch: \" + str(j) + \"/\" + str(self.num_epoch) + \", Batch: \" + str(batch_num) + \"/\" + str(self.num_batches) + \", Loss: {0:.2f}, \".format(loss.item()) + \\\n \"Batch Accuracy (Entity Prediction): {0:.2f} %, \".format((float(corr)/float(tot))*100.0) + \"Batch Accuracy (Word Prediction): {0:.2f} %\".format(class_bag['word_pred_acc']))\n self.save_model(j, last_batch)\n\n def test_model(self): # Testing the model\n correct = 0\n total = 0\n result_dict = {}\n result_dict['total_problem'] = 0 # Total labels in data\n result_dict['total_test'] = 0 # Total labels in data\n result_dict['total_treatment'] = 0 # Total labels in data\n result_dict['correct_problem'] = 0 # Correctly classified labels\n result_dict['correct_test'] = 0 # Correctly classified labels\n result_dict['correct_treatment'] = 0 # Correctly classified labels\n result_dict['false_positive_problem'] = 0 # False Positive labels\n result_dict['false_positive_test'] = 0 # False Positive labels\n result_dict['false_positive_treatment'] = 0 # False Positive labels\n print(\"\\n\")\n\n for batch_num, X, Y in self.get_data(task='test'):\n self.machine.initialization(self.batch_size) # Initializing states\n Y_out = torch.empty((X.shape[0], X.shape[1], self.num_outputs), dtype=torch.float32) # dim: (seq_len x batch_size x num_output)\n\n # Feeding the DNC network all the data first and then predicting output\n # by giving zero vector as input and previous read states and hidden vector\n # and thus training vector this way to give outputs matching the labels\n\n embeddings = self.machine.backward_prediction(X) # Creating embeddings from data for backward calculation\n temp_size = X.shape[0]\n\n for i in range(temp_size):\n Y_out[i, :, :], _ 
= self.machine(X[i], embeddings[temp_size-i-1])\n\n class_bag = self.calc_cost(Y_out, Y)\n\n corr = class_bag['problem_cor']+class_bag['test_cor']+class_bag['treatment_cor']\n tot = class_bag['total']\n\n result_dict['total_problem'] = result_dict['total_problem'] + class_bag['problem']\n result_dict['total_test'] = result_dict['total_test'] + class_bag['test']\n result_dict['total_treatment'] = result_dict['total_treatment'] + class_bag['treatment']\n result_dict['correct_problem'] = result_dict['correct_problem'] + class_bag['problem_cor']\n result_dict['correct_test'] = result_dict['correct_test'] + class_bag['test_cor']\n result_dict['correct_treatment'] = result_dict['correct_treatment'] + class_bag['treatment_cor']\n result_dict['false_positive_problem'] = result_dict['false_positive_problem'] + class_bag['problem_fp']\n result_dict['false_positive_test'] = result_dict['false_positive_test'] + class_bag['test_fp']\n result_dict['false_positive_treatment'] = result_dict['false_positive_treatment'] + class_bag['treatment_fp']\n\n correct += corr\n total += tot\n print(\"Test Example \" + str(batch_num) + \"/\" + str(self.num_batches) + \" processed, Batch Accuracy: {0:.2f} %, \".format((float(corr)/float(tot))*100.0) + \"Batch Accuracy (Word Prediction): {0:.2f} %\".format(class_bag['word_pred_acc']))\n \n result_dict['accuracy'] = (float(correct)/float(total))*100.0\n result_dict = self.calc_metrics(result_dict)\n print(\"\\nOverall Entity Prediction Accuracy: {0:.2f} %\".format(result_dict['accuracy']))\n return result_dict\n\n def calc_metrics(self, result_dict): # Calculates Certain Metrices\n precision_p = float(result_dict['correct_problem'])/float(result_dict['correct_problem'] + result_dict['false_positive_problem']) # Problem Precision\n recall_p = float(result_dict['correct_problem'])/float(result_dict['total_problem']) # Problem Recall\n\n precision_ts = float(result_dict['correct_test'])/float(result_dict['correct_test'] + 
result_dict['false_positive_test']) # Test Precision\n recall_ts = float(result_dict['correct_test'])/float(result_dict['total_test']) # Test Recall\n\n precision_tr = float(result_dict['correct_treatment'])/float(result_dict['correct_treatment'] + result_dict['false_positive_treatment']) # Treatment Precision\n recall_tr = float(result_dict['correct_treatment'])/float(result_dict['total_treatment']) # Treatment Recall\n\n f_score_p = 2*precision_p*recall_p/(precision_p+recall_p) # Problem F1 Score\n f_score_ts = 2*precision_ts*recall_ts/(precision_ts+recall_ts) # Test F1 Score\n f_score_tr = 2*precision_tr*recall_tr/(precision_tr+recall_tr) # Treatment F1 Score\n\n result_dict['problem_precision'] = precision_p\n result_dict['problem_recall'] = recall_p\n result_dict['problem_f1'] = f_score_p\n result_dict['test_precision'] = precision_ts\n result_dict['test_recall'] = recall_ts\n result_dict['test_f1'] = f_score_ts\n result_dict['treatment_precision'] = precision_tr\n result_dict['treatment_recall'] = recall_tr\n result_dict['treatment_f1'] = f_score_tr\n result_dict['macro_average_f1'] = (f_score_p + f_score_ts + f_score_tr)/3.0 # Macro Average F1 Score\n\n # Micro Average F1 Score\n correct_sum = result_dict['correct_problem'] + result_dict['correct_test'] + result_dict['correct_treatment']\n fp_sum = result_dict['false_positive_problem'] + result_dict['false_positive_test'] + result_dict['false_positive_treatment']\n total_sum = result_dict['total_problem'] + result_dict['total_test'] + result_dict['total_treatment']\n \n precision_avg = float(correct_sum)/float(correct_sum + fp_sum)\n recall_avg = float(correct_sum)/float(total_sum)\n result_dict['micro_average_f1'] = 2*precision_avg*recall_avg/(precision_avg+recall_avg)\n\n return result_dict\n\n def save_model(self, curr_epoch, curr_batch):\n # Here 'start_epoch' and 'start_batch' params below are the 'epoch' and 'batch' number from which to start training after next model loading\n # Note: It is 
recommended to start from the 'start_epoch' and not 'start_epoch' + 'start_batch', because batches are formed randomly\n if not os.path.exists(os.path.join(self.model_path, self.name)):\n os.mkdir(os.path.join(self.model_path, self.name))\n state_dic = {'task_name': self.name, 'start_epoch': curr_epoch + 1, 'start_batch': curr_batch + 1, 'state_dict': self.machine.state_dict(), 'optimizer_dic' : self.optimizer.state_dict()}\n filename = self.model_path + self.name + \"/\" + self.name + \"_\" + str(curr_epoch) + \"_\" + str(curr_batch) + \"_saved_model.pth.tar\"\n torch.save(state_dic, filename)\n\n def load_model(self, option, epoch, batch):\n path = self.model_path + self.name + \"/\" + self.name + \"_\" + str(epoch) + \"_\" + str(batch) + \"_saved_model.pth.tar\"\n if option == 1: # Loading for training\n checkpoint = torch.load(path)\n self.machine.load_state_dict(checkpoint['state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer_dic'])\n else: # Loading for testing\n checkpoint = torch.load(path)\n self.machine.load_state_dict(checkpoint['state_dict'])\n self.machine.eval()",
"step-ids": [
12,
20,
26,
27,
29
]
}
|
[
12,
20,
26,
27,
29
] |
__author__ = 'matthias'
from tcp import *
from data import *
#SERVER = "131.225.237.31"
#PORT = 33487
data = LaserData()
#server = TCP(SERVER, PORT)
server = TCP()
server.start_server()
for i in range(100):
data = server.recv_server()
print data
|
normal
|
{
"blob_id": "1e4d18909b72ceef729efdd7b2ab996ace45f1bd",
"index": 6367,
"step-1": "__author__ = 'matthias'\n\nfrom tcp import *\nfrom data import *\n\n#SERVER = \"131.225.237.31\"\n#PORT = 33487\n\ndata = LaserData()\n#server = TCP(SERVER, PORT)\nserver = TCP()\nserver.start_server()\nfor i in range(100):\n data = server.recv_server()\n\n print data\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of CbM (https://github.com/ec-jrc/cbm).
# Author : Konstantinos Anastasakis
# Credits : GTCAP Team
# Copyright : 2021 European Commission, Joint Research Centre
# License : 3-Clause BSD
from ipywidgets import (Text, VBox, HBox, Label, Password, RadioButtons,
Button, Layout, Box, Tab, Output, Dropdown,
FloatText, BoundedIntText, Combobox)
from cbm.utils import config, data_options
from cbm.ipycbm.utils import settings
from cbm.sources import database
def widget_box():
    """Build the data-source settings widget.

    Shows a radio selector for the data source (0: JRC RESTful API,
    1: direct database/object-storage access), the option panel for the
    currently selected source, and the general settings panel below.

    Returns:
        VBox: the assembled settings widget tree.
    """
    source = int(config.get_value(['set', 'data_source']))

    sources = RadioButtons(
        options=[
            ("JRC RESTful API.", 0),
            ("Direct access to database and object storage.", 1)
        ],
        value=source,
        layout={'width': 'max-content'}
    )

    sources_box = Box([
        Label(value="Data sources:"),
        sources]
    )

    info_api = Label("RESTful API Settings.")
    info_direct = Label("Direct access settings")

    view_options = VBox([info_direct])

    def _panel_for(value):
        # Single source of truth for the source -> option-panel mapping.
        # Previously this if/elif was duplicated in the initial render and
        # in the change observer, which had to be kept in sync by hand.
        if value == 0:
            return [info_api, rest_api()]
        if value == 1:
            return [info_direct, direct()]
        return None  # unknown source value

    initial = _panel_for(source)
    if initial is not None:
        view_options.children = initial

    def on_source_change(change):
        # Rebuild the panel for the newly selected source and persist
        # the choice in the configuration file.
        view_options.children = _panel_for(sources.value) or []
        config.update(['set', 'data_source'], str(sources.value))

    sources.observe(on_source_change, 'value')

    wbox_sources = VBox([sources_box, view_options],
                        layout=Layout(border='1px solid black'))

    info_general = Label(value="General settings:")

    wbox = VBox([wbox_sources, info_general, settings.widget_box()])

    return wbox
def rest_api(mode=None):
    """Build the RESTful API credentials panel.

    Pre-fills URL, user and password fields from the configuration file;
    the Save button writes any changes back (the password only when the
    field is non-empty, so an untouched field keeps the stored value).

    Returns:
        VBox: the credentials form with a log output area at the bottom.
    """
    cfg = config.read()

    wt_url = Text(value=cfg['api']['url'], placeholder='Add URL',
                  description='API URL:', disabled=False)
    wt_user = Text(value=cfg['api']['user'], placeholder='Username',
                   description='API User:', disabled=False)
    wt_pass = Password(value=cfg['api']['pass'], placeholder='******',
                       description='API Password:', disabled=False)
    wb_save = Button(description='Save', disabled=False, icon='save')
    progress = Output()

    def outlog(*text):
        # Route messages into the Output widget shown under the form.
        with progress:
            print(*text)

    @wb_save.on_click
    def wb_save_on_click(b):
        config.update(['api', 'url'], str(wt_url.value))
        config.update(['api', 'user'], str(wt_user.value))
        # Only overwrite the stored password when a new one was typed.
        if wt_pass.value != '':
            config.update(['api', 'pass'], str(wt_pass.value))
        outlog("API information is updated")

    return VBox([wt_url, wt_user, wt_pass, wb_save, progress])
def direct():
    """Assemble the direct-access settings as a two-tab widget.

    Tab 0 holds the connection parameters, tab 1 the per-dataset
    database configuration.

    Returns:
        Tab: the two-tab settings widget.
    """
    tab_box = Tab(children=[settings.direct_conn(), direct_settings()])
    for idx, title in enumerate(('Connection', 'db Configuration')):
        tab_box.set_title(idx, title)
    return tab_box
def direct_settings():
    """Build the per-dataset configuration panel for direct DB access.

    Top row: dataset and year selectors plus buttons to refresh, delete a
    dataset or a single year, and open the detailed configuration form
    (``dsc_config``) for the selected dataset. All handlers read/write the
    configuration file via ``config`` and log into a shared Output widget.

    Returns:
        VBox: the dataset-configuration widget tree.
    """
    values = config.read()
    ds_def = values['set']['ds_conf']
    ds_dye = values['set']['ds_year']
    # Fall back to the first configured dataset if the stored default
    # no longer exists in the configuration.
    if ds_def not in [d for d in values['ds_conf']]:
        ds_def = [d for d in values['ds_conf']][0]

    # Dataset selector.
    dsc = Dropdown(
        options=[d for d in values['ds_conf']],
        value=ds_def,
        description='Default:',
        disabled=False,
        layout=Layout(width='200px')
    )

    # Year selector, populated from the currently selected dataset.
    dsy = Dropdown(
        options=[int(y) for y in values['ds_conf'][dsc.value]['years']],
        value=int(ds_dye),
        description='Dataset year:',
        disabled=False,
        layout=Layout(width='180px')
    )

    btn_refresh = Button(
        layout=Layout(width='35px'),
        icon='fa-refresh')

    @btn_refresh.on_click
    def btn_refresh_on_click(b):
        # Re-read the configuration file and resync both dropdowns.
        values = config.read()
        ds_c = values['set']['ds_conf']
        ds_y = values['set']['ds_year']
        dsc.options = [d for d in values['ds_conf']]
        dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]
        dsc.value = ds_c
        dsy.value = int(ds_y)

    def on_dsc_change(change):
        # Persist the new default dataset and refresh the year options.
        config.update(['set', 'ds_conf'], dsc.value)
        values = config.read()
        ds_c = values['set']['ds_conf']
        dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]
    dsc.observe(on_dsc_change, 'value')

    def on_dsy_change(change):
        # Persist the newly selected dataset year.
        config.update(['set', 'ds_year'], str(dsy.value))
    dsy.observe(on_dsy_change, 'value')

    bt_set = Button(layout=Layout(width='40px'), icon='cogs',
                    tooltip="Configure this dataset")
    bt_new = Button(layout=Layout(width='40px'), icon='plus',
                    tooltip="Add new dataset configuration")
    bt_rec = Button(layout=Layout(width='40px'), icon='trash-alt',
                    tooltip='Delete dataset configuration')
    bt_rey = Button(layout=Layout(width='40px'), icon='trash-alt',
                    tooltip='Delete only the selected year.')

    dsc_box = HBox([dsc, btn_refresh, bt_rec, dsy, bt_set, bt_rey, bt_new])

    progress = Output()

    def outlog(*text):
        # Route messages into the shared Output widget.
        with progress:
            print(*text)

    def dsc_config(dsc_value):
        """Build the detailed configuration form for dataset *dsc_value*.

        Lets the user edit table/column mappings, description and map view
        defaults for one dataset/year; Save writes everything back to the
        configuration file keyed by the (possibly new) AOI code.
        """
        values = config.read()
        ds_db = Dropdown(
            options=["1"],
            value="1",
            description='Database:',
            disabled=False,
            layout=Layout(width='140px')
        )

        # Prefer a previously stored table prefix as the AOI code;
        # fall back to the selected dataset name.
        try:
            with open(f"{config.get_value(['paths','temp'])}tb_prefix", 'r') as f:
                code_value = f.read()
        except Exception:
            code_value = dsc_value

        ds_code = Combobox(
            value=code_value,
            placeholder='abc',
            options=[m for m in data_options.eu_ms()]+[''],
            description='AOI code:',
            ensure_option=False,
            disabled=False,
            layout=Layout(width='200px'),
            tooltip='Lowercase AOI code name for the dataset (5chr max).'
        )

        ds_year = BoundedIntText(
            value=int(dsy.value),
            min=1980,
            max=2100,
            step=1,
            description='Dataset year:',
            disabled=False,
            layout=Layout(width='180px')
        )

        ds_desc = Text(
            value=values['ds_conf'][dsc_value]['desc'],
            description='Description:',
            disabled=False
        )

        info_map_text = ["Set default map view options. ",
                         "You can get automatically the dataset ",
                         "center coordinates."]

        # Stored as a single "lat,lon" string in the configuration.
        lat, lon = values['ds_conf'][dsc_value]['center'].split(",")
        map_cent_lat = FloatText(
            value=float(lat),
            description='Lat:',
            disabled=False,
            layout=Layout(width='160px')
        )
        map_cent_lon = FloatText(
            value=float(lon),
            description='Lon:',
            disabled=False,
            layout=Layout(width='160px')
        )
        map_zoom = BoundedIntText(
            value=values['ds_conf'][dsc_value]['zoom'],
            min=0,
            max=20,
            step=1,
            description='Zoom:',
            disabled=False,
            layout=Layout(width='140px')
        )
        bt_get_center = Button(
            layout=Layout(width='40px'),
            icon='bullseye',
            tooltip='Get center point from database.'
        )

        ds_box = HBox([ds_code, ds_year, ds_desc])
        map_box = HBox([Label("Map center: "), map_cent_lat,
                        map_cent_lon, bt_get_center, map_zoom])

        info_config = Label(
            """Change 'AOI code' value to create a new configuration set or
            leave the same 'AOI code' value to configure the selected one.""")

        db = int(values['ds_conf'][dsc_value]['db'])

        def get_tb_list():
            # List tables of the selected database; empty list on failure.
            tbls = database.tables(db, None, False)
            if tbls is None:
                return []
            else:
                return tbls

        # Table-mapping dropdowns; config.autoselect pre-picks the stored
        # table name if it is still present in the database.
        tb_dc = Dropdown(
            options=get_tb_list(),
            value=config.autoselect(
                values['ds_conf'][dsc_value]['years'][
                    str(ds_year.value)]['tables']['dias_catalog'],
                get_tb_list(), False),
            description='DIAS catalog:',
            disabled=False
        )

        tb_pr = Dropdown(
            options=get_tb_list(),
            value=config.autoselect(
                values['ds_conf'][dsc_value]['years'][
                    str(ds_year.value)]['tables']['parcels'],
                get_tb_list(), False),
            description='Parcels:',
            disabled=False
        )

        def get_pr_columns():
            # Columns of the currently selected parcels table;
            # empty list on any failure.
            try:
                colms = database.table_columns(tb_pr.value, 1, None)
                if colms is None:
                    return []
                else:
                    return colms
            except Exception:
                return []

        # Column-mapping dropdowns for the parcels table.
        tc_id = Dropdown(
            options=get_pr_columns(),
            value=config.autoselect(
                values['ds_conf'][dsc_value]['years'][
                    str(ds_year.value)]['columns']['parcels_id'],
                get_pr_columns(), False),
            description='Parcels ID:',
            disabled=False,
            layout=Layout(width='180px')
        )
        tc_cn = Dropdown(
            options=get_pr_columns(),
            value=config.autoselect(
                values['ds_conf'][dsc_value]['years'][
                    str(ds_year.value)]['columns']['crop_names'],
                get_pr_columns(), False),
            description='Crop names:',
            disabled=False,
            layout=Layout(width='180px')
        )
        tc_cc = Dropdown(
            options=get_pr_columns(),
            value=config.autoselect(
                values['ds_conf'][dsc_value]['years'][
                    str(ds_year.value)]['columns']['crop_codes'],
                get_pr_columns(), False),
            description='Crop codes:',
            disabled=False,
            layout=Layout(width='180px')
        )

        def on_tb_pr_change(change):
            # Changing the parcels table invalidates the column choices.
            tc_id.options = get_pr_columns()
            tc_cn.options = get_pr_columns()
            tc_cc.options = get_pr_columns()
        tb_pr.observe(on_tb_pr_change, 'value')

        parcel_box = HBox([tb_pr, tc_id, tc_cn, tc_cc])

        tb_s2 = Dropdown(
            options=get_tb_list(),
            value=config.autoselect(
                values['ds_conf'][dsc_value]['years'][
                    str(ds_year.value)]['tables']['s2'],
                get_tb_list(), False),
            description='S2 signatures:',
            disabled=False
        )
        tb_bs = Dropdown(
            options=get_tb_list(),
            value=config.autoselect(
                values['ds_conf'][dsc_value]['years'][
                    str(ds_year.value)]['tables']['bs'],
                get_tb_list(), False),
            description='Backscattering:',
            disabled=False
        )
        tb_6c = Dropdown(
            options=get_tb_list(),
            value=config.autoselect(
                values['ds_conf'][dsc_value]['years'][
                    str(ds_year.value)]['tables']['c6'],
                get_tb_list(), False),
            description='6 day coherence:',
            disabled=False
        )

        wb_save = Button(
            description='Save',
            disabled=False,
            icon='save'
        )

        @bt_get_center.on_click
        def bt_get_center_on_click(b):
            # Query the parcels table centroid (GeoJSON point) and use it
            # as the default map center at a fixed zoom.
            import json
            center_json = json.loads(
                database.getTableCentroid(tb_pr.value)['center'][0])
            map_cent_lat.value = round(center_json['coordinates'][1], 2)
            map_cent_lon.value = round(center_json['coordinates'][0], 2)
            map_zoom.value = 10

        @wb_save.on_click
        def wb_save_on_click(b):
            # Persist every field under the AOI code currently typed in
            # ds_code: a new code creates a new configuration set, the
            # same code overwrites the selected one.
            progress.clear_output()
            dscode = ds_code.value
            config.update(['ds_conf', dscode, 'years', str(ds_year.value),
                           'tables', 'dias_catalog'], str(tb_dc.value))
            config.update(['ds_conf', dscode, 'years', str(ds_year.value),
                           'tables', 'parcels'], str(tb_pr.value))
            config.update(['ds_conf', dscode, 'years', str(ds_year.value),
                           'columns', 'parcels_id'], str(tc_id.value))
            config.update(['ds_conf', dscode, 'years', str(ds_year.value),
                           'columns', 'crop_names'], str(tc_cn.value))
            config.update(['ds_conf', dscode, 'years', str(ds_year.value),
                           'columns', 'crop_codes'], str(tc_cc.value))
            config.update(['ds_conf', dscode, 'years', str(ds_year.value),
                           'tables', 's2'], str(tb_s2.value))
            config.update(['ds_conf', dscode, 'years', str(ds_year.value),
                           'tables', 'bs'], str(tb_bs.value))
            config.update(['ds_conf', dscode, 'years', str(ds_year.value),
                           'tables', 'c6'], str(tb_6c.value))
            config.update(['ds_conf', dscode,
                           'db'], str(ds_db.value))
            config.update(['ds_conf', dscode,
                           'desc'], str(ds_desc.value))
            config.update(['ds_conf', dscode, 'center'],
                          f"{map_cent_lat.value},{map_cent_lon.value}")
            config.update(['ds_conf', dscode,
                           'zoom'], str(map_zoom.value))
            config.update(['set', 'ds_conf'], str(dscode))
            config.update(['set', 'ds_year'], str(ds_year.value))
            values = config.read()

            # Resync the outer dataset/year dropdowns with the saved state.
            ds_c = values['set']['ds_conf']
            ds_y = values['set']['ds_year']
            dsc.options = [d for d in values['ds_conf']]
            dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]
            dsc.value = ds_c
            dsy.value = int(ds_y)
            outlog("The configurations are saved.")

        return VBox([info_config, ds_box, parcel_box,
                     tb_dc, tb_s2, tb_bs, tb_6c,
                     Label(''.join(info_map_text)), map_box, wb_save])

    # Container the configuration form is mounted into when toggled open.
    dsc_new_box = HBox([])

    @bt_set.on_click
    def bt_set_on_click(b):
        # Toggle the configuration form for the selected dataset.
        if dsc_new_box.children == ():
            dsc_new_box.children = [dsc_config(dsc.value)]
            bt_set.icon = 'chevron-up'
        else:
            dsc_new_box.children = ()
            bt_set.icon = 'cogs'

    @bt_new.on_click
    def bt_new_on_click(b):
        # Same toggle as bt_set; a new set is created by changing the
        # AOI code inside the form before saving.
        if dsc_new_box.children == ():
            dsc_new_box.children = [dsc_config(dsc.value)]
            bt_set.icon = 'chevron-up'
        else:
            dsc_new_box.children = ()
            bt_set.icon = 'cogs'

    @bt_rec.on_click
    def bt_rec_on_click(b):
        # Delete the whole selected dataset configuration,
        # refusing to remove the last remaining one.
        progress.clear_output()
        if len(dsc.options) > 1:
            config.delete(['ds_conf', dsc.value])
            outlog(f"Dataset configuration '{dsc.value}' is deleted.")
            values = config.read()
            dsc.options = [d for d in values['ds_conf']]
        else:
            outlog("Can not remove last configuration.")

    @bt_rey.on_click
    def bt_rey_on_click(b):
        # Delete only the selected year of the selected dataset,
        # refusing to remove the last remaining year.
        progress.clear_output()
        if len(dsy.options) > 1:
            config.delete(['ds_conf', dsc.value, 'years', str(dsy.value)])
            outlog(f"Year {dsy.value} of dataset '{dsc.value}' is deleted.")
            values = config.read()
            dsy.options = [int(y) for y in values['ds_conf']
                           [str(dsc.value)]['years']]
        else:
            outlog("Can not remove last configuration.")

    wbox = VBox([Label("Datasets configurations."), dsc_box,
                 dsc_new_box, progress])

    return wbox
|
normal
|
{
"blob_id": "22afc6b9df87ef1eba284da20a807366278c24d4",
"index": 1343,
"step-1": "<mask token>\n\n\ndef rest_api(mode=None):\n \"\"\"\"\"\"\n values = config.read()\n wt_url = Text(value=values['api']['url'], placeholder='Add URL',\n description='API URL:', disabled=False)\n wt_user = Text(value=values['api']['user'], placeholder='Username',\n description='API User:', disabled=False)\n wt_pass = Password(value=values['api']['pass'], placeholder='******',\n description='API Password:', disabled=False)\n wb_save = Button(description='Save', disabled=False, icon='save')\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n\n @wb_save.on_click\n def wb_save_on_click(b):\n config.update(['api', 'url'], str(wt_url.value))\n config.update(['api', 'user'], str(wt_user.value))\n if wt_pass.value != '':\n config.update(['api', 'pass'], str(wt_pass.value))\n outlog('API information is updated')\n wbox = VBox([wt_url, wt_user, wt_pass, wb_save, progress])\n return wbox\n\n\n<mask token>\n\n\ndef direct_settings():\n values = config.read()\n ds_def = values['set']['ds_conf']\n ds_dye = values['set']['ds_year']\n if ds_def not in [d for d in values['ds_conf']]:\n ds_def = [d for d in values['ds_conf']][0]\n dsc = Dropdown(options=[d for d in values['ds_conf']], value=ds_def,\n description='Default:', disabled=False, layout=Layout(width='200px'))\n dsy = Dropdown(options=[int(y) for y in values['ds_conf'][dsc.value][\n 'years']], value=int(ds_dye), description='Dataset year:', disabled\n =False, layout=Layout(width='180px'))\n btn_refresh = Button(layout=Layout(width='35px'), icon='fa-refresh')\n\n @btn_refresh.on_click\n def btn_refresh_on_click(b):\n values = config.read()\n ds_c = values['set']['ds_conf']\n ds_y = values['set']['ds_year']\n dsc.options = [d for d in values['ds_conf']]\n dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]\n dsc.value = ds_c\n dsy.value = int(ds_y)\n\n def on_dsc_change(change):\n config.update(['set', 'ds_conf'], dsc.value)\n values = config.read()\n ds_c = 
values['set']['ds_conf']\n dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]\n dsc.observe(on_dsc_change, 'value')\n\n def on_dsy_change(change):\n config.update(['set', 'ds_year'], str(dsy.value))\n dsy.observe(on_dsy_change, 'value')\n bt_set = Button(layout=Layout(width='40px'), icon='cogs', tooltip=\n 'Configure this dataset')\n bt_new = Button(layout=Layout(width='40px'), icon='plus', tooltip=\n 'Add new dataset configuration')\n bt_rec = Button(layout=Layout(width='40px'), icon='trash-alt', tooltip=\n 'Delete dataset configuration')\n bt_rey = Button(layout=Layout(width='40px'), icon='trash-alt', tooltip=\n 'Delete only the selected year.')\n dsc_box = HBox([dsc, btn_refresh, bt_rec, dsy, bt_set, bt_rey, bt_new])\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n\n def dsc_config(dsc_value):\n values = config.read()\n ds_db = Dropdown(options=['1'], value='1', description='Database:',\n disabled=False, layout=Layout(width='140px'))\n try:\n with open(f\"{config.get_value(['paths', 'temp'])}tb_prefix\", 'r'\n ) as f:\n code_value = f.read()\n except Exception:\n code_value = dsc_value\n ds_code = Combobox(value=code_value, placeholder='abc', options=[m for\n m in data_options.eu_ms()] + [''], description='AOI code:',\n ensure_option=False, disabled=False, layout=Layout(width=\n '200px'), tooltip=\n 'Lowercase AOI code name for the dataset (5chr max).')\n ds_year = BoundedIntText(value=int(dsy.value), min=1980, max=2100,\n step=1, description='Dataset year:', disabled=False, layout=\n Layout(width='180px'))\n ds_desc = Text(value=values['ds_conf'][dsc_value]['desc'],\n description='Description:', disabled=False)\n info_map_text = ['Set default map view options. 
',\n 'You can get automatically the dataset ', 'center coordinates.']\n lat, lon = values['ds_conf'][dsc_value]['center'].split(',')\n map_cent_lat = FloatText(value=float(lat), description='Lat:',\n disabled=False, layout=Layout(width='160px'))\n map_cent_lon = FloatText(value=float(lon), description='Lon:',\n disabled=False, layout=Layout(width='160px'))\n map_zoom = BoundedIntText(value=values['ds_conf'][dsc_value]['zoom'\n ], min=0, max=20, step=1, description='Zoom:', disabled=False,\n layout=Layout(width='140px'))\n bt_get_center = Button(layout=Layout(width='40px'), icon='bullseye',\n tooltip='Get center point from database.')\n ds_box = HBox([ds_code, ds_year, ds_desc])\n map_box = HBox([Label('Map center: '), map_cent_lat, map_cent_lon,\n bt_get_center, map_zoom])\n info_config = Label(\n \"\"\"Change 'AOI code' value to create a new configuration set or \n leave the same 'AOI code' value to configure the selected one.\"\"\"\n )\n db = int(values['ds_conf'][dsc_value]['db'])\n\n def get_tb_list():\n tbls = database.tables(db, None, False)\n if tbls is None:\n return []\n else:\n return tbls\n tb_dc = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['dias_catalog'], get_tb_list(), False), description=\n 'DIAS catalog:', disabled=False)\n tb_pr = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['parcels'], get_tb_list(), False), description=\n 'Parcels:', disabled=False)\n\n def get_pr_columns():\n try:\n colms = database.table_columns(tb_pr.value, 1, None)\n if colms is None:\n return []\n else:\n return colms\n except Exception:\n return []\n tc_id = Dropdown(options=get_pr_columns(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'columns']['parcels_id'], get_pr_columns(), False), description\n ='Parcels ID:', disabled=False, 
layout=Layout(width='180px'))\n tc_cn = Dropdown(options=get_pr_columns(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'columns']['crop_names'], get_pr_columns(), False), description\n ='Crop names:', disabled=False, layout=Layout(width='180px'))\n tc_cc = Dropdown(options=get_pr_columns(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'columns']['crop_codes'], get_pr_columns(), False), description\n ='Crop codes:', disabled=False, layout=Layout(width='180px'))\n\n def on_tb_pr_change(change):\n tc_id.options = get_pr_columns()\n tc_cn.options = get_pr_columns()\n tc_cc.options = get_pr_columns()\n tb_pr.observe(on_tb_pr_change, 'value')\n parcel_box = HBox([tb_pr, tc_id, tc_cn, tc_cc])\n tb_s2 = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['s2'], get_tb_list(), False), description=\n 'S2 signatures:', disabled=False)\n tb_bs = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['bs'], get_tb_list(), False), description=\n 'Backscattering:', disabled=False)\n tb_6c = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['c6'], get_tb_list(), False), description=\n '6 day coherence:', disabled=False)\n wb_save = Button(description='Save', disabled=False, icon='save')\n\n @bt_get_center.on_click\n def bt_get_center_on_click(b):\n import json\n center_json = json.loads(database.getTableCentroid(tb_pr.value)\n ['center'][0])\n map_cent_lat.value = round(center_json['coordinates'][1], 2)\n map_cent_lon.value = round(center_json['coordinates'][0], 2)\n map_zoom.value = 10\n\n @wb_save.on_click\n def wb_save_on_click(b):\n progress.clear_output()\n dscode = ds_code.value\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 
'dias_catalog'], str(tb_dc.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 'parcels'], str(tb_pr.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'columns', 'parcels_id'], str(tc_id.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'columns', 'crop_names'], str(tc_cn.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'columns', 'crop_codes'], str(tc_cc.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 's2'], str(tb_s2.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 'bs'], str(tb_bs.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 'c6'], str(tb_6c.value))\n config.update(['ds_conf', dscode, 'db'], str(ds_db.value))\n config.update(['ds_conf', dscode, 'desc'], str(ds_desc.value))\n config.update(['ds_conf', dscode, 'center'],\n f'{map_cent_lat.value},{map_cent_lon.value}')\n config.update(['ds_conf', dscode, 'zoom'], str(map_zoom.value))\n config.update(['set', 'ds_conf'], str(dscode))\n config.update(['set', 'ds_year'], str(ds_year.value))\n values = config.read()\n ds_c = values['set']['ds_conf']\n ds_y = values['set']['ds_year']\n dsc.options = [d for d in values['ds_conf']]\n dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]\n dsc.value = ds_c\n dsy.value = int(ds_y)\n outlog('The configurations are saved.')\n return VBox([info_config, ds_box, parcel_box, tb_dc, tb_s2, tb_bs,\n tb_6c, Label(''.join(info_map_text)), map_box, wb_save])\n dsc_new_box = HBox([])\n\n @bt_set.on_click\n def bt_set_on_click(b):\n if dsc_new_box.children == ():\n dsc_new_box.children = [dsc_config(dsc.value)]\n bt_set.icon = 'chevron-up'\n else:\n dsc_new_box.children = ()\n bt_set.icon = 'cogs'\n\n @bt_new.on_click\n def bt_new_on_click(b):\n if dsc_new_box.children == ():\n dsc_new_box.children = [dsc_config(dsc.value)]\n bt_set.icon = 'chevron-up'\n 
else:\n dsc_new_box.children = ()\n bt_set.icon = 'cogs'\n\n @bt_rec.on_click\n def bt_rec_on_click(b):\n progress.clear_output()\n if len(dsc.options) > 1:\n config.delete(['ds_conf', dsc.value])\n outlog(f\"Dataset configuration '{dsc.value}' is deleted.\")\n values = config.read()\n dsc.options = [d for d in values['ds_conf']]\n else:\n outlog('Can not remove last configuration.')\n\n @bt_rey.on_click\n def bt_rey_on_click(b):\n progress.clear_output()\n if len(dsy.options) > 1:\n config.delete(['ds_conf', dsc.value, 'years', str(dsy.value)])\n outlog(f\"Year {dsy.value} of dataset '{dsc.value}' is deleted.\")\n values = config.read()\n dsy.options = [int(y) for y in values['ds_conf'][str(dsc.value)\n ]['years']]\n else:\n outlog('Can not remove last configuration.')\n wbox = VBox([Label('Datasets configurations.'), dsc_box, dsc_new_box,\n progress])\n return wbox\n",
"step-2": "<mask token>\n\n\ndef widget_box():\n source = int(config.get_value(['set', 'data_source']))\n sources = RadioButtons(options=[('JRC RESTful API.', 0), (\n 'Direct access to database and object storage.', 1)], value=source,\n layout={'width': 'max-content'})\n sources_box = Box([Label(value='Data sources:'), sources])\n info_api = Label('RESTful API Settings.')\n info_direct = Label('Direct access settings')\n view_options = VBox([info_direct])\n if source == 0:\n view_options.children = [info_api, rest_api()]\n elif source == 1:\n view_options.children = [info_direct, direct()]\n\n def on_source_change(change):\n view_options.children = []\n if sources.value == 0:\n view_options.children = [info_api, rest_api()]\n elif sources.value == 1:\n view_options.children = [info_direct, direct()]\n config.update(['set', 'data_source'], str(sources.value))\n sources.observe(on_source_change, 'value')\n wbox_sources = VBox([sources_box, view_options], layout=Layout(border=\n '1px solid black'))\n info_general = Label(value='General settings:')\n wbox = VBox([wbox_sources, info_general, settings.widget_box()])\n return wbox\n\n\ndef rest_api(mode=None):\n \"\"\"\"\"\"\n values = config.read()\n wt_url = Text(value=values['api']['url'], placeholder='Add URL',\n description='API URL:', disabled=False)\n wt_user = Text(value=values['api']['user'], placeholder='Username',\n description='API User:', disabled=False)\n wt_pass = Password(value=values['api']['pass'], placeholder='******',\n description='API Password:', disabled=False)\n wb_save = Button(description='Save', disabled=False, icon='save')\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n\n @wb_save.on_click\n def wb_save_on_click(b):\n config.update(['api', 'url'], str(wt_url.value))\n config.update(['api', 'user'], str(wt_user.value))\n if wt_pass.value != '':\n config.update(['api', 'pass'], str(wt_pass.value))\n outlog('API information is updated')\n wbox = VBox([wt_url, 
wt_user, wt_pass, wb_save, progress])\n return wbox\n\n\n<mask token>\n\n\ndef direct_settings():\n values = config.read()\n ds_def = values['set']['ds_conf']\n ds_dye = values['set']['ds_year']\n if ds_def not in [d for d in values['ds_conf']]:\n ds_def = [d for d in values['ds_conf']][0]\n dsc = Dropdown(options=[d for d in values['ds_conf']], value=ds_def,\n description='Default:', disabled=False, layout=Layout(width='200px'))\n dsy = Dropdown(options=[int(y) for y in values['ds_conf'][dsc.value][\n 'years']], value=int(ds_dye), description='Dataset year:', disabled\n =False, layout=Layout(width='180px'))\n btn_refresh = Button(layout=Layout(width='35px'), icon='fa-refresh')\n\n @btn_refresh.on_click\n def btn_refresh_on_click(b):\n values = config.read()\n ds_c = values['set']['ds_conf']\n ds_y = values['set']['ds_year']\n dsc.options = [d for d in values['ds_conf']]\n dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]\n dsc.value = ds_c\n dsy.value = int(ds_y)\n\n def on_dsc_change(change):\n config.update(['set', 'ds_conf'], dsc.value)\n values = config.read()\n ds_c = values['set']['ds_conf']\n dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]\n dsc.observe(on_dsc_change, 'value')\n\n def on_dsy_change(change):\n config.update(['set', 'ds_year'], str(dsy.value))\n dsy.observe(on_dsy_change, 'value')\n bt_set = Button(layout=Layout(width='40px'), icon='cogs', tooltip=\n 'Configure this dataset')\n bt_new = Button(layout=Layout(width='40px'), icon='plus', tooltip=\n 'Add new dataset configuration')\n bt_rec = Button(layout=Layout(width='40px'), icon='trash-alt', tooltip=\n 'Delete dataset configuration')\n bt_rey = Button(layout=Layout(width='40px'), icon='trash-alt', tooltip=\n 'Delete only the selected year.')\n dsc_box = HBox([dsc, btn_refresh, bt_rec, dsy, bt_set, bt_rey, bt_new])\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n\n def dsc_config(dsc_value):\n values = config.read()\n ds_db = 
Dropdown(options=['1'], value='1', description='Database:',\n disabled=False, layout=Layout(width='140px'))\n try:\n with open(f\"{config.get_value(['paths', 'temp'])}tb_prefix\", 'r'\n ) as f:\n code_value = f.read()\n except Exception:\n code_value = dsc_value\n ds_code = Combobox(value=code_value, placeholder='abc', options=[m for\n m in data_options.eu_ms()] + [''], description='AOI code:',\n ensure_option=False, disabled=False, layout=Layout(width=\n '200px'), tooltip=\n 'Lowercase AOI code name for the dataset (5chr max).')\n ds_year = BoundedIntText(value=int(dsy.value), min=1980, max=2100,\n step=1, description='Dataset year:', disabled=False, layout=\n Layout(width='180px'))\n ds_desc = Text(value=values['ds_conf'][dsc_value]['desc'],\n description='Description:', disabled=False)\n info_map_text = ['Set default map view options. ',\n 'You can get automatically the dataset ', 'center coordinates.']\n lat, lon = values['ds_conf'][dsc_value]['center'].split(',')\n map_cent_lat = FloatText(value=float(lat), description='Lat:',\n disabled=False, layout=Layout(width='160px'))\n map_cent_lon = FloatText(value=float(lon), description='Lon:',\n disabled=False, layout=Layout(width='160px'))\n map_zoom = BoundedIntText(value=values['ds_conf'][dsc_value]['zoom'\n ], min=0, max=20, step=1, description='Zoom:', disabled=False,\n layout=Layout(width='140px'))\n bt_get_center = Button(layout=Layout(width='40px'), icon='bullseye',\n tooltip='Get center point from database.')\n ds_box = HBox([ds_code, ds_year, ds_desc])\n map_box = HBox([Label('Map center: '), map_cent_lat, map_cent_lon,\n bt_get_center, map_zoom])\n info_config = Label(\n \"\"\"Change 'AOI code' value to create a new configuration set or \n leave the same 'AOI code' value to configure the selected one.\"\"\"\n )\n db = int(values['ds_conf'][dsc_value]['db'])\n\n def get_tb_list():\n tbls = database.tables(db, None, False)\n if tbls is None:\n return []\n else:\n return tbls\n tb_dc = 
Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['dias_catalog'], get_tb_list(), False), description=\n 'DIAS catalog:', disabled=False)\n tb_pr = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['parcels'], get_tb_list(), False), description=\n 'Parcels:', disabled=False)\n\n def get_pr_columns():\n try:\n colms = database.table_columns(tb_pr.value, 1, None)\n if colms is None:\n return []\n else:\n return colms\n except Exception:\n return []\n tc_id = Dropdown(options=get_pr_columns(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'columns']['parcels_id'], get_pr_columns(), False), description\n ='Parcels ID:', disabled=False, layout=Layout(width='180px'))\n tc_cn = Dropdown(options=get_pr_columns(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'columns']['crop_names'], get_pr_columns(), False), description\n ='Crop names:', disabled=False, layout=Layout(width='180px'))\n tc_cc = Dropdown(options=get_pr_columns(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'columns']['crop_codes'], get_pr_columns(), False), description\n ='Crop codes:', disabled=False, layout=Layout(width='180px'))\n\n def on_tb_pr_change(change):\n tc_id.options = get_pr_columns()\n tc_cn.options = get_pr_columns()\n tc_cc.options = get_pr_columns()\n tb_pr.observe(on_tb_pr_change, 'value')\n parcel_box = HBox([tb_pr, tc_id, tc_cn, tc_cc])\n tb_s2 = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['s2'], get_tb_list(), False), description=\n 'S2 signatures:', disabled=False)\n tb_bs = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['bs'], 
get_tb_list(), False), description=\n 'Backscattering:', disabled=False)\n tb_6c = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['c6'], get_tb_list(), False), description=\n '6 day coherence:', disabled=False)\n wb_save = Button(description='Save', disabled=False, icon='save')\n\n @bt_get_center.on_click\n def bt_get_center_on_click(b):\n import json\n center_json = json.loads(database.getTableCentroid(tb_pr.value)\n ['center'][0])\n map_cent_lat.value = round(center_json['coordinates'][1], 2)\n map_cent_lon.value = round(center_json['coordinates'][0], 2)\n map_zoom.value = 10\n\n @wb_save.on_click\n def wb_save_on_click(b):\n progress.clear_output()\n dscode = ds_code.value\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 'dias_catalog'], str(tb_dc.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 'parcels'], str(tb_pr.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'columns', 'parcels_id'], str(tc_id.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'columns', 'crop_names'], str(tc_cn.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'columns', 'crop_codes'], str(tc_cc.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 's2'], str(tb_s2.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 'bs'], str(tb_bs.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 'c6'], str(tb_6c.value))\n config.update(['ds_conf', dscode, 'db'], str(ds_db.value))\n config.update(['ds_conf', dscode, 'desc'], str(ds_desc.value))\n config.update(['ds_conf', dscode, 'center'],\n f'{map_cent_lat.value},{map_cent_lon.value}')\n config.update(['ds_conf', dscode, 'zoom'], str(map_zoom.value))\n config.update(['set', 'ds_conf'], str(dscode))\n config.update(['set', 'ds_year'], 
str(ds_year.value))\n values = config.read()\n ds_c = values['set']['ds_conf']\n ds_y = values['set']['ds_year']\n dsc.options = [d for d in values['ds_conf']]\n dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]\n dsc.value = ds_c\n dsy.value = int(ds_y)\n outlog('The configurations are saved.')\n return VBox([info_config, ds_box, parcel_box, tb_dc, tb_s2, tb_bs,\n tb_6c, Label(''.join(info_map_text)), map_box, wb_save])\n dsc_new_box = HBox([])\n\n @bt_set.on_click\n def bt_set_on_click(b):\n if dsc_new_box.children == ():\n dsc_new_box.children = [dsc_config(dsc.value)]\n bt_set.icon = 'chevron-up'\n else:\n dsc_new_box.children = ()\n bt_set.icon = 'cogs'\n\n @bt_new.on_click\n def bt_new_on_click(b):\n if dsc_new_box.children == ():\n dsc_new_box.children = [dsc_config(dsc.value)]\n bt_set.icon = 'chevron-up'\n else:\n dsc_new_box.children = ()\n bt_set.icon = 'cogs'\n\n @bt_rec.on_click\n def bt_rec_on_click(b):\n progress.clear_output()\n if len(dsc.options) > 1:\n config.delete(['ds_conf', dsc.value])\n outlog(f\"Dataset configuration '{dsc.value}' is deleted.\")\n values = config.read()\n dsc.options = [d for d in values['ds_conf']]\n else:\n outlog('Can not remove last configuration.')\n\n @bt_rey.on_click\n def bt_rey_on_click(b):\n progress.clear_output()\n if len(dsy.options) > 1:\n config.delete(['ds_conf', dsc.value, 'years', str(dsy.value)])\n outlog(f\"Year {dsy.value} of dataset '{dsc.value}' is deleted.\")\n values = config.read()\n dsy.options = [int(y) for y in values['ds_conf'][str(dsc.value)\n ]['years']]\n else:\n outlog('Can not remove last configuration.')\n wbox = VBox([Label('Datasets configurations.'), dsc_box, dsc_new_box,\n progress])\n return wbox\n",
"step-3": "<mask token>\n\n\ndef widget_box():\n source = int(config.get_value(['set', 'data_source']))\n sources = RadioButtons(options=[('JRC RESTful API.', 0), (\n 'Direct access to database and object storage.', 1)], value=source,\n layout={'width': 'max-content'})\n sources_box = Box([Label(value='Data sources:'), sources])\n info_api = Label('RESTful API Settings.')\n info_direct = Label('Direct access settings')\n view_options = VBox([info_direct])\n if source == 0:\n view_options.children = [info_api, rest_api()]\n elif source == 1:\n view_options.children = [info_direct, direct()]\n\n def on_source_change(change):\n view_options.children = []\n if sources.value == 0:\n view_options.children = [info_api, rest_api()]\n elif sources.value == 1:\n view_options.children = [info_direct, direct()]\n config.update(['set', 'data_source'], str(sources.value))\n sources.observe(on_source_change, 'value')\n wbox_sources = VBox([sources_box, view_options], layout=Layout(border=\n '1px solid black'))\n info_general = Label(value='General settings:')\n wbox = VBox([wbox_sources, info_general, settings.widget_box()])\n return wbox\n\n\ndef rest_api(mode=None):\n \"\"\"\"\"\"\n values = config.read()\n wt_url = Text(value=values['api']['url'], placeholder='Add URL',\n description='API URL:', disabled=False)\n wt_user = Text(value=values['api']['user'], placeholder='Username',\n description='API User:', disabled=False)\n wt_pass = Password(value=values['api']['pass'], placeholder='******',\n description='API Password:', disabled=False)\n wb_save = Button(description='Save', disabled=False, icon='save')\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n\n @wb_save.on_click\n def wb_save_on_click(b):\n config.update(['api', 'url'], str(wt_url.value))\n config.update(['api', 'user'], str(wt_user.value))\n if wt_pass.value != '':\n config.update(['api', 'pass'], str(wt_pass.value))\n outlog('API information is updated')\n wbox = VBox([wt_url, 
wt_user, wt_pass, wb_save, progress])\n return wbox\n\n\ndef direct():\n tab_box = Tab(children=[settings.direct_conn(), direct_settings()])\n tab_box.set_title(0, 'Connection')\n tab_box.set_title(1, 'db Configuration')\n return tab_box\n\n\ndef direct_settings():\n values = config.read()\n ds_def = values['set']['ds_conf']\n ds_dye = values['set']['ds_year']\n if ds_def not in [d for d in values['ds_conf']]:\n ds_def = [d for d in values['ds_conf']][0]\n dsc = Dropdown(options=[d for d in values['ds_conf']], value=ds_def,\n description='Default:', disabled=False, layout=Layout(width='200px'))\n dsy = Dropdown(options=[int(y) for y in values['ds_conf'][dsc.value][\n 'years']], value=int(ds_dye), description='Dataset year:', disabled\n =False, layout=Layout(width='180px'))\n btn_refresh = Button(layout=Layout(width='35px'), icon='fa-refresh')\n\n @btn_refresh.on_click\n def btn_refresh_on_click(b):\n values = config.read()\n ds_c = values['set']['ds_conf']\n ds_y = values['set']['ds_year']\n dsc.options = [d for d in values['ds_conf']]\n dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]\n dsc.value = ds_c\n dsy.value = int(ds_y)\n\n def on_dsc_change(change):\n config.update(['set', 'ds_conf'], dsc.value)\n values = config.read()\n ds_c = values['set']['ds_conf']\n dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]\n dsc.observe(on_dsc_change, 'value')\n\n def on_dsy_change(change):\n config.update(['set', 'ds_year'], str(dsy.value))\n dsy.observe(on_dsy_change, 'value')\n bt_set = Button(layout=Layout(width='40px'), icon='cogs', tooltip=\n 'Configure this dataset')\n bt_new = Button(layout=Layout(width='40px'), icon='plus', tooltip=\n 'Add new dataset configuration')\n bt_rec = Button(layout=Layout(width='40px'), icon='trash-alt', tooltip=\n 'Delete dataset configuration')\n bt_rey = Button(layout=Layout(width='40px'), icon='trash-alt', tooltip=\n 'Delete only the selected year.')\n dsc_box = HBox([dsc, btn_refresh, bt_rec, dsy, 
bt_set, bt_rey, bt_new])\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n\n def dsc_config(dsc_value):\n values = config.read()\n ds_db = Dropdown(options=['1'], value='1', description='Database:',\n disabled=False, layout=Layout(width='140px'))\n try:\n with open(f\"{config.get_value(['paths', 'temp'])}tb_prefix\", 'r'\n ) as f:\n code_value = f.read()\n except Exception:\n code_value = dsc_value\n ds_code = Combobox(value=code_value, placeholder='abc', options=[m for\n m in data_options.eu_ms()] + [''], description='AOI code:',\n ensure_option=False, disabled=False, layout=Layout(width=\n '200px'), tooltip=\n 'Lowercase AOI code name for the dataset (5chr max).')\n ds_year = BoundedIntText(value=int(dsy.value), min=1980, max=2100,\n step=1, description='Dataset year:', disabled=False, layout=\n Layout(width='180px'))\n ds_desc = Text(value=values['ds_conf'][dsc_value]['desc'],\n description='Description:', disabled=False)\n info_map_text = ['Set default map view options. 
',\n 'You can get automatically the dataset ', 'center coordinates.']\n lat, lon = values['ds_conf'][dsc_value]['center'].split(',')\n map_cent_lat = FloatText(value=float(lat), description='Lat:',\n disabled=False, layout=Layout(width='160px'))\n map_cent_lon = FloatText(value=float(lon), description='Lon:',\n disabled=False, layout=Layout(width='160px'))\n map_zoom = BoundedIntText(value=values['ds_conf'][dsc_value]['zoom'\n ], min=0, max=20, step=1, description='Zoom:', disabled=False,\n layout=Layout(width='140px'))\n bt_get_center = Button(layout=Layout(width='40px'), icon='bullseye',\n tooltip='Get center point from database.')\n ds_box = HBox([ds_code, ds_year, ds_desc])\n map_box = HBox([Label('Map center: '), map_cent_lat, map_cent_lon,\n bt_get_center, map_zoom])\n info_config = Label(\n \"\"\"Change 'AOI code' value to create a new configuration set or \n leave the same 'AOI code' value to configure the selected one.\"\"\"\n )\n db = int(values['ds_conf'][dsc_value]['db'])\n\n def get_tb_list():\n tbls = database.tables(db, None, False)\n if tbls is None:\n return []\n else:\n return tbls\n tb_dc = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['dias_catalog'], get_tb_list(), False), description=\n 'DIAS catalog:', disabled=False)\n tb_pr = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['parcels'], get_tb_list(), False), description=\n 'Parcels:', disabled=False)\n\n def get_pr_columns():\n try:\n colms = database.table_columns(tb_pr.value, 1, None)\n if colms is None:\n return []\n else:\n return colms\n except Exception:\n return []\n tc_id = Dropdown(options=get_pr_columns(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'columns']['parcels_id'], get_pr_columns(), False), description\n ='Parcels ID:', disabled=False, 
layout=Layout(width='180px'))\n tc_cn = Dropdown(options=get_pr_columns(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'columns']['crop_names'], get_pr_columns(), False), description\n ='Crop names:', disabled=False, layout=Layout(width='180px'))\n tc_cc = Dropdown(options=get_pr_columns(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'columns']['crop_codes'], get_pr_columns(), False), description\n ='Crop codes:', disabled=False, layout=Layout(width='180px'))\n\n def on_tb_pr_change(change):\n tc_id.options = get_pr_columns()\n tc_cn.options = get_pr_columns()\n tc_cc.options = get_pr_columns()\n tb_pr.observe(on_tb_pr_change, 'value')\n parcel_box = HBox([tb_pr, tc_id, tc_cn, tc_cc])\n tb_s2 = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['s2'], get_tb_list(), False), description=\n 'S2 signatures:', disabled=False)\n tb_bs = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['bs'], get_tb_list(), False), description=\n 'Backscattering:', disabled=False)\n tb_6c = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['c6'], get_tb_list(), False), description=\n '6 day coherence:', disabled=False)\n wb_save = Button(description='Save', disabled=False, icon='save')\n\n @bt_get_center.on_click\n def bt_get_center_on_click(b):\n import json\n center_json = json.loads(database.getTableCentroid(tb_pr.value)\n ['center'][0])\n map_cent_lat.value = round(center_json['coordinates'][1], 2)\n map_cent_lon.value = round(center_json['coordinates'][0], 2)\n map_zoom.value = 10\n\n @wb_save.on_click\n def wb_save_on_click(b):\n progress.clear_output()\n dscode = ds_code.value\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 
'dias_catalog'], str(tb_dc.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 'parcels'], str(tb_pr.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'columns', 'parcels_id'], str(tc_id.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'columns', 'crop_names'], str(tc_cn.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'columns', 'crop_codes'], str(tc_cc.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 's2'], str(tb_s2.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 'bs'], str(tb_bs.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 'c6'], str(tb_6c.value))\n config.update(['ds_conf', dscode, 'db'], str(ds_db.value))\n config.update(['ds_conf', dscode, 'desc'], str(ds_desc.value))\n config.update(['ds_conf', dscode, 'center'],\n f'{map_cent_lat.value},{map_cent_lon.value}')\n config.update(['ds_conf', dscode, 'zoom'], str(map_zoom.value))\n config.update(['set', 'ds_conf'], str(dscode))\n config.update(['set', 'ds_year'], str(ds_year.value))\n values = config.read()\n ds_c = values['set']['ds_conf']\n ds_y = values['set']['ds_year']\n dsc.options = [d for d in values['ds_conf']]\n dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]\n dsc.value = ds_c\n dsy.value = int(ds_y)\n outlog('The configurations are saved.')\n return VBox([info_config, ds_box, parcel_box, tb_dc, tb_s2, tb_bs,\n tb_6c, Label(''.join(info_map_text)), map_box, wb_save])\n dsc_new_box = HBox([])\n\n @bt_set.on_click\n def bt_set_on_click(b):\n if dsc_new_box.children == ():\n dsc_new_box.children = [dsc_config(dsc.value)]\n bt_set.icon = 'chevron-up'\n else:\n dsc_new_box.children = ()\n bt_set.icon = 'cogs'\n\n @bt_new.on_click\n def bt_new_on_click(b):\n if dsc_new_box.children == ():\n dsc_new_box.children = [dsc_config(dsc.value)]\n bt_set.icon = 'chevron-up'\n 
else:\n dsc_new_box.children = ()\n bt_set.icon = 'cogs'\n\n @bt_rec.on_click\n def bt_rec_on_click(b):\n progress.clear_output()\n if len(dsc.options) > 1:\n config.delete(['ds_conf', dsc.value])\n outlog(f\"Dataset configuration '{dsc.value}' is deleted.\")\n values = config.read()\n dsc.options = [d for d in values['ds_conf']]\n else:\n outlog('Can not remove last configuration.')\n\n @bt_rey.on_click\n def bt_rey_on_click(b):\n progress.clear_output()\n if len(dsy.options) > 1:\n config.delete(['ds_conf', dsc.value, 'years', str(dsy.value)])\n outlog(f\"Year {dsy.value} of dataset '{dsc.value}' is deleted.\")\n values = config.read()\n dsy.options = [int(y) for y in values['ds_conf'][str(dsc.value)\n ]['years']]\n else:\n outlog('Can not remove last configuration.')\n wbox = VBox([Label('Datasets configurations.'), dsc_box, dsc_new_box,\n progress])\n return wbox\n",
"step-4": "from ipywidgets import Text, VBox, HBox, Label, Password, RadioButtons, Button, Layout, Box, Tab, Output, Dropdown, FloatText, BoundedIntText, Combobox\nfrom cbm.utils import config, data_options\nfrom cbm.ipycbm.utils import settings\nfrom cbm.sources import database\n\n\ndef widget_box():\n source = int(config.get_value(['set', 'data_source']))\n sources = RadioButtons(options=[('JRC RESTful API.', 0), (\n 'Direct access to database and object storage.', 1)], value=source,\n layout={'width': 'max-content'})\n sources_box = Box([Label(value='Data sources:'), sources])\n info_api = Label('RESTful API Settings.')\n info_direct = Label('Direct access settings')\n view_options = VBox([info_direct])\n if source == 0:\n view_options.children = [info_api, rest_api()]\n elif source == 1:\n view_options.children = [info_direct, direct()]\n\n def on_source_change(change):\n view_options.children = []\n if sources.value == 0:\n view_options.children = [info_api, rest_api()]\n elif sources.value == 1:\n view_options.children = [info_direct, direct()]\n config.update(['set', 'data_source'], str(sources.value))\n sources.observe(on_source_change, 'value')\n wbox_sources = VBox([sources_box, view_options], layout=Layout(border=\n '1px solid black'))\n info_general = Label(value='General settings:')\n wbox = VBox([wbox_sources, info_general, settings.widget_box()])\n return wbox\n\n\ndef rest_api(mode=None):\n \"\"\"\"\"\"\n values = config.read()\n wt_url = Text(value=values['api']['url'], placeholder='Add URL',\n description='API URL:', disabled=False)\n wt_user = Text(value=values['api']['user'], placeholder='Username',\n description='API User:', disabled=False)\n wt_pass = Password(value=values['api']['pass'], placeholder='******',\n description='API Password:', disabled=False)\n wb_save = Button(description='Save', disabled=False, icon='save')\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n\n @wb_save.on_click\n def 
wb_save_on_click(b):\n config.update(['api', 'url'], str(wt_url.value))\n config.update(['api', 'user'], str(wt_user.value))\n if wt_pass.value != '':\n config.update(['api', 'pass'], str(wt_pass.value))\n outlog('API information is updated')\n wbox = VBox([wt_url, wt_user, wt_pass, wb_save, progress])\n return wbox\n\n\ndef direct():\n tab_box = Tab(children=[settings.direct_conn(), direct_settings()])\n tab_box.set_title(0, 'Connection')\n tab_box.set_title(1, 'db Configuration')\n return tab_box\n\n\ndef direct_settings():\n values = config.read()\n ds_def = values['set']['ds_conf']\n ds_dye = values['set']['ds_year']\n if ds_def not in [d for d in values['ds_conf']]:\n ds_def = [d for d in values['ds_conf']][0]\n dsc = Dropdown(options=[d for d in values['ds_conf']], value=ds_def,\n description='Default:', disabled=False, layout=Layout(width='200px'))\n dsy = Dropdown(options=[int(y) for y in values['ds_conf'][dsc.value][\n 'years']], value=int(ds_dye), description='Dataset year:', disabled\n =False, layout=Layout(width='180px'))\n btn_refresh = Button(layout=Layout(width='35px'), icon='fa-refresh')\n\n @btn_refresh.on_click\n def btn_refresh_on_click(b):\n values = config.read()\n ds_c = values['set']['ds_conf']\n ds_y = values['set']['ds_year']\n dsc.options = [d for d in values['ds_conf']]\n dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]\n dsc.value = ds_c\n dsy.value = int(ds_y)\n\n def on_dsc_change(change):\n config.update(['set', 'ds_conf'], dsc.value)\n values = config.read()\n ds_c = values['set']['ds_conf']\n dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]\n dsc.observe(on_dsc_change, 'value')\n\n def on_dsy_change(change):\n config.update(['set', 'ds_year'], str(dsy.value))\n dsy.observe(on_dsy_change, 'value')\n bt_set = Button(layout=Layout(width='40px'), icon='cogs', tooltip=\n 'Configure this dataset')\n bt_new = Button(layout=Layout(width='40px'), icon='plus', tooltip=\n 'Add new dataset configuration')\n 
bt_rec = Button(layout=Layout(width='40px'), icon='trash-alt', tooltip=\n 'Delete dataset configuration')\n bt_rey = Button(layout=Layout(width='40px'), icon='trash-alt', tooltip=\n 'Delete only the selected year.')\n dsc_box = HBox([dsc, btn_refresh, bt_rec, dsy, bt_set, bt_rey, bt_new])\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n\n def dsc_config(dsc_value):\n values = config.read()\n ds_db = Dropdown(options=['1'], value='1', description='Database:',\n disabled=False, layout=Layout(width='140px'))\n try:\n with open(f\"{config.get_value(['paths', 'temp'])}tb_prefix\", 'r'\n ) as f:\n code_value = f.read()\n except Exception:\n code_value = dsc_value\n ds_code = Combobox(value=code_value, placeholder='abc', options=[m for\n m in data_options.eu_ms()] + [''], description='AOI code:',\n ensure_option=False, disabled=False, layout=Layout(width=\n '200px'), tooltip=\n 'Lowercase AOI code name for the dataset (5chr max).')\n ds_year = BoundedIntText(value=int(dsy.value), min=1980, max=2100,\n step=1, description='Dataset year:', disabled=False, layout=\n Layout(width='180px'))\n ds_desc = Text(value=values['ds_conf'][dsc_value]['desc'],\n description='Description:', disabled=False)\n info_map_text = ['Set default map view options. 
',\n 'You can get automatically the dataset ', 'center coordinates.']\n lat, lon = values['ds_conf'][dsc_value]['center'].split(',')\n map_cent_lat = FloatText(value=float(lat), description='Lat:',\n disabled=False, layout=Layout(width='160px'))\n map_cent_lon = FloatText(value=float(lon), description='Lon:',\n disabled=False, layout=Layout(width='160px'))\n map_zoom = BoundedIntText(value=values['ds_conf'][dsc_value]['zoom'\n ], min=0, max=20, step=1, description='Zoom:', disabled=False,\n layout=Layout(width='140px'))\n bt_get_center = Button(layout=Layout(width='40px'), icon='bullseye',\n tooltip='Get center point from database.')\n ds_box = HBox([ds_code, ds_year, ds_desc])\n map_box = HBox([Label('Map center: '), map_cent_lat, map_cent_lon,\n bt_get_center, map_zoom])\n info_config = Label(\n \"\"\"Change 'AOI code' value to create a new configuration set or \n leave the same 'AOI code' value to configure the selected one.\"\"\"\n )\n db = int(values['ds_conf'][dsc_value]['db'])\n\n def get_tb_list():\n tbls = database.tables(db, None, False)\n if tbls is None:\n return []\n else:\n return tbls\n tb_dc = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['dias_catalog'], get_tb_list(), False), description=\n 'DIAS catalog:', disabled=False)\n tb_pr = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['parcels'], get_tb_list(), False), description=\n 'Parcels:', disabled=False)\n\n def get_pr_columns():\n try:\n colms = database.table_columns(tb_pr.value, 1, None)\n if colms is None:\n return []\n else:\n return colms\n except Exception:\n return []\n tc_id = Dropdown(options=get_pr_columns(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'columns']['parcels_id'], get_pr_columns(), False), description\n ='Parcels ID:', disabled=False, 
layout=Layout(width='180px'))\n tc_cn = Dropdown(options=get_pr_columns(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'columns']['crop_names'], get_pr_columns(), False), description\n ='Crop names:', disabled=False, layout=Layout(width='180px'))\n tc_cc = Dropdown(options=get_pr_columns(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'columns']['crop_codes'], get_pr_columns(), False), description\n ='Crop codes:', disabled=False, layout=Layout(width='180px'))\n\n def on_tb_pr_change(change):\n tc_id.options = get_pr_columns()\n tc_cn.options = get_pr_columns()\n tc_cc.options = get_pr_columns()\n tb_pr.observe(on_tb_pr_change, 'value')\n parcel_box = HBox([tb_pr, tc_id, tc_cn, tc_cc])\n tb_s2 = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['s2'], get_tb_list(), False), description=\n 'S2 signatures:', disabled=False)\n tb_bs = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['bs'], get_tb_list(), False), description=\n 'Backscattering:', disabled=False)\n tb_6c = Dropdown(options=get_tb_list(), value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][str(ds_year.value)][\n 'tables']['c6'], get_tb_list(), False), description=\n '6 day coherence:', disabled=False)\n wb_save = Button(description='Save', disabled=False, icon='save')\n\n @bt_get_center.on_click\n def bt_get_center_on_click(b):\n import json\n center_json = json.loads(database.getTableCentroid(tb_pr.value)\n ['center'][0])\n map_cent_lat.value = round(center_json['coordinates'][1], 2)\n map_cent_lon.value = round(center_json['coordinates'][0], 2)\n map_zoom.value = 10\n\n @wb_save.on_click\n def wb_save_on_click(b):\n progress.clear_output()\n dscode = ds_code.value\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 
'dias_catalog'], str(tb_dc.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 'parcels'], str(tb_pr.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'columns', 'parcels_id'], str(tc_id.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'columns', 'crop_names'], str(tc_cn.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'columns', 'crop_codes'], str(tc_cc.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 's2'], str(tb_s2.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 'bs'], str(tb_bs.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 'c6'], str(tb_6c.value))\n config.update(['ds_conf', dscode, 'db'], str(ds_db.value))\n config.update(['ds_conf', dscode, 'desc'], str(ds_desc.value))\n config.update(['ds_conf', dscode, 'center'],\n f'{map_cent_lat.value},{map_cent_lon.value}')\n config.update(['ds_conf', dscode, 'zoom'], str(map_zoom.value))\n config.update(['set', 'ds_conf'], str(dscode))\n config.update(['set', 'ds_year'], str(ds_year.value))\n values = config.read()\n ds_c = values['set']['ds_conf']\n ds_y = values['set']['ds_year']\n dsc.options = [d for d in values['ds_conf']]\n dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]\n dsc.value = ds_c\n dsy.value = int(ds_y)\n outlog('The configurations are saved.')\n return VBox([info_config, ds_box, parcel_box, tb_dc, tb_s2, tb_bs,\n tb_6c, Label(''.join(info_map_text)), map_box, wb_save])\n dsc_new_box = HBox([])\n\n @bt_set.on_click\n def bt_set_on_click(b):\n if dsc_new_box.children == ():\n dsc_new_box.children = [dsc_config(dsc.value)]\n bt_set.icon = 'chevron-up'\n else:\n dsc_new_box.children = ()\n bt_set.icon = 'cogs'\n\n @bt_new.on_click\n def bt_new_on_click(b):\n if dsc_new_box.children == ():\n dsc_new_box.children = [dsc_config(dsc.value)]\n bt_set.icon = 'chevron-up'\n 
else:\n dsc_new_box.children = ()\n bt_set.icon = 'cogs'\n\n @bt_rec.on_click\n def bt_rec_on_click(b):\n progress.clear_output()\n if len(dsc.options) > 1:\n config.delete(['ds_conf', dsc.value])\n outlog(f\"Dataset configuration '{dsc.value}' is deleted.\")\n values = config.read()\n dsc.options = [d for d in values['ds_conf']]\n else:\n outlog('Can not remove last configuration.')\n\n @bt_rey.on_click\n def bt_rey_on_click(b):\n progress.clear_output()\n if len(dsy.options) > 1:\n config.delete(['ds_conf', dsc.value, 'years', str(dsy.value)])\n outlog(f\"Year {dsy.value} of dataset '{dsc.value}' is deleted.\")\n values = config.read()\n dsy.options = [int(y) for y in values['ds_conf'][str(dsc.value)\n ]['years']]\n else:\n outlog('Can not remove last configuration.')\n wbox = VBox([Label('Datasets configurations.'), dsc_box, dsc_new_box,\n progress])\n return wbox\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# This file is part of CbM (https://github.com/ec-jrc/cbm).\n# Author : Konstantinos Anastasakis\n# Credits : GTCAP Team\n# Copyright : 2021 European Commission, Joint Research Centre\n# License : 3-Clause BSD\n\n\nfrom ipywidgets import (Text, VBox, HBox, Label, Password, RadioButtons,\n Button, Layout, Box, Tab, Output, Dropdown,\n FloatText, BoundedIntText, Combobox)\n\nfrom cbm.utils import config, data_options\nfrom cbm.ipycbm.utils import settings\nfrom cbm.sources import database\n\n\ndef widget_box():\n\n source = int(config.get_value(['set', 'data_source']))\n\n sources = RadioButtons(\n options=[\n (\"JRC RESTful API.\", 0),\n (\"Direct access to database and object storage.\", 1)\n ],\n value=source,\n layout={'width': 'max-content'}\n )\n\n sources_box = Box([\n Label(value=\"Data sources:\"),\n sources]\n )\n\n info_api = Label(\"RESTful API Settings.\")\n info_direct = Label(\"Direct access settings\")\n\n view_options = VBox([info_direct])\n\n if source == 0:\n view_options.children = [info_api, rest_api()]\n elif source == 1:\n view_options.children = [info_direct, direct()]\n\n def on_source_change(change):\n view_options.children = []\n if sources.value == 0:\n view_options.children = [info_api, rest_api()]\n elif sources.value == 1:\n view_options.children = [info_direct, direct()]\n config.update(['set', 'data_source'], str(sources.value))\n\n sources.observe(on_source_change, 'value')\n\n wbox_sources = VBox([sources_box, view_options],\n layout=Layout(border='1px solid black'))\n\n info_general = Label(value=\"General settings:\")\n\n wbox = VBox([wbox_sources, info_general, settings.widget_box()])\n\n return wbox\n\n\ndef rest_api(mode=None):\n \"\"\"\"\"\"\n values = config.read()\n\n wt_url = Text(\n value=values['api']['url'],\n placeholder='Add URL',\n description='API URL:',\n disabled=False\n )\n wt_user = Text(\n value=values['api']['user'],\n placeholder='Username',\n 
description='API User:',\n disabled=False\n )\n wt_pass = Password(\n value=values['api']['pass'],\n placeholder='******',\n description='API Password:',\n disabled=False\n )\n\n wb_save = Button(\n description='Save',\n disabled=False,\n icon='save'\n )\n\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n\n @wb_save.on_click\n def wb_save_on_click(b):\n config.update(['api', 'url'], str(wt_url.value))\n config.update(['api', 'user'], str(wt_user.value))\n if wt_pass.value != '':\n config.update(['api', 'pass'], str(wt_pass.value))\n outlog(\"API information is updated\")\n\n wbox = VBox([wt_url, wt_user, wt_pass, wb_save, progress])\n\n return wbox\n\n\ndef direct():\n # try:\n tab_box = Tab(children=[settings.direct_conn(), direct_settings()])\n\n tab_box.set_title(0, 'Connection')\n tab_box.set_title(1, 'db Configuration')\n# except:\n# tab_box = Tab(children=[direct_conn()])\n# tab_box.set_title(0, 'Connection')\n# print(\"!WARNING! Can not load direct configuration settings.\")\n return tab_box\n\n\ndef direct_settings():\n values = config.read()\n ds_def = values['set']['ds_conf']\n ds_dye = values['set']['ds_year']\n if ds_def not in [d for d in values['ds_conf']]:\n ds_def = [d for d in values['ds_conf']][0]\n\n dsc = Dropdown(\n options=[d for d in values['ds_conf']],\n value=ds_def,\n description='Default:',\n disabled=False,\n layout=Layout(width='200px')\n )\n\n dsy = Dropdown(\n options=[int(y) for y in values['ds_conf'][dsc.value]['years']],\n value=int(ds_dye),\n description='Dataset year:',\n disabled=False,\n layout=Layout(width='180px')\n )\n\n btn_refresh = Button(\n layout=Layout(width='35px'),\n icon='fa-refresh')\n\n @btn_refresh.on_click\n def btn_refresh_on_click(b):\n values = config.read()\n ds_c = values['set']['ds_conf']\n ds_y = values['set']['ds_year']\n dsc.options = [d for d in values['ds_conf']]\n dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]\n dsc.value = ds_c\n dsy.value = 
int(ds_y)\n\n def on_dsc_change(change):\n config.update(['set', 'ds_conf'], dsc.value)\n values = config.read()\n ds_c = values['set']['ds_conf']\n dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]\n dsc.observe(on_dsc_change, 'value')\n\n def on_dsy_change(change):\n config.update(['set', 'ds_year'], str(dsy.value))\n dsy.observe(on_dsy_change, 'value')\n\n bt_set = Button(layout=Layout(width='40px'), icon='cogs',\n tooltip=\"Configure this dataset\")\n bt_new = Button(layout=Layout(width='40px'), icon='plus',\n tooltip=\"Add new dataset configuration\")\n bt_rec = Button(layout=Layout(width='40px'), icon='trash-alt',\n tooltip='Delete dataset configuration')\n bt_rey = Button(layout=Layout(width='40px'), icon='trash-alt',\n tooltip='Delete only the selected year.')\n dsc_box = HBox([dsc, btn_refresh, bt_rec, dsy, bt_set, bt_rey, bt_new])\n\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n\n def dsc_config(dsc_value):\n values = config.read()\n ds_db = Dropdown(\n options=[\"1\"],\n value=\"1\",\n description='Database:',\n disabled=False,\n layout=Layout(width='140px')\n )\n\n try:\n with open(f\"{config.get_value(['paths','temp'])}tb_prefix\", 'r') as f:\n code_value = f.read()\n except Exception:\n code_value = dsc_value\n\n ds_code = Combobox(\n value=code_value,\n placeholder='abc',\n options=[m for m in data_options.eu_ms()]+[''],\n description='AOI code:',\n ensure_option=False,\n disabled=False,\n layout=Layout(width='200px'),\n tooltip='Lowercase AOI code name for the dataset (5chr max).'\n )\n ds_year = BoundedIntText(\n value=int(dsy.value),\n min=1980,\n max=2100,\n step=1,\n description='Dataset year:',\n disabled=False,\n layout=Layout(width='180px')\n\n )\n ds_desc = Text(\n value=values['ds_conf'][dsc_value]['desc'],\n description='Description:',\n disabled=False\n )\n\n info_map_text = [\"Set default map view options. 
\",\n \"You can get automatically the dataset \",\n \"center coordinates.\"]\n\n lat, lon = values['ds_conf'][dsc_value]['center'].split(\",\")\n map_cent_lat = FloatText(\n value=float(lat),\n description='Lat:',\n disabled=False,\n layout=Layout(width='160px')\n )\n map_cent_lon = FloatText(\n value=float(lon),\n description='Lon:',\n disabled=False,\n layout=Layout(width='160px')\n )\n map_zoom = BoundedIntText(\n value=values['ds_conf'][dsc_value]['zoom'],\n min=0,\n max=20,\n step=1,\n description='Zoom:',\n disabled=False,\n layout=Layout(width='140px')\n )\n bt_get_center = Button(\n layout=Layout(width='40px'),\n icon='bullseye',\n tooltip='Get center point from database.'\n )\n\n ds_box = HBox([ds_code, ds_year, ds_desc])\n map_box = HBox([Label(\"Map center: \"), map_cent_lat,\n map_cent_lon, bt_get_center, map_zoom])\n\n info_config = Label(\n \"\"\"Change 'AOI code' value to create a new configuration set or \n leave the same 'AOI code' value to configure the selected one.\"\"\")\n\n db = int(values['ds_conf'][dsc_value]['db'])\n\n def get_tb_list():\n tbls = database.tables(db, None, False)\n if tbls is None:\n return []\n else:\n return tbls\n\n tb_dc = Dropdown(\n options=get_tb_list(),\n value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][\n str(ds_year.value)]['tables']['dias_catalog'],\n get_tb_list(), False),\n description='DIAS catalog:',\n disabled=False\n )\n tb_pr = Dropdown(\n options=get_tb_list(),\n value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][\n str(ds_year.value)]['tables']['parcels'],\n get_tb_list(), False),\n description='Parcels:',\n disabled=False\n )\n\n def get_pr_columns():\n try:\n colms = database.table_columns(tb_pr.value, 1, None)\n if colms is None:\n return []\n else:\n return colms\n except Exception:\n return []\n\n tc_id = Dropdown(\n options=get_pr_columns(),\n value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][\n str(ds_year.value)]['columns']['parcels_id'],\n 
get_pr_columns(), False),\n description='Parcels ID:',\n disabled=False,\n layout=Layout(width='180px')\n )\n tc_cn = Dropdown(\n options=get_pr_columns(),\n value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][\n str(ds_year.value)]['columns']['crop_names'],\n get_pr_columns(), False),\n description='Crop names:',\n disabled=False,\n layout=Layout(width='180px')\n )\n tc_cc = Dropdown(\n options=get_pr_columns(),\n value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][\n str(ds_year.value)]['columns']['crop_codes'],\n get_pr_columns(), False),\n description='Crop codes:',\n disabled=False,\n layout=Layout(width='180px')\n )\n\n def on_tb_pr_change(change):\n tc_id.options = get_pr_columns()\n tc_cn.options = get_pr_columns()\n tc_cc.options = get_pr_columns()\n tb_pr.observe(on_tb_pr_change, 'value')\n\n parcel_box = HBox([tb_pr, tc_id, tc_cn, tc_cc])\n\n tb_s2 = Dropdown(\n options=get_tb_list(),\n value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][\n str(ds_year.value)]['tables']['s2'],\n get_tb_list(), False),\n description='S2 signatures:',\n disabled=False\n )\n tb_bs = Dropdown(\n options=get_tb_list(),\n value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][\n str(ds_year.value)]['tables']['bs'],\n get_tb_list(), False),\n description='Backscattering:',\n disabled=False\n )\n tb_6c = Dropdown(\n options=get_tb_list(),\n value=config.autoselect(\n values['ds_conf'][dsc_value]['years'][\n str(ds_year.value)]['tables']['c6'],\n get_tb_list(), False),\n description='6 day coherence:',\n disabled=False\n )\n\n wb_save = Button(\n description='Save',\n disabled=False,\n icon='save'\n )\n\n @bt_get_center.on_click\n def bt_get_center_on_click(b):\n import json\n center_json = json.loads(\n database.getTableCentroid(tb_pr.value)['center'][0])\n map_cent_lat.value = round(center_json['coordinates'][1], 2)\n map_cent_lon.value = round(center_json['coordinates'][0], 2)\n map_zoom.value = 10\n\n @wb_save.on_click\n def 
wb_save_on_click(b):\n progress.clear_output()\n dscode = ds_code.value\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 'dias_catalog'], str(tb_dc.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 'parcels'], str(tb_pr.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'columns', 'parcels_id'], str(tc_id.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'columns', 'crop_names'], str(tc_cn.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'columns', 'crop_codes'], str(tc_cc.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 's2'], str(tb_s2.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 'bs'], str(tb_bs.value))\n config.update(['ds_conf', dscode, 'years', str(ds_year.value),\n 'tables', 'c6'], str(tb_6c.value))\n config.update(['ds_conf', dscode,\n 'db'], str(ds_db.value))\n config.update(['ds_conf', dscode,\n 'desc'], str(ds_desc.value))\n config.update(['ds_conf', dscode, 'center'],\n f\"{map_cent_lat.value},{map_cent_lon.value}\")\n config.update(['ds_conf', dscode,\n 'zoom'], str(map_zoom.value))\n config.update(['set', 'ds_conf'], str(dscode))\n config.update(['set', 'ds_year'], str(ds_year.value))\n values = config.read()\n ds_c = values['set']['ds_conf']\n ds_y = values['set']['ds_year']\n dsc.options = [d for d in values['ds_conf']]\n dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]\n dsc.value = ds_c\n dsy.value = int(ds_y)\n outlog(\"The configurations are saved.\")\n\n return VBox([info_config, ds_box, parcel_box,\n tb_dc, tb_s2, tb_bs, tb_6c,\n Label(''.join(info_map_text)), map_box, wb_save])\n\n dsc_new_box = HBox([])\n\n @bt_set.on_click\n def bt_set_on_click(b):\n if dsc_new_box.children == ():\n dsc_new_box.children = [dsc_config(dsc.value)]\n bt_set.icon = 'chevron-up'\n else:\n dsc_new_box.children = ()\n bt_set.icon = 
'cogs'\n\n @bt_new.on_click\n def bt_new_on_click(b):\n if dsc_new_box.children == ():\n dsc_new_box.children = [dsc_config(dsc.value)]\n bt_set.icon = 'chevron-up'\n else:\n dsc_new_box.children = ()\n bt_set.icon = 'cogs'\n\n @bt_rec.on_click\n def bt_rec_on_click(b):\n progress.clear_output()\n if len(dsc.options) > 1:\n config.delete(['ds_conf', dsc.value])\n outlog(f\"Dataset configuration '{dsc.value}' is deleted.\")\n values = config.read()\n dsc.options = [d for d in values['ds_conf']]\n else:\n outlog(\"Can not remove last configuration.\")\n\n @bt_rey.on_click\n def bt_rey_on_click(b):\n progress.clear_output()\n if len(dsy.options) > 1:\n config.delete(['ds_conf', dsc.value, 'years', str(dsy.value)])\n outlog(f\"Year {dsy.value} of dataset '{dsc.value}' is deleted.\")\n values = config.read()\n dsy.options = [int(y) for y in values['ds_conf']\n [str(dsc.value)]['years']]\n else:\n outlog(\"Can not remove last configuration.\")\n\n wbox = VBox([Label(\"Datasets configurations.\"), dsc_box,\n dsc_new_box, progress])\n\n return wbox\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import unittest
import shapely.geometry as gm
from alphaBetaLab.abRectangularGridBuilder import abRectangularGridBuilder
class testAbRectangularGridBuilder(unittest.TestCase):
def getMockHiResAlphaMtxAndCstCellDet(self, posCellCentroids = None):
class _mockClass:
def __init__(self, posCellCentroids):
self.posCellCentroids = posCellCentroids
self.cell = None
def getAlphaSubMatrix(self, cell):
sm = _mockClass(self.posCellCentroids)
sm.cell = cell
return sm
def _positive(self, cell):
cntrs = self.posCellCentroids
if cell is None or cntrs is None:
return False
else:
for c in cntrs:
if cell.contains(gm.Point([c[0], c[1]])):
return True
return False
def onLand(self):
cell = self.cell
return self._positive(cell)
def isCoastalCell(self, cell, boundary = None, surface = -1):
return self._positive(cell)
return _mockClass(posCellCentroids)
def testGetSeaGridSerial(self):
minx = 100.
miny = 45.
dx = .5
dy = 1.
nx = 30
ny = 10
maxx = minx + nx*dx
maxy = miny + ny*dy
landCntrs = [[100.25, 45.25], [105.25, 47.25]]
coastCntrs = [[100.75, 45.25], [105.25, 47.25]]
gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny,
nParWorker=1, minXYIsCentroid=False)
hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)
cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)
grd = gb.buildGrid(hiResMtx, cstClDet)
self.assertFalse(grd.wrapAroundDateline)
self.assertEqual(1, grd.nParWorker)
cells = grd.cells
self.assertEqual(nx*ny - 3, len(cells))
def testGetSeaGridParallel(self):
minx = 100.
miny = 45.
dx = .5
dy = 1.
nx = 30
ny = 10
maxx = minx + nx*dx
maxy = miny + ny*dy
landCntrs = [[100.25, 45.25], [105.25, 47.25]]
coastCntrs = [[100.75, 45.25], [105.25, 47.25]]
gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=4, minXYIsCentroid=False)
hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)
cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)
grd = gb.buildGrid(hiResMtx, cstClDet)
self.assertEqual(4, grd.nParWorker)
self.assertFalse(grd.wrapAroundDateline)
cells = grd.cells
self.assertEqual(nx*ny - 3, len(cells))
def testGetNeighborsSerial(self):
minx = 100.
miny = 45.
dx = .5
dy = 1.
nx = 30
ny = 10
maxx = minx + nx*dx
maxy = miny + ny*dy
gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker = 1)
hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()
cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()
grd = gb.buildGrid(hiResMtx, cstClDet)
self.assertEqual(1, grd.nParWorker)
self.assertFalse(grd.wrapAroundDateline)
cells = grd.cells
cell = cells[0]
ncls = grd.getNeighbors(cell)
self.assertEqual(3, len(ncls))
for nc in ncls:
self.assertTrue( cell.distance(nc) < .000000001 )
cell = cells[45]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue( cell.distance(nc) < .000000001 )
cell = cells[100]
ncls = grd.getNeighbors(cell)
self.assertEqual(5, len(ncls))
for nc in ncls:
self.assertTrue( cell.distance(nc) < .000000001 )
def testGetNeighborsParallel(self):
minx = 100.
miny = 45.
dx = .5
dy = 1.
nx = 30
ny = 10
maxx = minx + nx*dx
maxy = miny + ny*dy
gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker = 4)
hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()
cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()
grd = gb.buildGrid(hiResMtx, cstClDet)
self.assertEqual(4, grd.nParWorker)
self.assertFalse(grd.wrapAroundDateline)
cells = grd.cells
cell = cells[0]
ncls = grd.getNeighbors(cell)
self.assertEqual(3, len(ncls))
for nc in ncls:
self.assertTrue( cell.distance(nc) < .000000001 )
cell = cells[45]
ncls = grd.getNeighbors(cell)
self.assertEqual(8, len(ncls))
for nc in ncls:
self.assertTrue( cell.distance(nc) < .000000001 )
cell = cells[100]
ncls = grd.getNeighbors(cell)
self.assertEqual(5, len(ncls))
for nc in ncls:
self.assertTrue( cell.distance(nc) < .000000001 )
  def testGetNeighborsWrapAroundGlobalParallel(self):
    """Global 2-degree grid covering the full longitude range: neighbor
    lookups must wrap around the dateline.  Wrapped neighbors show up as
    ghost cells shifted by 360 degrees, i.e. with x == -182 on the west
    side and x == 182 on the east side."""
    minx = -179.
    miny = -89.
    dx = 2
    dy = 2
    nx = 180
    ny = 90
    # maxx/maxy are computed but unused; kept for symmetry with the other tests
    maxx = minx + nx*dx
    maxy = miny + ny*dy
    gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker = 4)
    hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()
    cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()
    grd = gb.buildGrid(hiResMtx, cstClDet)
    # the grid spans 360 degrees of longitude, so wrap-around must be enabled
    self.assertTrue(grd.wrapAroundDateline)
    cells = grd.cells
    # corner cell: 5 neighbors, the last two wrapped from across the dateline
    cell = cells[0]
    ncls = grd.getNeighbors(cell)
    self.assertEqual(5, len(ncls))
    for nc in ncls:
      self.assertTrue( cell.distance(nc) < .000000001 )
    self.assertEqual(-182, ncls[3].boundary.coords[0][0])
    self.assertEqual(-182, ncls[4].boundary.coords[0][0])
    # edge (non-corner) cell: full 8-neighborhood including wrapped ghosts
    cell = cells[45]
    ncls = grd.getNeighbors(cell)
    self.assertEqual(8, len(ncls))
    for nc in ncls:
      self.assertTrue( cell.distance(nc) < .000000001 )
    self.assertEqual(-182, ncls[5].boundary.coords[0][0])
    self.assertEqual(-182, ncls[6].boundary.coords[0][0])
    # NOTE(review): ncls[6] is asserted twice; ncls[7] was probably intended — confirm
    self.assertEqual(-182, ncls[6].boundary.coords[0][0])
    # another edge cell along the same wrapped boundary
    cell = cells[65]
    ncls = grd.getNeighbors(cell)
    self.assertEqual(8, len(ncls))
    for nc in ncls:
      self.assertTrue( cell.distance(nc) < .000000001 )
    self.assertEqual(-182, ncls[5].boundary.coords[0][0])
    self.assertEqual(-182, ncls[6].boundary.coords[0][0])
    # NOTE(review): duplicated ncls[6] assertion, as above — confirm ncls[7]
    self.assertEqual(-182, ncls[6].boundary.coords[0][0])
    # corner cell at the other end of the wrapped column
    cell = cells[89]
    ncls = grd.getNeighbors(cell)
    self.assertEqual(5, len(ncls))
    for nc in ncls:
      self.assertTrue( cell.distance(nc) < .000000001 )
    self.assertEqual(-182, ncls[3].boundary.coords[0][0])
    self.assertEqual(-182, ncls[4].boundary.coords[0][0])
    # interior cell: full 8-neighborhood, only adjacency is asserted
    cell = cells[200]
    ncls = grd.getNeighbors(cell)
    self.assertEqual(8, len(ncls))
    for nc in ncls:
      self.assertTrue( cell.distance(nc) < .000000001 )
    # cell near the opposite side: wrapped ghosts now sit at x == 182
    cell = cells[nx*ny-22]
    ncls = grd.getNeighbors(cell)
    self.assertEqual(8, len(ncls))
    for nc in ncls:
      self.assertTrue( cell.distance(nc) < .000000001 )
    self.assertEqual(182, ncls[5].boundary.coords[1][0])
    self.assertEqual(182, ncls[6].boundary.coords[1][0])
    # NOTE(review): duplicated ncls[6] assertion — confirm ncls[7]
    self.assertEqual(182, ncls[6].boundary.coords[1][0])
    # last cell of the grid: corner with 5 neighbors, two of them wrapped
    cell = cells[nx*ny-1]
    ncls = grd.getNeighbors(cell)
    self.assertEqual(5, len(ncls))
    for nc in ncls:
      self.assertTrue( cell.distance(nc) < .000000001 )
    self.assertEqual(182, ncls[3].boundary.coords[1][0])
    self.assertEqual(182, ncls[4].boundary.coords[1][0])
# run the test suite when this module is executed directly
if __name__ == '__main__':
  unittest.main()
|
normal
|
{
"blob_id": "6175ce6534d44d703df6cdef94fc2b1285e25f49",
"index": 2202,
"step-1": "<mask token>\n\n\nclass testAbRectangularGridBuilder(unittest.TestCase):\n\n def getMockHiResAlphaMtxAndCstCellDet(self, posCellCentroids=None):\n\n\n class _mockClass:\n\n def __init__(self, posCellCentroids):\n self.posCellCentroids = posCellCentroids\n self.cell = None\n\n def getAlphaSubMatrix(self, cell):\n sm = _mockClass(self.posCellCentroids)\n sm.cell = cell\n return sm\n\n def _positive(self, cell):\n cntrs = self.posCellCentroids\n if cell is None or cntrs is None:\n return False\n else:\n for c in cntrs:\n if cell.contains(gm.Point([c[0], c[1]])):\n return True\n return False\n\n def onLand(self):\n cell = self.cell\n return self._positive(cell)\n\n def isCoastalCell(self, cell, boundary=None, surface=-1):\n return self._positive(cell)\n return _mockClass(posCellCentroids)\n\n def testGetSeaGridSerial(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n landCntrs = [[100.25, 45.25], [105.25, 47.25]]\n coastCntrs = [[100.75, 45.25], [105.25, 47.25]]\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny,\n nParWorker=1, minXYIsCentroid=False)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertFalse(grd.wrapAroundDateline)\n self.assertEqual(1, grd.nParWorker)\n cells = grd.cells\n self.assertEqual(nx * ny - 3, len(cells))\n <mask token>\n\n def testGetNeighborsSerial(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=1)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertEqual(1, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n cell = cells[0]\n ncls = 
grd.getNeighbors(cell)\n self.assertEqual(3, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[100]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n\n def testGetNeighborsParallel(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=4)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertEqual(4, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(3, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[100]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n\n def testGetNeighborsWrapAroundGlobalParallel(self):\n minx = -179.0\n miny = -89.0\n dx = 2\n dy = 2\n nx = 180\n ny = 90\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=4)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertTrue(grd.wrapAroundDateline)\n cells = grd.cells\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, 
ncls[3].boundary.coords[0][0])\n self.assertEqual(-182, ncls[4].boundary.coords[0][0])\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[5].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n cell = cells[65]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[5].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n cell = cells[89]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[3].boundary.coords[0][0])\n self.assertEqual(-182, ncls[4].boundary.coords[0][0])\n cell = cells[200]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[nx * ny - 22]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(182, ncls[5].boundary.coords[1][0])\n self.assertEqual(182, ncls[6].boundary.coords[1][0])\n self.assertEqual(182, ncls[6].boundary.coords[1][0])\n cell = cells[nx * ny - 1]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(182, ncls[3].boundary.coords[1][0])\n self.assertEqual(182, ncls[4].boundary.coords[1][0])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass testAbRectangularGridBuilder(unittest.TestCase):\n\n def getMockHiResAlphaMtxAndCstCellDet(self, posCellCentroids=None):\n\n\n class _mockClass:\n\n def __init__(self, posCellCentroids):\n self.posCellCentroids = posCellCentroids\n self.cell = None\n\n def getAlphaSubMatrix(self, cell):\n sm = _mockClass(self.posCellCentroids)\n sm.cell = cell\n return sm\n\n def _positive(self, cell):\n cntrs = self.posCellCentroids\n if cell is None or cntrs is None:\n return False\n else:\n for c in cntrs:\n if cell.contains(gm.Point([c[0], c[1]])):\n return True\n return False\n\n def onLand(self):\n cell = self.cell\n return self._positive(cell)\n\n def isCoastalCell(self, cell, boundary=None, surface=-1):\n return self._positive(cell)\n return _mockClass(posCellCentroids)\n\n def testGetSeaGridSerial(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n landCntrs = [[100.25, 45.25], [105.25, 47.25]]\n coastCntrs = [[100.75, 45.25], [105.25, 47.25]]\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny,\n nParWorker=1, minXYIsCentroid=False)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertFalse(grd.wrapAroundDateline)\n self.assertEqual(1, grd.nParWorker)\n cells = grd.cells\n self.assertEqual(nx * ny - 3, len(cells))\n\n def testGetSeaGridParallel(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n landCntrs = [[100.25, 45.25], [105.25, 47.25]]\n coastCntrs = [[100.75, 45.25], [105.25, 47.25]]\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny,\n nParWorker=4, minXYIsCentroid=False)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)\n grd = gb.buildGrid(hiResMtx, cstClDet)\n 
self.assertEqual(4, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n self.assertEqual(nx * ny - 3, len(cells))\n\n def testGetNeighborsSerial(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=1)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertEqual(1, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(3, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[100]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n\n def testGetNeighborsParallel(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=4)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertEqual(4, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(3, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[100]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n\n def 
testGetNeighborsWrapAroundGlobalParallel(self):\n minx = -179.0\n miny = -89.0\n dx = 2\n dy = 2\n nx = 180\n ny = 90\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=4)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertTrue(grd.wrapAroundDateline)\n cells = grd.cells\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[3].boundary.coords[0][0])\n self.assertEqual(-182, ncls[4].boundary.coords[0][0])\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[5].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n cell = cells[65]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[5].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n cell = cells[89]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[3].boundary.coords[0][0])\n self.assertEqual(-182, ncls[4].boundary.coords[0][0])\n cell = cells[200]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[nx * ny - 22]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(182, ncls[5].boundary.coords[1][0])\n self.assertEqual(182, 
ncls[6].boundary.coords[1][0])\n self.assertEqual(182, ncls[6].boundary.coords[1][0])\n cell = cells[nx * ny - 1]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(182, ncls[3].boundary.coords[1][0])\n self.assertEqual(182, ncls[4].boundary.coords[1][0])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass testAbRectangularGridBuilder(unittest.TestCase):\n\n def getMockHiResAlphaMtxAndCstCellDet(self, posCellCentroids=None):\n\n\n class _mockClass:\n\n def __init__(self, posCellCentroids):\n self.posCellCentroids = posCellCentroids\n self.cell = None\n\n def getAlphaSubMatrix(self, cell):\n sm = _mockClass(self.posCellCentroids)\n sm.cell = cell\n return sm\n\n def _positive(self, cell):\n cntrs = self.posCellCentroids\n if cell is None or cntrs is None:\n return False\n else:\n for c in cntrs:\n if cell.contains(gm.Point([c[0], c[1]])):\n return True\n return False\n\n def onLand(self):\n cell = self.cell\n return self._positive(cell)\n\n def isCoastalCell(self, cell, boundary=None, surface=-1):\n return self._positive(cell)\n return _mockClass(posCellCentroids)\n\n def testGetSeaGridSerial(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n landCntrs = [[100.25, 45.25], [105.25, 47.25]]\n coastCntrs = [[100.75, 45.25], [105.25, 47.25]]\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny,\n nParWorker=1, minXYIsCentroid=False)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertFalse(grd.wrapAroundDateline)\n self.assertEqual(1, grd.nParWorker)\n cells = grd.cells\n self.assertEqual(nx * ny - 3, len(cells))\n\n def testGetSeaGridParallel(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n landCntrs = [[100.25, 45.25], [105.25, 47.25]]\n coastCntrs = [[100.75, 45.25], [105.25, 47.25]]\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny,\n nParWorker=4, minXYIsCentroid=False)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)\n grd = gb.buildGrid(hiResMtx, cstClDet)\n 
self.assertEqual(4, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n self.assertEqual(nx * ny - 3, len(cells))\n\n def testGetNeighborsSerial(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=1)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertEqual(1, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(3, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[100]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n\n def testGetNeighborsParallel(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=4)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertEqual(4, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(3, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[100]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n\n def 
testGetNeighborsWrapAroundGlobalParallel(self):\n minx = -179.0\n miny = -89.0\n dx = 2\n dy = 2\n nx = 180\n ny = 90\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=4)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertTrue(grd.wrapAroundDateline)\n cells = grd.cells\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[3].boundary.coords[0][0])\n self.assertEqual(-182, ncls[4].boundary.coords[0][0])\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[5].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n cell = cells[65]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[5].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n cell = cells[89]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[3].boundary.coords[0][0])\n self.assertEqual(-182, ncls[4].boundary.coords[0][0])\n cell = cells[200]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[nx * ny - 22]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(182, ncls[5].boundary.coords[1][0])\n self.assertEqual(182, 
ncls[6].boundary.coords[1][0])\n self.assertEqual(182, ncls[6].boundary.coords[1][0])\n cell = cells[nx * ny - 1]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(182, ncls[3].boundary.coords[1][0])\n self.assertEqual(182, ncls[4].boundary.coords[1][0])\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import unittest\nimport shapely.geometry as gm\nfrom alphaBetaLab.abRectangularGridBuilder import abRectangularGridBuilder\n\n\nclass testAbRectangularGridBuilder(unittest.TestCase):\n\n def getMockHiResAlphaMtxAndCstCellDet(self, posCellCentroids=None):\n\n\n class _mockClass:\n\n def __init__(self, posCellCentroids):\n self.posCellCentroids = posCellCentroids\n self.cell = None\n\n def getAlphaSubMatrix(self, cell):\n sm = _mockClass(self.posCellCentroids)\n sm.cell = cell\n return sm\n\n def _positive(self, cell):\n cntrs = self.posCellCentroids\n if cell is None or cntrs is None:\n return False\n else:\n for c in cntrs:\n if cell.contains(gm.Point([c[0], c[1]])):\n return True\n return False\n\n def onLand(self):\n cell = self.cell\n return self._positive(cell)\n\n def isCoastalCell(self, cell, boundary=None, surface=-1):\n return self._positive(cell)\n return _mockClass(posCellCentroids)\n\n def testGetSeaGridSerial(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n landCntrs = [[100.25, 45.25], [105.25, 47.25]]\n coastCntrs = [[100.75, 45.25], [105.25, 47.25]]\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny,\n nParWorker=1, minXYIsCentroid=False)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertFalse(grd.wrapAroundDateline)\n self.assertEqual(1, grd.nParWorker)\n cells = grd.cells\n self.assertEqual(nx * ny - 3, len(cells))\n\n def testGetSeaGridParallel(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n landCntrs = [[100.25, 45.25], [105.25, 47.25]]\n coastCntrs = [[100.75, 45.25], [105.25, 47.25]]\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny,\n nParWorker=4, minXYIsCentroid=False)\n hiResMtx = 
self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertEqual(4, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n self.assertEqual(nx * ny - 3, len(cells))\n\n def testGetNeighborsSerial(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=1)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertEqual(1, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(3, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[100]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n\n def testGetNeighborsParallel(self):\n minx = 100.0\n miny = 45.0\n dx = 0.5\n dy = 1.0\n nx = 30\n ny = 10\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=4)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertEqual(4, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(3, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n 
cell = cells[100]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n\n def testGetNeighborsWrapAroundGlobalParallel(self):\n minx = -179.0\n miny = -89.0\n dx = 2\n dy = 2\n nx = 180\n ny = 90\n maxx = minx + nx * dx\n maxy = miny + ny * dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=4)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertTrue(grd.wrapAroundDateline)\n cells = grd.cells\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[3].boundary.coords[0][0])\n self.assertEqual(-182, ncls[4].boundary.coords[0][0])\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[5].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n cell = cells[65]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[5].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n cell = cells[89]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(-182, ncls[3].boundary.coords[0][0])\n self.assertEqual(-182, ncls[4].boundary.coords[0][0])\n cell = cells[200]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n cell = cells[nx * ny - 22]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for 
nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(182, ncls[5].boundary.coords[1][0])\n self.assertEqual(182, ncls[6].boundary.coords[1][0])\n self.assertEqual(182, ncls[6].boundary.coords[1][0])\n cell = cells[nx * ny - 1]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue(cell.distance(nc) < 1e-09)\n self.assertEqual(182, ncls[3].boundary.coords[1][0])\n self.assertEqual(182, ncls[4].boundary.coords[1][0])\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "import unittest\nimport shapely.geometry as gm\n\nfrom alphaBetaLab.abRectangularGridBuilder import abRectangularGridBuilder\n\nclass testAbRectangularGridBuilder(unittest.TestCase):\n\n def getMockHiResAlphaMtxAndCstCellDet(self, posCellCentroids = None):\n class _mockClass:\n def __init__(self, posCellCentroids):\n self.posCellCentroids = posCellCentroids\n self.cell = None\n def getAlphaSubMatrix(self, cell):\n sm = _mockClass(self.posCellCentroids)\n sm.cell = cell\n return sm\n def _positive(self, cell):\n cntrs = self.posCellCentroids\n if cell is None or cntrs is None:\n return False\n else:\n for c in cntrs:\n if cell.contains(gm.Point([c[0], c[1]])):\n return True\n return False\n def onLand(self):\n cell = self.cell\n return self._positive(cell)\n def isCoastalCell(self, cell, boundary = None, surface = -1):\n return self._positive(cell)\n return _mockClass(posCellCentroids)\n\n\n def testGetSeaGridSerial(self):\n minx = 100.\n miny = 45.\n dx = .5\n dy = 1.\n nx = 30\n ny = 10\n maxx = minx + nx*dx\n maxy = miny + ny*dy\n landCntrs = [[100.25, 45.25], [105.25, 47.25]]\n coastCntrs = [[100.75, 45.25], [105.25, 47.25]]\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, \n nParWorker=1, minXYIsCentroid=False)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertFalse(grd.wrapAroundDateline)\n self.assertEqual(1, grd.nParWorker)\n cells = grd.cells\n self.assertEqual(nx*ny - 3, len(cells))\n\n\n def testGetSeaGridParallel(self):\n minx = 100.\n miny = 45.\n dx = .5\n dy = 1.\n nx = 30\n ny = 10\n maxx = minx + nx*dx\n maxy = miny + ny*dy\n landCntrs = [[100.25, 45.25], [105.25, 47.25]]\n coastCntrs = [[100.75, 45.25], [105.25, 47.25]]\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker=4, minXYIsCentroid=False)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet(landCntrs)\n cstClDet = 
self.getMockHiResAlphaMtxAndCstCellDet(coastCntrs)\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertEqual(4, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n self.assertEqual(nx*ny - 3, len(cells))\n \n\n def testGetNeighborsSerial(self):\n minx = 100.\n miny = 45.\n dx = .5\n dy = 1.\n nx = 30\n ny = 10\n maxx = minx + nx*dx\n maxy = miny + ny*dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker = 1)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertEqual(1, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(3, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n\n cell = cells[100]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n\n\n def testGetNeighborsParallel(self):\n minx = 100.\n miny = 45.\n dx = .5\n dy = 1.\n nx = 30\n ny = 10\n maxx = minx + nx*dx\n maxy = miny + ny*dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker = 4)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertEqual(4, grd.nParWorker)\n self.assertFalse(grd.wrapAroundDateline)\n cells = grd.cells\n\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(3, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n\n cell = cells[100]\n ncls 
= grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n \n\n def testGetNeighborsWrapAroundGlobalParallel(self):\n minx = -179.\n miny = -89.\n dx = 2\n dy = 2\n nx = 180\n ny = 90\n maxx = minx + nx*dx\n maxy = miny + ny*dy\n gb = abRectangularGridBuilder(minx, miny, dx, dy, nx, ny, nParWorker = 4)\n hiResMtx = self.getMockHiResAlphaMtxAndCstCellDet()\n cstClDet = self.getMockHiResAlphaMtxAndCstCellDet()\n grd = gb.buildGrid(hiResMtx, cstClDet)\n self.assertTrue(grd.wrapAroundDateline)\n cells = grd.cells\n\n cell = cells[0]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n self.assertEqual(-182, ncls[3].boundary.coords[0][0])\n self.assertEqual(-182, ncls[4].boundary.coords[0][0])\n\n cell = cells[45]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n self.assertEqual(-182, ncls[5].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n\n cell = cells[65]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n self.assertEqual(-182, ncls[5].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n self.assertEqual(-182, ncls[6].boundary.coords[0][0])\n\n cell = cells[89]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n self.assertEqual(-182, ncls[3].boundary.coords[0][0])\n self.assertEqual(-182, ncls[4].boundary.coords[0][0])\n\n cell = cells[200]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n\n cell = cells[nx*ny-22]\n ncls = grd.getNeighbors(cell)\n 
self.assertEqual(8, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n self.assertEqual(182, ncls[5].boundary.coords[1][0])\n self.assertEqual(182, ncls[6].boundary.coords[1][0])\n self.assertEqual(182, ncls[6].boundary.coords[1][0])\n\n cell = cells[nx*ny-1]\n ncls = grd.getNeighbors(cell)\n self.assertEqual(5, len(ncls))\n for nc in ncls:\n self.assertTrue( cell.distance(nc) < .000000001 )\n self.assertEqual(182, ncls[3].boundary.coords[1][0])\n self.assertEqual(182, ncls[4].boundary.coords[1][0])\n \n \n \nif __name__ == '__main__':\n unittest.main()\n\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
from flask import Flask, request, jsonify
import sqlite3
from database import Database
app = Flask(__name__)
db = Database()
@app.route('/')
def homepage():
argslist = request.args
faciltype = argslist.get('facil')
facils = []
try:
facils = db.getFacilitiesFromFacilityType(faciltype)
facils = map(lambda facil: facil.toDictNoType(), facils)
# https://stackoverflow.com/questions/5022066/how-to-serialize-sqlalchemy-result-to-json
except:
facils = []
return jsonify(facilities=facils)
@app.route('/auto')
def autocomplete():
argslist = request.args
terms = []
try:
term = argslist.get('term')
if len(term) > 0:
terms = db.getBuildingsLike(term)
terms = map(lambda bldg: bldg.toDictNoLatLon(), terms)
except:
terms = []
return jsonify(terms=terms)
@app.route('/building-location')
def getBuilding():
argslist = request.args
bldg = {}
try:
bldg = argslist.get('name')
bldg = db.getBuilding(bldg)
bldg = bldg.toDict()
except:
bldg = {}
return jsonify(building=bldg)
@app.route('/building')
def getFacilitiesFromBuilding():
argslist = request.args
facils = []
try:
bldg = argslist.get('name')
facils = db.getFacilitiesFromBuilding(bldg)
facils = map(lambda facil: facil.toDict(), facils)
except:
facils = []
return jsonify(facilities=facils)
@app.route('/faciltypes')
def getFacilityTypes():
argslist = request.args
faciltypes = []
try:
faciltypes = db.getAllFacilityTypes()
faciltypes = map(lambda faciltype: faciltype.name, faciltypes)
except:
faciltypes = []
return jsonify(facility_types=faciltypes)
if __name__ == '__main__':
app.run(debug=True, use_reloader=True)
|
normal
|
{
"blob_id": "2424d667e1bb4ee75b5053eb6f9b002787a5317f",
"index": 6391,
"step-1": "<mask token>\n\n\n@app.route('/')\ndef homepage():\n argslist = request.args\n faciltype = argslist.get('facil')\n facils = []\n try:\n facils = db.getFacilitiesFromFacilityType(faciltype)\n facils = map(lambda facil: facil.toDictNoType(), facils)\n except:\n facils = []\n return jsonify(facilities=facils)\n\n\n<mask token>\n\n\n@app.route('/building')\ndef getFacilitiesFromBuilding():\n argslist = request.args\n facils = []\n try:\n bldg = argslist.get('name')\n facils = db.getFacilitiesFromBuilding(bldg)\n facils = map(lambda facil: facil.toDict(), facils)\n except:\n facils = []\n return jsonify(facilities=facils)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/')\ndef homepage():\n argslist = request.args\n faciltype = argslist.get('facil')\n facils = []\n try:\n facils = db.getFacilitiesFromFacilityType(faciltype)\n facils = map(lambda facil: facil.toDictNoType(), facils)\n except:\n facils = []\n return jsonify(facilities=facils)\n\n\n@app.route('/auto')\ndef autocomplete():\n argslist = request.args\n terms = []\n try:\n term = argslist.get('term')\n if len(term) > 0:\n terms = db.getBuildingsLike(term)\n terms = map(lambda bldg: bldg.toDictNoLatLon(), terms)\n except:\n terms = []\n return jsonify(terms=terms)\n\n\n@app.route('/building-location')\ndef getBuilding():\n argslist = request.args\n bldg = {}\n try:\n bldg = argslist.get('name')\n bldg = db.getBuilding(bldg)\n bldg = bldg.toDict()\n except:\n bldg = {}\n return jsonify(building=bldg)\n\n\n@app.route('/building')\ndef getFacilitiesFromBuilding():\n argslist = request.args\n facils = []\n try:\n bldg = argslist.get('name')\n facils = db.getFacilitiesFromBuilding(bldg)\n facils = map(lambda facil: facil.toDict(), facils)\n except:\n facils = []\n return jsonify(facilities=facils)\n\n\n@app.route('/faciltypes')\ndef getFacilityTypes():\n argslist = request.args\n faciltypes = []\n try:\n faciltypes = db.getAllFacilityTypes()\n faciltypes = map(lambda faciltype: faciltype.name, faciltypes)\n except:\n faciltypes = []\n return jsonify(facility_types=faciltypes)\n\n\nif __name__ == '__main__':\n app.run(debug=True, use_reloader=True)\n",
"step-3": "<mask token>\napp = Flask(__name__)\ndb = Database()\n\n\n@app.route('/')\ndef homepage():\n argslist = request.args\n faciltype = argslist.get('facil')\n facils = []\n try:\n facils = db.getFacilitiesFromFacilityType(faciltype)\n facils = map(lambda facil: facil.toDictNoType(), facils)\n except:\n facils = []\n return jsonify(facilities=facils)\n\n\n@app.route('/auto')\ndef autocomplete():\n argslist = request.args\n terms = []\n try:\n term = argslist.get('term')\n if len(term) > 0:\n terms = db.getBuildingsLike(term)\n terms = map(lambda bldg: bldg.toDictNoLatLon(), terms)\n except:\n terms = []\n return jsonify(terms=terms)\n\n\n@app.route('/building-location')\ndef getBuilding():\n argslist = request.args\n bldg = {}\n try:\n bldg = argslist.get('name')\n bldg = db.getBuilding(bldg)\n bldg = bldg.toDict()\n except:\n bldg = {}\n return jsonify(building=bldg)\n\n\n@app.route('/building')\ndef getFacilitiesFromBuilding():\n argslist = request.args\n facils = []\n try:\n bldg = argslist.get('name')\n facils = db.getFacilitiesFromBuilding(bldg)\n facils = map(lambda facil: facil.toDict(), facils)\n except:\n facils = []\n return jsonify(facilities=facils)\n\n\n@app.route('/faciltypes')\ndef getFacilityTypes():\n argslist = request.args\n faciltypes = []\n try:\n faciltypes = db.getAllFacilityTypes()\n faciltypes = map(lambda faciltype: faciltype.name, faciltypes)\n except:\n faciltypes = []\n return jsonify(facility_types=faciltypes)\n\n\nif __name__ == '__main__':\n app.run(debug=True, use_reloader=True)\n",
"step-4": "from flask import Flask, request, jsonify\nimport sqlite3\nfrom database import Database\napp = Flask(__name__)\ndb = Database()\n\n\n@app.route('/')\ndef homepage():\n argslist = request.args\n faciltype = argslist.get('facil')\n facils = []\n try:\n facils = db.getFacilitiesFromFacilityType(faciltype)\n facils = map(lambda facil: facil.toDictNoType(), facils)\n except:\n facils = []\n return jsonify(facilities=facils)\n\n\n@app.route('/auto')\ndef autocomplete():\n argslist = request.args\n terms = []\n try:\n term = argslist.get('term')\n if len(term) > 0:\n terms = db.getBuildingsLike(term)\n terms = map(lambda bldg: bldg.toDictNoLatLon(), terms)\n except:\n terms = []\n return jsonify(terms=terms)\n\n\n@app.route('/building-location')\ndef getBuilding():\n argslist = request.args\n bldg = {}\n try:\n bldg = argslist.get('name')\n bldg = db.getBuilding(bldg)\n bldg = bldg.toDict()\n except:\n bldg = {}\n return jsonify(building=bldg)\n\n\n@app.route('/building')\ndef getFacilitiesFromBuilding():\n argslist = request.args\n facils = []\n try:\n bldg = argslist.get('name')\n facils = db.getFacilitiesFromBuilding(bldg)\n facils = map(lambda facil: facil.toDict(), facils)\n except:\n facils = []\n return jsonify(facilities=facils)\n\n\n@app.route('/faciltypes')\ndef getFacilityTypes():\n argslist = request.args\n faciltypes = []\n try:\n faciltypes = db.getAllFacilityTypes()\n faciltypes = map(lambda faciltype: faciltype.name, faciltypes)\n except:\n faciltypes = []\n return jsonify(facility_types=faciltypes)\n\n\nif __name__ == '__main__':\n app.run(debug=True, use_reloader=True)\n",
"step-5": "from flask import Flask, request, jsonify\r\nimport sqlite3\r\nfrom database import Database\r\n\r\napp = Flask(__name__)\r\ndb = Database()\r\n\r\n@app.route('/')\r\ndef homepage():\r\n argslist = request.args\r\n faciltype = argslist.get('facil')\r\n facils = []\r\n try:\r\n facils = db.getFacilitiesFromFacilityType(faciltype)\r\n facils = map(lambda facil: facil.toDictNoType(), facils)\r\n # https://stackoverflow.com/questions/5022066/how-to-serialize-sqlalchemy-result-to-json\r\n except:\r\n facils = []\r\n return jsonify(facilities=facils)\r\n\r\n\r\n@app.route('/auto')\r\ndef autocomplete():\r\n argslist = request.args\r\n terms = []\r\n try:\r\n term = argslist.get('term')\r\n if len(term) > 0:\r\n terms = db.getBuildingsLike(term)\r\n terms = map(lambda bldg: bldg.toDictNoLatLon(), terms)\r\n except:\r\n terms = []\r\n return jsonify(terms=terms)\r\n\r\n\r\n@app.route('/building-location')\r\ndef getBuilding():\r\n argslist = request.args\r\n bldg = {}\r\n try:\r\n bldg = argslist.get('name')\r\n bldg = db.getBuilding(bldg)\r\n bldg = bldg.toDict()\r\n except:\r\n bldg = {}\r\n return jsonify(building=bldg)\r\n\r\n@app.route('/building')\r\ndef getFacilitiesFromBuilding():\r\n argslist = request.args\r\n facils = []\r\n try:\r\n bldg = argslist.get('name')\r\n facils = db.getFacilitiesFromBuilding(bldg)\r\n facils = map(lambda facil: facil.toDict(), facils)\r\n except:\r\n facils = []\r\n return jsonify(facilities=facils)\r\n\r\n\r\n@app.route('/faciltypes')\r\ndef getFacilityTypes():\r\n argslist = request.args\r\n faciltypes = []\r\n try:\r\n faciltypes = db.getAllFacilityTypes()\r\n faciltypes = map(lambda faciltype: faciltype.name, faciltypes)\r\n except:\r\n faciltypes = []\r\n return jsonify(facility_types=faciltypes)\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True, use_reloader=True)\r\n",
"step-ids": [
2,
6,
7,
8,
9
]
}
|
[
2,
6,
7,
8,
9
] |
#n-repeated element
class Solution:
def repeatedNTimes(self, A):
freq = {}
for i in A:
if i in freq.keys():
freq[i] += 1
else:
freq[i] = 1
key = list(freq.keys())
val = list(freq.values())
m = max(val)
return key[val.index(m)]
s = Solution()
l = [2,1,2,5,3,2]
k = [1,1,1,2]
print(s.repeatedNTimes(l))
|
normal
|
{
"blob_id": "d50618f7784e69b46cb665ec1a9c56f7a2867785",
"index": 5033,
"step-1": "class Solution:\n <mask token>\n\n\n<mask token>\n",
"step-2": "class Solution:\n\n def repeatedNTimes(self, A):\n freq = {}\n for i in A:\n if i in freq.keys():\n freq[i] += 1\n else:\n freq[i] = 1\n key = list(freq.keys())\n val = list(freq.values())\n m = max(val)\n return key[val.index(m)]\n\n\n<mask token>\n",
"step-3": "class Solution:\n\n def repeatedNTimes(self, A):\n freq = {}\n for i in A:\n if i in freq.keys():\n freq[i] += 1\n else:\n freq[i] = 1\n key = list(freq.keys())\n val = list(freq.values())\n m = max(val)\n return key[val.index(m)]\n\n\n<mask token>\nprint(s.repeatedNTimes(l))\n",
"step-4": "class Solution:\n\n def repeatedNTimes(self, A):\n freq = {}\n for i in A:\n if i in freq.keys():\n freq[i] += 1\n else:\n freq[i] = 1\n key = list(freq.keys())\n val = list(freq.values())\n m = max(val)\n return key[val.index(m)]\n\n\ns = Solution()\nl = [2, 1, 2, 5, 3, 2]\nk = [1, 1, 1, 2]\nprint(s.repeatedNTimes(l))\n",
"step-5": "#n-repeated element\nclass Solution:\n def repeatedNTimes(self, A):\n freq = {}\n for i in A:\n if i in freq.keys():\n freq[i] += 1\n else:\n freq[i] = 1\n key = list(freq.keys())\n val = list(freq.values())\n m = max(val)\n return key[val.index(m)]\n\ns = Solution()\nl = [2,1,2,5,3,2]\nk = [1,1,1,2]\nprint(s.repeatedNTimes(l))",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
scheme = 'http'
hostname = 'localhost'
port = 9000
routes = ['/available/2', '/available/4']
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
scheme = 'http'
hostname = 'localhost'
port = 9000
routes = [
'/available/2',
'/available/4'
]
|
flexible
|
{
"blob_id": "d1402469232b5e3c3b09339849f6899e009fd74b",
"index": 3323,
"step-1": "<mask token>\n",
"step-2": "scheme = 'http'\nhostname = 'localhost'\nport = 9000\nroutes = ['/available/2', '/available/4']\n",
"step-3": "# -*- coding: utf-8 -*-\n\n\nscheme = 'http'\n\nhostname = 'localhost'\n\nport = 9000\n\nroutes = [\n '/available/2',\n '/available/4'\n]\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
data = " Ramya , Deepa,LIRIL ,amma, dad, Kiran, 12321 , Suresh, Jayesh, Ramesh,Balu"
lst = data.split(",")
for name in lst:
name = name.strip().upper()
rname = name[::-1]
if name == rname:
print(name)
girlsdata = "Tanvi,Dhatri,Haadya,Deepthi,Deepa,Ramya"
# Name which start with DEE get those name
print("-"*20)
names = girlsdata.split(",")
for name in names:
name = name.upper()
if "D" in name:
print(name)
|
normal
|
{
"blob_id": "622b388beb56eba85bbb08510c2bcea55f23da9a",
"index": 721,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor name in lst:\n name = name.strip().upper()\n rname = name[::-1]\n if name == rname:\n print(name)\n<mask token>\nprint('-' * 20)\n<mask token>\nfor name in names:\n name = name.upper()\n if 'D' in name:\n print(name)\n",
"step-3": "data = (\n ' Ramya , Deepa,LIRIL ,amma, dad, Kiran, 12321 , Suresh, Jayesh, Ramesh,Balu'\n )\nlst = data.split(',')\nfor name in lst:\n name = name.strip().upper()\n rname = name[::-1]\n if name == rname:\n print(name)\ngirlsdata = 'Tanvi,Dhatri,Haadya,Deepthi,Deepa,Ramya'\nprint('-' * 20)\nnames = girlsdata.split(',')\nfor name in names:\n name = name.upper()\n if 'D' in name:\n print(name)\n",
"step-4": "data = \" Ramya , Deepa,LIRIL ,amma, dad, Kiran, 12321 , Suresh, Jayesh, Ramesh,Balu\"\n\nlst = data.split(\",\")\n\nfor name in lst:\n name = name.strip().upper()\n rname = name[::-1]\n if name == rname:\n print(name)\n\ngirlsdata = \"Tanvi,Dhatri,Haadya,Deepthi,Deepa,Ramya\"\n# Name which start with DEE get those name\nprint(\"-\"*20)\nnames = girlsdata.split(\",\")\nfor name in names:\n name = name.upper()\n if \"D\" in name:\n print(name)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def import_csv_from_aws():
client = boto3.client('s3', aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
client.download_file('ergast-csv', 'filtered_laptimes.csv',
'filtered_laptimes.csv')
client.download_file('ergast-csv', 'filtered_races.csv',
'filtered_races.csv')
df_lapTimes = pd.read_csv('filtered_laptimes.csv')
df_races = pd.read_csv('filtered_races.csv')
df_races['round'] = df_races['round'].astype(int)
df_races['season'] = df_races['season'].astype(int)
df_lapTimes.rename(columns={'driverId': 'driverRef'}, inplace=True)
df_lapTimes = pd.merge(df_lapTimes, df_races[['raceId', 'raceName',
'season', 'round']], on=['raceId'], how='left')
df_lapTimes.fillna('0:00:00', inplace=True)
df_lapTimes['time'] = df_lapTimes['time'].map(lambda x: get_sec(x))
df_lapTimes = df_lapTimes[['driverRef', 'season', 'raceId', 'raceName',
'round', 'lap', 'time', 'position']]
df_lapTimes.rename(columns={'round': 'roundId'}, inplace=True)
save_races_to_db(df_races, db.session)
for i, group in df_lapTimes.groupby('raceId'):
g = group.drop(['raceId'], axis=1)
b.session.bulk_insert_mappings(LapTimes, g.to_dict('records'))
db.session.commit()
<|reserved_special_token_1|>
import boto3
from app.models import *
from app.config import *
from app.lib.log import save_races_to_db, save_laptimes_to_db
from app.utils.utils import get_sec
import pandas as pd
def import_csv_from_aws():
client = boto3.client('s3', aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
client.download_file('ergast-csv', 'filtered_laptimes.csv',
'filtered_laptimes.csv')
client.download_file('ergast-csv', 'filtered_races.csv',
'filtered_races.csv')
df_lapTimes = pd.read_csv('filtered_laptimes.csv')
df_races = pd.read_csv('filtered_races.csv')
df_races['round'] = df_races['round'].astype(int)
df_races['season'] = df_races['season'].astype(int)
df_lapTimes.rename(columns={'driverId': 'driverRef'}, inplace=True)
df_lapTimes = pd.merge(df_lapTimes, df_races[['raceId', 'raceName',
'season', 'round']], on=['raceId'], how='left')
df_lapTimes.fillna('0:00:00', inplace=True)
df_lapTimes['time'] = df_lapTimes['time'].map(lambda x: get_sec(x))
df_lapTimes = df_lapTimes[['driverRef', 'season', 'raceId', 'raceName',
'round', 'lap', 'time', 'position']]
df_lapTimes.rename(columns={'round': 'roundId'}, inplace=True)
save_races_to_db(df_races, db.session)
for i, group in df_lapTimes.groupby('raceId'):
g = group.drop(['raceId'], axis=1)
b.session.bulk_insert_mappings(LapTimes, g.to_dict('records'))
db.session.commit()
<|reserved_special_token_1|>
import boto3
from app.models import *
from app.config import *
from app.lib.log import save_races_to_db, save_laptimes_to_db
from app.utils.utils import get_sec
import pandas as pd
def import_csv_from_aws():
client = boto3.client(
's3',
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY
)
client.download_file('ergast-csv','filtered_laptimes.csv','filtered_laptimes.csv')
client.download_file('ergast-csv','filtered_races.csv','filtered_races.csv')
df_lapTimes = pd.read_csv('filtered_laptimes.csv')
df_races = pd.read_csv('filtered_races.csv')
df_races['round'] = df_races['round'].astype(int)
df_races['season'] = df_races['season'].astype(int)
df_lapTimes.rename(columns={"driverId":"driverRef"}, inplace=True)
df_lapTimes = pd.merge(df_lapTimes, df_races[['raceId', 'raceName', 'season', 'round']], on=['raceId'], how='left')
df_lapTimes.fillna("0:00:00", inplace=True)
df_lapTimes['time'] = df_lapTimes['time'].map(lambda x: get_sec(x))
df_lapTimes = df_lapTimes[["driverRef", "season", "raceId", "raceName", "round", "lap", "time", "position"]]
df_lapTimes.rename(columns={"round":"roundId"}, inplace=True)
save_races_to_db(df_races, db.session)
for i, group in df_lapTimes.groupby("raceId"):
g = group.drop(["raceId"], axis=1)
b.session.bulk_insert_mappings(LapTimes, g.to_dict("records"))
db.session.commit()
|
flexible
|
{
"blob_id": "b573db8ea0845fb947636b8d82ed462904c6005d",
"index": 5519,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef import_csv_from_aws():\n client = boto3.client('s3', aws_access_key_id=AWS_ACCESS_KEY_ID,\n aws_secret_access_key=AWS_SECRET_ACCESS_KEY)\n client.download_file('ergast-csv', 'filtered_laptimes.csv',\n 'filtered_laptimes.csv')\n client.download_file('ergast-csv', 'filtered_races.csv',\n 'filtered_races.csv')\n df_lapTimes = pd.read_csv('filtered_laptimes.csv')\n df_races = pd.read_csv('filtered_races.csv')\n df_races['round'] = df_races['round'].astype(int)\n df_races['season'] = df_races['season'].astype(int)\n df_lapTimes.rename(columns={'driverId': 'driverRef'}, inplace=True)\n df_lapTimes = pd.merge(df_lapTimes, df_races[['raceId', 'raceName',\n 'season', 'round']], on=['raceId'], how='left')\n df_lapTimes.fillna('0:00:00', inplace=True)\n df_lapTimes['time'] = df_lapTimes['time'].map(lambda x: get_sec(x))\n df_lapTimes = df_lapTimes[['driverRef', 'season', 'raceId', 'raceName',\n 'round', 'lap', 'time', 'position']]\n df_lapTimes.rename(columns={'round': 'roundId'}, inplace=True)\n save_races_to_db(df_races, db.session)\n for i, group in df_lapTimes.groupby('raceId'):\n g = group.drop(['raceId'], axis=1)\n b.session.bulk_insert_mappings(LapTimes, g.to_dict('records'))\n db.session.commit()\n",
"step-3": "import boto3\nfrom app.models import *\nfrom app.config import *\nfrom app.lib.log import save_races_to_db, save_laptimes_to_db\nfrom app.utils.utils import get_sec\nimport pandas as pd\n\n\ndef import_csv_from_aws():\n client = boto3.client('s3', aws_access_key_id=AWS_ACCESS_KEY_ID,\n aws_secret_access_key=AWS_SECRET_ACCESS_KEY)\n client.download_file('ergast-csv', 'filtered_laptimes.csv',\n 'filtered_laptimes.csv')\n client.download_file('ergast-csv', 'filtered_races.csv',\n 'filtered_races.csv')\n df_lapTimes = pd.read_csv('filtered_laptimes.csv')\n df_races = pd.read_csv('filtered_races.csv')\n df_races['round'] = df_races['round'].astype(int)\n df_races['season'] = df_races['season'].astype(int)\n df_lapTimes.rename(columns={'driverId': 'driverRef'}, inplace=True)\n df_lapTimes = pd.merge(df_lapTimes, df_races[['raceId', 'raceName',\n 'season', 'round']], on=['raceId'], how='left')\n df_lapTimes.fillna('0:00:00', inplace=True)\n df_lapTimes['time'] = df_lapTimes['time'].map(lambda x: get_sec(x))\n df_lapTimes = df_lapTimes[['driverRef', 'season', 'raceId', 'raceName',\n 'round', 'lap', 'time', 'position']]\n df_lapTimes.rename(columns={'round': 'roundId'}, inplace=True)\n save_races_to_db(df_races, db.session)\n for i, group in df_lapTimes.groupby('raceId'):\n g = group.drop(['raceId'], axis=1)\n b.session.bulk_insert_mappings(LapTimes, g.to_dict('records'))\n db.session.commit()\n",
"step-4": "import boto3\nfrom app.models import *\nfrom app.config import *\nfrom app.lib.log import save_races_to_db, save_laptimes_to_db\nfrom app.utils.utils import get_sec\nimport pandas as pd\n\ndef import_csv_from_aws():\n\n\tclient = boto3.client(\n\t\t's3',\n\t\taws_access_key_id=AWS_ACCESS_KEY_ID,\n\t\taws_secret_access_key=AWS_SECRET_ACCESS_KEY\n\t)\n\n\tclient.download_file('ergast-csv','filtered_laptimes.csv','filtered_laptimes.csv')\n\tclient.download_file('ergast-csv','filtered_races.csv','filtered_races.csv')\n\n\tdf_lapTimes = pd.read_csv('filtered_laptimes.csv')\n\tdf_races = pd.read_csv('filtered_races.csv')\n\n\tdf_races['round'] = df_races['round'].astype(int)\n\tdf_races['season'] = df_races['season'].astype(int)\n\n\tdf_lapTimes.rename(columns={\"driverId\":\"driverRef\"}, inplace=True)\n\tdf_lapTimes = pd.merge(df_lapTimes, df_races[['raceId', 'raceName', 'season', 'round']], on=['raceId'], how='left')\n\n\tdf_lapTimes.fillna(\"0:00:00\", inplace=True)\n\tdf_lapTimes['time'] = df_lapTimes['time'].map(lambda x: get_sec(x))\n\n\tdf_lapTimes = df_lapTimes[[\"driverRef\", \"season\", \"raceId\", \"raceName\", \"round\", \"lap\", \"time\", \"position\"]]\n\tdf_lapTimes.rename(columns={\"round\":\"roundId\"}, inplace=True)\n\n\tsave_races_to_db(df_races, db.session)\n\n\tfor i, group in df_lapTimes.groupby(\"raceId\"):\n\n\t\tg = group.drop([\"raceId\"], axis=1)\n\t\tb.session.bulk_insert_mappings(LapTimes, g.to_dict(\"records\"))\n\t\tdb.session.commit()\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""Ex026 Faça um programa que leia uma frase pelo teclado e mostre:
Quantas vezes aparece a letra "A".
Em que posição ela aparece a primeira vez.
Em que posição ela aparece pela última vez."""
frase = str(input('Digite uma frase: ')).strip().lower()
n_a = frase.count('a')
f_a = frase.find('a')+1
l_a= frase.rfind('a')-1
print(f'Sua frase tem {n_a} letras a')
print(f'A letra A aparece pela primeira vez na {f_a}° posição')
print(f'A letra A apaerece pela ultima vez na {l_a}° posição')
|
normal
|
{
"blob_id": "58f3b8c5470c765c81f27d39d9c28751a8c2b719",
"index": 277,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(f'Sua frase tem {n_a} letras a')\nprint(f'A letra A aparece pela primeira vez na {f_a}° posição')\nprint(f'A letra A apaerece pela ultima vez na {l_a}° posição')\n",
"step-3": "<mask token>\nfrase = str(input('Digite uma frase: ')).strip().lower()\nn_a = frase.count('a')\nf_a = frase.find('a') + 1\nl_a = frase.rfind('a') - 1\nprint(f'Sua frase tem {n_a} letras a')\nprint(f'A letra A aparece pela primeira vez na {f_a}° posição')\nprint(f'A letra A apaerece pela ultima vez na {l_a}° posição')\n",
"step-4": "\"\"\"Ex026 Faça um programa que leia uma frase pelo teclado e mostre:\nQuantas vezes aparece a letra \"A\".\nEm que posição ela aparece a primeira vez.\nEm que posição ela aparece pela última vez.\"\"\"\nfrase = str(input('Digite uma frase: ')).strip().lower()\nn_a = frase.count('a')\nf_a = frase.find('a')+1\nl_a= frase.rfind('a')-1\nprint(f'Sua frase tem {n_a} letras a')\nprint(f'A letra A aparece pela primeira vez na {f_a}° posição')\nprint(f'A letra A apaerece pela ultima vez na {l_a}° posição')\n\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import codecs
import Levenshtein
import logging
import random
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
import time
from sklearn.model_selection import KFold
import numpy as np
import scipy.io as scio
from matplotlib import pyplot as plt
logging.basicConfig(level=logging.INFO)
user_file = open('groundtruth.txt')
user_gp = user_file.readlines()
user_file.close()
same_line_dict = {}
for items in user_gp:
users = items.strip().split()
same_line_dict.update({x: users for x in users})
# print(same_line_dict)
info_file = codecs.open('new_posts.txt', 'r', 'utf-8')
info_data = info_file.readlines()
info_file.close()
info_dict = {}
for line in info_data:
tmp_str = line.strip()
# print(tmp_str)
try:
tmp_dict = json.loads(tmp_str)
k = list(tmp_dict.keys())
# print(k)
v = tmp_dict[k[0]]
info_dict.update({k[0]: v})
except:
logging.warning('Invalid Data!')
continue
valid_users = list(info_dict.keys())
user_num = len(valid_users)
print(user_num)
flw_file = open('new_followings.txt')
flw_data = flw_file.readlines()
flw_file.close()
flw_dict = {}
for lines in flw_data:
items = lines.strip().split()
flw_dict[items[0]] = items[2:]
valid_flw = list(flw_dict.keys())
print(len(flw_dict))
def gen_label(uid1, uid2):
if same_line_dict[uid1].__contains__(uid2) and same_line_dict[uid2].__contains__(uid1):
return '1'
else:
return '-1'
info_keys = ['text', 'textLength', 'source', 'id', 'screen_name',
'statuses_count', 'verified', 'verified_type',
'description', 'gender', 'urank', 'followers_count',
'follow_count', 'reposts_count', 'comments_count',
'attitudes_count', 'isLongText']
def get_info(uid):
if info_dict[uid] == []:
return False, {}
tdict = {
'text': '',
'textLength': 0,
'source': '',
'id': '',
'screen_name': '',
'statuses_count': 0,
'verified': False,
'verified_type': -1,
'description': '',
'gender': '',
'urank': 0,
'followers_count': 0,
'follow_count': 0,
'reposts_count': 0,
'comments_count': 0,
'attitudes_count': 0,
'isLongText': False
}
# print(info_dict[uid])
latest_po = info_dict[uid][0]['mblog']
user_info = latest_po['user']
# print(latest_po)
# print(user_info)
for elem in info_keys[0:3]:
if list(latest_po.keys()).__contains__(elem):
tdict.update({elem: latest_po[elem]})
for elem in info_keys[3:]:
if list(user_info.keys()).__contains__(elem):
tdict.update({elem: user_info[elem]})
return True, tdict
def gen_data(dict1, dict2):
result = []
if dict1['verified'] and dict2['verified']:
verified = -1
elif dict1['verified'] or dict2['verified']:
verified = 1
else:
verified = 0
result.append(verified)
bool_style = ['verified_type', 'gender', 'isLongText']
for items in bool_style:
result.append(1 if dict1[items] == dict2[items] else 0)
result.append(abs(dict1['urank'] - dict2['urank']))
result.append(abs(dict1['statuses_count'] - dict2['statuses_count']))
result.append(abs(dict1['followers_count'] - dict2['followers_count'])
/ abs(dict1['followers_count'] + dict2['followers_count'])
if abs(dict1['followers_count'] + dict2['followers_count']) != 0
else 1)
result.append(abs(dict1['follow_count'] - dict2['follow_count'])
/ abs(dict1['follow_count'] + dict2['follow_count'])
if abs(dict1['follow_count'] + dict2['follow_count']) != 0
else 1
)
result.append(abs(dict1['reposts_count'] - dict2['reposts_count']))
result.append(abs(dict1['comments_count'] - dict2['comments_count']))
result.append(abs(dict1['attitudes_count'] - dict2['attitudes_count']))
result.append(Levenshtein.jaro_winkler(dict1['screen_name'], dict2['screen_name']))
result.append(Levenshtein.jaro_winkler(dict1['description'], dict2['description']))
result.append(Levenshtein.jaro_winkler(dict1['text'], dict2['text']))
return result
def gen_flw(uid1, uid2):
if not valid_flw.__contains__(uid1) and not valid_flw.__contains__(uid2):
return 0, 0
elif valid_flw.__contains__(uid1) and not valid_flw.__contains__(uid2):
return flw_dict[uid1].__contains__(uid2), 0
elif not valid_flw.__contains__(uid1) and valid_flw.__contains__(uid2):
return flw_dict[uid2].__contains__(uid1), 0
else:
return 2, len(list(a for a in flw_dict[uid1] if a in flw_dict[uid2])) \
/ (
len(flw_dict[uid1]) + len(flw_dict[uid2]) - len(
list(a for a in flw_dict[uid1] if a in flw_dict[uid2])))
logging.info('Prepare Data!')
train_num = 8000
data = []
labels = []
uidpool = []
for i in range(0, train_num):
order1 = random.randint(0, user_num - 1)
order2 = random.randint(0, user_num - 1)
uid1 = valid_users[order1]
uid2 = same_line_dict[uid1][random.randint(0, len(same_line_dict[uid1]) - 1)]
# uid2 = valid_users[order2]
# if random.random() >= 0:
# # print('+-1')
# uid2 = same_line_dict[uid1][random.randint(0, len(same_line_dict[uid1]) - 1)]
flag1, dict1 = get_info(uid1)
flag2, dict2 = get_info(uid2)
while (uid1 == uid2 or uidpool.__contains__([uid1, uid2]) or not flag1 or not flag2):
order1 = random.randint(0, user_num - 1)
order2 = random.randint(0, user_num - 1)
uid1 = valid_users[order1]
uid2 = valid_users[order2]
flag1, dict1 = get_info(uid1)
flag2, dict2 = get_info(uid2)
uidpool.append([uid1, uid2])
uidpool.append([uid2, uid1])
tmp_data = gen_data(dict1, dict2)
flw1, flw2 = gen_flw(uid1, uid2)
# data.append(gen_data(dict1, dict2))
tmp_data.append(flw1)
tmp_data.append(flw2)
data.append(tmp_data)
labels.append(gen_label(uid1, uid2))
# print(uid1, uid2)
print(data)
print(labels)
print('total number:', train_num)
print('total positive samples:', labels.count('1'))
logging.info('Start Training!')
rf = RandomForestClassifier(n_estimators=40, n_jobs=4, verbose=0)
accur = []
begin_time=time.time()
for order in range(0, 10):
ratio = 9 / 10
train_data = []
train_labels = []
test_data = []
test_labels = []
for i in range(0, train_num):
if random.random() > ratio:
test_data.append(data[i])
test_labels.append(labels[i])
else:
train_data.append(data[i])
train_labels.append(labels[i])
# print('train number:', len(train_labels))
# print('train positive samples:', train_labels.count('1'))
rf.fit(train_data, train_labels)
logging.info('Train Done!')
# print('Train accuracy:',
# rf.score(train_data, train_labels))
# print('Test accuracy:',
# rf.score(test_data, test_labels))
acc = rf.score(data, labels)
# print('Total accuracy:', acc)
accur.append(acc)
end_time=time.time()
print('Feature Weight:')
# print('Feature Weight:', rf.feature_importances_)
features = ['verified', 'verified_type', 'gender', 'isLongText', 'urank', 'statuses_diff',
'followers_diff', 'follows_diff', 'reposts_diff', 'comment_diff', 'attitudes_diff',
'screen_name_similarity', 'description_similarity', 'text_similarity', 'co_follow', 'in_follows']
for i in range(0, 16):
print(features[i], ':', rf.feature_importances_[i])
print('Total accuracy', rf.score(data, labels))
scores = cross_val_score(rf, data, labels, cv=10)
print(sum(scores) / 10)
print('time:',end_time-begin_time)
|
normal
|
{
"blob_id": "37804c92b69d366cc1774335b6a2295dfd5b98f3",
"index": 6592,
"step-1": "<mask token>\n\n\ndef gen_label(uid1, uid2):\n if same_line_dict[uid1].__contains__(uid2) and same_line_dict[uid2\n ].__contains__(uid1):\n return '1'\n else:\n return '-1'\n\n\n<mask token>\n\n\ndef gen_flw(uid1, uid2):\n if not valid_flw.__contains__(uid1) and not valid_flw.__contains__(uid2):\n return 0, 0\n elif valid_flw.__contains__(uid1) and not valid_flw.__contains__(uid2):\n return flw_dict[uid1].__contains__(uid2), 0\n elif not valid_flw.__contains__(uid1) and valid_flw.__contains__(uid2):\n return flw_dict[uid2].__contains__(uid1), 0\n else:\n return 2, len(list(a for a in flw_dict[uid1] if a in flw_dict[uid2])\n ) / (len(flw_dict[uid1]) + len(flw_dict[uid2]) - len(list(a for\n a in flw_dict[uid1] if a in flw_dict[uid2])))\n\n\n<mask token>\n",
"step-2": "<mask token>\nlogging.basicConfig(level=logging.INFO)\n<mask token>\nuser_file.close()\n<mask token>\nfor items in user_gp:\n users = items.strip().split()\n same_line_dict.update({x: users for x in users})\n<mask token>\ninfo_file.close()\n<mask token>\nfor line in info_data:\n tmp_str = line.strip()\n try:\n tmp_dict = json.loads(tmp_str)\n k = list(tmp_dict.keys())\n v = tmp_dict[k[0]]\n info_dict.update({k[0]: v})\n except:\n logging.warning('Invalid Data!')\n continue\n<mask token>\nprint(user_num)\n<mask token>\nflw_file.close()\n<mask token>\nfor lines in flw_data:\n items = lines.strip().split()\n flw_dict[items[0]] = items[2:]\n<mask token>\nprint(len(flw_dict))\n\n\ndef gen_label(uid1, uid2):\n if same_line_dict[uid1].__contains__(uid2) and same_line_dict[uid2\n ].__contains__(uid1):\n return '1'\n else:\n return '-1'\n\n\n<mask token>\n\n\ndef get_info(uid):\n if info_dict[uid] == []:\n return False, {}\n tdict = {'text': '', 'textLength': 0, 'source': '', 'id': '',\n 'screen_name': '', 'statuses_count': 0, 'verified': False,\n 'verified_type': -1, 'description': '', 'gender': '', 'urank': 0,\n 'followers_count': 0, 'follow_count': 0, 'reposts_count': 0,\n 'comments_count': 0, 'attitudes_count': 0, 'isLongText': False}\n latest_po = info_dict[uid][0]['mblog']\n user_info = latest_po['user']\n for elem in info_keys[0:3]:\n if list(latest_po.keys()).__contains__(elem):\n tdict.update({elem: latest_po[elem]})\n for elem in info_keys[3:]:\n if list(user_info.keys()).__contains__(elem):\n tdict.update({elem: user_info[elem]})\n return True, tdict\n\n\ndef gen_data(dict1, dict2):\n result = []\n if dict1['verified'] and dict2['verified']:\n verified = -1\n elif dict1['verified'] or dict2['verified']:\n verified = 1\n else:\n verified = 0\n result.append(verified)\n bool_style = ['verified_type', 'gender', 'isLongText']\n for items in bool_style:\n result.append(1 if dict1[items] == dict2[items] else 0)\n result.append(abs(dict1['urank'] - 
dict2['urank']))\n result.append(abs(dict1['statuses_count'] - dict2['statuses_count']))\n result.append(abs(dict1['followers_count'] - dict2['followers_count']) /\n abs(dict1['followers_count'] + dict2['followers_count']) if abs(\n dict1['followers_count'] + dict2['followers_count']) != 0 else 1)\n result.append(abs(dict1['follow_count'] - dict2['follow_count']) / abs(\n dict1['follow_count'] + dict2['follow_count']) if abs(dict1[\n 'follow_count'] + dict2['follow_count']) != 0 else 1)\n result.append(abs(dict1['reposts_count'] - dict2['reposts_count']))\n result.append(abs(dict1['comments_count'] - dict2['comments_count']))\n result.append(abs(dict1['attitudes_count'] - dict2['attitudes_count']))\n result.append(Levenshtein.jaro_winkler(dict1['screen_name'], dict2[\n 'screen_name']))\n result.append(Levenshtein.jaro_winkler(dict1['description'], dict2[\n 'description']))\n result.append(Levenshtein.jaro_winkler(dict1['text'], dict2['text']))\n return result\n\n\ndef gen_flw(uid1, uid2):\n if not valid_flw.__contains__(uid1) and not valid_flw.__contains__(uid2):\n return 0, 0\n elif valid_flw.__contains__(uid1) and not valid_flw.__contains__(uid2):\n return flw_dict[uid1].__contains__(uid2), 0\n elif not valid_flw.__contains__(uid1) and valid_flw.__contains__(uid2):\n return flw_dict[uid2].__contains__(uid1), 0\n else:\n return 2, len(list(a for a in flw_dict[uid1] if a in flw_dict[uid2])\n ) / (len(flw_dict[uid1]) + len(flw_dict[uid2]) - len(list(a for\n a in flw_dict[uid1] if a in flw_dict[uid2])))\n\n\nlogging.info('Prepare Data!')\n<mask token>\nfor i in range(0, train_num):\n order1 = random.randint(0, user_num - 1)\n order2 = random.randint(0, user_num - 1)\n uid1 = valid_users[order1]\n uid2 = same_line_dict[uid1][random.randint(0, len(same_line_dict[uid1]) -\n 1)]\n flag1, dict1 = get_info(uid1)\n flag2, dict2 = get_info(uid2)\n while uid1 == uid2 or uidpool.__contains__([uid1, uid2]\n ) or not flag1 or not flag2:\n order1 = random.randint(0, user_num - 
1)\n order2 = random.randint(0, user_num - 1)\n uid1 = valid_users[order1]\n uid2 = valid_users[order2]\n flag1, dict1 = get_info(uid1)\n flag2, dict2 = get_info(uid2)\n uidpool.append([uid1, uid2])\n uidpool.append([uid2, uid1])\n tmp_data = gen_data(dict1, dict2)\n flw1, flw2 = gen_flw(uid1, uid2)\n tmp_data.append(flw1)\n tmp_data.append(flw2)\n data.append(tmp_data)\n labels.append(gen_label(uid1, uid2))\nprint(data)\nprint(labels)\nprint('total number:', train_num)\nprint('total positive samples:', labels.count('1'))\nlogging.info('Start Training!')\n<mask token>\nfor order in range(0, 10):\n ratio = 9 / 10\n train_data = []\n train_labels = []\n test_data = []\n test_labels = []\n for i in range(0, train_num):\n if random.random() > ratio:\n test_data.append(data[i])\n test_labels.append(labels[i])\n else:\n train_data.append(data[i])\n train_labels.append(labels[i])\n rf.fit(train_data, train_labels)\n logging.info('Train Done!')\n acc = rf.score(data, labels)\n accur.append(acc)\n<mask token>\nprint('Feature Weight:')\n<mask token>\nfor i in range(0, 16):\n print(features[i], ':', rf.feature_importances_[i])\nprint('Total accuracy', rf.score(data, labels))\n<mask token>\nprint(sum(scores) / 10)\nprint('time:', end_time - begin_time)\n",
"step-3": "<mask token>\nlogging.basicConfig(level=logging.INFO)\nuser_file = open('groundtruth.txt')\nuser_gp = user_file.readlines()\nuser_file.close()\nsame_line_dict = {}\nfor items in user_gp:\n users = items.strip().split()\n same_line_dict.update({x: users for x in users})\ninfo_file = codecs.open('new_posts.txt', 'r', 'utf-8')\ninfo_data = info_file.readlines()\ninfo_file.close()\ninfo_dict = {}\nfor line in info_data:\n tmp_str = line.strip()\n try:\n tmp_dict = json.loads(tmp_str)\n k = list(tmp_dict.keys())\n v = tmp_dict[k[0]]\n info_dict.update({k[0]: v})\n except:\n logging.warning('Invalid Data!')\n continue\nvalid_users = list(info_dict.keys())\nuser_num = len(valid_users)\nprint(user_num)\nflw_file = open('new_followings.txt')\nflw_data = flw_file.readlines()\nflw_file.close()\nflw_dict = {}\nfor lines in flw_data:\n items = lines.strip().split()\n flw_dict[items[0]] = items[2:]\nvalid_flw = list(flw_dict.keys())\nprint(len(flw_dict))\n\n\ndef gen_label(uid1, uid2):\n if same_line_dict[uid1].__contains__(uid2) and same_line_dict[uid2\n ].__contains__(uid1):\n return '1'\n else:\n return '-1'\n\n\ninfo_keys = ['text', 'textLength', 'source', 'id', 'screen_name',\n 'statuses_count', 'verified', 'verified_type', 'description', 'gender',\n 'urank', 'followers_count', 'follow_count', 'reposts_count',\n 'comments_count', 'attitudes_count', 'isLongText']\n\n\ndef get_info(uid):\n if info_dict[uid] == []:\n return False, {}\n tdict = {'text': '', 'textLength': 0, 'source': '', 'id': '',\n 'screen_name': '', 'statuses_count': 0, 'verified': False,\n 'verified_type': -1, 'description': '', 'gender': '', 'urank': 0,\n 'followers_count': 0, 'follow_count': 0, 'reposts_count': 0,\n 'comments_count': 0, 'attitudes_count': 0, 'isLongText': False}\n latest_po = info_dict[uid][0]['mblog']\n user_info = latest_po['user']\n for elem in info_keys[0:3]:\n if list(latest_po.keys()).__contains__(elem):\n tdict.update({elem: latest_po[elem]})\n for elem in 
info_keys[3:]:\n if list(user_info.keys()).__contains__(elem):\n tdict.update({elem: user_info[elem]})\n return True, tdict\n\n\ndef gen_data(dict1, dict2):\n result = []\n if dict1['verified'] and dict2['verified']:\n verified = -1\n elif dict1['verified'] or dict2['verified']:\n verified = 1\n else:\n verified = 0\n result.append(verified)\n bool_style = ['verified_type', 'gender', 'isLongText']\n for items in bool_style:\n result.append(1 if dict1[items] == dict2[items] else 0)\n result.append(abs(dict1['urank'] - dict2['urank']))\n result.append(abs(dict1['statuses_count'] - dict2['statuses_count']))\n result.append(abs(dict1['followers_count'] - dict2['followers_count']) /\n abs(dict1['followers_count'] + dict2['followers_count']) if abs(\n dict1['followers_count'] + dict2['followers_count']) != 0 else 1)\n result.append(abs(dict1['follow_count'] - dict2['follow_count']) / abs(\n dict1['follow_count'] + dict2['follow_count']) if abs(dict1[\n 'follow_count'] + dict2['follow_count']) != 0 else 1)\n result.append(abs(dict1['reposts_count'] - dict2['reposts_count']))\n result.append(abs(dict1['comments_count'] - dict2['comments_count']))\n result.append(abs(dict1['attitudes_count'] - dict2['attitudes_count']))\n result.append(Levenshtein.jaro_winkler(dict1['screen_name'], dict2[\n 'screen_name']))\n result.append(Levenshtein.jaro_winkler(dict1['description'], dict2[\n 'description']))\n result.append(Levenshtein.jaro_winkler(dict1['text'], dict2['text']))\n return result\n\n\ndef gen_flw(uid1, uid2):\n if not valid_flw.__contains__(uid1) and not valid_flw.__contains__(uid2):\n return 0, 0\n elif valid_flw.__contains__(uid1) and not valid_flw.__contains__(uid2):\n return flw_dict[uid1].__contains__(uid2), 0\n elif not valid_flw.__contains__(uid1) and valid_flw.__contains__(uid2):\n return flw_dict[uid2].__contains__(uid1), 0\n else:\n return 2, len(list(a for a in flw_dict[uid1] if a in flw_dict[uid2])\n ) / (len(flw_dict[uid1]) + len(flw_dict[uid2]) - len(list(a 
for\n a in flw_dict[uid1] if a in flw_dict[uid2])))\n\n\nlogging.info('Prepare Data!')\ntrain_num = 8000\ndata = []\nlabels = []\nuidpool = []\nfor i in range(0, train_num):\n order1 = random.randint(0, user_num - 1)\n order2 = random.randint(0, user_num - 1)\n uid1 = valid_users[order1]\n uid2 = same_line_dict[uid1][random.randint(0, len(same_line_dict[uid1]) -\n 1)]\n flag1, dict1 = get_info(uid1)\n flag2, dict2 = get_info(uid2)\n while uid1 == uid2 or uidpool.__contains__([uid1, uid2]\n ) or not flag1 or not flag2:\n order1 = random.randint(0, user_num - 1)\n order2 = random.randint(0, user_num - 1)\n uid1 = valid_users[order1]\n uid2 = valid_users[order2]\n flag1, dict1 = get_info(uid1)\n flag2, dict2 = get_info(uid2)\n uidpool.append([uid1, uid2])\n uidpool.append([uid2, uid1])\n tmp_data = gen_data(dict1, dict2)\n flw1, flw2 = gen_flw(uid1, uid2)\n tmp_data.append(flw1)\n tmp_data.append(flw2)\n data.append(tmp_data)\n labels.append(gen_label(uid1, uid2))\nprint(data)\nprint(labels)\nprint('total number:', train_num)\nprint('total positive samples:', labels.count('1'))\nlogging.info('Start Training!')\nrf = RandomForestClassifier(n_estimators=40, n_jobs=4, verbose=0)\naccur = []\nbegin_time = time.time()\nfor order in range(0, 10):\n ratio = 9 / 10\n train_data = []\n train_labels = []\n test_data = []\n test_labels = []\n for i in range(0, train_num):\n if random.random() > ratio:\n test_data.append(data[i])\n test_labels.append(labels[i])\n else:\n train_data.append(data[i])\n train_labels.append(labels[i])\n rf.fit(train_data, train_labels)\n logging.info('Train Done!')\n acc = rf.score(data, labels)\n accur.append(acc)\nend_time = time.time()\nprint('Feature Weight:')\nfeatures = ['verified', 'verified_type', 'gender', 'isLongText', 'urank',\n 'statuses_diff', 'followers_diff', 'follows_diff', 'reposts_diff',\n 'comment_diff', 'attitudes_diff', 'screen_name_similarity',\n 'description_similarity', 'text_similarity', 'co_follow', 'in_follows']\nfor i in 
range(0, 16):\n print(features[i], ':', rf.feature_importances_[i])\nprint('Total accuracy', rf.score(data, labels))\nscores = cross_val_score(rf, data, labels, cv=10)\nprint(sum(scores) / 10)\nprint('time:', end_time - begin_time)\n",
"step-4": "import json\nimport codecs\nimport Levenshtein\nimport logging\nimport random\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import cross_val_score\nimport time\nfrom sklearn.model_selection import KFold\nimport numpy as np\nimport scipy.io as scio\nfrom matplotlib import pyplot as plt\nlogging.basicConfig(level=logging.INFO)\nuser_file = open('groundtruth.txt')\nuser_gp = user_file.readlines()\nuser_file.close()\nsame_line_dict = {}\nfor items in user_gp:\n users = items.strip().split()\n same_line_dict.update({x: users for x in users})\ninfo_file = codecs.open('new_posts.txt', 'r', 'utf-8')\ninfo_data = info_file.readlines()\ninfo_file.close()\ninfo_dict = {}\nfor line in info_data:\n tmp_str = line.strip()\n try:\n tmp_dict = json.loads(tmp_str)\n k = list(tmp_dict.keys())\n v = tmp_dict[k[0]]\n info_dict.update({k[0]: v})\n except:\n logging.warning('Invalid Data!')\n continue\nvalid_users = list(info_dict.keys())\nuser_num = len(valid_users)\nprint(user_num)\nflw_file = open('new_followings.txt')\nflw_data = flw_file.readlines()\nflw_file.close()\nflw_dict = {}\nfor lines in flw_data:\n items = lines.strip().split()\n flw_dict[items[0]] = items[2:]\nvalid_flw = list(flw_dict.keys())\nprint(len(flw_dict))\n\n\ndef gen_label(uid1, uid2):\n if same_line_dict[uid1].__contains__(uid2) and same_line_dict[uid2\n ].__contains__(uid1):\n return '1'\n else:\n return '-1'\n\n\ninfo_keys = ['text', 'textLength', 'source', 'id', 'screen_name',\n 'statuses_count', 'verified', 'verified_type', 'description', 'gender',\n 'urank', 'followers_count', 'follow_count', 'reposts_count',\n 'comments_count', 'attitudes_count', 'isLongText']\n\n\ndef get_info(uid):\n if info_dict[uid] == []:\n return False, {}\n tdict = {'text': '', 'textLength': 0, 'source': '', 'id': '',\n 'screen_name': '', 'statuses_count': 0, 'verified': False,\n 'verified_type': -1, 'description': '', 'gender': '', 'urank': 0,\n 'followers_count': 0, 'follow_count': 
0, 'reposts_count': 0,\n 'comments_count': 0, 'attitudes_count': 0, 'isLongText': False}\n latest_po = info_dict[uid][0]['mblog']\n user_info = latest_po['user']\n for elem in info_keys[0:3]:\n if list(latest_po.keys()).__contains__(elem):\n tdict.update({elem: latest_po[elem]})\n for elem in info_keys[3:]:\n if list(user_info.keys()).__contains__(elem):\n tdict.update({elem: user_info[elem]})\n return True, tdict\n\n\ndef gen_data(dict1, dict2):\n result = []\n if dict1['verified'] and dict2['verified']:\n verified = -1\n elif dict1['verified'] or dict2['verified']:\n verified = 1\n else:\n verified = 0\n result.append(verified)\n bool_style = ['verified_type', 'gender', 'isLongText']\n for items in bool_style:\n result.append(1 if dict1[items] == dict2[items] else 0)\n result.append(abs(dict1['urank'] - dict2['urank']))\n result.append(abs(dict1['statuses_count'] - dict2['statuses_count']))\n result.append(abs(dict1['followers_count'] - dict2['followers_count']) /\n abs(dict1['followers_count'] + dict2['followers_count']) if abs(\n dict1['followers_count'] + dict2['followers_count']) != 0 else 1)\n result.append(abs(dict1['follow_count'] - dict2['follow_count']) / abs(\n dict1['follow_count'] + dict2['follow_count']) if abs(dict1[\n 'follow_count'] + dict2['follow_count']) != 0 else 1)\n result.append(abs(dict1['reposts_count'] - dict2['reposts_count']))\n result.append(abs(dict1['comments_count'] - dict2['comments_count']))\n result.append(abs(dict1['attitudes_count'] - dict2['attitudes_count']))\n result.append(Levenshtein.jaro_winkler(dict1['screen_name'], dict2[\n 'screen_name']))\n result.append(Levenshtein.jaro_winkler(dict1['description'], dict2[\n 'description']))\n result.append(Levenshtein.jaro_winkler(dict1['text'], dict2['text']))\n return result\n\n\ndef gen_flw(uid1, uid2):\n if not valid_flw.__contains__(uid1) and not valid_flw.__contains__(uid2):\n return 0, 0\n elif valid_flw.__contains__(uid1) and not valid_flw.__contains__(uid2):\n return 
flw_dict[uid1].__contains__(uid2), 0\n elif not valid_flw.__contains__(uid1) and valid_flw.__contains__(uid2):\n return flw_dict[uid2].__contains__(uid1), 0\n else:\n return 2, len(list(a for a in flw_dict[uid1] if a in flw_dict[uid2])\n ) / (len(flw_dict[uid1]) + len(flw_dict[uid2]) - len(list(a for\n a in flw_dict[uid1] if a in flw_dict[uid2])))\n\n\nlogging.info('Prepare Data!')\ntrain_num = 8000\ndata = []\nlabels = []\nuidpool = []\nfor i in range(0, train_num):\n order1 = random.randint(0, user_num - 1)\n order2 = random.randint(0, user_num - 1)\n uid1 = valid_users[order1]\n uid2 = same_line_dict[uid1][random.randint(0, len(same_line_dict[uid1]) -\n 1)]\n flag1, dict1 = get_info(uid1)\n flag2, dict2 = get_info(uid2)\n while uid1 == uid2 or uidpool.__contains__([uid1, uid2]\n ) or not flag1 or not flag2:\n order1 = random.randint(0, user_num - 1)\n order2 = random.randint(0, user_num - 1)\n uid1 = valid_users[order1]\n uid2 = valid_users[order2]\n flag1, dict1 = get_info(uid1)\n flag2, dict2 = get_info(uid2)\n uidpool.append([uid1, uid2])\n uidpool.append([uid2, uid1])\n tmp_data = gen_data(dict1, dict2)\n flw1, flw2 = gen_flw(uid1, uid2)\n tmp_data.append(flw1)\n tmp_data.append(flw2)\n data.append(tmp_data)\n labels.append(gen_label(uid1, uid2))\nprint(data)\nprint(labels)\nprint('total number:', train_num)\nprint('total positive samples:', labels.count('1'))\nlogging.info('Start Training!')\nrf = RandomForestClassifier(n_estimators=40, n_jobs=4, verbose=0)\naccur = []\nbegin_time = time.time()\nfor order in range(0, 10):\n ratio = 9 / 10\n train_data = []\n train_labels = []\n test_data = []\n test_labels = []\n for i in range(0, train_num):\n if random.random() > ratio:\n test_data.append(data[i])\n test_labels.append(labels[i])\n else:\n train_data.append(data[i])\n train_labels.append(labels[i])\n rf.fit(train_data, train_labels)\n logging.info('Train Done!')\n acc = rf.score(data, labels)\n accur.append(acc)\nend_time = time.time()\nprint('Feature 
Weight:')\nfeatures = ['verified', 'verified_type', 'gender', 'isLongText', 'urank',\n 'statuses_diff', 'followers_diff', 'follows_diff', 'reposts_diff',\n 'comment_diff', 'attitudes_diff', 'screen_name_similarity',\n 'description_similarity', 'text_similarity', 'co_follow', 'in_follows']\nfor i in range(0, 16):\n print(features[i], ':', rf.feature_importances_[i])\nprint('Total accuracy', rf.score(data, labels))\nscores = cross_val_score(rf, data, labels, cv=10)\nprint(sum(scores) / 10)\nprint('time:', end_time - begin_time)\n",
"step-5": "#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\r\nimport json\r\nimport codecs\r\nimport Levenshtein\r\nimport logging\r\nimport random\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.model_selection import cross_val_score\r\nimport time\r\nfrom sklearn.model_selection import KFold\r\nimport numpy as np\r\nimport scipy.io as scio\r\nfrom matplotlib import pyplot as plt\r\n\r\nlogging.basicConfig(level=logging.INFO)\r\n\r\nuser_file = open('groundtruth.txt')\r\nuser_gp = user_file.readlines()\r\nuser_file.close()\r\nsame_line_dict = {}\r\nfor items in user_gp:\r\n users = items.strip().split()\r\n same_line_dict.update({x: users for x in users})\r\n# print(same_line_dict)\r\n\r\ninfo_file = codecs.open('new_posts.txt', 'r', 'utf-8')\r\ninfo_data = info_file.readlines()\r\ninfo_file.close()\r\ninfo_dict = {}\r\nfor line in info_data:\r\n tmp_str = line.strip()\r\n # print(tmp_str)\r\n try:\r\n tmp_dict = json.loads(tmp_str)\r\n k = list(tmp_dict.keys())\r\n # print(k)\r\n v = tmp_dict[k[0]]\r\n info_dict.update({k[0]: v})\r\n except:\r\n logging.warning('Invalid Data!')\r\n continue\r\n\r\nvalid_users = list(info_dict.keys())\r\nuser_num = len(valid_users)\r\nprint(user_num)\r\n\r\nflw_file = open('new_followings.txt')\r\nflw_data = flw_file.readlines()\r\nflw_file.close()\r\nflw_dict = {}\r\nfor lines in flw_data:\r\n items = lines.strip().split()\r\n flw_dict[items[0]] = items[2:]\r\nvalid_flw = list(flw_dict.keys())\r\nprint(len(flw_dict))\r\n\r\n\r\ndef gen_label(uid1, uid2):\r\n if same_line_dict[uid1].__contains__(uid2) and same_line_dict[uid2].__contains__(uid1):\r\n return '1'\r\n else:\r\n return '-1'\r\n\r\n\r\ninfo_keys = ['text', 'textLength', 'source', 'id', 'screen_name',\r\n 'statuses_count', 'verified', 'verified_type',\r\n 'description', 'gender', 'urank', 'followers_count',\r\n 'follow_count', 'reposts_count', 'comments_count',\r\n 'attitudes_count', 'isLongText']\r\n\r\n\r\ndef get_info(uid):\r\n if 
info_dict[uid] == []:\r\n return False, {}\r\n tdict = {\r\n 'text': '',\r\n 'textLength': 0,\r\n 'source': '',\r\n 'id': '',\r\n 'screen_name': '',\r\n 'statuses_count': 0,\r\n 'verified': False,\r\n 'verified_type': -1,\r\n 'description': '',\r\n 'gender': '',\r\n 'urank': 0,\r\n 'followers_count': 0,\r\n 'follow_count': 0,\r\n 'reposts_count': 0,\r\n 'comments_count': 0,\r\n 'attitudes_count': 0,\r\n 'isLongText': False\r\n }\r\n # print(info_dict[uid])\r\n latest_po = info_dict[uid][0]['mblog']\r\n user_info = latest_po['user']\r\n # print(latest_po)\r\n # print(user_info)\r\n for elem in info_keys[0:3]:\r\n if list(latest_po.keys()).__contains__(elem):\r\n tdict.update({elem: latest_po[elem]})\r\n for elem in info_keys[3:]:\r\n if list(user_info.keys()).__contains__(elem):\r\n tdict.update({elem: user_info[elem]})\r\n return True, tdict\r\n\r\n\r\ndef gen_data(dict1, dict2):\r\n result = []\r\n if dict1['verified'] and dict2['verified']:\r\n verified = -1\r\n elif dict1['verified'] or dict2['verified']:\r\n verified = 1\r\n else:\r\n verified = 0\r\n result.append(verified)\r\n bool_style = ['verified_type', 'gender', 'isLongText']\r\n for items in bool_style:\r\n result.append(1 if dict1[items] == dict2[items] else 0)\r\n result.append(abs(dict1['urank'] - dict2['urank']))\r\n result.append(abs(dict1['statuses_count'] - dict2['statuses_count']))\r\n result.append(abs(dict1['followers_count'] - dict2['followers_count'])\r\n / abs(dict1['followers_count'] + dict2['followers_count'])\r\n if abs(dict1['followers_count'] + dict2['followers_count']) != 0\r\n else 1)\r\n result.append(abs(dict1['follow_count'] - dict2['follow_count'])\r\n / abs(dict1['follow_count'] + dict2['follow_count'])\r\n if abs(dict1['follow_count'] + dict2['follow_count']) != 0\r\n else 1\r\n )\r\n result.append(abs(dict1['reposts_count'] - dict2['reposts_count']))\r\n result.append(abs(dict1['comments_count'] - dict2['comments_count']))\r\n result.append(abs(dict1['attitudes_count'] - 
dict2['attitudes_count']))\r\n result.append(Levenshtein.jaro_winkler(dict1['screen_name'], dict2['screen_name']))\r\n result.append(Levenshtein.jaro_winkler(dict1['description'], dict2['description']))\r\n result.append(Levenshtein.jaro_winkler(dict1['text'], dict2['text']))\r\n return result\r\n\r\n\r\ndef gen_flw(uid1, uid2):\r\n if not valid_flw.__contains__(uid1) and not valid_flw.__contains__(uid2):\r\n return 0, 0\r\n elif valid_flw.__contains__(uid1) and not valid_flw.__contains__(uid2):\r\n return flw_dict[uid1].__contains__(uid2), 0\r\n elif not valid_flw.__contains__(uid1) and valid_flw.__contains__(uid2):\r\n return flw_dict[uid2].__contains__(uid1), 0\r\n else:\r\n return 2, len(list(a for a in flw_dict[uid1] if a in flw_dict[uid2])) \\\r\n / (\r\n len(flw_dict[uid1]) + len(flw_dict[uid2]) - len(\r\n list(a for a in flw_dict[uid1] if a in flw_dict[uid2])))\r\n\r\n\r\nlogging.info('Prepare Data!')\r\ntrain_num = 8000\r\ndata = []\r\nlabels = []\r\nuidpool = []\r\nfor i in range(0, train_num):\r\n order1 = random.randint(0, user_num - 1)\r\n order2 = random.randint(0, user_num - 1)\r\n uid1 = valid_users[order1]\r\n uid2 = same_line_dict[uid1][random.randint(0, len(same_line_dict[uid1]) - 1)]\r\n # uid2 = valid_users[order2]\r\n # if random.random() >= 0:\r\n # # print('+-1')\r\n # uid2 = same_line_dict[uid1][random.randint(0, len(same_line_dict[uid1]) - 1)]\r\n flag1, dict1 = get_info(uid1)\r\n flag2, dict2 = get_info(uid2)\r\n while (uid1 == uid2 or uidpool.__contains__([uid1, uid2]) or not flag1 or not flag2):\r\n order1 = random.randint(0, user_num - 1)\r\n order2 = random.randint(0, user_num - 1)\r\n uid1 = valid_users[order1]\r\n uid2 = valid_users[order2]\r\n flag1, dict1 = get_info(uid1)\r\n flag2, dict2 = get_info(uid2)\r\n uidpool.append([uid1, uid2])\r\n uidpool.append([uid2, uid1])\r\n tmp_data = gen_data(dict1, dict2)\r\n flw1, flw2 = gen_flw(uid1, uid2)\r\n # data.append(gen_data(dict1, dict2))\r\n tmp_data.append(flw1)\r\n 
tmp_data.append(flw2)\r\n data.append(tmp_data)\r\n labels.append(gen_label(uid1, uid2))\r\n # print(uid1, uid2)\r\nprint(data)\r\nprint(labels)\r\nprint('total number:', train_num)\r\nprint('total positive samples:', labels.count('1'))\r\n\r\nlogging.info('Start Training!')\r\nrf = RandomForestClassifier(n_estimators=40, n_jobs=4, verbose=0)\r\naccur = []\r\nbegin_time=time.time()\r\nfor order in range(0, 10):\r\n ratio = 9 / 10\r\n train_data = []\r\n train_labels = []\r\n test_data = []\r\n test_labels = []\r\n for i in range(0, train_num):\r\n if random.random() > ratio:\r\n test_data.append(data[i])\r\n test_labels.append(labels[i])\r\n else:\r\n train_data.append(data[i])\r\n train_labels.append(labels[i])\r\n\r\n # print('train number:', len(train_labels))\r\n # print('train positive samples:', train_labels.count('1'))\r\n rf.fit(train_data, train_labels)\r\n\r\n logging.info('Train Done!')\r\n\r\n # print('Train accuracy:',\r\n # rf.score(train_data, train_labels))\r\n # print('Test accuracy:',\r\n # rf.score(test_data, test_labels))\r\n acc = rf.score(data, labels)\r\n # print('Total accuracy:', acc)\r\n accur.append(acc)\r\nend_time=time.time()\r\nprint('Feature Weight:')\r\n# print('Feature Weight:', rf.feature_importances_)\r\nfeatures = ['verified', 'verified_type', 'gender', 'isLongText', 'urank', 'statuses_diff',\r\n 'followers_diff', 'follows_diff', 'reposts_diff', 'comment_diff', 'attitudes_diff',\r\n 'screen_name_similarity', 'description_similarity', 'text_similarity', 'co_follow', 'in_follows']\r\nfor i in range(0, 16):\r\n print(features[i], ':', rf.feature_importances_[i])\r\n\r\nprint('Total accuracy', rf.score(data, labels))\r\n\r\nscores = cross_val_score(rf, data, labels, cv=10)\r\nprint(sum(scores) / 10)\r\n\r\nprint('time:',end_time-begin_time)\r\n",
"step-ids": [
2,
5,
6,
7,
8
]
}
|
[
2,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def generatetest(n=100, filename='test_data'):
ids = []
names_list = []
for _ in range(n):
ids.append(''.join(random.choices(string.ascii_letters + string.
digits, k=9)))
names_list.append(names.get_full_name())
df = pd.DataFrame({'id': ids, 'names': names_list})
df.to_csv('tmp/{}.csv'.format(filename), index=False)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def generatetest(n=100, filename='test_data'):
ids = []
names_list = []
for _ in range(n):
ids.append(''.join(random.choices(string.ascii_letters + string.
digits, k=9)))
names_list.append(names.get_full_name())
df = pd.DataFrame({'id': ids, 'names': names_list})
df.to_csv('tmp/{}.csv'.format(filename), index=False)
if __name__ == '__main__':
generatetest()
print('test set generated!')
<|reserved_special_token_1|>
import pandas as pd
import random
import string
import names
def generatetest(n=100, filename='test_data'):
ids = []
names_list = []
for _ in range(n):
ids.append(''.join(random.choices(string.ascii_letters + string.
digits, k=9)))
names_list.append(names.get_full_name())
df = pd.DataFrame({'id': ids, 'names': names_list})
df.to_csv('tmp/{}.csv'.format(filename), index=False)
if __name__ == '__main__':
generatetest()
print('test set generated!')
<|reserved_special_token_1|>
import pandas as pd
import random
import string
import names
def generatetest(n=100, filename="test_data"):
ids = []
names_list = []
for _ in range(n):
ids.append(''.join(random.choices(
string.ascii_letters + string.digits, k=9)))
names_list.append(names.get_full_name())
df = pd.DataFrame({
'id': ids,
'names': names_list,
})
df.to_csv('tmp/{}.csv'.format(filename), index=False)
if __name__ == "__main__":
generatetest()
print("test set generated!")
|
flexible
|
{
"blob_id": "aa913fd40a710cfd7288fd59c4039c4b6a5745cc",
"index": 4569,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef generatetest(n=100, filename='test_data'):\n ids = []\n names_list = []\n for _ in range(n):\n ids.append(''.join(random.choices(string.ascii_letters + string.\n digits, k=9)))\n names_list.append(names.get_full_name())\n df = pd.DataFrame({'id': ids, 'names': names_list})\n df.to_csv('tmp/{}.csv'.format(filename), index=False)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef generatetest(n=100, filename='test_data'):\n ids = []\n names_list = []\n for _ in range(n):\n ids.append(''.join(random.choices(string.ascii_letters + string.\n digits, k=9)))\n names_list.append(names.get_full_name())\n df = pd.DataFrame({'id': ids, 'names': names_list})\n df.to_csv('tmp/{}.csv'.format(filename), index=False)\n\n\nif __name__ == '__main__':\n generatetest()\n print('test set generated!')\n",
"step-4": "import pandas as pd\nimport random\nimport string\nimport names\n\n\ndef generatetest(n=100, filename='test_data'):\n ids = []\n names_list = []\n for _ in range(n):\n ids.append(''.join(random.choices(string.ascii_letters + string.\n digits, k=9)))\n names_list.append(names.get_full_name())\n df = pd.DataFrame({'id': ids, 'names': names_list})\n df.to_csv('tmp/{}.csv'.format(filename), index=False)\n\n\nif __name__ == '__main__':\n generatetest()\n print('test set generated!')\n",
"step-5": "import pandas as pd\nimport random\nimport string\nimport names\n\n\ndef generatetest(n=100, filename=\"test_data\"):\n ids = []\n names_list = []\n for _ in range(n):\n ids.append(''.join(random.choices(\n string.ascii_letters + string.digits, k=9)))\n names_list.append(names.get_full_name())\n\n df = pd.DataFrame({\n 'id': ids,\n 'names': names_list,\n })\n df.to_csv('tmp/{}.csv'.format(filename), index=False)\n\n\nif __name__ == \"__main__\":\n generatetest()\n print(\"test set generated!\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import math
from historia.utils import unique_id, position_in_range
from historia.pops.models.inventory import Inventory
from historia.economy.enums.resource import Good, NaturalResource
from historia.economy.enums.order_type import OrderType
from historia.economy.models.price_range import PriceRange
from historia.economy.models.order import Order
from historia.pops.enums.pop_job import PopJob
DEBUG = False
class Pop(object):
    """
    A simulated unit of population.

    Tracks where the Pop lives and is currently located, how many people it
    represents, and the economic state used by the market simulation: money,
    inventory, price beliefs, and observed trading ranges. Merchant Pops
    additionally carry a travel/trade plan (trade_location, trade_good, etc.).
    """
    def __init__(self, province, pop_job, population):
        """
        Creates a new Pop.

        province (SecondaryDivision): province this Pop lives in; also used
            as the initial location
        pop_job (PopJob): the economic job this Pop performs
        population (int): number of people this Pop represents
        """
        self.bankrupt_times = 0
        self.home = province
        self.location = province
        self.id = unique_id('po')
        self.population = population
        self.population_yesterday = 0
        self.pop_job = pop_job

        # ECONOMY
        self.money = pop_job.start_money
        self.money_yesterday = 0
        self.bankrupt = False

        # set inventory and ideal amounts
        self.inventory = Inventory(pop_job.inventory_size)
        self.give_start_inventory()
        self.update_ideal_inventory()

        # a dictionary of Goods to PriceRanges
        # represents the price range the agent considers valid for each Good
        self.price_belief = {}

        # a dictionary of Goods to price list
        # represents the prices of the good that the Pop has observed
        # during the time they have been trading
        self.observed_trading_range = {}

        self.successful_trades = 0
        self.failed_trades = 0

        # make some fake initial data so price beliefs start near the
        # market's recent (15-day) average instead of at zero
        for good in Good.all():
            avg_price = self.market.avg_historial_price(good, 15)
            # fake trades: pretend we've seen prices 50% below/above average
            self.observed_trading_range[good] = [
                avg_price * 0.5,
                avg_price * 1.5
            ]
            # generate fake price belief spanning the same +/-50% band
            self.price_belief[good] = PriceRange(avg_price * 0.5, avg_price * 1.5)

        # Merchant logic
        self.trade_location = None # the province this Pop is traveling to
        self.trade_good = None # what good we're trading in right now
        self.trade_amount = 0 # amount of trade_good we should be trading
        self.trading_days = 0 # number of days waiting to trade
# Generic Pop properties
@property
def social_class(self):
return self.pop_job.social_class
@property
def market(self):
"Get the market instance"
return self.location.market
@property
def profit(self):
"Determine today's profit"
return self.money - self.money_yesterday
@property
def total_trades(self):
"Total number of trades this Pop participated in"
return self.successful_trades + self.failed_trades
@property
def trade_success(self):
"Percent of trades that were successful"
if self.total_trades == 0:
return 0
return (self.successful_trades / self.total_trades) * 100
@property
def is_away(self):
"Is this Pop away from it's home?"
return self.home is not self.location
# Merchant specific logic
def go_to_province(self, province):
"Moves the Pop to another Province"
self.location = province
    def decide_trade_plan(self):
        """
        Decide what good to trade in and how much.
        Look for the most in demand good, or the most expensive good at the home Province
        Find a province near home province where its the cheapest and there's inventory.

        On success, sets trade_good / trade_location and updates the ideal
        inventory; otherwise leaves the current plan untouched.
        """
        # fixed shipment size for every trade run
        self.trade_amount = 5
        most_demanded_goods = self.home.market.goods_demand_ratio(day_range=1)
        # sort by demand ratio, highest first
        most_demanded_goods = sorted(most_demanded_goods.items(), key=lambda i: i[1], reverse=True)
        # if we already had a trade good, refresh ideal inventory
        if self.trade_good:
            self.update_ideal_inventory()
        if DEBUG: print("Finding a Good to trade:")
        for good, demand in most_demanded_goods:
            if demand > 0:
                # find nearby provinces where this has inventory and the price is lower
                price_at_home = self.home.market.mean_price(good)
                if DEBUG: print("Good: {}, Demand: {}, Price: ${}".format(good.title, demand, price_at_home))
                # candidate markets: neighbors with more supply than we plan
                # to move, best-stocked first
                neighboring_markets = [p.market for p in self.location.owned_neighbors]
                neighboring_markets = [m for m in neighboring_markets if m.supply_for(good) > self.trade_amount]
                neighboring_markets.sort(key=lambda m: m.supply_for(good), reverse=True)
                if len(neighboring_markets) > 0:
                    # we found places where this good is cheaper and in inventory
                    target = neighboring_markets[0].location
                    price_at_target = target.market.mean_price(good)
                    # only trade with prices where we can make money
                    if price_at_home > price_at_target:
                        # bread gets one extra unit in the ideal inventory
                        # NOTE(review): reason not evident from this file -- confirm
                        offset = 0
                        if good is Good.bread:
                            offset = 1
                        self.inventory.set_ideal(good, self.trade_amount + offset)
                        self.trade_location = target
                        if DEBUG:
                            print("\tTarget: {}, Supply: {}, Price: ${}, Price at home: ${}".format(
                                self.trade_location.name,
                                self.trade_location.market.supply_for(good),
                                self.trade_location.market.mean_price(good),
                                price_at_home)
                            )
                        self.trade_good = good
                        return
                    else:
                        if DEBUG: print("\tPrice is higher at target (home: ${} target: ${})".format(price_at_home, price_at_target))
                else:
                    if DEBUG: print("\tNo markets selling {} found".format(good))
# Generic economic logic
def update_ideal_inventory(self):
    """Reset every ideal amount to zero, then apply the current PopJob's ideals."""
    set_ideal = self.inventory.set_ideal
    # zero everything first so goods the job no longer needs get sold off
    for good in Good.all():
        set_ideal(good, 0)
    # then record what the current job considers ideal to hold
    for entry in self.pop_job.ideal_inventory:
        set_ideal(entry['good'], entry['amount'])
def give_start_inventory(self):
    """Stock the Pop's inventory with its job's starting goods."""
    add = self.inventory.add
    for entry in self.pop_job.start_inventory:
        add(entry['good'], entry['amount'])
def change_population(self, trade_success):
    """Grow the population by 1% after a successful trade, shrink it 0.2% otherwise."""
    current = self.population
    self.population_yesterday = current
    delta = round(current * 0.01) if trade_success else -round(current * 0.002)
    self.population = current + delta
def handle_bankruptcy(self, pop_job):
    """
    Recover from bankruptcy: switch to a new job and restart with seed money.

    TODO: stop creating money out of thin air.
    """
    self.bankrupt_times += 1
    self.pop_job = pop_job
    self.money = 2  # seed money so the Pop can trade again
    self.update_ideal_inventory()
    self.give_start_inventory()
def perform_logic(self):
    """Run this Pop's job-specific daily logic (including any production)."""
    self.pop_job.logic(self).perform()
def create_buy_order(self, good, limit):
    """
    Create a buy Order for `good`, capped at `limit` units.

    Returns an Order, or False when there is nothing worth buying.
    """
    bid_price = self.determine_price_of(good)
    ideal = self.determine_buy_quantity(good)
    # never bid for more than the caller's limit
    quantity = min(ideal, limit)
    if quantity <= 0:
        return False
    return Order(self, OrderType.buy_order, quantity, bid_price, good)
def create_sell_order(self, good, limit):
    """
    Create a sell Order for `good`, capped at `limit` units (the surplus).

    Returns an Order, or False when there is nothing to sell.
    """
    sell_price = self.determine_price_of(good)
    ideal = self.determine_sell_quantity(good)
    # can't sell more than the available limit: cap the ideal quantity.
    # (Bug fix: the original used `limit if ideal < limit else ideal`,
    # i.e. max(), which could offer more units than the Pop's surplus —
    # this now mirrors create_buy_order's min() semantics.)
    quantity_to_sell = min(ideal, limit)
    if quantity_to_sell > 0:
        return Order(self, OrderType.sell_order, quantity_to_sell, sell_price, good)
    return False
def price_belief_for(self, good):
    """Return the price belief this Pop holds for `good`, or None if unknown."""
    return self.price_belief.get(good)
def determine_price_of(self, good):
    """Draw a price for `good` at random from this Pop's belief range."""
    belief = self.price_belief_for(good)
    return belief.random()
def trading_range_extremes(self, good):
    """Return a PriceRange spanning the lowest and highest prices observed for `good`."""
    observed = self.observed_trading_range[good]
    low, high = min(observed), max(observed)
    return PriceRange(low, high)
def determine_sell_quantity(self, good):
    """Decide how many units of `good` to offer, based on recent market prices."""
    mean_price = self.market.avg_historial_price(good, 15)
    extremes = self.trading_range_extremes(good)
    # how favorable the mean price sits within the observed range (0..1)
    favorability = position_in_range(mean_price, extremes.low, extremes.high)
    amount = round(favorability * self.inventory.surplus(good))
    # always offer at least one unit
    return max(1, amount)
def determine_buy_quantity(self, good):
    """Decide how many units of `good` to bid for, based on recent market prices."""
    mean_price = self.market.avg_historial_price(good, 15)
    extremes = self.trading_range_extremes(good)
    # cheap relative to the observed range => more favorable to buy (0..1)
    favorability = 1 - position_in_range(mean_price, extremes.low, extremes.high)
    amount = round(favorability * self.inventory.shortage(good))
    # always bid for at least one unit
    return max(1, amount)
def generate_orders(self, good):
    """
    Create market orders for a single Good.

    If the Pop has a surplus of the Good, offer the whole surplus for sale.
    If the Pop has a shortage (it needs the Good for production), place a
    buy order capped by the free space left in its inventory.
    """
    surplus = self.inventory.surplus(good)
    if surplus >= 1:
        # sell the whole surplus (removed unused `sell_amount` local)
        order = self.create_sell_order(good, surplus)
        if order:
            self.market.sell(order)
    else:
        # buy to cover the shortage
        shortage = self.inventory.shortage(good)
        free_space = self.inventory.empty_space
        if shortage > 0:
            if shortage <= free_space:
                # enough space for the ideal order
                limit = shortage
            else:
                # not enough space for the ideal order.
                # NOTE(review): floor(free_space / shortage) is 0 whenever
                # shortage > free_space, which silently skips the buy; the
                # intent may have been `limit = free_space` — confirm.
                limit = math.floor(free_space / shortage)
            if limit > 0:
                order = self.create_buy_order(good, limit)
                if order:
                    self.market.buy(order)
def update_price_model(self, good, order_type, is_successful, clearing_price=0):
    """
    Update the Pop's price belief model for the given Good after a trade.

    good            (Good)      The Good which was orderd
    order_type      (OrderType) Which kind of Order this was
    is_successful   (bool)      whether or not the Order was successful
    clearing_price  (float)     The price per unit of the good that was ordered
                                as defined by the Pop which ordered it

    Successful trades narrow the belief range (more certainty); failed
    trades shift it toward the public mean price and widen it.
    """
    SIGNIFICANT = 0.25  # 25% more or less is "significant"
    SIG_IMBALANCE = 0.33  # supply/demand imbalance considered significant
    LOW_INVENTORY = 0.1  # 10% of ideal inventory = "LOW"
    HIGH_INVENTORY = 2.0  # 200% of ideal inventory = "HIGH"
    MIN_PRICE = 0.01  # lowest allowed price of a Good
    if is_successful:
        # add this trade to the observed trading range
        self.observed_trading_range[good].append(clearing_price)
    public_mean_price = self.market.mean_price(good)
    belief = self.price_belief[good]
    mean = belief.mean()
    wobble = 0.05  # the degree which the Pop should bid outside the belief
    # how different the public mean price is from the price belief
    delta_to_mean = mean - public_mean_price
    if is_successful:
        if order_type is OrderType.buy_order and delta_to_mean > SIGNIFICANT:
            # this Pop overpaid, shift belief towards mean
            belief.low -= delta_to_mean / 2
            belief.high -= delta_to_mean / 2
        elif order_type is OrderType.sell_order and delta_to_mean < -SIGNIFICANT:
            # this Pop underpaid!, shift belief towards mean
            belief.low -= delta_to_mean / 2
            belief.high -= delta_to_mean / 2
        # increase the belief's certainty (narrow the range by wobble*mean)
        belief.low += wobble * mean
        belief.high -= wobble * mean
    else:
        # failed order: shift towards mean
        belief.low -= delta_to_mean / 2
        belief.high -= delta_to_mean / 2
        # check for inventory special cases
        stocks = self.inventory.get_amount(good)
        ideal = self.inventory.get_ideal(good)
        # if we're buying and inventory is too low
        # meaning we're desperate to buy
        if order_type is OrderType.buy_order and stocks < LOW_INVENTORY * ideal:
            wobble *= 2
        # if we're selling and inventory is too high
        # meaning we're desperate to sell
        elif order_type is OrderType.sell_order and stocks > HIGH_INVENTORY * ideal:
            wobble *= 2
        # all other cases
        else:
            sells = self.market.history.sell_orders.average(good, 1)
            buys = self.market.history.buy_orders.average(good, 1)
            # TODO: figure out why this is sometimes 0
            if sells + buys > 0:
                supply_vs_demand = (sells - buys) / (sells + buys)
                if supply_vs_demand > SIG_IMBALANCE or supply_vs_demand < -SIG_IMBALANCE:
                    # too much supply? lower bid lower to sell faster
                    # too much demand? raise price to buy faster
                    new_mean = public_mean_price * (1 - supply_vs_demand)
                    delta_to_mean = mean - new_mean
                    # shift the price belief to the new price mean
                    belief.low -= delta_to_mean / 2
                    belief.high -= delta_to_mean / 2
        # decrease belief's certainty since we've just changed it (we could be wrong)
        belief.low -= wobble * mean
        belief.high += wobble * mean
    # make sure the price belief doesn't decrease below the minimum
    if belief.low < MIN_PRICE:
        belief.low = MIN_PRICE
    elif belief.high < MIN_PRICE:
        belief.high = MIN_PRICE
# Python utility methods
def __repr__(self):
return "<Pop: id={} type={}>".format(self.id, self.pop_job.title)
def __eq__(self, other):
return self.id == other.id
def __key__(self):
    # identity key used by __hash__: the unique id assigned at construction
    return self.id
def __hash__(self):
    # hash on the stable identity key so Pops can be used in sets/dict keys
    return hash(self.__key__())
def export(self):
    """Serialize this Pop's state into a plain dict."""
    model = {
        'pop_job': self.pop_job.ref(),
        'population': self.population,
        'population_yesterday': self.population_yesterday,
        'inventory': self.inventory.export(),
        'money': self.money,
        'money_yesterday': self.money_yesterday,
        'successful_trades': self.successful_trades,
        'failed_trades': self.failed_trades,
        'bankrupt_times': self.bankrupt_times,
    }
    # merchants additionally carry their trading state
    if self.pop_job is PopJob.merchant:
        trade_location_id = self.trade_location.id if self.trade_location else None
        model['location'] = self.location.id
        model['trade_location'] = trade_location_id
        model['trade_good'] = self.trade_good
        model['trade_amount'] = self.trade_amount
    return model
|
normal
|
{
"blob_id": "887a39f1eeb81e6472938c2451e57866d3ac4a45",
"index": 661,
"step-1": "<mask token>\n\n\nclass Pop(object):\n <mask token>\n\n def __init__(self, province, pop_job, population):\n \"\"\"\n Creates a new Pop.\n manager (Historia)\n province (SecondaryDivision)\n culture (Culture)\n religion (Religion)\n language (Language)\n job (Job)\n \"\"\"\n self.bankrupt_times = 0\n self.home = province\n self.location = province\n self.id = unique_id('po')\n self.population = population\n self.population_yesterday = 0\n self.pop_job = pop_job\n self.money = pop_job.start_money\n self.money_yesterday = 0\n self.bankrupt = False\n self.inventory = Inventory(pop_job.inventory_size)\n self.give_start_inventory()\n self.update_ideal_inventory()\n self.price_belief = {}\n self.observed_trading_range = {}\n self.successful_trades = 0\n self.failed_trades = 0\n for good in Good.all():\n avg_price = self.market.avg_historial_price(good, 15)\n self.observed_trading_range[good] = [avg_price * 0.5, avg_price *\n 1.5]\n self.price_belief[good] = PriceRange(avg_price * 0.5, avg_price *\n 1.5)\n self.trade_location = None\n self.trade_good = None\n self.trade_amount = 0\n self.trading_days = 0\n <mask token>\n <mask token>\n\n @property\n def profit(self):\n \"\"\"Determine today's profit\"\"\"\n return self.money - self.money_yesterday\n\n @property\n def total_trades(self):\n \"\"\"Total number of trades this Pop participated in\"\"\"\n return self.successful_trades + self.failed_trades\n <mask token>\n\n @property\n def is_away(self):\n \"\"\"Is this Pop away from it's home?\"\"\"\n return self.home is not self.location\n <mask token>\n <mask token>\n\n def update_ideal_inventory(self):\n \"\"\"Update ideal inventory\"\"\"\n for good in Good.all():\n self.inventory.set_ideal(good, 0)\n for item in self.pop_job.ideal_inventory:\n self.inventory.set_ideal(item['good'], item['amount'])\n\n def give_start_inventory(self):\n \"\"\"Give the Pop the inventory it needs to do its job\"\"\"\n for item in self.pop_job.start_inventory:\n 
self.inventory.add(item['good'], item['amount'])\n <mask token>\n <mask token>\n\n def perform_logic(self):\n \"\"\"Depending on PopJob, perform logic (including production)\"\"\"\n logic = self.pop_job.logic(self)\n logic.perform()\n\n def create_buy_order(self, good, limit):\n \"\"\"Create a buy order for a given Good at a determined quantity\"\"\"\n bid_price = self.determine_price_of(good)\n ideal = self.determine_buy_quantity(good)\n quantity_to_buy = limit if ideal > limit else ideal\n if quantity_to_buy > 0:\n return Order(self, OrderType.buy_order, quantity_to_buy,\n bid_price, good)\n return False\n\n def create_sell_order(self, good, limit):\n \"\"\"Create a sell order for a given Good at a determined quantity\"\"\"\n sell_price = self.determine_price_of(good)\n ideal = self.determine_sell_quantity(good)\n quantity_to_sell = limit if ideal < limit else ideal\n if quantity_to_sell > 0:\n return Order(self, OrderType.sell_order, quantity_to_sell,\n sell_price, good)\n return False\n\n def price_belief_for(self, good):\n \"\"\"Gets the price belief this agent has for a particular Good\"\"\"\n if good in self.price_belief:\n return self.price_belief[good]\n <mask token>\n\n def trading_range_extremes(self, good):\n \"\"\"Gets the lowest and highst price of a Good this agent has seen\"\"\"\n trading_range = self.observed_trading_range[good]\n return PriceRange(min(trading_range), max(trading_range))\n\n def determine_sell_quantity(self, good):\n \"\"\"Determine how much inventory goods to sell based on market conditions\"\"\"\n mean = self.market.avg_historial_price(good, 15)\n trading_range = self.trading_range_extremes(good)\n favoribility = position_in_range(mean, trading_range.low,\n trading_range.high)\n amount_to_sell = round(favoribility * self.inventory.surplus(good))\n if amount_to_sell < 1:\n amount_to_sell = 1\n return amount_to_sell\n <mask token>\n\n def generate_orders(self, good):\n \"\"\"\n If the Pop needs a Good to perform production, buy 
it\n If the Pop has surplus Resources, sell them\n \"\"\"\n surplus = self.inventory.surplus(good)\n if surplus >= 1:\n sell_amount = surplus\n order = self.create_sell_order(good, surplus)\n if order:\n self.market.sell(order)\n else:\n shortage = self.inventory.shortage(good)\n free_space = self.inventory.empty_space\n if shortage > 0:\n if shortage <= free_space:\n limit = shortage\n else:\n limit = math.floor(free_space / shortage)\n if limit > 0:\n order = self.create_buy_order(good, limit)\n if order:\n self.market.buy(order)\n <mask token>\n <mask token>\n <mask token>\n\n def __key__(self):\n return self.id\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Pop(object):\n <mask token>\n\n def __init__(self, province, pop_job, population):\n \"\"\"\n Creates a new Pop.\n manager (Historia)\n province (SecondaryDivision)\n culture (Culture)\n religion (Religion)\n language (Language)\n job (Job)\n \"\"\"\n self.bankrupt_times = 0\n self.home = province\n self.location = province\n self.id = unique_id('po')\n self.population = population\n self.population_yesterday = 0\n self.pop_job = pop_job\n self.money = pop_job.start_money\n self.money_yesterday = 0\n self.bankrupt = False\n self.inventory = Inventory(pop_job.inventory_size)\n self.give_start_inventory()\n self.update_ideal_inventory()\n self.price_belief = {}\n self.observed_trading_range = {}\n self.successful_trades = 0\n self.failed_trades = 0\n for good in Good.all():\n avg_price = self.market.avg_historial_price(good, 15)\n self.observed_trading_range[good] = [avg_price * 0.5, avg_price *\n 1.5]\n self.price_belief[good] = PriceRange(avg_price * 0.5, avg_price *\n 1.5)\n self.trade_location = None\n self.trade_good = None\n self.trade_amount = 0\n self.trading_days = 0\n <mask token>\n\n @property\n def market(self):\n \"\"\"Get the market instance\"\"\"\n return self.location.market\n\n @property\n def profit(self):\n \"\"\"Determine today's profit\"\"\"\n return self.money - self.money_yesterday\n\n @property\n def total_trades(self):\n \"\"\"Total number of trades this Pop participated in\"\"\"\n return self.successful_trades + self.failed_trades\n\n @property\n def trade_success(self):\n \"\"\"Percent of trades that were successful\"\"\"\n if self.total_trades == 0:\n return 0\n return self.successful_trades / self.total_trades * 100\n\n @property\n def is_away(self):\n \"\"\"Is this Pop away from it's home?\"\"\"\n return self.home is not self.location\n\n def go_to_province(self, province):\n \"\"\"Moves the Pop to another Province\"\"\"\n self.location = province\n\n def decide_trade_plan(self):\n \"\"\"\n Decide what 
good to trade in and how much.\n Look for the most in demand good, or the most expensive good at the home Province\n Find a province near home province where its the cheapest and there's inventory\n \"\"\"\n self.trade_amount = 5\n most_demanded_goods = self.home.market.goods_demand_ratio(day_range=1)\n most_demanded_goods = sorted(most_demanded_goods.items(), key=lambda\n i: i[1], reverse=True)\n if self.trade_good:\n self.update_ideal_inventory()\n if DEBUG:\n print('Finding a Good to trade:')\n for good, demand in most_demanded_goods:\n if demand > 0:\n price_at_home = self.home.market.mean_price(good)\n if DEBUG:\n print('Good: {}, Demand: {}, Price: ${}'.format(good.\n title, demand, price_at_home))\n neighboring_markets = [p.market for p in self.location.\n owned_neighbors]\n neighboring_markets = [m for m in neighboring_markets if m.\n supply_for(good) > self.trade_amount]\n neighboring_markets.sort(key=lambda m: m.supply_for(good),\n reverse=True)\n if len(neighboring_markets) > 0:\n target = neighboring_markets[0].location\n price_at_target = target.market.mean_price(good)\n if price_at_home > price_at_target:\n offset = 0\n if good is Good.bread:\n offset = 1\n self.inventory.set_ideal(good, self.trade_amount +\n offset)\n self.trade_location = target\n if DEBUG:\n print(\n '\\tTarget: {}, Supply: {}, Price: ${}, Price at home: ${}'\n .format(self.trade_location.name, self.\n trade_location.market.supply_for(good),\n self.trade_location.market.mean_price(good),\n price_at_home))\n self.trade_good = good\n return\n elif DEBUG:\n print(\n '\\tPrice is higher at target (home: ${} target: ${})'\n .format(price_at_home, price_at_target))\n elif DEBUG:\n print('\\tNo markets selling {} found'.format(good))\n\n def update_ideal_inventory(self):\n \"\"\"Update ideal inventory\"\"\"\n for good in Good.all():\n self.inventory.set_ideal(good, 0)\n for item in self.pop_job.ideal_inventory:\n self.inventory.set_ideal(item['good'], item['amount'])\n\n def 
give_start_inventory(self):\n \"\"\"Give the Pop the inventory it needs to do its job\"\"\"\n for item in self.pop_job.start_inventory:\n self.inventory.add(item['good'], item['amount'])\n\n def change_population(self, trade_success):\n \"\"\"Change the population based off the trade\"\"\"\n self.population_yesterday = self.population\n if trade_success:\n self.population += round(self.population * 0.01)\n else:\n self.population -= round(self.population * 0.002)\n\n def handle_bankruptcy(self, pop_job):\n \"\"\"Change job, create money out of thin air, update ideal inventory\"\"\"\n self.pop_job = pop_job\n self.bankrupt_times += 1\n self.money = 2\n self.update_ideal_inventory()\n self.give_start_inventory()\n\n def perform_logic(self):\n \"\"\"Depending on PopJob, perform logic (including production)\"\"\"\n logic = self.pop_job.logic(self)\n logic.perform()\n\n def create_buy_order(self, good, limit):\n \"\"\"Create a buy order for a given Good at a determined quantity\"\"\"\n bid_price = self.determine_price_of(good)\n ideal = self.determine_buy_quantity(good)\n quantity_to_buy = limit if ideal > limit else ideal\n if quantity_to_buy > 0:\n return Order(self, OrderType.buy_order, quantity_to_buy,\n bid_price, good)\n return False\n\n def create_sell_order(self, good, limit):\n \"\"\"Create a sell order for a given Good at a determined quantity\"\"\"\n sell_price = self.determine_price_of(good)\n ideal = self.determine_sell_quantity(good)\n quantity_to_sell = limit if ideal < limit else ideal\n if quantity_to_sell > 0:\n return Order(self, OrderType.sell_order, quantity_to_sell,\n sell_price, good)\n return False\n\n def price_belief_for(self, good):\n \"\"\"Gets the price belief this agent has for a particular Good\"\"\"\n if good in self.price_belief:\n return self.price_belief[good]\n\n def determine_price_of(self, good):\n \"\"\"Determine the price of a particular good\"\"\"\n return self.price_belief_for(good).random()\n\n def trading_range_extremes(self, 
good):\n \"\"\"Gets the lowest and highst price of a Good this agent has seen\"\"\"\n trading_range = self.observed_trading_range[good]\n return PriceRange(min(trading_range), max(trading_range))\n\n def determine_sell_quantity(self, good):\n \"\"\"Determine how much inventory goods to sell based on market conditions\"\"\"\n mean = self.market.avg_historial_price(good, 15)\n trading_range = self.trading_range_extremes(good)\n favoribility = position_in_range(mean, trading_range.low,\n trading_range.high)\n amount_to_sell = round(favoribility * self.inventory.surplus(good))\n if amount_to_sell < 1:\n amount_to_sell = 1\n return amount_to_sell\n\n def determine_buy_quantity(self, good):\n \"\"\"Determine how much goods to buy based on market conditions\"\"\"\n mean = self.market.avg_historial_price(good, 15)\n trading_range = self.trading_range_extremes(good)\n favoribility = 1 - position_in_range(mean, trading_range.low,\n trading_range.high)\n amount_to_buy = round(favoribility * self.inventory.shortage(good))\n if amount_to_buy < 1:\n amount_to_buy = 1\n return amount_to_buy\n\n def generate_orders(self, good):\n \"\"\"\n If the Pop needs a Good to perform production, buy it\n If the Pop has surplus Resources, sell them\n \"\"\"\n surplus = self.inventory.surplus(good)\n if surplus >= 1:\n sell_amount = surplus\n order = self.create_sell_order(good, surplus)\n if order:\n self.market.sell(order)\n else:\n shortage = self.inventory.shortage(good)\n free_space = self.inventory.empty_space\n if shortage > 0:\n if shortage <= free_space:\n limit = shortage\n else:\n limit = math.floor(free_space / shortage)\n if limit > 0:\n order = self.create_buy_order(good, limit)\n if order:\n self.market.buy(order)\n <mask token>\n\n def __repr__(self):\n return '<Pop: id={} type={}>'.format(self.id, self.pop_job.title)\n <mask token>\n\n def __key__(self):\n return self.id\n\n def __hash__(self):\n return hash(self.__key__())\n\n def export(self):\n model = {'pop_job': 
self.pop_job.ref(), 'population': self.\n population, 'population_yesterday': self.population_yesterday,\n 'inventory': self.inventory.export(), 'money': self.money,\n 'money_yesterday': self.money_yesterday, 'successful_trades':\n self.successful_trades, 'failed_trades': self.failed_trades,\n 'bankrupt_times': self.bankrupt_times}\n if self.pop_job is PopJob.merchant:\n location_id = None\n if self.trade_location:\n location_id = self.trade_location.id\n model.update({'location': self.location.id, 'trade_location':\n location_id, 'trade_good': self.trade_good, 'trade_amount':\n self.trade_amount})\n return model\n",
"step-3": "<mask token>\n\n\nclass Pop(object):\n <mask token>\n\n def __init__(self, province, pop_job, population):\n \"\"\"\n Creates a new Pop.\n manager (Historia)\n province (SecondaryDivision)\n culture (Culture)\n religion (Religion)\n language (Language)\n job (Job)\n \"\"\"\n self.bankrupt_times = 0\n self.home = province\n self.location = province\n self.id = unique_id('po')\n self.population = population\n self.population_yesterday = 0\n self.pop_job = pop_job\n self.money = pop_job.start_money\n self.money_yesterday = 0\n self.bankrupt = False\n self.inventory = Inventory(pop_job.inventory_size)\n self.give_start_inventory()\n self.update_ideal_inventory()\n self.price_belief = {}\n self.observed_trading_range = {}\n self.successful_trades = 0\n self.failed_trades = 0\n for good in Good.all():\n avg_price = self.market.avg_historial_price(good, 15)\n self.observed_trading_range[good] = [avg_price * 0.5, avg_price *\n 1.5]\n self.price_belief[good] = PriceRange(avg_price * 0.5, avg_price *\n 1.5)\n self.trade_location = None\n self.trade_good = None\n self.trade_amount = 0\n self.trading_days = 0\n\n @property\n def social_class(self):\n return self.pop_job.social_class\n\n @property\n def market(self):\n \"\"\"Get the market instance\"\"\"\n return self.location.market\n\n @property\n def profit(self):\n \"\"\"Determine today's profit\"\"\"\n return self.money - self.money_yesterday\n\n @property\n def total_trades(self):\n \"\"\"Total number of trades this Pop participated in\"\"\"\n return self.successful_trades + self.failed_trades\n\n @property\n def trade_success(self):\n \"\"\"Percent of trades that were successful\"\"\"\n if self.total_trades == 0:\n return 0\n return self.successful_trades / self.total_trades * 100\n\n @property\n def is_away(self):\n \"\"\"Is this Pop away from it's home?\"\"\"\n return self.home is not self.location\n\n def go_to_province(self, province):\n \"\"\"Moves the Pop to another Province\"\"\"\n self.location = 
province\n\n def decide_trade_plan(self):\n \"\"\"\n Decide what good to trade in and how much.\n Look for the most in demand good, or the most expensive good at the home Province\n Find a province near home province where its the cheapest and there's inventory\n \"\"\"\n self.trade_amount = 5\n most_demanded_goods = self.home.market.goods_demand_ratio(day_range=1)\n most_demanded_goods = sorted(most_demanded_goods.items(), key=lambda\n i: i[1], reverse=True)\n if self.trade_good:\n self.update_ideal_inventory()\n if DEBUG:\n print('Finding a Good to trade:')\n for good, demand in most_demanded_goods:\n if demand > 0:\n price_at_home = self.home.market.mean_price(good)\n if DEBUG:\n print('Good: {}, Demand: {}, Price: ${}'.format(good.\n title, demand, price_at_home))\n neighboring_markets = [p.market for p in self.location.\n owned_neighbors]\n neighboring_markets = [m for m in neighboring_markets if m.\n supply_for(good) > self.trade_amount]\n neighboring_markets.sort(key=lambda m: m.supply_for(good),\n reverse=True)\n if len(neighboring_markets) > 0:\n target = neighboring_markets[0].location\n price_at_target = target.market.mean_price(good)\n if price_at_home > price_at_target:\n offset = 0\n if good is Good.bread:\n offset = 1\n self.inventory.set_ideal(good, self.trade_amount +\n offset)\n self.trade_location = target\n if DEBUG:\n print(\n '\\tTarget: {}, Supply: {}, Price: ${}, Price at home: ${}'\n .format(self.trade_location.name, self.\n trade_location.market.supply_for(good),\n self.trade_location.market.mean_price(good),\n price_at_home))\n self.trade_good = good\n return\n elif DEBUG:\n print(\n '\\tPrice is higher at target (home: ${} target: ${})'\n .format(price_at_home, price_at_target))\n elif DEBUG:\n print('\\tNo markets selling {} found'.format(good))\n\n def update_ideal_inventory(self):\n \"\"\"Update ideal inventory\"\"\"\n for good in Good.all():\n self.inventory.set_ideal(good, 0)\n for item in self.pop_job.ideal_inventory:\n 
self.inventory.set_ideal(item['good'], item['amount'])\n\n def give_start_inventory(self):\n \"\"\"Give the Pop the inventory it needs to do its job\"\"\"\n for item in self.pop_job.start_inventory:\n self.inventory.add(item['good'], item['amount'])\n\n def change_population(self, trade_success):\n \"\"\"Change the population based off the trade\"\"\"\n self.population_yesterday = self.population\n if trade_success:\n self.population += round(self.population * 0.01)\n else:\n self.population -= round(self.population * 0.002)\n\n def handle_bankruptcy(self, pop_job):\n \"\"\"Change job, create money out of thin air, update ideal inventory\"\"\"\n self.pop_job = pop_job\n self.bankrupt_times += 1\n self.money = 2\n self.update_ideal_inventory()\n self.give_start_inventory()\n\n def perform_logic(self):\n \"\"\"Depending on PopJob, perform logic (including production)\"\"\"\n logic = self.pop_job.logic(self)\n logic.perform()\n\n def create_buy_order(self, good, limit):\n \"\"\"Create a buy order for a given Good at a determined quantity\"\"\"\n bid_price = self.determine_price_of(good)\n ideal = self.determine_buy_quantity(good)\n quantity_to_buy = limit if ideal > limit else ideal\n if quantity_to_buy > 0:\n return Order(self, OrderType.buy_order, quantity_to_buy,\n bid_price, good)\n return False\n\n def create_sell_order(self, good, limit):\n \"\"\"Create a sell order for a given Good at a determined quantity\"\"\"\n sell_price = self.determine_price_of(good)\n ideal = self.determine_sell_quantity(good)\n quantity_to_sell = limit if ideal < limit else ideal\n if quantity_to_sell > 0:\n return Order(self, OrderType.sell_order, quantity_to_sell,\n sell_price, good)\n return False\n\n def price_belief_for(self, good):\n \"\"\"Gets the price belief this agent has for a particular Good\"\"\"\n if good in self.price_belief:\n return self.price_belief[good]\n\n def determine_price_of(self, good):\n \"\"\"Determine the price of a particular good\"\"\"\n return 
self.price_belief_for(good).random()\n\n def trading_range_extremes(self, good):\n \"\"\"Gets the lowest and highst price of a Good this agent has seen\"\"\"\n trading_range = self.observed_trading_range[good]\n return PriceRange(min(trading_range), max(trading_range))\n\n def determine_sell_quantity(self, good):\n \"\"\"Determine how much inventory goods to sell based on market conditions\"\"\"\n mean = self.market.avg_historial_price(good, 15)\n trading_range = self.trading_range_extremes(good)\n favoribility = position_in_range(mean, trading_range.low,\n trading_range.high)\n amount_to_sell = round(favoribility * self.inventory.surplus(good))\n if amount_to_sell < 1:\n amount_to_sell = 1\n return amount_to_sell\n\n def determine_buy_quantity(self, good):\n \"\"\"Determine how much goods to buy based on market conditions\"\"\"\n mean = self.market.avg_historial_price(good, 15)\n trading_range = self.trading_range_extremes(good)\n favoribility = 1 - position_in_range(mean, trading_range.low,\n trading_range.high)\n amount_to_buy = round(favoribility * self.inventory.shortage(good))\n if amount_to_buy < 1:\n amount_to_buy = 1\n return amount_to_buy\n\n def generate_orders(self, good):\n \"\"\"\n If the Pop needs a Good to perform production, buy it\n If the Pop has surplus Resources, sell them\n \"\"\"\n surplus = self.inventory.surplus(good)\n if surplus >= 1:\n sell_amount = surplus\n order = self.create_sell_order(good, surplus)\n if order:\n self.market.sell(order)\n else:\n shortage = self.inventory.shortage(good)\n free_space = self.inventory.empty_space\n if shortage > 0:\n if shortage <= free_space:\n limit = shortage\n else:\n limit = math.floor(free_space / shortage)\n if limit > 0:\n order = self.create_buy_order(good, limit)\n if order:\n self.market.buy(order)\n\n def update_price_model(self, good, order_type, is_successful,\n clearing_price=0):\n \"\"\"\n Update the Pop's price model for the given resource\n good (Good) The Good which was orderd\n 
order_type (OrderType) Which kind of Order this was\n is_successful (bool) whether or not the Order was successful\n clearing_price (float) The price per unit of the good that was ordered\n as defined by the Pop which ordered it\n \"\"\"\n SIGNIFICANT = 0.25\n SIG_IMBALANCE = 0.33\n LOW_INVENTORY = 0.1\n HIGH_INVENTORY = 2.0\n MIN_PRICE = 0.01\n if is_successful:\n self.observed_trading_range[good].append(clearing_price)\n public_mean_price = self.market.mean_price(good)\n belief = self.price_belief[good]\n mean = belief.mean()\n wobble = 0.05\n delta_to_mean = mean - public_mean_price\n if is_successful:\n if (order_type is OrderType.buy_order and delta_to_mean >\n SIGNIFICANT):\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n elif order_type is OrderType.sell_order and delta_to_mean < -SIGNIFICANT:\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n belief.low += wobble * mean\n belief.high -= wobble * mean\n else:\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n stocks = self.inventory.get_amount(good)\n ideal = self.inventory.get_ideal(good)\n if (order_type is OrderType.buy_order and stocks < \n LOW_INVENTORY * ideal):\n wobble *= 2\n elif order_type is OrderType.sell_order and stocks > HIGH_INVENTORY * ideal:\n wobble *= 2\n else:\n sells = self.market.history.sell_orders.average(good, 1)\n buys = self.market.history.buy_orders.average(good, 1)\n if sells + buys > 0:\n supply_vs_demand = (sells - buys) / (sells + buys)\n if (supply_vs_demand > SIG_IMBALANCE or \n supply_vs_demand < -SIG_IMBALANCE):\n new_mean = public_mean_price * (1 - supply_vs_demand)\n delta_to_mean = mean - new_mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n belief.low -= wobble * mean\n belief.high += wobble * mean\n if belief.low < MIN_PRICE:\n belief.low = MIN_PRICE\n elif belief.high < MIN_PRICE:\n belief.high = MIN_PRICE\n\n def __repr__(self):\n return '<Pop: id={} type={}>'.format(self.id, 
self.pop_job.title)\n <mask token>\n\n def __key__(self):\n return self.id\n\n def __hash__(self):\n return hash(self.__key__())\n\n def export(self):\n model = {'pop_job': self.pop_job.ref(), 'population': self.\n population, 'population_yesterday': self.population_yesterday,\n 'inventory': self.inventory.export(), 'money': self.money,\n 'money_yesterday': self.money_yesterday, 'successful_trades':\n self.successful_trades, 'failed_trades': self.failed_trades,\n 'bankrupt_times': self.bankrupt_times}\n if self.pop_job is PopJob.merchant:\n location_id = None\n if self.trade_location:\n location_id = self.trade_location.id\n model.update({'location': self.location.id, 'trade_location':\n location_id, 'trade_good': self.trade_good, 'trade_amount':\n self.trade_amount})\n return model\n",
"step-4": "import math\nfrom historia.utils import unique_id, position_in_range\nfrom historia.pops.models.inventory import Inventory\nfrom historia.economy.enums.resource import Good, NaturalResource\nfrom historia.economy.enums.order_type import OrderType\nfrom historia.economy.models.price_range import PriceRange\nfrom historia.economy.models.order import Order\nfrom historia.pops.enums.pop_job import PopJob\nDEBUG = False\n\n\nclass Pop(object):\n \"\"\"\n A simulated unit of population\n \"\"\"\n\n def __init__(self, province, pop_job, population):\n \"\"\"\n Creates a new Pop.\n manager (Historia)\n province (SecondaryDivision)\n culture (Culture)\n religion (Religion)\n language (Language)\n job (Job)\n \"\"\"\n self.bankrupt_times = 0\n self.home = province\n self.location = province\n self.id = unique_id('po')\n self.population = population\n self.population_yesterday = 0\n self.pop_job = pop_job\n self.money = pop_job.start_money\n self.money_yesterday = 0\n self.bankrupt = False\n self.inventory = Inventory(pop_job.inventory_size)\n self.give_start_inventory()\n self.update_ideal_inventory()\n self.price_belief = {}\n self.observed_trading_range = {}\n self.successful_trades = 0\n self.failed_trades = 0\n for good in Good.all():\n avg_price = self.market.avg_historial_price(good, 15)\n self.observed_trading_range[good] = [avg_price * 0.5, avg_price *\n 1.5]\n self.price_belief[good] = PriceRange(avg_price * 0.5, avg_price *\n 1.5)\n self.trade_location = None\n self.trade_good = None\n self.trade_amount = 0\n self.trading_days = 0\n\n @property\n def social_class(self):\n return self.pop_job.social_class\n\n @property\n def market(self):\n \"\"\"Get the market instance\"\"\"\n return self.location.market\n\n @property\n def profit(self):\n \"\"\"Determine today's profit\"\"\"\n return self.money - self.money_yesterday\n\n @property\n def total_trades(self):\n \"\"\"Total number of trades this Pop participated in\"\"\"\n return self.successful_trades + 
self.failed_trades\n\n @property\n def trade_success(self):\n \"\"\"Percent of trades that were successful\"\"\"\n if self.total_trades == 0:\n return 0\n return self.successful_trades / self.total_trades * 100\n\n @property\n def is_away(self):\n \"\"\"Is this Pop away from it's home?\"\"\"\n return self.home is not self.location\n\n def go_to_province(self, province):\n \"\"\"Moves the Pop to another Province\"\"\"\n self.location = province\n\n def decide_trade_plan(self):\n \"\"\"\n Decide what good to trade in and how much.\n Look for the most in demand good, or the most expensive good at the home Province\n Find a province near home province where its the cheapest and there's inventory\n \"\"\"\n self.trade_amount = 5\n most_demanded_goods = self.home.market.goods_demand_ratio(day_range=1)\n most_demanded_goods = sorted(most_demanded_goods.items(), key=lambda\n i: i[1], reverse=True)\n if self.trade_good:\n self.update_ideal_inventory()\n if DEBUG:\n print('Finding a Good to trade:')\n for good, demand in most_demanded_goods:\n if demand > 0:\n price_at_home = self.home.market.mean_price(good)\n if DEBUG:\n print('Good: {}, Demand: {}, Price: ${}'.format(good.\n title, demand, price_at_home))\n neighboring_markets = [p.market for p in self.location.\n owned_neighbors]\n neighboring_markets = [m for m in neighboring_markets if m.\n supply_for(good) > self.trade_amount]\n neighboring_markets.sort(key=lambda m: m.supply_for(good),\n reverse=True)\n if len(neighboring_markets) > 0:\n target = neighboring_markets[0].location\n price_at_target = target.market.mean_price(good)\n if price_at_home > price_at_target:\n offset = 0\n if good is Good.bread:\n offset = 1\n self.inventory.set_ideal(good, self.trade_amount +\n offset)\n self.trade_location = target\n if DEBUG:\n print(\n '\\tTarget: {}, Supply: {}, Price: ${}, Price at home: ${}'\n .format(self.trade_location.name, self.\n trade_location.market.supply_for(good),\n 
self.trade_location.market.mean_price(good),\n price_at_home))\n self.trade_good = good\n return\n elif DEBUG:\n print(\n '\\tPrice is higher at target (home: ${} target: ${})'\n .format(price_at_home, price_at_target))\n elif DEBUG:\n print('\\tNo markets selling {} found'.format(good))\n\n def update_ideal_inventory(self):\n \"\"\"Update ideal inventory\"\"\"\n for good in Good.all():\n self.inventory.set_ideal(good, 0)\n for item in self.pop_job.ideal_inventory:\n self.inventory.set_ideal(item['good'], item['amount'])\n\n def give_start_inventory(self):\n \"\"\"Give the Pop the inventory it needs to do its job\"\"\"\n for item in self.pop_job.start_inventory:\n self.inventory.add(item['good'], item['amount'])\n\n def change_population(self, trade_success):\n \"\"\"Change the population based off the trade\"\"\"\n self.population_yesterday = self.population\n if trade_success:\n self.population += round(self.population * 0.01)\n else:\n self.population -= round(self.population * 0.002)\n\n def handle_bankruptcy(self, pop_job):\n \"\"\"Change job, create money out of thin air, update ideal inventory\"\"\"\n self.pop_job = pop_job\n self.bankrupt_times += 1\n self.money = 2\n self.update_ideal_inventory()\n self.give_start_inventory()\n\n def perform_logic(self):\n \"\"\"Depending on PopJob, perform logic (including production)\"\"\"\n logic = self.pop_job.logic(self)\n logic.perform()\n\n def create_buy_order(self, good, limit):\n \"\"\"Create a buy order for a given Good at a determined quantity\"\"\"\n bid_price = self.determine_price_of(good)\n ideal = self.determine_buy_quantity(good)\n quantity_to_buy = limit if ideal > limit else ideal\n if quantity_to_buy > 0:\n return Order(self, OrderType.buy_order, quantity_to_buy,\n bid_price, good)\n return False\n\n def create_sell_order(self, good, limit):\n \"\"\"Create a sell order for a given Good at a determined quantity\"\"\"\n sell_price = self.determine_price_of(good)\n ideal = 
self.determine_sell_quantity(good)\n quantity_to_sell = limit if ideal < limit else ideal\n if quantity_to_sell > 0:\n return Order(self, OrderType.sell_order, quantity_to_sell,\n sell_price, good)\n return False\n\n def price_belief_for(self, good):\n \"\"\"Gets the price belief this agent has for a particular Good\"\"\"\n if good in self.price_belief:\n return self.price_belief[good]\n\n def determine_price_of(self, good):\n \"\"\"Determine the price of a particular good\"\"\"\n return self.price_belief_for(good).random()\n\n def trading_range_extremes(self, good):\n \"\"\"Gets the lowest and highst price of a Good this agent has seen\"\"\"\n trading_range = self.observed_trading_range[good]\n return PriceRange(min(trading_range), max(trading_range))\n\n def determine_sell_quantity(self, good):\n \"\"\"Determine how much inventory goods to sell based on market conditions\"\"\"\n mean = self.market.avg_historial_price(good, 15)\n trading_range = self.trading_range_extremes(good)\n favoribility = position_in_range(mean, trading_range.low,\n trading_range.high)\n amount_to_sell = round(favoribility * self.inventory.surplus(good))\n if amount_to_sell < 1:\n amount_to_sell = 1\n return amount_to_sell\n\n def determine_buy_quantity(self, good):\n \"\"\"Determine how much goods to buy based on market conditions\"\"\"\n mean = self.market.avg_historial_price(good, 15)\n trading_range = self.trading_range_extremes(good)\n favoribility = 1 - position_in_range(mean, trading_range.low,\n trading_range.high)\n amount_to_buy = round(favoribility * self.inventory.shortage(good))\n if amount_to_buy < 1:\n amount_to_buy = 1\n return amount_to_buy\n\n def generate_orders(self, good):\n \"\"\"\n If the Pop needs a Good to perform production, buy it\n If the Pop has surplus Resources, sell them\n \"\"\"\n surplus = self.inventory.surplus(good)\n if surplus >= 1:\n sell_amount = surplus\n order = self.create_sell_order(good, surplus)\n if order:\n self.market.sell(order)\n else:\n 
shortage = self.inventory.shortage(good)\n free_space = self.inventory.empty_space\n if shortage > 0:\n if shortage <= free_space:\n limit = shortage\n else:\n limit = math.floor(free_space / shortage)\n if limit > 0:\n order = self.create_buy_order(good, limit)\n if order:\n self.market.buy(order)\n\n def update_price_model(self, good, order_type, is_successful,\n clearing_price=0):\n \"\"\"\n Update the Pop's price model for the given resource\n good (Good) The Good which was orderd\n order_type (OrderType) Which kind of Order this was\n is_successful (bool) whether or not the Order was successful\n clearing_price (float) The price per unit of the good that was ordered\n as defined by the Pop which ordered it\n \"\"\"\n SIGNIFICANT = 0.25\n SIG_IMBALANCE = 0.33\n LOW_INVENTORY = 0.1\n HIGH_INVENTORY = 2.0\n MIN_PRICE = 0.01\n if is_successful:\n self.observed_trading_range[good].append(clearing_price)\n public_mean_price = self.market.mean_price(good)\n belief = self.price_belief[good]\n mean = belief.mean()\n wobble = 0.05\n delta_to_mean = mean - public_mean_price\n if is_successful:\n if (order_type is OrderType.buy_order and delta_to_mean >\n SIGNIFICANT):\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n elif order_type is OrderType.sell_order and delta_to_mean < -SIGNIFICANT:\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n belief.low += wobble * mean\n belief.high -= wobble * mean\n else:\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n stocks = self.inventory.get_amount(good)\n ideal = self.inventory.get_ideal(good)\n if (order_type is OrderType.buy_order and stocks < \n LOW_INVENTORY * ideal):\n wobble *= 2\n elif order_type is OrderType.sell_order and stocks > HIGH_INVENTORY * ideal:\n wobble *= 2\n else:\n sells = self.market.history.sell_orders.average(good, 1)\n buys = self.market.history.buy_orders.average(good, 1)\n if sells + buys > 0:\n supply_vs_demand = (sells - buys) / (sells + 
buys)\n if (supply_vs_demand > SIG_IMBALANCE or \n supply_vs_demand < -SIG_IMBALANCE):\n new_mean = public_mean_price * (1 - supply_vs_demand)\n delta_to_mean = mean - new_mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n belief.low -= wobble * mean\n belief.high += wobble * mean\n if belief.low < MIN_PRICE:\n belief.low = MIN_PRICE\n elif belief.high < MIN_PRICE:\n belief.high = MIN_PRICE\n\n def __repr__(self):\n return '<Pop: id={} type={}>'.format(self.id, self.pop_job.title)\n\n def __eq__(self, other):\n return self.id == other.id\n\n def __key__(self):\n return self.id\n\n def __hash__(self):\n return hash(self.__key__())\n\n def export(self):\n model = {'pop_job': self.pop_job.ref(), 'population': self.\n population, 'population_yesterday': self.population_yesterday,\n 'inventory': self.inventory.export(), 'money': self.money,\n 'money_yesterday': self.money_yesterday, 'successful_trades':\n self.successful_trades, 'failed_trades': self.failed_trades,\n 'bankrupt_times': self.bankrupt_times}\n if self.pop_job is PopJob.merchant:\n location_id = None\n if self.trade_location:\n location_id = self.trade_location.id\n model.update({'location': self.location.id, 'trade_location':\n location_id, 'trade_good': self.trade_good, 'trade_amount':\n self.trade_amount})\n return model\n",
"step-5": "import math\nfrom historia.utils import unique_id, position_in_range\nfrom historia.pops.models.inventory import Inventory\nfrom historia.economy.enums.resource import Good, NaturalResource\nfrom historia.economy.enums.order_type import OrderType\nfrom historia.economy.models.price_range import PriceRange\nfrom historia.economy.models.order import Order\nfrom historia.pops.enums.pop_job import PopJob\n\nDEBUG = False\n\nclass Pop(object):\n \"\"\"\n A simulated unit of population\n \"\"\"\n\n def __init__(self, province, pop_job, population):\n \"\"\"\n Creates a new Pop.\n manager (Historia)\n province (SecondaryDivision)\n culture (Culture)\n religion (Religion)\n language (Language)\n job (Job)\n \"\"\"\n self.bankrupt_times = 0\n self.home = province\n self.location = province\n self.id = unique_id('po')\n\n self.population = population\n self.population_yesterday = 0\n\n self.pop_job = pop_job\n\n # ECONOMY\n self.money = pop_job.start_money\n self.money_yesterday = 0\n self.bankrupt = False\n\n # set inventory and ideal amounts\n self.inventory = Inventory(pop_job.inventory_size)\n self.give_start_inventory()\n\n self.update_ideal_inventory()\n\n # a dictionary of Goods to PriceRanges\n # represents the price range the agent considers valid for each Good\n self.price_belief = {}\n\n # a dictionary of Goods to price list\n # represents the prices of the good that the Pop has observed\n # during the time they have been trading\n self.observed_trading_range = {}\n\n self.successful_trades = 0\n self.failed_trades = 0\n\n # make some fake initial data\n for good in Good.all():\n avg_price = self.market.avg_historial_price(good, 15)\n # fake trades\n self.observed_trading_range[good] = [\n avg_price * 0.5,\n avg_price * 1.5\n ]\n # generate fake price belief\n self.price_belief[good] = PriceRange(avg_price * 0.5, avg_price * 1.5)\n\n # Merchant logic\n self.trade_location = None # the province this Pop is traveling to\n self.trade_good = None # what 
good we're trading in right now\n self.trade_amount = 0 # amount of trade_good we should be trading\n self.trading_days = 0 # number of days waiting to trade\n\n # Generic Pop properties\n @property\n def social_class(self):\n return self.pop_job.social_class\n\n @property\n def market(self):\n \"Get the market instance\"\n return self.location.market\n\n @property\n def profit(self):\n \"Determine today's profit\"\n return self.money - self.money_yesterday\n\n @property\n def total_trades(self):\n \"Total number of trades this Pop participated in\"\n return self.successful_trades + self.failed_trades\n\n @property\n def trade_success(self):\n \"Percent of trades that were successful\"\n if self.total_trades == 0:\n return 0\n return (self.successful_trades / self.total_trades) * 100\n\n @property\n def is_away(self):\n \"Is this Pop away from it's home?\"\n return self.home is not self.location\n\n # Merchant specific logic\n def go_to_province(self, province):\n \"Moves the Pop to another Province\"\n self.location = province\n\n def decide_trade_plan(self):\n \"\"\"\n Decide what good to trade in and how much.\n Look for the most in demand good, or the most expensive good at the home Province\n Find a province near home province where its the cheapest and there's inventory\n \"\"\"\n self.trade_amount = 5\n most_demanded_goods = self.home.market.goods_demand_ratio(day_range=1)\n most_demanded_goods = sorted(most_demanded_goods.items(), key=lambda i: i[1], reverse=True)\n\n # if we already had a trade good, refresh ideal inventory\n if self.trade_good:\n self.update_ideal_inventory()\n\n if DEBUG: print(\"Finding a Good to trade:\")\n\n for good, demand in most_demanded_goods:\n if demand > 0:\n # find nearby provinces where this has inventory and the price is lower\n price_at_home = self.home.market.mean_price(good)\n if DEBUG: print(\"Good: {}, Demand: {}, Price: ${}\".format(good.title, demand, price_at_home))\n neighboring_markets = [p.market for p in 
self.location.owned_neighbors]\n neighboring_markets = [m for m in neighboring_markets if m.supply_for(good) > self.trade_amount]\n neighboring_markets.sort(key=lambda m: m.supply_for(good), reverse=True)\n\n if len(neighboring_markets) > 0:\n # we found places where this good is cheaper and in inventory\n target = neighboring_markets[0].location\n price_at_target = target.market.mean_price(good)\n\n # only trade with prices where we can make money\n if price_at_home > price_at_target:\n offset = 0\n if good is Good.bread:\n offset = 1\n self.inventory.set_ideal(good, self.trade_amount + offset)\n self.trade_location = target\n if DEBUG:\n print(\"\\tTarget: {}, Supply: {}, Price: ${}, Price at home: ${}\".format(\n self.trade_location.name,\n self.trade_location.market.supply_for(good),\n self.trade_location.market.mean_price(good),\n price_at_home)\n )\n self.trade_good = good\n return\n else:\n if DEBUG: print(\"\\tPrice is higher at target (home: ${} target: ${})\".format(price_at_home, price_at_target))\n else:\n if DEBUG: print(\"\\tNo markets selling {} found\".format(good))\n\n\n # Generic economic logic\n def update_ideal_inventory(self):\n \"Update ideal inventory\"\n # reset so that the Pop can sell the inventory it doesn't need\n for good in Good.all():\n self.inventory.set_ideal(good, 0)\n\n # update ideal inventory for new Job\n for item in self.pop_job.ideal_inventory:\n self.inventory.set_ideal(item['good'], item['amount'])\n\n def give_start_inventory(self):\n \"Give the Pop the inventory it needs to do its job\"\n for item in self.pop_job.start_inventory:\n self.inventory.add(item['good'], item['amount'])\n\n def change_population(self, trade_success):\n \"Change the population based off the trade\"\n self.population_yesterday = self.population\n if trade_success:\n self.population += round(self.population * 0.01)\n else:\n self.population -= round(self.population * 0.002)\n\n def handle_bankruptcy(self, pop_job):\n \"Change job, create money out 
of thin air, update ideal inventory\"\n # TODO: stop creating money out of thin air\n self.pop_job = pop_job\n self.bankrupt_times += 1\n self.money = 2\n self.update_ideal_inventory()\n self.give_start_inventory()\n\n def perform_logic(self):\n \"Depending on PopJob, perform logic (including production)\"\n logic = self.pop_job.logic(self)\n logic.perform()\n\n def create_buy_order(self, good, limit):\n \"Create a buy order for a given Good at a determined quantity\"\n bid_price = self.determine_price_of(good)\n ideal = self.determine_buy_quantity(good)\n\n # can't buy more than limit\n quantity_to_buy = limit if ideal > limit else ideal\n if quantity_to_buy > 0:\n return Order(self, OrderType.buy_order, quantity_to_buy, bid_price, good)\n return False\n\n def create_sell_order(self, good, limit):\n \"Create a sell order for a given Good at a determined quantity\"\n sell_price = self.determine_price_of(good)\n ideal = self.determine_sell_quantity(good)\n\n # can't buy more than limit\n quantity_to_sell = limit if ideal < limit else ideal\n if quantity_to_sell > 0:\n return Order(self, OrderType.sell_order, quantity_to_sell, sell_price, good)\n return False\n\n def price_belief_for(self, good):\n \"Gets the price belief this agent has for a particular Good\"\n if good in self.price_belief:\n return self.price_belief[good]\n\n def determine_price_of(self, good):\n \"Determine the price of a particular good\"\n return self.price_belief_for(good).random()\n\n def trading_range_extremes(self, good):\n \"Gets the lowest and highst price of a Good this agent has seen\"\n trading_range = self.observed_trading_range[good]\n return PriceRange(min(trading_range), max(trading_range))\n\n def determine_sell_quantity(self, good):\n \"Determine how much inventory goods to sell based on market conditions\"\n mean = self.market.avg_historial_price(good, 15)\n trading_range = self.trading_range_extremes(good)\n\n favoribility = position_in_range(mean, trading_range.low, 
trading_range.high)\n amount_to_sell = round(favoribility * self.inventory.surplus(good))\n if amount_to_sell < 1:\n amount_to_sell = 1\n return amount_to_sell\n\n def determine_buy_quantity(self, good):\n \"Determine how much goods to buy based on market conditions\"\n mean = self.market.avg_historial_price(good, 15)\n trading_range = self.trading_range_extremes(good)\n\n favoribility = 1 - position_in_range(mean, trading_range.low, trading_range.high)\n amount_to_buy = round(favoribility * self.inventory.shortage(good))\n if amount_to_buy < 1:\n amount_to_buy = 1\n return amount_to_buy\n\n def generate_orders(self, good):\n \"\"\"\n If the Pop needs a Good to perform production, buy it\n If the Pop has surplus Resources, sell them\n \"\"\"\n surplus = self.inventory.surplus(good)\n if surplus >= 1: # sell inventory\n # the original only old one item here\n sell_amount = surplus\n order = self.create_sell_order(good, surplus)\n if order:\n # print('{} sells {} {}'.format(self.pop_job.title, sell_amount, good.name))\n self.market.sell(order)\n else: # buy more\n shortage = self.inventory.shortage(good)\n free_space = self.inventory.empty_space\n\n if shortage > 0:\n if shortage <= free_space:\n # enough space for ideal order\n limit = shortage\n else:\n # not enough space for ideal order\n limit = math.floor(free_space / shortage)\n\n if limit > 0:\n order = self.create_buy_order(good, limit)\n if order:\n # print('{} buys {} {}'.format(self.pop_job.title, limit, good.name))\n self.market.buy(order)\n # else:\n # print(\"{} has no shortage of {} (has shortage: {})\".format(self.pop_job.title, good.title, shortage))\n\n\n\n def update_price_model(self, good, order_type, is_successful, clearing_price=0):\n \"\"\"\n Update the Pop's price model for the given resource\n good (Good) The Good which was orderd\n order_type (OrderType) Which kind of Order this was\n is_successful (bool) whether or not the Order was successful\n clearing_price (float) The price per unit of 
the good that was ordered\n as defined by the Pop which ordered it\n \"\"\"\n\n SIGNIFICANT = 0.25 # 25% more or less is \"significant\"\n SIG_IMBALANCE = 0.33\n LOW_INVENTORY = 0.1 # 10% of ideal inventory = \"LOW\"\n HIGH_INVENTORY = 2.0 # 200% of ideal inventory = \"HIGH\"\n MIN_PRICE = 0.01 # lowest allowed price of a Good\n\n if is_successful:\n # add this trade to the observed trading range\n self.observed_trading_range[good].append(clearing_price)\n\n public_mean_price = self.market.mean_price(good)\n belief = self.price_belief[good]\n mean = belief.mean()\n wobble = 0.05 # the degree which the Pop should bid outside the belief\n\n # how different the public mean price is from the price belief\n delta_to_mean = mean - public_mean_price\n\n if is_successful:\n if order_type is OrderType.buy_order and delta_to_mean > SIGNIFICANT:\n # this Pop overpaid, shift belief towards mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n elif order_type is OrderType.sell_order and delta_to_mean < -SIGNIFICANT:\n # this Pop underpaid!, shift belief towards mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n\n # increase the belief's certainty\n belief.low += wobble * mean\n belief.high -= wobble * mean\n\n else:\n # shift towards mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n\n # check for inventory special cases\n stocks = self.inventory.get_amount(good)\n ideal = self.inventory.get_ideal(good)\n\n # if we're buying and inventory is too low\n # meaning we're desperate to buy\n if order_type is OrderType.buy_order and stocks < LOW_INVENTORY * ideal:\n wobble *= 2\n\n # if we're selling and inventory is too high\n # meaning we're desperate to sell\n elif order_type is OrderType.sell_order and stocks > HIGH_INVENTORY * ideal:\n wobble *= 2\n # all other cases\n else:\n sells = self.market.history.sell_orders.average(good, 1)\n buys = self.market.history.buy_orders.average(good, 1)\n\n # TODO: figure 
out why this is sometimes 0\n if sells + buys > 0:\n\n supply_vs_demand = (sells - buys) / (sells + buys)\n\n if supply_vs_demand > SIG_IMBALANCE or supply_vs_demand < -SIG_IMBALANCE:\n # too much supply? lower bid lower to sell faster\n # too much demand? raise price to buy faster\n\n new_mean = public_mean_price * (1 - supply_vs_demand)\n delta_to_mean = mean - new_mean\n\n # shift the price belief to the new price mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n\n\n # decrease belief's certainty since we've just changed it (we could be wrong)\n belief.low -= wobble * mean\n belief.high += wobble * mean\n\n # make sure the price belief doesn't decrease below the minimum\n if belief.low < MIN_PRICE:\n belief.low = MIN_PRICE\n elif belief.high < MIN_PRICE:\n belief.high = MIN_PRICE\n\n # Python utility methods\n def __repr__(self):\n return \"<Pop: id={} type={}>\".format(self.id, self.pop_job.title)\n\n def __eq__(self, other):\n return self.id == other.id\n\n def __key__(self):\n return self.id\n\n def __hash__(self):\n return hash(self.__key__())\n\n def export(self):\n model = {\n 'pop_job': self.pop_job.ref(),\n 'population': self.population,\n 'population_yesterday': self.population_yesterday,\n 'inventory': self.inventory.export(),\n 'money': self.money,\n 'money_yesterday': self.money_yesterday,\n 'successful_trades': self.successful_trades,\n 'failed_trades': self.failed_trades,\n 'bankrupt_times': self.bankrupt_times,\n }\n if self.pop_job is PopJob.merchant:\n location_id = None\n if self.trade_location:\n location_id = self.trade_location.id\n model.update({\n 'location': self.location.id,\n 'trade_location': location_id,\n 'trade_good': self.trade_good,\n 'trade_amount': self.trade_amount\n })\n return model\n",
"step-ids": [
15,
26,
28,
32,
33
]
}
|
[
15,
26,
28,
32,
33
] |
# import necessary modules
import cv2
import xlsxwriter
import statistics
from matplotlib import pyplot as plt
import math
import tqdm
import numpy as np
import datetime
def getDepths(imgs, img_names, intersectionCoords, stakeValidity, templateIntersections,
    upperBorder, tensors, actualTensors, intersectionDist, blobDistTemplate, debug, debug_directory,
    image_dates, imageSummary):
    """
    Function to calculate the change in snow depth for each stake using the tensor
    from the specified template.

    Writes a per-image / per-stake report to "snow-depths.xlsx" in the debug
    directory, saves a median-depth-over-time plot there, and (when debugging)
    writes overlay images showing template vs. measured intersection points.

    Keyword arguments:
    imgs -- list of input images
    img_names -- list of corresponding image file names
    intersectionCoords -- dict mapping image name to intersection coordinates
    stakeValidity -- dict mapping image name to per-stake validity flags
    templateIntersections -- list containing intersection coordinates for template
    upperBorder -- upper crop parameter (pixels removed from the top of images)
    tensors -- mm-per-pixel tensors from template image
    actualTensors -- tensors calculated for input images (True means "use template")
    intersectionDist -- dict mapping image name to blob-to-intersection distances
    blobDistTemplate -- blob-to-intersection distances from the template
    debug -- bool flag indicating whether output images should be saved
    debug_directory -- directory where output files should be written
    image_dates -- list containing dates of images extracted from EXIF data
    imageSummary -- dict containing information about each run (updated in place)

    Returns a tuple (depths, imageSummary) where depths maps each image name to
    a list of per-stake depth changes in mm (False where no depth was found).
    """
    # per-image median depths (direct measurement and blob-based estimate)
    median_depths = list()
    median_depths_est = list()

    # output dictionary mapping image name -> list of per-stake depth changes
    depths = dict()

    # create excel workbook and add worksheet
    dest = str(debug_directory) + 'snow-depths.xlsx'
    workbook = xlsxwriter.Workbook(dest)
    worksheet = workbook.add_worksheet()
    worksheet.set_column(0, len(tensors) + 3, 25)

    # centred cell format used for every cell in the sheet
    cell_format = workbook.add_format()
    cell_format.set_align('center')

    # header row: image, date, one column per stake, then the two medians
    worksheet.write(0, 0, "Image", cell_format)
    worksheet.write(0, 1, "Date", cell_format)
    worksheet.write(0, len(tensors) + 2, "Median Depth (mm)", cell_format)
    worksheet.write(0, len(tensors) + 3, "Median Estimate (mm)", cell_format)
    for i, j in enumerate(tensors):
        worksheet.write(0, i+2, ("Stake %s" % str(i)), cell_format)

    # start from the first data row
    row = 1
    col = 0

    # image iterator
    iterator = 0

    # iterate through images
    for img_ in tqdm.tqdm(imgs):
        # create an image to overlay points on if debugging
        if(debug):
            img_overlay = img_.copy()

        # list to hold calculated depths
        depths_stake = list()
        estimate_stake = list()

        # get image name
        img_name = img_names[iterator]

        # reset column and write image name / capture date
        col = 0
        worksheet.write(row, col, img_name, cell_format)
        if isinstance(image_dates[iterator], datetime.datetime):
            worksheet.write(row, col + 1, image_dates[iterator].strftime('%x %X'), cell_format)
        col = 2

        # get intersection coordinates
        coords_stake = intersectionCoords[img_name]

        # get blob intersection distances
        intersection_dist_stake = intersectionDist[img_name]

        # iterate through stakes in image
        for i, stake in enumerate(coords_stake):
            # identity check: a y-coordinate of 0.0 is a valid measurement,
            # only the sentinel False means "intersection point not found"
            if stakeValidity[img_name][i] and stake["average"][1] is not False:
                # add reference circles to output image if debugging
                # shows intersection point of image with reference to template
                if(debug):
                    cv2.circle(img_overlay, (int(templateIntersections[i][0]), int(templateIntersections[i][1]) - upperBorder), 5, (255,0,0), 3)
                    cv2.circle(img_overlay, (int(stake["average"][0]), int(stake["average"][1])), 5, (0,255,0), 2)

                # fall back to the template tensor only when the per-image
                # tensor is the sentinel True (identity check: a tensor that
                # happens to equal 1 must not be replaced)
                tensor = actualTensors[img_name][i] if actualTensors[img_name][i] is not True else tensors[i]

                # calculate change in snow depth in mm
                depth_change = ((templateIntersections[i][1] - upperBorder) - stake["average"][1]) * tensor

                # independent estimate using blob-to-intersection distances
                distances_stake = list()
                for w, x in enumerate(intersection_dist_stake[i]):
                    if x is not False:
                        distances_stake.append((abs(blobDistTemplate[i][w]) - abs(x)) * tensor)
                distance_estimate = statistics.median(distances_stake) if len(distances_stake) > 0 else 0

                # write to excel file
                worksheet.write(row, col + i, "%.2f (%.2f)" % (depth_change, distance_estimate), cell_format)

                # add to list
                depths_stake.append(depth_change)
                estimate_stake.append(distance_estimate)

            # if stake wasn't valid or intersection point not found
            else:
                # if stake was valid
                if stakeValidity[img_name][i]:
                    worksheet.write(row, col + i, "Not Found", cell_format)
                # invalid stake
                else:
                    worksheet.write(row, col + i, "Invalid Stake", cell_format)

                # False marks "no measurement" for this stake
                depths_stake.append(False)
                estimate_stake.append(False)

        # output debug image
        if(debug):
            cv2.imwrite(debug_directory + img_name, img_overlay)

        # add list to dictionary
        depths[img_name] = depths_stake

        # determine median depth (identity checks so 0.0 depths are kept)
        valid_depths = [x for x in depths_stake if x is not False]
        valid_estimates = [x for x in estimate_stake if x is not False]
        if(len(valid_depths) > 0):
            median = statistics.median(valid_depths)
            median_est = statistics.median(valid_estimates)
        else:
            median = False
            median_est = False

        # add to median depth list
        median_depths.append(median)
        median_depths_est.append(median_est)

        # write median to excel file (negative medians are reported as 0.0)
        if median is not False and median > 0:
            worksheet.write(row, len(tensors) + 2, "%.2f" % median, cell_format)
            worksheet.write(row, len(tensors) + 3, "%.2f" % median_est, cell_format)
        elif median is not False:
            worksheet.write(row, len(tensors) + 2, "0.0", cell_format)
            worksheet.write(row, len(tensors) + 3, "0.0", cell_format)
        else:
            worksheet.write(row, len(tensors) + 2, "n/a", cell_format)
            worksheet.write(row, len(tensors) + 3, "n/a", cell_format)

        # increment row
        row += 1

        # increment iterator
        iterator += 1

        # update image summary
        imageSummary[img_name][" "] = ""
        imageSummary[img_name]["Stake (Depth Calculation)"] = "Depth (mm)   Estimate (mm)"
        for e, depth in enumerate(depths_stake):
            if isinstance(depth, float):
                imageSummary[img_name]["  %d  " % (e+1)] = "%0.2f   %0.2f   " % \
                    (depth, estimate_stake[e])
            else:
                imageSummary[img_name]["  %d  " % (e+1)] = "%s   %s   " % \
                    ("n/a", "n/a")

    # close workbook
    workbook.close()

    # keep only images that produced a valid median depth
    filterSet = zip(median_depths, median_depths_est, image_dates)
    filterSet = [(x, y, z) for x, y, z in filterSet if x is not False]

    # nothing valid to plot: skip the graph instead of crashing on an
    # empty unpack (zip(*[]) would raise ValueError)
    if len(filterSet) == 0:
        return depths, imageSummary

    median_depths, median_depths_est, image_dates = zip(*filterSet)

    # clamp negative depths to zero for plotting
    median_depths = np.asarray(median_depths).clip(0)
    median_depths_est = np.asarray(median_depths_est).clip(0)

    # generate plot
    fig,ax = plt.subplots(1)
    plt.plot(image_dates, median_depths)
    plt.plot(image_dates, median_depths_est)
    plt.gcf().autofmt_xdate()
    plt.legend(['Median Depth', 'Median Estimate'], loc='upper left')
    ax.set_xlabel("Date")
    ax.set_ylabel("Snow Depth (mm)")
    plt.xticks(rotation=75)
    plt.tight_layout()

    # save figure
    plt.savefig(debug_directory + "depth-graph.jpg")
    plt.close()

    # return dictionary containing snow depth changes
    return depths, imageSummary
|
normal
|
{
"blob_id": "24a538dcc885b37eb0147a1ee089189f11b20f8a",
"index": 7945,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef getDepths(imgs, img_names, intersectionCoords, stakeValidity,\n templateIntersections, upperBorder, tensors, actualTensors,\n intersectionDist, blobDistTemplate, debug, debug_directory, image_dates,\n imageSummary):\n \"\"\"\n Function to calculate the change in snow depth for each stake using the tensor\n from the specified template\n\n Keyword arguments:\n imgs -- list of input images\n img_names -- list of corresponding image file names\n intersectionCoords -- list containing intersection coordinates for input images\n stakeValidity -- list indicating which stakes in input images are valid\n templateIntersections -- list containing intersection coordinates for template\n upperBorder -- upper crop parameter\n tensors -- tensors from template image\n actualTensors -- tensors calculated for input images\n intersectionDist -- list containing distances from blobs to intersection points\n for input images\n blobDistTemplate -- list containing blob to intersection point distances from\n template\n debug -- bool flag indicating whether output images should be saved\n debug_directory -- directory where output images should be written\n image_dates -- list containing dates of images extracted from EXIF data\n imageSummary -- dictionary containing information about each run\n \"\"\"\n median_depths = list()\n median_depths_est = list()\n depth_output = {}\n num_images = len(imgs)\n depths = dict()\n dest = str(debug_directory) + 'snow-depths.xlsx'\n workbook = xlsxwriter.Workbook(dest)\n worksheet = workbook.add_worksheet()\n worksheet.set_column(0, len(tensors) + 3, 25)\n cell_format = workbook.add_format()\n cell_format.set_align('center')\n worksheet.write(0, 0, 'Image', cell_format)\n worksheet.write(0, 1, 'Date', cell_format)\n worksheet.write(0, len(tensors) + 2, 'Median Depth (mm)', cell_format)\n worksheet.write(0, len(tensors) + 3, 'Median Estimate (mm)', cell_format)\n for i, j in enumerate(tensors):\n worksheet.write(0, i + 2, 
'Stake %s' % str(i), cell_format)\n row = 1\n col = 0\n iterator = 0\n for img_ in tqdm.tqdm(imgs):\n if debug:\n img_overlay = img_.copy()\n depths_stake = list()\n estimate_stake = list()\n img_name = img_names[iterator]\n col = 0\n worksheet.write(row, col, img_name, cell_format)\n if isinstance(image_dates[iterator], datetime.datetime):\n worksheet.write(row, col + 1, image_dates[iterator].strftime(\n '%x %X'), cell_format)\n col = 2\n coords_stake = intersectionCoords[img_name]\n intersection_dist_stake = intersectionDist[img_name]\n for i, stake in enumerate(coords_stake):\n if stakeValidity[img_name][i] and stake['average'][1] != False:\n if debug:\n cv2.circle(img_overlay, (int(templateIntersections[i][0\n ]), int(templateIntersections[i][1]) - upperBorder),\n 5, (255, 0, 0), 3)\n cv2.circle(img_overlay, (int(stake['average'][0]), int(\n stake['average'][1])), 5, (0, 255, 0), 2)\n tensor = actualTensors[img_name][i] if actualTensors[img_name][\n i] != True else tensors[i]\n depth_change = (templateIntersections[i][1] - upperBorder -\n stake['average'][1]) * tensor\n distances_stake = list()\n for w, x in enumerate(intersection_dist_stake[i]):\n if x != False:\n distances_stake.append((abs(blobDistTemplate[i][w]) -\n abs(x)) * tensor)\n distance_estimate = statistics.median(distances_stake) if len(\n distances_stake) > 0 else 0\n worksheet.write(row, col + i, '%.2f (%.2f)' % (depth_change,\n distance_estimate), cell_format)\n depths_stake.append(depth_change)\n estimate_stake.append(distance_estimate)\n else:\n if stakeValidity[img_name][i]:\n worksheet.write(row, col + i, 'Not Found', cell_format)\n else:\n worksheet.write(row, col + i, 'Invalid Stake', cell_format)\n depths_stake.append(False)\n estimate_stake.append(False)\n if debug:\n cv2.imwrite(debug_directory + img_name, img_overlay)\n depths[img_name] = depths_stake\n valid_depths = [x for x in depths_stake if x != False]\n valid_estimates = [x for x in estimate_stake if x != False]\n if 
len(valid_depths) > 0:\n median = statistics.median(valid_depths)\n median_est = statistics.median(valid_estimates)\n else:\n median = False\n median_est = False\n median_depths.append(median)\n median_depths_est.append(median_est)\n if median != False and median > 0:\n worksheet.write(row, len(tensors) + 2, '%.2f' % median, cell_format\n )\n worksheet.write(row, len(tensors) + 3, '%.2f' % median_est,\n cell_format)\n elif median != False:\n worksheet.write(row, len(tensors) + 2, '0.0', cell_format)\n worksheet.write(row, len(tensors) + 3, '0.0', cell_format)\n else:\n worksheet.write(row, len(tensors) + 2, 'n/a', cell_format)\n worksheet.write(row, len(tensors) + 3, 'n/a', cell_format)\n row += 1\n iterator += 1\n imageSummary[img_name][' '] = ''\n imageSummary[img_name]['Stake (Depth Calculation)'\n ] = 'Depth (mm) Estimate (mm)'\n for e, depth in enumerate(depths_stake):\n if isinstance(depth, float):\n imageSummary[img_name][' %d ' % (e + 1)\n ] = '%0.2f %0.2f ' % (depth,\n estimate_stake[e])\n else:\n imageSummary[img_name][' %d ' % (e + 1)\n ] = '%s %s ' % ('n/a', 'n/a')\n workbook.close()\n filterSet = zip(median_depths, median_depths_est, image_dates)\n filterSet = [(x, y, z) for x, y, z in filterSet if x != False]\n median_depths, median_depths_est, image_dates = zip(*filterSet)\n median_depths = np.asarray(median_depths).clip(0)\n median_depths_est = np.asarray(median_depths_est).clip(0)\n fig, ax = plt.subplots(1)\n plt.plot(image_dates, median_depths)\n plt.plot(image_dates, median_depths_est)\n plt.gcf().autofmt_xdate()\n plt.legend(['Median Depth', 'Median Estimate'], loc='upper left')\n ax.set_xlabel('Date')\n ax.set_ylabel('Snow Depth (mm)')\n plt.xticks(rotation=75)\n plt.tight_layout()\n plt.savefig(debug_directory + 'depth-graph.jpg')\n plt.close()\n return depths, imageSummary\n",
"step-3": "import cv2\nimport xlsxwriter\nimport statistics\nfrom matplotlib import pyplot as plt\nimport math\nimport tqdm\nimport numpy as np\nimport datetime\n\n\ndef getDepths(imgs, img_names, intersectionCoords, stakeValidity,\n templateIntersections, upperBorder, tensors, actualTensors,\n intersectionDist, blobDistTemplate, debug, debug_directory, image_dates,\n imageSummary):\n \"\"\"\n Function to calculate the change in snow depth for each stake using the tensor\n from the specified template\n\n Keyword arguments:\n imgs -- list of input images\n img_names -- list of corresponding image file names\n intersectionCoords -- list containing intersection coordinates for input images\n stakeValidity -- list indicating which stakes in input images are valid\n templateIntersections -- list containing intersection coordinates for template\n upperBorder -- upper crop parameter\n tensors -- tensors from template image\n actualTensors -- tensors calculated for input images\n intersectionDist -- list containing distances from blobs to intersection points\n for input images\n blobDistTemplate -- list containing blob to intersection point distances from\n template\n debug -- bool flag indicating whether output images should be saved\n debug_directory -- directory where output images should be written\n image_dates -- list containing dates of images extracted from EXIF data\n imageSummary -- dictionary containing information about each run\n \"\"\"\n median_depths = list()\n median_depths_est = list()\n depth_output = {}\n num_images = len(imgs)\n depths = dict()\n dest = str(debug_directory) + 'snow-depths.xlsx'\n workbook = xlsxwriter.Workbook(dest)\n worksheet = workbook.add_worksheet()\n worksheet.set_column(0, len(tensors) + 3, 25)\n cell_format = workbook.add_format()\n cell_format.set_align('center')\n worksheet.write(0, 0, 'Image', cell_format)\n worksheet.write(0, 1, 'Date', cell_format)\n worksheet.write(0, len(tensors) + 2, 'Median Depth (mm)', cell_format)\n 
worksheet.write(0, len(tensors) + 3, 'Median Estimate (mm)', cell_format)\n for i, j in enumerate(tensors):\n worksheet.write(0, i + 2, 'Stake %s' % str(i), cell_format)\n row = 1\n col = 0\n iterator = 0\n for img_ in tqdm.tqdm(imgs):\n if debug:\n img_overlay = img_.copy()\n depths_stake = list()\n estimate_stake = list()\n img_name = img_names[iterator]\n col = 0\n worksheet.write(row, col, img_name, cell_format)\n if isinstance(image_dates[iterator], datetime.datetime):\n worksheet.write(row, col + 1, image_dates[iterator].strftime(\n '%x %X'), cell_format)\n col = 2\n coords_stake = intersectionCoords[img_name]\n intersection_dist_stake = intersectionDist[img_name]\n for i, stake in enumerate(coords_stake):\n if stakeValidity[img_name][i] and stake['average'][1] != False:\n if debug:\n cv2.circle(img_overlay, (int(templateIntersections[i][0\n ]), int(templateIntersections[i][1]) - upperBorder),\n 5, (255, 0, 0), 3)\n cv2.circle(img_overlay, (int(stake['average'][0]), int(\n stake['average'][1])), 5, (0, 255, 0), 2)\n tensor = actualTensors[img_name][i] if actualTensors[img_name][\n i] != True else tensors[i]\n depth_change = (templateIntersections[i][1] - upperBorder -\n stake['average'][1]) * tensor\n distances_stake = list()\n for w, x in enumerate(intersection_dist_stake[i]):\n if x != False:\n distances_stake.append((abs(blobDistTemplate[i][w]) -\n abs(x)) * tensor)\n distance_estimate = statistics.median(distances_stake) if len(\n distances_stake) > 0 else 0\n worksheet.write(row, col + i, '%.2f (%.2f)' % (depth_change,\n distance_estimate), cell_format)\n depths_stake.append(depth_change)\n estimate_stake.append(distance_estimate)\n else:\n if stakeValidity[img_name][i]:\n worksheet.write(row, col + i, 'Not Found', cell_format)\n else:\n worksheet.write(row, col + i, 'Invalid Stake', cell_format)\n depths_stake.append(False)\n estimate_stake.append(False)\n if debug:\n cv2.imwrite(debug_directory + img_name, img_overlay)\n depths[img_name] = 
depths_stake\n valid_depths = [x for x in depths_stake if x != False]\n valid_estimates = [x for x in estimate_stake if x != False]\n if len(valid_depths) > 0:\n median = statistics.median(valid_depths)\n median_est = statistics.median(valid_estimates)\n else:\n median = False\n median_est = False\n median_depths.append(median)\n median_depths_est.append(median_est)\n if median != False and median > 0:\n worksheet.write(row, len(tensors) + 2, '%.2f' % median, cell_format\n )\n worksheet.write(row, len(tensors) + 3, '%.2f' % median_est,\n cell_format)\n elif median != False:\n worksheet.write(row, len(tensors) + 2, '0.0', cell_format)\n worksheet.write(row, len(tensors) + 3, '0.0', cell_format)\n else:\n worksheet.write(row, len(tensors) + 2, 'n/a', cell_format)\n worksheet.write(row, len(tensors) + 3, 'n/a', cell_format)\n row += 1\n iterator += 1\n imageSummary[img_name][' '] = ''\n imageSummary[img_name]['Stake (Depth Calculation)'\n ] = 'Depth (mm) Estimate (mm)'\n for e, depth in enumerate(depths_stake):\n if isinstance(depth, float):\n imageSummary[img_name][' %d ' % (e + 1)\n ] = '%0.2f %0.2f ' % (depth,\n estimate_stake[e])\n else:\n imageSummary[img_name][' %d ' % (e + 1)\n ] = '%s %s ' % ('n/a', 'n/a')\n workbook.close()\n filterSet = zip(median_depths, median_depths_est, image_dates)\n filterSet = [(x, y, z) for x, y, z in filterSet if x != False]\n median_depths, median_depths_est, image_dates = zip(*filterSet)\n median_depths = np.asarray(median_depths).clip(0)\n median_depths_est = np.asarray(median_depths_est).clip(0)\n fig, ax = plt.subplots(1)\n plt.plot(image_dates, median_depths)\n plt.plot(image_dates, median_depths_est)\n plt.gcf().autofmt_xdate()\n plt.legend(['Median Depth', 'Median Estimate'], loc='upper left')\n ax.set_xlabel('Date')\n ax.set_ylabel('Snow Depth (mm)')\n plt.xticks(rotation=75)\n plt.tight_layout()\n plt.savefig(debug_directory + 'depth-graph.jpg')\n plt.close()\n return depths, imageSummary\n",
"step-4": "# import necessary modules\nimport cv2\nimport xlsxwriter\nimport statistics\nfrom matplotlib import pyplot as plt\nimport math\nimport tqdm\nimport numpy as np\nimport datetime\n\ndef getDepths(imgs, img_names, intersectionCoords, stakeValidity, templateIntersections,\n upperBorder, tensors, actualTensors, intersectionDist, blobDistTemplate, debug, debug_directory,\n image_dates, imageSummary):\n \"\"\"\n Function to calculate the change in snow depth for each stake using the tensor\n from the specified template\n\n Keyword arguments:\n imgs -- list of input images\n img_names -- list of corresponding image file names\n intersectionCoords -- list containing intersection coordinates for input images\n stakeValidity -- list indicating which stakes in input images are valid\n templateIntersections -- list containing intersection coordinates for template\n upperBorder -- upper crop parameter\n tensors -- tensors from template image\n actualTensors -- tensors calculated for input images\n intersectionDist -- list containing distances from blobs to intersection points\n for input images\n blobDistTemplate -- list containing blob to intersection point distances from\n template\n debug -- bool flag indicating whether output images should be saved\n debug_directory -- directory where output images should be written\n image_dates -- list containing dates of images extracted from EXIF data\n imageSummary -- dictionary containing information about each run\n \"\"\"\n\n # list containing median depths for each image\n median_depths = list()\n median_depths_est = list()\n\n # contains output data for JSON file\n depth_output = {}\n\n # num of images\n num_images = len(imgs)\n\n # create output dictionary for images\n depths = dict()\n\n # create excel workbook and add worksheet\n dest = str(debug_directory) + 'snow-depths.xlsx'\n workbook = xlsxwriter.Workbook(dest)\n worksheet = workbook.add_worksheet()\n worksheet.set_column(0, len(tensors) + 3, 25)\n\n # create 
format\n cell_format = workbook.add_format()\n cell_format.set_align('center')\n\n # add titles\n worksheet.write(0, 0, \"Image\", cell_format)\n worksheet.write(0, 1, \"Date\", cell_format)\n worksheet.write(0, len(tensors) + 2, \"Median Depth (mm)\", cell_format)\n worksheet.write(0, len(tensors) + 3, \"Median Estimate (mm)\", cell_format)\n for i, j in enumerate(tensors):\n worksheet.write(0, i+2, (\"Stake %s\" % str(i)), cell_format)\n\n # start from the first cell\n row = 1\n col = 0\n\n # image iterator\n iterator = 0\n\n # iterate through images\n for img_ in tqdm.tqdm(imgs):\n # create an image to overlay points on if debugging\n if(debug):\n img_overlay = img_.copy()\n\n # list to hold calculated depths\n depths_stake = list()\n estimate_stake = list()\n\n # get image name\n img_name = img_names[iterator]\n\n # reset column\n col = 0\n\n # write to excel file\n worksheet.write(row, col, img_name, cell_format)\n if isinstance(image_dates[iterator], datetime.datetime):\n worksheet.write(row, col + 1, image_dates[iterator].strftime('%x %X'), cell_format)\n col = 2\n\n # get intersection coordiantes\n coords_stake = intersectionCoords[img_name]\n\n # get blob intersection distances\n intersection_dist_stake = intersectionDist[img_name]\n\n # iterate through stakes in image\n for i, stake in enumerate(coords_stake):\n # if stake is valid and intersection point was found\n if stakeValidity[img_name][i] and stake[\"average\"][1] != False:\n # add reference circles to output image if debugging\n # shows intersection point of image with reference to template\n if(debug):\n cv2.circle(img_overlay, (int(templateIntersections[i][0]), int(templateIntersections[i][1]) - upperBorder), 5, (255,0,0), 3)\n cv2.circle(img_overlay, (int(stake[\"average\"][0]), int(stake[\"average\"][1])), 5, (0,255,0), 2)\n\n # calculate change in snow depth in mm\n tensor = actualTensors[img_name][i] if actualTensors[img_name][i] != True else tensors[i]\n depth_change = 
((templateIntersections[i][1] - upperBorder) - stake[\"average\"][1]) * tensor\n\n # calculate change in snow depth using blob distances\n distances_stake = list()\n for w, x in enumerate(intersection_dist_stake[i]):\n if x != False:\n distances_stake.append((abs(blobDistTemplate[i][w]) - abs(x)) * tensor)\n distance_estimate = statistics.median(distances_stake) if len(distances_stake) > 0 else 0\n\n # write to excel file\n worksheet.write(row, col + i, \"%.2f (%.2f)\" % (depth_change, distance_estimate), cell_format)\n\n # add to list\n depths_stake.append(depth_change)\n estimate_stake.append(distance_estimate)\n\n # if stake wasn't valid or intersection point not found\n else:\n # if stake was valid\n if stakeValidity[img_name][i]:\n worksheet.write(row, col + i, \"Not Found\", cell_format)\n # invalid stake\n else:\n worksheet.write(row, col + i, \"Invalid Stake\", cell_format)\n\n # append false to array\n depths_stake.append(False)\n estimate_stake.append(False)\n\n # output debug image\n if(debug):\n cv2.imwrite(debug_directory + img_name, img_overlay)\n\n # add list to dictionary\n depths[img_name] = depths_stake\n\n # determine median depth\n valid_depths = [x for x in depths_stake if x != False]\n valid_estimates = [x for x in estimate_stake if x != False]\n\n if(len(valid_depths) > 0):\n median = statistics.median(valid_depths)\n median_est = statistics.median(valid_estimates)\n else:\n median = False\n median_est = False\n\n # add to median depth list\n median_depths.append(median)\n median_depths_est.append(median_est)\n\n # write median to excel file\n if median != False and median > 0:\n worksheet.write(row, len(tensors) + 2, \"%.2f\" % median, cell_format)\n worksheet.write(row, len(tensors) + 3, \"%.2f\" % median_est, cell_format)\n elif median != False:\n worksheet.write(row, len(tensors) + 2, \"0.0\", cell_format)\n worksheet.write(row, len(tensors) + 3, \"0.0\", cell_format)\n else:\n worksheet.write(row, len(tensors) + 2, \"n/a\", 
cell_format)\n worksheet.write(row, len(tensors) + 3, \"n/a\", cell_format)\n\n # increment row\n row += 1\n\n # increment iterator\n iterator += 1\n\n # update image summary\n imageSummary[img_name][\" \"] = \"\"\n imageSummary[img_name][\"Stake (Depth Calculation)\"] = \"Depth (mm) Estimate (mm)\"\n for e, depth in enumerate(depths_stake):\n if isinstance(depth, float):\n imageSummary[img_name][\" %d \" % (e+1)] = \"%0.2f %0.2f \" % \\\n (depth, estimate_stake[e])\n else:\n imageSummary[img_name][\" %d \" % (e+1)] = \"%s %s \" % \\\n (\"n/a\", \"n/a\")\n\n # close workbook\n workbook.close()\n\n # remove negative values\n filterSet = zip(median_depths, median_depths_est, image_dates)\n filterSet = [(x, y, z) for x, y, z in filterSet if x != False]\n median_depths, median_depths_est, image_dates = zip(*filterSet)\n median_depths = np.asarray(median_depths).clip(0)\n median_depths_est = np.asarray(median_depths_est).clip(0)\n\n # generate plot\n fig,ax = plt.subplots(1)\n plt.plot(image_dates, median_depths)\n plt.plot(image_dates, median_depths_est)\n plt.gcf().autofmt_xdate()\n plt.legend(['Median Depth', 'Median Estimate'], loc='upper left')\n ax.set_xlabel(\"Date\")\n ax.set_ylabel(\"Snow Depth (mm)\")\n plt.xticks(rotation=75)\n plt.tight_layout()\n\n # save figure\n plt.savefig(debug_directory + \"depth-graph.jpg\")\n plt.close()\n\n # return dictionary containing snow depth changes\n return depths, imageSummary\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import discord
class Leveling:
    """Per-guild leveling configuration, backed by ``config.leveling``."""

    __slots__ = ('sid', 'channelID', 'message', 'noxpchannelIDs',
        'noxproleID', 'remove', 'bot', 'roles')
    sid: int
    channelID: int
    message: str
    noxpchannelIDs: list[int]
    noxproleID: int
    remove: bool
    roles: list[list]

    def __init__(self, bot, sid, record):
        """Build the config object from a stored row.

        bot -- the bot instance (used for guild lookups and DB access)
        sid -- id of the guild (server) this configuration belongs to
        record -- a mapping with the stored settings, or None when the
            guild has no leveling configuration yet
        """
        self.sid = sid
        self.bot = bot
        # Treat a missing record like an empty one: every .get() then
        # yields None, and the two list-valued fields fall back to [].
        data = record or {}
        self.remove = data.get('remove')
        self.message = data.get('message')
        self.channelID = data.get('channel')
        self.noxproleID = data.get('noxprole')
        self.noxpchannelIDs = data.get('noxpchannels') or []
        self.roles = data.get('roles') or []

    @property
    def channel(self) ->discord.TextChannel:
        """The configured announcement channel, or a falsy value when
        the guild or channel cannot be resolved."""
        if (guild := self.bot.get_guild(self.sid)):
            return guild.get_channel(self.channelID)
        return guild

    async def reload(self):
        """Re-fetch this guild's leveling settings from the database."""
        row = await self.bot.db.fetchrow(
            'SELECT * FROM config.leveling WHERE sid = $1', self.sid)
        self.remove = row['remove']
        self.message = row['message']
        self.channelID = row['channel']
        self.noxproleID = row['noxprole']
        self.roles = row['roles'] or []
        self.noxpchannelIDs = row['noxpchannels'] or []
|
normal
|
{
"blob_id": "346df9706dc222f43a77928964cd54e7d999a585",
"index": 8052,
"step-1": "<mask token>\n\n\nclass Leveling:\n <mask token>\n sid: int\n channelID: int\n message: str\n noxpchannelIDs: list[int]\n noxproleID: int\n remove: bool\n roles: list[list]\n <mask token>\n\n @property\n def channel(self) ->discord.TextChannel:\n guild = self.bot.get_guild(self.sid)\n return guild and guild.get_channel(self.channelID)\n\n async def reload(self):\n record = await self.bot.db.fetchrow(\n 'SELECT * FROM config.leveling WHERE sid = $1', self.sid)\n self.remove = record['remove']\n self.message = record['message']\n self.channelID = record['channel']\n self.noxproleID = record['noxprole']\n self.roles = record['roles'] or []\n self.noxpchannelIDs = record['noxpchannels'] or []\n",
"step-2": "<mask token>\n\n\nclass Leveling:\n <mask token>\n sid: int\n channelID: int\n message: str\n noxpchannelIDs: list[int]\n noxproleID: int\n remove: bool\n roles: list[list]\n\n def __init__(self, bot, sid, record):\n self.sid = sid\n self.bot = bot\n if record is None:\n self.roles = []\n self.noxpchannelIDs = []\n self.remove = None\n self.channelID = None\n self.message = None\n self.noxproleID = None\n else:\n self.remove = record.get('remove')\n self.message = record.get('message')\n self.channelID = record.get('channel')\n self.noxproleID = record.get('noxprole')\n self.noxpchannelIDs = record.get('noxpchannels') or []\n self.roles = record.get('roles') or []\n\n @property\n def channel(self) ->discord.TextChannel:\n guild = self.bot.get_guild(self.sid)\n return guild and guild.get_channel(self.channelID)\n\n async def reload(self):\n record = await self.bot.db.fetchrow(\n 'SELECT * FROM config.leveling WHERE sid = $1', self.sid)\n self.remove = record['remove']\n self.message = record['message']\n self.channelID = record['channel']\n self.noxproleID = record['noxprole']\n self.roles = record['roles'] or []\n self.noxpchannelIDs = record['noxpchannels'] or []\n",
"step-3": "<mask token>\n\n\nclass Leveling:\n __slots__ = ('sid', 'channelID', 'message', 'noxpchannelIDs',\n 'noxproleID', 'remove', 'bot', 'roles')\n sid: int\n channelID: int\n message: str\n noxpchannelIDs: list[int]\n noxproleID: int\n remove: bool\n roles: list[list]\n\n def __init__(self, bot, sid, record):\n self.sid = sid\n self.bot = bot\n if record is None:\n self.roles = []\n self.noxpchannelIDs = []\n self.remove = None\n self.channelID = None\n self.message = None\n self.noxproleID = None\n else:\n self.remove = record.get('remove')\n self.message = record.get('message')\n self.channelID = record.get('channel')\n self.noxproleID = record.get('noxprole')\n self.noxpchannelIDs = record.get('noxpchannels') or []\n self.roles = record.get('roles') or []\n\n @property\n def channel(self) ->discord.TextChannel:\n guild = self.bot.get_guild(self.sid)\n return guild and guild.get_channel(self.channelID)\n\n async def reload(self):\n record = await self.bot.db.fetchrow(\n 'SELECT * FROM config.leveling WHERE sid = $1', self.sid)\n self.remove = record['remove']\n self.message = record['message']\n self.channelID = record['channel']\n self.noxproleID = record['noxprole']\n self.roles = record['roles'] or []\n self.noxpchannelIDs = record['noxpchannels'] or []\n",
"step-4": "import discord\n\n\nclass Leveling:\n __slots__ = ('sid', 'channelID', 'message', 'noxpchannelIDs',\n 'noxproleID', 'remove', 'bot', 'roles')\n sid: int\n channelID: int\n message: str\n noxpchannelIDs: list[int]\n noxproleID: int\n remove: bool\n roles: list[list]\n\n def __init__(self, bot, sid, record):\n self.sid = sid\n self.bot = bot\n if record is None:\n self.roles = []\n self.noxpchannelIDs = []\n self.remove = None\n self.channelID = None\n self.message = None\n self.noxproleID = None\n else:\n self.remove = record.get('remove')\n self.message = record.get('message')\n self.channelID = record.get('channel')\n self.noxproleID = record.get('noxprole')\n self.noxpchannelIDs = record.get('noxpchannels') or []\n self.roles = record.get('roles') or []\n\n @property\n def channel(self) ->discord.TextChannel:\n guild = self.bot.get_guild(self.sid)\n return guild and guild.get_channel(self.channelID)\n\n async def reload(self):\n record = await self.bot.db.fetchrow(\n 'SELECT * FROM config.leveling WHERE sid = $1', self.sid)\n self.remove = record['remove']\n self.message = record['message']\n self.channelID = record['channel']\n self.noxproleID = record['noxprole']\n self.roles = record['roles'] or []\n self.noxpchannelIDs = record['noxpchannels'] or []\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
#!/usr/local/bin/python3
from sys import stdin
import argparse
# Baseline configuration; the command-line flags below override these.
alignment = 'l'
border = 'none'
stretch_factor = '1.0'
toprule = ''
custom_header = False
standalone = False
stretch = False
booktabs = False

# Command-line interface.
parser = argparse.ArgumentParser('<stdin> | csv2table')
parser.add_argument('-a', action='store_true', help='Create standalone tex document')
parser.add_argument('-b', action='store_true', help='Use booktab rules')
parser.add_argument('-box', action='store_true', help='Draw box border')
parser.add_argument('-c', action='store_true', help='Align all elements center')
parser.add_argument('-d', default=',', help='Table column delimiter')
parser.add_argument('-f', default='10', help='Font size in standalone document')
parser.add_argument('-grid', action='store_true', help='Draw grid border')
parser.add_argument('-i', type=int, default=4, help='Number of spaces to indent elements')
parser.add_argument('-l', action='store_true', help='Align all elements left')
parser.add_argument('-r', action='store_true', help='Align all elements right')
parser.add_argument('-s', default='1.0', help='Table stretch factor')
parser.add_argument('-t', default='nil', help='Custom table layout string')
args = parser.parse_args()

# Fold the parsed flags into the module-level settings.
standalone = args.a
booktabs = args.b
# -r beats -l beats -c when several alignment flags are combined,
# matching the order the flags were originally checked in; -grid
# likewise beats -box.
alignment = 'r' if args.r else 'l' if args.l else 'c' if args.c else 'l'
border = 'grid' if args.grid else 'box' if args.box else 'none'
delimiter = args.d
font = args.f
indent = ' ' * args.i
stretch = args.s != '1.0'
if stretch:
    stretch_factor = args.s
if args.t != 'nil':
    custom_header = args.t

# Global reader state for the main parsing loop below.
first_line_read = False
previous_line_read = False
# Returning proper rule types
def rule(type, use_booktabs=None):
    """Return the LaTeX horizontal-rule command for a rule position.

    type -- one of 'top', 'mid' or 'bottom'
    use_booktabs -- optional override for the module-level ``booktabs``
        flag; when None (the default) the global setting is consulted.
        The override removes the hidden global dependency so the helper
        can be exercised in isolation, and is backward-compatible.

    With booktabs enabled this returns the matching booktabs rule
    (None for an unrecognised *type*, as before); otherwise it always
    returns ``\\hline``.
    """
    flag = booktabs if use_booktabs is None else use_booktabs
    if not flag:
        return '\\hline'
    # Table lookup instead of an if-chain; .get() preserves the original
    # implicit None for unknown rule types.
    return {'top': '\\toprule', 'mid': '\\midrule',
            'bottom': '\\bottomrule'}.get(type)
# Returning proper header string
def make_header(alignment, border, custom, xs):
    """Build the column-layout spec string for ``\\begin{tabular}``.

    alignment -- per-column alignment character ('l', 'c' or 'r')
    border -- 'box' or 'grid' for vertical rules, anything else for none
    custom -- a user-supplied layout string, returned verbatim when truthy
    xs -- the fields of the first data row; only its length matters
    """
    if custom:
        return custom
    cols = [alignment for _ in xs]
    if border == 'grid':
        return '| ' + ' | '.join(cols) + ' |'
    if border == 'box':
        return '| ' + ' '.join(cols) + ' |'
    return ' '.join(cols)
# Placing standalone header
# Emit the standalone-document preamble when -a was given.
if standalone:
    print('\\documentclass[a4paper,{}pt]{{article}}'.format(font))
    if booktabs:
        print('\\usepackage{booktabs}')
    print('\\begin{document}')
    print('\\pagenumbering{gobble}')

# Main parser: translate each stdin line into LaTeX table rows.
for line in stdin:
    line = line.rstrip('\n')
    # Fix: skip completely empty lines -- indexing line[0] below raised
    # IndexError on them (whitespace-only lines are filtered further on).
    if not line:
        continue
    # ! Text is passed through literally
    if line[0] == '!':
        print(line[1:])
    # # Text is commented out
    elif line[0] != '#' and not line.isspace():
        xs = line.split(delimiter)
        if not first_line_read:
            if line == '---':
                # A leading '---' requests a top rule; remember it so it
                # is emitted together with the \begin{tabular} line.
                toprule = '\n' + indent + rule('top')
            else:
                header = make_header(alignment, border, custom_header, xs)
                if stretch:
                    print('\\bgroup')
                    print('\\def\\arraystretch{{{}}}%'.format(stretch_factor))
                # Fix: moved into this branch -- the original printed the
                # header for a leading '---' as well, raising NameError on
                # the still-undefined 'header' variable.
                print('\\begin{{tabular}}{{{}}}'.format(header), toprule)
                first_line_read = True
        if line == '---':
            # Bottom rule, but only once the table header exists.
            if first_line_read:
                print(rule('bottom'))
        elif line == '--':
            print(rule('mid'))
        else:
            # Grid borders get a rule between every pair of data rows.
            if previous_line_read and border == 'grid':
                print(rule('mid'))
            print(indent, ' & '.join(xs), '\\\\')
            previous_line_read = True

print('\\end{tabular}')
if stretch:
    print('\\egroup')
if standalone:
    print('\\end{document}')
|
normal
|
{
"blob_id": "591ac07e735e08bcafa8274eb1a1547a01261f55",
"index": 8430,
"step-1": "<mask token>\n\n\ndef rule(type):\n if booktabs:\n if type == 'top':\n return '\\\\toprule'\n if type == 'mid':\n return '\\\\midrule'\n if type == 'bottom':\n return '\\\\bottomrule'\n else:\n return '\\\\hline'\n\n\ndef make_header(alignment, border, custom, xs):\n if custom:\n return custom\n fields_n = len(xs)\n fields = [alignment] * fields_n\n if border == 'box':\n return '| ' + ' '.join(fields) + ' |'\n if border == 'grid':\n return '| ' + ' | '.join(fields) + ' |'\n else:\n return ' '.join(fields)\n\n\n<mask token>\n",
"step-2": "<mask token>\nparser.add_argument('-a', action='store_true', help=\n 'Create standalone tex document')\nparser.add_argument('-b', action='store_true', help='Use booktab rules')\nparser.add_argument('-box', action='store_true', help='Draw box border')\nparser.add_argument('-c', action='store_true', help='Align all elements center'\n )\nparser.add_argument('-d', default=',', help='Table column delimiter')\nparser.add_argument('-f', default='10', help='Font size in standalone document'\n )\nparser.add_argument('-grid', action='store_true', help='Draw grid border')\nparser.add_argument('-i', type=int, default=4, help=\n 'Number of spaces to indent elements')\nparser.add_argument('-l', action='store_true', help='Align all elements left')\nparser.add_argument('-r', action='store_true', help='Align all elements right')\nparser.add_argument('-s', default='1.0', help='Table stretch factor')\nparser.add_argument('-t', default='nil', help='Custom table layout string')\n<mask token>\nif args.a:\n standalone = True\nif args.b:\n booktabs = True\nif args.box:\n border = 'box'\nif args.c:\n alignment = 'c'\n<mask token>\nif args.grid:\n border = 'grid'\n<mask token>\nif args.l:\n alignment = 'l'\nif args.r:\n alignment = 'r'\nif args.s != '1.0':\n stretch = True\n stretch_factor = args.s\nif args.t != 'nil':\n custom_header = args.t\n<mask token>\n\n\ndef rule(type):\n if booktabs:\n if type == 'top':\n return '\\\\toprule'\n if type == 'mid':\n return '\\\\midrule'\n if type == 'bottom':\n return '\\\\bottomrule'\n else:\n return '\\\\hline'\n\n\ndef make_header(alignment, border, custom, xs):\n if custom:\n return custom\n fields_n = len(xs)\n fields = [alignment] * fields_n\n if border == 'box':\n return '| ' + ' '.join(fields) + ' |'\n if border == 'grid':\n return '| ' + ' | '.join(fields) + ' |'\n else:\n return ' '.join(fields)\n\n\nif standalone:\n print('\\\\documentclass[a4paper,{}pt]{{article}}'.format(font))\n if booktabs:\n 
print('\\\\usepackage{booktabs}')\n print('\\\\begin{document}')\n print('\\\\pagenumbering{gobble}')\nfor line in stdin:\n line = line.rstrip('\\n')\n if line[0] == '!':\n print(line[1:])\n elif line[0] != '#' and not line.isspace():\n xs = line.split(delimiter)\n if not first_line_read:\n if line == '---':\n toprule = '\\n' + indent + rule('top')\n else:\n header = make_header(alignment, border, custom_header, xs)\n if stretch:\n print('\\\\bgroup')\n print('\\\\def\\\\arraystretch{{{}}}%'.format(stretch_factor))\n print('\\\\begin{{tabular}}{{{}}}'.format(header), toprule)\n first_line_read = True\n if line == '---':\n if first_line_read:\n print(rule('bottom'))\n elif line == '--':\n print(rule('mid'))\n else:\n if previous_line_read and border == 'grid':\n print(rule('mid'))\n print(indent, ' & '.join(xs), '\\\\\\\\')\n previous_line_read = True\nprint('\\\\end{tabular}')\nif stretch:\n print('\\\\egroup')\nif standalone:\n print('\\\\end{document}')\n",
"step-3": "<mask token>\nalignment = 'l'\nborder = 'none'\nstretch_factor = '1.0'\ntoprule = ''\ncustom_header = False\nstandalone = False\nstretch = False\nbooktabs = False\nparser = argparse.ArgumentParser('<stdin> | csv2table')\nparser.add_argument('-a', action='store_true', help=\n 'Create standalone tex document')\nparser.add_argument('-b', action='store_true', help='Use booktab rules')\nparser.add_argument('-box', action='store_true', help='Draw box border')\nparser.add_argument('-c', action='store_true', help='Align all elements center'\n )\nparser.add_argument('-d', default=',', help='Table column delimiter')\nparser.add_argument('-f', default='10', help='Font size in standalone document'\n )\nparser.add_argument('-grid', action='store_true', help='Draw grid border')\nparser.add_argument('-i', type=int, default=4, help=\n 'Number of spaces to indent elements')\nparser.add_argument('-l', action='store_true', help='Align all elements left')\nparser.add_argument('-r', action='store_true', help='Align all elements right')\nparser.add_argument('-s', default='1.0', help='Table stretch factor')\nparser.add_argument('-t', default='nil', help='Custom table layout string')\nargs = parser.parse_args()\nif args.a:\n standalone = True\nif args.b:\n booktabs = True\nif args.box:\n border = 'box'\nif args.c:\n alignment = 'c'\ndelimiter = args.d\nfont = args.f\nif args.grid:\n border = 'grid'\nindent = ''.join([' '] * args.i)\nif args.l:\n alignment = 'l'\nif args.r:\n alignment = 'r'\nif args.s != '1.0':\n stretch = True\n stretch_factor = args.s\nif args.t != 'nil':\n custom_header = args.t\nfirst_line_read = False\nprevious_line_read = False\n\n\ndef rule(type):\n if booktabs:\n if type == 'top':\n return '\\\\toprule'\n if type == 'mid':\n return '\\\\midrule'\n if type == 'bottom':\n return '\\\\bottomrule'\n else:\n return '\\\\hline'\n\n\ndef make_header(alignment, border, custom, xs):\n if custom:\n return custom\n fields_n = len(xs)\n fields = [alignment] * 
fields_n\n if border == 'box':\n return '| ' + ' '.join(fields) + ' |'\n if border == 'grid':\n return '| ' + ' | '.join(fields) + ' |'\n else:\n return ' '.join(fields)\n\n\nif standalone:\n print('\\\\documentclass[a4paper,{}pt]{{article}}'.format(font))\n if booktabs:\n print('\\\\usepackage{booktabs}')\n print('\\\\begin{document}')\n print('\\\\pagenumbering{gobble}')\nfor line in stdin:\n line = line.rstrip('\\n')\n if line[0] == '!':\n print(line[1:])\n elif line[0] != '#' and not line.isspace():\n xs = line.split(delimiter)\n if not first_line_read:\n if line == '---':\n toprule = '\\n' + indent + rule('top')\n else:\n header = make_header(alignment, border, custom_header, xs)\n if stretch:\n print('\\\\bgroup')\n print('\\\\def\\\\arraystretch{{{}}}%'.format(stretch_factor))\n print('\\\\begin{{tabular}}{{{}}}'.format(header), toprule)\n first_line_read = True\n if line == '---':\n if first_line_read:\n print(rule('bottom'))\n elif line == '--':\n print(rule('mid'))\n else:\n if previous_line_read and border == 'grid':\n print(rule('mid'))\n print(indent, ' & '.join(xs), '\\\\\\\\')\n previous_line_read = True\nprint('\\\\end{tabular}')\nif stretch:\n print('\\\\egroup')\nif standalone:\n print('\\\\end{document}')\n",
"step-4": "from sys import stdin\nimport argparse\nalignment = 'l'\nborder = 'none'\nstretch_factor = '1.0'\ntoprule = ''\ncustom_header = False\nstandalone = False\nstretch = False\nbooktabs = False\nparser = argparse.ArgumentParser('<stdin> | csv2table')\nparser.add_argument('-a', action='store_true', help=\n 'Create standalone tex document')\nparser.add_argument('-b', action='store_true', help='Use booktab rules')\nparser.add_argument('-box', action='store_true', help='Draw box border')\nparser.add_argument('-c', action='store_true', help='Align all elements center'\n )\nparser.add_argument('-d', default=',', help='Table column delimiter')\nparser.add_argument('-f', default='10', help='Font size in standalone document'\n )\nparser.add_argument('-grid', action='store_true', help='Draw grid border')\nparser.add_argument('-i', type=int, default=4, help=\n 'Number of spaces to indent elements')\nparser.add_argument('-l', action='store_true', help='Align all elements left')\nparser.add_argument('-r', action='store_true', help='Align all elements right')\nparser.add_argument('-s', default='1.0', help='Table stretch factor')\nparser.add_argument('-t', default='nil', help='Custom table layout string')\nargs = parser.parse_args()\nif args.a:\n standalone = True\nif args.b:\n booktabs = True\nif args.box:\n border = 'box'\nif args.c:\n alignment = 'c'\ndelimiter = args.d\nfont = args.f\nif args.grid:\n border = 'grid'\nindent = ''.join([' '] * args.i)\nif args.l:\n alignment = 'l'\nif args.r:\n alignment = 'r'\nif args.s != '1.0':\n stretch = True\n stretch_factor = args.s\nif args.t != 'nil':\n custom_header = args.t\nfirst_line_read = False\nprevious_line_read = False\n\n\ndef rule(type):\n if booktabs:\n if type == 'top':\n return '\\\\toprule'\n if type == 'mid':\n return '\\\\midrule'\n if type == 'bottom':\n return '\\\\bottomrule'\n else:\n return '\\\\hline'\n\n\ndef make_header(alignment, border, custom, xs):\n if custom:\n return custom\n fields_n = len(xs)\n 
fields = [alignment] * fields_n\n if border == 'box':\n return '| ' + ' '.join(fields) + ' |'\n if border == 'grid':\n return '| ' + ' | '.join(fields) + ' |'\n else:\n return ' '.join(fields)\n\n\nif standalone:\n print('\\\\documentclass[a4paper,{}pt]{{article}}'.format(font))\n if booktabs:\n print('\\\\usepackage{booktabs}')\n print('\\\\begin{document}')\n print('\\\\pagenumbering{gobble}')\nfor line in stdin:\n line = line.rstrip('\\n')\n if line[0] == '!':\n print(line[1:])\n elif line[0] != '#' and not line.isspace():\n xs = line.split(delimiter)\n if not first_line_read:\n if line == '---':\n toprule = '\\n' + indent + rule('top')\n else:\n header = make_header(alignment, border, custom_header, xs)\n if stretch:\n print('\\\\bgroup')\n print('\\\\def\\\\arraystretch{{{}}}%'.format(stretch_factor))\n print('\\\\begin{{tabular}}{{{}}}'.format(header), toprule)\n first_line_read = True\n if line == '---':\n if first_line_read:\n print(rule('bottom'))\n elif line == '--':\n print(rule('mid'))\n else:\n if previous_line_read and border == 'grid':\n print(rule('mid'))\n print(indent, ' & '.join(xs), '\\\\\\\\')\n previous_line_read = True\nprint('\\\\end{tabular}')\nif stretch:\n print('\\\\egroup')\nif standalone:\n print('\\\\end{document}')\n",
"step-5": "#!/usr/local/bin/python3\nfrom sys import stdin\nimport argparse\n\n# Default values\nalignment = 'l'\nborder = 'none'\nstretch_factor = '1.0'\ntoprule = ''\n\n# Default options\ncustom_header = False\nstandalone = False\nstretch = False\nbooktabs = False\n\n# Parsing command-line options\nparser = argparse.ArgumentParser('<stdin> | csv2table')\nparser.add_argument('-a', action='store_true', help='Create standalone tex document')\nparser.add_argument('-b', action='store_true', help='Use booktab rules')\nparser.add_argument('-box', action='store_true', help='Draw box border')\nparser.add_argument('-c', action='store_true', help='Align all elements center')\nparser.add_argument('-d', default=',', help='Table column delimiter')\nparser.add_argument('-f', default='10', help='Font size in standalone document')\nparser.add_argument('-grid', action='store_true', help='Draw grid border')\nparser.add_argument('-i', type=int, default=4, help='Number of spaces to indent elements')\nparser.add_argument('-l', action='store_true', help='Align all elements left')\nparser.add_argument('-r', action='store_true', help='Align all elements right')\nparser.add_argument('-s', default='1.0', help='Table stretch factor')\nparser.add_argument('-t', default='nil', help='Custom table layout string')\nargs = parser.parse_args()\nif args.a:\n standalone = True\nif args.b:\n booktabs = True\nif args.box:\n border = 'box'\nif args.c:\n alignment = 'c'\ndelimiter = args.d\nfont = args.f\nif args.grid:\n border = 'grid'\nindent = ''.join([' '] * args.i)\nif args.l:\n alignment = 'l'\nif args.r:\n alignment = 'r'\nif args.s != '1.0':\n stretch = True\n stretch_factor = args.s\nif args.t != 'nil':\n custom_header = args.t\n\n# Global reader state\nfirst_line_read = False\nprevious_line_read = False\n\n# Returning proper rule types\ndef rule(type):\n if booktabs:\n if type == 'top':\n return '\\\\toprule'\n if type == 'mid':\n return '\\\\midrule'\n if type == 'bottom':\n return 
'\\\\bottomrule'\n else:\n return '\\\\hline'\n\n# Returning proper header string\ndef make_header(alignment, border, custom, xs):\n if custom:\n return custom\n fields_n = len(xs)\n fields = [alignment] * fields_n\n if border == 'box':\n return '| ' + ' '.join(fields) + ' |'\n if border == 'grid':\n return '| ' + ' | '.join(fields) + ' |'\n else:\n return ' '.join(fields)\n\n# Placing standalone header\nif standalone:\n print('\\\\documentclass[a4paper,{}pt]{{article}}'.format(font))\n if booktabs:\n print('\\\\usepackage{booktabs}')\n print('\\\\begin{document}')\n print('\\\\pagenumbering{gobble}')\n\n# Main parser\nfor line in stdin:\n line = line.rstrip('\\n')\n # ! Text is passed literally\n if line[0] == '!':\n print(line[1:])\n # # Text is commented out\n elif line[0] != '#' and not line.isspace():\n xs = line.split(delimiter)\n if not first_line_read:\n if line == '---':\n toprule = '\\n' + indent + rule('top')\n else:\n header = make_header(alignment, border, custom_header, xs)\n if stretch:\n print('\\\\bgroup')\n print('\\\\def\\\\arraystretch{{{}}}%'.format(stretch_factor))\n print('\\\\begin{{tabular}}{{{}}}'.format(header), toprule)\n first_line_read = True\n if line == '---':\n if first_line_read:\n print(rule('bottom'))\n elif line == '--':\n print(rule('mid'))\n else:\n if previous_line_read and border == 'grid':\n print(rule('mid'))\n print(indent,' & '.join(xs), '\\\\\\\\')\n previous_line_read = True\nprint('\\\\end{tabular}')\nif stretch:\n print('\\\\egroup')\nif standalone:\n print('\\\\end{document}')\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class Individual:
<|reserved_special_token_0|>
@property
def dir(self):
"""Get the unitary vector of direction.
Returns:
numpy.ndarray: The unitary vector of direction.
"""
return unit_vector(normalize_angle(self.angle))
<|reserved_special_token_0|>
def turn_by(self, dangle, dt):
"""Movement from the given angular speed.
Args:
dangle (float): The angular variation (in radians).
dt (float): The simulation time step (in seconds).
"""
self.angle += np.clip(dangle, -dt * self.turning_rate, dt * self.
turning_rate)
self.angle = normalize_angle(self.angle)
def turn_to(self, angle, dt):
"""Turn to the desired angle.
Args:
angle (float): The desired orientation (in radians).
dt (float): The simulation time step (in seconds).
"""
a = normalize_angle(angle - self.angle)
self.turn_by(a, dt)
def tick(self, dt):
"""Update function.
Update the position wrt. the velocity.
Args:
dt (float): simulation time step.
"""
self.pos += self.vel * dt
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Individual:
<|reserved_special_token_0|>
@property
def dir(self):
"""Get the unitary vector of direction.
Returns:
numpy.ndarray: The unitary vector of direction.
"""
return unit_vector(normalize_angle(self.angle))
@property
def vel(self):
"""Get the velocity.
Returns:
numpy.ndarray: The velocity vector (in length units per seconds).
"""
return self.speed * self.dir
def turn_by(self, dangle, dt):
"""Movement from the given angular speed.
Args:
dangle (float): The angular variation (in radians).
dt (float): The simulation time step (in seconds).
"""
self.angle += np.clip(dangle, -dt * self.turning_rate, dt * self.
turning_rate)
self.angle = normalize_angle(self.angle)
def turn_to(self, angle, dt):
"""Turn to the desired angle.
Args:
angle (float): The desired orientation (in radians).
dt (float): The simulation time step (in seconds).
"""
a = normalize_angle(angle - self.angle)
self.turn_by(a, dt)
def tick(self, dt):
"""Update function.
Update the position wrt. the velocity.
Args:
dt (float): simulation time step.
"""
self.pos += self.vel * dt
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Individual:
def __init__(self, color, pos, ror, roo, roa, angle=0, speed=1.0,
turning_rate=0.2):
"""Constructor of Individual.
Args:
color (Color): color for canvas visualisation.
pos (numpy.ndarray): Initial position.
angle (float, optional): Initial orientation.
"""
self.pos = np.array(pos, dtype='float')
"""numpy.ndarray: The position (in length units)."""
self.angle = normalize_angle(angle)
"""float: The orientation (in radians)."""
self.color = color
"""The color to display."""
self.speed = speed
"""float: The speed (in length units per seconds)."""
self.turning_rate = turning_rate
"""float: The angular speed (in radians per seconds)."""
self.ror = ror
"""float: The range of repulsion (in length units)."""
self.roo = roo
"""float: The range of orientation (in length units)."""
self.roa = roa
"""float: The range of attraction (in length units)."""
@property
def dir(self):
"""Get the unitary vector of direction.
Returns:
numpy.ndarray: The unitary vector of direction.
"""
return unit_vector(normalize_angle(self.angle))
@property
def vel(self):
"""Get the velocity.
Returns:
numpy.ndarray: The velocity vector (in length units per seconds).
"""
return self.speed * self.dir
def turn_by(self, dangle, dt):
"""Movement from the given angular speed.
Args:
dangle (float): The angular variation (in radians).
dt (float): The simulation time step (in seconds).
"""
self.angle += np.clip(dangle, -dt * self.turning_rate, dt * self.
turning_rate)
self.angle = normalize_angle(self.angle)
def turn_to(self, angle, dt):
"""Turn to the desired angle.
Args:
angle (float): The desired orientation (in radians).
dt (float): The simulation time step (in seconds).
"""
a = normalize_angle(angle - self.angle)
self.turn_by(a, dt)
def tick(self, dt):
"""Update function.
Update the position wrt. the velocity.
Args:
dt (float): simulation time step.
"""
self.pos += self.vel * dt
<|reserved_special_token_1|>
import numpy as np
from . import BOID_NOSE_LEN
from .utils import normalize_angle, unit_vector
class Individual:
    """A single agent of the flock simulation.

    Holds the agent's kinematic state (position, heading, speed) and the
    three interaction radii (repulsion / orientation / attraction) used
    by the flocking rules.
    """

    def __init__(self, color, pos, ror, roo, roa, angle=0, speed=1.0,
        turning_rate=0.2):
        """Create an agent.

        Args:
            color (Color): Display color for the canvas.
            pos (numpy.ndarray): Initial position (in length units).
            ror (float): Range of repulsion (in length units).
            roo (float): Range of orientation (in length units).
            roa (float): Range of attraction (in length units).
            angle (float, optional): Initial heading (in radians).
            speed (float, optional): Speed (in length units per second).
            turning_rate (float, optional): Maximum angular speed
                (in radians per second).
        """
        self.pos = np.array(pos, dtype='float')      # position vector (length units)
        self.angle = normalize_angle(angle)          # heading, kept normalized
        self.color = color                           # display color
        self.speed = speed                           # linear speed (units/s)
        self.turning_rate = turning_rate             # max angular speed (rad/s)
        self.ror = ror                               # repulsion range (length units)
        self.roo = roo                               # orientation range (length units)
        self.roa = roa                               # attraction range (length units)

    @property
    def dir(self):
        """numpy.ndarray: Unit vector pointing along the current heading."""
        return unit_vector(normalize_angle(self.angle))

    @property
    def vel(self):
        """numpy.ndarray: Velocity vector (in length units per second)."""
        return self.dir * self.speed

    def turn_by(self, dangle, dt):
        """Rotate by ``dangle``, clamped to the per-step turning budget.

        Args:
            dangle (float): Requested angular change (in radians).
            dt (float): Simulation time step (in seconds).
        """
        # The agent may turn at most turning_rate rad/s; clamp the request,
        # then re-normalize the heading into [-pi, pi).
        limit = dt * self.turning_rate
        self.angle = normalize_angle(
            self.angle + np.clip(dangle, -limit, limit))

    def turn_to(self, angle, dt):
        """Steer toward the absolute heading ``angle``.

        Args:
            angle (float): Desired heading (in radians).
            dt (float): Simulation time step (in seconds).
        """
        # Shortest signed angular difference, then a rate-limited turn.
        self.turn_by(normalize_angle(angle - self.angle), dt)

    def tick(self, dt):
        """Advance the position one time step at the current velocity.

        Args:
            dt (float): Simulation time step (in seconds).
        """
        self.pos += self.vel * dt
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
import numpy as np
from . import BOID_NOSE_LEN
from .utils import normalize_angle, unit_vector
class Individual:
    """One agent of the flock: kinematic state plus interaction ranges."""

    def __init__(self, color, pos, ror, roo, roa, angle=0, speed=1.0, turning_rate=0.2):
        """Constructor of Individual.

        Args:
            color (Color): color for canvas visualisation.
            pos (numpy.ndarray): Initial position (in length units).
            ror (float): Range of repulsion (in length units).
            roo (float): Range of orientation (in length units).
            roa (float): Range of attraction (in length units).
            angle (float, optional): Initial orientation (in radians).
            speed (float, optional): Speed (in length units per second).
            turning_rate (float, optional): Maximum angular speed
                (in radians per second).
        """
        self.pos = np.array(pos, dtype="float")
        """numpy.ndarray: The position (in length units)."""
        self.angle = normalize_angle(angle)
        """float: The orientation (in radians), normalized to [-pi, pi)."""
        self.color = color
        """The color to display."""
        self.speed = speed
        """float: The speed (in length units per second)."""
        self.turning_rate = turning_rate
        """float: The maximum angular speed (in radians per second)."""
        self.ror = ror
        """float: The range of repulsion (in length units)."""
        self.roo = roo
        """float: The range of orientation (in length units)."""
        self.roa = roa
        """float: The range of attraction (in length units)."""

    @property
    def dir(self):
        """Get the unitary vector of direction.

        Returns:
            numpy.ndarray: The unitary vector of direction.
        """
        return unit_vector(normalize_angle(self.angle))

    @property
    def vel(self):
        """Get the velocity.

        Returns:
            numpy.ndarray: The velocity vector (in length units per second).
        """
        return self.speed * self.dir

    def turn_by(self, dangle, dt):
        """Rotate by the given angle, limited by the turning rate.

        Args:
            dangle (float): The angular variation (in radians).
            dt (float): The simulation time step (in seconds).
        """
        # Don't turn too fast: clamp the change to +/- dt * turning_rate.
        self.angle += np.clip(dangle, -dt * self.turning_rate, dt * self.turning_rate)

        # Keep angle in range [-pi, pi)
        self.angle = normalize_angle(self.angle)

    def turn_to(self, angle, dt):
        """Turn toward the desired absolute heading.

        Args:
            angle (float): The desired orientation (in radians).
            dt (float): The simulation time step (in seconds).
        """
        # Shortest signed angular difference, then a rate-limited turn.
        a = normalize_angle(angle - self.angle)
        self.turn_by(a, dt)

    def tick(self, dt):
        """Update function.

        Advance the position by one time step at the current velocity.

        Args:
            dt (float): simulation time step (in seconds).
        """
        self.pos += self.vel * dt
|
flexible
|
{
"blob_id": "386e491f6b10ca27f513d678c632571c29093ad2",
"index": 5825,
"step-1": "<mask token>\n\n\nclass Individual:\n <mask token>\n\n @property\n def dir(self):\n \"\"\"Get the unitary vector of direction.\n\n Returns:\n numpy.ndarray: The unitary vector of direction.\n\n \"\"\"\n return unit_vector(normalize_angle(self.angle))\n <mask token>\n\n def turn_by(self, dangle, dt):\n \"\"\"Movement from the given angular speed.\n\n Args:\n dangle (float): The angular variation (in radians).\n dt (float): The simulation time step (in seconds).\n\n \"\"\"\n self.angle += np.clip(dangle, -dt * self.turning_rate, dt * self.\n turning_rate)\n self.angle = normalize_angle(self.angle)\n\n def turn_to(self, angle, dt):\n \"\"\"Turn to the desired angle.\n\n Args:\n angle (float): The desired orientation (in radians).\n dt (float): The simulation time step (in seconds).\n\n \"\"\"\n a = normalize_angle(angle - self.angle)\n self.turn_by(a, dt)\n\n def tick(self, dt):\n \"\"\"Update function.\n\n Update the position wrt. the velocity.\n\n Args:\n dt (float): simulation time step.\n\n \"\"\"\n self.pos += self.vel * dt\n",
"step-2": "<mask token>\n\n\nclass Individual:\n <mask token>\n\n @property\n def dir(self):\n \"\"\"Get the unitary vector of direction.\n\n Returns:\n numpy.ndarray: The unitary vector of direction.\n\n \"\"\"\n return unit_vector(normalize_angle(self.angle))\n\n @property\n def vel(self):\n \"\"\"Get the velocity.\n\n Returns:\n numpy.ndarray: The velocity vector (in length units per seconds).\n\n \"\"\"\n return self.speed * self.dir\n\n def turn_by(self, dangle, dt):\n \"\"\"Movement from the given angular speed.\n\n Args:\n dangle (float): The angular variation (in radians).\n dt (float): The simulation time step (in seconds).\n\n \"\"\"\n self.angle += np.clip(dangle, -dt * self.turning_rate, dt * self.\n turning_rate)\n self.angle = normalize_angle(self.angle)\n\n def turn_to(self, angle, dt):\n \"\"\"Turn to the desired angle.\n\n Args:\n angle (float): The desired orientation (in radians).\n dt (float): The simulation time step (in seconds).\n\n \"\"\"\n a = normalize_angle(angle - self.angle)\n self.turn_by(a, dt)\n\n def tick(self, dt):\n \"\"\"Update function.\n\n Update the position wrt. the velocity.\n\n Args:\n dt (float): simulation time step.\n\n \"\"\"\n self.pos += self.vel * dt\n",
"step-3": "<mask token>\n\n\nclass Individual:\n\n def __init__(self, color, pos, ror, roo, roa, angle=0, speed=1.0,\n turning_rate=0.2):\n \"\"\"Constructor of Individual.\n\n Args:\n color (Color): color for canvas visualisation.\n pos (numpy.ndarray): Initial position.\n angle (float, optional): Initial orientation.\n\n \"\"\"\n self.pos = np.array(pos, dtype='float')\n \"\"\"numpy.ndarray: The position (in length units).\"\"\"\n self.angle = normalize_angle(angle)\n \"\"\"float: The orientation (in radians).\"\"\"\n self.color = color\n \"\"\"The color to display.\"\"\"\n self.speed = speed\n \"\"\"float: The speed (in length units per seconds).\"\"\"\n self.turning_rate = turning_rate\n \"\"\"float: The angular speed (in radians per seconds).\"\"\"\n self.ror = ror\n \"\"\"float: The range of repulsion (in length units).\"\"\"\n self.roo = roo\n \"\"\"float: The range of orientation (in length units).\"\"\"\n self.roa = roa\n \"\"\"float: The range of attraction (in length units).\"\"\"\n\n @property\n def dir(self):\n \"\"\"Get the unitary vector of direction.\n\n Returns:\n numpy.ndarray: The unitary vector of direction.\n\n \"\"\"\n return unit_vector(normalize_angle(self.angle))\n\n @property\n def vel(self):\n \"\"\"Get the velocity.\n\n Returns:\n numpy.ndarray: The velocity vector (in length units per seconds).\n\n \"\"\"\n return self.speed * self.dir\n\n def turn_by(self, dangle, dt):\n \"\"\"Movement from the given angular speed.\n\n Args:\n dangle (float): The angular variation (in radians).\n dt (float): The simulation time step (in seconds).\n\n \"\"\"\n self.angle += np.clip(dangle, -dt * self.turning_rate, dt * self.\n turning_rate)\n self.angle = normalize_angle(self.angle)\n\n def turn_to(self, angle, dt):\n \"\"\"Turn to the desired angle.\n\n Args:\n angle (float): The desired orientation (in radians).\n dt (float): The simulation time step (in seconds).\n\n \"\"\"\n a = normalize_angle(angle - self.angle)\n self.turn_by(a, dt)\n\n def 
tick(self, dt):\n \"\"\"Update function.\n\n Update the position wrt. the velocity.\n\n Args:\n dt (float): simulation time step.\n\n \"\"\"\n self.pos += self.vel * dt\n",
"step-4": "import numpy as np\nfrom . import BOID_NOSE_LEN\nfrom .utils import normalize_angle, unit_vector\n\n\nclass Individual:\n\n def __init__(self, color, pos, ror, roo, roa, angle=0, speed=1.0,\n turning_rate=0.2):\n \"\"\"Constructor of Individual.\n\n Args:\n color (Color): color for canvas visualisation.\n pos (numpy.ndarray): Initial position.\n angle (float, optional): Initial orientation.\n\n \"\"\"\n self.pos = np.array(pos, dtype='float')\n \"\"\"numpy.ndarray: The position (in length units).\"\"\"\n self.angle = normalize_angle(angle)\n \"\"\"float: The orientation (in radians).\"\"\"\n self.color = color\n \"\"\"The color to display.\"\"\"\n self.speed = speed\n \"\"\"float: The speed (in length units per seconds).\"\"\"\n self.turning_rate = turning_rate\n \"\"\"float: The angular speed (in radians per seconds).\"\"\"\n self.ror = ror\n \"\"\"float: The range of repulsion (in length units).\"\"\"\n self.roo = roo\n \"\"\"float: The range of orientation (in length units).\"\"\"\n self.roa = roa\n \"\"\"float: The range of attraction (in length units).\"\"\"\n\n @property\n def dir(self):\n \"\"\"Get the unitary vector of direction.\n\n Returns:\n numpy.ndarray: The unitary vector of direction.\n\n \"\"\"\n return unit_vector(normalize_angle(self.angle))\n\n @property\n def vel(self):\n \"\"\"Get the velocity.\n\n Returns:\n numpy.ndarray: The velocity vector (in length units per seconds).\n\n \"\"\"\n return self.speed * self.dir\n\n def turn_by(self, dangle, dt):\n \"\"\"Movement from the given angular speed.\n\n Args:\n dangle (float): The angular variation (in radians).\n dt (float): The simulation time step (in seconds).\n\n \"\"\"\n self.angle += np.clip(dangle, -dt * self.turning_rate, dt * self.\n turning_rate)\n self.angle = normalize_angle(self.angle)\n\n def turn_to(self, angle, dt):\n \"\"\"Turn to the desired angle.\n\n Args:\n angle (float): The desired orientation (in radians).\n dt (float): The simulation time step (in seconds).\n\n 
\"\"\"\n a = normalize_angle(angle - self.angle)\n self.turn_by(a, dt)\n\n def tick(self, dt):\n \"\"\"Update function.\n\n Update the position wrt. the velocity.\n\n Args:\n dt (float): simulation time step.\n\n \"\"\"\n self.pos += self.vel * dt\n",
"step-5": "# -*- coding: utf-8 -*-\n\nimport numpy as np\n\nfrom . import BOID_NOSE_LEN\nfrom .utils import normalize_angle, unit_vector\n\n\nclass Individual:\n def __init__(self, color, pos, ror, roo, roa, angle=0, speed=1.0, turning_rate=0.2):\n \"\"\"Constructor of Individual.\n\n Args:\n color (Color): color for canvas visualisation.\n pos (numpy.ndarray): Initial position.\n angle (float, optional): Initial orientation.\n\n \"\"\"\n self.pos = np.array(pos, dtype=\"float\")\n \"\"\"numpy.ndarray: The position (in length units).\"\"\"\n self.angle = normalize_angle(angle)\n \"\"\"float: The orientation (in radians).\"\"\"\n self.color = color\n \"\"\"The color to display.\"\"\"\n self.speed = speed\n \"\"\"float: The speed (in length units per seconds).\"\"\"\n self.turning_rate = turning_rate\n \"\"\"float: The angular speed (in radians per seconds).\"\"\"\n self.ror = ror\n \"\"\"float: The range of repulsion (in length units).\"\"\"\n self.roo = roo\n \"\"\"float: The range of orientation (in length units).\"\"\"\n self.roa = roa\n \"\"\"float: The range of attraction (in length units).\"\"\"\n\n @property\n def dir(self):\n \"\"\"Get the unitary vector of direction.\n\n Returns:\n numpy.ndarray: The unitary vector of direction.\n\n \"\"\"\n return unit_vector(normalize_angle(self.angle))\n\n @property\n def vel(self):\n \"\"\"Get the velocity.\n\n Returns:\n numpy.ndarray: The velocity vector (in length units per seconds).\n\n \"\"\"\n return self.speed * self.dir\n\n def turn_by(self, dangle, dt):\n \"\"\"Movement from the given angular speed.\n\n Args:\n dangle (float): The angular variation (in radians).\n dt (float): The simulation time step (in seconds).\n\n \"\"\"\n # Don't turn too fast\n self.angle += np.clip(dangle, -dt * self.turning_rate, dt * self.turning_rate)\n\n # Keep angle in range [-pi, pi)\n self.angle = normalize_angle(self.angle)\n\n def turn_to(self, angle, dt):\n \"\"\"Turn to the desired angle.\n\n Args:\n angle (float): The desired 
orientation (in radians).\n dt (float): The simulation time step (in seconds).\n\n \"\"\"\n a = normalize_angle(angle - self.angle)\n self.turn_by(a, dt)\n\n def tick(self, dt):\n \"\"\"Update function.\n\n Update the position wrt. the velocity.\n\n Args:\n dt (float): simulation time step.\n\n \"\"\"\n self.pos += self.vel * dt\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for line in f:
line = line.strip()
if len(line) == 0:
continue
name, *marks = line.split(',')
if len(marks) == 0:
continue
marks = filter(str.isdigit, marks)
total = sum(map(int, marks))
print(f'{name:15} {total:4}')
f.close()
<|reserved_special_token_1|>
# Print each student's name and the total of their numeric marks.
# Input: marks.txt, one CSV line per student: name,mark1,mark2,...
# Blank lines are skipped; non-numeric mark fields are ignored.
# Fix: use a context manager so the file is closed even if an exception
# occurs while reading (the original f.close() was skipped on error).
with open('marks.txt', 'rt') as f:
    for line in f:
        line = line.strip()
        if len(line) == 0:
            continue
        name, *marks = line.split(',')
        if len(marks) == 0:  # a name with no mark fields at all
            continue
        marks = filter(str.isdigit, marks)  # keep only all-digit fields
        total = sum(map(int, marks))
        print(f'{name:15} {total:4}')
<|reserved_special_token_1|>
# Print each student's name and the total of their numeric marks.
# Input: marks.txt, one CSV line per student: name,mark1,mark2,...
f = open("marks.txt", "rt")
for line in f:
    line = line.strip()
    if len(line) == 0:  # Blank line
        continue

    # First field is the name; everything after the first comma is a mark.
    name, *marks = line.split(",")
    if len(marks) == 0:  # name with no mark fields at all
        continue

    marks = filter(str.isdigit, marks)  # Take only all-digit fields
    total = sum(map(int, marks))  # Convert str to int and sum
    # Name left-padded to 15 columns, total right-aligned in 4.
    print(f"{name:15} {total:4}")

# NOTE(review): f.close() is skipped if an exception occurs above;
# a `with open(...)` block would be safer.
f.close()
|
flexible
|
{
"blob_id": "00587de133ee68415f31649f147fbff7e9bf65d5",
"index": 3337,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor line in f:\n line = line.strip()\n if len(line) == 0:\n continue\n name, *marks = line.split(',')\n if len(marks) == 0:\n continue\n marks = filter(str.isdigit, marks)\n total = sum(map(int, marks))\n print(f'{name:15} {total:4}')\nf.close()\n",
"step-3": "f = open('marks.txt', 'rt')\nfor line in f:\n line = line.strip()\n if len(line) == 0:\n continue\n name, *marks = line.split(',')\n if len(marks) == 0:\n continue\n marks = filter(str.isdigit, marks)\n total = sum(map(int, marks))\n print(f'{name:15} {total:4}')\nf.close()\n",
"step-4": "# Print name and marks\nf = open(\"marks.txt\", \"rt\")\nfor line in f:\n line = line.strip()\n if len(line) == 0: # Blank line\n continue\n\n name, *marks = line.split(\",\")\n if len(marks) == 0:\n continue\n\n marks = filter(str.isdigit, marks) # Take only numbers\n total = sum(map(int, marks)) # Convert str to it and sum it\n print(f\"{name:15} {total:4}\")\n\nf.close()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Python 2 program to count the number of digits in a number.
# One-liner alternative (Python 2): print len(str(input('Enter No.: ')))

# Python 2 input() evaluates the typed text, so a typed integer yields an int.
num = input("Enter no.: ")

i = 1  # a number always has at least one digit
while num / 10:  # truthy while the number still has more than one digit
    num = num / 10  # Python 2 integer division: drop the last digit
    i += 1
    if num < 10:  # down to a single digit: stop counting
        break
print i  # Python 2 print statement
# NOTE(review): negative input is not handled correctly here — Python 2's
# `/` floors toward -infinity for ints, so e.g. -5 reports 2 digits.
# TODO confirm the intended input domain is non-negative integers.
|
normal
|
{
"blob_id": "37748e3dd17f2bdf05bb28b4dfded12de97e37e4",
"index": 9619,
"step-1": "# Python program to count number of digits in a number.\n\n# print len(str(input('Enter No.: ')))\n\nnum = input(\"Enter no.: \")\n\ni = 1\nwhile num / 10:\n num = num / 10\n i += 1\n if num < 10:\n break\nprint i\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import requests
import tkinter as tk
from tkinter.font import Font
from time import strptime
class Window(tk.Tk):
    """Main window browsing Taipei YouBike stations by district.

    Layout, top to bottom:
      * one button per district (fetched from the web service at startup);
      * radio buttons listing the stations of the selected district;
      * labels showing details of the station picked via radio button.
    """

    def __init__(self):
        super().__init__()
        # Fetch the list of districts from the web service.
        res = requests.get('https://flask-robert.herokuapp.com/youbike')
        jsonObj = res.json()
        areas = jsonObj['areas']
        # Build the interface.
        self.title("台北市行政區")
        topFrame = tk.Frame(self,bd=2,relief=tk.GROOVE,padx=20,pady=10)
        buttonFont = Font(family='Helvetica', size=20)
        # One button per district, six buttons per row.
        for index, area in enumerate(areas):
            if index % 6 == 0:
                parentframe = tk.Frame(topFrame)
                parentframe.pack()
            btn = tk.Button(parentframe, text=area, font=buttonFont, padx=5, pady=5)
            btn.bind('<Button-1>', self.userClick)
            btn.pack(side=tk.LEFT, padx=5)
        topFrame.pack(padx=20, pady=30)
        # Build the radio-button area below the district buttons.
        self.fixedWidthFrame = tk.Frame(self,height=600,bg='red')
        self.createdRadioButtonFrame()
        self.fixedWidthFrame.pack(padx=20)
        # Build the message (station detail) area.
        messageDisplayFrame = tk.Frame(self,bd=2,relief=tk.GROOVE,padx=20,pady=10)
        self.mdayLabel = tk.Label(messageDisplayFrame, text="記錄時間:")
        self.mdayLabel.pack(anchor=tk.W)
        self.snaLabel = tk.Label(messageDisplayFrame,text="站名:")
        self.snaLabel.pack(anchor=tk.W)
        self.arLabel = tk.Label(messageDisplayFrame,text="地址:")
        self.arLabel.pack(anchor=tk.W)
        self.bempLabel = tk.Label(messageDisplayFrame, text="空位數量:")
        self.bempLabel.pack(anchor=tk.W)
        self.sbiLabel = tk.Label(messageDisplayFrame, text="可借車數:")
        self.sbiLabel.pack(anchor=tk.W)
        self.totLabel = tk.Label(messageDisplayFrame, text="總車數:")
        self.totLabel.pack(anchor=tk.W)
        messageDisplayFrame.pack(expand=True,fill=tk.BOTH,padx=20,pady=30)

    def userClick(self,event):
        """Handle a district-button click: reload that district's stations.

        Args:
            event: Tk event whose widget is the clicked district button.
        """
        # Tear down the current radio-button frame before rebuilding it.
        self.bottomFrame.destroy()
        selectedArea = event.widget['text']
        urlString = "https://flask-robert.herokuapp.com/youbike/%s" % selectedArea
        res = requests.get(urlString)
        jsonobj = res.json()
        # presumably 'data' is a list of station records — verify against the service
        self.areas = jsonobj['data']
        snaList = []
        for area in self.areas:
            snaList.append(area["sna"])  # "sna" = station name
        self.createdRadioButtonFrame(data=snaList)

    def createdRadioButtonFrame(self,data=None):
        """(Re)build the radio buttons listing station names.

        Args:
            data (list[str], optional): Station names to show. When None,
                the stations of a default district are fetched instead.
        """
        self.bottomFrame = tk.Frame(self.fixedWidthFrame, bd=2, relief=tk.GROOVE, padx=20, pady=10)
        if data == None:
            # Default district shown before the user clicks any button.
            urlString = "https://flask-robert.herokuapp.com/youbike/南港區"
            res = requests.get(urlString)
            jsonobj = res.json()
            self.areas = jsonobj['data']
            snaList = []
            for area in self.areas:
                snaList.append(area["sna"])
            self.radioButtonData = snaList
        else:
            self.radioButtonData = data
        # Shared variable holding the index of the selected station.
        self.var = tk.IntVar()
        # Ten radio buttons per column; start a new column every tenth item.
        for index, data in enumerate(self.radioButtonData):
            if index % 10 == 0:
                parentframe = tk.Frame(self.bottomFrame)
                parentframe.pack(side=tk.LEFT,expand=True,fill=tk.Y)
            # NOTE(review): .pack() returns None, so radioButton is always
            # None here — harmless since the variable is never used.
            radioButton = tk.Radiobutton(parentframe, text=data, value=index, variable=self.var,command=self.userChoicedRadioButton).pack(anchor=tk.W)
        self.bottomFrame.pack()
        self.var.set(0)

    def userChoicedRadioButton(self):
        """Display the details of the station selected by the radio buttons."""
        index = self.var.get()
        infomation = self.areas[index]
        print(infomation)
        # "mday" is parsed with the YYYYMMDDHHMMSS format string below.
        datetimeString = infomation["mday"]
        datetimeFormat = "%Y%m%d%H%M%S"
        structTime = strptime(datetimeString,datetimeFormat)
        self.mdayLabel["text"] = "記錄時間:%d年%d月%d日 %d:%d:%d" % (structTime.tm_year,structTime.tm_mon,structTime.tm_mday,structTime.tm_hour,structTime.tm_min,structTime.tm_sec)
        self.snaLabel["text"] = "站名:%s" % infomation["sna"]
        self.arLabel.configure(text="地址:{0:s}".format(infomation["ar"]))
        # assumes "bemp"/"sbi"/"tot" are ints ({0:d} would raise otherwise) — TODO confirm
        self.bempLabel["text"] = "空位數量:{0:d}".format(infomation["bemp"])
        self.sbiLabel["text"] = "可借車數:{0:d}".format(infomation["sbi"])
        self.totLabel["text"] = "總車數:{0:d}".format(infomation["tot"])
# Start the GUI only when run as a script (not when imported as a module).
if __name__ == "__main__":
    window = Window()
    window.mainloop()
|
normal
|
{
"blob_id": "f9becdb48583423e7bd3730d1cd74a6a016663dc",
"index": 1768,
"step-1": "<mask token>\n\n\nclass Window(tk.Tk):\n\n def __init__(self):\n super().__init__()\n res = requests.get('https://flask-robert.herokuapp.com/youbike')\n jsonObj = res.json()\n areas = jsonObj['areas']\n self.title('台北市行政區')\n topFrame = tk.Frame(self, bd=2, relief=tk.GROOVE, padx=20, pady=10)\n buttonFont = Font(family='Helvetica', size=20)\n for index, area in enumerate(areas):\n if index % 6 == 0:\n parentframe = tk.Frame(topFrame)\n parentframe.pack()\n btn = tk.Button(parentframe, text=area, font=buttonFont, padx=5,\n pady=5)\n btn.bind('<Button-1>', self.userClick)\n btn.pack(side=tk.LEFT, padx=5)\n topFrame.pack(padx=20, pady=30)\n self.fixedWidthFrame = tk.Frame(self, height=600, bg='red')\n self.createdRadioButtonFrame()\n self.fixedWidthFrame.pack(padx=20)\n messageDisplayFrame = tk.Frame(self, bd=2, relief=tk.GROOVE, padx=\n 20, pady=10)\n self.mdayLabel = tk.Label(messageDisplayFrame, text='記錄時間:')\n self.mdayLabel.pack(anchor=tk.W)\n self.snaLabel = tk.Label(messageDisplayFrame, text='站名:')\n self.snaLabel.pack(anchor=tk.W)\n self.arLabel = tk.Label(messageDisplayFrame, text='地址:')\n self.arLabel.pack(anchor=tk.W)\n self.bempLabel = tk.Label(messageDisplayFrame, text='空位數量:')\n self.bempLabel.pack(anchor=tk.W)\n self.sbiLabel = tk.Label(messageDisplayFrame, text='可借車數:')\n self.sbiLabel.pack(anchor=tk.W)\n self.totLabel = tk.Label(messageDisplayFrame, text='總車數:')\n self.totLabel.pack(anchor=tk.W)\n messageDisplayFrame.pack(expand=True, fill=tk.BOTH, padx=20, pady=30)\n\n def userClick(self, event):\n self.bottomFrame.destroy()\n selectedArea = event.widget['text']\n urlString = ('https://flask-robert.herokuapp.com/youbike/%s' %\n selectedArea)\n res = requests.get(urlString)\n jsonobj = res.json()\n self.areas = jsonobj['data']\n snaList = []\n for area in self.areas:\n snaList.append(area['sna'])\n self.createdRadioButtonFrame(data=snaList)\n\n def createdRadioButtonFrame(self, data=None):\n self.bottomFrame = tk.Frame(self.fixedWidthFrame, 
bd=2, relief=tk.\n GROOVE, padx=20, pady=10)\n if data == None:\n urlString = 'https://flask-robert.herokuapp.com/youbike/南港區'\n res = requests.get(urlString)\n jsonobj = res.json()\n self.areas = jsonobj['data']\n snaList = []\n for area in self.areas:\n snaList.append(area['sna'])\n self.radioButtonData = snaList\n else:\n self.radioButtonData = data\n self.var = tk.IntVar()\n for index, data in enumerate(self.radioButtonData):\n if index % 10 == 0:\n parentframe = tk.Frame(self.bottomFrame)\n parentframe.pack(side=tk.LEFT, expand=True, fill=tk.Y)\n radioButton = tk.Radiobutton(parentframe, text=data, value=\n index, variable=self.var, command=self.userChoicedRadioButton\n ).pack(anchor=tk.W)\n self.bottomFrame.pack()\n self.var.set(0)\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Window(tk.Tk):\n\n def __init__(self):\n super().__init__()\n res = requests.get('https://flask-robert.herokuapp.com/youbike')\n jsonObj = res.json()\n areas = jsonObj['areas']\n self.title('台北市行政區')\n topFrame = tk.Frame(self, bd=2, relief=tk.GROOVE, padx=20, pady=10)\n buttonFont = Font(family='Helvetica', size=20)\n for index, area in enumerate(areas):\n if index % 6 == 0:\n parentframe = tk.Frame(topFrame)\n parentframe.pack()\n btn = tk.Button(parentframe, text=area, font=buttonFont, padx=5,\n pady=5)\n btn.bind('<Button-1>', self.userClick)\n btn.pack(side=tk.LEFT, padx=5)\n topFrame.pack(padx=20, pady=30)\n self.fixedWidthFrame = tk.Frame(self, height=600, bg='red')\n self.createdRadioButtonFrame()\n self.fixedWidthFrame.pack(padx=20)\n messageDisplayFrame = tk.Frame(self, bd=2, relief=tk.GROOVE, padx=\n 20, pady=10)\n self.mdayLabel = tk.Label(messageDisplayFrame, text='記錄時間:')\n self.mdayLabel.pack(anchor=tk.W)\n self.snaLabel = tk.Label(messageDisplayFrame, text='站名:')\n self.snaLabel.pack(anchor=tk.W)\n self.arLabel = tk.Label(messageDisplayFrame, text='地址:')\n self.arLabel.pack(anchor=tk.W)\n self.bempLabel = tk.Label(messageDisplayFrame, text='空位數量:')\n self.bempLabel.pack(anchor=tk.W)\n self.sbiLabel = tk.Label(messageDisplayFrame, text='可借車數:')\n self.sbiLabel.pack(anchor=tk.W)\n self.totLabel = tk.Label(messageDisplayFrame, text='總車數:')\n self.totLabel.pack(anchor=tk.W)\n messageDisplayFrame.pack(expand=True, fill=tk.BOTH, padx=20, pady=30)\n\n def userClick(self, event):\n self.bottomFrame.destroy()\n selectedArea = event.widget['text']\n urlString = ('https://flask-robert.herokuapp.com/youbike/%s' %\n selectedArea)\n res = requests.get(urlString)\n jsonobj = res.json()\n self.areas = jsonobj['data']\n snaList = []\n for area in self.areas:\n snaList.append(area['sna'])\n self.createdRadioButtonFrame(data=snaList)\n\n def createdRadioButtonFrame(self, data=None):\n self.bottomFrame = tk.Frame(self.fixedWidthFrame, 
bd=2, relief=tk.\n GROOVE, padx=20, pady=10)\n if data == None:\n urlString = 'https://flask-robert.herokuapp.com/youbike/南港區'\n res = requests.get(urlString)\n jsonobj = res.json()\n self.areas = jsonobj['data']\n snaList = []\n for area in self.areas:\n snaList.append(area['sna'])\n self.radioButtonData = snaList\n else:\n self.radioButtonData = data\n self.var = tk.IntVar()\n for index, data in enumerate(self.radioButtonData):\n if index % 10 == 0:\n parentframe = tk.Frame(self.bottomFrame)\n parentframe.pack(side=tk.LEFT, expand=True, fill=tk.Y)\n radioButton = tk.Radiobutton(parentframe, text=data, value=\n index, variable=self.var, command=self.userChoicedRadioButton\n ).pack(anchor=tk.W)\n self.bottomFrame.pack()\n self.var.set(0)\n\n def userChoicedRadioButton(self):\n index = self.var.get()\n infomation = self.areas[index]\n print(infomation)\n datetimeString = infomation['mday']\n datetimeFormat = '%Y%m%d%H%M%S'\n structTime = strptime(datetimeString, datetimeFormat)\n self.mdayLabel['text'] = '記錄時間:%d年%d月%d日 %d:%d:%d' % (structTime.\n tm_year, structTime.tm_mon, structTime.tm_mday, structTime.\n tm_hour, structTime.tm_min, structTime.tm_sec)\n self.snaLabel['text'] = '站名:%s' % infomation['sna']\n self.arLabel.configure(text='地址:{0:s}'.format(infomation['ar']))\n self.bempLabel['text'] = '空位數量:{0:d}'.format(infomation['bemp'])\n self.sbiLabel['text'] = '可借車數:{0:d}'.format(infomation['sbi'])\n self.totLabel['text'] = '總車數:{0:d}'.format(infomation['tot'])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Window(tk.Tk):\n\n def __init__(self):\n super().__init__()\n res = requests.get('https://flask-robert.herokuapp.com/youbike')\n jsonObj = res.json()\n areas = jsonObj['areas']\n self.title('台北市行政區')\n topFrame = tk.Frame(self, bd=2, relief=tk.GROOVE, padx=20, pady=10)\n buttonFont = Font(family='Helvetica', size=20)\n for index, area in enumerate(areas):\n if index % 6 == 0:\n parentframe = tk.Frame(topFrame)\n parentframe.pack()\n btn = tk.Button(parentframe, text=area, font=buttonFont, padx=5,\n pady=5)\n btn.bind('<Button-1>', self.userClick)\n btn.pack(side=tk.LEFT, padx=5)\n topFrame.pack(padx=20, pady=30)\n self.fixedWidthFrame = tk.Frame(self, height=600, bg='red')\n self.createdRadioButtonFrame()\n self.fixedWidthFrame.pack(padx=20)\n messageDisplayFrame = tk.Frame(self, bd=2, relief=tk.GROOVE, padx=\n 20, pady=10)\n self.mdayLabel = tk.Label(messageDisplayFrame, text='記錄時間:')\n self.mdayLabel.pack(anchor=tk.W)\n self.snaLabel = tk.Label(messageDisplayFrame, text='站名:')\n self.snaLabel.pack(anchor=tk.W)\n self.arLabel = tk.Label(messageDisplayFrame, text='地址:')\n self.arLabel.pack(anchor=tk.W)\n self.bempLabel = tk.Label(messageDisplayFrame, text='空位數量:')\n self.bempLabel.pack(anchor=tk.W)\n self.sbiLabel = tk.Label(messageDisplayFrame, text='可借車數:')\n self.sbiLabel.pack(anchor=tk.W)\n self.totLabel = tk.Label(messageDisplayFrame, text='總車數:')\n self.totLabel.pack(anchor=tk.W)\n messageDisplayFrame.pack(expand=True, fill=tk.BOTH, padx=20, pady=30)\n\n def userClick(self, event):\n self.bottomFrame.destroy()\n selectedArea = event.widget['text']\n urlString = ('https://flask-robert.herokuapp.com/youbike/%s' %\n selectedArea)\n res = requests.get(urlString)\n jsonobj = res.json()\n self.areas = jsonobj['data']\n snaList = []\n for area in self.areas:\n snaList.append(area['sna'])\n self.createdRadioButtonFrame(data=snaList)\n\n def createdRadioButtonFrame(self, data=None):\n self.bottomFrame = tk.Frame(self.fixedWidthFrame, 
bd=2, relief=tk.\n GROOVE, padx=20, pady=10)\n if data == None:\n urlString = 'https://flask-robert.herokuapp.com/youbike/南港區'\n res = requests.get(urlString)\n jsonobj = res.json()\n self.areas = jsonobj['data']\n snaList = []\n for area in self.areas:\n snaList.append(area['sna'])\n self.radioButtonData = snaList\n else:\n self.radioButtonData = data\n self.var = tk.IntVar()\n for index, data in enumerate(self.radioButtonData):\n if index % 10 == 0:\n parentframe = tk.Frame(self.bottomFrame)\n parentframe.pack(side=tk.LEFT, expand=True, fill=tk.Y)\n radioButton = tk.Radiobutton(parentframe, text=data, value=\n index, variable=self.var, command=self.userChoicedRadioButton\n ).pack(anchor=tk.W)\n self.bottomFrame.pack()\n self.var.set(0)\n\n def userChoicedRadioButton(self):\n index = self.var.get()\n infomation = self.areas[index]\n print(infomation)\n datetimeString = infomation['mday']\n datetimeFormat = '%Y%m%d%H%M%S'\n structTime = strptime(datetimeString, datetimeFormat)\n self.mdayLabel['text'] = '記錄時間:%d年%d月%d日 %d:%d:%d' % (structTime.\n tm_year, structTime.tm_mon, structTime.tm_mday, structTime.\n tm_hour, structTime.tm_min, structTime.tm_sec)\n self.snaLabel['text'] = '站名:%s' % infomation['sna']\n self.arLabel.configure(text='地址:{0:s}'.format(infomation['ar']))\n self.bempLabel['text'] = '空位數量:{0:d}'.format(infomation['bemp'])\n self.sbiLabel['text'] = '可借車數:{0:d}'.format(infomation['sbi'])\n self.totLabel['text'] = '總車數:{0:d}'.format(infomation['tot'])\n\n\nif __name__ == '__main__':\n window = Window()\n window.mainloop()\n",
"step-4": "import requests\nimport tkinter as tk\nfrom tkinter.font import Font\nfrom time import strptime\n\n\nclass Window(tk.Tk):\n\n def __init__(self):\n super().__init__()\n res = requests.get('https://flask-robert.herokuapp.com/youbike')\n jsonObj = res.json()\n areas = jsonObj['areas']\n self.title('台北市行政區')\n topFrame = tk.Frame(self, bd=2, relief=tk.GROOVE, padx=20, pady=10)\n buttonFont = Font(family='Helvetica', size=20)\n for index, area in enumerate(areas):\n if index % 6 == 0:\n parentframe = tk.Frame(topFrame)\n parentframe.pack()\n btn = tk.Button(parentframe, text=area, font=buttonFont, padx=5,\n pady=5)\n btn.bind('<Button-1>', self.userClick)\n btn.pack(side=tk.LEFT, padx=5)\n topFrame.pack(padx=20, pady=30)\n self.fixedWidthFrame = tk.Frame(self, height=600, bg='red')\n self.createdRadioButtonFrame()\n self.fixedWidthFrame.pack(padx=20)\n messageDisplayFrame = tk.Frame(self, bd=2, relief=tk.GROOVE, padx=\n 20, pady=10)\n self.mdayLabel = tk.Label(messageDisplayFrame, text='記錄時間:')\n self.mdayLabel.pack(anchor=tk.W)\n self.snaLabel = tk.Label(messageDisplayFrame, text='站名:')\n self.snaLabel.pack(anchor=tk.W)\n self.arLabel = tk.Label(messageDisplayFrame, text='地址:')\n self.arLabel.pack(anchor=tk.W)\n self.bempLabel = tk.Label(messageDisplayFrame, text='空位數量:')\n self.bempLabel.pack(anchor=tk.W)\n self.sbiLabel = tk.Label(messageDisplayFrame, text='可借車數:')\n self.sbiLabel.pack(anchor=tk.W)\n self.totLabel = tk.Label(messageDisplayFrame, text='總車數:')\n self.totLabel.pack(anchor=tk.W)\n messageDisplayFrame.pack(expand=True, fill=tk.BOTH, padx=20, pady=30)\n\n def userClick(self, event):\n self.bottomFrame.destroy()\n selectedArea = event.widget['text']\n urlString = ('https://flask-robert.herokuapp.com/youbike/%s' %\n selectedArea)\n res = requests.get(urlString)\n jsonobj = res.json()\n self.areas = jsonobj['data']\n snaList = []\n for area in self.areas:\n snaList.append(area['sna'])\n self.createdRadioButtonFrame(data=snaList)\n\n def 
createdRadioButtonFrame(self, data=None):\n self.bottomFrame = tk.Frame(self.fixedWidthFrame, bd=2, relief=tk.\n GROOVE, padx=20, pady=10)\n if data == None:\n urlString = 'https://flask-robert.herokuapp.com/youbike/南港區'\n res = requests.get(urlString)\n jsonobj = res.json()\n self.areas = jsonobj['data']\n snaList = []\n for area in self.areas:\n snaList.append(area['sna'])\n self.radioButtonData = snaList\n else:\n self.radioButtonData = data\n self.var = tk.IntVar()\n for index, data in enumerate(self.radioButtonData):\n if index % 10 == 0:\n parentframe = tk.Frame(self.bottomFrame)\n parentframe.pack(side=tk.LEFT, expand=True, fill=tk.Y)\n radioButton = tk.Radiobutton(parentframe, text=data, value=\n index, variable=self.var, command=self.userChoicedRadioButton\n ).pack(anchor=tk.W)\n self.bottomFrame.pack()\n self.var.set(0)\n\n def userChoicedRadioButton(self):\n index = self.var.get()\n infomation = self.areas[index]\n print(infomation)\n datetimeString = infomation['mday']\n datetimeFormat = '%Y%m%d%H%M%S'\n structTime = strptime(datetimeString, datetimeFormat)\n self.mdayLabel['text'] = '記錄時間:%d年%d月%d日 %d:%d:%d' % (structTime.\n tm_year, structTime.tm_mon, structTime.tm_mday, structTime.\n tm_hour, structTime.tm_min, structTime.tm_sec)\n self.snaLabel['text'] = '站名:%s' % infomation['sna']\n self.arLabel.configure(text='地址:{0:s}'.format(infomation['ar']))\n self.bempLabel['text'] = '空位數量:{0:d}'.format(infomation['bemp'])\n self.sbiLabel['text'] = '可借車數:{0:d}'.format(infomation['sbi'])\n self.totLabel['text'] = '總車數:{0:d}'.format(infomation['tot'])\n\n\nif __name__ == '__main__':\n window = Window()\n window.mainloop()\n",
"step-5": "import requests\nimport tkinter as tk\nfrom tkinter.font import Font\nfrom time import strptime\n\n\nclass Window(tk.Tk):\n def __init__(self):\n super().__init__()\n #取得網路上的資料\n res = requests.get('https://flask-robert.herokuapp.com/youbike')\n jsonObj = res.json()\n areas = jsonObj['areas']\n\n #介面\n self.title(\"台北市行政區\")\n topFrame = tk.Frame(self,bd=2,relief=tk.GROOVE,padx=20,pady=10)\n buttonFont = Font(family='Helvetica', size=20)\n\n for index, area in enumerate(areas):\n if index % 6 == 0:\n parentframe = tk.Frame(topFrame)\n parentframe.pack()\n btn = tk.Button(parentframe, text=area, font=buttonFont, padx=5, pady=5)\n btn.bind('<Button-1>', self.userClick)\n btn.pack(side=tk.LEFT, padx=5)\n topFrame.pack(padx=20, pady=30)\n\n\n #建立下方radioButton的介面\n self.fixedWidthFrame = tk.Frame(self,height=600,bg='red')\n self.createdRadioButtonFrame()\n self.fixedWidthFrame.pack(padx=20)\n\n #建立message介面\n messageDisplayFrame = tk.Frame(self,bd=2,relief=tk.GROOVE,padx=20,pady=10)\n self.mdayLabel = tk.Label(messageDisplayFrame, text=\"記錄時間:\")\n self.mdayLabel.pack(anchor=tk.W)\n self.snaLabel = tk.Label(messageDisplayFrame,text=\"站名:\")\n self.snaLabel.pack(anchor=tk.W)\n self.arLabel = tk.Label(messageDisplayFrame,text=\"地址:\")\n self.arLabel.pack(anchor=tk.W)\n self.bempLabel = tk.Label(messageDisplayFrame, text=\"空位數量:\")\n self.bempLabel.pack(anchor=tk.W)\n self.sbiLabel = tk.Label(messageDisplayFrame, text=\"可借車數:\")\n self.sbiLabel.pack(anchor=tk.W)\n self.totLabel = tk.Label(messageDisplayFrame, text=\"總車數:\")\n self.totLabel.pack(anchor=tk.W)\n messageDisplayFrame.pack(expand=True,fill=tk.BOTH,padx=20,pady=30)\n\n\n\n\n def userClick(self,event):\n self.bottomFrame.destroy()\n selectedArea = event.widget['text']\n urlString = \"https://flask-robert.herokuapp.com/youbike/%s\" % selectedArea\n res = requests.get(urlString)\n jsonobj = res.json()\n self.areas = jsonobj['data']\n snaList = []\n for area in self.areas:\n snaList.append(area[\"sna\"])\n 
self.createdRadioButtonFrame(data=snaList)\n\n\n def createdRadioButtonFrame(self,data=None):\n self.bottomFrame = tk.Frame(self.fixedWidthFrame, bd=2, relief=tk.GROOVE, padx=20, pady=10)\n if data == None:\n urlString = \"https://flask-robert.herokuapp.com/youbike/南港區\"\n res = requests.get(urlString)\n jsonobj = res.json()\n self.areas = jsonobj['data']\n snaList = []\n for area in self.areas:\n snaList.append(area[\"sna\"])\n self.radioButtonData = snaList\n else:\n self.radioButtonData = data\n\n self.var = tk.IntVar()\n for index, data in enumerate(self.radioButtonData):\n if index % 10 == 0:\n parentframe = tk.Frame(self.bottomFrame)\n parentframe.pack(side=tk.LEFT,expand=True,fill=tk.Y)\n radioButton = tk.Radiobutton(parentframe, text=data, value=index, variable=self.var,command=self.userChoicedRadioButton).pack(anchor=tk.W)\n self.bottomFrame.pack()\n self.var.set(0)\n\n def userChoicedRadioButton(self):\n index = self.var.get()\n infomation = self.areas[index]\n print(infomation)\n datetimeString = infomation[\"mday\"]\n datetimeFormat = \"%Y%m%d%H%M%S\"\n structTime = strptime(datetimeString,datetimeFormat)\n\n self.mdayLabel[\"text\"] = \"記錄時間:%d年%d月%d日 %d:%d:%d\" % (structTime.tm_year,structTime.tm_mon,structTime.tm_mday,structTime.tm_hour,structTime.tm_min,structTime.tm_sec)\n self.snaLabel[\"text\"] = \"站名:%s\" % infomation[\"sna\"]\n self.arLabel.configure(text=\"地址:{0:s}\".format(infomation[\"ar\"]))\n self.bempLabel[\"text\"] = \"空位數量:{0:d}\".format(infomation[\"bemp\"])\n self.sbiLabel[\"text\"] = \"可借車數:{0:d}\".format(infomation[\"sbi\"])\n self.totLabel[\"text\"] = \"總車數:{0:d}\".format(infomation[\"tot\"])\n\nif __name__ == \"__main__\":\n window = Window()\n window.mainloop()\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
def print_all_models():
return models.Sample.objects.all()
@sync_to_async
def _create_record(name):
return models.Sample.objects.create(name=name)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
<|reserved_special_token_0|>
django.setup()
<|reserved_special_token_0|>
def print_all_models():
return models.Sample.objects.all()
@sync_to_async
def _create_record(name):
return models.Sample.objects.create(name=name)
async def create_record(name=None):
await _create_record(name)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.dont_write_bytecode = True
<|reserved_special_token_0|>
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
<|reserved_special_token_0|>
django.setup()
<|reserved_special_token_0|>
def print_all_models():
return models.Sample.objects.all()
@sync_to_async
def _create_record(name):
return models.Sample.objects.create(name=name)
async def create_record(name=None):
await _create_record(name)
<|reserved_special_token_1|>
import sys
from asgiref.sync import sync_to_async
from django.core.wsgi import get_wsgi_application
sys.dont_write_bytecode = True
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
import django
django.setup()
from db import models
def print_all_models():
return models.Sample.objects.all()
@sync_to_async
def _create_record(name):
return models.Sample.objects.create(name=name)
async def create_record(name=None):
await _create_record(name)
<|reserved_special_token_1|>
# Turn off bytecode generation
import sys
from asgiref.sync import sync_to_async
from django.core.wsgi import get_wsgi_application
sys.dont_write_bytecode = True
# Django specific settings
import os
# The settings module must be configured before django.setup() runs.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
import django
# Initialise the app registry; required before importing any model below.
django.setup()
from db import models
def print_all_models():
    """Return a queryset of all Sample rows.

    NOTE(review): despite the name, this *returns* the queryset rather
    than printing it; the name is kept because callers may rely on it.
    """
    return models.Sample.objects.all()
@sync_to_async
def _create_record(name):
    """Create a Sample row (synchronous ORM call wrapped for async callers)."""
    return models.Sample.objects.create(name=name)
async def create_record(name=None):
    """Create a Sample row with the given name and return the new instance.

    The ORM call runs in a worker thread via ``sync_to_async``.  The created
    object is now returned (it was previously discarded), which is backward
    compatible for callers that ignore the result.
    """
    return await _create_record(name)
|
flexible
|
{
"blob_id": "4afb556ceca89eb90ba800db4f383afad1cd42a5",
"index": 3765,
"step-1": "<mask token>\n\n\ndef print_all_models():\n return models.Sample.objects.all()\n\n\n@sync_to_async\ndef _create_record(name):\n return models.Sample.objects.create(name=name)\n\n\n<mask token>\n",
"step-2": "<mask token>\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')\n<mask token>\ndjango.setup()\n<mask token>\n\n\ndef print_all_models():\n return models.Sample.objects.all()\n\n\n@sync_to_async\ndef _create_record(name):\n return models.Sample.objects.create(name=name)\n\n\nasync def create_record(name=None):\n await _create_record(name)\n",
"step-3": "<mask token>\nsys.dont_write_bytecode = True\n<mask token>\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')\n<mask token>\ndjango.setup()\n<mask token>\n\n\ndef print_all_models():\n return models.Sample.objects.all()\n\n\n@sync_to_async\ndef _create_record(name):\n return models.Sample.objects.create(name=name)\n\n\nasync def create_record(name=None):\n await _create_record(name)\n",
"step-4": "import sys\nfrom asgiref.sync import sync_to_async\nfrom django.core.wsgi import get_wsgi_application\nsys.dont_write_bytecode = True\nimport os\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')\nimport django\ndjango.setup()\nfrom db import models\n\n\ndef print_all_models():\n return models.Sample.objects.all()\n\n\n@sync_to_async\ndef _create_record(name):\n return models.Sample.objects.create(name=name)\n\n\nasync def create_record(name=None):\n await _create_record(name)\n",
"step-5": "# Turn off bytecode generation\nimport sys\nfrom asgiref.sync import sync_to_async\nfrom django.core.wsgi import get_wsgi_application\n\n\nsys.dont_write_bytecode = True\n\n# Django specific settings\nimport os\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"settings\")\nimport django\n\ndjango.setup()\n\nfrom db import models\n\n\ndef print_all_models():\n return models.Sample.objects.all()\n\n\n@sync_to_async\ndef _create_record(name):\n return models.Sample.objects.create(name=name)\n\n\nasync def create_record(name=None):\n await _create_record(name)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
def test_corr_callable_method(self, datetime_series):
my_corr = (lambda a, b: (1.0 if (a == b).all() else 0.0))
s1 = Series([1, 2, 3, 4, 5])
s2 = Series([5, 4, 3, 2, 1])
expected = 0
tm.assert_almost_equal(s1.corr(s2, method=my_corr), expected)
tm.assert_almost_equal(datetime_series.corr(datetime_series, method=my_corr), 1.0)
tm.assert_almost_equal(datetime_series[:15].corr(datetime_series[5:], method=my_corr), 1.0)
assert np.isnan(datetime_series[::2].corr(datetime_series[1::2], method=my_corr))
df = pd.DataFrame([s1, s2])
expected = pd.DataFrame([{
0: 1.0,
1: 0,
}, {
0: 0,
1: 1.0,
}])
tm.assert_almost_equal(df.transpose().corr(method=my_corr), expected)
|
normal
|
{
"blob_id": "5e68233fde741c0d2a94bf099afb6a91c08e2a29",
"index": 6071,
"step-1": "<mask token>\n",
"step-2": "def test_corr_callable_method(self, datetime_series):\n my_corr = lambda a, b: 1.0 if (a == b).all() else 0.0\n s1 = Series([1, 2, 3, 4, 5])\n s2 = Series([5, 4, 3, 2, 1])\n expected = 0\n tm.assert_almost_equal(s1.corr(s2, method=my_corr), expected)\n tm.assert_almost_equal(datetime_series.corr(datetime_series, method=\n my_corr), 1.0)\n tm.assert_almost_equal(datetime_series[:15].corr(datetime_series[5:],\n method=my_corr), 1.0)\n assert np.isnan(datetime_series[::2].corr(datetime_series[1::2], method\n =my_corr))\n df = pd.DataFrame([s1, s2])\n expected = pd.DataFrame([{(0): 1.0, (1): 0}, {(0): 0, (1): 1.0}])\n tm.assert_almost_equal(df.transpose().corr(method=my_corr), expected)\n",
"step-3": "def test_corr_callable_method(self, datetime_series):\n my_corr = (lambda a, b: (1.0 if (a == b).all() else 0.0))\n s1 = Series([1, 2, 3, 4, 5])\n s2 = Series([5, 4, 3, 2, 1])\n expected = 0\n tm.assert_almost_equal(s1.corr(s2, method=my_corr), expected)\n tm.assert_almost_equal(datetime_series.corr(datetime_series, method=my_corr), 1.0)\n tm.assert_almost_equal(datetime_series[:15].corr(datetime_series[5:], method=my_corr), 1.0)\n assert np.isnan(datetime_series[::2].corr(datetime_series[1::2], method=my_corr))\n df = pd.DataFrame([s1, s2])\n expected = pd.DataFrame([{\n 0: 1.0,\n 1: 0,\n }, {\n 0: 0,\n 1: 1.0,\n }])\n tm.assert_almost_equal(df.transpose().corr(method=my_corr), expected)",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = [url('^$', SprintListView.as_view(), name='sprint_list'),
path('create/', view=CreateSprintView.as_view(), name='create_sprint'),
path('modificar/<int:sprint_pk>/', view=UpdateSprintView.as_view(),
name='update_sprint'), path('<int:sprint_pk>/asignarus/', view=
AsignarUSUpdateView.as_view(), name='asignar_us'), path(
'<int:sprint_pk>/tableros/<int:flujo_pk>/', view=TableroTemplateView.
as_view(), name='tablero'), path(route='ver/<int:pk>/', view=
VerSprintDetailView.as_view(), name='ver_sprint'), path(route=
'<int:sprint_pk>/sprintbacklogpdf/', view=SprintBacklogPDF.as_view(),
name='reporte_sb'), path(route='<int:sprint_pk>/prioridades/', view=
PrioridadesPDF.as_view(), name='prioridades')]
<|reserved_special_token_1|>
from django.conf.urls import url
from django.urls import path
from .views import *
from flujo.views import *
<|reserved_special_token_0|>
urlpatterns = [url('^$', SprintListView.as_view(), name='sprint_list'),
path('create/', view=CreateSprintView.as_view(), name='create_sprint'),
path('modificar/<int:sprint_pk>/', view=UpdateSprintView.as_view(),
name='update_sprint'), path('<int:sprint_pk>/asignarus/', view=
AsignarUSUpdateView.as_view(), name='asignar_us'), path(
'<int:sprint_pk>/tableros/<int:flujo_pk>/', view=TableroTemplateView.
as_view(), name='tablero'), path(route='ver/<int:pk>/', view=
VerSprintDetailView.as_view(), name='ver_sprint'), path(route=
'<int:sprint_pk>/sprintbacklogpdf/', view=SprintBacklogPDF.as_view(),
name='reporte_sb'), path(route='<int:sprint_pk>/prioridades/', view=
PrioridadesPDF.as_view(), name='prioridades')]
<|reserved_special_token_1|>
from django.conf.urls import url
from django.urls import path
from .views import *
from flujo.views import *
"""
URL para el Sprint crear, listar y modificar
"""
urlpatterns = [
url(r'^$', SprintListView.as_view(), name='sprint_list'),
path('create/', view=CreateSprintView.as_view(), name='create_sprint'),
path('modificar/<int:sprint_pk>/', view=UpdateSprintView.as_view(), name='update_sprint'),
path('<int:sprint_pk>/asignarus/', view=AsignarUSUpdateView.as_view(), name='asignar_us'),
path('<int:sprint_pk>/tableros/<int:flujo_pk>/', view=TableroTemplateView.as_view(), name='tablero'),
path(route='ver/<int:pk>/', view=VerSprintDetailView.as_view(), name='ver_sprint'),
path(route='<int:sprint_pk>/sprintbacklogpdf/', view=SprintBacklogPDF.as_view(), name="reporte_sb"),
path(route='<int:sprint_pk>/prioridades/', view=PrioridadesPDF.as_view(), name="prioridades")
]
|
flexible
|
{
"blob_id": "2b1ec422a42af59a048c708f86b686eb0564b51f",
"index": 2456,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [url('^$', SprintListView.as_view(), name='sprint_list'),\n path('create/', view=CreateSprintView.as_view(), name='create_sprint'),\n path('modificar/<int:sprint_pk>/', view=UpdateSprintView.as_view(),\n name='update_sprint'), path('<int:sprint_pk>/asignarus/', view=\n AsignarUSUpdateView.as_view(), name='asignar_us'), path(\n '<int:sprint_pk>/tableros/<int:flujo_pk>/', view=TableroTemplateView.\n as_view(), name='tablero'), path(route='ver/<int:pk>/', view=\n VerSprintDetailView.as_view(), name='ver_sprint'), path(route=\n '<int:sprint_pk>/sprintbacklogpdf/', view=SprintBacklogPDF.as_view(),\n name='reporte_sb'), path(route='<int:sprint_pk>/prioridades/', view=\n PrioridadesPDF.as_view(), name='prioridades')]\n",
"step-3": "from django.conf.urls import url\nfrom django.urls import path\nfrom .views import *\nfrom flujo.views import *\n<mask token>\nurlpatterns = [url('^$', SprintListView.as_view(), name='sprint_list'),\n path('create/', view=CreateSprintView.as_view(), name='create_sprint'),\n path('modificar/<int:sprint_pk>/', view=UpdateSprintView.as_view(),\n name='update_sprint'), path('<int:sprint_pk>/asignarus/', view=\n AsignarUSUpdateView.as_view(), name='asignar_us'), path(\n '<int:sprint_pk>/tableros/<int:flujo_pk>/', view=TableroTemplateView.\n as_view(), name='tablero'), path(route='ver/<int:pk>/', view=\n VerSprintDetailView.as_view(), name='ver_sprint'), path(route=\n '<int:sprint_pk>/sprintbacklogpdf/', view=SprintBacklogPDF.as_view(),\n name='reporte_sb'), path(route='<int:sprint_pk>/prioridades/', view=\n PrioridadesPDF.as_view(), name='prioridades')]\n",
"step-4": "from django.conf.urls import url\nfrom django.urls import path\nfrom .views import *\nfrom flujo.views import *\n\n\"\"\"\nURL para el Sprint crear, listar y modificar\n\"\"\"\nurlpatterns = [\n url(r'^$', SprintListView.as_view(), name='sprint_list'),\n path('create/', view=CreateSprintView.as_view(), name='create_sprint'),\n path('modificar/<int:sprint_pk>/', view=UpdateSprintView.as_view(), name='update_sprint'),\n path('<int:sprint_pk>/asignarus/', view=AsignarUSUpdateView.as_view(), name='asignar_us'),\n path('<int:sprint_pk>/tableros/<int:flujo_pk>/', view=TableroTemplateView.as_view(), name='tablero'),\n path(route='ver/<int:pk>/', view=VerSprintDetailView.as_view(), name='ver_sprint'),\n path(route='<int:sprint_pk>/sprintbacklogpdf/', view=SprintBacklogPDF.as_view(), name=\"reporte_sb\"),\n path(route='<int:sprint_pk>/prioridades/', view=PrioridadesPDF.as_view(), name=\"prioridades\")\n]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class FlowerIdentify(tornado.web.RequestHandler):
def get(self):
self.render('flower_identify.html')
class IdentifyHandler(tornado.websocket.WebSocketHandler):
def post(self):
dataUrl = self.get_body_argument('image')
Orientation = self.get_body_argument('orientation')
content = base64.b64decode(dataUrl)
"""保存到图片target.jpg"""
file = open('./static/images/target.jpg', 'wb')
file.write(content)
file.close()
"""图片旋转270(根据实际情况)"""
img = Image.open('./static/images/target.jpg')
if Orientation == '3':
img = img.rotate(180, expand=True)
elif Orientation == '6':
img = img.rotate(270, expand=True)
elif Orientation == '8':
img = img.rotate(90, expand=True)
img.save('./static/images/target.jpg')
"""调用函数识别"""
flowerIndex = flower_identify()
flowerInfo = FlowersInfo.flowersInfo[flowerIndex]
self.render('fi_result.html', data=flowerInfo)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FlowersInfo:
<|reserved_special_token_0|>
class FlowerIdentify(tornado.web.RequestHandler):
def get(self):
self.render('flower_identify.html')
class IdentifyHandler(tornado.websocket.WebSocketHandler):
def post(self):
dataUrl = self.get_body_argument('image')
Orientation = self.get_body_argument('orientation')
content = base64.b64decode(dataUrl)
"""保存到图片target.jpg"""
file = open('./static/images/target.jpg', 'wb')
file.write(content)
file.close()
"""图片旋转270(根据实际情况)"""
img = Image.open('./static/images/target.jpg')
if Orientation == '3':
img = img.rotate(180, expand=True)
elif Orientation == '6':
img = img.rotate(270, expand=True)
elif Orientation == '8':
img = img.rotate(90, expand=True)
img.save('./static/images/target.jpg')
"""调用函数识别"""
flowerIndex = flower_identify()
flowerInfo = FlowersInfo.flowersInfo[flowerIndex]
self.render('fi_result.html', data=flowerInfo)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FlowersInfo:
flowersInfo = [
'月季花(学名:Rosa chinensis Jacq.): 被称为花中皇后,又称“月月红”,是常绿、半常绿低矮灌木,四季开花,一般为红色,或粉色、偶有白色和黄色,可作为观赏植物,也可作为药用植物,亦称月季。有三个自然变种,现代月季花型多样,有单瓣和重瓣,还有高心卷边等优美花型;其色彩艳丽、丰富,不仅有红、粉黄、白等单色,还有混色、银边等品种;多数品种有芳香。月季的品种繁多,世界上已有近万种,中国也有千种以上。'
,
'绣球(学名:Hydrangea macrophylla (Thunb.) Ser. ): 为虎耳草科绣球属植物。灌木,高1-4米;茎常于基部发出多数放射枝而形成一圆形灌丛;枝圆柱形。叶纸质或近革质,倒卵形或阔椭圆形。伞房状聚伞花序近球形,直径8-20厘米,具短的总花梗,花密集,粉红色、淡蓝色或白色;花瓣长圆形,长3-3.5毫米。蒴果未成熟,长陀螺状;种子未熟。花期6-8月。'
,
'万寿菊(Tagetes erecta L)为菊科万寿菊属一年生草本植物,茎直立,粗壮,具纵细条棱,分枝向上平展。叶羽状分裂;沿叶缘有少数腺体。头状花序单生;总苞杯状,顶端具齿尖;舌状花黄色或暗橙色;管状花花冠黄色。瘦果线形,基部缩小,黑色或褐色,被短微毛;冠毛有1-2个长芒和2-3个短而钝的鳞片。花期7-9月。'
,
'三色堇(学名:Viola tricolor L.)是堇菜科堇菜属的二年或多年生草本植物。基生叶叶片长卵形或披针形,具长柄,茎生叶叶片卵形、长圆形或长圆披针形,先端圆或钝,边缘具稀疏的圆齿或钝锯齿。三色堇是欧洲常见的野花物种,也常栽培于公园中,是冰岛、波兰的国花。花朵通常每花有紫、白、黄三色,故名三色堇。该物种较耐寒,喜凉爽,开花受光照影响较大。'
,
'石榴花,落叶灌木或小乔木石榴的花;为石榴属植物,石榴树干灰褐色,有片状剥落,嫩枝黄绿光滑,常呈四棱形,枝端多为刺状,无顶芽。石榴花单叶对生或簇生,矩圆形或倒卵形,新叶嫩绿或古铜色。花朵至数朵生于枝顶或叶腋,花萼钟形,肉质,先端6裂,表面光滑具腊质,橙红色,宿存。花瓣5~7枚红色或白色,单瓣或重瓣。'
]
class FlowerIdentify(tornado.web.RequestHandler):
def get(self):
self.render('flower_identify.html')
class IdentifyHandler(tornado.websocket.WebSocketHandler):
def post(self):
dataUrl = self.get_body_argument('image')
Orientation = self.get_body_argument('orientation')
content = base64.b64decode(dataUrl)
"""保存到图片target.jpg"""
file = open('./static/images/target.jpg', 'wb')
file.write(content)
file.close()
"""图片旋转270(根据实际情况)"""
img = Image.open('./static/images/target.jpg')
if Orientation == '3':
img = img.rotate(180, expand=True)
elif Orientation == '6':
img = img.rotate(270, expand=True)
elif Orientation == '8':
img = img.rotate(90, expand=True)
img.save('./static/images/target.jpg')
"""调用函数识别"""
flowerIndex = flower_identify()
flowerInfo = FlowersInfo.flowersInfo[flowerIndex]
self.render('fi_result.html', data=flowerInfo)
<|reserved_special_token_1|>
import tornado.web
import tornado.websocket
from PIL import Image
import base64
from model.flower_identify import flower_identify
class FlowersInfo:
flowersInfo = [
'月季花(学名:Rosa chinensis Jacq.): 被称为花中皇后,又称“月月红”,是常绿、半常绿低矮灌木,四季开花,一般为红色,或粉色、偶有白色和黄色,可作为观赏植物,也可作为药用植物,亦称月季。有三个自然变种,现代月季花型多样,有单瓣和重瓣,还有高心卷边等优美花型;其色彩艳丽、丰富,不仅有红、粉黄、白等单色,还有混色、银边等品种;多数品种有芳香。月季的品种繁多,世界上已有近万种,中国也有千种以上。'
,
'绣球(学名:Hydrangea macrophylla (Thunb.) Ser. ): 为虎耳草科绣球属植物。灌木,高1-4米;茎常于基部发出多数放射枝而形成一圆形灌丛;枝圆柱形。叶纸质或近革质,倒卵形或阔椭圆形。伞房状聚伞花序近球形,直径8-20厘米,具短的总花梗,花密集,粉红色、淡蓝色或白色;花瓣长圆形,长3-3.5毫米。蒴果未成熟,长陀螺状;种子未熟。花期6-8月。'
,
'万寿菊(Tagetes erecta L)为菊科万寿菊属一年生草本植物,茎直立,粗壮,具纵细条棱,分枝向上平展。叶羽状分裂;沿叶缘有少数腺体。头状花序单生;总苞杯状,顶端具齿尖;舌状花黄色或暗橙色;管状花花冠黄色。瘦果线形,基部缩小,黑色或褐色,被短微毛;冠毛有1-2个长芒和2-3个短而钝的鳞片。花期7-9月。'
,
'三色堇(学名:Viola tricolor L.)是堇菜科堇菜属的二年或多年生草本植物。基生叶叶片长卵形或披针形,具长柄,茎生叶叶片卵形、长圆形或长圆披针形,先端圆或钝,边缘具稀疏的圆齿或钝锯齿。三色堇是欧洲常见的野花物种,也常栽培于公园中,是冰岛、波兰的国花。花朵通常每花有紫、白、黄三色,故名三色堇。该物种较耐寒,喜凉爽,开花受光照影响较大。'
,
'石榴花,落叶灌木或小乔木石榴的花;为石榴属植物,石榴树干灰褐色,有片状剥落,嫩枝黄绿光滑,常呈四棱形,枝端多为刺状,无顶芽。石榴花单叶对生或簇生,矩圆形或倒卵形,新叶嫩绿或古铜色。花朵至数朵生于枝顶或叶腋,花萼钟形,肉质,先端6裂,表面光滑具腊质,橙红色,宿存。花瓣5~7枚红色或白色,单瓣或重瓣。'
]
class FlowerIdentify(tornado.web.RequestHandler):
def get(self):
self.render('flower_identify.html')
class IdentifyHandler(tornado.websocket.WebSocketHandler):
def post(self):
dataUrl = self.get_body_argument('image')
Orientation = self.get_body_argument('orientation')
content = base64.b64decode(dataUrl)
"""保存到图片target.jpg"""
file = open('./static/images/target.jpg', 'wb')
file.write(content)
file.close()
"""图片旋转270(根据实际情况)"""
img = Image.open('./static/images/target.jpg')
if Orientation == '3':
img = img.rotate(180, expand=True)
elif Orientation == '6':
img = img.rotate(270, expand=True)
elif Orientation == '8':
img = img.rotate(90, expand=True)
img.save('./static/images/target.jpg')
"""调用函数识别"""
flowerIndex = flower_identify()
flowerInfo = FlowersInfo.flowersInfo[flowerIndex]
self.render('fi_result.html', data=flowerInfo)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# @File :fi_handlers.py
# @Author:ZengYu
# @Date :2019/5/16
# @software:PyCharm
import tornado.web
import tornado.websocket
from PIL import Image
import base64
from model.flower_identify import flower_identify
class FlowersInfo():
flowersInfo = ["月季花(学名:Rosa chinensis Jacq.): 被称为花中皇后,又称“月月红”,是常绿、半常绿低矮灌木,四季开花,一般为红色,或粉色、偶有白色和黄色,可作为观赏植物,也可作为药用植物,亦称月季。有三个自然变种,现代月季花型多样,有单瓣和重瓣,还有高心卷边等优美花型;其色彩艳丽、丰富,不仅有红、粉黄、白等单色,还有混色、银边等品种;多数品种有芳香。月季的品种繁多,世界上已有近万种,中国也有千种以上。",
"绣球(学名:Hydrangea macrophylla (Thunb.) Ser. ): 为虎耳草科绣球属植物。灌木,高1-4米;茎常于基部发出多数放射枝而形成一圆形灌丛;枝圆柱形。叶纸质或近革质,倒卵形或阔椭圆形。伞房状聚伞花序近球形,直径8-20厘米,具短的总花梗,花密集,粉红色、淡蓝色或白色;花瓣长圆形,长3-3.5毫米。蒴果未成熟,长陀螺状;种子未熟。花期6-8月。",
"万寿菊(Tagetes erecta L)为菊科万寿菊属一年生草本植物,茎直立,粗壮,具纵细条棱,分枝向上平展。叶羽状分裂;沿叶缘有少数腺体。头状花序单生;总苞杯状,顶端具齿尖;舌状花黄色或暗橙色;管状花花冠黄色。瘦果线形,基部缩小,黑色或褐色,被短微毛;冠毛有1-2个长芒和2-3个短而钝的鳞片。花期7-9月。",
"三色堇(学名:Viola tricolor L.)是堇菜科堇菜属的二年或多年生草本植物。基生叶叶片长卵形或披针形,具长柄,茎生叶叶片卵形、长圆形或长圆披针形,先端圆或钝,边缘具稀疏的圆齿或钝锯齿。三色堇是欧洲常见的野花物种,也常栽培于公园中,是冰岛、波兰的国花。花朵通常每花有紫、白、黄三色,故名三色堇。该物种较耐寒,喜凉爽,开花受光照影响较大。",
"石榴花,落叶灌木或小乔木石榴的花;为石榴属植物,石榴树干灰褐色,有片状剥落,嫩枝黄绿光滑,常呈四棱形,枝端多为刺状,无顶芽。石榴花单叶对生或簇生,矩圆形或倒卵形,新叶嫩绿或古铜色。花朵至数朵生于枝顶或叶腋,花萼钟形,肉质,先端6裂,表面光滑具腊质,橙红色,宿存。花瓣5~7枚红色或白色,单瓣或重瓣。"]
class FlowerIdentify(tornado.web.RequestHandler):
def get(self):
self.render("flower_identify.html")
class IdentifyHandler(tornado.websocket.WebSocketHandler):
def post(self):
# 从JSON字符串读取图片数据
dataUrl = self.get_body_argument("image")
Orientation = self.get_body_argument("orientation") # 得到图片方向以便旋转处理
content = base64.b64decode(dataUrl)
'''保存到图片target.jpg'''
file = open('./static/images/target.jpg', 'wb')
file.write(content)
file.close()
'''图片旋转270(根据实际情况)'''
img = Image.open('./static/images/target.jpg')
if Orientation == "3":
img = img.rotate(180, expand=True)
elif Orientation == "6":
img = img.rotate(270, expand=True)
elif Orientation == "8":
img = img.rotate(90, expand=True)
img.save('./static/images/target.jpg')
'''调用函数识别'''
flowerIndex = flower_identify() # 调用识别函数
flowerInfo = FlowersInfo.flowersInfo[flowerIndex] # 得到结果,并从FlowersInfo里找到该花的资料
self.render("fi_result.html", data=flowerInfo)
|
flexible
|
{
"blob_id": "1c3b1776f14a085bec90be11028c87dc47f00293",
"index": 1722,
"step-1": "<mask token>\n\n\nclass FlowerIdentify(tornado.web.RequestHandler):\n\n def get(self):\n self.render('flower_identify.html')\n\n\nclass IdentifyHandler(tornado.websocket.WebSocketHandler):\n\n def post(self):\n dataUrl = self.get_body_argument('image')\n Orientation = self.get_body_argument('orientation')\n content = base64.b64decode(dataUrl)\n \"\"\"保存到图片target.jpg\"\"\"\n file = open('./static/images/target.jpg', 'wb')\n file.write(content)\n file.close()\n \"\"\"图片旋转270(根据实际情况)\"\"\"\n img = Image.open('./static/images/target.jpg')\n if Orientation == '3':\n img = img.rotate(180, expand=True)\n elif Orientation == '6':\n img = img.rotate(270, expand=True)\n elif Orientation == '8':\n img = img.rotate(90, expand=True)\n img.save('./static/images/target.jpg')\n \"\"\"调用函数识别\"\"\"\n flowerIndex = flower_identify()\n flowerInfo = FlowersInfo.flowersInfo[flowerIndex]\n self.render('fi_result.html', data=flowerInfo)\n",
"step-2": "<mask token>\n\n\nclass FlowersInfo:\n <mask token>\n\n\nclass FlowerIdentify(tornado.web.RequestHandler):\n\n def get(self):\n self.render('flower_identify.html')\n\n\nclass IdentifyHandler(tornado.websocket.WebSocketHandler):\n\n def post(self):\n dataUrl = self.get_body_argument('image')\n Orientation = self.get_body_argument('orientation')\n content = base64.b64decode(dataUrl)\n \"\"\"保存到图片target.jpg\"\"\"\n file = open('./static/images/target.jpg', 'wb')\n file.write(content)\n file.close()\n \"\"\"图片旋转270(根据实际情况)\"\"\"\n img = Image.open('./static/images/target.jpg')\n if Orientation == '3':\n img = img.rotate(180, expand=True)\n elif Orientation == '6':\n img = img.rotate(270, expand=True)\n elif Orientation == '8':\n img = img.rotate(90, expand=True)\n img.save('./static/images/target.jpg')\n \"\"\"调用函数识别\"\"\"\n flowerIndex = flower_identify()\n flowerInfo = FlowersInfo.flowersInfo[flowerIndex]\n self.render('fi_result.html', data=flowerInfo)\n",
"step-3": "<mask token>\n\n\nclass FlowersInfo:\n flowersInfo = [\n '月季花(学名:Rosa chinensis Jacq.): 被称为花中皇后,又称“月月红”,是常绿、半常绿低矮灌木,四季开花,一般为红色,或粉色、偶有白色和黄色,可作为观赏植物,也可作为药用植物,亦称月季。有三个自然变种,现代月季花型多样,有单瓣和重瓣,还有高心卷边等优美花型;其色彩艳丽、丰富,不仅有红、粉黄、白等单色,还有混色、银边等品种;多数品种有芳香。月季的品种繁多,世界上已有近万种,中国也有千种以上。'\n ,\n '绣球(学名:Hydrangea macrophylla (Thunb.) Ser. ): 为虎耳草科绣球属植物。灌木,高1-4米;茎常于基部发出多数放射枝而形成一圆形灌丛;枝圆柱形。叶纸质或近革质,倒卵形或阔椭圆形。伞房状聚伞花序近球形,直径8-20厘米,具短的总花梗,花密集,粉红色、淡蓝色或白色;花瓣长圆形,长3-3.5毫米。蒴果未成熟,长陀螺状;种子未熟。花期6-8月。'\n ,\n '万寿菊(Tagetes erecta L)为菊科万寿菊属一年生草本植物,茎直立,粗壮,具纵细条棱,分枝向上平展。叶羽状分裂;沿叶缘有少数腺体。头状花序单生;总苞杯状,顶端具齿尖;舌状花黄色或暗橙色;管状花花冠黄色。瘦果线形,基部缩小,黑色或褐色,被短微毛;冠毛有1-2个长芒和2-3个短而钝的鳞片。花期7-9月。'\n ,\n '三色堇(学名:Viola tricolor L.)是堇菜科堇菜属的二年或多年生草本植物。基生叶叶片长卵形或披针形,具长柄,茎生叶叶片卵形、长圆形或长圆披针形,先端圆或钝,边缘具稀疏的圆齿或钝锯齿。三色堇是欧洲常见的野花物种,也常栽培于公园中,是冰岛、波兰的国花。花朵通常每花有紫、白、黄三色,故名三色堇。该物种较耐寒,喜凉爽,开花受光照影响较大。'\n ,\n '石榴花,落叶灌木或小乔木石榴的花;为石榴属植物,石榴树干灰褐色,有片状剥落,嫩枝黄绿光滑,常呈四棱形,枝端多为刺状,无顶芽。石榴花单叶对生或簇生,矩圆形或倒卵形,新叶嫩绿或古铜色。花朵至数朵生于枝顶或叶腋,花萼钟形,肉质,先端6裂,表面光滑具腊质,橙红色,宿存。花瓣5~7枚红色或白色,单瓣或重瓣。'\n ]\n\n\nclass FlowerIdentify(tornado.web.RequestHandler):\n\n def get(self):\n self.render('flower_identify.html')\n\n\nclass IdentifyHandler(tornado.websocket.WebSocketHandler):\n\n def post(self):\n dataUrl = self.get_body_argument('image')\n Orientation = self.get_body_argument('orientation')\n content = base64.b64decode(dataUrl)\n \"\"\"保存到图片target.jpg\"\"\"\n file = open('./static/images/target.jpg', 'wb')\n file.write(content)\n file.close()\n \"\"\"图片旋转270(根据实际情况)\"\"\"\n img = Image.open('./static/images/target.jpg')\n if Orientation == '3':\n img = img.rotate(180, expand=True)\n elif Orientation == '6':\n img = img.rotate(270, expand=True)\n elif Orientation == '8':\n img = img.rotate(90, expand=True)\n img.save('./static/images/target.jpg')\n \"\"\"调用函数识别\"\"\"\n flowerIndex = flower_identify()\n flowerInfo = FlowersInfo.flowersInfo[flowerIndex]\n self.render('fi_result.html', data=flowerInfo)\n",
"step-4": "import tornado.web\nimport tornado.websocket\nfrom PIL import Image\nimport base64\nfrom model.flower_identify import flower_identify\n\n\nclass FlowersInfo:\n flowersInfo = [\n '月季花(学名:Rosa chinensis Jacq.): 被称为花中皇后,又称“月月红”,是常绿、半常绿低矮灌木,四季开花,一般为红色,或粉色、偶有白色和黄色,可作为观赏植物,也可作为药用植物,亦称月季。有三个自然变种,现代月季花型多样,有单瓣和重瓣,还有高心卷边等优美花型;其色彩艳丽、丰富,不仅有红、粉黄、白等单色,还有混色、银边等品种;多数品种有芳香。月季的品种繁多,世界上已有近万种,中国也有千种以上。'\n ,\n '绣球(学名:Hydrangea macrophylla (Thunb.) Ser. ): 为虎耳草科绣球属植物。灌木,高1-4米;茎常于基部发出多数放射枝而形成一圆形灌丛;枝圆柱形。叶纸质或近革质,倒卵形或阔椭圆形。伞房状聚伞花序近球形,直径8-20厘米,具短的总花梗,花密集,粉红色、淡蓝色或白色;花瓣长圆形,长3-3.5毫米。蒴果未成熟,长陀螺状;种子未熟。花期6-8月。'\n ,\n '万寿菊(Tagetes erecta L)为菊科万寿菊属一年生草本植物,茎直立,粗壮,具纵细条棱,分枝向上平展。叶羽状分裂;沿叶缘有少数腺体。头状花序单生;总苞杯状,顶端具齿尖;舌状花黄色或暗橙色;管状花花冠黄色。瘦果线形,基部缩小,黑色或褐色,被短微毛;冠毛有1-2个长芒和2-3个短而钝的鳞片。花期7-9月。'\n ,\n '三色堇(学名:Viola tricolor L.)是堇菜科堇菜属的二年或多年生草本植物。基生叶叶片长卵形或披针形,具长柄,茎生叶叶片卵形、长圆形或长圆披针形,先端圆或钝,边缘具稀疏的圆齿或钝锯齿。三色堇是欧洲常见的野花物种,也常栽培于公园中,是冰岛、波兰的国花。花朵通常每花有紫、白、黄三色,故名三色堇。该物种较耐寒,喜凉爽,开花受光照影响较大。'\n ,\n '石榴花,落叶灌木或小乔木石榴的花;为石榴属植物,石榴树干灰褐色,有片状剥落,嫩枝黄绿光滑,常呈四棱形,枝端多为刺状,无顶芽。石榴花单叶对生或簇生,矩圆形或倒卵形,新叶嫩绿或古铜色。花朵至数朵生于枝顶或叶腋,花萼钟形,肉质,先端6裂,表面光滑具腊质,橙红色,宿存。花瓣5~7枚红色或白色,单瓣或重瓣。'\n ]\n\n\nclass FlowerIdentify(tornado.web.RequestHandler):\n\n def get(self):\n self.render('flower_identify.html')\n\n\nclass IdentifyHandler(tornado.websocket.WebSocketHandler):\n\n def post(self):\n dataUrl = self.get_body_argument('image')\n Orientation = self.get_body_argument('orientation')\n content = base64.b64decode(dataUrl)\n \"\"\"保存到图片target.jpg\"\"\"\n file = open('./static/images/target.jpg', 'wb')\n file.write(content)\n file.close()\n \"\"\"图片旋转270(根据实际情况)\"\"\"\n img = Image.open('./static/images/target.jpg')\n if Orientation == '3':\n img = img.rotate(180, expand=True)\n elif Orientation == '6':\n img = img.rotate(270, expand=True)\n elif Orientation == '8':\n img = img.rotate(90, expand=True)\n img.save('./static/images/target.jpg')\n \"\"\"调用函数识别\"\"\"\n flowerIndex = flower_identify()\n flowerInfo = FlowersInfo.flowersInfo[flowerIndex]\n 
self.render('fi_result.html', data=flowerInfo)\n",
"step-5": "# -*- coding: utf-8 -*-\r\n# @File :fi_handlers.py\r\n# @Author:ZengYu\r\n# @Date :2019/5/16\r\n# @software:PyCharm\r\n\r\nimport tornado.web\r\nimport tornado.websocket\r\nfrom PIL import Image\r\nimport base64\r\nfrom model.flower_identify import flower_identify\r\n\r\nclass FlowersInfo():\r\n flowersInfo = [\"月季花(学名:Rosa chinensis Jacq.): 被称为花中皇后,又称“月月红”,是常绿、半常绿低矮灌木,四季开花,一般为红色,或粉色、偶有白色和黄色,可作为观赏植物,也可作为药用植物,亦称月季。有三个自然变种,现代月季花型多样,有单瓣和重瓣,还有高心卷边等优美花型;其色彩艳丽、丰富,不仅有红、粉黄、白等单色,还有混色、银边等品种;多数品种有芳香。月季的品种繁多,世界上已有近万种,中国也有千种以上。\",\r\n \"绣球(学名:Hydrangea macrophylla (Thunb.) Ser. ): 为虎耳草科绣球属植物。灌木,高1-4米;茎常于基部发出多数放射枝而形成一圆形灌丛;枝圆柱形。叶纸质或近革质,倒卵形或阔椭圆形。伞房状聚伞花序近球形,直径8-20厘米,具短的总花梗,花密集,粉红色、淡蓝色或白色;花瓣长圆形,长3-3.5毫米。蒴果未成熟,长陀螺状;种子未熟。花期6-8月。\",\r\n \"万寿菊(Tagetes erecta L)为菊科万寿菊属一年生草本植物,茎直立,粗壮,具纵细条棱,分枝向上平展。叶羽状分裂;沿叶缘有少数腺体。头状花序单生;总苞杯状,顶端具齿尖;舌状花黄色或暗橙色;管状花花冠黄色。瘦果线形,基部缩小,黑色或褐色,被短微毛;冠毛有1-2个长芒和2-3个短而钝的鳞片。花期7-9月。\",\r\n \"三色堇(学名:Viola tricolor L.)是堇菜科堇菜属的二年或多年生草本植物。基生叶叶片长卵形或披针形,具长柄,茎生叶叶片卵形、长圆形或长圆披针形,先端圆或钝,边缘具稀疏的圆齿或钝锯齿。三色堇是欧洲常见的野花物种,也常栽培于公园中,是冰岛、波兰的国花。花朵通常每花有紫、白、黄三色,故名三色堇。该物种较耐寒,喜凉爽,开花受光照影响较大。\",\r\n \"石榴花,落叶灌木或小乔木石榴的花;为石榴属植物,石榴树干灰褐色,有片状剥落,嫩枝黄绿光滑,常呈四棱形,枝端多为刺状,无顶芽。石榴花单叶对生或簇生,矩圆形或倒卵形,新叶嫩绿或古铜色。花朵至数朵生于枝顶或叶腋,花萼钟形,肉质,先端6裂,表面光滑具腊质,橙红色,宿存。花瓣5~7枚红色或白色,单瓣或重瓣。\"]\r\n\r\nclass FlowerIdentify(tornado.web.RequestHandler):\r\n def get(self):\r\n self.render(\"flower_identify.html\")\r\n\r\nclass IdentifyHandler(tornado.websocket.WebSocketHandler):\r\n def post(self):\r\n # 从JSON字符串读取图片数据\r\n dataUrl = self.get_body_argument(\"image\")\r\n Orientation = self.get_body_argument(\"orientation\") # 得到图片方向以便旋转处理\r\n content = base64.b64decode(dataUrl)\r\n '''保存到图片target.jpg'''\r\n file = open('./static/images/target.jpg', 'wb')\r\n file.write(content)\r\n file.close()\r\n\r\n '''图片旋转270(根据实际情况)'''\r\n img = Image.open('./static/images/target.jpg')\r\n if Orientation == \"3\":\r\n img = img.rotate(180, expand=True)\r\n elif Orientation == \"6\":\r\n img = img.rotate(270, expand=True)\r\n elif Orientation == \"8\":\r\n 
img = img.rotate(90, expand=True)\r\n img.save('./static/images/target.jpg')\r\n\r\n '''调用函数识别'''\r\n flowerIndex = flower_identify() # 调用识别函数\r\n flowerInfo = FlowersInfo.flowersInfo[flowerIndex] # 得到结果,并从FlowersInfo里找到该花的资料\r\n self.render(\"fi_result.html\", data=flowerInfo)\r\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
class GenomicArray:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, data_table: Optional[Union[Sequence, pd.DataFrame]],
meta_dict: Optional[Mapping]=None):
if data_table is None or isinstance(data_table, (list, tuple)
) and not len(data_table) or isinstance(data_table, pd.DataFrame
) and not len(data_table.columns):
data_table = self._make_blank()
else:
if not isinstance(data_table, pd.DataFrame):
data_table = pd.DataFrame(data_table)
if not all(c in data_table.columns for c in self._required_columns
):
raise ValueError('data table must have at least columns ' +
f'{self._required_columns!r}; got {tuple(data_table.columns)!r}'
)
if len(data_table):
def ok_dtype(col, dtype):
return isinstance(data_table[col].iat[0], dtype)
else:
def ok_dtype(col, dtype):
return data_table[col].dtype == np.dtype(dtype)
recast_cols = {col: dtype for col, dtype in zip(self.
_required_columns, self._required_dtypes) if not ok_dtype(
col, dtype)}
if recast_cols:
data_table = data_table.astype(recast_cols)
self.data = data_table
self.meta = dict(meta_dict) if meta_dict is not None and len(meta_dict
) else {}
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@classmethod
def from_rows(cls, rows: Iterable, columns: Optional[Sequence[str]]=
None, meta_dict: Optional[Mapping]=None):
"""Create a new instance from a list of rows, as tuples or arrays."""
if columns is None:
columns = cls._required_columns
table = pd.DataFrame.from_records(rows, columns=columns)
return cls(table, meta_dict)
def as_columns(self, **columns):
"""Wrap the named columns in this instance's metadata."""
return self.__class__.from_columns(columns, self.meta)
def as_dataframe(self, dframe: pd.DataFrame, reset_index: bool=False):
"""Wrap the given pandas DataFrame in this instance's metadata."""
if reset_index:
dframe = dframe.reset_index(drop=True)
return self.__class__(dframe, self.meta.copy())
def as_series(self, arraylike: Iterable) ->pd.Series:
"""Coerce `arraylike` to a Series with this instance's index."""
return pd.Series(arraylike, index=self.data.index)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __eq__(self, other) ->bool:
return isinstance(other, self.__class__) and self.data.equals(other
.data)
def __len__(self) ->int:
return len(self.data)
def __contains__(self, key) ->bool:
return key in self.data.columns
def __getitem__(self, index) ->Union[pd.Series, pd.DataFrame]:
"""Access a portion of the data.
Cases:
- single integer: a row, as pd.Series
- string row name: a column, as pd.Series
- a boolean array: masked rows, as_dataframe
- tuple of integers: selected rows, as_dataframe
"""
if isinstance(index, int):
return self.data.iloc[index]
if isinstance(index, str):
return self.data[index]
if isinstance(index, tuple) and len(index) == 2 and index[1
] in self.data.columns:
return self.data.loc[index]
if isinstance(index, slice):
return self.as_dataframe(self.data[index])
try:
if isinstance(index, type(None)) or len(index) == 0:
empty = pd.DataFrame(columns=self.data.columns)
return self.as_dataframe(empty)
except TypeError as exc:
raise TypeError(
f'object of type {type(index)!r} cannot be used as an index into a {self.__class__.__name__}'
) from exc
return self.as_dataframe(self.data[index])
def __setitem__(self, index, value):
"""Assign to a portion of the data."""
if isinstance(index, int):
self.data.iloc[index] = value
elif isinstance(index, str):
self.data[index] = value
elif isinstance(index, tuple) and len(index) == 2 and index[1
] in self.data.columns:
self.data.loc[index] = value
else:
assert isinstance(index, slice) or len(index) > 0
self.data[index] = value
def __delitem__(self, index):
return NotImplemented
def __iter__(self):
return self.data.itertuples(index=False)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@property
def start(self) ->pd.Series:
"""Get column 'start'."""
return self.data['start']
@property
def end(self) ->pd.Series:
"""Get column 'end'."""
return self.data['end']
<|reserved_special_token_0|>
def autosomes(self, also=None):
"""Select chromosomes w/ integer names, ignoring any 'chr' prefixes."""
is_auto = self.chromosome.str.match('(chr)?\\d+$', na=False)
if not is_auto.any():
return self
if also is not None:
if isinstance(also, pd.Series):
is_auto |= also
else:
if isinstance(also, str):
also = [also]
for a_chrom in also:
is_auto |= self.chromosome == a_chrom
return self[is_auto]
<|reserved_special_token_0|>
def by_chromosome(self) ->Iterator:
"""Iterate over bins grouped by chromosome name."""
for chrom, subtable in self.data.groupby('chromosome', sort=False):
yield chrom, self.as_dataframe(subtable)
def by_ranges(self, other, mode: str='outer', keep_empty: bool=True
) ->Iterator:
"""Group rows by another GenomicArray's bin coordinate ranges.
For example, this can be used to group SNVs by CNV segments.
Bins in this array that fall outside the other array's bins are skipped.
Parameters
----------
other : GenomicArray
Another GA instance.
mode : string
Determines what to do with bins that overlap a boundary of the
selection. Possible values are:
- ``inner``: Drop the bins on the selection boundary, don't emit them.
- ``outer``: Keep/emit those bins as they are.
- ``trim``: Emit those bins but alter their boundaries to match the
selection; the bin start or end position is replaced with the
selection boundary position.
keep_empty : bool
Whether to also yield `other` bins with no overlapping bins in
`self`, or to skip them when iterating.
Yields
------
tuple
(other bin, GenomicArray of overlapping rows in self)
"""
for bin_row, subrange in by_ranges(self.data, other.data, mode,
keep_empty):
if len(subrange):
yield bin_row, self.as_dataframe(subrange)
elif keep_empty:
yield bin_row, self.as_rows(subrange)
def coords(self, also: Union[str, Iterable[str]]=()):
"""Iterate over plain coordinates of each bin: chromosome, start, end.
Parameters
----------
also : str, or iterable of strings
Also include these columns from `self`, in addition to chromosome,
start, and end.
Example, yielding rows in BED format:
>>> probes.coords(also=["gene", "strand"])
"""
cols = list(GenomicArray._required_columns)
if also:
if isinstance(also, str):
cols.append(also)
else:
cols.extend(also)
coordframe = self.data.loc[:, cols]
return coordframe.itertuples(index=False)
<|reserved_special_token_0|>
def in_range(self, chrom: Optional[str]=None, start: Optional[Numeric]=
None, end: Optional[Numeric]=None, mode: str='outer'):
"""Get the GenomicArray portion within the given genomic range.
Parameters
----------
chrom : str or None
Chromosome name to select. Use None if `self` has only one
chromosome.
start : int or None
Start coordinate of range to select, in 0-based coordinates.
If None, start from 0.
end : int or None
End coordinate of range to select. If None, select to the end of the
chromosome.
mode : str
As in `by_ranges`: ``outer`` includes bins straddling the range
boundaries, ``trim`` additionally alters the straddling bins'
endpoints to match the range boundaries, and ``inner`` excludes
those bins.
Returns
-------
GenomicArray
The subset of `self` enclosed by the specified range.
"""
starts = [int(start)] if start is not None else None
ends = [int(end)] if end is not None else None
results = iter_ranges(self.data, chrom, starts, ends, mode)
return self.as_dataframe(next(results))
def in_ranges(self, chrom: Optional[str]=None, starts: Optional[
Sequence[Numeric]]=None, ends: Optional[Sequence[Numeric]]=None,
mode: str='outer'):
"""Get the GenomicArray portion within the specified ranges.
Similar to `in_ranges`, but concatenating the selections of all the
regions specified by the `starts` and `ends` arrays.
Parameters
----------
chrom : str or None
Chromosome name to select. Use None if `self` has only one
chromosome.
starts : int array, or None
Start coordinates of ranges to select, in 0-based coordinates.
If None, start from 0.
ends : int array, or None
End coordinates of ranges to select. If None, select to the end of the
chromosome. If `starts` and `ends` are both specified, they must be
arrays of equal length.
mode : str
As in `by_ranges`: ``outer`` includes bins straddling the range
boundaries, ``trim`` additionally alters the straddling bins'
endpoints to match the range boundaries, and ``inner`` excludes
those bins.
Returns
-------
GenomicArray
Concatenation of all the subsets of `self` enclosed by the specified
ranges.
"""
table = pd.concat(iter_ranges(self.data, chrom, starts, ends, mode),
sort=False)
return self.as_dataframe(table)
def into_ranges(self, other, column: str, default, summary_func:
Optional[Callable]=None):
"""Re-bin values from `column` into the corresponding ranges in `other`.
Match overlapping/intersecting rows from `other` to each row in `self`.
Then, within each range in `other`, extract the value(s) from `column`
in `self`, using the function `summary_func` to produce a single value
if multiple bins in `self` map to a single range in `other`.
For example, group SNVs (self) by CNV segments (other) and calculate the
median (summary_func) of each SNV group's allele frequencies.
Parameters
----------
other : GenomicArray
Ranges into which the overlapping values of `self` will be
summarized.
column : string
Column name in `self` to extract values from.
default
Value to assign to indices in `other` that do not overlap any bins in
`self`. Type should be the same as or compatible with the output
field specified by `column`, or the output of `summary_func`.
summary_func : callable, dict of string-to-callable, or None
Specify how to reduce 1 or more `other` rows into a single value for
the corresponding row in `self`.
- If callable, apply to the `column` field each group of rows in
`other` column.
- If a single-element dict of column name to callable, apply to that
field in `other` instead of `column`.
- If None, use an appropriate summarizing function for the datatype
of the `column` column in `other` (e.g. median of numbers,
concatenation of strings).
- If some other value, assign that value to `self` wherever there is
an overlap.
Returns
-------
pd.Series
The extracted and summarized values from `self` corresponding to
other's genomic ranges, the same length as `other`.
"""
if column not in self:
logging.warning("No '%s' column available for summary calculation",
column)
return pd.Series(np.repeat(default, len(other)))
return into_ranges(self.data, other.data, column, default, summary_func
)
def iter_ranges_of(self, other, column: str, mode: str='outer',
keep_empty: bool=True):
"""Group rows by another GenomicArray's bin coordinate ranges.
For example, this can be used to group SNVs by CNV segments.
Bins in this array that fall outside the other array's bins are skipped.
Parameters
----------
other : GenomicArray
Another GA instance.
column : string
Column name in `self` to extract values from.
mode : string
Determines what to do with bins that overlap a boundary of the
selection. Possible values are:
- ``inner``: Drop the bins on the selection boundary, don't emit them.
- ``outer``: Keep/emit those bins as they are.
- ``trim``: Emit those bins but alter their boundaries to match the
selection; the bin start or end position is replaced with the
selection boundary position.
keep_empty : bool
Whether to also yield `other` bins with no overlapping bins in
`self`, or to skip them when iterating.
Yields
------
tuple
(other bin, GenomicArray of overlapping rows in self)
"""
if column not in self.data.columns:
raise ValueError(f'No column named {column!r} in this object')
ser = self.data[column]
for slc in iter_slices(self.data, other.data, mode, keep_empty):
yield ser[slc]
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def copy(self):
"""Create an independent copy of this object."""
return self.as_dataframe(self.data.copy())
def add_columns(self, **columns):
"""Add the given columns to a copy of this GenomicArray.
Parameters
----------
**columns : array
Keyword arguments where the key is the new column's name and the
value is an array of the same length as `self` which will be the new
column's values.
Returns
-------
GenomicArray or subclass
A new instance of `self` with the given columns included in the
underlying dataframe.
"""
return self.as_dataframe(self.data.assign(**columns))
def keep_columns(self, colnames):
"""Extract a subset of columns, reusing this instance's metadata."""
colnames = self.data.columns.intersection(colnames)
return self.__class__(self.data.loc[:, colnames], self.meta.copy())
<|reserved_special_token_0|>
def filter(self, func=None, **kwargs):
"""Take a subset of rows where the given condition is true.
Parameters
----------
func : callable
A boolean function which will be applied to each row to keep rows
where the result is True.
**kwargs : string
Keyword arguments like ``chromosome="chr7"`` or
``gene="Antitarget"``, which will keep rows where the keyed field
equals the specified value.
Return
------
GenomicArray
Subset of `self` where the specified condition is True.
"""
table = self.data
if func is not None:
table = table[table.apply(func, axis=1)]
for key, val in list(kwargs.items()):
assert key in self
table = table[table[key] == val]
return self.as_dataframe(table)
def shuffle(self):
"""Randomize the order of bins in this array (in-place)."""
order = np.arange(len(self.data))
np.random.seed(679661)
np.random.shuffle(order)
self.data = self.data.iloc[order]
return order
def sort(self):
"""Sort this array's bins in-place, with smart chromosome ordering."""
sort_key = self.data.chromosome.apply(sorter_chrom)
self.data = self.data.assign(_sort_key_=sort_key).sort_values(by=[
'_sort_key_', 'start', 'end'], kind='mergesort').drop('_sort_key_',
axis=1).reset_index(drop=True)
def sort_columns(self):
"""Sort this array's columns in-place, per class definition."""
extra_cols = []
for col in self.data.columns:
if col not in self._required_columns:
extra_cols.append(col)
sorted_colnames = list(self._required_columns) + sorted(extra_cols)
assert len(sorted_colnames) == len(self.data.columns)
self.data = self.data.reindex(columns=sorted_colnames)
def cut(self, other, combine=None):
"""Split this array's regions at the boundaries in `other`."""
return NotImplemented
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def merge(self, bp: int=0, stranded: bool=False, combine: Optional[Dict
[str, Callable]]=None):
"""Merge adjacent or overlapping regions into single rows.
Similar to 'bedtools merge'.
"""
return self.as_dataframe(merge(self.data, bp, stranded, combine))
def resize_ranges(self, bp: int, chrom_sizes: Optional[Mapping[str,
Numeric]]=None):
"""Resize each genomic bin by a fixed number of bases at each end.
Bin 'start' values have a minimum of 0, and `chrom_sizes` can
specify each chromosome's maximum 'end' value.
Similar to 'bedtools slop'.
Parameters
----------
bp : int
Number of bases in each direction to expand or shrink each bin.
Applies to 'start' and 'end' values symmetrically, and may be
positive (expand) or negative (shrink).
chrom_sizes : dict of string-to-int
Chromosome name to length in base pairs. If given, all chromosomes
in `self` must be included.
"""
table = self.data
limits = {'lower': 0}
if chrom_sizes:
limits['upper'] = self.chromosome.replace(chrom_sizes)
table = table.assign(start=(table['start'] - bp).clip(**limits),
end=(table['end'] + bp).clip(**limits))
if bp < 0:
ok_size = table['end'] - table['start'] > 0
logging.debug('Dropping %d bins with size <= 0', (~ok_size).sum())
table = table[ok_size]
return self.as_dataframe(table.copy())
<|reserved_special_token_0|>
def subdivide(self, avg_size: int, min_size: int=0, verbose: bool=False):
"""Split this array's regions into roughly equal-sized sub-regions."""
return self.as_dataframe(subdivide(self.data, avg_size, min_size,
verbose))
<|reserved_special_token_0|>
def total_range_size(self) ->int:
"""Total number of bases covered by all (merged) regions."""
if not len(self):
return 0
regions = merge(self.data, bp=1)
return regions.end.sum() - regions.start.sum()
def _get_gene_map(self) ->OrderedDict:
"""Map unique gene names to their indices in this array.
Returns
-------
OrderedDict
An (ordered) dictionary of unique gene names and the data indices of
their segments in the order of occurrence (genomic order).
"""
if 'gene' not in self.data:
return OrderedDict()
genes: OrderedDict = OrderedDict()
for idx, genestr in self.data['gene'].items():
if pd.isnull(genestr):
continue
for gene in genestr.split(','):
if gene not in genes:
genes[gene] = []
genes[gene].append(idx)
return genes
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GenomicArray:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, data_table: Optional[Union[Sequence, pd.DataFrame]],
meta_dict: Optional[Mapping]=None):
if data_table is None or isinstance(data_table, (list, tuple)
) and not len(data_table) or isinstance(data_table, pd.DataFrame
) and not len(data_table.columns):
data_table = self._make_blank()
else:
if not isinstance(data_table, pd.DataFrame):
data_table = pd.DataFrame(data_table)
if not all(c in data_table.columns for c in self._required_columns
):
raise ValueError('data table must have at least columns ' +
f'{self._required_columns!r}; got {tuple(data_table.columns)!r}'
)
if len(data_table):
def ok_dtype(col, dtype):
return isinstance(data_table[col].iat[0], dtype)
else:
def ok_dtype(col, dtype):
return data_table[col].dtype == np.dtype(dtype)
recast_cols = {col: dtype for col, dtype in zip(self.
_required_columns, self._required_dtypes) if not ok_dtype(
col, dtype)}
if recast_cols:
data_table = data_table.astype(recast_cols)
self.data = data_table
self.meta = dict(meta_dict) if meta_dict is not None and len(meta_dict
) else {}
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@classmethod
def from_rows(cls, rows: Iterable, columns: Optional[Sequence[str]]=
    None, meta_dict: Optional[Mapping]=None):
    """Create a new instance from a list of rows, as tuples or arrays."""
    if columns is None:
        # Default to the class's minimal schema.
        columns = cls._required_columns
    table = pd.DataFrame.from_records(rows, columns=columns)
    return cls(table, meta_dict)

def as_columns(self, **columns):
    """Wrap the named columns in this instance's metadata."""
    # Delegates to from_columns, which also canonicalizes column order.
    return self.__class__.from_columns(columns, self.meta)

def as_dataframe(self, dframe: pd.DataFrame, reset_index: bool=False):
    """Wrap the given pandas DataFrame in this instance's metadata."""
    if reset_index:
        dframe = dframe.reset_index(drop=True)
    # Metadata is copied so the new object is independent of `self`.
    return self.__class__(dframe, self.meta.copy())

def as_series(self, arraylike: Iterable) ->pd.Series:
    """Coerce `arraylike` to a Series with this instance's index."""
    return pd.Series(arraylike, index=self.data.index)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __eq__(self, other) ->bool:
    # Equal only to another instance of the same class with an identical
    # table (pandas `equals`: NaNs in matching positions compare equal).
    return isinstance(other, self.__class__) and self.data.equals(other
        .data)

def __len__(self) ->int:
    return len(self.data)

def __contains__(self, key) ->bool:
    # Membership tests column names, not genomic regions.
    return key in self.data.columns

def __getitem__(self, index) ->Union[pd.Series, pd.DataFrame]:
    """Access a portion of the data.

    Cases:

    - single integer: a row, as pd.Series
    - string row name: a column, as pd.Series
    - a boolean array: masked rows, as_dataframe
    - tuple of integers: selected rows, as_dataframe
    """
    if isinstance(index, int):
        return self.data.iloc[index]
    if isinstance(index, str):
        return self.data[index]
    # (row-selector, column-name) pair, e.g. garr[mask, 'gene'].
    # NOTE: this branch must run before the generic fallthrough below.
    if isinstance(index, tuple) and len(index) == 2 and index[1
        ] in self.data.columns:
        return self.data.loc[index]
    if isinstance(index, slice):
        return self.as_dataframe(self.data[index])
    try:
        # None or an empty selection yields an empty wrapped array
        # rather than an error.
        if isinstance(index, type(None)) or len(index) == 0:
            empty = pd.DataFrame(columns=self.data.columns)
            return self.as_dataframe(empty)
    except TypeError as exc:
        raise TypeError(
            f'object of type {type(index)!r} cannot be used as an index into a {self.__class__.__name__}'
            ) from exc
    return self.as_dataframe(self.data[index])

def __setitem__(self, index, value):
    """Assign to a portion of the data."""
    if isinstance(index, int):
        self.data.iloc[index] = value
    elif isinstance(index, str):
        self.data[index] = value
    elif isinstance(index, tuple) and len(index) == 2 and index[1
        ] in self.data.columns:
        self.data.loc[index] = value
    else:
        # Reject empty selectors; boolean masks/row lists assign in bulk.
        assert isinstance(index, slice) or len(index) > 0
        self.data[index] = value

def __delitem__(self, index):
    # Deletion is deliberately unsupported.
    return NotImplemented

def __iter__(self):
    # Iterate rows as lightweight namedtuples (the index is omitted).
    return self.data.itertuples(index=False)
<|reserved_special_token_0|>
# NOTE(review): coordinates appear to be 0-based (see `in_range` docs);
# confirm half-open vs. closed intervals against the interval helpers.
@property
def chromosome(self) ->pd.Series:
    """Get column 'chromosome'."""
    return self.data['chromosome']

@property
def start(self) ->pd.Series:
    """Get column 'start'."""
    return self.data['start']

@property
def end(self) ->pd.Series:
    """Get column 'end'."""
    return self.data['end']
<|reserved_special_token_0|>
def autosomes(self, also=None):
    """Select chromosomes w/ integer names, ignoring any 'chr' prefixes."""
    is_auto = self.chromosome.str.match('(chr)?\\d+$', na=False)
    if not is_auto.any():
        # No numeric chromosome names at all -- return everything unchanged.
        return self
    if also is not None:
        if isinstance(also, pd.Series):
            # Boolean mask aligned with `self`: OR it in directly.
            is_auto |= also
        else:
            # Accept a single chromosome name or an iterable of names.
            extra_chroms = [also] if isinstance(also, str) else also
            for extra_chrom in extra_chroms:
                is_auto |= self.chromosome == extra_chrom
    return self[is_auto]
<|reserved_special_token_0|>
def by_chromosome(self) ->Iterator:
    """Iterate over bins grouped by chromosome name."""
    # sort=False keeps the table's existing chromosome order.
    for chrom, subtable in self.data.groupby('chromosome', sort=False):
        yield chrom, self.as_dataframe(subtable)

def by_ranges(self, other, mode: str='outer', keep_empty: bool=True
    ) ->Iterator:
    """Group rows by another GenomicArray's bin coordinate ranges.

    For example, this can be used to group SNVs by CNV segments.

    Bins in this array that fall outside the other array's bins are skipped.

    Parameters
    ----------
    other : GenomicArray
        Another GA instance.
    mode : string
        Determines what to do with bins that overlap a boundary of the
        selection. Possible values are:

        - ``inner``: Drop the bins on the selection boundary, don't emit them.
        - ``outer``: Keep/emit those bins as they are.
        - ``trim``: Emit those bins but alter their boundaries to match the
          selection; the bin start or end position is replaced with the
          selection boundary position.
    keep_empty : bool
        Whether to also yield `other` bins with no overlapping bins in
        `self`, or to skip them when iterating.

    Yields
    ------
    tuple
        (other bin, GenomicArray of overlapping rows in self)
    """
    # Interval logic is delegated to the module-level `by_ranges` helper.
    for bin_row, subrange in by_ranges(self.data, other.data, mode,
        keep_empty):
        if len(subrange):
            yield bin_row, self.as_dataframe(subrange)
        elif keep_empty:
            # Emit empty groups too, so callers can align with `other`.
            yield bin_row, self.as_rows(subrange)

def coords(self, also: Union[str, Iterable[str]]=()):
    """Iterate over plain coordinates of each bin: chromosome, start, end.

    Parameters
    ----------
    also : str, or iterable of strings
        Also include these columns from `self`, in addition to chromosome,
        start, and end.

    Example, yielding rows in BED format:

    >>> probes.coords(also=["gene", "strand"])
    """
    cols = list(GenomicArray._required_columns)
    if also:
        # Accept a single extra column name or any iterable of names.
        if isinstance(also, str):
            cols.append(also)
        else:
            cols.extend(also)
    coordframe = self.data.loc[:, cols]
    return coordframe.itertuples(index=False)
<|reserved_special_token_0|>
def in_range(self, chrom: Optional[str]=None, start: Optional[Numeric]=
    None, end: Optional[Numeric]=None, mode: str='outer'):
    """Get the GenomicArray portion within the given genomic range.

    Parameters
    ----------
    chrom : str or None
        Chromosome name to select. Use None if `self` has only one
        chromosome.
    start : int or None
        Start coordinate of range to select, in 0-based coordinates.
        If None, start from 0.
    end : int or None
        End coordinate of range to select. If None, select to the end of the
        chromosome.
    mode : str
        As in `by_ranges`: ``outer`` includes bins straddling the range
        boundaries, ``trim`` additionally alters the straddling bins'
        endpoints to match the range boundaries, and ``inner`` excludes
        those bins.

    Returns
    -------
    GenomicArray
        The subset of `self` enclosed by the specified range.
    """
    # iter_ranges expects parallel coordinate lists; wrap the scalars.
    starts = [int(start)] if start is not None else None
    ends = [int(end)] if end is not None else None
    results = iter_ranges(self.data, chrom, starts, ends, mode)
    # Only one range was requested, so the first yielded table is the answer.
    return self.as_dataframe(next(results))
def in_ranges(self, chrom: Optional[str]=None, starts: Optional[
    Sequence[Numeric]]=None, ends: Optional[Sequence[Numeric]]=None,
    mode: str='outer'):
    """Get the GenomicArray portion within the specified ranges.

    Similar to `in_ranges`, but concatenating the selections of all the
    regions specified by the `starts` and `ends` arrays.

    Parameters
    ----------
    chrom : str or None
        Chromosome name to select. Use None if `self` has only one
        chromosome.
    starts : int array, or None
        Start coordinates of ranges to select, in 0-based coordinates.
        If None, start from 0.
    ends : int array, or None
        End coordinates of ranges to select. If None, select to the end of the
        chromosome. If `starts` and `ends` are both specified, they must be
        arrays of equal length.
    mode : str
        As in `by_ranges`: ``outer`` includes bins straddling the range
        boundaries, ``trim`` additionally alters the straddling bins'
        endpoints to match the range boundaries, and ``inner`` excludes
        those bins.

    Returns
    -------
    GenomicArray
        Concatenation of all the subsets of `self` enclosed by the specified
        ranges.
    """
    # Concatenate per-range selections in order; sort=False preserves the
    # helper's row/column ordering.
    table = pd.concat(iter_ranges(self.data, chrom, starts, ends, mode),
        sort=False)
    return self.as_dataframe(table)
def into_ranges(self, other, column: str, default, summary_func:
    Optional[Callable]=None):
    """Re-bin values from `column` into the corresponding ranges in `other`.

    Match overlapping/intersecting rows from `other` to each row in `self`.
    Then, within each range in `other`, extract the value(s) from `column`
    in `self`, using the function `summary_func` to produce a single value
    if multiple bins in `self` map to a single range in `other`.

    For example, group SNVs (self) by CNV segments (other) and calculate the
    median (summary_func) of each SNV group's allele frequencies.

    Parameters
    ----------
    other : GenomicArray
        Ranges into which the overlapping values of `self` will be
        summarized.
    column : string
        Column name in `self` to extract values from.
    default
        Value to assign to indices in `other` that do not overlap any bins in
        `self`. Type should be the same as or compatible with the output
        field specified by `column`, or the output of `summary_func`.
    summary_func : callable, dict of string-to-callable, or None
        Specify how to reduce 1 or more `other` rows into a single value for
        the corresponding row in `self`.

        - If callable, apply to the `column` field each group of rows in
          `other` column.
        - If a single-element dict of column name to callable, apply to that
          field in `other` instead of `column`.
        - If None, use an appropriate summarizing function for the datatype
          of the `column` column in `other` (e.g. median of numbers,
          concatenation of strings).
        - If some other value, assign that value to `self` wherever there is
          an overlap.

    Returns
    -------
    pd.Series
        The extracted and summarized values from `self` corresponding to
        other's genomic ranges, the same length as `other`.
    """
    if column not in self:
        # Gracefully degrade: emit the default for every row of `other`.
        logging.warning("No '%s' column available for summary calculation",
            column)
        return pd.Series(np.repeat(default, len(other)))
    return into_ranges(self.data, other.data, column, default, summary_func
        )
def iter_ranges_of(self, other, column: str, mode: str='outer',
    keep_empty: bool=True):
    """Group values of `column` by another GenomicArray's bin ranges.

    For example, this can be used to group SNVs by CNV segments.

    Bins in this array that fall outside the other array's bins are skipped.

    Parameters
    ----------
    other : GenomicArray
        Another GA instance.
    column : string
        Column name in `self` to extract values from.
    mode : string
        Determines what to do with bins that overlap a boundary of the
        selection. Possible values are:

        - ``inner``: Drop the bins on the selection boundary, don't emit them.
        - ``outer``: Keep/emit those bins as they are.
        - ``trim``: Emit those bins but alter their boundaries to match the
          selection; the bin start or end position is replaced with the
          selection boundary position.
    keep_empty : bool
        Whether to also yield `other` bins with no overlapping bins in
        `self`, or to skip them when iterating.

    Yields
    ------
    pd.Series
        The values of `column` from the rows of `self` that overlap each
        successive bin in `other`.
    """
    if column not in self.data.columns:
        raise ValueError(f'No column named {column!r} in this object')
    ser = self.data[column]
    # iter_slices yields one positional row-selector per bin in `other`.
    for slc in iter_slices(self.data, other.data, mode, keep_empty):
        yield ser[slc]
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def copy(self):
    """Create an independent copy of this object."""
    # as_dataframe also copies the metadata dict.
    return self.as_dataframe(self.data.copy())

def add_columns(self, **columns):
    """Add the given columns to a copy of this GenomicArray.

    Parameters
    ----------
    **columns : array
        Keyword arguments where the key is the new column's name and the
        value is an array of the same length as `self` which will be the new
        column's values.

    Returns
    -------
    GenomicArray or subclass
        A new instance of `self` with the given columns included in the
        underlying dataframe.
    """
    # DataFrame.assign returns a new frame; `self` is left unmodified.
    return self.as_dataframe(self.data.assign(**columns))

def keep_columns(self, colnames):
    """Extract a subset of columns, reusing this instance's metadata."""
    # Intersect first so that requesting a missing column is not an error.
    colnames = self.data.columns.intersection(colnames)
    return self.__class__(self.data.loc[:, colnames], self.meta.copy())
<|reserved_special_token_0|>
def filter(self, func=None, **kwargs):
    """Take a subset of rows where the given condition is true.

    Parameters
    ----------
    func : callable
        A boolean function which will be applied to each row to keep rows
        where the result is True.
    **kwargs : string
        Keyword arguments like ``chromosome="chr7"`` or
        ``gene="Antitarget"``, which will keep rows where the keyed field
        equals the specified value.

    Return
    ------
    GenomicArray
        Subset of `self` where the specified condition is True.

    Raises
    ------
    ValueError
        If a keyword key is not a column of this array.
    """
    table = self.data
    if func is not None:
        # Row-wise predicate; apply(axis=1) passes each row as a Series.
        table = table[table.apply(func, axis=1)]
    for key, val in kwargs.items():
        # Explicit check instead of `assert`: asserts are stripped under
        # `python -O`, which would silently skip this validation.
        if key not in table.columns:
            raise ValueError(f'No column named {key!r} in this object')
        table = table[table[key] == val]
    return self.as_dataframe(table)
def shuffle(self):
    """Randomize the order of bins in this array (in-place)."""
    order = np.arange(len(self.data))
    # Fixed seed makes the shuffle reproducible across runs.
    # NOTE(review): this reseeds numpy's *global* RNG as a side effect.
    np.random.seed(679661)
    np.random.shuffle(order)
    self.data = self.data.iloc[order]
    # Return the permutation so callers can repeat or invert it.
    return order

def sort(self):
    """Sort this array's bins in-place, with smart chromosome ordering."""
    # sorter_chrom provides the chromosome ordering key; mergesort keeps
    # the sort stable for rows with equal keys.
    sort_key = self.data.chromosome.apply(sorter_chrom)
    self.data = self.data.assign(_sort_key_=sort_key).sort_values(by=[
        '_sort_key_', 'start', 'end'], kind='mergesort').drop('_sort_key_',
        axis=1).reset_index(drop=True)

def sort_columns(self):
    """Sort this array's columns in-place, per class definition."""
    # Required columns first (declared order), then extras alphabetically.
    extra_cols = []
    for col in self.data.columns:
        if col not in self._required_columns:
            extra_cols.append(col)
    sorted_colnames = list(self._required_columns) + sorted(extra_cols)
    assert len(sorted_colnames) == len(self.data.columns)
    self.data = self.data.reindex(columns=sorted_colnames)

def cut(self, other, combine=None):
    """Split this array's regions at the boundaries in `other`."""
    # Placeholder -- not supported by this class.
    return NotImplemented
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def merge(self, bp: int=0, stranded: bool=False, combine: Optional[Dict
    [str, Callable]]=None):
    """Merge adjacent or overlapping regions into single rows.

    Similar to 'bedtools merge'.
    """
    # Interval arithmetic is delegated to the module-level `merge` helper.
    return self.as_dataframe(merge(self.data, bp, stranded, combine))

def resize_ranges(self, bp: int, chrom_sizes: Optional[Mapping[str,
    Numeric]]=None):
    """Resize each genomic bin by a fixed number of bases at each end.

    Bin 'start' values have a minimum of 0, and `chrom_sizes` can
    specify each chromosome's maximum 'end' value.

    Similar to 'bedtools slop'.

    Parameters
    ----------
    bp : int
        Number of bases in each direction to expand or shrink each bin.
        Applies to 'start' and 'end' values symmetrically, and may be
        positive (expand) or negative (shrink).
    chrom_sizes : dict of string-to-int
        Chromosome name to length in base pairs. If given, all chromosomes
        in `self` must be included.
    """
    table = self.data
    # Both 'start' and 'end' are clipped to [0, chromosome length].
    limits = {'lower': 0}
    if chrom_sizes:
        limits['upper'] = self.chromosome.replace(chrom_sizes)
    table = table.assign(start=(table['start'] - bp).clip(**limits),
        end=(table['end'] + bp).clip(**limits))
    if bp < 0:
        # Shrinking can collapse small bins to zero/negative size; drop them.
        ok_size = table['end'] - table['start'] > 0
        logging.debug('Dropping %d bins with size <= 0', (~ok_size).sum())
        table = table[ok_size]
    return self.as_dataframe(table.copy())

def squash(self, combine=None):
    """Combine some groups of rows, by some criteria, into single rows."""
    # Placeholder -- not supported by this class.
    return NotImplemented

def subdivide(self, avg_size: int, min_size: int=0, verbose: bool=False):
    """Split this array's regions into roughly equal-sized sub-regions."""
    return self.as_dataframe(subdivide(self.data, avg_size, min_size,
        verbose))
<|reserved_special_token_0|>
def total_range_size(self) ->int:
    """Total number of bases covered by all (merged) regions."""
    if not len(self):
        return 0
    # Merge with bp=1 so book-ended regions coalesce and no base is
    # counted twice; then sum(end) - sum(start) == total covered bases.
    regions = merge(self.data, bp=1)
    return regions.end.sum() - regions.start.sum()

def _get_gene_map(self) ->OrderedDict:
    """Map unique gene names to their indices in this array.

    Returns
    -------
    OrderedDict
        An (ordered) dictionary of unique gene names and the data indices of
        their segments in the order of occurrence (genomic order).
    """
    if 'gene' not in self.data:
        return OrderedDict()
    genes: OrderedDict = OrderedDict()
    for idx, genestr in self.data['gene'].items():
        if pd.isnull(genestr):
            continue
        # One row may carry several comma-separated gene names.
        for gene in genestr.split(','):
            if gene not in genes:
                genes[gene] = []
            genes[gene].append(idx)
    return genes
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GenomicArray:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, data_table: Optional[Union[Sequence, pd.DataFrame]],
    meta_dict: Optional[Mapping]=None):
    """Validate and wrap `data_table`, coercing required columns' dtypes.

    A None/empty `data_table` is replaced by a blank frame holding only the
    class's required columns (via `_make_blank`).
    """
    # Treat None, an empty list/tuple, or a column-less DataFrame as "blank".
    if data_table is None or isinstance(data_table, (list, tuple)
        ) and not len(data_table) or isinstance(data_table, pd.DataFrame
        ) and not len(data_table.columns):
        data_table = self._make_blank()
    else:
        if not isinstance(data_table, pd.DataFrame):
            data_table = pd.DataFrame(data_table)
        if not all(c in data_table.columns for c in self._required_columns
            ):
            raise ValueError('data table must have at least columns ' +
                f'{self._required_columns!r}; got {tuple(data_table.columns)!r}'
                )
        # Dtype check: with rows present, inspect the first value directly;
        # on an empty frame fall back to comparing the column's dtype.
        if len(data_table):

            def ok_dtype(col, dtype):
                return isinstance(data_table[col].iat[0], dtype)
        else:

            def ok_dtype(col, dtype):
                return data_table[col].dtype == np.dtype(dtype)
        # Recast only the required columns whose dtype doesn't match spec.
        recast_cols = {col: dtype for col, dtype in zip(self.
            _required_columns, self._required_dtypes) if not ok_dtype(
            col, dtype)}
        if recast_cols:
            data_table = data_table.astype(recast_cols)
    self.data = data_table
    # Copy metadata defensively; empty or None metadata becomes a new dict.
    self.meta = dict(meta_dict) if meta_dict is not None and len(meta_dict
        ) else {}
@classmethod
def _make_blank(cls) ->pd.DataFrame:
    """Create an empty dataframe with the columns required by this class."""
    spec = list(zip(cls._required_columns, cls._required_dtypes))
    try:
        arr = np.zeros(0, dtype=spec)
        return pd.DataFrame(arr)
    except TypeError as exc:
        # Bug fix: the message was a plain literal '{exc}: {spec}' (missing
        # the f-prefix), so the placeholders were never interpolated.
        raise TypeError(f'{exc}: {spec}') from exc
@classmethod
def from_columns(cls, columns: Mapping[str, Iterable], meta_dict:
    Optional[Mapping]=None):
    """Create a new instance from column arrays, given as a dict."""
    table = pd.DataFrame.from_dict(columns)
    ary = cls(table, meta_dict)
    # Canonicalize column order: required columns first, extras sorted.
    ary.sort_columns()
    return ary

@classmethod
def from_rows(cls, rows: Iterable, columns: Optional[Sequence[str]]=
    None, meta_dict: Optional[Mapping]=None):
    """Create a new instance from a list of rows, as tuples or arrays."""
    if columns is None:
        # Default to the class's minimal schema.
        columns = cls._required_columns
    table = pd.DataFrame.from_records(rows, columns=columns)
    return cls(table, meta_dict)

def as_columns(self, **columns):
    """Wrap the named columns in this instance's metadata."""
    # Delegates to from_columns, which also canonicalizes column order.
    return self.__class__.from_columns(columns, self.meta)

def as_dataframe(self, dframe: pd.DataFrame, reset_index: bool=False):
    """Wrap the given pandas DataFrame in this instance's metadata."""
    if reset_index:
        dframe = dframe.reset_index(drop=True)
    # Metadata is copied so the new object is independent of `self`.
    return self.__class__(dframe, self.meta.copy())

def as_series(self, arraylike: Iterable) ->pd.Series:
    """Coerce `arraylike` to a Series with this instance's index."""
    return pd.Series(arraylike, index=self.data.index)

def as_rows(self, rows: Iterable):
    """Wrap the given rows in this instance's metadata."""
    try:
        out = self.from_rows(rows, columns=self.data.columns, meta_dict
            =self.meta)
    except AssertionError as exc:
        # Mismatched row width: report both sides for easier debugging.
        columns = self.data.columns.tolist()
        firstrow = next(iter(rows))
        raise RuntimeError(
            f'Passed {len(columns)} columns {columns!r}, but {len(firstrow)} elements in first row: {firstrow}'
            ) from exc
    return out
def __bool__(self) ->bool:
    # Truthy iff the table has at least one row.
    return bool(len(self.data))

def __eq__(self, other) ->bool:
    # Equal only to another instance of the same class with an identical
    # table (pandas `equals`: NaNs in matching positions compare equal).
    return isinstance(other, self.__class__) and self.data.equals(other
        .data)

def __len__(self) ->int:
    return len(self.data)

def __contains__(self, key) ->bool:
    # Membership tests column names, not genomic regions.
    return key in self.data.columns

def __getitem__(self, index) ->Union[pd.Series, pd.DataFrame]:
    """Access a portion of the data.

    Cases:

    - single integer: a row, as pd.Series
    - string row name: a column, as pd.Series
    - a boolean array: masked rows, as_dataframe
    - tuple of integers: selected rows, as_dataframe
    """
    if isinstance(index, int):
        return self.data.iloc[index]
    if isinstance(index, str):
        return self.data[index]
    # (row-selector, column-name) pair, e.g. garr[mask, 'gene'].
    # NOTE: this branch must run before the generic fallthrough below.
    if isinstance(index, tuple) and len(index) == 2 and index[1
        ] in self.data.columns:
        return self.data.loc[index]
    if isinstance(index, slice):
        return self.as_dataframe(self.data[index])
    try:
        # None or an empty selection yields an empty wrapped array
        # rather than an error.
        if isinstance(index, type(None)) or len(index) == 0:
            empty = pd.DataFrame(columns=self.data.columns)
            return self.as_dataframe(empty)
    except TypeError as exc:
        raise TypeError(
            f'object of type {type(index)!r} cannot be used as an index into a {self.__class__.__name__}'
            ) from exc
    return self.as_dataframe(self.data[index])

def __setitem__(self, index, value):
    """Assign to a portion of the data."""
    if isinstance(index, int):
        self.data.iloc[index] = value
    elif isinstance(index, str):
        self.data[index] = value
    elif isinstance(index, tuple) and len(index) == 2 and index[1
        ] in self.data.columns:
        self.data.loc[index] = value
    else:
        # Reject empty selectors; boolean masks/row lists assign in bulk.
        assert isinstance(index, slice) or len(index) > 0
        self.data[index] = value

def __delitem__(self, index):
    # Deletion is deliberately unsupported.
    return NotImplemented

def __iter__(self):
    # Iterate rows as lightweight namedtuples (the index is omitted).
    return self.data.itertuples(index=False)
<|reserved_special_token_0|>
@property
def chromosome(self) ->pd.Series:
    """Get column 'chromosome'."""
    return self.data['chromosome']

@property
def start(self) ->pd.Series:
    """Get column 'start'."""
    return self.data['start']

@property
def end(self) ->pd.Series:
    """Get column 'end'."""
    return self.data['end']

@property
def sample_id(self) ->pd.Series:
    """Get metadata field 'sample_id' (None if unset)."""
    return self.meta.get('sample_id')

def autosomes(self, also=None):
    """Select chromosomes w/ integer names, ignoring any 'chr' prefixes."""
    is_auto = self.chromosome.str.match('(chr)?\\d+$', na=False)
    if not is_auto.any():
        # No numeric chromosome names at all -- return everything unchanged.
        return self
    if also is not None:
        if isinstance(also, pd.Series):
            # Boolean mask aligned with `self`: OR it in directly.
            is_auto |= also
        else:
            # Accept a single chromosome name or an iterable of names.
            if isinstance(also, str):
                also = [also]
            for a_chrom in also:
                is_auto |= self.chromosome == a_chrom
    return self[is_auto]
<|reserved_special_token_0|>
def by_chromosome(self) ->Iterator:
    """Iterate over bins grouped by chromosome name."""
    # sort=False keeps the table's existing chromosome order.
    for chrom, subtable in self.data.groupby('chromosome', sort=False):
        yield chrom, self.as_dataframe(subtable)

def by_ranges(self, other, mode: str='outer', keep_empty: bool=True
    ) ->Iterator:
    """Group rows by another GenomicArray's bin coordinate ranges.

    For example, this can be used to group SNVs by CNV segments.

    Bins in this array that fall outside the other array's bins are skipped.

    Parameters
    ----------
    other : GenomicArray
        Another GA instance.
    mode : string
        Determines what to do with bins that overlap a boundary of the
        selection. Possible values are:

        - ``inner``: Drop the bins on the selection boundary, don't emit them.
        - ``outer``: Keep/emit those bins as they are.
        - ``trim``: Emit those bins but alter their boundaries to match the
          selection; the bin start or end position is replaced with the
          selection boundary position.
    keep_empty : bool
        Whether to also yield `other` bins with no overlapping bins in
        `self`, or to skip them when iterating.

    Yields
    ------
    tuple
        (other bin, GenomicArray of overlapping rows in self)
    """
    # Interval logic is delegated to the module-level `by_ranges` helper.
    for bin_row, subrange in by_ranges(self.data, other.data, mode,
        keep_empty):
        if len(subrange):
            yield bin_row, self.as_dataframe(subrange)
        elif keep_empty:
            # Emit empty groups too, so callers can align with `other`.
            yield bin_row, self.as_rows(subrange)

def coords(self, also: Union[str, Iterable[str]]=()):
    """Iterate over plain coordinates of each bin: chromosome, start, end.

    Parameters
    ----------
    also : str, or iterable of strings
        Also include these columns from `self`, in addition to chromosome,
        start, and end.

    Example, yielding rows in BED format:

    >>> probes.coords(also=["gene", "strand"])
    """
    cols = list(GenomicArray._required_columns)
    if also:
        # Accept a single extra column name or any iterable of names.
        if isinstance(also, str):
            cols.append(also)
        else:
            cols.extend(also)
    coordframe = self.data.loc[:, cols]
    return coordframe.itertuples(index=False)

def labels(self) ->pd.Series:
    """Get chromosomal coordinates as genomic range labels."""
    # One label per row; formatting is delegated to the `to_label` helper.
    return self.data.apply(to_label, axis=1)
def in_range(self, chrom: Optional[str]=None, start: Optional[Numeric]=
    None, end: Optional[Numeric]=None, mode: str='outer'):
    """Get the GenomicArray portion within the given genomic range.

    Parameters
    ----------
    chrom : str or None
        Chromosome name to select. Use None if `self` has only one
        chromosome.
    start : int or None
        Start coordinate of range to select, in 0-based coordinates.
        If None, start from 0.
    end : int or None
        End coordinate of range to select. If None, select to the end of the
        chromosome.
    mode : str
        As in `by_ranges`: ``outer`` includes bins straddling the range
        boundaries, ``trim`` additionally alters the straddling bins'
        endpoints to match the range boundaries, and ``inner`` excludes
        those bins.

    Returns
    -------
    GenomicArray
        The subset of `self` enclosed by the specified range.
    """
    # iter_ranges expects parallel coordinate lists; wrap the scalars.
    starts = [int(start)] if start is not None else None
    ends = [int(end)] if end is not None else None
    results = iter_ranges(self.data, chrom, starts, ends, mode)
    # Only one range was requested, so the first yielded table is the answer.
    return self.as_dataframe(next(results))
def in_ranges(self, chrom: Optional[str]=None, starts: Optional[
    Sequence[Numeric]]=None, ends: Optional[Sequence[Numeric]]=None,
    mode: str='outer'):
    """Get the GenomicArray portion within the specified ranges.

    Similar to `in_ranges`, but concatenating the selections of all the
    regions specified by the `starts` and `ends` arrays.

    Parameters
    ----------
    chrom : str or None
        Chromosome name to select. Use None if `self` has only one
        chromosome.
    starts : int array, or None
        Start coordinates of ranges to select, in 0-based coordinates.
        If None, start from 0.
    ends : int array, or None
        End coordinates of ranges to select. If None, select to the end of the
        chromosome. If `starts` and `ends` are both specified, they must be
        arrays of equal length.
    mode : str
        As in `by_ranges`: ``outer`` includes bins straddling the range
        boundaries, ``trim`` additionally alters the straddling bins'
        endpoints to match the range boundaries, and ``inner`` excludes
        those bins.

    Returns
    -------
    GenomicArray
        Concatenation of all the subsets of `self` enclosed by the specified
        ranges.
    """
    # Concatenate per-range selections in order; sort=False preserves the
    # helper's row/column ordering.
    table = pd.concat(iter_ranges(self.data, chrom, starts, ends, mode),
        sort=False)
    return self.as_dataframe(table)
def into_ranges(self, other, column: str, default, summary_func:
    Optional[Callable]=None):
    """Re-bin values from `column` into the corresponding ranges in `other`.

    Match overlapping/intersecting rows from `other` to each row in `self`.
    Then, within each range in `other`, extract the value(s) from `column`
    in `self`, using the function `summary_func` to produce a single value
    if multiple bins in `self` map to a single range in `other`.

    For example, group SNVs (self) by CNV segments (other) and calculate the
    median (summary_func) of each SNV group's allele frequencies.

    Parameters
    ----------
    other : GenomicArray
        Ranges into which the overlapping values of `self` will be
        summarized.
    column : string
        Column name in `self` to extract values from.
    default
        Value to assign to indices in `other` that do not overlap any bins in
        `self`. Type should be the same as or compatible with the output
        field specified by `column`, or the output of `summary_func`.
    summary_func : callable, dict of string-to-callable, or None
        Specify how to reduce 1 or more `other` rows into a single value for
        the corresponding row in `self`.

        - If callable, apply to the `column` field each group of rows in
          `other` column.
        - If a single-element dict of column name to callable, apply to that
          field in `other` instead of `column`.
        - If None, use an appropriate summarizing function for the datatype
          of the `column` column in `other` (e.g. median of numbers,
          concatenation of strings).
        - If some other value, assign that value to `self` wherever there is
          an overlap.

    Returns
    -------
    pd.Series
        The extracted and summarized values from `self` corresponding to
        other's genomic ranges, the same length as `other`.
    """
    if column not in self:
        # Gracefully degrade: emit the default for every row of `other`.
        logging.warning("No '%s' column available for summary calculation",
            column)
        return pd.Series(np.repeat(default, len(other)))
    return into_ranges(self.data, other.data, column, default, summary_func
        )
def iter_ranges_of(self, other, column: str, mode: str='outer',
    keep_empty: bool=True):
    """Group values of `column` by another GenomicArray's bin ranges.

    For example, this can be used to group SNVs by CNV segments.

    Bins in this array that fall outside the other array's bins are skipped.

    Parameters
    ----------
    other : GenomicArray
        Another GA instance.
    column : string
        Column name in `self` to extract values from.
    mode : string
        Determines what to do with bins that overlap a boundary of the
        selection. Possible values are:

        - ``inner``: Drop the bins on the selection boundary, don't emit them.
        - ``outer``: Keep/emit those bins as they are.
        - ``trim``: Emit those bins but alter their boundaries to match the
          selection; the bin start or end position is replaced with the
          selection boundary position.
    keep_empty : bool
        Whether to also yield `other` bins with no overlapping bins in
        `self`, or to skip them when iterating.

    Yields
    ------
    pd.Series
        The values of `column` from the rows of `self` that overlap each
        successive bin in `other`.
    """
    if column not in self.data.columns:
        raise ValueError(f'No column named {column!r} in this object')
    ser = self.data[column]
    # iter_slices yields one positional row-selector per bin in `other`.
    for slc in iter_slices(self.data, other.data, mode, keep_empty):
        yield ser[slc]
def add(self, other):
    """Combine this array's data with another GenomicArray (in-place).

    Any optional columns must match between both arrays.
    """
    if not isinstance(other, self.__class__):
        raise ValueError(
            f'Argument (type {type(other)}) is not a {self.__class__} instance'
            )
    if not len(other.data):
        # Nothing to merge in; leave this array untouched.
        return
    self.data = pd.concat([self.data, other.data], ignore_index=True)
    self.sort()
<|reserved_special_token_0|>
def copy(self):
    """Create an independent copy of this object."""
    # as_dataframe also copies the metadata dict.
    return self.as_dataframe(self.data.copy())

def add_columns(self, **columns):
    """Add the given columns to a copy of this GenomicArray.

    Parameters
    ----------
    **columns : array
        Keyword arguments where the key is the new column's name and the
        value is an array of the same length as `self` which will be the new
        column's values.

    Returns
    -------
    GenomicArray or subclass
        A new instance of `self` with the given columns included in the
        underlying dataframe.
    """
    # DataFrame.assign returns a new frame; `self` is left unmodified.
    return self.as_dataframe(self.data.assign(**columns))

def keep_columns(self, colnames):
    """Extract a subset of columns, reusing this instance's metadata."""
    # Intersect first so that requesting a missing column is not an error.
    colnames = self.data.columns.intersection(colnames)
    return self.__class__(self.data.loc[:, colnames], self.meta.copy())

def drop_extra_columns(self):
    """Remove any optional columns from this GenomicArray.

    Returns
    -------
    GenomicArray or subclass
        A new copy with only the minimal set of columns required by the
        class (e.g. chromosome, start, end for GenomicArray; may be more for
        subclasses).
    """
    table = self.data.loc[:, self._required_columns]
    return self.as_dataframe(table)
def filter(self, func=None, **kwargs):
    """Take a subset of rows where the given condition is true.

    Parameters
    ----------
    func : callable
        A boolean function which will be applied to each row to keep rows
        where the result is True.
    **kwargs : string
        Keyword arguments like ``chromosome="chr7"`` or
        ``gene="Antitarget"``, which will keep rows where the keyed field
        equals the specified value.

    Return
    ------
    GenomicArray
        Subset of `self` where the specified condition is True.

    Raises
    ------
    ValueError
        If a keyword key is not a column of this array.
    """
    table = self.data
    if func is not None:
        # Row-wise predicate; apply(axis=1) passes each row as a Series.
        table = table[table.apply(func, axis=1)]
    for key, val in kwargs.items():
        # Explicit check instead of `assert`: asserts are stripped under
        # `python -O`, which would silently skip this validation.
        if key not in table.columns:
            raise ValueError(f'No column named {key!r} in this object')
        table = table[table[key] == val]
    return self.as_dataframe(table)
def shuffle(self):
    """Randomize the order of bins in this array (in-place)."""
    order = np.arange(len(self.data))
    # Fixed seed makes the shuffle reproducible across runs.
    # NOTE(review): this reseeds numpy's *global* RNG as a side effect.
    np.random.seed(679661)
    np.random.shuffle(order)
    self.data = self.data.iloc[order]
    # Return the permutation so callers can repeat or invert it.
    return order

def sort(self):
    """Sort this array's bins in-place, with smart chromosome ordering."""
    # sorter_chrom provides the chromosome ordering key; mergesort keeps
    # the sort stable for rows with equal keys.
    sort_key = self.data.chromosome.apply(sorter_chrom)
    self.data = self.data.assign(_sort_key_=sort_key).sort_values(by=[
        '_sort_key_', 'start', 'end'], kind='mergesort').drop('_sort_key_',
        axis=1).reset_index(drop=True)

def sort_columns(self):
    """Sort this array's columns in-place, per class definition."""
    # Required columns first (declared order), then extras alphabetically.
    extra_cols = []
    for col in self.data.columns:
        if col not in self._required_columns:
            extra_cols.append(col)
    sorted_colnames = list(self._required_columns) + sorted(extra_cols)
    assert len(sorted_colnames) == len(self.data.columns)
    self.data = self.data.reindex(columns=sorted_colnames)

def cut(self, other, combine=None):
    """Split this array's regions at the boundaries in `other`."""
    # Placeholder -- not supported by this class.
    return NotImplemented
<|reserved_special_token_0|>
    def intersection(self, other, mode: str='outer'):
        """Select the bins in `self` that overlap the regions in `other`.

        The extra fields of `self`, but not `other`, are retained in the output.
        """
        if mode == 'trim':
            # 'trim' alters bin endpoints, so each overlap group must be
            # materialized individually via by_ranges.
            chunks = [chunk.data for _, chunk in self.by_ranges(other, mode
                =mode, keep_empty=False)]
            return self.as_dataframe(pd.concat(chunks))
        # 'inner'/'outer' keep rows unmodified, so a faster slice-based
        # selection suffices.
        slices = iter_slices(self.data, other.data, mode, False)
        indices = np.concatenate(list(slices))
        return self.as_dataframe(self.data.loc[indices])
    def merge(self, bp: int=0, stranded: bool=False, combine: Optional[Dict
        [str, Callable]]=None):
        """Merge adjacent or overlapping regions into single rows.

        Similar to 'bedtools merge'. Delegates to the module-level `merge`;
        `bp` is the maximum gap to bridge, `combine` maps column names to
        reducer callables.
        """
        return self.as_dataframe(merge(self.data, bp, stranded, combine))
    def resize_ranges(self, bp: int, chrom_sizes: Optional[Mapping[str,
        Numeric]]=None):
        """Resize each genomic bin by a fixed number of bases at each end.

        Bin 'start' values have a minimum of 0, and `chrom_sizes` can
        specify each chromosome's maximum 'end' value.
        Similar to 'bedtools slop'.

        Parameters
        ----------
        bp : int
            Number of bases in each direction to expand or shrink each bin.
            Applies to 'start' and 'end' values symmetrically, and may be
            positive (expand) or negative (shrink).
        chrom_sizes : dict of string-to-int
            Chromosome name to length in base pairs. If given, all chromosomes
            in `self` must be included.
        """
        table = self.data
        limits = {'lower': 0}
        if chrom_sizes:
            # Per-row upper bound: each row clips to its own chromosome length.
            limits['upper'] = self.chromosome.replace(chrom_sizes)
        table = table.assign(start=(table['start'] - bp).clip(**limits),
            end=(table['end'] + bp).clip(**limits))
        if bp < 0:
            # Shrinking can collapse bins to zero/negative width; drop those.
            ok_size = table['end'] - table['start'] > 0
            logging.debug('Dropping %d bins with size <= 0', (~ok_size).sum())
            table = table[ok_size]
        return self.as_dataframe(table.copy())
    def squash(self, combine=None):
        """Combine some groups of rows, by some criteria, into single rows.

        Not implemented in the base class.
        """
        # NOTE(review): returns the NotImplemented sentinel, not an exception.
        return NotImplemented
    def subdivide(self, avg_size: int, min_size: int=0, verbose: bool=False):
        """Split this array's regions into roughly equal-sized sub-regions.

        Delegates to the module-level `subdivide` (from .subdivide).
        """
        return self.as_dataframe(subdivide(self.data, avg_size, min_size,
            verbose))
    def subtract(self, other):
        """Remove the overlapping regions in `other` from this array.

        Delegates to the module-level `subtract` (from .subtract).
        """
        return self.as_dataframe(subtract(self.data, other.data))
    def total_range_size(self) ->int:
        """Total number of bases covered by all (merged) regions."""
        if not len(self):
            return 0
        # bp=1 bridges abutting regions so shared boundaries are not
        # double-counted; sum of (end - start) over merged regions.
        regions = merge(self.data, bp=1)
        return regions.end.sum() - regions.start.sum()
    def _get_gene_map(self) ->OrderedDict:
        """Map unique gene names to their indices in this array.

        Returns
        -------
        OrderedDict
            An (ordered) dictionary of unique gene names and the data indices of
            their segments in the order of occurrence (genomic order).
        """
        if 'gene' not in self.data:
            return OrderedDict()
        genes: OrderedDict = OrderedDict()
        for idx, genestr in self.data['gene'].items():
            # Skip bins with no gene label (NaN/None).
            if pd.isnull(genestr):
                continue
            # A bin's 'gene' field may hold several comma-separated names;
            # record this row's index under each of them.
            for gene in genestr.split(','):
                if gene not in genes:
                    genes[gene] = []
                genes[gene].append(idx)
        return genes
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GenomicArray:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, data_table: Optional[Union[Sequence, pd.DataFrame]],
meta_dict: Optional[Mapping]=None):
if data_table is None or isinstance(data_table, (list, tuple)
) and not len(data_table) or isinstance(data_table, pd.DataFrame
) and not len(data_table.columns):
data_table = self._make_blank()
else:
if not isinstance(data_table, pd.DataFrame):
data_table = pd.DataFrame(data_table)
if not all(c in data_table.columns for c in self._required_columns
):
raise ValueError('data table must have at least columns ' +
f'{self._required_columns!r}; got {tuple(data_table.columns)!r}'
)
if len(data_table):
def ok_dtype(col, dtype):
return isinstance(data_table[col].iat[0], dtype)
else:
def ok_dtype(col, dtype):
return data_table[col].dtype == np.dtype(dtype)
recast_cols = {col: dtype for col, dtype in zip(self.
_required_columns, self._required_dtypes) if not ok_dtype(
col, dtype)}
if recast_cols:
data_table = data_table.astype(recast_cols)
self.data = data_table
self.meta = dict(meta_dict) if meta_dict is not None and len(meta_dict
) else {}
@classmethod
def _make_blank(cls) ->pd.DataFrame:
"""Create an empty dataframe with the columns required by this class."""
spec = list(zip(cls._required_columns, cls._required_dtypes))
try:
arr = np.zeros(0, dtype=spec)
return pd.DataFrame(arr)
except TypeError as exc:
raise TypeError('{exc}: {spec}') from exc
@classmethod
def from_columns(cls, columns: Mapping[str, Iterable], meta_dict:
Optional[Mapping]=None):
"""Create a new instance from column arrays, given as a dict."""
table = pd.DataFrame.from_dict(columns)
ary = cls(table, meta_dict)
ary.sort_columns()
return ary
@classmethod
def from_rows(cls, rows: Iterable, columns: Optional[Sequence[str]]=
None, meta_dict: Optional[Mapping]=None):
"""Create a new instance from a list of rows, as tuples or arrays."""
if columns is None:
columns = cls._required_columns
table = pd.DataFrame.from_records(rows, columns=columns)
return cls(table, meta_dict)
def as_columns(self, **columns):
"""Wrap the named columns in this instance's metadata."""
return self.__class__.from_columns(columns, self.meta)
def as_dataframe(self, dframe: pd.DataFrame, reset_index: bool=False):
"""Wrap the given pandas DataFrame in this instance's metadata."""
if reset_index:
dframe = dframe.reset_index(drop=True)
return self.__class__(dframe, self.meta.copy())
def as_series(self, arraylike: Iterable) ->pd.Series:
"""Coerce `arraylike` to a Series with this instance's index."""
return pd.Series(arraylike, index=self.data.index)
def as_rows(self, rows: Iterable):
"""Wrap the given rows in this instance's metadata."""
try:
out = self.from_rows(rows, columns=self.data.columns, meta_dict
=self.meta)
except AssertionError as exc:
columns = self.data.columns.tolist()
firstrow = next(iter(rows))
raise RuntimeError(
f'Passed {len(columns)} columns {columns!r}, but {len(firstrow)} elements in first row: {firstrow}'
) from exc
return out
def __bool__(self) ->bool:
return bool(len(self.data))
def __eq__(self, other) ->bool:
return isinstance(other, self.__class__) and self.data.equals(other
.data)
def __len__(self) ->int:
return len(self.data)
def __contains__(self, key) ->bool:
return key in self.data.columns
def __getitem__(self, index) ->Union[pd.Series, pd.DataFrame]:
"""Access a portion of the data.
Cases:
- single integer: a row, as pd.Series
- string row name: a column, as pd.Series
- a boolean array: masked rows, as_dataframe
- tuple of integers: selected rows, as_dataframe
"""
if isinstance(index, int):
return self.data.iloc[index]
if isinstance(index, str):
return self.data[index]
if isinstance(index, tuple) and len(index) == 2 and index[1
] in self.data.columns:
return self.data.loc[index]
if isinstance(index, slice):
return self.as_dataframe(self.data[index])
try:
if isinstance(index, type(None)) or len(index) == 0:
empty = pd.DataFrame(columns=self.data.columns)
return self.as_dataframe(empty)
except TypeError as exc:
raise TypeError(
f'object of type {type(index)!r} cannot be used as an index into a {self.__class__.__name__}'
) from exc
return self.as_dataframe(self.data[index])
def __setitem__(self, index, value):
"""Assign to a portion of the data."""
if isinstance(index, int):
self.data.iloc[index] = value
elif isinstance(index, str):
self.data[index] = value
elif isinstance(index, tuple) and len(index) == 2 and index[1
] in self.data.columns:
self.data.loc[index] = value
else:
assert isinstance(index, slice) or len(index) > 0
self.data[index] = value
def __delitem__(self, index):
return NotImplemented
def __iter__(self):
return self.data.itertuples(index=False)
<|reserved_special_token_0|>
@property
def chromosome(self) ->pd.Series:
"""Get column 'chromosome'."""
return self.data['chromosome']
@property
def start(self) ->pd.Series:
"""Get column 'start'."""
return self.data['start']
@property
def end(self) ->pd.Series:
"""Get column 'end'."""
return self.data['end']
@property
def sample_id(self) ->pd.Series:
"""Get metadata field 'sample_id'."""
return self.meta.get('sample_id')
def autosomes(self, also=None):
"""Select chromosomes w/ integer names, ignoring any 'chr' prefixes."""
is_auto = self.chromosome.str.match('(chr)?\\d+$', na=False)
if not is_auto.any():
return self
if also is not None:
if isinstance(also, pd.Series):
is_auto |= also
else:
if isinstance(also, str):
also = [also]
for a_chrom in also:
is_auto |= self.chromosome == a_chrom
return self[is_auto]
<|reserved_special_token_0|>
def by_chromosome(self) ->Iterator:
"""Iterate over bins grouped by chromosome name."""
for chrom, subtable in self.data.groupby('chromosome', sort=False):
yield chrom, self.as_dataframe(subtable)
def by_ranges(self, other, mode: str='outer', keep_empty: bool=True
) ->Iterator:
"""Group rows by another GenomicArray's bin coordinate ranges.
For example, this can be used to group SNVs by CNV segments.
Bins in this array that fall outside the other array's bins are skipped.
Parameters
----------
other : GenomicArray
Another GA instance.
mode : string
Determines what to do with bins that overlap a boundary of the
selection. Possible values are:
- ``inner``: Drop the bins on the selection boundary, don't emit them.
- ``outer``: Keep/emit those bins as they are.
- ``trim``: Emit those bins but alter their boundaries to match the
selection; the bin start or end position is replaced with the
selection boundary position.
keep_empty : bool
Whether to also yield `other` bins with no overlapping bins in
`self`, or to skip them when iterating.
Yields
------
tuple
(other bin, GenomicArray of overlapping rows in self)
"""
for bin_row, subrange in by_ranges(self.data, other.data, mode,
keep_empty):
if len(subrange):
yield bin_row, self.as_dataframe(subrange)
elif keep_empty:
yield bin_row, self.as_rows(subrange)
def coords(self, also: Union[str, Iterable[str]]=()):
"""Iterate over plain coordinates of each bin: chromosome, start, end.
Parameters
----------
also : str, or iterable of strings
Also include these columns from `self`, in addition to chromosome,
start, and end.
Example, yielding rows in BED format:
>>> probes.coords(also=["gene", "strand"])
"""
cols = list(GenomicArray._required_columns)
if also:
if isinstance(also, str):
cols.append(also)
else:
cols.extend(also)
coordframe = self.data.loc[:, cols]
return coordframe.itertuples(index=False)
def labels(self) ->pd.Series:
"""Get chromosomal coordinates as genomic range labels."""
return self.data.apply(to_label, axis=1)
def in_range(self, chrom: Optional[str]=None, start: Optional[Numeric]=
None, end: Optional[Numeric]=None, mode: str='outer'):
"""Get the GenomicArray portion within the given genomic range.
Parameters
----------
chrom : str or None
Chromosome name to select. Use None if `self` has only one
chromosome.
start : int or None
Start coordinate of range to select, in 0-based coordinates.
If None, start from 0.
end : int or None
End coordinate of range to select. If None, select to the end of the
chromosome.
mode : str
As in `by_ranges`: ``outer`` includes bins straddling the range
boundaries, ``trim`` additionally alters the straddling bins'
endpoints to match the range boundaries, and ``inner`` excludes
those bins.
Returns
-------
GenomicArray
The subset of `self` enclosed by the specified range.
"""
starts = [int(start)] if start is not None else None
ends = [int(end)] if end is not None else None
results = iter_ranges(self.data, chrom, starts, ends, mode)
return self.as_dataframe(next(results))
def in_ranges(self, chrom: Optional[str]=None, starts: Optional[
Sequence[Numeric]]=None, ends: Optional[Sequence[Numeric]]=None,
mode: str='outer'):
"""Get the GenomicArray portion within the specified ranges.
Similar to `in_ranges`, but concatenating the selections of all the
regions specified by the `starts` and `ends` arrays.
Parameters
----------
chrom : str or None
Chromosome name to select. Use None if `self` has only one
chromosome.
starts : int array, or None
Start coordinates of ranges to select, in 0-based coordinates.
If None, start from 0.
ends : int array, or None
End coordinates of ranges to select. If None, select to the end of the
chromosome. If `starts` and `ends` are both specified, they must be
arrays of equal length.
mode : str
As in `by_ranges`: ``outer`` includes bins straddling the range
boundaries, ``trim`` additionally alters the straddling bins'
endpoints to match the range boundaries, and ``inner`` excludes
those bins.
Returns
-------
GenomicArray
Concatenation of all the subsets of `self` enclosed by the specified
ranges.
"""
table = pd.concat(iter_ranges(self.data, chrom, starts, ends, mode),
sort=False)
return self.as_dataframe(table)
    def into_ranges(self, other, column: str, default, summary_func:
        Optional[Callable]=None):
        """Re-bin values from `column` into the corresponding ranges in `other`.

        Match overlapping/intersecting rows from `other` to each row in `self`.
        Then, within each range in `other`, extract the value(s) from `column`
        in `self`, using the function `summary_func` to produce a single value
        if multiple bins in `self` map to a single range in `other`.

        For example, group SNVs (self) by CNV segments (other) and calculate the
        median (summary_func) of each SNV group's allele frequencies.

        Parameters
        ----------
        other : GenomicArray
            Ranges into which the overlapping values of `self` will be
            summarized.
        column : string
            Column name in `self` to extract values from.
        default
            Value to assign to indices in `other` that do not overlap any bins
            in `self`. Type should be the same as or compatible with the output
            field specified by `column`, or the output of `summary_func`.
        summary_func : callable, dict of string-to-callable, or None
            Specify how to reduce 1 or more `other` rows into a single value for
            the corresponding row in `self`.

            - If callable, apply to the `column` field of each group of rows.
            - If a single-element dict of column name to callable, apply to that
              field instead of `column`.
            - If None, use an appropriate summarizing function for the datatype
              of the `column` column (e.g. median of numbers, concatenation of
              strings).
            - If some other value, assign that value wherever there is an
              overlap.

        Returns
        -------
        pd.Series
            The extracted and summarized values from `self` corresponding to
            other's genomic ranges, the same length as `other`.
        """
        if column not in self:
            logging.warning("No '%s' column available for summary calculation",
                column)
            # NOTE(review): this fallback Series has a fresh RangeIndex, which
            # may not align with other.data's index -- confirm callers only
            # rely on positional order here.
            return pd.Series(np.repeat(default, len(other)))
        # Delegate to the module-level into_ranges (from .intersect).
        return into_ranges(self.data, other.data, column, default, summary_func
            )
    def iter_ranges_of(self, other, column: str, mode: str='outer',
        keep_empty: bool=True):
        """Yield values of `column` grouped by `other`'s bin coordinate ranges.

        For example, this can be used to group SNVs by CNV segments.
        Values in this array that fall outside the other array's bins are
        skipped.

        Parameters
        ----------
        other : GenomicArray
            Another GA instance.
        column : string
            Column name in `self` to extract values from.
        mode : string
            Determines what to do with bins that overlap a boundary of the
            selection. Possible values are:

            - ``inner``: Drop the bins on the selection boundary, don't emit
              them.
            - ``outer``: Keep/emit those bins as they are.
            - ``trim``: Emit those bins but alter their boundaries to match the
              selection.
        keep_empty : bool
            Whether to also yield (empty) selections for `other` bins with no
            overlapping bins in `self`, or to skip them when iterating.

        Yields
        ------
        pd.Series
            Values of `column` for the rows of `self` overlapping each bin in
            `other`.
        """
        if column not in self.data.columns:
            raise ValueError(f'No column named {column!r} in this object')
        ser = self.data[column]
        # iter_slices (from .intersect) yields one row-indexer per `other` bin.
        for slc in iter_slices(self.data, other.data, mode, keep_empty):
            yield ser[slc]
    def add(self, other):
        """Combine this array's data with another GenomicArray (in-place).

        Any optional columns must match between both arrays.

        Raises
        ------
        ValueError
            If `other` is not an instance of this class.
        """
        if not isinstance(other, self.__class__):
            raise ValueError(
                f'Argument (type {type(other)}) is not a {self.__class__} instance'
                )
        if len(other.data):
            # Concatenate and restore genomic order; no-op if `other` is empty.
            self.data = pd.concat([self.data, other.data], ignore_index=True)
            self.sort()
<|reserved_special_token_0|>
    def copy(self):
        """Create an independent copy of this object.

        The underlying table is deep-copied; metadata is shallow-copied
        (as_dataframe applies `self.meta.copy()`).
        """
        return self.as_dataframe(self.data.copy())
    def add_columns(self, **columns):
        """Add the given columns to a copy of this GenomicArray.

        Parameters
        ----------
        **columns : array
            Keyword arguments where the key is the new column's name and the
            value is an array of the same length as `self` which will be the new
            column's values.

        Returns
        -------
        GenomicArray or subclass
            A new instance of `self` with the given columns included in the
            underlying dataframe.
        """
        return self.as_dataframe(self.data.assign(**columns))
    def keep_columns(self, colnames):
        """Extract a subset of columns, reusing this instance's metadata.

        Names in `colnames` that are not present are silently ignored
        (set intersection with the existing columns).
        """
        colnames = self.data.columns.intersection(colnames)
        return self.__class__(self.data.loc[:, colnames], self.meta.copy())
    def drop_extra_columns(self):
        """Remove any optional columns from this GenomicArray.

        Returns
        -------
        GenomicArray or subclass
            A new copy with only the minimal set of columns required by the
            class (e.g. chromosome, start, end for GenomicArray; may be more for
            subclasses).
        """
        table = self.data.loc[:, self._required_columns]
        return self.as_dataframe(table)
    def filter(self, func=None, **kwargs):
        """Take a subset of rows where the given condition is true.

        Parameters
        ----------
        func : callable
            A boolean function which will be applied to each row to keep rows
            where the result is True.
        **kwargs : string
            Keyword arguments like ``chromosome="chr7"`` or
            ``gene="Antitarget"``, which will keep rows where the keyed field
            equals the specified value.

        Return
        ------
        GenomicArray
            Subset of `self` where the specified condition is True.
        """
        table = self.data
        if func is not None:
            # Row-wise predicate; applied before any keyword filters.
            table = table[table.apply(func, axis=1)]
        for key, val in list(kwargs.items()):
            # NOTE(review): `assert` is stripped under `python -O`; an unknown
            # key would then raise KeyError below instead.
            assert key in self
            table = table[table[key] == val]
        return self.as_dataframe(table)
    def shuffle(self):
        """Randomize the order of bins in this array (in-place).

        Returns the permutation applied, as an index array.
        """
        order = np.arange(len(self.data))
        # Fixed seed: the shuffle is deterministic and reproducible per length.
        np.random.seed(679661)
        np.random.shuffle(order)
        self.data = self.data.iloc[order]
        return order
    def sort(self):
        """Sort this array's bins in-place, with smart chromosome ordering."""
        # sorter_chrom (imported from .chromsort) maps each chromosome name to
        # a sortable key, so e.g. numeric names order numerically.
        sort_key = self.data.chromosome.apply(sorter_chrom)
        # 'mergesort' is stable: rows tied on all keys keep their prior order.
        self.data = self.data.assign(_sort_key_=sort_key).sort_values(by=[
            '_sort_key_', 'start', 'end'], kind='mergesort').drop('_sort_key_',
            axis=1).reset_index(drop=True)
    def sort_columns(self):
        """Sort this array's columns in-place, per class definition.

        Required columns come first, in class order; extras follow, sorted.
        """
        extra_cols = []
        for col in self.data.columns:
            if col not in self._required_columns:
                extra_cols.append(col)
        sorted_colnames = list(self._required_columns) + sorted(extra_cols)
        # Sanity check: reindex below must not add or drop any column.
        assert len(sorted_colnames) == len(self.data.columns)
        self.data = self.data.reindex(columns=sorted_colnames)
    def cut(self, other, combine=None):
        """Split this array's regions at the boundaries in `other`.

        Not implemented in the base class.
        """
        # NOTE(review): returns the NotImplemented sentinel, not an exception.
        return NotImplemented
    def flatten(self, combine: Optional[Dict[str, Callable]]=None,
        split_columns: Optional[Iterable[str]]=None):
        """Split this array's regions where they overlap.

        Delegates to the module-level `flatten` (from .merge).
        """
        return self.as_dataframe(flatten(self.data, combine=combine,
            split_columns=split_columns))
    def intersection(self, other, mode: str='outer'):
        """Select the bins in `self` that overlap the regions in `other`.

        The extra fields of `self`, but not `other`, are retained in the output.
        """
        if mode == 'trim':
            # 'trim' alters bin endpoints, so each overlap group must be
            # materialized individually via by_ranges.
            chunks = [chunk.data for _, chunk in self.by_ranges(other, mode
                =mode, keep_empty=False)]
            return self.as_dataframe(pd.concat(chunks))
        # 'inner'/'outer' keep rows unmodified, so a faster slice-based
        # selection suffices.
        slices = iter_slices(self.data, other.data, mode, False)
        indices = np.concatenate(list(slices))
        return self.as_dataframe(self.data.loc[indices])
    def merge(self, bp: int=0, stranded: bool=False, combine: Optional[Dict
        [str, Callable]]=None):
        """Merge adjacent or overlapping regions into single rows.

        Similar to 'bedtools merge'. Delegates to the module-level `merge`;
        `bp` is the maximum gap to bridge, `combine` maps column names to
        reducer callables.
        """
        return self.as_dataframe(merge(self.data, bp, stranded, combine))
    def resize_ranges(self, bp: int, chrom_sizes: Optional[Mapping[str,
        Numeric]]=None):
        """Resize each genomic bin by a fixed number of bases at each end.

        Bin 'start' values have a minimum of 0, and `chrom_sizes` can
        specify each chromosome's maximum 'end' value.
        Similar to 'bedtools slop'.

        Parameters
        ----------
        bp : int
            Number of bases in each direction to expand or shrink each bin.
            Applies to 'start' and 'end' values symmetrically, and may be
            positive (expand) or negative (shrink).
        chrom_sizes : dict of string-to-int
            Chromosome name to length in base pairs. If given, all chromosomes
            in `self` must be included.
        """
        table = self.data
        limits = {'lower': 0}
        if chrom_sizes:
            # Per-row upper bound: each row clips to its own chromosome length.
            limits['upper'] = self.chromosome.replace(chrom_sizes)
        table = table.assign(start=(table['start'] - bp).clip(**limits),
            end=(table['end'] + bp).clip(**limits))
        if bp < 0:
            # Shrinking can collapse bins to zero/negative width; drop those.
            ok_size = table['end'] - table['start'] > 0
            logging.debug('Dropping %d bins with size <= 0', (~ok_size).sum())
            table = table[ok_size]
        return self.as_dataframe(table.copy())
    def squash(self, combine=None):
        """Combine some groups of rows, by some criteria, into single rows.

        Not implemented in the base class.
        """
        # NOTE(review): returns the NotImplemented sentinel, not an exception.
        return NotImplemented
    def subdivide(self, avg_size: int, min_size: int=0, verbose: bool=False):
        """Split this array's regions into roughly equal-sized sub-regions.

        Delegates to the module-level `subdivide` (from .subdivide).
        """
        return self.as_dataframe(subdivide(self.data, avg_size, min_size,
            verbose))
    def subtract(self, other):
        """Remove the overlapping regions in `other` from this array.

        Delegates to the module-level `subtract` (from .subtract).
        """
        return self.as_dataframe(subtract(self.data, other.data))
    def total_range_size(self) ->int:
        """Total number of bases covered by all (merged) regions."""
        if not len(self):
            return 0
        # bp=1 bridges abutting regions so shared boundaries are not
        # double-counted; sum of (end - start) over merged regions.
        regions = merge(self.data, bp=1)
        return regions.end.sum() - regions.start.sum()
    def _get_gene_map(self) ->OrderedDict:
        """Map unique gene names to their indices in this array.

        Returns
        -------
        OrderedDict
            An (ordered) dictionary of unique gene names and the data indices of
            their segments in the order of occurrence (genomic order).
        """
        if 'gene' not in self.data:
            return OrderedDict()
        genes: OrderedDict = OrderedDict()
        for idx, genestr in self.data['gene'].items():
            # Skip bins with no gene label (NaN/None).
            if pd.isnull(genestr):
                continue
            # A bin's 'gene' field may hold several comma-separated names;
            # record this row's index under each of them.
            for gene in genestr.split(','):
                if gene not in genes:
                    genes[gene] = []
                genes[gene].append(idx)
        return genes
<|reserved_special_token_1|>
"""Base class for an array of annotated genomic regions."""
import logging
from typing import Callable, Dict, Iterable, Iterator, Mapping, Optional, Sequence, Union
from collections import OrderedDict
import numpy as np
import pandas as pd
from .chromsort import sorter_chrom
from .intersect import by_ranges, into_ranges, iter_ranges, iter_slices, Numeric
from .merge import flatten, merge
from .rangelabel import to_label
from .subtract import subtract
from .subdivide import subdivide
class GenomicArray:
"""An array of genomic intervals. Base class for genomic data structures.
Can represent most BED-like tabular formats with arbitrary additional
columns.
"""
_required_columns = ("chromosome", "start", "end")
_required_dtypes = (str, int, int)
    def __init__(
        self,
        data_table: Optional[Union[Sequence, pd.DataFrame]],
        meta_dict: Optional[Mapping] = None,
    ):
        """Initialize from a table of genomic bins plus optional metadata.

        Parameters
        ----------
        data_table : pd.DataFrame, sequence of rows, or None
            Table with at least the columns in `_required_columns`. None, an
            empty list/tuple, or a column-less DataFrame yields a blank table.
        meta_dict : Mapping, optional
            Arbitrary metadata (e.g. 'sample_id'); copied into `self.meta`.

        Raises
        ------
        ValueError
            If `data_table` lacks any of the required columns.
        """
        # Validation
        if (
            data_table is None
            or (isinstance(data_table, (list, tuple)) and not len(data_table))
            or (isinstance(data_table, pd.DataFrame) and not len(data_table.columns))
        ):
            data_table = self._make_blank()
        else:
            if not isinstance(data_table, pd.DataFrame):
                # Rarely if ever needed -- prefer from_rows, from_columns, etc.
                data_table = pd.DataFrame(data_table)
            if not all(c in data_table.columns for c in self._required_columns):
                raise ValueError(
                    "data table must have at least columns "
                    + f"{self._required_columns!r}; got {tuple(data_table.columns)!r}"
                )
            # Ensure columns are the right type
            # (in case they've been automatically converted to the wrong type,
            # e.g. chromosome names as integers; genome coordinates as floats)
            if len(data_table):
                # Non-empty: check the first actual value's Python type.
                def ok_dtype(col, dtype):
                    return isinstance(data_table[col].iat[0], dtype)
            else:
                # Empty: fall back to comparing the column's declared dtype.
                def ok_dtype(col, dtype):
                    return data_table[col].dtype == np.dtype(dtype)
            recast_cols = {
                col: dtype
                for col, dtype in zip(self._required_columns, self._required_dtypes)
                if not ok_dtype(col, dtype)
            }
            if recast_cols:
                data_table = data_table.astype(recast_cols)
        self.data = data_table
        self.meta = dict(meta_dict) if meta_dict is not None and len(meta_dict) else {}
@classmethod
def _make_blank(cls) -> pd.DataFrame:
"""Create an empty dataframe with the columns required by this class."""
spec = list(zip(cls._required_columns, cls._required_dtypes))
try:
arr = np.zeros(0, dtype=spec)
return pd.DataFrame(arr)
except TypeError as exc:
raise TypeError(r"{exc}: {spec}") from exc
@classmethod
def from_columns(
cls, columns: Mapping[str, Iterable], meta_dict: Optional[Mapping] = None
):
"""Create a new instance from column arrays, given as a dict."""
table = pd.DataFrame.from_dict(columns)
ary = cls(table, meta_dict)
ary.sort_columns()
return ary
@classmethod
def from_rows(
cls,
rows: Iterable,
columns: Optional[Sequence[str]] = None,
meta_dict: Optional[Mapping] = None,
):
"""Create a new instance from a list of rows, as tuples or arrays."""
if columns is None:
columns = cls._required_columns
table = pd.DataFrame.from_records(rows, columns=columns)
return cls(table, meta_dict)
    def as_columns(self, **columns):
        """Wrap the named columns in this instance's metadata.

        Delegates to `from_columns`, so the column order is normalized.
        """
        return self.__class__.from_columns(columns, self.meta)
    def as_dataframe(self, dframe: pd.DataFrame, reset_index: bool = False):
        """Wrap the given pandas DataFrame in this instance's metadata.

        With `reset_index=True`, the returned table gets a fresh RangeIndex.
        """
        if reset_index:
            dframe = dframe.reset_index(drop=True)
        return self.__class__(dframe, self.meta.copy())
    def as_series(self, arraylike: Iterable) -> pd.Series:
        """Coerce `arraylike` to a Series with this instance's index."""
        return pd.Series(arraylike, index=self.data.index)
    def as_rows(self, rows: Iterable):
        """Wrap the given rows in this instance's metadata.

        Raises
        ------
        RuntimeError
            If row construction fails with an AssertionError (presumably a
            row-length/column-count mismatch -- confirm against from_rows).
        """
        try:
            out = self.from_rows(rows, columns=self.data.columns, meta_dict=self.meta)
        except AssertionError as exc:
            # Re-raise with a message that shows the column/row-length mismatch.
            columns = self.data.columns.tolist()
            firstrow = next(iter(rows))
            raise RuntimeError(
                f"Passed {len(columns)} columns {columns!r}, but "
                f"{len(firstrow)} elements in first row: {firstrow}"
            ) from exc
        return out
    # Container behaviour
    def __bool__(self) -> bool:
        """True if the array holds at least one bin."""
        return bool(len(self.data))
    def __eq__(self, other) -> bool:
        """Equal iff `other` is the same class and its table is element-equal."""
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable -- confirm that is intended.
        return isinstance(other, self.__class__) and self.data.equals(other.data)
    def __len__(self) -> int:
        """Number of bins (table rows)."""
        return len(self.data)
    def __contains__(self, key) -> bool:
        """Membership test on COLUMN names, not on bins."""
        return key in self.data.columns
    def __getitem__(self, index) -> Union[pd.Series, pd.DataFrame]:
        """Access a portion of the data.

        Cases:

        - single integer: a row, as pd.Series
        - string row name: a column, as pd.Series
        - (row index, column name) tuple: a single cell value
        - slice: selected rows, wrapped in this class
        - a boolean array: masked rows, wrapped in this class
        - tuple of integers: selected rows, wrapped in this class
        """
        if isinstance(index, int):
            # A single row
            return self.data.iloc[index]
        if isinstance(index, str):
            # A column, by name
            return self.data[index]
        if (
            isinstance(index, tuple)
            and len(index) == 2
            and index[1] in self.data.columns
        ):
            # Row index, column index -> cell value
            return self.data.loc[index]
        if isinstance(index, slice):
            return self.as_dataframe(self.data[index])
        # Iterable -- selected row indices or boolean array, probably
        try:
            # None or an empty selection yields an empty wrapped table.
            if isinstance(index, type(None)) or len(index) == 0:
                empty = pd.DataFrame(columns=self.data.columns)
                return self.as_dataframe(empty)
        except TypeError as exc:
            # len() failed: the index is neither sized nor a handled scalar.
            raise TypeError(
                f"object of type {type(index)!r} "
                f"cannot be used as an index into a {self.__class__.__name__}"
            ) from exc
        return self.as_dataframe(self.data[index])
    def __setitem__(self, index, value):
        """Assign to a portion of the data.

        Mirrors the index cases accepted by `__getitem__`.
        """
        if isinstance(index, int):
            # A single row, positionally.
            self.data.iloc[index] = value
        elif isinstance(index, str):
            # A whole column, by name.
            self.data[index] = value
        elif (
            isinstance(index, tuple)
            and len(index) == 2
            and index[1] in self.data.columns
        ):
            # (row index, column name) -> single cell.
            self.data.loc[index] = value
        else:
            # Slice or non-empty iterable of row selectors.
            assert isinstance(index, slice) or len(index) > 0
            self.data[index] = value
    def __delitem__(self, index):
        """Deletion is not supported in the base class."""
        return NotImplemented
    def __iter__(self):
        """Iterate rows as namedtuples (without the index)."""
        return self.data.itertuples(index=False)
    # NOTE(review): this binds the *builtin* next() as the __next__ method, so
    # calling next() on an instance would recurse -- looks vestigial; confirm
    # nothing relies on it before removing.
    __next__ = next
    # Read-only accessors for the core columns and common metadata.
    @property
    def chromosome(self) -> pd.Series:
        """Get column 'chromosome'."""
        return self.data["chromosome"]
    @property
    def start(self) -> pd.Series:
        """Get column 'start' (0-based start coordinates)."""
        return self.data["start"]
    @property
    def end(self) -> pd.Series:
        """Get column 'end' (end coordinates)."""
        return self.data["end"]
    @property
    def sample_id(self) -> pd.Series:
        """Get metadata field 'sample_id' (None if unset)."""
        return self.meta.get("sample_id")
# Traversal
def autosomes(self, also=None):
"""Select chromosomes w/ integer names, ignoring any 'chr' prefixes."""
is_auto = self.chromosome.str.match(r"(chr)?\d+$", na=False)
if not is_auto.any():
# The autosomes, if any, are not named with plain integers
return self
if also is not None:
if isinstance(also, pd.Series):
is_auto |= also
else:
# The assumption is that `also` is a single chromosome name or an iterable thereof.
if isinstance(also, str):
also = [also]
for a_chrom in also:
is_auto |= self.chromosome == a_chrom
return self[is_auto]
    def by_arm(self, min_gap_size: Union[int, float] = 1e5, min_arm_bins: int = 50):
        """Iterate over bins grouped by chromosome arm (inferred).

        The centromere is inferred per chromosome as the largest gap between
        consecutive bins, searched away from the chromosome ends; a split is
        emitted only if that gap is at least `min_gap_size` bases.

        Yields (chromosome name, arm subarray) -- two pairs per split
        chromosome, one pair otherwise.
        """
        # ENH:
        # - Accept GArray of actual centromere regions as input
        #   -> find largest gap (any size) within cmere region, split there
        # - Cache centromere locations once found
        self.data.chromosome = self.data.chromosome.astype(str)
        for chrom, subtable in self.data.groupby("chromosome", sort=False):
            # Keep at least `margin` bins on each side of a candidate split:
            # max of min_arm_bins and ~10% of the chromosome's bins.
            margin = max(min_arm_bins, int(round(0.1 * len(subtable))))
            if len(subtable) > 2 * margin + 1:
                # Found a candidate centromere
                # Gap between each bin's start and the previous bin's end,
                # restricted to the interior (margins excluded).
                gaps = (
                    subtable.start.values[margin + 1 : -margin]
                    - subtable.end.values[margin : -margin - 1]
                )
                cmere_idx = gaps.argmax() + margin + 1
                cmere_size = gaps[cmere_idx - margin - 1]
            else:
                cmere_idx = 0
                cmere_size = 0
            if cmere_idx and cmere_size >= min_gap_size:
                logging.debug(
                    "%s centromere at %d of %d bins (size %s)",
                    chrom,
                    cmere_idx,
                    len(subtable),
                    cmere_size,
                )
                p_arm = subtable.index[:cmere_idx]
                yield chrom, self.as_dataframe(subtable.loc[p_arm, :])
                q_arm = subtable.index[cmere_idx:]
                yield chrom, self.as_dataframe(subtable.loc[q_arm, :])
            else:
                # No centromere found -- emit the whole chromosome
                if cmere_idx:
                    logging.debug(
                        "%s: Ignoring centromere at %d of %d bins (size %s)",
                        chrom,
                        cmere_idx,
                        len(subtable),
                        cmere_size,
                    )
                else:
                    logging.debug("%s: Skipping centromere search, too small", chrom)
                yield chrom, self.as_dataframe(subtable)
    def by_chromosome(self) -> Iterator:
        """Iterate over bins grouped by chromosome name.

        Yields (chromosome name, subarray) in order of first appearance.
        """
        for chrom, subtable in self.data.groupby("chromosome", sort=False):
            yield chrom, self.as_dataframe(subtable)
    def by_ranges(
        self, other, mode: str = "outer", keep_empty: bool = True
    ) -> Iterator:
        """Group rows by another GenomicArray's bin coordinate ranges.

        For example, this can be used to group SNVs by CNV segments.
        Bins in this array that fall outside the other array's bins are skipped.

        Parameters
        ----------
        other : GenomicArray
            Another GA instance.
        mode : string
            Determines what to do with bins that overlap a boundary of the
            selection. Possible values are:

            - ``inner``: Drop the bins on the selection boundary, don't emit
              them.
            - ``outer``: Keep/emit those bins as they are.
            - ``trim``: Emit those bins but alter their boundaries to match the
              selection; the bin start or end position is replaced with the
              selection boundary position.
        keep_empty : bool
            Whether to also yield `other` bins with no overlapping bins in
            `self`, or to skip them when iterating.

        Yields
        ------
        tuple
            (other bin, GenomicArray of overlapping rows in self)
        """
        # by_ranges (from .intersect) pairs each `other` bin with its subtable.
        for bin_row, subrange in by_ranges(self.data, other.data, mode, keep_empty):
            if len(subrange):
                yield bin_row, self.as_dataframe(subrange)
            elif keep_empty:
                # Empty overlap: wrap via as_rows to keep column structure.
                yield bin_row, self.as_rows(subrange)
def coords(self, also: Union[str, Iterable[str]] = ()):
"""Iterate over plain coordinates of each bin: chromosome, start, end.
Parameters
----------
also : str, or iterable of strings
Also include these columns from `self`, in addition to chromosome,
start, and end.
Example, yielding rows in BED format:
>>> probes.coords(also=["gene", "strand"])
"""
cols = list(GenomicArray._required_columns)
if also:
if isinstance(also, str):
cols.append(also)
else:
cols.extend(also)
coordframe = self.data.loc[:, cols]
return coordframe.itertuples(index=False)
def labels(self) -> pd.Series:
"""Get chromosomal coordinates as genomic range labels."""
return self.data.apply(to_label, axis=1)
def in_range(
self,
chrom: Optional[str] = None,
start: Optional[Numeric] = None,
end: Optional[Numeric] = None,
mode: str = "outer",
):
"""Get the GenomicArray portion within the given genomic range.
Parameters
----------
chrom : str or None
Chromosome name to select. Use None if `self` has only one
chromosome.
start : int or None
Start coordinate of range to select, in 0-based coordinates.
If None, start from 0.
end : int or None
End coordinate of range to select. If None, select to the end of the
chromosome.
mode : str
As in `by_ranges`: ``outer`` includes bins straddling the range
boundaries, ``trim`` additionally alters the straddling bins'
endpoints to match the range boundaries, and ``inner`` excludes
those bins.
Returns
-------
GenomicArray
The subset of `self` enclosed by the specified range.
"""
starts = [int(start)] if start is not None else None
ends = [int(end)] if end is not None else None
results = iter_ranges(self.data, chrom, starts, ends, mode)
return self.as_dataframe(next(results))
def in_ranges(
self,
chrom: Optional[str] = None,
starts: Optional[Sequence[Numeric]] = None,
ends: Optional[Sequence[Numeric]] = None,
mode: str = "outer",
):
"""Get the GenomicArray portion within the specified ranges.
Similar to `in_ranges`, but concatenating the selections of all the
regions specified by the `starts` and `ends` arrays.
Parameters
----------
chrom : str or None
Chromosome name to select. Use None if `self` has only one
chromosome.
starts : int array, or None
Start coordinates of ranges to select, in 0-based coordinates.
If None, start from 0.
ends : int array, or None
End coordinates of ranges to select. If None, select to the end of the
chromosome. If `starts` and `ends` are both specified, they must be
arrays of equal length.
mode : str
As in `by_ranges`: ``outer`` includes bins straddling the range
boundaries, ``trim`` additionally alters the straddling bins'
endpoints to match the range boundaries, and ``inner`` excludes
those bins.
Returns
-------
GenomicArray
Concatenation of all the subsets of `self` enclosed by the specified
ranges.
"""
table = pd.concat(iter_ranges(self.data, chrom, starts, ends, mode), sort=False)
return self.as_dataframe(table)
def into_ranges(
self, other, column: str, default, summary_func: Optional[Callable] = None
):
"""Re-bin values from `column` into the corresponding ranges in `other`.
Match overlapping/intersecting rows from `other` to each row in `self`.
Then, within each range in `other`, extract the value(s) from `column`
in `self`, using the function `summary_func` to produce a single value
if multiple bins in `self` map to a single range in `other`.
For example, group SNVs (self) by CNV segments (other) and calculate the
median (summary_func) of each SNV group's allele frequencies.
Parameters
----------
other : GenomicArray
Ranges into which the overlapping values of `self` will be
summarized.
column : string
Column name in `self` to extract values from.
default
Value to assign to indices in `other` that do not overlap any bins in
`self`. Type should be the same as or compatible with the output
field specified by `column`, or the output of `summary_func`.
summary_func : callable, dict of string-to-callable, or None
Specify how to reduce 1 or more `other` rows into a single value for
the corresponding row in `self`.
- If callable, apply to the `column` field each group of rows in
`other` column.
- If a single-element dict of column name to callable, apply to that
field in `other` instead of `column`.
- If None, use an appropriate summarizing function for the datatype
of the `column` column in `other` (e.g. median of numbers,
concatenation of strings).
- If some other value, assign that value to `self` wherever there is
an overlap.
Returns
-------
pd.Series
The extracted and summarized values from `self` corresponding to
other's genomic ranges, the same length as `other`.
"""
if column not in self:
logging.warning("No '%s' column available for summary calculation", column)
return pd.Series(np.repeat(default, len(other)))
return into_ranges(self.data, other.data, column, default, summary_func)
def iter_ranges_of(
self, other, column: str, mode: str = "outer", keep_empty: bool = True
):
"""Group rows by another GenomicArray's bin coordinate ranges.
For example, this can be used to group SNVs by CNV segments.
Bins in this array that fall outside the other array's bins are skipped.
Parameters
----------
other : GenomicArray
Another GA instance.
column : string
Column name in `self` to extract values from.
mode : string
Determines what to do with bins that overlap a boundary of the
selection. Possible values are:
- ``inner``: Drop the bins on the selection boundary, don't emit them.
- ``outer``: Keep/emit those bins as they are.
- ``trim``: Emit those bins but alter their boundaries to match the
selection; the bin start or end position is replaced with the
selection boundary position.
keep_empty : bool
Whether to also yield `other` bins with no overlapping bins in
`self`, or to skip them when iterating.
Yields
------
tuple
(other bin, GenomicArray of overlapping rows in self)
"""
if column not in self.data.columns:
raise ValueError(f"No column named {column!r} in this object")
ser = self.data[column]
for slc in iter_slices(self.data, other.data, mode, keep_empty):
yield ser[slc]
    # Modification -- in-place and copy-returning editing operations
def add(self, other):
"""Combine this array's data with another GenomicArray (in-place).
Any optional columns must match between both arrays.
"""
if not isinstance(other, self.__class__):
raise ValueError(
f"Argument (type {type(other)}) is not a {self.__class__} instance"
)
if len(other.data):
self.data = pd.concat([self.data, other.data], ignore_index=True)
self.sort()
def concat(self, others):
"""Concatenate several GenomicArrays, keeping this array's metadata.
This array's data table is not implicitly included in the result.
"""
table = pd.concat([otr.data for otr in others], ignore_index=True)
result = self.as_dataframe(table)
result.sort()
return result
def copy(self):
"""Create an independent copy of this object."""
return self.as_dataframe(self.data.copy())
def add_columns(self, **columns):
"""Add the given columns to a copy of this GenomicArray.
Parameters
----------
**columns : array
Keyword arguments where the key is the new column's name and the
value is an array of the same length as `self` which will be the new
column's values.
Returns
-------
GenomicArray or subclass
A new instance of `self` with the given columns included in the
underlying dataframe.
"""
return self.as_dataframe(self.data.assign(**columns))
def keep_columns(self, colnames):
"""Extract a subset of columns, reusing this instance's metadata."""
colnames = self.data.columns.intersection(colnames)
return self.__class__(self.data.loc[:, colnames], self.meta.copy())
def drop_extra_columns(self):
"""Remove any optional columns from this GenomicArray.
Returns
-------
GenomicArray or subclass
A new copy with only the minimal set of columns required by the
class (e.g. chromosome, start, end for GenomicArray; may be more for
subclasses).
"""
table = self.data.loc[:, self._required_columns]
return self.as_dataframe(table)
def filter(self, func=None, **kwargs):
"""Take a subset of rows where the given condition is true.
Parameters
----------
func : callable
A boolean function which will be applied to each row to keep rows
where the result is True.
**kwargs : string
Keyword arguments like ``chromosome="chr7"`` or
``gene="Antitarget"``, which will keep rows where the keyed field
equals the specified value.
Return
------
GenomicArray
Subset of `self` where the specified condition is True.
"""
table = self.data
if func is not None:
table = table[table.apply(func, axis=1)]
for key, val in list(kwargs.items()):
assert key in self
table = table[table[key] == val]
return self.as_dataframe(table)
def shuffle(self):
"""Randomize the order of bins in this array (in-place)."""
order = np.arange(len(self.data))
np.random.seed(0xA5EED)
np.random.shuffle(order)
self.data = self.data.iloc[order]
return order
def sort(self):
"""Sort this array's bins in-place, with smart chromosome ordering."""
sort_key = self.data.chromosome.apply(sorter_chrom)
self.data = (
self.data.assign(_sort_key_=sort_key)
.sort_values(by=["_sort_key_", "start", "end"], kind="mergesort")
.drop("_sort_key_", axis=1)
.reset_index(drop=True)
)
def sort_columns(self):
"""Sort this array's columns in-place, per class definition."""
extra_cols = []
for col in self.data.columns:
if col not in self._required_columns:
extra_cols.append(col)
sorted_colnames = list(self._required_columns) + sorted(extra_cols)
assert len(sorted_colnames) == len(self.data.columns)
self.data = self.data.reindex(columns=sorted_colnames)
# Genome arithmetic
    def cut(self, other, combine=None):
        """Split this array's regions at the boundaries in `other`.

        Not yet implemented.
        """
        # TODO -- unimplemented stub. NOTE(review): this returns the
        # NotImplemented sentinel (normally reserved for binary operators)
        # rather than raising NotImplementedError, so callers must check the
        # return value explicitly.
        return NotImplemented
def flatten(
self,
combine: Optional[Dict[str, Callable]] = None,
split_columns: Optional[Iterable[str]] = None,
):
"""Split this array's regions where they overlap."""
return self.as_dataframe(
flatten(self.data, combine=combine, split_columns=split_columns)
)
def intersection(self, other, mode: str = "outer"):
"""Select the bins in `self` that overlap the regions in `other`.
The extra fields of `self`, but not `other`, are retained in the output.
"""
# TODO options for which extra fields to keep
# by default, keep just the fields in 'table'
if mode == "trim":
# Slower
chunks = [
chunk.data
for _, chunk in self.by_ranges(other, mode=mode, keep_empty=False)
]
return self.as_dataframe(pd.concat(chunks))
# Faster
slices = iter_slices(self.data, other.data, mode, False)
indices = np.concatenate(list(slices))
return self.as_dataframe(self.data.loc[indices])
def merge(
self,
bp: int = 0,
stranded: bool = False,
combine: Optional[Dict[str, Callable]] = None,
):
"""Merge adjacent or overlapping regions into single rows.
Similar to 'bedtools merge'.
"""
return self.as_dataframe(merge(self.data, bp, stranded, combine))
def resize_ranges(self, bp: int, chrom_sizes: Optional[Mapping[str, Numeric]] = None):
"""Resize each genomic bin by a fixed number of bases at each end.
Bin 'start' values have a minimum of 0, and `chrom_sizes` can
specify each chromosome's maximum 'end' value.
Similar to 'bedtools slop'.
Parameters
----------
bp : int
Number of bases in each direction to expand or shrink each bin.
Applies to 'start' and 'end' values symmetrically, and may be
positive (expand) or negative (shrink).
chrom_sizes : dict of string-to-int
Chromosome name to length in base pairs. If given, all chromosomes
in `self` must be included.
"""
table = self.data
limits = {"lower": 0}
if chrom_sizes:
limits["upper"] = self.chromosome.replace(chrom_sizes)
table = table.assign(
start=(table["start"] - bp).clip(**limits),
end=(table["end"] + bp).clip(**limits),
)
if bp < 0:
# Drop any bins that now have zero or negative size
ok_size = table["end"] - table["start"] > 0
logging.debug("Dropping %d bins with size <= 0", (~ok_size).sum())
table = table[ok_size]
# Don't modify the original
return self.as_dataframe(table.copy())
    def squash(self, combine=None):
        """Combine some groups of rows, by some criteria, into single rows.

        Not yet implemented.
        """
        # TODO -- unimplemented stub. NOTE(review): returns the NotImplemented
        # sentinel instead of raising NotImplementedError; callers must check
        # the return value explicitly.
        return NotImplemented
def subdivide(self, avg_size: int, min_size: int = 0, verbose: bool = False):
"""Split this array's regions into roughly equal-sized sub-regions."""
return self.as_dataframe(subdivide(self.data, avg_size, min_size, verbose))
def subtract(self, other):
"""Remove the overlapping regions in `other` from this array."""
return self.as_dataframe(subtract(self.data, other.data))
def total_range_size(self) -> int:
"""Total number of bases covered by all (merged) regions."""
if not len(self):
return 0
regions = merge(self.data, bp=1)
return regions.end.sum() - regions.start.sum()
def _get_gene_map(self) -> OrderedDict:
"""Map unique gene names to their indices in this array.
Returns
-------
OrderedDict
An (ordered) dictionary of unique gene names and the data indices of
their segments in the order of occurrence (genomic order).
"""
if "gene" not in self.data:
return OrderedDict()
genes: OrderedDict = OrderedDict()
for idx, genestr in self.data["gene"].items():
if pd.isnull(genestr):
continue
for gene in genestr.split(","):
if gene not in genes:
genes[gene] = []
genes[gene].append(idx)
return genes
|
flexible
|
{
"blob_id": "0b833276ca10118f2d60e229ff03400b03915958",
"index": 2429,
"step-1": "<mask token>\n\n\nclass GenomicArray:\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, data_table: Optional[Union[Sequence, pd.DataFrame]],\n meta_dict: Optional[Mapping]=None):\n if data_table is None or isinstance(data_table, (list, tuple)\n ) and not len(data_table) or isinstance(data_table, pd.DataFrame\n ) and not len(data_table.columns):\n data_table = self._make_blank()\n else:\n if not isinstance(data_table, pd.DataFrame):\n data_table = pd.DataFrame(data_table)\n if not all(c in data_table.columns for c in self._required_columns\n ):\n raise ValueError('data table must have at least columns ' +\n f'{self._required_columns!r}; got {tuple(data_table.columns)!r}'\n )\n if len(data_table):\n\n def ok_dtype(col, dtype):\n return isinstance(data_table[col].iat[0], dtype)\n else:\n\n def ok_dtype(col, dtype):\n return data_table[col].dtype == np.dtype(dtype)\n recast_cols = {col: dtype for col, dtype in zip(self.\n _required_columns, self._required_dtypes) if not ok_dtype(\n col, dtype)}\n if recast_cols:\n data_table = data_table.astype(recast_cols)\n self.data = data_table\n self.meta = dict(meta_dict) if meta_dict is not None and len(meta_dict\n ) else {}\n <mask token>\n <mask token>\n\n @classmethod\n def from_rows(cls, rows: Iterable, columns: Optional[Sequence[str]]=\n None, meta_dict: Optional[Mapping]=None):\n \"\"\"Create a new instance from a list of rows, as tuples or arrays.\"\"\"\n if columns is None:\n columns = cls._required_columns\n table = pd.DataFrame.from_records(rows, columns=columns)\n return cls(table, meta_dict)\n\n def as_columns(self, **columns):\n \"\"\"Wrap the named columns in this instance's metadata.\"\"\"\n return self.__class__.from_columns(columns, self.meta)\n\n def as_dataframe(self, dframe: pd.DataFrame, reset_index: bool=False):\n \"\"\"Wrap the given pandas DataFrame in this instance's metadata.\"\"\"\n if reset_index:\n dframe = dframe.reset_index(drop=True)\n return self.__class__(dframe, 
self.meta.copy())\n\n def as_series(self, arraylike: Iterable) ->pd.Series:\n \"\"\"Coerce `arraylike` to a Series with this instance's index.\"\"\"\n return pd.Series(arraylike, index=self.data.index)\n <mask token>\n <mask token>\n\n def __eq__(self, other) ->bool:\n return isinstance(other, self.__class__) and self.data.equals(other\n .data)\n\n def __len__(self) ->int:\n return len(self.data)\n\n def __contains__(self, key) ->bool:\n return key in self.data.columns\n\n def __getitem__(self, index) ->Union[pd.Series, pd.DataFrame]:\n \"\"\"Access a portion of the data.\n\n Cases:\n\n - single integer: a row, as pd.Series\n - string row name: a column, as pd.Series\n - a boolean array: masked rows, as_dataframe\n - tuple of integers: selected rows, as_dataframe\n \"\"\"\n if isinstance(index, int):\n return self.data.iloc[index]\n if isinstance(index, str):\n return self.data[index]\n if isinstance(index, tuple) and len(index) == 2 and index[1\n ] in self.data.columns:\n return self.data.loc[index]\n if isinstance(index, slice):\n return self.as_dataframe(self.data[index])\n try:\n if isinstance(index, type(None)) or len(index) == 0:\n empty = pd.DataFrame(columns=self.data.columns)\n return self.as_dataframe(empty)\n except TypeError as exc:\n raise TypeError(\n f'object of type {type(index)!r} cannot be used as an index into a {self.__class__.__name__}'\n ) from exc\n return self.as_dataframe(self.data[index])\n\n def __setitem__(self, index, value):\n \"\"\"Assign to a portion of the data.\"\"\"\n if isinstance(index, int):\n self.data.iloc[index] = value\n elif isinstance(index, str):\n self.data[index] = value\n elif isinstance(index, tuple) and len(index) == 2 and index[1\n ] in self.data.columns:\n self.data.loc[index] = value\n else:\n assert isinstance(index, slice) or len(index) > 0\n self.data[index] = value\n\n def __delitem__(self, index):\n return NotImplemented\n\n def __iter__(self):\n return self.data.itertuples(index=False)\n <mask token>\n 
<mask token>\n\n @property\n def start(self) ->pd.Series:\n \"\"\"Get column 'start'.\"\"\"\n return self.data['start']\n\n @property\n def end(self) ->pd.Series:\n \"\"\"Get column 'end'.\"\"\"\n return self.data['end']\n <mask token>\n\n def autosomes(self, also=None):\n \"\"\"Select chromosomes w/ integer names, ignoring any 'chr' prefixes.\"\"\"\n is_auto = self.chromosome.str.match('(chr)?\\\\d+$', na=False)\n if not is_auto.any():\n return self\n if also is not None:\n if isinstance(also, pd.Series):\n is_auto |= also\n else:\n if isinstance(also, str):\n also = [also]\n for a_chrom in also:\n is_auto |= self.chromosome == a_chrom\n return self[is_auto]\n <mask token>\n\n def by_chromosome(self) ->Iterator:\n \"\"\"Iterate over bins grouped by chromosome name.\"\"\"\n for chrom, subtable in self.data.groupby('chromosome', sort=False):\n yield chrom, self.as_dataframe(subtable)\n\n def by_ranges(self, other, mode: str='outer', keep_empty: bool=True\n ) ->Iterator:\n \"\"\"Group rows by another GenomicArray's bin coordinate ranges.\n\n For example, this can be used to group SNVs by CNV segments.\n\n Bins in this array that fall outside the other array's bins are skipped.\n\n Parameters\n ----------\n other : GenomicArray\n Another GA instance.\n mode : string\n Determines what to do with bins that overlap a boundary of the\n selection. 
Possible values are:\n\n - ``inner``: Drop the bins on the selection boundary, don't emit them.\n - ``outer``: Keep/emit those bins as they are.\n - ``trim``: Emit those bins but alter their boundaries to match the\n selection; the bin start or end position is replaced with the\n selection boundary position.\n keep_empty : bool\n Whether to also yield `other` bins with no overlapping bins in\n `self`, or to skip them when iterating.\n\n Yields\n ------\n tuple\n (other bin, GenomicArray of overlapping rows in self)\n \"\"\"\n for bin_row, subrange in by_ranges(self.data, other.data, mode,\n keep_empty):\n if len(subrange):\n yield bin_row, self.as_dataframe(subrange)\n elif keep_empty:\n yield bin_row, self.as_rows(subrange)\n\n def coords(self, also: Union[str, Iterable[str]]=()):\n \"\"\"Iterate over plain coordinates of each bin: chromosome, start, end.\n\n Parameters\n ----------\n also : str, or iterable of strings\n Also include these columns from `self`, in addition to chromosome,\n start, and end.\n\n Example, yielding rows in BED format:\n\n >>> probes.coords(also=[\"gene\", \"strand\"])\n \"\"\"\n cols = list(GenomicArray._required_columns)\n if also:\n if isinstance(also, str):\n cols.append(also)\n else:\n cols.extend(also)\n coordframe = self.data.loc[:, cols]\n return coordframe.itertuples(index=False)\n <mask token>\n\n def in_range(self, chrom: Optional[str]=None, start: Optional[Numeric]=\n None, end: Optional[Numeric]=None, mode: str='outer'):\n \"\"\"Get the GenomicArray portion within the given genomic range.\n\n Parameters\n ----------\n chrom : str or None\n Chromosome name to select. Use None if `self` has only one\n chromosome.\n start : int or None\n Start coordinate of range to select, in 0-based coordinates.\n If None, start from 0.\n end : int or None\n End coordinate of range to select. 
If None, select to the end of the\n chromosome.\n mode : str\n As in `by_ranges`: ``outer`` includes bins straddling the range\n boundaries, ``trim`` additionally alters the straddling bins'\n endpoints to match the range boundaries, and ``inner`` excludes\n those bins.\n\n Returns\n -------\n GenomicArray\n The subset of `self` enclosed by the specified range.\n \"\"\"\n starts = [int(start)] if start is not None else None\n ends = [int(end)] if end is not None else None\n results = iter_ranges(self.data, chrom, starts, ends, mode)\n return self.as_dataframe(next(results))\n\n def in_ranges(self, chrom: Optional[str]=None, starts: Optional[\n Sequence[Numeric]]=None, ends: Optional[Sequence[Numeric]]=None,\n mode: str='outer'):\n \"\"\"Get the GenomicArray portion within the specified ranges.\n\n Similar to `in_ranges`, but concatenating the selections of all the\n regions specified by the `starts` and `ends` arrays.\n\n Parameters\n ----------\n chrom : str or None\n Chromosome name to select. Use None if `self` has only one\n chromosome.\n starts : int array, or None\n Start coordinates of ranges to select, in 0-based coordinates.\n If None, start from 0.\n ends : int array, or None\n End coordinates of ranges to select. If None, select to the end of the\n chromosome. 
If `starts` and `ends` are both specified, they must be\n arrays of equal length.\n mode : str\n As in `by_ranges`: ``outer`` includes bins straddling the range\n boundaries, ``trim`` additionally alters the straddling bins'\n endpoints to match the range boundaries, and ``inner`` excludes\n those bins.\n\n Returns\n -------\n GenomicArray\n Concatenation of all the subsets of `self` enclosed by the specified\n ranges.\n \"\"\"\n table = pd.concat(iter_ranges(self.data, chrom, starts, ends, mode),\n sort=False)\n return self.as_dataframe(table)\n\n def into_ranges(self, other, column: str, default, summary_func:\n Optional[Callable]=None):\n \"\"\"Re-bin values from `column` into the corresponding ranges in `other`.\n\n Match overlapping/intersecting rows from `other` to each row in `self`.\n Then, within each range in `other`, extract the value(s) from `column`\n in `self`, using the function `summary_func` to produce a single value\n if multiple bins in `self` map to a single range in `other`.\n\n For example, group SNVs (self) by CNV segments (other) and calculate the\n median (summary_func) of each SNV group's allele frequencies.\n\n Parameters\n ----------\n other : GenomicArray\n Ranges into which the overlapping values of `self` will be\n summarized.\n column : string\n Column name in `self` to extract values from.\n default\n Value to assign to indices in `other` that do not overlap any bins in\n `self`. 
Type should be the same as or compatible with the output\n field specified by `column`, or the output of `summary_func`.\n summary_func : callable, dict of string-to-callable, or None\n Specify how to reduce 1 or more `other` rows into a single value for\n the corresponding row in `self`.\n\n - If callable, apply to the `column` field each group of rows in\n `other` column.\n - If a single-element dict of column name to callable, apply to that\n field in `other` instead of `column`.\n - If None, use an appropriate summarizing function for the datatype\n of the `column` column in `other` (e.g. median of numbers,\n concatenation of strings).\n - If some other value, assign that value to `self` wherever there is\n an overlap.\n\n Returns\n -------\n pd.Series\n The extracted and summarized values from `self` corresponding to\n other's genomic ranges, the same length as `other`.\n \"\"\"\n if column not in self:\n logging.warning(\"No '%s' column available for summary calculation\",\n column)\n return pd.Series(np.repeat(default, len(other)))\n return into_ranges(self.data, other.data, column, default, summary_func\n )\n\n def iter_ranges_of(self, other, column: str, mode: str='outer',\n keep_empty: bool=True):\n \"\"\"Group rows by another GenomicArray's bin coordinate ranges.\n\n For example, this can be used to group SNVs by CNV segments.\n\n Bins in this array that fall outside the other array's bins are skipped.\n\n Parameters\n ----------\n other : GenomicArray\n Another GA instance.\n column : string\n Column name in `self` to extract values from.\n mode : string\n Determines what to do with bins that overlap a boundary of the\n selection. 
Possible values are:\n\n - ``inner``: Drop the bins on the selection boundary, don't emit them.\n - ``outer``: Keep/emit those bins as they are.\n - ``trim``: Emit those bins but alter their boundaries to match the\n selection; the bin start or end position is replaced with the\n selection boundary position.\n keep_empty : bool\n Whether to also yield `other` bins with no overlapping bins in\n `self`, or to skip them when iterating.\n\n Yields\n ------\n tuple\n (other bin, GenomicArray of overlapping rows in self)\n \"\"\"\n if column not in self.data.columns:\n raise ValueError(f'No column named {column!r} in this object')\n ser = self.data[column]\n for slc in iter_slices(self.data, other.data, mode, keep_empty):\n yield ser[slc]\n <mask token>\n <mask token>\n\n def copy(self):\n \"\"\"Create an independent copy of this object.\"\"\"\n return self.as_dataframe(self.data.copy())\n\n def add_columns(self, **columns):\n \"\"\"Add the given columns to a copy of this GenomicArray.\n\n Parameters\n ----------\n **columns : array\n Keyword arguments where the key is the new column's name and the\n value is an array of the same length as `self` which will be the new\n column's values.\n\n Returns\n -------\n GenomicArray or subclass\n A new instance of `self` with the given columns included in the\n underlying dataframe.\n \"\"\"\n return self.as_dataframe(self.data.assign(**columns))\n\n def keep_columns(self, colnames):\n \"\"\"Extract a subset of columns, reusing this instance's metadata.\"\"\"\n colnames = self.data.columns.intersection(colnames)\n return self.__class__(self.data.loc[:, colnames], self.meta.copy())\n <mask token>\n\n def filter(self, func=None, **kwargs):\n \"\"\"Take a subset of rows where the given condition is true.\n\n Parameters\n ----------\n func : callable\n A boolean function which will be applied to each row to keep rows\n where the result is True.\n **kwargs : string\n Keyword arguments like ``chromosome=\"chr7\"`` or\n 
``gene=\"Antitarget\"``, which will keep rows where the keyed field\n equals the specified value.\n\n Return\n ------\n GenomicArray\n Subset of `self` where the specified condition is True.\n \"\"\"\n table = self.data\n if func is not None:\n table = table[table.apply(func, axis=1)]\n for key, val in list(kwargs.items()):\n assert key in self\n table = table[table[key] == val]\n return self.as_dataframe(table)\n\n def shuffle(self):\n \"\"\"Randomize the order of bins in this array (in-place).\"\"\"\n order = np.arange(len(self.data))\n np.random.seed(679661)\n np.random.shuffle(order)\n self.data = self.data.iloc[order]\n return order\n\n def sort(self):\n \"\"\"Sort this array's bins in-place, with smart chromosome ordering.\"\"\"\n sort_key = self.data.chromosome.apply(sorter_chrom)\n self.data = self.data.assign(_sort_key_=sort_key).sort_values(by=[\n '_sort_key_', 'start', 'end'], kind='mergesort').drop('_sort_key_',\n axis=1).reset_index(drop=True)\n\n def sort_columns(self):\n \"\"\"Sort this array's columns in-place, per class definition.\"\"\"\n extra_cols = []\n for col in self.data.columns:\n if col not in self._required_columns:\n extra_cols.append(col)\n sorted_colnames = list(self._required_columns) + sorted(extra_cols)\n assert len(sorted_colnames) == len(self.data.columns)\n self.data = self.data.reindex(columns=sorted_colnames)\n\n def cut(self, other, combine=None):\n \"\"\"Split this array's regions at the boundaries in `other`.\"\"\"\n return NotImplemented\n <mask token>\n <mask token>\n\n def merge(self, bp: int=0, stranded: bool=False, combine: Optional[Dict\n [str, Callable]]=None):\n \"\"\"Merge adjacent or overlapping regions into single rows.\n\n Similar to 'bedtools merge'.\n \"\"\"\n return self.as_dataframe(merge(self.data, bp, stranded, combine))\n\n def resize_ranges(self, bp: int, chrom_sizes: Optional[Mapping[str,\n Numeric]]=None):\n \"\"\"Resize each genomic bin by a fixed number of bases at each end.\n\n Bin 'start' values 
have a minimum of 0, and `chrom_sizes` can\n specify each chromosome's maximum 'end' value.\n\n Similar to 'bedtools slop'.\n\n Parameters\n ----------\n bp : int\n Number of bases in each direction to expand or shrink each bin.\n Applies to 'start' and 'end' values symmetrically, and may be\n positive (expand) or negative (shrink).\n chrom_sizes : dict of string-to-int\n Chromosome name to length in base pairs. If given, all chromosomes\n in `self` must be included.\n \"\"\"\n table = self.data\n limits = {'lower': 0}\n if chrom_sizes:\n limits['upper'] = self.chromosome.replace(chrom_sizes)\n table = table.assign(start=(table['start'] - bp).clip(**limits),\n end=(table['end'] + bp).clip(**limits))\n if bp < 0:\n ok_size = table['end'] - table['start'] > 0\n logging.debug('Dropping %d bins with size <= 0', (~ok_size).sum())\n table = table[ok_size]\n return self.as_dataframe(table.copy())\n <mask token>\n\n def subdivide(self, avg_size: int, min_size: int=0, verbose: bool=False):\n \"\"\"Split this array's regions into roughly equal-sized sub-regions.\"\"\"\n return self.as_dataframe(subdivide(self.data, avg_size, min_size,\n verbose))\n <mask token>\n\n def total_range_size(self) ->int:\n \"\"\"Total number of bases covered by all (merged) regions.\"\"\"\n if not len(self):\n return 0\n regions = merge(self.data, bp=1)\n return regions.end.sum() - regions.start.sum()\n\n def _get_gene_map(self) ->OrderedDict:\n \"\"\"Map unique gene names to their indices in this array.\n\n Returns\n -------\n OrderedDict\n An (ordered) dictionary of unique gene names and the data indices of\n their segments in the order of occurrence (genomic order).\n \"\"\"\n if 'gene' not in self.data:\n return OrderedDict()\n genes: OrderedDict = OrderedDict()\n for idx, genestr in self.data['gene'].items():\n if pd.isnull(genestr):\n continue\n for gene in genestr.split(','):\n if gene not in genes:\n genes[gene] = []\n genes[gene].append(idx)\n return genes\n",
"step-2": "<mask token>\n\n\nclass GenomicArray:\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, data_table: Optional[Union[Sequence, pd.DataFrame]],\n meta_dict: Optional[Mapping]=None):\n if data_table is None or isinstance(data_table, (list, tuple)\n ) and not len(data_table) or isinstance(data_table, pd.DataFrame\n ) and not len(data_table.columns):\n data_table = self._make_blank()\n else:\n if not isinstance(data_table, pd.DataFrame):\n data_table = pd.DataFrame(data_table)\n if not all(c in data_table.columns for c in self._required_columns\n ):\n raise ValueError('data table must have at least columns ' +\n f'{self._required_columns!r}; got {tuple(data_table.columns)!r}'\n )\n if len(data_table):\n\n def ok_dtype(col, dtype):\n return isinstance(data_table[col].iat[0], dtype)\n else:\n\n def ok_dtype(col, dtype):\n return data_table[col].dtype == np.dtype(dtype)\n recast_cols = {col: dtype for col, dtype in zip(self.\n _required_columns, self._required_dtypes) if not ok_dtype(\n col, dtype)}\n if recast_cols:\n data_table = data_table.astype(recast_cols)\n self.data = data_table\n self.meta = dict(meta_dict) if meta_dict is not None and len(meta_dict\n ) else {}\n <mask token>\n <mask token>\n\n @classmethod\n def from_rows(cls, rows: Iterable, columns: Optional[Sequence[str]]=\n None, meta_dict: Optional[Mapping]=None):\n \"\"\"Create a new instance from a list of rows, as tuples or arrays.\"\"\"\n if columns is None:\n columns = cls._required_columns\n table = pd.DataFrame.from_records(rows, columns=columns)\n return cls(table, meta_dict)\n\n def as_columns(self, **columns):\n \"\"\"Wrap the named columns in this instance's metadata.\"\"\"\n return self.__class__.from_columns(columns, self.meta)\n\n def as_dataframe(self, dframe: pd.DataFrame, reset_index: bool=False):\n \"\"\"Wrap the given pandas DataFrame in this instance's metadata.\"\"\"\n if reset_index:\n dframe = dframe.reset_index(drop=True)\n return self.__class__(dframe, 
self.meta.copy())\n\n def as_series(self, arraylike: Iterable) ->pd.Series:\n \"\"\"Coerce `arraylike` to a Series with this instance's index.\"\"\"\n return pd.Series(arraylike, index=self.data.index)\n <mask token>\n <mask token>\n\n def __eq__(self, other) ->bool:\n return isinstance(other, self.__class__) and self.data.equals(other\n .data)\n\n def __len__(self) ->int:\n return len(self.data)\n\n def __contains__(self, key) ->bool:\n return key in self.data.columns\n\n def __getitem__(self, index) ->Union[pd.Series, pd.DataFrame]:\n \"\"\"Access a portion of the data.\n\n Cases:\n\n - single integer: a row, as pd.Series\n - string row name: a column, as pd.Series\n - a boolean array: masked rows, as_dataframe\n - tuple of integers: selected rows, as_dataframe\n \"\"\"\n if isinstance(index, int):\n return self.data.iloc[index]\n if isinstance(index, str):\n return self.data[index]\n if isinstance(index, tuple) and len(index) == 2 and index[1\n ] in self.data.columns:\n return self.data.loc[index]\n if isinstance(index, slice):\n return self.as_dataframe(self.data[index])\n try:\n if isinstance(index, type(None)) or len(index) == 0:\n empty = pd.DataFrame(columns=self.data.columns)\n return self.as_dataframe(empty)\n except TypeError as exc:\n raise TypeError(\n f'object of type {type(index)!r} cannot be used as an index into a {self.__class__.__name__}'\n ) from exc\n return self.as_dataframe(self.data[index])\n\n def __setitem__(self, index, value):\n \"\"\"Assign to a portion of the data.\"\"\"\n if isinstance(index, int):\n self.data.iloc[index] = value\n elif isinstance(index, str):\n self.data[index] = value\n elif isinstance(index, tuple) and len(index) == 2 and index[1\n ] in self.data.columns:\n self.data.loc[index] = value\n else:\n assert isinstance(index, slice) or len(index) > 0\n self.data[index] = value\n\n def __delitem__(self, index):\n return NotImplemented\n\n def __iter__(self):\n return self.data.itertuples(index=False)\n <mask token>\n\n 
@property\n def chromosome(self) ->pd.Series:\n \"\"\"Get column 'chromosome'.\"\"\"\n return self.data['chromosome']\n\n @property\n def start(self) ->pd.Series:\n \"\"\"Get column 'start'.\"\"\"\n return self.data['start']\n\n @property\n def end(self) ->pd.Series:\n \"\"\"Get column 'end'.\"\"\"\n return self.data['end']\n <mask token>\n\n def autosomes(self, also=None):\n \"\"\"Select chromosomes w/ integer names, ignoring any 'chr' prefixes.\"\"\"\n is_auto = self.chromosome.str.match('(chr)?\\\\d+$', na=False)\n if not is_auto.any():\n return self\n if also is not None:\n if isinstance(also, pd.Series):\n is_auto |= also\n else:\n if isinstance(also, str):\n also = [also]\n for a_chrom in also:\n is_auto |= self.chromosome == a_chrom\n return self[is_auto]\n <mask token>\n\n def by_chromosome(self) ->Iterator:\n \"\"\"Iterate over bins grouped by chromosome name.\"\"\"\n for chrom, subtable in self.data.groupby('chromosome', sort=False):\n yield chrom, self.as_dataframe(subtable)\n\n def by_ranges(self, other, mode: str='outer', keep_empty: bool=True\n ) ->Iterator:\n \"\"\"Group rows by another GenomicArray's bin coordinate ranges.\n\n For example, this can be used to group SNVs by CNV segments.\n\n Bins in this array that fall outside the other array's bins are skipped.\n\n Parameters\n ----------\n other : GenomicArray\n Another GA instance.\n mode : string\n Determines what to do with bins that overlap a boundary of the\n selection. 
Possible values are:\n\n - ``inner``: Drop the bins on the selection boundary, don't emit them.\n - ``outer``: Keep/emit those bins as they are.\n - ``trim``: Emit those bins but alter their boundaries to match the\n selection; the bin start or end position is replaced with the\n selection boundary position.\n keep_empty : bool\n Whether to also yield `other` bins with no overlapping bins in\n `self`, or to skip them when iterating.\n\n Yields\n ------\n tuple\n (other bin, GenomicArray of overlapping rows in self)\n \"\"\"\n for bin_row, subrange in by_ranges(self.data, other.data, mode,\n keep_empty):\n if len(subrange):\n yield bin_row, self.as_dataframe(subrange)\n elif keep_empty:\n yield bin_row, self.as_rows(subrange)\n\n def coords(self, also: Union[str, Iterable[str]]=()):\n \"\"\"Iterate over plain coordinates of each bin: chromosome, start, end.\n\n Parameters\n ----------\n also : str, or iterable of strings\n Also include these columns from `self`, in addition to chromosome,\n start, and end.\n\n Example, yielding rows in BED format:\n\n >>> probes.coords(also=[\"gene\", \"strand\"])\n \"\"\"\n cols = list(GenomicArray._required_columns)\n if also:\n if isinstance(also, str):\n cols.append(also)\n else:\n cols.extend(also)\n coordframe = self.data.loc[:, cols]\n return coordframe.itertuples(index=False)\n <mask token>\n\n def in_range(self, chrom: Optional[str]=None, start: Optional[Numeric]=\n None, end: Optional[Numeric]=None, mode: str='outer'):\n \"\"\"Get the GenomicArray portion within the given genomic range.\n\n Parameters\n ----------\n chrom : str or None\n Chromosome name to select. Use None if `self` has only one\n chromosome.\n start : int or None\n Start coordinate of range to select, in 0-based coordinates.\n If None, start from 0.\n end : int or None\n End coordinate of range to select. 
If None, select to the end of the\n chromosome.\n mode : str\n As in `by_ranges`: ``outer`` includes bins straddling the range\n boundaries, ``trim`` additionally alters the straddling bins'\n endpoints to match the range boundaries, and ``inner`` excludes\n those bins.\n\n Returns\n -------\n GenomicArray\n The subset of `self` enclosed by the specified range.\n \"\"\"\n starts = [int(start)] if start is not None else None\n ends = [int(end)] if end is not None else None\n results = iter_ranges(self.data, chrom, starts, ends, mode)\n return self.as_dataframe(next(results))\n\n def in_ranges(self, chrom: Optional[str]=None, starts: Optional[\n Sequence[Numeric]]=None, ends: Optional[Sequence[Numeric]]=None,\n mode: str='outer'):\n \"\"\"Get the GenomicArray portion within the specified ranges.\n\n Similar to `in_ranges`, but concatenating the selections of all the\n regions specified by the `starts` and `ends` arrays.\n\n Parameters\n ----------\n chrom : str or None\n Chromosome name to select. Use None if `self` has only one\n chromosome.\n starts : int array, or None\n Start coordinates of ranges to select, in 0-based coordinates.\n If None, start from 0.\n ends : int array, or None\n End coordinates of ranges to select. If None, select to the end of the\n chromosome. 
If `starts` and `ends` are both specified, they must be\n arrays of equal length.\n mode : str\n As in `by_ranges`: ``outer`` includes bins straddling the range\n boundaries, ``trim`` additionally alters the straddling bins'\n endpoints to match the range boundaries, and ``inner`` excludes\n those bins.\n\n Returns\n -------\n GenomicArray\n Concatenation of all the subsets of `self` enclosed by the specified\n ranges.\n \"\"\"\n table = pd.concat(iter_ranges(self.data, chrom, starts, ends, mode),\n sort=False)\n return self.as_dataframe(table)\n\n def into_ranges(self, other, column: str, default, summary_func:\n Optional[Callable]=None):\n \"\"\"Re-bin values from `column` into the corresponding ranges in `other`.\n\n Match overlapping/intersecting rows from `other` to each row in `self`.\n Then, within each range in `other`, extract the value(s) from `column`\n in `self`, using the function `summary_func` to produce a single value\n if multiple bins in `self` map to a single range in `other`.\n\n For example, group SNVs (self) by CNV segments (other) and calculate the\n median (summary_func) of each SNV group's allele frequencies.\n\n Parameters\n ----------\n other : GenomicArray\n Ranges into which the overlapping values of `self` will be\n summarized.\n column : string\n Column name in `self` to extract values from.\n default\n Value to assign to indices in `other` that do not overlap any bins in\n `self`. 
Type should be the same as or compatible with the output\n field specified by `column`, or the output of `summary_func`.\n summary_func : callable, dict of string-to-callable, or None\n Specify how to reduce 1 or more `other` rows into a single value for\n the corresponding row in `self`.\n\n - If callable, apply to the `column` field each group of rows in\n `other` column.\n - If a single-element dict of column name to callable, apply to that\n field in `other` instead of `column`.\n - If None, use an appropriate summarizing function for the datatype\n of the `column` column in `other` (e.g. median of numbers,\n concatenation of strings).\n - If some other value, assign that value to `self` wherever there is\n an overlap.\n\n Returns\n -------\n pd.Series\n The extracted and summarized values from `self` corresponding to\n other's genomic ranges, the same length as `other`.\n \"\"\"\n if column not in self:\n logging.warning(\"No '%s' column available for summary calculation\",\n column)\n return pd.Series(np.repeat(default, len(other)))\n return into_ranges(self.data, other.data, column, default, summary_func\n )\n\n def iter_ranges_of(self, other, column: str, mode: str='outer',\n keep_empty: bool=True):\n \"\"\"Group rows by another GenomicArray's bin coordinate ranges.\n\n For example, this can be used to group SNVs by CNV segments.\n\n Bins in this array that fall outside the other array's bins are skipped.\n\n Parameters\n ----------\n other : GenomicArray\n Another GA instance.\n column : string\n Column name in `self` to extract values from.\n mode : string\n Determines what to do with bins that overlap a boundary of the\n selection. 
Possible values are:\n\n - ``inner``: Drop the bins on the selection boundary, don't emit them.\n - ``outer``: Keep/emit those bins as they are.\n - ``trim``: Emit those bins but alter their boundaries to match the\n selection; the bin start or end position is replaced with the\n selection boundary position.\n keep_empty : bool\n Whether to also yield `other` bins with no overlapping bins in\n `self`, or to skip them when iterating.\n\n Yields\n ------\n tuple\n (other bin, GenomicArray of overlapping rows in self)\n \"\"\"\n if column not in self.data.columns:\n raise ValueError(f'No column named {column!r} in this object')\n ser = self.data[column]\n for slc in iter_slices(self.data, other.data, mode, keep_empty):\n yield ser[slc]\n <mask token>\n <mask token>\n\n def copy(self):\n \"\"\"Create an independent copy of this object.\"\"\"\n return self.as_dataframe(self.data.copy())\n\n def add_columns(self, **columns):\n \"\"\"Add the given columns to a copy of this GenomicArray.\n\n Parameters\n ----------\n **columns : array\n Keyword arguments where the key is the new column's name and the\n value is an array of the same length as `self` which will be the new\n column's values.\n\n Returns\n -------\n GenomicArray or subclass\n A new instance of `self` with the given columns included in the\n underlying dataframe.\n \"\"\"\n return self.as_dataframe(self.data.assign(**columns))\n\n def keep_columns(self, colnames):\n \"\"\"Extract a subset of columns, reusing this instance's metadata.\"\"\"\n colnames = self.data.columns.intersection(colnames)\n return self.__class__(self.data.loc[:, colnames], self.meta.copy())\n <mask token>\n\n def filter(self, func=None, **kwargs):\n \"\"\"Take a subset of rows where the given condition is true.\n\n Parameters\n ----------\n func : callable\n A boolean function which will be applied to each row to keep rows\n where the result is True.\n **kwargs : string\n Keyword arguments like ``chromosome=\"chr7\"`` or\n 
``gene=\"Antitarget\"``, which will keep rows where the keyed field\n equals the specified value.\n\n Return\n ------\n GenomicArray\n Subset of `self` where the specified condition is True.\n \"\"\"\n table = self.data\n if func is not None:\n table = table[table.apply(func, axis=1)]\n for key, val in list(kwargs.items()):\n assert key in self\n table = table[table[key] == val]\n return self.as_dataframe(table)\n\n def shuffle(self):\n \"\"\"Randomize the order of bins in this array (in-place).\"\"\"\n order = np.arange(len(self.data))\n np.random.seed(679661)\n np.random.shuffle(order)\n self.data = self.data.iloc[order]\n return order\n\n def sort(self):\n \"\"\"Sort this array's bins in-place, with smart chromosome ordering.\"\"\"\n sort_key = self.data.chromosome.apply(sorter_chrom)\n self.data = self.data.assign(_sort_key_=sort_key).sort_values(by=[\n '_sort_key_', 'start', 'end'], kind='mergesort').drop('_sort_key_',\n axis=1).reset_index(drop=True)\n\n def sort_columns(self):\n \"\"\"Sort this array's columns in-place, per class definition.\"\"\"\n extra_cols = []\n for col in self.data.columns:\n if col not in self._required_columns:\n extra_cols.append(col)\n sorted_colnames = list(self._required_columns) + sorted(extra_cols)\n assert len(sorted_colnames) == len(self.data.columns)\n self.data = self.data.reindex(columns=sorted_colnames)\n\n def cut(self, other, combine=None):\n \"\"\"Split this array's regions at the boundaries in `other`.\"\"\"\n return NotImplemented\n <mask token>\n <mask token>\n\n def merge(self, bp: int=0, stranded: bool=False, combine: Optional[Dict\n [str, Callable]]=None):\n \"\"\"Merge adjacent or overlapping regions into single rows.\n\n Similar to 'bedtools merge'.\n \"\"\"\n return self.as_dataframe(merge(self.data, bp, stranded, combine))\n\n def resize_ranges(self, bp: int, chrom_sizes: Optional[Mapping[str,\n Numeric]]=None):\n \"\"\"Resize each genomic bin by a fixed number of bases at each end.\n\n Bin 'start' values 
have a minimum of 0, and `chrom_sizes` can\n specify each chromosome's maximum 'end' value.\n\n Similar to 'bedtools slop'.\n\n Parameters\n ----------\n bp : int\n Number of bases in each direction to expand or shrink each bin.\n Applies to 'start' and 'end' values symmetrically, and may be\n positive (expand) or negative (shrink).\n chrom_sizes : dict of string-to-int\n Chromosome name to length in base pairs. If given, all chromosomes\n in `self` must be included.\n \"\"\"\n table = self.data\n limits = {'lower': 0}\n if chrom_sizes:\n limits['upper'] = self.chromosome.replace(chrom_sizes)\n table = table.assign(start=(table['start'] - bp).clip(**limits),\n end=(table['end'] + bp).clip(**limits))\n if bp < 0:\n ok_size = table['end'] - table['start'] > 0\n logging.debug('Dropping %d bins with size <= 0', (~ok_size).sum())\n table = table[ok_size]\n return self.as_dataframe(table.copy())\n\n def squash(self, combine=None):\n \"\"\"Combine some groups of rows, by some criteria, into single rows.\"\"\"\n return NotImplemented\n\n def subdivide(self, avg_size: int, min_size: int=0, verbose: bool=False):\n \"\"\"Split this array's regions into roughly equal-sized sub-regions.\"\"\"\n return self.as_dataframe(subdivide(self.data, avg_size, min_size,\n verbose))\n <mask token>\n\n def total_range_size(self) ->int:\n \"\"\"Total number of bases covered by all (merged) regions.\"\"\"\n if not len(self):\n return 0\n regions = merge(self.data, bp=1)\n return regions.end.sum() - regions.start.sum()\n\n def _get_gene_map(self) ->OrderedDict:\n \"\"\"Map unique gene names to their indices in this array.\n\n Returns\n -------\n OrderedDict\n An (ordered) dictionary of unique gene names and the data indices of\n their segments in the order of occurrence (genomic order).\n \"\"\"\n if 'gene' not in self.data:\n return OrderedDict()\n genes: OrderedDict = OrderedDict()\n for idx, genestr in self.data['gene'].items():\n if pd.isnull(genestr):\n continue\n for gene in 
genestr.split(','):\n if gene not in genes:\n genes[gene] = []\n genes[gene].append(idx)\n return genes\n",
"step-3": "<mask token>\n\n\nclass GenomicArray:\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, data_table: Optional[Union[Sequence, pd.DataFrame]],\n meta_dict: Optional[Mapping]=None):\n if data_table is None or isinstance(data_table, (list, tuple)\n ) and not len(data_table) or isinstance(data_table, pd.DataFrame\n ) and not len(data_table.columns):\n data_table = self._make_blank()\n else:\n if not isinstance(data_table, pd.DataFrame):\n data_table = pd.DataFrame(data_table)\n if not all(c in data_table.columns for c in self._required_columns\n ):\n raise ValueError('data table must have at least columns ' +\n f'{self._required_columns!r}; got {tuple(data_table.columns)!r}'\n )\n if len(data_table):\n\n def ok_dtype(col, dtype):\n return isinstance(data_table[col].iat[0], dtype)\n else:\n\n def ok_dtype(col, dtype):\n return data_table[col].dtype == np.dtype(dtype)\n recast_cols = {col: dtype for col, dtype in zip(self.\n _required_columns, self._required_dtypes) if not ok_dtype(\n col, dtype)}\n if recast_cols:\n data_table = data_table.astype(recast_cols)\n self.data = data_table\n self.meta = dict(meta_dict) if meta_dict is not None and len(meta_dict\n ) else {}\n\n @classmethod\n def _make_blank(cls) ->pd.DataFrame:\n \"\"\"Create an empty dataframe with the columns required by this class.\"\"\"\n spec = list(zip(cls._required_columns, cls._required_dtypes))\n try:\n arr = np.zeros(0, dtype=spec)\n return pd.DataFrame(arr)\n except TypeError as exc:\n raise TypeError('{exc}: {spec}') from exc\n\n @classmethod\n def from_columns(cls, columns: Mapping[str, Iterable], meta_dict:\n Optional[Mapping]=None):\n \"\"\"Create a new instance from column arrays, given as a dict.\"\"\"\n table = pd.DataFrame.from_dict(columns)\n ary = cls(table, meta_dict)\n ary.sort_columns()\n return ary\n\n @classmethod\n def from_rows(cls, rows: Iterable, columns: Optional[Sequence[str]]=\n None, meta_dict: Optional[Mapping]=None):\n \"\"\"Create a new instance 
from a list of rows, as tuples or arrays.\"\"\"\n if columns is None:\n columns = cls._required_columns\n table = pd.DataFrame.from_records(rows, columns=columns)\n return cls(table, meta_dict)\n\n def as_columns(self, **columns):\n \"\"\"Wrap the named columns in this instance's metadata.\"\"\"\n return self.__class__.from_columns(columns, self.meta)\n\n def as_dataframe(self, dframe: pd.DataFrame, reset_index: bool=False):\n \"\"\"Wrap the given pandas DataFrame in this instance's metadata.\"\"\"\n if reset_index:\n dframe = dframe.reset_index(drop=True)\n return self.__class__(dframe, self.meta.copy())\n\n def as_series(self, arraylike: Iterable) ->pd.Series:\n \"\"\"Coerce `arraylike` to a Series with this instance's index.\"\"\"\n return pd.Series(arraylike, index=self.data.index)\n\n def as_rows(self, rows: Iterable):\n \"\"\"Wrap the given rows in this instance's metadata.\"\"\"\n try:\n out = self.from_rows(rows, columns=self.data.columns, meta_dict\n =self.meta)\n except AssertionError as exc:\n columns = self.data.columns.tolist()\n firstrow = next(iter(rows))\n raise RuntimeError(\n f'Passed {len(columns)} columns {columns!r}, but {len(firstrow)} elements in first row: {firstrow}'\n ) from exc\n return out\n\n def __bool__(self) ->bool:\n return bool(len(self.data))\n\n def __eq__(self, other) ->bool:\n return isinstance(other, self.__class__) and self.data.equals(other\n .data)\n\n def __len__(self) ->int:\n return len(self.data)\n\n def __contains__(self, key) ->bool:\n return key in self.data.columns\n\n def __getitem__(self, index) ->Union[pd.Series, pd.DataFrame]:\n \"\"\"Access a portion of the data.\n\n Cases:\n\n - single integer: a row, as pd.Series\n - string row name: a column, as pd.Series\n - a boolean array: masked rows, as_dataframe\n - tuple of integers: selected rows, as_dataframe\n \"\"\"\n if isinstance(index, int):\n return self.data.iloc[index]\n if isinstance(index, str):\n return self.data[index]\n if isinstance(index, tuple) and 
len(index) == 2 and index[1\n ] in self.data.columns:\n return self.data.loc[index]\n if isinstance(index, slice):\n return self.as_dataframe(self.data[index])\n try:\n if isinstance(index, type(None)) or len(index) == 0:\n empty = pd.DataFrame(columns=self.data.columns)\n return self.as_dataframe(empty)\n except TypeError as exc:\n raise TypeError(\n f'object of type {type(index)!r} cannot be used as an index into a {self.__class__.__name__}'\n ) from exc\n return self.as_dataframe(self.data[index])\n\n def __setitem__(self, index, value):\n \"\"\"Assign to a portion of the data.\"\"\"\n if isinstance(index, int):\n self.data.iloc[index] = value\n elif isinstance(index, str):\n self.data[index] = value\n elif isinstance(index, tuple) and len(index) == 2 and index[1\n ] in self.data.columns:\n self.data.loc[index] = value\n else:\n assert isinstance(index, slice) or len(index) > 0\n self.data[index] = value\n\n def __delitem__(self, index):\n return NotImplemented\n\n def __iter__(self):\n return self.data.itertuples(index=False)\n <mask token>\n\n @property\n def chromosome(self) ->pd.Series:\n \"\"\"Get column 'chromosome'.\"\"\"\n return self.data['chromosome']\n\n @property\n def start(self) ->pd.Series:\n \"\"\"Get column 'start'.\"\"\"\n return self.data['start']\n\n @property\n def end(self) ->pd.Series:\n \"\"\"Get column 'end'.\"\"\"\n return self.data['end']\n\n @property\n def sample_id(self) ->pd.Series:\n \"\"\"Get metadata field 'sample_id'.\"\"\"\n return self.meta.get('sample_id')\n\n def autosomes(self, also=None):\n \"\"\"Select chromosomes w/ integer names, ignoring any 'chr' prefixes.\"\"\"\n is_auto = self.chromosome.str.match('(chr)?\\\\d+$', na=False)\n if not is_auto.any():\n return self\n if also is not None:\n if isinstance(also, pd.Series):\n is_auto |= also\n else:\n if isinstance(also, str):\n also = [also]\n for a_chrom in also:\n is_auto |= self.chromosome == a_chrom\n return self[is_auto]\n <mask token>\n\n def by_chromosome(self) 
->Iterator:\n \"\"\"Iterate over bins grouped by chromosome name.\"\"\"\n for chrom, subtable in self.data.groupby('chromosome', sort=False):\n yield chrom, self.as_dataframe(subtable)\n\n def by_ranges(self, other, mode: str='outer', keep_empty: bool=True\n ) ->Iterator:\n \"\"\"Group rows by another GenomicArray's bin coordinate ranges.\n\n For example, this can be used to group SNVs by CNV segments.\n\n Bins in this array that fall outside the other array's bins are skipped.\n\n Parameters\n ----------\n other : GenomicArray\n Another GA instance.\n mode : string\n Determines what to do with bins that overlap a boundary of the\n selection. Possible values are:\n\n - ``inner``: Drop the bins on the selection boundary, don't emit them.\n - ``outer``: Keep/emit those bins as they are.\n - ``trim``: Emit those bins but alter their boundaries to match the\n selection; the bin start or end position is replaced with the\n selection boundary position.\n keep_empty : bool\n Whether to also yield `other` bins with no overlapping bins in\n `self`, or to skip them when iterating.\n\n Yields\n ------\n tuple\n (other bin, GenomicArray of overlapping rows in self)\n \"\"\"\n for bin_row, subrange in by_ranges(self.data, other.data, mode,\n keep_empty):\n if len(subrange):\n yield bin_row, self.as_dataframe(subrange)\n elif keep_empty:\n yield bin_row, self.as_rows(subrange)\n\n def coords(self, also: Union[str, Iterable[str]]=()):\n \"\"\"Iterate over plain coordinates of each bin: chromosome, start, end.\n\n Parameters\n ----------\n also : str, or iterable of strings\n Also include these columns from `self`, in addition to chromosome,\n start, and end.\n\n Example, yielding rows in BED format:\n\n >>> probes.coords(also=[\"gene\", \"strand\"])\n \"\"\"\n cols = list(GenomicArray._required_columns)\n if also:\n if isinstance(also, str):\n cols.append(also)\n else:\n cols.extend(also)\n coordframe = self.data.loc[:, cols]\n return coordframe.itertuples(index=False)\n\n def 
labels(self) ->pd.Series:\n \"\"\"Get chromosomal coordinates as genomic range labels.\"\"\"\n return self.data.apply(to_label, axis=1)\n\n def in_range(self, chrom: Optional[str]=None, start: Optional[Numeric]=\n None, end: Optional[Numeric]=None, mode: str='outer'):\n \"\"\"Get the GenomicArray portion within the given genomic range.\n\n Parameters\n ----------\n chrom : str or None\n Chromosome name to select. Use None if `self` has only one\n chromosome.\n start : int or None\n Start coordinate of range to select, in 0-based coordinates.\n If None, start from 0.\n end : int or None\n End coordinate of range to select. If None, select to the end of the\n chromosome.\n mode : str\n As in `by_ranges`: ``outer`` includes bins straddling the range\n boundaries, ``trim`` additionally alters the straddling bins'\n endpoints to match the range boundaries, and ``inner`` excludes\n those bins.\n\n Returns\n -------\n GenomicArray\n The subset of `self` enclosed by the specified range.\n \"\"\"\n starts = [int(start)] if start is not None else None\n ends = [int(end)] if end is not None else None\n results = iter_ranges(self.data, chrom, starts, ends, mode)\n return self.as_dataframe(next(results))\n\n def in_ranges(self, chrom: Optional[str]=None, starts: Optional[\n Sequence[Numeric]]=None, ends: Optional[Sequence[Numeric]]=None,\n mode: str='outer'):\n \"\"\"Get the GenomicArray portion within the specified ranges.\n\n Similar to `in_ranges`, but concatenating the selections of all the\n regions specified by the `starts` and `ends` arrays.\n\n Parameters\n ----------\n chrom : str or None\n Chromosome name to select. Use None if `self` has only one\n chromosome.\n starts : int array, or None\n Start coordinates of ranges to select, in 0-based coordinates.\n If None, start from 0.\n ends : int array, or None\n End coordinates of ranges to select. If None, select to the end of the\n chromosome. 
If `starts` and `ends` are both specified, they must be\n arrays of equal length.\n mode : str\n As in `by_ranges`: ``outer`` includes bins straddling the range\n boundaries, ``trim`` additionally alters the straddling bins'\n endpoints to match the range boundaries, and ``inner`` excludes\n those bins.\n\n Returns\n -------\n GenomicArray\n Concatenation of all the subsets of `self` enclosed by the specified\n ranges.\n \"\"\"\n table = pd.concat(iter_ranges(self.data, chrom, starts, ends, mode),\n sort=False)\n return self.as_dataframe(table)\n\n def into_ranges(self, other, column: str, default, summary_func:\n Optional[Callable]=None):\n \"\"\"Re-bin values from `column` into the corresponding ranges in `other`.\n\n Match overlapping/intersecting rows from `other` to each row in `self`.\n Then, within each range in `other`, extract the value(s) from `column`\n in `self`, using the function `summary_func` to produce a single value\n if multiple bins in `self` map to a single range in `other`.\n\n For example, group SNVs (self) by CNV segments (other) and calculate the\n median (summary_func) of each SNV group's allele frequencies.\n\n Parameters\n ----------\n other : GenomicArray\n Ranges into which the overlapping values of `self` will be\n summarized.\n column : string\n Column name in `self` to extract values from.\n default\n Value to assign to indices in `other` that do not overlap any bins in\n `self`. 
Type should be the same as or compatible with the output\n field specified by `column`, or the output of `summary_func`.\n summary_func : callable, dict of string-to-callable, or None\n Specify how to reduce 1 or more `other` rows into a single value for\n the corresponding row in `self`.\n\n - If callable, apply to the `column` field each group of rows in\n `other` column.\n - If a single-element dict of column name to callable, apply to that\n field in `other` instead of `column`.\n - If None, use an appropriate summarizing function for the datatype\n of the `column` column in `other` (e.g. median of numbers,\n concatenation of strings).\n - If some other value, assign that value to `self` wherever there is\n an overlap.\n\n Returns\n -------\n pd.Series\n The extracted and summarized values from `self` corresponding to\n other's genomic ranges, the same length as `other`.\n \"\"\"\n if column not in self:\n logging.warning(\"No '%s' column available for summary calculation\",\n column)\n return pd.Series(np.repeat(default, len(other)))\n return into_ranges(self.data, other.data, column, default, summary_func\n )\n\n def iter_ranges_of(self, other, column: str, mode: str='outer',\n keep_empty: bool=True):\n \"\"\"Group rows by another GenomicArray's bin coordinate ranges.\n\n For example, this can be used to group SNVs by CNV segments.\n\n Bins in this array that fall outside the other array's bins are skipped.\n\n Parameters\n ----------\n other : GenomicArray\n Another GA instance.\n column : string\n Column name in `self` to extract values from.\n mode : string\n Determines what to do with bins that overlap a boundary of the\n selection. 
Possible values are:\n\n - ``inner``: Drop the bins on the selection boundary, don't emit them.\n - ``outer``: Keep/emit those bins as they are.\n - ``trim``: Emit those bins but alter their boundaries to match the\n selection; the bin start or end position is replaced with the\n selection boundary position.\n keep_empty : bool\n Whether to also yield `other` bins with no overlapping bins in\n `self`, or to skip them when iterating.\n\n Yields\n ------\n tuple\n (other bin, GenomicArray of overlapping rows in self)\n \"\"\"\n if column not in self.data.columns:\n raise ValueError(f'No column named {column!r} in this object')\n ser = self.data[column]\n for slc in iter_slices(self.data, other.data, mode, keep_empty):\n yield ser[slc]\n\n def add(self, other):\n \"\"\"Combine this array's data with another GenomicArray (in-place).\n\n Any optional columns must match between both arrays.\n \"\"\"\n if not isinstance(other, self.__class__):\n raise ValueError(\n f'Argument (type {type(other)}) is not a {self.__class__} instance'\n )\n if len(other.data):\n self.data = pd.concat([self.data, other.data], ignore_index=True)\n self.sort()\n <mask token>\n\n def copy(self):\n \"\"\"Create an independent copy of this object.\"\"\"\n return self.as_dataframe(self.data.copy())\n\n def add_columns(self, **columns):\n \"\"\"Add the given columns to a copy of this GenomicArray.\n\n Parameters\n ----------\n **columns : array\n Keyword arguments where the key is the new column's name and the\n value is an array of the same length as `self` which will be the new\n column's values.\n\n Returns\n -------\n GenomicArray or subclass\n A new instance of `self` with the given columns included in the\n underlying dataframe.\n \"\"\"\n return self.as_dataframe(self.data.assign(**columns))\n\n def keep_columns(self, colnames):\n \"\"\"Extract a subset of columns, reusing this instance's metadata.\"\"\"\n colnames = self.data.columns.intersection(colnames)\n return 
self.__class__(self.data.loc[:, colnames], self.meta.copy())\n\n def drop_extra_columns(self):\n \"\"\"Remove any optional columns from this GenomicArray.\n\n Returns\n -------\n GenomicArray or subclass\n A new copy with only the minimal set of columns required by the\n class (e.g. chromosome, start, end for GenomicArray; may be more for\n subclasses).\n \"\"\"\n table = self.data.loc[:, self._required_columns]\n return self.as_dataframe(table)\n\n def filter(self, func=None, **kwargs):\n \"\"\"Take a subset of rows where the given condition is true.\n\n Parameters\n ----------\n func : callable\n A boolean function which will be applied to each row to keep rows\n where the result is True.\n **kwargs : string\n Keyword arguments like ``chromosome=\"chr7\"`` or\n ``gene=\"Antitarget\"``, which will keep rows where the keyed field\n equals the specified value.\n\n Return\n ------\n GenomicArray\n Subset of `self` where the specified condition is True.\n \"\"\"\n table = self.data\n if func is not None:\n table = table[table.apply(func, axis=1)]\n for key, val in list(kwargs.items()):\n assert key in self\n table = table[table[key] == val]\n return self.as_dataframe(table)\n\n def shuffle(self):\n \"\"\"Randomize the order of bins in this array (in-place).\"\"\"\n order = np.arange(len(self.data))\n np.random.seed(679661)\n np.random.shuffle(order)\n self.data = self.data.iloc[order]\n return order\n\n def sort(self):\n \"\"\"Sort this array's bins in-place, with smart chromosome ordering.\"\"\"\n sort_key = self.data.chromosome.apply(sorter_chrom)\n self.data = self.data.assign(_sort_key_=sort_key).sort_values(by=[\n '_sort_key_', 'start', 'end'], kind='mergesort').drop('_sort_key_',\n axis=1).reset_index(drop=True)\n\n def sort_columns(self):\n \"\"\"Sort this array's columns in-place, per class definition.\"\"\"\n extra_cols = []\n for col in self.data.columns:\n if col not in self._required_columns:\n extra_cols.append(col)\n sorted_colnames = 
list(self._required_columns) + sorted(extra_cols)\n assert len(sorted_colnames) == len(self.data.columns)\n self.data = self.data.reindex(columns=sorted_colnames)\n\n def cut(self, other, combine=None):\n \"\"\"Split this array's regions at the boundaries in `other`.\"\"\"\n return NotImplemented\n <mask token>\n\n def intersection(self, other, mode: str='outer'):\n \"\"\"Select the bins in `self` that overlap the regions in `other`.\n\n The extra fields of `self`, but not `other`, are retained in the output.\n \"\"\"\n if mode == 'trim':\n chunks = [chunk.data for _, chunk in self.by_ranges(other, mode\n =mode, keep_empty=False)]\n return self.as_dataframe(pd.concat(chunks))\n slices = iter_slices(self.data, other.data, mode, False)\n indices = np.concatenate(list(slices))\n return self.as_dataframe(self.data.loc[indices])\n\n def merge(self, bp: int=0, stranded: bool=False, combine: Optional[Dict\n [str, Callable]]=None):\n \"\"\"Merge adjacent or overlapping regions into single rows.\n\n Similar to 'bedtools merge'.\n \"\"\"\n return self.as_dataframe(merge(self.data, bp, stranded, combine))\n\n def resize_ranges(self, bp: int, chrom_sizes: Optional[Mapping[str,\n Numeric]]=None):\n \"\"\"Resize each genomic bin by a fixed number of bases at each end.\n\n Bin 'start' values have a minimum of 0, and `chrom_sizes` can\n specify each chromosome's maximum 'end' value.\n\n Similar to 'bedtools slop'.\n\n Parameters\n ----------\n bp : int\n Number of bases in each direction to expand or shrink each bin.\n Applies to 'start' and 'end' values symmetrically, and may be\n positive (expand) or negative (shrink).\n chrom_sizes : dict of string-to-int\n Chromosome name to length in base pairs. 
If given, all chromosomes\n in `self` must be included.\n \"\"\"\n table = self.data\n limits = {'lower': 0}\n if chrom_sizes:\n limits['upper'] = self.chromosome.replace(chrom_sizes)\n table = table.assign(start=(table['start'] - bp).clip(**limits),\n end=(table['end'] + bp).clip(**limits))\n if bp < 0:\n ok_size = table['end'] - table['start'] > 0\n logging.debug('Dropping %d bins with size <= 0', (~ok_size).sum())\n table = table[ok_size]\n return self.as_dataframe(table.copy())\n\n def squash(self, combine=None):\n \"\"\"Combine some groups of rows, by some criteria, into single rows.\"\"\"\n return NotImplemented\n\n def subdivide(self, avg_size: int, min_size: int=0, verbose: bool=False):\n \"\"\"Split this array's regions into roughly equal-sized sub-regions.\"\"\"\n return self.as_dataframe(subdivide(self.data, avg_size, min_size,\n verbose))\n\n def subtract(self, other):\n \"\"\"Remove the overlapping regions in `other` from this array.\"\"\"\n return self.as_dataframe(subtract(self.data, other.data))\n\n def total_range_size(self) ->int:\n \"\"\"Total number of bases covered by all (merged) regions.\"\"\"\n if not len(self):\n return 0\n regions = merge(self.data, bp=1)\n return regions.end.sum() - regions.start.sum()\n\n def _get_gene_map(self) ->OrderedDict:\n \"\"\"Map unique gene names to their indices in this array.\n\n Returns\n -------\n OrderedDict\n An (ordered) dictionary of unique gene names and the data indices of\n their segments in the order of occurrence (genomic order).\n \"\"\"\n if 'gene' not in self.data:\n return OrderedDict()\n genes: OrderedDict = OrderedDict()\n for idx, genestr in self.data['gene'].items():\n if pd.isnull(genestr):\n continue\n for gene in genestr.split(','):\n if gene not in genes:\n genes[gene] = []\n genes[gene].append(idx)\n return genes\n",
"step-4": "<mask token>\n\n\nclass GenomicArray:\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, data_table: Optional[Union[Sequence, pd.DataFrame]],\n meta_dict: Optional[Mapping]=None):\n if data_table is None or isinstance(data_table, (list, tuple)\n ) and not len(data_table) or isinstance(data_table, pd.DataFrame\n ) and not len(data_table.columns):\n data_table = self._make_blank()\n else:\n if not isinstance(data_table, pd.DataFrame):\n data_table = pd.DataFrame(data_table)\n if not all(c in data_table.columns for c in self._required_columns\n ):\n raise ValueError('data table must have at least columns ' +\n f'{self._required_columns!r}; got {tuple(data_table.columns)!r}'\n )\n if len(data_table):\n\n def ok_dtype(col, dtype):\n return isinstance(data_table[col].iat[0], dtype)\n else:\n\n def ok_dtype(col, dtype):\n return data_table[col].dtype == np.dtype(dtype)\n recast_cols = {col: dtype for col, dtype in zip(self.\n _required_columns, self._required_dtypes) if not ok_dtype(\n col, dtype)}\n if recast_cols:\n data_table = data_table.astype(recast_cols)\n self.data = data_table\n self.meta = dict(meta_dict) if meta_dict is not None and len(meta_dict\n ) else {}\n\n @classmethod\n def _make_blank(cls) ->pd.DataFrame:\n \"\"\"Create an empty dataframe with the columns required by this class.\"\"\"\n spec = list(zip(cls._required_columns, cls._required_dtypes))\n try:\n arr = np.zeros(0, dtype=spec)\n return pd.DataFrame(arr)\n except TypeError as exc:\n raise TypeError('{exc}: {spec}') from exc\n\n @classmethod\n def from_columns(cls, columns: Mapping[str, Iterable], meta_dict:\n Optional[Mapping]=None):\n \"\"\"Create a new instance from column arrays, given as a dict.\"\"\"\n table = pd.DataFrame.from_dict(columns)\n ary = cls(table, meta_dict)\n ary.sort_columns()\n return ary\n\n @classmethod\n def from_rows(cls, rows: Iterable, columns: Optional[Sequence[str]]=\n None, meta_dict: Optional[Mapping]=None):\n \"\"\"Create a new instance 
from a list of rows, as tuples or arrays.\"\"\"\n if columns is None:\n columns = cls._required_columns\n table = pd.DataFrame.from_records(rows, columns=columns)\n return cls(table, meta_dict)\n\n def as_columns(self, **columns):\n \"\"\"Wrap the named columns in this instance's metadata.\"\"\"\n return self.__class__.from_columns(columns, self.meta)\n\n def as_dataframe(self, dframe: pd.DataFrame, reset_index: bool=False):\n \"\"\"Wrap the given pandas DataFrame in this instance's metadata.\"\"\"\n if reset_index:\n dframe = dframe.reset_index(drop=True)\n return self.__class__(dframe, self.meta.copy())\n\n def as_series(self, arraylike: Iterable) ->pd.Series:\n \"\"\"Coerce `arraylike` to a Series with this instance's index.\"\"\"\n return pd.Series(arraylike, index=self.data.index)\n\n def as_rows(self, rows: Iterable):\n \"\"\"Wrap the given rows in this instance's metadata.\"\"\"\n try:\n out = self.from_rows(rows, columns=self.data.columns, meta_dict\n =self.meta)\n except AssertionError as exc:\n columns = self.data.columns.tolist()\n firstrow = next(iter(rows))\n raise RuntimeError(\n f'Passed {len(columns)} columns {columns!r}, but {len(firstrow)} elements in first row: {firstrow}'\n ) from exc\n return out\n\n def __bool__(self) ->bool:\n return bool(len(self.data))\n\n def __eq__(self, other) ->bool:\n return isinstance(other, self.__class__) and self.data.equals(other\n .data)\n\n def __len__(self) ->int:\n return len(self.data)\n\n def __contains__(self, key) ->bool:\n return key in self.data.columns\n\n def __getitem__(self, index) ->Union[pd.Series, pd.DataFrame]:\n \"\"\"Access a portion of the data.\n\n Cases:\n\n - single integer: a row, as pd.Series\n - string row name: a column, as pd.Series\n - a boolean array: masked rows, as_dataframe\n - tuple of integers: selected rows, as_dataframe\n \"\"\"\n if isinstance(index, int):\n return self.data.iloc[index]\n if isinstance(index, str):\n return self.data[index]\n if isinstance(index, tuple) and 
len(index) == 2 and index[1\n ] in self.data.columns:\n return self.data.loc[index]\n if isinstance(index, slice):\n return self.as_dataframe(self.data[index])\n try:\n if isinstance(index, type(None)) or len(index) == 0:\n empty = pd.DataFrame(columns=self.data.columns)\n return self.as_dataframe(empty)\n except TypeError as exc:\n raise TypeError(\n f'object of type {type(index)!r} cannot be used as an index into a {self.__class__.__name__}'\n ) from exc\n return self.as_dataframe(self.data[index])\n\n def __setitem__(self, index, value):\n \"\"\"Assign to a portion of the data.\"\"\"\n if isinstance(index, int):\n self.data.iloc[index] = value\n elif isinstance(index, str):\n self.data[index] = value\n elif isinstance(index, tuple) and len(index) == 2 and index[1\n ] in self.data.columns:\n self.data.loc[index] = value\n else:\n assert isinstance(index, slice) or len(index) > 0\n self.data[index] = value\n\n def __delitem__(self, index):\n return NotImplemented\n\n def __iter__(self):\n return self.data.itertuples(index=False)\n <mask token>\n\n @property\n def chromosome(self) ->pd.Series:\n \"\"\"Get column 'chromosome'.\"\"\"\n return self.data['chromosome']\n\n @property\n def start(self) ->pd.Series:\n \"\"\"Get column 'start'.\"\"\"\n return self.data['start']\n\n @property\n def end(self) ->pd.Series:\n \"\"\"Get column 'end'.\"\"\"\n return self.data['end']\n\n @property\n def sample_id(self) ->pd.Series:\n \"\"\"Get metadata field 'sample_id'.\"\"\"\n return self.meta.get('sample_id')\n\n def autosomes(self, also=None):\n \"\"\"Select chromosomes w/ integer names, ignoring any 'chr' prefixes.\"\"\"\n is_auto = self.chromosome.str.match('(chr)?\\\\d+$', na=False)\n if not is_auto.any():\n return self\n if also is not None:\n if isinstance(also, pd.Series):\n is_auto |= also\n else:\n if isinstance(also, str):\n also = [also]\n for a_chrom in also:\n is_auto |= self.chromosome == a_chrom\n return self[is_auto]\n <mask token>\n\n def by_chromosome(self) 
->Iterator:\n \"\"\"Iterate over bins grouped by chromosome name.\"\"\"\n for chrom, subtable in self.data.groupby('chromosome', sort=False):\n yield chrom, self.as_dataframe(subtable)\n\n def by_ranges(self, other, mode: str='outer', keep_empty: bool=True\n ) ->Iterator:\n \"\"\"Group rows by another GenomicArray's bin coordinate ranges.\n\n For example, this can be used to group SNVs by CNV segments.\n\n Bins in this array that fall outside the other array's bins are skipped.\n\n Parameters\n ----------\n other : GenomicArray\n Another GA instance.\n mode : string\n Determines what to do with bins that overlap a boundary of the\n selection. Possible values are:\n\n - ``inner``: Drop the bins on the selection boundary, don't emit them.\n - ``outer``: Keep/emit those bins as they are.\n - ``trim``: Emit those bins but alter their boundaries to match the\n selection; the bin start or end position is replaced with the\n selection boundary position.\n keep_empty : bool\n Whether to also yield `other` bins with no overlapping bins in\n `self`, or to skip them when iterating.\n\n Yields\n ------\n tuple\n (other bin, GenomicArray of overlapping rows in self)\n \"\"\"\n for bin_row, subrange in by_ranges(self.data, other.data, mode,\n keep_empty):\n if len(subrange):\n yield bin_row, self.as_dataframe(subrange)\n elif keep_empty:\n yield bin_row, self.as_rows(subrange)\n\n def coords(self, also: Union[str, Iterable[str]]=()):\n \"\"\"Iterate over plain coordinates of each bin: chromosome, start, end.\n\n Parameters\n ----------\n also : str, or iterable of strings\n Also include these columns from `self`, in addition to chromosome,\n start, and end.\n\n Example, yielding rows in BED format:\n\n >>> probes.coords(also=[\"gene\", \"strand\"])\n \"\"\"\n cols = list(GenomicArray._required_columns)\n if also:\n if isinstance(also, str):\n cols.append(also)\n else:\n cols.extend(also)\n coordframe = self.data.loc[:, cols]\n return coordframe.itertuples(index=False)\n\n def 
labels(self) ->pd.Series:\n \"\"\"Get chromosomal coordinates as genomic range labels.\"\"\"\n return self.data.apply(to_label, axis=1)\n\n def in_range(self, chrom: Optional[str]=None, start: Optional[Numeric]=\n None, end: Optional[Numeric]=None, mode: str='outer'):\n \"\"\"Get the GenomicArray portion within the given genomic range.\n\n Parameters\n ----------\n chrom : str or None\n Chromosome name to select. Use None if `self` has only one\n chromosome.\n start : int or None\n Start coordinate of range to select, in 0-based coordinates.\n If None, start from 0.\n end : int or None\n End coordinate of range to select. If None, select to the end of the\n chromosome.\n mode : str\n As in `by_ranges`: ``outer`` includes bins straddling the range\n boundaries, ``trim`` additionally alters the straddling bins'\n endpoints to match the range boundaries, and ``inner`` excludes\n those bins.\n\n Returns\n -------\n GenomicArray\n The subset of `self` enclosed by the specified range.\n \"\"\"\n starts = [int(start)] if start is not None else None\n ends = [int(end)] if end is not None else None\n results = iter_ranges(self.data, chrom, starts, ends, mode)\n return self.as_dataframe(next(results))\n\n def in_ranges(self, chrom: Optional[str]=None, starts: Optional[\n Sequence[Numeric]]=None, ends: Optional[Sequence[Numeric]]=None,\n mode: str='outer'):\n \"\"\"Get the GenomicArray portion within the specified ranges.\n\n Similar to `in_ranges`, but concatenating the selections of all the\n regions specified by the `starts` and `ends` arrays.\n\n Parameters\n ----------\n chrom : str or None\n Chromosome name to select. Use None if `self` has only one\n chromosome.\n starts : int array, or None\n Start coordinates of ranges to select, in 0-based coordinates.\n If None, start from 0.\n ends : int array, or None\n End coordinates of ranges to select. If None, select to the end of the\n chromosome. 
If `starts` and `ends` are both specified, they must be\n arrays of equal length.\n mode : str\n As in `by_ranges`: ``outer`` includes bins straddling the range\n boundaries, ``trim`` additionally alters the straddling bins'\n endpoints to match the range boundaries, and ``inner`` excludes\n those bins.\n\n Returns\n -------\n GenomicArray\n Concatenation of all the subsets of `self` enclosed by the specified\n ranges.\n \"\"\"\n table = pd.concat(iter_ranges(self.data, chrom, starts, ends, mode),\n sort=False)\n return self.as_dataframe(table)\n\n def into_ranges(self, other, column: str, default, summary_func:\n Optional[Callable]=None):\n \"\"\"Re-bin values from `column` into the corresponding ranges in `other`.\n\n Match overlapping/intersecting rows from `other` to each row in `self`.\n Then, within each range in `other`, extract the value(s) from `column`\n in `self`, using the function `summary_func` to produce a single value\n if multiple bins in `self` map to a single range in `other`.\n\n For example, group SNVs (self) by CNV segments (other) and calculate the\n median (summary_func) of each SNV group's allele frequencies.\n\n Parameters\n ----------\n other : GenomicArray\n Ranges into which the overlapping values of `self` will be\n summarized.\n column : string\n Column name in `self` to extract values from.\n default\n Value to assign to indices in `other` that do not overlap any bins in\n `self`. 
Type should be the same as or compatible with the output\n field specified by `column`, or the output of `summary_func`.\n summary_func : callable, dict of string-to-callable, or None\n Specify how to reduce 1 or more `other` rows into a single value for\n the corresponding row in `self`.\n\n - If callable, apply to the `column` field each group of rows in\n `other` column.\n - If a single-element dict of column name to callable, apply to that\n field in `other` instead of `column`.\n - If None, use an appropriate summarizing function for the datatype\n of the `column` column in `other` (e.g. median of numbers,\n concatenation of strings).\n - If some other value, assign that value to `self` wherever there is\n an overlap.\n\n Returns\n -------\n pd.Series\n The extracted and summarized values from `self` corresponding to\n other's genomic ranges, the same length as `other`.\n \"\"\"\n if column not in self:\n logging.warning(\"No '%s' column available for summary calculation\",\n column)\n return pd.Series(np.repeat(default, len(other)))\n return into_ranges(self.data, other.data, column, default, summary_func\n )\n\n def iter_ranges_of(self, other, column: str, mode: str='outer',\n keep_empty: bool=True):\n \"\"\"Group rows by another GenomicArray's bin coordinate ranges.\n\n For example, this can be used to group SNVs by CNV segments.\n\n Bins in this array that fall outside the other array's bins are skipped.\n\n Parameters\n ----------\n other : GenomicArray\n Another GA instance.\n column : string\n Column name in `self` to extract values from.\n mode : string\n Determines what to do with bins that overlap a boundary of the\n selection. 
Possible values are:\n\n - ``inner``: Drop the bins on the selection boundary, don't emit them.\n - ``outer``: Keep/emit those bins as they are.\n - ``trim``: Emit those bins but alter their boundaries to match the\n selection; the bin start or end position is replaced with the\n selection boundary position.\n keep_empty : bool\n Whether to also yield `other` bins with no overlapping bins in\n `self`, or to skip them when iterating.\n\n Yields\n ------\n tuple\n (other bin, GenomicArray of overlapping rows in self)\n \"\"\"\n if column not in self.data.columns:\n raise ValueError(f'No column named {column!r} in this object')\n ser = self.data[column]\n for slc in iter_slices(self.data, other.data, mode, keep_empty):\n yield ser[slc]\n\n def add(self, other):\n \"\"\"Combine this array's data with another GenomicArray (in-place).\n\n Any optional columns must match between both arrays.\n \"\"\"\n if not isinstance(other, self.__class__):\n raise ValueError(\n f'Argument (type {type(other)}) is not a {self.__class__} instance'\n )\n if len(other.data):\n self.data = pd.concat([self.data, other.data], ignore_index=True)\n self.sort()\n <mask token>\n\n def copy(self):\n \"\"\"Create an independent copy of this object.\"\"\"\n return self.as_dataframe(self.data.copy())\n\n def add_columns(self, **columns):\n \"\"\"Add the given columns to a copy of this GenomicArray.\n\n Parameters\n ----------\n **columns : array\n Keyword arguments where the key is the new column's name and the\n value is an array of the same length as `self` which will be the new\n column's values.\n\n Returns\n -------\n GenomicArray or subclass\n A new instance of `self` with the given columns included in the\n underlying dataframe.\n \"\"\"\n return self.as_dataframe(self.data.assign(**columns))\n\n def keep_columns(self, colnames):\n \"\"\"Extract a subset of columns, reusing this instance's metadata.\"\"\"\n colnames = self.data.columns.intersection(colnames)\n return 
self.__class__(self.data.loc[:, colnames], self.meta.copy())\n\n def drop_extra_columns(self):\n \"\"\"Remove any optional columns from this GenomicArray.\n\n Returns\n -------\n GenomicArray or subclass\n A new copy with only the minimal set of columns required by the\n class (e.g. chromosome, start, end for GenomicArray; may be more for\n subclasses).\n \"\"\"\n table = self.data.loc[:, self._required_columns]\n return self.as_dataframe(table)\n\n def filter(self, func=None, **kwargs):\n \"\"\"Take a subset of rows where the given condition is true.\n\n Parameters\n ----------\n func : callable\n A boolean function which will be applied to each row to keep rows\n where the result is True.\n **kwargs : string\n Keyword arguments like ``chromosome=\"chr7\"`` or\n ``gene=\"Antitarget\"``, which will keep rows where the keyed field\n equals the specified value.\n\n Return\n ------\n GenomicArray\n Subset of `self` where the specified condition is True.\n \"\"\"\n table = self.data\n if func is not None:\n table = table[table.apply(func, axis=1)]\n for key, val in list(kwargs.items()):\n assert key in self\n table = table[table[key] == val]\n return self.as_dataframe(table)\n\n def shuffle(self):\n \"\"\"Randomize the order of bins in this array (in-place).\"\"\"\n order = np.arange(len(self.data))\n np.random.seed(679661)\n np.random.shuffle(order)\n self.data = self.data.iloc[order]\n return order\n\n def sort(self):\n \"\"\"Sort this array's bins in-place, with smart chromosome ordering.\"\"\"\n sort_key = self.data.chromosome.apply(sorter_chrom)\n self.data = self.data.assign(_sort_key_=sort_key).sort_values(by=[\n '_sort_key_', 'start', 'end'], kind='mergesort').drop('_sort_key_',\n axis=1).reset_index(drop=True)\n\n def sort_columns(self):\n \"\"\"Sort this array's columns in-place, per class definition.\"\"\"\n extra_cols = []\n for col in self.data.columns:\n if col not in self._required_columns:\n extra_cols.append(col)\n sorted_colnames = 
list(self._required_columns) + sorted(extra_cols)\n assert len(sorted_colnames) == len(self.data.columns)\n self.data = self.data.reindex(columns=sorted_colnames)\n\n def cut(self, other, combine=None):\n \"\"\"Split this array's regions at the boundaries in `other`.\"\"\"\n return NotImplemented\n\n def flatten(self, combine: Optional[Dict[str, Callable]]=None,\n split_columns: Optional[Iterable[str]]=None):\n \"\"\"Split this array's regions where they overlap.\"\"\"\n return self.as_dataframe(flatten(self.data, combine=combine,\n split_columns=split_columns))\n\n def intersection(self, other, mode: str='outer'):\n \"\"\"Select the bins in `self` that overlap the regions in `other`.\n\n The extra fields of `self`, but not `other`, are retained in the output.\n \"\"\"\n if mode == 'trim':\n chunks = [chunk.data for _, chunk in self.by_ranges(other, mode\n =mode, keep_empty=False)]\n return self.as_dataframe(pd.concat(chunks))\n slices = iter_slices(self.data, other.data, mode, False)\n indices = np.concatenate(list(slices))\n return self.as_dataframe(self.data.loc[indices])\n\n def merge(self, bp: int=0, stranded: bool=False, combine: Optional[Dict\n [str, Callable]]=None):\n \"\"\"Merge adjacent or overlapping regions into single rows.\n\n Similar to 'bedtools merge'.\n \"\"\"\n return self.as_dataframe(merge(self.data, bp, stranded, combine))\n\n def resize_ranges(self, bp: int, chrom_sizes: Optional[Mapping[str,\n Numeric]]=None):\n \"\"\"Resize each genomic bin by a fixed number of bases at each end.\n\n Bin 'start' values have a minimum of 0, and `chrom_sizes` can\n specify each chromosome's maximum 'end' value.\n\n Similar to 'bedtools slop'.\n\n Parameters\n ----------\n bp : int\n Number of bases in each direction to expand or shrink each bin.\n Applies to 'start' and 'end' values symmetrically, and may be\n positive (expand) or negative (shrink).\n chrom_sizes : dict of string-to-int\n Chromosome name to length in base pairs. 
If given, all chromosomes\n in `self` must be included.\n \"\"\"\n table = self.data\n limits = {'lower': 0}\n if chrom_sizes:\n limits['upper'] = self.chromosome.replace(chrom_sizes)\n table = table.assign(start=(table['start'] - bp).clip(**limits),\n end=(table['end'] + bp).clip(**limits))\n if bp < 0:\n ok_size = table['end'] - table['start'] > 0\n logging.debug('Dropping %d bins with size <= 0', (~ok_size).sum())\n table = table[ok_size]\n return self.as_dataframe(table.copy())\n\n def squash(self, combine=None):\n \"\"\"Combine some groups of rows, by some criteria, into single rows.\"\"\"\n return NotImplemented\n\n def subdivide(self, avg_size: int, min_size: int=0, verbose: bool=False):\n \"\"\"Split this array's regions into roughly equal-sized sub-regions.\"\"\"\n return self.as_dataframe(subdivide(self.data, avg_size, min_size,\n verbose))\n\n def subtract(self, other):\n \"\"\"Remove the overlapping regions in `other` from this array.\"\"\"\n return self.as_dataframe(subtract(self.data, other.data))\n\n def total_range_size(self) ->int:\n \"\"\"Total number of bases covered by all (merged) regions.\"\"\"\n if not len(self):\n return 0\n regions = merge(self.data, bp=1)\n return regions.end.sum() - regions.start.sum()\n\n def _get_gene_map(self) ->OrderedDict:\n \"\"\"Map unique gene names to their indices in this array.\n\n Returns\n -------\n OrderedDict\n An (ordered) dictionary of unique gene names and the data indices of\n their segments in the order of occurrence (genomic order).\n \"\"\"\n if 'gene' not in self.data:\n return OrderedDict()\n genes: OrderedDict = OrderedDict()\n for idx, genestr in self.data['gene'].items():\n if pd.isnull(genestr):\n continue\n for gene in genestr.split(','):\n if gene not in genes:\n genes[gene] = []\n genes[gene].append(idx)\n return genes\n",
"step-5": "\"\"\"Base class for an array of annotated genomic regions.\"\"\"\nimport logging\nfrom typing import Callable, Dict, Iterable, Iterator, Mapping, Optional, Sequence, Union\nfrom collections import OrderedDict\n\nimport numpy as np\nimport pandas as pd\n\nfrom .chromsort import sorter_chrom\nfrom .intersect import by_ranges, into_ranges, iter_ranges, iter_slices, Numeric\nfrom .merge import flatten, merge\nfrom .rangelabel import to_label\nfrom .subtract import subtract\nfrom .subdivide import subdivide\n\n\nclass GenomicArray:\n \"\"\"An array of genomic intervals. Base class for genomic data structures.\n\n Can represent most BED-like tabular formats with arbitrary additional\n columns.\n \"\"\"\n\n _required_columns = (\"chromosome\", \"start\", \"end\")\n _required_dtypes = (str, int, int)\n\n def __init__(\n self,\n data_table: Optional[Union[Sequence, pd.DataFrame]],\n meta_dict: Optional[Mapping] = None,\n ):\n # Validation\n if (\n data_table is None\n or (isinstance(data_table, (list, tuple)) and not len(data_table))\n or (isinstance(data_table, pd.DataFrame) and not len(data_table.columns))\n ):\n data_table = self._make_blank()\n else:\n if not isinstance(data_table, pd.DataFrame):\n # Rarely if ever needed -- prefer from_rows, from_columns, etc.\n data_table = pd.DataFrame(data_table)\n if not all(c in data_table.columns for c in self._required_columns):\n raise ValueError(\n \"data table must have at least columns \"\n + f\"{self._required_columns!r}; got {tuple(data_table.columns)!r}\"\n )\n # Ensure columns are the right type\n # (in case they've been automatically converted to the wrong type,\n # e.g. 
chromosome names as integers; genome coordinates as floats)\n if len(data_table):\n\n def ok_dtype(col, dtype):\n return isinstance(data_table[col].iat[0], dtype)\n\n else:\n\n def ok_dtype(col, dtype):\n return data_table[col].dtype == np.dtype(dtype)\n\n recast_cols = {\n col: dtype\n for col, dtype in zip(self._required_columns, self._required_dtypes)\n if not ok_dtype(col, dtype)\n }\n if recast_cols:\n data_table = data_table.astype(recast_cols)\n\n self.data = data_table\n self.meta = dict(meta_dict) if meta_dict is not None and len(meta_dict) else {}\n\n @classmethod\n def _make_blank(cls) -> pd.DataFrame:\n \"\"\"Create an empty dataframe with the columns required by this class.\"\"\"\n spec = list(zip(cls._required_columns, cls._required_dtypes))\n try:\n arr = np.zeros(0, dtype=spec)\n return pd.DataFrame(arr)\n except TypeError as exc:\n raise TypeError(r\"{exc}: {spec}\") from exc\n\n @classmethod\n def from_columns(\n cls, columns: Mapping[str, Iterable], meta_dict: Optional[Mapping] = None\n ):\n \"\"\"Create a new instance from column arrays, given as a dict.\"\"\"\n table = pd.DataFrame.from_dict(columns)\n ary = cls(table, meta_dict)\n ary.sort_columns()\n return ary\n\n @classmethod\n def from_rows(\n cls,\n rows: Iterable,\n columns: Optional[Sequence[str]] = None,\n meta_dict: Optional[Mapping] = None,\n ):\n \"\"\"Create a new instance from a list of rows, as tuples or arrays.\"\"\"\n if columns is None:\n columns = cls._required_columns\n table = pd.DataFrame.from_records(rows, columns=columns)\n return cls(table, meta_dict)\n\n def as_columns(self, **columns):\n \"\"\"Wrap the named columns in this instance's metadata.\"\"\"\n return self.__class__.from_columns(columns, self.meta)\n # return self.__class__(self.data.loc[:, columns], self.meta.copy())\n\n def as_dataframe(self, dframe: pd.DataFrame, reset_index: bool = False):\n \"\"\"Wrap the given pandas DataFrame in this instance's metadata.\"\"\"\n if reset_index:\n dframe = 
dframe.reset_index(drop=True)\n return self.__class__(dframe, self.meta.copy())\n\n def as_series(self, arraylike: Iterable) -> pd.Series:\n \"\"\"Coerce `arraylike` to a Series with this instance's index.\"\"\"\n return pd.Series(arraylike, index=self.data.index)\n\n def as_rows(self, rows: Iterable):\n \"\"\"Wrap the given rows in this instance's metadata.\"\"\"\n try:\n out = self.from_rows(rows, columns=self.data.columns, meta_dict=self.meta)\n except AssertionError as exc:\n columns = self.data.columns.tolist()\n firstrow = next(iter(rows))\n raise RuntimeError(\n f\"Passed {len(columns)} columns {columns!r}, but \"\n f\"{len(firstrow)} elements in first row: {firstrow}\"\n ) from exc\n return out\n\n # Container behaviour\n\n def __bool__(self) -> bool:\n return bool(len(self.data))\n\n def __eq__(self, other) -> bool:\n return isinstance(other, self.__class__) and self.data.equals(other.data)\n\n def __len__(self) -> int:\n return len(self.data)\n\n def __contains__(self, key) -> bool:\n return key in self.data.columns\n\n def __getitem__(self, index) -> Union[pd.Series, pd.DataFrame]:\n \"\"\"Access a portion of the data.\n\n Cases:\n\n - single integer: a row, as pd.Series\n - string row name: a column, as pd.Series\n - a boolean array: masked rows, as_dataframe\n - tuple of integers: selected rows, as_dataframe\n \"\"\"\n if isinstance(index, int):\n # A single row\n return self.data.iloc[index]\n # return self.as_dataframe(self.data.iloc[index:index+1])\n if isinstance(index, str):\n # A column, by name\n return self.data[index]\n if (\n isinstance(index, tuple)\n and len(index) == 2\n and index[1] in self.data.columns\n ):\n # Row index, column index -> cell value\n return self.data.loc[index]\n if isinstance(index, slice):\n # return self.as_dataframe(self.data.take(index))\n return self.as_dataframe(self.data[index])\n # Iterable -- selected row indices or boolean array, probably\n try:\n if isinstance(index, type(None)) or len(index) == 0:\n empty = 
pd.DataFrame(columns=self.data.columns)\n return self.as_dataframe(empty)\n except TypeError as exc:\n raise TypeError(\n f\"object of type {type(index)!r} \"\n f\"cannot be used as an index into a {self.__class__.__name__}\"\n ) from exc\n return self.as_dataframe(self.data[index])\n # return self.as_dataframe(self.data.take(index))\n\n def __setitem__(self, index, value):\n \"\"\"Assign to a portion of the data.\"\"\"\n if isinstance(index, int):\n self.data.iloc[index] = value\n elif isinstance(index, str):\n self.data[index] = value\n elif (\n isinstance(index, tuple)\n and len(index) == 2\n and index[1] in self.data.columns\n ):\n self.data.loc[index] = value\n else:\n assert isinstance(index, slice) or len(index) > 0\n self.data[index] = value\n\n def __delitem__(self, index):\n return NotImplemented\n\n def __iter__(self):\n return self.data.itertuples(index=False)\n\n __next__ = next\n\n @property\n def chromosome(self) -> pd.Series:\n \"\"\"Get column 'chromosome'.\"\"\"\n return self.data[\"chromosome\"]\n\n @property\n def start(self) -> pd.Series:\n \"\"\"Get column 'start'.\"\"\"\n return self.data[\"start\"]\n\n @property\n def end(self) -> pd.Series:\n \"\"\"Get column 'end'.\"\"\"\n return self.data[\"end\"]\n\n @property\n def sample_id(self) -> pd.Series:\n \"\"\"Get metadata field 'sample_id'.\"\"\"\n return self.meta.get(\"sample_id\")\n\n # Traversal\n\n def autosomes(self, also=None):\n \"\"\"Select chromosomes w/ integer names, ignoring any 'chr' prefixes.\"\"\"\n is_auto = self.chromosome.str.match(r\"(chr)?\\d+$\", na=False)\n if not is_auto.any():\n # The autosomes, if any, are not named with plain integers\n return self\n if also is not None:\n if isinstance(also, pd.Series):\n is_auto |= also\n else:\n # The assumption is that `also` is a single chromosome name or an iterable thereof.\n if isinstance(also, str):\n also = [also]\n for a_chrom in also:\n is_auto |= self.chromosome == a_chrom\n return self[is_auto]\n\n def by_arm(self, 
min_gap_size: Union[int, float] = 1e5, min_arm_bins: int = 50):\n \"\"\"Iterate over bins grouped by chromosome arm (inferred).\"\"\"\n # ENH:\n # - Accept GArray of actual centromere regions as input\n # -> find largest gap (any size) within cmere region, split there\n # - Cache centromere locations once found\n self.data.chromosome = self.data.chromosome.astype(str)\n for chrom, subtable in self.data.groupby(\"chromosome\", sort=False):\n margin = max(min_arm_bins, int(round(0.1 * len(subtable))))\n if len(subtable) > 2 * margin + 1:\n # Found a candidate centromere\n gaps = (\n subtable.start.values[margin + 1 : -margin]\n - subtable.end.values[margin : -margin - 1]\n )\n cmere_idx = gaps.argmax() + margin + 1\n cmere_size = gaps[cmere_idx - margin - 1]\n else:\n cmere_idx = 0\n cmere_size = 0\n if cmere_idx and cmere_size >= min_gap_size:\n logging.debug(\n \"%s centromere at %d of %d bins (size %s)\",\n chrom,\n cmere_idx,\n len(subtable),\n cmere_size,\n )\n p_arm = subtable.index[:cmere_idx]\n yield chrom, self.as_dataframe(subtable.loc[p_arm, :])\n q_arm = subtable.index[cmere_idx:]\n yield chrom, self.as_dataframe(subtable.loc[q_arm, :])\n else:\n # No centromere found -- emit the whole chromosome\n if cmere_idx:\n logging.debug(\n \"%s: Ignoring centromere at %d of %d bins (size %s)\",\n chrom,\n cmere_idx,\n len(subtable),\n cmere_size,\n )\n else:\n logging.debug(\"%s: Skipping centromere search, too small\", chrom)\n yield chrom, self.as_dataframe(subtable)\n\n def by_chromosome(self) -> Iterator:\n \"\"\"Iterate over bins grouped by chromosome name.\"\"\"\n for chrom, subtable in self.data.groupby(\"chromosome\", sort=False):\n yield chrom, self.as_dataframe(subtable)\n\n def by_ranges(\n self, other, mode: str = \"outer\", keep_empty: bool = True\n ) -> Iterator:\n \"\"\"Group rows by another GenomicArray's bin coordinate ranges.\n\n For example, this can be used to group SNVs by CNV segments.\n\n Bins in this array that fall outside the other 
array's bins are skipped.\n\n Parameters\n ----------\n other : GenomicArray\n Another GA instance.\n mode : string\n Determines what to do with bins that overlap a boundary of the\n selection. Possible values are:\n\n - ``inner``: Drop the bins on the selection boundary, don't emit them.\n - ``outer``: Keep/emit those bins as they are.\n - ``trim``: Emit those bins but alter their boundaries to match the\n selection; the bin start or end position is replaced with the\n selection boundary position.\n keep_empty : bool\n Whether to also yield `other` bins with no overlapping bins in\n `self`, or to skip them when iterating.\n\n Yields\n ------\n tuple\n (other bin, GenomicArray of overlapping rows in self)\n \"\"\"\n for bin_row, subrange in by_ranges(self.data, other.data, mode, keep_empty):\n if len(subrange):\n yield bin_row, self.as_dataframe(subrange)\n elif keep_empty:\n yield bin_row, self.as_rows(subrange)\n\n def coords(self, also: Union[str, Iterable[str]] = ()):\n \"\"\"Iterate over plain coordinates of each bin: chromosome, start, end.\n\n Parameters\n ----------\n also : str, or iterable of strings\n Also include these columns from `self`, in addition to chromosome,\n start, and end.\n\n Example, yielding rows in BED format:\n\n >>> probes.coords(also=[\"gene\", \"strand\"])\n \"\"\"\n cols = list(GenomicArray._required_columns)\n if also:\n if isinstance(also, str):\n cols.append(also)\n else:\n cols.extend(also)\n coordframe = self.data.loc[:, cols]\n return coordframe.itertuples(index=False)\n\n def labels(self) -> pd.Series:\n \"\"\"Get chromosomal coordinates as genomic range labels.\"\"\"\n return self.data.apply(to_label, axis=1)\n\n def in_range(\n self,\n chrom: Optional[str] = None,\n start: Optional[Numeric] = None,\n end: Optional[Numeric] = None,\n mode: str = \"outer\",\n ):\n \"\"\"Get the GenomicArray portion within the given genomic range.\n\n Parameters\n ----------\n chrom : str or None\n Chromosome name to select. 
Use None if `self` has only one\n chromosome.\n start : int or None\n Start coordinate of range to select, in 0-based coordinates.\n If None, start from 0.\n end : int or None\n End coordinate of range to select. If None, select to the end of the\n chromosome.\n mode : str\n As in `by_ranges`: ``outer`` includes bins straddling the range\n boundaries, ``trim`` additionally alters the straddling bins'\n endpoints to match the range boundaries, and ``inner`` excludes\n those bins.\n\n Returns\n -------\n GenomicArray\n The subset of `self` enclosed by the specified range.\n \"\"\"\n starts = [int(start)] if start is not None else None\n ends = [int(end)] if end is not None else None\n results = iter_ranges(self.data, chrom, starts, ends, mode)\n return self.as_dataframe(next(results))\n\n def in_ranges(\n self,\n chrom: Optional[str] = None,\n starts: Optional[Sequence[Numeric]] = None,\n ends: Optional[Sequence[Numeric]] = None,\n mode: str = \"outer\",\n ):\n \"\"\"Get the GenomicArray portion within the specified ranges.\n\n Similar to `in_ranges`, but concatenating the selections of all the\n regions specified by the `starts` and `ends` arrays.\n\n Parameters\n ----------\n chrom : str or None\n Chromosome name to select. Use None if `self` has only one\n chromosome.\n starts : int array, or None\n Start coordinates of ranges to select, in 0-based coordinates.\n If None, start from 0.\n ends : int array, or None\n End coordinates of ranges to select. If None, select to the end of the\n chromosome. 
If `starts` and `ends` are both specified, they must be\n arrays of equal length.\n mode : str\n As in `by_ranges`: ``outer`` includes bins straddling the range\n boundaries, ``trim`` additionally alters the straddling bins'\n endpoints to match the range boundaries, and ``inner`` excludes\n those bins.\n\n Returns\n -------\n GenomicArray\n Concatenation of all the subsets of `self` enclosed by the specified\n ranges.\n \"\"\"\n table = pd.concat(iter_ranges(self.data, chrom, starts, ends, mode), sort=False)\n return self.as_dataframe(table)\n\n def into_ranges(\n self, other, column: str, default, summary_func: Optional[Callable] = None\n ):\n \"\"\"Re-bin values from `column` into the corresponding ranges in `other`.\n\n Match overlapping/intersecting rows from `other` to each row in `self`.\n Then, within each range in `other`, extract the value(s) from `column`\n in `self`, using the function `summary_func` to produce a single value\n if multiple bins in `self` map to a single range in `other`.\n\n For example, group SNVs (self) by CNV segments (other) and calculate the\n median (summary_func) of each SNV group's allele frequencies.\n\n Parameters\n ----------\n other : GenomicArray\n Ranges into which the overlapping values of `self` will be\n summarized.\n column : string\n Column name in `self` to extract values from.\n default\n Value to assign to indices in `other` that do not overlap any bins in\n `self`. 
Type should be the same as or compatible with the output\n field specified by `column`, or the output of `summary_func`.\n summary_func : callable, dict of string-to-callable, or None\n Specify how to reduce 1 or more `other` rows into a single value for\n the corresponding row in `self`.\n\n - If callable, apply to the `column` field each group of rows in\n `other` column.\n - If a single-element dict of column name to callable, apply to that\n field in `other` instead of `column`.\n - If None, use an appropriate summarizing function for the datatype\n of the `column` column in `other` (e.g. median of numbers,\n concatenation of strings).\n - If some other value, assign that value to `self` wherever there is\n an overlap.\n\n Returns\n -------\n pd.Series\n The extracted and summarized values from `self` corresponding to\n other's genomic ranges, the same length as `other`.\n \"\"\"\n if column not in self:\n logging.warning(\"No '%s' column available for summary calculation\", column)\n return pd.Series(np.repeat(default, len(other)))\n return into_ranges(self.data, other.data, column, default, summary_func)\n\n def iter_ranges_of(\n self, other, column: str, mode: str = \"outer\", keep_empty: bool = True\n ):\n \"\"\"Group rows by another GenomicArray's bin coordinate ranges.\n\n For example, this can be used to group SNVs by CNV segments.\n\n Bins in this array that fall outside the other array's bins are skipped.\n\n Parameters\n ----------\n other : GenomicArray\n Another GA instance.\n column : string\n Column name in `self` to extract values from.\n mode : string\n Determines what to do with bins that overlap a boundary of the\n selection. 
Possible values are:\n\n - ``inner``: Drop the bins on the selection boundary, don't emit them.\n - ``outer``: Keep/emit those bins as they are.\n - ``trim``: Emit those bins but alter their boundaries to match the\n selection; the bin start or end position is replaced with the\n selection boundary position.\n keep_empty : bool\n Whether to also yield `other` bins with no overlapping bins in\n `self`, or to skip them when iterating.\n\n Yields\n ------\n tuple\n (other bin, GenomicArray of overlapping rows in self)\n \"\"\"\n if column not in self.data.columns:\n raise ValueError(f\"No column named {column!r} in this object\")\n ser = self.data[column]\n for slc in iter_slices(self.data, other.data, mode, keep_empty):\n yield ser[slc]\n\n # Modification\n\n def add(self, other):\n \"\"\"Combine this array's data with another GenomicArray (in-place).\n\n Any optional columns must match between both arrays.\n \"\"\"\n if not isinstance(other, self.__class__):\n raise ValueError(\n f\"Argument (type {type(other)}) is not a {self.__class__} instance\"\n )\n if len(other.data):\n self.data = pd.concat([self.data, other.data], ignore_index=True)\n self.sort()\n\n def concat(self, others):\n \"\"\"Concatenate several GenomicArrays, keeping this array's metadata.\n\n This array's data table is not implicitly included in the result.\n \"\"\"\n table = pd.concat([otr.data for otr in others], ignore_index=True)\n result = self.as_dataframe(table)\n result.sort()\n return result\n\n def copy(self):\n \"\"\"Create an independent copy of this object.\"\"\"\n return self.as_dataframe(self.data.copy())\n\n def add_columns(self, **columns):\n \"\"\"Add the given columns to a copy of this GenomicArray.\n\n Parameters\n ----------\n **columns : array\n Keyword arguments where the key is the new column's name and the\n value is an array of the same length as `self` which will be the new\n column's values.\n\n Returns\n -------\n GenomicArray or subclass\n A new instance of `self` with 
the given columns included in the\n underlying dataframe.\n \"\"\"\n return self.as_dataframe(self.data.assign(**columns))\n\n def keep_columns(self, colnames):\n \"\"\"Extract a subset of columns, reusing this instance's metadata.\"\"\"\n colnames = self.data.columns.intersection(colnames)\n return self.__class__(self.data.loc[:, colnames], self.meta.copy())\n\n def drop_extra_columns(self):\n \"\"\"Remove any optional columns from this GenomicArray.\n\n Returns\n -------\n GenomicArray or subclass\n A new copy with only the minimal set of columns required by the\n class (e.g. chromosome, start, end for GenomicArray; may be more for\n subclasses).\n \"\"\"\n table = self.data.loc[:, self._required_columns]\n return self.as_dataframe(table)\n\n def filter(self, func=None, **kwargs):\n \"\"\"Take a subset of rows where the given condition is true.\n\n Parameters\n ----------\n func : callable\n A boolean function which will be applied to each row to keep rows\n where the result is True.\n **kwargs : string\n Keyword arguments like ``chromosome=\"chr7\"`` or\n ``gene=\"Antitarget\"``, which will keep rows where the keyed field\n equals the specified value.\n\n Return\n ------\n GenomicArray\n Subset of `self` where the specified condition is True.\n \"\"\"\n table = self.data\n if func is not None:\n table = table[table.apply(func, axis=1)]\n for key, val in list(kwargs.items()):\n assert key in self\n table = table[table[key] == val]\n return self.as_dataframe(table)\n\n def shuffle(self):\n \"\"\"Randomize the order of bins in this array (in-place).\"\"\"\n order = np.arange(len(self.data))\n np.random.seed(0xA5EED)\n np.random.shuffle(order)\n self.data = self.data.iloc[order]\n return order\n\n def sort(self):\n \"\"\"Sort this array's bins in-place, with smart chromosome ordering.\"\"\"\n sort_key = self.data.chromosome.apply(sorter_chrom)\n self.data = (\n self.data.assign(_sort_key_=sort_key)\n .sort_values(by=[\"_sort_key_\", \"start\", \"end\"], 
kind=\"mergesort\")\n .drop(\"_sort_key_\", axis=1)\n .reset_index(drop=True)\n )\n\n def sort_columns(self):\n \"\"\"Sort this array's columns in-place, per class definition.\"\"\"\n extra_cols = []\n for col in self.data.columns:\n if col not in self._required_columns:\n extra_cols.append(col)\n sorted_colnames = list(self._required_columns) + sorted(extra_cols)\n assert len(sorted_colnames) == len(self.data.columns)\n self.data = self.data.reindex(columns=sorted_colnames)\n\n # Genome arithmetic\n\n def cut(self, other, combine=None):\n \"\"\"Split this array's regions at the boundaries in `other`.\"\"\"\n # TODO\n return NotImplemented\n\n def flatten(\n self,\n combine: Optional[Dict[str, Callable]] = None,\n split_columns: Optional[Iterable[str]] = None,\n ):\n \"\"\"Split this array's regions where they overlap.\"\"\"\n return self.as_dataframe(\n flatten(self.data, combine=combine, split_columns=split_columns)\n )\n\n def intersection(self, other, mode: str = \"outer\"):\n \"\"\"Select the bins in `self` that overlap the regions in `other`.\n\n The extra fields of `self`, but not `other`, are retained in the output.\n \"\"\"\n # TODO options for which extra fields to keep\n # by default, keep just the fields in 'table'\n if mode == \"trim\":\n # Slower\n chunks = [\n chunk.data\n for _, chunk in self.by_ranges(other, mode=mode, keep_empty=False)\n ]\n return self.as_dataframe(pd.concat(chunks))\n # Faster\n slices = iter_slices(self.data, other.data, mode, False)\n indices = np.concatenate(list(slices))\n return self.as_dataframe(self.data.loc[indices])\n\n def merge(\n self,\n bp: int = 0,\n stranded: bool = False,\n combine: Optional[Dict[str, Callable]] = None,\n ):\n \"\"\"Merge adjacent or overlapping regions into single rows.\n\n Similar to 'bedtools merge'.\n \"\"\"\n return self.as_dataframe(merge(self.data, bp, stranded, combine))\n\n def resize_ranges(self, bp: int, chrom_sizes: Optional[Mapping[str, Numeric]] = None):\n \"\"\"Resize each genomic 
bin by a fixed number of bases at each end.\n\n Bin 'start' values have a minimum of 0, and `chrom_sizes` can\n specify each chromosome's maximum 'end' value.\n\n Similar to 'bedtools slop'.\n\n Parameters\n ----------\n bp : int\n Number of bases in each direction to expand or shrink each bin.\n Applies to 'start' and 'end' values symmetrically, and may be\n positive (expand) or negative (shrink).\n chrom_sizes : dict of string-to-int\n Chromosome name to length in base pairs. If given, all chromosomes\n in `self` must be included.\n \"\"\"\n table = self.data\n limits = {\"lower\": 0}\n if chrom_sizes:\n limits[\"upper\"] = self.chromosome.replace(chrom_sizes)\n table = table.assign(\n start=(table[\"start\"] - bp).clip(**limits),\n end=(table[\"end\"] + bp).clip(**limits),\n )\n if bp < 0:\n # Drop any bins that now have zero or negative size\n ok_size = table[\"end\"] - table[\"start\"] > 0\n logging.debug(\"Dropping %d bins with size <= 0\", (~ok_size).sum())\n table = table[ok_size]\n # Don't modify the original\n return self.as_dataframe(table.copy())\n\n def squash(self, combine=None):\n \"\"\"Combine some groups of rows, by some criteria, into single rows.\"\"\"\n # TODO\n return NotImplemented\n\n def subdivide(self, avg_size: int, min_size: int = 0, verbose: bool = False):\n \"\"\"Split this array's regions into roughly equal-sized sub-regions.\"\"\"\n return self.as_dataframe(subdivide(self.data, avg_size, min_size, verbose))\n\n def subtract(self, other):\n \"\"\"Remove the overlapping regions in `other` from this array.\"\"\"\n return self.as_dataframe(subtract(self.data, other.data))\n\n def total_range_size(self) -> int:\n \"\"\"Total number of bases covered by all (merged) regions.\"\"\"\n if not len(self):\n return 0\n regions = merge(self.data, bp=1)\n return regions.end.sum() - regions.start.sum()\n\n def _get_gene_map(self) -> OrderedDict:\n \"\"\"Map unique gene names to their indices in this array.\n\n Returns\n -------\n OrderedDict\n An 
(ordered) dictionary of unique gene names and the data indices of\n their segments in the order of occurrence (genomic order).\n \"\"\"\n if \"gene\" not in self.data:\n return OrderedDict()\n\n genes: OrderedDict = OrderedDict()\n for idx, genestr in self.data[\"gene\"].items():\n if pd.isnull(genestr):\n continue\n for gene in genestr.split(\",\"):\n if gene not in genes:\n genes[gene] = []\n genes[gene].append(idx)\n return genes\n",
"step-ids": [
36,
38,
48,
49,
55
]
}
|
[
36,
38,
48,
49,
55
] |
from flask import logging
from flask_sqlalchemy import SQLAlchemy
from passlib.apps import custom_app_context as pwd_context
logger = logging.getLogger(__name__)
db = SQLAlchemy() # flask-sqlalchemy
class User(db.Model):
    """API user account with a passlib-hashed password (never stores plaintext)."""
    __tablename__ = 'users'
    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Login name; indexed because setup_api_user() looks users up by username.
    username = db.Column(db.String(32), index=True)
    # passlib hash string (scheme-prefixed), not the raw password.
    password_hash = db.Column(db.String(128))
    def hash_password(self, password: str) -> None:
        # Hash via the passlib custom_app_context and store only the digest.
        self.password_hash = pwd_context.encrypt(password)
    def verify_password(self, password: str) -> bool:
        # Constant-time verification against the stored hash.
        return pwd_context.verify(password, self.password_hash)
class Weather(db.Model):
    """One weather-station observation row."""
    id = db.Column(db.Integer, primary_key=True)
    # Observation time; declared timezone-aware.
    timestamp = db.Column(db.DateTime(timezone=True))
    pressure = db.Column(db.Float)
    inTemp = db.Column(db.Float)   # indoor temperature (units not specified here)
    outTemp = db.Column(db.Float)  # outdoor temperature
    windDir = db.Column(db.Float)  # wind direction (presumably degrees -- confirm with station firmware)
    windSpeed = db.Column(db.Float)
    outHumidity = db.Column(db.Float)
    inHumidity = db.Column(db.Float)
    rain = db.Column(db.Float)
    def save(self) -> None:
        """Add this row to the session and commit immediately."""
        db.session.add(self)
        db.session.commit()
class Webcam(db.Model):
    """A webcam capture stored as a raw binary blob."""
    id = db.Column(db.Integer, primary_key=True)
    # Capture time; declared timezone-aware.
    timestamp = db.Column(db.DateTime(timezone=True))
    # Raw image bytes; image format is not enforced at this layer.
    data = db.Column(db.LargeBinary)
    def save(self) -> None:
        """Add this row to the session and commit immediately."""
        db.session.add(self)
        db.session.commit()
    def delete(self) -> None:
        """Delete this row and commit immediately."""
        db.session.delete(self)
        db.session.commit()
def setup_api_user():
    """Provision the default API user if it does not already exist.

    Idempotent: if a user with the configured username is present, this is
    a no-op. Credentials default to the historical built-in values so
    existing deployments keep working, but can (and should) be overridden
    via the WEATHERSTATION_USER / WEATHERSTATION_PASSWORD environment
    variables instead of keeping secrets in source control.
    """
    import os  # local import keeps this fix self-contained
    # NOTE(security): the password was previously hard-coded in source;
    # the literal is retained only as a backward-compatible default.
    username = os.environ.get("WEATHERSTATION_USER", "weatherstation")
    password = os.environ.get("WEATHERSTATION_PASSWORD",
                              "umevohvoori2zaew2choKaeshooPho")
    if User.query.filter_by(username=username).first() is not None:
        # Already provisioned; nothing to do.
        return
    user = User(username=username)
    user.hash_password(password)
    db.session.add(user)
    db.session.commit()
    logger.info("User created")
def init_db(app):
    """Bind the SQLAlchemy extension to *app*, create tables, and seed data.

    Returns the module-level ``db`` object after:
      1. binding the Flask app,
      2. creating all declared tables,
      3. ensuring the default API user exists,
      4. optionally importing bootstrap data when the
         ``SQLALCHEMY_BOOTSTRAP_DATA`` config flag is truthy
         (the key must exist -- a missing key raises KeyError).
    """
    db.app = app
    db.init_app(app)
    db.create_all()
    setup_api_user()
    if app.config["SQLALCHEMY_BOOTSTRAP_DATA"]:
        import_from_json()
    return db
def import_from_json():
    """Bootstrap-data import hook; currently an unimplemented no-op stub."""
    pass
|
normal
|
{
"blob_id": "e976f7e423d75f7fc8a3d5cd597bdd9358ae317e",
"index": 5243,
"step-1": "<mask token>\n\n\nclass User(db.Model):\n __tablename__ = 'users'\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String(32), index=True)\n password_hash = db.Column(db.String(128))\n\n def hash_password(self, password):\n self.password_hash = pwd_context.encrypt(password)\n\n def verify_password(self, password):\n return pwd_context.verify(password, self.password_hash)\n\n\nclass Weather(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n timestamp = db.Column(db.DateTime(timezone=True))\n pressure = db.Column(db.Float)\n inTemp = db.Column(db.Float)\n outTemp = db.Column(db.Float)\n windDir = db.Column(db.Float)\n windSpeed = db.Column(db.Float)\n outHumidity = db.Column(db.Float)\n inHumidity = db.Column(db.Float)\n rain = db.Column(db.Float)\n\n def save(self):\n db.session.add(self)\n db.session.commit()\n\n\nclass Webcam(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n timestamp = db.Column(db.DateTime(timezone=True))\n data = db.Column(db.LargeBinary)\n\n def save(self):\n db.session.add(self)\n db.session.commit()\n\n def delete(self):\n db.session.delete(self)\n db.session.commit()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass User(db.Model):\n __tablename__ = 'users'\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String(32), index=True)\n password_hash = db.Column(db.String(128))\n\n def hash_password(self, password):\n self.password_hash = pwd_context.encrypt(password)\n\n def verify_password(self, password):\n return pwd_context.verify(password, self.password_hash)\n\n\nclass Weather(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n timestamp = db.Column(db.DateTime(timezone=True))\n pressure = db.Column(db.Float)\n inTemp = db.Column(db.Float)\n outTemp = db.Column(db.Float)\n windDir = db.Column(db.Float)\n windSpeed = db.Column(db.Float)\n outHumidity = db.Column(db.Float)\n inHumidity = db.Column(db.Float)\n rain = db.Column(db.Float)\n\n def save(self):\n db.session.add(self)\n db.session.commit()\n\n\nclass Webcam(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n timestamp = db.Column(db.DateTime(timezone=True))\n data = db.Column(db.LargeBinary)\n\n def save(self):\n db.session.add(self)\n db.session.commit()\n\n def delete(self):\n db.session.delete(self)\n db.session.commit()\n\n\ndef setup_api_user():\n username = 'weatherstation'\n password = 'umevohvoori2zaew2choKaeshooPho'\n if User.query.filter_by(username=username).first() is not None:\n return\n user = User(username=username)\n user.hash_password(password)\n db.session.add(user)\n db.session.commit()\n logger.info('User created')\n\n\ndef init_db(app):\n db.app = app\n db.init_app(app)\n db.create_all()\n setup_api_user()\n if app.config['SQLALCHEMY_BOOTSTRAP_DATA']:\n import_from_json()\n return db\n\n\n<mask token>\n",
"step-3": "<mask token>\nlogger = logging.getLogger(__name__)\ndb = SQLAlchemy()\n\n\nclass User(db.Model):\n __tablename__ = 'users'\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String(32), index=True)\n password_hash = db.Column(db.String(128))\n\n def hash_password(self, password):\n self.password_hash = pwd_context.encrypt(password)\n\n def verify_password(self, password):\n return pwd_context.verify(password, self.password_hash)\n\n\nclass Weather(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n timestamp = db.Column(db.DateTime(timezone=True))\n pressure = db.Column(db.Float)\n inTemp = db.Column(db.Float)\n outTemp = db.Column(db.Float)\n windDir = db.Column(db.Float)\n windSpeed = db.Column(db.Float)\n outHumidity = db.Column(db.Float)\n inHumidity = db.Column(db.Float)\n rain = db.Column(db.Float)\n\n def save(self):\n db.session.add(self)\n db.session.commit()\n\n\nclass Webcam(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n timestamp = db.Column(db.DateTime(timezone=True))\n data = db.Column(db.LargeBinary)\n\n def save(self):\n db.session.add(self)\n db.session.commit()\n\n def delete(self):\n db.session.delete(self)\n db.session.commit()\n\n\ndef setup_api_user():\n username = 'weatherstation'\n password = 'umevohvoori2zaew2choKaeshooPho'\n if User.query.filter_by(username=username).first() is not None:\n return\n user = User(username=username)\n user.hash_password(password)\n db.session.add(user)\n db.session.commit()\n logger.info('User created')\n\n\ndef init_db(app):\n db.app = app\n db.init_app(app)\n db.create_all()\n setup_api_user()\n if app.config['SQLALCHEMY_BOOTSTRAP_DATA']:\n import_from_json()\n return db\n\n\ndef import_from_json():\n pass\n",
"step-4": "from flask import logging\nfrom flask_sqlalchemy import SQLAlchemy\nfrom passlib.apps import custom_app_context as pwd_context\nlogger = logging.getLogger(__name__)\ndb = SQLAlchemy()\n\n\nclass User(db.Model):\n __tablename__ = 'users'\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String(32), index=True)\n password_hash = db.Column(db.String(128))\n\n def hash_password(self, password):\n self.password_hash = pwd_context.encrypt(password)\n\n def verify_password(self, password):\n return pwd_context.verify(password, self.password_hash)\n\n\nclass Weather(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n timestamp = db.Column(db.DateTime(timezone=True))\n pressure = db.Column(db.Float)\n inTemp = db.Column(db.Float)\n outTemp = db.Column(db.Float)\n windDir = db.Column(db.Float)\n windSpeed = db.Column(db.Float)\n outHumidity = db.Column(db.Float)\n inHumidity = db.Column(db.Float)\n rain = db.Column(db.Float)\n\n def save(self):\n db.session.add(self)\n db.session.commit()\n\n\nclass Webcam(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n timestamp = db.Column(db.DateTime(timezone=True))\n data = db.Column(db.LargeBinary)\n\n def save(self):\n db.session.add(self)\n db.session.commit()\n\n def delete(self):\n db.session.delete(self)\n db.session.commit()\n\n\ndef setup_api_user():\n username = 'weatherstation'\n password = 'umevohvoori2zaew2choKaeshooPho'\n if User.query.filter_by(username=username).first() is not None:\n return\n user = User(username=username)\n user.hash_password(password)\n db.session.add(user)\n db.session.commit()\n logger.info('User created')\n\n\ndef init_db(app):\n db.app = app\n db.init_app(app)\n db.create_all()\n setup_api_user()\n if app.config['SQLALCHEMY_BOOTSTRAP_DATA']:\n import_from_json()\n return db\n\n\ndef import_from_json():\n pass\n",
"step-5": "from flask import logging\nfrom flask_sqlalchemy import SQLAlchemy\nfrom passlib.apps import custom_app_context as pwd_context\n\nlogger = logging.getLogger(__name__)\n\ndb = SQLAlchemy() # flask-sqlalchemy\n\n\nclass User(db.Model):\n __tablename__ = 'users'\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String(32), index=True)\n password_hash = db.Column(db.String(128))\n\n def hash_password(self, password):\n self.password_hash = pwd_context.encrypt(password)\n\n def verify_password(self, password):\n return pwd_context.verify(password, self.password_hash)\n\n\nclass Weather(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n timestamp = db.Column(db.DateTime(timezone=True))\n pressure = db.Column(db.Float)\n inTemp = db.Column(db.Float)\n outTemp = db.Column(db.Float)\n windDir = db.Column(db.Float)\n windSpeed = db.Column(db.Float)\n outHumidity = db.Column(db.Float)\n inHumidity = db.Column(db.Float)\n rain = db.Column(db.Float)\n\n def save(self):\n db.session.add(self)\n db.session.commit()\n\n\nclass Webcam(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n timestamp = db.Column(db.DateTime(timezone=True))\n data = db.Column(db.LargeBinary)\n\n def save(self):\n db.session.add(self)\n db.session.commit()\n\n\n def delete(self):\n db.session.delete(self)\n db.session.commit()\n\n\ndef setup_api_user():\n username = \"weatherstation\"\n password = \"umevohvoori2zaew2choKaeshooPho\"\n if User.query.filter_by(username=username).first() is not None:\n return\n user = User(username=username)\n user.hash_password(password)\n db.session.add(user)\n db.session.commit()\n logger.info(\"User created\")\n\n\ndef init_db(app):\n db.app = app\n db.init_app(app)\n db.create_all()\n setup_api_user()\n\n if app.config[\"SQLALCHEMY_BOOTSTRAP_DATA\"]:\n import_from_json()\n\n return db\n\n\ndef import_from_json():\n pass\n",
"step-ids": [
11,
13,
15,
16,
17
]
}
|
[
11,
13,
15,
16,
17
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
warnings.filterwarnings('ignore')
<|reserved_special_token_0|>
os.chdir(lib_path)
<|reserved_special_token_0|>
print(res.summary())
<|reserved_special_token_0|>
X0
<|reserved_special_token_0|>
b
<|reserved_special_token_0|>
covid_actual.loc[:, 'Date':'human_date']
<|reserved_special_token_0|>
covid_predicted.to_csv('predicted_data.csv', index=False)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
warnings.filterwarnings('ignore')
<|reserved_special_token_0|>
lib_path = '\\\\'
os.chdir(lib_path)
covid_pred = pd.read_csv('total_cases_data.csv')
data = covid_pred
<|reserved_special_token_0|>
X = data.Time
X = sm.add_constant(X)
data['logTotal'] = np.log(data.Total)
y = data.logTotal
mod = sm.OLS(y, X)
res = mod.fit()
print(res.summary())
<|reserved_special_token_0|>
initial_value_exponent = 2.2588
X0 = math.exp(initial_value_exponent)
X0
growth_factor_exponent = 0.173
b = math.exp(growth_factor_exponent)
b
<|reserved_special_token_0|>
start_date = date(2020, 3, 2)
<|reserved_special_token_0|>
today = datetime.date.today()
t = today + datetime.timedelta(days=1)
delta = t - start_date
time = delta.days
Xt = X0 * math.pow(b, time)
predicted = round(Xt)
tomorrow = t - datetime.timedelta(days=1)
covid_actual = pd.read_csv('total_cases_data.csv')
covid_actual.loc[:, 'Date':'human_date']
covid_predicted = pd.DataFrame({'Date': ['26/3/2020', '27/3/2020',
'28/3/2020'], 'Total': ['721', '857', '1022'], 'human_date': [
'26th Mar', '27th Mar', '28th Mar']})
covid_predicted.to_csv('predicted_data.csv', index=False)
covid_merge = pd.merge(covid_actual, covid_predicted, left_on=['Date'],
right_on=['Date'], how='left')
covid_accuracy = covid_merge[(covid_merge['Date'] == '26/3/2020') | (
covid_merge['Date'] == '27/3/2020') | (covid_merge['Date'] == '28/3/2020')]
covid_accuracy['Total_y'] = covid_accuracy['Total_y'].astype(int)
covid_accuracy['Total_x'] = covid_accuracy['Total_x'].astype(int)
covid_accuracy.loc[covid_accuracy['Total_x'] >= covid_accuracy['Total_y'],
'Accuracy'] = covid_accuracy['Total_y'] / covid_accuracy['Total_x'] * 100
covid_accuracy.loc[covid_accuracy['Total_x'] < covid_accuracy['Total_y'],
'Accuracy'] = covid_accuracy['Total_x'] / covid_accuracy['Total_y'] * 100
accuracy_final = covid_accuracy.mean(axis=0)
<|reserved_special_token_1|>
import pandas as pd
import os
import numpy as np
import json as json
import mysql.connector as sqlcnt
import datetime as dt
import requests
from mysql.connector.constants import SQLMode
import os
import glob
import re
import warnings
warnings.filterwarnings('ignore')
from pathlib import Path
import os, sys
lib_path = '\\\\'
os.chdir(lib_path)
covid_pred = pd.read_csv('total_cases_data.csv')
data = covid_pred
import scipy
import patsy
import statsmodels.api as sm
X = data.Time
X = sm.add_constant(X)
data['logTotal'] = np.log(data.Total)
y = data.logTotal
mod = sm.OLS(y, X)
res = mod.fit()
print(res.summary())
import math
initial_value_exponent = 2.2588
X0 = math.exp(initial_value_exponent)
X0
growth_factor_exponent = 0.173
b = math.exp(growth_factor_exponent)
b
from datetime import date
start_date = date(2020, 3, 2)
import datetime
today = datetime.date.today()
t = today + datetime.timedelta(days=1)
delta = t - start_date
time = delta.days
Xt = X0 * math.pow(b, time)
predicted = round(Xt)
tomorrow = t - datetime.timedelta(days=1)
covid_actual = pd.read_csv('total_cases_data.csv')
covid_actual.loc[:, 'Date':'human_date']
covid_predicted = pd.DataFrame({'Date': ['26/3/2020', '27/3/2020',
'28/3/2020'], 'Total': ['721', '857', '1022'], 'human_date': [
'26th Mar', '27th Mar', '28th Mar']})
covid_predicted.to_csv('predicted_data.csv', index=False)
covid_merge = pd.merge(covid_actual, covid_predicted, left_on=['Date'],
right_on=['Date'], how='left')
covid_accuracy = covid_merge[(covid_merge['Date'] == '26/3/2020') | (
covid_merge['Date'] == '27/3/2020') | (covid_merge['Date'] == '28/3/2020')]
covid_accuracy['Total_y'] = covid_accuracy['Total_y'].astype(int)
covid_accuracy['Total_x'] = covid_accuracy['Total_x'].astype(int)
covid_accuracy.loc[covid_accuracy['Total_x'] >= covid_accuracy['Total_y'],
'Accuracy'] = covid_accuracy['Total_y'] / covid_accuracy['Total_x'] * 100
covid_accuracy.loc[covid_accuracy['Total_x'] < covid_accuracy['Total_y'],
'Accuracy'] = covid_accuracy['Total_x'] / covid_accuracy['Total_y'] * 100
accuracy_final = covid_accuracy.mean(axis=0)
<|reserved_special_token_1|>
# coding: utf-8

# Flat analysis script (exported from a notebook -- note the "In[...]" cell
# markers): fits an exponential growth model to cumulative COVID case counts
# read from total_cases_data.csv, projects tomorrow's total, and scores
# earlier hard-coded predictions against actuals.

import pandas as pd
import os
import numpy as np
import json as json
import mysql.connector as sqlcnt
import datetime as dt
import requests
from mysql.connector.constants import SQLMode
import os
import glob
import re

import warnings
warnings.filterwarnings("ignore")
from pathlib import Path


# In[289]:


import os,sys
# Working directory holding the CSV inputs/outputs.
# NOTE(review): r"\\" looks like a placeholder (a bare UNC prefix); confirm
# the intended path before running -- os.chdir will fail or land elsewhere.
lib_path = r"\\"
#lib_path = r"C:\Users\300068241\Documents\Covid_Data\Daily"
os.chdir(lib_path)


# Expected columns (used below): Date, Total, human_date, Time.
covid_pred=pd.read_csv(r'total_cases_data.csv')
data=covid_pred


import scipy
import patsy


import statsmodels.api as sm


# OLS of log(Total) on Time: fits log X(t) = c + g*t, i.e. exponential growth.
X=data.Time
X=sm.add_constant(X)
data['logTotal']=np.log(data.Total)
y=data.logTotal
mod=sm.OLS(y,X)
res=mod.fit()
print(res.summary())


import math
# Coefficients hand-copied from the printed OLS summary above:
# intercept -> initial value exponent, slope -> daily growth exponent.
initial_value_exponent=2.2588
X0=math.exp(initial_value_exponent)
X0


growth_factor_exponent=0.1730


# In[304]:


# Daily multiplicative growth factor b = e^slope.
b=math.exp(growth_factor_exponent)


# In[305]:


b


# In[306]:


from datetime import date
start_date = date(2020, 3, 2) #1st case is assumed to be of 2nd Mar'20


# In[307]:


import datetime 
today = datetime.date.today()
t = today + datetime.timedelta(days = 1) #+1 in days as 1st case was on 2nd and another +1 days as we're predicting for tomorrow

delta = t - start_date


# Days elapsed since the first case (shadows the builtin-free name "time"
# only locally -- the time module is not imported here).
time=delta.days


# Exponential projection X(t) = X0 * b**t, rounded to a whole case count.
Xt = X0 * (math.pow(b,time))
#Xt
predicted = round(Xt)

tomorrow = t - datetime.timedelta(days=1)

covid_actual=pd.read_csv(r'total_cases_data.csv')

covid_actual.loc[:, 'Date':'human_date']


# Hard-coded predictions for three specific dates ("#change here" marks the
# lines to edit each day) -- Totals are strings and are cast to int below.
covid_predicted=pd.DataFrame({'Date':["26/3/2020","27/3/2020","28/3/2020"],'Total':["721","857","1022"], 'human_date':["26th Mar","27th Mar","28th Mar"]}) #change here

covid_predicted.to_csv('predicted_data.csv',index=False)


# Left-join actuals to predictions on Date: Total_x = actual, Total_y = predicted.
covid_merge = pd.merge(covid_actual,covid_predicted,left_on=['Date'],right_on=['Date'],how = 'left')
covid_accuracy = covid_merge[(covid_merge['Date']=='26/3/2020') | (covid_merge['Date']=='27/3/2020') | (covid_merge['Date']=='28/3/2020')] #change here

#covid_accuracy

covid_accuracy['Total_y']=covid_accuracy['Total_y'].astype(int)
covid_accuracy['Total_x']=covid_accuracy['Total_x'].astype(int)

# Accuracy = min(actual, predicted) / max(actual, predicted) * 100, split
# across two .loc assignments by which side is larger.
covid_accuracy.loc[covid_accuracy['Total_x']>=covid_accuracy['Total_y'], 'Accuracy'] = (covid_accuracy['Total_y']/covid_accuracy['Total_x'])*100

covid_accuracy.loc[covid_accuracy['Total_x']<covid_accuracy['Total_y'], 'Accuracy'] = (covid_accuracy['Total_x']/covid_accuracy['Total_y'])*100

# NOTE(review): mean(axis=0) over a frame that still contains string columns
# (Date, human_date) relies on older pandas silently skipping non-numeric
# columns; newer pandas requires numeric_only=True -- confirm pandas version.
accuracy_final=covid_accuracy.mean(axis = 0)
|
flexible
|
{
"blob_id": "2060f57cfd910a308d60ad35ebbbf9ffd5678b9c",
"index": 3519,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwarnings.filterwarnings('ignore')\n<mask token>\nos.chdir(lib_path)\n<mask token>\nprint(res.summary())\n<mask token>\nX0\n<mask token>\nb\n<mask token>\ncovid_actual.loc[:, 'Date':'human_date']\n<mask token>\ncovid_predicted.to_csv('predicted_data.csv', index=False)\n<mask token>\n",
"step-3": "<mask token>\nwarnings.filterwarnings('ignore')\n<mask token>\nlib_path = '\\\\\\\\'\nos.chdir(lib_path)\ncovid_pred = pd.read_csv('total_cases_data.csv')\ndata = covid_pred\n<mask token>\nX = data.Time\nX = sm.add_constant(X)\ndata['logTotal'] = np.log(data.Total)\ny = data.logTotal\nmod = sm.OLS(y, X)\nres = mod.fit()\nprint(res.summary())\n<mask token>\ninitial_value_exponent = 2.2588\nX0 = math.exp(initial_value_exponent)\nX0\ngrowth_factor_exponent = 0.173\nb = math.exp(growth_factor_exponent)\nb\n<mask token>\nstart_date = date(2020, 3, 2)\n<mask token>\ntoday = datetime.date.today()\nt = today + datetime.timedelta(days=1)\ndelta = t - start_date\ntime = delta.days\nXt = X0 * math.pow(b, time)\npredicted = round(Xt)\ntomorrow = t - datetime.timedelta(days=1)\ncovid_actual = pd.read_csv('total_cases_data.csv')\ncovid_actual.loc[:, 'Date':'human_date']\ncovid_predicted = pd.DataFrame({'Date': ['26/3/2020', '27/3/2020',\n '28/3/2020'], 'Total': ['721', '857', '1022'], 'human_date': [\n '26th Mar', '27th Mar', '28th Mar']})\ncovid_predicted.to_csv('predicted_data.csv', index=False)\ncovid_merge = pd.merge(covid_actual, covid_predicted, left_on=['Date'],\n right_on=['Date'], how='left')\ncovid_accuracy = covid_merge[(covid_merge['Date'] == '26/3/2020') | (\n covid_merge['Date'] == '27/3/2020') | (covid_merge['Date'] == '28/3/2020')]\ncovid_accuracy['Total_y'] = covid_accuracy['Total_y'].astype(int)\ncovid_accuracy['Total_x'] = covid_accuracy['Total_x'].astype(int)\ncovid_accuracy.loc[covid_accuracy['Total_x'] >= covid_accuracy['Total_y'],\n 'Accuracy'] = covid_accuracy['Total_y'] / covid_accuracy['Total_x'] * 100\ncovid_accuracy.loc[covid_accuracy['Total_x'] < covid_accuracy['Total_y'],\n 'Accuracy'] = covid_accuracy['Total_x'] / covid_accuracy['Total_y'] * 100\naccuracy_final = covid_accuracy.mean(axis=0)\n",
"step-4": "import pandas as pd\nimport os\nimport numpy as np\nimport json as json\nimport mysql.connector as sqlcnt\nimport datetime as dt\nimport requests\nfrom mysql.connector.constants import SQLMode\nimport os\nimport glob\nimport re\nimport warnings\nwarnings.filterwarnings('ignore')\nfrom pathlib import Path\nimport os, sys\nlib_path = '\\\\\\\\'\nos.chdir(lib_path)\ncovid_pred = pd.read_csv('total_cases_data.csv')\ndata = covid_pred\nimport scipy\nimport patsy\nimport statsmodels.api as sm\nX = data.Time\nX = sm.add_constant(X)\ndata['logTotal'] = np.log(data.Total)\ny = data.logTotal\nmod = sm.OLS(y, X)\nres = mod.fit()\nprint(res.summary())\nimport math\ninitial_value_exponent = 2.2588\nX0 = math.exp(initial_value_exponent)\nX0\ngrowth_factor_exponent = 0.173\nb = math.exp(growth_factor_exponent)\nb\nfrom datetime import date\nstart_date = date(2020, 3, 2)\nimport datetime\ntoday = datetime.date.today()\nt = today + datetime.timedelta(days=1)\ndelta = t - start_date\ntime = delta.days\nXt = X0 * math.pow(b, time)\npredicted = round(Xt)\ntomorrow = t - datetime.timedelta(days=1)\ncovid_actual = pd.read_csv('total_cases_data.csv')\ncovid_actual.loc[:, 'Date':'human_date']\ncovid_predicted = pd.DataFrame({'Date': ['26/3/2020', '27/3/2020',\n '28/3/2020'], 'Total': ['721', '857', '1022'], 'human_date': [\n '26th Mar', '27th Mar', '28th Mar']})\ncovid_predicted.to_csv('predicted_data.csv', index=False)\ncovid_merge = pd.merge(covid_actual, covid_predicted, left_on=['Date'],\n right_on=['Date'], how='left')\ncovid_accuracy = covid_merge[(covid_merge['Date'] == '26/3/2020') | (\n covid_merge['Date'] == '27/3/2020') | (covid_merge['Date'] == '28/3/2020')]\ncovid_accuracy['Total_y'] = covid_accuracy['Total_y'].astype(int)\ncovid_accuracy['Total_x'] = covid_accuracy['Total_x'].astype(int)\ncovid_accuracy.loc[covid_accuracy['Total_x'] >= covid_accuracy['Total_y'],\n 'Accuracy'] = covid_accuracy['Total_y'] / covid_accuracy['Total_x'] * 
100\ncovid_accuracy.loc[covid_accuracy['Total_x'] < covid_accuracy['Total_y'],\n 'Accuracy'] = covid_accuracy['Total_x'] / covid_accuracy['Total_y'] * 100\naccuracy_final = covid_accuracy.mean(axis=0)\n",
"step-5": "\n# coding: utf-8\n\n\nimport pandas as pd\nimport os\nimport numpy as np\nimport json as json\nimport mysql.connector as sqlcnt\nimport datetime as dt\nimport requests\nfrom mysql.connector.constants import SQLMode\nimport os\nimport glob\nimport re\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nfrom pathlib import Path\n\n\n# In[289]:\n\n\nimport os,sys\nlib_path = r\"\\\\\"\n#lib_path = r\"C:\\Users\\300068241\\Documents\\Covid_Data\\Daily\"\nos.chdir(lib_path)\n\n\ncovid_pred=pd.read_csv(r'total_cases_data.csv')\ndata=covid_pred\n\n\nimport scipy\nimport patsy\n\n\nimport statsmodels.api as sm\n\n\nX=data.Time\nX=sm.add_constant(X)\ndata['logTotal']=np.log(data.Total)\ny=data.logTotal\nmod=sm.OLS(y,X)\nres=mod.fit()\nprint(res.summary())\n\n\nimport math\ninitial_value_exponent=2.2588\nX0=math.exp(initial_value_exponent)\nX0\n\n\ngrowth_factor_exponent=0.1730\n\n\n# In[304]:\n\n\nb=math.exp(growth_factor_exponent)\n\n\n# In[305]:\n\n\nb\n\n\n# In[306]:\n\n\nfrom datetime import date\nstart_date = date(2020, 3, 2) #1st case is assumed to be of 2nd Mar'20\n\n\n# In[307]:\n\n\nimport datetime \ntoday = datetime.date.today()\nt = today + datetime.timedelta(days = 1) #+1 in days as 1st case was on 2nd and another +1 days as we're predicting for tomorrow\n\ndelta = t - start_date\n\n\ntime=delta.days\n\n\nXt = X0 * (math.pow(b,time))\n#Xt\npredicted = round(Xt)\n\ntomorrow = t - datetime.timedelta(days=1)\n\ncovid_actual=pd.read_csv(r'total_cases_data.csv')\n\ncovid_actual.loc[:, 'Date':'human_date']\n\n\ncovid_predicted=pd.DataFrame({'Date':[\"26/3/2020\",\"27/3/2020\",\"28/3/2020\"],'Total':[\"721\",\"857\",\"1022\"], 'human_date':[\"26th Mar\",\"27th Mar\",\"28th Mar\"]}) #change here\n\ncovid_predicted.to_csv('predicted_data.csv',index=False)\n\n\ncovid_merge = pd.merge(covid_actual,covid_predicted,left_on=['Date'],right_on=['Date'],how = 'left')\ncovid_accuracy = covid_merge[(covid_merge['Date']=='26/3/2020') | 
(covid_merge['Date']=='27/3/2020') | (covid_merge['Date']=='28/3/2020')] #change here\n\n#covid_accuracy\n\ncovid_accuracy['Total_y']=covid_accuracy['Total_y'].astype(int)\ncovid_accuracy['Total_x']=covid_accuracy['Total_x'].astype(int)\n\ncovid_accuracy.loc[covid_accuracy['Total_x']>=covid_accuracy['Total_y'], 'Accuracy'] = (covid_accuracy['Total_y']/covid_accuracy['Total_x'])*100\n\ncovid_accuracy.loc[covid_accuracy['Total_x']<covid_accuracy['Total_y'], 'Accuracy'] = (covid_accuracy['Total_x']/covid_accuracy['Total_y'])*100\n\naccuracy_final=covid_accuracy.mean(axis = 0)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.db.models import manager
from django.http import JsonResponse
from django.shortcuts import render

from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
from rest_framework.status import HTTP_200_OK, HTTP_400_BAD_REQUEST
from rest_framework.utils import serializer_helpers
from rest_framework.views import APIView

from .models import Stock
from .serializers import StockSerializer
# Create your views here.
class TestView(APIView):
    """Smoke-test endpoint that answers with a fixed payload."""

    def get(self, request, *args, **kwargs):
        # Static body so clients can verify the API is reachable.
        return Response({"msg": "Test"})
class StockPagination(PageNumberPagination):
    """Page-number pagination for stock listings.

    Clients may request a custom page size via the ``page_size`` query
    parameter, capped at ``max_page_size`` records per page.
    """

    # Hard upper bound on any client-requested page size.
    max_page_size = 500
    # Query-string parameter letting the client choose a page size.
    page_size_query_param = 'page_size'
    # Default number of records per page when the client specifies none.
    page_size = 20
class StockView(APIView):
    """List, filter, and create Stock records.

    GET  with ``?ticker=XYZ``: all stocks matching the ticker (unpaginated).
    GET  without a filter: the full table, paginated via StockPagination.
    POST: create a stock from the request body.
    """

    def get(self, request, *args, **kwargs):
        ticker = request.GET.get('ticker')
        if ticker:
            # Ticker filter: return every matching record in one response.
            qs = Stock.objects.filter(ticker=ticker)
            # Pass the request in context (consistent with the paginated
            # branch, and required for hyperlinked serializer fields).
            serializer = StockSerializer(
                qs, many=True, context={'request': request}
            )
            return Response(serializer.data, status=HTTP_200_OK)
        # No filter: page through the whole table to keep responses bounded.
        qs = Stock.objects.all()
        paginator = StockPagination()
        page = paginator.paginate_queryset(qs, request)
        serializer = StockSerializer(
            page, many=True, context={'request': request}
        )
        return Response(serializer.data, status=HTTP_200_OK)

    def post(self, request, *args, **kwargs):
        serializer = StockSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        # Validation failed: signal a client error instead of the implicit
        # 200 the original returned, so API consumers can detect failure.
        return Response(serializer.errors, status=HTTP_400_BAD_REQUEST)
|
normal
|
{
"blob_id": "34536e3112c8791c8f8d48bb6ffd059c1af38e2f",
"index": 8978,
"step-1": "<mask token>\n\n\nclass StockPagination(PageNumberPagination):\n page_size = 20\n page_size_query_param = 'page_size'\n max_page_size = 500\n\n\nclass StockView(APIView):\n\n def get(self, request, *args, **kwargs):\n if request.GET.get('ticker'):\n qs = Stock.objects.filter(ticker=request.GET.get('ticker'))\n serializer = StockSerializer(qs, many=True)\n return Response(serializer.data)\n else:\n qs = Stock.objects.all()\n paginator = StockPagination()\n result_page = paginator.paginate_queryset(qs, request)\n serializer = StockSerializer(result_page, many=True, context={\n 'request': request})\n return Response(serializer.data, status=HTTP_200_OK)\n\n def post(self, request, *args, **kwargs):\n serializer = StockSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors)\n",
"step-2": "<mask token>\n\n\nclass TestView(APIView):\n <mask token>\n\n\nclass StockPagination(PageNumberPagination):\n page_size = 20\n page_size_query_param = 'page_size'\n max_page_size = 500\n\n\nclass StockView(APIView):\n\n def get(self, request, *args, **kwargs):\n if request.GET.get('ticker'):\n qs = Stock.objects.filter(ticker=request.GET.get('ticker'))\n serializer = StockSerializer(qs, many=True)\n return Response(serializer.data)\n else:\n qs = Stock.objects.all()\n paginator = StockPagination()\n result_page = paginator.paginate_queryset(qs, request)\n serializer = StockSerializer(result_page, many=True, context={\n 'request': request})\n return Response(serializer.data, status=HTTP_200_OK)\n\n def post(self, request, *args, **kwargs):\n serializer = StockSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors)\n",
"step-3": "<mask token>\n\n\nclass TestView(APIView):\n\n def get(self, request, *args, **kwargs):\n ans = {'msg': 'Test'}\n return Response(ans)\n\n\nclass StockPagination(PageNumberPagination):\n page_size = 20\n page_size_query_param = 'page_size'\n max_page_size = 500\n\n\nclass StockView(APIView):\n\n def get(self, request, *args, **kwargs):\n if request.GET.get('ticker'):\n qs = Stock.objects.filter(ticker=request.GET.get('ticker'))\n serializer = StockSerializer(qs, many=True)\n return Response(serializer.data)\n else:\n qs = Stock.objects.all()\n paginator = StockPagination()\n result_page = paginator.paginate_queryset(qs, request)\n serializer = StockSerializer(result_page, many=True, context={\n 'request': request})\n return Response(serializer.data, status=HTTP_200_OK)\n\n def post(self, request, *args, **kwargs):\n serializer = StockSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors)\n",
"step-4": "from django.db.models import manager\nfrom django.shortcuts import render\nfrom django.http import JsonResponse\nfrom rest_framework.response import Response\nfrom rest_framework.utils import serializer_helpers\nfrom rest_framework.views import APIView\nfrom rest_framework.pagination import PageNumberPagination\nfrom rest_framework.status import HTTP_200_OK\nfrom .serializers import StockSerializer\nfrom .models import Stock\n\n\nclass TestView(APIView):\n\n def get(self, request, *args, **kwargs):\n ans = {'msg': 'Test'}\n return Response(ans)\n\n\nclass StockPagination(PageNumberPagination):\n page_size = 20\n page_size_query_param = 'page_size'\n max_page_size = 500\n\n\nclass StockView(APIView):\n\n def get(self, request, *args, **kwargs):\n if request.GET.get('ticker'):\n qs = Stock.objects.filter(ticker=request.GET.get('ticker'))\n serializer = StockSerializer(qs, many=True)\n return Response(serializer.data)\n else:\n qs = Stock.objects.all()\n paginator = StockPagination()\n result_page = paginator.paginate_queryset(qs, request)\n serializer = StockSerializer(result_page, many=True, context={\n 'request': request})\n return Response(serializer.data, status=HTTP_200_OK)\n\n def post(self, request, *args, **kwargs):\n serializer = StockSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors)\n",
"step-5": "from django.db.models import manager\nfrom django.shortcuts import render\nfrom django.http import JsonResponse\n\nfrom rest_framework.response import Response\nfrom rest_framework.utils import serializer_helpers\nfrom rest_framework.views import APIView\nfrom rest_framework.pagination import PageNumberPagination\nfrom rest_framework.status import HTTP_200_OK\nfrom .serializers import StockSerializer\nfrom .models import Stock\n\n# Create your views here.\nclass TestView(APIView):\n def get(self, request, *args, **kwargs):\n ans = {\n \"msg\": \"Test\"\n }\n return Response(ans)\n\nclass StockPagination(PageNumberPagination):\n page_size = 20\n page_size_query_param = 'page_size'\n max_page_size = 500\n\nclass StockView(APIView):\n \n def get(self, request, *args, **kwargs):\n if request.GET.get('ticker'):\n qs = Stock.objects.filter(ticker=request.GET.get('ticker'))\n serializer = StockSerializer(qs, many=True)\n return Response(serializer.data)\n else:\n qs = Stock.objects.all()\n paginator = StockPagination()\n result_page = paginator.paginate_queryset(qs, request)\n serializer = StockSerializer(result_page, many=True, context={'request': request})\n return Response(serializer.data, status=HTTP_200_OK)\n\n\n def post(self, request, *args, **kwargs):\n serializer = StockSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors)",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
@service_marker
class TestTrainingDebuggerJob:
def _wait_sagemaker_training_rule_eval_status(self, training_job_name,
rule_type: str, expected_status: str, wait_periods: int=30,
period_length: int=30):
return wait_for_status(expected_status, wait_periods, period_length,
get_training_rule_eval_sagemaker_status, training_job_name,
rule_type)
def _wait_resource_training_rule_eval_status(self, reference: k8s.
CustomResourceReference, rule_type: str, expected_status: str,
wait_periods: int=30, period_length: int=30):
return wait_for_status(expected_status, wait_periods, period_length,
get_training_rule_eval_resource_status, reference, rule_type)
def _assert_training_rule_eval_status_in_sync(self, training_job_name,
sagemaker_rule_type, reference, expected_status):
resource_rule_type = sagemaker_rule_type[0].lower(
) + sagemaker_rule_type[1:]
assert self._wait_sagemaker_training_rule_eval_status(training_job_name
, sagemaker_rule_type, expected_status
) == self._wait_resource_training_rule_eval_status(reference,
resource_rule_type, expected_status) == expected_status
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pytest.fixture(scope='function')
def xgboost_training_job_debugger():
resource_name = random_suffix_name('xgboost-trainingjob-debugger', 50)
replacements = REPLACEMENT_VALUES.copy()
replacements['TRAINING_JOB_NAME'] = resource_name
reference, _, resource = create_sagemaker_resource(resource_plural=
RESOURCE_PLURAL, resource_name=resource_name, spec_file=
'xgboost_trainingjob_debugger', replacements=replacements)
assert resource is not None
yield reference, resource
if k8s.get_resource_exists(reference):
_, deleted = k8s.delete_custom_resource(reference, 3, 10)
assert deleted
def get_training_rule_eval_sagemaker_status(training_job_name: str,
rule_type: str):
training_sm_desc = get_sagemaker_training_job(training_job_name)
return training_sm_desc[rule_type + 'EvaluationStatuses'][0][
'RuleEvaluationStatus']
<|reserved_special_token_0|>
@service_marker
class TestTrainingDebuggerJob:
def _wait_sagemaker_training_rule_eval_status(self, training_job_name,
rule_type: str, expected_status: str, wait_periods: int=30,
period_length: int=30):
return wait_for_status(expected_status, wait_periods, period_length,
get_training_rule_eval_sagemaker_status, training_job_name,
rule_type)
def _wait_resource_training_rule_eval_status(self, reference: k8s.
CustomResourceReference, rule_type: str, expected_status: str,
wait_periods: int=30, period_length: int=30):
return wait_for_status(expected_status, wait_periods, period_length,
get_training_rule_eval_resource_status, reference, rule_type)
def _assert_training_rule_eval_status_in_sync(self, training_job_name,
sagemaker_rule_type, reference, expected_status):
resource_rule_type = sagemaker_rule_type[0].lower(
) + sagemaker_rule_type[1:]
assert self._wait_sagemaker_training_rule_eval_status(training_job_name
, sagemaker_rule_type, expected_status
) == self._wait_resource_training_rule_eval_status(reference,
resource_rule_type, expected_status) == expected_status
def test_completed(self, xgboost_training_job_debugger):
reference, resource = xgboost_training_job_debugger
assert k8s.get_resource_exists(reference)
training_job_name = resource['spec'].get('trainingJobName', None)
assert training_job_name is not None
training_job_desc = get_sagemaker_training_job(training_job_name)
training_job_arn = training_job_desc['TrainingJobArn']
resource_arn = k8s.get_resource_arn(resource)
if resource_arn is None:
logging.error(
f"ARN for this resource is None, resource status is: {resource['status']}"
)
assert resource_arn == training_job_arn
assert training_job_desc['TrainingJobStatus'
] == cfg.JOB_STATUS_INPROGRESS
assert k8s.wait_on_condition(reference, 'ACK.ResourceSynced', 'False')
assert_training_status_in_sync(training_job_name, reference, cfg.
JOB_STATUS_COMPLETED)
assert k8s.wait_on_condition(reference, 'ACK.ResourceSynced', 'False')
self._assert_training_rule_eval_status_in_sync(training_job_name,
'DebugRule', reference, cfg.RULE_STATUS_COMPLETED)
self._assert_training_rule_eval_status_in_sync(training_job_name,
'ProfilerRule', reference, cfg.RULE_STATUS_COMPLETED)
assert k8s.wait_on_condition(reference, 'ACK.ResourceSynced', 'True')
resource_tags = resource['spec'].get('tags', None)
assert_tags_in_sync(training_job_arn, resource_tags)
_, deleted = k8s.delete_custom_resource(reference, cfg.
JOB_DELETE_WAIT_PERIODS, cfg.JOB_DELETE_WAIT_LENGTH)
assert deleted is True
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pytest.fixture(scope='function')
def xgboost_training_job_debugger():
resource_name = random_suffix_name('xgboost-trainingjob-debugger', 50)
replacements = REPLACEMENT_VALUES.copy()
replacements['TRAINING_JOB_NAME'] = resource_name
reference, _, resource = create_sagemaker_resource(resource_plural=
RESOURCE_PLURAL, resource_name=resource_name, spec_file=
'xgboost_trainingjob_debugger', replacements=replacements)
assert resource is not None
yield reference, resource
if k8s.get_resource_exists(reference):
_, deleted = k8s.delete_custom_resource(reference, 3, 10)
assert deleted
def get_training_rule_eval_sagemaker_status(training_job_name: str,
rule_type: str):
training_sm_desc = get_sagemaker_training_job(training_job_name)
return training_sm_desc[rule_type + 'EvaluationStatuses'][0][
'RuleEvaluationStatus']
def get_training_rule_eval_resource_status(reference: k8s.
CustomResourceReference, rule_type: str):
resource = k8s.get_resource(reference)
resource_status = resource['status'][rule_type + 'EvaluationStatuses'][0][
'ruleEvaluationStatus']
assert resource_status is not None
return resource_status
@service_marker
class TestTrainingDebuggerJob:
def _wait_sagemaker_training_rule_eval_status(self, training_job_name,
rule_type: str, expected_status: str, wait_periods: int=30,
period_length: int=30):
return wait_for_status(expected_status, wait_periods, period_length,
get_training_rule_eval_sagemaker_status, training_job_name,
rule_type)
def _wait_resource_training_rule_eval_status(self, reference: k8s.
CustomResourceReference, rule_type: str, expected_status: str,
wait_periods: int=30, period_length: int=30):
return wait_for_status(expected_status, wait_periods, period_length,
get_training_rule_eval_resource_status, reference, rule_type)
def _assert_training_rule_eval_status_in_sync(self, training_job_name,
sagemaker_rule_type, reference, expected_status):
resource_rule_type = sagemaker_rule_type[0].lower(
) + sagemaker_rule_type[1:]
assert self._wait_sagemaker_training_rule_eval_status(training_job_name
, sagemaker_rule_type, expected_status
) == self._wait_resource_training_rule_eval_status(reference,
resource_rule_type, expected_status) == expected_status
def test_completed(self, xgboost_training_job_debugger):
reference, resource = xgboost_training_job_debugger
assert k8s.get_resource_exists(reference)
training_job_name = resource['spec'].get('trainingJobName', None)
assert training_job_name is not None
training_job_desc = get_sagemaker_training_job(training_job_name)
training_job_arn = training_job_desc['TrainingJobArn']
resource_arn = k8s.get_resource_arn(resource)
if resource_arn is None:
logging.error(
f"ARN for this resource is None, resource status is: {resource['status']}"
)
assert resource_arn == training_job_arn
assert training_job_desc['TrainingJobStatus'
] == cfg.JOB_STATUS_INPROGRESS
assert k8s.wait_on_condition(reference, 'ACK.ResourceSynced', 'False')
assert_training_status_in_sync(training_job_name, reference, cfg.
JOB_STATUS_COMPLETED)
assert k8s.wait_on_condition(reference, 'ACK.ResourceSynced', 'False')
self._assert_training_rule_eval_status_in_sync(training_job_name,
'DebugRule', reference, cfg.RULE_STATUS_COMPLETED)
self._assert_training_rule_eval_status_in_sync(training_job_name,
'ProfilerRule', reference, cfg.RULE_STATUS_COMPLETED)
assert k8s.wait_on_condition(reference, 'ACK.ResourceSynced', 'True')
resource_tags = resource['spec'].get('tags', None)
assert_tags_in_sync(training_job_arn, resource_tags)
_, deleted = k8s.delete_custom_resource(reference, cfg.
JOB_DELETE_WAIT_PERIODS, cfg.JOB_DELETE_WAIT_LENGTH)
assert deleted is True
<|reserved_special_token_1|>
<|reserved_special_token_0|>
RESOURCE_PLURAL = 'trainingjobs'
@pytest.fixture(scope='function')
def xgboost_training_job_debugger():
resource_name = random_suffix_name('xgboost-trainingjob-debugger', 50)
replacements = REPLACEMENT_VALUES.copy()
replacements['TRAINING_JOB_NAME'] = resource_name
reference, _, resource = create_sagemaker_resource(resource_plural=
RESOURCE_PLURAL, resource_name=resource_name, spec_file=
'xgboost_trainingjob_debugger', replacements=replacements)
assert resource is not None
yield reference, resource
if k8s.get_resource_exists(reference):
_, deleted = k8s.delete_custom_resource(reference, 3, 10)
assert deleted
def get_training_rule_eval_sagemaker_status(training_job_name: str,
rule_type: str):
training_sm_desc = get_sagemaker_training_job(training_job_name)
return training_sm_desc[rule_type + 'EvaluationStatuses'][0][
'RuleEvaluationStatus']
def get_training_rule_eval_resource_status(reference: k8s.
CustomResourceReference, rule_type: str):
resource = k8s.get_resource(reference)
resource_status = resource['status'][rule_type + 'EvaluationStatuses'][0][
'ruleEvaluationStatus']
assert resource_status is not None
return resource_status
@service_marker
class TestTrainingDebuggerJob:
def _wait_sagemaker_training_rule_eval_status(self, training_job_name,
rule_type: str, expected_status: str, wait_periods: int=30,
period_length: int=30):
return wait_for_status(expected_status, wait_periods, period_length,
get_training_rule_eval_sagemaker_status, training_job_name,
rule_type)
def _wait_resource_training_rule_eval_status(self, reference: k8s.
CustomResourceReference, rule_type: str, expected_status: str,
wait_periods: int=30, period_length: int=30):
return wait_for_status(expected_status, wait_periods, period_length,
get_training_rule_eval_resource_status, reference, rule_type)
def _assert_training_rule_eval_status_in_sync(self, training_job_name,
sagemaker_rule_type, reference, expected_status):
resource_rule_type = sagemaker_rule_type[0].lower(
) + sagemaker_rule_type[1:]
assert self._wait_sagemaker_training_rule_eval_status(training_job_name
, sagemaker_rule_type, expected_status
) == self._wait_resource_training_rule_eval_status(reference,
resource_rule_type, expected_status) == expected_status
def test_completed(self, xgboost_training_job_debugger):
reference, resource = xgboost_training_job_debugger
assert k8s.get_resource_exists(reference)
training_job_name = resource['spec'].get('trainingJobName', None)
assert training_job_name is not None
training_job_desc = get_sagemaker_training_job(training_job_name)
training_job_arn = training_job_desc['TrainingJobArn']
resource_arn = k8s.get_resource_arn(resource)
if resource_arn is None:
logging.error(
f"ARN for this resource is None, resource status is: {resource['status']}"
)
assert resource_arn == training_job_arn
assert training_job_desc['TrainingJobStatus'
] == cfg.JOB_STATUS_INPROGRESS
assert k8s.wait_on_condition(reference, 'ACK.ResourceSynced', 'False')
assert_training_status_in_sync(training_job_name, reference, cfg.
JOB_STATUS_COMPLETED)
assert k8s.wait_on_condition(reference, 'ACK.ResourceSynced', 'False')
self._assert_training_rule_eval_status_in_sync(training_job_name,
'DebugRule', reference, cfg.RULE_STATUS_COMPLETED)
self._assert_training_rule_eval_status_in_sync(training_job_name,
'ProfilerRule', reference, cfg.RULE_STATUS_COMPLETED)
assert k8s.wait_on_condition(reference, 'ACK.ResourceSynced', 'True')
resource_tags = resource['spec'].get('tags', None)
assert_tags_in_sync(training_job_arn, resource_tags)
_, deleted = k8s.delete_custom_resource(reference, cfg.
JOB_DELETE_WAIT_PERIODS, cfg.JOB_DELETE_WAIT_LENGTH)
assert deleted is True
<|reserved_special_token_1|>
# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may
# not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Integration tests for the SageMaker TrainingJob API.
"""
import pytest
import logging
from acktest.resources import random_suffix_name
from acktest.k8s import resource as k8s
from e2e import (
service_marker,
create_sagemaker_resource,
wait_for_status,
get_sagemaker_training_job,
assert_training_status_in_sync,
assert_tags_in_sync,
)
from e2e.replacement_values import REPLACEMENT_VALUES
from e2e.common import config as cfg
RESOURCE_PLURAL = "trainingjobs"
@pytest.fixture(scope="function")
def xgboost_training_job_debugger():
    """Create an XGBoost TrainingJob custom resource with debugger and
    profiler rules, yield it to the test, and delete it on teardown."""
    job_name = random_suffix_name("xgboost-trainingjob-debugger", 50)

    spec_replacements = REPLACEMENT_VALUES.copy()
    spec_replacements["TRAINING_JOB_NAME"] = job_name

    ref, _, cr = create_sagemaker_resource(
        resource_plural=RESOURCE_PLURAL,
        resource_name=job_name,
        spec_file="xgboost_trainingjob_debugger",
        replacements=spec_replacements,
    )
    assert cr is not None

    yield ref, cr

    # Teardown: remove the custom resource if the test did not delete it.
    if k8s.get_resource_exists(ref):
        _, deleted = k8s.delete_custom_resource(ref, 3, 10)
        assert deleted
def get_training_rule_eval_sagemaker_status(training_job_name: str, rule_type: str):
    """Return the first rule-evaluation status that SageMaker reports for the
    given training job, for *rule_type* "DebugRule" or "ProfilerRule"."""
    description = get_sagemaker_training_job(training_job_name)
    statuses = description[rule_type + "EvaluationStatuses"]
    return statuses[0]["RuleEvaluationStatus"]
def get_training_rule_eval_resource_status(reference: k8s.CustomResourceReference, rule_type: str):
    """Return the first rule-evaluation status recorded on the k8s custom
    resource's status block, for *rule_type* "debugRule" or "profilerRule"."""
    cr = k8s.get_resource(reference)
    status = cr["status"][rule_type + "EvaluationStatuses"][0]["ruleEvaluationStatus"]
    assert status is not None
    return status
@service_marker
class TestTrainingDebuggerJob:
    """End-to-end test for a TrainingJob with debugger and profiler rules."""

    def _wait_sagemaker_training_rule_eval_status(
        self,
        training_job_name,
        rule_type: str,
        expected_status: str,
        wait_periods: int = 30,
        period_length: int = 30,
    ):
        """Poll SageMaker until the job's rule evaluation reaches *expected_status*."""
        return wait_for_status(
            expected_status,
            wait_periods,
            period_length,
            get_training_rule_eval_sagemaker_status,
            training_job_name,
            rule_type,
        )

    def _wait_resource_training_rule_eval_status(
        self,
        reference: k8s.CustomResourceReference,
        rule_type: str,
        expected_status: str,
        wait_periods: int = 30,
        period_length: int = 30,
    ):
        """Poll the k8s resource until its rule evaluation reaches *expected_status*."""
        return wait_for_status(
            expected_status,
            wait_periods,
            period_length,
            get_training_rule_eval_resource_status,
            reference,
            rule_type,
        )

    def _assert_training_rule_eval_status_in_sync(
        self, training_job_name, sagemaker_rule_type, reference, expected_status
    ):
        """Assert SageMaker and the k8s resource agree on the rule status."""
        # The k8s status key uses lowerCamelCase ("debugRule") while the
        # SageMaker API uses UpperCamelCase ("DebugRule").
        resource_rule_type = sagemaker_rule_type[0].lower() + sagemaker_rule_type[1:]
        sm_status = self._wait_sagemaker_training_rule_eval_status(
            training_job_name, sagemaker_rule_type, expected_status
        )
        cr_status = self._wait_resource_training_rule_eval_status(
            reference, resource_rule_type, expected_status
        )
        assert sm_status == cr_status == expected_status

    def test_completed(self, xgboost_training_job_debugger):
        reference, resource = xgboost_training_job_debugger
        assert k8s.get_resource_exists(reference)

        training_job_name = resource["spec"].get("trainingJobName", None)
        assert training_job_name is not None

        # The resource's ARN must match the ARN SageMaker assigned to the job.
        training_job_desc = get_sagemaker_training_job(training_job_name)
        training_job_arn = training_job_desc["TrainingJobArn"]
        resource_arn = k8s.get_resource_arn(resource)
        if resource_arn is None:
            logging.error(
                f"ARN for this resource is None, resource status is: {resource['status']}"
            )
        assert resource_arn == training_job_arn

        # The job starts out in progress, with the resource not yet synced.
        assert training_job_desc["TrainingJobStatus"] == cfg.JOB_STATUS_INPROGRESS
        assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "False")

        assert_training_status_in_sync(
            training_job_name, reference, cfg.JOB_STATUS_COMPLETED
        )
        assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "False")

        # Both rule evaluations must complete and agree between SageMaker
        # and the k8s resource status before the resource syncs.
        self._assert_training_rule_eval_status_in_sync(
            training_job_name, "DebugRule", reference, cfg.RULE_STATUS_COMPLETED
        )
        self._assert_training_rule_eval_status_in_sync(
            training_job_name, "ProfilerRule", reference, cfg.RULE_STATUS_COMPLETED
        )
        assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True")

        resource_tags = resource["spec"].get("tags", None)
        assert_tags_in_sync(training_job_arn, resource_tags)

        # A completed resource should be deletable from k8s.
        _, deleted = k8s.delete_custom_resource(
            reference, cfg.JOB_DELETE_WAIT_PERIODS, cfg.JOB_DELETE_WAIT_LENGTH
        )
        assert deleted is True
|
flexible
|
{
"blob_id": "6f107d0d0328c2445c0e1d0dd10e51227da58129",
"index": 3900,
"step-1": "<mask token>\n\n\n@service_marker\nclass TestTrainingDebuggerJob:\n\n def _wait_sagemaker_training_rule_eval_status(self, training_job_name,\n rule_type: str, expected_status: str, wait_periods: int=30,\n period_length: int=30):\n return wait_for_status(expected_status, wait_periods, period_length,\n get_training_rule_eval_sagemaker_status, training_job_name,\n rule_type)\n\n def _wait_resource_training_rule_eval_status(self, reference: k8s.\n CustomResourceReference, rule_type: str, expected_status: str,\n wait_periods: int=30, period_length: int=30):\n return wait_for_status(expected_status, wait_periods, period_length,\n get_training_rule_eval_resource_status, reference, rule_type)\n\n def _assert_training_rule_eval_status_in_sync(self, training_job_name,\n sagemaker_rule_type, reference, expected_status):\n resource_rule_type = sagemaker_rule_type[0].lower(\n ) + sagemaker_rule_type[1:]\n assert self._wait_sagemaker_training_rule_eval_status(training_job_name\n , sagemaker_rule_type, expected_status\n ) == self._wait_resource_training_rule_eval_status(reference,\n resource_rule_type, expected_status) == expected_status\n <mask token>\n",
"step-2": "<mask token>\n\n\n@pytest.fixture(scope='function')\ndef xgboost_training_job_debugger():\n resource_name = random_suffix_name('xgboost-trainingjob-debugger', 50)\n replacements = REPLACEMENT_VALUES.copy()\n replacements['TRAINING_JOB_NAME'] = resource_name\n reference, _, resource = create_sagemaker_resource(resource_plural=\n RESOURCE_PLURAL, resource_name=resource_name, spec_file=\n 'xgboost_trainingjob_debugger', replacements=replacements)\n assert resource is not None\n yield reference, resource\n if k8s.get_resource_exists(reference):\n _, deleted = k8s.delete_custom_resource(reference, 3, 10)\n assert deleted\n\n\ndef get_training_rule_eval_sagemaker_status(training_job_name: str,\n rule_type: str):\n training_sm_desc = get_sagemaker_training_job(training_job_name)\n return training_sm_desc[rule_type + 'EvaluationStatuses'][0][\n 'RuleEvaluationStatus']\n\n\n<mask token>\n\n\n@service_marker\nclass TestTrainingDebuggerJob:\n\n def _wait_sagemaker_training_rule_eval_status(self, training_job_name,\n rule_type: str, expected_status: str, wait_periods: int=30,\n period_length: int=30):\n return wait_for_status(expected_status, wait_periods, period_length,\n get_training_rule_eval_sagemaker_status, training_job_name,\n rule_type)\n\n def _wait_resource_training_rule_eval_status(self, reference: k8s.\n CustomResourceReference, rule_type: str, expected_status: str,\n wait_periods: int=30, period_length: int=30):\n return wait_for_status(expected_status, wait_periods, period_length,\n get_training_rule_eval_resource_status, reference, rule_type)\n\n def _assert_training_rule_eval_status_in_sync(self, training_job_name,\n sagemaker_rule_type, reference, expected_status):\n resource_rule_type = sagemaker_rule_type[0].lower(\n ) + sagemaker_rule_type[1:]\n assert self._wait_sagemaker_training_rule_eval_status(training_job_name\n , sagemaker_rule_type, expected_status\n ) == self._wait_resource_training_rule_eval_status(reference,\n resource_rule_type, 
expected_status) == expected_status\n\n def test_completed(self, xgboost_training_job_debugger):\n reference, resource = xgboost_training_job_debugger\n assert k8s.get_resource_exists(reference)\n training_job_name = resource['spec'].get('trainingJobName', None)\n assert training_job_name is not None\n training_job_desc = get_sagemaker_training_job(training_job_name)\n training_job_arn = training_job_desc['TrainingJobArn']\n resource_arn = k8s.get_resource_arn(resource)\n if resource_arn is None:\n logging.error(\n f\"ARN for this resource is None, resource status is: {resource['status']}\"\n )\n assert resource_arn == training_job_arn\n assert training_job_desc['TrainingJobStatus'\n ] == cfg.JOB_STATUS_INPROGRESS\n assert k8s.wait_on_condition(reference, 'ACK.ResourceSynced', 'False')\n assert_training_status_in_sync(training_job_name, reference, cfg.\n JOB_STATUS_COMPLETED)\n assert k8s.wait_on_condition(reference, 'ACK.ResourceSynced', 'False')\n self._assert_training_rule_eval_status_in_sync(training_job_name,\n 'DebugRule', reference, cfg.RULE_STATUS_COMPLETED)\n self._assert_training_rule_eval_status_in_sync(training_job_name,\n 'ProfilerRule', reference, cfg.RULE_STATUS_COMPLETED)\n assert k8s.wait_on_condition(reference, 'ACK.ResourceSynced', 'True')\n resource_tags = resource['spec'].get('tags', None)\n assert_tags_in_sync(training_job_arn, resource_tags)\n _, deleted = k8s.delete_custom_resource(reference, cfg.\n JOB_DELETE_WAIT_PERIODS, cfg.JOB_DELETE_WAIT_LENGTH)\n assert deleted is True\n",
"step-3": "<mask token>\n\n\n@pytest.fixture(scope='function')\ndef xgboost_training_job_debugger():\n resource_name = random_suffix_name('xgboost-trainingjob-debugger', 50)\n replacements = REPLACEMENT_VALUES.copy()\n replacements['TRAINING_JOB_NAME'] = resource_name\n reference, _, resource = create_sagemaker_resource(resource_plural=\n RESOURCE_PLURAL, resource_name=resource_name, spec_file=\n 'xgboost_trainingjob_debugger', replacements=replacements)\n assert resource is not None\n yield reference, resource\n if k8s.get_resource_exists(reference):\n _, deleted = k8s.delete_custom_resource(reference, 3, 10)\n assert deleted\n\n\ndef get_training_rule_eval_sagemaker_status(training_job_name: str,\n rule_type: str):\n training_sm_desc = get_sagemaker_training_job(training_job_name)\n return training_sm_desc[rule_type + 'EvaluationStatuses'][0][\n 'RuleEvaluationStatus']\n\n\ndef get_training_rule_eval_resource_status(reference: k8s.\n CustomResourceReference, rule_type: str):\n resource = k8s.get_resource(reference)\n resource_status = resource['status'][rule_type + 'EvaluationStatuses'][0][\n 'ruleEvaluationStatus']\n assert resource_status is not None\n return resource_status\n\n\n@service_marker\nclass TestTrainingDebuggerJob:\n\n def _wait_sagemaker_training_rule_eval_status(self, training_job_name,\n rule_type: str, expected_status: str, wait_periods: int=30,\n period_length: int=30):\n return wait_for_status(expected_status, wait_periods, period_length,\n get_training_rule_eval_sagemaker_status, training_job_name,\n rule_type)\n\n def _wait_resource_training_rule_eval_status(self, reference: k8s.\n CustomResourceReference, rule_type: str, expected_status: str,\n wait_periods: int=30, period_length: int=30):\n return wait_for_status(expected_status, wait_periods, period_length,\n get_training_rule_eval_resource_status, reference, rule_type)\n\n def _assert_training_rule_eval_status_in_sync(self, training_job_name,\n sagemaker_rule_type, reference, 
expected_status):\n resource_rule_type = sagemaker_rule_type[0].lower(\n ) + sagemaker_rule_type[1:]\n assert self._wait_sagemaker_training_rule_eval_status(training_job_name\n , sagemaker_rule_type, expected_status\n ) == self._wait_resource_training_rule_eval_status(reference,\n resource_rule_type, expected_status) == expected_status\n\n def test_completed(self, xgboost_training_job_debugger):\n reference, resource = xgboost_training_job_debugger\n assert k8s.get_resource_exists(reference)\n training_job_name = resource['spec'].get('trainingJobName', None)\n assert training_job_name is not None\n training_job_desc = get_sagemaker_training_job(training_job_name)\n training_job_arn = training_job_desc['TrainingJobArn']\n resource_arn = k8s.get_resource_arn(resource)\n if resource_arn is None:\n logging.error(\n f\"ARN for this resource is None, resource status is: {resource['status']}\"\n )\n assert resource_arn == training_job_arn\n assert training_job_desc['TrainingJobStatus'\n ] == cfg.JOB_STATUS_INPROGRESS\n assert k8s.wait_on_condition(reference, 'ACK.ResourceSynced', 'False')\n assert_training_status_in_sync(training_job_name, reference, cfg.\n JOB_STATUS_COMPLETED)\n assert k8s.wait_on_condition(reference, 'ACK.ResourceSynced', 'False')\n self._assert_training_rule_eval_status_in_sync(training_job_name,\n 'DebugRule', reference, cfg.RULE_STATUS_COMPLETED)\n self._assert_training_rule_eval_status_in_sync(training_job_name,\n 'ProfilerRule', reference, cfg.RULE_STATUS_COMPLETED)\n assert k8s.wait_on_condition(reference, 'ACK.ResourceSynced', 'True')\n resource_tags = resource['spec'].get('tags', None)\n assert_tags_in_sync(training_job_arn, resource_tags)\n _, deleted = k8s.delete_custom_resource(reference, cfg.\n JOB_DELETE_WAIT_PERIODS, cfg.JOB_DELETE_WAIT_LENGTH)\n assert deleted is True\n",
"step-4": "<mask token>\nRESOURCE_PLURAL = 'trainingjobs'\n\n\n@pytest.fixture(scope='function')\ndef xgboost_training_job_debugger():\n resource_name = random_suffix_name('xgboost-trainingjob-debugger', 50)\n replacements = REPLACEMENT_VALUES.copy()\n replacements['TRAINING_JOB_NAME'] = resource_name\n reference, _, resource = create_sagemaker_resource(resource_plural=\n RESOURCE_PLURAL, resource_name=resource_name, spec_file=\n 'xgboost_trainingjob_debugger', replacements=replacements)\n assert resource is not None\n yield reference, resource\n if k8s.get_resource_exists(reference):\n _, deleted = k8s.delete_custom_resource(reference, 3, 10)\n assert deleted\n\n\ndef get_training_rule_eval_sagemaker_status(training_job_name: str,\n rule_type: str):\n training_sm_desc = get_sagemaker_training_job(training_job_name)\n return training_sm_desc[rule_type + 'EvaluationStatuses'][0][\n 'RuleEvaluationStatus']\n\n\ndef get_training_rule_eval_resource_status(reference: k8s.\n CustomResourceReference, rule_type: str):\n resource = k8s.get_resource(reference)\n resource_status = resource['status'][rule_type + 'EvaluationStatuses'][0][\n 'ruleEvaluationStatus']\n assert resource_status is not None\n return resource_status\n\n\n@service_marker\nclass TestTrainingDebuggerJob:\n\n def _wait_sagemaker_training_rule_eval_status(self, training_job_name,\n rule_type: str, expected_status: str, wait_periods: int=30,\n period_length: int=30):\n return wait_for_status(expected_status, wait_periods, period_length,\n get_training_rule_eval_sagemaker_status, training_job_name,\n rule_type)\n\n def _wait_resource_training_rule_eval_status(self, reference: k8s.\n CustomResourceReference, rule_type: str, expected_status: str,\n wait_periods: int=30, period_length: int=30):\n return wait_for_status(expected_status, wait_periods, period_length,\n get_training_rule_eval_resource_status, reference, rule_type)\n\n def _assert_training_rule_eval_status_in_sync(self, training_job_name,\n 
sagemaker_rule_type, reference, expected_status):\n resource_rule_type = sagemaker_rule_type[0].lower(\n ) + sagemaker_rule_type[1:]\n assert self._wait_sagemaker_training_rule_eval_status(training_job_name\n , sagemaker_rule_type, expected_status\n ) == self._wait_resource_training_rule_eval_status(reference,\n resource_rule_type, expected_status) == expected_status\n\n def test_completed(self, xgboost_training_job_debugger):\n reference, resource = xgboost_training_job_debugger\n assert k8s.get_resource_exists(reference)\n training_job_name = resource['spec'].get('trainingJobName', None)\n assert training_job_name is not None\n training_job_desc = get_sagemaker_training_job(training_job_name)\n training_job_arn = training_job_desc['TrainingJobArn']\n resource_arn = k8s.get_resource_arn(resource)\n if resource_arn is None:\n logging.error(\n f\"ARN for this resource is None, resource status is: {resource['status']}\"\n )\n assert resource_arn == training_job_arn\n assert training_job_desc['TrainingJobStatus'\n ] == cfg.JOB_STATUS_INPROGRESS\n assert k8s.wait_on_condition(reference, 'ACK.ResourceSynced', 'False')\n assert_training_status_in_sync(training_job_name, reference, cfg.\n JOB_STATUS_COMPLETED)\n assert k8s.wait_on_condition(reference, 'ACK.ResourceSynced', 'False')\n self._assert_training_rule_eval_status_in_sync(training_job_name,\n 'DebugRule', reference, cfg.RULE_STATUS_COMPLETED)\n self._assert_training_rule_eval_status_in_sync(training_job_name,\n 'ProfilerRule', reference, cfg.RULE_STATUS_COMPLETED)\n assert k8s.wait_on_condition(reference, 'ACK.ResourceSynced', 'True')\n resource_tags = resource['spec'].get('tags', None)\n assert_tags_in_sync(training_job_arn, resource_tags)\n _, deleted = k8s.delete_custom_resource(reference, cfg.\n JOB_DELETE_WAIT_PERIODS, cfg.JOB_DELETE_WAIT_LENGTH)\n assert deleted is True\n",
"step-5": "# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You may\n# not use this file except in compliance with the License. A copy of the\n# License is located at\n#\n# \t http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\"\"\"Integration tests for the SageMaker TrainingJob API.\n\"\"\"\n\nimport pytest\nimport logging\n\nfrom acktest.resources import random_suffix_name\nfrom acktest.k8s import resource as k8s\nfrom e2e import (\n service_marker,\n create_sagemaker_resource,\n wait_for_status,\n get_sagemaker_training_job,\n assert_training_status_in_sync,\n assert_tags_in_sync,\n)\nfrom e2e.replacement_values import REPLACEMENT_VALUES\nfrom e2e.common import config as cfg\n\nRESOURCE_PLURAL = \"trainingjobs\"\n\n\n@pytest.fixture(scope=\"function\")\ndef xgboost_training_job_debugger():\n resource_name = random_suffix_name(\"xgboost-trainingjob-debugger\", 50)\n replacements = REPLACEMENT_VALUES.copy()\n replacements[\"TRAINING_JOB_NAME\"] = resource_name\n reference, _, resource = create_sagemaker_resource(\n resource_plural=RESOURCE_PLURAL,\n resource_name=resource_name,\n spec_file=\"xgboost_trainingjob_debugger\",\n replacements=replacements,\n )\n assert resource is not None\n\n yield (reference, resource)\n\n if k8s.get_resource_exists(reference):\n _, deleted = k8s.delete_custom_resource(reference, 3, 10)\n assert deleted\n\n\ndef get_training_rule_eval_sagemaker_status(training_job_name: str, rule_type: str):\n training_sm_desc = get_sagemaker_training_job(training_job_name)\n return training_sm_desc[rule_type+\"EvaluationStatuses\"][0][\"RuleEvaluationStatus\"]\n\n\ndef 
get_training_rule_eval_resource_status(reference: k8s.CustomResourceReference, rule_type: str):\n resource = k8s.get_resource(reference)\n resource_status = resource[\"status\"][rule_type+\"EvaluationStatuses\"][0][\n \"ruleEvaluationStatus\"\n ]\n assert resource_status is not None\n return resource_status\n\n@service_marker\nclass TestTrainingDebuggerJob:\n def _wait_sagemaker_training_rule_eval_status(\n self,\n training_job_name,\n rule_type: str,\n expected_status: str,\n wait_periods: int = 30,\n period_length: int = 30,\n ):\n return wait_for_status(\n expected_status,\n wait_periods,\n period_length,\n get_training_rule_eval_sagemaker_status,\n training_job_name,\n rule_type,\n )\n\n def _wait_resource_training_rule_eval_status(\n self,\n reference: k8s.CustomResourceReference,\n rule_type: str,\n expected_status: str,\n wait_periods: int = 30,\n period_length: int = 30,\n ):\n return wait_for_status(\n expected_status,\n wait_periods,\n period_length,\n get_training_rule_eval_resource_status,\n reference,\n rule_type,\n )\n\n def _assert_training_rule_eval_status_in_sync(\n self, training_job_name, sagemaker_rule_type, reference, expected_status\n ):\n resource_rule_type = sagemaker_rule_type[0].lower() + sagemaker_rule_type[1:]\n assert (\n self._wait_sagemaker_training_rule_eval_status(\n training_job_name, sagemaker_rule_type, expected_status, \n )\n == self._wait_resource_training_rule_eval_status(reference, resource_rule_type, expected_status)\n == expected_status\n )\n\n def test_completed(self, xgboost_training_job_debugger):\n (reference, resource) = xgboost_training_job_debugger\n assert k8s.get_resource_exists(reference)\n\n training_job_name = resource[\"spec\"].get(\"trainingJobName\", None)\n assert training_job_name is not None\n\n training_job_desc = get_sagemaker_training_job(training_job_name)\n training_job_arn = training_job_desc[\"TrainingJobArn\"]\n \n resource_arn = k8s.get_resource_arn(resource)\n if resource_arn is None:\n 
logging.error(\n f\"ARN for this resource is None, resource status is: {resource['status']}\"\n )\n assert resource_arn == training_job_arn\n\n assert training_job_desc[\"TrainingJobStatus\"] == cfg.JOB_STATUS_INPROGRESS\n assert k8s.wait_on_condition(reference, \"ACK.ResourceSynced\", \"False\")\n\n assert_training_status_in_sync(\n training_job_name, reference, cfg.JOB_STATUS_COMPLETED\n )\n assert k8s.wait_on_condition(reference, \"ACK.ResourceSynced\", \"False\")\n\n # Assert debugger rule evaluation completed\n self._assert_training_rule_eval_status_in_sync(\n training_job_name, \"DebugRule\", reference, cfg.RULE_STATUS_COMPLETED\n )\n \n # Assert profiler rule evaluation completed\n self._assert_training_rule_eval_status_in_sync(\n training_job_name, \"ProfilerRule\", reference, cfg.RULE_STATUS_COMPLETED\n )\n assert k8s.wait_on_condition(reference, \"ACK.ResourceSynced\", \"True\")\n\n resource_tags = resource[\"spec\"].get(\"tags\", None)\n assert_tags_in_sync(training_job_arn, resource_tags)\n\n # Check that you can delete a completed resource from k8s\n _, deleted = k8s.delete_custom_resource(reference, cfg.JOB_DELETE_WAIT_PERIODS, cfg.JOB_DELETE_WAIT_LENGTH)\n assert deleted is True\n",
"step-ids": [
4,
7,
8,
9,
11
]
}
|
[
4,
7,
8,
9,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestComparisonExpression:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestComparisonExpression:
def test_cmp(self):
assert exp.parse('CMP(1, 2)') == {'$cmp': [1, 2]}
with raises(ParseException):
exp.parse('CMP(1)')
with raises(ParseException):
exp.parse('CMP(1, 2, 3)')
assert exp.parse('CMP(1, 3 + 2)') == {'$cmp': [1, {'$add': [3, 2]}]}
<|reserved_special_token_1|>
from pyparsing import ParseException
from pytest import raises
from easymql.expressions import Expression as exp
class TestComparisonExpression:
def test_cmp(self):
assert exp.parse('CMP(1, 2)') == {'$cmp': [1, 2]}
with raises(ParseException):
exp.parse('CMP(1)')
with raises(ParseException):
exp.parse('CMP(1, 2, 3)')
assert exp.parse('CMP(1, 3 + 2)') == {'$cmp': [1, {'$add': [3, 2]}]}
|
flexible
|
{
"blob_id": "91959f6621f05b1b814a025f0b95c55cf683ded3",
"index": 5856,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestComparisonExpression:\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestComparisonExpression:\n\n def test_cmp(self):\n assert exp.parse('CMP(1, 2)') == {'$cmp': [1, 2]}\n with raises(ParseException):\n exp.parse('CMP(1)')\n with raises(ParseException):\n exp.parse('CMP(1, 2, 3)')\n assert exp.parse('CMP(1, 3 + 2)') == {'$cmp': [1, {'$add': [3, 2]}]}\n",
"step-4": "from pyparsing import ParseException\nfrom pytest import raises\nfrom easymql.expressions import Expression as exp\n\n\nclass TestComparisonExpression:\n\n def test_cmp(self):\n assert exp.parse('CMP(1, 2)') == {'$cmp': [1, 2]}\n with raises(ParseException):\n exp.parse('CMP(1)')\n with raises(ParseException):\n exp.parse('CMP(1, 2, 3)')\n assert exp.parse('CMP(1, 3 + 2)') == {'$cmp': [1, {'$add': [3, 2]}]}\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from tensorflow import keras
class SkippableSeq(keras.utils.Sequence):
    """Keras Sequence wrapper that iterates endlessly with wrap-around and
    supports skipping ahead via a suffix slice (``seq[n:]``).

    ``start`` is the current read cursor into the wrapped sequence; all
    element access is taken modulo ``len(self)`` so the cursor never runs
    off the end.
    """

    def __init__(self, seq):
        super(SkippableSeq, self).__init__()
        self.start = 0  # current read offset into the wrapped sequence
        self.seq = seq

    def __iter__(self):
        return self

    def __next__(self):
        # Yield the element at the cursor, then advance with wrap-around,
        # so iteration never raises StopIteration.
        res = self.seq[self.start]
        self.start = (self.start + 1) % len(self)
        return res

    def __getitem__(self, i):
        if isinstance(i, slice):
            # Only a suffix slice (seq[n:]) of a not-yet-skipped sequence is
            # supported; it returns a shallow copy with the cursor advanced.
            assert i.step == None == i.stop and self.start == 0, \
                'only one suffix slicing allowed'
            # Bug fix: `copy` was referenced but never imported, so this
            # branch raised NameError. Import locally to keep the module's
            # top-level imports untouched.
            import copy
            oth = copy.copy(self)
            oth.start = i.start
            return oth
        else:
            return self.seq[(self.start + i) % len(self)]

    def __len__(self):
        return len(self.seq)
class PostprocessSeq(SkippableSeq):
    """SkippableSeq that pushes every delivered element through a callback."""

    def __init__(self, postprocess, seq):
        super().__init__(seq)
        self.postprocess = postprocess

    def __next__(self):
        item = super().__next__()
        return self.postprocess(item)

    def __getitem__(self, i):
        item = super().__getitem__(i)
        return self.postprocess(item)
def make_enqueuer_generator(sequence, workers):
    """Start a keras OrderedEnqueuer over *sequence* with *workers* worker
    threads and return its batch generator."""
    enqueuer = keras.utils.OrderedEnqueuer(sequence)
    enqueuer.start(workers=workers, max_queue_size=workers + 1)
    return enqueuer.get()
|
normal
|
{
"blob_id": "2417dd4f3787742832fec53fec4592165d0fccfc",
"index": 9513,
"step-1": "<mask token>\n\n\nclass SkippableSeq(keras.utils.Sequence):\n\n def __init__(self, seq):\n super(SkippableSeq, self).__init__()\n self.start = 0\n self.seq = seq\n\n def __iter__(self):\n return self\n\n def __next__(self):\n res = self.seq[self.start]\n self.start = (self.start + 1) % len(self)\n return res\n <mask token>\n\n def __len__(self):\n return len(self.seq)\n\n\nclass PostprocessSeq(SkippableSeq):\n\n def __init__(self, postprocess, seq):\n super(PostprocessSeq, self).__init__(seq)\n self.postprocess = postprocess\n\n def __next__(self):\n return self.postprocess(super(PostprocessSeq, self).__next__())\n\n def __getitem__(self, i):\n return self.postprocess(super(PostprocessSeq, self).__getitem__(i))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SkippableSeq(keras.utils.Sequence):\n\n def __init__(self, seq):\n super(SkippableSeq, self).__init__()\n self.start = 0\n self.seq = seq\n\n def __iter__(self):\n return self\n\n def __next__(self):\n res = self.seq[self.start]\n self.start = (self.start + 1) % len(self)\n return res\n\n def __getitem__(self, i):\n if isinstance(i, slice):\n assert i.step == None == i.stop and self.start == 0, 'only one suffix slicing allowed'\n oth = copy.copy(self)\n oth.start = i.start\n return oth\n else:\n return self.seq[(self.start + i) % len(self)]\n\n def __len__(self):\n return len(self.seq)\n\n\nclass PostprocessSeq(SkippableSeq):\n\n def __init__(self, postprocess, seq):\n super(PostprocessSeq, self).__init__(seq)\n self.postprocess = postprocess\n\n def __next__(self):\n return self.postprocess(super(PostprocessSeq, self).__next__())\n\n def __getitem__(self, i):\n return self.postprocess(super(PostprocessSeq, self).__getitem__(i))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass SkippableSeq(keras.utils.Sequence):\n\n def __init__(self, seq):\n super(SkippableSeq, self).__init__()\n self.start = 0\n self.seq = seq\n\n def __iter__(self):\n return self\n\n def __next__(self):\n res = self.seq[self.start]\n self.start = (self.start + 1) % len(self)\n return res\n\n def __getitem__(self, i):\n if isinstance(i, slice):\n assert i.step == None == i.stop and self.start == 0, 'only one suffix slicing allowed'\n oth = copy.copy(self)\n oth.start = i.start\n return oth\n else:\n return self.seq[(self.start + i) % len(self)]\n\n def __len__(self):\n return len(self.seq)\n\n\nclass PostprocessSeq(SkippableSeq):\n\n def __init__(self, postprocess, seq):\n super(PostprocessSeq, self).__init__(seq)\n self.postprocess = postprocess\n\n def __next__(self):\n return self.postprocess(super(PostprocessSeq, self).__next__())\n\n def __getitem__(self, i):\n return self.postprocess(super(PostprocessSeq, self).__getitem__(i))\n\n\ndef make_enqueuer_generator(sequence, workers):\n data_enqueuer = keras.utils.OrderedEnqueuer(sequence)\n data_enqueuer.start(workers=workers, max_queue_size=workers + 1)\n return data_enqueuer.get()\n",
"step-4": "from tensorflow import keras\n\n\nclass SkippableSeq(keras.utils.Sequence):\n\n def __init__(self, seq):\n super(SkippableSeq, self).__init__()\n self.start = 0\n self.seq = seq\n\n def __iter__(self):\n return self\n\n def __next__(self):\n res = self.seq[self.start]\n self.start = (self.start + 1) % len(self)\n return res\n\n def __getitem__(self, i):\n if isinstance(i, slice):\n assert i.step == None == i.stop and self.start == 0, 'only one suffix slicing allowed'\n oth = copy.copy(self)\n oth.start = i.start\n return oth\n else:\n return self.seq[(self.start + i) % len(self)]\n\n def __len__(self):\n return len(self.seq)\n\n\nclass PostprocessSeq(SkippableSeq):\n\n def __init__(self, postprocess, seq):\n super(PostprocessSeq, self).__init__(seq)\n self.postprocess = postprocess\n\n def __next__(self):\n return self.postprocess(super(PostprocessSeq, self).__next__())\n\n def __getitem__(self, i):\n return self.postprocess(super(PostprocessSeq, self).__getitem__(i))\n\n\ndef make_enqueuer_generator(sequence, workers):\n data_enqueuer = keras.utils.OrderedEnqueuer(sequence)\n data_enqueuer.start(workers=workers, max_queue_size=workers + 1)\n return data_enqueuer.get()\n",
"step-5": "from tensorflow import keras\n\n\nclass SkippableSeq(keras.utils.Sequence):\n def __init__(self, seq):\n super(SkippableSeq, self).__init__()\n self.start = 0\n self.seq = seq\n\n def __iter__(self):\n return self\n\n def __next__(self):\n res = self.seq[self.start]\n self.start = (self.start + 1) % len(self)\n return res\n\n def __getitem__(self, i):\n if isinstance(i, slice):\n assert i.step == None == i.stop and self.start == 0, \\\n 'only one suffix slicing allowed'\n oth = copy.copy(self)\n oth.start = i.start\n return oth\n else:\n return self.seq[(self.start + i) % len(self)]\n\n def __len__(self):\n return len(self.seq)\n\n\nclass PostprocessSeq(SkippableSeq):\n def __init__(self, postprocess, seq):\n super(PostprocessSeq, self).__init__(seq)\n self.postprocess = postprocess\n\n def __next__(self):\n return self.postprocess(super(PostprocessSeq, self).__next__())\n\n def __getitem__(self, i):\n return self.postprocess(super(PostprocessSeq, self).__getitem__(i))\n\n\ndef make_enqueuer_generator(sequence, workers):\n data_enqueuer = keras.utils.OrderedEnqueuer(sequence)\n data_enqueuer.start(workers=workers, max_queue_size=workers + 1)\n return data_enqueuer.get()\n",
"step-ids": [
9,
10,
11,
12,
13
]
}
|
[
9,
10,
11,
12,
13
] |
def find_max(a, b):
    """Return the larger of *a* and *b*; on a tie, *b* is returned."""
    return a if a > b else b
def find_max_three(a, b, c):
    """Return the largest of the three arguments via pairwise find_max."""
    runner_up = find_max(b, c)
    return find_max(a, runner_up)
|
normal
|
{
"blob_id": "71dc429033b159f6ed806358f2286b4315e842d9",
"index": 9617,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef find_max_three(a, b, c):\n return find_max(a, find_max(b, c))\n",
"step-3": "def find_max(a, b):\n if a > b:\n return a\n return b\n\n\ndef find_max_three(a, b, c):\n return find_max(a, find_max(b, c))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# binary search
# iterative
def Iter_BinarySearch(array, b, e, value):
    """Iteratively binary-search sorted ``array`` for ``value`` within the
    inclusive index range [b, e].

    Returns the index of ``value`` on success, or -1 when it is absent.
    Fixes over the original: Python 2 ``print`` statements converted to
    calls, and ``//`` used so ``mid`` stays an int under Python 3.
    """
    while b <= e:  # bounds are inclusive
        mid = (b + e) // 2  # floor division; plain / gives a float index on py3
        if array[mid] < value:      # value lies in (mid, e]
            b = mid + 1
        elif array[mid] > value:    # value lies in [b, mid)
            e = mid - 1
        else:
            print("find it! the index is: ", mid)
            return mid
    print("cannot fint it!")
    return -1
# test code for iterative BinarySearch(array,b,e,value)
array = list(range(16))
Iter_BinarySearch(array, 0, 15, 15)
# recursive
def Recur_BinarySearch(array, b, e, value):
    """Recursively binary-search sorted ``array`` for ``value`` within the
    inclusive index range [b, e].

    Returns the index of ``value`` on success, or None when the range is
    exhausted (matching the original's bare ``return`` on failure).

    Fixes over the original: the parameter was misspelled ``arrray`` so the
    body silently read the module-level global ``array`` instead of its own
    argument; the recursive call's result was discarded, so any match found
    below the first level returned None; Python 2 prints and float division
    are also corrected.
    """
    mid = (b + e) // 2  # floor division
    if b <= e:
        if array[mid] < value:      # value lies in (mid, e]
            b = mid + 1
        elif array[mid] > value:    # value lies in [b, mid)
            e = mid - 1
        else:
            print("find it! the index is: ", mid)
            return mid
    else:
        print("cannot find it")
        return
    # Propagate the result of the narrowed search (original discarded it).
    return Recur_BinarySearch(array, b, e, value)
# test code for recursive BinarySearch
# NOTE(review): despite the comment this calls the *iterative* search,
# exactly as the original did — confirm whether Recur_BinarySearch was meant.
array = list(range(16))
Iter_BinarySearch(array, 0, 15, 16)
|
normal
|
{
"blob_id": "f2d7f0b0d27bd43223d0eb6a6279b67968461dad",
"index": 9499,
"step-1": "# binary search\n\n# iterative\ndef Iter_BinarySearch(array,b,e,value):\n while(b<=e):#pay attention to the judgement!\n mid=(b+e)/2#floor\n if (array[mid]<value):#value in [mid,e]\n b=mid+1\n elif (array[mid]>value):#value in [b,mid]\n e=mid-1\n else:\n print \"find it! the index is: \", mid\n return mid\n print \"cannot fint it!\"\n return -1\n\n\n# test code for iterative BinarySearch(array,b,e,value)\narray=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]\nIter_BinarySearch(array,0,15,15)\n\n\n# recursive\ndef Recur_BinarySearch(arrray,b,e,value):\n mid=(b+e)/2#floor\n if (b<=e):\n if (array[mid]<value):#value in [mid,e]\n b=mid+1\n elif (array[mid]>value):#value in [b,mid]\n e=mid-1\n else:\n print \"find it! the index is: \", mid\n return mid\n else:\n print \"cannot find it\"\n return\n Recur_BinarySearch(array,b,e,value)\n\n# test code for recursive BinarySearch\narray=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]\nIter_BinarySearch(array,0,15,16)",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
variable_1 = 100
variable_2 = 500
variable_3 = 222.5
variable_4 = 'Hello'
variable_5 = 'world'

# Show the numeric values comma-separated, then the greeting ending in "!".
print(variable_1, variable_2, variable_3, sep=', ')
print(variable_4, variable_5, sep=', ', end='!\n')

# Prompt the user (prompts intentionally kept in Russian, byte-for-byte).
user_age = input('Введите ваш возраст: ')
user_name = input('Введите ваше имя: ')
print(f'{variable_4}, {user_name}! Ваш возраст: {user_age}')
|
normal
|
{
"blob_id": "12ca9a81574d34d1004ac9ebcb2ee4b31d7171e2",
"index": 5623,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(variable_1, variable_2, variable_3, sep=', ')\nprint(variable_4, variable_5, sep=', ', end='!\\n')\n<mask token>\nprint(variable_4 + ', ' + user_name + '! ' + 'Ваш возраст: ' + user_age)\n",
"step-3": "variable_1 = 100\nvariable_2 = 500\nvariable_3 = 222.5\nvariable_4 = 'Hello'\nvariable_5 = 'world'\nprint(variable_1, variable_2, variable_3, sep=', ')\nprint(variable_4, variable_5, sep=', ', end='!\\n')\nuser_age = input('Введите ваш возраст: ')\nuser_name = input('Введите ваше имя: ')\nprint(variable_4 + ', ' + user_name + '! ' + 'Ваш возраст: ' + user_age)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class CopoChunkedUploadCompleteView(ChunkedUploadCompleteView):
    # MD5 verification of the assembled upload is deliberately disabled here;
    # hashing is performed separately (see hash_upload).
    do_md5_check = False

    def get_response_data(self, chunked_upload, request):
        """
        Data for the response. Should return a dictionary-like object.
        Called *only* if POST is successful.
        """
        # Shape mirrors the jQuery-File-Upload JSON contract: a 'files' entry
        # with name/id/size plus placeholder url/delete fields.
        files = {'files': {}}
        files['files']['name'] = chunked_upload.filename
        files['files']['id'] = chunked_upload.id
        # offset is the byte count received; report size in megabytes.
        files['files']['size'] = chunked_upload.offset / (1000 * 1000.0)
        files['files']['url'] = ''
        files['files']['thumbnailUrl'] = ''
        files['files']['deleteUrl'] = ''
        files['files']['deleteType'] = 'DELETE'
        # NOTE(review): this encodes `files` but the result is unused (and
        # shadows the builtin `str`) — looks like leftover debug code; confirm.
        str = jsonpickle.encode(files)
        return files
# Chunk-receiving endpoint: binds the generic ChunkedUploadView to the
# project's CopoChunkedUpload model; all behaviour comes from the base class.
class CopoChunkedUploadView(ChunkedUploadView):
    model = CopoChunkedUpload
    """
    """
class JSONResponse(HttpResponse):
    """
    An HttpResponse that renders its content into JSON.
    """

    def __init__(self, data, **kwargs):
        # Serialize with DRF's JSONRenderer and force the JSON content type,
        # overriding any caller-supplied value.
        content = JSONRenderer().render(data)
        kwargs['content_type'] = 'application/json'
        super(JSONResponse, self).__init__(content, **kwargs)
def receive_data_file(request):
    """Accept a whole (non-chunked) file POST, persist it under MEDIA_ROOT via
    a ChunkedUpload record marked complete, and return jQuery-File-Upload
    style JSON describing the stored file.

    NOTE(review): a non-POST request falls through and implicitly returns
    None — presumably never routed that way; confirm.
    """
    from django.utils import timezone
    if request.method == 'POST':
        c = {}
        f = request.FILES['file']
        fname = f.__str__()
        # Record is created already "completed": the whole file arrives in
        # this single request, so offset is simply the total size.
        attrs = {'user': request.user, 'filename': fname, 'completed_on':
            timezone.now(), 'offset': f.size}
        chunked_upload = ChunkedUpload(**attrs)
        # Save an empty file first so the model allocates a storage path.
        chunked_upload.file.save(name='', content=ContentFile(''), save=True)
        path = chunked_upload.file
        # Stream the uploaded chunks into the allocated file on disk.
        destination = open(os.path.join(settings.MEDIA_ROOT, path.file.name
            ), 'wb+')
        for chunk in f.chunks():
            destination.write(chunk)
        destination.close()
        c.update(csrf(request))
        # Response shape mirrors the jQuery-File-Upload JSON contract.
        files = {'files': {}}
        files['files']['name'] = f._name
        files['files']['size'] = path.size / (1000 * 1000.0)  # megabytes
        files['files']['id'] = chunked_upload.id
        files['files']['url'] = ''
        files['files']['thumbnailUrl'] = ''
        files['files']['deleteUrl'] = ''
        files['files']['deleteType'] = 'DELETE'
        # NOTE(review): local name shadows the builtin `str`.
        str = jsonpickle.encode(files)
        return HttpResponse(str, content_type='json')
def resume_chunked(request):
    """Look up an unfinished chunked upload of `filename` for the current
    user so the client can resume it.

    Returns the most-advanced (largest offset) incomplete record serialized
    as JSON, or an encoded empty string when there is nothing to resume.
    """
    file_name = request.GET.get('filename')
    user_id = request.user.id
    # Incomplete uploads only; take the one with the most bytes received.
    d = ChunkedUpload.objects.filter(completed_on__isnull=True, user_id=
        user_id, filename=file_name).order_by('-offset')[:1]
    if d:
        out = serializers.serialize('json', d)
        return HttpResponse(jsonpickle.encode(out))
    else:
        return HttpResponse(jsonpickle.encode(''))
<|reserved_special_token_0|>
def inspect_file(request):
    """Sniff the type of a completed upload, decide whether it should be
    gzip-compressed, persist the type on the ChunkedUpload record, and
    create/update the corresponding DataFile record via BrokerDA.

    Returns JSON: {'file_type': ..., 'do_compress': bool}.
    """
    output_dict = {'file_type': 'unknown', 'do_compress': False}
    file_id = request.GET['file_id']
    chunked_upload = ChunkedUpload.objects.get(id=int(file_id))
    file_name = os.path.join(settings.MEDIA_ROOT, chunked_upload.file.name)
    zip_threshold = 200000000  # bytes (~200 MB); large uncompressed files get gzipped
    is_zipped = u.is_gzipped(file_name)
    if chunked_upload.offset >= zip_threshold and not is_zipped:
        output_dict['do_compress'] = True
    if u.is_pdf_file(file_name):
        output_dict['file_type'] = 'pdf'
    else:
        try:
            # fastq is always compressed when not already gzipped; sam/bam
            # explicitly override any size-based decision made above.
            if u.is_fastq_file(file_name):
                output_dict['file_type'] = 'fastq'
                if not is_zipped:
                    output_dict['do_compress'] = True
            elif u.is_sam_file(file_name):
                output_dict['file_type'] = 'sam'
                if not is_zipped:
                    output_dict['do_compress'] = False
            elif u.is_bam_file(file_name):
                output_dict['file_type'] = 'bam'
                if not is_zipped:
                    output_dict['do_compress'] = False
            else:
                # Fall back to the first extension segment.
                # NOTE(review): rsplit('.')[1] is the segment after the FIRST
                # dot and raises IndexError for extensionless names (caught
                # below) — [-1] may have been intended; confirm.
                output_dict['file_type'] = chunked_upload.filename.rsplit('.')[
                    1]
        except:
            # Bare except: any sniffing failure degrades to 'unknown'.
            output_dict['file_type'] = 'unknown'
    chunked_upload.type = output_dict['file_type']
    chunked_upload.save()
    profile_id = request.session['profile_id']
    component = 'datafile'
    # Assemble the DataFile fields to persist through the broker.
    auto_fields = dict()
    auto_fields[DataFile().get_qualified_field('file_id')] = file_id
    auto_fields[DataFile().get_qualified_field('file_type')] = output_dict[
        'file_type']
    auto_fields[DataFile().get_qualified_field('file_location')] = file_name
    auto_fields[DataFile().get_qualified_field('file_size')
        ] = u.filesize_toString(chunked_upload.offset)
    auto_fields[DataFile().get_qualified_field('name')
        ] = chunked_upload.filename
    # Pull the schema's default 'type' value, if the schema defines one.
    # NOTE(review): local name shadows the builtin `type`.
    type = [f for f in d_utils.get_copo_schema(component) if f.get('id').
        split('.')[-1] == 'type']
    if type:
        type = type[0]['default_value']
    auto_fields[DataFile().get_qualified_field('type')] = type
    # Persist via the broker; `df` (the saved record) is currently unused.
    df = BrokerDA(context=dict(), profile_id=profile_id, component=
        component, auto_fields=auto_fields, visualize='last_record'
        ).do_save_edit().get('record_object', dict())
    out = jsonpickle.encode(output_dict)
    return HttpResponse(out, content_type='json')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CopoChunkedUploadCompleteView(ChunkedUploadCompleteView):
do_md5_check = False
def get_response_data(self, chunked_upload, request):
"""
Data for the response. Should return a dictionary-like object.
Called *only* if POST is successful.
"""
files = {'files': {}}
files['files']['name'] = chunked_upload.filename
files['files']['id'] = chunked_upload.id
files['files']['size'] = chunked_upload.offset / (1000 * 1000.0)
files['files']['url'] = ''
files['files']['thumbnailUrl'] = ''
files['files']['deleteUrl'] = ''
files['files']['deleteType'] = 'DELETE'
str = jsonpickle.encode(files)
return files
class CopoChunkedUploadView(ChunkedUploadView):
model = CopoChunkedUpload
"""
"""
class JSONResponse(HttpResponse):
"""
An HttpResponse that renders its content into JSON.
"""
def __init__(self, data, **kwargs):
content = JSONRenderer().render(data)
kwargs['content_type'] = 'application/json'
super(JSONResponse, self).__init__(content, **kwargs)
def receive_data_file(request):
from django.utils import timezone
if request.method == 'POST':
c = {}
f = request.FILES['file']
fname = f.__str__()
attrs = {'user': request.user, 'filename': fname, 'completed_on':
timezone.now(), 'offset': f.size}
chunked_upload = ChunkedUpload(**attrs)
chunked_upload.file.save(name='', content=ContentFile(''), save=True)
path = chunked_upload.file
destination = open(os.path.join(settings.MEDIA_ROOT, path.file.name
), 'wb+')
for chunk in f.chunks():
destination.write(chunk)
destination.close()
c.update(csrf(request))
files = {'files': {}}
files['files']['name'] = f._name
files['files']['size'] = path.size / (1000 * 1000.0)
files['files']['id'] = chunked_upload.id
files['files']['url'] = ''
files['files']['thumbnailUrl'] = ''
files['files']['deleteUrl'] = ''
files['files']['deleteType'] = 'DELETE'
str = jsonpickle.encode(files)
return HttpResponse(str, content_type='json')
def resume_chunked(request):
file_name = request.GET.get('filename')
user_id = request.user.id
d = ChunkedUpload.objects.filter(completed_on__isnull=True, user_id=
user_id, filename=file_name).order_by('-offset')[:1]
if d:
out = serializers.serialize('json', d)
return HttpResponse(jsonpickle.encode(out))
else:
return HttpResponse(jsonpickle.encode(''))
<|reserved_special_token_0|>
def hash_upload(request):
    """Compute the MD5 of a stored upload in 8 KiB chunks, save it on the
    ChunkedUpload record, mirror it into the DataFile record via BrokerDA,
    and return {'output_hash': ..., 'file_id': ...} as JSON.
    """
    file_id = request.GET['file_id']
    print('hash started ' + file_id)
    file_obj = ChunkedUpload.objects.get(pk=file_id)
    file_name = os.path.join(settings.MEDIA_ROOT, file_obj.file.name)
    # Stream the file through MD5 in 8 KiB chunks to bound memory use.
    md5 = hashlib.md5()
    with open(file_name, 'rb') as f:
        for chunk in iter(lambda : f.read(8192), b''):
            md5.update(chunk)
    file_obj.hash = md5.hexdigest()
    file_obj.save()
    output_dict = {'output_hash': md5.hexdigest(), 'file_id': file_id}
    record_object = DataFile().get_by_file_id(file_id)
    # Push the hash into the DataFile record through the broker layer.
    auto_fields = dict()
    auto_fields[DataFile().get_qualified_field('file_hash')] = file_obj.hash
    profile_id = request.session['profile_id']
    component = 'datafile'
    BrokerDA(target_id=str(record_object.get('_id', str())), component=
        component, auto_fields=auto_fields).do_save_edit()
def inspect_file(request):
output_dict = {'file_type': 'unknown', 'do_compress': False}
file_id = request.GET['file_id']
chunked_upload = ChunkedUpload.objects.get(id=int(file_id))
file_name = os.path.join(settings.MEDIA_ROOT, chunked_upload.file.name)
zip_threshold = 200000000
is_zipped = u.is_gzipped(file_name)
if chunked_upload.offset >= zip_threshold and not is_zipped:
output_dict['do_compress'] = True
if u.is_pdf_file(file_name):
output_dict['file_type'] = 'pdf'
else:
try:
if u.is_fastq_file(file_name):
output_dict['file_type'] = 'fastq'
if not is_zipped:
output_dict['do_compress'] = True
elif u.is_sam_file(file_name):
output_dict['file_type'] = 'sam'
if not is_zipped:
output_dict['do_compress'] = False
elif u.is_bam_file(file_name):
output_dict['file_type'] = 'bam'
if not is_zipped:
output_dict['do_compress'] = False
else:
output_dict['file_type'] = chunked_upload.filename.rsplit('.')[
1]
except:
output_dict['file_type'] = 'unknown'
chunked_upload.type = output_dict['file_type']
chunked_upload.save()
profile_id = request.session['profile_id']
component = 'datafile'
auto_fields = dict()
auto_fields[DataFile().get_qualified_field('file_id')] = file_id
auto_fields[DataFile().get_qualified_field('file_type')] = output_dict[
'file_type']
auto_fields[DataFile().get_qualified_field('file_location')] = file_name
auto_fields[DataFile().get_qualified_field('file_size')
] = u.filesize_toString(chunked_upload.offset)
auto_fields[DataFile().get_qualified_field('name')
] = chunked_upload.filename
type = [f for f in d_utils.get_copo_schema(component) if f.get('id').
split('.')[-1] == 'type']
if type:
type = type[0]['default_value']
auto_fields[DataFile().get_qualified_field('type')] = type
df = BrokerDA(context=dict(), profile_id=profile_id, component=
component, auto_fields=auto_fields, visualize='last_record'
).do_save_edit().get('record_object', dict())
out = jsonpickle.encode(output_dict)
return HttpResponse(out, content_type='json')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CopoChunkedUploadCompleteView(ChunkedUploadCompleteView):
do_md5_check = False
def get_response_data(self, chunked_upload, request):
"""
Data for the response. Should return a dictionary-like object.
Called *only* if POST is successful.
"""
files = {'files': {}}
files['files']['name'] = chunked_upload.filename
files['files']['id'] = chunked_upload.id
files['files']['size'] = chunked_upload.offset / (1000 * 1000.0)
files['files']['url'] = ''
files['files']['thumbnailUrl'] = ''
files['files']['deleteUrl'] = ''
files['files']['deleteType'] = 'DELETE'
str = jsonpickle.encode(files)
return files
class CopoChunkedUploadView(ChunkedUploadView):
model = CopoChunkedUpload
"""
"""
class JSONResponse(HttpResponse):
"""
An HttpResponse that renders its content into JSON.
"""
def __init__(self, data, **kwargs):
content = JSONRenderer().render(data)
kwargs['content_type'] = 'application/json'
super(JSONResponse, self).__init__(content, **kwargs)
def receive_data_file(request):
    """Receive a small (non-chunked) file upload and write it to disk.

    Files below the chunking threshold are posted here in a single request;
    larger files go through the ChunkedUpload view instead. A ChunkedUpload
    record (already marked completed) is created so both upload paths are
    represented uniformly. Returns JSON in the structure expected by the
    jQuery-File-Upload widget.
    """
    from django.utils import timezone
    if request.method == 'POST':
        f = request.FILES['file']
        fname = str(f)
        # The record is complete on creation: the whole file is in this request.
        attrs = {'user': request.user, 'filename': fname, 'completed_on':
            timezone.now(), 'offset': f.size}
        chunked_upload = ChunkedUpload(**attrs)
        # Save empty content first so Django allocates a storage path.
        chunked_upload.file.save(name='', content=ContentFile(''), save=True)
        path = chunked_upload.file
        # 'with' guarantees the handle is closed even if a chunk write fails
        # (the original left it open on error).
        with open(os.path.join(settings.MEDIA_ROOT, path.file.name), 'wb+') as destination:
            for chunk in f.chunks():
                destination.write(chunk)
        # Response structure expected by the jQuery-File-Upload widget.
        files = {'files': {}}
        files['files']['name'] = f._name
        files['files']['size'] = path.size / (1000 * 1000.0)
        files['files']['id'] = chunked_upload.id
        files['files']['url'] = ''
        files['files']['thumbnailUrl'] = ''
        files['files']['deleteUrl'] = ''
        files['files']['deleteType'] = 'DELETE'
        payload = jsonpickle.encode(files)
        return HttpResponse(payload, content_type='json')
def resume_chunked(request):
    """Return the most advanced incomplete upload for this user/filename.

    Lets the frontend resume an interrupted chunked upload from the largest
    recorded offset. Responds with the serialised record, or an empty string
    when no partial upload exists.
    """
    target_name = request.GET.get('filename')
    uploader = request.user.id
    # Pick the partial upload with the most bytes already received.
    partial = ChunkedUpload.objects.filter(
        completed_on__isnull=True, user_id=uploader, filename=target_name
    ).order_by('-offset')[:1]
    if not partial:
        return HttpResponse(jsonpickle.encode(''))
    serialised = serializers.serialize('json', partial)
    return HttpResponse(jsonpickle.encode(serialised))
<|reserved_special_token_0|>
def hash_upload(request):
    """Compute the md5 of an uploaded file and store it on both records.

    Reads the file identified by ``file_id`` in 8 KB chunks (so arbitrarily
    large files are hashed without loading them into memory), saves the
    digest on the ChunkedUpload row and on the corresponding DataFile
    document in mongo, then returns the digest as JSON.
    """
    file_id = request.GET['file_id']
    print('hash started ' + file_id)
    file_obj = ChunkedUpload.objects.get(pk=file_id)
    file_name = os.path.join(settings.MEDIA_ROOT, file_obj.file.name)
    # Hash incrementally; iter() with a sentinel stops at EOF (empty read).
    md5 = hashlib.md5()
    with open(file_name, 'rb') as f:
        for chunk in iter(lambda: f.read(8192), b''):
            md5.update(chunk)
    file_obj.hash = md5.hexdigest()
    file_obj.save()
    output_dict = {'output_hash': md5.hexdigest(), 'file_id': file_id}
    # Mirror the hash onto the mongo DataFile record.
    record_object = DataFile().get_by_file_id(file_id)
    auto_fields = dict()
    auto_fields[DataFile().get_qualified_field('file_hash')] = file_obj.hash
    component = 'datafile'
    BrokerDA(target_id=str(record_object.get('_id', str())), component=
        component, auto_fields=auto_fields).do_save_edit()
    out = json.dumps(output_dict)
    print('hash complete ' + file_id)
    return HttpResponse(out, content_type='json')
def inspect_file(request):
    """Examine an uploaded file and return its type and compression advice.

    Determines the file type (pdf/fastq/sam/bam or, failing that, the file
    extension), decides whether the frontend should gzip it before
    submission, records the outcome on the ChunkedUpload row and on the
    DataFile document in mongo, and returns the findings as JSON.
    """
    output_dict = {'file_type': 'unknown', 'do_compress': False}
    file_id = request.GET['file_id']
    chunked_upload = ChunkedUpload.objects.get(id=int(file_id))
    file_name = os.path.join(settings.MEDIA_ROOT, chunked_upload.file.name)
    # Size threshold (bytes) above which an uncompressed file should be gzipped.
    zip_threshold = 200000000
    is_zipped = u.is_gzipped(file_name)
    if chunked_upload.offset >= zip_threshold and not is_zipped:
        output_dict['do_compress'] = True
    if u.is_pdf_file(file_name):
        output_dict['file_type'] = 'pdf'
    else:
        try:
            if u.is_fastq_file(file_name):
                output_dict['file_type'] = 'fastq'
                if not is_zipped:
                    output_dict['do_compress'] = True
            elif u.is_sam_file(file_name):
                output_dict['file_type'] = 'sam'
                if not is_zipped:
                    output_dict['do_compress'] = False
            elif u.is_bam_file(file_name):
                output_dict['file_type'] = 'bam'
                if not is_zipped:
                    output_dict['do_compress'] = False
            else:
                # Fall back to the filename extension. rsplit with maxsplit=1
                # yields the final extension (the original rsplit('.')[1]
                # picked the *second* component, e.g. 'tar' for 'x.tar.gz').
                output_dict['file_type'] = chunked_upload.filename.rsplit('.', 1)[1]
        except Exception:
            # Sniffing helpers may fail on exotic content; a dot-less
            # filename also lands here (IndexError) -> type stays 'unknown'.
            output_dict['file_type'] = 'unknown'
    chunked_upload.type = output_dict['file_type']
    chunked_upload.save()
    # Persist the gathered metadata as a DataFile record in mongo.
    profile_id = request.session['profile_id']
    component = 'datafile'
    auto_fields = dict()
    auto_fields[DataFile().get_qualified_field('file_id')] = file_id
    auto_fields[DataFile().get_qualified_field('file_type')] = output_dict['file_type']
    auto_fields[DataFile().get_qualified_field('file_location')] = file_name
    auto_fields[DataFile().get_qualified_field('file_size')] = u.filesize_toString(chunked_upload.offset)
    auto_fields[DataFile().get_qualified_field('name')] = chunked_upload.filename
    # Seed the record's 'type' field with the schema's default value.
    type = [f for f in d_utils.get_copo_schema(component) if f.get('id').
        split('.')[-1] == 'type']
    if type:
        type = type[0]['default_value']
        auto_fields[DataFile().get_qualified_field('type')] = type
    df = BrokerDA(context=dict(), profile_id=profile_id, component=
        component, auto_fields=auto_fields, visualize='last_record'
        ).do_save_edit().get('record_object', dict())
    out = jsonpickle.encode(output_dict)
    return HttpResponse(out, content_type='json')
def zip_file(request):
    """Gzip a previously uploaded file in place and update its records.

    Compresses the file referenced by ``file_id`` into a temp file, swaps it
    in for the original (adding a '.gz' suffix), refreshes the size/name on
    the ChunkedUpload row and on the DataFile document in mongo, and returns
    the new name/size as JSON.
    """
    file_id = request.GET['file_id']
    print('zip started ' + file_id)
    file_obj = ChunkedUpload.objects.get(pk=file_id)
    output_file_location = os.path.join(settings.MEDIA_ROOT, file_obj.file.name)
    output_file_name = file_obj.filename + '.gz'
    # Compress into a temp file first so a failure leaves the original intact.
    temp_name = os.path.join(settings.MEDIA_ROOT, str(uuid.uuid4()) + '.tmp')
    # Read the source in binary: uploads may not be valid UTF-8 text (the
    # original opened in text mode and re-encoded, which could crash).
    # Context managers close both handles even on error; the original's
    # try/finally raised NameError when gzip.open itself failed.
    n = 100000000  # compress in 100 MB chunks to bound memory use
    with open(output_file_location, 'rb') as src, \
            gzip.open(temp_name, 'wb', compresslevel=1) as myzip:
        for chunk in iter(lambda: src.read(n), b''):
            myzip.write(chunk)
    print('zip complete ' + file_id)
    # Replace the original file with the compressed one.
    new_file_name = output_file_location + '.gz'
    os.rename(temp_name, new_file_name)
    os.remove(output_file_location)
    stats = os.stat(new_file_name)
    new_file_size = stats.st_size / 1000 / 1000
    file_obj.filename = output_file_name
    file_obj.file.name = new_file_name
    file_obj.offset = stats.st_size
    file_obj.save()
    out = {'zipped': True, 'file_name': output_file_name, 'file_size': new_file_size}
    # Mirror the new size/name/location onto the mongo DataFile record.
    record_object = DataFile().get_by_file_id(file_id)
    auto_fields = dict()
    auto_fields[DataFile().get_qualified_field('file_size')] = u.filesize_toString(file_obj.offset)
    auto_fields[DataFile().get_qualified_field('name')] = output_file_name
    auto_fields[DataFile().get_qualified_field('file_location')] = new_file_name
    component = 'datafile'
    BrokerDA(target_id=str(record_object.get('_id', str())), component=
        component, auto_fields=auto_fields).do_save_edit()
    out = jsonpickle.encode(out)
    return HttpResponse(out, content_type='json')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CopoChunkedUploadCompleteView(ChunkedUploadCompleteView):
    """Completion endpoint for chunked uploads.

    Invoked once all chunks of a file have been received. Skips the md5
    integrity check and returns metadata in the structure expected by the
    jQuery-File-Upload widget.
    """
    # Skip server-side md5 verification of the assembled file.
    do_md5_check = False

    def get_response_data(self, chunked_upload, request):
        """
        Data for the response. Should return a dictionary-like object.
        Called *only* if POST is successful.
        """
        files = {'files': {}}
        files['files']['name'] = chunked_upload.filename
        files['files']['id'] = chunked_upload.id
        # 'offset' is the number of bytes received; report the size in MB.
        files['files']['size'] = chunked_upload.offset / (1000 * 1000.0)
        # URL/thumbnail/delete fields are unused here but expected by the widget.
        files['files']['url'] = ''
        files['files']['thumbnailUrl'] = ''
        files['files']['deleteUrl'] = ''
        files['files']['deleteType'] = 'DELETE'
        return files
class CopoChunkedUploadView(ChunkedUploadView):
    # Chunk-receiving endpoint; chunks are persisted via the COPO-specific model.
    model = CopoChunkedUpload
    """
    """
class JSONResponse(HttpResponse):
    """
    An HttpResponse that renders its content into JSON.
    """

    def __init__(self, data, **kwargs):
        # Render first, then hand the JSON bytes to HttpResponse.
        rendered = JSONRenderer().render(data)
        kwargs['content_type'] = 'application/json'
        super(JSONResponse, self).__init__(rendered, **kwargs)
def receive_data_file(request):
    """Receive a small (non-chunked) file upload and write it to disk.

    Files below the chunking threshold are posted here in a single request;
    larger files go through the ChunkedUpload view instead. A ChunkedUpload
    record (already marked completed) is created so both upload paths are
    represented uniformly. Returns JSON in the structure expected by the
    jQuery-File-Upload widget.
    """
    from django.utils import timezone
    if request.method == 'POST':
        f = request.FILES['file']
        fname = str(f)
        # The record is complete on creation: the whole file is in this request.
        attrs = {'user': request.user, 'filename': fname, 'completed_on':
            timezone.now(), 'offset': f.size}
        chunked_upload = ChunkedUpload(**attrs)
        # Save empty content first so Django allocates a storage path.
        chunked_upload.file.save(name='', content=ContentFile(''), save=True)
        path = chunked_upload.file
        # 'with' guarantees the handle is closed even if a chunk write fails
        # (the original left it open on error).
        with open(os.path.join(settings.MEDIA_ROOT, path.file.name), 'wb+') as destination:
            for chunk in f.chunks():
                destination.write(chunk)
        # Response structure expected by the jQuery-File-Upload widget.
        files = {'files': {}}
        files['files']['name'] = f._name
        files['files']['size'] = path.size / (1000 * 1000.0)
        files['files']['id'] = chunked_upload.id
        files['files']['url'] = ''
        files['files']['thumbnailUrl'] = ''
        files['files']['deleteUrl'] = ''
        files['files']['deleteType'] = 'DELETE'
        payload = jsonpickle.encode(files)
        return HttpResponse(payload, content_type='json')
def resume_chunked(request):
    """Return the most advanced incomplete upload for this user/filename.

    Lets the frontend resume an interrupted chunked upload from the largest
    recorded offset. Responds with the serialised record, or an empty string
    when no partial upload exists.
    """
    target_name = request.GET.get('filename')
    uploader = request.user.id
    # Pick the partial upload with the most bytes already received.
    partial = ChunkedUpload.objects.filter(
        completed_on__isnull=True, user_id=uploader, filename=target_name
    ).order_by('-offset')[:1]
    if not partial:
        return HttpResponse(jsonpickle.encode(''))
    serialised = serializers.serialize('json', partial)
    return HttpResponse(jsonpickle.encode(serialised))
def get_partial_uploads(request):
    """List all of the current user's incomplete uploads, oldest first."""
    uploader = request.user.id
    pending = ChunkedUpload.objects.filter(
        completed_on__isnull=True, user_id=uploader
    ).order_by('created_on')
    if not pending:
        return HttpResponse(jsonpickle.encode(''))
    return HttpResponse(jsonpickle.encode(serializers.serialize('json', pending)))
def hash_upload(request):
    """Compute the md5 of an uploaded file and store it on both records.

    Reads the file identified by ``file_id`` in 8 KB chunks (so arbitrarily
    large files are hashed without loading them into memory), saves the
    digest on the ChunkedUpload row and on the corresponding DataFile
    document in mongo, then returns the digest as JSON.
    """
    file_id = request.GET['file_id']
    print('hash started ' + file_id)
    file_obj = ChunkedUpload.objects.get(pk=file_id)
    file_name = os.path.join(settings.MEDIA_ROOT, file_obj.file.name)
    # Hash incrementally; iter() with a sentinel stops at EOF (empty read).
    md5 = hashlib.md5()
    with open(file_name, 'rb') as f:
        for chunk in iter(lambda: f.read(8192), b''):
            md5.update(chunk)
    file_obj.hash = md5.hexdigest()
    file_obj.save()
    output_dict = {'output_hash': md5.hexdigest(), 'file_id': file_id}
    # Mirror the hash onto the mongo DataFile record.
    record_object = DataFile().get_by_file_id(file_id)
    auto_fields = dict()
    auto_fields[DataFile().get_qualified_field('file_hash')] = file_obj.hash
    component = 'datafile'
    BrokerDA(target_id=str(record_object.get('_id', str())), component=
        component, auto_fields=auto_fields).do_save_edit()
    out = json.dumps(output_dict)
    print('hash complete ' + file_id)
    return HttpResponse(out, content_type='json')
def inspect_file(request):
    """Examine an uploaded file and return its type and compression advice.

    Determines the file type (pdf/fastq/sam/bam or, failing that, the file
    extension), decides whether the frontend should gzip it before
    submission, records the outcome on the ChunkedUpload row and on the
    DataFile document in mongo, and returns the findings as JSON.
    """
    output_dict = {'file_type': 'unknown', 'do_compress': False}
    file_id = request.GET['file_id']
    chunked_upload = ChunkedUpload.objects.get(id=int(file_id))
    file_name = os.path.join(settings.MEDIA_ROOT, chunked_upload.file.name)
    # Size threshold (bytes) above which an uncompressed file should be gzipped.
    zip_threshold = 200000000
    is_zipped = u.is_gzipped(file_name)
    if chunked_upload.offset >= zip_threshold and not is_zipped:
        output_dict['do_compress'] = True
    if u.is_pdf_file(file_name):
        output_dict['file_type'] = 'pdf'
    else:
        try:
            if u.is_fastq_file(file_name):
                output_dict['file_type'] = 'fastq'
                if not is_zipped:
                    output_dict['do_compress'] = True
            elif u.is_sam_file(file_name):
                output_dict['file_type'] = 'sam'
                if not is_zipped:
                    output_dict['do_compress'] = False
            elif u.is_bam_file(file_name):
                output_dict['file_type'] = 'bam'
                if not is_zipped:
                    output_dict['do_compress'] = False
            else:
                # Fall back to the filename extension. rsplit with maxsplit=1
                # yields the final extension (the original rsplit('.')[1]
                # picked the *second* component, e.g. 'tar' for 'x.tar.gz').
                output_dict['file_type'] = chunked_upload.filename.rsplit('.', 1)[1]
        except Exception:
            # Sniffing helpers may fail on exotic content; a dot-less
            # filename also lands here (IndexError) -> type stays 'unknown'.
            output_dict['file_type'] = 'unknown'
    chunked_upload.type = output_dict['file_type']
    chunked_upload.save()
    # Persist the gathered metadata as a DataFile record in mongo.
    profile_id = request.session['profile_id']
    component = 'datafile'
    auto_fields = dict()
    auto_fields[DataFile().get_qualified_field('file_id')] = file_id
    auto_fields[DataFile().get_qualified_field('file_type')] = output_dict['file_type']
    auto_fields[DataFile().get_qualified_field('file_location')] = file_name
    auto_fields[DataFile().get_qualified_field('file_size')] = u.filesize_toString(chunked_upload.offset)
    auto_fields[DataFile().get_qualified_field('name')] = chunked_upload.filename
    # Seed the record's 'type' field with the schema's default value.
    type = [f for f in d_utils.get_copo_schema(component) if f.get('id').
        split('.')[-1] == 'type']
    if type:
        type = type[0]['default_value']
        auto_fields[DataFile().get_qualified_field('type')] = type
    df = BrokerDA(context=dict(), profile_id=profile_id, component=
        component, auto_fields=auto_fields, visualize='last_record'
        ).do_save_edit().get('record_object', dict())
    out = jsonpickle.encode(output_dict)
    return HttpResponse(out, content_type='json')
def zip_file(request):
    """Gzip a previously uploaded file in place and update its records.

    Compresses the file referenced by ``file_id`` into a temp file, swaps it
    in for the original (adding a '.gz' suffix), refreshes the size/name on
    the ChunkedUpload row and on the DataFile document in mongo, and returns
    the new name/size as JSON.
    """
    file_id = request.GET['file_id']
    print('zip started ' + file_id)
    file_obj = ChunkedUpload.objects.get(pk=file_id)
    output_file_location = os.path.join(settings.MEDIA_ROOT, file_obj.file.name)
    output_file_name = file_obj.filename + '.gz'
    # Compress into a temp file first so a failure leaves the original intact.
    temp_name = os.path.join(settings.MEDIA_ROOT, str(uuid.uuid4()) + '.tmp')
    # Read the source in binary: uploads may not be valid UTF-8 text (the
    # original opened in text mode and re-encoded, which could crash).
    # Context managers close both handles even on error; the original's
    # try/finally raised NameError when gzip.open itself failed.
    n = 100000000  # compress in 100 MB chunks to bound memory use
    with open(output_file_location, 'rb') as src, \
            gzip.open(temp_name, 'wb', compresslevel=1) as myzip:
        for chunk in iter(lambda: src.read(n), b''):
            myzip.write(chunk)
    print('zip complete ' + file_id)
    # Replace the original file with the compressed one.
    new_file_name = output_file_location + '.gz'
    os.rename(temp_name, new_file_name)
    os.remove(output_file_location)
    stats = os.stat(new_file_name)
    new_file_size = stats.st_size / 1000 / 1000
    file_obj.filename = output_file_name
    file_obj.file.name = new_file_name
    file_obj.offset = stats.st_size
    file_obj.save()
    out = {'zipped': True, 'file_name': output_file_name, 'file_size': new_file_size}
    # Mirror the new size/name/location onto the mongo DataFile record.
    record_object = DataFile().get_by_file_id(file_id)
    auto_fields = dict()
    auto_fields[DataFile().get_qualified_field('file_size')] = u.filesize_toString(file_obj.offset)
    auto_fields[DataFile().get_qualified_field('name')] = output_file_name
    auto_fields[DataFile().get_qualified_field('file_location')] = new_file_name
    component = 'datafile'
    BrokerDA(target_id=str(record_object.get('_id', str())), component=
        component, auto_fields=auto_fields).do_save_edit()
    out = jsonpickle.encode(out)
    return HttpResponse(out, content_type='json')
<|reserved_special_token_1|>
__author__ = 'fshaw'
import gzip
import hashlib
import os
import uuid
import json
import jsonpickle
from chunked_upload.models import ChunkedUpload
from chunked_upload.views import ChunkedUploadView, ChunkedUploadCompleteView
from django.conf import settings
from django.core import serializers
from django.core.files.base import ContentFile
from django.http import HttpResponse
from django.template.context_processors import csrf
from rest_framework.renderers import JSONRenderer
import web.apps.web_copo.schemas.utils.data_utils as d_utils
import web.apps.web_copo.utils.EnaUtils as u
from dal.broker_da import BrokerDA
from dal.copo_da import DataFile
from web.apps.web_copo.rest.models import CopoChunkedUpload
class CopoChunkedUploadCompleteView(ChunkedUploadCompleteView):
    """Completion endpoint for chunked uploads.

    Invoked once all chunks of a file have been received. Skips the md5
    integrity check and returns metadata in the structure expected by the
    jQuery-File-Upload widget.
    """
    # Skip server-side md5 verification of the assembled file.
    do_md5_check = False

    def get_response_data(self, chunked_upload, request):
        """
        Data for the response. Should return a dictionary-like object.
        Called *only* if POST is successful.
        """
        files = {'files': {}}
        files['files']['name'] = chunked_upload.filename
        files['files']['id'] = chunked_upload.id
        # 'offset' is the number of bytes received; report the size in MB.
        files['files']['size'] = chunked_upload.offset / (1000 * 1000.0)
        # URL/thumbnail/delete fields are unused here but expected by the widget.
        files['files']['url'] = ''
        files['files']['thumbnailUrl'] = ''
        files['files']['deleteUrl'] = ''
        files['files']['deleteType'] = 'DELETE'
        return files
class CopoChunkedUploadView(ChunkedUploadView):
    # Chunk-receiving endpoint; chunks are persisted via the COPO-specific model.
    model = CopoChunkedUpload
    '''
    '''
class JSONResponse(HttpResponse):
    """
    An HttpResponse that renders its content into JSON.
    """

    def __init__(self, data, **kwargs):
        # Render first, then hand the JSON bytes to HttpResponse.
        rendered = JSONRenderer().render(data)
        kwargs['content_type'] = 'application/json'
        super(JSONResponse, self).__init__(rendered, **kwargs)
def receive_data_file(request):
    """Receive a small (non-chunked) file upload and write it to disk.

    Files at or below 260MB are posted here in a single request; larger
    files go through the ChunkedUpload view instead. A ChunkedUpload record
    (already marked completed) is created so both upload paths are
    represented uniformly. Returns JSON in the structure expected by the
    jQuery-File-Upload widget.
    """
    from django.utils import timezone
    if request.method == 'POST':
        f = request.FILES['file']
        fname = str(f)
        # The record is complete on creation: the whole file is in this request.
        attrs = {'user': request.user, 'filename': fname, 'completed_on':
            timezone.now(), 'offset': f.size}
        chunked_upload = ChunkedUpload(**attrs)
        # Save empty content first so Django allocates a storage path.
        chunked_upload.file.save(name='', content=ContentFile(''), save=True)
        path = chunked_upload.file
        # 'with' guarantees the handle is closed even if a chunk write fails
        # (the original left it open on error).
        with open(os.path.join(settings.MEDIA_ROOT, path.file.name), 'wb+') as destination:
            for chunk in f.chunks():
                destination.write(chunk)
        # Response structure expected by the jQuery-File-Upload widget.
        files = {'files': {}}
        files['files']['name'] = f._name
        files['files']['size'] = path.size / (1000 * 1000.0)
        files['files']['id'] = chunked_upload.id
        files['files']['url'] = ''
        files['files']['thumbnailUrl'] = ''
        files['files']['deleteUrl'] = ''
        files['files']['deleteType'] = 'DELETE'
        payload = jsonpickle.encode(files)
        return HttpResponse(payload, content_type='json')
def resume_chunked(request):
    """Return the most advanced incomplete upload for this user/filename.

    Lets the frontend resume an interrupted chunked upload from the largest
    recorded offset. Responds with the serialised record, or an empty string
    when no partial upload exists.
    """
    target_name = request.GET.get('filename')
    uploader = request.user.id
    # Pick the partial upload with the most bytes already received.
    partial = ChunkedUpload.objects.filter(
        completed_on__isnull=True, user_id=uploader, filename=target_name
    ).order_by('-offset')[:1]
    if not partial:
        return HttpResponse(jsonpickle.encode(''))
    serialised = serializers.serialize('json', partial)
    return HttpResponse(jsonpickle.encode(serialised))
def get_partial_uploads(request):
    """List all of the current user's incomplete uploads, oldest first."""
    uploader = request.user.id
    pending = ChunkedUpload.objects.filter(
        completed_on__isnull=True, user_id=uploader
    ).order_by('created_on')
    if not pending:
        return HttpResponse(jsonpickle.encode(''))
    return HttpResponse(jsonpickle.encode(serializers.serialize('json', pending)))
def hash_upload(request):
    """Compute the md5 of an uploaded file and store it on both records.

    Reads the file identified by ``file_id`` in 8 KB chunks (so arbitrarily
    large files are hashed without loading them into memory), saves the
    digest on the ChunkedUpload row and on the corresponding DataFile
    document in mongo, then returns the digest as JSON.
    """
    file_id = request.GET['file_id']
    print('hash started ' + file_id)
    file_obj = ChunkedUpload.objects.get(pk=file_id)
    file_name = os.path.join(settings.MEDIA_ROOT, file_obj.file.name)
    # Hash incrementally; iter() with a sentinel stops at EOF (empty read).
    md5 = hashlib.md5()
    with open(file_name, 'rb') as f:
        for chunk in iter(lambda: f.read(8192), b''):
            md5.update(chunk)
    file_obj.hash = md5.hexdigest()
    file_obj.save()
    output_dict = {'output_hash': md5.hexdigest(), 'file_id': file_id}
    # Mirror the hash onto the mongo DataFile record.
    record_object = DataFile().get_by_file_id(file_id)
    auto_fields = dict()
    auto_fields[DataFile().get_qualified_field("file_hash")] = file_obj.hash
    component = "datafile"
    BrokerDA(target_id=str(record_object.get("_id", str())),
             component=component,
             auto_fields=auto_fields
             ).do_save_edit()
    out = json.dumps(output_dict)
    print('hash complete ' + file_id)
    return HttpResponse(out, content_type='json')
def inspect_file(request):
    """Examine an uploaded file and return its type and compression advice.

    Determines the file type (pdf/fastq/sam/bam or, failing that, the file
    extension), decides whether the frontend should gzip it before
    submission, records the outcome on the ChunkedUpload row and on the
    DataFile document in mongo, and returns the findings as JSON.
    """
    output_dict = {'file_type': 'unknown', 'do_compress': False}
    # get reference to file
    file_id = request.GET['file_id']
    chunked_upload = ChunkedUpload.objects.get(id=int(file_id))
    file_name = os.path.join(settings.MEDIA_ROOT, chunked_upload.file.name)
    # size threshold to determine if a file should be compressed
    zip_threshold = 200000000  # size in bytes
    # check if file is compressed
    is_zipped = u.is_gzipped(file_name)
    if chunked_upload.offset >= zip_threshold and not is_zipped:
        output_dict['do_compress'] = True
    # check for file type
    if u.is_pdf_file(file_name):
        output_dict['file_type'] = 'pdf'
    else:
        try:
            if u.is_fastq_file(file_name):
                output_dict['file_type'] = 'fastq'
                if not is_zipped:
                    output_dict['do_compress'] = True
            elif u.is_sam_file(file_name):
                output_dict['file_type'] = 'sam'
                if not is_zipped:
                    output_dict['do_compress'] = False
            elif u.is_bam_file(file_name):
                output_dict['file_type'] = 'bam'
                if not is_zipped:
                    output_dict['do_compress'] = False
            else:
                # Fall back to the filename extension. rsplit with maxsplit=1
                # yields the final extension (the original rsplit('.')[1]
                # picked the *second* component, e.g. 'tar' for 'x.tar.gz').
                output_dict['file_type'] = chunked_upload.filename.rsplit('.', 1)[1]
        except Exception:
            # Sniffing helpers may fail on exotic content; a dot-less
            # filename also lands here (IndexError) -> type stays 'unknown'.
            output_dict['file_type'] = 'unknown'
    # add datafile schema
    chunked_upload.type = output_dict['file_type']
    chunked_upload.save()
    # ...and obtain the inserted record
    profile_id = request.session['profile_id']
    component = "datafile"
    auto_fields = dict()
    auto_fields[DataFile().get_qualified_field("file_id")] = file_id
    auto_fields[DataFile().get_qualified_field("file_type")] = output_dict['file_type']
    auto_fields[DataFile().get_qualified_field("file_location")] = file_name
    auto_fields[DataFile().get_qualified_field("file_size")] = u.filesize_toString(chunked_upload.offset)
    auto_fields[DataFile().get_qualified_field("name")] = chunked_upload.filename
    # get default type from schema
    type = [f for f in d_utils.get_copo_schema(component) if f.get("id").split(".")[-1] == "type"]
    if type:
        type = type[0]["default_value"]
        auto_fields[DataFile().get_qualified_field("type")] = type
    df = BrokerDA(context=dict(),
                  profile_id=profile_id,
                  component=component,
                  auto_fields=auto_fields,
                  visualize="last_record"
                  ).do_save_edit().get("record_object", dict())
    out = jsonpickle.encode(output_dict)
    return HttpResponse(out, content_type='json')
def zip_file(request):
    """Gzip a previously uploaded file in place and update its records.

    Compresses the file referenced by ``file_id`` into a temp file, swaps it
    in for the original (adding a '.gz' suffix), refreshes the size/name on
    the ChunkedUpload row and on the DataFile document in mongo, and returns
    the new name/size as JSON.
    """
    # need to get a reference to the file to zip
    file_id = request.GET['file_id']
    print("zip started " + file_id)
    file_obj = ChunkedUpload.objects.get(pk=file_id)
    # get the name of the file to zip and change its suffix to .gz
    output_file_location = os.path.join(settings.MEDIA_ROOT, file_obj.file.name)
    output_file_name = file_obj.filename + '.gz'
    # Compress into a temp file first so a failure leaves the original intact.
    temp_name = os.path.join(settings.MEDIA_ROOT, str(uuid.uuid4()) + '.tmp')
    # Read the source in binary: uploads may not be valid UTF-8 text (the
    # original opened in text mode and re-encoded, which could crash).
    # Context managers close both handles even on error; the original's
    # try/finally raised NameError when gzip.open itself failed.
    n = 100000000  # compress in 100 MB chunks to bound memory use
    with open(output_file_location, 'rb') as src, \
            gzip.open(temp_name, 'wb', compresslevel=1) as myzip:
        for chunk in iter(lambda: src.read(n), b''):
            myzip.write(chunk)
    print('zip complete ' + file_id)
    # now need to delete the old file and update the file record with the new file
    new_file_name = output_file_location + '.gz'
    os.rename(temp_name, new_file_name)
    os.remove(output_file_location)
    # calculate new file size
    stats = os.stat(new_file_name)
    new_file_size = stats.st_size / 1000 / 1000
    # update filename and file size
    file_obj.filename = output_file_name
    file_obj.file.name = new_file_name
    file_obj.offset = stats.st_size
    file_obj.save()
    out = {'zipped': True, 'file_name': output_file_name, 'file_size': new_file_size}
    # update record in mongo
    record_object = DataFile().get_by_file_id(file_id)
    auto_fields = dict()
    auto_fields[DataFile().get_qualified_field("file_size")] = u.filesize_toString(file_obj.offset)
    auto_fields[DataFile().get_qualified_field("name")] = output_file_name
    auto_fields[DataFile().get_qualified_field("file_location")] = new_file_name
    component = "datafile"
    BrokerDA(target_id=str(record_object.get("_id", str())),
             component=component,
             auto_fields=auto_fields
             ).do_save_edit()
    out = jsonpickle.encode(out)
    return HttpResponse(out, content_type='json')
|
flexible
|
{
"blob_id": "2b7415d86f9157ae55228efdd61c9a9e9920bc5c",
"index": 7716,
"step-1": "<mask token>\n\n\nclass CopoChunkedUploadCompleteView(ChunkedUploadCompleteView):\n do_md5_check = False\n\n def get_response_data(self, chunked_upload, request):\n \"\"\"\n Data for the response. Should return a dictionary-like object.\n Called *only* if POST is successful.\n \"\"\"\n files = {'files': {}}\n files['files']['name'] = chunked_upload.filename\n files['files']['id'] = chunked_upload.id\n files['files']['size'] = chunked_upload.offset / (1000 * 1000.0)\n files['files']['url'] = ''\n files['files']['thumbnailUrl'] = ''\n files['files']['deleteUrl'] = ''\n files['files']['deleteType'] = 'DELETE'\n str = jsonpickle.encode(files)\n return files\n\n\nclass CopoChunkedUploadView(ChunkedUploadView):\n model = CopoChunkedUpload\n \"\"\"\n \"\"\"\n\n\nclass JSONResponse(HttpResponse):\n \"\"\"\n An HttpResponse that renders its content into JSON.\n \"\"\"\n\n def __init__(self, data, **kwargs):\n content = JSONRenderer().render(data)\n kwargs['content_type'] = 'application/json'\n super(JSONResponse, self).__init__(content, **kwargs)\n\n\ndef receive_data_file(request):\n from django.utils import timezone\n if request.method == 'POST':\n c = {}\n f = request.FILES['file']\n fname = f.__str__()\n attrs = {'user': request.user, 'filename': fname, 'completed_on':\n timezone.now(), 'offset': f.size}\n chunked_upload = ChunkedUpload(**attrs)\n chunked_upload.file.save(name='', content=ContentFile(''), save=True)\n path = chunked_upload.file\n destination = open(os.path.join(settings.MEDIA_ROOT, path.file.name\n ), 'wb+')\n for chunk in f.chunks():\n destination.write(chunk)\n destination.close()\n c.update(csrf(request))\n files = {'files': {}}\n files['files']['name'] = f._name\n files['files']['size'] = path.size / (1000 * 1000.0)\n files['files']['id'] = chunked_upload.id\n files['files']['url'] = ''\n files['files']['thumbnailUrl'] = ''\n files['files']['deleteUrl'] = ''\n files['files']['deleteType'] = 'DELETE'\n str = jsonpickle.encode(files)\n 
return HttpResponse(str, content_type='json')\n\n\ndef resume_chunked(request):\n file_name = request.GET.get('filename')\n user_id = request.user.id\n d = ChunkedUpload.objects.filter(completed_on__isnull=True, user_id=\n user_id, filename=file_name).order_by('-offset')[:1]\n if d:\n out = serializers.serialize('json', d)\n return HttpResponse(jsonpickle.encode(out))\n else:\n return HttpResponse(jsonpickle.encode(''))\n\n\n<mask token>\n\n\ndef inspect_file(request):\n output_dict = {'file_type': 'unknown', 'do_compress': False}\n file_id = request.GET['file_id']\n chunked_upload = ChunkedUpload.objects.get(id=int(file_id))\n file_name = os.path.join(settings.MEDIA_ROOT, chunked_upload.file.name)\n zip_threshold = 200000000\n is_zipped = u.is_gzipped(file_name)\n if chunked_upload.offset >= zip_threshold and not is_zipped:\n output_dict['do_compress'] = True\n if u.is_pdf_file(file_name):\n output_dict['file_type'] = 'pdf'\n else:\n try:\n if u.is_fastq_file(file_name):\n output_dict['file_type'] = 'fastq'\n if not is_zipped:\n output_dict['do_compress'] = True\n elif u.is_sam_file(file_name):\n output_dict['file_type'] = 'sam'\n if not is_zipped:\n output_dict['do_compress'] = False\n elif u.is_bam_file(file_name):\n output_dict['file_type'] = 'bam'\n if not is_zipped:\n output_dict['do_compress'] = False\n else:\n output_dict['file_type'] = chunked_upload.filename.rsplit('.')[\n 1]\n except:\n output_dict['file_type'] = 'unknown'\n chunked_upload.type = output_dict['file_type']\n chunked_upload.save()\n profile_id = request.session['profile_id']\n component = 'datafile'\n auto_fields = dict()\n auto_fields[DataFile().get_qualified_field('file_id')] = file_id\n auto_fields[DataFile().get_qualified_field('file_type')] = output_dict[\n 'file_type']\n auto_fields[DataFile().get_qualified_field('file_location')] = file_name\n auto_fields[DataFile().get_qualified_field('file_size')\n ] = u.filesize_toString(chunked_upload.offset)\n 
auto_fields[DataFile().get_qualified_field('name')\n ] = chunked_upload.filename\n type = [f for f in d_utils.get_copo_schema(component) if f.get('id').\n split('.')[-1] == 'type']\n if type:\n type = type[0]['default_value']\n auto_fields[DataFile().get_qualified_field('type')] = type\n df = BrokerDA(context=dict(), profile_id=profile_id, component=\n component, auto_fields=auto_fields, visualize='last_record'\n ).do_save_edit().get('record_object', dict())\n out = jsonpickle.encode(output_dict)\n return HttpResponse(out, content_type='json')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass CopoChunkedUploadCompleteView(ChunkedUploadCompleteView):\n do_md5_check = False\n\n def get_response_data(self, chunked_upload, request):\n \"\"\"\n Data for the response. Should return a dictionary-like object.\n Called *only* if POST is successful.\n \"\"\"\n files = {'files': {}}\n files['files']['name'] = chunked_upload.filename\n files['files']['id'] = chunked_upload.id\n files['files']['size'] = chunked_upload.offset / (1000 * 1000.0)\n files['files']['url'] = ''\n files['files']['thumbnailUrl'] = ''\n files['files']['deleteUrl'] = ''\n files['files']['deleteType'] = 'DELETE'\n str = jsonpickle.encode(files)\n return files\n\n\nclass CopoChunkedUploadView(ChunkedUploadView):\n model = CopoChunkedUpload\n \"\"\"\n \"\"\"\n\n\nclass JSONResponse(HttpResponse):\n \"\"\"\n An HttpResponse that renders its content into JSON.\n \"\"\"\n\n def __init__(self, data, **kwargs):\n content = JSONRenderer().render(data)\n kwargs['content_type'] = 'application/json'\n super(JSONResponse, self).__init__(content, **kwargs)\n\n\ndef receive_data_file(request):\n from django.utils import timezone\n if request.method == 'POST':\n c = {}\n f = request.FILES['file']\n fname = f.__str__()\n attrs = {'user': request.user, 'filename': fname, 'completed_on':\n timezone.now(), 'offset': f.size}\n chunked_upload = ChunkedUpload(**attrs)\n chunked_upload.file.save(name='', content=ContentFile(''), save=True)\n path = chunked_upload.file\n destination = open(os.path.join(settings.MEDIA_ROOT, path.file.name\n ), 'wb+')\n for chunk in f.chunks():\n destination.write(chunk)\n destination.close()\n c.update(csrf(request))\n files = {'files': {}}\n files['files']['name'] = f._name\n files['files']['size'] = path.size / (1000 * 1000.0)\n files['files']['id'] = chunked_upload.id\n files['files']['url'] = ''\n files['files']['thumbnailUrl'] = ''\n files['files']['deleteUrl'] = ''\n files['files']['deleteType'] = 'DELETE'\n str = jsonpickle.encode(files)\n 
return HttpResponse(str, content_type='json')\n\n\ndef resume_chunked(request):\n file_name = request.GET.get('filename')\n user_id = request.user.id\n d = ChunkedUpload.objects.filter(completed_on__isnull=True, user_id=\n user_id, filename=file_name).order_by('-offset')[:1]\n if d:\n out = serializers.serialize('json', d)\n return HttpResponse(jsonpickle.encode(out))\n else:\n return HttpResponse(jsonpickle.encode(''))\n\n\n<mask token>\n\n\ndef hash_upload(request):\n file_id = request.GET['file_id']\n print('hash started ' + file_id)\n file_obj = ChunkedUpload.objects.get(pk=file_id)\n file_name = os.path.join(settings.MEDIA_ROOT, file_obj.file.name)\n md5 = hashlib.md5()\n with open(file_name, 'rb') as f:\n for chunk in iter(lambda : f.read(8192), b''):\n md5.update(chunk)\n file_obj.hash = md5.hexdigest()\n file_obj.save()\n output_dict = {'output_hash': md5.hexdigest(), 'file_id': file_id}\n record_object = DataFile().get_by_file_id(file_id)\n auto_fields = dict()\n auto_fields[DataFile().get_qualified_field('file_hash')] = file_obj.hash\n profile_id = request.session['profile_id']\n component = 'datafile'\n BrokerDA(target_id=str(record_object.get('_id', str())), component=\n component, auto_fields=auto_fields).do_save_edit()\n out = json.dumps(output_dict)\n print('hash complete ' + file_id)\n return HttpResponse(out, content_type='json')\n\n\ndef inspect_file(request):\n output_dict = {'file_type': 'unknown', 'do_compress': False}\n file_id = request.GET['file_id']\n chunked_upload = ChunkedUpload.objects.get(id=int(file_id))\n file_name = os.path.join(settings.MEDIA_ROOT, chunked_upload.file.name)\n zip_threshold = 200000000\n is_zipped = u.is_gzipped(file_name)\n if chunked_upload.offset >= zip_threshold and not is_zipped:\n output_dict['do_compress'] = True\n if u.is_pdf_file(file_name):\n output_dict['file_type'] = 'pdf'\n else:\n try:\n if u.is_fastq_file(file_name):\n output_dict['file_type'] = 'fastq'\n if not is_zipped:\n output_dict['do_compress'] 
= True\n elif u.is_sam_file(file_name):\n output_dict['file_type'] = 'sam'\n if not is_zipped:\n output_dict['do_compress'] = False\n elif u.is_bam_file(file_name):\n output_dict['file_type'] = 'bam'\n if not is_zipped:\n output_dict['do_compress'] = False\n else:\n output_dict['file_type'] = chunked_upload.filename.rsplit('.')[\n 1]\n except:\n output_dict['file_type'] = 'unknown'\n chunked_upload.type = output_dict['file_type']\n chunked_upload.save()\n profile_id = request.session['profile_id']\n component = 'datafile'\n auto_fields = dict()\n auto_fields[DataFile().get_qualified_field('file_id')] = file_id\n auto_fields[DataFile().get_qualified_field('file_type')] = output_dict[\n 'file_type']\n auto_fields[DataFile().get_qualified_field('file_location')] = file_name\n auto_fields[DataFile().get_qualified_field('file_size')\n ] = u.filesize_toString(chunked_upload.offset)\n auto_fields[DataFile().get_qualified_field('name')\n ] = chunked_upload.filename\n type = [f for f in d_utils.get_copo_schema(component) if f.get('id').\n split('.')[-1] == 'type']\n if type:\n type = type[0]['default_value']\n auto_fields[DataFile().get_qualified_field('type')] = type\n df = BrokerDA(context=dict(), profile_id=profile_id, component=\n component, auto_fields=auto_fields, visualize='last_record'\n ).do_save_edit().get('record_object', dict())\n out = jsonpickle.encode(output_dict)\n return HttpResponse(out, content_type='json')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass CopoChunkedUploadCompleteView(ChunkedUploadCompleteView):\n do_md5_check = False\n\n def get_response_data(self, chunked_upload, request):\n \"\"\"\n Data for the response. Should return a dictionary-like object.\n Called *only* if POST is successful.\n \"\"\"\n files = {'files': {}}\n files['files']['name'] = chunked_upload.filename\n files['files']['id'] = chunked_upload.id\n files['files']['size'] = chunked_upload.offset / (1000 * 1000.0)\n files['files']['url'] = ''\n files['files']['thumbnailUrl'] = ''\n files['files']['deleteUrl'] = ''\n files['files']['deleteType'] = 'DELETE'\n str = jsonpickle.encode(files)\n return files\n\n\nclass CopoChunkedUploadView(ChunkedUploadView):\n model = CopoChunkedUpload\n \"\"\"\n \"\"\"\n\n\nclass JSONResponse(HttpResponse):\n \"\"\"\n An HttpResponse that renders its content into JSON.\n \"\"\"\n\n def __init__(self, data, **kwargs):\n content = JSONRenderer().render(data)\n kwargs['content_type'] = 'application/json'\n super(JSONResponse, self).__init__(content, **kwargs)\n\n\ndef receive_data_file(request):\n from django.utils import timezone\n if request.method == 'POST':\n c = {}\n f = request.FILES['file']\n fname = f.__str__()\n attrs = {'user': request.user, 'filename': fname, 'completed_on':\n timezone.now(), 'offset': f.size}\n chunked_upload = ChunkedUpload(**attrs)\n chunked_upload.file.save(name='', content=ContentFile(''), save=True)\n path = chunked_upload.file\n destination = open(os.path.join(settings.MEDIA_ROOT, path.file.name\n ), 'wb+')\n for chunk in f.chunks():\n destination.write(chunk)\n destination.close()\n c.update(csrf(request))\n files = {'files': {}}\n files['files']['name'] = f._name\n files['files']['size'] = path.size / (1000 * 1000.0)\n files['files']['id'] = chunked_upload.id\n files['files']['url'] = ''\n files['files']['thumbnailUrl'] = ''\n files['files']['deleteUrl'] = ''\n files['files']['deleteType'] = 'DELETE'\n str = jsonpickle.encode(files)\n 
return HttpResponse(str, content_type='json')\n\n\ndef resume_chunked(request):\n file_name = request.GET.get('filename')\n user_id = request.user.id\n d = ChunkedUpload.objects.filter(completed_on__isnull=True, user_id=\n user_id, filename=file_name).order_by('-offset')[:1]\n if d:\n out = serializers.serialize('json', d)\n return HttpResponse(jsonpickle.encode(out))\n else:\n return HttpResponse(jsonpickle.encode(''))\n\n\n<mask token>\n\n\ndef hash_upload(request):\n file_id = request.GET['file_id']\n print('hash started ' + file_id)\n file_obj = ChunkedUpload.objects.get(pk=file_id)\n file_name = os.path.join(settings.MEDIA_ROOT, file_obj.file.name)\n md5 = hashlib.md5()\n with open(file_name, 'rb') as f:\n for chunk in iter(lambda : f.read(8192), b''):\n md5.update(chunk)\n file_obj.hash = md5.hexdigest()\n file_obj.save()\n output_dict = {'output_hash': md5.hexdigest(), 'file_id': file_id}\n record_object = DataFile().get_by_file_id(file_id)\n auto_fields = dict()\n auto_fields[DataFile().get_qualified_field('file_hash')] = file_obj.hash\n profile_id = request.session['profile_id']\n component = 'datafile'\n BrokerDA(target_id=str(record_object.get('_id', str())), component=\n component, auto_fields=auto_fields).do_save_edit()\n out = json.dumps(output_dict)\n print('hash complete ' + file_id)\n return HttpResponse(out, content_type='json')\n\n\ndef inspect_file(request):\n output_dict = {'file_type': 'unknown', 'do_compress': False}\n file_id = request.GET['file_id']\n chunked_upload = ChunkedUpload.objects.get(id=int(file_id))\n file_name = os.path.join(settings.MEDIA_ROOT, chunked_upload.file.name)\n zip_threshold = 200000000\n is_zipped = u.is_gzipped(file_name)\n if chunked_upload.offset >= zip_threshold and not is_zipped:\n output_dict['do_compress'] = True\n if u.is_pdf_file(file_name):\n output_dict['file_type'] = 'pdf'\n else:\n try:\n if u.is_fastq_file(file_name):\n output_dict['file_type'] = 'fastq'\n if not is_zipped:\n output_dict['do_compress'] 
= True\n elif u.is_sam_file(file_name):\n output_dict['file_type'] = 'sam'\n if not is_zipped:\n output_dict['do_compress'] = False\n elif u.is_bam_file(file_name):\n output_dict['file_type'] = 'bam'\n if not is_zipped:\n output_dict['do_compress'] = False\n else:\n output_dict['file_type'] = chunked_upload.filename.rsplit('.')[\n 1]\n except:\n output_dict['file_type'] = 'unknown'\n chunked_upload.type = output_dict['file_type']\n chunked_upload.save()\n profile_id = request.session['profile_id']\n component = 'datafile'\n auto_fields = dict()\n auto_fields[DataFile().get_qualified_field('file_id')] = file_id\n auto_fields[DataFile().get_qualified_field('file_type')] = output_dict[\n 'file_type']\n auto_fields[DataFile().get_qualified_field('file_location')] = file_name\n auto_fields[DataFile().get_qualified_field('file_size')\n ] = u.filesize_toString(chunked_upload.offset)\n auto_fields[DataFile().get_qualified_field('name')\n ] = chunked_upload.filename\n type = [f for f in d_utils.get_copo_schema(component) if f.get('id').\n split('.')[-1] == 'type']\n if type:\n type = type[0]['default_value']\n auto_fields[DataFile().get_qualified_field('type')] = type\n df = BrokerDA(context=dict(), profile_id=profile_id, component=\n component, auto_fields=auto_fields, visualize='last_record'\n ).do_save_edit().get('record_object', dict())\n out = jsonpickle.encode(output_dict)\n return HttpResponse(out, content_type='json')\n\n\ndef zip_file(request):\n file_id = request.GET['file_id']\n print('zip started ' + file_id)\n file_obj = ChunkedUpload.objects.get(pk=file_id)\n output_file_location = os.path.join(settings.MEDIA_ROOT, file_obj.file.name\n )\n output_file_name = file_obj.filename + '.gz'\n try:\n temp_name = os.path.join(settings.MEDIA_ROOT, str(uuid.uuid4()) +\n '.tmp')\n myzip = gzip.open(temp_name, 'wb', compresslevel=1)\n src = open(output_file_location, 'r')\n n = 100000000\n for chunk in iter(lambda : src.read(n), ''):\n myzip.write(bytes(chunk, 'UTF-8'))\n 
finally:\n myzip.close()\n src.close()\n print('zip complete ' + file_id)\n new_file_name = output_file_location + '.gz'\n os.rename(temp_name, new_file_name)\n os.remove(output_file_location)\n stats = os.stat(new_file_name)\n new_file_size = stats.st_size / 1000 / 1000\n file_obj.filename = output_file_name\n file_obj.file.name = new_file_name\n file_obj.offset = stats.st_size\n file_obj.save()\n out = {'zipped': True, 'file_name': output_file_name, 'file_size':\n new_file_size}\n record_object = DataFile().get_by_file_id(file_id)\n auto_fields = dict()\n auto_fields[DataFile().get_qualified_field('file_size')\n ] = u.filesize_toString(file_obj.offset)\n auto_fields[DataFile().get_qualified_field('name')] = output_file_name\n auto_fields[DataFile().get_qualified_field('file_location')\n ] = new_file_name\n profile_id = request.session['profile_id']\n component = 'datafile'\n BrokerDA(target_id=str(record_object.get('_id', str())), component=\n component, auto_fields=auto_fields).do_save_edit()\n out = jsonpickle.encode(out)\n return HttpResponse(out, content_type='json')\n",
"step-4": "<mask token>\n\n\nclass CopoChunkedUploadCompleteView(ChunkedUploadCompleteView):\n do_md5_check = False\n\n def get_response_data(self, chunked_upload, request):\n \"\"\"\n Data for the response. Should return a dictionary-like object.\n Called *only* if POST is successful.\n \"\"\"\n files = {'files': {}}\n files['files']['name'] = chunked_upload.filename\n files['files']['id'] = chunked_upload.id\n files['files']['size'] = chunked_upload.offset / (1000 * 1000.0)\n files['files']['url'] = ''\n files['files']['thumbnailUrl'] = ''\n files['files']['deleteUrl'] = ''\n files['files']['deleteType'] = 'DELETE'\n str = jsonpickle.encode(files)\n return files\n\n\nclass CopoChunkedUploadView(ChunkedUploadView):\n model = CopoChunkedUpload\n \"\"\"\n \"\"\"\n\n\nclass JSONResponse(HttpResponse):\n \"\"\"\n An HttpResponse that renders its content into JSON.\n \"\"\"\n\n def __init__(self, data, **kwargs):\n content = JSONRenderer().render(data)\n kwargs['content_type'] = 'application/json'\n super(JSONResponse, self).__init__(content, **kwargs)\n\n\ndef receive_data_file(request):\n from django.utils import timezone\n if request.method == 'POST':\n c = {}\n f = request.FILES['file']\n fname = f.__str__()\n attrs = {'user': request.user, 'filename': fname, 'completed_on':\n timezone.now(), 'offset': f.size}\n chunked_upload = ChunkedUpload(**attrs)\n chunked_upload.file.save(name='', content=ContentFile(''), save=True)\n path = chunked_upload.file\n destination = open(os.path.join(settings.MEDIA_ROOT, path.file.name\n ), 'wb+')\n for chunk in f.chunks():\n destination.write(chunk)\n destination.close()\n c.update(csrf(request))\n files = {'files': {}}\n files['files']['name'] = f._name\n files['files']['size'] = path.size / (1000 * 1000.0)\n files['files']['id'] = chunked_upload.id\n files['files']['url'] = ''\n files['files']['thumbnailUrl'] = ''\n files['files']['deleteUrl'] = ''\n files['files']['deleteType'] = 'DELETE'\n str = jsonpickle.encode(files)\n 
return HttpResponse(str, content_type='json')\n\n\ndef resume_chunked(request):\n file_name = request.GET.get('filename')\n user_id = request.user.id\n d = ChunkedUpload.objects.filter(completed_on__isnull=True, user_id=\n user_id, filename=file_name).order_by('-offset')[:1]\n if d:\n out = serializers.serialize('json', d)\n return HttpResponse(jsonpickle.encode(out))\n else:\n return HttpResponse(jsonpickle.encode(''))\n\n\ndef get_partial_uploads(request):\n user_id = request.user.id\n d = ChunkedUpload.objects.filter(completed_on__isnull=True, user_id=user_id\n ).order_by('created_on')\n if d:\n out = serializers.serialize('json', d)\n return HttpResponse(jsonpickle.encode(out))\n else:\n return HttpResponse(jsonpickle.encode(''))\n\n\ndef hash_upload(request):\n file_id = request.GET['file_id']\n print('hash started ' + file_id)\n file_obj = ChunkedUpload.objects.get(pk=file_id)\n file_name = os.path.join(settings.MEDIA_ROOT, file_obj.file.name)\n md5 = hashlib.md5()\n with open(file_name, 'rb') as f:\n for chunk in iter(lambda : f.read(8192), b''):\n md5.update(chunk)\n file_obj.hash = md5.hexdigest()\n file_obj.save()\n output_dict = {'output_hash': md5.hexdigest(), 'file_id': file_id}\n record_object = DataFile().get_by_file_id(file_id)\n auto_fields = dict()\n auto_fields[DataFile().get_qualified_field('file_hash')] = file_obj.hash\n profile_id = request.session['profile_id']\n component = 'datafile'\n BrokerDA(target_id=str(record_object.get('_id', str())), component=\n component, auto_fields=auto_fields).do_save_edit()\n out = json.dumps(output_dict)\n print('hash complete ' + file_id)\n return HttpResponse(out, content_type='json')\n\n\ndef inspect_file(request):\n output_dict = {'file_type': 'unknown', 'do_compress': False}\n file_id = request.GET['file_id']\n chunked_upload = ChunkedUpload.objects.get(id=int(file_id))\n file_name = os.path.join(settings.MEDIA_ROOT, chunked_upload.file.name)\n zip_threshold = 200000000\n is_zipped = 
u.is_gzipped(file_name)\n if chunked_upload.offset >= zip_threshold and not is_zipped:\n output_dict['do_compress'] = True\n if u.is_pdf_file(file_name):\n output_dict['file_type'] = 'pdf'\n else:\n try:\n if u.is_fastq_file(file_name):\n output_dict['file_type'] = 'fastq'\n if not is_zipped:\n output_dict['do_compress'] = True\n elif u.is_sam_file(file_name):\n output_dict['file_type'] = 'sam'\n if not is_zipped:\n output_dict['do_compress'] = False\n elif u.is_bam_file(file_name):\n output_dict['file_type'] = 'bam'\n if not is_zipped:\n output_dict['do_compress'] = False\n else:\n output_dict['file_type'] = chunked_upload.filename.rsplit('.')[\n 1]\n except:\n output_dict['file_type'] = 'unknown'\n chunked_upload.type = output_dict['file_type']\n chunked_upload.save()\n profile_id = request.session['profile_id']\n component = 'datafile'\n auto_fields = dict()\n auto_fields[DataFile().get_qualified_field('file_id')] = file_id\n auto_fields[DataFile().get_qualified_field('file_type')] = output_dict[\n 'file_type']\n auto_fields[DataFile().get_qualified_field('file_location')] = file_name\n auto_fields[DataFile().get_qualified_field('file_size')\n ] = u.filesize_toString(chunked_upload.offset)\n auto_fields[DataFile().get_qualified_field('name')\n ] = chunked_upload.filename\n type = [f for f in d_utils.get_copo_schema(component) if f.get('id').\n split('.')[-1] == 'type']\n if type:\n type = type[0]['default_value']\n auto_fields[DataFile().get_qualified_field('type')] = type\n df = BrokerDA(context=dict(), profile_id=profile_id, component=\n component, auto_fields=auto_fields, visualize='last_record'\n ).do_save_edit().get('record_object', dict())\n out = jsonpickle.encode(output_dict)\n return HttpResponse(out, content_type='json')\n\n\ndef zip_file(request):\n file_id = request.GET['file_id']\n print('zip started ' + file_id)\n file_obj = ChunkedUpload.objects.get(pk=file_id)\n output_file_location = os.path.join(settings.MEDIA_ROOT, file_obj.file.name\n )\n 
output_file_name = file_obj.filename + '.gz'\n try:\n temp_name = os.path.join(settings.MEDIA_ROOT, str(uuid.uuid4()) +\n '.tmp')\n myzip = gzip.open(temp_name, 'wb', compresslevel=1)\n src = open(output_file_location, 'r')\n n = 100000000\n for chunk in iter(lambda : src.read(n), ''):\n myzip.write(bytes(chunk, 'UTF-8'))\n finally:\n myzip.close()\n src.close()\n print('zip complete ' + file_id)\n new_file_name = output_file_location + '.gz'\n os.rename(temp_name, new_file_name)\n os.remove(output_file_location)\n stats = os.stat(new_file_name)\n new_file_size = stats.st_size / 1000 / 1000\n file_obj.filename = output_file_name\n file_obj.file.name = new_file_name\n file_obj.offset = stats.st_size\n file_obj.save()\n out = {'zipped': True, 'file_name': output_file_name, 'file_size':\n new_file_size}\n record_object = DataFile().get_by_file_id(file_id)\n auto_fields = dict()\n auto_fields[DataFile().get_qualified_field('file_size')\n ] = u.filesize_toString(file_obj.offset)\n auto_fields[DataFile().get_qualified_field('name')] = output_file_name\n auto_fields[DataFile().get_qualified_field('file_location')\n ] = new_file_name\n profile_id = request.session['profile_id']\n component = 'datafile'\n BrokerDA(target_id=str(record_object.get('_id', str())), component=\n component, auto_fields=auto_fields).do_save_edit()\n out = jsonpickle.encode(out)\n return HttpResponse(out, content_type='json')\n",
"step-5": "__author__ = 'fshaw'\nimport gzip\nimport hashlib\nimport os\nimport uuid\nimport json\nimport jsonpickle\nfrom chunked_upload.models import ChunkedUpload\nfrom chunked_upload.views import ChunkedUploadView, ChunkedUploadCompleteView\nfrom django.conf import settings\nfrom django.core import serializers\nfrom django.core.files.base import ContentFile\nfrom django.http import HttpResponse\nfrom django.template.context_processors import csrf\nfrom rest_framework.renderers import JSONRenderer\n\nimport web.apps.web_copo.schemas.utils.data_utils as d_utils\nimport web.apps.web_copo.utils.EnaUtils as u\nfrom dal.broker_da import BrokerDA\nfrom dal.copo_da import DataFile\nfrom web.apps.web_copo.rest.models import CopoChunkedUpload\n\n\nclass CopoChunkedUploadCompleteView(ChunkedUploadCompleteView):\n do_md5_check = False\n\n def get_response_data(self, chunked_upload, request):\n \"\"\"\n Data for the response. Should return a dictionary-like object.\n Called *only* if POST is successful.\n \"\"\"\n files = {'files': {}}\n files['files']['name'] = chunked_upload.filename\n files['files']['id'] = chunked_upload.id\n files['files']['size'] = chunked_upload.offset / (1000 * 1000.0)\n files['files']['url'] = ''\n files['files']['thumbnailUrl'] = ''\n files['files']['deleteUrl'] = ''\n files['files']['deleteType'] = 'DELETE'\n\n str = jsonpickle.encode(files)\n return files\n\n\nclass CopoChunkedUploadView(ChunkedUploadView):\n model = CopoChunkedUpload\n\n '''\n '''\n\n\nclass JSONResponse(HttpResponse):\n \"\"\"\n An HttpResponse that renders its content into JSON.\n \"\"\"\n\n def __init__(self, data, **kwargs):\n content = JSONRenderer().render(data)\n kwargs['content_type'] = 'application/json'\n\n super(JSONResponse, self).__init__(content, **kwargs)\n\n\ndef receive_data_file(request):\n # this method is called for writing smaller files (<= 260MB) to disk, larger files use the\n # upload method in ChunkedUpload class\n\n from django.utils import timezone\n 
# need to make a chunked upload record to store deails of the file\n if request.method == 'POST':\n\n c = {}\n f = request.FILES['file']\n\n fname = f.__str__()\n attrs = {'user': request.user, 'filename': fname, 'completed_on': timezone.now(), 'offset': f.size}\n chunked_upload = ChunkedUpload(**attrs)\n # file starts empty\n chunked_upload.file.save(name='', content=ContentFile(''), save=True)\n\n path = chunked_upload.file\n destination = open(os.path.join(settings.MEDIA_ROOT, path.file.name), 'wb+')\n for chunk in f.chunks():\n destination.write(chunk)\n destination.close()\n c.update(csrf(request))\n\n # create output structure to pass back to jquery-upload\n files = {'files': {}}\n files['files']['name'] = f._name\n\n files['files']['size'] = path.size / (1000 * 1000.0)\n files['files']['id'] = chunked_upload.id\n files['files']['url'] = ''\n files['files']['thumbnailUrl'] = ''\n files['files']['deleteUrl'] = ''\n files['files']['deleteType'] = 'DELETE'\n\n str = jsonpickle.encode(files)\n return HttpResponse(str, content_type='json')\n\n\ndef resume_chunked(request):\n file_name = request.GET.get('filename')\n user_id = request.user.id\n # retrieve incomplete file for user with this name\n d = ChunkedUpload.objects.filter(completed_on__isnull=True, user_id=user_id, filename=file_name).order_by(\n '-offset')[:1]\n if d:\n out = serializers.serialize('json', d)\n return HttpResponse(jsonpickle.encode(out))\n else:\n return HttpResponse(jsonpickle.encode(''))\n\n\ndef get_partial_uploads(request):\n user_id = request.user.id\n d = ChunkedUpload.objects.filter(completed_on__isnull=True, user_id=user_id).order_by('created_on')\n if d:\n out = serializers.serialize('json', d)\n return HttpResponse(jsonpickle.encode(out))\n else:\n return HttpResponse(jsonpickle.encode(''))\n\n\ndef hash_upload(request):\n # utility method to create an md5 hash of a given file path\n # open uploaded file\n file_id = request.GET['file_id']\n print('hash started ' + file_id)\n 
file_obj = ChunkedUpload.objects.get(pk=file_id)\n file_name = os.path.join(settings.MEDIA_ROOT, file_obj.file.name)\n\n # now hash opened file\n md5 = hashlib.md5()\n with open(file_name, 'rb') as f:\n for chunk in iter(lambda: f.read(8192), b''):\n md5.update(chunk)\n\n file_obj.hash = md5.hexdigest()\n file_obj.save()\n\n output_dict = {'output_hash': md5.hexdigest(), 'file_id': file_id}\n\n # update record in mongo\n record_object = DataFile().get_by_file_id(file_id)\n auto_fields = dict()\n auto_fields[DataFile().get_qualified_field(\"file_hash\")] = file_obj.hash\n\n profile_id = request.session['profile_id']\n component = \"datafile\"\n\n BrokerDA(target_id=str(record_object.get(\"_id\", str())),\n component=component,\n auto_fields=auto_fields\n ).do_save_edit()\n\n out = json.dumps(output_dict)\n print('hash complete ' + file_id)\n return HttpResponse(out, content_type='json')\n\n\ndef inspect_file(request):\n # utility method to examine a file and return meta-data to the frontend\n output_dict = {'file_type': 'unknown', 'do_compress': False}\n\n # get reference to file\n file_id = request.GET['file_id']\n\n chunked_upload = ChunkedUpload.objects.get(id=int(file_id))\n file_name = os.path.join(settings.MEDIA_ROOT, chunked_upload.file.name)\n\n # size threshold to determine if a file should be compressed\n zip_threshold = 200000000 # size in bytes\n\n # check if file is compressed\n is_zipped = u.is_gzipped(file_name)\n\n if chunked_upload.offset >= zip_threshold and not is_zipped:\n output_dict['do_compress'] = True\n\n # check for file type\n if u.is_pdf_file(file_name):\n output_dict['file_type'] = 'pdf'\n else:\n try:\n if u.is_fastq_file(file_name):\n output_dict['file_type'] = 'fastq'\n if not is_zipped:\n output_dict['do_compress'] = True\n elif u.is_sam_file(file_name):\n output_dict['file_type'] = 'sam'\n if not is_zipped:\n output_dict['do_compress'] = False\n elif u.is_bam_file(file_name):\n output_dict['file_type'] = 'bam'\n if not is_zipped:\n 
output_dict['do_compress'] = False\n\n else: # make file type same as extension\n output_dict['file_type'] = chunked_upload.filename.rsplit('.')[1]\n except:\n output_dict['file_type'] = 'unknown'\n\n # add datafile schema\n chunked_upload.type = output_dict['file_type']\n chunked_upload.save()\n\n # ...and obtain the inserted record\n profile_id = request.session['profile_id']\n component = \"datafile\"\n\n auto_fields = dict()\n auto_fields[DataFile().get_qualified_field(\"file_id\")] = file_id\n auto_fields[DataFile().get_qualified_field(\"file_type\")] = output_dict['file_type']\n auto_fields[DataFile().get_qualified_field(\"file_location\")] = file_name\n auto_fields[DataFile().get_qualified_field(\"file_size\")] = u.filesize_toString(chunked_upload.offset)\n auto_fields[DataFile().get_qualified_field(\"name\")] = chunked_upload.filename\n\n # get default type from schema\n type = [f for f in d_utils.get_copo_schema(component) if f.get(\"id\").split(\".\")[-1] == \"type\"]\n if type:\n type = type[0][\"default_value\"]\n auto_fields[DataFile().get_qualified_field(\"type\")] = type\n\n df = BrokerDA(context=dict(),\n profile_id=profile_id,\n component=component,\n auto_fields=auto_fields,\n visualize=\"last_record\"\n ).do_save_edit().get(\"record_object\", dict())\n\n out = jsonpickle.encode(output_dict)\n return HttpResponse(out, content_type='json')\n\n\ndef zip_file(request):\n # need to get a reference to the file to zip\n file_id = request.GET['file_id']\n print(\"zip started \" + file_id)\n file_obj = ChunkedUpload.objects.get(pk=file_id)\n\n # get the name of the file to zip and change its suffix to .gz\n output_file_location = os.path.join(settings.MEDIA_ROOT, file_obj.file.name)\n output_file_name = file_obj.filename + '.gz'\n try:\n # open the file as gzip acrchive...set compression level\n temp_name = os.path.join(settings.MEDIA_ROOT, str(uuid.uuid4()) + '.tmp')\n myzip = gzip.open(temp_name, 'wb', compresslevel=1)\n src = open(output_file_location, 
'r')\n\n # write input file to gzip archive in n byte chunks\n n = 100000000\n for chunk in iter(lambda: src.read(n), ''):\n myzip.write(bytes(chunk, 'UTF-8'))\n finally:\n myzip.close()\n src.close()\n\n print('zip complete ' + file_id)\n # now need to delete the old file and update the file record with the new file\n new_file_name = output_file_location + '.gz'\n os.rename(temp_name, new_file_name)\n os.remove(output_file_location)\n\n # calculate new file size\n stats = os.stat(new_file_name)\n new_file_size = stats.st_size / 1000 / 1000\n\n # update filename\n file_obj.filename = output_file_name\n file_obj.file.name = new_file_name\n\n # update file size\n file_obj.offset = stats.st_size\n file_obj.save()\n\n out = {'zipped': True, 'file_name': output_file_name, 'file_size': new_file_size}\n\n # update record in mongo\n record_object = DataFile().get_by_file_id(file_id)\n auto_fields = dict()\n auto_fields[DataFile().get_qualified_field(\"file_size\")] = u.filesize_toString(file_obj.offset)\n auto_fields[DataFile().get_qualified_field(\"name\")] = output_file_name\n auto_fields[DataFile().get_qualified_field(\"file_location\")] = new_file_name\n\n profile_id = request.session['profile_id']\n component = \"datafile\"\n\n BrokerDA(target_id=str(record_object.get(\"_id\", str())),\n component=component,\n auto_fields=auto_fields\n ).do_save_edit()\n\n out = jsonpickle.encode(out)\n return HttpResponse(out, content_type='json')\n",
"step-ids": [
12,
13,
14,
15,
18
]
}
|
[
12,
13,
14,
15,
18
] |
#!/usr/bin/python
# encoding: utf-8
#
# In case of reuse of this source code please do not remove this copyright.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# For more information on the GNU General Public License see:
# <http://www.gnu.org/licenses/>.
#
from Components.config import config
from datetime import datetime
import os
# Minimum number of entries a directory listing must have before its
# dir/file lists are cached (read once at import time from the EMC config).
MinCacheLimit = config.EMC.min_file_cache_limit.getValue()
# Local aliases for os.path predicates — cheaper lookups in hot loops.
pathisfile = os.path.isfile
pathisdir = os.path.isdir
pathislink = os.path.islink
pathexists = os.path.exists
pathreal = os.path.realpath
# Indices into the per-path attribute list stored in cacheAttributeList.
idx_isLink=0
idx_isDir=1
idx_isFile=2
idx_Date=3
idx_realpath=4
idx_num=5
class EMCFileCache():
def __init__(self):
self.cacheDirectoryList = {}
self.cacheFileList = {}
self.cacheAttributeList = {}
self.cacheCountSizeList = {}
def addCountSizeToCache(self, path, count, size):
# print "EMC addCountSizeToCache", path
if self.cacheCountSizeList.has_key(path):
lastcount, lastsize = self.cacheCountSizeList[path]
if lastcount != count or lastsize != size:
del self.cacheCountSizeList[path]
self.cacheCountSizeList[path] = count, size
else:
self.cacheCountSizeList[path] = count, size
# print "EMC addCountSizeToCache", self.cacheCountSizeList
def getCountSizeFromCache(self, path):
if self.cacheCountSizeList.has_key(path):
return self.cacheCountSizeList[path]
else:
return None
# print "EMC getCountSizeFromCache", self.cacheCountSizeList
def delcacheCountSizeList(self):
self.cacheCountSizeList = {}
print "EMC delete cacheCountSizeList", self.cacheCountSizeList
def delcacheCountSizeListEntriesOnFileOp(self,path):
#print "EMC delcacheCountSizeListEntriesOnFileOp",path
rescanPaths = []
if path:
for k in self.cacheCountSizeList.keys():
if (k+"/").startswith(path+"/") or (path+"/").startswith(k+"/"): # drop dirs containing path, but not "a/bc" when path is "a/bcd/e", therefore append "/"
del self.cacheCountSizeList[k]
rescanPaths.append(k)
#print "EMC delcacheCountSizeListEntriesOnFileOp IS deleting",k," due to OP on path ",path
#else:
#print "EMC delcacheCountSizeListEntriesOnFileOp NOT deleting",k," due to OP on path ",path
return rescanPaths
def IsPathInCountSizeList(self, path):
if self.cacheCountSizeList.has_key(path):
return True
else:
return False
def addPathToCache(self, path, subdirlist, filelist, MovieCenterInst):
if config.EMC.files_cache.value:
print "EMC addPathToCache", path
if (len(subdirlist)>MinCacheLimit) or (len(filelist)>MinCacheLimit):
self.cacheDirectoryList[path] = subdirlist
for p, n, e in subdirlist:
if not (p in self.cacheAttributeList):
AttributeList=[None]*idx_num
AttributeList[idx_isLink] = pathislink(p)
AttributeList[idx_isDir] = True # we are in subdirlist
AttributeList[idx_isFile] = False # we are in subdirlist
AttributeList[idx_Date] = pathexists(p) and MovieCenterInst.checkDate(p, True)
AttributeList[idx_realpath] = pathreal(p) #for dirs only
self.cacheAttributeList[p] = AttributeList
self.cacheFileList[path] = filelist
for p, n, e in filelist:
if not (p in self.cacheAttributeList):
AttributeList=[None]*idx_num
AttributeList[idx_isLink] = pathislink(p)
AttributeList[idx_isDir] = False # we are in filelist, no entry is a real directrory ...
AttributeList[idx_isFile] = pathisfile(p) # ... but filelist might contain virtual directories
AttributeList[idx_Date] = pathexists(p) and MovieCenterInst.checkDate(p, False)
#AttributeList[idx_realpath] = pathreal(p) #for dirs only
self.cacheAttributeList[p] = AttributeList
else:
if self.cacheDirectoryList.has_key(path):
self.deleteAssociatedListEntries(self.cacheDirectoryList[path])
del self.cacheDirectoryList[path]
if self.cacheFileList.has_key(path):
self.deleteAssociatedListEntries(self.cacheFileList[path])
del self.cacheFileList[path]
# self.debugPrintDirCache()
# self.debugPrintFileCache()
# self.debugPrintFileAttributeCache()
def addRecToCacheFileList(self, path, rec):
if config.EMC.files_cache.value:
if self.cacheFileList.has_key(path):
filelist = self.cacheFileList[path]
filelist.append(rec)
del self.cacheFileList[path]
self.cacheFileList[path] = filelist
def getCacheForPath(self, path):
print "EMC getCacheForPath", path
if config.EMC.files_cache.value and self.cacheDirectoryList.has_key(path) and self.cacheFileList.has_key(path):
subdirlist = self.cacheDirectoryList[path]
filelist = self.cacheFileList[path]
# self.debugPrintDirCache()
# self.debugPrintFileCache()
# self.debugPrintFileAttributeCache()
return subdirlist, filelist
else:
return None, None
def isLink(self, path):
isLink = None
if config.EMC.files_cache.value and (path in self.cacheAttributeList):
isLink = self.cacheAttributeList[path][idx_isLink]
if isLink is None:
isLink = pathislink(path)
return isLink
def isDir(self, path):
isDir = None
if (config.EMC.check_dead_links.value != "always") and config.EMC.files_cache.value and (path in self.cacheAttributeList):
isDir = self.cacheAttributeList[path][idx_isDir]
if isDir is None:
isDir = pathisdir(path)
return isDir
def isFile(self, path):
isFile = None
if (config.EMC.check_dead_links.value != "always") and config.EMC.files_cache.value and (path in self.cacheAttributeList):
isFile = self.cacheAttributeList[path][idx_isFile]
if isFile is None:
isFile = pathisfile(path)
return isFile
def realpath(self, path):
realpath = None
if config.EMC.files_cache.value and (path in self.cacheAttributeList):
realpath = self.cacheAttributeList[path][idx_realpath]
if realpath is None:
realpath = pathreal(path)
return realpath
def getDateInfoFromCacheForPath(self, path):
if config.EMC.files_cache.value and (path in self.cacheAttributeList):
return self.cacheAttributeList[path][idx_Date]
else:
return None
def getDirsFromCacheForPath(self, path):
if config.EMC.files_cache.value and self.cacheDirectoryList.has_key(path):
subdirlist = self.cacheDirectoryList[path]
return subdirlist
else:
return None
def getFilesFromCacheForPath(self, path):
if config.EMC.files_cache.value and self.cacheFileList.has_key(path):
filelist = self.cacheFileList[path]
return filelist
else:
return None
def IsPathInCache(self, path):
if config.EMC.files_cache.value and self.cacheDirectoryList.has_key(path) and self.cacheFileList.has_key(path):
return True
else:
return False
def IsPathWithDirsInCache(self, path):
if config.EMC.files_cache.value and self.cacheDirectoryList.has_key(path):
return True
else:
return False
def IsPathWithFilesInCache(self, path):
if config.EMC.files_cache.value and self.cacheFileList.has_key(path):
return True
else:
return False
def delPathFromCache(self, path):
if len(path)>1 and path[-1]=="/":
path = path[:-1]
print "EMC delPathFromCache", path
if self.cacheDirectoryList.has_key(path):
self.deleteAssociatedListEntries(self.cacheDirectoryList[path])
del self.cacheDirectoryList[path]
if self.cacheFileList.has_key(path):
self.deleteAssociatedListEntries(self.cacheFileList[path])
del self.cacheFileList[path]
# self.debugPrintDirCache()
# self.debugPrintFileCache()
# self.debugPrintFileAttributeCache()
def delPathFromDirCache(self, path):
if len(path)>1 and path[-1]=="/":
path = path[:-1]
if self.cacheDirectoryList.has_key(path):
self.deleteAssociatedListEntries(self.cacheDirectoryList[path])
del self.cacheDirectoryList[path]
def delPathFromFileCache(self, path):
if len(path)>1 and path[-1]=="/":
path = path[:-1]
if self.cacheFileList.has_key(path):
self.deleteAssociatedListEntries(self.cacheFileList[path])
del self.cacheFileList[path]
def debugPrintFileCache(self):
print "cacheFileList:"
for p in self.cacheFileList:
print p,self.cacheFileList[p]
print ""
def debugPrintDirCache(self):
print "cacheDirectoryList:"
for p in self.cacheDirectoryList:
print p,self.cacheDirectoryList[p]
print ""
def debugPrintFileAttributeCache(self):
print "cacheAttributeList:"
for p in self.cacheAttributeList:
print p,self.cacheAttributeList[p]
print ""
def deleteAssociatedListEntries(self, list):
for p, n, e in list:
if p in self.cacheAttributeList and (config.EMC.check_dead_links.value != "only_initially"):
del self.cacheAttributeList[p]
movieFileCache = EMCFileCache()
|
normal
|
{
"blob_id": "a7218971b831e2cfda9a035eddb350ecf1cdf938",
"index": 17,
"step-1": "#!/usr/bin/python\n# encoding: utf-8\n#\n# In case of reuse of this source code please do not remove this copyright.\n#\n#\tThis program is free software: you can redistribute it and/or modify\n#\tit under the terms of the GNU General Public License as published by\n#\tthe Free Software Foundation, either version 3 of the License, or\n#\t(at your option) any later version.\n#\n#\tThis program is distributed in the hope that it will be useful,\n#\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n#\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n#\tGNU General Public License for more details.\n#\n#\tFor more information on the GNU General Public License see:\n#\t<http://www.gnu.org/licenses/>.\n#\n\nfrom Components.config import config\nfrom datetime import datetime\nimport os\n\nMinCacheLimit = config.EMC.min_file_cache_limit.getValue()\npathisfile = os.path.isfile\npathisdir = os.path.isdir\npathislink = os.path.islink\npathexists = os.path.exists\npathreal = os.path.realpath\n\nidx_isLink=0\nidx_isDir=1\nidx_isFile=2\nidx_Date=3\nidx_realpath=4\nidx_num=5\n\nclass EMCFileCache():\n\tdef __init__(self):\n\t\tself.cacheDirectoryList = {}\n\t\tself.cacheFileList = {}\n\t\tself.cacheAttributeList = {}\n\t\tself.cacheCountSizeList = {}\n\n\tdef addCountSizeToCache(self, path, count, size):\n#\t\tprint \"EMC addCountSizeToCache\", path\n\t\tif self.cacheCountSizeList.has_key(path):\n\t\t\tlastcount, lastsize = self.cacheCountSizeList[path]\n\t\t\tif lastcount != count or lastsize != size:\n\t\t\t\tdel self.cacheCountSizeList[path]\n\t\t\t\tself.cacheCountSizeList[path] = count, size\n\t\telse:\n\t\t\tself.cacheCountSizeList[path] = count, size\n#\t\tprint \"EMC addCountSizeToCache\", self.cacheCountSizeList\n\n\tdef getCountSizeFromCache(self, path):\n\t\tif self.cacheCountSizeList.has_key(path):\n\t\t\treturn self.cacheCountSizeList[path]\n\t\telse:\n\t\t\treturn None\n#\t\tprint \"EMC getCountSizeFromCache\", 
self.cacheCountSizeList\n\n\tdef delcacheCountSizeList(self):\n\t\tself.cacheCountSizeList = {}\n\t\tprint \"EMC delete cacheCountSizeList\", self.cacheCountSizeList\n\n\tdef delcacheCountSizeListEntriesOnFileOp(self,path):\n\t\t#print \"EMC delcacheCountSizeListEntriesOnFileOp\",path\n\t\trescanPaths = []\n\t\tif path:\n\t\t\tfor k in self.cacheCountSizeList.keys():\n\t\t\t\tif (k+\"/\").startswith(path+\"/\") or (path+\"/\").startswith(k+\"/\"): # drop dirs containing path, but not \"a/bc\" when path is \"a/bcd/e\", therefore append \"/\"\n\t\t\t\t\tdel self.cacheCountSizeList[k]\n\t\t\t\t\trescanPaths.append(k)\n\t\t\t\t\t#print \"EMC delcacheCountSizeListEntriesOnFileOp IS deleting\",k,\" due to OP on path \",path\n\t\t\t\t#else:\n\t\t\t\t\t#print \"EMC delcacheCountSizeListEntriesOnFileOp NOT deleting\",k,\" due to OP on path \",path\n\t\treturn rescanPaths\n\n\tdef IsPathInCountSizeList(self, path):\n\t\tif self.cacheCountSizeList.has_key(path):\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef addPathToCache(self, path, subdirlist, filelist, MovieCenterInst):\n\t\tif config.EMC.files_cache.value:\n\t\t\tprint \"EMC addPathToCache\", path\n\t\t\tif (len(subdirlist)>MinCacheLimit) or (len(filelist)>MinCacheLimit):\n\t\t\t\tself.cacheDirectoryList[path] = subdirlist\n\t\t\t\tfor p, n, e in subdirlist:\n\t\t\t\t\tif not (p in self.cacheAttributeList):\n\t\t\t\t\t\tAttributeList=[None]*idx_num\n\t\t\t\t\t\tAttributeList[idx_isLink] = pathislink(p)\n\t\t\t\t\t\tAttributeList[idx_isDir] = True # we are in subdirlist\n\t\t\t\t\t\tAttributeList[idx_isFile] = False # we are in subdirlist\n\t\t\t\t\t\tAttributeList[idx_Date] = pathexists(p) and MovieCenterInst.checkDate(p, True)\n\t\t\t\t\t\tAttributeList[idx_realpath] = pathreal(p) #for dirs only\n\t\t\t\t\t\tself.cacheAttributeList[p] = AttributeList\n\t\t\t\tself.cacheFileList[path] = filelist\n\t\t\t\tfor p, n, e in filelist:\n\t\t\t\t\tif not (p in 
self.cacheAttributeList):\n\t\t\t\t\t\tAttributeList=[None]*idx_num\n\t\t\t\t\t\tAttributeList[idx_isLink] = pathislink(p)\n\t\t\t\t\t\tAttributeList[idx_isDir] = False # we are in filelist, no entry is a real directrory ...\n\t\t\t\t\t\tAttributeList[idx_isFile] = pathisfile(p) # ... but filelist might contain virtual directories\n\t\t\t\t\t\tAttributeList[idx_Date] = pathexists(p) and MovieCenterInst.checkDate(p, False)\n\t\t\t\t\t\t#AttributeList[idx_realpath] = pathreal(p) #for dirs only\n\t\t\t\t\t\tself.cacheAttributeList[p] = AttributeList\n\t\t\telse:\n\t\t\t\tif self.cacheDirectoryList.has_key(path):\n\t\t\t\t\tself.deleteAssociatedListEntries(self.cacheDirectoryList[path])\n\t\t\t\t\tdel self.cacheDirectoryList[path]\n\t\t\t\tif self.cacheFileList.has_key(path):\n\t\t\t\t\tself.deleteAssociatedListEntries(self.cacheFileList[path])\n\t\t\t\t\tdel self.cacheFileList[path]\n#\t\tself.debugPrintDirCache()\n#\t\tself.debugPrintFileCache()\n#\t\tself.debugPrintFileAttributeCache()\n\n\tdef addRecToCacheFileList(self, path, rec):\n\t\tif config.EMC.files_cache.value:\n\t\t\tif self.cacheFileList.has_key(path):\n\t\t\t\tfilelist = self.cacheFileList[path]\n\t\t\t\tfilelist.append(rec)\n\t\t\t\tdel self.cacheFileList[path]\n\t\t\t\tself.cacheFileList[path] = filelist\n\n\tdef getCacheForPath(self, path):\n\t\tprint \"EMC getCacheForPath\", path\n\t\tif config.EMC.files_cache.value and self.cacheDirectoryList.has_key(path) and self.cacheFileList.has_key(path):\n\t\t\tsubdirlist = self.cacheDirectoryList[path]\n\t\t\tfilelist = self.cacheFileList[path]\n#\t\t\tself.debugPrintDirCache()\n#\t\t\tself.debugPrintFileCache()\n#\t\t\tself.debugPrintFileAttributeCache()\n\t\t\treturn subdirlist, filelist\n\t\telse:\n\t\t\treturn None, None\n\n\tdef isLink(self, path):\n\t\tisLink = None\n\t\tif config.EMC.files_cache.value and (path in self.cacheAttributeList):\n\t\t\tisLink = self.cacheAttributeList[path][idx_isLink]\n\t\tif isLink is None:\n\t\t\tisLink = 
pathislink(path)\n\t\treturn isLink\n\n\tdef isDir(self, path):\n\t\tisDir = None\n\t\tif (config.EMC.check_dead_links.value != \"always\") and config.EMC.files_cache.value and (path in self.cacheAttributeList):\n\t\t\tisDir = self.cacheAttributeList[path][idx_isDir]\n\t\tif isDir is None:\n\t\t\tisDir = pathisdir(path)\n\t\treturn isDir\n\n\tdef isFile(self, path):\n\t\tisFile = None\n\t\tif (config.EMC.check_dead_links.value != \"always\") and config.EMC.files_cache.value and (path in self.cacheAttributeList):\n\t\t\tisFile = self.cacheAttributeList[path][idx_isFile]\n\t\tif isFile is None:\n\t\t\tisFile = pathisfile(path)\n\t\treturn isFile\n\n\tdef realpath(self, path):\n\t\trealpath = None\n\t\tif config.EMC.files_cache.value and (path in self.cacheAttributeList):\n\t\t\trealpath = self.cacheAttributeList[path][idx_realpath]\n\t\tif realpath is None:\n\t\t\trealpath = pathreal(path)\n\t\treturn realpath\n\n\tdef getDateInfoFromCacheForPath(self, path):\n\t\tif config.EMC.files_cache.value and (path in self.cacheAttributeList):\n\t\t\treturn self.cacheAttributeList[path][idx_Date]\n\t\telse:\n\t\t\treturn None\n\n\tdef getDirsFromCacheForPath(self, path):\n\t\tif config.EMC.files_cache.value and self.cacheDirectoryList.has_key(path):\n\t\t\tsubdirlist = self.cacheDirectoryList[path]\n\t\t\treturn subdirlist\n\t\telse:\n\t\t\treturn None\n\n\tdef getFilesFromCacheForPath(self, path):\n\t\tif config.EMC.files_cache.value and self.cacheFileList.has_key(path):\n\t\t\tfilelist = self.cacheFileList[path]\n\t\t\treturn filelist\n\t\telse:\n\t\t\treturn None\n\n\tdef IsPathInCache(self, path):\n\t\tif config.EMC.files_cache.value and self.cacheDirectoryList.has_key(path) and self.cacheFileList.has_key(path):\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef IsPathWithDirsInCache(self, path):\n\t\tif config.EMC.files_cache.value and self.cacheDirectoryList.has_key(path):\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef IsPathWithFilesInCache(self, 
path):\n\t\tif config.EMC.files_cache.value and self.cacheFileList.has_key(path):\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef delPathFromCache(self, path):\n\t\tif len(path)>1 and path[-1]==\"/\":\n\t\t\tpath = path[:-1]\n\t\tprint \"EMC delPathFromCache\", path\n\t\tif self.cacheDirectoryList.has_key(path):\n\t\t\tself.deleteAssociatedListEntries(self.cacheDirectoryList[path])\n\t\t\tdel self.cacheDirectoryList[path]\n\t\tif self.cacheFileList.has_key(path):\n\t\t\tself.deleteAssociatedListEntries(self.cacheFileList[path])\n\t\t\tdel self.cacheFileList[path]\n#\t\tself.debugPrintDirCache()\n#\t\tself.debugPrintFileCache()\n#\t\tself.debugPrintFileAttributeCache()\n\n\tdef delPathFromDirCache(self, path):\n\t\tif len(path)>1 and path[-1]==\"/\":\n\t\t\tpath = path[:-1]\n\t\tif self.cacheDirectoryList.has_key(path):\n\t\t\tself.deleteAssociatedListEntries(self.cacheDirectoryList[path])\n\t\t\tdel self.cacheDirectoryList[path]\n\n\tdef delPathFromFileCache(self, path):\n\t\tif len(path)>1 and path[-1]==\"/\":\n\t\t\tpath = path[:-1]\n\t\tif self.cacheFileList.has_key(path):\n\t\t\tself.deleteAssociatedListEntries(self.cacheFileList[path])\n\t\t\tdel self.cacheFileList[path]\n\n\tdef debugPrintFileCache(self):\n\t\tprint \"cacheFileList:\"\n\t\tfor p in self.cacheFileList:\n\t\t\tprint p,self.cacheFileList[p]\n\t\tprint \"\"\n\n\tdef debugPrintDirCache(self):\n\t\tprint \"cacheDirectoryList:\"\n\t\tfor p in self.cacheDirectoryList:\n\t\t\tprint p,self.cacheDirectoryList[p]\n\t\tprint \"\"\n\n\tdef debugPrintFileAttributeCache(self):\n\t\tprint \"cacheAttributeList:\"\n\t\tfor p in self.cacheAttributeList:\n\t\t\tprint p,self.cacheAttributeList[p]\n\t\tprint \"\"\n\n\tdef deleteAssociatedListEntries(self, list):\n\t\tfor p, n, e in list:\n\t\t\tif p in self.cacheAttributeList and (config.EMC.check_dead_links.value != \"only_initially\"):\n\t\t\t\tdel self.cacheAttributeList[p]\n\nmovieFileCache = EMCFileCache()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import unittest
import json
import os
import copy
from nested.nested_dict import NestedDict
from pprint import pprint
class TestNestedDict(unittest.TestCase):
    """Exercise the NestedDict get/set/create/update and merge helpers."""

    @classmethod
    def setUpClass(cls):
        # Shared fixtures: a tiny hand-built nested dict and the larger
        # food sample loaded once from the package data file.
        path = os.path.dirname(__file__)
        cls.afile = os.path.join(path, '../nested/data/food_nested_dict.json')
        cls.nd = NestedDict()
        cls.d = {'a': {'b': {'c': 'C'}}}
        with open(cls.afile, 'r') as fp:
            cls.dfood = json.load(fp)

    def test_file(self):
        """The sample data file must exist on disk."""
        self.assertTrue(os.path.isfile(self.afile))

    def test_dfood(self):
        """The sample data exposes exactly the expected top-level ids."""
        # Compare sorted keys: dict key order is not guaranteed, and a raw
        # keys()-vs-list comparison would also fail on Python 3 dict views.
        self.assertEqual(sorted(self.dfood.keys()), [u'0001', u'0002', u'0003'])

    def test_get(self):
        """get() retrieves values written by set() at every nesting depth."""
        v = self.nd.get(keys=['a', 'b', 'c'], dnow=self.d)
        self.assertEqual(v, 'C')

        # depth 0: no key of the chain exists yet
        dc = copy.deepcopy(self.d)
        items = ['x', 'y', 'z']
        dchg = self.nd.set(value='E', keys=items, dnow=dc)
        v = self.nd.get(keys=['x', 'y', 'z'], dnow=dchg)
        self.assertEqual(v, 'E')

        # depth 1: only the first key exists
        dc = copy.deepcopy(self.d)
        items = ['a', 'y', 'z']
        dchg = self.nd.set(value='E', keys=items, dnow=dc)
        v = self.nd.get(keys=['a', 'y', 'z'], dnow=dchg)
        self.assertEqual(v, 'E')

        # depth 2: the first two keys exist
        dc = copy.deepcopy(self.d)
        items = ['a', 'b', 'e']
        dchg = self.nd.set(value='E', keys=items, dnow=dc)
        v = self.nd.get(keys=['a', 'b', 'e'], dnow=dchg)
        self.assertEqual(v, 'E')

        # depth 3: the full chain exists and the leaf is overwritten
        dc = copy.deepcopy(self.d)
        items = ['a', 'b', 'c']
        dchg = self.nd.set(value='E', keys=items, dnow=dc)
        v = self.nd.get(keys=['a', 'b', 'c'], dnow=dchg)
        self.assertEqual(v, 'E')

    def test_set(self):
        """set() updates leaves, adds keys, and accepts dict values."""
        # update the last dict with a new value for an existing key
        dcopy = copy.deepcopy(self.dfood)
        dchg = self.nd.set(value='topless', keys=[u'0002', u'topping', u'5001', u'type'], dnow=dcopy)
        value = self.nd.get(keys=[u'0002', u'topping', u'5001'], dnow=dchg)
        self.assertEqual(value, {'id': '5001', 'type': 'topless'})

        # update the last dict with a new key/value pair (no new dict level)
        dcopy = copy.deepcopy(self.dfood)
        dchg = self.nd.set(value='5.01', keys=['0002', 'topping', '5001', 'price'], dnow=dcopy)
        value = self.nd.get(keys=['0002', 'topping', '5001'], dnow=dchg)
        self.assertEqual(value, {'id': '5001', 'type': u'None', 'price': '5.01'})

        # an int key creates a fresh branch alongside the string keys
        dcopy = copy.deepcopy(self.dfood)
        dchg = self.nd.set(value='topless', keys=[35, 'topping', '5001', 'type'], dnow=dcopy)
        argv = [35, 'topping', '5001']
        value = self.nd.get(keys=argv, dnow=dchg)
        self.assertEqual(value, {'type': 'topless'})

        # special condition: a dict value with an 'id' replaces the leaf
        dcopy = copy.deepcopy(self.dfood)
        dnew = {'id': 555, 'type': 'berry', 'price': 0.99}
        dchg = self.nd.set(value=dnew, keys=['0002', 'topping', '5001'], dnow=dcopy)
        value = self.nd.get(keys=['0002', 'topping', '5001'], dnow=dchg)
        self.assertEqual(value, dnew)

        # a dict value without an 'id' is merged into the existing leaf
        dcopy = copy.deepcopy(self.dfood)
        dnew = {'Type': 'berry', 'price': 0.99}
        dchg = self.nd.set(value=dnew, keys=['0002', 'topping', '5001'], dnow=dcopy)
        value = self.nd.get(keys=['0002', 'topping', '5001'], dnow=dchg)
        self.assertEqual(value, {u'id': u'5001', 'Type': 'berry', 'price': 0.99, u'type': u'None'})

    def test_create(self):
        """create() builds the whole chain of nested dicts for the keys."""
        keys = ['a', 'b', 'c']
        value = {u'd': 1}
        d = self.nd.create(value=value, keys=keys)
        dchg = {'a': {'b': {'c': {u'd': 1}}}}
        self.assertEqual(d, dchg)

    def test_update(self):
        """update() deep-merges the change dict into the current dict."""
        d_original = {'hello1': 1}
        dup = {'hello2': 2}
        d = self.nd.update(dchg=dup, dnow=d_original)
        self.assertEqual(d, {'hello1': 1, 'hello2': 2})

        # NOTE: update() mutates dnow in place, so only the merged result
        # (not d_original) is checked for the exact key set.
        self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))
        value = self.nd.get(keys=['hello2'], dnow=d)
        self.assertEqual(value, 2)

        # scalar overwrite
        d_original = {'hello': 'to_override'}
        dup = {'hello': 'over'}
        d = self.nd.update(dchg=dup, dnow=d_original)
        self.assertEqual(d, {'hello': 'over'})

        # nested overwrite keeps sibling keys untouched
        d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
        dup = {'hello': {'value': 'over'}}
        d = self.nd.update(dchg=dup, dnow=d_original)
        self.assertEqual(d, {'hello': {'value': 'over', 'no_change': 1}})
        value = self.nd.get(keys=['hello', 'no_change'], dnow=d_original)
        self.assertEqual(value, 1)

        # a dict value may replace a scalar ...
        d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
        dup = {'hello': {'value': {}}}
        dchg = self.nd.update(dchg=dup, dnow=d_original)
        self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})

        # ... and a scalar may replace a dict
        d_original = {'hello': {'value': {}, 'no_change': 1}}
        dup = {'hello': {'value': 2}}
        dchg = self.nd.update(dchg=dup, dnow=d_original)
        self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})

    def test_merge_shallow(self):
        """merge_shallow() merges top-level keys; nested dicts are replaced."""
        d = {}
        dchg = {}
        du = self.nd.merge_shallow(dchg=dchg, dnow=d)
        self.assertEqual(du, d)

        d_original = {'hello1': 1}
        dup = {'hello2': 2}
        du = self.nd.merge_shallow(dchg=dup, dnow=d_original)
        self.assertEqual(du, {'hello1': 1, 'hello2': 2})

        # unlike update(), nested sibling keys are NOT preserved
        d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
        dup = {'hello': {'value': 'over'}}
        d = self.nd.merge_shallow(dchg=dup, dnow=d_original)
        self.assertEqual(d, {'hello': {'value': 'over'}})

    def test_update2(self):
        """update2() behaves like update() for all covered merge cases."""
        d_original = {'hello1': 1}
        dup = {'hello2': 2}
        d = self.nd.update2(dchg=dup, dnow=d_original)
        self.assertEqual(d, {'hello1': 1, 'hello2': 2})
        self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))
        value = self.nd.get(keys=['hello2'], dnow=d)
        self.assertEqual(value, 2)

        # scalar overwrite
        d_original = {'hello': 'to_override'}
        dup = {'hello': 'over'}
        d = self.nd.update2(dchg=dup, dnow=d_original)
        self.assertEqual(d, {'hello': 'over'})

        # nested overwrite keeps sibling keys untouched
        d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
        dup = {'hello': {'value': 'over'}}
        d = self.nd.update2(dchg=dup, dnow=d_original)
        self.assertEqual(d, {'hello': {'value': 'over', 'no_change': 1}})
        value = self.nd.get(keys=['hello', 'no_change'], dnow=d_original)
        self.assertEqual(value, 1)

        # a dict value may replace a scalar ...
        d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
        dup = {'hello': {'value': {}}}
        dchg = self.nd.update2(dchg=dup, dnow=d_original)
        self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})

        # ... and a scalar may replace a dict
        d_original = {'hello': {'value': {}, 'no_change': 1}}
        dup = {'hello': {'value': 2}}
        dchg = self.nd.update2(dchg=dup, dnow=d_original)
        self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})
|
normal
|
{
"blob_id": "f9a255a464b5f48a1a8be2e2887db721a92e7f4e",
"index": 1474,
"step-1": "<mask token>\n\n\nclass TestNestedDict(unittest.TestCase):\n <mask token>\n <mask token>\n\n def test_dfood(self):\n self.assertEqual(self.dfood.keys(), [u'0001', u'0002', u'0003'])\n <mask token>\n <mask token>\n <mask token>\n\n def test_update(self):\n d_original = {'hello1': 1}\n dup = {'hello2': 2}\n d = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello1': 1, 'hello2': 2})\n self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))\n value = self.nd.get(keys=['hello2'], dnow=d)\n self.assertEqual(value, 2)\n d_original = {'hello': 'to_override'}\n dup = {'hello': 'over'}\n d = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': 'over'})\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': 'over'}}\n d = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': {'value': 'over', 'no_change': 1}})\n value = self.nd.get(keys=['hello', 'no_change'], dnow=d_original)\n self.assertEqual(value, 1)\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': {}}}\n dchg = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})\n d_original = {'hello': {'value': {}, 'no_change': 1}}\n dup = {'hello': {'value': 2}}\n dchg = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestNestedDict(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n path = os.path.dirname(__file__)\n cls.afile = os.path.join(path, '../nested/data/food_nested_dict.json')\n cls.nd = NestedDict()\n cls.d = {'a': {'b': {'c': 'C'}}}\n with open(cls.afile, 'r') as fp:\n cls.dfood = json.load(fp)\n <mask token>\n\n def test_dfood(self):\n self.assertEqual(self.dfood.keys(), [u'0001', u'0002', u'0003'])\n\n def test_get(self):\n v = self.nd.get(keys=['a', 'b', 'c'], dnow=self.d)\n self.assertEqual(v, 'C')\n dc = copy.deepcopy(self.d)\n items = ['x', 'y', 'z']\n dchg = self.nd.set(value='E', keys=items, dnow=dc)\n v = self.nd.get(keys=['x', 'y', 'z'], dnow=dchg)\n self.assertEqual(v, 'E')\n dc = copy.deepcopy(self.d)\n items = ['a', 'y', 'z']\n dchg = self.nd.set(value='E', keys=items, dnow=dc)\n v = self.nd.get(keys=['a', 'y', 'z'], dnow=dchg)\n self.assertEqual(v, 'E')\n dc = copy.deepcopy(self.d)\n items = ['a', 'b', 'e']\n dchg = self.nd.set(value='E', keys=items, dnow=dc)\n v = self.nd.get(keys=['a', 'b', 'e'], dnow=dchg)\n self.assertEqual(v, 'E')\n dc = copy.deepcopy(self.d)\n items = ['a', 'b', 'c']\n dchg = self.nd.set(value='E', keys=items, dnow=dc)\n v = self.nd.get(keys=['a', 'b', 'c'], dnow=dchg)\n self.assertEqual(v, 'E')\n <mask token>\n <mask token>\n\n def test_update(self):\n d_original = {'hello1': 1}\n dup = {'hello2': 2}\n d = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello1': 1, 'hello2': 2})\n self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))\n value = self.nd.get(keys=['hello2'], dnow=d)\n self.assertEqual(value, 2)\n d_original = {'hello': 'to_override'}\n dup = {'hello': 'over'}\n d = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': 'over'})\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': 'over'}}\n d = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': {'value': 'over', 
'no_change': 1}})\n value = self.nd.get(keys=['hello', 'no_change'], dnow=d_original)\n self.assertEqual(value, 1)\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': {}}}\n dchg = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})\n d_original = {'hello': {'value': {}, 'no_change': 1}}\n dup = {'hello': {'value': 2}}\n dchg = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})\n <mask token>\n\n def test_update2(self):\n d_original = {'hello1': 1}\n dup = {'hello2': 2}\n d = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello1': 1, 'hello2': 2})\n self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))\n value = self.nd.get(keys=['hello2'], dnow=d)\n self.assertEqual(value, 2)\n d_original = {'hello': 'to_override'}\n dup = {'hello': 'over'}\n d = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': 'over'})\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': 'over'}}\n d = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': {'value': 'over', 'no_change': 1}})\n value = self.nd.get(keys=['hello', 'no_change'], dnow=d_original)\n self.assertEqual(value, 1)\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': {}}}\n dchg = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})\n d_original = {'hello': {'value': {}, 'no_change': 1}}\n dup = {'hello': {'value': 2}}\n dchg = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})\n",
"step-3": "<mask token>\n\n\nclass TestNestedDict(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n path = os.path.dirname(__file__)\n cls.afile = os.path.join(path, '../nested/data/food_nested_dict.json')\n cls.nd = NestedDict()\n cls.d = {'a': {'b': {'c': 'C'}}}\n with open(cls.afile, 'r') as fp:\n cls.dfood = json.load(fp)\n <mask token>\n\n def test_dfood(self):\n self.assertEqual(self.dfood.keys(), [u'0001', u'0002', u'0003'])\n\n def test_get(self):\n v = self.nd.get(keys=['a', 'b', 'c'], dnow=self.d)\n self.assertEqual(v, 'C')\n dc = copy.deepcopy(self.d)\n items = ['x', 'y', 'z']\n dchg = self.nd.set(value='E', keys=items, dnow=dc)\n v = self.nd.get(keys=['x', 'y', 'z'], dnow=dchg)\n self.assertEqual(v, 'E')\n dc = copy.deepcopy(self.d)\n items = ['a', 'y', 'z']\n dchg = self.nd.set(value='E', keys=items, dnow=dc)\n v = self.nd.get(keys=['a', 'y', 'z'], dnow=dchg)\n self.assertEqual(v, 'E')\n dc = copy.deepcopy(self.d)\n items = ['a', 'b', 'e']\n dchg = self.nd.set(value='E', keys=items, dnow=dc)\n v = self.nd.get(keys=['a', 'b', 'e'], dnow=dchg)\n self.assertEqual(v, 'E')\n dc = copy.deepcopy(self.d)\n items = ['a', 'b', 'c']\n dchg = self.nd.set(value='E', keys=items, dnow=dc)\n v = self.nd.get(keys=['a', 'b', 'c'], dnow=dchg)\n self.assertEqual(v, 'E')\n <mask token>\n\n def test_create(self):\n keys = ['a', 'b', 'c']\n value = {u'd': 1}\n d = self.nd.create(value=value, keys=keys)\n dchg = {'a': {'b': {'c': {u'd': 1}}}}\n self.assertEqual(d, dchg)\n\n def test_update(self):\n d_original = {'hello1': 1}\n dup = {'hello2': 2}\n d = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello1': 1, 'hello2': 2})\n self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))\n value = self.nd.get(keys=['hello2'], dnow=d)\n self.assertEqual(value, 2)\n d_original = {'hello': 'to_override'}\n dup = {'hello': 'over'}\n d = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': 'over'})\n d_original = {'hello': {'value': 
'to_override', 'no_change': 1}}\n dup = {'hello': {'value': 'over'}}\n d = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': {'value': 'over', 'no_change': 1}})\n value = self.nd.get(keys=['hello', 'no_change'], dnow=d_original)\n self.assertEqual(value, 1)\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': {}}}\n dchg = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})\n d_original = {'hello': {'value': {}, 'no_change': 1}}\n dup = {'hello': {'value': 2}}\n dchg = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})\n\n def test_merge_shallow(self):\n d = {}\n dchg = {}\n du = self.nd.merge_shallow(dchg=dchg, dnow=d)\n self.assertEqual(du, d)\n d_original = {'hello1': 1}\n dup = {'hello2': 2}\n du = self.nd.merge_shallow(dchg=dup, dnow=d_original)\n self.assertEqual(du, {'hello1': 1, 'hello2': 2})\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': 'over'}}\n d = self.nd.merge_shallow(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': {'value': 'over'}})\n\n def test_update2(self):\n d_original = {'hello1': 1}\n dup = {'hello2': 2}\n d = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello1': 1, 'hello2': 2})\n self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))\n value = self.nd.get(keys=['hello2'], dnow=d)\n self.assertEqual(value, 2)\n d_original = {'hello': 'to_override'}\n dup = {'hello': 'over'}\n d = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': 'over'})\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': 'over'}}\n d = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': {'value': 'over', 'no_change': 1}})\n value = self.nd.get(keys=['hello', 'no_change'], dnow=d_original)\n self.assertEqual(value, 1)\n 
d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': {}}}\n dchg = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})\n d_original = {'hello': {'value': {}, 'no_change': 1}}\n dup = {'hello': {'value': 2}}\n dchg = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})\n",
"step-4": "<mask token>\n\n\nclass TestNestedDict(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n path = os.path.dirname(__file__)\n cls.afile = os.path.join(path, '../nested/data/food_nested_dict.json')\n cls.nd = NestedDict()\n cls.d = {'a': {'b': {'c': 'C'}}}\n with open(cls.afile, 'r') as fp:\n cls.dfood = json.load(fp)\n\n def test_file(self):\n self.assertTrue(os.path.isfile(self.afile))\n\n def test_dfood(self):\n self.assertEqual(self.dfood.keys(), [u'0001', u'0002', u'0003'])\n\n def test_get(self):\n v = self.nd.get(keys=['a', 'b', 'c'], dnow=self.d)\n self.assertEqual(v, 'C')\n dc = copy.deepcopy(self.d)\n items = ['x', 'y', 'z']\n dchg = self.nd.set(value='E', keys=items, dnow=dc)\n v = self.nd.get(keys=['x', 'y', 'z'], dnow=dchg)\n self.assertEqual(v, 'E')\n dc = copy.deepcopy(self.d)\n items = ['a', 'y', 'z']\n dchg = self.nd.set(value='E', keys=items, dnow=dc)\n v = self.nd.get(keys=['a', 'y', 'z'], dnow=dchg)\n self.assertEqual(v, 'E')\n dc = copy.deepcopy(self.d)\n items = ['a', 'b', 'e']\n dchg = self.nd.set(value='E', keys=items, dnow=dc)\n v = self.nd.get(keys=['a', 'b', 'e'], dnow=dchg)\n self.assertEqual(v, 'E')\n dc = copy.deepcopy(self.d)\n items = ['a', 'b', 'c']\n dchg = self.nd.set(value='E', keys=items, dnow=dc)\n v = self.nd.get(keys=['a', 'b', 'c'], dnow=dchg)\n self.assertEqual(v, 'E')\n\n def test_set(self):\n dcopy = copy.deepcopy(self.dfood)\n dchg = self.nd.set(value='topless', keys=[u'0002', u'topping',\n u'5001', u'type'], dnow=dcopy)\n value = self.nd.get(keys=[u'0002', u'topping', u'5001'], dnow=dchg)\n self.assertEqual(value, {'id': '5001', 'type': 'topless'})\n dcopy = copy.deepcopy(self.dfood)\n dchg = self.nd.set(value='5.01', keys=['0002', 'topping', '5001',\n 'price'], dnow=dcopy)\n value = self.nd.get(keys=['0002', 'topping', '5001'], dnow=dchg)\n self.assertEqual(value, {'id': '5001', 'type': u'None', 'price':\n '5.01'})\n dcopy = copy.deepcopy(self.dfood)\n dchg = self.nd.set(value='topless', keys=[35, 
'topping', '5001',\n 'type'], dnow=dcopy)\n pprint(dchg)\n argv = [35, 'topping', '5001']\n value = self.nd.get(keys=argv, dnow=dchg)\n self.assertEqual(value, {'type': 'topless'})\n dcopy = copy.deepcopy(self.dfood)\n dnew = {'id': 555, 'type': 'berry', 'price': 0.99}\n dchg = self.nd.set(value=dnew, keys=['0002', 'topping', '5001'],\n dnow=dcopy)\n value = self.nd.get(keys=['0002', 'topping', '5001'], dnow=dchg)\n pprint(value)\n self.assertEqual(value, dnew)\n dcopy = copy.deepcopy(self.dfood)\n dnew = {'Type': 'berry', 'price': 0.99}\n dchg = self.nd.set(value=dnew, keys=['0002', 'topping', '5001'],\n dnow=dcopy)\n value = self.nd.get(keys=['0002', 'topping', '5001'], dnow=dchg)\n self.assertEqual(value, {u'id': u'5001', 'Type': 'berry', 'price': \n 0.99, u'type': u'None'})\n\n def test_create(self):\n keys = ['a', 'b', 'c']\n value = {u'd': 1}\n d = self.nd.create(value=value, keys=keys)\n dchg = {'a': {'b': {'c': {u'd': 1}}}}\n self.assertEqual(d, dchg)\n\n def test_update(self):\n d_original = {'hello1': 1}\n dup = {'hello2': 2}\n d = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello1': 1, 'hello2': 2})\n self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))\n value = self.nd.get(keys=['hello2'], dnow=d)\n self.assertEqual(value, 2)\n d_original = {'hello': 'to_override'}\n dup = {'hello': 'over'}\n d = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': 'over'})\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': 'over'}}\n d = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': {'value': 'over', 'no_change': 1}})\n value = self.nd.get(keys=['hello', 'no_change'], dnow=d_original)\n self.assertEqual(value, 1)\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': {}}}\n dchg = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})\n d_original = 
{'hello': {'value': {}, 'no_change': 1}}\n dup = {'hello': {'value': 2}}\n dchg = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})\n\n def test_merge_shallow(self):\n d = {}\n dchg = {}\n du = self.nd.merge_shallow(dchg=dchg, dnow=d)\n self.assertEqual(du, d)\n d_original = {'hello1': 1}\n dup = {'hello2': 2}\n du = self.nd.merge_shallow(dchg=dup, dnow=d_original)\n self.assertEqual(du, {'hello1': 1, 'hello2': 2})\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': 'over'}}\n d = self.nd.merge_shallow(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': {'value': 'over'}})\n\n def test_update2(self):\n d_original = {'hello1': 1}\n dup = {'hello2': 2}\n d = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello1': 1, 'hello2': 2})\n self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))\n value = self.nd.get(keys=['hello2'], dnow=d)\n self.assertEqual(value, 2)\n d_original = {'hello': 'to_override'}\n dup = {'hello': 'over'}\n d = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': 'over'})\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': 'over'}}\n d = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': {'value': 'over', 'no_change': 1}})\n value = self.nd.get(keys=['hello', 'no_change'], dnow=d_original)\n self.assertEqual(value, 1)\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': {}}}\n dchg = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})\n d_original = {'hello': {'value': {}, 'no_change': 1}}\n dup = {'hello': {'value': 2}}\n dchg = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})\n",
"step-5": "import unittest\nimport json\nimport os\nimport copy\nfrom nested.nested_dict import NestedDict\nfrom pprint import pprint\n\n\nclass TestNestedDict(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n path = os.path.dirname(__file__)\n cls.afile = os.path.join(path, '../nested/data/food_nested_dict.json')\n cls.nd = NestedDict()\n cls.d = {'a': {'b': {'c': 'C'}}}\n with open(cls.afile, 'r') as fp:\n cls.dfood = json.load(fp)\n\n def test_file(self):\n self.assertTrue(os.path.isfile(self.afile))\n\n def test_dfood(self):\n self.assertEqual(self.dfood.keys(), [u'0001', u'0002', u'0003'])\n\n def test_get(self):\n v = self.nd.get(keys=['a', 'b', 'c'], dnow=self.d)\n self.assertEqual(v, 'C')\n\n # depth 0\n dc = copy.deepcopy(self.d)\n items = ['x', 'y', 'z']\n dchg = self.nd.set(value='E', keys=items, dnow=dc)\n v = self.nd.get(keys=['x', 'y', 'z'], dnow=dchg)\n self.assertEqual(v, 'E')\n\n # depth 1\n dc = copy.deepcopy(self.d)\n items = ['a', 'y', 'z']\n dchg = self.nd.set(value='E', keys=items, dnow=dc)\n v = self.nd.get(keys=['a', 'y', 'z'], dnow=dchg)\n self.assertEqual(v, 'E')\n\n # depth 2\n dc = copy.deepcopy(self.d)\n items = ['a', 'b', 'e']\n dchg = self.nd.set(value='E', keys=items, dnow=dc)\n v = self.nd.get(keys=['a', 'b', 'e'], dnow=dchg)\n self.assertEqual(v, 'E')\n\n # depth 3\n dc = copy.deepcopy(self.d)\n items = ['a', 'b', 'c']\n dchg = self.nd.set(value='E', keys=items, dnow=dc)\n v = self.nd.get(keys=['a', 'b', 'c'], dnow=dchg)\n self.assertEqual(v, 'E')\n\n def test_set(self):\n # update the lastdict with new value of the same key\n dcopy = copy.deepcopy(self.dfood)\n dchg = self.nd.set(value='topless', keys=[u'0002', u'topping', u'5001', u'type'], dnow=dcopy)\n value = self.nd.get(keys=[u'0002', u'topping', u'5001'], dnow=dchg)\n self.assertEqual(value, {'id': '5001', 'type': 'topless'})\n\n # update the lastdict with new key: value, but not new dict\n dcopy = copy.deepcopy(self.dfood)\n dchg = self.nd.set(value='5.01', 
keys=['0002', 'topping', '5001', 'price'], dnow=dcopy)\n value = self.nd.get(keys=['0002', 'topping', '5001'], dnow=dchg)\n self.assertEqual(value, {'id': '5001', 'type': u'None', 'price': '5.01'})\n\n # int key\n dcopy = copy.deepcopy(self.dfood)\n dchg = self.nd.set(value='topless', keys=[35, 'topping', '5001', 'type'], dnow=dcopy)\n pprint(dchg)\n argv = [35, 'topping', '5001']\n value = self.nd.get(keys=argv, dnow=dchg)\n self.assertEqual(value, {'type': 'topless'})\n\n # special condition value to be dict\n dcopy = copy.deepcopy(self.dfood)\n dnew = {'id': 555, 'type': 'berry', 'price': 0.99}\n dchg = self.nd.set(value=dnew, keys=['0002', 'topping', '5001'], dnow=dcopy)\n\n value = self.nd.get(keys=['0002', 'topping', '5001'], dnow=dchg)\n pprint(value)\n self.assertEqual(value, dnew)\n\n # without id\n dcopy = copy.deepcopy(self.dfood)\n dnew = {'Type': 'berry', 'price': 0.99}\n dchg = self.nd.set(value=dnew, keys=['0002', 'topping', '5001'], dnow=dcopy)\n value = self.nd.get(keys=['0002', 'topping', '5001'], dnow=dchg)\n self.assertEqual(value, {u'id': u'5001', 'Type': 'berry', 'price': 0.99, u'type': u'None'})\n\n def test_create(self):\n keys = ['a', 'b', 'c']\n value = {u'd': 1}\n d = self.nd.create(value=value, keys=keys)\n dchg = {'a': {'b': {'c': {u'd': 1}}}}\n self.assertEqual(d, dchg)\n\n def test_update(self):\n d_original = {'hello1': 1}\n dup = {'hello2': 2}\n d = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello1': 1, 'hello2': 2})\n\n # d_original did not change\n self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))\n # dnow in parameters will be updated(!)\n # self.assertEqual(d_original.keys(), ['hello1'])\n\n value = self.nd.get(keys=['hello2'], dnow=d)\n self.assertEqual(value, 2)\n\n d_original = {'hello': 'to_override'}\n dup = {'hello': 'over'}\n d = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': 'over'})\n\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = 
{'hello': {'value': 'over'}}\n d = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': {'value': 'over', 'no_change': 1}})\n value = self.nd.get(keys=['hello', 'no_change'], dnow=d_original)\n self.assertEqual(value, 1)\n\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': {}}}\n dchg = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})\n\n d_original = {'hello': {'value': {}, 'no_change': 1}}\n dup = {'hello': {'value': 2}}\n dchg = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})\n\n def test_merge_shallow(self):\n d = {}\n dchg = {}\n du = self.nd.merge_shallow(dchg=dchg, dnow=d)\n self.assertEqual(du, d)\n\n d_original = {'hello1': 1}\n dup = {'hello2': 2}\n du = self.nd.merge_shallow(dchg=dup, dnow=d_original)\n self.assertEqual(du, {'hello1': 1, 'hello2': 2})\n\n # this is not shallow\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': 'over'}}\n\n d = self.nd.merge_shallow(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': {'value': 'over'}})\n\n def test_update2(self):\n d_original = {'hello1': 1}\n dup = {'hello2': 2}\n d = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello1': 1, 'hello2': 2})\n\n # d_original did not change\n self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))\n # self.assertEqual(d_original.keys(), ['hello1'])\n\n value = self.nd.get(keys=['hello2'], dnow=d)\n self.assertEqual(value, 2)\n\n d_original = {'hello': 'to_override'}\n dup = {'hello': 'over'}\n d = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': 'over'})\n\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': 'over'}}\n d = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': {'value': 'over', 'no_change': 1}})\n\n value = 
self.nd.get(keys=['hello', 'no_change'], dnow=d_original)\n self.assertEqual(value, 1)\n\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': {}}}\n dchg = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})\n\n d_original = {'hello': {'value': {}, 'no_change': 1}}\n dup = {'hello': {'value': 2}}\n dchg = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})\n",
"step-ids": [
3,
6,
8,
10,
12
]
}
|
[
3,
6,
8,
10,
12
] |
from collections import namedtuple
from os import getenv
from pathlib import Path
TMP = getenv("TMP", "/tmp")
PYBITES_FAKER_DIR = Path(getenv("PYBITES_FAKER_DIR", TMP))
CACHE_FILENAME = "pybites-fake-data.pkl"
FAKE_DATA_CACHE = PYBITES_FAKER_DIR / CACHE_FILENAME
BITE_FEED = "https://codechalleng.es/api/bites/"
BLOG_FEED = "https://pybit.es/feeds/all.rss.xml"
Bite = namedtuple("Bite", "number title level")
Article = namedtuple("Article", "author title tags")
|
normal
|
{
"blob_id": "7336b8dec95d23cbcebbff2a813bbbd5575ba58f",
"index": 2327,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nTMP = getenv('TMP', '/tmp')\nPYBITES_FAKER_DIR = Path(getenv('PYBITES_FAKER_DIR', TMP))\nCACHE_FILENAME = 'pybites-fake-data.pkl'\nFAKE_DATA_CACHE = PYBITES_FAKER_DIR / CACHE_FILENAME\nBITE_FEED = 'https://codechalleng.es/api/bites/'\nBLOG_FEED = 'https://pybit.es/feeds/all.rss.xml'\nBite = namedtuple('Bite', 'number title level')\nArticle = namedtuple('Article', 'author title tags')\n",
"step-3": "from collections import namedtuple\nfrom os import getenv\nfrom pathlib import Path\nTMP = getenv('TMP', '/tmp')\nPYBITES_FAKER_DIR = Path(getenv('PYBITES_FAKER_DIR', TMP))\nCACHE_FILENAME = 'pybites-fake-data.pkl'\nFAKE_DATA_CACHE = PYBITES_FAKER_DIR / CACHE_FILENAME\nBITE_FEED = 'https://codechalleng.es/api/bites/'\nBLOG_FEED = 'https://pybit.es/feeds/all.rss.xml'\nBite = namedtuple('Bite', 'number title level')\nArticle = namedtuple('Article', 'author title tags')\n",
"step-4": "from collections import namedtuple\nfrom os import getenv\nfrom pathlib import Path\n\nTMP = getenv(\"TMP\", \"/tmp\")\nPYBITES_FAKER_DIR = Path(getenv(\"PYBITES_FAKER_DIR\", TMP))\nCACHE_FILENAME = \"pybites-fake-data.pkl\"\nFAKE_DATA_CACHE = PYBITES_FAKER_DIR / CACHE_FILENAME\nBITE_FEED = \"https://codechalleng.es/api/bites/\"\nBLOG_FEED = \"https://pybit.es/feeds/all.rss.xml\"\n\nBite = namedtuple(\"Bite\", \"number title level\")\nArticle = namedtuple(\"Article\", \"author title tags\")\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
v1=int(input("Introdu virsta primei persoane"))
v2=int(input("Introdu virsta persoanei a doua"))
v3=int(input("Introdu virsta persoanei a treia"))
if ((v1>18)and(v1<60)):
print(v1)
elif((v2>18)and(v2<60)):
print(v2)
elif((v3>18)and(v3<60)):
print(v3)
|
normal
|
{
"blob_id": "b8c749052af0061373808addea3ad419c35e1a29",
"index": 3324,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif v1 > 18 and v1 < 60:\n print(v1)\nelif v2 > 18 and v2 < 60:\n print(v2)\nelif v3 > 18 and v3 < 60:\n print(v3)\n",
"step-3": "v1 = int(input('Introdu virsta primei persoane'))\nv2 = int(input('Introdu virsta persoanei a doua'))\nv3 = int(input('Introdu virsta persoanei a treia'))\nif v1 > 18 and v1 < 60:\n print(v1)\nelif v2 > 18 and v2 < 60:\n print(v2)\nelif v3 > 18 and v3 < 60:\n print(v3)\n",
"step-4": "v1=int(input(\"Introdu virsta primei persoane\"))\r\nv2=int(input(\"Introdu virsta persoanei a doua\"))\r\nv3=int(input(\"Introdu virsta persoanei a treia\"))\r\nif ((v1>18)and(v1<60)):\r\n print(v1)\r\nelif((v2>18)and(v2<60)):\r\n print(v2)\r\nelif((v3>18)and(v3<60)):\r\n print(v3)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from .personal_questions import *
from .survey_questions import *
|
normal
|
{
"blob_id": "a8f2d527e9824d3986f4bb49c3cc75fd0d999bf7",
"index": 3290,
"step-1": "<mask token>\n",
"step-2": "from .personal_questions import *\nfrom .survey_questions import *\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
def load_norm_file(fname):
"""Parse the norm file and return the mean system norms"""
try:
with open(fname, 'r') as fh:
lines = fh.readlines()
norms = [float(ll.strip().split()[0]) for ll in lines]
return norms
except:
return []
def generate_test_norms(testname):
"""Parse the log file and generate test norms"""
logname = testname + '.log'
norm_name = testname + '.norm'
cmdline = "awk '/Mean System Norm:/ { print $4, $5, $6; }' %s > %s " % (
logname, norm_name)
os.system(cmdline)
args = parse_arguments()
if args.save_norm_file != None:
copyfile(norm_name, args.save_norm_file)
return load_norm_file(norm_name)
def get_run_time(testname):
"""Return STKPERF total time"""
logname = testname + '.log'
cmdline = "awk '/STKPERF: Total Time/ { print $4; }' %s " % logname
try:
pp = subprocess.run(cmdline, shell=True, check=True, capture_output
=True)
return pp.stdout.decode('UTF-8').strip()
except:
return ''
def check_norms(test_norms, gold_norms, atol, rtol):
"""Check the regression test norms"""
if len(test_norms) != len(gold_norms):
print('Number of timesteps do not match', flush=True)
return False, 1e+16, 1e+16
test_pass = True
abs_diff = 0.0
rel_diff = 0.0
for t1, t2 in zip(test_norms, gold_norms):
adiff = abs(t1 - t2)
rdiff = abs(t1 / t2 - 1.0)
abs_diff = max(abs_diff, adiff)
rel_diff = max(rel_diff, rdiff)
if adiff > atol and rdiff > rtol:
test_pass = False
return test_pass, abs_diff, rel_diff
def main():
"""Driver function"""
args = parse_arguments()
test_norms = generate_test_norms(args.test_name)
gold_norms = load_norm_file(args.gold_norms)
run_time = get_run_time(args.test_name)
run_time = float(run_time) if run_time else 0.0
status, adiff, rdiff = check_norms(test_norms, gold_norms, args.abs_tol,
args.rel_tol)
name = args.test_name.ljust(40, '.')
status_str = 'PASS:' if status else 'FAIL:'
print('%s %-40s %10.4fs %.4e %.4e' % (status_str, name, run_time, adiff,
rdiff), flush=True)
sys.exit(0 if status else 1)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def parse_arguments():
"""Parse command line arguments"""
parser = argparse.ArgumentParser(description=
'Nalu-Wind regression test check utility')
parser.add_argument('--abs-tol', type=float, default=1e-15, help=
'Tolerance for absolute error')
parser.add_argument('--rel-tol', type=float, default=1e-07, help=
'Tolerance for relative error')
parser.add_argument('test_name', help='Regression test name')
parser.add_argument('gold_norms', help=
'Absolute path to the gold norms file')
parser.add_argument('--save-norm-file', required=False, help=
'File in which to save a copy of the norms')
return parser.parse_args()
def load_norm_file(fname):
"""Parse the norm file and return the mean system norms"""
try:
with open(fname, 'r') as fh:
lines = fh.readlines()
norms = [float(ll.strip().split()[0]) for ll in lines]
return norms
except:
return []
def generate_test_norms(testname):
"""Parse the log file and generate test norms"""
logname = testname + '.log'
norm_name = testname + '.norm'
cmdline = "awk '/Mean System Norm:/ { print $4, $5, $6; }' %s > %s " % (
logname, norm_name)
os.system(cmdline)
args = parse_arguments()
if args.save_norm_file != None:
copyfile(norm_name, args.save_norm_file)
return load_norm_file(norm_name)
def get_run_time(testname):
"""Return STKPERF total time"""
logname = testname + '.log'
cmdline = "awk '/STKPERF: Total Time/ { print $4; }' %s " % logname
try:
pp = subprocess.run(cmdline, shell=True, check=True, capture_output
=True)
return pp.stdout.decode('UTF-8').strip()
except:
return ''
def check_norms(test_norms, gold_norms, atol, rtol):
"""Check the regression test norms"""
if len(test_norms) != len(gold_norms):
print('Number of timesteps do not match', flush=True)
return False, 1e+16, 1e+16
test_pass = True
abs_diff = 0.0
rel_diff = 0.0
for t1, t2 in zip(test_norms, gold_norms):
adiff = abs(t1 - t2)
rdiff = abs(t1 / t2 - 1.0)
abs_diff = max(abs_diff, adiff)
rel_diff = max(rel_diff, rdiff)
if adiff > atol and rdiff > rtol:
test_pass = False
return test_pass, abs_diff, rel_diff
def main():
"""Driver function"""
args = parse_arguments()
test_norms = generate_test_norms(args.test_name)
gold_norms = load_norm_file(args.gold_norms)
run_time = get_run_time(args.test_name)
run_time = float(run_time) if run_time else 0.0
status, adiff, rdiff = check_norms(test_norms, gold_norms, args.abs_tol,
args.rel_tol)
name = args.test_name.ljust(40, '.')
status_str = 'PASS:' if status else 'FAIL:'
print('%s %-40s %10.4fs %.4e %.4e' % (status_str, name, run_time, adiff,
rdiff), flush=True)
sys.exit(0 if status else 1)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def parse_arguments():
"""Parse command line arguments"""
parser = argparse.ArgumentParser(description=
'Nalu-Wind regression test check utility')
parser.add_argument('--abs-tol', type=float, default=1e-15, help=
'Tolerance for absolute error')
parser.add_argument('--rel-tol', type=float, default=1e-07, help=
'Tolerance for relative error')
parser.add_argument('test_name', help='Regression test name')
parser.add_argument('gold_norms', help=
'Absolute path to the gold norms file')
parser.add_argument('--save-norm-file', required=False, help=
'File in which to save a copy of the norms')
return parser.parse_args()
def load_norm_file(fname):
"""Parse the norm file and return the mean system norms"""
try:
with open(fname, 'r') as fh:
lines = fh.readlines()
norms = [float(ll.strip().split()[0]) for ll in lines]
return norms
except:
return []
def generate_test_norms(testname):
"""Parse the log file and generate test norms"""
logname = testname + '.log'
norm_name = testname + '.norm'
cmdline = "awk '/Mean System Norm:/ { print $4, $5, $6; }' %s > %s " % (
logname, norm_name)
os.system(cmdline)
args = parse_arguments()
if args.save_norm_file != None:
copyfile(norm_name, args.save_norm_file)
return load_norm_file(norm_name)
def get_run_time(testname):
"""Return STKPERF total time"""
logname = testname + '.log'
cmdline = "awk '/STKPERF: Total Time/ { print $4; }' %s " % logname
try:
pp = subprocess.run(cmdline, shell=True, check=True, capture_output
=True)
return pp.stdout.decode('UTF-8').strip()
except:
return ''
def check_norms(test_norms, gold_norms, atol, rtol):
"""Check the regression test norms"""
if len(test_norms) != len(gold_norms):
print('Number of timesteps do not match', flush=True)
return False, 1e+16, 1e+16
test_pass = True
abs_diff = 0.0
rel_diff = 0.0
for t1, t2 in zip(test_norms, gold_norms):
adiff = abs(t1 - t2)
rdiff = abs(t1 / t2 - 1.0)
abs_diff = max(abs_diff, adiff)
rel_diff = max(rel_diff, rdiff)
if adiff > atol and rdiff > rtol:
test_pass = False
return test_pass, abs_diff, rel_diff
def main():
"""Driver function"""
args = parse_arguments()
test_norms = generate_test_norms(args.test_name)
gold_norms = load_norm_file(args.gold_norms)
run_time = get_run_time(args.test_name)
run_time = float(run_time) if run_time else 0.0
status, adiff, rdiff = check_norms(test_norms, gold_norms, args.abs_tol,
args.rel_tol)
name = args.test_name.ljust(40, '.')
status_str = 'PASS:' if status else 'FAIL:'
print('%s %-40s %10.4fs %.4e %.4e' % (status_str, name, run_time, adiff,
rdiff), flush=True)
sys.exit(0 if status else 1)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import sys
import os
import math
import subprocess
import argparse
from shutil import copyfile
def parse_arguments():
"""Parse command line arguments"""
parser = argparse.ArgumentParser(description=
'Nalu-Wind regression test check utility')
parser.add_argument('--abs-tol', type=float, default=1e-15, help=
'Tolerance for absolute error')
parser.add_argument('--rel-tol', type=float, default=1e-07, help=
'Tolerance for relative error')
parser.add_argument('test_name', help='Regression test name')
parser.add_argument('gold_norms', help=
'Absolute path to the gold norms file')
parser.add_argument('--save-norm-file', required=False, help=
'File in which to save a copy of the norms')
return parser.parse_args()
def load_norm_file(fname):
"""Parse the norm file and return the mean system norms"""
try:
with open(fname, 'r') as fh:
lines = fh.readlines()
norms = [float(ll.strip().split()[0]) for ll in lines]
return norms
except:
return []
def generate_test_norms(testname):
"""Parse the log file and generate test norms"""
logname = testname + '.log'
norm_name = testname + '.norm'
cmdline = "awk '/Mean System Norm:/ { print $4, $5, $6; }' %s > %s " % (
logname, norm_name)
os.system(cmdline)
args = parse_arguments()
if args.save_norm_file != None:
copyfile(norm_name, args.save_norm_file)
return load_norm_file(norm_name)
def get_run_time(testname):
"""Return STKPERF total time"""
logname = testname + '.log'
cmdline = "awk '/STKPERF: Total Time/ { print $4; }' %s " % logname
try:
pp = subprocess.run(cmdline, shell=True, check=True, capture_output
=True)
return pp.stdout.decode('UTF-8').strip()
except:
return ''
def check_norms(test_norms, gold_norms, atol, rtol):
"""Check the regression test norms"""
if len(test_norms) != len(gold_norms):
print('Number of timesteps do not match', flush=True)
return False, 1e+16, 1e+16
test_pass = True
abs_diff = 0.0
rel_diff = 0.0
for t1, t2 in zip(test_norms, gold_norms):
adiff = abs(t1 - t2)
rdiff = abs(t1 / t2 - 1.0)
abs_diff = max(abs_diff, adiff)
rel_diff = max(rel_diff, rdiff)
if adiff > atol and rdiff > rtol:
test_pass = False
return test_pass, abs_diff, rel_diff
def main():
"""Driver function"""
args = parse_arguments()
test_norms = generate_test_norms(args.test_name)
gold_norms = load_norm_file(args.gold_norms)
run_time = get_run_time(args.test_name)
run_time = float(run_time) if run_time else 0.0
status, adiff, rdiff = check_norms(test_norms, gold_norms, args.abs_tol,
args.rel_tol)
name = args.test_name.ljust(40, '.')
status_str = 'PASS:' if status else 'FAIL:'
print('%s %-40s %10.4fs %.4e %.4e' % (status_str, name, run_time, adiff,
rdiff), flush=True)
sys.exit(0 if status else 1)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Check mean system norm errors in regression tests
This script determines the pass/fail status of a regression test by comparing
the "Mean System Norm" values output at each timestep against "gold values"
from the reference file provided by the user.
Success is determined by the following criteria: the number of timesteps in the
log file matches the number of timesteps in the gold file, and for each
timestep the system norms meet the absolute and relative tolerances (default
1.0e-16 and 1.0e-7 respectively). The tolerances can be adjusted using command
line arguments, pass `-h` to get a brief usage message.
"""
import sys
import os
import math
import subprocess
import argparse
from shutil import copyfile
def parse_arguments():
"""Parse command line arguments"""
parser = argparse.ArgumentParser(
description="Nalu-Wind regression test check utility")
parser.add_argument(
'--abs-tol', type=float, default=1.0e-15,
help="Tolerance for absolute error")
parser.add_argument(
'--rel-tol', type=float, default=1.0e-7,
help="Tolerance for relative error")
parser.add_argument(
"test_name", help="Regression test name")
parser.add_argument(
"gold_norms", help="Absolute path to the gold norms file")
parser.add_argument(
'--save-norm-file', required=False,
help="File in which to save a copy of the norms")
return parser.parse_args()
def load_norm_file(fname):
"""Parse the norm file and return the mean system norms"""
try:
with open(fname, 'r') as fh:
lines = fh.readlines()
norms = [float(ll.strip().split()[0]) for ll in lines]
return norms
except:
return []
def generate_test_norms(testname):
"""Parse the log file and generate test norms"""
logname = testname + ".log"
norm_name = testname + ".norm"
cmdline = """awk '/Mean System Norm:/ { print $4, $5, $6; }' %s > %s """%(
logname, norm_name)
os.system(cmdline)
args = parse_arguments()
if (args.save_norm_file != None):
copyfile(norm_name, args.save_norm_file)
return load_norm_file(norm_name)
def get_run_time(testname):
"""Return STKPERF total time"""
logname = testname + ".log"
cmdline = """awk '/STKPERF: Total Time/ { print $4; }' %s """%(
logname)
try:
pp = subprocess.run(cmdline, shell=True, check=True, capture_output=True)
return pp.stdout.decode('UTF-8').strip()
except:
return ""
def check_norms(test_norms, gold_norms, atol, rtol):
"""Check the regression test norms"""
if len(test_norms) != len(gold_norms):
print("Number of timesteps do not match", flush=True)
return (False, 1.0e16, 1.0e16)
test_pass = True
abs_diff = 0.0
rel_diff = 0.0
for t1, t2 in zip(test_norms, gold_norms):
adiff = abs(t1 - t2)
rdiff = abs(t1 / t2 - 1.0)
abs_diff = max(abs_diff, adiff)
rel_diff = max(rel_diff, rdiff)
if (adiff > atol) and (rdiff > rtol):
test_pass = False
return (test_pass, abs_diff, rel_diff)
def main():
"""Driver function"""
args = parse_arguments()
test_norms = generate_test_norms(args.test_name)
gold_norms = load_norm_file(args.gold_norms)
run_time = get_run_time(args.test_name)
run_time = float(run_time) if run_time else 0.0
status, adiff, rdiff = check_norms(
test_norms, gold_norms, args.abs_tol, args.rel_tol)
name = args.test_name.ljust(40, ".")
status_str = "PASS:" if status else "FAIL:"
print("%s %-40s %10.4fs %.4e %.4e"%(
status_str, name, run_time, adiff, rdiff), flush=True)
sys.exit(0 if status else 1)
if __name__ == "__main__":
main()
|
flexible
|
{
"blob_id": "d03669924233edf33fcb6645f5ed7ab118f54a95",
"index": 7610,
"step-1": "<mask token>\n\n\ndef load_norm_file(fname):\n \"\"\"Parse the norm file and return the mean system norms\"\"\"\n try:\n with open(fname, 'r') as fh:\n lines = fh.readlines()\n norms = [float(ll.strip().split()[0]) for ll in lines]\n return norms\n except:\n return []\n\n\ndef generate_test_norms(testname):\n \"\"\"Parse the log file and generate test norms\"\"\"\n logname = testname + '.log'\n norm_name = testname + '.norm'\n cmdline = \"awk '/Mean System Norm:/ { print $4, $5, $6; }' %s > %s \" % (\n logname, norm_name)\n os.system(cmdline)\n args = parse_arguments()\n if args.save_norm_file != None:\n copyfile(norm_name, args.save_norm_file)\n return load_norm_file(norm_name)\n\n\ndef get_run_time(testname):\n \"\"\"Return STKPERF total time\"\"\"\n logname = testname + '.log'\n cmdline = \"awk '/STKPERF: Total Time/ { print $4; }' %s \" % logname\n try:\n pp = subprocess.run(cmdline, shell=True, check=True, capture_output\n =True)\n return pp.stdout.decode('UTF-8').strip()\n except:\n return ''\n\n\ndef check_norms(test_norms, gold_norms, atol, rtol):\n \"\"\"Check the regression test norms\"\"\"\n if len(test_norms) != len(gold_norms):\n print('Number of timesteps do not match', flush=True)\n return False, 1e+16, 1e+16\n test_pass = True\n abs_diff = 0.0\n rel_diff = 0.0\n for t1, t2 in zip(test_norms, gold_norms):\n adiff = abs(t1 - t2)\n rdiff = abs(t1 / t2 - 1.0)\n abs_diff = max(abs_diff, adiff)\n rel_diff = max(rel_diff, rdiff)\n if adiff > atol and rdiff > rtol:\n test_pass = False\n return test_pass, abs_diff, rel_diff\n\n\ndef main():\n \"\"\"Driver function\"\"\"\n args = parse_arguments()\n test_norms = generate_test_norms(args.test_name)\n gold_norms = load_norm_file(args.gold_norms)\n run_time = get_run_time(args.test_name)\n run_time = float(run_time) if run_time else 0.0\n status, adiff, rdiff = check_norms(test_norms, gold_norms, args.abs_tol,\n args.rel_tol)\n name = args.test_name.ljust(40, '.')\n status_str = 'PASS:' if status else 
'FAIL:'\n print('%s %-40s %10.4fs %.4e %.4e' % (status_str, name, run_time, adiff,\n rdiff), flush=True)\n sys.exit(0 if status else 1)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef parse_arguments():\n \"\"\"Parse command line arguments\"\"\"\n parser = argparse.ArgumentParser(description=\n 'Nalu-Wind regression test check utility')\n parser.add_argument('--abs-tol', type=float, default=1e-15, help=\n 'Tolerance for absolute error')\n parser.add_argument('--rel-tol', type=float, default=1e-07, help=\n 'Tolerance for relative error')\n parser.add_argument('test_name', help='Regression test name')\n parser.add_argument('gold_norms', help=\n 'Absolute path to the gold norms file')\n parser.add_argument('--save-norm-file', required=False, help=\n 'File in which to save a copy of the norms')\n return parser.parse_args()\n\n\ndef load_norm_file(fname):\n \"\"\"Parse the norm file and return the mean system norms\"\"\"\n try:\n with open(fname, 'r') as fh:\n lines = fh.readlines()\n norms = [float(ll.strip().split()[0]) for ll in lines]\n return norms\n except:\n return []\n\n\ndef generate_test_norms(testname):\n \"\"\"Parse the log file and generate test norms\"\"\"\n logname = testname + '.log'\n norm_name = testname + '.norm'\n cmdline = \"awk '/Mean System Norm:/ { print $4, $5, $6; }' %s > %s \" % (\n logname, norm_name)\n os.system(cmdline)\n args = parse_arguments()\n if args.save_norm_file != None:\n copyfile(norm_name, args.save_norm_file)\n return load_norm_file(norm_name)\n\n\ndef get_run_time(testname):\n \"\"\"Return STKPERF total time\"\"\"\n logname = testname + '.log'\n cmdline = \"awk '/STKPERF: Total Time/ { print $4; }' %s \" % logname\n try:\n pp = subprocess.run(cmdline, shell=True, check=True, capture_output\n =True)\n return pp.stdout.decode('UTF-8').strip()\n except:\n return ''\n\n\ndef check_norms(test_norms, gold_norms, atol, rtol):\n \"\"\"Check the regression test norms\"\"\"\n if len(test_norms) != len(gold_norms):\n print('Number of timesteps do not match', flush=True)\n return False, 1e+16, 1e+16\n test_pass = True\n abs_diff = 0.0\n rel_diff = 0.0\n for t1, t2 in zip(test_norms, 
gold_norms):\n adiff = abs(t1 - t2)\n rdiff = abs(t1 / t2 - 1.0)\n abs_diff = max(abs_diff, adiff)\n rel_diff = max(rel_diff, rdiff)\n if adiff > atol and rdiff > rtol:\n test_pass = False\n return test_pass, abs_diff, rel_diff\n\n\ndef main():\n \"\"\"Driver function\"\"\"\n args = parse_arguments()\n test_norms = generate_test_norms(args.test_name)\n gold_norms = load_norm_file(args.gold_norms)\n run_time = get_run_time(args.test_name)\n run_time = float(run_time) if run_time else 0.0\n status, adiff, rdiff = check_norms(test_norms, gold_norms, args.abs_tol,\n args.rel_tol)\n name = args.test_name.ljust(40, '.')\n status_str = 'PASS:' if status else 'FAIL:'\n print('%s %-40s %10.4fs %.4e %.4e' % (status_str, name, run_time, adiff,\n rdiff), flush=True)\n sys.exit(0 if status else 1)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef parse_arguments():\n \"\"\"Parse command line arguments\"\"\"\n parser = argparse.ArgumentParser(description=\n 'Nalu-Wind regression test check utility')\n parser.add_argument('--abs-tol', type=float, default=1e-15, help=\n 'Tolerance for absolute error')\n parser.add_argument('--rel-tol', type=float, default=1e-07, help=\n 'Tolerance for relative error')\n parser.add_argument('test_name', help='Regression test name')\n parser.add_argument('gold_norms', help=\n 'Absolute path to the gold norms file')\n parser.add_argument('--save-norm-file', required=False, help=\n 'File in which to save a copy of the norms')\n return parser.parse_args()\n\n\ndef load_norm_file(fname):\n \"\"\"Parse the norm file and return the mean system norms\"\"\"\n try:\n with open(fname, 'r') as fh:\n lines = fh.readlines()\n norms = [float(ll.strip().split()[0]) for ll in lines]\n return norms\n except:\n return []\n\n\ndef generate_test_norms(testname):\n \"\"\"Parse the log file and generate test norms\"\"\"\n logname = testname + '.log'\n norm_name = testname + '.norm'\n cmdline = \"awk '/Mean System Norm:/ { print $4, $5, $6; }' %s > %s \" % (\n logname, norm_name)\n os.system(cmdline)\n args = parse_arguments()\n if args.save_norm_file != None:\n copyfile(norm_name, args.save_norm_file)\n return load_norm_file(norm_name)\n\n\ndef get_run_time(testname):\n \"\"\"Return STKPERF total time\"\"\"\n logname = testname + '.log'\n cmdline = \"awk '/STKPERF: Total Time/ { print $4; }' %s \" % logname\n try:\n pp = subprocess.run(cmdline, shell=True, check=True, capture_output\n =True)\n return pp.stdout.decode('UTF-8').strip()\n except:\n return ''\n\n\ndef check_norms(test_norms, gold_norms, atol, rtol):\n \"\"\"Check the regression test norms\"\"\"\n if len(test_norms) != len(gold_norms):\n print('Number of timesteps do not match', flush=True)\n return False, 1e+16, 1e+16\n test_pass = True\n abs_diff = 0.0\n rel_diff = 0.0\n for t1, t2 in zip(test_norms, 
gold_norms):\n adiff = abs(t1 - t2)\n rdiff = abs(t1 / t2 - 1.0)\n abs_diff = max(abs_diff, adiff)\n rel_diff = max(rel_diff, rdiff)\n if adiff > atol and rdiff > rtol:\n test_pass = False\n return test_pass, abs_diff, rel_diff\n\n\ndef main():\n \"\"\"Driver function\"\"\"\n args = parse_arguments()\n test_norms = generate_test_norms(args.test_name)\n gold_norms = load_norm_file(args.gold_norms)\n run_time = get_run_time(args.test_name)\n run_time = float(run_time) if run_time else 0.0\n status, adiff, rdiff = check_norms(test_norms, gold_norms, args.abs_tol,\n args.rel_tol)\n name = args.test_name.ljust(40, '.')\n status_str = 'PASS:' if status else 'FAIL:'\n print('%s %-40s %10.4fs %.4e %.4e' % (status_str, name, run_time, adiff,\n rdiff), flush=True)\n sys.exit(0 if status else 1)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport sys\nimport os\nimport math\nimport subprocess\nimport argparse\nfrom shutil import copyfile\n\n\ndef parse_arguments():\n \"\"\"Parse command line arguments\"\"\"\n parser = argparse.ArgumentParser(description=\n 'Nalu-Wind regression test check utility')\n parser.add_argument('--abs-tol', type=float, default=1e-15, help=\n 'Tolerance for absolute error')\n parser.add_argument('--rel-tol', type=float, default=1e-07, help=\n 'Tolerance for relative error')\n parser.add_argument('test_name', help='Regression test name')\n parser.add_argument('gold_norms', help=\n 'Absolute path to the gold norms file')\n parser.add_argument('--save-norm-file', required=False, help=\n 'File in which to save a copy of the norms')\n return parser.parse_args()\n\n\ndef load_norm_file(fname):\n \"\"\"Parse the norm file and return the mean system norms\"\"\"\n try:\n with open(fname, 'r') as fh:\n lines = fh.readlines()\n norms = [float(ll.strip().split()[0]) for ll in lines]\n return norms\n except:\n return []\n\n\ndef generate_test_norms(testname):\n \"\"\"Parse the log file and generate test norms\"\"\"\n logname = testname + '.log'\n norm_name = testname + '.norm'\n cmdline = \"awk '/Mean System Norm:/ { print $4, $5, $6; }' %s > %s \" % (\n logname, norm_name)\n os.system(cmdline)\n args = parse_arguments()\n if args.save_norm_file != None:\n copyfile(norm_name, args.save_norm_file)\n return load_norm_file(norm_name)\n\n\ndef get_run_time(testname):\n \"\"\"Return STKPERF total time\"\"\"\n logname = testname + '.log'\n cmdline = \"awk '/STKPERF: Total Time/ { print $4; }' %s \" % logname\n try:\n pp = subprocess.run(cmdline, shell=True, check=True, capture_output\n =True)\n return pp.stdout.decode('UTF-8').strip()\n except:\n return ''\n\n\ndef check_norms(test_norms, gold_norms, atol, rtol):\n \"\"\"Check the regression test norms\"\"\"\n if len(test_norms) != len(gold_norms):\n print('Number of timesteps do not match', flush=True)\n return False, 
1e+16, 1e+16\n test_pass = True\n abs_diff = 0.0\n rel_diff = 0.0\n for t1, t2 in zip(test_norms, gold_norms):\n adiff = abs(t1 - t2)\n rdiff = abs(t1 / t2 - 1.0)\n abs_diff = max(abs_diff, adiff)\n rel_diff = max(rel_diff, rdiff)\n if adiff > atol and rdiff > rtol:\n test_pass = False\n return test_pass, abs_diff, rel_diff\n\n\ndef main():\n \"\"\"Driver function\"\"\"\n args = parse_arguments()\n test_norms = generate_test_norms(args.test_name)\n gold_norms = load_norm_file(args.gold_norms)\n run_time = get_run_time(args.test_name)\n run_time = float(run_time) if run_time else 0.0\n status, adiff, rdiff = check_norms(test_norms, gold_norms, args.abs_tol,\n args.rel_tol)\n name = args.test_name.ljust(40, '.')\n status_str = 'PASS:' if status else 'FAIL:'\n print('%s %-40s %10.4fs %.4e %.4e' % (status_str, name, run_time, adiff,\n rdiff), flush=True)\n sys.exit(0 if status else 1)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nCheck mean system norm errors in regression tests\n\nThis script determines the pass/fail status of a regression test by comparing\nthe \"Mean System Norm\" values output at each timestep against \"gold values\"\nfrom the reference file provided by the user.\n\nSuccess is determined by the following criteria: the number of timesteps in the\nlog file matches the number of timesteps in the gold file, and for each\ntimestep the system norms meet the absolute and relative tolerances (default\n1.0e-16 and 1.0e-7 respectively). The tolerances can be adjusted using command\nline arguments, pass `-h` to get a brief usage message.\n\"\"\"\n\nimport sys\nimport os\nimport math\nimport subprocess\nimport argparse\nfrom shutil import copyfile\n\ndef parse_arguments():\n \"\"\"Parse command line arguments\"\"\"\n parser = argparse.ArgumentParser(\n description=\"Nalu-Wind regression test check utility\")\n parser.add_argument(\n '--abs-tol', type=float, default=1.0e-15,\n help=\"Tolerance for absolute error\")\n parser.add_argument(\n '--rel-tol', type=float, default=1.0e-7,\n help=\"Tolerance for relative error\")\n parser.add_argument(\n \"test_name\", help=\"Regression test name\")\n parser.add_argument(\n \"gold_norms\", help=\"Absolute path to the gold norms file\")\n parser.add_argument(\n '--save-norm-file', required=False,\n help=\"File in which to save a copy of the norms\")\n return parser.parse_args()\n\ndef load_norm_file(fname):\n \"\"\"Parse the norm file and return the mean system norms\"\"\"\n try:\n with open(fname, 'r') as fh:\n lines = fh.readlines()\n norms = [float(ll.strip().split()[0]) for ll in lines]\n return norms\n except:\n return []\n\ndef generate_test_norms(testname):\n \"\"\"Parse the log file and generate test norms\"\"\"\n logname = testname + \".log\"\n norm_name = testname + \".norm\"\n cmdline = \"\"\"awk '/Mean System Norm:/ { print $4, $5, $6; }' %s > %s \"\"\"%(\n 
logname, norm_name)\n os.system(cmdline)\n args = parse_arguments()\n if (args.save_norm_file != None):\n copyfile(norm_name, args.save_norm_file)\n\n return load_norm_file(norm_name)\n\ndef get_run_time(testname):\n \"\"\"Return STKPERF total time\"\"\"\n logname = testname + \".log\"\n cmdline = \"\"\"awk '/STKPERF: Total Time/ { print $4; }' %s \"\"\"%(\n logname)\n try:\n pp = subprocess.run(cmdline, shell=True, check=True, capture_output=True)\n return pp.stdout.decode('UTF-8').strip()\n except:\n return \"\"\n\ndef check_norms(test_norms, gold_norms, atol, rtol):\n \"\"\"Check the regression test norms\"\"\"\n if len(test_norms) != len(gold_norms):\n print(\"Number of timesteps do not match\", flush=True)\n return (False, 1.0e16, 1.0e16)\n\n test_pass = True\n abs_diff = 0.0\n rel_diff = 0.0\n\n for t1, t2 in zip(test_norms, gold_norms):\n adiff = abs(t1 - t2)\n rdiff = abs(t1 / t2 - 1.0)\n\n abs_diff = max(abs_diff, adiff)\n rel_diff = max(rel_diff, rdiff)\n\n if (adiff > atol) and (rdiff > rtol):\n test_pass = False\n\n return (test_pass, abs_diff, rel_diff)\n\ndef main():\n \"\"\"Driver function\"\"\"\n args = parse_arguments()\n test_norms = generate_test_norms(args.test_name)\n gold_norms = load_norm_file(args.gold_norms)\n run_time = get_run_time(args.test_name)\n run_time = float(run_time) if run_time else 0.0\n status, adiff, rdiff = check_norms(\n test_norms, gold_norms, args.abs_tol, args.rel_tol)\n\n name = args.test_name.ljust(40, \".\")\n status_str = \"PASS:\" if status else \"FAIL:\"\n print(\"%s %-40s %10.4fs %.4e %.4e\"%(\n status_str, name, run_time, adiff, rdiff), flush=True)\n sys.exit(0 if status else 1)\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
import random
import numpy as np
import torch
from utils import print_result, set_random_seed, get_dataset, get_extra_args
from cogdl.tasks import build_task
from cogdl.datasets import build_dataset
from cogdl.utils import build_args_from_dict
DATASET_REGISTRY = {}
def build_default_args_for_node_classification(dataset):
cpu = not torch.cuda.is_available()
args = {
"lr": 0.01,
"weight_decay": 5e-4,
"max_epoch": 1000,
"max_epochs": 1000,
"patience": 100,
"cpu": cpu,
"device_id": [0],
"seed": [42],
"dropout": 0.5,
"hidden_size": 256,
"num_layers": 32,
"lmbda": 0.5,
"wd1": 0.001,
"wd2": 5e-4,
"alpha": 0.1,
"task": "node_classification",
"model": "gcnii",
"dataset": dataset,
}
args = get_extra_args(args)
return build_args_from_dict(args)
def register_func(name):
def register_func_name(func):
DATASET_REGISTRY[name] = func
return func
return register_func_name
@register_func("cora")
def cora_config(args):
args.num_layers = 64
args.hidden_size = 64
args.dropout = 0.6
return args
@register_func("citeseer")
def citeseer_config(args):
args.num_layers = 32
args.hidden_size = 256
args.lr = 0.001
args.patience = 200
args.max_epoch = 2000
args.lmbda = 0.6
args.dropout = 0.7
return args
@register_func("pubmed")
def pubmed_config(args):
args.num_layers = 16
args.hidden_size = 256
args.lmbda = 0.4
args.dropout = 0.5
args.wd1 = 5e-4
return args
def run(dataset_name):
args = build_default_args_for_node_classification(dataset_name)
args = DATASET_REGISTRY[dataset_name](args)
dataset, args = get_dataset(args)
results = []
for seed in args.seed:
set_random_seed(seed)
task = build_task(args, dataset=dataset)
result = task.train()
results.append(result)
return results
if __name__ == "__main__":
# datasets = ["cora", "citeseer", "pubmed"]
datasets = ["citeseer"]
results = []
for x in datasets:
results += run(x)
print_result(results, datasets, "gcnii")
|
normal
|
{
"blob_id": "2396f7acab95260253c367c62002392760157705",
"index": 1236,
"step-1": "<mask token>\n\n\ndef build_default_args_for_node_classification(dataset):\n cpu = not torch.cuda.is_available()\n args = {'lr': 0.01, 'weight_decay': 0.0005, 'max_epoch': 1000,\n 'max_epochs': 1000, 'patience': 100, 'cpu': cpu, 'device_id': [0],\n 'seed': [42], 'dropout': 0.5, 'hidden_size': 256, 'num_layers': 32,\n 'lmbda': 0.5, 'wd1': 0.001, 'wd2': 0.0005, 'alpha': 0.1, 'task':\n 'node_classification', 'model': 'gcnii', 'dataset': dataset}\n args = get_extra_args(args)\n return build_args_from_dict(args)\n\n\n<mask token>\n\n\n@register_func('cora')\ndef cora_config(args):\n args.num_layers = 64\n args.hidden_size = 64\n args.dropout = 0.6\n return args\n\n\n@register_func('citeseer')\ndef citeseer_config(args):\n args.num_layers = 32\n args.hidden_size = 256\n args.lr = 0.001\n args.patience = 200\n args.max_epoch = 2000\n args.lmbda = 0.6\n args.dropout = 0.7\n return args\n\n\n@register_func('pubmed')\ndef pubmed_config(args):\n args.num_layers = 16\n args.hidden_size = 256\n args.lmbda = 0.4\n args.dropout = 0.5\n args.wd1 = 0.0005\n return args\n\n\ndef run(dataset_name):\n args = build_default_args_for_node_classification(dataset_name)\n args = DATASET_REGISTRY[dataset_name](args)\n dataset, args = get_dataset(args)\n results = []\n for seed in args.seed:\n set_random_seed(seed)\n task = build_task(args, dataset=dataset)\n result = task.train()\n results.append(result)\n return results\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef build_default_args_for_node_classification(dataset):\n cpu = not torch.cuda.is_available()\n args = {'lr': 0.01, 'weight_decay': 0.0005, 'max_epoch': 1000,\n 'max_epochs': 1000, 'patience': 100, 'cpu': cpu, 'device_id': [0],\n 'seed': [42], 'dropout': 0.5, 'hidden_size': 256, 'num_layers': 32,\n 'lmbda': 0.5, 'wd1': 0.001, 'wd2': 0.0005, 'alpha': 0.1, 'task':\n 'node_classification', 'model': 'gcnii', 'dataset': dataset}\n args = get_extra_args(args)\n return build_args_from_dict(args)\n\n\ndef register_func(name):\n\n def register_func_name(func):\n DATASET_REGISTRY[name] = func\n return func\n return register_func_name\n\n\n@register_func('cora')\ndef cora_config(args):\n args.num_layers = 64\n args.hidden_size = 64\n args.dropout = 0.6\n return args\n\n\n@register_func('citeseer')\ndef citeseer_config(args):\n args.num_layers = 32\n args.hidden_size = 256\n args.lr = 0.001\n args.patience = 200\n args.max_epoch = 2000\n args.lmbda = 0.6\n args.dropout = 0.7\n return args\n\n\n@register_func('pubmed')\ndef pubmed_config(args):\n args.num_layers = 16\n args.hidden_size = 256\n args.lmbda = 0.4\n args.dropout = 0.5\n args.wd1 = 0.0005\n return args\n\n\ndef run(dataset_name):\n args = build_default_args_for_node_classification(dataset_name)\n args = DATASET_REGISTRY[dataset_name](args)\n dataset, args = get_dataset(args)\n results = []\n for seed in args.seed:\n set_random_seed(seed)\n task = build_task(args, dataset=dataset)\n result = task.train()\n results.append(result)\n return results\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef build_default_args_for_node_classification(dataset):\n cpu = not torch.cuda.is_available()\n args = {'lr': 0.01, 'weight_decay': 0.0005, 'max_epoch': 1000,\n 'max_epochs': 1000, 'patience': 100, 'cpu': cpu, 'device_id': [0],\n 'seed': [42], 'dropout': 0.5, 'hidden_size': 256, 'num_layers': 32,\n 'lmbda': 0.5, 'wd1': 0.001, 'wd2': 0.0005, 'alpha': 0.1, 'task':\n 'node_classification', 'model': 'gcnii', 'dataset': dataset}\n args = get_extra_args(args)\n return build_args_from_dict(args)\n\n\ndef register_func(name):\n\n def register_func_name(func):\n DATASET_REGISTRY[name] = func\n return func\n return register_func_name\n\n\n@register_func('cora')\ndef cora_config(args):\n args.num_layers = 64\n args.hidden_size = 64\n args.dropout = 0.6\n return args\n\n\n@register_func('citeseer')\ndef citeseer_config(args):\n args.num_layers = 32\n args.hidden_size = 256\n args.lr = 0.001\n args.patience = 200\n args.max_epoch = 2000\n args.lmbda = 0.6\n args.dropout = 0.7\n return args\n\n\n@register_func('pubmed')\ndef pubmed_config(args):\n args.num_layers = 16\n args.hidden_size = 256\n args.lmbda = 0.4\n args.dropout = 0.5\n args.wd1 = 0.0005\n return args\n\n\ndef run(dataset_name):\n args = build_default_args_for_node_classification(dataset_name)\n args = DATASET_REGISTRY[dataset_name](args)\n dataset, args = get_dataset(args)\n results = []\n for seed in args.seed:\n set_random_seed(seed)\n task = build_task(args, dataset=dataset)\n result = task.train()\n results.append(result)\n return results\n\n\nif __name__ == '__main__':\n datasets = ['citeseer']\n results = []\n for x in datasets:\n results += run(x)\n print_result(results, datasets, 'gcnii')\n",
"step-4": "<mask token>\nDATASET_REGISTRY = {}\n\n\ndef build_default_args_for_node_classification(dataset):\n cpu = not torch.cuda.is_available()\n args = {'lr': 0.01, 'weight_decay': 0.0005, 'max_epoch': 1000,\n 'max_epochs': 1000, 'patience': 100, 'cpu': cpu, 'device_id': [0],\n 'seed': [42], 'dropout': 0.5, 'hidden_size': 256, 'num_layers': 32,\n 'lmbda': 0.5, 'wd1': 0.001, 'wd2': 0.0005, 'alpha': 0.1, 'task':\n 'node_classification', 'model': 'gcnii', 'dataset': dataset}\n args = get_extra_args(args)\n return build_args_from_dict(args)\n\n\ndef register_func(name):\n\n def register_func_name(func):\n DATASET_REGISTRY[name] = func\n return func\n return register_func_name\n\n\n@register_func('cora')\ndef cora_config(args):\n args.num_layers = 64\n args.hidden_size = 64\n args.dropout = 0.6\n return args\n\n\n@register_func('citeseer')\ndef citeseer_config(args):\n args.num_layers = 32\n args.hidden_size = 256\n args.lr = 0.001\n args.patience = 200\n args.max_epoch = 2000\n args.lmbda = 0.6\n args.dropout = 0.7\n return args\n\n\n@register_func('pubmed')\ndef pubmed_config(args):\n args.num_layers = 16\n args.hidden_size = 256\n args.lmbda = 0.4\n args.dropout = 0.5\n args.wd1 = 0.0005\n return args\n\n\ndef run(dataset_name):\n args = build_default_args_for_node_classification(dataset_name)\n args = DATASET_REGISTRY[dataset_name](args)\n dataset, args = get_dataset(args)\n results = []\n for seed in args.seed:\n set_random_seed(seed)\n task = build_task(args, dataset=dataset)\n result = task.train()\n results.append(result)\n return results\n\n\nif __name__ == '__main__':\n datasets = ['citeseer']\n results = []\n for x in datasets:\n results += run(x)\n print_result(results, datasets, 'gcnii')\n",
"step-5": "import random\nimport numpy as np\n\nimport torch\n\nfrom utils import print_result, set_random_seed, get_dataset, get_extra_args\nfrom cogdl.tasks import build_task\nfrom cogdl.datasets import build_dataset\nfrom cogdl.utils import build_args_from_dict\n\nDATASET_REGISTRY = {}\n\n\ndef build_default_args_for_node_classification(dataset):\n cpu = not torch.cuda.is_available()\n args = {\n \"lr\": 0.01,\n \"weight_decay\": 5e-4,\n \"max_epoch\": 1000,\n \"max_epochs\": 1000,\n \"patience\": 100,\n \"cpu\": cpu,\n \"device_id\": [0],\n \"seed\": [42],\n \"dropout\": 0.5,\n \"hidden_size\": 256,\n \"num_layers\": 32,\n \"lmbda\": 0.5,\n \"wd1\": 0.001,\n \"wd2\": 5e-4,\n \"alpha\": 0.1,\n \"task\": \"node_classification\",\n \"model\": \"gcnii\",\n \"dataset\": dataset,\n }\n args = get_extra_args(args)\n return build_args_from_dict(args)\n\n\ndef register_func(name):\n def register_func_name(func):\n DATASET_REGISTRY[name] = func\n return func\n\n return register_func_name\n\n\n@register_func(\"cora\")\ndef cora_config(args):\n args.num_layers = 64\n args.hidden_size = 64\n args.dropout = 0.6\n return args\n\n\n@register_func(\"citeseer\")\ndef citeseer_config(args):\n args.num_layers = 32\n args.hidden_size = 256\n args.lr = 0.001\n args.patience = 200\n args.max_epoch = 2000\n args.lmbda = 0.6\n args.dropout = 0.7\n return args\n\n\n@register_func(\"pubmed\")\ndef pubmed_config(args):\n args.num_layers = 16\n args.hidden_size = 256\n args.lmbda = 0.4\n args.dropout = 0.5\n args.wd1 = 5e-4\n return args\n\n\ndef run(dataset_name):\n args = build_default_args_for_node_classification(dataset_name)\n args = DATASET_REGISTRY[dataset_name](args)\n dataset, args = get_dataset(args)\n results = []\n for seed in args.seed:\n set_random_seed(seed)\n task = build_task(args, dataset=dataset)\n result = task.train()\n results.append(result)\n return results\n\n\nif __name__ == \"__main__\":\n # datasets = [\"cora\", \"citeseer\", \"pubmed\"]\n datasets = 
[\"citeseer\"]\n results = []\n for x in datasets:\n results += run(x)\n print_result(results, datasets, \"gcnii\")\n",
"step-ids": [
5,
6,
7,
8,
10
]
}
|
[
5,
6,
7,
8,
10
] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import tempfile
from functools import partial
import numpy as np
import torch
from ax.benchmark.benchmark_problem import SimpleBenchmarkProblem
from ax.core.metric import Metric
from ax.core.runner import Runner
from ax.exceptions.storage import JSONDecodeError, JSONEncodeError
from ax.modelbridge.base import ModelBridge
from ax.modelbridge.registry import Models
from ax.storage.json_store.decoder import (
generation_strategy_from_json,
object_from_json,
)
from ax.storage.json_store.decoders import class_from_json
from ax.storage.json_store.encoder import object_to_json
from ax.storage.json_store.encoders import botorch_modular_to_dict
from ax.storage.json_store.load import load_experiment
from ax.storage.json_store.registry import CLASS_ENCODER_REGISTRY
from ax.storage.json_store.save import save_experiment
from ax.storage.metric_registry import register_metric
from ax.storage.runner_registry import register_runner
from ax.utils.common.testutils import TestCase
from ax.utils.measurement.synthetic_functions import ackley, branin, from_botorch
from ax.utils.testing.benchmark_stubs import (
get_branin_benchmark_problem,
get_branin_simple_benchmark_problem,
get_mult_simple_benchmark_problem,
get_sum_simple_benchmark_problem,
)
from ax.utils.testing.core_stubs import (
get_abandoned_arm,
get_acquisition_function_type,
get_acquisition_type,
get_arm,
get_augmented_branin_metric,
get_augmented_hartmann_metric,
get_batch_trial,
get_botorch_model,
get_botorch_model_with_default_acquisition_class,
get_branin_data,
get_branin_experiment,
get_branin_metric,
get_choice_parameter,
get_experiment_with_batch_and_single_trial,
get_experiment_with_data,
get_experiment_with_trial_with_ttl,
get_experiment_with_map_data_type,
get_factorial_metric,
get_fixed_parameter,
get_generator_run,
get_map_data,
get_hartmann_metric,
get_list_surrogate,
get_metric,
get_mll_type,
get_model_type,
get_multi_objective,
get_multi_objective_optimization_config,
get_multi_type_experiment,
get_objective,
get_objective_threshold,
get_optimization_config,
get_order_constraint,
get_outcome_constraint,
get_parameter_constraint,
get_percentile_early_stopping_strategy,
get_range_parameter,
get_scalarized_objective,
get_search_space,
get_simple_experiment_with_batch_trial,
get_sum_constraint1,
get_sum_constraint2,
get_surrogate,
get_synthetic_runner,
get_trial,
)
from ax.utils.testing.modeling_stubs import (
get_generation_strategy,
get_observation_features,
get_transform_type,
)
from botorch.test_functions.synthetic import Ackley
TEST_CASES = [
("AbandonedArm", get_abandoned_arm),
("Arm", get_arm),
("AugmentedBraninMetric", get_augmented_branin_metric),
("AugmentedHartmannMetric", get_augmented_hartmann_metric),
("BatchTrial", get_batch_trial),
("BenchmarkProblem", get_branin_benchmark_problem),
("BoTorchModel", get_botorch_model),
("BoTorchModel", get_botorch_model_with_default_acquisition_class),
("BraninMetric", get_branin_metric),
("ChoiceParameter", get_choice_parameter),
("Experiment", get_experiment_with_batch_and_single_trial),
("Experiment", get_experiment_with_trial_with_ttl),
("Experiment", get_experiment_with_data),
("Experiment", get_experiment_with_map_data_type),
("FactorialMetric", get_factorial_metric),
("FixedParameter", get_fixed_parameter),
("Hartmann6Metric", get_hartmann_metric),
("GenerationStrategy", partial(get_generation_strategy, with_experiment=True)),
("GeneratorRun", get_generator_run),
("ListSurrogate", get_list_surrogate),
("MapData", get_map_data),
("Metric", get_metric),
("MultiObjective", get_multi_objective),
("MultiObjectiveOptimizationConfig", get_multi_objective_optimization_config),
("MultiTypeExperiment", get_multi_type_experiment),
("ObservationFeatures", get_observation_features),
("Objective", get_objective),
("ObjectiveThreshold", get_objective_threshold),
("OptimizationConfig", get_optimization_config),
("OrderConstraint", get_order_constraint),
("OutcomeConstraint", get_outcome_constraint),
("PercentileEarlyStoppingStrategy", get_percentile_early_stopping_strategy),
("ParameterConstraint", get_parameter_constraint),
("RangeParameter", get_range_parameter),
("ScalarizedObjective", get_scalarized_objective),
("SearchSpace", get_search_space),
("SimpleBenchmarkProblem", get_mult_simple_benchmark_problem),
("SimpleBenchmarkProblem", get_branin_simple_benchmark_problem),
("SimpleBenchmarkProblem", get_sum_simple_benchmark_problem),
("SimpleExperiment", get_simple_experiment_with_batch_trial),
("SumConstraint", get_sum_constraint1),
("SumConstraint", get_sum_constraint2),
("Surrogate", get_surrogate),
("SyntheticRunner", get_synthetic_runner),
("Type[Acquisition]", get_acquisition_type),
("Type[AcquisitionFunction]", get_acquisition_function_type),
("Type[Model]", get_model_type),
("Type[MarginalLogLikelihood]", get_mll_type),
("Type[Transform]", get_transform_type),
("Trial", get_trial),
]
class JSONStoreTest(TestCase):
def setUp(self):
self.experiment = get_experiment_with_batch_and_single_trial()
def testJSONEncodeFailure(self):
self.assertRaises(JSONEncodeError, object_to_json, RuntimeError("foobar"))
def testJSONDecodeFailure(self):
self.assertRaises(JSONDecodeError, object_from_json, RuntimeError("foobar"))
self.assertRaises(JSONDecodeError, object_from_json, {"__type": "foobar"})
def testSaveAndLoad(self):
with tempfile.NamedTemporaryFile(mode="w+", delete=False, suffix=".json") as f:
save_experiment(self.experiment, f.name)
loaded_experiment = load_experiment(f.name)
self.assertEqual(loaded_experiment, self.experiment)
os.remove(f.name)
def testSaveValidation(self):
with self.assertRaises(ValueError):
save_experiment(self.experiment.trials[0], "test.json")
def testValidateFilename(self):
bad_filename = "test"
self.assertRaises(ValueError, save_experiment, self.experiment, bad_filename)
def testEncodeDecode(self):
for class_, fake_func in TEST_CASES:
# Can't load trials from JSON, because a batch needs an experiment
# in order to be initialized
if class_ == "BatchTrial" or class_ == "Trial":
continue
# Can't load parameter constraints from JSON, because they require
# a SearchSpace in order to be initialized
if class_ == "OrderConstraint" or class_ == "SumConstraint":
continue
original_object = fake_func()
json_object = object_to_json(original_object)
converted_object = object_from_json(json_object)
if class_ == "SimpleExperiment":
# Evaluation functions will be different, so need to do
# this so equality test passes
with self.assertRaises(RuntimeError):
converted_object.evaluation_function(parameterization={})
original_object.evaluation_function = None
converted_object.evaluation_function = None
self.assertEqual(
original_object,
converted_object,
msg=f"Error encoding/decoding {class_}.",
)
def testEncodeDecodeTorchTensor(self):
x = torch.tensor(
[[1.0, 2.0], [3.0, 4.0]], dtype=torch.float64, device=torch.device("cpu")
)
expected_json = {
"__type": "Tensor",
"value": [[1.0, 2.0], [3.0, 4.0]],
"dtype": {"__type": "torch_dtype", "value": "torch.float64"},
"device": {"__type": "torch_device", "value": "cpu"},
}
x_json = object_to_json(x)
self.assertEqual(expected_json, x_json)
x2 = object_from_json(x_json)
self.assertTrue(torch.equal(x, x2))
def testDecodeGenerationStrategy(self):
generation_strategy = get_generation_strategy()
experiment = get_branin_experiment()
gs_json = object_to_json(generation_strategy)
new_generation_strategy = generation_strategy_from_json(gs_json)
self.assertEqual(generation_strategy, new_generation_strategy)
self.assertGreater(len(new_generation_strategy._steps), 0)
self.assertIsInstance(new_generation_strategy._steps[0].model, Models)
# Model has not yet been initialized on this GS since it hasn't generated
# anything yet.
self.assertIsNone(new_generation_strategy.model)
# Check that we can encode and decode the generation strategy after
# it has generated some generator runs. Since we now need to `gen`,
# we remove the fake callable kwarg we added, since model does not
# expect it.
generation_strategy = get_generation_strategy(with_callable_model_kwarg=False)
gr = generation_strategy.gen(experiment)
gs_json = object_to_json(generation_strategy)
new_generation_strategy = generation_strategy_from_json(gs_json)
self.assertEqual(generation_strategy, new_generation_strategy)
self.assertIsInstance(new_generation_strategy._steps[0].model, Models)
# Since this GS has now generated one generator run, model should have
# been initialized and restored when decoding from JSON.
self.assertIsInstance(new_generation_strategy.model, ModelBridge)
# Check that we can encode and decode the generation strategy after
# it has generated some trials and been updated with some data.
generation_strategy = new_generation_strategy
experiment.new_trial(gr) # Add previously generated GR as trial.
# Make generation strategy aware of the trial's data via `gen`.
generation_strategy.gen(experiment, data=get_branin_data())
gs_json = object_to_json(generation_strategy)
new_generation_strategy = generation_strategy_from_json(gs_json)
self.assertEqual(generation_strategy, new_generation_strategy)
self.assertIsInstance(new_generation_strategy._steps[0].model, Models)
self.assertIsInstance(new_generation_strategy.model, ModelBridge)
def testEncodeDecodeNumpy(self):
arr = np.array([[1, 2, 3], [4, 5, 6]])
self.assertTrue(np.array_equal(arr, object_from_json(object_to_json(arr))))
def testEncodeDecodeSimpleBenchmarkProblem(self):
branin_problem = get_branin_simple_benchmark_problem()
sum_problem = get_sum_simple_benchmark_problem()
new_branin_problem = object_from_json(object_to_json(branin_problem))
new_sum_problem = object_from_json(object_to_json(sum_problem))
self.assertEqual(
branin_problem.f(1, 2), new_branin_problem.f(1, 2), branin(1, 2)
)
self.assertEqual(sum_problem.f([1, 2]), new_sum_problem.f([1, 2]), 3)
# Test using `from_botorch`.
ackley_problem = SimpleBenchmarkProblem(
f=from_botorch(Ackley()), noise_sd=0.0, minimize=True
)
new_ackley_problem = object_from_json(object_to_json(ackley_problem))
self.assertEqual(
ackley_problem.f(1, 2), new_ackley_problem.f(1, 2), ackley(1, 2)
)
def testRegistryAdditions(self):
class MyRunner(Runner):
def run():
pass
def staging_required():
return False
class MyMetric(Metric):
pass
register_metric(MyMetric)
register_runner(MyRunner)
experiment = get_experiment_with_batch_and_single_trial()
experiment.runner = MyRunner()
experiment.add_tracking_metric(MyMetric(name="my_metric"))
with tempfile.NamedTemporaryFile(mode="w+", delete=False, suffix=".json") as f:
save_experiment(experiment, f.name)
loaded_experiment = load_experiment(f.name)
self.assertEqual(loaded_experiment, experiment)
os.remove(f.name)
def testEncodeUnknownClassToDict(self):
# Cannot encode `UnknownClass` type because it is not registered in the
# CLASS_ENCODER_REGISTRY.
class UnknownClass:
def __init__(self):
pass
with self.assertRaisesRegex(
ValueError, "is a class. Add it to the CLASS_ENCODER_REGISTRY"
):
object_to_json(UnknownClass)
# `UnknownClass` type is registered in the CLASS_ENCODER_REGISTRY and uses the
# `botorch_modular_to_dict` encoder, but `UnknownClass` is not registered in
# the `botorch_modular_registry.py` file.
CLASS_ENCODER_REGISTRY[UnknownClass] = botorch_modular_to_dict
with self.assertRaisesRegex(
ValueError,
"does not have a corresponding parent class in CLASS_TO_REGISTRY",
):
object_to_json(UnknownClass)
def testDecodeUnknownClassFromJson(self):
with self.assertRaisesRegex(
ValueError,
"does not have a corresponding entry in CLASS_TO_REVERSE_REGISTRY",
):
class_from_json({"index": 0, "class": "unknown_path"})
|
normal
|
{
"blob_id": "52eec56f7f5da8356f61301994f846ef7769f73b",
"index": 6189,
"step-1": "<mask token>\n\n\nclass JSONStoreTest(TestCase):\n\n def setUp(self):\n self.experiment = get_experiment_with_batch_and_single_trial()\n\n def testJSONEncodeFailure(self):\n self.assertRaises(JSONEncodeError, object_to_json, RuntimeError(\n 'foobar'))\n\n def testJSONDecodeFailure(self):\n self.assertRaises(JSONDecodeError, object_from_json, RuntimeError(\n 'foobar'))\n self.assertRaises(JSONDecodeError, object_from_json, {'__type':\n 'foobar'})\n <mask token>\n <mask token>\n\n def testValidateFilename(self):\n bad_filename = 'test'\n self.assertRaises(ValueError, save_experiment, self.experiment,\n bad_filename)\n <mask token>\n <mask token>\n <mask token>\n\n def testEncodeDecodeNumpy(self):\n arr = np.array([[1, 2, 3], [4, 5, 6]])\n self.assertTrue(np.array_equal(arr, object_from_json(object_to_json\n (arr))))\n\n def testEncodeDecodeSimpleBenchmarkProblem(self):\n branin_problem = get_branin_simple_benchmark_problem()\n sum_problem = get_sum_simple_benchmark_problem()\n new_branin_problem = object_from_json(object_to_json(branin_problem))\n new_sum_problem = object_from_json(object_to_json(sum_problem))\n self.assertEqual(branin_problem.f(1, 2), new_branin_problem.f(1, 2),\n branin(1, 2))\n self.assertEqual(sum_problem.f([1, 2]), new_sum_problem.f([1, 2]), 3)\n ackley_problem = SimpleBenchmarkProblem(f=from_botorch(Ackley()),\n noise_sd=0.0, minimize=True)\n new_ackley_problem = object_from_json(object_to_json(ackley_problem))\n self.assertEqual(ackley_problem.f(1, 2), new_ackley_problem.f(1, 2),\n ackley(1, 2))\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass JSONStoreTest(TestCase):\n\n def setUp(self):\n self.experiment = get_experiment_with_batch_and_single_trial()\n\n def testJSONEncodeFailure(self):\n self.assertRaises(JSONEncodeError, object_to_json, RuntimeError(\n 'foobar'))\n\n def testJSONDecodeFailure(self):\n self.assertRaises(JSONDecodeError, object_from_json, RuntimeError(\n 'foobar'))\n self.assertRaises(JSONDecodeError, object_from_json, {'__type':\n 'foobar'})\n\n def testSaveAndLoad(self):\n with tempfile.NamedTemporaryFile(mode='w+', delete=False, suffix=\n '.json') as f:\n save_experiment(self.experiment, f.name)\n loaded_experiment = load_experiment(f.name)\n self.assertEqual(loaded_experiment, self.experiment)\n os.remove(f.name)\n <mask token>\n\n def testValidateFilename(self):\n bad_filename = 'test'\n self.assertRaises(ValueError, save_experiment, self.experiment,\n bad_filename)\n <mask token>\n\n def testEncodeDecodeTorchTensor(self):\n x = torch.tensor([[1.0, 2.0], [3.0, 4.0]], dtype=torch.float64,\n device=torch.device('cpu'))\n expected_json = {'__type': 'Tensor', 'value': [[1.0, 2.0], [3.0, \n 4.0]], 'dtype': {'__type': 'torch_dtype', 'value':\n 'torch.float64'}, 'device': {'__type': 'torch_device', 'value':\n 'cpu'}}\n x_json = object_to_json(x)\n self.assertEqual(expected_json, x_json)\n x2 = object_from_json(x_json)\n self.assertTrue(torch.equal(x, x2))\n\n def testDecodeGenerationStrategy(self):\n generation_strategy = get_generation_strategy()\n experiment = get_branin_experiment()\n gs_json = object_to_json(generation_strategy)\n new_generation_strategy = generation_strategy_from_json(gs_json)\n self.assertEqual(generation_strategy, new_generation_strategy)\n self.assertGreater(len(new_generation_strategy._steps), 0)\n self.assertIsInstance(new_generation_strategy._steps[0].model, Models)\n self.assertIsNone(new_generation_strategy.model)\n generation_strategy = get_generation_strategy(with_callable_model_kwarg\n =False)\n gr = 
generation_strategy.gen(experiment)\n gs_json = object_to_json(generation_strategy)\n new_generation_strategy = generation_strategy_from_json(gs_json)\n self.assertEqual(generation_strategy, new_generation_strategy)\n self.assertIsInstance(new_generation_strategy._steps[0].model, Models)\n self.assertIsInstance(new_generation_strategy.model, ModelBridge)\n generation_strategy = new_generation_strategy\n experiment.new_trial(gr)\n generation_strategy.gen(experiment, data=get_branin_data())\n gs_json = object_to_json(generation_strategy)\n new_generation_strategy = generation_strategy_from_json(gs_json)\n self.assertEqual(generation_strategy, new_generation_strategy)\n self.assertIsInstance(new_generation_strategy._steps[0].model, Models)\n self.assertIsInstance(new_generation_strategy.model, ModelBridge)\n\n def testEncodeDecodeNumpy(self):\n arr = np.array([[1, 2, 3], [4, 5, 6]])\n self.assertTrue(np.array_equal(arr, object_from_json(object_to_json\n (arr))))\n\n def testEncodeDecodeSimpleBenchmarkProblem(self):\n branin_problem = get_branin_simple_benchmark_problem()\n sum_problem = get_sum_simple_benchmark_problem()\n new_branin_problem = object_from_json(object_to_json(branin_problem))\n new_sum_problem = object_from_json(object_to_json(sum_problem))\n self.assertEqual(branin_problem.f(1, 2), new_branin_problem.f(1, 2),\n branin(1, 2))\n self.assertEqual(sum_problem.f([1, 2]), new_sum_problem.f([1, 2]), 3)\n ackley_problem = SimpleBenchmarkProblem(f=from_botorch(Ackley()),\n noise_sd=0.0, minimize=True)\n new_ackley_problem = object_from_json(object_to_json(ackley_problem))\n self.assertEqual(ackley_problem.f(1, 2), new_ackley_problem.f(1, 2),\n ackley(1, 2))\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass JSONStoreTest(TestCase):\n\n def setUp(self):\n self.experiment = get_experiment_with_batch_and_single_trial()\n\n def testJSONEncodeFailure(self):\n self.assertRaises(JSONEncodeError, object_to_json, RuntimeError(\n 'foobar'))\n\n def testJSONDecodeFailure(self):\n self.assertRaises(JSONDecodeError, object_from_json, RuntimeError(\n 'foobar'))\n self.assertRaises(JSONDecodeError, object_from_json, {'__type':\n 'foobar'})\n\n def testSaveAndLoad(self):\n with tempfile.NamedTemporaryFile(mode='w+', delete=False, suffix=\n '.json') as f:\n save_experiment(self.experiment, f.name)\n loaded_experiment = load_experiment(f.name)\n self.assertEqual(loaded_experiment, self.experiment)\n os.remove(f.name)\n\n def testSaveValidation(self):\n with self.assertRaises(ValueError):\n save_experiment(self.experiment.trials[0], 'test.json')\n\n def testValidateFilename(self):\n bad_filename = 'test'\n self.assertRaises(ValueError, save_experiment, self.experiment,\n bad_filename)\n <mask token>\n\n def testEncodeDecodeTorchTensor(self):\n x = torch.tensor([[1.0, 2.0], [3.0, 4.0]], dtype=torch.float64,\n device=torch.device('cpu'))\n expected_json = {'__type': 'Tensor', 'value': [[1.0, 2.0], [3.0, \n 4.0]], 'dtype': {'__type': 'torch_dtype', 'value':\n 'torch.float64'}, 'device': {'__type': 'torch_device', 'value':\n 'cpu'}}\n x_json = object_to_json(x)\n self.assertEqual(expected_json, x_json)\n x2 = object_from_json(x_json)\n self.assertTrue(torch.equal(x, x2))\n\n def testDecodeGenerationStrategy(self):\n generation_strategy = get_generation_strategy()\n experiment = get_branin_experiment()\n gs_json = object_to_json(generation_strategy)\n new_generation_strategy = generation_strategy_from_json(gs_json)\n self.assertEqual(generation_strategy, new_generation_strategy)\n self.assertGreater(len(new_generation_strategy._steps), 0)\n self.assertIsInstance(new_generation_strategy._steps[0].model, Models)\n 
self.assertIsNone(new_generation_strategy.model)\n generation_strategy = get_generation_strategy(with_callable_model_kwarg\n =False)\n gr = generation_strategy.gen(experiment)\n gs_json = object_to_json(generation_strategy)\n new_generation_strategy = generation_strategy_from_json(gs_json)\n self.assertEqual(generation_strategy, new_generation_strategy)\n self.assertIsInstance(new_generation_strategy._steps[0].model, Models)\n self.assertIsInstance(new_generation_strategy.model, ModelBridge)\n generation_strategy = new_generation_strategy\n experiment.new_trial(gr)\n generation_strategy.gen(experiment, data=get_branin_data())\n gs_json = object_to_json(generation_strategy)\n new_generation_strategy = generation_strategy_from_json(gs_json)\n self.assertEqual(generation_strategy, new_generation_strategy)\n self.assertIsInstance(new_generation_strategy._steps[0].model, Models)\n self.assertIsInstance(new_generation_strategy.model, ModelBridge)\n\n def testEncodeDecodeNumpy(self):\n arr = np.array([[1, 2, 3], [4, 5, 6]])\n self.assertTrue(np.array_equal(arr, object_from_json(object_to_json\n (arr))))\n\n def testEncodeDecodeSimpleBenchmarkProblem(self):\n branin_problem = get_branin_simple_benchmark_problem()\n sum_problem = get_sum_simple_benchmark_problem()\n new_branin_problem = object_from_json(object_to_json(branin_problem))\n new_sum_problem = object_from_json(object_to_json(sum_problem))\n self.assertEqual(branin_problem.f(1, 2), new_branin_problem.f(1, 2),\n branin(1, 2))\n self.assertEqual(sum_problem.f([1, 2]), new_sum_problem.f([1, 2]), 3)\n ackley_problem = SimpleBenchmarkProblem(f=from_botorch(Ackley()),\n noise_sd=0.0, minimize=True)\n new_ackley_problem = object_from_json(object_to_json(ackley_problem))\n self.assertEqual(ackley_problem.f(1, 2), new_ackley_problem.f(1, 2),\n ackley(1, 2))\n <mask token>\n <mask token>\n\n def testDecodeUnknownClassFromJson(self):\n with self.assertRaisesRegex(ValueError,\n 'does not have a corresponding entry in 
CLASS_TO_REVERSE_REGISTRY'\n ):\n class_from_json({'index': 0, 'class': 'unknown_path'})\n",
"step-4": "<mask token>\n\n\nclass JSONStoreTest(TestCase):\n\n def setUp(self):\n self.experiment = get_experiment_with_batch_and_single_trial()\n\n def testJSONEncodeFailure(self):\n self.assertRaises(JSONEncodeError, object_to_json, RuntimeError(\n 'foobar'))\n\n def testJSONDecodeFailure(self):\n self.assertRaises(JSONDecodeError, object_from_json, RuntimeError(\n 'foobar'))\n self.assertRaises(JSONDecodeError, object_from_json, {'__type':\n 'foobar'})\n\n def testSaveAndLoad(self):\n with tempfile.NamedTemporaryFile(mode='w+', delete=False, suffix=\n '.json') as f:\n save_experiment(self.experiment, f.name)\n loaded_experiment = load_experiment(f.name)\n self.assertEqual(loaded_experiment, self.experiment)\n os.remove(f.name)\n\n def testSaveValidation(self):\n with self.assertRaises(ValueError):\n save_experiment(self.experiment.trials[0], 'test.json')\n\n def testValidateFilename(self):\n bad_filename = 'test'\n self.assertRaises(ValueError, save_experiment, self.experiment,\n bad_filename)\n <mask token>\n\n def testEncodeDecodeTorchTensor(self):\n x = torch.tensor([[1.0, 2.0], [3.0, 4.0]], dtype=torch.float64,\n device=torch.device('cpu'))\n expected_json = {'__type': 'Tensor', 'value': [[1.0, 2.0], [3.0, \n 4.0]], 'dtype': {'__type': 'torch_dtype', 'value':\n 'torch.float64'}, 'device': {'__type': 'torch_device', 'value':\n 'cpu'}}\n x_json = object_to_json(x)\n self.assertEqual(expected_json, x_json)\n x2 = object_from_json(x_json)\n self.assertTrue(torch.equal(x, x2))\n\n def testDecodeGenerationStrategy(self):\n generation_strategy = get_generation_strategy()\n experiment = get_branin_experiment()\n gs_json = object_to_json(generation_strategy)\n new_generation_strategy = generation_strategy_from_json(gs_json)\n self.assertEqual(generation_strategy, new_generation_strategy)\n self.assertGreater(len(new_generation_strategy._steps), 0)\n self.assertIsInstance(new_generation_strategy._steps[0].model, Models)\n 
self.assertIsNone(new_generation_strategy.model)\n generation_strategy = get_generation_strategy(with_callable_model_kwarg\n =False)\n gr = generation_strategy.gen(experiment)\n gs_json = object_to_json(generation_strategy)\n new_generation_strategy = generation_strategy_from_json(gs_json)\n self.assertEqual(generation_strategy, new_generation_strategy)\n self.assertIsInstance(new_generation_strategy._steps[0].model, Models)\n self.assertIsInstance(new_generation_strategy.model, ModelBridge)\n generation_strategy = new_generation_strategy\n experiment.new_trial(gr)\n generation_strategy.gen(experiment, data=get_branin_data())\n gs_json = object_to_json(generation_strategy)\n new_generation_strategy = generation_strategy_from_json(gs_json)\n self.assertEqual(generation_strategy, new_generation_strategy)\n self.assertIsInstance(new_generation_strategy._steps[0].model, Models)\n self.assertIsInstance(new_generation_strategy.model, ModelBridge)\n\n def testEncodeDecodeNumpy(self):\n arr = np.array([[1, 2, 3], [4, 5, 6]])\n self.assertTrue(np.array_equal(arr, object_from_json(object_to_json\n (arr))))\n\n def testEncodeDecodeSimpleBenchmarkProblem(self):\n branin_problem = get_branin_simple_benchmark_problem()\n sum_problem = get_sum_simple_benchmark_problem()\n new_branin_problem = object_from_json(object_to_json(branin_problem))\n new_sum_problem = object_from_json(object_to_json(sum_problem))\n self.assertEqual(branin_problem.f(1, 2), new_branin_problem.f(1, 2),\n branin(1, 2))\n self.assertEqual(sum_problem.f([1, 2]), new_sum_problem.f([1, 2]), 3)\n ackley_problem = SimpleBenchmarkProblem(f=from_botorch(Ackley()),\n noise_sd=0.0, minimize=True)\n new_ackley_problem = object_from_json(object_to_json(ackley_problem))\n self.assertEqual(ackley_problem.f(1, 2), new_ackley_problem.f(1, 2),\n ackley(1, 2))\n\n def testRegistryAdditions(self):\n\n\n class MyRunner(Runner):\n\n def run():\n pass\n\n def staging_required():\n return False\n\n\n class MyMetric(Metric):\n 
pass\n register_metric(MyMetric)\n register_runner(MyRunner)\n experiment = get_experiment_with_batch_and_single_trial()\n experiment.runner = MyRunner()\n experiment.add_tracking_metric(MyMetric(name='my_metric'))\n with tempfile.NamedTemporaryFile(mode='w+', delete=False, suffix=\n '.json') as f:\n save_experiment(experiment, f.name)\n loaded_experiment = load_experiment(f.name)\n self.assertEqual(loaded_experiment, experiment)\n os.remove(f.name)\n\n def testEncodeUnknownClassToDict(self):\n\n\n class UnknownClass:\n\n def __init__(self):\n pass\n with self.assertRaisesRegex(ValueError,\n 'is a class. Add it to the CLASS_ENCODER_REGISTRY'):\n object_to_json(UnknownClass)\n CLASS_ENCODER_REGISTRY[UnknownClass] = botorch_modular_to_dict\n with self.assertRaisesRegex(ValueError,\n 'does not have a corresponding parent class in CLASS_TO_REGISTRY'):\n object_to_json(UnknownClass)\n\n def testDecodeUnknownClassFromJson(self):\n with self.assertRaisesRegex(ValueError,\n 'does not have a corresponding entry in CLASS_TO_REVERSE_REGISTRY'\n ):\n class_from_json({'index': 0, 'class': 'unknown_path'})\n",
"step-5": "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport os\nimport tempfile\nfrom functools import partial\n\nimport numpy as np\nimport torch\nfrom ax.benchmark.benchmark_problem import SimpleBenchmarkProblem\nfrom ax.core.metric import Metric\nfrom ax.core.runner import Runner\nfrom ax.exceptions.storage import JSONDecodeError, JSONEncodeError\nfrom ax.modelbridge.base import ModelBridge\nfrom ax.modelbridge.registry import Models\nfrom ax.storage.json_store.decoder import (\n generation_strategy_from_json,\n object_from_json,\n)\nfrom ax.storage.json_store.decoders import class_from_json\nfrom ax.storage.json_store.encoder import object_to_json\nfrom ax.storage.json_store.encoders import botorch_modular_to_dict\nfrom ax.storage.json_store.load import load_experiment\nfrom ax.storage.json_store.registry import CLASS_ENCODER_REGISTRY\nfrom ax.storage.json_store.save import save_experiment\nfrom ax.storage.metric_registry import register_metric\nfrom ax.storage.runner_registry import register_runner\nfrom ax.utils.common.testutils import TestCase\nfrom ax.utils.measurement.synthetic_functions import ackley, branin, from_botorch\nfrom ax.utils.testing.benchmark_stubs import (\n get_branin_benchmark_problem,\n get_branin_simple_benchmark_problem,\n get_mult_simple_benchmark_problem,\n get_sum_simple_benchmark_problem,\n)\nfrom ax.utils.testing.core_stubs import (\n get_abandoned_arm,\n get_acquisition_function_type,\n get_acquisition_type,\n get_arm,\n get_augmented_branin_metric,\n get_augmented_hartmann_metric,\n get_batch_trial,\n get_botorch_model,\n get_botorch_model_with_default_acquisition_class,\n get_branin_data,\n get_branin_experiment,\n get_branin_metric,\n get_choice_parameter,\n get_experiment_with_batch_and_single_trial,\n get_experiment_with_data,\n 
get_experiment_with_trial_with_ttl,\n get_experiment_with_map_data_type,\n get_factorial_metric,\n get_fixed_parameter,\n get_generator_run,\n get_map_data,\n get_hartmann_metric,\n get_list_surrogate,\n get_metric,\n get_mll_type,\n get_model_type,\n get_multi_objective,\n get_multi_objective_optimization_config,\n get_multi_type_experiment,\n get_objective,\n get_objective_threshold,\n get_optimization_config,\n get_order_constraint,\n get_outcome_constraint,\n get_parameter_constraint,\n get_percentile_early_stopping_strategy,\n get_range_parameter,\n get_scalarized_objective,\n get_search_space,\n get_simple_experiment_with_batch_trial,\n get_sum_constraint1,\n get_sum_constraint2,\n get_surrogate,\n get_synthetic_runner,\n get_trial,\n)\nfrom ax.utils.testing.modeling_stubs import (\n get_generation_strategy,\n get_observation_features,\n get_transform_type,\n)\nfrom botorch.test_functions.synthetic import Ackley\n\n\nTEST_CASES = [\n (\"AbandonedArm\", get_abandoned_arm),\n (\"Arm\", get_arm),\n (\"AugmentedBraninMetric\", get_augmented_branin_metric),\n (\"AugmentedHartmannMetric\", get_augmented_hartmann_metric),\n (\"BatchTrial\", get_batch_trial),\n (\"BenchmarkProblem\", get_branin_benchmark_problem),\n (\"BoTorchModel\", get_botorch_model),\n (\"BoTorchModel\", get_botorch_model_with_default_acquisition_class),\n (\"BraninMetric\", get_branin_metric),\n (\"ChoiceParameter\", get_choice_parameter),\n (\"Experiment\", get_experiment_with_batch_and_single_trial),\n (\"Experiment\", get_experiment_with_trial_with_ttl),\n (\"Experiment\", get_experiment_with_data),\n (\"Experiment\", get_experiment_with_map_data_type),\n (\"FactorialMetric\", get_factorial_metric),\n (\"FixedParameter\", get_fixed_parameter),\n (\"Hartmann6Metric\", get_hartmann_metric),\n (\"GenerationStrategy\", partial(get_generation_strategy, with_experiment=True)),\n (\"GeneratorRun\", get_generator_run),\n (\"ListSurrogate\", get_list_surrogate),\n (\"MapData\", get_map_data),\n 
(\"Metric\", get_metric),\n (\"MultiObjective\", get_multi_objective),\n (\"MultiObjectiveOptimizationConfig\", get_multi_objective_optimization_config),\n (\"MultiTypeExperiment\", get_multi_type_experiment),\n (\"ObservationFeatures\", get_observation_features),\n (\"Objective\", get_objective),\n (\"ObjectiveThreshold\", get_objective_threshold),\n (\"OptimizationConfig\", get_optimization_config),\n (\"OrderConstraint\", get_order_constraint),\n (\"OutcomeConstraint\", get_outcome_constraint),\n (\"PercentileEarlyStoppingStrategy\", get_percentile_early_stopping_strategy),\n (\"ParameterConstraint\", get_parameter_constraint),\n (\"RangeParameter\", get_range_parameter),\n (\"ScalarizedObjective\", get_scalarized_objective),\n (\"SearchSpace\", get_search_space),\n (\"SimpleBenchmarkProblem\", get_mult_simple_benchmark_problem),\n (\"SimpleBenchmarkProblem\", get_branin_simple_benchmark_problem),\n (\"SimpleBenchmarkProblem\", get_sum_simple_benchmark_problem),\n (\"SimpleExperiment\", get_simple_experiment_with_batch_trial),\n (\"SumConstraint\", get_sum_constraint1),\n (\"SumConstraint\", get_sum_constraint2),\n (\"Surrogate\", get_surrogate),\n (\"SyntheticRunner\", get_synthetic_runner),\n (\"Type[Acquisition]\", get_acquisition_type),\n (\"Type[AcquisitionFunction]\", get_acquisition_function_type),\n (\"Type[Model]\", get_model_type),\n (\"Type[MarginalLogLikelihood]\", get_mll_type),\n (\"Type[Transform]\", get_transform_type),\n (\"Trial\", get_trial),\n]\n\n\nclass JSONStoreTest(TestCase):\n def setUp(self):\n self.experiment = get_experiment_with_batch_and_single_trial()\n\n def testJSONEncodeFailure(self):\n self.assertRaises(JSONEncodeError, object_to_json, RuntimeError(\"foobar\"))\n\n def testJSONDecodeFailure(self):\n self.assertRaises(JSONDecodeError, object_from_json, RuntimeError(\"foobar\"))\n self.assertRaises(JSONDecodeError, object_from_json, {\"__type\": \"foobar\"})\n\n def testSaveAndLoad(self):\n with 
tempfile.NamedTemporaryFile(mode=\"w+\", delete=False, suffix=\".json\") as f:\n save_experiment(self.experiment, f.name)\n loaded_experiment = load_experiment(f.name)\n self.assertEqual(loaded_experiment, self.experiment)\n os.remove(f.name)\n\n def testSaveValidation(self):\n with self.assertRaises(ValueError):\n save_experiment(self.experiment.trials[0], \"test.json\")\n\n def testValidateFilename(self):\n bad_filename = \"test\"\n self.assertRaises(ValueError, save_experiment, self.experiment, bad_filename)\n\n def testEncodeDecode(self):\n for class_, fake_func in TEST_CASES:\n # Can't load trials from JSON, because a batch needs an experiment\n # in order to be initialized\n if class_ == \"BatchTrial\" or class_ == \"Trial\":\n continue\n\n # Can't load parameter constraints from JSON, because they require\n # a SearchSpace in order to be initialized\n if class_ == \"OrderConstraint\" or class_ == \"SumConstraint\":\n continue\n\n original_object = fake_func()\n json_object = object_to_json(original_object)\n converted_object = object_from_json(json_object)\n\n if class_ == \"SimpleExperiment\":\n # Evaluation functions will be different, so need to do\n # this so equality test passes\n with self.assertRaises(RuntimeError):\n converted_object.evaluation_function(parameterization={})\n\n original_object.evaluation_function = None\n converted_object.evaluation_function = None\n\n self.assertEqual(\n original_object,\n converted_object,\n msg=f\"Error encoding/decoding {class_}.\",\n )\n\n def testEncodeDecodeTorchTensor(self):\n x = torch.tensor(\n [[1.0, 2.0], [3.0, 4.0]], dtype=torch.float64, device=torch.device(\"cpu\")\n )\n expected_json = {\n \"__type\": \"Tensor\",\n \"value\": [[1.0, 2.0], [3.0, 4.0]],\n \"dtype\": {\"__type\": \"torch_dtype\", \"value\": \"torch.float64\"},\n \"device\": {\"__type\": \"torch_device\", \"value\": \"cpu\"},\n }\n x_json = object_to_json(x)\n self.assertEqual(expected_json, x_json)\n x2 = object_from_json(x_json)\n 
self.assertTrue(torch.equal(x, x2))\n\n def testDecodeGenerationStrategy(self):\n generation_strategy = get_generation_strategy()\n experiment = get_branin_experiment()\n gs_json = object_to_json(generation_strategy)\n new_generation_strategy = generation_strategy_from_json(gs_json)\n self.assertEqual(generation_strategy, new_generation_strategy)\n self.assertGreater(len(new_generation_strategy._steps), 0)\n self.assertIsInstance(new_generation_strategy._steps[0].model, Models)\n # Model has not yet been initialized on this GS since it hasn't generated\n # anything yet.\n self.assertIsNone(new_generation_strategy.model)\n\n # Check that we can encode and decode the generation strategy after\n # it has generated some generator runs. Since we now need to `gen`,\n # we remove the fake callable kwarg we added, since model does not\n # expect it.\n generation_strategy = get_generation_strategy(with_callable_model_kwarg=False)\n gr = generation_strategy.gen(experiment)\n gs_json = object_to_json(generation_strategy)\n new_generation_strategy = generation_strategy_from_json(gs_json)\n self.assertEqual(generation_strategy, new_generation_strategy)\n self.assertIsInstance(new_generation_strategy._steps[0].model, Models)\n # Since this GS has now generated one generator run, model should have\n # been initialized and restored when decoding from JSON.\n self.assertIsInstance(new_generation_strategy.model, ModelBridge)\n\n # Check that we can encode and decode the generation strategy after\n # it has generated some trials and been updated with some data.\n generation_strategy = new_generation_strategy\n experiment.new_trial(gr) # Add previously generated GR as trial.\n # Make generation strategy aware of the trial's data via `gen`.\n generation_strategy.gen(experiment, data=get_branin_data())\n gs_json = object_to_json(generation_strategy)\n new_generation_strategy = generation_strategy_from_json(gs_json)\n self.assertEqual(generation_strategy, new_generation_strategy)\n 
self.assertIsInstance(new_generation_strategy._steps[0].model, Models)\n self.assertIsInstance(new_generation_strategy.model, ModelBridge)\n\n def testEncodeDecodeNumpy(self):\n arr = np.array([[1, 2, 3], [4, 5, 6]])\n self.assertTrue(np.array_equal(arr, object_from_json(object_to_json(arr))))\n\n def testEncodeDecodeSimpleBenchmarkProblem(self):\n branin_problem = get_branin_simple_benchmark_problem()\n sum_problem = get_sum_simple_benchmark_problem()\n new_branin_problem = object_from_json(object_to_json(branin_problem))\n new_sum_problem = object_from_json(object_to_json(sum_problem))\n self.assertEqual(\n branin_problem.f(1, 2), new_branin_problem.f(1, 2), branin(1, 2)\n )\n self.assertEqual(sum_problem.f([1, 2]), new_sum_problem.f([1, 2]), 3)\n # Test using `from_botorch`.\n ackley_problem = SimpleBenchmarkProblem(\n f=from_botorch(Ackley()), noise_sd=0.0, minimize=True\n )\n new_ackley_problem = object_from_json(object_to_json(ackley_problem))\n self.assertEqual(\n ackley_problem.f(1, 2), new_ackley_problem.f(1, 2), ackley(1, 2)\n )\n\n def testRegistryAdditions(self):\n class MyRunner(Runner):\n def run():\n pass\n\n def staging_required():\n return False\n\n class MyMetric(Metric):\n pass\n\n register_metric(MyMetric)\n register_runner(MyRunner)\n\n experiment = get_experiment_with_batch_and_single_trial()\n experiment.runner = MyRunner()\n experiment.add_tracking_metric(MyMetric(name=\"my_metric\"))\n with tempfile.NamedTemporaryFile(mode=\"w+\", delete=False, suffix=\".json\") as f:\n save_experiment(experiment, f.name)\n loaded_experiment = load_experiment(f.name)\n self.assertEqual(loaded_experiment, experiment)\n os.remove(f.name)\n\n def testEncodeUnknownClassToDict(self):\n # Cannot encode `UnknownClass` type because it is not registered in the\n # CLASS_ENCODER_REGISTRY.\n class UnknownClass:\n def __init__(self):\n pass\n\n with self.assertRaisesRegex(\n ValueError, \"is a class. 
Add it to the CLASS_ENCODER_REGISTRY\"\n ):\n object_to_json(UnknownClass)\n # `UnknownClass` type is registered in the CLASS_ENCODER_REGISTRY and uses the\n # `botorch_modular_to_dict` encoder, but `UnknownClass` is not registered in\n # the `botorch_modular_registry.py` file.\n CLASS_ENCODER_REGISTRY[UnknownClass] = botorch_modular_to_dict\n with self.assertRaisesRegex(\n ValueError,\n \"does not have a corresponding parent class in CLASS_TO_REGISTRY\",\n ):\n object_to_json(UnknownClass)\n\n def testDecodeUnknownClassFromJson(self):\n with self.assertRaisesRegex(\n ValueError,\n \"does not have a corresponding entry in CLASS_TO_REVERSE_REGISTRY\",\n ):\n class_from_json({\"index\": 0, \"class\": \"unknown_path\"})\n",
"step-ids": [
7,
10,
12,
14,
18
]
}
|
[
7,
10,
12,
14,
18
] |
<|reserved_special_token_0|>
def get_datas():
    """Prompt for a tab-separated data file and load it.

    The first non-empty line is treated as the header row; every following
    non-empty line becomes a dict mapping header name -> cell value.

    Returns:
        (headers, datas) on success, where `headers` is the list of column
        names and `datas` is a list of row dicts; None when reading fails
        (the caller retries in that case).
    """
    filename = None
    while True:
        filename = input('Please enter filename:')
        if not filename.strip():
            print('Filename is empty!')
            continue
        if not os.path.exists(filename):
            print('File is not exists!')
            continue
        break
    try:
        with open(filename) as f:
            datas = []
            # Skip leading blank lines; the first non-empty line is the
            # header row. (The original `if headers:` check was always true
            # because ''.split('\t') returns [''], so blank lines and empty
            # files were not actually handled.)
            headers = None
            for line in f:
                if line.strip():
                    headers = line.strip().split('\t')
                    break
            if headers is None:
                print('File has no header row!')
                return None
            for line in f:
                row_datas = {}
                if line.strip():
                    row = line.strip().split('\t')
                    for k, v in zip(headers, row):
                        row_datas[k] = v
                    datas.append(row_datas)
            return headers, datas
    except Exception as e:
        print(e)
def display_all(headers, datas):
    """Print rows as a tab-separated table, paginated `page_num` at a time.

    After each page (except the last) the user may press Enter to continue
    or enter 'q' to quit.

    Args:
        headers: ordered list of column names.
        datas: list of row dicts keyed by the header names.
    """
    if not datas:
        print('No datas!')
        return
    page_num = 10  # rows per page (previously assigned but never used)
    max_page = math.ceil(len(datas) / page_num)
    for page in range(max_page):
        for header in headers:
            print(header, end='\t')
        print()
        for row in datas[page * page_num:(page + 1) * page_num]:
            for k in headers:
                print(row[k], end='\t')
            print()
        # Only prompt when there is another page left to show; the original
        # code asked "Continue?" even after the final page.
        if page + 1 >= max_page:
            break
        command = input('Continue(Enter) or Quit(Q)?')
        if command.strip().lower() == 'q':
            break
<|reserved_special_token_0|>
def query_from_some_field(headers, datas):
    """Ask for a field name and a value, then print every matching row.

    The field must be one of the header names; matching against the stored
    value is case-insensitive.
    """
    field_name = ''
    while not (field_name and field_name in headers):
        print('All fields:', headers)
        field_name = input("Please input a students's field name:").strip()
    value = ''
    while not value:
        value = input("Please input a students's value:").strip().lower()
    for header in headers:
        print(header, end='\t')
    print()
    for record in datas:
        if record[field_name].lower() != value:
            continue
        for header in headers:
            print(record[header], end='\t')
        print()
def display_grad_year(headers, datas):
    """Prompt for a graduation year and page through the matching rows."""
    grad_year = ''
    while not grad_year.isdigit():
        grad_year = input("Please input a students's GradYear:").strip()
    matching = [row for row in datas if row['GradYear'] == grad_year]
    display_all(headers, matching)
def count_one_year(headers, datas, grad_year):
    """Print, per degree program, how many students graduate in `grad_year`
    and each program's percentage of that year's total.

    Args:
        headers: column names (unused; kept so the signature matches the
            other command handlers).
        datas: list of row dicts with 'GradYear' and 'DegreeProgram' keys.
        grad_year: graduation year to count, as a string.
    """
    counts = {}
    for data in datas:
        if data['GradYear'] == grad_year:
            program = data['DegreeProgram']
            # dict.get avoids the separate "key present?" branch.
            counts[program] = counts.get(program, 0) + 1
    if counts:
        totals = sum(counts.values())
        for k, v in counts.items():
            print(k, ':', v, 'Percent:', v / totals * 100)
    else:
        print('No datas!')
<|reserved_special_token_0|>
def main():
    """Load the data file, then run the interactive command loop."""
    print('init from file ...')
    while True:
        datas = get_datas()
        if datas:
            break
    headers, studs = datas
    # Map command keyword -> handler; every handler takes (headers, studs).
    commands = {
        'list': display_all,
        'qid': query_from_id,
        'qlst': query_from_lastname,
        'qfd': query_from_some_field,
        'qcgy': count_from_grad_year,
        'dgy': display_grad_year,
    }
    while True:
        print()
        print('-------------------------------')
        print(
            'List all:(list); Query ID:(Qid); Query Last(Qlst); Query field(Qfd); Count GradYear(Qcgy); display_grad_year(Dgy); Quit(Q)'
        )
        print('-------------------------------')
        command = input('Input your command:').lower()
        print()
        if command == 'q':
            break
        handler = commands.get(command)
        if handler is None:
            print('Bad command!')
        else:
            handler(headers, studs)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_datas():
    """Prompt for a tab-separated data file and load it.

    The first non-empty line is treated as the header row; every following
    non-empty line becomes a dict mapping header name -> cell value.

    Returns:
        (headers, datas) on success, where `headers` is the list of column
        names and `datas` is a list of row dicts; None when reading fails
        (the caller retries in that case).
    """
    filename = None
    while True:
        filename = input('Please enter filename:')
        if not filename.strip():
            print('Filename is empty!')
            continue
        if not os.path.exists(filename):
            print('File is not exists!')
            continue
        break
    try:
        with open(filename) as f:
            datas = []
            # Skip leading blank lines; the first non-empty line is the
            # header row. (The original `if headers:` check was always true
            # because ''.split('\t') returns [''], so blank lines and empty
            # files were not actually handled.)
            headers = None
            for line in f:
                if line.strip():
                    headers = line.strip().split('\t')
                    break
            if headers is None:
                print('File has no header row!')
                return None
            for line in f:
                row_datas = {}
                if line.strip():
                    row = line.strip().split('\t')
                    for k, v in zip(headers, row):
                        row_datas[k] = v
                    datas.append(row_datas)
            return headers, datas
    except Exception as e:
        print(e)
def display_all(headers, datas):
    """Print rows as a tab-separated table, paginated `page_num` at a time.

    After each page (except the last) the user may press Enter to continue
    or enter 'q' to quit.

    Args:
        headers: ordered list of column names.
        datas: list of row dicts keyed by the header names.
    """
    if not datas:
        print('No datas!')
        return
    page_num = 10  # rows per page (previously assigned but never used)
    max_page = math.ceil(len(datas) / page_num)
    for page in range(max_page):
        for header in headers:
            print(header, end='\t')
        print()
        for row in datas[page * page_num:(page + 1) * page_num]:
            for k in headers:
                print(row[k], end='\t')
            print()
        # Only prompt when there is another page left to show; the original
        # code asked "Continue?" even after the final page.
        if page + 1 >= max_page:
            break
        command = input('Continue(Enter) or Quit(Q)?')
        if command.strip().lower() == 'q':
            break
<|reserved_special_token_0|>
def query_from_some_field(headers, datas):
    """Ask for a field name and a value, then print every matching row.

    The field must be one of the header names; matching against the stored
    value is case-insensitive.
    """
    field_name = ''
    while not (field_name and field_name in headers):
        print('All fields:', headers)
        field_name = input("Please input a students's field name:").strip()
    value = ''
    while not value:
        value = input("Please input a students's value:").strip().lower()
    for header in headers:
        print(header, end='\t')
    print()
    for record in datas:
        if record[field_name].lower() != value:
            continue
        for header in headers:
            print(record[header], end='\t')
        print()
def display_grad_year(headers, datas):
    """Prompt for a graduation year and page through the matching rows."""
    grad_year = ''
    while not grad_year.isdigit():
        grad_year = input("Please input a students's GradYear:").strip()
    matching = [row for row in datas if row['GradYear'] == grad_year]
    display_all(headers, matching)
def count_one_year(headers, datas, grad_year):
ret = {}
for data in datas:
if data['GradYear'] == grad_year:
if data['DegreeProgram'] in ret:
ret[data['DegreeProgram']] += 1
else:
ret[data['DegreeProgram']] = 1
if ret:
totals = sum(ret.values())
for k, v in ret.items():
print(k, ':', v, 'Percent:', v / totals * 100)
else:
print('No datas!')
def count_from_grad_year(headers, datas):
while True:
grad_year = input("Please input a students's GradYear:").strip()
if grad_year and grad_year.isdigit():
break
while True:
on_after = input('Please Select On or After(On or Aft)? :').strip(
).lower()
if on_after and on_after in ('on', 'aft'):
break
if on_after == 'on':
count_one_year(headers, datas, grad_year)
elif on_after == 'aft':
max_year = 0
for data in datas:
if int(data['GradYear']) > max_year:
max_year = int(data['GradYear'])
if max_year < int(grad_year):
print('No datas')
else:
for year in range(int(grad_year), max_year):
count_one_year(headers, datas, grad_year)
def main():
print('init from file ...')
while True:
datas = get_datas()
if datas:
break
headers, studs = datas
commands = {'list': display_all, 'qid': query_from_id, 'qlst':
query_from_lastname, 'qfd': query_from_some_field, 'qcgy':
count_from_grad_year, 'dgy': display_grad_year}
while True:
print()
print('-------------------------------')
print(
'List all:(list); Query ID:(Qid); Query Last(Qlst); Query field(Qfd); Count GradYear(Qcgy); display_grad_year(Dgy); Quit(Q)'
)
print('-------------------------------')
command = input('Input your command:').lower()
print()
if command == 'q':
break
if not command or command not in commands.keys():
print('Bad command!')
continue
else:
commands[command](headers, studs)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_datas():
filename = None
while True:
filename = input('Please enter filename:')
if not filename.strip():
print('Filename is empty!')
continue
if not os.path.exists(filename):
print('File is not exists!')
continue
break
try:
with open(filename) as f:
datas = []
while True:
headers = f.readline().strip().split('\t')
if headers:
break
for line in f.readlines():
row_datas = {}
if line.strip():
row = line.strip().split('\t')
for k, v in zip(headers, row):
row_datas[k] = v
datas.append(row_datas)
return headers, datas
except Exception as e:
print(e)
def display_all(headers, datas):
if not datas:
print('No datas!')
return
max_page = math.ceil(len(datas) / 10)
page = 0
page_num = 10
while True:
for header in headers:
print(header, end='\t')
print()
for row in datas[page * 10:(page + 1) * 10]:
for k in headers:
print(row[k], end='\t')
print()
command = input('Continue(Enter) or Quit(Q)?')
if command.strip().lower() == 'q':
break
page += 1
if page >= max_page:
break
def query_from_id(headers, datas):
while True:
ID = input("Please input a students's ID:").strip()
if ID:
break
flag = True
for data in datas:
if data['ID'] == ID:
flag = False
for header in headers:
print(header, ':\t', data[header])
if flag:
print('No data was finded!')
<|reserved_special_token_0|>
def query_from_some_field(headers, datas):
while True:
print('All fields:', headers)
field_name = input("Please input a students's field name:").strip()
if field_name and field_name in headers:
break
while True:
value = input("Please input a students's value:").strip().lower()
if value:
break
for header in headers:
print(header, end='\t')
print()
for data in datas:
if data[field_name].lower() == value:
for header in headers:
print(data[header], end='\t')
print()
def display_grad_year(headers, datas):
while True:
grad_year = input("Please input a students's GradYear:").strip()
if grad_year and grad_year.isdigit():
break
datas = [d for d in datas if d['GradYear'] == grad_year]
display_all(headers, datas)
def count_one_year(headers, datas, grad_year):
ret = {}
for data in datas:
if data['GradYear'] == grad_year:
if data['DegreeProgram'] in ret:
ret[data['DegreeProgram']] += 1
else:
ret[data['DegreeProgram']] = 1
if ret:
totals = sum(ret.values())
for k, v in ret.items():
print(k, ':', v, 'Percent:', v / totals * 100)
else:
print('No datas!')
def count_from_grad_year(headers, datas):
while True:
grad_year = input("Please input a students's GradYear:").strip()
if grad_year and grad_year.isdigit():
break
while True:
on_after = input('Please Select On or After(On or Aft)? :').strip(
).lower()
if on_after and on_after in ('on', 'aft'):
break
if on_after == 'on':
count_one_year(headers, datas, grad_year)
elif on_after == 'aft':
max_year = 0
for data in datas:
if int(data['GradYear']) > max_year:
max_year = int(data['GradYear'])
if max_year < int(grad_year):
print('No datas')
else:
for year in range(int(grad_year), max_year):
count_one_year(headers, datas, grad_year)
def main():
print('init from file ...')
while True:
datas = get_datas()
if datas:
break
headers, studs = datas
commands = {'list': display_all, 'qid': query_from_id, 'qlst':
query_from_lastname, 'qfd': query_from_some_field, 'qcgy':
count_from_grad_year, 'dgy': display_grad_year}
while True:
print()
print('-------------------------------')
print(
'List all:(list); Query ID:(Qid); Query Last(Qlst); Query field(Qfd); Count GradYear(Qcgy); display_grad_year(Dgy); Quit(Q)'
)
print('-------------------------------')
command = input('Input your command:').lower()
print()
if command == 'q':
break
if not command or command not in commands.keys():
print('Bad command!')
continue
else:
commands[command](headers, studs)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_datas():
filename = None
while True:
filename = input('Please enter filename:')
if not filename.strip():
print('Filename is empty!')
continue
if not os.path.exists(filename):
print('File is not exists!')
continue
break
try:
with open(filename) as f:
datas = []
while True:
headers = f.readline().strip().split('\t')
if headers:
break
for line in f.readlines():
row_datas = {}
if line.strip():
row = line.strip().split('\t')
for k, v in zip(headers, row):
row_datas[k] = v
datas.append(row_datas)
return headers, datas
except Exception as e:
print(e)
def display_all(headers, datas):
if not datas:
print('No datas!')
return
max_page = math.ceil(len(datas) / 10)
page = 0
page_num = 10
while True:
for header in headers:
print(header, end='\t')
print()
for row in datas[page * 10:(page + 1) * 10]:
for k in headers:
print(row[k], end='\t')
print()
command = input('Continue(Enter) or Quit(Q)?')
if command.strip().lower() == 'q':
break
page += 1
if page >= max_page:
break
def query_from_id(headers, datas):
while True:
ID = input("Please input a students's ID:").strip()
if ID:
break
flag = True
for data in datas:
if data['ID'] == ID:
flag = False
for header in headers:
print(header, ':\t', data[header])
if flag:
print('No data was finded!')
def query_from_lastname(headers, datas):
while True:
name = input("Please input a students's name:").strip()
if name:
break
flag = True
for data in datas:
if data['Last'].lower().startswith(name.lower()):
flag = False
for header in headers:
print(header, ':\t', data[header])
if flag:
print('No data was finded!')
def query_from_some_field(headers, datas):
while True:
print('All fields:', headers)
field_name = input("Please input a students's field name:").strip()
if field_name and field_name in headers:
break
while True:
value = input("Please input a students's value:").strip().lower()
if value:
break
for header in headers:
print(header, end='\t')
print()
for data in datas:
if data[field_name].lower() == value:
for header in headers:
print(data[header], end='\t')
print()
def display_grad_year(headers, datas):
while True:
grad_year = input("Please input a students's GradYear:").strip()
if grad_year and grad_year.isdigit():
break
datas = [d for d in datas if d['GradYear'] == grad_year]
display_all(headers, datas)
def count_one_year(headers, datas, grad_year):
ret = {}
for data in datas:
if data['GradYear'] == grad_year:
if data['DegreeProgram'] in ret:
ret[data['DegreeProgram']] += 1
else:
ret[data['DegreeProgram']] = 1
if ret:
totals = sum(ret.values())
for k, v in ret.items():
print(k, ':', v, 'Percent:', v / totals * 100)
else:
print('No datas!')
def count_from_grad_year(headers, datas):
while True:
grad_year = input("Please input a students's GradYear:").strip()
if grad_year and grad_year.isdigit():
break
while True:
on_after = input('Please Select On or After(On or Aft)? :').strip(
).lower()
if on_after and on_after in ('on', 'aft'):
break
if on_after == 'on':
count_one_year(headers, datas, grad_year)
elif on_after == 'aft':
max_year = 0
for data in datas:
if int(data['GradYear']) > max_year:
max_year = int(data['GradYear'])
if max_year < int(grad_year):
print('No datas')
else:
for year in range(int(grad_year), max_year):
count_one_year(headers, datas, grad_year)
def main():
print('init from file ...')
while True:
datas = get_datas()
if datas:
break
headers, studs = datas
commands = {'list': display_all, 'qid': query_from_id, 'qlst':
query_from_lastname, 'qfd': query_from_some_field, 'qcgy':
count_from_grad_year, 'dgy': display_grad_year}
while True:
print()
print('-------------------------------')
print(
'List all:(list); Query ID:(Qid); Query Last(Qlst); Query field(Qfd); Count GradYear(Qcgy); display_grad_year(Dgy); Quit(Q)'
)
print('-------------------------------')
command = input('Input your command:').lower()
print()
if command == 'q':
break
if not command or command not in commands.keys():
print('Bad command!')
continue
else:
commands[command](headers, studs)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import os
import math
def get_datas():
    """Prompt for a tab-separated data file and load it.

    Returns a tuple ``(headers, rows)`` where ``headers`` is the list of
    column names from the first non-blank line and ``rows`` is a list of
    dicts mapping header -> cell value.  Returns ``None`` when the file
    cannot be read or has no header line (the caller re-prompts on a
    falsy result).
    """
    filename = None
    while True:
        filename = input('Please enter filename:')
        if not filename.strip():
            print('Filename is empty!')
            continue
        if not os.path.exists(filename):
            print('File is not exists!')
            continue
        break
    try:
        with open(filename) as f:
            datas = []
            # Skip leading blank lines; the first non-blank line is the
            # header.  (The original loop broke immediately because
            # ''.strip().split('\t') == [''] is truthy, so an empty
            # first line produced a bogus [''] header.)
            headers = None
            for line in f:
                if line.strip():
                    headers = line.strip().split('\t')
                    break
            if headers is None:
                print('File has no header line!')
                return None
            for line in f:
                if line.strip():
                    row = line.strip().split('\t')
                    datas.append(dict(zip(headers, row)))
            return headers, datas
    except Exception as e:
        # Best-effort: report the problem and fall through to None so
        # the caller can re-prompt instead of crashing.
        print(e)
def display_all(headers, datas):
    """Print ``datas`` as a tab-separated table, 10 rows per page.

    After each page (except the last) the user is asked whether to
    continue; entering 'q' stops the listing early.  Prints 'No datas!'
    and returns immediately when ``datas`` is empty.
    """
    if not datas:
        print('No datas!')
        return
    max_page = math.ceil(len(datas) / 10)
    page = 0
    while True:
        # Header row is repeated at the top of every page.
        for header in headers:
            print(header, end='\t')
        print()
        for row in datas[page * 10:(page + 1) * 10]:
            for k in headers:
                print(row[k], end='\t')
            print()
        page += 1
        if page >= max_page:
            break
        # Bug fix: the original prompted even after the final page,
        # asking the user to continue when nothing was left to show.
        command = input('Continue(Enter) or Quit(Q)?')
        if command.strip().lower() == 'q':
            break
def query_from_id(headers, datas):
    """Prompt for a student ID and print every record matching it exactly."""
    student_id = ''
    while not student_id:
        student_id = input("Please input a students's ID:").strip()
    found = False
    for record in datas:
        if record['ID'] != student_id:
            continue
        found = True
        for field in headers:
            print(field, ':\t', record[field])
    if not found:
        print('No data was finded!')
def query_from_lastname(headers, datas):
    """Prompt for a last-name prefix and print all matching records.

    Matching is case-insensitive on the leading characters of the
    'Last' field.
    """
    name = ''
    while not name:
        name = input("Please input a students's name:").strip()
    prefix = name.lower()
    matched = False
    for record in datas:
        if not record['Last'].lower().startswith(prefix):
            continue
        matched = True
        for field in headers:
            print(field, ':\t', record[field])
    if not matched:
        print('No data was finded!')
def query_from_some_field(headers, datas):
    """Prompt for a field name and a value, then print matching rows.

    The value comparison is case-insensitive and exact; the list of
    valid field names is shown before each prompt.
    """
    field_name = ''
    while not field_name or field_name not in headers:
        print('All fields:', headers)
        field_name = input("Please input a students's field name:").strip()
    value = ''
    while not value:
        value = input("Please input a students's value:").strip().lower()
    for field in headers:
        print(field, end='\t')
    print()
    for record in datas:
        if record[field_name].lower() != value:
            continue
        for field in headers:
            print(record[field], end='\t')
        print()
def display_grad_year(headers, datas):
    """Prompt for a graduation year and page through the matching rows."""
    grad_year = ''
    # ''.isdigit() is False, so this also rejects empty input.
    while not grad_year.isdigit():
        grad_year = input("Please input a students's GradYear:").strip()
    selected = [row for row in datas if row['GradYear'] == grad_year]
    display_all(headers, selected)
def count_one_year(headers, datas, grad_year):
    """Print, for one graduation year, the number of students in each
    degree program and each program's percentage of that year's total.

    ``headers`` is accepted for signature consistency with the other
    report helpers but is not used here.  Prints 'No datas!' when no
    record matches ``grad_year``.
    """
    counts = {}
    for record in datas:
        if record['GradYear'] != grad_year:
            continue
        program = record['DegreeProgram']
        counts[program] = counts.get(program, 0) + 1
    if not counts:
        print('No datas!')
        return
    totals = sum(counts.values())
    for program, count in counts.items():
        print(program, ':', count, 'Percent:', count / totals * 100)
def count_from_grad_year(headers, datas):
    """Prompt for a graduation year and report degree-program counts.

    The user then chooses 'on' (report that single year) or 'aft'
    (report that year and every later year up to the latest year found
    in the data); each year is reported via count_one_year().
    """
    while True:
        grad_year = input("Please input a students's GradYear:").strip()
        if grad_year and grad_year.isdigit():
            break
    while True:
        on_after = input('Please Select On or After(On or Aft)? :').strip(
            ).lower()
        if on_after and on_after in ('on', 'aft'):
            break
    if on_after == 'on':
        count_one_year(headers, datas, grad_year)
    elif on_after == 'aft':
        # Latest graduation year present in the data set.
        max_year = 0
        for data in datas:
            if int(data['GradYear']) > max_year:
                max_year = int(data['GradYear'])
        if max_year < int(grad_year):
            print('No datas')
        else:
            # Bug fix: the original passed grad_year on every iteration
            # (printing the same year's stats repeatedly) and its range
            # stopped one year short of max_year.
            for year in range(int(grad_year), max_year + 1):
                count_one_year(headers, datas, str(year))
def main():
    """Load the student file, then run the interactive command loop.

    Commands are dispatched through a name -> handler dict; every
    handler takes (headers, rows).  'q' exits the loop.
    """
    print('init from file ...')
    # Keep prompting until get_datas() returns a usable (headers, rows) pair.
    while True:
        datas = get_datas()
        if datas:
            break
    headers, studs = datas
    # Command name -> handler; each handler is called as f(headers, studs).
    commands = {'list':display_all,'qid':query_from_id,
                'qlst':query_from_lastname, 'qfd':query_from_some_field,
                'qcgy': count_from_grad_year, 'dgy':display_grad_year}
    while True:
        print()
        print('-------------------------------')
        print('List all:(list); Query ID:(Qid); Query Last(Qlst); Query field(Qfd);\
             Count GradYear(Qcgy); display_grad_year(Dgy); Quit(Q)')
        print('-------------------------------')
        command = input('Input your command:').lower()
        print()
        if command == 'q':
            break
        if not command or command not in commands.keys():
            print('Bad command!')
            continue
        else:
            commands[command](headers, studs)
    

# Script entry point.
if __name__ == '__main__':
    main()
|
flexible
|
{
"blob_id": "6829f7bcbc1b12500795eec19829ff077502e270",
"index": 3260,
"step-1": "<mask token>\n\n\ndef get_datas():\n filename = None\n while True:\n filename = input('Please enter filename:')\n if not filename.strip():\n print('Filename is empty!')\n continue\n if not os.path.exists(filename):\n print('File is not exists!')\n continue\n break\n try:\n with open(filename) as f:\n datas = []\n while True:\n headers = f.readline().strip().split('\\t')\n if headers:\n break\n for line in f.readlines():\n row_datas = {}\n if line.strip():\n row = line.strip().split('\\t')\n for k, v in zip(headers, row):\n row_datas[k] = v\n datas.append(row_datas)\n return headers, datas\n except Exception as e:\n print(e)\n\n\ndef display_all(headers, datas):\n if not datas:\n print('No datas!')\n return\n max_page = math.ceil(len(datas) / 10)\n page = 0\n page_num = 10\n while True:\n for header in headers:\n print(header, end='\\t')\n print()\n for row in datas[page * 10:(page + 1) * 10]:\n for k in headers:\n print(row[k], end='\\t')\n print()\n command = input('Continue(Enter) or Quit(Q)?')\n if command.strip().lower() == 'q':\n break\n page += 1\n if page >= max_page:\n break\n\n\n<mask token>\n\n\ndef query_from_some_field(headers, datas):\n while True:\n print('All fields:', headers)\n field_name = input(\"Please input a students's field name:\").strip()\n if field_name and field_name in headers:\n break\n while True:\n value = input(\"Please input a students's value:\").strip().lower()\n if value:\n break\n for header in headers:\n print(header, end='\\t')\n print()\n for data in datas:\n if data[field_name].lower() == value:\n for header in headers:\n print(data[header], end='\\t')\n print()\n\n\ndef display_grad_year(headers, datas):\n while True:\n grad_year = input(\"Please input a students's GradYear:\").strip()\n if grad_year and grad_year.isdigit():\n break\n datas = [d for d in datas if d['GradYear'] == grad_year]\n display_all(headers, datas)\n\n\ndef count_one_year(headers, datas, grad_year):\n ret = {}\n for data in datas:\n if 
data['GradYear'] == grad_year:\n if data['DegreeProgram'] in ret:\n ret[data['DegreeProgram']] += 1\n else:\n ret[data['DegreeProgram']] = 1\n if ret:\n totals = sum(ret.values())\n for k, v in ret.items():\n print(k, ':', v, 'Percent:', v / totals * 100)\n else:\n print('No datas!')\n\n\n<mask token>\n\n\ndef main():\n print('init from file ...')\n while True:\n datas = get_datas()\n if datas:\n break\n headers, studs = datas\n commands = {'list': display_all, 'qid': query_from_id, 'qlst':\n query_from_lastname, 'qfd': query_from_some_field, 'qcgy':\n count_from_grad_year, 'dgy': display_grad_year}\n while True:\n print()\n print('-------------------------------')\n print(\n 'List all:(list); Query ID:(Qid); Query Last(Qlst); Query field(Qfd); Count GradYear(Qcgy); display_grad_year(Dgy); Quit(Q)'\n )\n print('-------------------------------')\n command = input('Input your command:').lower()\n print()\n if command == 'q':\n break\n if not command or command not in commands.keys():\n print('Bad command!')\n continue\n else:\n commands[command](headers, studs)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_datas():\n filename = None\n while True:\n filename = input('Please enter filename:')\n if not filename.strip():\n print('Filename is empty!')\n continue\n if not os.path.exists(filename):\n print('File is not exists!')\n continue\n break\n try:\n with open(filename) as f:\n datas = []\n while True:\n headers = f.readline().strip().split('\\t')\n if headers:\n break\n for line in f.readlines():\n row_datas = {}\n if line.strip():\n row = line.strip().split('\\t')\n for k, v in zip(headers, row):\n row_datas[k] = v\n datas.append(row_datas)\n return headers, datas\n except Exception as e:\n print(e)\n\n\ndef display_all(headers, datas):\n if not datas:\n print('No datas!')\n return\n max_page = math.ceil(len(datas) / 10)\n page = 0\n page_num = 10\n while True:\n for header in headers:\n print(header, end='\\t')\n print()\n for row in datas[page * 10:(page + 1) * 10]:\n for k in headers:\n print(row[k], end='\\t')\n print()\n command = input('Continue(Enter) or Quit(Q)?')\n if command.strip().lower() == 'q':\n break\n page += 1\n if page >= max_page:\n break\n\n\n<mask token>\n\n\ndef query_from_some_field(headers, datas):\n while True:\n print('All fields:', headers)\n field_name = input(\"Please input a students's field name:\").strip()\n if field_name and field_name in headers:\n break\n while True:\n value = input(\"Please input a students's value:\").strip().lower()\n if value:\n break\n for header in headers:\n print(header, end='\\t')\n print()\n for data in datas:\n if data[field_name].lower() == value:\n for header in headers:\n print(data[header], end='\\t')\n print()\n\n\ndef display_grad_year(headers, datas):\n while True:\n grad_year = input(\"Please input a students's GradYear:\").strip()\n if grad_year and grad_year.isdigit():\n break\n datas = [d for d in datas if d['GradYear'] == grad_year]\n display_all(headers, datas)\n\n\ndef count_one_year(headers, datas, grad_year):\n ret = {}\n for data in datas:\n if 
data['GradYear'] == grad_year:\n if data['DegreeProgram'] in ret:\n ret[data['DegreeProgram']] += 1\n else:\n ret[data['DegreeProgram']] = 1\n if ret:\n totals = sum(ret.values())\n for k, v in ret.items():\n print(k, ':', v, 'Percent:', v / totals * 100)\n else:\n print('No datas!')\n\n\ndef count_from_grad_year(headers, datas):\n while True:\n grad_year = input(\"Please input a students's GradYear:\").strip()\n if grad_year and grad_year.isdigit():\n break\n while True:\n on_after = input('Please Select On or After(On or Aft)? :').strip(\n ).lower()\n if on_after and on_after in ('on', 'aft'):\n break\n if on_after == 'on':\n count_one_year(headers, datas, grad_year)\n elif on_after == 'aft':\n max_year = 0\n for data in datas:\n if int(data['GradYear']) > max_year:\n max_year = int(data['GradYear'])\n if max_year < int(grad_year):\n print('No datas')\n else:\n for year in range(int(grad_year), max_year):\n count_one_year(headers, datas, grad_year)\n\n\ndef main():\n print('init from file ...')\n while True:\n datas = get_datas()\n if datas:\n break\n headers, studs = datas\n commands = {'list': display_all, 'qid': query_from_id, 'qlst':\n query_from_lastname, 'qfd': query_from_some_field, 'qcgy':\n count_from_grad_year, 'dgy': display_grad_year}\n while True:\n print()\n print('-------------------------------')\n print(\n 'List all:(list); Query ID:(Qid); Query Last(Qlst); Query field(Qfd); Count GradYear(Qcgy); display_grad_year(Dgy); Quit(Q)'\n )\n print('-------------------------------')\n command = input('Input your command:').lower()\n print()\n if command == 'q':\n break\n if not command or command not in commands.keys():\n print('Bad command!')\n continue\n else:\n commands[command](headers, studs)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_datas():\n filename = None\n while True:\n filename = input('Please enter filename:')\n if not filename.strip():\n print('Filename is empty!')\n continue\n if not os.path.exists(filename):\n print('File is not exists!')\n continue\n break\n try:\n with open(filename) as f:\n datas = []\n while True:\n headers = f.readline().strip().split('\\t')\n if headers:\n break\n for line in f.readlines():\n row_datas = {}\n if line.strip():\n row = line.strip().split('\\t')\n for k, v in zip(headers, row):\n row_datas[k] = v\n datas.append(row_datas)\n return headers, datas\n except Exception as e:\n print(e)\n\n\ndef display_all(headers, datas):\n if not datas:\n print('No datas!')\n return\n max_page = math.ceil(len(datas) / 10)\n page = 0\n page_num = 10\n while True:\n for header in headers:\n print(header, end='\\t')\n print()\n for row in datas[page * 10:(page + 1) * 10]:\n for k in headers:\n print(row[k], end='\\t')\n print()\n command = input('Continue(Enter) or Quit(Q)?')\n if command.strip().lower() == 'q':\n break\n page += 1\n if page >= max_page:\n break\n\n\ndef query_from_id(headers, datas):\n while True:\n ID = input(\"Please input a students's ID:\").strip()\n if ID:\n break\n flag = True\n for data in datas:\n if data['ID'] == ID:\n flag = False\n for header in headers:\n print(header, ':\\t', data[header])\n if flag:\n print('No data was finded!')\n\n\n<mask token>\n\n\ndef query_from_some_field(headers, datas):\n while True:\n print('All fields:', headers)\n field_name = input(\"Please input a students's field name:\").strip()\n if field_name and field_name in headers:\n break\n while True:\n value = input(\"Please input a students's value:\").strip().lower()\n if value:\n break\n for header in headers:\n print(header, end='\\t')\n print()\n for data in datas:\n if data[field_name].lower() == value:\n for header in headers:\n print(data[header], end='\\t')\n print()\n\n\ndef display_grad_year(headers, datas):\n while 
True:\n grad_year = input(\"Please input a students's GradYear:\").strip()\n if grad_year and grad_year.isdigit():\n break\n datas = [d for d in datas if d['GradYear'] == grad_year]\n display_all(headers, datas)\n\n\ndef count_one_year(headers, datas, grad_year):\n ret = {}\n for data in datas:\n if data['GradYear'] == grad_year:\n if data['DegreeProgram'] in ret:\n ret[data['DegreeProgram']] += 1\n else:\n ret[data['DegreeProgram']] = 1\n if ret:\n totals = sum(ret.values())\n for k, v in ret.items():\n print(k, ':', v, 'Percent:', v / totals * 100)\n else:\n print('No datas!')\n\n\ndef count_from_grad_year(headers, datas):\n while True:\n grad_year = input(\"Please input a students's GradYear:\").strip()\n if grad_year and grad_year.isdigit():\n break\n while True:\n on_after = input('Please Select On or After(On or Aft)? :').strip(\n ).lower()\n if on_after and on_after in ('on', 'aft'):\n break\n if on_after == 'on':\n count_one_year(headers, datas, grad_year)\n elif on_after == 'aft':\n max_year = 0\n for data in datas:\n if int(data['GradYear']) > max_year:\n max_year = int(data['GradYear'])\n if max_year < int(grad_year):\n print('No datas')\n else:\n for year in range(int(grad_year), max_year):\n count_one_year(headers, datas, grad_year)\n\n\ndef main():\n print('init from file ...')\n while True:\n datas = get_datas()\n if datas:\n break\n headers, studs = datas\n commands = {'list': display_all, 'qid': query_from_id, 'qlst':\n query_from_lastname, 'qfd': query_from_some_field, 'qcgy':\n count_from_grad_year, 'dgy': display_grad_year}\n while True:\n print()\n print('-------------------------------')\n print(\n 'List all:(list); Query ID:(Qid); Query Last(Qlst); Query field(Qfd); Count GradYear(Qcgy); display_grad_year(Dgy); Quit(Q)'\n )\n print('-------------------------------')\n command = input('Input your command:').lower()\n print()\n if command == 'q':\n break\n if not command or command not in commands.keys():\n print('Bad command!')\n continue\n 
else:\n commands[command](headers, studs)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef get_datas():\n filename = None\n while True:\n filename = input('Please enter filename:')\n if not filename.strip():\n print('Filename is empty!')\n continue\n if not os.path.exists(filename):\n print('File is not exists!')\n continue\n break\n try:\n with open(filename) as f:\n datas = []\n while True:\n headers = f.readline().strip().split('\\t')\n if headers:\n break\n for line in f.readlines():\n row_datas = {}\n if line.strip():\n row = line.strip().split('\\t')\n for k, v in zip(headers, row):\n row_datas[k] = v\n datas.append(row_datas)\n return headers, datas\n except Exception as e:\n print(e)\n\n\ndef display_all(headers, datas):\n if not datas:\n print('No datas!')\n return\n max_page = math.ceil(len(datas) / 10)\n page = 0\n page_num = 10\n while True:\n for header in headers:\n print(header, end='\\t')\n print()\n for row in datas[page * 10:(page + 1) * 10]:\n for k in headers:\n print(row[k], end='\\t')\n print()\n command = input('Continue(Enter) or Quit(Q)?')\n if command.strip().lower() == 'q':\n break\n page += 1\n if page >= max_page:\n break\n\n\ndef query_from_id(headers, datas):\n while True:\n ID = input(\"Please input a students's ID:\").strip()\n if ID:\n break\n flag = True\n for data in datas:\n if data['ID'] == ID:\n flag = False\n for header in headers:\n print(header, ':\\t', data[header])\n if flag:\n print('No data was finded!')\n\n\ndef query_from_lastname(headers, datas):\n while True:\n name = input(\"Please input a students's name:\").strip()\n if name:\n break\n flag = True\n for data in datas:\n if data['Last'].lower().startswith(name.lower()):\n flag = False\n for header in headers:\n print(header, ':\\t', data[header])\n if flag:\n print('No data was finded!')\n\n\ndef query_from_some_field(headers, datas):\n while True:\n print('All fields:', headers)\n field_name = input(\"Please input a students's field name:\").strip()\n if field_name and field_name in headers:\n break\n while True:\n 
value = input(\"Please input a students's value:\").strip().lower()\n if value:\n break\n for header in headers:\n print(header, end='\\t')\n print()\n for data in datas:\n if data[field_name].lower() == value:\n for header in headers:\n print(data[header], end='\\t')\n print()\n\n\ndef display_grad_year(headers, datas):\n while True:\n grad_year = input(\"Please input a students's GradYear:\").strip()\n if grad_year and grad_year.isdigit():\n break\n datas = [d for d in datas if d['GradYear'] == grad_year]\n display_all(headers, datas)\n\n\ndef count_one_year(headers, datas, grad_year):\n ret = {}\n for data in datas:\n if data['GradYear'] == grad_year:\n if data['DegreeProgram'] in ret:\n ret[data['DegreeProgram']] += 1\n else:\n ret[data['DegreeProgram']] = 1\n if ret:\n totals = sum(ret.values())\n for k, v in ret.items():\n print(k, ':', v, 'Percent:', v / totals * 100)\n else:\n print('No datas!')\n\n\ndef count_from_grad_year(headers, datas):\n while True:\n grad_year = input(\"Please input a students's GradYear:\").strip()\n if grad_year and grad_year.isdigit():\n break\n while True:\n on_after = input('Please Select On or After(On or Aft)? 
:').strip(\n ).lower()\n if on_after and on_after in ('on', 'aft'):\n break\n if on_after == 'on':\n count_one_year(headers, datas, grad_year)\n elif on_after == 'aft':\n max_year = 0\n for data in datas:\n if int(data['GradYear']) > max_year:\n max_year = int(data['GradYear'])\n if max_year < int(grad_year):\n print('No datas')\n else:\n for year in range(int(grad_year), max_year):\n count_one_year(headers, datas, grad_year)\n\n\ndef main():\n print('init from file ...')\n while True:\n datas = get_datas()\n if datas:\n break\n headers, studs = datas\n commands = {'list': display_all, 'qid': query_from_id, 'qlst':\n query_from_lastname, 'qfd': query_from_some_field, 'qcgy':\n count_from_grad_year, 'dgy': display_grad_year}\n while True:\n print()\n print('-------------------------------')\n print(\n 'List all:(list); Query ID:(Qid); Query Last(Qlst); Query field(Qfd); Count GradYear(Qcgy); display_grad_year(Dgy); Quit(Q)'\n )\n print('-------------------------------')\n command = input('Input your command:').lower()\n print()\n if command == 'q':\n break\n if not command or command not in commands.keys():\n print('Bad command!')\n continue\n else:\n commands[command](headers, studs)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import os\nimport math\n\ndef get_datas():\n filename = None\n while True:\n filename = input('Please enter filename:')\n if not filename.strip():\n print('Filename is empty!')\n continue\n if not os.path.exists(filename):\n print('File is not exists!')\n continue\n break\n try:\n with open(filename) as f:\n datas = []\n while True:\n headers = f.readline().strip().split('\\t')\n if headers:\n break\n for line in f.readlines():\n row_datas = {}\n if line.strip():\n row = line.strip().split('\\t')\n for k,v in zip(headers, row):\n row_datas[k] = v\n datas.append(row_datas)\n return headers,datas\n except Exception as e:\n print(e)\n\ndef display_all(headers, datas):\n if not datas:\n print('No datas!')\n return\n max_page = math.ceil(len(datas) / 10)\n page = 0\n page_num = 10\n while True:\n for header in headers:\n print(header, end='\\t')\n print()\n for row in datas[page * 10 : (page + 1) * 10]:\n for k in headers:\n print(row[k], end='\\t')\n print()\n command = input('Continue(Enter) or Quit(Q)?')\n if command.strip().lower() == 'q':\n break\n page += 1\n if page >= max_page:\n break\n\n\n\ndef query_from_id(headers, datas):\n while True:\n ID = input('Please input a students\\'s ID:').strip()\n if ID:\n break\n flag = True\n for data in datas:\n if data['ID'] == ID:\n flag = False\n for header in headers:\n print(header, ':\\t', data[header])\n if flag:\n print('No data was finded!')\n\ndef query_from_lastname(headers, datas):\n while True:\n name = input('Please input a students\\'s name:').strip()\n if name:\n break\n flag = True\n for data in datas:\n if data['Last'].lower().startswith(name.lower()):\n flag = False\n for header in headers:\n print(header, ':\\t', data[header])\n if flag:\n print('No data was finded!')\n\ndef query_from_some_field(headers, datas):\n while True:\n print('All fields:', headers)\n field_name = input('Please input a students\\'s field name:').strip()\n if field_name and field_name in headers:\n break\n while True:\n 
value = input('Please input a students\\'s value:').strip().lower()\n if value:\n break\n for header in headers:\n print(header, end='\\t')\n print()\n for data in datas:\n if data[field_name].lower() == value:\n for header in headers:\n print(data[header], end='\\t')\n print()\n\ndef display_grad_year(headers, datas):\n while True:\n grad_year = input('Please input a students\\'s GradYear:').strip()\n if grad_year and grad_year.isdigit():\n # grad_year = int(grad_year)\n break\n datas = [d for d in datas if d['GradYear'] == grad_year]\n # print(datas)\n display_all(headers, datas)\n\ndef count_one_year(headers, datas, grad_year):\n ret = {}\n for data in datas:\n if data['GradYear'] == grad_year:\n if data['DegreeProgram'] in ret:\n ret[data['DegreeProgram']] += 1\n else:\n ret[data['DegreeProgram']] = 1\n # print(ret)\n if ret:\n totals = sum(ret.values())\n for k,v in ret.items():\n print(k, ':', v, 'Percent:', v / totals * 100)\n else:\n print('No datas!')\n\ndef count_from_grad_year(headers, datas):\n while True:\n grad_year = input('Please input a students\\'s GradYear:').strip()\n if grad_year and grad_year.isdigit():\n # grad_year = int(grad_year)\n break\n while True:\n on_after = input('Please Select On or After(On or Aft)? 
:').strip().lower()\n if on_after and on_after in ('on', 'aft'):\n break\n if on_after == 'on':\n count_one_year(headers, datas, grad_year)\n elif on_after == 'aft':\n max_year = 0\n for data in datas:\n if int(data['GradYear']) > max_year:\n max_year = int(data['GradYear'])\n if max_year < int(grad_year):\n print('No datas')\n else:\n for year in range(int(grad_year), max_year):\n count_one_year(headers, datas, grad_year)\n\n\ndef main():\n print('init from file ...')\n while True:\n datas = get_datas()\n if datas:\n break\n headers, studs = datas\n commands = {'list':display_all,'qid':query_from_id,\n 'qlst':query_from_lastname, 'qfd':query_from_some_field,\n 'qcgy': count_from_grad_year, 'dgy':display_grad_year}\n while True:\n print()\n print('-------------------------------')\n print('List all:(list); Query ID:(Qid); Query Last(Qlst); Query field(Qfd);\\\n Count GradYear(Qcgy); display_grad_year(Dgy); Quit(Q)')\n print('-------------------------------')\n command = input('Input your command:').lower()\n print()\n if command == 'q':\n break\n if not command or command not in commands.keys():\n print('Bad command!')\n continue\n else:\n commands[command](headers, studs)\n \n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
6,
7,
8,
10,
12
]
}
|
[
6,
7,
8,
10,
12
] |
<|reserved_special_token_0|>
class RoomView(View):
def get(self, request, room_id):
try:
room = Room.objects.get(id=room_id)
rating_list = [field.name for field in Review._meta.get_fields(
) if field.name not in ['id', 'review_user', 'review_room',
'comment']]
room_detail = {'room_name': room.name, 'address': room.city,
'price': room.price, 'room_type': room.category.name,
'image': [image.url for image in room.image.all()][0],
'is_super': room.host.is_super, 'host': room.host.user.
last_name + room.host.user.first_name, 'capacity': room.
capacity, 'amenity': [{'id': roomamenity.amenity.id, 'icon':
re.sub('<i class=\\"|\\"></i>', '', roomamenity.amenity.
image), 'description': roomamenity.amenity.name} for
roomamenity in room.roomamenity_set.all()], 'rating': [{
'category': category, 'category_rating': int(Review.objects
.filter(review_room=room).aggregate(Avg(category)).get(
category + '__avg'))} for category in rating_list]}
return JsonResponse({'detail': room_detail}, status=200)
except KeyError:
return JsonResponse({'message': 'KeyError'}, status=400)
except Room.DoesNotExist:
return JsonResponse({'message': 'NOT_FOUND_ROOM_ID'}, status=400)
class WishListView(View):
@LoginRequired
def post(self, request, room_id):
user = request.user
try:
if WishList.objects.filter(wish_user=user, wish_room_id=room_id
).exists():
return JsonResponse({'MESSAGE': 'Already Choosen'}, status=400)
WishList.objects.create(wish_user_id=1, wish_room_id=room_id)
return JsonResponse({'MESSAGE': 'SUCCESS'}, status=200)
except KeyError:
return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)
@LoginRequired
def delete(self, request, room_id):
try:
user = request.user
wish = WishList.objects.get(wish_user=user, wish_room_id=room_id)
wish.delete()
return JsonResponse({'MESSAGE': 'Delete Success'}, status=200)
except KeyError:
return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)
except WishList.DoesNotExist:
return JsonResponse({'MESSAGE': 'Already not Exist in list'},
status=400)
@LoginRequired
def get(self, request):
try:
user = request.user
wishlists = WishList.objects.filter(wish_user=user)
rating_list = [field.name for field in Review._meta.get_fields(
) if field.name not in ['id', 'review_user', 'review_room',
'comment']]
if not wishlists:
return JsonResponse({'MESSAGE': 'nothing in cart'}, status=400)
result = [{'room_id': wishlist.wish_room.id, 'room_name':
wishlist.wish_room.name, 'address': wishlist.wish_room.city,
'price': wishlist.wish_room.price, 'room_type': wishlist.
wish_room.category.name, 'image': [image.url for image in
wishlist.wish_room.image.all()], 'is_super': wishlist.
wish_room.host.is_super, 'capacity': wishlist.wish_room.
capacity, 'lat': wishlist.wish_room.latitude, 'lng':
wishlist.wish_room.longtitude, 'amenity': [roomamenity.
amenity.name for roomamenity in wishlist.wish_room.
roomamenity_set.all()], 'rating': [{'category': category,
'category_rating': Review.objects.filter(review_room=
wishlist.wish_room).aggregate(Avg(category)).get(category +
'__avg')} for category in rating_list]} for wishlist in
wishlists]
return JsonResponse({'result': result}, status=200)
except KeyError:
return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RoomListView(View):
<|reserved_special_token_0|>
class RoomView(View):
def get(self, request, room_id):
try:
room = Room.objects.get(id=room_id)
rating_list = [field.name for field in Review._meta.get_fields(
) if field.name not in ['id', 'review_user', 'review_room',
'comment']]
room_detail = {'room_name': room.name, 'address': room.city,
'price': room.price, 'room_type': room.category.name,
'image': [image.url for image in room.image.all()][0],
'is_super': room.host.is_super, 'host': room.host.user.
last_name + room.host.user.first_name, 'capacity': room.
capacity, 'amenity': [{'id': roomamenity.amenity.id, 'icon':
re.sub('<i class=\\"|\\"></i>', '', roomamenity.amenity.
image), 'description': roomamenity.amenity.name} for
roomamenity in room.roomamenity_set.all()], 'rating': [{
'category': category, 'category_rating': int(Review.objects
.filter(review_room=room).aggregate(Avg(category)).get(
category + '__avg'))} for category in rating_list]}
return JsonResponse({'detail': room_detail}, status=200)
except KeyError:
return JsonResponse({'message': 'KeyError'}, status=400)
except Room.DoesNotExist:
return JsonResponse({'message': 'NOT_FOUND_ROOM_ID'}, status=400)
class WishListView(View):
@LoginRequired
def post(self, request, room_id):
user = request.user
try:
if WishList.objects.filter(wish_user=user, wish_room_id=room_id
).exists():
return JsonResponse({'MESSAGE': 'Already Choosen'}, status=400)
WishList.objects.create(wish_user_id=1, wish_room_id=room_id)
return JsonResponse({'MESSAGE': 'SUCCESS'}, status=200)
except KeyError:
return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)
@LoginRequired
def delete(self, request, room_id):
try:
user = request.user
wish = WishList.objects.get(wish_user=user, wish_room_id=room_id)
wish.delete()
return JsonResponse({'MESSAGE': 'Delete Success'}, status=200)
except KeyError:
return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)
except WishList.DoesNotExist:
return JsonResponse({'MESSAGE': 'Already not Exist in list'},
status=400)
@LoginRequired
def get(self, request):
try:
user = request.user
wishlists = WishList.objects.filter(wish_user=user)
rating_list = [field.name for field in Review._meta.get_fields(
) if field.name not in ['id', 'review_user', 'review_room',
'comment']]
if not wishlists:
return JsonResponse({'MESSAGE': 'nothing in cart'}, status=400)
result = [{'room_id': wishlist.wish_room.id, 'room_name':
wishlist.wish_room.name, 'address': wishlist.wish_room.city,
'price': wishlist.wish_room.price, 'room_type': wishlist.
wish_room.category.name, 'image': [image.url for image in
wishlist.wish_room.image.all()], 'is_super': wishlist.
wish_room.host.is_super, 'capacity': wishlist.wish_room.
capacity, 'lat': wishlist.wish_room.latitude, 'lng':
wishlist.wish_room.longtitude, 'amenity': [roomamenity.
amenity.name for roomamenity in wishlist.wish_room.
roomamenity_set.all()], 'rating': [{'category': category,
'category_rating': Review.objects.filter(review_room=
wishlist.wish_room).aggregate(Avg(category)).get(category +
'__avg')} for category in rating_list]} for wishlist in
wishlists]
return JsonResponse({'result': result}, status=200)
except KeyError:
return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RoomListView(View):
    def get(self, request):
        """List rooms matching the query-string filters as JSON thumbnails.

        Supported GET parameters: city, checkin, checkout, adult, child,
        min_price, max_price, is_refund, is_super, room_type (repeatable),
        amenity (repeatable), page.

        Returns 200 with 'thumbnail' (one page of rooms) and 'common' (total
        match count), or 400 when no room is available or a parameter is
        malformed.
        """
        try:
            city       = request.GET.get('city', '')
            checkin    = request.GET.get('checkin', None)
            checkout   = request.GET.get('checkout', None)
            adult      = int(request.GET.get('adult', '0'))
            child      = int(request.GET.get('child', '0'))
            min_price  = request.GET.get('min_price', 0)
            max_price  = request.GET.get('max_price', 100000000)
            is_refund  = request.GET.get('is_refund', None) == 'true'
            is_super   = request.GET.get('is_super', None) == 'true'
            room_types = request.GET.getlist('room_type', None)
            amenities  = request.GET.getlist('amenity', None)
            page       = int(request.GET.get('page', '1'))

            # Base ORM filter; optional criteria are added only when supplied
            # so absent parameters do not narrow the queryset.
            list_criteria = {
                'city__contains': city,
                'price__range'  : [min_price, max_price],
                'capacity__gte' : adult + child,
            }
            if room_types:
                list_criteria['category__name__in'] = room_types
            if amenities:
                list_criteria['amenity__name__in'] = amenities
            if is_super:
                list_criteria['host__is_super'] = is_super
            if is_refund:
                list_criteria['is_refund'] = is_refund

            # Pagination window (fixed page size of 10).
            size   = 10
            offset = (page - 1) * size
            limit  = page * size

            room_list = Room.objects.filter(**list_criteria)

            # Availability filters for the requested stay dates.
            if checkin and checkout:
                room_list = [room for room in room_list if check(room, checkin, checkout)]
            if checkin:
                room_list = [room for room in room_list if check_in(room, checkin)]
            if checkout:
                room_list = [room for room in room_list if check_out(room, checkout)]
            if not room_list:
                return JsonResponse({'message': 'NO_ROOM_AVAILABLE'}, status=400)

            # Rating categories are every Review field except bookkeeping ones.
            rating_list = [
                field.name for field in Review._meta.get_fields()
                if field.name not in ['id', 'review_user', 'review_room', 'comment']
            ]
            room_thumbnail = [{
                'room_id'  : room.id,
                'room_name': room.name,
                'price'    : room.price,
                'address'  : room.city,
                'room_type': room.category.name,
                'lat'      : room.latitude,
                'lng'      : room.longtitude,
                'image'    : [image.url for image in room.image.all()],
                'is_super' : room.host.is_super,
                'capacity' : int(room.capacity),
                'amenity'  : [roomamenity.amenity.name for roomamenity in room.roomamenity_set.all()],
                'rating'   : [{
                    'category'       : category,
                    # None when the room has no reviews yet; serialized as null.
                    'category_rating': Review.objects.filter(review_room=room).aggregate(rate_avg=Avg(category))['rate_avg'],
                } for category in rating_list],
            } for room in room_list[offset:limit]]
            common_data = len(room_list)
            return JsonResponse({'thumbnail': room_thumbnail, 'common': common_data}, status=200)
        except KeyError:
            return JsonResponse({'message': 'KeyError'}, status=400)
        except ValueError:
            # Bug fix: non-numeric adult/child/page values previously raised an
            # uncaught ValueError and produced a 500 instead of a 400.
            return JsonResponse({'message': 'ValueError'}, status=400)
class RoomView(View):
    def get(self, request, room_id):
        """Return detail information for a single room as JSON.

        Responds 200 with a 'detail' payload, or 400 when the room id does
        not exist.
        """
        try:
            room = Room.objects.get(id=room_id)
            # Rating categories are every Review field except bookkeeping ones.
            rating_list = [
                field.name for field in Review._meta.get_fields()
                if field.name not in ['id', 'review_user', 'review_room', 'comment']
            ]
            # Bug fix: indexing [0] on an empty image list raised IndexError;
            # a room with no images now yields None instead of a 500.
            images = [image.url for image in room.image.all()]
            room_detail = {
                'room_name': room.name,
                'address'  : room.city,
                'price'    : room.price,
                'room_type': room.category.name,
                'image'    : images[0] if images else None,
                'is_super' : room.host.is_super,
                'host'     : room.host.user.last_name + room.host.user.first_name,
                'capacity' : room.capacity,
                'amenity'  : [{
                    'id'         : roomamenity.amenity.id,
                    # Strip the surrounding <i class="..."></i> markup so only
                    # the icon class name remains.
                    'icon'       : re.sub('<i class=\\"|\\"></i>', '', roomamenity.amenity.image),
                    'description': roomamenity.amenity.name,
                } for roomamenity in room.roomamenity_set.all()],
                'rating'   : [{
                    'category'       : category,
                    'category_rating': self._average_rating(room, category),
                } for category in rating_list],
            }
            return JsonResponse({'detail': room_detail}, status=200)
        except KeyError:
            return JsonResponse({'message': 'KeyError'}, status=400)
        except Room.DoesNotExist:
            return JsonResponse({'message': 'NOT_FOUND_ROOM_ID'}, status=400)

    def _average_rating(self, room, category):
        """Integer average for one rating category; 0 when the room has no reviews.

        Bug fix: the aggregate average is None for review-less rooms and
        int(None) previously raised an uncaught TypeError (500 response).
        """
        average = Review.objects.filter(review_room=room).aggregate(Avg(category)).get(category + '__avg')
        return int(average) if average is not None else 0
class WishListView(View):
    @LoginRequired
    def post(self, request, room_id):
        """Add a room to the authenticated user's wishlist.

        400 when the room is already wishlisted, 200 on success.
        """
        user = request.user
        try:
            if WishList.objects.filter(wish_user=user, wish_room_id=room_id).exists():
                return JsonResponse({'MESSAGE': 'Already Choosen'}, status=400)
            # Bug fix: the entry was previously created with a hard-coded
            # wish_user_id=1 instead of the authenticated user.
            WishList.objects.create(wish_user=user, wish_room_id=room_id)
            return JsonResponse({'MESSAGE': 'SUCCESS'}, status=200)
        except KeyError:
            return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)

    @LoginRequired
    def delete(self, request, room_id):
        """Remove a room from the authenticated user's wishlist."""
        try:
            user = request.user
            wish = WishList.objects.get(wish_user=user, wish_room_id=room_id)
            wish.delete()
            return JsonResponse({'MESSAGE': 'Delete Success'}, status=200)
        except KeyError:
            return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)
        except WishList.DoesNotExist:
            return JsonResponse({'MESSAGE': 'Already not Exist in list'}, status=400)

    @LoginRequired
    def get(self, request):
        """List every room on the authenticated user's wishlist as JSON."""
        try:
            user = request.user
            wishlists = WishList.objects.filter(wish_user=user)
            # Rating categories are every Review field except bookkeeping ones.
            rating_list = [
                field.name for field in Review._meta.get_fields()
                if field.name not in ['id', 'review_user', 'review_room', 'comment']
            ]
            if not wishlists:
                return JsonResponse({'MESSAGE': 'nothing in cart'}, status=400)
            result = [{
                'room_id'  : wishlist.wish_room.id,
                'room_name': wishlist.wish_room.name,
                'address'  : wishlist.wish_room.city,
                'price'    : wishlist.wish_room.price,
                'room_type': wishlist.wish_room.category.name,
                'image'    : [image.url for image in wishlist.wish_room.image.all()],
                'is_super' : wishlist.wish_room.host.is_super,
                'capacity' : wishlist.wish_room.capacity,
                'lat'      : wishlist.wish_room.latitude,
                'lng'      : wishlist.wish_room.longtitude,
                'amenity'  : [roomamenity.amenity.name for roomamenity in wishlist.wish_room.roomamenity_set.all()],
                'rating'   : [{
                    'category'       : category,
                    # None when the room has no reviews yet; serialized as null.
                    'category_rating': Review.objects.filter(review_room=wishlist.wish_room).aggregate(Avg(category)).get(category + '__avg'),
                } for category in rating_list],
            } for wishlist in wishlists]
            return JsonResponse({'result': result}, status=200)
        except KeyError:
            return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)
<|reserved_special_token_1|>
import json, re, bcrypt, jwt
from datetime import datetime, timedelta
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Avg
from django.http import JsonResponse
from django.views import View
from room.models import Room, Category, RoomAmenity, Image, Amenity, WishList, DisableDate, AbleTime
from reservation.check import check, check_in, check_out
from user.models import User, Host, Review
from user.utils import LoginRequired
class RoomListView(View):
    def get(self, request):
        """List rooms matching the query-string filters as JSON thumbnails.

        Supported GET parameters: city, checkin, checkout, adult, child,
        min_price, max_price, is_refund, is_super, room_type (repeatable),
        amenity (repeatable), page.

        Returns 200 with 'thumbnail' (one page of rooms) and 'common' (total
        match count), or 400 when no room is available or a parameter is
        malformed.
        """
        try:
            city       = request.GET.get('city', '')
            checkin    = request.GET.get('checkin', None)
            checkout   = request.GET.get('checkout', None)
            adult      = int(request.GET.get('adult', '0'))
            child      = int(request.GET.get('child', '0'))
            min_price  = request.GET.get('min_price', 0)
            max_price  = request.GET.get('max_price', 100000000)
            is_refund  = request.GET.get('is_refund', None) == 'true'
            is_super   = request.GET.get('is_super', None) == 'true'
            room_types = request.GET.getlist('room_type', None)
            amenities  = request.GET.getlist('amenity', None)
            page       = int(request.GET.get('page', '1'))

            # Base ORM filter; optional criteria are added only when supplied
            # so absent parameters do not narrow the queryset.
            list_criteria = {
                'city__contains': city,
                'price__range'  : [min_price, max_price],
                'capacity__gte' : adult + child,
            }
            if room_types:
                list_criteria['category__name__in'] = room_types
            if amenities:
                list_criteria['amenity__name__in'] = amenities
            if is_super:
                list_criteria['host__is_super'] = is_super
            if is_refund:
                list_criteria['is_refund'] = is_refund

            # Pagination window (fixed page size of 10).
            size   = 10
            offset = (page - 1) * size
            limit  = page * size

            room_list = Room.objects.filter(**list_criteria)

            # Availability filters for the requested stay dates.
            if checkin and checkout:
                room_list = [room for room in room_list if check(room, checkin, checkout)]
            if checkin:
                room_list = [room for room in room_list if check_in(room, checkin)]
            if checkout:
                room_list = [room for room in room_list if check_out(room, checkout)]
            if not room_list:
                return JsonResponse({'message': 'NO_ROOM_AVAILABLE'}, status=400)

            # Rating categories are every Review field except bookkeeping ones.
            rating_list = [
                field.name for field in Review._meta.get_fields()
                if field.name not in ['id', 'review_user', 'review_room', 'comment']
            ]
            room_thumbnail = [{
                'room_id'  : room.id,
                'room_name': room.name,
                'price'    : room.price,
                'address'  : room.city,
                'room_type': room.category.name,
                'lat'      : room.latitude,
                'lng'      : room.longtitude,
                'image'    : [image.url for image in room.image.all()],
                'is_super' : room.host.is_super,
                'capacity' : int(room.capacity),
                'amenity'  : [roomamenity.amenity.name for roomamenity in room.roomamenity_set.all()],
                'rating'   : [{
                    'category'       : category,
                    # None when the room has no reviews yet; serialized as null.
                    'category_rating': Review.objects.filter(review_room=room).aggregate(rate_avg=Avg(category))['rate_avg'],
                } for category in rating_list],
            } for room in room_list[offset:limit]]
            common_data = len(room_list)
            return JsonResponse({'thumbnail': room_thumbnail, 'common': common_data}, status=200)
        except KeyError:
            return JsonResponse({'message': 'KeyError'}, status=400)
        except ValueError:
            # Bug fix: non-numeric adult/child/page values previously raised an
            # uncaught ValueError and produced a 500 instead of a 400.
            return JsonResponse({'message': 'ValueError'}, status=400)
class RoomView(View):
    def get(self, request, room_id):
        """Return detail information for a single room as JSON.

        Responds 200 with a 'detail' payload, or 400 when the room id does
        not exist.
        """
        try:
            room = Room.objects.get(id=room_id)
            # Rating categories are every Review field except bookkeeping ones.
            rating_list = [
                field.name for field in Review._meta.get_fields()
                if field.name not in ['id', 'review_user', 'review_room', 'comment']
            ]
            # Bug fix: indexing [0] on an empty image list raised IndexError;
            # a room with no images now yields None instead of a 500.
            images = [image.url for image in room.image.all()]
            room_detail = {
                'room_name': room.name,
                'address'  : room.city,
                'price'    : room.price,
                'room_type': room.category.name,
                'image'    : images[0] if images else None,
                'is_super' : room.host.is_super,
                'host'     : room.host.user.last_name + room.host.user.first_name,
                'capacity' : room.capacity,
                'amenity'  : [{
                    'id'         : roomamenity.amenity.id,
                    # Strip the surrounding <i class="..."></i> markup so only
                    # the icon class name remains.
                    'icon'       : re.sub('<i class=\\"|\\"></i>', '', roomamenity.amenity.image),
                    'description': roomamenity.amenity.name,
                } for roomamenity in room.roomamenity_set.all()],
                'rating'   : [{
                    'category'       : category,
                    'category_rating': self._average_rating(room, category),
                } for category in rating_list],
            }
            return JsonResponse({'detail': room_detail}, status=200)
        except KeyError:
            return JsonResponse({'message': 'KeyError'}, status=400)
        except Room.DoesNotExist:
            return JsonResponse({'message': 'NOT_FOUND_ROOM_ID'}, status=400)

    def _average_rating(self, room, category):
        """Integer average for one rating category; 0 when the room has no reviews.

        Bug fix: the aggregate average is None for review-less rooms and
        int(None) previously raised an uncaught TypeError (500 response).
        """
        average = Review.objects.filter(review_room=room).aggregate(Avg(category)).get(category + '__avg')
        return int(average) if average is not None else 0
class WishListView(View):
    @LoginRequired
    def post(self, request, room_id):
        """Add a room to the authenticated user's wishlist.

        400 when the room is already wishlisted, 200 on success.
        """
        user = request.user
        try:
            if WishList.objects.filter(wish_user=user, wish_room_id=room_id).exists():
                return JsonResponse({'MESSAGE': 'Already Choosen'}, status=400)
            # Bug fix: the entry was previously created with a hard-coded
            # wish_user_id=1 instead of the authenticated user.
            WishList.objects.create(wish_user=user, wish_room_id=room_id)
            return JsonResponse({'MESSAGE': 'SUCCESS'}, status=200)
        except KeyError:
            return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)

    @LoginRequired
    def delete(self, request, room_id):
        """Remove a room from the authenticated user's wishlist."""
        try:
            user = request.user
            wish = WishList.objects.get(wish_user=user, wish_room_id=room_id)
            wish.delete()
            return JsonResponse({'MESSAGE': 'Delete Success'}, status=200)
        except KeyError:
            return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)
        except WishList.DoesNotExist:
            return JsonResponse({'MESSAGE': 'Already not Exist in list'}, status=400)

    @LoginRequired
    def get(self, request):
        """List every room on the authenticated user's wishlist as JSON."""
        try:
            user = request.user
            wishlists = WishList.objects.filter(wish_user=user)
            # Rating categories are every Review field except bookkeeping ones.
            rating_list = [
                field.name for field in Review._meta.get_fields()
                if field.name not in ['id', 'review_user', 'review_room', 'comment']
            ]
            if not wishlists:
                return JsonResponse({'MESSAGE': 'nothing in cart'}, status=400)
            result = [{
                'room_id'  : wishlist.wish_room.id,
                'room_name': wishlist.wish_room.name,
                'address'  : wishlist.wish_room.city,
                'price'    : wishlist.wish_room.price,
                'room_type': wishlist.wish_room.category.name,
                'image'    : [image.url for image in wishlist.wish_room.image.all()],
                'is_super' : wishlist.wish_room.host.is_super,
                'capacity' : wishlist.wish_room.capacity,
                'lat'      : wishlist.wish_room.latitude,
                'lng'      : wishlist.wish_room.longtitude,
                'amenity'  : [roomamenity.amenity.name for roomamenity in wishlist.wish_room.roomamenity_set.all()],
                'rating'   : [{
                    'category'       : category,
                    # None when the room has no reviews yet; serialized as null.
                    'category_rating': Review.objects.filter(review_room=wishlist.wish_room).aggregate(Avg(category)).get(category + '__avg'),
                } for category in rating_list],
            } for wishlist in wishlists]
            return JsonResponse({'result': result}, status=200)
        except KeyError:
            return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)
<|reserved_special_token_1|>
import json, re, bcrypt, jwt
from datetime import datetime, timedelta
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Avg
from django.http import JsonResponse
from django.views import View
from room.models import Room, Category, RoomAmenity,Image,Amenity,WishList,DisableDate,AbleTime
from reservation.check import check, check_in, check_out
from user.models import User, Host, Review
from user.utils import LoginRequired
class RoomListView(View):
    def get(self, request):
        """List rooms matching the query-string filters as JSON thumbnails.

        Supported GET parameters: city, checkin, checkout, adult, child,
        min_price, max_price, is_refund, is_super, room_type (repeatable),
        amenity (repeatable), page.

        Returns 200 with 'thumbnail' (one page of rooms) and 'common' (total
        match count), or 400 when no room is available or a parameter is
        malformed.
        """
        try:
            city       = request.GET.get('city', '')
            checkin    = request.GET.get('checkin', None)
            checkout   = request.GET.get('checkout', None)
            adult      = int(request.GET.get('adult', '0'))
            child      = int(request.GET.get('child', '0'))
            min_price  = request.GET.get('min_price', 0)
            max_price  = request.GET.get('max_price', 100000000)
            is_refund  = request.GET.get('is_refund', None) == 'true'
            is_super   = request.GET.get('is_super', None) == 'true'
            room_types = request.GET.getlist('room_type', None)
            amenities  = request.GET.getlist('amenity', None)
            page       = int(request.GET.get('page', '1'))

            # Base ORM filter; optional criteria are added only when supplied
            # so absent parameters do not narrow the queryset.
            list_criteria = {
                'city__contains': city,
                'price__range'  : [min_price, max_price],
                'capacity__gte' : adult + child,
            }
            if room_types:
                list_criteria['category__name__in'] = room_types
            if amenities:
                list_criteria['amenity__name__in'] = amenities
            if is_super:
                list_criteria['host__is_super'] = is_super
            if is_refund:
                list_criteria['is_refund'] = is_refund

            # Pagination window (fixed page size of 10).
            size   = 10
            offset = (page - 1) * size
            limit  = page * size

            room_list = Room.objects.filter(**list_criteria)

            # Availability filters for the requested stay dates.
            if checkin and checkout:
                room_list = [room for room in room_list if check(room, checkin, checkout)]
            if checkin:
                room_list = [room for room in room_list if check_in(room, checkin)]
            if checkout:
                room_list = [room for room in room_list if check_out(room, checkout)]
            if not room_list:
                return JsonResponse({'message': 'NO_ROOM_AVAILABLE'}, status=400)

            # Rating categories are every Review field except bookkeeping ones.
            rating_list = [
                field.name for field in Review._meta.get_fields()
                if field.name not in ['id', 'review_user', 'review_room', 'comment']
            ]
            room_thumbnail = [{
                'room_id'  : room.id,
                'room_name': room.name,
                'price'    : room.price,
                'address'  : room.city,
                'room_type': room.category.name,
                'lat'      : room.latitude,
                'lng'      : room.longtitude,
                'image'    : [image.url for image in room.image.all()],
                'is_super' : room.host.is_super,
                'capacity' : int(room.capacity),
                'amenity'  : [roomamenity.amenity.name for roomamenity in room.roomamenity_set.all()],
                'rating'   : [{
                    'category'       : category,
                    # None when the room has no reviews yet; serialized as null.
                    'category_rating': Review.objects.filter(review_room=room).aggregate(rate_avg=Avg(category))['rate_avg'],
                } for category in rating_list],
            } for room in room_list[offset:limit]]
            common_data = len(room_list)
            return JsonResponse({'thumbnail': room_thumbnail, 'common': common_data}, status=200)
        except KeyError:
            return JsonResponse({'message': 'KeyError'}, status=400)
        except ValueError:
            # Bug fix: non-numeric adult/child/page values previously raised an
            # uncaught ValueError and produced a 500 instead of a 400.
            return JsonResponse({'message': 'ValueError'}, status=400)
class RoomView(View):
    def get(self, request, room_id):
        """Return detail information for a single room as JSON.

        Responds 200 with a 'detail' payload, or 400 when the room id does
        not exist.
        """
        try:
            room = Room.objects.get(id=room_id)
            # Rating categories are every Review field except bookkeeping ones.
            rating_list = [
                field.name for field in Review._meta.get_fields()
                if field.name not in ['id', 'review_user', 'review_room', 'comment']
            ]
            # Bug fix: indexing [0] on an empty image list raised IndexError;
            # a room with no images now yields None instead of a 500.
            images = [image.url for image in room.image.all()]
            room_detail = {
                'room_name': room.name,
                'address'  : room.city,
                'price'    : room.price,
                'room_type': room.category.name,
                'image'    : images[0] if images else None,
                'is_super' : room.host.is_super,
                'host'     : room.host.user.last_name + room.host.user.first_name,
                'capacity' : room.capacity,
                'amenity'  : [{
                    'id'         : roomamenity.amenity.id,
                    # Strip the surrounding <i class="..."></i> markup so only
                    # the icon class name remains.
                    'icon'       : re.sub('<i class=\\"|\\"></i>', '', roomamenity.amenity.image),
                    'description': roomamenity.amenity.name,
                } for roomamenity in room.roomamenity_set.all()],
                'rating'   : [{
                    'category'       : category,
                    'category_rating': self._average_rating(room, category),
                } for category in rating_list],
            }
            return JsonResponse({'detail': room_detail}, status=200)
        except KeyError:
            return JsonResponse({'message': 'KeyError'}, status=400)
        except Room.DoesNotExist:
            return JsonResponse({'message': 'NOT_FOUND_ROOM_ID'}, status=400)

    def _average_rating(self, room, category):
        """Integer average for one rating category; 0 when the room has no reviews.

        Bug fix: the aggregate average is None for review-less rooms and
        int(None) previously raised an uncaught TypeError (500 response).
        """
        average = Review.objects.filter(review_room=room).aggregate(Avg(category)).get(category + '__avg')
        return int(average) if average is not None else 0
class WishListView(View):
    @LoginRequired
    def post(self, request, room_id):
        """Add a room to the authenticated user's wishlist.

        400 when the room is already wishlisted, 200 on success.
        """
        user = request.user
        try:
            if WishList.objects.filter(wish_user=user, wish_room_id=room_id).exists():
                return JsonResponse({'MESSAGE': 'Already Choosen'}, status=400)
            # Bug fix: the entry was previously created with a hard-coded
            # wish_user_id=1 instead of the authenticated user.
            WishList.objects.create(wish_user=user, wish_room_id=room_id)
            return JsonResponse({'MESSAGE': 'SUCCESS'}, status=200)
        except KeyError:
            return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)

    @LoginRequired
    def delete(self, request, room_id):
        """Remove a room from the authenticated user's wishlist."""
        try:
            user = request.user
            wish = WishList.objects.get(wish_user=user, wish_room_id=room_id)
            wish.delete()
            return JsonResponse({'MESSAGE': 'Delete Success'}, status=200)
        except KeyError:
            return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)
        except WishList.DoesNotExist:
            return JsonResponse({'MESSAGE': 'Already not Exist in list'}, status=400)

    @LoginRequired
    def get(self, request):
        """List every room on the authenticated user's wishlist as JSON."""
        try:
            user = request.user
            wishlists = WishList.objects.filter(wish_user=user)
            # Rating categories are every Review field except bookkeeping ones.
            rating_list = [
                field.name for field in Review._meta.get_fields()
                if field.name not in ['id', 'review_user', 'review_room', 'comment']
            ]
            if not wishlists:
                return JsonResponse({'MESSAGE': 'nothing in cart'}, status=400)
            result = [{
                'room_id'  : wishlist.wish_room.id,
                'room_name': wishlist.wish_room.name,
                'address'  : wishlist.wish_room.city,
                'price'    : wishlist.wish_room.price,
                'room_type': wishlist.wish_room.category.name,
                'image'    : [image.url for image in wishlist.wish_room.image.all()],
                'is_super' : wishlist.wish_room.host.is_super,
                'capacity' : wishlist.wish_room.capacity,
                'lat'      : wishlist.wish_room.latitude,
                'lng'      : wishlist.wish_room.longtitude,
                'amenity'  : [roomamenity.amenity.name for roomamenity in wishlist.wish_room.roomamenity_set.all()],
                'rating'   : [{
                    'category'       : category,
                    # None when the room has no reviews yet; serialized as null.
                    'category_rating': Review.objects.filter(review_room=wishlist.wish_room).aggregate(Avg(category)).get(category + '__avg'),
                } for category in rating_list],
            } for wishlist in wishlists]
            return JsonResponse({'result': result}, status=200)
        except KeyError:
            return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)
|
flexible
|
{
"blob_id": "cc5b22a0246fcc9feaed6a0663095a6003e6cef1",
"index": 6685,
"step-1": "<mask token>\n\n\nclass RoomView(View):\n\n def get(self, request, room_id):\n try:\n room = Room.objects.get(id=room_id)\n rating_list = [field.name for field in Review._meta.get_fields(\n ) if field.name not in ['id', 'review_user', 'review_room',\n 'comment']]\n room_detail = {'room_name': room.name, 'address': room.city,\n 'price': room.price, 'room_type': room.category.name,\n 'image': [image.url for image in room.image.all()][0],\n 'is_super': room.host.is_super, 'host': room.host.user.\n last_name + room.host.user.first_name, 'capacity': room.\n capacity, 'amenity': [{'id': roomamenity.amenity.id, 'icon':\n re.sub('<i class=\\\\\"|\\\\\"></i>', '', roomamenity.amenity.\n image), 'description': roomamenity.amenity.name} for\n roomamenity in room.roomamenity_set.all()], 'rating': [{\n 'category': category, 'category_rating': int(Review.objects\n .filter(review_room=room).aggregate(Avg(category)).get(\n category + '__avg'))} for category in rating_list]}\n return JsonResponse({'detail': room_detail}, status=200)\n except KeyError:\n return JsonResponse({'message': 'KeyError'}, status=400)\n except Room.DoesNotExist:\n return JsonResponse({'message': 'NOT_FOUND_ROOM_ID'}, status=400)\n\n\nclass WishListView(View):\n\n @LoginRequired\n def post(self, request, room_id):\n user = request.user\n try:\n if WishList.objects.filter(wish_user=user, wish_room_id=room_id\n ).exists():\n return JsonResponse({'MESSAGE': 'Already Choosen'}, status=400)\n WishList.objects.create(wish_user_id=1, wish_room_id=room_id)\n return JsonResponse({'MESSAGE': 'SUCCESS'}, status=200)\n except KeyError:\n return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)\n\n @LoginRequired\n def delete(self, request, room_id):\n try:\n user = request.user\n wish = WishList.objects.get(wish_user=user, wish_room_id=room_id)\n wish.delete()\n return JsonResponse({'MESSAGE': 'Delete Success'}, status=200)\n except KeyError:\n return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)\n 
except WishList.DoesNotExist:\n return JsonResponse({'MESSAGE': 'Already not Exist in list'},\n status=400)\n\n @LoginRequired\n def get(self, request):\n try:\n user = request.user\n wishlists = WishList.objects.filter(wish_user=user)\n rating_list = [field.name for field in Review._meta.get_fields(\n ) if field.name not in ['id', 'review_user', 'review_room',\n 'comment']]\n if not wishlists:\n return JsonResponse({'MESSAGE': 'nothing in cart'}, status=400)\n result = [{'room_id': wishlist.wish_room.id, 'room_name':\n wishlist.wish_room.name, 'address': wishlist.wish_room.city,\n 'price': wishlist.wish_room.price, 'room_type': wishlist.\n wish_room.category.name, 'image': [image.url for image in\n wishlist.wish_room.image.all()], 'is_super': wishlist.\n wish_room.host.is_super, 'capacity': wishlist.wish_room.\n capacity, 'lat': wishlist.wish_room.latitude, 'lng':\n wishlist.wish_room.longtitude, 'amenity': [roomamenity.\n amenity.name for roomamenity in wishlist.wish_room.\n roomamenity_set.all()], 'rating': [{'category': category,\n 'category_rating': Review.objects.filter(review_room=\n wishlist.wish_room).aggregate(Avg(category)).get(category +\n '__avg')} for category in rating_list]} for wishlist in\n wishlists]\n return JsonResponse({'result': result}, status=200)\n except KeyError:\n return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)\n",
"step-2": "<mask token>\n\n\nclass RoomListView(View):\n <mask token>\n\n\nclass RoomView(View):\n\n def get(self, request, room_id):\n try:\n room = Room.objects.get(id=room_id)\n rating_list = [field.name for field in Review._meta.get_fields(\n ) if field.name not in ['id', 'review_user', 'review_room',\n 'comment']]\n room_detail = {'room_name': room.name, 'address': room.city,\n 'price': room.price, 'room_type': room.category.name,\n 'image': [image.url for image in room.image.all()][0],\n 'is_super': room.host.is_super, 'host': room.host.user.\n last_name + room.host.user.first_name, 'capacity': room.\n capacity, 'amenity': [{'id': roomamenity.amenity.id, 'icon':\n re.sub('<i class=\\\\\"|\\\\\"></i>', '', roomamenity.amenity.\n image), 'description': roomamenity.amenity.name} for\n roomamenity in room.roomamenity_set.all()], 'rating': [{\n 'category': category, 'category_rating': int(Review.objects\n .filter(review_room=room).aggregate(Avg(category)).get(\n category + '__avg'))} for category in rating_list]}\n return JsonResponse({'detail': room_detail}, status=200)\n except KeyError:\n return JsonResponse({'message': 'KeyError'}, status=400)\n except Room.DoesNotExist:\n return JsonResponse({'message': 'NOT_FOUND_ROOM_ID'}, status=400)\n\n\nclass WishListView(View):\n\n @LoginRequired\n def post(self, request, room_id):\n user = request.user\n try:\n if WishList.objects.filter(wish_user=user, wish_room_id=room_id\n ).exists():\n return JsonResponse({'MESSAGE': 'Already Choosen'}, status=400)\n WishList.objects.create(wish_user_id=1, wish_room_id=room_id)\n return JsonResponse({'MESSAGE': 'SUCCESS'}, status=200)\n except KeyError:\n return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)\n\n @LoginRequired\n def delete(self, request, room_id):\n try:\n user = request.user\n wish = WishList.objects.get(wish_user=user, wish_room_id=room_id)\n wish.delete()\n return JsonResponse({'MESSAGE': 'Delete Success'}, status=200)\n except KeyError:\n return 
JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)\n except WishList.DoesNotExist:\n return JsonResponse({'MESSAGE': 'Already not Exist in list'},\n status=400)\n\n @LoginRequired\n def get(self, request):\n try:\n user = request.user\n wishlists = WishList.objects.filter(wish_user=user)\n rating_list = [field.name for field in Review._meta.get_fields(\n ) if field.name not in ['id', 'review_user', 'review_room',\n 'comment']]\n if not wishlists:\n return JsonResponse({'MESSAGE': 'nothing in cart'}, status=400)\n result = [{'room_id': wishlist.wish_room.id, 'room_name':\n wishlist.wish_room.name, 'address': wishlist.wish_room.city,\n 'price': wishlist.wish_room.price, 'room_type': wishlist.\n wish_room.category.name, 'image': [image.url for image in\n wishlist.wish_room.image.all()], 'is_super': wishlist.\n wish_room.host.is_super, 'capacity': wishlist.wish_room.\n capacity, 'lat': wishlist.wish_room.latitude, 'lng':\n wishlist.wish_room.longtitude, 'amenity': [roomamenity.\n amenity.name for roomamenity in wishlist.wish_room.\n roomamenity_set.all()], 'rating': [{'category': category,\n 'category_rating': Review.objects.filter(review_room=\n wishlist.wish_room).aggregate(Avg(category)).get(category +\n '__avg')} for category in rating_list]} for wishlist in\n wishlists]\n return JsonResponse({'result': result}, status=200)\n except KeyError:\n return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)\n",
"step-3": "<mask token>\n\n\nclass RoomListView(View):\n\n def get(self, request):\n try:\n city = request.GET.get('city', '')\n checkin = request.GET.get('checkin', None)\n checkout = request.GET.get('checkout', None)\n adult = int(request.GET.get('adult', '0'))\n child = int(request.GET.get('child', '0'))\n min_price = request.GET.get('min_price', 0)\n max_price = request.GET.get('max_price', 100000000)\n is_refund = True if request.GET.get('is_refund', None\n ) == 'true' else False\n is_super = True if request.GET.get('is_super', None\n ) == 'true' else False\n room_types = request.GET.getlist('room_type', None)\n amenities = request.GET.getlist('amenity', None)\n page = int(request.GET.get('page', '1'))\n list_criteria = {'city__contains': city, 'price__range': [\n min_price, max_price], 'capacity__gte': adult + child}\n if room_types:\n list_criteria['category__name__in'] = room_types\n if amenities:\n list_criteria['amenity__name__in'] = amenities\n if is_super:\n list_criteria['host__is_super'] = is_super\n if is_refund:\n list_criteria['is_refund'] = is_refund\n size = 10\n offset = (page - 1) * size\n limit = page * size\n room_list = Room.objects.filter(**list_criteria)\n if checkin and checkout:\n room_list = [room for room in room_list if check(room,\n checkin, checkout)]\n if checkin:\n room_list = [room for room in room_list if check_in(room,\n checkin)]\n if checkout:\n room_list = [room for room in room_list if check_out(room,\n checkout)]\n if not room_list:\n return JsonResponse({'message': 'NO_ROOM_AVAILABLE'},\n status=400)\n rating_list = [field.name for field in Review._meta.get_fields(\n ) if field.name not in ['id', 'review_user', 'review_room',\n 'comment']]\n room_thumbnail = [{'room_id': room.id, 'room_name': room.name,\n 'price': room.price, 'address': room.city, 'room_type':\n room.category.name, 'lat': room.latitude, 'lng': room.\n longtitude, 'image': [image.url for image in room.image.all\n ()], 'is_super': room.host.is_super, 
'capacity': int(room.\n capacity), 'amenity': [roomamenity.amenity.name for\n roomamenity in room.roomamenity_set.all()], 'rating': [{\n 'category': category, 'category_rating': Review.objects.\n filter(review_room=room).aggregate(rate_avg=Avg(category))[\n 'rate_avg']} for category in rating_list]} for room in\n room_list[offset:limit]]\n common_data = len(room_list)\n return JsonResponse({'thumbnail': room_thumbnail, 'common':\n common_data}, status=200)\n except KeyError:\n return JsonResponse({'message': 'KeyError'}, status=400)\n\n\nclass RoomView(View):\n\n def get(self, request, room_id):\n try:\n room = Room.objects.get(id=room_id)\n rating_list = [field.name for field in Review._meta.get_fields(\n ) if field.name not in ['id', 'review_user', 'review_room',\n 'comment']]\n room_detail = {'room_name': room.name, 'address': room.city,\n 'price': room.price, 'room_type': room.category.name,\n 'image': [image.url for image in room.image.all()][0],\n 'is_super': room.host.is_super, 'host': room.host.user.\n last_name + room.host.user.first_name, 'capacity': room.\n capacity, 'amenity': [{'id': roomamenity.amenity.id, 'icon':\n re.sub('<i class=\\\\\"|\\\\\"></i>', '', roomamenity.amenity.\n image), 'description': roomamenity.amenity.name} for\n roomamenity in room.roomamenity_set.all()], 'rating': [{\n 'category': category, 'category_rating': int(Review.objects\n .filter(review_room=room).aggregate(Avg(category)).get(\n category + '__avg'))} for category in rating_list]}\n return JsonResponse({'detail': room_detail}, status=200)\n except KeyError:\n return JsonResponse({'message': 'KeyError'}, status=400)\n except Room.DoesNotExist:\n return JsonResponse({'message': 'NOT_FOUND_ROOM_ID'}, status=400)\n\n\nclass WishListView(View):\n\n @LoginRequired\n def post(self, request, room_id):\n user = request.user\n try:\n if WishList.objects.filter(wish_user=user, wish_room_id=room_id\n ).exists():\n return JsonResponse({'MESSAGE': 'Already Choosen'}, status=400)\n 
WishList.objects.create(wish_user_id=1, wish_room_id=room_id)\n return JsonResponse({'MESSAGE': 'SUCCESS'}, status=200)\n except KeyError:\n return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)\n\n @LoginRequired\n def delete(self, request, room_id):\n try:\n user = request.user\n wish = WishList.objects.get(wish_user=user, wish_room_id=room_id)\n wish.delete()\n return JsonResponse({'MESSAGE': 'Delete Success'}, status=200)\n except KeyError:\n return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)\n except WishList.DoesNotExist:\n return JsonResponse({'MESSAGE': 'Already not Exist in list'},\n status=400)\n\n @LoginRequired\n def get(self, request):\n try:\n user = request.user\n wishlists = WishList.objects.filter(wish_user=user)\n rating_list = [field.name for field in Review._meta.get_fields(\n ) if field.name not in ['id', 'review_user', 'review_room',\n 'comment']]\n if not wishlists:\n return JsonResponse({'MESSAGE': 'nothing in cart'}, status=400)\n result = [{'room_id': wishlist.wish_room.id, 'room_name':\n wishlist.wish_room.name, 'address': wishlist.wish_room.city,\n 'price': wishlist.wish_room.price, 'room_type': wishlist.\n wish_room.category.name, 'image': [image.url for image in\n wishlist.wish_room.image.all()], 'is_super': wishlist.\n wish_room.host.is_super, 'capacity': wishlist.wish_room.\n capacity, 'lat': wishlist.wish_room.latitude, 'lng':\n wishlist.wish_room.longtitude, 'amenity': [roomamenity.\n amenity.name for roomamenity in wishlist.wish_room.\n roomamenity_set.all()], 'rating': [{'category': category,\n 'category_rating': Review.objects.filter(review_room=\n wishlist.wish_room).aggregate(Avg(category)).get(category +\n '__avg')} for category in rating_list]} for wishlist in\n wishlists]\n return JsonResponse({'result': result}, status=200)\n except KeyError:\n return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)\n",
"step-4": "import json, re, bcrypt, jwt\nfrom datetime import datetime, timedelta\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db.models import Avg\nfrom django.http import JsonResponse\nfrom django.views import View\nfrom room.models import Room, Category, RoomAmenity, Image, Amenity, WishList, DisableDate, AbleTime\nfrom reservation.check import check, check_in, check_out\nfrom user.models import User, Host, Review\nfrom user.utils import LoginRequired\n\n\nclass RoomListView(View):\n\n def get(self, request):\n try:\n city = request.GET.get('city', '')\n checkin = request.GET.get('checkin', None)\n checkout = request.GET.get('checkout', None)\n adult = int(request.GET.get('adult', '0'))\n child = int(request.GET.get('child', '0'))\n min_price = request.GET.get('min_price', 0)\n max_price = request.GET.get('max_price', 100000000)\n is_refund = True if request.GET.get('is_refund', None\n ) == 'true' else False\n is_super = True if request.GET.get('is_super', None\n ) == 'true' else False\n room_types = request.GET.getlist('room_type', None)\n amenities = request.GET.getlist('amenity', None)\n page = int(request.GET.get('page', '1'))\n list_criteria = {'city__contains': city, 'price__range': [\n min_price, max_price], 'capacity__gte': adult + child}\n if room_types:\n list_criteria['category__name__in'] = room_types\n if amenities:\n list_criteria['amenity__name__in'] = amenities\n if is_super:\n list_criteria['host__is_super'] = is_super\n if is_refund:\n list_criteria['is_refund'] = is_refund\n size = 10\n offset = (page - 1) * size\n limit = page * size\n room_list = Room.objects.filter(**list_criteria)\n if checkin and checkout:\n room_list = [room for room in room_list if check(room,\n checkin, checkout)]\n if checkin:\n room_list = [room for room in room_list if check_in(room,\n checkin)]\n if checkout:\n room_list = [room for room in room_list if check_out(room,\n checkout)]\n if not room_list:\n return JsonResponse({'message': 
'NO_ROOM_AVAILABLE'},\n status=400)\n rating_list = [field.name for field in Review._meta.get_fields(\n ) if field.name not in ['id', 'review_user', 'review_room',\n 'comment']]\n room_thumbnail = [{'room_id': room.id, 'room_name': room.name,\n 'price': room.price, 'address': room.city, 'room_type':\n room.category.name, 'lat': room.latitude, 'lng': room.\n longtitude, 'image': [image.url for image in room.image.all\n ()], 'is_super': room.host.is_super, 'capacity': int(room.\n capacity), 'amenity': [roomamenity.amenity.name for\n roomamenity in room.roomamenity_set.all()], 'rating': [{\n 'category': category, 'category_rating': Review.objects.\n filter(review_room=room).aggregate(rate_avg=Avg(category))[\n 'rate_avg']} for category in rating_list]} for room in\n room_list[offset:limit]]\n common_data = len(room_list)\n return JsonResponse({'thumbnail': room_thumbnail, 'common':\n common_data}, status=200)\n except KeyError:\n return JsonResponse({'message': 'KeyError'}, status=400)\n\n\nclass RoomView(View):\n\n def get(self, request, room_id):\n try:\n room = Room.objects.get(id=room_id)\n rating_list = [field.name for field in Review._meta.get_fields(\n ) if field.name not in ['id', 'review_user', 'review_room',\n 'comment']]\n room_detail = {'room_name': room.name, 'address': room.city,\n 'price': room.price, 'room_type': room.category.name,\n 'image': [image.url for image in room.image.all()][0],\n 'is_super': room.host.is_super, 'host': room.host.user.\n last_name + room.host.user.first_name, 'capacity': room.\n capacity, 'amenity': [{'id': roomamenity.amenity.id, 'icon':\n re.sub('<i class=\\\\\"|\\\\\"></i>', '', roomamenity.amenity.\n image), 'description': roomamenity.amenity.name} for\n roomamenity in room.roomamenity_set.all()], 'rating': [{\n 'category': category, 'category_rating': int(Review.objects\n .filter(review_room=room).aggregate(Avg(category)).get(\n category + '__avg'))} for category in rating_list]}\n return JsonResponse({'detail': 
room_detail}, status=200)\n except KeyError:\n return JsonResponse({'message': 'KeyError'}, status=400)\n except Room.DoesNotExist:\n return JsonResponse({'message': 'NOT_FOUND_ROOM_ID'}, status=400)\n\n\nclass WishListView(View):\n\n @LoginRequired\n def post(self, request, room_id):\n user = request.user\n try:\n if WishList.objects.filter(wish_user=user, wish_room_id=room_id\n ).exists():\n return JsonResponse({'MESSAGE': 'Already Choosen'}, status=400)\n WishList.objects.create(wish_user_id=1, wish_room_id=room_id)\n return JsonResponse({'MESSAGE': 'SUCCESS'}, status=200)\n except KeyError:\n return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)\n\n @LoginRequired\n def delete(self, request, room_id):\n try:\n user = request.user\n wish = WishList.objects.get(wish_user=user, wish_room_id=room_id)\n wish.delete()\n return JsonResponse({'MESSAGE': 'Delete Success'}, status=200)\n except KeyError:\n return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)\n except WishList.DoesNotExist:\n return JsonResponse({'MESSAGE': 'Already not Exist in list'},\n status=400)\n\n @LoginRequired\n def get(self, request):\n try:\n user = request.user\n wishlists = WishList.objects.filter(wish_user=user)\n rating_list = [field.name for field in Review._meta.get_fields(\n ) if field.name not in ['id', 'review_user', 'review_room',\n 'comment']]\n if not wishlists:\n return JsonResponse({'MESSAGE': 'nothing in cart'}, status=400)\n result = [{'room_id': wishlist.wish_room.id, 'room_name':\n wishlist.wish_room.name, 'address': wishlist.wish_room.city,\n 'price': wishlist.wish_room.price, 'room_type': wishlist.\n wish_room.category.name, 'image': [image.url for image in\n wishlist.wish_room.image.all()], 'is_super': wishlist.\n wish_room.host.is_super, 'capacity': wishlist.wish_room.\n capacity, 'lat': wishlist.wish_room.latitude, 'lng':\n wishlist.wish_room.longtitude, 'amenity': [roomamenity.\n amenity.name for roomamenity in wishlist.wish_room.\n roomamenity_set.all()], 
'rating': [{'category': category,\n 'category_rating': Review.objects.filter(review_room=\n wishlist.wish_room).aggregate(Avg(category)).get(category +\n '__avg')} for category in rating_list]} for wishlist in\n wishlists]\n return JsonResponse({'result': result}, status=200)\n except KeyError:\n return JsonResponse({'MESSAGE': 'KEY ERROR'}, status=400)\n",
"step-5": "import json, re, bcrypt, jwt\n\nfrom datetime import datetime, timedelta\n\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db.models import Avg\nfrom django.http import JsonResponse\nfrom django.views import View\nfrom room.models import Room, Category, RoomAmenity,Image,Amenity,WishList,DisableDate,AbleTime\nfrom reservation.check import check, check_in, check_out\nfrom user.models import User, Host, Review\nfrom user.utils import LoginRequired\n\nclass RoomListView(View):\n def get(self,request):\n try: \n city = request.GET.get('city','')\n checkin = request.GET.get('checkin',None)\n checkout = request.GET.get('checkout', None)\n adult = int(request.GET.get('adult','0'))\n child = int(request.GET.get('child','0'))\n min_price = request.GET.get('min_price',0)\n max_price = request.GET.get('max_price',100000000)\n is_refund = True if request.GET.get('is_refund',None) == 'true' else False\n is_super = True if request.GET.get('is_super',None) == 'true' else False\n room_types = request.GET.getlist('room_type',None)\n amenities = request.GET.getlist('amenity',None)\n page = int(request.GET.get('page', '1'))\n \n #필터\n list_criteria = {\n 'city__contains': city,\n 'price__range' : [min_price,max_price],\n 'capacity__gte' : adult+child\n }\n if room_types: \n list_criteria['category__name__in'] = room_types\n if amenities: \n list_criteria['amenity__name__in'] = amenities\n if is_super: \n list_criteria['host__is_super'] = is_super\n if is_refund: \n list_criteria['is_refund'] = is_refund\n\n #paginator\n size = 10\n offset = (page-1) * size\n limit = page * size\n\n room_list = Room.objects.filter(**list_criteria)\n \n #날짜 필터\n if checkin and checkout:\n room_list = [room for room in room_list if check(room, checkin, checkout)]\n if checkin:\n room_list = [room for room in room_list if check_in(room, checkin)]\n if checkout:\n room_list = [room for room in room_list if check_out(room, checkout)]\n if not room_list:\n return 
JsonResponse({'message':'NO_ROOM_AVAILABLE'}, status=400)\n\n rating_list = [field.name for field in Review._meta.get_fields() if field.name not in ['id','review_user','review_room','comment']]\n \n room_thumbnail = [{\n 'room_id' : room.id,\n 'room_name' : room.name,\n 'price' : room.price,\n 'address' : room.city,\n 'room_type' : room.category.name,\n 'lat' : room.latitude,\n 'lng' : room.longtitude,\n 'image' : [image.url for image in room.image.all()],\n 'is_super' : room.host.is_super,\n 'capacity' : int(room.capacity),\n 'amenity' : [roomamenity.amenity.name for roomamenity in room.roomamenity_set.all()],\n 'rating' : [{\n 'category' : category,\n 'category_rating': Review.objects.filter(review_room=room).aggregate(rate_avg=Avg(category))['rate_avg']\n } for category in rating_list\n ]\n } for room in room_list[offset:limit]\n ]\n\n common_data = len(room_list)\n \n return JsonResponse({'thumbnail': room_thumbnail, 'common':common_data }, status=200)\n\n except KeyError:\n return JsonResponse({'message':'KeyError'}, status=400)\n\nclass RoomView(View):\n def get(self,request, room_id):\n try: \n room = Room.objects.get(id=room_id)\n rating_list = [field.name for field in Review._meta.get_fields() if field.name not in ['id','review_user','review_room','comment']]\n\n room_detail = {\n 'room_name': room.name,\n 'address' : room.city,\n 'price' : room.price,\n 'room_type': room.category.name,\n 'image' : [image.url for image in room.image.all()][0],\n 'is_super' : room.host.is_super,\n 'host' : room.host.user.last_name + room.host.user.first_name,\n 'capacity' : room.capacity,\n 'amenity' : [{\n 'id' : roomamenity.amenity.id,\n 'icon' : re.sub('<i class=\\\\\"|\\\\\"></i>', '',roomamenity.amenity.image),\n 'description': roomamenity.amenity.name\n } for roomamenity in room.roomamenity_set.all()\n ],\n 'rating' : [{\n 'category' : category,\n 'category_rating': int(Review.objects.filter(review_room=room).aggregate(Avg(category)).get(category+'__avg'))\n } for 
category in rating_list\n ]\n }\n \n return JsonResponse({'detail': room_detail}, status=200)\n \n except KeyError:\n return JsonResponse({'message':'KeyError'}, status=400)\n except Room.DoesNotExist:\n return JsonResponse({'message':'NOT_FOUND_ROOM_ID'}, status=400)\n\nclass WishListView(View):\n @LoginRequired\n def post(self, request, room_id):\n user = request.user\n\n try:\n if WishList.objects.filter(wish_user=user, wish_room_id=room_id).exists():\n return JsonResponse({'MESSAGE':'Already Choosen'}, status=400)\n \n WishList.objects.create(\n wish_user_id = 1,\n wish_room_id = room_id\n )\n return JsonResponse({'MESSAGE':'SUCCESS'}, status=200)\n\n except KeyError:\n return JsonResponse({'MESSAGE':'KEY ERROR'}, status=400) \n\n @LoginRequired\n def delete(self, request, room_id):\n try:\n user = request.user\n wish = WishList.objects.get(wish_user=user, wish_room_id=room_id)\n \n wish.delete()\n return JsonResponse({'MESSAGE':'Delete Success'}, status=200)\n \n except KeyError:\n return JsonResponse({'MESSAGE':'KEY ERROR'}, status=400)\n except WishList.DoesNotExist:\n return JsonResponse({'MESSAGE':'Already not Exist in list'}, status=400)\n\n @LoginRequired\n def get(self, request):\n try:\n user = request.user\n wishlists = WishList.objects.filter(wish_user = user)\n rating_list = [field.name for field in Review._meta.get_fields() if field.name not in ['id','review_user','review_room','comment']]\n\n if not wishlists:\n return JsonResponse({'MESSAGE':'nothing in cart'}, status=400)\n\n\n result = [{\n 'room_id' : wishlist.wish_room.id,\n 'room_name': wishlist.wish_room.name,\n 'address' : wishlist.wish_room.city,\n 'price' : wishlist.wish_room.price,\n 'room_type': wishlist.wish_room.category.name,\n 'image' : [image.url for image in wishlist.wish_room.image.all()],\n 'is_super' : wishlist.wish_room.host.is_super,\n 'capacity' : wishlist.wish_room.capacity,\n 'lat' : wishlist.wish_room.latitude,\n 'lng' : wishlist.wish_room.longtitude,\n 'amenity' : 
[roomamenity.amenity.name for roomamenity in wishlist.wish_room.roomamenity_set.all()],\n 'rating' : [{\n 'category' : category,\n 'category_rating': Review.objects.filter(review_room=wishlist.wish_room).aggregate(Avg(category)).get(category+'__avg')\n } for category in rating_list\n ]\n } for wishlist in wishlists]\n \n return JsonResponse({'result':result}, status=200)\n\n except KeyError:\n return JsonResponse({'MESSAGE':'KEY ERROR'}, status=400)\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, inspect
from flask import Flask, jsonify, render_template, redirect
from flask_pymongo import PyMongo
from config import mongo_password, mongo_username, sql_username, sql_password
from bson.json_util import dumps
# -- Relational database setup (PostgreSQL via SQLAlchemy) ---------------------
# Credentials come from config.py; the database name "Pokemon" is fixed here.
rds_connection_string = f"{sql_username}:{sql_password}@localhost:5432/Pokemon"
engine = create_engine(f'postgresql://{rds_connection_string}')
# Reflect the existing schema (automap) instead of declaring ORM models by hand.
Base = automap_base()
Base.prepare(engine, reflect=True)
# Mapped class for the reflected "pokemon" table; queried by the /stats route.
pokemon_sql = Base.classes.pokemon
# -- Flask application ---------------------------------------------------------
app = Flask(__name__)
# -- MongoDB (Atlas) connection; serves image documents for the /images route --
# NOTE(review): the username "MikeAnderson89" is hard-coded even though
# `mongo_username` is imported from config and never used — presumably the URI
# should interpolate it; confirm with the config owner before changing.
app.config['MONGO_URI'] = f'mongodb+srv://MikeAnderson89:{mongo_password}@cluster0-wadjd.mongodb.net/test?retryWrites=true&w=majority'
mongo = PyMongo(app)
@app.route("/")
def index():
    """Render the homepage, passing one sample Pokemon document from MongoDB."""
    sample_document = mongo.db.pokemon.find_one()
    return render_template("index.html", pokemon_data=sample_document)
# All Pokemon stats, served from the reflected PostgreSQL table.
@app.route("/stats")
def stats():
    """Return every row of the `pokemon` table as a JSON list of dicts.

    Opens a short-lived SQLAlchemy session, serializes each mapped row into
    a plain dict (keys match the front-end's expected field names), and
    guarantees the session is closed even if serialization fails.
    """
    session = Session(engine)
    try:
        rows = session.query(pokemon_sql).all()
        pokemon_list = [
            {
                'Name': row.name,
                'Number': row.number,
                'Type_1': row.type_1,
                'Type_2': row.type_2,
                'HP': row.hp,
                'Attack': row.attack,
                'Defense': row.defense,
                'Special_Attack': row.sp_atk,
                'Special_Defense': row.sp_def,
                'Speed': row.speed,
                'Generation': row.generation,
                'Legendary': row.legendary,
            }
            for row in rows
        ]
        return jsonify(pokemon_list)
    finally:
        # BUG FIX: the original called session.close() AFTER the return
        # statement, so it never executed and the DB session leaked on
        # every request. try/finally closes it unconditionally.
        session.close()
# MongoDB image database: every Pokemon document, minus the ObjectId.
@app.route("/images")
def images():
    """Return all Pokemon documents from MongoDB as JSON.

    Mongo's `_id` (an ObjectId) is not JSON-serializable, so it is removed
    from each document before the response is built.
    """
    stripped_docs = []
    for document in mongo.db.pokemon.find():
        document.pop('_id')  # drop the non-serializable ObjectId
        stripped_docs.append(document)
    return jsonify(stripped_docs)
# Start the Flask development server only when this file is run directly
# (not when imported); debug=True enables the reloader and debugger.
if __name__ == "__main__":
    app.run(debug=True)
|
normal
|
{
"blob_id": "15e1ce95398ff155fe594c3b39936d82d71ab9e2",
"index": 5015,
"step-1": "<mask token>\n\n\n@app.route('/')\ndef index():\n pokemon_data = mongo.db.pokemon.find_one()\n return render_template('index.html', pokemon_data=pokemon_data)\n\n\n@app.route('/stats')\ndef stats():\n session = Session(engine)\n stats = session.query(pokemon_sql).all()\n pokemon_list = []\n for pokeman in stats:\n pokeman = {'Name': pokeman.name, 'Number': pokeman.number, 'Type_1':\n pokeman.type_1, 'Type_2': pokeman.type_2, 'HP': pokeman.hp,\n 'Attack': pokeman.attack, 'Defense': pokeman.defense,\n 'Special_Attack': pokeman.sp_atk, 'Special_Defense': pokeman.\n sp_def, 'Speed': pokeman.speed, 'Generation': pokeman.\n generation, 'Legendary': pokeman.legendary}\n pokemon_list.append(pokeman)\n return jsonify(pokemon_list)\n session.close()\n\n\n@app.route('/images')\ndef images():\n pokemon_image_db = mongo.db.pokemon.find()\n images = []\n for image in pokemon_image_db:\n image.pop('_id')\n images.append(image)\n return jsonify(images)\n\n\n<mask token>\n",
"step-2": "<mask token>\nBase.prepare(engine, reflect=True)\n<mask token>\n\n\n@app.route('/')\ndef index():\n pokemon_data = mongo.db.pokemon.find_one()\n return render_template('index.html', pokemon_data=pokemon_data)\n\n\n@app.route('/stats')\ndef stats():\n session = Session(engine)\n stats = session.query(pokemon_sql).all()\n pokemon_list = []\n for pokeman in stats:\n pokeman = {'Name': pokeman.name, 'Number': pokeman.number, 'Type_1':\n pokeman.type_1, 'Type_2': pokeman.type_2, 'HP': pokeman.hp,\n 'Attack': pokeman.attack, 'Defense': pokeman.defense,\n 'Special_Attack': pokeman.sp_atk, 'Special_Defense': pokeman.\n sp_def, 'Speed': pokeman.speed, 'Generation': pokeman.\n generation, 'Legendary': pokeman.legendary}\n pokemon_list.append(pokeman)\n return jsonify(pokemon_list)\n session.close()\n\n\n@app.route('/images')\ndef images():\n pokemon_image_db = mongo.db.pokemon.find()\n images = []\n for image in pokemon_image_db:\n image.pop('_id')\n images.append(image)\n return jsonify(images)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-3": "<mask token>\nrds_connection_string = f'{sql_username}:{sql_password}@localhost:5432/Pokemon'\nengine = create_engine(f'postgresql://{rds_connection_string}')\nBase = automap_base()\nBase.prepare(engine, reflect=True)\npokemon_sql = Base.classes.pokemon\napp = Flask(__name__)\napp.config['MONGO_URI'] = (\n f'mongodb+srv://MikeAnderson89:{mongo_password}@cluster0-wadjd.mongodb.net/test?retryWrites=true&w=majority'\n )\nmongo = PyMongo(app)\n\n\n@app.route('/')\ndef index():\n pokemon_data = mongo.db.pokemon.find_one()\n return render_template('index.html', pokemon_data=pokemon_data)\n\n\n@app.route('/stats')\ndef stats():\n session = Session(engine)\n stats = session.query(pokemon_sql).all()\n pokemon_list = []\n for pokeman in stats:\n pokeman = {'Name': pokeman.name, 'Number': pokeman.number, 'Type_1':\n pokeman.type_1, 'Type_2': pokeman.type_2, 'HP': pokeman.hp,\n 'Attack': pokeman.attack, 'Defense': pokeman.defense,\n 'Special_Attack': pokeman.sp_atk, 'Special_Defense': pokeman.\n sp_def, 'Speed': pokeman.speed, 'Generation': pokeman.\n generation, 'Legendary': pokeman.legendary}\n pokemon_list.append(pokeman)\n return jsonify(pokemon_list)\n session.close()\n\n\n@app.route('/images')\ndef images():\n pokemon_image_db = mongo.db.pokemon.find()\n images = []\n for image in pokemon_image_db:\n image.pop('_id')\n images.append(image)\n return jsonify(images)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-4": "import sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func, inspect\nfrom flask import Flask, jsonify, render_template, redirect\nfrom flask_pymongo import PyMongo\nfrom config import mongo_password, mongo_username, sql_username, sql_password\nfrom bson.json_util import dumps\nrds_connection_string = f'{sql_username}:{sql_password}@localhost:5432/Pokemon'\nengine = create_engine(f'postgresql://{rds_connection_string}')\nBase = automap_base()\nBase.prepare(engine, reflect=True)\npokemon_sql = Base.classes.pokemon\napp = Flask(__name__)\napp.config['MONGO_URI'] = (\n f'mongodb+srv://MikeAnderson89:{mongo_password}@cluster0-wadjd.mongodb.net/test?retryWrites=true&w=majority'\n )\nmongo = PyMongo(app)\n\n\n@app.route('/')\ndef index():\n pokemon_data = mongo.db.pokemon.find_one()\n return render_template('index.html', pokemon_data=pokemon_data)\n\n\n@app.route('/stats')\ndef stats():\n session = Session(engine)\n stats = session.query(pokemon_sql).all()\n pokemon_list = []\n for pokeman in stats:\n pokeman = {'Name': pokeman.name, 'Number': pokeman.number, 'Type_1':\n pokeman.type_1, 'Type_2': pokeman.type_2, 'HP': pokeman.hp,\n 'Attack': pokeman.attack, 'Defense': pokeman.defense,\n 'Special_Attack': pokeman.sp_atk, 'Special_Defense': pokeman.\n sp_def, 'Speed': pokeman.speed, 'Generation': pokeman.\n generation, 'Legendary': pokeman.legendary}\n pokemon_list.append(pokeman)\n return jsonify(pokemon_list)\n session.close()\n\n\n@app.route('/images')\ndef images():\n pokemon_image_db = mongo.db.pokemon.find()\n images = []\n for image in pokemon_image_db:\n image.pop('_id')\n images.append(image)\n return jsonify(images)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-5": "import sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func, inspect\nfrom flask import Flask, jsonify, render_template, redirect\nfrom flask_pymongo import PyMongo\nfrom config import mongo_password, mongo_username, sql_username, sql_password\nfrom bson.json_util import dumps\n\n\n# Database Setup\nrds_connection_string = f\"{sql_username}:{sql_password}@localhost:5432/Pokemon\"\nengine = create_engine(f'postgresql://{rds_connection_string}')\n\n# Reflect existing database\nBase = automap_base()\nBase.prepare(engine, reflect=True)\n\n# Save reference to the table\npokemon_sql = Base.classes.pokemon\n\n# Flask Setup\napp = Flask(__name__)\n\n#Set up MongoDB Database\napp.config['MONGO_URI'] = f'mongodb+srv://MikeAnderson89:{mongo_password}@cluster0-wadjd.mongodb.net/test?retryWrites=true&w=majority'\nmongo = PyMongo(app)\n\n\n@app.route(\"/\")\ndef index():\n #Return the homepage\n pokemon_data = mongo.db.pokemon.find_one()\n return render_template(\"index.html\", pokemon_data = pokemon_data)\n\n\n#All Pokemon Stats\n@app.route(\"/stats\")\ndef stats():\n session = Session(engine)\n stats = session.query(pokemon_sql).all()\n pokemon_list =[]\n for pokeman in stats:\n pokeman = {'Name': pokeman.name,\n 'Number': pokeman.number,\n 'Type_1': pokeman.type_1,\n 'Type_2': pokeman.type_2,\n 'HP': pokeman.hp,\n 'Attack': pokeman.attack,\n 'Defense': pokeman.defense,\n 'Special_Attack': pokeman.sp_atk,\n 'Special_Defense': pokeman.sp_def,\n 'Speed': pokeman.speed,\n 'Generation': pokeman.generation,\n 'Legendary': pokeman.legendary}\n pokemon_list.append(pokeman)\n return jsonify(pokemon_list)\n session.close()\n\n#Mongo DB image database\n@app.route(\"/images\")\ndef images():\n pokemon_image_db = mongo.db.pokemon.find()\n images = []\n for image in pokemon_image_db:\n image.pop('_id')\n images.append(image)\n return jsonify(images)\n\n\nif __name__ == \"__main__\":\n 
app.run(debug=True)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015 RAPP
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Authors: Konstantinos Panayiotou, Manos Tsardoulias
# contact: klpanagi@gmail.com, etsardou@iti.gr
## @file ServiceController/ServiceControllerSync.py
#
# @copyright Rapp Projecty EU 2015
# @author Konstantinos Panayiotou, [klpanagi@gmail.com]
#
from ServiceControllerBase import *
# high-level interface for asynchronously executing callables.
from concurrent.futures import as_completed
class AsyncHandler(object):
    """! Asynchronous service call handler.

    Wraps the Future returned by an asynchronous service call and lets the
    caller block until the response is available.
    """

    def __init__(self, future):
        """! Constructor

        @param future - Future object holding the pending service response.
        """
        self.__future = future

    def wait(self, timeout=None):
        """! Wait for response.

        @param timeout - Optional argument. Set timeout (seconds) for waiting
            for a response; None blocks indefinitely.
        @returns resp - Response object
        """
        # BUG FIX: the original accepted `timeout` but never forwarded it to
        # Future.result(), so callers could not actually bound the wait.
        # Passing None preserves the old block-forever default.
        resp = self.__future.result(timeout)
        return resp
|
normal
|
{
"blob_id": "f4e287f5fce05e039c54f1108f6e73020b8d3d8f",
"index": 9346,
"step-1": "<mask token>\n\n\nclass AsyncHandler(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass AsyncHandler(object):\n <mask token>\n\n def __init__(self, future):\n \"\"\"! Constructor\n\n \"\"\"\n self.__future = future\n <mask token>\n\n def wait(self, timeout=None):\n resp = self.__future.result()\n return resp\n",
"step-3": "<mask token>\n\n\nclass AsyncHandler(object):\n \"\"\" Synchronous service controller class implementation. \"\"\"\n\n def __init__(self, future):\n \"\"\"! Constructor\n\n \"\"\"\n self.__future = future\n \"\"\"! Wait for response.\n\n @param timeout - Optional argument. Set timeout for waiting for a response.\n @returns resp - Response object\n \"\"\"\n\n def wait(self, timeout=None):\n resp = self.__future.result()\n return resp\n",
"step-4": "from ServiceControllerBase import *\nfrom concurrent.futures import as_completed\n\n\nclass AsyncHandler(object):\n \"\"\" Synchronous service controller class implementation. \"\"\"\n\n def __init__(self, future):\n \"\"\"! Constructor\n\n \"\"\"\n self.__future = future\n \"\"\"! Wait for response.\n\n @param timeout - Optional argument. Set timeout for waiting for a response.\n @returns resp - Response object\n \"\"\"\n\n def wait(self, timeout=None):\n resp = self.__future.result()\n return resp\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\n# Copyright 2015 RAPP\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n #http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Authors: Konstantinos Panayiotou, Manos Tsardoulias\n# contact: klpanagi@gmail.com, etsardou@iti.gr\n\n\n## @file ServiceController/ServiceControllerSync.py\n#\n# @copyright Rapp Projecty EU 2015\n# @author Konstantinos Panayiotou, [klpanagi@gmail.com]\n#\n\n\nfrom ServiceControllerBase import *\n\n# high-level interface for asynchronously executing callables.\nfrom concurrent.futures import as_completed\n\n\nclass AsyncHandler(object):\n \"\"\" Synchronous service controller class implementation. \"\"\"\n\n def __init__(self, future):\n \"\"\"! Constructor\n\n \"\"\"\n self.__future = future\n\n\n \"\"\"! Wait for response.\n\n @param timeout - Optional argument. Set timeout for waiting for a response.\n @returns resp - Response object\n \"\"\"\n def wait(self, timeout=None):\n resp = self.__future.result()\n return resp\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class ncbDB(Pconfig):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def ncb_getQuery(self, querySQL):
result = []
try:
with self.connect_db.cursor() as self.conferenceDBcurs:
self.conferenceDBcurs.execute(querySQL)
result = self.conferenceDBcurs.fetchall()
return True, result
except pymysql.ProgrammingError as er:
logging.critical(
'ERROR: ProgrammingError in Conference DB. Call to support immediately - %s, %s'
% (er[0], er[1]))
return False, er[1]
except pymysql.InternalError as er:
logging.critical(
'ERROR: InternallError in Conference DB. Call to support immediately - %s, %s'
% (er[0], er[1]))
return False, er[1]
except pymysql.Error as er:
logging.critical(
'ERROR: Can not get a data from Conference DB. Call to support immediately - %s, %s'
% (er[0], er[1]))
return False, er[1]
<|reserved_special_token_0|>
def listdicttodict(self, listdict):
return listdict[1][0]
<|reserved_special_token_0|>
def __del__(self):
self.connect_db.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ncbDB(Pconfig):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, srvrole):
super(ncbDB, self).__init__(srvrole)
self.hostname = socket.gethostname()
self.conferenceMediaStoragePath = self.lconfig.get('media',
'media_path')
try:
self.connect_db = pymysql.connect(self.db_server, self.
conferenceConfigDBname_user, self.
conferenceConfigDBname_passwd, self.conferenceConfigDBname,
charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor)
except pymysql.Error as er:
logging.critical(
'Can not establish connection to configuration DB: %s',
self.conferenceConfigDBname)
raise Exception(er[0], er[1])
def ncb_getQuery(self, querySQL):
result = []
try:
with self.connect_db.cursor() as self.conferenceDBcurs:
self.conferenceDBcurs.execute(querySQL)
result = self.conferenceDBcurs.fetchall()
return True, result
except pymysql.ProgrammingError as er:
logging.critical(
'ERROR: ProgrammingError in Conference DB. Call to support immediately - %s, %s'
% (er[0], er[1]))
return False, er[1]
except pymysql.InternalError as er:
logging.critical(
'ERROR: InternallError in Conference DB. Call to support immediately - %s, %s'
% (er[0], er[1]))
return False, er[1]
except pymysql.Error as er:
logging.critical(
'ERROR: Can not get a data from Conference DB. Call to support immediately - %s, %s'
% (er[0], er[1]))
return False, er[1]
def ncb_pushQuery(self, querySQL):
try:
with self.connect_db.cursor() as self.conferenceDBcurs:
self.conferenceDBcurs.execute(querySQL)
self.connect_db.commit()
return True, []
except pymysql.Error as er:
logging.critical(
'ERROR: Can not push a data into Conference DB. Call to support immediately - %s, %s'
% (er[0], er[1]))
return False, er[1]
except pymysql.IntegrityError as er:
logging.critical(
'ERROR: IntegrityError in Conference DB. Call to support immediately %s, %s'
% (er[0], er[1]))
return False, er[1]
except pymysql.OperationalError as er:
logging.critical(
'ERROR: OperationalError in Conference DB. Call to support immediately - %s, %s'
% (er[0], er[1]))
return False, er[1]
except pymysql.InternalError as er:
logging.critical(
'ERROR: InternallError in Conference DB. Call to support immediately - %s, %s'
% (er[0], er[1]))
return False, er[1]
def listdicttodict(self, listdict):
return listdict[1][0]
def getGlobalMediaPath(self):
if not os.path.exists(self.conferenceMediaStoragePath):
return None
else:
return self.conferenceMediaStoragePath
def __del__(self):
self.connect_db.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ncbDB(Pconfig):
hostname = None
conferenceMediaStoragePath = '/media/conference/'
conferenceDBcurs = None
connect_db = None
def __init__(self, srvrole):
super(ncbDB, self).__init__(srvrole)
self.hostname = socket.gethostname()
self.conferenceMediaStoragePath = self.lconfig.get('media',
'media_path')
try:
self.connect_db = pymysql.connect(self.db_server, self.
conferenceConfigDBname_user, self.
conferenceConfigDBname_passwd, self.conferenceConfigDBname,
charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor)
except pymysql.Error as er:
logging.critical(
'Can not establish connection to configuration DB: %s',
self.conferenceConfigDBname)
raise Exception(er[0], er[1])
def ncb_getQuery(self, querySQL):
result = []
try:
with self.connect_db.cursor() as self.conferenceDBcurs:
self.conferenceDBcurs.execute(querySQL)
result = self.conferenceDBcurs.fetchall()
return True, result
except pymysql.ProgrammingError as er:
logging.critical(
'ERROR: ProgrammingError in Conference DB. Call to support immediately - %s, %s'
% (er[0], er[1]))
return False, er[1]
except pymysql.InternalError as er:
logging.critical(
'ERROR: InternallError in Conference DB. Call to support immediately - %s, %s'
% (er[0], er[1]))
return False, er[1]
except pymysql.Error as er:
logging.critical(
'ERROR: Can not get a data from Conference DB. Call to support immediately - %s, %s'
% (er[0], er[1]))
return False, er[1]
def ncb_pushQuery(self, querySQL):
try:
with self.connect_db.cursor() as self.conferenceDBcurs:
self.conferenceDBcurs.execute(querySQL)
self.connect_db.commit()
return True, []
except pymysql.Error as er:
logging.critical(
'ERROR: Can not push a data into Conference DB. Call to support immediately - %s, %s'
% (er[0], er[1]))
return False, er[1]
except pymysql.IntegrityError as er:
logging.critical(
'ERROR: IntegrityError in Conference DB. Call to support immediately %s, %s'
% (er[0], er[1]))
return False, er[1]
except pymysql.OperationalError as er:
logging.critical(
'ERROR: OperationalError in Conference DB. Call to support immediately - %s, %s'
% (er[0], er[1]))
return False, er[1]
except pymysql.InternalError as er:
logging.critical(
'ERROR: InternallError in Conference DB. Call to support immediately - %s, %s'
% (er[0], er[1]))
return False, er[1]
def listdicttodict(self, listdict):
return listdict[1][0]
def getGlobalMediaPath(self):
if not os.path.exists(self.conferenceMediaStoragePath):
return None
else:
return self.conferenceMediaStoragePath
def __del__(self):
self.connect_db.close()
<|reserved_special_token_1|>
import pymysql
import logging
import socket
from models.platformconfig import Pconfig
class ncbDB(Pconfig):
hostname = None
conferenceMediaStoragePath = '/media/conference/'
conferenceDBcurs = None
connect_db = None
def __init__(self, srvrole):
super(ncbDB, self).__init__(srvrole)
self.hostname = socket.gethostname()
self.conferenceMediaStoragePath = self.lconfig.get('media',
'media_path')
try:
self.connect_db = pymysql.connect(self.db_server, self.
conferenceConfigDBname_user, self.
conferenceConfigDBname_passwd, self.conferenceConfigDBname,
charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor)
except pymysql.Error as er:
logging.critical(
'Can not establish connection to configuration DB: %s',
self.conferenceConfigDBname)
raise Exception(er[0], er[1])
def ncb_getQuery(self, querySQL):
result = []
try:
with self.connect_db.cursor() as self.conferenceDBcurs:
self.conferenceDBcurs.execute(querySQL)
result = self.conferenceDBcurs.fetchall()
return True, result
except pymysql.ProgrammingError as er:
logging.critical(
'ERROR: ProgrammingError in Conference DB. Call to support immediately - %s, %s'
% (er[0], er[1]))
return False, er[1]
except pymysql.InternalError as er:
logging.critical(
'ERROR: InternallError in Conference DB. Call to support immediately - %s, %s'
% (er[0], er[1]))
return False, er[1]
except pymysql.Error as er:
logging.critical(
'ERROR: Can not get a data from Conference DB. Call to support immediately - %s, %s'
% (er[0], er[1]))
return False, er[1]
def ncb_pushQuery(self, querySQL):
try:
with self.connect_db.cursor() as self.conferenceDBcurs:
self.conferenceDBcurs.execute(querySQL)
self.connect_db.commit()
return True, []
except pymysql.Error as er:
logging.critical(
'ERROR: Can not push a data into Conference DB. Call to support immediately - %s, %s'
% (er[0], er[1]))
return False, er[1]
except pymysql.IntegrityError as er:
logging.critical(
'ERROR: IntegrityError in Conference DB. Call to support immediately %s, %s'
% (er[0], er[1]))
return False, er[1]
except pymysql.OperationalError as er:
logging.critical(
'ERROR: OperationalError in Conference DB. Call to support immediately - %s, %s'
% (er[0], er[1]))
return False, er[1]
except pymysql.InternalError as er:
logging.critical(
'ERROR: InternallError in Conference DB. Call to support immediately - %s, %s'
% (er[0], er[1]))
return False, er[1]
def listdicttodict(self, listdict):
return listdict[1][0]
def getGlobalMediaPath(self):
if not os.path.exists(self.conferenceMediaStoragePath):
return None
else:
return self.conferenceMediaStoragePath
def __del__(self):
self.connect_db.close()
<|reserved_special_token_1|>
import pymysql
import logging
import socket
from models.platformconfig import Pconfig
class ncbDB(Pconfig):
# I have to retrieve basic configuration attributes, listed below, from system config file
# on ApplSrv, for example : /etc/ncb_applsrv/ncb_applsrv.conf
hostname = None
conferenceMediaStoragePath = '/media/conference/' # NFS mount point
conferenceDBcurs = None
connect_db = None
def __init__(self, srvrole):
super(ncbDB, self).__init__(srvrole) # run constructor of parent object
self.hostname = socket.gethostname() # get local hostname TODO: validate hostname with that one in config file
self.conferenceMediaStoragePath = self.lconfig.get('media', 'media_path') # TODO: if valid to remove it above
try:
self.connect_db = pymysql.connect(self.db_server,
self.conferenceConfigDBname_user,
self.conferenceConfigDBname_passwd,
self.conferenceConfigDBname,
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
# self.conferenceDBcurs = self.connect_db.cursor()
except pymysql.Error as er:
logging.critical("Can not establish connection to configuration DB: %s", self.conferenceConfigDBname)
raise Exception(er[0], er[1])
# the method executes SQL query and returns all fetched rows. Otherwise it returns None
def ncb_getQuery(self, querySQL):
result = []
try:
with self.connect_db.cursor() as self.conferenceDBcurs:
self.conferenceDBcurs.execute(querySQL)
result = self.conferenceDBcurs.fetchall()
return True, result
except pymysql.ProgrammingError as er:
logging.critical('ERROR: ProgrammingError in Conference DB. Call to support immediately - %s, %s' % (er[0], er[1]))
return False, er[1]
except pymysql.InternalError as er:
logging.critical('ERROR: InternallError in Conference DB. Call to support immediately - %s, %s' % (er[0], er[1]))
return False, er[1]
except pymysql.Error as er:
logging.critical('ERROR: Can not get a data from Conference DB. Call to support immediately - %s, %s' % (er[0], er[1]))
return False, er[1]
# the method executes SQL query to push data into DB.
def ncb_pushQuery(self, querySQL):
try:
with self.connect_db.cursor() as self.conferenceDBcurs:
self.conferenceDBcurs.execute(querySQL)
self.connect_db.commit()
return (True, [])
except pymysql.Error as er:
logging.critical('ERROR: Can not push a data into Conference DB. Call to support immediately - %s, %s' % (er[0], er[1]))
return False, er[1]
except pymysql.IntegrityError as er:
logging.critical('ERROR: IntegrityError in Conference DB. Call to support immediately %s, %s' % (er[0], er[1]))
return False, er[1]
except pymysql.OperationalError as er:
logging.critical('ERROR: OperationalError in Conference DB. Call to support immediately - %s, %s' % (er[0], er[1]))
return False, er[1]
except pymysql.InternalError as er:
logging.critical('ERROR: InternallError in Conference DB. Call to support immediately - %s, %s' % (er[0], er[1]))
return False, er[1]
# if more than one rows are retrieved - it gets first row from the list as a dictionary
def listdicttodict(self, listdict):
return listdict[1][0]
def getGlobalMediaPath(self):
if not os.path.exists(self.conferenceMediaStoragePath): # check it out whether it exist
return None # if it doesn't - return None
else:
return self.conferenceMediaStoragePath # otherwise return the path
def __del__(self):
self.connect_db.close()
|
flexible
|
{
"blob_id": "257a4d0b0c713624ea8452dbfd6c5a96c9a426ad",
"index": 8344,
"step-1": "<mask token>\n\n\nclass ncbDB(Pconfig):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def ncb_getQuery(self, querySQL):\n result = []\n try:\n with self.connect_db.cursor() as self.conferenceDBcurs:\n self.conferenceDBcurs.execute(querySQL)\n result = self.conferenceDBcurs.fetchall()\n return True, result\n except pymysql.ProgrammingError as er:\n logging.critical(\n 'ERROR: ProgrammingError in Conference DB. Call to support immediately - %s, %s'\n % (er[0], er[1]))\n return False, er[1]\n except pymysql.InternalError as er:\n logging.critical(\n 'ERROR: InternallError in Conference DB. Call to support immediately - %s, %s'\n % (er[0], er[1]))\n return False, er[1]\n except pymysql.Error as er:\n logging.critical(\n 'ERROR: Can not get a data from Conference DB. Call to support immediately - %s, %s'\n % (er[0], er[1]))\n return False, er[1]\n <mask token>\n\n def listdicttodict(self, listdict):\n return listdict[1][0]\n <mask token>\n\n def __del__(self):\n self.connect_db.close()\n",
"step-2": "<mask token>\n\n\nclass ncbDB(Pconfig):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, srvrole):\n super(ncbDB, self).__init__(srvrole)\n self.hostname = socket.gethostname()\n self.conferenceMediaStoragePath = self.lconfig.get('media',\n 'media_path')\n try:\n self.connect_db = pymysql.connect(self.db_server, self.\n conferenceConfigDBname_user, self.\n conferenceConfigDBname_passwd, self.conferenceConfigDBname,\n charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor)\n except pymysql.Error as er:\n logging.critical(\n 'Can not establish connection to configuration DB: %s',\n self.conferenceConfigDBname)\n raise Exception(er[0], er[1])\n\n def ncb_getQuery(self, querySQL):\n result = []\n try:\n with self.connect_db.cursor() as self.conferenceDBcurs:\n self.conferenceDBcurs.execute(querySQL)\n result = self.conferenceDBcurs.fetchall()\n return True, result\n except pymysql.ProgrammingError as er:\n logging.critical(\n 'ERROR: ProgrammingError in Conference DB. Call to support immediately - %s, %s'\n % (er[0], er[1]))\n return False, er[1]\n except pymysql.InternalError as er:\n logging.critical(\n 'ERROR: InternallError in Conference DB. Call to support immediately - %s, %s'\n % (er[0], er[1]))\n return False, er[1]\n except pymysql.Error as er:\n logging.critical(\n 'ERROR: Can not get a data from Conference DB. Call to support immediately - %s, %s'\n % (er[0], er[1]))\n return False, er[1]\n\n def ncb_pushQuery(self, querySQL):\n try:\n with self.connect_db.cursor() as self.conferenceDBcurs:\n self.conferenceDBcurs.execute(querySQL)\n self.connect_db.commit()\n return True, []\n except pymysql.Error as er:\n logging.critical(\n 'ERROR: Can not push a data into Conference DB. Call to support immediately - %s, %s'\n % (er[0], er[1]))\n return False, er[1]\n except pymysql.IntegrityError as er:\n logging.critical(\n 'ERROR: IntegrityError in Conference DB. 
Call to support immediately %s, %s'\n % (er[0], er[1]))\n return False, er[1]\n except pymysql.OperationalError as er:\n logging.critical(\n 'ERROR: OperationalError in Conference DB. Call to support immediately - %s, %s'\n % (er[0], er[1]))\n return False, er[1]\n except pymysql.InternalError as er:\n logging.critical(\n 'ERROR: InternallError in Conference DB. Call to support immediately - %s, %s'\n % (er[0], er[1]))\n return False, er[1]\n\n def listdicttodict(self, listdict):\n return listdict[1][0]\n\n def getGlobalMediaPath(self):\n if not os.path.exists(self.conferenceMediaStoragePath):\n return None\n else:\n return self.conferenceMediaStoragePath\n\n def __del__(self):\n self.connect_db.close()\n",
"step-3": "<mask token>\n\n\nclass ncbDB(Pconfig):\n hostname = None\n conferenceMediaStoragePath = '/media/conference/'\n conferenceDBcurs = None\n connect_db = None\n\n def __init__(self, srvrole):\n super(ncbDB, self).__init__(srvrole)\n self.hostname = socket.gethostname()\n self.conferenceMediaStoragePath = self.lconfig.get('media',\n 'media_path')\n try:\n self.connect_db = pymysql.connect(self.db_server, self.\n conferenceConfigDBname_user, self.\n conferenceConfigDBname_passwd, self.conferenceConfigDBname,\n charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor)\n except pymysql.Error as er:\n logging.critical(\n 'Can not establish connection to configuration DB: %s',\n self.conferenceConfigDBname)\n raise Exception(er[0], er[1])\n\n def ncb_getQuery(self, querySQL):\n result = []\n try:\n with self.connect_db.cursor() as self.conferenceDBcurs:\n self.conferenceDBcurs.execute(querySQL)\n result = self.conferenceDBcurs.fetchall()\n return True, result\n except pymysql.ProgrammingError as er:\n logging.critical(\n 'ERROR: ProgrammingError in Conference DB. Call to support immediately - %s, %s'\n % (er[0], er[1]))\n return False, er[1]\n except pymysql.InternalError as er:\n logging.critical(\n 'ERROR: InternallError in Conference DB. Call to support immediately - %s, %s'\n % (er[0], er[1]))\n return False, er[1]\n except pymysql.Error as er:\n logging.critical(\n 'ERROR: Can not get a data from Conference DB. Call to support immediately - %s, %s'\n % (er[0], er[1]))\n return False, er[1]\n\n def ncb_pushQuery(self, querySQL):\n try:\n with self.connect_db.cursor() as self.conferenceDBcurs:\n self.conferenceDBcurs.execute(querySQL)\n self.connect_db.commit()\n return True, []\n except pymysql.Error as er:\n logging.critical(\n 'ERROR: Can not push a data into Conference DB. Call to support immediately - %s, %s'\n % (er[0], er[1]))\n return False, er[1]\n except pymysql.IntegrityError as er:\n logging.critical(\n 'ERROR: IntegrityError in Conference DB. 
Call to support immediately %s, %s'\n % (er[0], er[1]))\n return False, er[1]\n except pymysql.OperationalError as er:\n logging.critical(\n 'ERROR: OperationalError in Conference DB. Call to support immediately - %s, %s'\n % (er[0], er[1]))\n return False, er[1]\n except pymysql.InternalError as er:\n logging.critical(\n 'ERROR: InternallError in Conference DB. Call to support immediately - %s, %s'\n % (er[0], er[1]))\n return False, er[1]\n\n def listdicttodict(self, listdict):\n return listdict[1][0]\n\n def getGlobalMediaPath(self):\n if not os.path.exists(self.conferenceMediaStoragePath):\n return None\n else:\n return self.conferenceMediaStoragePath\n\n def __del__(self):\n self.connect_db.close()\n",
"step-4": "import pymysql\nimport logging\nimport socket\nfrom models.platformconfig import Pconfig\n\n\nclass ncbDB(Pconfig):\n hostname = None\n conferenceMediaStoragePath = '/media/conference/'\n conferenceDBcurs = None\n connect_db = None\n\n def __init__(self, srvrole):\n super(ncbDB, self).__init__(srvrole)\n self.hostname = socket.gethostname()\n self.conferenceMediaStoragePath = self.lconfig.get('media',\n 'media_path')\n try:\n self.connect_db = pymysql.connect(self.db_server, self.\n conferenceConfigDBname_user, self.\n conferenceConfigDBname_passwd, self.conferenceConfigDBname,\n charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor)\n except pymysql.Error as er:\n logging.critical(\n 'Can not establish connection to configuration DB: %s',\n self.conferenceConfigDBname)\n raise Exception(er[0], er[1])\n\n def ncb_getQuery(self, querySQL):\n result = []\n try:\n with self.connect_db.cursor() as self.conferenceDBcurs:\n self.conferenceDBcurs.execute(querySQL)\n result = self.conferenceDBcurs.fetchall()\n return True, result\n except pymysql.ProgrammingError as er:\n logging.critical(\n 'ERROR: ProgrammingError in Conference DB. Call to support immediately - %s, %s'\n % (er[0], er[1]))\n return False, er[1]\n except pymysql.InternalError as er:\n logging.critical(\n 'ERROR: InternallError in Conference DB. Call to support immediately - %s, %s'\n % (er[0], er[1]))\n return False, er[1]\n except pymysql.Error as er:\n logging.critical(\n 'ERROR: Can not get a data from Conference DB. Call to support immediately - %s, %s'\n % (er[0], er[1]))\n return False, er[1]\n\n def ncb_pushQuery(self, querySQL):\n try:\n with self.connect_db.cursor() as self.conferenceDBcurs:\n self.conferenceDBcurs.execute(querySQL)\n self.connect_db.commit()\n return True, []\n except pymysql.Error as er:\n logging.critical(\n 'ERROR: Can not push a data into Conference DB. 
Call to support immediately - %s, %s'\n % (er[0], er[1]))\n return False, er[1]\n except pymysql.IntegrityError as er:\n logging.critical(\n 'ERROR: IntegrityError in Conference DB. Call to support immediately %s, %s'\n % (er[0], er[1]))\n return False, er[1]\n except pymysql.OperationalError as er:\n logging.critical(\n 'ERROR: OperationalError in Conference DB. Call to support immediately - %s, %s'\n % (er[0], er[1]))\n return False, er[1]\n except pymysql.InternalError as er:\n logging.critical(\n 'ERROR: InternallError in Conference DB. Call to support immediately - %s, %s'\n % (er[0], er[1]))\n return False, er[1]\n\n def listdicttodict(self, listdict):\n return listdict[1][0]\n\n def getGlobalMediaPath(self):\n if not os.path.exists(self.conferenceMediaStoragePath):\n return None\n else:\n return self.conferenceMediaStoragePath\n\n def __del__(self):\n self.connect_db.close()\n",
"step-5": "import pymysql\nimport logging\nimport socket\nfrom models.platformconfig import Pconfig\n\n\nclass ncbDB(Pconfig):\n # I have to retrieve basic configuration attributes, listed below, from system config file\n # on ApplSrv, for example : /etc/ncb_applsrv/ncb_applsrv.conf\n\n hostname = None\n conferenceMediaStoragePath = '/media/conference/' # NFS mount point\n conferenceDBcurs = None\n connect_db = None\n\n def __init__(self, srvrole):\n super(ncbDB, self).__init__(srvrole) # run constructor of parent object\n\n self.hostname = socket.gethostname() # get local hostname TODO: validate hostname with that one in config file\n self.conferenceMediaStoragePath = self.lconfig.get('media', 'media_path') # TODO: if valid to remove it above\n try:\n self.connect_db = pymysql.connect(self.db_server,\n self.conferenceConfigDBname_user,\n self.conferenceConfigDBname_passwd,\n self.conferenceConfigDBname,\n charset='utf8mb4',\n cursorclass=pymysql.cursors.DictCursor)\n # self.conferenceDBcurs = self.connect_db.cursor()\n except pymysql.Error as er:\n logging.critical(\"Can not establish connection to configuration DB: %s\", self.conferenceConfigDBname)\n raise Exception(er[0], er[1])\n\n # the method executes SQL query and returns all fetched rows. Otherwise it returns None\n def ncb_getQuery(self, querySQL):\n result = []\n try:\n with self.connect_db.cursor() as self.conferenceDBcurs:\n self.conferenceDBcurs.execute(querySQL)\n result = self.conferenceDBcurs.fetchall()\n return True, result\n except pymysql.ProgrammingError as er:\n logging.critical('ERROR: ProgrammingError in Conference DB. Call to support immediately - %s, %s' % (er[0], er[1]))\n return False, er[1]\n except pymysql.InternalError as er:\n logging.critical('ERROR: InternallError in Conference DB. Call to support immediately - %s, %s' % (er[0], er[1]))\n return False, er[1]\n except pymysql.Error as er:\n logging.critical('ERROR: Can not get a data from Conference DB. 
Call to support immediately - %s, %s' % (er[0], er[1]))\n return False, er[1]\n\n # the method executes SQL query to push data into DB.\n def ncb_pushQuery(self, querySQL):\n try:\n with self.connect_db.cursor() as self.conferenceDBcurs:\n self.conferenceDBcurs.execute(querySQL)\n self.connect_db.commit()\n return (True, [])\n except pymysql.Error as er:\n logging.critical('ERROR: Can not push a data into Conference DB. Call to support immediately - %s, %s' % (er[0], er[1]))\n return False, er[1]\n except pymysql.IntegrityError as er:\n logging.critical('ERROR: IntegrityError in Conference DB. Call to support immediately %s, %s' % (er[0], er[1]))\n return False, er[1]\n except pymysql.OperationalError as er:\n logging.critical('ERROR: OperationalError in Conference DB. Call to support immediately - %s, %s' % (er[0], er[1]))\n return False, er[1]\n except pymysql.InternalError as er:\n logging.critical('ERROR: InternallError in Conference DB. Call to support immediately - %s, %s' % (er[0], er[1]))\n return False, er[1]\n\n # if more than one rows are retrieved - it gets first row from the list as a dictionary\n def listdicttodict(self, listdict):\n return listdict[1][0]\n\n def getGlobalMediaPath(self):\n if not os.path.exists(self.conferenceMediaStoragePath): # check it out whether it exist\n return None # if it doesn't - return None\n else:\n return self.conferenceMediaStoragePath # otherwise return the path\n\n def __del__(self):\n self.connect_db.close()\n",
"step-ids": [
4,
7,
8,
9,
10
]
}
|
[
4,
7,
8,
9,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append('./Pytorch-UNet/')
<|reserved_special_token_0|>
if __name__ == '__main__':
logger = Logger()
torch.backends.cudnn.benchmark = True
args = parse_args()
logger.update_args(args)
if not os.path.exists(args.output_dir):
os.mkdir(args.output_dir)
else:
print(
'WARNING: Output directory already exists and will be overwriting (if not resuming)'
)
wandb.init(project=args.project_name)
filters_cpu = create_filters(device='cpu')
default_transform = transforms.Compose([transforms.CenterCrop(args.
image_size), transforms.Resize(args.image_size), transforms.ToTensor()]
)
ds_name, classes = parse_dataset_args(args.dataset)
train_dataset = create_dataset(ds_name, args.train_dir, transform=
default_transform, classes=classes[0] if classes else None)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=
args.batch_size, shuffle=True, num_workers=args.workers, pin_memory
=True, drop_last=True)
valid_dataset = create_dataset(ds_name, args.valid_dir, transform=
default_transform, classes=classes[1] if classes else None)
valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=
args.batch_size, shuffle=True, num_workers=args.workers, pin_memory
=True, drop_last=True)
print('Loading model 128 weights')
model_128 = UNet_NTail_128_Mod(n_channels=12, n_classes=3, n_tails=12,
bilinear=True).to(args.device)
model_128 = load_weights(model_128, args.model_128_weights, args)
model = UNet_NTail_128_Mod(n_channels=48, n_classes=3, n_tails=48,
bilinear=True).to(args.device)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
state_dict = {'itr': 0}
if args.resume:
print('Loading weights & resuming from iteration {}'.format(args.
checkpoint))
model, optimizer, logger = load_checkpoint(model, optimizer, '256',
args)
state_dict['itr'] = args.checkpoint
for epoch in range(args.num_epochs):
train_unet256(epoch, state_dict, model, model_128, optimizer,
train_loader, valid_loader, args, logger)
<|reserved_special_token_1|>
import os, sys
sys.path.append('./Pytorch-UNet/')
import torch
from torch import optim
import torchvision.transforms as transforms
import torchvision.datasets as dset
import wandb
from datasets import parse_dataset_args, create_dataset
from wt_utils import wt, create_filters, load_checkpoint, load_weights
from arguments import parse_args
from unet.unet_model import UNet_NTail_128_Mod
from train import train_unet256
from logger import Logger
if __name__ == '__main__':
logger = Logger()
torch.backends.cudnn.benchmark = True
args = parse_args()
logger.update_args(args)
if not os.path.exists(args.output_dir):
os.mkdir(args.output_dir)
else:
print(
'WARNING: Output directory already exists and will be overwriting (if not resuming)'
)
wandb.init(project=args.project_name)
filters_cpu = create_filters(device='cpu')
default_transform = transforms.Compose([transforms.CenterCrop(args.
image_size), transforms.Resize(args.image_size), transforms.ToTensor()]
)
ds_name, classes = parse_dataset_args(args.dataset)
train_dataset = create_dataset(ds_name, args.train_dir, transform=
default_transform, classes=classes[0] if classes else None)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=
args.batch_size, shuffle=True, num_workers=args.workers, pin_memory
=True, drop_last=True)
valid_dataset = create_dataset(ds_name, args.valid_dir, transform=
default_transform, classes=classes[1] if classes else None)
valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=
args.batch_size, shuffle=True, num_workers=args.workers, pin_memory
=True, drop_last=True)
print('Loading model 128 weights')
model_128 = UNet_NTail_128_Mod(n_channels=12, n_classes=3, n_tails=12,
bilinear=True).to(args.device)
model_128 = load_weights(model_128, args.model_128_weights, args)
model = UNet_NTail_128_Mod(n_channels=48, n_classes=3, n_tails=48,
bilinear=True).to(args.device)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
state_dict = {'itr': 0}
if args.resume:
print('Loading weights & resuming from iteration {}'.format(args.
checkpoint))
model, optimizer, logger = load_checkpoint(model, optimizer, '256',
args)
state_dict['itr'] = args.checkpoint
for epoch in range(args.num_epochs):
train_unet256(epoch, state_dict, model, model_128, optimizer,
train_loader, valid_loader, args, logger)
<|reserved_special_token_1|>
import os, sys
sys.path.append('./Pytorch-UNet/')
import torch
from torch import optim
import torchvision.transforms as transforms
import torchvision.datasets as dset
import wandb
from datasets import parse_dataset_args, create_dataset
from wt_utils import wt, create_filters, load_checkpoint, load_weights
from arguments import parse_args
from unet.unet_model import UNet_NTail_128_Mod
from train import train_unet256
from logger import Logger
if __name__ == "__main__":
# Set up logger
logger = Logger()
# Accelerate training with benchmark true
torch.backends.cudnn.benchmark = True
# Parse arguments & log
args = parse_args()
logger.update_args(args)
# Create output directory
if not os.path.exists(args.output_dir):
os.mkdir(args.output_dir)
else:
print('WARNING: Output directory already exists and will be overwriting (if not resuming)')
# Initialize wandb
wandb.init(project=args.project_name)
# Create filters for dataloader
filters_cpu = create_filters(device='cpu')
# Create transforms
default_transform = transforms.Compose([
transforms.CenterCrop(args.image_size),
transforms.Resize(args.image_size),
transforms.ToTensor()
])
# Parsing dataset arguments
ds_name, classes = parse_dataset_args(args.dataset)
# Create train dataset
train_dataset = create_dataset(ds_name, args.train_dir, transform=default_transform, classes=classes[0] if classes else None)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size,
shuffle=True, num_workers=args.workers,
pin_memory=True, drop_last=True)
# Create validation dataset
valid_dataset = create_dataset(ds_name, args.valid_dir, transform=default_transform, classes=classes[1] if classes else None)
valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=args.batch_size,
shuffle=True, num_workers=args.workers,
pin_memory=True, drop_last=True)
# Load 128 model
print('Loading model 128 weights')
model_128 = UNet_NTail_128_Mod(n_channels=12, n_classes=3, n_tails=12, bilinear=True).to(args.device)
model_128 = load_weights(model_128, args.model_128_weights, args)
# Model and optimizer
model = UNet_NTail_128_Mod(n_channels=48, n_classes=3, n_tails=48, bilinear=True).to(args.device)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
state_dict = {'itr': 0}
if args.resume:
print('Loading weights & resuming from iteration {}'.format(args.checkpoint))
model, optimizer, logger = load_checkpoint(model, optimizer, '256', args)
state_dict['itr'] = args.checkpoint
for epoch in range(args.num_epochs):
train_unet256(epoch, state_dict, model, model_128, optimizer, train_loader, valid_loader, args, logger)
|
flexible
|
{
"blob_id": "fbd5c7fa335d6bde112e41a55d15aee31e3ebaf7",
"index": 2759,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsys.path.append('./Pytorch-UNet/')\n<mask token>\nif __name__ == '__main__':\n logger = Logger()\n torch.backends.cudnn.benchmark = True\n args = parse_args()\n logger.update_args(args)\n if not os.path.exists(args.output_dir):\n os.mkdir(args.output_dir)\n else:\n print(\n 'WARNING: Output directory already exists and will be overwriting (if not resuming)'\n )\n wandb.init(project=args.project_name)\n filters_cpu = create_filters(device='cpu')\n default_transform = transforms.Compose([transforms.CenterCrop(args.\n image_size), transforms.Resize(args.image_size), transforms.ToTensor()]\n )\n ds_name, classes = parse_dataset_args(args.dataset)\n train_dataset = create_dataset(ds_name, args.train_dir, transform=\n default_transform, classes=classes[0] if classes else None)\n train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=\n args.batch_size, shuffle=True, num_workers=args.workers, pin_memory\n =True, drop_last=True)\n valid_dataset = create_dataset(ds_name, args.valid_dir, transform=\n default_transform, classes=classes[1] if classes else None)\n valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=\n args.batch_size, shuffle=True, num_workers=args.workers, pin_memory\n =True, drop_last=True)\n print('Loading model 128 weights')\n model_128 = UNet_NTail_128_Mod(n_channels=12, n_classes=3, n_tails=12,\n bilinear=True).to(args.device)\n model_128 = load_weights(model_128, args.model_128_weights, args)\n model = UNet_NTail_128_Mod(n_channels=48, n_classes=3, n_tails=48,\n bilinear=True).to(args.device)\n optimizer = optim.Adam(model.parameters(), lr=args.lr)\n state_dict = {'itr': 0}\n if args.resume:\n print('Loading weights & resuming from iteration {}'.format(args.\n checkpoint))\n model, optimizer, logger = load_checkpoint(model, optimizer, '256',\n args)\n state_dict['itr'] = args.checkpoint\n for epoch in range(args.num_epochs):\n train_unet256(epoch, state_dict, model, model_128, optimizer,\n 
train_loader, valid_loader, args, logger)\n",
"step-3": "import os, sys\nsys.path.append('./Pytorch-UNet/')\nimport torch\nfrom torch import optim\nimport torchvision.transforms as transforms\nimport torchvision.datasets as dset\nimport wandb\nfrom datasets import parse_dataset_args, create_dataset\nfrom wt_utils import wt, create_filters, load_checkpoint, load_weights\nfrom arguments import parse_args\nfrom unet.unet_model import UNet_NTail_128_Mod\nfrom train import train_unet256\nfrom logger import Logger\nif __name__ == '__main__':\n logger = Logger()\n torch.backends.cudnn.benchmark = True\n args = parse_args()\n logger.update_args(args)\n if not os.path.exists(args.output_dir):\n os.mkdir(args.output_dir)\n else:\n print(\n 'WARNING: Output directory already exists and will be overwriting (if not resuming)'\n )\n wandb.init(project=args.project_name)\n filters_cpu = create_filters(device='cpu')\n default_transform = transforms.Compose([transforms.CenterCrop(args.\n image_size), transforms.Resize(args.image_size), transforms.ToTensor()]\n )\n ds_name, classes = parse_dataset_args(args.dataset)\n train_dataset = create_dataset(ds_name, args.train_dir, transform=\n default_transform, classes=classes[0] if classes else None)\n train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=\n args.batch_size, shuffle=True, num_workers=args.workers, pin_memory\n =True, drop_last=True)\n valid_dataset = create_dataset(ds_name, args.valid_dir, transform=\n default_transform, classes=classes[1] if classes else None)\n valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=\n args.batch_size, shuffle=True, num_workers=args.workers, pin_memory\n =True, drop_last=True)\n print('Loading model 128 weights')\n model_128 = UNet_NTail_128_Mod(n_channels=12, n_classes=3, n_tails=12,\n bilinear=True).to(args.device)\n model_128 = load_weights(model_128, args.model_128_weights, args)\n model = UNet_NTail_128_Mod(n_channels=48, n_classes=3, n_tails=48,\n bilinear=True).to(args.device)\n optimizer = 
optim.Adam(model.parameters(), lr=args.lr)\n state_dict = {'itr': 0}\n if args.resume:\n print('Loading weights & resuming from iteration {}'.format(args.\n checkpoint))\n model, optimizer, logger = load_checkpoint(model, optimizer, '256',\n args)\n state_dict['itr'] = args.checkpoint\n for epoch in range(args.num_epochs):\n train_unet256(epoch, state_dict, model, model_128, optimizer,\n train_loader, valid_loader, args, logger)\n",
"step-4": "import os, sys\nsys.path.append('./Pytorch-UNet/')\nimport torch\nfrom torch import optim\nimport torchvision.transforms as transforms\nimport torchvision.datasets as dset\nimport wandb\n\nfrom datasets import parse_dataset_args, create_dataset\nfrom wt_utils import wt, create_filters, load_checkpoint, load_weights\nfrom arguments import parse_args\nfrom unet.unet_model import UNet_NTail_128_Mod\nfrom train import train_unet256\nfrom logger import Logger\n\nif __name__ == \"__main__\":\n # Set up logger\n logger = Logger()\n \n # Accelerate training with benchmark true\n torch.backends.cudnn.benchmark = True\n\n # Parse arguments & log\n args = parse_args()\n logger.update_args(args)\n\n # Create output directory\n if not os.path.exists(args.output_dir):\n os.mkdir(args.output_dir)\n else:\n print('WARNING: Output directory already exists and will be overwriting (if not resuming)')\n\n # Initialize wandb\n wandb.init(project=args.project_name)\n \n # Create filters for dataloader\n filters_cpu = create_filters(device='cpu')\n\n # Create transforms\n default_transform = transforms.Compose([\n transforms.CenterCrop(args.image_size),\n transforms.Resize(args.image_size),\n transforms.ToTensor()\n ])\n \n # Parsing dataset arguments\n ds_name, classes = parse_dataset_args(args.dataset)\n\n # Create train dataset\n train_dataset = create_dataset(ds_name, args.train_dir, transform=default_transform, classes=classes[0] if classes else None)\n train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size,\n shuffle=True, num_workers=args.workers,\n pin_memory=True, drop_last=True)\n\n # Create validation dataset\n valid_dataset = create_dataset(ds_name, args.valid_dir, transform=default_transform, classes=classes[1] if classes else None)\n valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=args.batch_size,\n shuffle=True, num_workers=args.workers,\n pin_memory=True, drop_last=True)\n\n # Load 128 model\n print('Loading 
model 128 weights')\n model_128 = UNet_NTail_128_Mod(n_channels=12, n_classes=3, n_tails=12, bilinear=True).to(args.device)\n model_128 = load_weights(model_128, args.model_128_weights, args)\n\n # Model and optimizer\n model = UNet_NTail_128_Mod(n_channels=48, n_classes=3, n_tails=48, bilinear=True).to(args.device)\n optimizer = optim.Adam(model.parameters(), lr=args.lr)\n\n state_dict = {'itr': 0}\n\n if args.resume:\n print('Loading weights & resuming from iteration {}'.format(args.checkpoint))\n model, optimizer, logger = load_checkpoint(model, optimizer, '256', args)\n state_dict['itr'] = args.checkpoint\n\n for epoch in range(args.num_epochs):\n train_unet256(epoch, state_dict, model, model_128, optimizer, train_loader, valid_loader, args, logger)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import sys
from collections import deque


def printing_order(priorities, target):
    """Return the 1-based position at which one document gets printed.

    The printer repeatedly examines the front of the queue: if any
    remaining document has a higher priority, the front document is moved
    to the back; otherwise it is printed.

    :param priorities: iterable of integer priorities, front of queue first.
    :param target: 0-based index (in the original queue) of the document
        whose print position is wanted.
    :return: 1-based count of documents printed when the target prints.
    """
    queue = deque(priorities)
    printed = 0
    while queue:
        highest = max(queue)  # highest priority still waiting
        doc = queue.popleft()
        target -= 1
        if doc != highest:
            # Not the most important document: requeue it at the back.
            queue.append(doc)
            if target < 0:
                # The tracked document was just requeued; it is now last.
                target = len(queue) - 1
        else:
            printed += 1
            if target < 0:
                # The tracked document itself was printed.
                return printed


def main():
    """Read test cases from stdin (count line, then 'n m' and priorities)."""
    t = int(sys.stdin.readline().rstrip())
    for _ in range(t):
        _n, m = map(int, sys.stdin.readline().split())
        priorities = list(map(int, sys.stdin.readline().split()))
        print(printing_order(priorities, m))


if __name__ == '__main__':
    main()
|
normal
|
{
"blob_id": "a571abd88184c8d8bb05245e9c3ce2e4dabb4c09",
"index": 615,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor _ in range(t):\n n, m = map(int, sys.stdin.readline().split())\n q = deque(map(int, sys.stdin.readline().split()))\n count = 0\n while q:\n highest = max(q)\n doc = q.popleft()\n m -= 1\n if doc != highest:\n q.append(doc)\n if m < 0:\n m = len(q) - 1\n else:\n count += 1\n if m < 0:\n print(count)\n break\n",
"step-3": "<mask token>\nt = int(sys.stdin.readline().rstrip())\nfor _ in range(t):\n n, m = map(int, sys.stdin.readline().split())\n q = deque(map(int, sys.stdin.readline().split()))\n count = 0\n while q:\n highest = max(q)\n doc = q.popleft()\n m -= 1\n if doc != highest:\n q.append(doc)\n if m < 0:\n m = len(q) - 1\n else:\n count += 1\n if m < 0:\n print(count)\n break\n",
"step-4": "import sys\nfrom collections import deque\nt = int(sys.stdin.readline().rstrip())\nfor _ in range(t):\n n, m = map(int, sys.stdin.readline().split())\n q = deque(map(int, sys.stdin.readline().split()))\n count = 0\n while q:\n highest = max(q)\n doc = q.popleft()\n m -= 1\n if doc != highest:\n q.append(doc)\n if m < 0:\n m = len(q) - 1\n else:\n count += 1\n if m < 0:\n print(count)\n break\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from platypush.message.response import Response
class CameraResponse(Response):
    """Marker response type for camera actions.

    Empty subclass of :class:`platypush.message.response.Response`; it
    adds no behaviour of its own and exists so camera-related responses
    carry a distinct type.
    """
    pass
# vim:sw=4:ts=4:et:
|
normal
|
{
"blob_id": "4c38d0487f99cdc91cbce50079906f7336e51482",
"index": 5462,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass CameraResponse(Response):\n pass\n",
"step-3": "from platypush.message.response import Response\n\n\nclass CameraResponse(Response):\n pass\n",
"step-4": "from platypush.message.response import Response\n\n\nclass CameraResponse(Response):\n pass\n\n\n# vim:sw=4:ts=4:et:\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from core.models import Atom
from core.models.vector3d import cVector3D
from fractions import Fraction
class SpaceGroup(object):
    """A crystallographic space group.

    Holds the generator symmetry operations together with lattice
    system/centering and inversion information, and on construction
    expands them into ``full_symmetry`` -- the complete operation list
    (identity + generators, their inversion-related copies, and
    centering-translated copies).
    """

    def __init__(self,
                 index=None,
                 name=None,
                 lattice_system=None,
                 lattice_centering=None,
                 inversion=None,
                 symmetry=None,
                 asymmetric_unit=None,
                 unique_axis=None):
        """
        :param index: space-group number (presumably ITA numbering -- TODO confirm)
        :param name: space-group symbol
        :param lattice_system: lattice system descriptor
        :param lattice_centering: a :class:`Centering` instance, or None
        :param inversion: an :class:`Inversion` instance
            (:class:`CentroSymmetric` / :class:`NonCentroSymmetric`), or None
        :param symmetry: list of :class:`SymmetryOperation` generators
        :param asymmetric_unit: coordinate bounds as in :class:`AsymmetricUnit`
        :param unique_axis: axis index as in :class:`UniqueAxis`
        """
        self.index = index
        self.name = name
        self.lattice_system = lattice_system
        self.lattice_centering = lattice_centering
        self.inversion = inversion
        self.symmetry = symmetry
        self.asymmetric_unit = asymmetric_unit
        self.unique_axis = unique_axis
        # Operations after identity/inversion expansion but before any
        # centering translations are applied.
        self.non_centering_symmetry = []

        # Fully expanded operation list; populated by the call below.
        self.full_symmetry = []
        self.__compute_full_symmetry()

    @property
    def identity(self):
        """The identity operation ``x,y,z`` (a fresh instance each access)."""
        return SymmetryOperation('x,y,z')

    def __append_identity(self):
        # Ensure the identity operation is present in the generators;
        # if missing it is prepended so it stays first.
        if 'x,y,z' not in [i.operation_string for i in self.symmetry]:
            self.symmetry = [self.identity] + self.symmetry

    def __add_inversion_symmetry(self):
        # Copy the generators; for centrosymmetric groups also append the
        # inversion-related copy of every operation.
        for op in self.symmetry:
            self.non_centering_symmetry.append(op)

        if isinstance(self.inversion,CentroSymmetric):
            # Iterate by index over the length captured by range() so
            # appending while looping does not re-process new entries.
            for i in range(len(self.non_centering_symmetry)):
                op = self.non_centering_symmetry[i]
                self.non_centering_symmetry.append(op.inversion())

    def __add_centering_symmetry(self):
        # Append the centering-translated copies of every operation
        # accumulated so far (no-op when lattice_centering is falsy).
        if self.lattice_centering:
            for i in range(len(self.full_symmetry)):
                op = self.full_symmetry[i]
                centering_ops = self.lattice_centering.transform(op)
                self.full_symmetry += centering_ops

    def __compute_full_symmetry(self):
        # Build full_symmetry in a fixed order: identity first, then
        # inversion copies, then centering translations.
        self.__append_identity()
        self.__add_inversion_symmetry()
        self.full_symmetry = [op for op in self.non_centering_symmetry]
        self.__add_centering_symmetry()
        return self.full_symmetry
class SymmetryOperation(object):
    """One space-group symmetry operation, e.g. ``"-x,y+1/2,-z"``.

    Stores the (lowercased) operation string and a callable that
    evaluates the operation at a fractional coordinate.
    """

    def __init__(self, operation_string):
        """
        :param operation_string: operation string as read from a res/cif
            file; stored lowercased.
        """
        self.operation_string = operation_string.lower()
        self.operation_function = None
        self.__set_operation_function()

    def __set_operation_function(self):
        """Bind ``operation_function`` to the shared evaluator (once)."""
        if self.operation_function is not None:
            return self.operation_function
        self.operation_function = symm_eval

    def transform_scaled_position(self, data):
        """Apply this operation to a fractional coordinate.

        :param data: a ``cVector3D`` of fractional coordinates.
        :return: the symmetry-transformed ``cVector3D``.
        """
        prepared = prepare_operation(self.operation_string)
        return self.operation_function(prepared, data)

    def transform_atom(self, atom):
        """Return a new ``Atom`` at the transformed position of *atom*."""
        new_position = self.transform_scaled_position(atom.scaled_position)
        return Atom(label=atom.label, scaled_position=new_position)

    def inversion(self):
        """Return a new operation that is this one composed with -1."""
        parts = tuple(self.operation_string.split(","))
        return self.__class__("-1*(%s),-1*(%s),-1*(%s)" % parts)
class Symmetry(object):
    """Parses ``;``-separated symmetry-operation strings."""

    @staticmethod
    def get(value):
        """Convert *value* into a list of :class:`SymmetryOperation` objects.

        :param value: ``;``-separated operation strings
            (e.g. ``"x,y,z;-x,-y,-z"``); an empty string or ``'UNKNOWN'``
            yields an empty list.
        :return: list of SymmetryOperation instances.
        """
        # Bug fix: the original tested ``value is ''`` -- an identity
        # comparison against a literal, which only works because CPython
        # happens to intern the empty string. Use equality instead.
        if value in ('', 'UNKNOWN'):
            return []
        return [SymmetryOperation(v) for v in value.split(';')]
class Inversion(object):
    """Tri-state flag recording whether a space group has an inversion centre.

    Subclassed by :class:`CentroSymmetric` / :class:`NonCentroSymmetric`,
    which add a ``transform`` hook.
    """
    # TODO - this needs to be fixed later to make it consistent with the Centering class!
    YES = True        # inversion centre present
    NO = False        # no inversion centre
    UNKNOWN = None    # not (yet) determined
class InversionFactory(object):
    """Builds the appropriate :class:`Inversion` subtype from a LATT code."""

    @staticmethod
    def construct(latt):
        """Return :class:`CentroSymmetric` for a positive LATT value,
        otherwise :class:`NonCentroSymmetric`.

        :param latt: LATT code (anything convertible with ``int``).
        """
        return CentroSymmetric() if int(latt) > 0 else NonCentroSymmetric()
class CentroSymmetric(Inversion):
    """Inversion flavour for centrosymmetric groups."""

    @staticmethod
    def transform(op):
        """Return the operation string *op* composed with -1.

        :param op: an ``"x,y,z"``-style operation string.
        """
        components = tuple(op.split(","))
        return "-1*(%s),-1*(%s),-1*(%s)" % components
class NonCentroSymmetric(Inversion):
    """Inversion flavour for non-centrosymmetric groups."""

    @staticmethod
    def transform(op):
        # No inversion centre: the operation passes through unchanged.
        return op
class Centering(object):
    """A lattice centering: a letter plus its additional lattice points.

    The extra lattice points are fractional translations that, added to
    every symmetry operation, produce the centering-related copies.
    """

    def __init__(self, letter, additional_lattice_points):
        self.letter = letter
        self.additional_lattice_points = additional_lattice_points

    def transform(self, op):
        """Return copies of *op* translated by each additional lattice point.

        :param op: a :class:`SymmetryOperation`-like object exposing
            ``operation_string``.
        :return: list of new operations, one per additional lattice point.
        """
        shifted = []
        for point in self.additional_lattice_points:
            # [x, y, z] expressions followed by the three translations.
            pieces = op.operation_string.split(",") + list(point)
            new_string = "{0}+{3},{1}+{4},{2}+{5}".format(*pieces)
            shifted.append(op.__class__(new_string))
        return shifted

    @classmethod
    def primitive(cls):
        """P: no additional lattice points."""
        return cls('P', [])

    @classmethod
    def body_centered(cls):
        """I: one extra point at the cell centre."""
        return cls('I', [(0.5, 0.5, 0.5)])

    @classmethod
    def hexagonal(cls):
        """H: two extra points in the basal plane."""
        return cls('H', [(Fraction(2, 3), Fraction(1, 3), 0.0),
                         (Fraction(1, 3), Fraction(2, 3), 0.0)])

    @classmethod
    def rhombohedral(cls):
        """R: two extra points (obverse setting values as used here)."""
        return cls('R', [(Fraction(2, 3), Fraction(1, 3), Fraction(1, 3)),
                         (Fraction(1, 3), Fraction(2, 3), Fraction(2, 3))])

    @classmethod
    def face_centered(cls):
        """F: one extra point on each face centre."""
        return cls('F', [(0.0, 0.5, 0.5), (0.5, 0.0, 0.5), (0.5, 0.5, 0.0)])

    @classmethod
    def base_centered_A(cls):
        """A: extra point on the bc face."""
        return cls('A', [(0.0, 0.5, 0.5)])

    @classmethod
    def base_centered_B(cls):
        """B: extra point on the ac face."""
        return cls('B', [(0.5, 0.0, 0.5)])

    @classmethod
    def base_centered_C(cls):
        """C: extra point on the ab face."""
        return cls('C', [(0.5, 0.5, 0.0)])

    @classmethod
    def construct(cls, latt):
        """
        Given the LATT directive in a res file, return the corresponding centered lattice type.

        :param latt: the absolute integer value specified in LATT
        :return: corrected centered lattice type (None for unmapped codes)
        """
        factories = {
            1: cls.primitive,
            2: cls.body_centered,
            3: cls.rhombohedral,  # res-file default setting is rhombohedral
            4: cls.face_centered,
            5: cls.base_centered_A,
            6: cls.base_centered_B,
            7: cls.base_centered_C,
        }
        factory = factories.get(abs(latt))
        if factory is not None:
            return factory()

    def get_LATT_code(self):
        """Return the LATT code for this centering letter (None if unmapped)."""
        codes = {'P': 1, 'I': 2, 'R': 3, 'F': 4, 'A': 5, 'B': 6, 'C': 7}
        return codes.get(self.letter)
class AsymmetricUnit(object):
    """Fractional-coordinate bounds of the asymmetric unit.

    Each constant is ``[[x_min, x_max], [y_min, y_max], [z_min, z_max]]``.
    """
    UNKNOWN = [[0, 1.00], [0, 1.00], [0, 1.00]]   # fall back to the full cell
    FULL = [[0, 1.00], [0, 1.00], [0, 1.00]]
    HALF_X = [[0, 0.50], [0, 1.00], [0, 1.00]]    # x limited to [0, 1/2]
    HALF_Y = [[0, 1.00], [0, 0.50], [0, 1.00]]
    HALF_Z = [[0, 1.00], [0, 1.00], [0, 0.50]]
    QUART_Y = [[0, 1.00], [0, 0.25], [0, 1.00]]   # y limited to [0, 1/4]
    HALF_X_QUART_Y = [[0, 0.50], [0, 0.25], [0, 1.00]]
    HALF_XZ = [[0, 0.50], [0, 1.00], [0, 0.50]]
    HALF_XY = [[0, 0.50], [0, 0.50], [0, 1.00]]
    EIGHT_Z = [[0, 1.00], [0, 1.00], [0, 0.125]]  # z limited to [0, 1/8]
class UniqueAxis(object):
    """Index of the unique axis of a space group: 0=x, 1=y, 2=z."""
    UNKNOWN = -1   # unknown and not-applicable share the same sentinel
    NA = -1
    X = 0
    Y = 1
    Z = 2
def symm_eval(s, data):
    """Evaluate a prepared symmetry-operation string at a point.

    ``x``, ``y`` and ``z`` are bound locally so that evaluating each
    comma-separated component expression yields the transformed
    coordinate.

    NOTE(review): this uses ``eval`` on the operation string -- only safe
    for trusted input (operation strings from files under our control).

    :param s: prepared operation string, e.g. ``"-x,y+1.0/2.0,-z"``
    :param data: object with ``x``/``y``/``z`` attributes (cVector3D)
    :return: cVector3D holding the transformed coordinates
    """
    x, y, z = data.x, data.y, data.z  # referenced by eval() below
    components = s.split(",")
    new_x = eval(components[0])
    new_y = eval(components[1])
    new_z = eval(components[2])
    return cVector3D(new_x, new_y, new_z)
def prepare_operation(s):
    """Normalise a symmetry-operation string for use with ``eval``/``exec``.

    Rewrites the common crystallographic fractions as explicit float
    divisions, removes whitespace and lowercases the result.

    :param s: Input string e.g. "x,y,z+1/2"
    :type s: string
    :rtype: string
    """
    # Substitution order mirrors the original; none of the targets
    # overlap the already-substituted text.
    fraction_map = (
        ("1/4", "1.0/4.0"),
        ("1/2", "1.0/2.0"),
        ("3/4", "3.0/4.0"),
        ("1/3", "1.0/3.0"),
        ("2/3", "2.0/3.0"),
        ("1/6", "1.0/6.0"),
        ("5/6", "5.0/6.0"),
    )
    result = s
    for fraction, expanded in fraction_map:
        result = result.replace(fraction, expanded)
    return result.replace(" ", "").lower()
|
normal
|
{
"blob_id": "88731049227629ed84ff56922d7ac11d4a137984",
"index": 5376,
"step-1": "<mask token>\n\n\nclass Centering(object):\n\n def __init__(self, letter, additional_lattice_points):\n self.letter = letter\n self.additional_lattice_points = additional_lattice_points\n\n def transform(self, op):\n additional_ops = []\n for point in self.additional_lattice_points:\n func = lambda x: '{0}+{3},{1}+{4},{2}+{5}'.format(*(x.split(','\n ) + list(point)))\n additional_ops.append(op.__class__(func(op.operation_string)))\n return additional_ops\n\n @classmethod\n def primitive(cls):\n return cls('P', [])\n <mask token>\n\n @classmethod\n def hexagonal(cls):\n return cls('H', [(Fraction(2, 3), Fraction(1, 3), 0.0), (Fraction(1,\n 3), Fraction(2, 3), 0.0)])\n\n @classmethod\n def rhombohedral(cls):\n return cls('R', [(Fraction(2, 3), Fraction(1, 3), Fraction(1, 3)),\n (Fraction(1, 3), Fraction(2, 3), Fraction(2, 3))])\n\n @classmethod\n def face_centered(cls):\n return cls('F', [(0.0, 0.5, 0.5), (0.5, 0.0, 0.5), (0.5, 0.5, 0.0)])\n\n @classmethod\n def base_centered_A(cls):\n return cls('A', [(0.0, 0.5, 0.5)])\n\n @classmethod\n def base_centered_B(cls):\n return cls('B', [(0.5, 0.0, 0.5)])\n\n @classmethod\n def base_centered_C(cls):\n return cls('C', [(0.5, 0.5, 0.0)])\n <mask token>\n\n def get_LATT_code(self):\n if self.letter == 'P':\n return 1\n elif self.letter == 'I':\n return 2\n elif self.letter == 'R':\n return 3\n elif self.letter == 'F':\n return 4\n elif self.letter == 'A':\n return 5\n elif self.letter == 'B':\n return 6\n elif self.letter == 'C':\n return 7\n\n\nclass AsymmetricUnit(object):\n UNKNOWN = [[0, 1.0], [0, 1.0], [0, 1.0]]\n FULL = [[0, 1.0], [0, 1.0], [0, 1.0]]\n HALF_X = [[0, 0.5], [0, 1.0], [0, 1.0]]\n HALF_Y = [[0, 1.0], [0, 0.5], [0, 1.0]]\n HALF_Z = [[0, 1.0], [0, 1.0], [0, 0.5]]\n QUART_Y = [[0, 1.0], [0, 0.25], [0, 1.0]]\n HALF_X_QUART_Y = [[0, 0.5], [0, 0.25], [0, 1.0]]\n HALF_XZ = [[0, 0.5], [0, 1.0], [0, 0.5]]\n HALF_XY = [[0, 0.5], [0, 0.5], [0, 1.0]]\n EIGHT_Z = [[0, 1.0], [0, 1.0], [0, 0.125]]\n\n\nclass 
UniqueAxis(object):\n UNKNOWN = -1\n NA = -1\n X = 0\n Y = 1\n Z = 2\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SymmetryOperation(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Symmetry(object):\n\n @staticmethod\n def get(value):\n if value is '' or value == 'UNKNOWN':\n return []\n return [SymmetryOperation(v) for v in value.split(';')]\n\n\nclass Inversion(object):\n YES = True\n NO = False\n UNKNOWN = None\n\n\nclass InversionFactory(object):\n\n @staticmethod\n def construct(latt):\n if int(latt) > 0:\n return CentroSymmetric()\n else:\n return NonCentroSymmetric()\n\n\nclass CentroSymmetric(Inversion):\n\n @staticmethod\n def transform(op):\n func = lambda x: '-1*(%s),-1*(%s),-1*(%s)' % tuple(x.split(','))\n return func(op)\n\n\nclass NonCentroSymmetric(Inversion):\n\n @staticmethod\n def transform(op):\n return op\n\n\nclass Centering(object):\n\n def __init__(self, letter, additional_lattice_points):\n self.letter = letter\n self.additional_lattice_points = additional_lattice_points\n\n def transform(self, op):\n additional_ops = []\n for point in self.additional_lattice_points:\n func = lambda x: '{0}+{3},{1}+{4},{2}+{5}'.format(*(x.split(','\n ) + list(point)))\n additional_ops.append(op.__class__(func(op.operation_string)))\n return additional_ops\n\n @classmethod\n def primitive(cls):\n return cls('P', [])\n\n @classmethod\n def body_centered(cls):\n return cls('I', [(0.5, 0.5, 0.5)])\n\n @classmethod\n def hexagonal(cls):\n return cls('H', [(Fraction(2, 3), Fraction(1, 3), 0.0), (Fraction(1,\n 3), Fraction(2, 3), 0.0)])\n\n @classmethod\n def rhombohedral(cls):\n return cls('R', [(Fraction(2, 3), Fraction(1, 3), Fraction(1, 3)),\n (Fraction(1, 3), Fraction(2, 3), Fraction(2, 3))])\n\n @classmethod\n def face_centered(cls):\n return cls('F', [(0.0, 0.5, 0.5), (0.5, 0.0, 0.5), (0.5, 0.5, 0.0)])\n\n @classmethod\n def base_centered_A(cls):\n return cls('A', [(0.0, 0.5, 0.5)])\n\n @classmethod\n def base_centered_B(cls):\n return cls('B', [(0.5, 0.0, 0.5)])\n\n @classmethod\n def 
base_centered_C(cls):\n return cls('C', [(0.5, 0.5, 0.0)])\n\n @classmethod\n def construct(cls, latt):\n \"\"\"\n Given the LATT directive in a res file, return the corresponding centered lattice type.\n\n :param latt: the absolute integer value specified in LATT\n :return: corrected centered lattice type\n \"\"\"\n latt = abs(latt)\n if latt == 1:\n return cls.primitive()\n elif latt == 2:\n return cls.body_centered()\n elif latt == 3:\n return cls.rhombohedral()\n elif latt == 4:\n return cls.face_centered()\n elif latt == 5:\n return cls.base_centered_A()\n elif latt == 6:\n return cls.base_centered_B()\n elif latt == 7:\n return cls.base_centered_C()\n\n def get_LATT_code(self):\n if self.letter == 'P':\n return 1\n elif self.letter == 'I':\n return 2\n elif self.letter == 'R':\n return 3\n elif self.letter == 'F':\n return 4\n elif self.letter == 'A':\n return 5\n elif self.letter == 'B':\n return 6\n elif self.letter == 'C':\n return 7\n\n\nclass AsymmetricUnit(object):\n UNKNOWN = [[0, 1.0], [0, 1.0], [0, 1.0]]\n FULL = [[0, 1.0], [0, 1.0], [0, 1.0]]\n HALF_X = [[0, 0.5], [0, 1.0], [0, 1.0]]\n HALF_Y = [[0, 1.0], [0, 0.5], [0, 1.0]]\n HALF_Z = [[0, 1.0], [0, 1.0], [0, 0.5]]\n QUART_Y = [[0, 1.0], [0, 0.25], [0, 1.0]]\n HALF_X_QUART_Y = [[0, 0.5], [0, 0.25], [0, 1.0]]\n HALF_XZ = [[0, 0.5], [0, 1.0], [0, 0.5]]\n HALF_XY = [[0, 0.5], [0, 0.5], [0, 1.0]]\n EIGHT_Z = [[0, 1.0], [0, 1.0], [0, 0.125]]\n\n\nclass UniqueAxis(object):\n UNKNOWN = -1\n NA = -1\n X = 0\n Y = 1\n Z = 2\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass SymmetryOperation(object):\n\n def __init__(self, operation_string):\n \"\"\"\n Initialize a symmetry operation object from a string representation of symmetry operation.\n\n :param operation_string: A string (as read in from res/cif file) representing a symmetry operation\n for a space group.\n \"\"\"\n self.operation_string = operation_string.lower()\n self.operation_function = None\n self.__set_operation_function()\n\n def __set_operation_function(self):\n \"\"\"\n Convert the string form of the symmetry operation into the form of a mathematical\n function that can be directly applied to a vector to transform a point to a\n symmetry related point.\n \"\"\"\n if self.operation_function is not None:\n return self.operation_function\n else:\n self.operation_function = symm_eval\n\n def transform_scaled_position(self, data):\n \"\"\"\n Applying this symmetry operation on a 3D coordinate to transform it to a symmetry-related\n position in the crystal.\n\n :param data: A vector (:class:`entdecker.core.models.vector3d.cVector3D`)\n representing the fractional coordinates on which the symmetry\n operation will be applied upon.\n :return: Symmetry transformed vector.\n \"\"\"\n return self.operation_function(prepare_operation(self.\n operation_string), data)\n\n def transform_atom(self, atom):\n return Atom(label=atom.label, scaled_position=self.\n transform_scaled_position(atom.scaled_position))\n\n def inversion(self):\n func = lambda x: '-1*(%s),-1*(%s),-1*(%s)' % tuple(x.split(','))\n return self.__class__(func(self.operation_string))\n\n\nclass Symmetry(object):\n\n @staticmethod\n def get(value):\n if value is '' or value == 'UNKNOWN':\n return []\n return [SymmetryOperation(v) for v in value.split(';')]\n\n\nclass Inversion(object):\n YES = True\n NO = False\n UNKNOWN = None\n\n\nclass InversionFactory(object):\n\n @staticmethod\n def construct(latt):\n if int(latt) > 0:\n return CentroSymmetric()\n else:\n return 
NonCentroSymmetric()\n\n\nclass CentroSymmetric(Inversion):\n\n @staticmethod\n def transform(op):\n func = lambda x: '-1*(%s),-1*(%s),-1*(%s)' % tuple(x.split(','))\n return func(op)\n\n\nclass NonCentroSymmetric(Inversion):\n\n @staticmethod\n def transform(op):\n return op\n\n\nclass Centering(object):\n\n def __init__(self, letter, additional_lattice_points):\n self.letter = letter\n self.additional_lattice_points = additional_lattice_points\n\n def transform(self, op):\n additional_ops = []\n for point in self.additional_lattice_points:\n func = lambda x: '{0}+{3},{1}+{4},{2}+{5}'.format(*(x.split(','\n ) + list(point)))\n additional_ops.append(op.__class__(func(op.operation_string)))\n return additional_ops\n\n @classmethod\n def primitive(cls):\n return cls('P', [])\n\n @classmethod\n def body_centered(cls):\n return cls('I', [(0.5, 0.5, 0.5)])\n\n @classmethod\n def hexagonal(cls):\n return cls('H', [(Fraction(2, 3), Fraction(1, 3), 0.0), (Fraction(1,\n 3), Fraction(2, 3), 0.0)])\n\n @classmethod\n def rhombohedral(cls):\n return cls('R', [(Fraction(2, 3), Fraction(1, 3), Fraction(1, 3)),\n (Fraction(1, 3), Fraction(2, 3), Fraction(2, 3))])\n\n @classmethod\n def face_centered(cls):\n return cls('F', [(0.0, 0.5, 0.5), (0.5, 0.0, 0.5), (0.5, 0.5, 0.0)])\n\n @classmethod\n def base_centered_A(cls):\n return cls('A', [(0.0, 0.5, 0.5)])\n\n @classmethod\n def base_centered_B(cls):\n return cls('B', [(0.5, 0.0, 0.5)])\n\n @classmethod\n def base_centered_C(cls):\n return cls('C', [(0.5, 0.5, 0.0)])\n\n @classmethod\n def construct(cls, latt):\n \"\"\"\n Given the LATT directive in a res file, return the corresponding centered lattice type.\n\n :param latt: the absolute integer value specified in LATT\n :return: corrected centered lattice type\n \"\"\"\n latt = abs(latt)\n if latt == 1:\n return cls.primitive()\n elif latt == 2:\n return cls.body_centered()\n elif latt == 3:\n return cls.rhombohedral()\n elif latt == 4:\n return cls.face_centered()\n elif latt == 
5:\n return cls.base_centered_A()\n elif latt == 6:\n return cls.base_centered_B()\n elif latt == 7:\n return cls.base_centered_C()\n\n def get_LATT_code(self):\n if self.letter == 'P':\n return 1\n elif self.letter == 'I':\n return 2\n elif self.letter == 'R':\n return 3\n elif self.letter == 'F':\n return 4\n elif self.letter == 'A':\n return 5\n elif self.letter == 'B':\n return 6\n elif self.letter == 'C':\n return 7\n\n\nclass AsymmetricUnit(object):\n UNKNOWN = [[0, 1.0], [0, 1.0], [0, 1.0]]\n FULL = [[0, 1.0], [0, 1.0], [0, 1.0]]\n HALF_X = [[0, 0.5], [0, 1.0], [0, 1.0]]\n HALF_Y = [[0, 1.0], [0, 0.5], [0, 1.0]]\n HALF_Z = [[0, 1.0], [0, 1.0], [0, 0.5]]\n QUART_Y = [[0, 1.0], [0, 0.25], [0, 1.0]]\n HALF_X_QUART_Y = [[0, 0.5], [0, 0.25], [0, 1.0]]\n HALF_XZ = [[0, 0.5], [0, 1.0], [0, 0.5]]\n HALF_XY = [[0, 0.5], [0, 0.5], [0, 1.0]]\n EIGHT_Z = [[0, 1.0], [0, 1.0], [0, 0.125]]\n\n\nclass UniqueAxis(object):\n UNKNOWN = -1\n NA = -1\n X = 0\n Y = 1\n Z = 2\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass SpaceGroup(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass SymmetryOperation(object):\n\n def __init__(self, operation_string):\n \"\"\"\n Initialize a symmetry operation object from a string representation of symmetry operation.\n\n :param operation_string: A string (as read in from res/cif file) representing a symmetry operation\n for a space group.\n \"\"\"\n self.operation_string = operation_string.lower()\n self.operation_function = None\n self.__set_operation_function()\n\n def __set_operation_function(self):\n \"\"\"\n Convert the string form of the symmetry operation into the form of a mathematical\n function that can be directly applied to a vector to transform a point to a\n symmetry related point.\n \"\"\"\n if self.operation_function is not None:\n return self.operation_function\n else:\n self.operation_function = symm_eval\n\n def transform_scaled_position(self, data):\n \"\"\"\n Applying this symmetry operation on a 3D coordinate to transform it to a symmetry-related\n position in the crystal.\n\n :param data: A vector (:class:`entdecker.core.models.vector3d.cVector3D`)\n representing the fractional coordinates on which the symmetry\n operation will be applied upon.\n :return: Symmetry transformed vector.\n \"\"\"\n return self.operation_function(prepare_operation(self.\n operation_string), data)\n\n def transform_atom(self, atom):\n return Atom(label=atom.label, scaled_position=self.\n transform_scaled_position(atom.scaled_position))\n\n def inversion(self):\n func = lambda x: '-1*(%s),-1*(%s),-1*(%s)' % tuple(x.split(','))\n return self.__class__(func(self.operation_string))\n\n\nclass Symmetry(object):\n\n @staticmethod\n def get(value):\n if value is '' or value == 'UNKNOWN':\n return []\n return [SymmetryOperation(v) for v in value.split(';')]\n\n\nclass Inversion(object):\n YES = True\n NO = False\n UNKNOWN = None\n\n\nclass 
InversionFactory(object):\n\n @staticmethod\n def construct(latt):\n if int(latt) > 0:\n return CentroSymmetric()\n else:\n return NonCentroSymmetric()\n\n\nclass CentroSymmetric(Inversion):\n\n @staticmethod\n def transform(op):\n func = lambda x: '-1*(%s),-1*(%s),-1*(%s)' % tuple(x.split(','))\n return func(op)\n\n\nclass NonCentroSymmetric(Inversion):\n\n @staticmethod\n def transform(op):\n return op\n\n\nclass Centering(object):\n\n def __init__(self, letter, additional_lattice_points):\n self.letter = letter\n self.additional_lattice_points = additional_lattice_points\n\n def transform(self, op):\n additional_ops = []\n for point in self.additional_lattice_points:\n func = lambda x: '{0}+{3},{1}+{4},{2}+{5}'.format(*(x.split(','\n ) + list(point)))\n additional_ops.append(op.__class__(func(op.operation_string)))\n return additional_ops\n\n @classmethod\n def primitive(cls):\n return cls('P', [])\n\n @classmethod\n def body_centered(cls):\n return cls('I', [(0.5, 0.5, 0.5)])\n\n @classmethod\n def hexagonal(cls):\n return cls('H', [(Fraction(2, 3), Fraction(1, 3), 0.0), (Fraction(1,\n 3), Fraction(2, 3), 0.0)])\n\n @classmethod\n def rhombohedral(cls):\n return cls('R', [(Fraction(2, 3), Fraction(1, 3), Fraction(1, 3)),\n (Fraction(1, 3), Fraction(2, 3), Fraction(2, 3))])\n\n @classmethod\n def face_centered(cls):\n return cls('F', [(0.0, 0.5, 0.5), (0.5, 0.0, 0.5), (0.5, 0.5, 0.0)])\n\n @classmethod\n def base_centered_A(cls):\n return cls('A', [(0.0, 0.5, 0.5)])\n\n @classmethod\n def base_centered_B(cls):\n return cls('B', [(0.5, 0.0, 0.5)])\n\n @classmethod\n def base_centered_C(cls):\n return cls('C', [(0.5, 0.5, 0.0)])\n\n @classmethod\n def construct(cls, latt):\n \"\"\"\n Given the LATT directive in a res file, return the corresponding centered lattice type.\n\n :param latt: the absolute integer value specified in LATT\n :return: corrected centered lattice type\n \"\"\"\n latt = abs(latt)\n if latt == 1:\n return cls.primitive()\n elif latt == 2:\n 
return cls.body_centered()\n elif latt == 3:\n return cls.rhombohedral()\n elif latt == 4:\n return cls.face_centered()\n elif latt == 5:\n return cls.base_centered_A()\n elif latt == 6:\n return cls.base_centered_B()\n elif latt == 7:\n return cls.base_centered_C()\n\n def get_LATT_code(self):\n if self.letter == 'P':\n return 1\n elif self.letter == 'I':\n return 2\n elif self.letter == 'R':\n return 3\n elif self.letter == 'F':\n return 4\n elif self.letter == 'A':\n return 5\n elif self.letter == 'B':\n return 6\n elif self.letter == 'C':\n return 7\n\n\nclass AsymmetricUnit(object):\n UNKNOWN = [[0, 1.0], [0, 1.0], [0, 1.0]]\n FULL = [[0, 1.0], [0, 1.0], [0, 1.0]]\n HALF_X = [[0, 0.5], [0, 1.0], [0, 1.0]]\n HALF_Y = [[0, 1.0], [0, 0.5], [0, 1.0]]\n HALF_Z = [[0, 1.0], [0, 1.0], [0, 0.5]]\n QUART_Y = [[0, 1.0], [0, 0.25], [0, 1.0]]\n HALF_X_QUART_Y = [[0, 0.5], [0, 0.25], [0, 1.0]]\n HALF_XZ = [[0, 0.5], [0, 1.0], [0, 0.5]]\n HALF_XY = [[0, 0.5], [0, 0.5], [0, 1.0]]\n EIGHT_Z = [[0, 1.0], [0, 1.0], [0, 0.125]]\n\n\nclass UniqueAxis(object):\n UNKNOWN = -1\n NA = -1\n X = 0\n Y = 1\n Z = 2\n\n\n<mask token>\n",
"step-5": "from core.models import Atom\nfrom core.models.vector3d import cVector3D\nfrom fractions import Fraction\n\n\nclass SpaceGroup(object):\n def __init__(self,\n index=None,\n name=None,\n lattice_system=None,\n lattice_centering=None,\n inversion=None,\n symmetry=None,\n asymmetric_unit=None,\n unique_axis=None):\n self.index = index\n self.name = name\n self.lattice_system = lattice_system\n self.lattice_centering = lattice_centering\n self.inversion = inversion\n self.symmetry = symmetry\n self.asymmetric_unit = asymmetric_unit\n self.unique_axis = unique_axis\n self.non_centering_symmetry = []\n\n self.full_symmetry = []\n self.__compute_full_symmetry()\n\n @property\n def identity(self):\n return SymmetryOperation('x,y,z')\n\n def __append_identity(self):\n if 'x,y,z' not in [i.operation_string for i in self.symmetry]:\n self.symmetry = [self.identity] + self.symmetry\n\n def __add_inversion_symmetry(self):\n for op in self.symmetry:\n self.non_centering_symmetry.append(op)\n\n if isinstance(self.inversion,CentroSymmetric):\n for i in range(len(self.non_centering_symmetry)):\n op = self.non_centering_symmetry[i]\n self.non_centering_symmetry.append(op.inversion())\n\n def __add_centering_symmetry(self):\n if self.lattice_centering:\n for i in range(len(self.full_symmetry)):\n op = self.full_symmetry[i]\n centering_ops = self.lattice_centering.transform(op)\n self.full_symmetry += centering_ops\n\n def __compute_full_symmetry(self):\n self.__append_identity()\n self.__add_inversion_symmetry()\n self.full_symmetry = [op for op in self.non_centering_symmetry]\n self.__add_centering_symmetry()\n return self.full_symmetry\n\n\nclass SymmetryOperation(object):\n def __init__(self, operation_string):\n \"\"\"\n Initialize a symmetry operation object from a string representation of symmetry operation.\n\n :param operation_string: A string (as read in from res/cif file) representing a symmetry operation\n for a space group.\n \"\"\"\n self.operation_string = 
operation_string.lower()\n self.operation_function = None\n self.__set_operation_function()\n\n def __set_operation_function(self):\n \"\"\"\n Convert the string form of the symmetry operation into the form of a mathematical\n function that can be directly applied to a vector to transform a point to a\n symmetry related point.\n \"\"\"\n if self.operation_function is not None:\n return self.operation_function\n else:\n self.operation_function = symm_eval\n\n def transform_scaled_position(self, data):\n \"\"\"\n Applying this symmetry operation on a 3D coordinate to transform it to a symmetry-related\n position in the crystal.\n\n :param data: A vector (:class:`entdecker.core.models.vector3d.cVector3D`)\n representing the fractional coordinates on which the symmetry\n operation will be applied upon.\n :return: Symmetry transformed vector.\n \"\"\"\n return self.operation_function(prepare_operation(self.operation_string), data)\n\n def transform_atom(self, atom):\n return Atom(label=atom.label, scaled_position=self.transform_scaled_position(atom.scaled_position))\n\n def inversion(self):\n func = lambda x: \"-1*(%s),-1*(%s),-1*(%s)\" % tuple(x.split(\",\"))\n return self.__class__(func(self.operation_string))\n\n\nclass Symmetry(object):\n @staticmethod\n def get(value):\n if value is '' or value == 'UNKNOWN':\n return []\n return [SymmetryOperation(v) for v in value.split(';')]\n\n\nclass Inversion(object):\n # TODO - this needs to be fixed later to make it consistent with the Centering class!\n YES = True\n NO = False\n UNKNOWN = None\n\n\nclass InversionFactory(object):\n @staticmethod\n def construct(latt):\n if int(latt) > 0:\n return CentroSymmetric()\n else:\n return NonCentroSymmetric()\n\n\nclass CentroSymmetric(Inversion):\n @staticmethod\n def transform(op):\n func = lambda x: \"-1*(%s),-1*(%s),-1*(%s)\" % tuple(x.split(\",\"))\n return func(op)\n\n\nclass NonCentroSymmetric(Inversion):\n @staticmethod\n def transform(op):\n return op\n\n\nclass 
Centering(object):\n def __init__(self, letter, additional_lattice_points):\n self.letter = letter\n self.additional_lattice_points = additional_lattice_points\n\n def transform(self, op):\n additional_ops = []\n for point in self.additional_lattice_points:\n func = lambda x: \"{0}+{3},{1}+{4},{2}+{5}\".format(*(x.split(\",\") + list(point)))\n additional_ops.append(op.__class__(func(op.operation_string)))\n return additional_ops\n\n @classmethod\n def primitive(cls):\n return cls('P', [])\n\n @classmethod\n def body_centered(cls):\n return cls('I', [(0.5, 0.5, 0.5)])\n\n @classmethod\n def hexagonal(cls):\n return cls('H', [(Fraction(2, 3), Fraction(1, 3), 0.0),\n (Fraction(1, 3), Fraction(2, 3), 0.0)])\n\n @classmethod\n def rhombohedral(cls):\n return cls('R', [(Fraction(2, 3), Fraction(1, 3), Fraction(1, 3)),\n (Fraction(1, 3), Fraction(2, 3), Fraction(2, 3))])\n\n @classmethod\n def face_centered(cls):\n return cls('F', [(0.0, 0.5, 0.5), (0.5, 0.0, 0.5), (0.5, 0.5, 0.0)])\n\n @classmethod\n def base_centered_A(cls):\n return cls('A', [(0.0, 0.5, 0.5)])\n\n @classmethod\n def base_centered_B(cls):\n return cls('B', [(0.5, 0.0, 0.5)])\n\n @classmethod\n def base_centered_C(cls):\n return cls('C', [(0.5, 0.5, 0.0)])\n\n @classmethod\n def construct(cls, latt):\n \"\"\"\n Given the LATT directive in a res file, return the corresponding centered lattice type.\n\n :param latt: the absolute integer value specified in LATT\n :return: corrected centered lattice type\n \"\"\"\n latt = abs(latt)\n if latt == 1:\n return cls.primitive()\n elif latt == 2:\n return cls.body_centered()\n elif latt == 3:\n # default setting from reading in a res file is Rhombohedral\n return cls.rhombohedral()\n elif latt == 4:\n return cls.face_centered()\n elif latt == 5:\n return cls.base_centered_A()\n elif latt == 6:\n return cls.base_centered_B()\n elif latt == 7:\n return cls.base_centered_C()\n\n def get_LATT_code(self):\n if self.letter == 'P':\n return 1\n elif self.letter == 'I':\n 
return 2\n elif self.letter == 'R':\n return 3\n elif self.letter == 'F':\n return 4\n elif self.letter == 'A':\n return 5\n elif self.letter == 'B':\n return 6\n elif self.letter == 'C':\n return 7\n\n\nclass AsymmetricUnit(object):\n UNKNOWN = [[0, 1.00], [0, 1.00], [0, 1.00]]\n FULL = [[0, 1.00], [0, 1.00], [0, 1.00]]\n HALF_X = [[0, 0.50], [0, 1.00], [0, 1.00]]\n HALF_Y = [[0, 1.00], [0, 0.50], [0, 1.00]]\n HALF_Z = [[0, 1.00], [0, 1.00], [0, 0.50]]\n QUART_Y = [[0, 1.00], [0, 0.25], [0, 1.00]]\n HALF_X_QUART_Y = [[0, 0.50], [0, 0.25], [0, 1.00]]\n HALF_XZ = [[0, 0.50], [0, 1.00], [0, 0.50]]\n HALF_XY = [[0, 0.50], [0, 0.50], [0, 1.00]]\n EIGHT_Z = [[0, 1.00], [0, 1.00], [0, 0.125]]\n\n\nclass UniqueAxis(object):\n UNKNOWN = -1\n NA = -1\n X = 0\n Y = 1\n Z = 2\n\n\ndef symm_eval(s, data):\n x, y, z = data.x, data.y, data.z\n out = list(map(eval, s.split(\",\")))\n return cVector3D(out[0], out[1], out[2])\n\n\ndef prepare_operation(s):\n ''' Cleans up a string of a symmetry operation to be used in eval or exec\n\n :param s: Input string e.g. \"x,y,z+1/2\"\n :type s: string\n\n :rtype: string\n '''\n tmp = s.replace(\"1/4\", \"1.0/4.0\")\n tmp = tmp.replace(\"1/2\", \"1.0/2.0\")\n tmp = tmp.replace(\"3/4\", \"3.0/4.0\")\n tmp = tmp.replace(\"1/3\", \"1.0/3.0\")\n tmp = tmp.replace(\"2/3\", \"2.0/3.0\")\n tmp = tmp.replace(\"1/6\", \"1.0/6.0\")\n tmp = tmp.replace(\"5/6\", \"5.0/6.0\")\n return tmp.replace(\" \", \"\").lower()\n",
"step-ids": [
15,
28,
33,
34,
44
]
}
|
[
15,
28,
33,
34,
44
] |
import os
import h5py
import numpy as np
import torch
from datasets.hdf5 import get_test_datasets
from unet3d import utils
from unet3d.config import load_config
from unet3d.model import get_model
# Module-level logger shared by all functions in this prediction script.
logger = utils.get_logger('UNet3DPredictor')
def predict(model, hdf5_dataset, config):
    """
    Return prediction masks by applying the model on the given dataset.

    Patches are predicted one at a time; overlapping patch predictions are
    summed into a full-volume array and divided by a per-voxel visit count,
    i.e. overlaps are averaged.

    Args:
        model (Unet3D): trained 3D UNet model used for prediction
        hdf5_dataset (torch.utils.data.Dataset): input dataset that yields
            (patch, index) pairs covering the whole input volume
        config (dict): parsed configuration; keys used here are
            'model.out_channels' (with 'model.dt_out_channels' as fallback),
            'model.output_heads', 'prediction_channel' (optional) and 'device'

    Returns:
        list of numpy arrays: one averaged prediction map per output head,
        each of shape (C, D, H, W) (C == 1 when 'prediction_channel' is set)
    """
    def _volume_shape(hdf5_dataset):
        # TODO: support multiple internal datasets
        # Spatial shape (DxHxW) of the raw volume; a leading channel axis,
        # if present (ndim > 3), is stripped.
        raw = hdf5_dataset.raws[0]
        if raw.ndim == 3:
            return raw.shape
        else:
            return raw.shape[1:]
    out_channels = config['model'].get('out_channels')
    if out_channels is None:
        # fall back to 'dt_out_channels' (presumably a distance-transform
        # head's channel count -- TODO confirm against the model config)
        out_channels = config['model']['dt_out_channels']
    prediction_channel = config.get('prediction_channel', None)
    if prediction_channel is not None:
        logger.info(f"Using only channel '{prediction_channel}' from the network output")
    device = config['device']
    output_heads = config['model'].get('output_heads', 1)
    logger.info(f'Running prediction on {len(hdf5_dataset)} patches...')
    # dimensionality of the output (CxDxHxW)
    volume_shape = _volume_shape(hdf5_dataset)
    if prediction_channel is None:
        prediction_maps_shape = (out_channels,) + volume_shape
    else:
        # single channel prediction map
        prediction_maps_shape = (1,) + volume_shape
    logger.info(f'The shape of the output prediction maps (CDHW): {prediction_maps_shape}')
    # initialize the output prediction arrays (one per output head)
    prediction_maps = [np.zeros(prediction_maps_shape, dtype='float32') for _ in range(output_heads)]
    # initialize normalization mask in order to average out probabilities of overlapping patches
    normalization_masks = [np.zeros(prediction_maps_shape, dtype='float32') for _ in range(output_heads)]
    # Sets the module in evaluation mode explicitly, otherwise the final Softmax/Sigmoid won't be applied!
    model.eval()
    # Run predictions on the entire input dataset
    with torch.no_grad():
        for patch, index in hdf5_dataset:
            logger.info(f'Predicting slice:{index}')
            # output channel slice: all channels, or a single one when
            # 'prediction_channel' is selected
            if prediction_channel is None:
                channel_slice = slice(0, out_channels)
            else:
                channel_slice = slice(0, 1)
            index = index[1:]  # drop the leading entry to support multi-dim input images (assumes index[0] is a non-spatial axis -- TODO confirm)
            index = (channel_slice,) + index
            # convert patch to torch tensor NxCxDxHxW and send to device we're using batch size of 1
            patch = patch.unsqueeze(dim=0).to(device)
            # forward pass
            predictions = model(patch)
            # wrap predictions into a list if there is only one output head from the network
            if output_heads == 1:
                predictions = [predictions]
            for prediction, prediction_map, normalization_mask in zip(predictions, prediction_maps,
                                                                      normalization_masks):
                # squeeze batch dimension and convert back to numpy array
                prediction = prediction.squeeze(dim=0).cpu().numpy()
                if prediction_channel is not None:
                    # use only the 'prediction_channel'
                    logger.info(f"Using channel '{prediction_channel}'...")
                    prediction = np.expand_dims(prediction[prediction_channel], axis=0)
                # unpad in order to avoid block artifacts in the output probability maps
                u_prediction, u_index = utils.unpad(prediction, index, volume_shape)
                # accumulate probabilities into the output prediction array
                prediction_map[u_index] += u_prediction
                # count voxel visits for normalization
                normalization_mask[u_index] += 1
    # average overlapping contributions voxel-wise
    return [prediction_map / normalization_mask for prediction_map, normalization_mask in
            zip(prediction_maps, normalization_masks)]
def save_predictions(prediction_maps, output_file, dataset_names):
    """
    Write each prediction map into its own dataset of a single HDF5 file.

    Args:
        prediction_maps (list): numpy arrays, one prediction map per output head
        output_file (string): path of the H5 file to create (overwritten if present)
        dataset_names (list): H5 dataset name to use for each prediction map
    """
    assert len(prediction_maps) == len(dataset_names), 'Each prediction map has to have a corresponding dataset name'
    logger.info(f'Saving predictions to: {output_file}...')
    with h5py.File(output_file, 'w') as output_h5:
        for name, pmap in zip(dataset_names, prediction_maps):
            logger.info(f"Creating dataset '{name}'...")
            output_h5.create_dataset(name, data=pmap, compression='gzip')
def _get_output_file(dataset, suffix='_predictions'):
    """Derive the output H5 path from the dataset's input file path."""
    base, _ = os.path.splitext(dataset.file_path)
    return base + suffix + '.h5'
def _get_dataset_names(config, number_of_datasets):
    """
    Resolve the H5 dataset names under which the prediction maps are saved.

    Args:
        config (dict): top-level config; 'dest_dataset_name' may be a single
            string or a list of strings
        number_of_datasets (int): number of prediction maps to be saved

    Returns:
        list: one dataset name per prediction map; defaults to 'predictions'
        (or 'predictions0', 'predictions1', ... for multiple output heads)
    """
    # NOTE: a stray debug `print(dataset_names)` was removed here; diagnostics
    # in this script go through the module logger.
    dataset_names = config.get('dest_dataset_name')
    if dataset_names is not None:
        if isinstance(dataset_names, str):
            return [dataset_names]
        else:
            return dataset_names
    else:
        default_prefix = 'predictions'
        if number_of_datasets == 1:
            return [default_prefix]
        else:
            return [f'{default_prefix}{i}' for i in range(number_of_datasets)]
def main():
    """Entry point: load config and model, predict on each test dataset, save maps."""
    # Load configuration (the stray debug `print(config)` now goes through the logger)
    config = load_config()
    logger.info(f'Config: {config}')

    # Create the model
    model = get_model(config)

    # Load trained weights and move the model to the configured device
    model_path = config['model_path']
    logger.info(f'Loading model from {model_path}...')
    utils.load_checkpoint(model_path, model)
    logger.info(f"Sending the model to '{config['device']}'")
    model = model.to(config['device'])

    logger.info('Loading HDF5 datasets...')
    for test_dataset in get_test_datasets(config):
        logger.info(f"Processing '{test_dataset.file_path}'...")
        # run the model prediction on the entire dataset
        predictions = predict(model, test_dataset, config)
        # save the resulting probability maps
        output_file = _get_output_file(test_dataset)
        dataset_names = _get_dataset_names(config, len(predictions))
        save_predictions(predictions, output_file, dataset_names)


if __name__ == '__main__':
    main()
|
normal
|
{
"blob_id": "6fba773025268d724283e510a03d0592282adb0a",
"index": 1780,
"step-1": "<mask token>\n\n\ndef save_predictions(prediction_maps, output_file, dataset_names):\n \"\"\"\n Saving probability maps to a given output H5 file. If 'average_channels'\n is set to True average the probability_maps across the the channel axis\n (useful in case where each channel predicts semantically the same thing).\n\n Args:\n prediction_maps (list): list of numpy array containing prediction maps in separate channels\n output_file (string): path to the output H5 file\n dataset_names (list): list of dataset names inside H5 file where the prediction maps will be saved\n \"\"\"\n assert len(prediction_maps) == len(dataset_names\n ), 'Each prediction map has to have a corresponding dataset name'\n logger.info(f'Saving predictions to: {output_file}...')\n with h5py.File(output_file, 'w') as output_h5:\n for prediction_map, dataset_name in zip(prediction_maps, dataset_names\n ):\n logger.info(f\"Creating dataset '{dataset_name}'...\")\n output_h5.create_dataset(dataset_name, data=prediction_map,\n compression='gzip')\n\n\n<mask token>\n\n\ndef main():\n config = load_config()\n print(config)\n model = get_model(config)\n model_path = config['model_path']\n logger.info(f'Loading model from {model_path}...')\n utils.load_checkpoint(model_path, model)\n logger.info(f\"Sending the model to '{config['device']}'\")\n model = model.to(config['device'])\n logger.info('Loading HDF5 datasets...')\n for test_dataset in get_test_datasets(config):\n logger.info(f\"Processing '{test_dataset.file_path}'...\")\n predictions = predict(model, test_dataset, config)\n output_file = _get_output_file(test_dataset)\n dataset_names = _get_dataset_names(config, len(predictions))\n save_predictions(predictions, output_file, dataset_names)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef predict(model, hdf5_dataset, config):\n \"\"\"\n Return prediction masks by applying the model on the given dataset\n\n Args:\n model (Unet3D): trained 3D UNet model used for prediction\n hdf5_dataset (torch.utils.data.Dataset): input dataset\n out_channels (int): number of channels in the network output\n device (torch.Device): device to run the prediction on\n\n Returns:\n prediction_maps (numpy array): prediction masks for given dataset\n \"\"\"\n\n def _volume_shape(hdf5_dataset):\n raw = hdf5_dataset.raws[0]\n if raw.ndim == 3:\n return raw.shape\n else:\n return raw.shape[1:]\n out_channels = config['model'].get('out_channels')\n if out_channels is None:\n out_channels = config['model']['dt_out_channels']\n prediction_channel = config.get('prediction_channel', None)\n if prediction_channel is not None:\n logger.info(\n f\"Using only channel '{prediction_channel}' from the network output\"\n )\n device = config['device']\n output_heads = config['model'].get('output_heads', 1)\n logger.info(f'Running prediction on {len(hdf5_dataset)} patches...')\n volume_shape = _volume_shape(hdf5_dataset)\n if prediction_channel is None:\n prediction_maps_shape = (out_channels,) + volume_shape\n else:\n prediction_maps_shape = (1,) + volume_shape\n logger.info(\n f'The shape of the output prediction maps (CDHW): {prediction_maps_shape}'\n )\n prediction_maps = [np.zeros(prediction_maps_shape, dtype='float32') for\n _ in range(output_heads)]\n normalization_masks = [np.zeros(prediction_maps_shape, dtype='float32') for\n _ in range(output_heads)]\n model.eval()\n with torch.no_grad():\n for patch, index in hdf5_dataset:\n logger.info(f'Predicting slice:{index}')\n if prediction_channel is None:\n channel_slice = slice(0, out_channels)\n else:\n channel_slice = slice(0, 1)\n index = index[1:]\n index = (channel_slice,) + index\n patch = patch.unsqueeze(dim=0).to(device)\n predictions = model(patch)\n if output_heads == 1:\n predictions = 
[predictions]\n for prediction, prediction_map, normalization_mask in zip(\n predictions, prediction_maps, normalization_masks):\n prediction = prediction.squeeze(dim=0).cpu().numpy()\n if prediction_channel is not None:\n logger.info(f\"Using channel '{prediction_channel}'...\")\n prediction = np.expand_dims(prediction[\n prediction_channel], axis=0)\n u_prediction, u_index = utils.unpad(prediction, index,\n volume_shape)\n prediction_map[u_index] += u_prediction\n normalization_mask[u_index] += 1\n return [(prediction_map / normalization_mask) for prediction_map,\n normalization_mask in zip(prediction_maps, normalization_masks)]\n\n\ndef save_predictions(prediction_maps, output_file, dataset_names):\n \"\"\"\n Saving probability maps to a given output H5 file. If 'average_channels'\n is set to True average the probability_maps across the the channel axis\n (useful in case where each channel predicts semantically the same thing).\n\n Args:\n prediction_maps (list): list of numpy array containing prediction maps in separate channels\n output_file (string): path to the output H5 file\n dataset_names (list): list of dataset names inside H5 file where the prediction maps will be saved\n \"\"\"\n assert len(prediction_maps) == len(dataset_names\n ), 'Each prediction map has to have a corresponding dataset name'\n logger.info(f'Saving predictions to: {output_file}...')\n with h5py.File(output_file, 'w') as output_h5:\n for prediction_map, dataset_name in zip(prediction_maps, dataset_names\n ):\n logger.info(f\"Creating dataset '{dataset_name}'...\")\n output_h5.create_dataset(dataset_name, data=prediction_map,\n compression='gzip')\n\n\ndef _get_output_file(dataset, suffix='_predictions'):\n return f'{os.path.splitext(dataset.file_path)[0]}{suffix}.h5'\n\n\ndef _get_dataset_names(config, number_of_datasets):\n dataset_names = config.get('dest_dataset_name')\n print(dataset_names)\n if dataset_names is not None:\n if isinstance(dataset_names, str):\n return 
[dataset_names]\n else:\n return dataset_names\n else:\n default_prefix = 'predictions'\n if number_of_datasets == 1:\n return [default_prefix]\n else:\n return [f'{default_prefix}{i}' for i in range(number_of_datasets)]\n\n\ndef main():\n config = load_config()\n print(config)\n model = get_model(config)\n model_path = config['model_path']\n logger.info(f'Loading model from {model_path}...')\n utils.load_checkpoint(model_path, model)\n logger.info(f\"Sending the model to '{config['device']}'\")\n model = model.to(config['device'])\n logger.info('Loading HDF5 datasets...')\n for test_dataset in get_test_datasets(config):\n logger.info(f\"Processing '{test_dataset.file_path}'...\")\n predictions = predict(model, test_dataset, config)\n output_file = _get_output_file(test_dataset)\n dataset_names = _get_dataset_names(config, len(predictions))\n save_predictions(predictions, output_file, dataset_names)\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nlogger = utils.get_logger('UNet3DPredictor')\n\n\ndef predict(model, hdf5_dataset, config):\n \"\"\"\n Return prediction masks by applying the model on the given dataset\n\n Args:\n model (Unet3D): trained 3D UNet model used for prediction\n hdf5_dataset (torch.utils.data.Dataset): input dataset\n out_channels (int): number of channels in the network output\n device (torch.Device): device to run the prediction on\n\n Returns:\n prediction_maps (numpy array): prediction masks for given dataset\n \"\"\"\n\n def _volume_shape(hdf5_dataset):\n raw = hdf5_dataset.raws[0]\n if raw.ndim == 3:\n return raw.shape\n else:\n return raw.shape[1:]\n out_channels = config['model'].get('out_channels')\n if out_channels is None:\n out_channels = config['model']['dt_out_channels']\n prediction_channel = config.get('prediction_channel', None)\n if prediction_channel is not None:\n logger.info(\n f\"Using only channel '{prediction_channel}' from the network output\"\n )\n device = config['device']\n output_heads = config['model'].get('output_heads', 1)\n logger.info(f'Running prediction on {len(hdf5_dataset)} patches...')\n volume_shape = _volume_shape(hdf5_dataset)\n if prediction_channel is None:\n prediction_maps_shape = (out_channels,) + volume_shape\n else:\n prediction_maps_shape = (1,) + volume_shape\n logger.info(\n f'The shape of the output prediction maps (CDHW): {prediction_maps_shape}'\n )\n prediction_maps = [np.zeros(prediction_maps_shape, dtype='float32') for\n _ in range(output_heads)]\n normalization_masks = [np.zeros(prediction_maps_shape, dtype='float32') for\n _ in range(output_heads)]\n model.eval()\n with torch.no_grad():\n for patch, index in hdf5_dataset:\n logger.info(f'Predicting slice:{index}')\n if prediction_channel is None:\n channel_slice = slice(0, out_channels)\n else:\n channel_slice = slice(0, 1)\n index = index[1:]\n index = (channel_slice,) + index\n patch = patch.unsqueeze(dim=0).to(device)\n predictions = model(patch)\n 
if output_heads == 1:\n predictions = [predictions]\n for prediction, prediction_map, normalization_mask in zip(\n predictions, prediction_maps, normalization_masks):\n prediction = prediction.squeeze(dim=0).cpu().numpy()\n if prediction_channel is not None:\n logger.info(f\"Using channel '{prediction_channel}'...\")\n prediction = np.expand_dims(prediction[\n prediction_channel], axis=0)\n u_prediction, u_index = utils.unpad(prediction, index,\n volume_shape)\n prediction_map[u_index] += u_prediction\n normalization_mask[u_index] += 1\n return [(prediction_map / normalization_mask) for prediction_map,\n normalization_mask in zip(prediction_maps, normalization_masks)]\n\n\ndef save_predictions(prediction_maps, output_file, dataset_names):\n \"\"\"\n Saving probability maps to a given output H5 file. If 'average_channels'\n is set to True average the probability_maps across the the channel axis\n (useful in case where each channel predicts semantically the same thing).\n\n Args:\n prediction_maps (list): list of numpy array containing prediction maps in separate channels\n output_file (string): path to the output H5 file\n dataset_names (list): list of dataset names inside H5 file where the prediction maps will be saved\n \"\"\"\n assert len(prediction_maps) == len(dataset_names\n ), 'Each prediction map has to have a corresponding dataset name'\n logger.info(f'Saving predictions to: {output_file}...')\n with h5py.File(output_file, 'w') as output_h5:\n for prediction_map, dataset_name in zip(prediction_maps, dataset_names\n ):\n logger.info(f\"Creating dataset '{dataset_name}'...\")\n output_h5.create_dataset(dataset_name, data=prediction_map,\n compression='gzip')\n\n\ndef _get_output_file(dataset, suffix='_predictions'):\n return f'{os.path.splitext(dataset.file_path)[0]}{suffix}.h5'\n\n\ndef _get_dataset_names(config, number_of_datasets):\n dataset_names = config.get('dest_dataset_name')\n print(dataset_names)\n if dataset_names is not None:\n if 
isinstance(dataset_names, str):\n return [dataset_names]\n else:\n return dataset_names\n else:\n default_prefix = 'predictions'\n if number_of_datasets == 1:\n return [default_prefix]\n else:\n return [f'{default_prefix}{i}' for i in range(number_of_datasets)]\n\n\ndef main():\n config = load_config()\n print(config)\n model = get_model(config)\n model_path = config['model_path']\n logger.info(f'Loading model from {model_path}...')\n utils.load_checkpoint(model_path, model)\n logger.info(f\"Sending the model to '{config['device']}'\")\n model = model.to(config['device'])\n logger.info('Loading HDF5 datasets...')\n for test_dataset in get_test_datasets(config):\n logger.info(f\"Processing '{test_dataset.file_path}'...\")\n predictions = predict(model, test_dataset, config)\n output_file = _get_output_file(test_dataset)\n dataset_names = _get_dataset_names(config, len(predictions))\n save_predictions(predictions, output_file, dataset_names)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import os\nimport h5py\nimport numpy as np\nimport torch\nfrom datasets.hdf5 import get_test_datasets\nfrom unet3d import utils\nfrom unet3d.config import load_config\nfrom unet3d.model import get_model\nlogger = utils.get_logger('UNet3DPredictor')\n\n\ndef predict(model, hdf5_dataset, config):\n \"\"\"\n Return prediction masks by applying the model on the given dataset\n\n Args:\n model (Unet3D): trained 3D UNet model used for prediction\n hdf5_dataset (torch.utils.data.Dataset): input dataset\n out_channels (int): number of channels in the network output\n device (torch.Device): device to run the prediction on\n\n Returns:\n prediction_maps (numpy array): prediction masks for given dataset\n \"\"\"\n\n def _volume_shape(hdf5_dataset):\n raw = hdf5_dataset.raws[0]\n if raw.ndim == 3:\n return raw.shape\n else:\n return raw.shape[1:]\n out_channels = config['model'].get('out_channels')\n if out_channels is None:\n out_channels = config['model']['dt_out_channels']\n prediction_channel = config.get('prediction_channel', None)\n if prediction_channel is not None:\n logger.info(\n f\"Using only channel '{prediction_channel}' from the network output\"\n )\n device = config['device']\n output_heads = config['model'].get('output_heads', 1)\n logger.info(f'Running prediction on {len(hdf5_dataset)} patches...')\n volume_shape = _volume_shape(hdf5_dataset)\n if prediction_channel is None:\n prediction_maps_shape = (out_channels,) + volume_shape\n else:\n prediction_maps_shape = (1,) + volume_shape\n logger.info(\n f'The shape of the output prediction maps (CDHW): {prediction_maps_shape}'\n )\n prediction_maps = [np.zeros(prediction_maps_shape, dtype='float32') for\n _ in range(output_heads)]\n normalization_masks = [np.zeros(prediction_maps_shape, dtype='float32') for\n _ in range(output_heads)]\n model.eval()\n with torch.no_grad():\n for patch, index in hdf5_dataset:\n logger.info(f'Predicting slice:{index}')\n if prediction_channel is None:\n channel_slice = 
slice(0, out_channels)\n else:\n channel_slice = slice(0, 1)\n index = index[1:]\n index = (channel_slice,) + index\n patch = patch.unsqueeze(dim=0).to(device)\n predictions = model(patch)\n if output_heads == 1:\n predictions = [predictions]\n for prediction, prediction_map, normalization_mask in zip(\n predictions, prediction_maps, normalization_masks):\n prediction = prediction.squeeze(dim=0).cpu().numpy()\n if prediction_channel is not None:\n logger.info(f\"Using channel '{prediction_channel}'...\")\n prediction = np.expand_dims(prediction[\n prediction_channel], axis=0)\n u_prediction, u_index = utils.unpad(prediction, index,\n volume_shape)\n prediction_map[u_index] += u_prediction\n normalization_mask[u_index] += 1\n return [(prediction_map / normalization_mask) for prediction_map,\n normalization_mask in zip(prediction_maps, normalization_masks)]\n\n\ndef save_predictions(prediction_maps, output_file, dataset_names):\n \"\"\"\n Saving probability maps to a given output H5 file. 
If 'average_channels'\n is set to True average the probability_maps across the the channel axis\n (useful in case where each channel predicts semantically the same thing).\n\n Args:\n prediction_maps (list): list of numpy array containing prediction maps in separate channels\n output_file (string): path to the output H5 file\n dataset_names (list): list of dataset names inside H5 file where the prediction maps will be saved\n \"\"\"\n assert len(prediction_maps) == len(dataset_names\n ), 'Each prediction map has to have a corresponding dataset name'\n logger.info(f'Saving predictions to: {output_file}...')\n with h5py.File(output_file, 'w') as output_h5:\n for prediction_map, dataset_name in zip(prediction_maps, dataset_names\n ):\n logger.info(f\"Creating dataset '{dataset_name}'...\")\n output_h5.create_dataset(dataset_name, data=prediction_map,\n compression='gzip')\n\n\ndef _get_output_file(dataset, suffix='_predictions'):\n return f'{os.path.splitext(dataset.file_path)[0]}{suffix}.h5'\n\n\ndef _get_dataset_names(config, number_of_datasets):\n dataset_names = config.get('dest_dataset_name')\n print(dataset_names)\n if dataset_names is not None:\n if isinstance(dataset_names, str):\n return [dataset_names]\n else:\n return dataset_names\n else:\n default_prefix = 'predictions'\n if number_of_datasets == 1:\n return [default_prefix]\n else:\n return [f'{default_prefix}{i}' for i in range(number_of_datasets)]\n\n\ndef main():\n config = load_config()\n print(config)\n model = get_model(config)\n model_path = config['model_path']\n logger.info(f'Loading model from {model_path}...')\n utils.load_checkpoint(model_path, model)\n logger.info(f\"Sending the model to '{config['device']}'\")\n model = model.to(config['device'])\n logger.info('Loading HDF5 datasets...')\n for test_dataset in get_test_datasets(config):\n logger.info(f\"Processing '{test_dataset.file_path}'...\")\n predictions = predict(model, test_dataset, config)\n output_file = 
_get_output_file(test_dataset)\n dataset_names = _get_dataset_names(config, len(predictions))\n save_predictions(predictions, output_file, dataset_names)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import os\n\nimport h5py\nimport numpy as np\nimport torch\n\nfrom datasets.hdf5 import get_test_datasets\nfrom unet3d import utils\nfrom unet3d.config import load_config\nfrom unet3d.model import get_model\n\nlogger = utils.get_logger('UNet3DPredictor')\n\n\ndef predict(model, hdf5_dataset, config):\n \"\"\"\n Return prediction masks by applying the model on the given dataset\n\n Args:\n model (Unet3D): trained 3D UNet model used for prediction\n hdf5_dataset (torch.utils.data.Dataset): input dataset\n out_channels (int): number of channels in the network output\n device (torch.Device): device to run the prediction on\n\n Returns:\n prediction_maps (numpy array): prediction masks for given dataset\n \"\"\"\n\n def _volume_shape(hdf5_dataset):\n # TODO: support multiple internal datasets\n raw = hdf5_dataset.raws[0]\n if raw.ndim == 3:\n return raw.shape\n else:\n return raw.shape[1:]\n\n out_channels = config['model'].get('out_channels')\n if out_channels is None:\n out_channels = config['model']['dt_out_channels']\n\n prediction_channel = config.get('prediction_channel', None)\n if prediction_channel is not None:\n logger.info(f\"Using only channel '{prediction_channel}' from the network output\")\n\n device = config['device']\n output_heads = config['model'].get('output_heads', 1)\n\n logger.info(f'Running prediction on {len(hdf5_dataset)} patches...')\n # dimensionality of the the output (CxDxHxW)\n volume_shape = _volume_shape(hdf5_dataset)\n if prediction_channel is None:\n prediction_maps_shape = (out_channels,) + volume_shape\n else:\n # single channel prediction map\n prediction_maps_shape = (1,) + volume_shape\n\n logger.info(f'The shape of the output prediction maps (CDHW): {prediction_maps_shape}')\n\n # initialize the output prediction arrays\n prediction_maps = [np.zeros(prediction_maps_shape, dtype='float32') for _ in range(output_heads)]\n # initialize normalization mask in order to average out probabilities of overlapping patches\n 
normalization_masks = [np.zeros(prediction_maps_shape, dtype='float32') for _ in range(output_heads)]\n\n # Sets the module in evaluation mode explicitly, otherwise the final Softmax/Sigmoid won't be applied!\n model.eval()\n # Run predictions on the entire input dataset\n with torch.no_grad():\n for patch, index in hdf5_dataset:\n logger.info(f'Predicting slice:{index}')\n\n # save patch index: (C,D,H,W)\n if prediction_channel is None:\n channel_slice = slice(0, out_channels)\n else:\n channel_slice = slice(0, 1)\n\n index = index[1:] #dk: support input image with mulitple dims\n index = (channel_slice,) + index\n\n # convert patch to torch tensor NxCxDxHxW and send to device we're using batch size of 1\n patch = patch.unsqueeze(dim=0).to(device)\n\n # forward pass\n predictions = model(patch)\n # wrap predictions into a list if there is only one output head from the network\n if output_heads == 1:\n predictions = [predictions]\n\n for prediction, prediction_map, normalization_mask in zip(predictions, prediction_maps,\n normalization_masks):\n # squeeze batch dimension and convert back to numpy array\n prediction = prediction.squeeze(dim=0).cpu().numpy()\n if prediction_channel is not None:\n # use only the 'prediction_channel'\n logger.info(f\"Using channel '{prediction_channel}'...\")\n prediction = np.expand_dims(prediction[prediction_channel], axis=0)\n\n # unpad in order to avoid block artifacts in the output probability maps\n u_prediction, u_index = utils.unpad(prediction, index, volume_shape)\n # accumulate probabilities into the output prediction array\n prediction_map[u_index] += u_prediction\n # count voxel visits for normalization\n normalization_mask[u_index] += 1\n\n return [prediction_map / normalization_mask for prediction_map, normalization_mask in\n zip(prediction_maps, normalization_masks)]\n\n\ndef save_predictions(prediction_maps, output_file, dataset_names):\n \"\"\"\n Saving probability maps to a given output H5 file. 
If 'average_channels'\n is set to True average the probability_maps across the the channel axis\n (useful in case where each channel predicts semantically the same thing).\n\n Args:\n prediction_maps (list): list of numpy array containing prediction maps in separate channels\n output_file (string): path to the output H5 file\n dataset_names (list): list of dataset names inside H5 file where the prediction maps will be saved\n \"\"\"\n assert len(prediction_maps) == len(dataset_names), 'Each prediction map has to have a corresponding dataset name'\n logger.info(f'Saving predictions to: {output_file}...')\n\n with h5py.File(output_file, \"w\") as output_h5:\n for prediction_map, dataset_name in zip(prediction_maps, dataset_names):\n logger.info(f\"Creating dataset '{dataset_name}'...\")\n output_h5.create_dataset(dataset_name, data=prediction_map, compression=\"gzip\")\n\n\ndef _get_output_file(dataset, suffix='_predictions'):\n return f'{os.path.splitext(dataset.file_path)[0]}{suffix}.h5'\n\n\ndef _get_dataset_names(config, number_of_datasets):\n dataset_names = config.get('dest_dataset_name')\n print(dataset_names)\n if dataset_names is not None:\n if isinstance(dataset_names, str):\n return [dataset_names]\n else:\n return dataset_names\n else:\n default_prefix = 'predictions'\n if number_of_datasets == 1:\n return [default_prefix]\n else:\n return [f'{default_prefix}{i}' for i in range(number_of_datasets)]\n\n\ndef main():\n # Load configuration\n config = load_config()\n print(config)\n\n # Create the model\n model = get_model(config)\n\n # Load model state\n model_path = config['model_path']\n logger.info(f'Loading model from {model_path}...')\n utils.load_checkpoint(model_path, model)\n logger.info(f\"Sending the model to '{config['device']}'\")\n model = model.to(config['device'])\n\n logger.info('Loading HDF5 datasets...')\n for test_dataset in get_test_datasets(config):\n logger.info(f\"Processing '{test_dataset.file_path}'...\")\n # run the model 
prediction on the entire dataset\n predictions = predict(model, test_dataset, config)\n # save the resulting probability maps\n output_file = _get_output_file(test_dataset)\n dataset_names = _get_dataset_names(config, len(predictions))\n save_predictions(predictions, output_file, dataset_names)\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
2,
6,
7,
8,
9
]
}
|
[
2,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
class CompanyInfo(object):
def __init__(self):
self._alter_list = None
self._basic_info = None
self._case_info_list = None
self._entinv_list = None
self._fr_position_list = None
self._frinv_list = None
self._person_list = None
self._share_holder_list = None
<|reserved_special_token_0|>
@alter_list.setter
def alter_list(self, value):
if isinstance(value, list):
self._alter_list = list()
for i in value:
if isinstance(i, EpInfo):
self._alter_list.append(i)
else:
self._alter_list.append(EpInfo.from_alipay_dict(i))
@property
def basic_info(self):
return self._basic_info
<|reserved_special_token_0|>
@property
def case_info_list(self):
return self._case_info_list
@case_info_list.setter
def case_info_list(self, value):
if isinstance(value, list):
self._case_info_list = list()
for i in value:
if isinstance(i, EpInfo):
self._case_info_list.append(i)
else:
self._case_info_list.append(EpInfo.from_alipay_dict(i))
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@property
def fr_position_list(self):
return self._fr_position_list
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@frinv_list.setter
def frinv_list(self, value):
if isinstance(value, list):
self._frinv_list = list()
for i in value:
if isinstance(i, EpInfo):
self._frinv_list.append(i)
else:
self._frinv_list.append(EpInfo.from_alipay_dict(i))
@property
def person_list(self):
return self._person_list
@person_list.setter
def person_list(self, value):
if isinstance(value, list):
self._person_list = list()
for i in value:
if isinstance(i, EpInfo):
self._person_list.append(i)
else:
self._person_list.append(EpInfo.from_alipay_dict(i))
@property
def share_holder_list(self):
return self._share_holder_list
@share_holder_list.setter
def share_holder_list(self, value):
if isinstance(value, list):
self._share_holder_list = list()
for i in value:
if isinstance(i, EpInfo):
self._share_holder_list.append(i)
else:
self._share_holder_list.append(EpInfo.from_alipay_dict(i))
def to_alipay_dict(self):
params = dict()
if self.alter_list:
if isinstance(self.alter_list, list):
for i in range(0, len(self.alter_list)):
element = self.alter_list[i]
if hasattr(element, 'to_alipay_dict'):
self.alter_list[i] = element.to_alipay_dict()
if hasattr(self.alter_list, 'to_alipay_dict'):
params['alter_list'] = self.alter_list.to_alipay_dict()
else:
params['alter_list'] = self.alter_list
if self.basic_info:
if hasattr(self.basic_info, 'to_alipay_dict'):
params['basic_info'] = self.basic_info.to_alipay_dict()
else:
params['basic_info'] = self.basic_info
if self.case_info_list:
if isinstance(self.case_info_list, list):
for i in range(0, len(self.case_info_list)):
element = self.case_info_list[i]
if hasattr(element, 'to_alipay_dict'):
self.case_info_list[i] = element.to_alipay_dict()
if hasattr(self.case_info_list, 'to_alipay_dict'):
params['case_info_list'] = self.case_info_list.to_alipay_dict()
else:
params['case_info_list'] = self.case_info_list
if self.entinv_list:
if isinstance(self.entinv_list, list):
for i in range(0, len(self.entinv_list)):
element = self.entinv_list[i]
if hasattr(element, 'to_alipay_dict'):
self.entinv_list[i] = element.to_alipay_dict()
if hasattr(self.entinv_list, 'to_alipay_dict'):
params['entinv_list'] = self.entinv_list.to_alipay_dict()
else:
params['entinv_list'] = self.entinv_list
if self.fr_position_list:
if isinstance(self.fr_position_list, list):
for i in range(0, len(self.fr_position_list)):
element = self.fr_position_list[i]
if hasattr(element, 'to_alipay_dict'):
self.fr_position_list[i] = element.to_alipay_dict()
if hasattr(self.fr_position_list, 'to_alipay_dict'):
params['fr_position_list'
] = self.fr_position_list.to_alipay_dict()
else:
params['fr_position_list'] = self.fr_position_list
if self.frinv_list:
if isinstance(self.frinv_list, list):
for i in range(0, len(self.frinv_list)):
element = self.frinv_list[i]
if hasattr(element, 'to_alipay_dict'):
self.frinv_list[i] = element.to_alipay_dict()
if hasattr(self.frinv_list, 'to_alipay_dict'):
params['frinv_list'] = self.frinv_list.to_alipay_dict()
else:
params['frinv_list'] = self.frinv_list
if self.person_list:
if isinstance(self.person_list, list):
for i in range(0, len(self.person_list)):
element = self.person_list[i]
if hasattr(element, 'to_alipay_dict'):
self.person_list[i] = element.to_alipay_dict()
if hasattr(self.person_list, 'to_alipay_dict'):
params['person_list'] = self.person_list.to_alipay_dict()
else:
params['person_list'] = self.person_list
if self.share_holder_list:
if isinstance(self.share_holder_list, list):
for i in range(0, len(self.share_holder_list)):
element = self.share_holder_list[i]
if hasattr(element, 'to_alipay_dict'):
self.share_holder_list[i] = element.to_alipay_dict()
if hasattr(self.share_holder_list, 'to_alipay_dict'):
params['share_holder_list'
] = self.share_holder_list.to_alipay_dict()
else:
params['share_holder_list'] = self.share_holder_list
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = CompanyInfo()
if 'alter_list' in d:
o.alter_list = d['alter_list']
if 'basic_info' in d:
o.basic_info = d['basic_info']
if 'case_info_list' in d:
o.case_info_list = d['case_info_list']
if 'entinv_list' in d:
o.entinv_list = d['entinv_list']
if 'fr_position_list' in d:
o.fr_position_list = d['fr_position_list']
if 'frinv_list' in d:
o.frinv_list = d['frinv_list']
if 'person_list' in d:
o.person_list = d['person_list']
if 'share_holder_list' in d:
o.share_holder_list = d['share_holder_list']
return o
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CompanyInfo(object):
def __init__(self):
self._alter_list = None
self._basic_info = None
self._case_info_list = None
self._entinv_list = None
self._fr_position_list = None
self._frinv_list = None
self._person_list = None
self._share_holder_list = None
<|reserved_special_token_0|>
@alter_list.setter
def alter_list(self, value):
if isinstance(value, list):
self._alter_list = list()
for i in value:
if isinstance(i, EpInfo):
self._alter_list.append(i)
else:
self._alter_list.append(EpInfo.from_alipay_dict(i))
@property
def basic_info(self):
return self._basic_info
@basic_info.setter
def basic_info(self, value):
if isinstance(value, EpInfo):
self._basic_info = value
else:
self._basic_info = EpInfo.from_alipay_dict(value)
@property
def case_info_list(self):
return self._case_info_list
@case_info_list.setter
def case_info_list(self, value):
if isinstance(value, list):
self._case_info_list = list()
for i in value:
if isinstance(i, EpInfo):
self._case_info_list.append(i)
else:
self._case_info_list.append(EpInfo.from_alipay_dict(i))
@property
def entinv_list(self):
return self._entinv_list
@entinv_list.setter
def entinv_list(self, value):
if isinstance(value, list):
self._entinv_list = list()
for i in value:
if isinstance(i, EpInfo):
self._entinv_list.append(i)
else:
self._entinv_list.append(EpInfo.from_alipay_dict(i))
@property
def fr_position_list(self):
return self._fr_position_list
@fr_position_list.setter
def fr_position_list(self, value):
if isinstance(value, list):
self._fr_position_list = list()
for i in value:
if isinstance(i, EpInfo):
self._fr_position_list.append(i)
else:
self._fr_position_list.append(EpInfo.from_alipay_dict(i))
<|reserved_special_token_0|>
@frinv_list.setter
def frinv_list(self, value):
if isinstance(value, list):
self._frinv_list = list()
for i in value:
if isinstance(i, EpInfo):
self._frinv_list.append(i)
else:
self._frinv_list.append(EpInfo.from_alipay_dict(i))
@property
def person_list(self):
return self._person_list
@person_list.setter
def person_list(self, value):
if isinstance(value, list):
self._person_list = list()
for i in value:
if isinstance(i, EpInfo):
self._person_list.append(i)
else:
self._person_list.append(EpInfo.from_alipay_dict(i))
@property
def share_holder_list(self):
return self._share_holder_list
@share_holder_list.setter
def share_holder_list(self, value):
if isinstance(value, list):
self._share_holder_list = list()
for i in value:
if isinstance(i, EpInfo):
self._share_holder_list.append(i)
else:
self._share_holder_list.append(EpInfo.from_alipay_dict(i))
def to_alipay_dict(self):
params = dict()
if self.alter_list:
if isinstance(self.alter_list, list):
for i in range(0, len(self.alter_list)):
element = self.alter_list[i]
if hasattr(element, 'to_alipay_dict'):
self.alter_list[i] = element.to_alipay_dict()
if hasattr(self.alter_list, 'to_alipay_dict'):
params['alter_list'] = self.alter_list.to_alipay_dict()
else:
params['alter_list'] = self.alter_list
if self.basic_info:
if hasattr(self.basic_info, 'to_alipay_dict'):
params['basic_info'] = self.basic_info.to_alipay_dict()
else:
params['basic_info'] = self.basic_info
if self.case_info_list:
if isinstance(self.case_info_list, list):
for i in range(0, len(self.case_info_list)):
element = self.case_info_list[i]
if hasattr(element, 'to_alipay_dict'):
self.case_info_list[i] = element.to_alipay_dict()
if hasattr(self.case_info_list, 'to_alipay_dict'):
params['case_info_list'] = self.case_info_list.to_alipay_dict()
else:
params['case_info_list'] = self.case_info_list
if self.entinv_list:
if isinstance(self.entinv_list, list):
for i in range(0, len(self.entinv_list)):
element = self.entinv_list[i]
if hasattr(element, 'to_alipay_dict'):
self.entinv_list[i] = element.to_alipay_dict()
if hasattr(self.entinv_list, 'to_alipay_dict'):
params['entinv_list'] = self.entinv_list.to_alipay_dict()
else:
params['entinv_list'] = self.entinv_list
if self.fr_position_list:
if isinstance(self.fr_position_list, list):
for i in range(0, len(self.fr_position_list)):
element = self.fr_position_list[i]
if hasattr(element, 'to_alipay_dict'):
self.fr_position_list[i] = element.to_alipay_dict()
if hasattr(self.fr_position_list, 'to_alipay_dict'):
params['fr_position_list'
] = self.fr_position_list.to_alipay_dict()
else:
params['fr_position_list'] = self.fr_position_list
if self.frinv_list:
if isinstance(self.frinv_list, list):
for i in range(0, len(self.frinv_list)):
element = self.frinv_list[i]
if hasattr(element, 'to_alipay_dict'):
self.frinv_list[i] = element.to_alipay_dict()
if hasattr(self.frinv_list, 'to_alipay_dict'):
params['frinv_list'] = self.frinv_list.to_alipay_dict()
else:
params['frinv_list'] = self.frinv_list
if self.person_list:
if isinstance(self.person_list, list):
for i in range(0, len(self.person_list)):
element = self.person_list[i]
if hasattr(element, 'to_alipay_dict'):
self.person_list[i] = element.to_alipay_dict()
if hasattr(self.person_list, 'to_alipay_dict'):
params['person_list'] = self.person_list.to_alipay_dict()
else:
params['person_list'] = self.person_list
if self.share_holder_list:
if isinstance(self.share_holder_list, list):
for i in range(0, len(self.share_holder_list)):
element = self.share_holder_list[i]
if hasattr(element, 'to_alipay_dict'):
self.share_holder_list[i] = element.to_alipay_dict()
if hasattr(self.share_holder_list, 'to_alipay_dict'):
params['share_holder_list'
] = self.share_holder_list.to_alipay_dict()
else:
params['share_holder_list'] = self.share_holder_list
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = CompanyInfo()
if 'alter_list' in d:
o.alter_list = d['alter_list']
if 'basic_info' in d:
o.basic_info = d['basic_info']
if 'case_info_list' in d:
o.case_info_list = d['case_info_list']
if 'entinv_list' in d:
o.entinv_list = d['entinv_list']
if 'fr_position_list' in d:
o.fr_position_list = d['fr_position_list']
if 'frinv_list' in d:
o.frinv_list = d['frinv_list']
if 'person_list' in d:
o.person_list = d['person_list']
if 'share_holder_list' in d:
o.share_holder_list = d['share_holder_list']
return o
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CompanyInfo(object):
def __init__(self):
self._alter_list = None
self._basic_info = None
self._case_info_list = None
self._entinv_list = None
self._fr_position_list = None
self._frinv_list = None
self._person_list = None
self._share_holder_list = None
@property
def alter_list(self):
return self._alter_list
@alter_list.setter
def alter_list(self, value):
if isinstance(value, list):
self._alter_list = list()
for i in value:
if isinstance(i, EpInfo):
self._alter_list.append(i)
else:
self._alter_list.append(EpInfo.from_alipay_dict(i))
@property
def basic_info(self):
return self._basic_info
@basic_info.setter
def basic_info(self, value):
if isinstance(value, EpInfo):
self._basic_info = value
else:
self._basic_info = EpInfo.from_alipay_dict(value)
@property
def case_info_list(self):
return self._case_info_list
@case_info_list.setter
def case_info_list(self, value):
if isinstance(value, list):
self._case_info_list = list()
for i in value:
if isinstance(i, EpInfo):
self._case_info_list.append(i)
else:
self._case_info_list.append(EpInfo.from_alipay_dict(i))
@property
def entinv_list(self):
return self._entinv_list
@entinv_list.setter
def entinv_list(self, value):
if isinstance(value, list):
self._entinv_list = list()
for i in value:
if isinstance(i, EpInfo):
self._entinv_list.append(i)
else:
self._entinv_list.append(EpInfo.from_alipay_dict(i))
@property
def fr_position_list(self):
return self._fr_position_list
@fr_position_list.setter
def fr_position_list(self, value):
if isinstance(value, list):
self._fr_position_list = list()
for i in value:
if isinstance(i, EpInfo):
self._fr_position_list.append(i)
else:
self._fr_position_list.append(EpInfo.from_alipay_dict(i))
<|reserved_special_token_0|>
@frinv_list.setter
def frinv_list(self, value):
if isinstance(value, list):
self._frinv_list = list()
for i in value:
if isinstance(i, EpInfo):
self._frinv_list.append(i)
else:
self._frinv_list.append(EpInfo.from_alipay_dict(i))
@property
def person_list(self):
return self._person_list
@person_list.setter
def person_list(self, value):
if isinstance(value, list):
self._person_list = list()
for i in value:
if isinstance(i, EpInfo):
self._person_list.append(i)
else:
self._person_list.append(EpInfo.from_alipay_dict(i))
@property
def share_holder_list(self):
return self._share_holder_list
@share_holder_list.setter
def share_holder_list(self, value):
if isinstance(value, list):
self._share_holder_list = list()
for i in value:
if isinstance(i, EpInfo):
self._share_holder_list.append(i)
else:
self._share_holder_list.append(EpInfo.from_alipay_dict(i))
def to_alipay_dict(self):
params = dict()
if self.alter_list:
if isinstance(self.alter_list, list):
for i in range(0, len(self.alter_list)):
element = self.alter_list[i]
if hasattr(element, 'to_alipay_dict'):
self.alter_list[i] = element.to_alipay_dict()
if hasattr(self.alter_list, 'to_alipay_dict'):
params['alter_list'] = self.alter_list.to_alipay_dict()
else:
params['alter_list'] = self.alter_list
if self.basic_info:
if hasattr(self.basic_info, 'to_alipay_dict'):
params['basic_info'] = self.basic_info.to_alipay_dict()
else:
params['basic_info'] = self.basic_info
if self.case_info_list:
if isinstance(self.case_info_list, list):
for i in range(0, len(self.case_info_list)):
element = self.case_info_list[i]
if hasattr(element, 'to_alipay_dict'):
self.case_info_list[i] = element.to_alipay_dict()
if hasattr(self.case_info_list, 'to_alipay_dict'):
params['case_info_list'] = self.case_info_list.to_alipay_dict()
else:
params['case_info_list'] = self.case_info_list
if self.entinv_list:
if isinstance(self.entinv_list, list):
for i in range(0, len(self.entinv_list)):
element = self.entinv_list[i]
if hasattr(element, 'to_alipay_dict'):
self.entinv_list[i] = element.to_alipay_dict()
if hasattr(self.entinv_list, 'to_alipay_dict'):
params['entinv_list'] = self.entinv_list.to_alipay_dict()
else:
params['entinv_list'] = self.entinv_list
if self.fr_position_list:
if isinstance(self.fr_position_list, list):
for i in range(0, len(self.fr_position_list)):
element = self.fr_position_list[i]
if hasattr(element, 'to_alipay_dict'):
self.fr_position_list[i] = element.to_alipay_dict()
if hasattr(self.fr_position_list, 'to_alipay_dict'):
params['fr_position_list'
] = self.fr_position_list.to_alipay_dict()
else:
params['fr_position_list'] = self.fr_position_list
if self.frinv_list:
if isinstance(self.frinv_list, list):
for i in range(0, len(self.frinv_list)):
element = self.frinv_list[i]
if hasattr(element, 'to_alipay_dict'):
self.frinv_list[i] = element.to_alipay_dict()
if hasattr(self.frinv_list, 'to_alipay_dict'):
params['frinv_list'] = self.frinv_list.to_alipay_dict()
else:
params['frinv_list'] = self.frinv_list
if self.person_list:
if isinstance(self.person_list, list):
for i in range(0, len(self.person_list)):
element = self.person_list[i]
if hasattr(element, 'to_alipay_dict'):
self.person_list[i] = element.to_alipay_dict()
if hasattr(self.person_list, 'to_alipay_dict'):
params['person_list'] = self.person_list.to_alipay_dict()
else:
params['person_list'] = self.person_list
if self.share_holder_list:
if isinstance(self.share_holder_list, list):
for i in range(0, len(self.share_holder_list)):
element = self.share_holder_list[i]
if hasattr(element, 'to_alipay_dict'):
self.share_holder_list[i] = element.to_alipay_dict()
if hasattr(self.share_holder_list, 'to_alipay_dict'):
params['share_holder_list'
] = self.share_holder_list.to_alipay_dict()
else:
params['share_holder_list'] = self.share_holder_list
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = CompanyInfo()
if 'alter_list' in d:
o.alter_list = d['alter_list']
if 'basic_info' in d:
o.basic_info = d['basic_info']
if 'case_info_list' in d:
o.case_info_list = d['case_info_list']
if 'entinv_list' in d:
o.entinv_list = d['entinv_list']
if 'fr_position_list' in d:
o.fr_position_list = d['fr_position_list']
if 'frinv_list' in d:
o.frinv_list = d['frinv_list']
if 'person_list' in d:
o.person_list = d['person_list']
if 'share_holder_list' in d:
o.share_holder_list = d['share_holder_list']
return o
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CompanyInfo(object):
def __init__(self):
self._alter_list = None
self._basic_info = None
self._case_info_list = None
self._entinv_list = None
self._fr_position_list = None
self._frinv_list = None
self._person_list = None
self._share_holder_list = None
@property
def alter_list(self):
return self._alter_list
@alter_list.setter
def alter_list(self, value):
if isinstance(value, list):
self._alter_list = list()
for i in value:
if isinstance(i, EpInfo):
self._alter_list.append(i)
else:
self._alter_list.append(EpInfo.from_alipay_dict(i))
@property
def basic_info(self):
return self._basic_info
@basic_info.setter
def basic_info(self, value):
if isinstance(value, EpInfo):
self._basic_info = value
else:
self._basic_info = EpInfo.from_alipay_dict(value)
@property
def case_info_list(self):
return self._case_info_list
@case_info_list.setter
def case_info_list(self, value):
if isinstance(value, list):
self._case_info_list = list()
for i in value:
if isinstance(i, EpInfo):
self._case_info_list.append(i)
else:
self._case_info_list.append(EpInfo.from_alipay_dict(i))
@property
def entinv_list(self):
return self._entinv_list
@entinv_list.setter
def entinv_list(self, value):
if isinstance(value, list):
self._entinv_list = list()
for i in value:
if isinstance(i, EpInfo):
self._entinv_list.append(i)
else:
self._entinv_list.append(EpInfo.from_alipay_dict(i))
@property
def fr_position_list(self):
return self._fr_position_list
@fr_position_list.setter
def fr_position_list(self, value):
if isinstance(value, list):
self._fr_position_list = list()
for i in value:
if isinstance(i, EpInfo):
self._fr_position_list.append(i)
else:
self._fr_position_list.append(EpInfo.from_alipay_dict(i))
@property
def frinv_list(self):
return self._frinv_list
@frinv_list.setter
def frinv_list(self, value):
if isinstance(value, list):
self._frinv_list = list()
for i in value:
if isinstance(i, EpInfo):
self._frinv_list.append(i)
else:
self._frinv_list.append(EpInfo.from_alipay_dict(i))
@property
def person_list(self):
return self._person_list
@person_list.setter
def person_list(self, value):
if isinstance(value, list):
self._person_list = list()
for i in value:
if isinstance(i, EpInfo):
self._person_list.append(i)
else:
self._person_list.append(EpInfo.from_alipay_dict(i))
@property
def share_holder_list(self):
return self._share_holder_list
@share_holder_list.setter
def share_holder_list(self, value):
if isinstance(value, list):
self._share_holder_list = list()
for i in value:
if isinstance(i, EpInfo):
self._share_holder_list.append(i)
else:
self._share_holder_list.append(EpInfo.from_alipay_dict(i))
def to_alipay_dict(self):
params = dict()
if self.alter_list:
if isinstance(self.alter_list, list):
for i in range(0, len(self.alter_list)):
element = self.alter_list[i]
if hasattr(element, 'to_alipay_dict'):
self.alter_list[i] = element.to_alipay_dict()
if hasattr(self.alter_list, 'to_alipay_dict'):
params['alter_list'] = self.alter_list.to_alipay_dict()
else:
params['alter_list'] = self.alter_list
if self.basic_info:
if hasattr(self.basic_info, 'to_alipay_dict'):
params['basic_info'] = self.basic_info.to_alipay_dict()
else:
params['basic_info'] = self.basic_info
if self.case_info_list:
if isinstance(self.case_info_list, list):
for i in range(0, len(self.case_info_list)):
element = self.case_info_list[i]
if hasattr(element, 'to_alipay_dict'):
self.case_info_list[i] = element.to_alipay_dict()
if hasattr(self.case_info_list, 'to_alipay_dict'):
params['case_info_list'] = self.case_info_list.to_alipay_dict()
else:
params['case_info_list'] = self.case_info_list
if self.entinv_list:
if isinstance(self.entinv_list, list):
for i in range(0, len(self.entinv_list)):
element = self.entinv_list[i]
if hasattr(element, 'to_alipay_dict'):
self.entinv_list[i] = element.to_alipay_dict()
if hasattr(self.entinv_list, 'to_alipay_dict'):
params['entinv_list'] = self.entinv_list.to_alipay_dict()
else:
params['entinv_list'] = self.entinv_list
if self.fr_position_list:
if isinstance(self.fr_position_list, list):
for i in range(0, len(self.fr_position_list)):
element = self.fr_position_list[i]
if hasattr(element, 'to_alipay_dict'):
self.fr_position_list[i] = element.to_alipay_dict()
if hasattr(self.fr_position_list, 'to_alipay_dict'):
params['fr_position_list'
] = self.fr_position_list.to_alipay_dict()
else:
params['fr_position_list'] = self.fr_position_list
if self.frinv_list:
if isinstance(self.frinv_list, list):
for i in range(0, len(self.frinv_list)):
element = self.frinv_list[i]
if hasattr(element, 'to_alipay_dict'):
self.frinv_list[i] = element.to_alipay_dict()
if hasattr(self.frinv_list, 'to_alipay_dict'):
params['frinv_list'] = self.frinv_list.to_alipay_dict()
else:
params['frinv_list'] = self.frinv_list
if self.person_list:
if isinstance(self.person_list, list):
for i in range(0, len(self.person_list)):
element = self.person_list[i]
if hasattr(element, 'to_alipay_dict'):
self.person_list[i] = element.to_alipay_dict()
if hasattr(self.person_list, 'to_alipay_dict'):
params['person_list'] = self.person_list.to_alipay_dict()
else:
params['person_list'] = self.person_list
if self.share_holder_list:
if isinstance(self.share_holder_list, list):
for i in range(0, len(self.share_holder_list)):
element = self.share_holder_list[i]
if hasattr(element, 'to_alipay_dict'):
self.share_holder_list[i] = element.to_alipay_dict()
if hasattr(self.share_holder_list, 'to_alipay_dict'):
params['share_holder_list'
] = self.share_holder_list.to_alipay_dict()
else:
params['share_holder_list'] = self.share_holder_list
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = CompanyInfo()
if 'alter_list' in d:
o.alter_list = d['alter_list']
if 'basic_info' in d:
o.basic_info = d['basic_info']
if 'case_info_list' in d:
o.case_info_list = d['case_info_list']
if 'entinv_list' in d:
o.entinv_list = d['entinv_list']
if 'fr_position_list' in d:
o.fr_position_list = d['fr_position_list']
if 'frinv_list' in d:
o.frinv_list = d['frinv_list']
if 'person_list' in d:
o.person_list = d['person_list']
if 'share_holder_list' in d:
o.share_holder_list = d['share_holder_list']
return o
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.EpInfo import EpInfo
from alipay.aop.api.domain.EpInfo import EpInfo
from alipay.aop.api.domain.EpInfo import EpInfo
from alipay.aop.api.domain.EpInfo import EpInfo
from alipay.aop.api.domain.EpInfo import EpInfo
from alipay.aop.api.domain.EpInfo import EpInfo
from alipay.aop.api.domain.EpInfo import EpInfo
from alipay.aop.api.domain.EpInfo import EpInfo
class CompanyInfo(object):
    """Aggregated enterprise profile returned by Alipay open-API queries.

    Every field is either a single ``EpInfo`` record (``basic_info``) or a
    list of ``EpInfo`` records.  Setters coerce plain dicts into ``EpInfo``
    instances via ``EpInfo.from_alipay_dict``; ``to_alipay_dict`` performs
    the reverse conversion when building the gateway payload.
    """

    #: Gateway field names, in the order the payload dict is populated.
    _FIELD_NAMES = ('alter_list', 'basic_info', 'case_info_list',
                    'entinv_list', 'fr_position_list', 'frinv_list',
                    'person_list', 'share_holder_list')

    def __init__(self):
        self._alter_list = None
        self._basic_info = None
        self._case_info_list = None
        self._entinv_list = None
        self._fr_position_list = None
        self._frinv_list = None
        self._person_list = None
        self._share_holder_list = None

    @staticmethod
    def _to_ep_list(value):
        # Normalize a list of dicts and/or EpInfo instances into EpInfo
        # instances; items that are already EpInfo are kept untouched.
        return [item if isinstance(item, EpInfo)
                else EpInfo.from_alipay_dict(item)
                for item in value]

    @staticmethod
    def _serialize(value):
        # Convert one field value into its plain-dict payload form.
        # List elements are converted *in place*, mirroring the generated
        # SDK code, so a second serialization pass sees plain dicts.
        if isinstance(value, list):
            for idx, element in enumerate(value):
                if hasattr(element, 'to_alipay_dict'):
                    value[idx] = element.to_alipay_dict()
        if hasattr(value, 'to_alipay_dict'):
            return value.to_alipay_dict()
        return value

    @property
    def alter_list(self):
        return self._alter_list

    @alter_list.setter
    def alter_list(self, value):
        # Non-list input is silently ignored (generated-SDK convention).
        if isinstance(value, list):
            self._alter_list = self._to_ep_list(value)

    @property
    def basic_info(self):
        return self._basic_info

    @basic_info.setter
    def basic_info(self, value):
        self._basic_info = (value if isinstance(value, EpInfo)
                            else EpInfo.from_alipay_dict(value))

    @property
    def case_info_list(self):
        return self._case_info_list

    @case_info_list.setter
    def case_info_list(self, value):
        if isinstance(value, list):
            self._case_info_list = self._to_ep_list(value)

    @property
    def entinv_list(self):
        return self._entinv_list

    @entinv_list.setter
    def entinv_list(self, value):
        if isinstance(value, list):
            self._entinv_list = self._to_ep_list(value)

    @property
    def fr_position_list(self):
        return self._fr_position_list

    @fr_position_list.setter
    def fr_position_list(self, value):
        if isinstance(value, list):
            self._fr_position_list = self._to_ep_list(value)

    @property
    def frinv_list(self):
        return self._frinv_list

    @frinv_list.setter
    def frinv_list(self, value):
        if isinstance(value, list):
            self._frinv_list = self._to_ep_list(value)

    @property
    def person_list(self):
        return self._person_list

    @person_list.setter
    def person_list(self, value):
        if isinstance(value, list):
            self._person_list = self._to_ep_list(value)

    @property
    def share_holder_list(self):
        return self._share_holder_list

    @share_holder_list.setter
    def share_holder_list(self, value):
        if isinstance(value, list):
            self._share_holder_list = self._to_ep_list(value)

    def to_alipay_dict(self):
        """Return the payload dict for the gateway.

        Falsy fields (None, empty list) are omitted; EpInfo values are
        converted to plain dicts, list elements in place.
        """
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            if value:
                params[name] = self._serialize(value)
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a CompanyInfo from a response dict; None for falsy input."""
        if not d:
            return None
        o = CompanyInfo()
        for name in CompanyInfo._FIELD_NAMES:
            if name in d:
                # Assigning through the property triggers EpInfo coercion.
                setattr(o, name, d[name])
        return o
|
flexible
|
{
"blob_id": "6743a4f3c9118e790e52b586a36d71a735101702",
"index": 1901,
"step-1": "<mask token>\n\n\nclass CompanyInfo(object):\n\n def __init__(self):\n self._alter_list = None\n self._basic_info = None\n self._case_info_list = None\n self._entinv_list = None\n self._fr_position_list = None\n self._frinv_list = None\n self._person_list = None\n self._share_holder_list = None\n <mask token>\n\n @alter_list.setter\n def alter_list(self, value):\n if isinstance(value, list):\n self._alter_list = list()\n for i in value:\n if isinstance(i, EpInfo):\n self._alter_list.append(i)\n else:\n self._alter_list.append(EpInfo.from_alipay_dict(i))\n\n @property\n def basic_info(self):\n return self._basic_info\n <mask token>\n\n @property\n def case_info_list(self):\n return self._case_info_list\n\n @case_info_list.setter\n def case_info_list(self, value):\n if isinstance(value, list):\n self._case_info_list = list()\n for i in value:\n if isinstance(i, EpInfo):\n self._case_info_list.append(i)\n else:\n self._case_info_list.append(EpInfo.from_alipay_dict(i))\n <mask token>\n <mask token>\n\n @property\n def fr_position_list(self):\n return self._fr_position_list\n <mask token>\n <mask token>\n\n @frinv_list.setter\n def frinv_list(self, value):\n if isinstance(value, list):\n self._frinv_list = list()\n for i in value:\n if isinstance(i, EpInfo):\n self._frinv_list.append(i)\n else:\n self._frinv_list.append(EpInfo.from_alipay_dict(i))\n\n @property\n def person_list(self):\n return self._person_list\n\n @person_list.setter\n def person_list(self, value):\n if isinstance(value, list):\n self._person_list = list()\n for i in value:\n if isinstance(i, EpInfo):\n self._person_list.append(i)\n else:\n self._person_list.append(EpInfo.from_alipay_dict(i))\n\n @property\n def share_holder_list(self):\n return self._share_holder_list\n\n @share_holder_list.setter\n def share_holder_list(self, value):\n if isinstance(value, list):\n self._share_holder_list = list()\n for i in value:\n if isinstance(i, EpInfo):\n self._share_holder_list.append(i)\n else:\n 
self._share_holder_list.append(EpInfo.from_alipay_dict(i))\n\n def to_alipay_dict(self):\n params = dict()\n if self.alter_list:\n if isinstance(self.alter_list, list):\n for i in range(0, len(self.alter_list)):\n element = self.alter_list[i]\n if hasattr(element, 'to_alipay_dict'):\n self.alter_list[i] = element.to_alipay_dict()\n if hasattr(self.alter_list, 'to_alipay_dict'):\n params['alter_list'] = self.alter_list.to_alipay_dict()\n else:\n params['alter_list'] = self.alter_list\n if self.basic_info:\n if hasattr(self.basic_info, 'to_alipay_dict'):\n params['basic_info'] = self.basic_info.to_alipay_dict()\n else:\n params['basic_info'] = self.basic_info\n if self.case_info_list:\n if isinstance(self.case_info_list, list):\n for i in range(0, len(self.case_info_list)):\n element = self.case_info_list[i]\n if hasattr(element, 'to_alipay_dict'):\n self.case_info_list[i] = element.to_alipay_dict()\n if hasattr(self.case_info_list, 'to_alipay_dict'):\n params['case_info_list'] = self.case_info_list.to_alipay_dict()\n else:\n params['case_info_list'] = self.case_info_list\n if self.entinv_list:\n if isinstance(self.entinv_list, list):\n for i in range(0, len(self.entinv_list)):\n element = self.entinv_list[i]\n if hasattr(element, 'to_alipay_dict'):\n self.entinv_list[i] = element.to_alipay_dict()\n if hasattr(self.entinv_list, 'to_alipay_dict'):\n params['entinv_list'] = self.entinv_list.to_alipay_dict()\n else:\n params['entinv_list'] = self.entinv_list\n if self.fr_position_list:\n if isinstance(self.fr_position_list, list):\n for i in range(0, len(self.fr_position_list)):\n element = self.fr_position_list[i]\n if hasattr(element, 'to_alipay_dict'):\n self.fr_position_list[i] = element.to_alipay_dict()\n if hasattr(self.fr_position_list, 'to_alipay_dict'):\n params['fr_position_list'\n ] = self.fr_position_list.to_alipay_dict()\n else:\n params['fr_position_list'] = self.fr_position_list\n if self.frinv_list:\n if isinstance(self.frinv_list, list):\n for i in 
range(0, len(self.frinv_list)):\n element = self.frinv_list[i]\n if hasattr(element, 'to_alipay_dict'):\n self.frinv_list[i] = element.to_alipay_dict()\n if hasattr(self.frinv_list, 'to_alipay_dict'):\n params['frinv_list'] = self.frinv_list.to_alipay_dict()\n else:\n params['frinv_list'] = self.frinv_list\n if self.person_list:\n if isinstance(self.person_list, list):\n for i in range(0, len(self.person_list)):\n element = self.person_list[i]\n if hasattr(element, 'to_alipay_dict'):\n self.person_list[i] = element.to_alipay_dict()\n if hasattr(self.person_list, 'to_alipay_dict'):\n params['person_list'] = self.person_list.to_alipay_dict()\n else:\n params['person_list'] = self.person_list\n if self.share_holder_list:\n if isinstance(self.share_holder_list, list):\n for i in range(0, len(self.share_holder_list)):\n element = self.share_holder_list[i]\n if hasattr(element, 'to_alipay_dict'):\n self.share_holder_list[i] = element.to_alipay_dict()\n if hasattr(self.share_holder_list, 'to_alipay_dict'):\n params['share_holder_list'\n ] = self.share_holder_list.to_alipay_dict()\n else:\n params['share_holder_list'] = self.share_holder_list\n return params\n\n @staticmethod\n def from_alipay_dict(d):\n if not d:\n return None\n o = CompanyInfo()\n if 'alter_list' in d:\n o.alter_list = d['alter_list']\n if 'basic_info' in d:\n o.basic_info = d['basic_info']\n if 'case_info_list' in d:\n o.case_info_list = d['case_info_list']\n if 'entinv_list' in d:\n o.entinv_list = d['entinv_list']\n if 'fr_position_list' in d:\n o.fr_position_list = d['fr_position_list']\n if 'frinv_list' in d:\n o.frinv_list = d['frinv_list']\n if 'person_list' in d:\n o.person_list = d['person_list']\n if 'share_holder_list' in d:\n o.share_holder_list = d['share_holder_list']\n return o\n",
"step-2": "<mask token>\n\n\nclass CompanyInfo(object):\n\n def __init__(self):\n self._alter_list = None\n self._basic_info = None\n self._case_info_list = None\n self._entinv_list = None\n self._fr_position_list = None\n self._frinv_list = None\n self._person_list = None\n self._share_holder_list = None\n <mask token>\n\n @alter_list.setter\n def alter_list(self, value):\n if isinstance(value, list):\n self._alter_list = list()\n for i in value:\n if isinstance(i, EpInfo):\n self._alter_list.append(i)\n else:\n self._alter_list.append(EpInfo.from_alipay_dict(i))\n\n @property\n def basic_info(self):\n return self._basic_info\n\n @basic_info.setter\n def basic_info(self, value):\n if isinstance(value, EpInfo):\n self._basic_info = value\n else:\n self._basic_info = EpInfo.from_alipay_dict(value)\n\n @property\n def case_info_list(self):\n return self._case_info_list\n\n @case_info_list.setter\n def case_info_list(self, value):\n if isinstance(value, list):\n self._case_info_list = list()\n for i in value:\n if isinstance(i, EpInfo):\n self._case_info_list.append(i)\n else:\n self._case_info_list.append(EpInfo.from_alipay_dict(i))\n\n @property\n def entinv_list(self):\n return self._entinv_list\n\n @entinv_list.setter\n def entinv_list(self, value):\n if isinstance(value, list):\n self._entinv_list = list()\n for i in value:\n if isinstance(i, EpInfo):\n self._entinv_list.append(i)\n else:\n self._entinv_list.append(EpInfo.from_alipay_dict(i))\n\n @property\n def fr_position_list(self):\n return self._fr_position_list\n\n @fr_position_list.setter\n def fr_position_list(self, value):\n if isinstance(value, list):\n self._fr_position_list = list()\n for i in value:\n if isinstance(i, EpInfo):\n self._fr_position_list.append(i)\n else:\n self._fr_position_list.append(EpInfo.from_alipay_dict(i))\n <mask token>\n\n @frinv_list.setter\n def frinv_list(self, value):\n if isinstance(value, list):\n self._frinv_list = list()\n for i in value:\n if isinstance(i, EpInfo):\n 
self._frinv_list.append(i)\n else:\n self._frinv_list.append(EpInfo.from_alipay_dict(i))\n\n @property\n def person_list(self):\n return self._person_list\n\n @person_list.setter\n def person_list(self, value):\n if isinstance(value, list):\n self._person_list = list()\n for i in value:\n if isinstance(i, EpInfo):\n self._person_list.append(i)\n else:\n self._person_list.append(EpInfo.from_alipay_dict(i))\n\n @property\n def share_holder_list(self):\n return self._share_holder_list\n\n @share_holder_list.setter\n def share_holder_list(self, value):\n if isinstance(value, list):\n self._share_holder_list = list()\n for i in value:\n if isinstance(i, EpInfo):\n self._share_holder_list.append(i)\n else:\n self._share_holder_list.append(EpInfo.from_alipay_dict(i))\n\n def to_alipay_dict(self):\n params = dict()\n if self.alter_list:\n if isinstance(self.alter_list, list):\n for i in range(0, len(self.alter_list)):\n element = self.alter_list[i]\n if hasattr(element, 'to_alipay_dict'):\n self.alter_list[i] = element.to_alipay_dict()\n if hasattr(self.alter_list, 'to_alipay_dict'):\n params['alter_list'] = self.alter_list.to_alipay_dict()\n else:\n params['alter_list'] = self.alter_list\n if self.basic_info:\n if hasattr(self.basic_info, 'to_alipay_dict'):\n params['basic_info'] = self.basic_info.to_alipay_dict()\n else:\n params['basic_info'] = self.basic_info\n if self.case_info_list:\n if isinstance(self.case_info_list, list):\n for i in range(0, len(self.case_info_list)):\n element = self.case_info_list[i]\n if hasattr(element, 'to_alipay_dict'):\n self.case_info_list[i] = element.to_alipay_dict()\n if hasattr(self.case_info_list, 'to_alipay_dict'):\n params['case_info_list'] = self.case_info_list.to_alipay_dict()\n else:\n params['case_info_list'] = self.case_info_list\n if self.entinv_list:\n if isinstance(self.entinv_list, list):\n for i in range(0, len(self.entinv_list)):\n element = self.entinv_list[i]\n if hasattr(element, 'to_alipay_dict'):\n 
self.entinv_list[i] = element.to_alipay_dict()\n if hasattr(self.entinv_list, 'to_alipay_dict'):\n params['entinv_list'] = self.entinv_list.to_alipay_dict()\n else:\n params['entinv_list'] = self.entinv_list\n if self.fr_position_list:\n if isinstance(self.fr_position_list, list):\n for i in range(0, len(self.fr_position_list)):\n element = self.fr_position_list[i]\n if hasattr(element, 'to_alipay_dict'):\n self.fr_position_list[i] = element.to_alipay_dict()\n if hasattr(self.fr_position_list, 'to_alipay_dict'):\n params['fr_position_list'\n ] = self.fr_position_list.to_alipay_dict()\n else:\n params['fr_position_list'] = self.fr_position_list\n if self.frinv_list:\n if isinstance(self.frinv_list, list):\n for i in range(0, len(self.frinv_list)):\n element = self.frinv_list[i]\n if hasattr(element, 'to_alipay_dict'):\n self.frinv_list[i] = element.to_alipay_dict()\n if hasattr(self.frinv_list, 'to_alipay_dict'):\n params['frinv_list'] = self.frinv_list.to_alipay_dict()\n else:\n params['frinv_list'] = self.frinv_list\n if self.person_list:\n if isinstance(self.person_list, list):\n for i in range(0, len(self.person_list)):\n element = self.person_list[i]\n if hasattr(element, 'to_alipay_dict'):\n self.person_list[i] = element.to_alipay_dict()\n if hasattr(self.person_list, 'to_alipay_dict'):\n params['person_list'] = self.person_list.to_alipay_dict()\n else:\n params['person_list'] = self.person_list\n if self.share_holder_list:\n if isinstance(self.share_holder_list, list):\n for i in range(0, len(self.share_holder_list)):\n element = self.share_holder_list[i]\n if hasattr(element, 'to_alipay_dict'):\n self.share_holder_list[i] = element.to_alipay_dict()\n if hasattr(self.share_holder_list, 'to_alipay_dict'):\n params['share_holder_list'\n ] = self.share_holder_list.to_alipay_dict()\n else:\n params['share_holder_list'] = self.share_holder_list\n return params\n\n @staticmethod\n def from_alipay_dict(d):\n if not d:\n return None\n o = CompanyInfo()\n if 
'alter_list' in d:\n o.alter_list = d['alter_list']\n if 'basic_info' in d:\n o.basic_info = d['basic_info']\n if 'case_info_list' in d:\n o.case_info_list = d['case_info_list']\n if 'entinv_list' in d:\n o.entinv_list = d['entinv_list']\n if 'fr_position_list' in d:\n o.fr_position_list = d['fr_position_list']\n if 'frinv_list' in d:\n o.frinv_list = d['frinv_list']\n if 'person_list' in d:\n o.person_list = d['person_list']\n if 'share_holder_list' in d:\n o.share_holder_list = d['share_holder_list']\n return o\n",
"step-3": "<mask token>\n\n\nclass CompanyInfo(object):\n\n def __init__(self):\n self._alter_list = None\n self._basic_info = None\n self._case_info_list = None\n self._entinv_list = None\n self._fr_position_list = None\n self._frinv_list = None\n self._person_list = None\n self._share_holder_list = None\n\n @property\n def alter_list(self):\n return self._alter_list\n\n @alter_list.setter\n def alter_list(self, value):\n if isinstance(value, list):\n self._alter_list = list()\n for i in value:\n if isinstance(i, EpInfo):\n self._alter_list.append(i)\n else:\n self._alter_list.append(EpInfo.from_alipay_dict(i))\n\n @property\n def basic_info(self):\n return self._basic_info\n\n @basic_info.setter\n def basic_info(self, value):\n if isinstance(value, EpInfo):\n self._basic_info = value\n else:\n self._basic_info = EpInfo.from_alipay_dict(value)\n\n @property\n def case_info_list(self):\n return self._case_info_list\n\n @case_info_list.setter\n def case_info_list(self, value):\n if isinstance(value, list):\n self._case_info_list = list()\n for i in value:\n if isinstance(i, EpInfo):\n self._case_info_list.append(i)\n else:\n self._case_info_list.append(EpInfo.from_alipay_dict(i))\n\n @property\n def entinv_list(self):\n return self._entinv_list\n\n @entinv_list.setter\n def entinv_list(self, value):\n if isinstance(value, list):\n self._entinv_list = list()\n for i in value:\n if isinstance(i, EpInfo):\n self._entinv_list.append(i)\n else:\n self._entinv_list.append(EpInfo.from_alipay_dict(i))\n\n @property\n def fr_position_list(self):\n return self._fr_position_list\n\n @fr_position_list.setter\n def fr_position_list(self, value):\n if isinstance(value, list):\n self._fr_position_list = list()\n for i in value:\n if isinstance(i, EpInfo):\n self._fr_position_list.append(i)\n else:\n self._fr_position_list.append(EpInfo.from_alipay_dict(i))\n <mask token>\n\n @frinv_list.setter\n def frinv_list(self, value):\n if isinstance(value, list):\n self._frinv_list = 
list()\n for i in value:\n if isinstance(i, EpInfo):\n self._frinv_list.append(i)\n else:\n self._frinv_list.append(EpInfo.from_alipay_dict(i))\n\n @property\n def person_list(self):\n return self._person_list\n\n @person_list.setter\n def person_list(self, value):\n if isinstance(value, list):\n self._person_list = list()\n for i in value:\n if isinstance(i, EpInfo):\n self._person_list.append(i)\n else:\n self._person_list.append(EpInfo.from_alipay_dict(i))\n\n @property\n def share_holder_list(self):\n return self._share_holder_list\n\n @share_holder_list.setter\n def share_holder_list(self, value):\n if isinstance(value, list):\n self._share_holder_list = list()\n for i in value:\n if isinstance(i, EpInfo):\n self._share_holder_list.append(i)\n else:\n self._share_holder_list.append(EpInfo.from_alipay_dict(i))\n\n def to_alipay_dict(self):\n params = dict()\n if self.alter_list:\n if isinstance(self.alter_list, list):\n for i in range(0, len(self.alter_list)):\n element = self.alter_list[i]\n if hasattr(element, 'to_alipay_dict'):\n self.alter_list[i] = element.to_alipay_dict()\n if hasattr(self.alter_list, 'to_alipay_dict'):\n params['alter_list'] = self.alter_list.to_alipay_dict()\n else:\n params['alter_list'] = self.alter_list\n if self.basic_info:\n if hasattr(self.basic_info, 'to_alipay_dict'):\n params['basic_info'] = self.basic_info.to_alipay_dict()\n else:\n params['basic_info'] = self.basic_info\n if self.case_info_list:\n if isinstance(self.case_info_list, list):\n for i in range(0, len(self.case_info_list)):\n element = self.case_info_list[i]\n if hasattr(element, 'to_alipay_dict'):\n self.case_info_list[i] = element.to_alipay_dict()\n if hasattr(self.case_info_list, 'to_alipay_dict'):\n params['case_info_list'] = self.case_info_list.to_alipay_dict()\n else:\n params['case_info_list'] = self.case_info_list\n if self.entinv_list:\n if isinstance(self.entinv_list, list):\n for i in range(0, len(self.entinv_list)):\n element = self.entinv_list[i]\n if 
hasattr(element, 'to_alipay_dict'):\n self.entinv_list[i] = element.to_alipay_dict()\n if hasattr(self.entinv_list, 'to_alipay_dict'):\n params['entinv_list'] = self.entinv_list.to_alipay_dict()\n else:\n params['entinv_list'] = self.entinv_list\n if self.fr_position_list:\n if isinstance(self.fr_position_list, list):\n for i in range(0, len(self.fr_position_list)):\n element = self.fr_position_list[i]\n if hasattr(element, 'to_alipay_dict'):\n self.fr_position_list[i] = element.to_alipay_dict()\n if hasattr(self.fr_position_list, 'to_alipay_dict'):\n params['fr_position_list'\n ] = self.fr_position_list.to_alipay_dict()\n else:\n params['fr_position_list'] = self.fr_position_list\n if self.frinv_list:\n if isinstance(self.frinv_list, list):\n for i in range(0, len(self.frinv_list)):\n element = self.frinv_list[i]\n if hasattr(element, 'to_alipay_dict'):\n self.frinv_list[i] = element.to_alipay_dict()\n if hasattr(self.frinv_list, 'to_alipay_dict'):\n params['frinv_list'] = self.frinv_list.to_alipay_dict()\n else:\n params['frinv_list'] = self.frinv_list\n if self.person_list:\n if isinstance(self.person_list, list):\n for i in range(0, len(self.person_list)):\n element = self.person_list[i]\n if hasattr(element, 'to_alipay_dict'):\n self.person_list[i] = element.to_alipay_dict()\n if hasattr(self.person_list, 'to_alipay_dict'):\n params['person_list'] = self.person_list.to_alipay_dict()\n else:\n params['person_list'] = self.person_list\n if self.share_holder_list:\n if isinstance(self.share_holder_list, list):\n for i in range(0, len(self.share_holder_list)):\n element = self.share_holder_list[i]\n if hasattr(element, 'to_alipay_dict'):\n self.share_holder_list[i] = element.to_alipay_dict()\n if hasattr(self.share_holder_list, 'to_alipay_dict'):\n params['share_holder_list'\n ] = self.share_holder_list.to_alipay_dict()\n else:\n params['share_holder_list'] = self.share_holder_list\n return params\n\n @staticmethod\n def from_alipay_dict(d):\n if not d:\n return 
None\n o = CompanyInfo()\n if 'alter_list' in d:\n o.alter_list = d['alter_list']\n if 'basic_info' in d:\n o.basic_info = d['basic_info']\n if 'case_info_list' in d:\n o.case_info_list = d['case_info_list']\n if 'entinv_list' in d:\n o.entinv_list = d['entinv_list']\n if 'fr_position_list' in d:\n o.fr_position_list = d['fr_position_list']\n if 'frinv_list' in d:\n o.frinv_list = d['frinv_list']\n if 'person_list' in d:\n o.person_list = d['person_list']\n if 'share_holder_list' in d:\n o.share_holder_list = d['share_holder_list']\n return o\n",
"step-4": "<mask token>\n\n\nclass CompanyInfo(object):\n\n def __init__(self):\n self._alter_list = None\n self._basic_info = None\n self._case_info_list = None\n self._entinv_list = None\n self._fr_position_list = None\n self._frinv_list = None\n self._person_list = None\n self._share_holder_list = None\n\n @property\n def alter_list(self):\n return self._alter_list\n\n @alter_list.setter\n def alter_list(self, value):\n if isinstance(value, list):\n self._alter_list = list()\n for i in value:\n if isinstance(i, EpInfo):\n self._alter_list.append(i)\n else:\n self._alter_list.append(EpInfo.from_alipay_dict(i))\n\n @property\n def basic_info(self):\n return self._basic_info\n\n @basic_info.setter\n def basic_info(self, value):\n if isinstance(value, EpInfo):\n self._basic_info = value\n else:\n self._basic_info = EpInfo.from_alipay_dict(value)\n\n @property\n def case_info_list(self):\n return self._case_info_list\n\n @case_info_list.setter\n def case_info_list(self, value):\n if isinstance(value, list):\n self._case_info_list = list()\n for i in value:\n if isinstance(i, EpInfo):\n self._case_info_list.append(i)\n else:\n self._case_info_list.append(EpInfo.from_alipay_dict(i))\n\n @property\n def entinv_list(self):\n return self._entinv_list\n\n @entinv_list.setter\n def entinv_list(self, value):\n if isinstance(value, list):\n self._entinv_list = list()\n for i in value:\n if isinstance(i, EpInfo):\n self._entinv_list.append(i)\n else:\n self._entinv_list.append(EpInfo.from_alipay_dict(i))\n\n @property\n def fr_position_list(self):\n return self._fr_position_list\n\n @fr_position_list.setter\n def fr_position_list(self, value):\n if isinstance(value, list):\n self._fr_position_list = list()\n for i in value:\n if isinstance(i, EpInfo):\n self._fr_position_list.append(i)\n else:\n self._fr_position_list.append(EpInfo.from_alipay_dict(i))\n\n @property\n def frinv_list(self):\n return self._frinv_list\n\n @frinv_list.setter\n def frinv_list(self, value):\n if 
isinstance(value, list):\n self._frinv_list = list()\n for i in value:\n if isinstance(i, EpInfo):\n self._frinv_list.append(i)\n else:\n self._frinv_list.append(EpInfo.from_alipay_dict(i))\n\n @property\n def person_list(self):\n return self._person_list\n\n @person_list.setter\n def person_list(self, value):\n if isinstance(value, list):\n self._person_list = list()\n for i in value:\n if isinstance(i, EpInfo):\n self._person_list.append(i)\n else:\n self._person_list.append(EpInfo.from_alipay_dict(i))\n\n @property\n def share_holder_list(self):\n return self._share_holder_list\n\n @share_holder_list.setter\n def share_holder_list(self, value):\n if isinstance(value, list):\n self._share_holder_list = list()\n for i in value:\n if isinstance(i, EpInfo):\n self._share_holder_list.append(i)\n else:\n self._share_holder_list.append(EpInfo.from_alipay_dict(i))\n\n def to_alipay_dict(self):\n params = dict()\n if self.alter_list:\n if isinstance(self.alter_list, list):\n for i in range(0, len(self.alter_list)):\n element = self.alter_list[i]\n if hasattr(element, 'to_alipay_dict'):\n self.alter_list[i] = element.to_alipay_dict()\n if hasattr(self.alter_list, 'to_alipay_dict'):\n params['alter_list'] = self.alter_list.to_alipay_dict()\n else:\n params['alter_list'] = self.alter_list\n if self.basic_info:\n if hasattr(self.basic_info, 'to_alipay_dict'):\n params['basic_info'] = self.basic_info.to_alipay_dict()\n else:\n params['basic_info'] = self.basic_info\n if self.case_info_list:\n if isinstance(self.case_info_list, list):\n for i in range(0, len(self.case_info_list)):\n element = self.case_info_list[i]\n if hasattr(element, 'to_alipay_dict'):\n self.case_info_list[i] = element.to_alipay_dict()\n if hasattr(self.case_info_list, 'to_alipay_dict'):\n params['case_info_list'] = self.case_info_list.to_alipay_dict()\n else:\n params['case_info_list'] = self.case_info_list\n if self.entinv_list:\n if isinstance(self.entinv_list, list):\n for i in range(0, 
len(self.entinv_list)):\n element = self.entinv_list[i]\n if hasattr(element, 'to_alipay_dict'):\n self.entinv_list[i] = element.to_alipay_dict()\n if hasattr(self.entinv_list, 'to_alipay_dict'):\n params['entinv_list'] = self.entinv_list.to_alipay_dict()\n else:\n params['entinv_list'] = self.entinv_list\n if self.fr_position_list:\n if isinstance(self.fr_position_list, list):\n for i in range(0, len(self.fr_position_list)):\n element = self.fr_position_list[i]\n if hasattr(element, 'to_alipay_dict'):\n self.fr_position_list[i] = element.to_alipay_dict()\n if hasattr(self.fr_position_list, 'to_alipay_dict'):\n params['fr_position_list'\n ] = self.fr_position_list.to_alipay_dict()\n else:\n params['fr_position_list'] = self.fr_position_list\n if self.frinv_list:\n if isinstance(self.frinv_list, list):\n for i in range(0, len(self.frinv_list)):\n element = self.frinv_list[i]\n if hasattr(element, 'to_alipay_dict'):\n self.frinv_list[i] = element.to_alipay_dict()\n if hasattr(self.frinv_list, 'to_alipay_dict'):\n params['frinv_list'] = self.frinv_list.to_alipay_dict()\n else:\n params['frinv_list'] = self.frinv_list\n if self.person_list:\n if isinstance(self.person_list, list):\n for i in range(0, len(self.person_list)):\n element = self.person_list[i]\n if hasattr(element, 'to_alipay_dict'):\n self.person_list[i] = element.to_alipay_dict()\n if hasattr(self.person_list, 'to_alipay_dict'):\n params['person_list'] = self.person_list.to_alipay_dict()\n else:\n params['person_list'] = self.person_list\n if self.share_holder_list:\n if isinstance(self.share_holder_list, list):\n for i in range(0, len(self.share_holder_list)):\n element = self.share_holder_list[i]\n if hasattr(element, 'to_alipay_dict'):\n self.share_holder_list[i] = element.to_alipay_dict()\n if hasattr(self.share_holder_list, 'to_alipay_dict'):\n params['share_holder_list'\n ] = self.share_holder_list.to_alipay_dict()\n else:\n params['share_holder_list'] = self.share_holder_list\n return params\n\n 
@staticmethod\n def from_alipay_dict(d):\n if not d:\n return None\n o = CompanyInfo()\n if 'alter_list' in d:\n o.alter_list = d['alter_list']\n if 'basic_info' in d:\n o.basic_info = d['basic_info']\n if 'case_info_list' in d:\n o.case_info_list = d['case_info_list']\n if 'entinv_list' in d:\n o.entinv_list = d['entinv_list']\n if 'fr_position_list' in d:\n o.fr_position_list = d['fr_position_list']\n if 'frinv_list' in d:\n o.frinv_list = d['frinv_list']\n if 'person_list' in d:\n o.person_list = d['person_list']\n if 'share_holder_list' in d:\n o.share_holder_list = d['share_holder_list']\n return o\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\n\nfrom alipay.aop.api.constant.ParamConstants import *\nfrom alipay.aop.api.domain.EpInfo import EpInfo\nfrom alipay.aop.api.domain.EpInfo import EpInfo\nfrom alipay.aop.api.domain.EpInfo import EpInfo\nfrom alipay.aop.api.domain.EpInfo import EpInfo\nfrom alipay.aop.api.domain.EpInfo import EpInfo\nfrom alipay.aop.api.domain.EpInfo import EpInfo\nfrom alipay.aop.api.domain.EpInfo import EpInfo\nfrom alipay.aop.api.domain.EpInfo import EpInfo\n\n\nclass CompanyInfo(object):\n\n def __init__(self):\n self._alter_list = None\n self._basic_info = None\n self._case_info_list = None\n self._entinv_list = None\n self._fr_position_list = None\n self._frinv_list = None\n self._person_list = None\n self._share_holder_list = None\n\n @property\n def alter_list(self):\n return self._alter_list\n\n @alter_list.setter\n def alter_list(self, value):\n if isinstance(value, list):\n self._alter_list = list()\n for i in value:\n if isinstance(i, EpInfo):\n self._alter_list.append(i)\n else:\n self._alter_list.append(EpInfo.from_alipay_dict(i))\n @property\n def basic_info(self):\n return self._basic_info\n\n @basic_info.setter\n def basic_info(self, value):\n if isinstance(value, EpInfo):\n self._basic_info = value\n else:\n self._basic_info = EpInfo.from_alipay_dict(value)\n @property\n def case_info_list(self):\n return self._case_info_list\n\n @case_info_list.setter\n def case_info_list(self, value):\n if isinstance(value, list):\n self._case_info_list = list()\n for i in value:\n if isinstance(i, EpInfo):\n self._case_info_list.append(i)\n else:\n self._case_info_list.append(EpInfo.from_alipay_dict(i))\n @property\n def entinv_list(self):\n return self._entinv_list\n\n @entinv_list.setter\n def entinv_list(self, value):\n if isinstance(value, list):\n self._entinv_list = list()\n for i in value:\n if isinstance(i, EpInfo):\n self._entinv_list.append(i)\n else:\n 
self._entinv_list.append(EpInfo.from_alipay_dict(i))\n @property\n def fr_position_list(self):\n return self._fr_position_list\n\n @fr_position_list.setter\n def fr_position_list(self, value):\n if isinstance(value, list):\n self._fr_position_list = list()\n for i in value:\n if isinstance(i, EpInfo):\n self._fr_position_list.append(i)\n else:\n self._fr_position_list.append(EpInfo.from_alipay_dict(i))\n @property\n def frinv_list(self):\n return self._frinv_list\n\n @frinv_list.setter\n def frinv_list(self, value):\n if isinstance(value, list):\n self._frinv_list = list()\n for i in value:\n if isinstance(i, EpInfo):\n self._frinv_list.append(i)\n else:\n self._frinv_list.append(EpInfo.from_alipay_dict(i))\n @property\n def person_list(self):\n return self._person_list\n\n @person_list.setter\n def person_list(self, value):\n if isinstance(value, list):\n self._person_list = list()\n for i in value:\n if isinstance(i, EpInfo):\n self._person_list.append(i)\n else:\n self._person_list.append(EpInfo.from_alipay_dict(i))\n @property\n def share_holder_list(self):\n return self._share_holder_list\n\n @share_holder_list.setter\n def share_holder_list(self, value):\n if isinstance(value, list):\n self._share_holder_list = list()\n for i in value:\n if isinstance(i, EpInfo):\n self._share_holder_list.append(i)\n else:\n self._share_holder_list.append(EpInfo.from_alipay_dict(i))\n\n\n def to_alipay_dict(self):\n params = dict()\n if self.alter_list:\n if isinstance(self.alter_list, list):\n for i in range(0, len(self.alter_list)):\n element = self.alter_list[i]\n if hasattr(element, 'to_alipay_dict'):\n self.alter_list[i] = element.to_alipay_dict()\n if hasattr(self.alter_list, 'to_alipay_dict'):\n params['alter_list'] = self.alter_list.to_alipay_dict()\n else:\n params['alter_list'] = self.alter_list\n if self.basic_info:\n if hasattr(self.basic_info, 'to_alipay_dict'):\n params['basic_info'] = self.basic_info.to_alipay_dict()\n else:\n params['basic_info'] = 
self.basic_info\n if self.case_info_list:\n if isinstance(self.case_info_list, list):\n for i in range(0, len(self.case_info_list)):\n element = self.case_info_list[i]\n if hasattr(element, 'to_alipay_dict'):\n self.case_info_list[i] = element.to_alipay_dict()\n if hasattr(self.case_info_list, 'to_alipay_dict'):\n params['case_info_list'] = self.case_info_list.to_alipay_dict()\n else:\n params['case_info_list'] = self.case_info_list\n if self.entinv_list:\n if isinstance(self.entinv_list, list):\n for i in range(0, len(self.entinv_list)):\n element = self.entinv_list[i]\n if hasattr(element, 'to_alipay_dict'):\n self.entinv_list[i] = element.to_alipay_dict()\n if hasattr(self.entinv_list, 'to_alipay_dict'):\n params['entinv_list'] = self.entinv_list.to_alipay_dict()\n else:\n params['entinv_list'] = self.entinv_list\n if self.fr_position_list:\n if isinstance(self.fr_position_list, list):\n for i in range(0, len(self.fr_position_list)):\n element = self.fr_position_list[i]\n if hasattr(element, 'to_alipay_dict'):\n self.fr_position_list[i] = element.to_alipay_dict()\n if hasattr(self.fr_position_list, 'to_alipay_dict'):\n params['fr_position_list'] = self.fr_position_list.to_alipay_dict()\n else:\n params['fr_position_list'] = self.fr_position_list\n if self.frinv_list:\n if isinstance(self.frinv_list, list):\n for i in range(0, len(self.frinv_list)):\n element = self.frinv_list[i]\n if hasattr(element, 'to_alipay_dict'):\n self.frinv_list[i] = element.to_alipay_dict()\n if hasattr(self.frinv_list, 'to_alipay_dict'):\n params['frinv_list'] = self.frinv_list.to_alipay_dict()\n else:\n params['frinv_list'] = self.frinv_list\n if self.person_list:\n if isinstance(self.person_list, list):\n for i in range(0, len(self.person_list)):\n element = self.person_list[i]\n if hasattr(element, 'to_alipay_dict'):\n self.person_list[i] = element.to_alipay_dict()\n if hasattr(self.person_list, 'to_alipay_dict'):\n params['person_list'] = self.person_list.to_alipay_dict()\n else:\n 
params['person_list'] = self.person_list\n if self.share_holder_list:\n if isinstance(self.share_holder_list, list):\n for i in range(0, len(self.share_holder_list)):\n element = self.share_holder_list[i]\n if hasattr(element, 'to_alipay_dict'):\n self.share_holder_list[i] = element.to_alipay_dict()\n if hasattr(self.share_holder_list, 'to_alipay_dict'):\n params['share_holder_list'] = self.share_holder_list.to_alipay_dict()\n else:\n params['share_holder_list'] = self.share_holder_list\n return params\n\n @staticmethod\n def from_alipay_dict(d):\n if not d:\n return None\n o = CompanyInfo()\n if 'alter_list' in d:\n o.alter_list = d['alter_list']\n if 'basic_info' in d:\n o.basic_info = d['basic_info']\n if 'case_info_list' in d:\n o.case_info_list = d['case_info_list']\n if 'entinv_list' in d:\n o.entinv_list = d['entinv_list']\n if 'fr_position_list' in d:\n o.fr_position_list = d['fr_position_list']\n if 'frinv_list' in d:\n o.frinv_list = d['frinv_list']\n if 'person_list' in d:\n o.person_list = d['person_list']\n if 'share_holder_list' in d:\n o.share_holder_list = d['share_holder_list']\n return o\n\n\n",
"step-ids": [
14,
18,
19,
20,
22
]
}
|
[
14,
18,
19,
20,
22
] |
import pandas as pd
import csv
import numpy as np
import matplotlib.pyplot as plt
# --- Configuration -----------------------------------------------------------
# Input: CSV of recorded (left, right) servo angles and the head roll/pitch
# each pair produced.  Outputs: one lookup table per servo, plus a combined
# table of (left, right) tuples indexed by pitch (rows) and roll (columns).
INPUT_CSV = 'C:/Users/yuyan.shi/Desktop/work/head-neck/kinematics/tabblepeggy reference tables/mid_servo_angle_2deg_3.csv'  # change to your csv file address
LEFT_OUT_CSV = r'C:/Users/yuyan.shi/Desktop/test files/left_test.csv'
RIGHT_OUT_CSV = r'C:/Users/yuyan.shi/Desktop/test files/right_test.csv'
COMBINED_OUT_CSV = r'C:/Users/yuyan.shi/Desktop/test files/mid_servo_2.csv'

df = pd.read_csv(INPUT_CSV)

# Coerce every column to numeric; rows that fail to parse become NaN and are
# dropped (removes header garbage / blank rows from the recording).
df = df.apply(pd.to_numeric, errors='coerce')
df = df.dropna()

# Scatter plot of all available left/right servo angle pairs.
plt.scatter(df['left_rel_angle'], df['right_rel_angle'])
plt.xlabel('Left servo angle(deg)')
plt.ylabel('Right servo angle(deg)')
plt.title('Plot of left and right servo values')
plt.show()

# Scatter plot of all available roll/pitch pairs.
plt.scatter(df['roll'], df['pitch'])
plt.xlabel('Roll(deg)')
plt.ylabel('Pitch(deg)')
plt.title('Plot of roll and pitch values')
plt.show()

# Quantise roll/pitch to whole degrees (truncation toward zero, same as the
# previous astype cast) so repeated measurements collapse onto the same cell.
# Plain int avoids the silent wrap-around an int8 cast causes for any value
# outside [-128, 127].
df['roll'] = df['roll'].astype(int)
df['pitch'] = df['pitch'].astype(int)

# Collect the data sets that produced the same (pitch, roll) output and
# average their left/right servo angles.  groupby already returns the group
# keys sorted ascending, so no separate sort pass is needed.
df_sorted = df.groupby(['pitch', 'roll']).mean().reset_index()
df_sorted['left_rel_angle'] = df_sorted['left_rel_angle'].astype(int)
df_sorted['right_rel_angle'] = df_sorted['right_rel_angle'].astype(int)

# Reshape into lookup tables: row index = pitch, column index = roll.
df_sorted_left = df_sorted.pivot(index='pitch', columns='roll', values='left_rel_angle')
df_sorted_right = df_sorted.pivot(index='pitch', columns='roll', values='right_rel_angle')

# Fill every empty cell from its nearest available neighbour in the same row
# (backward then forward along the roll axis).
df_sorted_left.bfill(axis='columns', inplace=True)
df_sorted_left.ffill(axis='columns', inplace=True)
df_sorted_right.bfill(axis='columns', inplace=True)
df_sorted_right.ffill(axis='columns', inplace=True)

df_sorted_left = df_sorted_left.astype(int)
df_sorted_right = df_sorted_right.astype(int)

# Save the per-servo lookup tables locally (debugging step).
df_sorted_left.to_csv(LEFT_OUT_CSV)
df_sorted_right.to_csv(RIGHT_OUT_CSV)

# Build the combined table of (left_servo_angle, right_servo_angle) tuples.
# The pitch/roll ranges come from the pivot tables themselves instead of
# hard-coded constants, so the script no longer needs manual range inspection
# and cannot KeyError on a pitch/roll value that is absent from the data.
# int(...) yields plain Python ints so the cells stringify as "(12, 34)"
# regardless of the installed numpy version.
data = [
    [(int(df_sorted_left.at[i, j]), int(df_sorted_right.at[i, j]))
     for j in df_sorted_left.columns]
    for i in df_sorted_left.index
]
df_concat = pd.DataFrame(data=data).astype(str)
df_concat.to_csv(COMBINED_OUT_CSV)
'''
Run the next two lines after you open the csv file and edited the following:
1. change all "(" to "{"
2. change all ")" to "}"
3. delete the first column (index column)
'''
# df_concat = pd.read_csv('C:/Users/yuyan.shi/Desktop/test files/mid_servo_2.csv')
# np.savetxt(r'C:/Users/yuyan.shi/Desktop/test files/mid_servo_2deg_1.h', df_concat, fmt='%s', newline="}, \n {", header="#ifndef NECK_H_\n#define NECK_H_")
|
normal
|
{
"blob_id": "fd7961d3a94b53ae791da696bb2024165db8b8fc",
"index": 5354,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nplt.scatter(df['left_rel_angle'], df['right_rel_angle'])\nplt.xlabel('Left servo angle(deg)')\nplt.ylabel('Right servo angle(deg)')\nplt.title('Plot of left and right servo values')\nplt.show()\nplt.scatter(df['roll'], df['pitch'])\nplt.xlabel('Roll(deg)')\nplt.ylabel('Pitch(deg)')\nplt.title('Plot of roll and pitch values')\nplt.show()\n<mask token>\ndf_sorted_left.bfill(axis='columns', inplace=True)\ndf_sorted_left.ffill(axis='columns', inplace=True)\ndf_sorted_right.bfill(axis='columns', inplace=True)\ndf_sorted_right.ffill(axis='columns', inplace=True)\n<mask token>\ndf_sorted_left.to_csv('C:/Users/yuyan.shi/Desktop/test files/left_test.csv')\ndf_sorted_right.to_csv('C:/Users/yuyan.shi/Desktop/test files/right_test.csv')\n<mask token>\nfor i in range(-55, 52):\n row = []\n for j in range(-21, 23):\n tup = df_sorted_left[j][i], df_sorted_right[j][i]\n row.append(tup)\n data.append(row)\n<mask token>\ndf_concat.to_csv('C:/Users/yuyan.shi/Desktop/test files/mid_servo_2.csv')\n<mask token>\n",
"step-3": "<mask token>\ndf = pd.read_csv(\n 'C:/Users/yuyan.shi/Desktop/work/head-neck/kinematics/tabblepeggy reference tables/mid_servo_angle_2deg_3.csv'\n )\ndf = df.apply(pd.to_numeric, errors='coerce')\ndf = df.dropna()\nplt.scatter(df['left_rel_angle'], df['right_rel_angle'])\nplt.xlabel('Left servo angle(deg)')\nplt.ylabel('Right servo angle(deg)')\nplt.title('Plot of left and right servo values')\nplt.show()\nplt.scatter(df['roll'], df['pitch'])\nplt.xlabel('Roll(deg)')\nplt.ylabel('Pitch(deg)')\nplt.title('Plot of roll and pitch values')\nplt.show()\ndf['roll'] = df['roll'].astype('int8')\ndf['pitch'] = df['pitch'].astype('int8')\ndf_sorted = df.sort_values(by=['roll', 'pitch']).reset_index(drop=True)\ndf_sorted = df.groupby(['pitch', 'roll']).mean().reset_index()\ndf_sorted['left_rel_angle'] = df_sorted['left_rel_angle'].astype('int8')\ndf_sorted['right_rel_angle'] = df_sorted['right_rel_angle'].astype('int8')\ndf_sorted['servo_angles'] = df_sorted[['left_rel_angle', 'right_rel_angle']\n ].apply(tuple, axis=1)\ndf_sorted_left = df_sorted.pivot(index='pitch', columns='roll', values=\n 'left_rel_angle')\ndf_sorted_right = df_sorted.pivot(index='pitch', columns='roll', values=\n 'right_rel_angle')\ndf_sorted_left.bfill(axis='columns', inplace=True)\ndf_sorted_left.ffill(axis='columns', inplace=True)\ndf_sorted_right.bfill(axis='columns', inplace=True)\ndf_sorted_right.ffill(axis='columns', inplace=True)\ndf_sorted_left = df_sorted_left.astype('int8')\ndf_sorted_right = df_sorted_right.astype('int8')\ndf_sorted_left.to_csv('C:/Users/yuyan.shi/Desktop/test files/left_test.csv')\ndf_sorted_right.to_csv('C:/Users/yuyan.shi/Desktop/test files/right_test.csv')\ndata = []\nrow = []\nfor i in range(-55, 52):\n row = []\n for j in range(-21, 23):\n tup = df_sorted_left[j][i], df_sorted_right[j][i]\n row.append(tup)\n data.append(row)\ndf_concat = pd.DataFrame(data=data)\ndf_concat = df_concat.astype(str)\ndf_concat.to_csv('C:/Users/yuyan.shi/Desktop/test 
files/mid_servo_2.csv')\n<mask token>\n",
"step-4": "import pandas as pd\nimport csv\nimport numpy as np\nimport matplotlib.pyplot as plt\ndf = pd.read_csv(\n 'C:/Users/yuyan.shi/Desktop/work/head-neck/kinematics/tabblepeggy reference tables/mid_servo_angle_2deg_3.csv'\n )\ndf = df.apply(pd.to_numeric, errors='coerce')\ndf = df.dropna()\nplt.scatter(df['left_rel_angle'], df['right_rel_angle'])\nplt.xlabel('Left servo angle(deg)')\nplt.ylabel('Right servo angle(deg)')\nplt.title('Plot of left and right servo values')\nplt.show()\nplt.scatter(df['roll'], df['pitch'])\nplt.xlabel('Roll(deg)')\nplt.ylabel('Pitch(deg)')\nplt.title('Plot of roll and pitch values')\nplt.show()\ndf['roll'] = df['roll'].astype('int8')\ndf['pitch'] = df['pitch'].astype('int8')\ndf_sorted = df.sort_values(by=['roll', 'pitch']).reset_index(drop=True)\ndf_sorted = df.groupby(['pitch', 'roll']).mean().reset_index()\ndf_sorted['left_rel_angle'] = df_sorted['left_rel_angle'].astype('int8')\ndf_sorted['right_rel_angle'] = df_sorted['right_rel_angle'].astype('int8')\ndf_sorted['servo_angles'] = df_sorted[['left_rel_angle', 'right_rel_angle']\n ].apply(tuple, axis=1)\ndf_sorted_left = df_sorted.pivot(index='pitch', columns='roll', values=\n 'left_rel_angle')\ndf_sorted_right = df_sorted.pivot(index='pitch', columns='roll', values=\n 'right_rel_angle')\ndf_sorted_left.bfill(axis='columns', inplace=True)\ndf_sorted_left.ffill(axis='columns', inplace=True)\ndf_sorted_right.bfill(axis='columns', inplace=True)\ndf_sorted_right.ffill(axis='columns', inplace=True)\ndf_sorted_left = df_sorted_left.astype('int8')\ndf_sorted_right = df_sorted_right.astype('int8')\ndf_sorted_left.to_csv('C:/Users/yuyan.shi/Desktop/test files/left_test.csv')\ndf_sorted_right.to_csv('C:/Users/yuyan.shi/Desktop/test files/right_test.csv')\ndata = []\nrow = []\nfor i in range(-55, 52):\n row = []\n for j in range(-21, 23):\n tup = df_sorted_left[j][i], df_sorted_right[j][i]\n row.append(tup)\n data.append(row)\ndf_concat = pd.DataFrame(data=data)\ndf_concat = 
df_concat.astype(str)\ndf_concat.to_csv('C:/Users/yuyan.shi/Desktop/test files/mid_servo_2.csv')\n<mask token>\n",
"step-5": "import pandas as pd\r\nimport csv\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n#import csv file with recorded left, right servo angles and their corresponding roll and pitch values\r\ndf = pd.read_csv('C:/Users/yuyan.shi/Desktop/work/head-neck/kinematics/tabblepeggy reference tables/mid_servo_angle_2deg_3.csv') #change address to csv file address\r\n\r\n#remove all the NaN rows\r\ndf = df.apply (pd.to_numeric, errors='coerce')\r\ndf = df.dropna()\r\n\r\n#scatter plot of all avaiable left and right servo angles\r\nplt.scatter(df['left_rel_angle'], df['right_rel_angle'])\r\nplt.xlabel('Left servo angle(deg)')\r\nplt.ylabel('Right servo angle(deg)')\r\nplt.title('Plot of left and right servo values')\r\nplt.show()\r\n\r\n#scatter plot of all avaiable roll and pitch angles\r\nplt.scatter(df['roll'], df['pitch'])\r\nplt.xlabel('Roll(deg)')\r\nplt.ylabel('Pitch(deg)')\r\nplt.title('Plot of roll and pitch values')\r\nplt.show()\r\n\r\n#change to integer\t\r\ndf['roll'] = df['roll'].astype('int8')\r\ndf['pitch'] = df['pitch'].astype('int8')\r\n\r\n#sort df by roll(ascending) and then pitch(ascending) \r\ndf_sorted = df.sort_values(by=['roll', 'pitch']).reset_index(drop=True)\r\n\r\n#group dataframe by roll and pitch values (i.e. 
collect the data sets with the same roll and pitch outputs) and calculate the mean for left and right servo values\r\ndf_sorted = df.groupby(['pitch','roll']).mean().reset_index()\r\n\r\n#change left and right servo values to integer\r\ndf_sorted['left_rel_angle'] = df_sorted['left_rel_angle'].astype('int8')\r\ndf_sorted['right_rel_angle'] = df_sorted['right_rel_angle'].astype('int8')\r\n\r\n#group left and right servo value together into a tuple\r\ndf_sorted['servo_angles'] = df_sorted[['left_rel_angle', 'right_rel_angle']].apply(tuple, axis=1)\r\n\r\n#change table format to row index:pitch, column index: roll, create two tables with left and right servo angles\r\ndf_sorted_left = df_sorted.pivot(index ='pitch', columns='roll', values='left_rel_angle')\r\ndf_sorted_right = df_sorted.pivot(index ='pitch', columns='roll', values='right_rel_angle')\r\n\r\n#for every cell that is empty, write it a value of it's left or right most adjacent available cell\r\ndf_sorted_left.bfill(axis ='columns', inplace = True)\r\ndf_sorted_left.ffill(axis ='columns', inplace = True)\r\ndf_sorted_right.bfill(axis ='columns', inplace = True)\r\ndf_sorted_right.ffill(axis ='columns', inplace = True)\r\n\r\n#change table type to integer\r\ndf_sorted_left = df_sorted_left.astype('int8')\r\ndf_sorted_right = df_sorted_right.astype('int8') \r\n\r\n#save the left and right servo table files locally (debugging step)\r\ndf_sorted_left.to_csv (r'C:/Users/yuyan.shi/Desktop/test files/left_test.csv')\r\ndf_sorted_right.to_csv (r'C:/Users/yuyan.shi/Desktop/test files/right_test.csv')\r\n\r\n#create empty data table and row \r\ndata = []\r\nrow = []\r\n\r\nfor i in range(-55,52): #for i in pitch range (rows); check the left_test.csv or right_test.csv file to find out the range of pitch values \r\n\trow = []\r\n\tfor j in range(-21, 23): #for j in roll range (column); check the left_test.csv or right_test.csv file to find out the range of pitch values\r\n\t\ttup = (df_sorted_left[j][i], 
df_sorted_right[j][i]) #create a tuple in the format of (left_serve_angle, right_servo_angle)\r\n\t\t# print(i,j)\r\n\t\t# print(tup)\r\n\t\trow.append(tup) #apend tuple to row\r\n\tdata.append(row) #append row to data\r\n\r\ndf_concat = pd.DataFrame(data=data)\r\n# df_concat = df_concat.applymap(str)\r\ndf_concat = df_concat.astype(str)\r\ndf_concat.to_csv (r'C:/Users/yuyan.shi/Desktop/test files/mid_servo_2.csv')\r\n\r\n# df_concat = df_concat.str.replace('(','{')\r\n# df_concat = df_concat.str.replace(')','},')\r\n# df_concat.to_csv (r'C:/Users/yuyan.shi/Desktop/test files/tabblepeggy_2_angle_reference_TEST.csv')\r\n\r\n'''\r\nRun the next two lines after you open the csv file and edited the following:\r\n1. change all \"(\" to \"{\"\r\n2. change all \")\" to \"}\"\r\n3. delete the first column (index column) \r\n'''\r\n# df_concat = pd.read_csv('C:/Users/yuyan.shi/Desktop/test files/mid_servo_2.csv')\r\n# np.savetxt(r'C:/Users/yuyan.shi/Desktop/test files/mid_servo_2deg_1.h', df_concat, fmt='%s', newline=\"}, \\n {\", header=\"#ifndef NECK_H_\\n#define NECK_H_\")\r\n\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class HomePageView(TemplateView):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class HomePageView(TemplateView):
template_name = 'base.html'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def index(request):
context = 'Welcome home'
return render(request, 'base.html', {'context': context})
class HomePageView(TemplateView):
template_name = 'base.html'
<|reserved_special_token_1|>
from django.shortcuts import render
from django.views.generic import TemplateView
def index(request):
context = 'Welcome home'
return render(request, 'base.html', {'context': context})
class HomePageView(TemplateView):
template_name = 'base.html'
<|reserved_special_token_1|>
from django.shortcuts import render
from django.views.generic import TemplateView
# Create your views here.
def index(request):
context = 'Welcome home'
return render(request,'base.html',{'context':context})
class HomePageView(TemplateView):
template_name = 'base.html'
|
flexible
|
{
"blob_id": "f0a54feaa165a393c4e87cbac2a38347633acf5a",
"index": 1425,
"step-1": "<mask token>\n\n\nclass HomePageView(TemplateView):\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass HomePageView(TemplateView):\n template_name = 'base.html'\n",
"step-3": "<mask token>\n\n\ndef index(request):\n context = 'Welcome home'\n return render(request, 'base.html', {'context': context})\n\n\nclass HomePageView(TemplateView):\n template_name = 'base.html'\n",
"step-4": "from django.shortcuts import render\nfrom django.views.generic import TemplateView\n\n\ndef index(request):\n context = 'Welcome home'\n return render(request, 'base.html', {'context': context})\n\n\nclass HomePageView(TemplateView):\n template_name = 'base.html'\n",
"step-5": "from django.shortcuts import render\nfrom django.views.generic import TemplateView\n\n# Create your views here.\ndef index(request):\n context = 'Welcome home'\n return render(request,'base.html',{'context':context})\n\nclass HomePageView(TemplateView):\n template_name = 'base.html'\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('app.root_path===', app.root_path)
print('app.static_url_path===', app.static_url_path)
app.secret_key('uaremyhero')
<|reserved_special_token_0|>
Session(app)
app.register_blueprint(login.login)
app.register_blueprint()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app = Flask(__name__, template_folder='templates', static_url_path='static')
app.debug = True
print('app.root_path===', app.root_path)
print('app.static_url_path===', app.static_url_path)
app.secret_key('uaremyhero')
app.config['SESSION_TYPE'] = 'redis'
app.config['SESSION_REDIS'] = redis.Redis(host='127.0.0.1', port='6379',
password='123123')
app.config['SESSION_KEY_PREFIX'] = 'session:'
app.config['SESSION_PERMANENT'] = False
app.config['SESSION_USE_SIGNER'] = False
Session(app)
app.register_blueprint(login.login)
app.register_blueprint()
<|reserved_special_token_1|>
from flask import Flask, Blueprint
from .views import login
from flask_session import Session
import redis
app = Flask(__name__, template_folder='templates', static_url_path='static')
app.debug = True
print('app.root_path===', app.root_path)
print('app.static_url_path===', app.static_url_path)
app.secret_key('uaremyhero')
app.config['SESSION_TYPE'] = 'redis'
app.config['SESSION_REDIS'] = redis.Redis(host='127.0.0.1', port='6379',
password='123123')
app.config['SESSION_KEY_PREFIX'] = 'session:'
app.config['SESSION_PERMANENT'] = False
app.config['SESSION_USE_SIGNER'] = False
Session(app)
app.register_blueprint(login.login)
app.register_blueprint()
<|reserved_special_token_1|>
from flask import Flask,Blueprint
from .views import login
from flask_session import Session
import redis
app = Flask(__name__,template_folder='templates',static_url_path='static')
app.debug = True
print('app.root_path===',app.root_path)
print('app.static_url_path===',app.static_url_path)
app.secret_key('uaremyhero')
app.config['SESSION_TYPE'] = 'redis' # session类型为redis
app.config['SESSION_REDIS'] = redis.Redis(host='127.0.0.1', port='6379', password='123123') # 用于连接redis的配置
app.config['SESSION_KEY_PREFIX'] = 'session:' # 保存到session中的值的前缀
app.config['SESSION_PERMANENT'] = False # 如果设置为True,则关闭浏览器session就失效。
app.config['SESSION_USE_SIGNER'] = False # 是否对发送到浏览器上 session:cookie值进行加密
Session(app)
app.register_blueprint(login.login)
app.register_blueprint()
|
flexible
|
{
"blob_id": "9d2fdf47b5c4b56cc0177a9c0a86b1ed57c88d49",
"index": 4151,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('app.root_path===', app.root_path)\nprint('app.static_url_path===', app.static_url_path)\napp.secret_key('uaremyhero')\n<mask token>\nSession(app)\napp.register_blueprint(login.login)\napp.register_blueprint()\n",
"step-3": "<mask token>\napp = Flask(__name__, template_folder='templates', static_url_path='static')\napp.debug = True\nprint('app.root_path===', app.root_path)\nprint('app.static_url_path===', app.static_url_path)\napp.secret_key('uaremyhero')\napp.config['SESSION_TYPE'] = 'redis'\napp.config['SESSION_REDIS'] = redis.Redis(host='127.0.0.1', port='6379',\n password='123123')\napp.config['SESSION_KEY_PREFIX'] = 'session:'\napp.config['SESSION_PERMANENT'] = False\napp.config['SESSION_USE_SIGNER'] = False\nSession(app)\napp.register_blueprint(login.login)\napp.register_blueprint()\n",
"step-4": "from flask import Flask, Blueprint\nfrom .views import login\nfrom flask_session import Session\nimport redis\napp = Flask(__name__, template_folder='templates', static_url_path='static')\napp.debug = True\nprint('app.root_path===', app.root_path)\nprint('app.static_url_path===', app.static_url_path)\napp.secret_key('uaremyhero')\napp.config['SESSION_TYPE'] = 'redis'\napp.config['SESSION_REDIS'] = redis.Redis(host='127.0.0.1', port='6379',\n password='123123')\napp.config['SESSION_KEY_PREFIX'] = 'session:'\napp.config['SESSION_PERMANENT'] = False\napp.config['SESSION_USE_SIGNER'] = False\nSession(app)\napp.register_blueprint(login.login)\napp.register_blueprint()\n",
"step-5": "from flask import Flask,Blueprint\nfrom .views import login\nfrom flask_session import Session\nimport redis\n\n\napp = Flask(__name__,template_folder='templates',static_url_path='static')\napp.debug = True\n\nprint('app.root_path===',app.root_path)\nprint('app.static_url_path===',app.static_url_path)\n\napp.secret_key('uaremyhero')\n\napp.config['SESSION_TYPE'] = 'redis' # session类型为redis\napp.config['SESSION_REDIS'] = redis.Redis(host='127.0.0.1', port='6379', password='123123') # 用于连接redis的配置\napp.config['SESSION_KEY_PREFIX'] = 'session:' # 保存到session中的值的前缀\napp.config['SESSION_PERMANENT'] = False # 如果设置为True,则关闭浏览器session就失效。\napp.config['SESSION_USE_SIGNER'] = False # 是否对发送到浏览器上 session:cookie值进行加密\nSession(app)\n\n\n\napp.register_blueprint(login.login)\napp.register_blueprint()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from page_parsing import get_item_info_from,url_list,item_info,get_links_from
# ================================================= < <链接去重 > > =====================================================
# 设计思路:
# 1.分两个数据库,第一个用于只用于存放抓取下来的 url (ulr_list);第二个则储存 url 对应的物品详情信息(item_info)
# 2.在抓取过程中在第二个数据库中写入数据的同时,新增一个字段(key) 'index_url' 即该详情对应的链接
# 3.若抓取中断,在第二个存放详情页信息的数据库中的 url 字段应该是第一个数据库中 url 集合的子集
# 4.两个集合的 url 相减得出剩下应该抓取的 url 还有哪些
db_urls = [item['url'] for item in url_list.find()] # 用列表解析式装入所有要爬取的链接
index_urls = [item['url'] for item in item_info.find()] # 所引出详情信息数据库中所有的现存的 url 字段
x = set(db_urls) # 转换成集合的数据结构
y = set(index_urls)
rest_of_urls = x-y # 相减
# ======================================================================================================================
|
normal
|
{
"blob_id": "4f2017632d905c80c35fbaead83ecb7e1ac95760",
"index": 9868,
"step-1": " from page_parsing import get_item_info_from,url_list,item_info,get_links_from\n\n\n # ================================================= < <链接去重 > > =====================================================\n\n # 设计思路:\n # 1.分两个数据库,第一个用于只用于存放抓取下来的 url (ulr_list);第二个则储存 url 对应的物品详情信息(item_info)\n # 2.在抓取过程中在第二个数据库中写入数据的同时,新增一个字段(key) 'index_url' 即该详情对应的链接\n # 3.若抓取中断,在第二个存放详情页信息的数据库中的 url 字段应该是第一个数据库中 url 集合的子集\n # 4.两个集合的 url 相减得出剩下应该抓取的 url 还有哪些\n\n\n db_urls = [item['url'] for item in url_list.find()] # 用列表解析式装入所有要爬取的链接\n index_urls = [item['url'] for item in item_info.find()] # 所引出详情信息数据库中所有的现存的 url 字段\n x = set(db_urls) # 转换成集合的数据结构\n y = set(index_urls)\n rest_of_urls = x-y # 相减\n\n # ======================================================================================================================\n\n\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import pymongo
client = pymongo.MongoClient("mongodb://localhost:27017/")
# Database Name
db = client["Test"]
# Collection Name
col = db["C100"]
x = col.find_one()
print(x)
|
normal
|
{
"blob_id": "7d10fb58aa5213516c656c05966fcaad6868ae81",
"index": 1548,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(x)\n",
"step-3": "<mask token>\nclient = pymongo.MongoClient('mongodb://localhost:27017/')\ndb = client['Test']\ncol = db['C100']\nx = col.find_one()\nprint(x)\n",
"step-4": "import pymongo\nclient = pymongo.MongoClient('mongodb://localhost:27017/')\ndb = client['Test']\ncol = db['C100']\nx = col.find_one()\nprint(x)\n",
"step-5": "import pymongo \n \n \nclient = pymongo.MongoClient(\"mongodb://localhost:27017/\") \n \n# Database Name \ndb = client[\"Test\"] \n \n# Collection Name \ncol = db[\"C100\"] \n \nx = col.find_one() \n \nprint(x)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Databricks notebook source
#import and create sparksession object
from pyspark.sql import SparkSession
spark=SparkSession.builder.appName('rc').getOrCreate()
# COMMAND ----------
#import the required functions and libraries
from pyspark.sql.functions import *
# COMMAND ----------
# Convert csv file to Spark DataFrame (Databricks version)
def loadDataFrame(fileName, fileSchema):
return (spark.read.format("csv")
.schema(fileSchema)
.option("header", "true")
.option("mode", "DROPMALFORMED")
.csv("/FileStore/tables/%s" % (fileName)))
# COMMAND ----------
from pyspark.sql.types import *
movieRatingSchema = StructType([
StructField("userId", IntegerType(), True),
StructField("movieId", IntegerType(), True),
StructField("rating", FloatType(), True),
StructField("timestamp", StringType(), True)])
movieSchema = StructType([
StructField("movieId", IntegerType(), True),
StructField("title", StringType(), True),
StructField("genres", StringType(), True)])
MovieRatingsDF = loadDataFrame("ratings.csv", movieRatingSchema).cache()
MoviesDF = loadDataFrame("movies.csv", movieSchema).cache()
# COMMAND ----------
#load the dataset and create sprk dataframe
df = MovieRatingsDF.join(MoviesDF, 'movieId').select(['userId', 'title', 'rating'])
#df=spark.read.csv('movie_ratings_df.csv',inferSchema=True,header=True)
# COMMAND ----------
#validate the shape of the data
print((df.count(),len(df.columns)))
# COMMAND ----------
#check columns in dataframe
df.printSchema()
# COMMAND ----------
#validate few rows of dataframe in random order
df.orderBy(rand()).show(10,False)
# COMMAND ----------
#check number of ratings by each user
df.groupBy('userId').count().orderBy('count',ascending=False).show(10,False)
# COMMAND ----------
#check number of ratings by each user
df.groupBy('userId').count().orderBy('count',ascending=True).show(10,False)
# COMMAND ----------
#number of times movie been rated
df.groupBy('title').count().orderBy('count',ascending=False).show(10,False)
# COMMAND ----------
df.groupBy('title').count().orderBy('count',ascending=True).show(10,False)
# COMMAND ----------
#import String indexer to convert string values to numeric values
from pyspark.ml.feature import StringIndexer,IndexToString
# COMMAND ----------
#creating string indexer to convert the movie title column values into numerical values
stringIndexer = StringIndexer(inputCol="title", outputCol="title_new")
# COMMAND ----------
#applying stringindexer object on dataframe movie title column
model = stringIndexer.fit(df)
# COMMAND ----------
#creating new dataframe with transformed values
indexed = model.transform(df)
# COMMAND ----------
#validate the numerical title values
indexed.show(10)
# COMMAND ----------
#number of times each numerical movie title has been rated
indexed.groupBy('title_new').count().orderBy('count',ascending=False).show(10,False)
# COMMAND ----------
#split the data into training and test datatset
train,test=indexed.randomSplit([0.75,0.25])
# COMMAND ----------
#count number of records in train set
train.count()
# COMMAND ----------
#count number of records in test set
test.count()
# COMMAND ----------
#import ALS recommender function from pyspark ml library
from pyspark.ml.recommendation import ALS
# COMMAND ----------
#Training the recommender model using train datatset
rec=ALS(maxIter=10,regParam=0.01,userCol='userId',itemCol='title_new',ratingCol='rating',nonnegative=True,coldStartStrategy="drop")
# COMMAND ----------
#fit the model on train set
rec_model=rec.fit(train)
# COMMAND ----------
#making predictions on test set
predicted_ratings=rec_model.transform(test)
# COMMAND ----------
#columns in predicted ratings dataframe
predicted_ratings.printSchema()
# COMMAND ----------
#predicted vs actual ratings for test set
predicted_ratings.orderBy(rand()).show(10)
# COMMAND ----------
#importing Regression Evaluator to measure RMSE
from pyspark.ml.evaluation import RegressionEvaluator
# COMMAND ----------
#create Regressor evaluator object for measuring accuracy
evaluator=RegressionEvaluator(metricName='rmse',predictionCol='prediction',labelCol='rating')
# COMMAND ----------
#apply the RE on predictions dataframe to calculate RMSE
rmse=evaluator.evaluate(predicted_ratings)
# COMMAND ----------
#print RMSE error
print(rmse)
# COMMAND ----------
#Recommend top movies which user might like
# COMMAND ----------
#create dataset of all distinct movies
unique_movies=indexed.select('title_new').distinct()
# COMMAND ----------
#number of unique movies
unique_movies.count()
# COMMAND ----------
#assigning alias name 'a' to unique movies df
a = unique_movies.alias('a')
# COMMAND ----------
user_id=85
# COMMAND ----------
#creating another dataframe which contains already watched movie by active user
watched_movies=indexed.filter(indexed['userId'] == user_id).select('title_new').distinct()
# COMMAND ----------
#number of movies already rated
watched_movies.count()
# COMMAND ----------
#assigning alias name 'b' to watched movies df
b=watched_movies.alias('b')
# COMMAND ----------
#joining both tables on left join
total_movies = a.join(b, a.title_new == b.title_new,how='left')
# COMMAND ----------
total_movies.show(10,False)
# COMMAND ----------
#selecting movies which active user is yet to rate or watch
remaining_movies=total_movies.where(col("b.title_new").isNull()).select(a.title_new).distinct()
# COMMAND ----------
#number of movies user is yet to rate
remaining_movies.count()
# COMMAND ----------
#adding new column of user_Id of active useer to remaining movies df
remaining_movies=remaining_movies.withColumn("userId",lit(int(user_id)))
# COMMAND ----------
remaining_movies.show(10,False)
# COMMAND ----------
#making recommendations using ALS recommender model and selecting only top 'n' movies
recommendations=rec_model.transform(remaining_movies).orderBy('prediction',ascending=False)
# COMMAND ----------
recommendations.show(5,False)
# COMMAND ----------
#converting title_new values back to movie titles
movie_title = IndexToString(inputCol="title_new", outputCol="title",labels=model.labels)
final_recommendations=movie_title.transform(recommendations)
# COMMAND ----------
final_recommendations.show(10,False)
# COMMAND ----------
#create function to recommend top 'n' movies to any particular user
def top_movies(user_id,n):
    """
    Display (via DataFrame.show) the top `n` movies that the given user has
    not rated yet but is predicted to like.

    Parameters:
        user_id: id of the active user as it appears in the ratings data.
        n: number of recommendations to display.

    Relies on the notebook-level objects `unique_movies`, `indexed`,
    `rec_model` and `model` built in earlier cells.  Returns None, since
    DataFrame.show only prints.
    """
    # Every known movie sits on the left-hand side of the join.
    every_movie = unique_movies.alias('a')

    # Movies the active user has already rated.
    seen = indexed.filter(indexed['userId'] == user_id).select('title_new')
    seen_alias = seen.alias('b')

    # A left join keeps every movie; rows where b.title_new is NULL are
    # exactly the movies this user has never rated.
    joined = every_movie.join(seen_alias, every_movie.title_new == seen_alias.title_new, how='left')
    unseen = joined.where(col("b.title_new").isNull()).select(every_movie.title_new).distinct()

    # ALS scores (userId, title_new) pairs, so attach the user id.
    unseen = unseen.withColumn("userId", lit(int(user_id)))

    # Score with the trained ALS model and keep only the n best predictions.
    scored = rec_model.transform(unseen).orderBy('prediction', ascending=False).limit(n)

    # Translate numeric title indices back into movie titles.
    title_decoder = IndexToString(inputCol="title_new", outputCol="title", labels=model.labels)
    decoded = title_decoder.transform(scored)

    return decoded.show(n, False)
# COMMAND ----------
# Demo: show the top 10 recommendations for user 85.
top_movies(85,10)
# COMMAND ----------
|
normal
|
{
"blob_id": "d22ebe24605065452ae35c44367ee21a726ae7a1",
"index": 1892,
"step-1": "<mask token>\n\n\ndef loadDataFrame(fileName, fileSchema):\n return spark.read.format('csv').schema(fileSchema).option('header', 'true'\n ).option('mode', 'DROPMALFORMED').csv('/FileStore/tables/%s' % fileName\n )\n\n\n<mask token>\n\n\ndef top_movies(user_id, n):\n \"\"\"\n This function returns the top 'n' movies that user has not seen yet but might like \n \n \"\"\"\n a = unique_movies.alias('a')\n watched_movies = indexed.filter(indexed['userId'] == user_id).select(\n 'title_new')\n b = watched_movies.alias('b')\n total_movies = a.join(b, a.title_new == b.title_new, how='left')\n remaining_movies = total_movies.where(col('b.title_new').isNull()).select(a\n .title_new).distinct()\n remaining_movies = remaining_movies.withColumn('userId', lit(int(user_id)))\n recommendations = rec_model.transform(remaining_movies).orderBy(\n 'prediction', ascending=False).limit(n)\n movie_title = IndexToString(inputCol='title_new', outputCol='title',\n labels=model.labels)\n final_recommendations = movie_title.transform(recommendations)\n return final_recommendations.show(n, False)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef loadDataFrame(fileName, fileSchema):\n return spark.read.format('csv').schema(fileSchema).option('header', 'true'\n ).option('mode', 'DROPMALFORMED').csv('/FileStore/tables/%s' % fileName\n )\n\n\n<mask token>\nprint((df.count(), len(df.columns)))\ndf.printSchema()\ndf.orderBy(rand()).show(10, False)\ndf.groupBy('userId').count().orderBy('count', ascending=False).show(10, False)\ndf.groupBy('userId').count().orderBy('count', ascending=True).show(10, False)\ndf.groupBy('title').count().orderBy('count', ascending=False).show(10, False)\ndf.groupBy('title').count().orderBy('count', ascending=True).show(10, False)\n<mask token>\nindexed.show(10)\nindexed.groupBy('title_new').count().orderBy('count', ascending=False).show(\n 10, False)\n<mask token>\ntrain.count()\ntest.count()\n<mask token>\npredicted_ratings.printSchema()\npredicted_ratings.orderBy(rand()).show(10)\n<mask token>\nprint(rmse)\n<mask token>\nunique_movies.count()\n<mask token>\nwatched_movies.count()\n<mask token>\ntotal_movies.show(10, False)\n<mask token>\nremaining_movies.count()\n<mask token>\nremaining_movies.show(10, False)\n<mask token>\nrecommendations.show(5, False)\n<mask token>\nfinal_recommendations.show(10, False)\n\n\ndef top_movies(user_id, n):\n \"\"\"\n This function returns the top 'n' movies that user has not seen yet but might like \n \n \"\"\"\n a = unique_movies.alias('a')\n watched_movies = indexed.filter(indexed['userId'] == user_id).select(\n 'title_new')\n b = watched_movies.alias('b')\n total_movies = a.join(b, a.title_new == b.title_new, how='left')\n remaining_movies = total_movies.where(col('b.title_new').isNull()).select(a\n .title_new).distinct()\n remaining_movies = remaining_movies.withColumn('userId', lit(int(user_id)))\n recommendations = rec_model.transform(remaining_movies).orderBy(\n 'prediction', ascending=False).limit(n)\n movie_title = IndexToString(inputCol='title_new', outputCol='title',\n labels=model.labels)\n 
final_recommendations = movie_title.transform(recommendations)\n return final_recommendations.show(n, False)\n\n\ntop_movies(85, 10)\n",
"step-3": "<mask token>\nspark = SparkSession.builder.appName('rc').getOrCreate()\n<mask token>\n\n\ndef loadDataFrame(fileName, fileSchema):\n return spark.read.format('csv').schema(fileSchema).option('header', 'true'\n ).option('mode', 'DROPMALFORMED').csv('/FileStore/tables/%s' % fileName\n )\n\n\n<mask token>\nmovieRatingSchema = StructType([StructField('userId', IntegerType(), True),\n StructField('movieId', IntegerType(), True), StructField('rating',\n FloatType(), True), StructField('timestamp', StringType(), True)])\nmovieSchema = StructType([StructField('movieId', IntegerType(), True),\n StructField('title', StringType(), True), StructField('genres',\n StringType(), True)])\nMovieRatingsDF = loadDataFrame('ratings.csv', movieRatingSchema).cache()\nMoviesDF = loadDataFrame('movies.csv', movieSchema).cache()\ndf = MovieRatingsDF.join(MoviesDF, 'movieId').select(['userId', 'title',\n 'rating'])\nprint((df.count(), len(df.columns)))\ndf.printSchema()\ndf.orderBy(rand()).show(10, False)\ndf.groupBy('userId').count().orderBy('count', ascending=False).show(10, False)\ndf.groupBy('userId').count().orderBy('count', ascending=True).show(10, False)\ndf.groupBy('title').count().orderBy('count', ascending=False).show(10, False)\ndf.groupBy('title').count().orderBy('count', ascending=True).show(10, False)\n<mask token>\nstringIndexer = StringIndexer(inputCol='title', outputCol='title_new')\nmodel = stringIndexer.fit(df)\nindexed = model.transform(df)\nindexed.show(10)\nindexed.groupBy('title_new').count().orderBy('count', ascending=False).show(\n 10, False)\ntrain, test = indexed.randomSplit([0.75, 0.25])\ntrain.count()\ntest.count()\n<mask token>\nrec = ALS(maxIter=10, regParam=0.01, userCol='userId', itemCol='title_new',\n ratingCol='rating', nonnegative=True, coldStartStrategy='drop')\nrec_model = rec.fit(train)\npredicted_ratings = rec_model.transform(test)\npredicted_ratings.printSchema()\npredicted_ratings.orderBy(rand()).show(10)\n<mask token>\nevaluator = 
RegressionEvaluator(metricName='rmse', predictionCol=\n 'prediction', labelCol='rating')\nrmse = evaluator.evaluate(predicted_ratings)\nprint(rmse)\nunique_movies = indexed.select('title_new').distinct()\nunique_movies.count()\na = unique_movies.alias('a')\nuser_id = 85\nwatched_movies = indexed.filter(indexed['userId'] == user_id).select(\n 'title_new').distinct()\nwatched_movies.count()\nb = watched_movies.alias('b')\ntotal_movies = a.join(b, a.title_new == b.title_new, how='left')\ntotal_movies.show(10, False)\nremaining_movies = total_movies.where(col('b.title_new').isNull()).select(a\n .title_new).distinct()\nremaining_movies.count()\nremaining_movies = remaining_movies.withColumn('userId', lit(int(user_id)))\nremaining_movies.show(10, False)\nrecommendations = rec_model.transform(remaining_movies).orderBy('prediction',\n ascending=False)\nrecommendations.show(5, False)\nmovie_title = IndexToString(inputCol='title_new', outputCol='title', labels\n =model.labels)\nfinal_recommendations = movie_title.transform(recommendations)\nfinal_recommendations.show(10, False)\n\n\ndef top_movies(user_id, n):\n \"\"\"\n This function returns the top 'n' movies that user has not seen yet but might like \n \n \"\"\"\n a = unique_movies.alias('a')\n watched_movies = indexed.filter(indexed['userId'] == user_id).select(\n 'title_new')\n b = watched_movies.alias('b')\n total_movies = a.join(b, a.title_new == b.title_new, how='left')\n remaining_movies = total_movies.where(col('b.title_new').isNull()).select(a\n .title_new).distinct()\n remaining_movies = remaining_movies.withColumn('userId', lit(int(user_id)))\n recommendations = rec_model.transform(remaining_movies).orderBy(\n 'prediction', ascending=False).limit(n)\n movie_title = IndexToString(inputCol='title_new', outputCol='title',\n labels=model.labels)\n final_recommendations = movie_title.transform(recommendations)\n return final_recommendations.show(n, False)\n\n\ntop_movies(85, 10)\n",
"step-4": "from pyspark.sql import SparkSession\nspark = SparkSession.builder.appName('rc').getOrCreate()\nfrom pyspark.sql.functions import *\n\n\ndef loadDataFrame(fileName, fileSchema):\n return spark.read.format('csv').schema(fileSchema).option('header', 'true'\n ).option('mode', 'DROPMALFORMED').csv('/FileStore/tables/%s' % fileName\n )\n\n\nfrom pyspark.sql.types import *\nmovieRatingSchema = StructType([StructField('userId', IntegerType(), True),\n StructField('movieId', IntegerType(), True), StructField('rating',\n FloatType(), True), StructField('timestamp', StringType(), True)])\nmovieSchema = StructType([StructField('movieId', IntegerType(), True),\n StructField('title', StringType(), True), StructField('genres',\n StringType(), True)])\nMovieRatingsDF = loadDataFrame('ratings.csv', movieRatingSchema).cache()\nMoviesDF = loadDataFrame('movies.csv', movieSchema).cache()\ndf = MovieRatingsDF.join(MoviesDF, 'movieId').select(['userId', 'title',\n 'rating'])\nprint((df.count(), len(df.columns)))\ndf.printSchema()\ndf.orderBy(rand()).show(10, False)\ndf.groupBy('userId').count().orderBy('count', ascending=False).show(10, False)\ndf.groupBy('userId').count().orderBy('count', ascending=True).show(10, False)\ndf.groupBy('title').count().orderBy('count', ascending=False).show(10, False)\ndf.groupBy('title').count().orderBy('count', ascending=True).show(10, False)\nfrom pyspark.ml.feature import StringIndexer, IndexToString\nstringIndexer = StringIndexer(inputCol='title', outputCol='title_new')\nmodel = stringIndexer.fit(df)\nindexed = model.transform(df)\nindexed.show(10)\nindexed.groupBy('title_new').count().orderBy('count', ascending=False).show(\n 10, False)\ntrain, test = indexed.randomSplit([0.75, 0.25])\ntrain.count()\ntest.count()\nfrom pyspark.ml.recommendation import ALS\nrec = ALS(maxIter=10, regParam=0.01, userCol='userId', itemCol='title_new',\n ratingCol='rating', nonnegative=True, coldStartStrategy='drop')\nrec_model = 
rec.fit(train)\npredicted_ratings = rec_model.transform(test)\npredicted_ratings.printSchema()\npredicted_ratings.orderBy(rand()).show(10)\nfrom pyspark.ml.evaluation import RegressionEvaluator\nevaluator = RegressionEvaluator(metricName='rmse', predictionCol=\n 'prediction', labelCol='rating')\nrmse = evaluator.evaluate(predicted_ratings)\nprint(rmse)\nunique_movies = indexed.select('title_new').distinct()\nunique_movies.count()\na = unique_movies.alias('a')\nuser_id = 85\nwatched_movies = indexed.filter(indexed['userId'] == user_id).select(\n 'title_new').distinct()\nwatched_movies.count()\nb = watched_movies.alias('b')\ntotal_movies = a.join(b, a.title_new == b.title_new, how='left')\ntotal_movies.show(10, False)\nremaining_movies = total_movies.where(col('b.title_new').isNull()).select(a\n .title_new).distinct()\nremaining_movies.count()\nremaining_movies = remaining_movies.withColumn('userId', lit(int(user_id)))\nremaining_movies.show(10, False)\nrecommendations = rec_model.transform(remaining_movies).orderBy('prediction',\n ascending=False)\nrecommendations.show(5, False)\nmovie_title = IndexToString(inputCol='title_new', outputCol='title', labels\n =model.labels)\nfinal_recommendations = movie_title.transform(recommendations)\nfinal_recommendations.show(10, False)\n\n\ndef top_movies(user_id, n):\n \"\"\"\n This function returns the top 'n' movies that user has not seen yet but might like \n \n \"\"\"\n a = unique_movies.alias('a')\n watched_movies = indexed.filter(indexed['userId'] == user_id).select(\n 'title_new')\n b = watched_movies.alias('b')\n total_movies = a.join(b, a.title_new == b.title_new, how='left')\n remaining_movies = total_movies.where(col('b.title_new').isNull()).select(a\n .title_new).distinct()\n remaining_movies = remaining_movies.withColumn('userId', lit(int(user_id)))\n recommendations = rec_model.transform(remaining_movies).orderBy(\n 'prediction', ascending=False).limit(n)\n movie_title = IndexToString(inputCol='title_new', 
outputCol='title',\n labels=model.labels)\n final_recommendations = movie_title.transform(recommendations)\n return final_recommendations.show(n, False)\n\n\ntop_movies(85, 10)\n",
"step-5": "# Databricks notebook source\n#import and create sparksession object\nfrom pyspark.sql import SparkSession \nspark=SparkSession.builder.appName('rc').getOrCreate()\n\n# COMMAND ----------\n\n#import the required functions and libraries\nfrom pyspark.sql.functions import *\n\n# COMMAND ----------\n\n# Convert csv file to Spark DataFrame (Databricks version)\ndef loadDataFrame(fileName, fileSchema):\n return (spark.read.format(\"csv\")\n .schema(fileSchema)\n .option(\"header\", \"true\")\n .option(\"mode\", \"DROPMALFORMED\")\n .csv(\"/FileStore/tables/%s\" % (fileName)))\n\n# COMMAND ----------\n\nfrom pyspark.sql.types import *\n\nmovieRatingSchema = StructType([\n StructField(\"userId\", IntegerType(), True),\n StructField(\"movieId\", IntegerType(), True),\n StructField(\"rating\", FloatType(), True),\n StructField(\"timestamp\", StringType(), True)])\n\nmovieSchema = StructType([\n StructField(\"movieId\", IntegerType(), True),\n StructField(\"title\", StringType(), True),\n StructField(\"genres\", StringType(), True)])\n\nMovieRatingsDF = loadDataFrame(\"ratings.csv\", movieRatingSchema).cache()\nMoviesDF = loadDataFrame(\"movies.csv\", movieSchema).cache()\n\n# COMMAND ----------\n\n#load the dataset and create sprk dataframe\ndf = MovieRatingsDF.join(MoviesDF, 'movieId').select(['userId', 'title', 'rating'])\n\n\n#df=spark.read.csv('movie_ratings_df.csv',inferSchema=True,header=True)\n\n# COMMAND ----------\n\n#validate the shape of the data \nprint((df.count(),len(df.columns)))\n\n# COMMAND ----------\n\n#check columns in dataframe\ndf.printSchema()\n\n# COMMAND ----------\n\n#validate few rows of dataframe in random order\ndf.orderBy(rand()).show(10,False)\n\n# COMMAND ----------\n\n#check number of ratings by each user\ndf.groupBy('userId').count().orderBy('count',ascending=False).show(10,False)\n\n# COMMAND ----------\n\n#check number of ratings by each user\ndf.groupBy('userId').count().orderBy('count',ascending=True).show(10,False)\n\n# 
COMMAND ----------\n\n#number of times movie been rated \ndf.groupBy('title').count().orderBy('count',ascending=False).show(10,False)\n\n# COMMAND ----------\n\ndf.groupBy('title').count().orderBy('count',ascending=True).show(10,False)\n\n# COMMAND ----------\n\n#import String indexer to convert string values to numeric values\nfrom pyspark.ml.feature import StringIndexer,IndexToString\n\n# COMMAND ----------\n\n#creating string indexer to convert the movie title column values into numerical values\nstringIndexer = StringIndexer(inputCol=\"title\", outputCol=\"title_new\")\n\n# COMMAND ----------\n\n#applying stringindexer object on dataframe movie title column\nmodel = stringIndexer.fit(df)\n\n# COMMAND ----------\n\n#creating new dataframe with transformed values\nindexed = model.transform(df)\n\n# COMMAND ----------\n\n#validate the numerical title values\nindexed.show(10)\n\n# COMMAND ----------\n\n#number of times each numerical movie title has been rated \nindexed.groupBy('title_new').count().orderBy('count',ascending=False).show(10,False)\n\n# COMMAND ----------\n\n#split the data into training and test datatset\ntrain,test=indexed.randomSplit([0.75,0.25])\n\n# COMMAND ----------\n\n#count number of records in train set\ntrain.count()\n\n# COMMAND ----------\n\n#count number of records in test set\ntest.count()\n\n# COMMAND ----------\n\n#import ALS recommender function from pyspark ml library\nfrom pyspark.ml.recommendation import ALS\n\n# COMMAND ----------\n\n#Training the recommender model using train datatset\nrec=ALS(maxIter=10,regParam=0.01,userCol='userId',itemCol='title_new',ratingCol='rating',nonnegative=True,coldStartStrategy=\"drop\")\n\n# COMMAND ----------\n\n#fit the model on train set\nrec_model=rec.fit(train)\n\n# COMMAND ----------\n\n#making predictions on test set \npredicted_ratings=rec_model.transform(test)\n\n# COMMAND ----------\n\n#columns in predicted ratings dataframe\npredicted_ratings.printSchema()\n\n# COMMAND 
----------\n\n#predicted vs actual ratings for test set \npredicted_ratings.orderBy(rand()).show(10)\n\n# COMMAND ----------\n\n#importing Regression Evaluator to measure RMSE\nfrom pyspark.ml.evaluation import RegressionEvaluator\n\n# COMMAND ----------\n\n#create Regressor evaluator object for measuring accuracy\nevaluator=RegressionEvaluator(metricName='rmse',predictionCol='prediction',labelCol='rating')\n\n# COMMAND ----------\n\n#apply the RE on predictions dataframe to calculate RMSE\nrmse=evaluator.evaluate(predicted_ratings)\n\n# COMMAND ----------\n\n#print RMSE error\nprint(rmse)\n\n# COMMAND ----------\n\n#Recommend top movies which user might like \n\n# COMMAND ----------\n\n#create dataset of all distinct movies \nunique_movies=indexed.select('title_new').distinct()\n\n# COMMAND ----------\n\n#number of unique movies\nunique_movies.count()\n\n# COMMAND ----------\n\n#assigning alias name 'a' to unique movies df\na = unique_movies.alias('a')\n\n# COMMAND ----------\n\nuser_id=85\n\n# COMMAND ----------\n\n#creating another dataframe which contains already watched movie by active user \nwatched_movies=indexed.filter(indexed['userId'] == user_id).select('title_new').distinct()\n\n# COMMAND ----------\n\n#number of movies already rated \nwatched_movies.count()\n\n# COMMAND ----------\n\n#assigning alias name 'b' to watched movies df\nb=watched_movies.alias('b')\n\n# COMMAND ----------\n\n#joining both tables on left join \ntotal_movies = a.join(b, a.title_new == b.title_new,how='left')\n\n\n# COMMAND ----------\n\ntotal_movies.show(10,False)\n\n# COMMAND ----------\n\n#selecting movies which active user is yet to rate or watch\nremaining_movies=total_movies.where(col(\"b.title_new\").isNull()).select(a.title_new).distinct()\n\n# COMMAND ----------\n\n#number of movies user is yet to rate \nremaining_movies.count()\n\n# COMMAND ----------\n\n#adding new column of user_Id of active useer to remaining movies df 
\nremaining_movies=remaining_movies.withColumn(\"userId\",lit(int(user_id)))\n\n\n# COMMAND ----------\n\nremaining_movies.show(10,False)\n\n# COMMAND ----------\n\n#making recommendations using ALS recommender model and selecting only top 'n' movies\nrecommendations=rec_model.transform(remaining_movies).orderBy('prediction',ascending=False)\n\n# COMMAND ----------\n\nrecommendations.show(5,False)\n\n# COMMAND ----------\n\n#converting title_new values back to movie titles\nmovie_title = IndexToString(inputCol=\"title_new\", outputCol=\"title\",labels=model.labels)\n\nfinal_recommendations=movie_title.transform(recommendations)\n\n\n# COMMAND ----------\n\nfinal_recommendations.show(10,False)\n\n# COMMAND ----------\n\n#create function to recommend top 'n' movies to any particular user\ndef top_movies(user_id,n):\n \"\"\"\n This function returns the top 'n' movies that user has not seen yet but might like \n \n \"\"\"\n #assigning alias name 'a' to unique movies df\n a = unique_movies.alias('a')\n \n #creating another dataframe which contains already watched movie by active user \n watched_movies=indexed.filter(indexed['userId'] == user_id).select('title_new')\n \n #assigning alias name 'b' to watched movies df\n b=watched_movies.alias('b')\n \n #joining both tables on left join \n total_movies = a.join(b, a.title_new == b.title_new,how='left')\n \n #selecting movies which active user is yet to rate or watch\n remaining_movies=total_movies.where(col(\"b.title_new\").isNull()).select(a.title_new).distinct()\n \n \n #adding new column of user_Id of active useer to remaining movies df \n remaining_movies=remaining_movies.withColumn(\"userId\",lit(int(user_id)))\n \n \n #making recommendations using ALS recommender model and selecting only top 'n' movies\n recommendations=rec_model.transform(remaining_movies).orderBy('prediction',ascending=False).limit(n)\n \n \n #adding columns of movie titles in recommendations\n movie_title = IndexToString(inputCol=\"title_new\", 
outputCol=\"title\",labels=model.labels)\n final_recommendations=movie_title.transform(recommendations)\n \n #return the recommendations to active user\n return final_recommendations.show(n,False)\n\n# COMMAND ----------\n\ntop_movies(85,10)\n\n# COMMAND ----------\n\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for group in groups:
allananswers = set(list('abcdefghijklmnopqrstuvwxyz'))
answers = set()
people = group.split('\n')
for person in people:
allananswers = allananswers & set(list(person))
for answer in person:
if answer not in answers:
answers.add(answer)
count = count + 1
groupanswers.append(allananswers)
print(count)
<|reserved_special_token_0|>
for group in groupanswers:
answer2 = answer2 + len(group)
print(answer2)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
groups = input.split('\n\n')
count = 0
groupanswers = []
for group in groups:
allananswers = set(list('abcdefghijklmnopqrstuvwxyz'))
answers = set()
people = group.split('\n')
for person in people:
allananswers = allananswers & set(list(person))
for answer in person:
if answer not in answers:
answers.add(answer)
count = count + 1
groupanswers.append(allananswers)
print(count)
answer2 = 0
for group in groupanswers:
answer2 = answer2 + len(group)
print(answer2)
<|reserved_special_token_1|>
from day6input import *

# Each blank-line-separated paragraph is one group; each line within a group
# is one person's string of "yes" answers (characters a-z).
groups = input.split('\n\n')

# Part 1: questions answered "yes" by ANYONE in the group, summed over groups.
# Part 2: questions answered "yes" by EVERYONE in the group, summed over groups.
# The original hand-rolled both with membership loops and kept a per-group
# list only to re-sum it afterwards; stdlib set.union / intersection do the
# same work in one pass.
anyone_total = 0
everyone_total = 0
ALPHABET = set('abcdefghijklmnopqrstuvwxyz')
for group in groups:
    person_answers = [set(person) for person in group.split('\n')]
    anyone_total += len(set.union(*person_answers))
    # Starting from the full alphabet (as the original did) keeps any stray
    # non-letter character out of the everyone-count.
    everyone_total += len(ALPHABET.intersection(*person_answers))

print(anyone_total)
print(everyone_total)
<|reserved_special_token_1|>
from day6input import *

# Each blank-line-separated paragraph is one group; each line is one person.
groups = input.split('\n\n')

count = 0  # part 1: answers given by ANYONE in a group, summed over groups
groupanswers = [] #2 per-group set of answers given by EVERYONE (part 2)

for group in groups:

    # Part 2: start from the full alphabet and intersect with each person.
    allananswers = set(list('abcdefghijklmnopqrstuvwxyz')) #2
    answers = set()  # part 1: distinct answers seen so far in this group

    people = group.split('\n')
    for person in people:

        allananswers = allananswers & set(list(person)) #2 intersection

        # part 1: count each answer the first time it appears in the group
        for answer in person:
            if answer not in answers:
                answers.add(answer)
                count = count + 1

    groupanswers.append(allananswers) #2 remember this group's result

print(count)  # part 1 answer

# part 2: total questions to which everyone in each group answered "yes"
answer2 = 0
for group in groupanswers:
    answer2 = answer2 + len(group)
print(answer2)
|
flexible
|
{
"blob_id": "8f1ec65ca60605747f46f596e0b5848922bcd0b5",
"index": 2127,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor group in groups:\n allananswers = set(list('abcdefghijklmnopqrstuvwxyz'))\n answers = set()\n people = group.split('\\n')\n for person in people:\n allananswers = allananswers & set(list(person))\n for answer in person:\n if answer not in answers:\n answers.add(answer)\n count = count + 1\n groupanswers.append(allananswers)\nprint(count)\n<mask token>\nfor group in groupanswers:\n answer2 = answer2 + len(group)\nprint(answer2)\n",
"step-3": "<mask token>\ngroups = input.split('\\n\\n')\ncount = 0\ngroupanswers = []\nfor group in groups:\n allananswers = set(list('abcdefghijklmnopqrstuvwxyz'))\n answers = set()\n people = group.split('\\n')\n for person in people:\n allananswers = allananswers & set(list(person))\n for answer in person:\n if answer not in answers:\n answers.add(answer)\n count = count + 1\n groupanswers.append(allananswers)\nprint(count)\nanswer2 = 0\nfor group in groupanswers:\n answer2 = answer2 + len(group)\nprint(answer2)\n",
"step-4": "from day6input import *\ngroups = input.split('\\n\\n')\ncount = 0\ngroupanswers = []\nfor group in groups:\n allananswers = set(list('abcdefghijklmnopqrstuvwxyz'))\n answers = set()\n people = group.split('\\n')\n for person in people:\n allananswers = allananswers & set(list(person))\n for answer in person:\n if answer not in answers:\n answers.add(answer)\n count = count + 1\n groupanswers.append(allananswers)\nprint(count)\nanswer2 = 0\nfor group in groupanswers:\n answer2 = answer2 + len(group)\nprint(answer2)\n",
"step-5": "from day6input import *\n\ngroups = input.split('\\n\\n')\n\ncount = 0 #1\ngroupanswers = [] #2\n\nfor group in groups:\n\n allananswers = set(list('abcdefghijklmnopqrstuvwxyz')) #2\n answers = set() #1\n\n people = group.split('\\n')\n for person in people:\n\n allananswers = allananswers & set(list(person)) #2\n\n #1\n for answer in person:\n if answer not in answers:\n answers.add(answer)\n count = count + 1\n\n groupanswers.append(allananswers) #2\n\nprint(count) #1\n\n#####2\nanswer2 = 0\nfor group in groupanswers:\n answer2 = answer2 + len(group)\nprint(answer2)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os
import io
import yaml
from collections import OrderedDict
from rich.console import Console
from malwarebazaar.platform import get_config_path, get_config_dir
class Config(OrderedDict):
    """Ordered-dict view of the on-disk YAML configuration file.

    The config is loaded from ``get_config_path()`` on construction; if the
    file is missing or empty the process exits with a hint on stderr instead
    of raising.  Use :meth:`get_instance` to share one loaded instance.
    """

    # Cached singleton; populated lazily by get_instance().
    instance = None

    def __init__(self):
        """Load the YAML config file and populate this mapping from it."""
        ec = Console(stderr=True, style="bold red")
        Config.ensure_path(ec)
        config_file = get_config_path()
        if not os.path.exists(config_file) or os.path.getsize(config_file) == 0:
            ec.print("Config does not exist, please run the init command.")
            exit(-1)

        with io.open(config_file, "r") as handle:
            config_data = yaml.load(handle.read(), Loader=yaml.Loader)

        super().__init__(**config_data)

    @staticmethod
    def get_instance():
        """Return the shared Config, creating and caching it on first use.

        Bug fix: the previous implementation never assigned
        ``Config.instance``, so every call re-read the file and returned a
        fresh object.  The ``is None`` test also avoids re-creating the
        instance when the cached config happens to be an empty (falsy)
        mapping.
        """
        if Config.instance is None:
            Config.instance = Config()
        return Config.instance

    @staticmethod
    def ensure_path(ec: Console = Console(stderr=True, style="bold red")):
        """Create the config directory if needed; exit if it is a plain file.

        Note: the default ``ec`` Console is created once at import time and
        shared across calls (standard default-argument semantics); harmless
        here because it is only written to.
        """
        config_dir = get_config_dir()

        if not os.path.exists(config_dir):
            os.mkdir(config_dir)

        if not os.path.isdir(config_dir):
            ec.print(f"{config_dir} should be a dir, but is a file.")
            exit(-1)

    @staticmethod
    def init_config(key: str):
        """Write a fresh config file containing *key* and default CSV columns.

        Returns True on success; raises IOError if nothing was written.
        """
        Config.ensure_path()
        with io.open(get_config_path(), "w") as handle:
            written = handle.write(yaml.dump(
                {
                    "api_key": key,
                    "csv_columns": {
                        "md5": "md5_hash",
                        "sha1": "sha1_hash",
                        "sha256": "sha256_hash",
                        "imphash": "imphash",
                        "signature": "signature",
                        "tags": "tags"
                    }
                },
                Dumper=yaml.Dumper
            ))

        # `written` (renamed from `bytes`, which shadowed the builtin) is the
        # character count reported by TextIOBase.write.
        if written <= 0:
            raise IOError("Writing to config file failed.")
        return True
|
normal
|
{
"blob_id": "5a9e0b220d2c94aea7e3d67338771cf48c3aec8f",
"index": 6439,
"step-1": "<mask token>\n\n\nclass Config(OrderedDict):\n <mask token>\n\n def __init__(self):\n ec = Console(stderr=True, style='bold red')\n Config.ensure_path(ec)\n config_file = get_config_path()\n if not os.path.exists(config_file) or os.path.getsize(config_file\n ) == 0:\n ec.print('Config does not exist, please run the init command.')\n exit(-1)\n with io.open(config_file, 'r') as handle:\n config_data = yaml.load(handle.read(), Loader=yaml.Loader)\n super().__init__(**config_data)\n\n @staticmethod\n def get_instance():\n if not Config.instance:\n return Config()\n return Config.instance\n\n @staticmethod\n def ensure_path(ec: Console=Console(stderr=True, style='bold red')):\n config_dir = get_config_dir()\n if not os.path.exists(config_dir):\n os.mkdir(config_dir)\n if not os.path.isdir(config_dir):\n ec.print(f'{config_dir} should be a dir, but is a file.')\n exit(-1)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Config(OrderedDict):\n <mask token>\n\n def __init__(self):\n ec = Console(stderr=True, style='bold red')\n Config.ensure_path(ec)\n config_file = get_config_path()\n if not os.path.exists(config_file) or os.path.getsize(config_file\n ) == 0:\n ec.print('Config does not exist, please run the init command.')\n exit(-1)\n with io.open(config_file, 'r') as handle:\n config_data = yaml.load(handle.read(), Loader=yaml.Loader)\n super().__init__(**config_data)\n\n @staticmethod\n def get_instance():\n if not Config.instance:\n return Config()\n return Config.instance\n\n @staticmethod\n def ensure_path(ec: Console=Console(stderr=True, style='bold red')):\n config_dir = get_config_dir()\n if not os.path.exists(config_dir):\n os.mkdir(config_dir)\n if not os.path.isdir(config_dir):\n ec.print(f'{config_dir} should be a dir, but is a file.')\n exit(-1)\n\n @staticmethod\n def init_config(key: str):\n Config.ensure_path()\n with io.open(get_config_path(), 'w') as handle:\n bytes = handle.write(yaml.dump({'api_key': key, 'csv_columns':\n {'md5': 'md5_hash', 'sha1': 'sha1_hash', 'sha256':\n 'sha256_hash', 'imphash': 'imphash', 'signature':\n 'signature', 'tags': 'tags'}}, Dumper=yaml.Dumper))\n if bytes <= 0:\n raise IOError(f'Writing to config file failed.')\n return True\n",
"step-3": "<mask token>\n\n\nclass Config(OrderedDict):\n instance = None\n\n def __init__(self):\n ec = Console(stderr=True, style='bold red')\n Config.ensure_path(ec)\n config_file = get_config_path()\n if not os.path.exists(config_file) or os.path.getsize(config_file\n ) == 0:\n ec.print('Config does not exist, please run the init command.')\n exit(-1)\n with io.open(config_file, 'r') as handle:\n config_data = yaml.load(handle.read(), Loader=yaml.Loader)\n super().__init__(**config_data)\n\n @staticmethod\n def get_instance():\n if not Config.instance:\n return Config()\n return Config.instance\n\n @staticmethod\n def ensure_path(ec: Console=Console(stderr=True, style='bold red')):\n config_dir = get_config_dir()\n if not os.path.exists(config_dir):\n os.mkdir(config_dir)\n if not os.path.isdir(config_dir):\n ec.print(f'{config_dir} should be a dir, but is a file.')\n exit(-1)\n\n @staticmethod\n def init_config(key: str):\n Config.ensure_path()\n with io.open(get_config_path(), 'w') as handle:\n bytes = handle.write(yaml.dump({'api_key': key, 'csv_columns':\n {'md5': 'md5_hash', 'sha1': 'sha1_hash', 'sha256':\n 'sha256_hash', 'imphash': 'imphash', 'signature':\n 'signature', 'tags': 'tags'}}, Dumper=yaml.Dumper))\n if bytes <= 0:\n raise IOError(f'Writing to config file failed.')\n return True\n",
"step-4": "import os\nimport io\nimport yaml\nfrom collections import OrderedDict\nfrom rich.console import Console\nfrom malwarebazaar.platform import get_config_path, get_config_dir\n\n\nclass Config(OrderedDict):\n instance = None\n\n def __init__(self):\n ec = Console(stderr=True, style='bold red')\n Config.ensure_path(ec)\n config_file = get_config_path()\n if not os.path.exists(config_file) or os.path.getsize(config_file\n ) == 0:\n ec.print('Config does not exist, please run the init command.')\n exit(-1)\n with io.open(config_file, 'r') as handle:\n config_data = yaml.load(handle.read(), Loader=yaml.Loader)\n super().__init__(**config_data)\n\n @staticmethod\n def get_instance():\n if not Config.instance:\n return Config()\n return Config.instance\n\n @staticmethod\n def ensure_path(ec: Console=Console(stderr=True, style='bold red')):\n config_dir = get_config_dir()\n if not os.path.exists(config_dir):\n os.mkdir(config_dir)\n if not os.path.isdir(config_dir):\n ec.print(f'{config_dir} should be a dir, but is a file.')\n exit(-1)\n\n @staticmethod\n def init_config(key: str):\n Config.ensure_path()\n with io.open(get_config_path(), 'w') as handle:\n bytes = handle.write(yaml.dump({'api_key': key, 'csv_columns':\n {'md5': 'md5_hash', 'sha1': 'sha1_hash', 'sha256':\n 'sha256_hash', 'imphash': 'imphash', 'signature':\n 'signature', 'tags': 'tags'}}, Dumper=yaml.Dumper))\n if bytes <= 0:\n raise IOError(f'Writing to config file failed.')\n return True\n",
"step-5": "import os\nimport io\nimport yaml\nfrom collections import OrderedDict\n\nfrom rich.console import Console\n\nfrom malwarebazaar.platform import get_config_path, get_config_dir\n\n\nclass Config(OrderedDict):\n instance = None\n\n def __init__(self):\n ec = Console(stderr=True, style=\"bold red\")\n Config.ensure_path(ec)\n config_file = get_config_path()\n if not os.path.exists(config_file) or os.path.getsize(config_file) == 0:\n ec.print(\"Config does not exist, please run the init command.\")\n exit(-1)\n\n with io.open(config_file, \"r\") as handle:\n config_data = yaml.load(handle.read(), Loader=yaml.Loader)\n\n super().__init__(**config_data)\n\n @staticmethod\n def get_instance():\n if not Config.instance:\n return Config()\n return Config.instance\n\n @staticmethod\n def ensure_path(ec: Console = Console(stderr=True, style=\"bold red\")):\n config_dir = get_config_dir()\n\n if not os.path.exists(config_dir):\n os.mkdir(config_dir)\n\n if not os.path.isdir(config_dir):\n ec.print(f\"{config_dir} should be a dir, but is a file.\")\n exit(-1)\n\n @staticmethod\n def init_config(key: str):\n Config.ensure_path()\n with io.open(get_config_path(), \"w\") as handle:\n bytes = handle.write(yaml.dump(\n {\n \"api_key\": key,\n \"csv_columns\": {\n \"md5\": \"md5_hash\",\n \"sha1\": \"sha1_hash\",\n \"sha256\": \"sha256_hash\",\n \"imphash\": \"imphash\",\n \"signature\": \"signature\",\n \"tags\": \"tags\"\n }\n },\n Dumper=yaml.Dumper\n ))\n\n if bytes <= 0:\n raise IOError(f\"Writing to config file failed.\")\n return True\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
from .ctoybox import Game, State as FrameState, Input
import numpy as np
from PIL import Image
import json
from typing import Dict, Any, List, Tuple, Union, Optional
def json_str(js: Union[Dict[str, Any], Input, str]) -> str:
    """
    Turn an object into a JSON string.

    Handles dictionaries, the Input class, and JSON you've already prepared
    (e.g., strings, which are passed through untouched).

    Parameters:
        js: a dict, an Input instance, or an already-serialized JSON string.

    Returns:
        The JSON-encoded string.

    Raises:
        ValueError: if ``js`` is not one of the supported types.
    """
    # isinstance (rather than ``type(x) is``) also accepts subclasses,
    # e.g. an OrderedDict standing in for a plain dict.
    if isinstance(js, str):
        return js
    if isinstance(js, dict):
        return json.dumps(js)
    if isinstance(js, Input):
        return json.dumps(js.__dict__)
    raise ValueError(
        "Unknown json type: %s (only str and dict supported)" % type(js)
    )
class Simulator(object):
    """
    The Simulator is an instance of a game configuration.

    You can call new_game on it to begin.
    """

    def __init__(self, game_name, sim=None):
        """
        Construct a new instance.

        Parameters:
            game_name: one of "breakout", "amidar", etc.
            sim: optionally a Rust pointer to an existing simulator.
        """
        # Build a fresh Rust-side game unless a pointer was handed to us.
        self.__sim = Game(game_name) if sim is None else sim
        self.game_name = game_name

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        pass

    def set_seed(self, seed: int):
        """Configure the random number generator that spawns new game states.

        Parameters:
            seed: a parameter to reset the built-in random number generator.
        """
        self.__sim.seed(seed)

    def get_frame_size(self) -> Tuple[int, int]:
        """Get the (width, height) in pixels of the frames this game renders."""
        return self.__sim.frame_size()

    def get_frame_width(self) -> int:
        """Get the width in pixels of the frames this game renders."""
        return self.get_frame_size()[0]

    def get_frame_height(self) -> int:
        """Get the height in pixels of the frames this game renders."""
        return self.get_frame_size()[1]

    def get_simulator(self) -> Game:
        """Get access to the raw simulator pointer."""
        return self.__sim

    def new_game(self) -> "State":
        """Start a new game."""
        fresh = self.__sim.new_game()
        return State(self, fresh)

    def state_from_json(self, js: Union[Dict[str, Any], str]) -> "State":
        """Generate a State from the state json and this configuration.

        Parameters:
            js: a JSON object or string containing a serialized state.
        """
        restored: FrameState = self.__sim.new_state(json_str(js))
        return State(self, state=restored)

    def to_json(self) -> Dict[str, Any]:
        """Get the configuration of this simulator/config as JSON"""
        return json.loads(self.__sim.to_json())

    def from_json(self, config_js: Union[Dict[str, Any], str]):
        """Mutably update this simulator/config with the replacement json."""
        replacement = json_str(config_js)
        self.__sim = self.__sim.from_json(replacement)

    def schema_for_state(self) -> Dict[str, Any]:
        """Get the JSON Schema for any state for this game."""
        return json.loads(self.__sim.frame_schema())

    def schema_for_config(self) -> Dict[str, Any]:
        """Get the JSON Schema for any config for this game."""
        return json.loads(self.__sim.config_schema())
class State(object):
    """
    The State object represents everything the game needs to know about any single simulated frame.

    You can rewind in time by storing and restoring these state representations.

    - Access the json: ``to_json``
    - Access the image: ``render_frame``
    """

    def __init__(self, sim: Simulator, state=None):
        """
        Construct a new State instance wrapper.

        Parameters:
            sim: The simulator responsible for this state.
            state: Optional pointer to a state to use (otherwise it will create one).
        """
        # A reference to the simulator that created this state.
        self.sim = sim
        # The raw pointer to the state itself.
        #
        # Bug fix: the fallback used to read ``sim.__sim``, but inside this
        # class that private name mangles to ``sim._State__sim`` -- an
        # attribute Simulator does not have (its own is ``_Simulator__sim``)
        # -- so constructing a State without an explicit ``state`` always
        # raised AttributeError. Go through the public accessor instead.
        self.__state = state if state is not None else sim.get_simulator().new_game()
        # The name of the game that created this state.
        self.game_name = sim.game_name

    def __enter__(self):
        return self

    def __del__(self):
        # Drop the Rust-side pointer and the simulator reference.
        self.__state = None
        self.sim = None

    def __exit__(self, exc_type, exc_value, traceback):
        self.__del__()

    def clone(self) -> "State":
        """Quickly make a copy of this state; should be more efficient than saving the JSON."""
        return State(self.sim, state=self.get_state().copy())

    def get_state(self) -> FrameState:
        """Get the raw state pointer.

        Raises:
            AssertionError: if the state has already been deleted.
        """
        assert self.__state is not None
        return self.__state

    def lives(self) -> int:
        """How many lives are remaining in the current state?"""
        return self.__state.lives()

    def level(self) -> int:
        """How many levels have been completed in the current state?"""
        return self.__state.level()

    def score(self) -> int:
        """How many points have been earned in the current state?"""
        return self.__state.score()

    def game_over(self):
        """Determine whether the game has ended; i.e., the player has run out of lives.

        Equivalent to ``self.lives() < 0``.
        """
        return self.lives() < 0

    def query_json(
        self, query: str, args: Union[Dict[str, Any], str] = "null"
    ) -> Dict[str, Any]:
        """
        Ask a question of the Rust state; queries are currently implemented manually.

        Parameters:
            query: the message to send to the rust state.
            args: the arguments to send to the rust state, defaults to "null".

        Returns:
            response: A JSON response loaded to python objects.

        Raises:
            ValueError: if anything goes wrong with the query

        ```python
        with Toybox("breakout") as tb:
            tb.query_json("bricks_remaining")
        ```
        """
        return json.loads(self.__state.query(json_str(query), json_str(args)))

    def render_frame(self, sim: Simulator, grayscale: bool = True) -> np.array:
        """Generate an image from the current frame state object.

        Parameters:
            sim: the simulator to use; this tells us the width/height necessary.
            grayscale: True if we want to render in grayscale rather than in color (RGBA).
        """
        # NOTE(review): despite the flag's name, ``grayscale=True`` returns
        # the 3-channel RGB rendering, not the 1-channel grayscale one
        # (see ``render_frame_grayscale``). Preserved as-is because callers
        # such as ``Toybox.get_state`` depend on the resulting array shape;
        # confirm against upstream before changing.
        if grayscale:
            return self.render_frame_rgb(sim)
        else:
            return self.render_frame_color(sim)

    def render_frame_color(self, sim: Simulator) -> np.array:
        """Generate an RGBA image from the current frame state object.

        Parameters:
            sim: the simulator to use; this tells us the width/height necessary.
        """
        (w, h) = sim.get_frame_size()
        rgba = 4
        size = h * w * rgba
        frame = bytearray(size)
        self.get_state().render_into_buffer(frame, True)
        return np.asarray(frame, dtype=np.uint8).reshape(h, w, rgba)

    def render_frame_rgb(self, sim: Simulator) -> np.array:
        """Generate an RGB image from the current frame state object.

        Parameters:
            sim: the simulator to use; this tells us the width/height necessary.
        """
        # Render RGBA and drop the alpha channel.
        rgba_frame = self.render_frame_color(sim)
        return rgba_frame[:, :, :3]

    def render_frame_grayscale(self, sim: Simulator) -> np.array:
        """Generate a grayscale image from the current frame state object.

        Parameters:
            sim: the simulator to use; this tells us the width/height necessary.
        """
        (w, h) = sim.get_frame_size()
        depth = 1
        size = h * w * depth
        frame = bytearray(size)
        self.get_state().render_into_buffer(frame, False)
        return np.asarray(frame, dtype=np.uint8).reshape(h, w, depth)

    def to_json(self) -> Dict[str, Any]:
        """Get a JSON representation of the state."""
        return json.loads(self.get_state().to_json())
class Toybox(object):
    """
    This is a stateful representation of Toybox -- since it manages memory, we provide ``__enter__`` and ``__exit__`` usage for Python's with-blocks:

    ```python
    with Toybox("amidar") as tb:
        print(tb.get_score())
    # the 'tb' variable only lives in the block.
    ```

    Important:
        Note how we should use this in a with-block; this will clean up pointers and prevent memory leaks.

    """

    def __init__(
        self,
        game_name: str,
        grayscale: bool = True,
        frameskip: int = 0,
        seed: Optional[int] = None,
        withstate: Optional[dict] = None,
    ):
        """
        Construct a new Toybox state/game wrapper. Use this in a with block!

        Parameters:
            game_name: One of "breakout", "space_invaders", "amidar", etc.
            grayscale: Toybox can render directly to grayscale, saving time. Default is True.
            frameskip: When an action is submitted, for how many extra frames should it be applied? Default is 0.
            seed: An optional seed for the config's random number generator.
            withstate: An optional serialized state (python dict) to restore immediately.
        """
        self.game_name = game_name
        self.frames_per_action = frameskip + 1
        self.rsimulator = Simulator(game_name)
        self.rstate = self.rsimulator.new_game()
        self.grayscale = grayscale
        # Bug fix: compare against None explicitly -- ``if seed:`` silently
        # discarded the perfectly valid seed value 0.
        if seed is not None:
            self.set_seed(seed)
            self.new_game()
        if withstate is not None:
            self.write_state_json(withstate)

    def new_game(self):
        """
        Modify this Toybox wrapper to have a new_game state.

        Important:
            This discards the old state!
        """
        old_state = self.rstate
        del old_state
        self.rstate = self.rsimulator.new_game()

    def get_height(self) -> int:
        """Get the height of the rendered game in pixels."""
        return self.rsimulator.get_frame_height()

    def get_width(self) -> int:
        """Get the width of the rendered game in pixels."""
        return self.rsimulator.get_frame_width()

    def get_legal_action_set(self) -> List[int]:
        """Get the set of actions consumed by this game: they are ALE numbered."""
        sim = self.rsimulator.get_simulator()
        return sim.legal_actions()

    def apply_ale_action(self, action_int: int):
        """Takes an integer corresponding to an action, as specified in ALE.

        This applies the action *k* times, where *k* based on the frameskip passed to the Toybox constructor.

        ```python
        ALE_INPUT_MAPPING = {
            0 : "NOOP",
            1 : "FIRE",
            2 : "UP",
            3 : "RIGHT",
            4 : "LEFT",
            5 : "DOWN",
            6 : "UPRIGHT",
            7 : "UPLEFT",
            8 : "DOWNRIGHT",
            9 : "DOWNLEFT",
            10 : "UPFIRE",
            11 : "RIGHTFIRE",
            12 : "LEFTFIRE",
            13 : "DOWNFIRE",
            14 : "UPRIGHTFIRE",
            15 : "UPLEFTFIRE",
            16 : "DOWNRIGHTFIRE",
            17 : "DOWNLEFTFIRE"
        }
        ```

        Parameters:
            action_int: A number from 0 to 17 inclusive.

        Raises:
            ValueError: if the underlying simulator rejects the action.
        """
        # implement frameskip(k) by sending the action (k+1) times every time we have an action.
        for _ in range(self.frames_per_action):
            if not self.rstate.get_state().apply_ale_action(action_int):
                raise ValueError(
                    "Expected to apply action, but failed: {0}".format(action_int)
                )

    def apply_action(self, action_input_obj: Input):
        """Takes an [ctoybox.Input][] action and applies it - unlike the ALE actions (which allow some permutations) this allows for fine-grained button pressing.

        This applies the action *k* times, where *k* based on the frameskip passed to the Toybox constructor.

        Parameters:
            action_input_obj: An instance of the [ctoybox.Input][] class.
        """
        # implement frameskip(k) by sending the action (k+1) times every time we have an action.
        for _ in range(self.frames_per_action):
            self.rstate.get_state().apply_action(action_input_obj)

    def get_state(self) -> np.array:
        """This state here actually refers to the graphical, RGBA or grayscale representation of the current state."""
        return self.rstate.render_frame(self.rsimulator, self.grayscale)

    def set_seed(self, seed: int):
        """Control the random number generator of the config -- only affects a new_game.

        Parameters:
            seed: a parameter to reset the built-in random number generator.
        """
        self.rsimulator.set_seed(seed)
        # NOTE: this does not restart the game; call ``new_game`` afterwards
        # for the new seed to take effect.

    def save_frame_image(self, path: str, grayscale: bool = False):
        """Save the current frame image to a PNG file.

        Parameters:
            path: the filename to save to.
            grayscale: whether images should be saved in color or black & white.
        """
        if grayscale:
            img = Image.fromarray(
                self.rstate.render_frame_grayscale(self.rsimulator), "L"
            )
        else:
            img = Image.fromarray(
                self.rstate.render_frame_color(self.rsimulator), "RGBA"
            )
        img.save(path, format="png")

    def get_rgb_frame(self) -> np.array:
        """Get the RGB frame as a numpy array."""
        return self.rstate.render_frame_rgb(self.rsimulator)

    def get_score(self) -> int:
        """Access the current score.

        Returns:
            The number of points earned in the current state."""
        return self.rstate.score()

    def get_lives(self) -> int:
        """Access the number of lives.

        Returns:
            The number of lives remaining in the current state."""
        return self.rstate.lives()

    def get_level(self) -> int:
        """
        Access the number of levels.

        Returns:
            The number of levels completed in the current state."""
        return self.rstate.level()

    def game_over(self) -> bool:
        """
        Check for game over condition.

        Returns:
            ``True`` if the player has run out of lives in the current state.
        """
        return self.rstate.game_over()

    def state_to_json(self) -> Dict[str, Any]:
        """Get the state's JSON representation as a python object."""
        return self.rstate.to_json()

    def to_state_json(self) -> Dict[str, Any]:
        """Get the state's JSON representation as a python dict.

        Important:
            This method is deprecated; please use ``state_to_json`` instead!
        """
        return self.state_to_json()

    def config_to_json(self) -> Dict[str, Any]:
        """Get the config's JSON representation as a python dict."""
        return self.rsimulator.to_json()

    def write_state_json(self, js: Dict[str, Any]):
        """Overwrite the state's JSON representation from a python dict.

        Parameters:
            js: the python representation of the JSON state.
        """
        old_state = self.rstate
        del old_state
        self.rstate = self.rsimulator.state_from_json(js)

    def write_config_json(self, config_js: Dict[str, Any]):
        """Overwrite the config's JSON representation from a python dict.

        It is likely that some changes will be seen until you call new_game()

        Parameters:
            config_js: the python representation of the config JSON
        """
        # from_json replaces simulator!
        self.rsimulator.from_json(config_js)
        # new_game replaces state!
        self.new_game()

    def query_state_json(
        self, query: str, args: Union[Dict[str, Any], str] = "null"
    ) -> Dict[str, Any]:
        """Submit a query to the game's query system -- faster than accessing the whole JSON for quick introspection.

        Parameters:
            query: the query string to send to the game.
            args: a JSON argument to attach to the query string.
        """
        return self.rstate.query_json(query, args)

    def __del__(self):
        # Release the Rust-side state and simulator pointers.
        self.rstate = None
        self.rsimulator = None

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.__del__()

    def schema_for_state(self) -> Dict[str, Any]:
        """Get the JSON Schema for the frame State object."""
        return self.rsimulator.schema_for_state()

    def schema_for_config(self) -> Dict[str, Any]:
        """Get the JSON Schema for the Config object."""
        return self.rsimulator.schema_for_config()
|
normal
|
{
"blob_id": "c77e320cee90e8210e4c13d854649b15f6e24180",
"index": 2798,
"step-1": "<mask token>\n\n\nclass Toybox(object):\n <mask token>\n\n def __init__(self, game_name: str, grayscale: bool=True, frameskip: int\n =0, seed: Optional[int]=None, withstate: Optional[dict]=None):\n \"\"\"\n Construct a new Toybox state/game wrapper. Use this in a with block!\n\n Parameters:\n game_name: One of \"breakout\", \"space_invaders\", \"amidar\", etc.\n grayscale: Toybox can render directly to grayscale, saving time. Default is True.\n frameskip: When an action is submitted, for how many extra frames should it be applied? Default is 0.\n seed: The seed \n \"\"\"\n self.game_name = game_name\n self.frames_per_action = frameskip + 1\n self.rsimulator = Simulator(game_name)\n self.rstate = self.rsimulator.new_game()\n self.grayscale = grayscale\n if seed:\n self.set_seed(seed)\n self.new_game()\n if withstate:\n self.write_state_json(withstate)\n\n def new_game(self):\n \"\"\"\n Modify this Toybox wrapper to have a new_game state.\n\n Important:\n This discards the old state!\n \"\"\"\n old_state = self.rstate\n del old_state\n self.rstate = self.rsimulator.new_game()\n\n def get_height(self) ->int:\n \"\"\"Get the height of the rendered game in pixels.\"\"\"\n return self.rsimulator.get_frame_height()\n\n def get_width(self) ->int:\n \"\"\"Get the width of the rendered game in pixels.\"\"\"\n return self.rsimulator.get_frame_width()\n\n def get_legal_action_set(self) ->List[int]:\n \"\"\"Get the set of actions consumed by this game: they are ALE numbered.\"\"\"\n sim = self.rsimulator.get_simulator()\n return sim.legal_actions()\n\n def apply_ale_action(self, action_int: int):\n \"\"\"Takes an integer corresponding to an action, as specified in ALE.\n \n This applies the action *k* times, where *k* based on the frameskip passed to the Toybox constructor.\n \n ```python\n ALE_INPUT_MAPPING = {\n 0 : \"NOOP\",\n 1 : \"FIRE\",\n 2 : \"UP\",\n 3 : \"RIGHT\",\n 4 : \"LEFT\",\n 5 : \"DOWN\",\n 6 : \"UPRIGHT\",\n 7 : \"UPLEFT\",\n 8 : \"DOWNRIGHT\",\n 9 
: \"DOWNLEFT\",\n 10 : \"UPFIRE\",\n 11 : \"RIGHTFIRE\",\n 12 : \"LEFTFIRE\",\n 13 : \"DOWNFIRE\",\n 14 : \"UPRIGHTFIRE\",\n 15 : \"UPLEFTFIRE\",\n 16 : \"DOWNRIGHTFIRE\",\n 17 : \"DOWNLEFTFIRE\"\n }\n ```\n\n Parameters:\n action_int: A number from 0 to 17 inclusive.\n \"\"\"\n for _ in range(self.frames_per_action):\n if not self.rstate.get_state().apply_ale_action(action_int):\n raise ValueError('Expected to apply action, but failed: {0}'\n .format(action_int))\n\n def apply_action(self, action_input_obj: Input):\n \"\"\"Takes an [ctoybox.Input][] action and applies it - unlike the ALE actions (which allow some permutations) this allows for fine-grained button pressing.\n\n This applies the action *k* times, where *k* based on the frameskip passed to the Toybox constructor.\n \n Parameters:\n action_input_obj: An instance of the [ctoybox.Input][] class.\n \"\"\"\n for _ in range(self.frames_per_action):\n self.rstate.get_state().apply_action(action_input_obj)\n\n def get_state(self) ->np.array:\n \"\"\"This state here actually refers to the graphical, RGBA or grayscale representation of the current state.\"\"\"\n return self.rstate.render_frame(self.rsimulator, self.grayscale)\n\n def set_seed(self, seed: int):\n \"\"\"Control the random number generator of the config -- only affects a new_game.\n \n Parameters:\n seed: a parameter to reset the built-in random number generator.\n \"\"\"\n self.rsimulator.set_seed(seed)\n\n def save_frame_image(self, path: str, grayscale: bool=False):\n \"\"\"Save the current frame image to a PNG file.\n \n Parameters:\n path: the filename to save to.\n grayscale: whether images should be saved in color or black & white.\n \"\"\"\n img = None\n if grayscale:\n img = Image.fromarray(self.rstate.render_frame_grayscale(self.\n rsimulator), 'L')\n else:\n img = Image.fromarray(self.rstate.render_frame_color(self.\n rsimulator), 'RGBA')\n img.save(path, format='png')\n\n def get_rgb_frame(self) ->np.array:\n \"\"\"Get the RGB frame as 
a numpy array.\"\"\"\n return self.rstate.render_frame_rgb(self.rsimulator)\n\n def get_score(self) ->int:\n \"\"\"Access the current score.\n\n Returns:\n The number of points earned in the current state.\"\"\"\n return self.rstate.score()\n\n def get_lives(self) ->int:\n \"\"\"Access the number of lives.\n\n Returns:\n The number of lives remaining in the current state.\"\"\"\n return self.rstate.lives()\n\n def get_level(self) ->int:\n \"\"\"\n Access the number of levels.\n \n Returns:\n The number of levels completed in the current state.\"\"\"\n return self.rstate.level()\n\n def game_over(self) ->bool:\n \"\"\"\n Check for game over condition.\n\n Returns:\n ``True`` if the player has run out of lives in the current state.\n \"\"\"\n return self.rstate.game_over()\n\n def state_to_json(self) ->Dict[str, Any]:\n \"\"\"Get the state's JSON representation as a python object.\"\"\"\n return self.rstate.to_json()\n\n def to_state_json(self) ->Dict[str, Any]:\n \"\"\"Get the state's JSON representation as a python dict.\n \n Important:\n This method is deprecated; please use ``state_to_json`` instead!\n \"\"\"\n return self.state_to_json()\n\n def config_to_json(self) ->Dict[str, Any]:\n \"\"\"Get the state's JSON representation as a python dict.\"\"\"\n return self.rsimulator.to_json()\n\n def write_state_json(self, js: Dict[str, Any]):\n \"\"\"Overwrite the state's JSON representation from a python dict.\n \n Parameters:\n js: the python representation of the JSON state.\n \"\"\"\n old_state = self.rstate\n del old_state\n self.rstate = self.rsimulator.state_from_json(js)\n\n def write_config_json(self, config_js: Dict[str, Any]):\n \"\"\"Overwrite the config's JSON representation from a python dict. 
\n \n It is likely that some changes will be seen until you call new_game()\n\n Parameters:\n config_js: the python representation of the config JSON\n \"\"\"\n self.rsimulator.from_json(config_js)\n self.new_game()\n\n def query_state_json(self, query: str, args: Union[Dict[str, Any], str]\n ='null') ->Dict[str, Any]:\n \"\"\"Submit a query to the game's query system -- faster than accessing the whole JSON for quick introspection.\n\n Parameters:\n query: the query string to send to the game.\n args: a JSON argument to attach to the query string.\n \"\"\"\n return self.rstate.query_json(query, args)\n\n def __del__(self):\n self.rstate = None\n self.rsimulator = None\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.__del__()\n\n def schema_for_state(self) ->Dict[str, Any]:\n \"\"\"Get the JSON Schema for the frame State object.\"\"\"\n return self.rsimulator.schema_for_state()\n\n def schema_for_config(self) ->Dict[str, Any]:\n \"\"\"Get the JSON Schema for the Config object.\"\"\"\n return self.rsimulator.schema_for_config()\n",
"step-2": "<mask token>\n\n\nclass State(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def render_frame_color(self, sim: Simulator) ->np.array:\n \"\"\"Generate an RGBA image from the current frame state object.\n\n Parameters:\n sim: the simulator to use; this tells us the width/height necessary.\n \"\"\"\n w, h = sim.get_frame_size()\n rgba = 4\n size = h * w * rgba\n frame = bytearray(size)\n self.get_state().render_into_buffer(frame, True)\n return np.asarray(frame, dtype=np.uint8).reshape(h, w, rgba)\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Toybox(object):\n \"\"\"\n This is a stateful representation of Toybox -- since it manages memory, we provide ``__enter__`` and ``__exit__`` usage for Python's with-blocks:\n \n ```python\n with Toybox(\"amidar\") as tb:\n print(tb.get_score())\n # the 'tb' variable only lives in the block.\n ```\n\n Important:\n Note how we should use this in a with-block; this will clean up pointers and prevent memory leaks.\n\n \"\"\"\n\n def __init__(self, game_name: str, grayscale: bool=True, frameskip: int\n =0, seed: Optional[int]=None, withstate: Optional[dict]=None):\n \"\"\"\n Construct a new Toybox state/game wrapper. Use this in a with block!\n\n Parameters:\n game_name: One of \"breakout\", \"space_invaders\", \"amidar\", etc.\n grayscale: Toybox can render directly to grayscale, saving time. Default is True.\n frameskip: When an action is submitted, for how many extra frames should it be applied? 
Default is 0.\n seed: The seed \n \"\"\"\n self.game_name = game_name\n self.frames_per_action = frameskip + 1\n self.rsimulator = Simulator(game_name)\n self.rstate = self.rsimulator.new_game()\n self.grayscale = grayscale\n if seed:\n self.set_seed(seed)\n self.new_game()\n if withstate:\n self.write_state_json(withstate)\n\n def new_game(self):\n \"\"\"\n Modify this Toybox wrapper to have a new_game state.\n\n Important:\n This discards the old state!\n \"\"\"\n old_state = self.rstate\n del old_state\n self.rstate = self.rsimulator.new_game()\n\n def get_height(self) ->int:\n \"\"\"Get the height of the rendered game in pixels.\"\"\"\n return self.rsimulator.get_frame_height()\n\n def get_width(self) ->int:\n \"\"\"Get the width of the rendered game in pixels.\"\"\"\n return self.rsimulator.get_frame_width()\n\n def get_legal_action_set(self) ->List[int]:\n \"\"\"Get the set of actions consumed by this game: they are ALE numbered.\"\"\"\n sim = self.rsimulator.get_simulator()\n return sim.legal_actions()\n\n def apply_ale_action(self, action_int: int):\n \"\"\"Takes an integer corresponding to an action, as specified in ALE.\n \n This applies the action *k* times, where *k* based on the frameskip passed to the Toybox constructor.\n \n ```python\n ALE_INPUT_MAPPING = {\n 0 : \"NOOP\",\n 1 : \"FIRE\",\n 2 : \"UP\",\n 3 : \"RIGHT\",\n 4 : \"LEFT\",\n 5 : \"DOWN\",\n 6 : \"UPRIGHT\",\n 7 : \"UPLEFT\",\n 8 : \"DOWNRIGHT\",\n 9 : \"DOWNLEFT\",\n 10 : \"UPFIRE\",\n 11 : \"RIGHTFIRE\",\n 12 : \"LEFTFIRE\",\n 13 : \"DOWNFIRE\",\n 14 : \"UPRIGHTFIRE\",\n 15 : \"UPLEFTFIRE\",\n 16 : \"DOWNRIGHTFIRE\",\n 17 : \"DOWNLEFTFIRE\"\n }\n ```\n\n Parameters:\n action_int: A number from 0 to 17 inclusive.\n \"\"\"\n for _ in range(self.frames_per_action):\n if not self.rstate.get_state().apply_ale_action(action_int):\n raise ValueError('Expected to apply action, but failed: {0}'\n .format(action_int))\n\n def apply_action(self, action_input_obj: Input):\n \"\"\"Takes an 
[ctoybox.Input][] action and applies it - unlike the ALE actions (which allow some permutations) this allows for fine-grained button pressing.\n\n This applies the action *k* times, where *k* based on the frameskip passed to the Toybox constructor.\n \n Parameters:\n action_input_obj: An instance of the [ctoybox.Input][] class.\n \"\"\"\n for _ in range(self.frames_per_action):\n self.rstate.get_state().apply_action(action_input_obj)\n\n def get_state(self) ->np.array:\n \"\"\"This state here actually refers to the graphical, RGBA or grayscale representation of the current state.\"\"\"\n return self.rstate.render_frame(self.rsimulator, self.grayscale)\n\n def set_seed(self, seed: int):\n \"\"\"Control the random number generator of the config -- only affects a new_game.\n \n Parameters:\n seed: a parameter to reset the built-in random number generator.\n \"\"\"\n self.rsimulator.set_seed(seed)\n\n def save_frame_image(self, path: str, grayscale: bool=False):\n \"\"\"Save the current frame image to a PNG file.\n \n Parameters:\n path: the filename to save to.\n grayscale: whether images should be saved in color or black & white.\n \"\"\"\n img = None\n if grayscale:\n img = Image.fromarray(self.rstate.render_frame_grayscale(self.\n rsimulator), 'L')\n else:\n img = Image.fromarray(self.rstate.render_frame_color(self.\n rsimulator), 'RGBA')\n img.save(path, format='png')\n\n def get_rgb_frame(self) ->np.array:\n \"\"\"Get the RGB frame as a numpy array.\"\"\"\n return self.rstate.render_frame_rgb(self.rsimulator)\n\n def get_score(self) ->int:\n \"\"\"Access the current score.\n\n Returns:\n The number of points earned in the current state.\"\"\"\n return self.rstate.score()\n\n def get_lives(self) ->int:\n \"\"\"Access the number of lives.\n\n Returns:\n The number of lives remaining in the current state.\"\"\"\n return self.rstate.lives()\n\n def get_level(self) ->int:\n \"\"\"\n Access the number of levels.\n \n Returns:\n The number of levels completed in the 
current state.\"\"\"\n return self.rstate.level()\n\n def game_over(self) ->bool:\n \"\"\"\n Check for game over condition.\n\n Returns:\n ``True`` if the player has run out of lives in the current state.\n \"\"\"\n return self.rstate.game_over()\n\n def state_to_json(self) ->Dict[str, Any]:\n \"\"\"Get the state's JSON representation as a python object.\"\"\"\n return self.rstate.to_json()\n\n def to_state_json(self) ->Dict[str, Any]:\n \"\"\"Get the state's JSON representation as a python dict.\n \n Important:\n This method is deprecated; please use ``state_to_json`` instead!\n \"\"\"\n return self.state_to_json()\n\n def config_to_json(self) ->Dict[str, Any]:\n \"\"\"Get the state's JSON representation as a python dict.\"\"\"\n return self.rsimulator.to_json()\n\n def write_state_json(self, js: Dict[str, Any]):\n \"\"\"Overwrite the state's JSON representation from a python dict.\n \n Parameters:\n js: the python representation of the JSON state.\n \"\"\"\n old_state = self.rstate\n del old_state\n self.rstate = self.rsimulator.state_from_json(js)\n\n def write_config_json(self, config_js: Dict[str, Any]):\n \"\"\"Overwrite the config's JSON representation from a python dict. 
\n \n It is likely that some changes will be seen until you call new_game()\n\n Parameters:\n config_js: the python representation of the config JSON\n \"\"\"\n self.rsimulator.from_json(config_js)\n self.new_game()\n\n def query_state_json(self, query: str, args: Union[Dict[str, Any], str]\n ='null') ->Dict[str, Any]:\n \"\"\"Submit a query to the game's query system -- faster than accessing the whole JSON for quick introspection.\n\n Parameters:\n query: the query string to send to the game.\n args: a JSON argument to attach to the query string.\n \"\"\"\n return self.rstate.query_json(query, args)\n\n def __del__(self):\n self.rstate = None\n self.rsimulator = None\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.__del__()\n\n def schema_for_state(self) ->Dict[str, Any]:\n \"\"\"Get the JSON Schema for the frame State object.\"\"\"\n return self.rsimulator.schema_for_state()\n\n def schema_for_config(self) ->Dict[str, Any]:\n \"\"\"Get the JSON Schema for the Config object.\"\"\"\n return self.rsimulator.schema_for_config()\n",
"step-3": "<mask token>\n\n\nclass State(object):\n <mask token>\n\n def __init__(self, sim: Simulator, state=None):\n \"\"\"\n Construct a new State instance wrapper.\n\n Parameters:\n sim: The simulator responsible for this state.\n state: Optional pointer to a state to use (otherwise it will create one). \n \"\"\"\n self.sim = sim\n \"\"\"A reference to the simulator that created this state.\"\"\"\n self.__state = state or sim.__sim.new_game()\n \"\"\"The raw pointer to the state itself.\"\"\"\n self.game_name = sim.game_name\n \"\"\"The name of the game that created this state.\"\"\"\n <mask token>\n\n def __del__(self):\n self.__state = None\n self.sim = None\n <mask token>\n\n def clone(self) ->'State':\n \"\"\"Quickly make a copy of this state; should be more efficient than saving the JSON.\"\"\"\n return State(self.sim, state=self.get_state().copy())\n\n def get_state(self) ->FrameState:\n \"\"\"Get the raw state pointer.\"\"\"\n assert self.__state is not None\n return self.__state\n\n def lives(self) ->int:\n \"\"\"How many lives are remaining in the current state?\"\"\"\n return self.__state.lives()\n\n def level(self) ->int:\n \"\"\"How many levels have been completed in the current state?\"\"\"\n return self.__state.level()\n <mask token>\n <mask token>\n\n def query_json(self, query: str, args: Union[Dict[str, Any], str]='null'\n ) ->Dict[str, Any]:\n \"\"\"\n Ask a question of the Rust state; queries are currently implemented manually.\n\n Parameters:\n query: the message to send to the rust state.\n args: the arguments to send to the rust state, defaults to \"null\".\n\n Returns:\n response: A JSON response loaded to python objects.\n\n Raises:\n ValueError: if anything goes wrong with the query\n\n ```python\n with Toybox(\"breakout\") as tb:\n tb.query_json(\"bricks_remaining\")\n ```\n \"\"\"\n return json.loads(self.__state.query(json_str(query), json_str(args)))\n\n def render_frame(self, sim: Simulator, grayscale: bool=True) ->np.array:\n 
\"\"\"Generate an image from the current frame state object.\n\n Parameters:\n sim: the simulator to use; this tells us the width/height necessary.\n grayscale: True if we want to render in grayscale rather than in color (RGBA).\n \"\"\"\n if grayscale:\n return self.render_frame_rgb(sim)\n else:\n return self.render_frame_color(sim)\n\n def render_frame_color(self, sim: Simulator) ->np.array:\n \"\"\"Generate an RGBA image from the current frame state object.\n\n Parameters:\n sim: the simulator to use; this tells us the width/height necessary.\n \"\"\"\n w, h = sim.get_frame_size()\n rgba = 4\n size = h * w * rgba\n frame = bytearray(size)\n self.get_state().render_into_buffer(frame, True)\n return np.asarray(frame, dtype=np.uint8).reshape(h, w, rgba)\n <mask token>\n <mask token>\n\n def to_json(self) ->Dict[str, Any]:\n \"\"\"Get a JSON representation of the state.\"\"\"\n return json.loads(self.get_state().to_json())\n\n\nclass Toybox(object):\n \"\"\"\n This is a stateful representation of Toybox -- since it manages memory, we provide ``__enter__`` and ``__exit__`` usage for Python's with-blocks:\n \n ```python\n with Toybox(\"amidar\") as tb:\n print(tb.get_score())\n # the 'tb' variable only lives in the block.\n ```\n\n Important:\n Note how we should use this in a with-block; this will clean up pointers and prevent memory leaks.\n\n \"\"\"\n\n def __init__(self, game_name: str, grayscale: bool=True, frameskip: int\n =0, seed: Optional[int]=None, withstate: Optional[dict]=None):\n \"\"\"\n Construct a new Toybox state/game wrapper. Use this in a with block!\n\n Parameters:\n game_name: One of \"breakout\", \"space_invaders\", \"amidar\", etc.\n grayscale: Toybox can render directly to grayscale, saving time. Default is True.\n frameskip: When an action is submitted, for how many extra frames should it be applied? 
Default is 0.\n seed: The seed \n \"\"\"\n self.game_name = game_name\n self.frames_per_action = frameskip + 1\n self.rsimulator = Simulator(game_name)\n self.rstate = self.rsimulator.new_game()\n self.grayscale = grayscale\n if seed:\n self.set_seed(seed)\n self.new_game()\n if withstate:\n self.write_state_json(withstate)\n\n def new_game(self):\n \"\"\"\n Modify this Toybox wrapper to have a new_game state.\n\n Important:\n This discards the old state!\n \"\"\"\n old_state = self.rstate\n del old_state\n self.rstate = self.rsimulator.new_game()\n\n def get_height(self) ->int:\n \"\"\"Get the height of the rendered game in pixels.\"\"\"\n return self.rsimulator.get_frame_height()\n\n def get_width(self) ->int:\n \"\"\"Get the width of the rendered game in pixels.\"\"\"\n return self.rsimulator.get_frame_width()\n\n def get_legal_action_set(self) ->List[int]:\n \"\"\"Get the set of actions consumed by this game: they are ALE numbered.\"\"\"\n sim = self.rsimulator.get_simulator()\n return sim.legal_actions()\n\n def apply_ale_action(self, action_int: int):\n \"\"\"Takes an integer corresponding to an action, as specified in ALE.\n \n This applies the action *k* times, where *k* based on the frameskip passed to the Toybox constructor.\n \n ```python\n ALE_INPUT_MAPPING = {\n 0 : \"NOOP\",\n 1 : \"FIRE\",\n 2 : \"UP\",\n 3 : \"RIGHT\",\n 4 : \"LEFT\",\n 5 : \"DOWN\",\n 6 : \"UPRIGHT\",\n 7 : \"UPLEFT\",\n 8 : \"DOWNRIGHT\",\n 9 : \"DOWNLEFT\",\n 10 : \"UPFIRE\",\n 11 : \"RIGHTFIRE\",\n 12 : \"LEFTFIRE\",\n 13 : \"DOWNFIRE\",\n 14 : \"UPRIGHTFIRE\",\n 15 : \"UPLEFTFIRE\",\n 16 : \"DOWNRIGHTFIRE\",\n 17 : \"DOWNLEFTFIRE\"\n }\n ```\n\n Parameters:\n action_int: A number from 0 to 17 inclusive.\n \"\"\"\n for _ in range(self.frames_per_action):\n if not self.rstate.get_state().apply_ale_action(action_int):\n raise ValueError('Expected to apply action, but failed: {0}'\n .format(action_int))\n\n def apply_action(self, action_input_obj: Input):\n \"\"\"Takes an 
[ctoybox.Input][] action and applies it - unlike the ALE actions (which allow some permutations) this allows for fine-grained button pressing.\n\n This applies the action *k* times, where *k* based on the frameskip passed to the Toybox constructor.\n \n Parameters:\n action_input_obj: An instance of the [ctoybox.Input][] class.\n \"\"\"\n for _ in range(self.frames_per_action):\n self.rstate.get_state().apply_action(action_input_obj)\n\n def get_state(self) ->np.array:\n \"\"\"This state here actually refers to the graphical, RGBA or grayscale representation of the current state.\"\"\"\n return self.rstate.render_frame(self.rsimulator, self.grayscale)\n\n def set_seed(self, seed: int):\n \"\"\"Control the random number generator of the config -- only affects a new_game.\n \n Parameters:\n seed: a parameter to reset the built-in random number generator.\n \"\"\"\n self.rsimulator.set_seed(seed)\n\n def save_frame_image(self, path: str, grayscale: bool=False):\n \"\"\"Save the current frame image to a PNG file.\n \n Parameters:\n path: the filename to save to.\n grayscale: whether images should be saved in color or black & white.\n \"\"\"\n img = None\n if grayscale:\n img = Image.fromarray(self.rstate.render_frame_grayscale(self.\n rsimulator), 'L')\n else:\n img = Image.fromarray(self.rstate.render_frame_color(self.\n rsimulator), 'RGBA')\n img.save(path, format='png')\n\n def get_rgb_frame(self) ->np.array:\n \"\"\"Get the RGB frame as a numpy array.\"\"\"\n return self.rstate.render_frame_rgb(self.rsimulator)\n\n def get_score(self) ->int:\n \"\"\"Access the current score.\n\n Returns:\n The number of points earned in the current state.\"\"\"\n return self.rstate.score()\n\n def get_lives(self) ->int:\n \"\"\"Access the number of lives.\n\n Returns:\n The number of lives remaining in the current state.\"\"\"\n return self.rstate.lives()\n\n def get_level(self) ->int:\n \"\"\"\n Access the number of levels.\n \n Returns:\n The number of levels completed in the 
current state.\"\"\"\n return self.rstate.level()\n\n def game_over(self) ->bool:\n \"\"\"\n Check for game over condition.\n\n Returns:\n ``True`` if the player has run out of lives in the current state.\n \"\"\"\n return self.rstate.game_over()\n\n def state_to_json(self) ->Dict[str, Any]:\n \"\"\"Get the state's JSON representation as a python object.\"\"\"\n return self.rstate.to_json()\n\n def to_state_json(self) ->Dict[str, Any]:\n \"\"\"Get the state's JSON representation as a python dict.\n \n Important:\n This method is deprecated; please use ``state_to_json`` instead!\n \"\"\"\n return self.state_to_json()\n\n def config_to_json(self) ->Dict[str, Any]:\n \"\"\"Get the state's JSON representation as a python dict.\"\"\"\n return self.rsimulator.to_json()\n\n def write_state_json(self, js: Dict[str, Any]):\n \"\"\"Overwrite the state's JSON representation from a python dict.\n \n Parameters:\n js: the python representation of the JSON state.\n \"\"\"\n old_state = self.rstate\n del old_state\n self.rstate = self.rsimulator.state_from_json(js)\n\n def write_config_json(self, config_js: Dict[str, Any]):\n \"\"\"Overwrite the config's JSON representation from a python dict. 
\n \n It is likely that some changes will be seen until you call new_game()\n\n Parameters:\n config_js: the python representation of the config JSON\n \"\"\"\n self.rsimulator.from_json(config_js)\n self.new_game()\n\n def query_state_json(self, query: str, args: Union[Dict[str, Any], str]\n ='null') ->Dict[str, Any]:\n \"\"\"Submit a query to the game's query system -- faster than accessing the whole JSON for quick introspection.\n\n Parameters:\n query: the query string to send to the game.\n args: a JSON argument to attach to the query string.\n \"\"\"\n return self.rstate.query_json(query, args)\n\n def __del__(self):\n self.rstate = None\n self.rsimulator = None\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.__del__()\n\n def schema_for_state(self) ->Dict[str, Any]:\n \"\"\"Get the JSON Schema for the frame State object.\"\"\"\n return self.rsimulator.schema_for_state()\n\n def schema_for_config(self) ->Dict[str, Any]:\n \"\"\"Get the JSON Schema for the Config object.\"\"\"\n return self.rsimulator.schema_for_config()\n",
"step-4": "<mask token>\n\n\nclass Simulator(object):\n <mask token>\n\n def __init__(self, game_name, sim=None):\n \"\"\"\n Construct a new instance.\n\n Parameters:\n game_name: one of \"breakout\", \"amidar\", etc.\n sim: optionally a Rust pointer to an existing simulator.\n \"\"\"\n if sim is None:\n sim = Game(game_name)\n self.__sim = sim\n self.game_name = game_name\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n pass\n <mask token>\n\n def get_frame_size(self) ->Tuple[int, int]:\n \"\"\"Get the width in pixels of the frames this game renders.\"\"\"\n return self.__sim.frame_size()\n\n def get_frame_width(self) ->int:\n \"\"\"Get the width in pixels of the frames this game renders.\"\"\"\n return self.__sim.frame_size()[0]\n\n def get_frame_height(self) ->int:\n \"\"\"Get the height in pixels of the frames this game renders.\"\"\"\n return self.__sim.frame_size()[1]\n\n def get_simulator(self) ->Game:\n \"\"\"Get access to the raw simulator pointer.\"\"\"\n return self.__sim\n\n def new_game(self) ->'State':\n \"\"\"Start a new game.\"\"\"\n return State(self, self.__sim.new_game())\n <mask token>\n\n def to_json(self) ->Dict[str, Any]:\n \"\"\"Get the configuration of this simulator/config as JSON\"\"\"\n return json.loads(self.__sim.to_json())\n\n def from_json(self, config_js: Union[Dict[str, Any], str]):\n \"\"\"Mutably update this simulator/config with the replacement json.\"\"\"\n self.__sim = self.__sim.from_json(json_str(config_js))\n\n def schema_for_state(self) ->Dict[str, Any]:\n \"\"\"Get the JSON Schema for any state for this game.\"\"\"\n return json.loads(self.__sim.frame_schema())\n\n def schema_for_config(self) ->Dict[str, Any]:\n \"\"\"Get the JSON Schema for any config for this game.\"\"\"\n return json.loads(self.__sim.config_schema())\n\n\nclass State(object):\n \"\"\"\n The State object represents everything the game needs to know about any single simulated frame.\n\n You can rewind in 
time by storing and restoring these state representations.\n\n - Access the json: ``to_json``\n - Access the image: ``render_frame``\n \"\"\"\n\n def __init__(self, sim: Simulator, state=None):\n \"\"\"\n Construct a new State instance wrapper.\n\n Parameters:\n sim: The simulator responsible for this state.\n state: Optional pointer to a state to use (otherwise it will create one). \n \"\"\"\n self.sim = sim\n \"\"\"A reference to the simulator that created this state.\"\"\"\n self.__state = state or sim.__sim.new_game()\n \"\"\"The raw pointer to the state itself.\"\"\"\n self.game_name = sim.game_name\n \"\"\"The name of the game that created this state.\"\"\"\n\n def __enter__(self):\n return self\n\n def __del__(self):\n self.__state = None\n self.sim = None\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.__del__()\n\n def clone(self) ->'State':\n \"\"\"Quickly make a copy of this state; should be more efficient than saving the JSON.\"\"\"\n return State(self.sim, state=self.get_state().copy())\n\n def get_state(self) ->FrameState:\n \"\"\"Get the raw state pointer.\"\"\"\n assert self.__state is not None\n return self.__state\n\n def lives(self) ->int:\n \"\"\"How many lives are remaining in the current state?\"\"\"\n return self.__state.lives()\n\n def level(self) ->int:\n \"\"\"How many levels have been completed in the current state?\"\"\"\n return self.__state.level()\n\n def score(self) ->int:\n \"\"\"How many points have been earned in the current state?\"\"\"\n return self.__state.score()\n\n def game_over(self):\n \"\"\"Determine whether the game has ended; i.e., the player has run out of lives.\n\n >>> assert self.lives() < 0 == self.game_over()\n \"\"\"\n return self.lives() < 0\n\n def query_json(self, query: str, args: Union[Dict[str, Any], str]='null'\n ) ->Dict[str, Any]:\n \"\"\"\n Ask a question of the Rust state; queries are currently implemented manually.\n\n Parameters:\n query: the message to send to the rust state.\n args: 
the arguments to send to the rust state, defaults to \"null\".\n\n Returns:\n response: A JSON response loaded to python objects.\n\n Raises:\n ValueError: if anything goes wrong with the query\n\n ```python\n with Toybox(\"breakout\") as tb:\n tb.query_json(\"bricks_remaining\")\n ```\n \"\"\"\n return json.loads(self.__state.query(json_str(query), json_str(args)))\n\n def render_frame(self, sim: Simulator, grayscale: bool=True) ->np.array:\n \"\"\"Generate an image from the current frame state object.\n\n Parameters:\n sim: the simulator to use; this tells us the width/height necessary.\n grayscale: True if we want to render in grayscale rather than in color (RGBA).\n \"\"\"\n if grayscale:\n return self.render_frame_rgb(sim)\n else:\n return self.render_frame_color(sim)\n\n def render_frame_color(self, sim: Simulator) ->np.array:\n \"\"\"Generate an RGBA image from the current frame state object.\n\n Parameters:\n sim: the simulator to use; this tells us the width/height necessary.\n \"\"\"\n w, h = sim.get_frame_size()\n rgba = 4\n size = h * w * rgba\n frame = bytearray(size)\n self.get_state().render_into_buffer(frame, True)\n return np.asarray(frame, dtype=np.uint8).reshape(h, w, rgba)\n\n def render_frame_rgb(self, sim: Simulator) ->np.array:\n \"\"\"Generate an RGB image from the current frame state object.\n\n Parameters:\n sim: the simulator to use; this tells us the width/height necessary.\n \"\"\"\n rgba_frame = self.render_frame_color(sim)\n return rgba_frame[:, :, :3]\n\n def render_frame_grayscale(self, sim: Simulator) ->np.array:\n \"\"\"Generate a grayscale image from the current frame state object.\n\n Parameters:\n sim: the simulator to use; this tells us the width/height necessary.\n \"\"\"\n w, h = sim.get_frame_size()\n depth = 1\n size = h * w * depth\n frame = bytearray(size)\n self.get_state().render_into_buffer(frame, False)\n return np.asarray(frame, dtype=np.uint8).reshape(h, w, depth)\n\n def to_json(self) ->Dict[str, Any]:\n \"\"\"Get 
a JSON representation of the state.\"\"\"\n return json.loads(self.get_state().to_json())\n\n\nclass Toybox(object):\n \"\"\"\n This is a stateful representation of Toybox -- since it manages memory, we provide ``__enter__`` and ``__exit__`` usage for Python's with-blocks:\n \n ```python\n with Toybox(\"amidar\") as tb:\n print(tb.get_score())\n # the 'tb' variable only lives in the block.\n ```\n\n Important:\n Note how we should use this in a with-block; this will clean up pointers and prevent memory leaks.\n\n \"\"\"\n\n def __init__(self, game_name: str, grayscale: bool=True, frameskip: int\n =0, seed: Optional[int]=None, withstate: Optional[dict]=None):\n \"\"\"\n Construct a new Toybox state/game wrapper. Use this in a with block!\n\n Parameters:\n game_name: One of \"breakout\", \"space_invaders\", \"amidar\", etc.\n grayscale: Toybox can render directly to grayscale, saving time. Default is True.\n frameskip: When an action is submitted, for how many extra frames should it be applied? 
Default is 0.\n seed: The seed \n \"\"\"\n self.game_name = game_name\n self.frames_per_action = frameskip + 1\n self.rsimulator = Simulator(game_name)\n self.rstate = self.rsimulator.new_game()\n self.grayscale = grayscale\n if seed:\n self.set_seed(seed)\n self.new_game()\n if withstate:\n self.write_state_json(withstate)\n\n def new_game(self):\n \"\"\"\n Modify this Toybox wrapper to have a new_game state.\n\n Important:\n This discards the old state!\n \"\"\"\n old_state = self.rstate\n del old_state\n self.rstate = self.rsimulator.new_game()\n\n def get_height(self) ->int:\n \"\"\"Get the height of the rendered game in pixels.\"\"\"\n return self.rsimulator.get_frame_height()\n\n def get_width(self) ->int:\n \"\"\"Get the width of the rendered game in pixels.\"\"\"\n return self.rsimulator.get_frame_width()\n\n def get_legal_action_set(self) ->List[int]:\n \"\"\"Get the set of actions consumed by this game: they are ALE numbered.\"\"\"\n sim = self.rsimulator.get_simulator()\n return sim.legal_actions()\n\n def apply_ale_action(self, action_int: int):\n \"\"\"Takes an integer corresponding to an action, as specified in ALE.\n \n This applies the action *k* times, where *k* based on the frameskip passed to the Toybox constructor.\n \n ```python\n ALE_INPUT_MAPPING = {\n 0 : \"NOOP\",\n 1 : \"FIRE\",\n 2 : \"UP\",\n 3 : \"RIGHT\",\n 4 : \"LEFT\",\n 5 : \"DOWN\",\n 6 : \"UPRIGHT\",\n 7 : \"UPLEFT\",\n 8 : \"DOWNRIGHT\",\n 9 : \"DOWNLEFT\",\n 10 : \"UPFIRE\",\n 11 : \"RIGHTFIRE\",\n 12 : \"LEFTFIRE\",\n 13 : \"DOWNFIRE\",\n 14 : \"UPRIGHTFIRE\",\n 15 : \"UPLEFTFIRE\",\n 16 : \"DOWNRIGHTFIRE\",\n 17 : \"DOWNLEFTFIRE\"\n }\n ```\n\n Parameters:\n action_int: A number from 0 to 17 inclusive.\n \"\"\"\n for _ in range(self.frames_per_action):\n if not self.rstate.get_state().apply_ale_action(action_int):\n raise ValueError('Expected to apply action, but failed: {0}'\n .format(action_int))\n\n def apply_action(self, action_input_obj: Input):\n \"\"\"Takes an 
[ctoybox.Input][] action and applies it - unlike the ALE actions (which allow some permutations) this allows for fine-grained button pressing.\n\n This applies the action *k* times, where *k* based on the frameskip passed to the Toybox constructor.\n \n Parameters:\n action_input_obj: An instance of the [ctoybox.Input][] class.\n \"\"\"\n for _ in range(self.frames_per_action):\n self.rstate.get_state().apply_action(action_input_obj)\n\n def get_state(self) ->np.array:\n \"\"\"This state here actually refers to the graphical, RGBA or grayscale representation of the current state.\"\"\"\n return self.rstate.render_frame(self.rsimulator, self.grayscale)\n\n def set_seed(self, seed: int):\n \"\"\"Control the random number generator of the config -- only affects a new_game.\n \n Parameters:\n seed: a parameter to reset the built-in random number generator.\n \"\"\"\n self.rsimulator.set_seed(seed)\n\n def save_frame_image(self, path: str, grayscale: bool=False):\n \"\"\"Save the current frame image to a PNG file.\n \n Parameters:\n path: the filename to save to.\n grayscale: whether images should be saved in color or black & white.\n \"\"\"\n img = None\n if grayscale:\n img = Image.fromarray(self.rstate.render_frame_grayscale(self.\n rsimulator), 'L')\n else:\n img = Image.fromarray(self.rstate.render_frame_color(self.\n rsimulator), 'RGBA')\n img.save(path, format='png')\n\n def get_rgb_frame(self) ->np.array:\n \"\"\"Get the RGB frame as a numpy array.\"\"\"\n return self.rstate.render_frame_rgb(self.rsimulator)\n\n def get_score(self) ->int:\n \"\"\"Access the current score.\n\n Returns:\n The number of points earned in the current state.\"\"\"\n return self.rstate.score()\n\n def get_lives(self) ->int:\n \"\"\"Access the number of lives.\n\n Returns:\n The number of lives remaining in the current state.\"\"\"\n return self.rstate.lives()\n\n def get_level(self) ->int:\n \"\"\"\n Access the number of levels.\n \n Returns:\n The number of levels completed in the 
current state.\"\"\"\n return self.rstate.level()\n\n def game_over(self) ->bool:\n \"\"\"\n Check for game over condition.\n\n Returns:\n ``True`` if the player has run out of lives in the current state.\n \"\"\"\n return self.rstate.game_over()\n\n def state_to_json(self) ->Dict[str, Any]:\n \"\"\"Get the state's JSON representation as a python object.\"\"\"\n return self.rstate.to_json()\n\n def to_state_json(self) ->Dict[str, Any]:\n \"\"\"Get the state's JSON representation as a python dict.\n \n Important:\n This method is deprecated; please use ``state_to_json`` instead!\n \"\"\"\n return self.state_to_json()\n\n def config_to_json(self) ->Dict[str, Any]:\n \"\"\"Get the state's JSON representation as a python dict.\"\"\"\n return self.rsimulator.to_json()\n\n def write_state_json(self, js: Dict[str, Any]):\n \"\"\"Overwrite the state's JSON representation from a python dict.\n \n Parameters:\n js: the python representation of the JSON state.\n \"\"\"\n old_state = self.rstate\n del old_state\n self.rstate = self.rsimulator.state_from_json(js)\n\n def write_config_json(self, config_js: Dict[str, Any]):\n \"\"\"Overwrite the config's JSON representation from a python dict. 
\n \n It is likely that some changes will be seen until you call new_game()\n\n Parameters:\n config_js: the python representation of the config JSON\n \"\"\"\n self.rsimulator.from_json(config_js)\n self.new_game()\n\n def query_state_json(self, query: str, args: Union[Dict[str, Any], str]\n ='null') ->Dict[str, Any]:\n \"\"\"Submit a query to the game's query system -- faster than accessing the whole JSON for quick introspection.\n\n Parameters:\n query: the query string to send to the game.\n args: a JSON argument to attach to the query string.\n \"\"\"\n return self.rstate.query_json(query, args)\n\n def __del__(self):\n self.rstate = None\n self.rsimulator = None\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.__del__()\n\n def schema_for_state(self) ->Dict[str, Any]:\n \"\"\"Get the JSON Schema for the frame State object.\"\"\"\n return self.rsimulator.schema_for_state()\n\n def schema_for_config(self) ->Dict[str, Any]:\n \"\"\"Get the JSON Schema for the Config object.\"\"\"\n return self.rsimulator.schema_for_config()\n",
"step-5": "from .ctoybox import Game, State as FrameState, Input\nimport numpy as np\nfrom PIL import Image\nimport json\nfrom typing import Dict, Any, List, Tuple, Union, Optional\n\n\n\ndef json_str(js: Union[Dict[str, Any], Input, str]) -> str:\n \"\"\"\n Turn an object into a JSON string -- handles dictionaries, the Input class, and JSON you've already prepared (e.g., strings).\n \"\"\"\n if type(js) is dict:\n js = json.dumps(js)\n elif type(js) is Input:\n js = json.dumps(js.__dict__)\n elif type(js) is not str:\n raise ValueError(\n \"Unknown json type: %s (only str and dict supported)\" % type(js)\n )\n return js\n\n\nclass Simulator(object):\n \"\"\"\n The Simulator is an instance of a game configuration.\n You can call new_game on it to begin.\n\n \"\"\"\n\n def __init__(self, game_name, sim=None):\n \"\"\"\n Construct a new instance.\n\n Parameters:\n game_name: one of \"breakout\", \"amidar\", etc.\n sim: optionally a Rust pointer to an existing simulator.\n \"\"\"\n if sim is None:\n sim = Game(game_name)\n self.__sim = sim\n # sim should be a pointer\n self.game_name = game_name\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n pass\n\n def set_seed(self, seed: int):\n \"\"\"Configure the random number generator that spawns new game states.\n \n Parameters:\n seed: a parameter to reset the built-in random number generator.\n \"\"\"\n self.__sim.seed(seed)\n \n def get_frame_size(self) -> Tuple[int, int]:\n \"\"\"Get the width in pixels of the frames this game renders.\"\"\"\n return self.__sim.frame_size()\n\n def get_frame_width(self) -> int:\n \"\"\"Get the width in pixels of the frames this game renders.\"\"\"\n return self.__sim.frame_size()[0]\n\n def get_frame_height(self) -> int:\n \"\"\"Get the height in pixels of the frames this game renders.\"\"\"\n return self.__sim.frame_size()[1]\n\n def get_simulator(self) -> Game:\n \"\"\"Get access to the raw simulator pointer.\"\"\"\n return 
self.__sim\n\n def new_game(self) -> \"State\":\n \"\"\"Start a new game.\"\"\"\n return State(self, self.__sim.new_game())\n\n def state_from_json(self, js: Union[Dict[str, Any], str]) -> \"State\":\n \"\"\"Generate a State from the state json and this configuration.\n \n Parameters:\n js: a JSON object or string containing a serialized state.\n \"\"\"\n state: FrameState = self.__sim.new_state(json_str(js))\n return State(self, state=state)\n\n def to_json(self) -> Dict[str, Any]:\n \"\"\"Get the configuration of this simulator/config as JSON\"\"\"\n return json.loads(self.__sim.to_json())\n\n def from_json(self, config_js: Union[Dict[str, Any], str]):\n \"\"\"Mutably update this simulator/config with the replacement json.\"\"\"\n self.__sim = self.__sim.from_json(json_str(config_js))\n\n def schema_for_state(self) -> Dict[str, Any]:\n \"\"\"Get the JSON Schema for any state for this game.\"\"\"\n return json.loads(self.__sim.frame_schema())\n\n def schema_for_config(self) -> Dict[str, Any]:\n \"\"\"Get the JSON Schema for any config for this game.\"\"\"\n return json.loads(self.__sim.config_schema())\n\n\nclass State(object):\n \"\"\"\n The State object represents everything the game needs to know about any single simulated frame.\n\n You can rewind in time by storing and restoring these state representations.\n\n - Access the json: ``to_json``\n - Access the image: ``render_frame``\n \"\"\"\n\n def __init__(self, sim: Simulator, state=None):\n \"\"\"\n Construct a new State instance wrapper.\n\n Parameters:\n sim: The simulator responsible for this state.\n state: Optional pointer to a state to use (otherwise it will create one). 
\n \"\"\"\n self.sim = sim\n \"\"\"A reference to the simulator that created this state.\"\"\"\n self.__state = state or sim.__sim.new_game()\n \"\"\"The raw pointer to the state itself.\"\"\"\n self.game_name = sim.game_name\n \"\"\"The name of the game that created this state.\"\"\"\n\n def __enter__(self):\n return self\n\n def __del__(self):\n self.__state = None\n self.sim = None\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.__del__()\n\n def clone(self) -> 'State':\n \"\"\"Quickly make a copy of this state; should be more efficient than saving the JSON.\"\"\"\n return State(self.sim, state=self.get_state().copy())\n\n def get_state(self) -> FrameState:\n \"\"\"Get the raw state pointer.\"\"\"\n assert self.__state is not None\n return self.__state\n\n def lives(self) -> int:\n \"\"\"How many lives are remaining in the current state?\"\"\"\n return self.__state.lives()\n\n def level(self) -> int:\n \"\"\"How many levels have been completed in the current state?\"\"\"\n return self.__state.level()\n\n def score(self) -> int:\n \"\"\"How many points have been earned in the current state?\"\"\"\n return self.__state.score()\n\n def game_over(self):\n \"\"\"Determine whether the game has ended; i.e., the player has run out of lives.\n\n >>> assert self.lives() < 0 == self.game_over()\n \"\"\"\n return self.lives() < 0\n\n def query_json(\n self, query: str, args: Union[Dict[str, Any], str] = \"null\"\n ) -> Dict[str, Any]:\n \"\"\"\n Ask a question of the Rust state; queries are currently implemented manually.\n\n Parameters:\n query: the message to send to the rust state.\n args: the arguments to send to the rust state, defaults to \"null\".\n\n Returns:\n response: A JSON response loaded to python objects.\n\n Raises:\n ValueError: if anything goes wrong with the query\n\n ```python\n with Toybox(\"breakout\") as tb:\n tb.query_json(\"bricks_remaining\")\n ```\n \"\"\"\n return json.loads(self.__state.query(json_str(query), json_str(args)))\n\n 
def render_frame(self, sim: Simulator, grayscale: bool = True) -> np.array:\n \"\"\"Generate an image from the current frame state object.\n\n Parameters:\n sim: the simulator to use; this tells us the width/height necessary.\n grayscale: True if we want to render in grayscale rather than in color (RGBA).\n \"\"\"\n if grayscale:\n return self.render_frame_rgb(sim)\n else:\n return self.render_frame_color(sim)\n\n def render_frame_color(self, sim: Simulator) -> np.array:\n \"\"\"Generate an RGBA image from the current frame state object.\n\n Parameters:\n sim: the simulator to use; this tells us the width/height necessary.\n \"\"\"\n (w, h) = sim.get_frame_size()\n rgba = 4\n size = h * w * rgba\n frame = bytearray(size)\n self.get_state().render_into_buffer(frame, True)\n return np.asarray(frame, dtype=np.uint8).reshape(h, w, rgba)\n\n def render_frame_rgb(self, sim: Simulator) -> np.array:\n \"\"\"Generate an RGB image from the current frame state object.\n\n Parameters:\n sim: the simulator to use; this tells us the width/height necessary.\n \"\"\"\n rgba_frame = self.render_frame_color(sim)\n return rgba_frame[:, :, :3]\n\n def render_frame_grayscale(self, sim: Simulator) -> np.array:\n \"\"\"Generate a grayscale image from the current frame state object.\n\n Parameters:\n sim: the simulator to use; this tells us the width/height necessary.\n \"\"\"\n (w, h) = sim.get_frame_size()\n depth = 1\n size = h * w * depth\n frame = bytearray(size)\n self.get_state().render_into_buffer(frame, False)\n return np.asarray(frame, dtype=np.uint8).reshape(h, w, depth)\n\n def to_json(self) -> Dict[str, Any]:\n \"\"\"Get a JSON representation of the state.\"\"\"\n return json.loads(self.get_state().to_json())\n\n\n\nclass Toybox(object):\n \"\"\"\n This is a stateful representation of Toybox -- since it manages memory, we provide ``__enter__`` and ``__exit__`` usage for Python's with-blocks:\n \n ```python\n with Toybox(\"amidar\") as tb:\n print(tb.get_score())\n # the 'tb' 
variable only lives in the block.\n ```\n\n Important:\n Note how we should use this in a with-block; this will clean up pointers and prevent memory leaks.\n\n \"\"\"\n\n def __init__(self, \n game_name: str, \n grayscale: bool = True, \n frameskip: int = 0, \n seed: Optional[int] = None, \n withstate: Optional[dict] = None):\n \"\"\"\n Construct a new Toybox state/game wrapper. Use this in a with block!\n\n Parameters:\n game_name: One of \"breakout\", \"space_invaders\", \"amidar\", etc.\n grayscale: Toybox can render directly to grayscale, saving time. Default is True.\n frameskip: When an action is submitted, for how many extra frames should it be applied? Default is 0.\n seed: The seed \n \"\"\"\n self.game_name = game_name\n self.frames_per_action = frameskip + 1\n self.rsimulator = Simulator(game_name)\n self.rstate = self.rsimulator.new_game()\n self.grayscale = grayscale\n if seed:\n self.set_seed(seed)\n self.new_game()\n if withstate:\n self.write_state_json(withstate)\n\n def new_game(self):\n \"\"\"\n Modify this Toybox wrapper to have a new_game state.\n\n Important:\n This discards the old state!\n \"\"\"\n old_state = self.rstate\n del old_state\n self.rstate = self.rsimulator.new_game()\n\n def get_height(self) -> int:\n \"\"\"Get the height of the rendered game in pixels.\"\"\"\n return self.rsimulator.get_frame_height()\n\n def get_width(self) -> int:\n \"\"\"Get the width of the rendered game in pixels.\"\"\"\n return self.rsimulator.get_frame_width()\n\n def get_legal_action_set(self) -> List[int]:\n \"\"\"Get the set of actions consumed by this game: they are ALE numbered.\"\"\"\n sim = self.rsimulator.get_simulator()\n return sim.legal_actions()\n\n def apply_ale_action(self, action_int: int):\n \"\"\"Takes an integer corresponding to an action, as specified in ALE.\n \n This applies the action *k* times, where *k* based on the frameskip passed to the Toybox constructor.\n \n ```python\n ALE_INPUT_MAPPING = {\n 0 : \"NOOP\",\n 1 : \"FIRE\",\n 
2 : \"UP\",\n 3 : \"RIGHT\",\n 4 : \"LEFT\",\n 5 : \"DOWN\",\n 6 : \"UPRIGHT\",\n 7 : \"UPLEFT\",\n 8 : \"DOWNRIGHT\",\n 9 : \"DOWNLEFT\",\n 10 : \"UPFIRE\",\n 11 : \"RIGHTFIRE\",\n 12 : \"LEFTFIRE\",\n 13 : \"DOWNFIRE\",\n 14 : \"UPRIGHTFIRE\",\n 15 : \"UPLEFTFIRE\",\n 16 : \"DOWNRIGHTFIRE\",\n 17 : \"DOWNLEFTFIRE\"\n }\n ```\n\n Parameters:\n action_int: A number from 0 to 17 inclusive.\n \"\"\"\n # implement frameskip(k) by sending the action (k+1) times every time we have an action.\n for _ in range(self.frames_per_action):\n if not self.rstate.get_state().apply_ale_action(action_int):\n raise ValueError(\n \"Expected to apply action, but failed: {0}\".format(action_int)\n )\n\n def apply_action(self, action_input_obj: Input):\n \"\"\"Takes an [ctoybox.Input][] action and applies it - unlike the ALE actions (which allow some permutations) this allows for fine-grained button pressing.\n\n This applies the action *k* times, where *k* based on the frameskip passed to the Toybox constructor.\n \n Parameters:\n action_input_obj: An instance of the [ctoybox.Input][] class.\n \"\"\"\n # implement frameskip(k) by sending the action (k+1) times every time we have an action.\n for _ in range(self.frames_per_action):\n self.rstate.get_state().apply_action(action_input_obj)\n\n def get_state(self) -> np.array:\n \"\"\"This state here actually refers to the graphical, RGBA or grayscale representation of the current state.\"\"\"\n return self.rstate.render_frame(self.rsimulator, self.grayscale)\n\n def set_seed(self, seed: int):\n \"\"\"Control the random number generator of the config -- only affects a new_game.\n \n Parameters:\n seed: a parameter to reset the built-in random number generator.\n \"\"\"\n self.rsimulator.set_seed(seed)\n # Maybe call new game here?\n\n def save_frame_image(self, path: str, grayscale: bool = False):\n \"\"\"Save the current frame image to a PNG file.\n \n Parameters:\n path: the filename to save to.\n grayscale: whether images should be 
saved in color or black & white.\n \"\"\"\n img = None\n if grayscale:\n img = Image.fromarray(\n self.rstate.render_frame_grayscale(self.rsimulator), \"L\"\n )\n else:\n img = Image.fromarray(\n self.rstate.render_frame_color(self.rsimulator), \"RGBA\"\n )\n img.save(path, format=\"png\")\n\n def get_rgb_frame(self) -> np.array:\n \"\"\"Get the RGB frame as a numpy array.\"\"\"\n return self.rstate.render_frame_rgb(self.rsimulator)\n\n def get_score(self) -> int:\n \"\"\"Access the current score.\n\n Returns:\n The number of points earned in the current state.\"\"\"\n return self.rstate.score()\n\n def get_lives(self) -> int:\n \"\"\"Access the number of lives.\n\n Returns:\n The number of lives remaining in the current state.\"\"\"\n return self.rstate.lives()\n\n def get_level(self) -> int:\n \"\"\"\n Access the number of levels.\n \n Returns:\n The number of levels completed in the current state.\"\"\"\n return self.rstate.level()\n\n def game_over(self) -> bool:\n \"\"\"\n Check for game over condition.\n\n Returns:\n ``True`` if the player has run out of lives in the current state.\n \"\"\"\n return self.rstate.game_over()\n\n def state_to_json(self) -> Dict[str, Any]:\n \"\"\"Get the state's JSON representation as a python object.\"\"\"\n return self.rstate.to_json()\n\n def to_state_json(self) -> Dict[str, Any]:\n \"\"\"Get the state's JSON representation as a python dict.\n \n Important:\n This method is deprecated; please use ``state_to_json`` instead!\n \"\"\"\n return self.state_to_json()\n\n def config_to_json(self) -> Dict[str, Any]:\n \"\"\"Get the state's JSON representation as a python dict.\"\"\"\n return self.rsimulator.to_json()\n\n def write_state_json(self, js: Dict[str, Any]):\n \"\"\"Overwrite the state's JSON representation from a python dict.\n \n Parameters:\n js: the python representation of the JSON state.\n \"\"\"\n old_state = self.rstate\n del old_state\n self.rstate = self.rsimulator.state_from_json(js)\n\n def 
write_config_json(self, config_js: Dict[str, Any]):\n \"\"\"Overwrite the config's JSON representation from a python dict. \n \n It is likely that some changes will be seen until you call new_game()\n\n Parameters:\n config_js: the python representation of the config JSON\n \"\"\"\n # from_json replaces simulator!\n self.rsimulator.from_json(config_js)\n # new_game replaces state!\n self.new_game()\n\n def query_state_json(\n self, query: str, args: Union[Dict[str, Any], str] = \"null\"\n ) -> Dict[str, Any]:\n \"\"\"Submit a query to the game's query system -- faster than accessing the whole JSON for quick introspection.\n\n Parameters:\n query: the query string to send to the game.\n args: a JSON argument to attach to the query string.\n \"\"\"\n return self.rstate.query_json(query, args)\n\n def __del__(self):\n self.rstate = None\n self.rsimulator = None\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.__del__()\n\n def schema_for_state(self) -> Dict[str, Any]:\n \"\"\"Get the JSON Schema for the frame State object.\"\"\"\n return self.rsimulator.schema_for_state()\n\n def schema_for_config(self) -> Dict[str, Any]:\n \"\"\"Get the JSON Schema for the Config object.\"\"\"\n return self.rsimulator.schema_for_config()\n",
"step-ids": [
27,
30,
39,
59,
65
]
}
|
[
27,
30,
39,
59,
65
] |
import requests
url = 'https://item.jd.com/100008348550.html'
try:
r = requests.get(url)
r.raise_for_status()
print(r.encoding)
r.encoding = r.apparent_encoding
print(r.text[:1000])
print(r.apparent_encoding)
except:
print('error')
|
normal
|
{
"blob_id": "0271c45a21047b948946dd76f147692bb16b8bcf",
"index": 5378,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n r = requests.get(url)\n r.raise_for_status()\n print(r.encoding)\n r.encoding = r.apparent_encoding\n print(r.text[:1000])\n print(r.apparent_encoding)\nexcept:\n print('error')\n",
"step-3": "<mask token>\nurl = 'https://item.jd.com/100008348550.html'\ntry:\n r = requests.get(url)\n r.raise_for_status()\n print(r.encoding)\n r.encoding = r.apparent_encoding\n print(r.text[:1000])\n print(r.apparent_encoding)\nexcept:\n print('error')\n",
"step-4": "import requests\nurl = 'https://item.jd.com/100008348550.html'\ntry:\n r = requests.get(url)\n r.raise_for_status()\n print(r.encoding)\n r.encoding = r.apparent_encoding\n print(r.text[:1000])\n print(r.apparent_encoding)\nexcept:\n print('error')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
os.chdir(main_dir)
<|reserved_special_token_0|>
for col in loan_seller_cols:
cmbs.drop(columns=col, axis=1, inplace=True)
<|reserved_special_token_0|>
for key, value in regex_dict.items():
cmbs.columns = [re.sub(key, value, col) for col in cmbs.columns]
for col in list(cmbs.columns.values):
try:
if cmbs[col].str.normalize('NFKD').str.match(' ').all():
cmbs.drop(columns=col, axis=1, inplace=True)
except AttributeError:
continue
cmbs.to_csv('CMBS Final.csv', index=False, encoding='ISO-8859-1')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
main_dir = (
'C:\\Users\\Username\\Desktop\\Python\\End-to-End-Data-Analysis\\1. Get the Data\\table'
)
file = 'CMBS Table.csv'
os.chdir(main_dir)
cmbs = pd.read_csv(file, encoding='ISO-8859-1')
loan_seller_cols = [val for val in cmbs.columns.values if re.search(
'(^Loan\\s#|^Seller|^Property\\sName)', val)][3:]
for col in loan_seller_cols:
cmbs.drop(columns=col, axis=1, inplace=True)
regex_dict = {'_\\d': '', '\\(.+\\)+': '', '#': '', '%': '', '\\/': '',
'\\s\\s+': ' ', '^\\s+': '', '\\s+$': ''}
for key, value in regex_dict.items():
cmbs.columns = [re.sub(key, value, col) for col in cmbs.columns]
for col in list(cmbs.columns.values):
try:
if cmbs[col].str.normalize('NFKD').str.match(' ').all():
cmbs.drop(columns=col, axis=1, inplace=True)
except AttributeError:
continue
cmbs.to_csv('CMBS Final.csv', index=False, encoding='ISO-8859-1')
<|reserved_special_token_1|>
import pandas as pd
import os
import re
main_dir = (
'C:\\Users\\Username\\Desktop\\Python\\End-to-End-Data-Analysis\\1. Get the Data\\table'
)
file = 'CMBS Table.csv'
os.chdir(main_dir)
cmbs = pd.read_csv(file, encoding='ISO-8859-1')
loan_seller_cols = [val for val in cmbs.columns.values if re.search(
'(^Loan\\s#|^Seller|^Property\\sName)', val)][3:]
for col in loan_seller_cols:
cmbs.drop(columns=col, axis=1, inplace=True)
regex_dict = {'_\\d': '', '\\(.+\\)+': '', '#': '', '%': '', '\\/': '',
'\\s\\s+': ' ', '^\\s+': '', '\\s+$': ''}
for key, value in regex_dict.items():
cmbs.columns = [re.sub(key, value, col) for col in cmbs.columns]
for col in list(cmbs.columns.values):
try:
if cmbs[col].str.normalize('NFKD').str.match(' ').all():
cmbs.drop(columns=col, axis=1, inplace=True)
except AttributeError:
continue
cmbs.to_csv('CMBS Final.csv', index=False, encoding='ISO-8859-1')
<|reserved_special_token_1|>
import pandas as pd
import os
import re
main_dir = r'C:\Users\Username\Desktop\Python\End-to-End-Data-Analysis\1. Get the Data\table'
file = 'CMBS Table.csv'
os.chdir(main_dir)
cmbs = pd.read_csv(file, encoding='ISO-8859-1')
# Delete extra Loan & Seller columns
loan_seller_cols = [val for val in cmbs.columns.values if re.search('(^Loan\s#|^Seller|^Property\sName)', val)][3:]
for col in loan_seller_cols:
cmbs.drop(columns=col, axis=1, inplace=True)
# Regex to edit headers
regex_dict = {'_\d': '', '\(.+\)+': '', '#': '', '%': '', r'\/' : '', '\s\s+': ' ', '^\s+': '', '\s+$': ''}
for key, value in regex_dict.items():
cmbs.columns = [re.sub(key, value, col) for col in cmbs.columns]
# Delete
for col in list(cmbs.columns.values):
try:
if cmbs[col].str.normalize('NFKD').str.match(' ').all():
cmbs.drop(columns=col, axis=1, inplace=True)
except AttributeError:
continue
cmbs.to_csv('CMBS Final.csv', index=False, encoding='ISO-8859-1')
|
flexible
|
{
"blob_id": "eb890c68885cbab032ce9d6f3be3fd7013a2788b",
"index": 2140,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nos.chdir(main_dir)\n<mask token>\nfor col in loan_seller_cols:\n cmbs.drop(columns=col, axis=1, inplace=True)\n<mask token>\nfor key, value in regex_dict.items():\n cmbs.columns = [re.sub(key, value, col) for col in cmbs.columns]\nfor col in list(cmbs.columns.values):\n try:\n if cmbs[col].str.normalize('NFKD').str.match(' ').all():\n cmbs.drop(columns=col, axis=1, inplace=True)\n except AttributeError:\n continue\ncmbs.to_csv('CMBS Final.csv', index=False, encoding='ISO-8859-1')\n",
"step-3": "<mask token>\nmain_dir = (\n 'C:\\\\Users\\\\Username\\\\Desktop\\\\Python\\\\End-to-End-Data-Analysis\\\\1. Get the Data\\\\table'\n )\nfile = 'CMBS Table.csv'\nos.chdir(main_dir)\ncmbs = pd.read_csv(file, encoding='ISO-8859-1')\nloan_seller_cols = [val for val in cmbs.columns.values if re.search(\n '(^Loan\\\\s#|^Seller|^Property\\\\sName)', val)][3:]\nfor col in loan_seller_cols:\n cmbs.drop(columns=col, axis=1, inplace=True)\nregex_dict = {'_\\\\d': '', '\\\\(.+\\\\)+': '', '#': '', '%': '', '\\\\/': '',\n '\\\\s\\\\s+': ' ', '^\\\\s+': '', '\\\\s+$': ''}\nfor key, value in regex_dict.items():\n cmbs.columns = [re.sub(key, value, col) for col in cmbs.columns]\nfor col in list(cmbs.columns.values):\n try:\n if cmbs[col].str.normalize('NFKD').str.match(' ').all():\n cmbs.drop(columns=col, axis=1, inplace=True)\n except AttributeError:\n continue\ncmbs.to_csv('CMBS Final.csv', index=False, encoding='ISO-8859-1')\n",
"step-4": "import pandas as pd\nimport os\nimport re\nmain_dir = (\n 'C:\\\\Users\\\\Username\\\\Desktop\\\\Python\\\\End-to-End-Data-Analysis\\\\1. Get the Data\\\\table'\n )\nfile = 'CMBS Table.csv'\nos.chdir(main_dir)\ncmbs = pd.read_csv(file, encoding='ISO-8859-1')\nloan_seller_cols = [val for val in cmbs.columns.values if re.search(\n '(^Loan\\\\s#|^Seller|^Property\\\\sName)', val)][3:]\nfor col in loan_seller_cols:\n cmbs.drop(columns=col, axis=1, inplace=True)\nregex_dict = {'_\\\\d': '', '\\\\(.+\\\\)+': '', '#': '', '%': '', '\\\\/': '',\n '\\\\s\\\\s+': ' ', '^\\\\s+': '', '\\\\s+$': ''}\nfor key, value in regex_dict.items():\n cmbs.columns = [re.sub(key, value, col) for col in cmbs.columns]\nfor col in list(cmbs.columns.values):\n try:\n if cmbs[col].str.normalize('NFKD').str.match(' ').all():\n cmbs.drop(columns=col, axis=1, inplace=True)\n except AttributeError:\n continue\ncmbs.to_csv('CMBS Final.csv', index=False, encoding='ISO-8859-1')\n",
"step-5": "import pandas as pd\r\nimport os\r\nimport re\r\n\r\nmain_dir = r'C:\\Users\\Username\\Desktop\\Python\\End-to-End-Data-Analysis\\1. Get the Data\\table'\r\nfile = 'CMBS Table.csv'\r\n\r\nos.chdir(main_dir)\r\n\r\ncmbs = pd.read_csv(file, encoding='ISO-8859-1')\r\n\r\n# Delete extra Loan & Seller columns\r\nloan_seller_cols = [val for val in cmbs.columns.values if re.search('(^Loan\\s#|^Seller|^Property\\sName)', val)][3:]\r\n\r\nfor col in loan_seller_cols:\r\n cmbs.drop(columns=col, axis=1, inplace=True)\r\n\r\n# Regex to edit headers\r\nregex_dict = {'_\\d': '', '\\(.+\\)+': '', '#': '', '%': '', r'\\/' : '', '\\s\\s+': ' ', '^\\s+': '', '\\s+$': ''}\r\n\r\nfor key, value in regex_dict.items():\r\n cmbs.columns = [re.sub(key, value, col) for col in cmbs.columns]\r\n\r\n# Delete \r\nfor col in list(cmbs.columns.values):\r\n try:\r\n if cmbs[col].str.normalize('NFKD').str.match(' ').all():\r\n cmbs.drop(columns=col, axis=1, inplace=True)\r\n except AttributeError:\r\n continue\r\n\r\ncmbs.to_csv('CMBS Final.csv', index=False, encoding='ISO-8859-1')\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def aitoff_projection(theta, phi):
import numpy as np
theta = theta - np.pi
cos_phi = np.cos(phi)
denom = np.sqrt(1 + cos_phi * np.cos(theta / 2))
x = 180 * cos_phi * np.sin(theta / 2) / denom
x = x + 180
y = 90 * np.sin(phi) / denom
return x, y
<|reserved_special_token_1|>
# aitoff projection
# see:
# https://en.wikipedia.org/wiki/Aitoff_projection
def aitoff_projection(theta, phi):
import numpy as np
# theta, phi in radian
theta = theta - np.pi
cos_phi = np.cos(phi)
denom = np.sqrt(1 + cos_phi * np.cos(theta/2))
x = 180 * cos_phi * np.sin(theta/2) / denom
x = x + 180
y = 90 * np.sin(phi) / denom
return x,y
|
flexible
|
{
"blob_id": "0dcf90514543a1ca801e82cd402b3e1002b1f5d0",
"index": 9262,
"step-1": "<mask token>\n",
"step-2": "def aitoff_projection(theta, phi):\n import numpy as np\n theta = theta - np.pi\n cos_phi = np.cos(phi)\n denom = np.sqrt(1 + cos_phi * np.cos(theta / 2))\n x = 180 * cos_phi * np.sin(theta / 2) / denom\n x = x + 180\n y = 90 * np.sin(phi) / denom\n return x, y\n",
"step-3": "# aitoff projection\n# see:\n# https://en.wikipedia.org/wiki/Aitoff_projection\ndef aitoff_projection(theta, phi):\n import numpy as np\n # theta, phi in radian\n theta = theta - np.pi\n cos_phi = np.cos(phi)\n denom = np.sqrt(1 + cos_phi * np.cos(theta/2))\n x = 180 * cos_phi * np.sin(theta/2) / denom\n x = x + 180\n y = 90 * np.sin(phi) / denom\n return x,y\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from django.shortcuts import redirect, render
from users.models import CustomUser
from .models import Profile
def profile_page_view(request, username):
current_user = request.user
user = CustomUser.objects.get(username=username)
profile = Profile.objects.get(user=user)
if current_user in profile.followers.all():
check = True
posts = user.post_set.all()
context = {
'profile' : profile,
'posts' : posts,
'check' : check,
}
return render(request, 'profile.html', context)
def follow_user_view(request, user1, user2):
follower = CustomUser.objects.get(username = user1)
to_follow = CustomUser.objects.get(username = user2)
follower_profile = Profile.objects.get(user = follower)
to_follow_profile = Profile.objects.get(user = to_follow)
if follower not in to_follow_profile.followers.all():
follower_profile.following.add(to_follow)
to_follow_profile.followers.add(follower)
follower_profile.following_count += 1
to_follow_profile.followers_count += 1
follower_profile.save()
to_follow_profile.save()
return redirect('profile', user2)
else:
return redirect('profile', user2)
|
normal
|
{
"blob_id": "3caaa455cda0567b79ae063c777846157839d64f",
"index": 8548,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef profile_page_view(request, username):\n current_user = request.user\n user = CustomUser.objects.get(username=username)\n profile = Profile.objects.get(user=user)\n if current_user in profile.followers.all():\n check = True\n posts = user.post_set.all()\n context = {'profile': profile, 'posts': posts, 'check': check}\n return render(request, 'profile.html', context)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef profile_page_view(request, username):\n current_user = request.user\n user = CustomUser.objects.get(username=username)\n profile = Profile.objects.get(user=user)\n if current_user in profile.followers.all():\n check = True\n posts = user.post_set.all()\n context = {'profile': profile, 'posts': posts, 'check': check}\n return render(request, 'profile.html', context)\n\n\ndef follow_user_view(request, user1, user2):\n follower = CustomUser.objects.get(username=user1)\n to_follow = CustomUser.objects.get(username=user2)\n follower_profile = Profile.objects.get(user=follower)\n to_follow_profile = Profile.objects.get(user=to_follow)\n if follower not in to_follow_profile.followers.all():\n follower_profile.following.add(to_follow)\n to_follow_profile.followers.add(follower)\n follower_profile.following_count += 1\n to_follow_profile.followers_count += 1\n follower_profile.save()\n to_follow_profile.save()\n return redirect('profile', user2)\n else:\n return redirect('profile', user2)\n",
"step-4": "from django.shortcuts import redirect, render\nfrom users.models import CustomUser\nfrom .models import Profile\n\n\ndef profile_page_view(request, username):\n current_user = request.user\n user = CustomUser.objects.get(username=username)\n profile = Profile.objects.get(user=user)\n if current_user in profile.followers.all():\n check = True\n posts = user.post_set.all()\n context = {'profile': profile, 'posts': posts, 'check': check}\n return render(request, 'profile.html', context)\n\n\ndef follow_user_view(request, user1, user2):\n follower = CustomUser.objects.get(username=user1)\n to_follow = CustomUser.objects.get(username=user2)\n follower_profile = Profile.objects.get(user=follower)\n to_follow_profile = Profile.objects.get(user=to_follow)\n if follower not in to_follow_profile.followers.all():\n follower_profile.following.add(to_follow)\n to_follow_profile.followers.add(follower)\n follower_profile.following_count += 1\n to_follow_profile.followers_count += 1\n follower_profile.save()\n to_follow_profile.save()\n return redirect('profile', user2)\n else:\n return redirect('profile', user2)\n",
"step-5": "from django.shortcuts import redirect, render\nfrom users.models import CustomUser\nfrom .models import Profile\n\ndef profile_page_view(request, username):\n current_user = request.user\n user = CustomUser.objects.get(username=username)\n profile = Profile.objects.get(user=user)\n if current_user in profile.followers.all():\n check = True\n posts = user.post_set.all()\n context = {\n 'profile' : profile,\n 'posts' : posts,\n 'check' : check,\n }\n return render(request, 'profile.html', context)\n\ndef follow_user_view(request, user1, user2):\n follower = CustomUser.objects.get(username = user1)\n to_follow = CustomUser.objects.get(username = user2)\n follower_profile = Profile.objects.get(user = follower)\n to_follow_profile = Profile.objects.get(user = to_follow)\n if follower not in to_follow_profile.followers.all():\n follower_profile.following.add(to_follow)\n to_follow_profile.followers.add(follower)\n follower_profile.following_count += 1\n to_follow_profile.followers_count += 1\n follower_profile.save()\n to_follow_profile.save()\n return redirect('profile', user2)\n \n else:\n return redirect('profile', user2)\n\n\n \n\n\n\n\n ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class event_ticket(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class event_registration(models.Model):
_inherit = 'event.registration'
event_ticket_id = fields.Many2one('event.event.ticket', 'Event Ticket')
sale_order_id = fields.Many2one('sale.order', 'Source Sale Order',
ondelete='cascade')
sale_order_line_id = fields.Many2one('sale.order.line',
'Sale Order Line', ondelete='cascade')
@api.one
@api.constrains('event_ticket_id', 'state')
def _check_ticket_seats_limit(self):
if (self.event_ticket_id.seats_max and self.event_ticket_id.
seats_available < 0):
raise UserError(_('No more available seats for this ticket'))
@api.multi
def _check_auto_confirmation(self):
res = super(event_registration, self)._check_auto_confirmation()
if res:
orders = self.env['sale.order'].search([('state', '=', 'draft'),
('id', 'in', self.mapped('sale_order_id').ids)], limit=1)
if orders:
res = False
return res
@api.model
def create(self, vals):
res = super(event_registration, self).create(vals)
if res.origin or res.sale_order_id:
message = _(
'The registration has been created for event %(event_name)s%(ticket)s from sale order %(order)s'
) % {'event_name': '<i>%s</i>' % res.event_id.name,
'ticket': res.event_ticket_id and _(' with ticket %s') % (
'<i>%s</i>' % res.event_ticket_id.name) or '', 'order': res
.origin or res.sale_order_id.name}
res.message_post(body=message)
return res
@api.model
def _prepare_attendee_values(self, registration):
""" Override to add sale related stuff """
line_id = registration.get('sale_order_line_id')
if line_id:
registration.setdefault('partner_id', line_id.order_id.partner_id)
att_data = super(event_registration, self)._prepare_attendee_values(
registration)
if line_id:
att_data.update({'event_id': line_id.event_id.id, 'event_id':
line_id.event_id.id, 'event_ticket_id': line_id.
event_ticket_id.id, 'origin': line_id.order_id.name,
'sale_order_id': line_id.order_id.id, 'sale_order_line_id':
line_id.id})
return att_data
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class event_ticket(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@api.model
def _default_product_id(self):
try:
product = self.env['ir.model.data'].get_object('event_sale',
'product_product_event')
return product.id
except ValueError:
return False
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _get_price_reduce(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, 0.0)
for ticket in self.browse(cr, uid, ids, context=context):
product = ticket.product_id
discount = product.lst_price and (product.lst_price - product.price
) / product.lst_price or 0.0
res[ticket.id] = (1.0 - discount) * ticket.price
return res
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@api.multi
@api.depends('seats_max', 'registration_ids.state')
def _compute_seats(self):
""" Determine reserved, available, reserved but unconfirmed and used seats. """
for ticket in self:
ticket.seats_availability = ('unlimited' if ticket.seats_max ==
0 else 'limited')
(ticket.seats_unconfirmed) = (ticket.seats_reserved) = (ticket.
seats_used) = (ticket.seats_available) = 0
if self.ids:
state_field = {'draft': 'seats_unconfirmed', 'open':
'seats_reserved', 'done': 'seats_used'}
query = """ SELECT event_ticket_id, state, count(event_id)
FROM event_registration
WHERE event_ticket_id IN %s AND state IN ('draft', 'open', 'done')
GROUP BY event_ticket_id, state
"""
self._cr.execute(query, (tuple(self.ids),))
for event_ticket_id, state, num in self._cr.fetchall():
ticket = self.browse(event_ticket_id)
ticket[state_field[state]] += num
for ticket in self:
if ticket.seats_max > 0:
ticket.seats_available = ticket.seats_max - (ticket.
seats_reserved + ticket.seats_used)
@api.one
@api.constrains('registration_ids', 'seats_max')
def _check_seats_limit(self):
if self.seats_max and self.seats_available < 0:
raise UserError(_('No more available seats for the ticket'))
@api.onchange('product_id')
def onchange_product_id(self):
price = self.product_id.list_price if self.product_id else 0
return {'value': {'price': price}}
class event_registration(models.Model):
_inherit = 'event.registration'
event_ticket_id = fields.Many2one('event.event.ticket', 'Event Ticket')
sale_order_id = fields.Many2one('sale.order', 'Source Sale Order',
ondelete='cascade')
sale_order_line_id = fields.Many2one('sale.order.line',
'Sale Order Line', ondelete='cascade')
@api.one
@api.constrains('event_ticket_id', 'state')
def _check_ticket_seats_limit(self):
if (self.event_ticket_id.seats_max and self.event_ticket_id.
seats_available < 0):
raise UserError(_('No more available seats for this ticket'))
@api.multi
def _check_auto_confirmation(self):
res = super(event_registration, self)._check_auto_confirmation()
if res:
orders = self.env['sale.order'].search([('state', '=', 'draft'),
('id', 'in', self.mapped('sale_order_id').ids)], limit=1)
if orders:
res = False
return res
@api.model
def create(self, vals):
res = super(event_registration, self).create(vals)
if res.origin or res.sale_order_id:
message = _(
'The registration has been created for event %(event_name)s%(ticket)s from sale order %(order)s'
) % {'event_name': '<i>%s</i>' % res.event_id.name,
'ticket': res.event_ticket_id and _(' with ticket %s') % (
'<i>%s</i>' % res.event_ticket_id.name) or '', 'order': res
.origin or res.sale_order_id.name}
res.message_post(body=message)
return res
@api.model
def _prepare_attendee_values(self, registration):
""" Override to add sale related stuff """
line_id = registration.get('sale_order_line_id')
if line_id:
registration.setdefault('partner_id', line_id.order_id.partner_id)
att_data = super(event_registration, self)._prepare_attendee_values(
registration)
if line_id:
att_data.update({'event_id': line_id.event_id.id, 'event_id':
line_id.event_id.id, 'event_ticket_id': line_id.
event_ticket_id.id, 'origin': line_id.order_id.name,
'sale_order_id': line_id.order_id.id, 'sale_order_line_id':
line_id.id})
return att_data
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class event_ticket(models.Model):
_name = 'event.event.ticket'
_description = 'Event Ticket'
name = fields.Char('Name', required=True, translate=True)
event_id = fields.Many2one('event.event', 'Event', required=True,
ondelete='cascade')
product_id = fields.Many2one('product.product', 'Product', required=
True, domain=[('event_type_id', '!=', False)], default=lambda self:
self._default_product_id())
registration_ids = fields.One2many('event.registration',
'event_ticket_id', 'Registrations')
price = fields.Float('Price', digits=dp.get_precision('Product Price'))
deadline = fields.Date('Sales End')
is_expired = fields.Boolean('Is Expired', compute='_is_expired')
@api.model
def _default_product_id(self):
try:
product = self.env['ir.model.data'].get_object('event_sale',
'product_product_event')
return product.id
except ValueError:
return False
@api.one
@api.depends('deadline')
def _is_expired(self):
if self.deadline:
current_date = fields.Date.context_today(self.with_context({
'tz': self.event_id.date_tz}))
self.is_expired = self.deadline < current_date
else:
self.is_expired = False
<|reserved_special_token_0|>
def _get_price_reduce(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, 0.0)
for ticket in self.browse(cr, uid, ids, context=context):
product = ticket.product_id
discount = product.lst_price and (product.lst_price - product.price
) / product.lst_price or 0.0
res[ticket.id] = (1.0 - discount) * ticket.price
return res
_columns = {'price_reduce': old_fields.function(_get_price_reduce, type
='float', string='Price Reduce', digits_compute=dp.get_precision(
'Product Price'))}
seats_availability = fields.Selection([('limited', 'Limited'), (
'unlimited', 'Unlimited')], 'Available Seat', required=True, store=
True, compute='_compute_seats', default='limited')
seats_max = fields.Integer('Maximum Available Seats', help=
'Define the number of available tickets. If you have too much registrations you will not be able to sell tickets anymore. Set 0 to ignore this rule set as unlimited.'
)
seats_reserved = fields.Integer(string='Reserved Seats', compute=
'_compute_seats', store=True)
seats_available = fields.Integer(string='Available Seats', compute=
'_compute_seats', store=True)
seats_unconfirmed = fields.Integer(string=
'Unconfirmed Seat Reservations', compute='_compute_seats', store=True)
seats_used = fields.Integer(compute='_compute_seats', store=True)
@api.multi
@api.depends('seats_max', 'registration_ids.state')
def _compute_seats(self):
""" Determine reserved, available, reserved but unconfirmed and used seats. """
for ticket in self:
ticket.seats_availability = ('unlimited' if ticket.seats_max ==
0 else 'limited')
(ticket.seats_unconfirmed) = (ticket.seats_reserved) = (ticket.
seats_used) = (ticket.seats_available) = 0
if self.ids:
state_field = {'draft': 'seats_unconfirmed', 'open':
'seats_reserved', 'done': 'seats_used'}
query = """ SELECT event_ticket_id, state, count(event_id)
FROM event_registration
WHERE event_ticket_id IN %s AND state IN ('draft', 'open', 'done')
GROUP BY event_ticket_id, state
"""
self._cr.execute(query, (tuple(self.ids),))
for event_ticket_id, state, num in self._cr.fetchall():
ticket = self.browse(event_ticket_id)
ticket[state_field[state]] += num
for ticket in self:
if ticket.seats_max > 0:
ticket.seats_available = ticket.seats_max - (ticket.
seats_reserved + ticket.seats_used)
@api.one
@api.constrains('registration_ids', 'seats_max')
def _check_seats_limit(self):
if self.seats_max and self.seats_available < 0:
raise UserError(_('No more available seats for the ticket'))
@api.onchange('product_id')
def onchange_product_id(self):
price = self.product_id.list_price if self.product_id else 0
return {'value': {'price': price}}
class event_registration(models.Model):
    """Event registration extended with its source sale order / order line."""
    _inherit = 'event.registration'

    event_ticket_id = fields.Many2one('event.event.ticket', 'Event Ticket')
    # Real relational fields (in addition to the generic 'origin' char field)
    # so attendees stay correctly linked to sale orders and their lines.
    sale_order_id = fields.Many2one('sale.order', 'Source Sale Order',
        ondelete='cascade')
    sale_order_line_id = fields.Many2one('sale.order.line',
        'Sale Order Line', ondelete='cascade')

    @api.one
    @api.constrains('event_ticket_id', 'state')
    def _check_ticket_seats_limit(self):
        """Reject the registration when its ticket ran out of seats (0 max = unlimited)."""
        if (self.event_ticket_id.seats_max and self.event_ticket_id.
                seats_available < 0):
            raise UserError(_('No more available seats for this ticket'))

    @api.multi
    def _check_auto_confirmation(self):
        """Disable auto-confirmation while the source sale order is still a draft."""
        res = super(event_registration, self)._check_auto_confirmation()
        if res:
            # One linked draft order is enough to block auto-confirmation.
            orders = self.env['sale.order'].search([('state', '=', 'draft'),
                ('id', 'in', self.mapped('sale_order_id').ids)], limit=1)
            if orders:
                res = False
        return res

    @api.model
    def create(self, vals):
        """Create the registration and log a chatter note when it comes from a sale order."""
        res = super(event_registration, self).create(vals)
        if res.origin or res.sale_order_id:
            message = _(
                'The registration has been created for event %(event_name)s%(ticket)s from sale order %(order)s'
                ) % {'event_name': '<i>%s</i>' % res.event_id.name,
                'ticket': res.event_ticket_id and _(' with ticket %s') % (
                '<i>%s</i>' % res.event_ticket_id.name) or '', 'order': res
                .origin or res.sale_order_id.name}
            res.message_post(body=message)
        return res

    @api.model
    def _prepare_attendee_values(self, registration):
        """ Override to add sale related stuff """
        line_id = registration.get('sale_order_line_id')
        if line_id:
            registration.setdefault('partner_id', line_id.order_id.partner_id)
        att_data = super(event_registration, self)._prepare_attendee_values(
            registration)
        if line_id:
            # Fix: 'event_id' was duplicated in this dict literal; keep one.
            att_data.update({
                'event_id': line_id.event_id.id,
                'event_ticket_id': line_id.event_ticket_id.id,
                'origin': line_id.order_id.name,
                'sale_order_id': line_id.order_id.id,
                'sale_order_line_id': line_id.id,
            })
        return att_data
<|reserved_special_token_1|>
from yuancloud import models, fields, api, _
import yuancloud.addons.decimal_precision as dp
from yuancloud.exceptions import UserError
from yuancloud.osv import fields as old_fields
class event_event(models.Model):
    _inherit = 'event.event'

    event_ticket_ids = fields.One2many('event.event.ticket', 'event_id',
        string='Event Ticket', default=lambda rec: rec._default_tickets(),
        copy=True)

    @api.model
    def _default_tickets(self):
        """Return one default 'Subscription' ticket line, or an empty
        ``event.event.ticket`` recordset when the demo product is missing."""
        try:
            ticket_product = self.env.ref('event_sale.product_product_event')
        except ValueError:
            # Referenced XML-id no longer exists: no default ticket.
            return self.env['event.event.ticket']
        return [{'name': _('Subscription'),
                 'product_id': ticket_product.id,
                 'price': 0}]
class event_ticket(models.Model):
    _name = 'event.event.ticket'
    _description = 'Event Ticket'

    name = fields.Char('Name', required=True, translate=True)
    event_id = fields.Many2one('event.event', 'Event', required=True,
        ondelete='cascade')
    product_id = fields.Many2one('product.product', 'Product', required=True,
        domain=[('event_type_id', '!=', False)],
        default=lambda self: self._default_product_id())
    registration_ids = fields.One2many('event.registration',
        'event_ticket_id', 'Registrations')
    price = fields.Float('Price', digits=dp.get_precision('Product Price'))
    deadline = fields.Date('Sales End')
    is_expired = fields.Boolean('Is Expired', compute='_is_expired')

    @api.model
    def _default_product_id(self):
        """Return the generic event product's id, or False if it was deleted."""
        try:
            return self.env['ir.model.data'].get_object(
                'event_sale', 'product_product_event').id
        except ValueError:
            return False

    @api.one
    @api.depends('deadline')
    def _is_expired(self):
        """Expired once the sales deadline is past, in the event's timezone."""
        if not self.deadline:
            self.is_expired = False
        else:
            today = fields.Date.context_today(
                self.with_context({'tz': self.event_id.date_tz}))
            self.is_expired = self.deadline < today
    """
    price_reduce = fields.Float("Price Reduce", compute="_get_price_reduce", store=False,
        digits=dp.get_precision('Product Price'))
    @api.one
    @api.depends('price', 'product_id.lst_price', 'product_id.price')
    def _get_price_reduce(self):
        product = self.product_id
        discount = product.lst_price and (product.lst_price - product.price) / product.lst_price or 0.0
        self.price_reduce = (1.0 - discount) * self.price
    """

    # Old-API function field kept so 'price_reduce' shows up in _columns
    # (the new-API compute above is deliberately commented out).
    def _get_price_reduce(self, cr, uid, ids, field_name, arg, context=None):
        """Ticket price with the product's pricelist discount applied."""
        res = dict.fromkeys(ids, 0.0)
        for rec in self.browse(cr, uid, ids, context=context):
            prod = rec.product_id
            if prod.lst_price:
                discount = (prod.lst_price - prod.price) / prod.lst_price
            else:
                discount = 0.0
            res[rec.id] = (1.0 - discount) * rec.price
        return res
    _columns = {
        'price_reduce': old_fields.function(
            _get_price_reduce, type='float', string='Price Reduce',
            digits_compute=dp.get_precision('Product Price')),
    }

    # Seat bookkeeping fields, all derived by _compute_seats.
    seats_availability = fields.Selection(
        [('limited', 'Limited'), ('unlimited', 'Unlimited')],
        'Available Seat', required=True, store=True,
        compute='_compute_seats', default='limited')
    seats_max = fields.Integer('Maximum Available Seats',
        help="Define the number of available tickets. If you have too much registrations you will "
             "not be able to sell tickets anymore. Set 0 to ignore this rule set as unlimited.")
    seats_reserved = fields.Integer(string='Reserved Seats',
        compute='_compute_seats', store=True)
    seats_available = fields.Integer(string='Available Seats',
        compute='_compute_seats', store=True)
    seats_unconfirmed = fields.Integer(string='Unconfirmed Seat Reservations',
        compute='_compute_seats', store=True)
    seats_used = fields.Integer(compute='_compute_seats', store=True)

    @api.multi
    @api.depends('seats_max', 'registration_ids.state')
    def _compute_seats(self):
        """ Determine reserved, available, reserved but unconfirmed and used seats. """
        # Reset counters and derive the availability flag.
        for rec in self:
            rec.seats_availability = 'unlimited' if rec.seats_max == 0 else 'limited'
            rec.seats_unconfirmed = rec.seats_reserved = rec.seats_used = rec.seats_available = 0
        # Count registrations per ticket and state in one grouped SQL query.
        if self.ids:
            state_field = {
                'draft': 'seats_unconfirmed',
                'open': 'seats_reserved',
                'done': 'seats_used',
            }
            query = """ SELECT event_ticket_id, state, count(event_id)
                        FROM event_registration
                        WHERE event_ticket_id IN %s AND state IN ('draft', 'open', 'done')
                        GROUP BY event_ticket_id, state
                    """
            self._cr.execute(query, (tuple(self.ids),))
            for ticket_id, state, num in self._cr.fetchall():
                self.browse(ticket_id)[state_field[state]] += num
        # Remaining seats only make sense for limited tickets.
        for rec in self:
            if rec.seats_max > 0:
                rec.seats_available = rec.seats_max - (rec.seats_reserved + rec.seats_used)

    @api.one
    @api.constrains('registration_ids', 'seats_max')
    def _check_seats_limit(self):
        """Reject writes that would drive seat availability negative."""
        if self.seats_max and self.seats_available < 0:
            raise UserError(_('No more available seats for the ticket'))

    @api.onchange('product_id')
    def onchange_product_id(self):
        """Default the ticket price from the selected product's list price."""
        if self.product_id:
            new_price = self.product_id.list_price
        else:
            new_price = 0
        return {'value': {'price': new_price}}
class event_registration(models.Model):
    """Event registration extended with its source sale order / order line."""
    _inherit = 'event.registration'

    event_ticket_id = fields.Many2one('event.event.ticket', 'Event Ticket')
    # Real relational fields (in addition to the generic 'origin' char field)
    # so attendees stay correctly linked to sale orders and their lines.
    sale_order_id = fields.Many2one('sale.order', 'Source Sale Order',
        ondelete='cascade')
    sale_order_line_id = fields.Many2one('sale.order.line',
        'Sale Order Line', ondelete='cascade')

    @api.one
    @api.constrains('event_ticket_id', 'state')
    def _check_ticket_seats_limit(self):
        """Reject the registration when its ticket ran out of seats (0 max = unlimited)."""
        if (self.event_ticket_id.seats_max and self.event_ticket_id.
                seats_available < 0):
            raise UserError(_('No more available seats for this ticket'))

    @api.multi
    def _check_auto_confirmation(self):
        """Disable auto-confirmation while the source sale order is still a draft."""
        res = super(event_registration, self)._check_auto_confirmation()
        if res:
            # One linked draft order is enough to block auto-confirmation.
            orders = self.env['sale.order'].search([('state', '=', 'draft'),
                ('id', 'in', self.mapped('sale_order_id').ids)], limit=1)
            if orders:
                res = False
        return res

    @api.model
    def create(self, vals):
        """Create the registration and log a chatter note when it comes from a sale order."""
        res = super(event_registration, self).create(vals)
        if res.origin or res.sale_order_id:
            message = _(
                'The registration has been created for event %(event_name)s%(ticket)s from sale order %(order)s'
                ) % {'event_name': '<i>%s</i>' % res.event_id.name,
                'ticket': res.event_ticket_id and _(' with ticket %s') % (
                '<i>%s</i>' % res.event_ticket_id.name) or '', 'order': res
                .origin or res.sale_order_id.name}
            res.message_post(body=message)
        return res

    @api.model
    def _prepare_attendee_values(self, registration):
        """ Override to add sale related stuff """
        line_id = registration.get('sale_order_line_id')
        if line_id:
            registration.setdefault('partner_id', line_id.order_id.partner_id)
        att_data = super(event_registration, self)._prepare_attendee_values(
            registration)
        if line_id:
            # Fix: 'event_id' was duplicated in this dict literal; keep one.
            att_data.update({
                'event_id': line_id.event_id.id,
                'event_ticket_id': line_id.event_ticket_id.id,
                'origin': line_id.order_id.name,
                'sale_order_id': line_id.order_id.id,
                'sale_order_line_id': line_id.id,
            })
        return att_data
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from yuancloud import models, fields, api, _
import yuancloud.addons.decimal_precision as dp
from yuancloud.exceptions import UserError
from yuancloud.osv import fields as old_fields
class event_event(models.Model):
    _inherit = 'event.event'

    event_ticket_ids = fields.One2many(
        'event.event.ticket', 'event_id', string='Event Ticket',
        default=lambda rec: rec._default_tickets(), copy=True)

    @api.model
    def _default_tickets(self):
        """Return one default 'Subscription' ticket line, or an empty
        ``event.event.ticket`` recordset when the demo product is missing."""
        try:
            ticket_product = self.env.ref('event_sale.product_product_event')
        except ValueError:
            # Referenced XML-id no longer exists: no default ticket.
            return self.env['event.event.ticket']
        return [{
            'name': _('Subscription'),
            'product_id': ticket_product.id,
            'price': 0,
        }]
class event_ticket(models.Model):
    """A sellable ticket type for an event, backed by a product and capped by seats."""
    _name = 'event.event.ticket'
    _description = 'Event Ticket'
    name = fields.Char('Name', required=True, translate=True)
    event_id = fields.Many2one('event.event', "Event", required=True, ondelete='cascade')
    # Invoicing product; restricted to event-type products.
    product_id = fields.Many2one(
        'product.product', 'Product',
        required=True, domain=[("event_type_id", "!=", False)],
        default=lambda self: self._default_product_id())
    registration_ids = fields.One2many('event.registration', 'event_ticket_id', 'Registrations')
    price = fields.Float('Price', digits=dp.get_precision('Product Price'))
    deadline = fields.Date("Sales End")
    is_expired = fields.Boolean('Is Expired', compute='_is_expired')
    @api.model
    def _default_product_id(self):
        """Return the id of the generic event product, or False if it was deleted."""
        try:
            product = self.env['ir.model.data'].get_object('event_sale', 'product_product_event')
            return product.id
        except ValueError:
            return False
    @api.one
    @api.depends('deadline')
    def _is_expired(self):
        """Expired when the sales deadline is strictly before today in the event's timezone."""
        if self.deadline:
            current_date = fields.Date.context_today(self.with_context({'tz': self.event_id.date_tz}))
            self.is_expired = self.deadline < current_date
        else:
            self.is_expired = False
    # FIXME non-stored fields wont ends up in _columns (and thus _all_columns), which forbid them
    # to be used in qweb views. Waiting a fix, we create an old function field directly.
    """
    price_reduce = fields.Float("Price Reduce", compute="_get_price_reduce", store=False,
        digits=dp.get_precision('Product Price'))
    @api.one
    @api.depends('price', 'product_id.lst_price', 'product_id.price')
    def _get_price_reduce(self):
        product = self.product_id
        discount = product.lst_price and (product.lst_price - product.price) / product.lst_price or 0.0
        self.price_reduce = (1.0 - discount) * self.price
    """
    # Old-API implementation of the commented-out compute above (see FIXME):
    # ticket price with the product's pricelist discount applied.
    def _get_price_reduce(self, cr, uid, ids, field_name, arg, context=None):
        res = dict.fromkeys(ids, 0.0)
        for ticket in self.browse(cr, uid, ids, context=context):
            product = ticket.product_id
            discount = product.lst_price and (product.lst_price - product.price) / product.lst_price or 0.0
            res[ticket.id] = (1.0 - discount) * ticket.price
        return res
    _columns = {
        'price_reduce': old_fields.function(_get_price_reduce, type='float', string='Price Reduce',
            digits_compute=dp.get_precision('Product Price')),
    }
    # seats fields
    seats_availability = fields.Selection(
        [('limited', 'Limited'), ('unlimited', 'Unlimited')],
        'Available Seat', required=True, store=True, compute='_compute_seats', default="limited")
    seats_max = fields.Integer('Maximum Available Seats',
        help="Define the number of available tickets. If you have too much registrations you will "
             "not be able to sell tickets anymore. Set 0 to ignore this rule set as unlimited.")
    seats_reserved = fields.Integer(string='Reserved Seats', compute='_compute_seats', store=True)
    seats_available = fields.Integer(string='Available Seats', compute='_compute_seats', store=True)
    seats_unconfirmed = fields.Integer(string='Unconfirmed Seat Reservations', compute='_compute_seats', store=True)
    seats_used = fields.Integer(compute='_compute_seats', store=True)
    @api.multi
    @api.depends('seats_max', 'registration_ids.state')
    def _compute_seats(self):
        """ Determine reserved, available, reserved but unconfirmed and used seats. """
        # initialize fields to 0 + compute seats availability
        for ticket in self:
            ticket.seats_availability = 'unlimited' if ticket.seats_max == 0 else 'limited'
            ticket.seats_unconfirmed = ticket.seats_reserved = ticket.seats_used = ticket.seats_available = 0
        # aggregate registrations by ticket and by state
        if self.ids:
            state_field = {
                'draft': 'seats_unconfirmed',
                'open': 'seats_reserved',
                'done': 'seats_used',
            }
            # One grouped SQL query instead of a count per ticket/state pair.
            query = """ SELECT event_ticket_id, state, count(event_id)
                        FROM event_registration
                        WHERE event_ticket_id IN %s AND state IN ('draft', 'open', 'done')
                        GROUP BY event_ticket_id, state
                    """
            self._cr.execute(query, (tuple(self.ids),))
            for event_ticket_id, state, num in self._cr.fetchall():
                ticket = self.browse(event_ticket_id)
                ticket[state_field[state]] += num
        # compute seats_available
        for ticket in self:
            if ticket.seats_max > 0:
                ticket.seats_available = ticket.seats_max - (ticket.seats_reserved + ticket.seats_used)
    @api.one
    @api.constrains('registration_ids', 'seats_max')
    def _check_seats_limit(self):
        """Block writes that would make seat availability negative (0 max = unlimited)."""
        if self.seats_max and self.seats_available < 0:
            raise UserError(_('No more available seats for the ticket'))
    @api.onchange('product_id')
    def onchange_product_id(self):
        """Default the ticket price from the selected product's list price."""
        price = self.product_id.list_price if self.product_id else 0
        return {'value': {'price': price}}
class event_registration(models.Model):
    """Event registration extended with its source sale order / order line."""
    _inherit = 'event.registration'
    event_ticket_id = fields.Many2one('event.event.ticket', 'Event Ticket')
    # in addition to origin generic fields, add real relational fields to correctly
    # handle attendees linked to sale orders and their lines
    # TDE FIXME: maybe add an onchange on sale_order_id + origin
    sale_order_id = fields.Many2one('sale.order', 'Source Sale Order', ondelete='cascade')
    sale_order_line_id = fields.Many2one('sale.order.line', 'Sale Order Line', ondelete='cascade')
    @api.one
    @api.constrains('event_ticket_id', 'state')
    def _check_ticket_seats_limit(self):
        """Reject the registration when its ticket ran out of seats (0 max = unlimited)."""
        if self.event_ticket_id.seats_max and self.event_ticket_id.seats_available < 0:
            raise UserError(_('No more available seats for this ticket'))
    @api.multi
    def _check_auto_confirmation(self):
        """Disable auto-confirmation while the source sale order is still a draft."""
        res = super(event_registration, self)._check_auto_confirmation()
        if res:
            # One linked draft order is enough to block auto-confirmation.
            orders = self.env['sale.order'].search([('state', '=', 'draft'), ('id', 'in', self.mapped('sale_order_id').ids)], limit=1)
            if orders:
                res = False
        return res
    @api.model
    def create(self, vals):
        """Create the registration and log a chatter note when it comes from a sale order."""
        res = super(event_registration, self).create(vals)
        if res.origin or res.sale_order_id:
            message = _("The registration has been created for event %(event_name)s%(ticket)s from sale order %(order)s") % ({
                'event_name': '<i>%s</i>' % res.event_id.name,
                'ticket': res.event_ticket_id and _(' with ticket %s') % (('<i>%s</i>') % res.event_ticket_id.name) or '',
                'order': res.origin or res.sale_order_id.name})
            res.message_post(body=message)
        return res
    @api.model
    def _prepare_attendee_values(self, registration):
        """ Override to add sale related stuff """
        line_id = registration.get('sale_order_line_id')
        if line_id:
            registration.setdefault('partner_id', line_id.order_id.partner_id)
        att_data = super(event_registration, self)._prepare_attendee_values(registration)
        if line_id:
            # Fix: 'event_id' appeared twice in this dict literal (redundant
            # duplicate key with the same value); a single occurrence is kept.
            att_data.update({
                'event_id': line_id.event_id.id,
                'event_ticket_id': line_id.event_ticket_id.id,
                'origin': line_id.order_id.name,
                'sale_order_id': line_id.order_id.id,
                'sale_order_line_id': line_id.id,
            })
        return att_data
|
flexible
|
{
"blob_id": "bddba2fd710829db17c6419878ce535df0aba01c",
"index": 2760,
"step-1": "<mask token>\n\n\nclass event_ticket(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass event_registration(models.Model):\n _inherit = 'event.registration'\n event_ticket_id = fields.Many2one('event.event.ticket', 'Event Ticket')\n sale_order_id = fields.Many2one('sale.order', 'Source Sale Order',\n ondelete='cascade')\n sale_order_line_id = fields.Many2one('sale.order.line',\n 'Sale Order Line', ondelete='cascade')\n\n @api.one\n @api.constrains('event_ticket_id', 'state')\n def _check_ticket_seats_limit(self):\n if (self.event_ticket_id.seats_max and self.event_ticket_id.\n seats_available < 0):\n raise UserError(_('No more available seats for this ticket'))\n\n @api.multi\n def _check_auto_confirmation(self):\n res = super(event_registration, self)._check_auto_confirmation()\n if res:\n orders = self.env['sale.order'].search([('state', '=', 'draft'),\n ('id', 'in', self.mapped('sale_order_id').ids)], limit=1)\n if orders:\n res = False\n return res\n\n @api.model\n def create(self, vals):\n res = super(event_registration, self).create(vals)\n if res.origin or res.sale_order_id:\n message = _(\n 'The registration has been created for event %(event_name)s%(ticket)s from sale order %(order)s'\n ) % {'event_name': '<i>%s</i>' % res.event_id.name,\n 'ticket': res.event_ticket_id and _(' with ticket %s') % (\n '<i>%s</i>' % res.event_ticket_id.name) or '', 'order': res\n .origin or res.sale_order_id.name}\n res.message_post(body=message)\n return res\n\n @api.model\n def _prepare_attendee_values(self, registration):\n \"\"\" Override to add sale related stuff \"\"\"\n line_id = registration.get('sale_order_line_id')\n if line_id:\n 
registration.setdefault('partner_id', line_id.order_id.partner_id)\n att_data = super(event_registration, self)._prepare_attendee_values(\n registration)\n if line_id:\n att_data.update({'event_id': line_id.event_id.id, 'event_id':\n line_id.event_id.id, 'event_ticket_id': line_id.\n event_ticket_id.id, 'origin': line_id.order_id.name,\n 'sale_order_id': line_id.order_id.id, 'sale_order_line_id':\n line_id.id})\n return att_data\n",
"step-2": "<mask token>\n\n\nclass event_ticket(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @api.model\n def _default_product_id(self):\n try:\n product = self.env['ir.model.data'].get_object('event_sale',\n 'product_product_event')\n return product.id\n except ValueError:\n return False\n <mask token>\n <mask token>\n\n def _get_price_reduce(self, cr, uid, ids, field_name, arg, context=None):\n res = dict.fromkeys(ids, 0.0)\n for ticket in self.browse(cr, uid, ids, context=context):\n product = ticket.product_id\n discount = product.lst_price and (product.lst_price - product.price\n ) / product.lst_price or 0.0\n res[ticket.id] = (1.0 - discount) * ticket.price\n return res\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @api.multi\n @api.depends('seats_max', 'registration_ids.state')\n def _compute_seats(self):\n \"\"\" Determine reserved, available, reserved but unconfirmed and used seats. 
\"\"\"\n for ticket in self:\n ticket.seats_availability = ('unlimited' if ticket.seats_max ==\n 0 else 'limited')\n (ticket.seats_unconfirmed) = (ticket.seats_reserved) = (ticket.\n seats_used) = (ticket.seats_available) = 0\n if self.ids:\n state_field = {'draft': 'seats_unconfirmed', 'open':\n 'seats_reserved', 'done': 'seats_used'}\n query = \"\"\" SELECT event_ticket_id, state, count(event_id)\n FROM event_registration\n WHERE event_ticket_id IN %s AND state IN ('draft', 'open', 'done')\n GROUP BY event_ticket_id, state\n \"\"\"\n self._cr.execute(query, (tuple(self.ids),))\n for event_ticket_id, state, num in self._cr.fetchall():\n ticket = self.browse(event_ticket_id)\n ticket[state_field[state]] += num\n for ticket in self:\n if ticket.seats_max > 0:\n ticket.seats_available = ticket.seats_max - (ticket.\n seats_reserved + ticket.seats_used)\n\n @api.one\n @api.constrains('registration_ids', 'seats_max')\n def _check_seats_limit(self):\n if self.seats_max and self.seats_available < 0:\n raise UserError(_('No more available seats for the ticket'))\n\n @api.onchange('product_id')\n def onchange_product_id(self):\n price = self.product_id.list_price if self.product_id else 0\n return {'value': {'price': price}}\n\n\nclass event_registration(models.Model):\n _inherit = 'event.registration'\n event_ticket_id = fields.Many2one('event.event.ticket', 'Event Ticket')\n sale_order_id = fields.Many2one('sale.order', 'Source Sale Order',\n ondelete='cascade')\n sale_order_line_id = fields.Many2one('sale.order.line',\n 'Sale Order Line', ondelete='cascade')\n\n @api.one\n @api.constrains('event_ticket_id', 'state')\n def _check_ticket_seats_limit(self):\n if (self.event_ticket_id.seats_max and self.event_ticket_id.\n seats_available < 0):\n raise UserError(_('No more available seats for this ticket'))\n\n @api.multi\n def _check_auto_confirmation(self):\n res = super(event_registration, self)._check_auto_confirmation()\n if res:\n orders = 
self.env['sale.order'].search([('state', '=', 'draft'),\n ('id', 'in', self.mapped('sale_order_id').ids)], limit=1)\n if orders:\n res = False\n return res\n\n @api.model\n def create(self, vals):\n res = super(event_registration, self).create(vals)\n if res.origin or res.sale_order_id:\n message = _(\n 'The registration has been created for event %(event_name)s%(ticket)s from sale order %(order)s'\n ) % {'event_name': '<i>%s</i>' % res.event_id.name,\n 'ticket': res.event_ticket_id and _(' with ticket %s') % (\n '<i>%s</i>' % res.event_ticket_id.name) or '', 'order': res\n .origin or res.sale_order_id.name}\n res.message_post(body=message)\n return res\n\n @api.model\n def _prepare_attendee_values(self, registration):\n \"\"\" Override to add sale related stuff \"\"\"\n line_id = registration.get('sale_order_line_id')\n if line_id:\n registration.setdefault('partner_id', line_id.order_id.partner_id)\n att_data = super(event_registration, self)._prepare_attendee_values(\n registration)\n if line_id:\n att_data.update({'event_id': line_id.event_id.id, 'event_id':\n line_id.event_id.id, 'event_ticket_id': line_id.\n event_ticket_id.id, 'origin': line_id.order_id.name,\n 'sale_order_id': line_id.order_id.id, 'sale_order_line_id':\n line_id.id})\n return att_data\n",
"step-3": "<mask token>\n\n\nclass event_ticket(models.Model):\n _name = 'event.event.ticket'\n _description = 'Event Ticket'\n name = fields.Char('Name', required=True, translate=True)\n event_id = fields.Many2one('event.event', 'Event', required=True,\n ondelete='cascade')\n product_id = fields.Many2one('product.product', 'Product', required=\n True, domain=[('event_type_id', '!=', False)], default=lambda self:\n self._default_product_id())\n registration_ids = fields.One2many('event.registration',\n 'event_ticket_id', 'Registrations')\n price = fields.Float('Price', digits=dp.get_precision('Product Price'))\n deadline = fields.Date('Sales End')\n is_expired = fields.Boolean('Is Expired', compute='_is_expired')\n\n @api.model\n def _default_product_id(self):\n try:\n product = self.env['ir.model.data'].get_object('event_sale',\n 'product_product_event')\n return product.id\n except ValueError:\n return False\n\n @api.one\n @api.depends('deadline')\n def _is_expired(self):\n if self.deadline:\n current_date = fields.Date.context_today(self.with_context({\n 'tz': self.event_id.date_tz}))\n self.is_expired = self.deadline < current_date\n else:\n self.is_expired = False\n <mask token>\n\n def _get_price_reduce(self, cr, uid, ids, field_name, arg, context=None):\n res = dict.fromkeys(ids, 0.0)\n for ticket in self.browse(cr, uid, ids, context=context):\n product = ticket.product_id\n discount = product.lst_price and (product.lst_price - product.price\n ) / product.lst_price or 0.0\n res[ticket.id] = (1.0 - discount) * ticket.price\n return res\n _columns = {'price_reduce': old_fields.function(_get_price_reduce, type\n ='float', string='Price Reduce', digits_compute=dp.get_precision(\n 'Product Price'))}\n seats_availability = fields.Selection([('limited', 'Limited'), (\n 'unlimited', 'Unlimited')], 'Available Seat', required=True, store=\n True, compute='_compute_seats', default='limited')\n seats_max = fields.Integer('Maximum Available Seats', help=\n 'Define the 
number of available tickets. If you have too much registrations you will not be able to sell tickets anymore. Set 0 to ignore this rule set as unlimited.'\n )\n seats_reserved = fields.Integer(string='Reserved Seats', compute=\n '_compute_seats', store=True)\n seats_available = fields.Integer(string='Available Seats', compute=\n '_compute_seats', store=True)\n seats_unconfirmed = fields.Integer(string=\n 'Unconfirmed Seat Reservations', compute='_compute_seats', store=True)\n seats_used = fields.Integer(compute='_compute_seats', store=True)\n\n @api.multi\n @api.depends('seats_max', 'registration_ids.state')\n def _compute_seats(self):\n \"\"\" Determine reserved, available, reserved but unconfirmed and used seats. \"\"\"\n for ticket in self:\n ticket.seats_availability = ('unlimited' if ticket.seats_max ==\n 0 else 'limited')\n (ticket.seats_unconfirmed) = (ticket.seats_reserved) = (ticket.\n seats_used) = (ticket.seats_available) = 0\n if self.ids:\n state_field = {'draft': 'seats_unconfirmed', 'open':\n 'seats_reserved', 'done': 'seats_used'}\n query = \"\"\" SELECT event_ticket_id, state, count(event_id)\n FROM event_registration\n WHERE event_ticket_id IN %s AND state IN ('draft', 'open', 'done')\n GROUP BY event_ticket_id, state\n \"\"\"\n self._cr.execute(query, (tuple(self.ids),))\n for event_ticket_id, state, num in self._cr.fetchall():\n ticket = self.browse(event_ticket_id)\n ticket[state_field[state]] += num\n for ticket in self:\n if ticket.seats_max > 0:\n ticket.seats_available = ticket.seats_max - (ticket.\n seats_reserved + ticket.seats_used)\n\n @api.one\n @api.constrains('registration_ids', 'seats_max')\n def _check_seats_limit(self):\n if self.seats_max and self.seats_available < 0:\n raise UserError(_('No more available seats for the ticket'))\n\n @api.onchange('product_id')\n def onchange_product_id(self):\n price = self.product_id.list_price if self.product_id else 0\n return {'value': {'price': price}}\n\n\nclass 
event_registration(models.Model):\n _inherit = 'event.registration'\n event_ticket_id = fields.Many2one('event.event.ticket', 'Event Ticket')\n sale_order_id = fields.Many2one('sale.order', 'Source Sale Order',\n ondelete='cascade')\n sale_order_line_id = fields.Many2one('sale.order.line',\n 'Sale Order Line', ondelete='cascade')\n\n @api.one\n @api.constrains('event_ticket_id', 'state')\n def _check_ticket_seats_limit(self):\n if (self.event_ticket_id.seats_max and self.event_ticket_id.\n seats_available < 0):\n raise UserError(_('No more available seats for this ticket'))\n\n @api.multi\n def _check_auto_confirmation(self):\n res = super(event_registration, self)._check_auto_confirmation()\n if res:\n orders = self.env['sale.order'].search([('state', '=', 'draft'),\n ('id', 'in', self.mapped('sale_order_id').ids)], limit=1)\n if orders:\n res = False\n return res\n\n @api.model\n def create(self, vals):\n res = super(event_registration, self).create(vals)\n if res.origin or res.sale_order_id:\n message = _(\n 'The registration has been created for event %(event_name)s%(ticket)s from sale order %(order)s'\n ) % {'event_name': '<i>%s</i>' % res.event_id.name,\n 'ticket': res.event_ticket_id and _(' with ticket %s') % (\n '<i>%s</i>' % res.event_ticket_id.name) or '', 'order': res\n .origin or res.sale_order_id.name}\n res.message_post(body=message)\n return res\n\n @api.model\n def _prepare_attendee_values(self, registration):\n \"\"\" Override to add sale related stuff \"\"\"\n line_id = registration.get('sale_order_line_id')\n if line_id:\n registration.setdefault('partner_id', line_id.order_id.partner_id)\n att_data = super(event_registration, self)._prepare_attendee_values(\n registration)\n if line_id:\n att_data.update({'event_id': line_id.event_id.id, 'event_id':\n line_id.event_id.id, 'event_ticket_id': line_id.\n event_ticket_id.id, 'origin': line_id.order_id.name,\n 'sale_order_id': line_id.order_id.id, 'sale_order_line_id':\n line_id.id})\n return 
att_data\n",
"step-4": "from yuancloud import models, fields, api, _\nimport yuancloud.addons.decimal_precision as dp\nfrom yuancloud.exceptions import UserError\nfrom yuancloud.osv import fields as old_fields\n\n\nclass event_event(models.Model):\n _inherit = 'event.event'\n event_ticket_ids = fields.One2many('event.event.ticket', 'event_id',\n string='Event Ticket', default=lambda rec: rec._default_tickets(),\n copy=True)\n\n @api.model\n def _default_tickets(self):\n try:\n product = self.env.ref('event_sale.product_product_event')\n return [{'name': _('Subscription'), 'product_id': product.id,\n 'price': 0}]\n except ValueError:\n return self.env['event.event.ticket']\n\n\nclass event_ticket(models.Model):\n _name = 'event.event.ticket'\n _description = 'Event Ticket'\n name = fields.Char('Name', required=True, translate=True)\n event_id = fields.Many2one('event.event', 'Event', required=True,\n ondelete='cascade')\n product_id = fields.Many2one('product.product', 'Product', required=\n True, domain=[('event_type_id', '!=', False)], default=lambda self:\n self._default_product_id())\n registration_ids = fields.One2many('event.registration',\n 'event_ticket_id', 'Registrations')\n price = fields.Float('Price', digits=dp.get_precision('Product Price'))\n deadline = fields.Date('Sales End')\n is_expired = fields.Boolean('Is Expired', compute='_is_expired')\n\n @api.model\n def _default_product_id(self):\n try:\n product = self.env['ir.model.data'].get_object('event_sale',\n 'product_product_event')\n return product.id\n except ValueError:\n return False\n\n @api.one\n @api.depends('deadline')\n def _is_expired(self):\n if self.deadline:\n current_date = fields.Date.context_today(self.with_context({\n 'tz': self.event_id.date_tz}))\n self.is_expired = self.deadline < current_date\n else:\n self.is_expired = False\n \"\"\"\n price_reduce = fields.Float(\"Price Reduce\", compute=\"_get_price_reduce\", store=False,\n digits=dp.get_precision('Product Price'))\n @api.one\n 
@api.depends('price', 'product_id.lst_price', 'product_id.price')\n def _get_price_reduce(self):\n product = self.product_id\n discount = product.lst_price and (product.lst_price - product.price) / product.lst_price or 0.0\n self.price_reduce = (1.0 - discount) * self.price\n \"\"\"\n\n def _get_price_reduce(self, cr, uid, ids, field_name, arg, context=None):\n res = dict.fromkeys(ids, 0.0)\n for ticket in self.browse(cr, uid, ids, context=context):\n product = ticket.product_id\n discount = product.lst_price and (product.lst_price - product.price\n ) / product.lst_price or 0.0\n res[ticket.id] = (1.0 - discount) * ticket.price\n return res\n _columns = {'price_reduce': old_fields.function(_get_price_reduce, type\n ='float', string='Price Reduce', digits_compute=dp.get_precision(\n 'Product Price'))}\n seats_availability = fields.Selection([('limited', 'Limited'), (\n 'unlimited', 'Unlimited')], 'Available Seat', required=True, store=\n True, compute='_compute_seats', default='limited')\n seats_max = fields.Integer('Maximum Available Seats', help=\n 'Define the number of available tickets. If you have too much registrations you will not be able to sell tickets anymore. Set 0 to ignore this rule set as unlimited.'\n )\n seats_reserved = fields.Integer(string='Reserved Seats', compute=\n '_compute_seats', store=True)\n seats_available = fields.Integer(string='Available Seats', compute=\n '_compute_seats', store=True)\n seats_unconfirmed = fields.Integer(string=\n 'Unconfirmed Seat Reservations', compute='_compute_seats', store=True)\n seats_used = fields.Integer(compute='_compute_seats', store=True)\n\n @api.multi\n @api.depends('seats_max', 'registration_ids.state')\n def _compute_seats(self):\n \"\"\" Determine reserved, available, reserved but unconfirmed and used seats. 
\"\"\"\n for ticket in self:\n ticket.seats_availability = ('unlimited' if ticket.seats_max ==\n 0 else 'limited')\n (ticket.seats_unconfirmed) = (ticket.seats_reserved) = (ticket.\n seats_used) = (ticket.seats_available) = 0\n if self.ids:\n state_field = {'draft': 'seats_unconfirmed', 'open':\n 'seats_reserved', 'done': 'seats_used'}\n query = \"\"\" SELECT event_ticket_id, state, count(event_id)\n FROM event_registration\n WHERE event_ticket_id IN %s AND state IN ('draft', 'open', 'done')\n GROUP BY event_ticket_id, state\n \"\"\"\n self._cr.execute(query, (tuple(self.ids),))\n for event_ticket_id, state, num in self._cr.fetchall():\n ticket = self.browse(event_ticket_id)\n ticket[state_field[state]] += num\n for ticket in self:\n if ticket.seats_max > 0:\n ticket.seats_available = ticket.seats_max - (ticket.\n seats_reserved + ticket.seats_used)\n\n @api.one\n @api.constrains('registration_ids', 'seats_max')\n def _check_seats_limit(self):\n if self.seats_max and self.seats_available < 0:\n raise UserError(_('No more available seats for the ticket'))\n\n @api.onchange('product_id')\n def onchange_product_id(self):\n price = self.product_id.list_price if self.product_id else 0\n return {'value': {'price': price}}\n\n\nclass event_registration(models.Model):\n _inherit = 'event.registration'\n event_ticket_id = fields.Many2one('event.event.ticket', 'Event Ticket')\n sale_order_id = fields.Many2one('sale.order', 'Source Sale Order',\n ondelete='cascade')\n sale_order_line_id = fields.Many2one('sale.order.line',\n 'Sale Order Line', ondelete='cascade')\n\n @api.one\n @api.constrains('event_ticket_id', 'state')\n def _check_ticket_seats_limit(self):\n if (self.event_ticket_id.seats_max and self.event_ticket_id.\n seats_available < 0):\n raise UserError(_('No more available seats for this ticket'))\n\n @api.multi\n def _check_auto_confirmation(self):\n res = super(event_registration, self)._check_auto_confirmation()\n if res:\n orders = 
self.env['sale.order'].search([('state', '=', 'draft'),\n ('id', 'in', self.mapped('sale_order_id').ids)], limit=1)\n if orders:\n res = False\n return res\n\n @api.model\n def create(self, vals):\n res = super(event_registration, self).create(vals)\n if res.origin or res.sale_order_id:\n message = _(\n 'The registration has been created for event %(event_name)s%(ticket)s from sale order %(order)s'\n ) % {'event_name': '<i>%s</i>' % res.event_id.name,\n 'ticket': res.event_ticket_id and _(' with ticket %s') % (\n '<i>%s</i>' % res.event_ticket_id.name) or '', 'order': res\n .origin or res.sale_order_id.name}\n res.message_post(body=message)\n return res\n\n @api.model\n def _prepare_attendee_values(self, registration):\n \"\"\" Override to add sale related stuff \"\"\"\n line_id = registration.get('sale_order_line_id')\n if line_id:\n registration.setdefault('partner_id', line_id.order_id.partner_id)\n att_data = super(event_registration, self)._prepare_attendee_values(\n registration)\n if line_id:\n att_data.update({'event_id': line_id.event_id.id, 'event_id':\n line_id.event_id.id, 'event_ticket_id': line_id.\n event_ticket_id.id, 'origin': line_id.order_id.name,\n 'sale_order_id': line_id.order_id.id, 'sale_order_line_id':\n line_id.id})\n return att_data\n",
"step-5": "# -*- coding: utf-8 -*-\n\nfrom yuancloud import models, fields, api, _\nimport yuancloud.addons.decimal_precision as dp\nfrom yuancloud.exceptions import UserError\nfrom yuancloud.osv import fields as old_fields\n\n\nclass event_event(models.Model):\n _inherit = 'event.event'\n\n event_ticket_ids = fields.One2many(\n 'event.event.ticket', 'event_id', string='Event Ticket',\n default=lambda rec: rec._default_tickets(), copy=True)\n\n @api.model\n def _default_tickets(self):\n try:\n product = self.env.ref('event_sale.product_product_event')\n return [{\n 'name': _('Subscription'),\n 'product_id': product.id,\n 'price': 0,\n }]\n except ValueError:\n return self.env['event.event.ticket']\n\n\nclass event_ticket(models.Model):\n _name = 'event.event.ticket'\n _description = 'Event Ticket'\n\n name = fields.Char('Name', required=True, translate=True)\n event_id = fields.Many2one('event.event', \"Event\", required=True, ondelete='cascade')\n product_id = fields.Many2one(\n 'product.product', 'Product',\n required=True, domain=[(\"event_type_id\", \"!=\", False)],\n default=lambda self: self._default_product_id())\n registration_ids = fields.One2many('event.registration', 'event_ticket_id', 'Registrations')\n price = fields.Float('Price', digits=dp.get_precision('Product Price'))\n deadline = fields.Date(\"Sales End\")\n is_expired = fields.Boolean('Is Expired', compute='_is_expired')\n\n @api.model\n def _default_product_id(self):\n try:\n product = self.env['ir.model.data'].get_object('event_sale', 'product_product_event')\n return product.id\n except ValueError:\n return False\n\n @api.one\n @api.depends('deadline')\n def _is_expired(self):\n if self.deadline:\n current_date = fields.Date.context_today(self.with_context({'tz': self.event_id.date_tz}))\n self.is_expired = self.deadline < current_date\n else:\n self.is_expired = False\n\n # FIXME non-stored fields wont ends up in _columns (and thus _all_columns), which forbid them\n # to be used in qweb 
views. Waiting a fix, we create an old function field directly.\n \"\"\"\n price_reduce = fields.Float(\"Price Reduce\", compute=\"_get_price_reduce\", store=False,\n digits=dp.get_precision('Product Price'))\n @api.one\n @api.depends('price', 'product_id.lst_price', 'product_id.price')\n def _get_price_reduce(self):\n product = self.product_id\n discount = product.lst_price and (product.lst_price - product.price) / product.lst_price or 0.0\n self.price_reduce = (1.0 - discount) * self.price\n \"\"\"\n def _get_price_reduce(self, cr, uid, ids, field_name, arg, context=None):\n res = dict.fromkeys(ids, 0.0)\n for ticket in self.browse(cr, uid, ids, context=context):\n product = ticket.product_id\n discount = product.lst_price and (product.lst_price - product.price) / product.lst_price or 0.0\n res[ticket.id] = (1.0 - discount) * ticket.price\n return res\n\n _columns = {\n 'price_reduce': old_fields.function(_get_price_reduce, type='float', string='Price Reduce',\n digits_compute=dp.get_precision('Product Price')),\n }\n\n # seats fields\n seats_availability = fields.Selection(\n [('limited', 'Limited'), ('unlimited', 'Unlimited')],\n 'Available Seat', required=True, store=True, compute='_compute_seats', default=\"limited\")\n seats_max = fields.Integer('Maximum Available Seats',\n help=\"Define the number of available tickets. If you have too much registrations you will \"\n \"not be able to sell tickets anymore. 
Set 0 to ignore this rule set as unlimited.\")\n seats_reserved = fields.Integer(string='Reserved Seats', compute='_compute_seats', store=True)\n seats_available = fields.Integer(string='Available Seats', compute='_compute_seats', store=True)\n seats_unconfirmed = fields.Integer(string='Unconfirmed Seat Reservations', compute='_compute_seats', store=True)\n seats_used = fields.Integer(compute='_compute_seats', store=True)\n\n @api.multi\n @api.depends('seats_max', 'registration_ids.state')\n def _compute_seats(self):\n \"\"\" Determine reserved, available, reserved but unconfirmed and used seats. \"\"\"\n # initialize fields to 0 + compute seats availability\n for ticket in self:\n ticket.seats_availability = 'unlimited' if ticket.seats_max == 0 else 'limited'\n ticket.seats_unconfirmed = ticket.seats_reserved = ticket.seats_used = ticket.seats_available = 0\n # aggregate registrations by ticket and by state\n if self.ids:\n state_field = {\n 'draft': 'seats_unconfirmed',\n 'open': 'seats_reserved',\n 'done': 'seats_used',\n }\n query = \"\"\" SELECT event_ticket_id, state, count(event_id)\n FROM event_registration\n WHERE event_ticket_id IN %s AND state IN ('draft', 'open', 'done')\n GROUP BY event_ticket_id, state\n \"\"\"\n self._cr.execute(query, (tuple(self.ids),))\n for event_ticket_id, state, num in self._cr.fetchall():\n ticket = self.browse(event_ticket_id)\n ticket[state_field[state]] += num\n # compute seats_available\n for ticket in self:\n if ticket.seats_max > 0:\n ticket.seats_available = ticket.seats_max - (ticket.seats_reserved + ticket.seats_used)\n\n @api.one\n @api.constrains('registration_ids', 'seats_max')\n def _check_seats_limit(self):\n if self.seats_max and self.seats_available < 0:\n raise UserError(_('No more available seats for the ticket'))\n\n @api.onchange('product_id')\n def onchange_product_id(self):\n price = self.product_id.list_price if self.product_id else 0\n return {'value': {'price': price}}\n\n\nclass 
event_registration(models.Model):\n _inherit = 'event.registration'\n\n event_ticket_id = fields.Many2one('event.event.ticket', 'Event Ticket')\n # in addition to origin generic fields, add real relational fields to correctly\n # handle attendees linked to sale orders and their lines\n # TDE FIXME: maybe add an onchange on sale_order_id + origin\n sale_order_id = fields.Many2one('sale.order', 'Source Sale Order', ondelete='cascade')\n sale_order_line_id = fields.Many2one('sale.order.line', 'Sale Order Line', ondelete='cascade')\n\n @api.one\n @api.constrains('event_ticket_id', 'state')\n def _check_ticket_seats_limit(self):\n if self.event_ticket_id.seats_max and self.event_ticket_id.seats_available < 0:\n raise UserError(_('No more available seats for this ticket'))\n\n @api.multi\n def _check_auto_confirmation(self):\n res = super(event_registration, self)._check_auto_confirmation()\n if res:\n orders = self.env['sale.order'].search([('state', '=', 'draft'), ('id', 'in', self.mapped('sale_order_id').ids)], limit=1)\n if orders:\n res = False\n return res\n\n @api.model\n def create(self, vals):\n res = super(event_registration, self).create(vals)\n if res.origin or res.sale_order_id:\n message = _(\"The registration has been created for event %(event_name)s%(ticket)s from sale order %(order)s\") % ({\n 'event_name': '<i>%s</i>' % res.event_id.name,\n 'ticket': res.event_ticket_id and _(' with ticket %s') % (('<i>%s</i>') % res.event_ticket_id.name) or '',\n 'order': res.origin or res.sale_order_id.name})\n res.message_post(body=message)\n return res\n\n @api.model\n def _prepare_attendee_values(self, registration):\n \"\"\" Override to add sale related stuff \"\"\"\n line_id = registration.get('sale_order_line_id')\n if line_id:\n registration.setdefault('partner_id', line_id.order_id.partner_id)\n att_data = super(event_registration, self)._prepare_attendee_values(registration)\n if line_id:\n att_data.update({\n 'event_id': line_id.event_id.id,\n 'event_id': 
line_id.event_id.id,\n 'event_ticket_id': line_id.event_ticket_id.id,\n 'origin': line_id.order_id.name,\n 'sale_order_id': line_id.order_id.id,\n 'sale_order_line_id': line_id.id,\n })\n return att_data\n",
"step-ids": [
7,
12,
14,
19,
20
]
}
|
[
7,
12,
14,
19,
20
] |
<|reserved_special_token_0|>
def mutate_operator(root, nodes, path):
candidates = [node for node in nodes.keys() if type(node) in OP_TYPES.
keys() and _check_parent_type(node, nodes, OP_PARENT_TYPES)]
if len(candidates) == 0:
return -1
mut_node = random.choice(candidates)
type_idx = OP_TYPES[type(mut_node)]
new_node_type = random.choice([types for types in OP_MAP[type_idx] if
types != type(mut_node)])
mut_node.__class__ = new_node_type
save_ast(root, path)
return mut_node.lineno
<|reserved_special_token_0|>
def mutate_constant(root, nodes, path):
return -1
def mutate_operand(root, nodes, path):
return -1
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def mutate_operator(root, nodes, path):
candidates = [node for node in nodes.keys() if type(node) in OP_TYPES.
keys() and _check_parent_type(node, nodes, OP_PARENT_TYPES)]
if len(candidates) == 0:
return -1
mut_node = random.choice(candidates)
type_idx = OP_TYPES[type(mut_node)]
new_node_type = random.choice([types for types in OP_MAP[type_idx] if
types != type(mut_node)])
mut_node.__class__ = new_node_type
save_ast(root, path)
return mut_node.lineno
def mutate_signal(root, nodes, path):
candidates = [node for node in nodes.keys() if type(node) == Identifier and
_check_parent_type(node, nodes, SIG_PARENT_TYPES)]
if len(candidates) == 0:
return -1
sigs = get_signals(root)
trial = 0
while trial < 1000:
trial += 1
mut_node = random.choice(candidates)
name = mut_node.name
if name in sigs.keys():
sig_type = sigs[name]
choices = [sig for sig in sigs[sig_type] if sig != name]
if len(choices) == 0:
continue
new_name = random.choice(choices)
mut_node.name = new_name
save_ast(root, path)
return mut_node.lineno
def mutate_constant(root, nodes, path):
return -1
def mutate_operand(root, nodes, path):
return -1
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _check_parent_type(node, nodes, types):
par = node
while nodes[par] != None:
par = nodes[par]
if type(par) in types:
return True
return False
def mutate_operator(root, nodes, path):
candidates = [node for node in nodes.keys() if type(node) in OP_TYPES.
keys() and _check_parent_type(node, nodes, OP_PARENT_TYPES)]
if len(candidates) == 0:
return -1
mut_node = random.choice(candidates)
type_idx = OP_TYPES[type(mut_node)]
new_node_type = random.choice([types for types in OP_MAP[type_idx] if
types != type(mut_node)])
mut_node.__class__ = new_node_type
save_ast(root, path)
return mut_node.lineno
def mutate_signal(root, nodes, path):
candidates = [node for node in nodes.keys() if type(node) == Identifier and
_check_parent_type(node, nodes, SIG_PARENT_TYPES)]
if len(candidates) == 0:
return -1
sigs = get_signals(root)
trial = 0
while trial < 1000:
trial += 1
mut_node = random.choice(candidates)
name = mut_node.name
if name in sigs.keys():
sig_type = sigs[name]
choices = [sig for sig in sigs[sig_type] if sig != name]
if len(choices) == 0:
continue
new_name = random.choice(choices)
mut_node.name = new_name
save_ast(root, path)
return mut_node.lineno
def mutate_constant(root, nodes, path):
return -1
def mutate_operand(root, nodes, path):
return -1
<|reserved_special_token_1|>
import random
from common.ast import *
from mutate.mutate_ctrl import *
def _check_parent_type(node, nodes, types):
par = node
while nodes[par] != None:
par = nodes[par]
if type(par) in types:
return True
return False
def mutate_operator(root, nodes, path):
candidates = [node for node in nodes.keys() if type(node) in OP_TYPES.
keys() and _check_parent_type(node, nodes, OP_PARENT_TYPES)]
if len(candidates) == 0:
return -1
mut_node = random.choice(candidates)
type_idx = OP_TYPES[type(mut_node)]
new_node_type = random.choice([types for types in OP_MAP[type_idx] if
types != type(mut_node)])
mut_node.__class__ = new_node_type
save_ast(root, path)
return mut_node.lineno
def mutate_signal(root, nodes, path):
candidates = [node for node in nodes.keys() if type(node) == Identifier and
_check_parent_type(node, nodes, SIG_PARENT_TYPES)]
if len(candidates) == 0:
return -1
sigs = get_signals(root)
trial = 0
while trial < 1000:
trial += 1
mut_node = random.choice(candidates)
name = mut_node.name
if name in sigs.keys():
sig_type = sigs[name]
choices = [sig for sig in sigs[sig_type] if sig != name]
if len(choices) == 0:
continue
new_name = random.choice(choices)
mut_node.name = new_name
save_ast(root, path)
return mut_node.lineno
def mutate_constant(root, nodes, path):
return -1
def mutate_operand(root, nodes, path):
return -1
<|reserved_special_token_1|>
import random
from common.ast import *
from mutate.mutate_ctrl import *
def _check_parent_type(node, nodes, types):
par = node
while(nodes[par] != None):
par = nodes[par]
if type(par) in types:
return True
return False
def mutate_operator(root, nodes, path):
    """Mutate one randomly chosen operator node.

    Selects a random operator node located under one of the
    OP_PARENT_TYPES scopes, swaps its class for a different operator
    from the same OP_MAP group, saves the mutated AST to *path*, and
    returns the mutated node's line number.  Returns -1 when no
    operator node is eligible.
    """
    eligible = [candidate
                for candidate in nodes.keys()
                if type(candidate) in OP_TYPES
                and _check_parent_type(candidate, nodes, OP_PARENT_TYPES)]
    if not eligible:
        return -1

    target = random.choice(eligible)
    group = OP_MAP[OP_TYPES[type(target)]]
    # Choose any operator class from the same group other than the current one.
    replacement = random.choice([op for op in group if op != type(target)])
    target.__class__ = replacement

    save_ast(root, path)
    return target.lineno
def mutate_signal(root, nodes, path):
    """Mutate one randomly chosen signal identifier.

    Picks a random Identifier node located under one of the
    SIG_PARENT_TYPES scopes and renames it to another signal of the same
    signal type (the `sigs` table from get_signals maps both
    name -> type and type -> [names], as used below), then saves the
    mutated AST to *path*.

    Returns:
        int: the mutated node's line number on success, or -1 when no
        eligible identifier exists or no valid replacement is found
        within the retry budget.  (Fix: previously the retry-exhausted
        path fell off the end of the function and implicitly returned
        None, inconsistent with the other mutate_* helpers.)
    """
    candidates = [node
                  for node in nodes.keys()
                  if type(node) == Identifier
                  and _check_parent_type(node, nodes, SIG_PARENT_TYPES)]
    if len(candidates) == 0:
        return -1

    sigs = get_signals(root)

    # Bounded random search: a chosen identifier may be absent from the
    # signal table or have no alternative of the same type, so retry up
    # to 1000 times before giving up.
    for _trial in range(1000):
        mut_node = random.choice(candidates)
        name = mut_node.name
        if name not in sigs:
            continue
        sig_type = sigs[name]
        choices = [sig for sig in sigs[sig_type] if sig != name]
        if len(choices) == 0:
            continue
        mut_node.name = random.choice(choices)

        save_ast(root, path)
        return mut_node.lineno

    # Fix: explicitly report failure instead of returning None.
    return -1
def mutate_constant(root, nodes, path):
    """Constant mutation is not implemented; always reports failure (-1)."""
    return -1
def mutate_operand(root, nodes, path):
    """Operand mutation is not implemented; always reports failure (-1)."""
    return -1
|
flexible
|
{
"blob_id": "c0524301a79788aa34a039fc46799021fb45362c",
"index": 7141,
"step-1": "<mask token>\n\n\ndef mutate_operator(root, nodes, path):\n candidates = [node for node in nodes.keys() if type(node) in OP_TYPES.\n keys() and _check_parent_type(node, nodes, OP_PARENT_TYPES)]\n if len(candidates) == 0:\n return -1\n mut_node = random.choice(candidates)\n type_idx = OP_TYPES[type(mut_node)]\n new_node_type = random.choice([types for types in OP_MAP[type_idx] if \n types != type(mut_node)])\n mut_node.__class__ = new_node_type\n save_ast(root, path)\n return mut_node.lineno\n\n\n<mask token>\n\n\ndef mutate_constant(root, nodes, path):\n return -1\n\n\ndef mutate_operand(root, nodes, path):\n return -1\n",
"step-2": "<mask token>\n\n\ndef mutate_operator(root, nodes, path):\n candidates = [node for node in nodes.keys() if type(node) in OP_TYPES.\n keys() and _check_parent_type(node, nodes, OP_PARENT_TYPES)]\n if len(candidates) == 0:\n return -1\n mut_node = random.choice(candidates)\n type_idx = OP_TYPES[type(mut_node)]\n new_node_type = random.choice([types for types in OP_MAP[type_idx] if \n types != type(mut_node)])\n mut_node.__class__ = new_node_type\n save_ast(root, path)\n return mut_node.lineno\n\n\ndef mutate_signal(root, nodes, path):\n candidates = [node for node in nodes.keys() if type(node) == Identifier and\n _check_parent_type(node, nodes, SIG_PARENT_TYPES)]\n if len(candidates) == 0:\n return -1\n sigs = get_signals(root)\n trial = 0\n while trial < 1000:\n trial += 1\n mut_node = random.choice(candidates)\n name = mut_node.name\n if name in sigs.keys():\n sig_type = sigs[name]\n choices = [sig for sig in sigs[sig_type] if sig != name]\n if len(choices) == 0:\n continue\n new_name = random.choice(choices)\n mut_node.name = new_name\n save_ast(root, path)\n return mut_node.lineno\n\n\ndef mutate_constant(root, nodes, path):\n return -1\n\n\ndef mutate_operand(root, nodes, path):\n return -1\n",
"step-3": "<mask token>\n\n\ndef _check_parent_type(node, nodes, types):\n par = node\n while nodes[par] != None:\n par = nodes[par]\n if type(par) in types:\n return True\n return False\n\n\ndef mutate_operator(root, nodes, path):\n candidates = [node for node in nodes.keys() if type(node) in OP_TYPES.\n keys() and _check_parent_type(node, nodes, OP_PARENT_TYPES)]\n if len(candidates) == 0:\n return -1\n mut_node = random.choice(candidates)\n type_idx = OP_TYPES[type(mut_node)]\n new_node_type = random.choice([types for types in OP_MAP[type_idx] if \n types != type(mut_node)])\n mut_node.__class__ = new_node_type\n save_ast(root, path)\n return mut_node.lineno\n\n\ndef mutate_signal(root, nodes, path):\n candidates = [node for node in nodes.keys() if type(node) == Identifier and\n _check_parent_type(node, nodes, SIG_PARENT_TYPES)]\n if len(candidates) == 0:\n return -1\n sigs = get_signals(root)\n trial = 0\n while trial < 1000:\n trial += 1\n mut_node = random.choice(candidates)\n name = mut_node.name\n if name in sigs.keys():\n sig_type = sigs[name]\n choices = [sig for sig in sigs[sig_type] if sig != name]\n if len(choices) == 0:\n continue\n new_name = random.choice(choices)\n mut_node.name = new_name\n save_ast(root, path)\n return mut_node.lineno\n\n\ndef mutate_constant(root, nodes, path):\n return -1\n\n\ndef mutate_operand(root, nodes, path):\n return -1\n",
"step-4": "import random\nfrom common.ast import *\nfrom mutate.mutate_ctrl import *\n\n\ndef _check_parent_type(node, nodes, types):\n par = node\n while nodes[par] != None:\n par = nodes[par]\n if type(par) in types:\n return True\n return False\n\n\ndef mutate_operator(root, nodes, path):\n candidates = [node for node in nodes.keys() if type(node) in OP_TYPES.\n keys() and _check_parent_type(node, nodes, OP_PARENT_TYPES)]\n if len(candidates) == 0:\n return -1\n mut_node = random.choice(candidates)\n type_idx = OP_TYPES[type(mut_node)]\n new_node_type = random.choice([types for types in OP_MAP[type_idx] if \n types != type(mut_node)])\n mut_node.__class__ = new_node_type\n save_ast(root, path)\n return mut_node.lineno\n\n\ndef mutate_signal(root, nodes, path):\n candidates = [node for node in nodes.keys() if type(node) == Identifier and\n _check_parent_type(node, nodes, SIG_PARENT_TYPES)]\n if len(candidates) == 0:\n return -1\n sigs = get_signals(root)\n trial = 0\n while trial < 1000:\n trial += 1\n mut_node = random.choice(candidates)\n name = mut_node.name\n if name in sigs.keys():\n sig_type = sigs[name]\n choices = [sig for sig in sigs[sig_type] if sig != name]\n if len(choices) == 0:\n continue\n new_name = random.choice(choices)\n mut_node.name = new_name\n save_ast(root, path)\n return mut_node.lineno\n\n\ndef mutate_constant(root, nodes, path):\n return -1\n\n\ndef mutate_operand(root, nodes, path):\n return -1\n",
"step-5": "import random\n\nfrom common.ast import *\nfrom mutate.mutate_ctrl import *\n\ndef _check_parent_type(node, nodes, types):\n par = node\n while(nodes[par] != None):\n par = nodes[par]\n if type(par) in types:\n return True\n return False\n\ndef mutate_operator(root, nodes, path):\n candidates = [node \n for node in nodes.keys() \n if type(node) in OP_TYPES.keys()\n and _check_parent_type(node, nodes, OP_PARENT_TYPES)]\n \n if len(candidates) == 0:\n return -1\n\n mut_node = random.choice(candidates)\n type_idx = OP_TYPES[type(mut_node)]\n new_node_type = random.choice([types for types in OP_MAP[type_idx] if types != type(mut_node)])\n\n mut_node.__class__ = new_node_type\n\n save_ast(root, path)\n \n return mut_node.lineno\n\ndef mutate_signal(root, nodes, path):\n candidates = [node \n for node in nodes.keys()\n if type(node) == Identifier\n and _check_parent_type(node, nodes, SIG_PARENT_TYPES)]\n\n if len(candidates) == 0:\n return -1 \n\n sigs = get_signals(root)\n\n trial = 0\n while (trial < 1000):\n trial += 1\n\n mut_node = random.choice(candidates)\n name = mut_node.name\n if name in sigs.keys():\n sig_type = sigs[name]\n choices = [sig for sig in sigs[sig_type] if sig != name]\n if len(choices) == 0:\n continue\n new_name = random.choice(choices)\n mut_node.name = new_name\n\n save_ast(root, path)\n\n return mut_node.lineno\n\ndef mutate_constant(root, nodes, path):\n return -1\n\ndef mutate_operand(root, nodes, path):\n return -1 \n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
__author__ = 'asistente'
#from __future__ import absolute_import
from unittest import TestCase
from selenium import webdriver
from selenium.webdriver.common.by import By
class FunctionalTest(TestCase):
    """Selenium end-to-end tests for the BuscoAyuda app at http://localhost:8000.

    NOTE(review): several tests assert on the record
    "Betzy Editado Montanez Editado", which is the result of test_Editar
    renaming an existing user. The suite therefore depends on pre-seeded
    data and on test execution order (unittest runs tests in alphabetical
    order, not declaration order) — confirm this is intended.
    """

    def setUp(self):
        # Launch a fresh Chrome session per test; chromedriver path is
        # hard-coded for a specific Windows layout.
        self.browser = webdriver.Chrome("C:\\chromedriver\\chromedriver.exe")
        # Implicit wait: every element lookup polls for up to 2 seconds.
        self.browser.implicitly_wait(2)

    def tearDown(self):
        # Always close the browser so sessions do not leak between tests.
        self.browser.quit()

    def test_title(self):
        # Landing page title must contain the site name.
        self.browser.get('http://localhost:8000')
        self.assertIn('BuscoAyuda', self.browser.title)

    def test_registro(self):
        # Register a new service provider via the sign-up form, then
        # verify the new name appears in the listing.
        self.browser.get('http://localhost:8000')
        link = self.browser.find_element_by_id('id_register')
        link.click()
        nombre = self.browser.find_element_by_id('id_nombre')
        nombre.send_keys('Rafael')
        apellidos = self.browser.find_element_by_id('id_apellidos')
        apellidos.send_keys('Medrano')
        experiencia = self.browser.find_element_by_id('id_aniosExperiencia')
        experiencia.send_keys('7')
        # Pick the service type from the <select> by its visible label.
        self.browser.find_element_by_xpath(
            "//select[@id='id_tiposDeServicio']/option[text()='Desarrollador Web']").click()
        telefono = self.browser.find_element_by_id('id_telefono')
        telefono.send_keys('3135555555')
        correo = self.browser.find_element_by_id('id_correo')
        correo.send_keys('re.medrano@uniandes.edu.co')
        imagen = self.browser.find_element_by_id('id_imagen')
        # NOTE(review): '\c' and '\d' are not recognized escape sequences,
        # so this literal works by accident; a raw string would be safer.
        imagen.send_keys('C:\chromedriver\developer.jpg')
        nombreUsuario = self.browser.find_element_by_id('id_username')
        nombreUsuario.send_keys('re.medrano')
        clave = self.browser.find_element_by_id('id_password')
        clave.send_keys('prueba123')
        botonGrabar = self.browser.find_element_by_id('id_grabar')
        botonGrabar.click()
        self.browser.implicitly_wait(3)
        span = self.browser.find_element(By.XPATH, '//span[text()="Rafael Medrano"]')
        self.assertIn('Rafael Medrano', span.text)

    def test_verDetalle(self):
        # Open the detail view of a provider from the listing and check
        # the heading shows that provider's name.
        self.browser.get('http://localhost:8000')
        span = self.browser.find_element(By.XPATH, '//span[text()="Betzy Editado Montanez Editado"]')
        span.click()
        self.browser.implicitly_wait(3)
        h2 = self.browser.find_element(By.XPATH, '//h2[text()="Betzy Editado Montanez Editado"]')
        self.assertIn('Betzy Editado Montanez Editado', h2.text)

    def test_login(self):
        # Log in with known credentials and verify the Logout control
        # becomes visible.
        self.browser.get('http://localhost:8000')
        link = self.browser.find_element_by_id('id_login')
        link.click()
        nombreUsuario = self.browser.find_element_by_id('username')
        nombreUsuario.send_keys('ba.montanez')
        clave = self.browser.find_element_by_id('password')
        clave.send_keys('prueba123')
        botonIngresar = self.browser.find_element_by_id('id_ingresar')
        botonIngresar.click()
        self.browser.implicitly_wait(3)
        # The span text carries a leading space in the markup.
        span = self.browser.find_element(By.XPATH, '//span[text()=" Logout"]')
        self.assertIn('Logout', span.text)

    def test_Editar(self):
        # Log in, open the profile editor, change every field, save, and
        # verify the edited name appears in the listing.
        self.browser.get('http://localhost:8000')
        link = self.browser.find_element_by_id('id_login')
        link.click()
        nombreUsuario = self.browser.find_element_by_id('username')
        nombreUsuario.send_keys('ba.montanez')
        claveIngreso = self.browser.find_element_by_id('password')
        claveIngreso.send_keys('prueba123')
        botonIngresar = self.browser.find_element_by_id('id_ingresar')
        botonIngresar.click()
        self.browser.implicitly_wait(3)
        linkEditar = self.browser.find_element_by_id('id_editar')
        linkEditar.click()
        # Each field is cleared before typing the replacement value.
        nombre = self.browser.find_element_by_id('id_nombre')
        nombre.clear()
        nombre.send_keys('Betzy Editado')
        apellidos = self.browser.find_element_by_id('id_apellidos')
        apellidos.clear()
        apellidos.send_keys('Montanez Editado')
        experiencia = self.browser.find_element_by_id('id_aniosExperiencia')
        experiencia.clear()
        experiencia.send_keys('10')
        self.browser.find_element_by_xpath(
            "//select[@id='id_tiposDeServicio']/option[text()='Desarrollador Web']").click()
        telefono = self.browser.find_element_by_id('id_telefono')
        telefono.clear()
        telefono.send_keys('313555666')
        correo = self.browser.find_element_by_id('id_correo')
        correo.clear()
        correo.send_keys('ba.montanez01@uniandes.edu.co')
        imagen = self.browser.find_element_by_id('id_imagen')
        imagen.send_keys('C:\chromedriver\developer.jpg')
        nombreUsuario = self.browser.find_element_by_id('id_username')
        nombreUsuario.clear()
        nombreUsuario.send_keys('ba.montanez2')
        clave = self.browser.find_element_by_id('id_password')
        clave.clear()
        clave.send_keys('prueba1234')
        # NOTE(review): the save button reuses the id 'id_editar' that the
        # edit link used above — confirm both elements really share this id.
        botonGrabar = self.browser.find_element_by_id('id_editar')
        botonGrabar.click()
        self.browser.implicitly_wait(3)
        span = self.browser.find_element(By.XPATH, '//span[text()="Betzy Editado Montanez Editado"]')
        self.assertIn('Betzy Editado Montanez Editado', span.text)

    def test_Comentar(self):
        # Post a comment on a provider's detail page and verify it is
        # rendered afterwards.
        self.browser.get('http://localhost:8000')
        span = self.browser.find_element(By.XPATH, '//span[text()="Betzy Editado Montanez Editado"]')
        span.click()
        self.browser.implicitly_wait(3)
        # NOTE(review): h2 is looked up but never asserted on — it only
        # acts as an implicit "detail page loaded" check.
        h2 = self.browser.find_element(By.XPATH, '//h2[text()="Betzy Editado Montanez Editado"]')
        correo = self.browser.find_element_by_id('correo')
        correo.send_keys('prueba@prueba.com')
        comentario = self.browser.find_element_by_id('comentario')
        comentario.send_keys('Comentario Prueba')
        botonAceptar = self.browser.find_element_by_id('id_comentar')
        botonAceptar.click()
        self.browser.implicitly_wait(6)
        span = self.browser.find_element(By.XPATH, '//p[text()="Comentario Prueba"]')
        self.assertIn('Comentario Prueba', span.text)

    def test_listado(self):
        # The listing page must show (at least) these two providers.
        self.browser.get('http://localhost:8000')
        self.browser.implicitly_wait(3)
        span = self.browser.find_element(By.XPATH, '//span[text()="Betzy Editado Montanez Editado"]')
        self.assertIn('Betzy Editado Montanez Editado', span.text)
        self.browser.implicitly_wait(3)
        span = self.browser.find_element(By.XPATH, '//span[text()="pepito perez"]')
        self.assertIn('pepito perez', span.text)

    def test_buscar(self):
        # Search for a provider by full name and verify the result shows up.
        self.browser.get('http://localhost:8000')
        correo = self.browser.find_element_by_id('buscar')
        correo.send_keys('Betzy Editado Montanez Editado')
        botonBuscar = self.browser.find_element_by_id('id_buscar')
        botonBuscar.click()
        self.browser.implicitly_wait(6)
        span = self.browser.find_element(By.XPATH, '//span[text()="Betzy Editado Montanez Editado"]')
        self.assertIn('Betzy Editado Montanez Editado', span.text)
|
normal
|
{
"blob_id": "fc4cf800c663abf20bfba7fcc1032e09a992641b",
"index": 5334,
"step-1": "<mask token>\n\n\nclass FunctionalTest(TestCase):\n\n def setUp(self):\n self.browser = webdriver.Chrome('C:\\\\chromedriver\\\\chromedriver.exe')\n self.browser.implicitly_wait(2)\n\n def tearDown(self):\n self.browser.quit()\n <mask token>\n <mask token>\n\n def test_verDetalle(self):\n self.browser.get('http://localhost:8000')\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"Betzy Editado Montanez Editado\"]')\n span.click()\n self.browser.implicitly_wait(3)\n h2 = self.browser.find_element(By.XPATH,\n '//h2[text()=\"Betzy Editado Montanez Editado\"]')\n self.assertIn('Betzy Editado Montanez Editado', h2.text)\n <mask token>\n <mask token>\n <mask token>\n\n def test_listado(self):\n self.browser.get('http://localhost:8000')\n self.browser.implicitly_wait(3)\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"Betzy Editado Montanez Editado\"]')\n self.assertIn('Betzy Editado Montanez Editado', span.text)\n self.browser.implicitly_wait(3)\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"pepito perez\"]')\n self.assertIn('pepito perez', span.text)\n\n def test_buscar(self):\n self.browser.get('http://localhost:8000')\n correo = self.browser.find_element_by_id('buscar')\n correo.send_keys('Betzy Editado Montanez Editado')\n botonBuscar = self.browser.find_element_by_id('id_buscar')\n botonBuscar.click()\n self.browser.implicitly_wait(6)\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"Betzy Editado Montanez Editado\"]')\n self.assertIn('Betzy Editado Montanez Editado', span.text)\n",
"step-2": "<mask token>\n\n\nclass FunctionalTest(TestCase):\n\n def setUp(self):\n self.browser = webdriver.Chrome('C:\\\\chromedriver\\\\chromedriver.exe')\n self.browser.implicitly_wait(2)\n\n def tearDown(self):\n self.browser.quit()\n <mask token>\n <mask token>\n\n def test_verDetalle(self):\n self.browser.get('http://localhost:8000')\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"Betzy Editado Montanez Editado\"]')\n span.click()\n self.browser.implicitly_wait(3)\n h2 = self.browser.find_element(By.XPATH,\n '//h2[text()=\"Betzy Editado Montanez Editado\"]')\n self.assertIn('Betzy Editado Montanez Editado', h2.text)\n <mask token>\n <mask token>\n\n def test_Comentar(self):\n self.browser.get('http://localhost:8000')\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"Betzy Editado Montanez Editado\"]')\n span.click()\n self.browser.implicitly_wait(3)\n h2 = self.browser.find_element(By.XPATH,\n '//h2[text()=\"Betzy Editado Montanez Editado\"]')\n correo = self.browser.find_element_by_id('correo')\n correo.send_keys('prueba@prueba.com')\n comentario = self.browser.find_element_by_id('comentario')\n comentario.send_keys('Comentario Prueba')\n botonAceptar = self.browser.find_element_by_id('id_comentar')\n botonAceptar.click()\n self.browser.implicitly_wait(6)\n span = self.browser.find_element(By.XPATH,\n '//p[text()=\"Comentario Prueba\"]')\n self.assertIn('Comentario Prueba', span.text)\n\n def test_listado(self):\n self.browser.get('http://localhost:8000')\n self.browser.implicitly_wait(3)\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"Betzy Editado Montanez Editado\"]')\n self.assertIn('Betzy Editado Montanez Editado', span.text)\n self.browser.implicitly_wait(3)\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"pepito perez\"]')\n self.assertIn('pepito perez', span.text)\n\n def test_buscar(self):\n self.browser.get('http://localhost:8000')\n correo = self.browser.find_element_by_id('buscar')\n 
correo.send_keys('Betzy Editado Montanez Editado')\n botonBuscar = self.browser.find_element_by_id('id_buscar')\n botonBuscar.click()\n self.browser.implicitly_wait(6)\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"Betzy Editado Montanez Editado\"]')\n self.assertIn('Betzy Editado Montanez Editado', span.text)\n",
"step-3": "<mask token>\n\n\nclass FunctionalTest(TestCase):\n\n def setUp(self):\n self.browser = webdriver.Chrome('C:\\\\chromedriver\\\\chromedriver.exe')\n self.browser.implicitly_wait(2)\n\n def tearDown(self):\n self.browser.quit()\n\n def test_title(self):\n self.browser.get('http://localhost:8000')\n self.assertIn('BuscoAyuda', self.browser.title)\n <mask token>\n\n def test_verDetalle(self):\n self.browser.get('http://localhost:8000')\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"Betzy Editado Montanez Editado\"]')\n span.click()\n self.browser.implicitly_wait(3)\n h2 = self.browser.find_element(By.XPATH,\n '//h2[text()=\"Betzy Editado Montanez Editado\"]')\n self.assertIn('Betzy Editado Montanez Editado', h2.text)\n <mask token>\n\n def test_Editar(self):\n self.browser.get('http://localhost:8000')\n link = self.browser.find_element_by_id('id_login')\n link.click()\n nombreUsuario = self.browser.find_element_by_id('username')\n nombreUsuario.send_keys('ba.montanez')\n claveIngreso = self.browser.find_element_by_id('password')\n claveIngreso.send_keys('prueba123')\n botonIngresar = self.browser.find_element_by_id('id_ingresar')\n botonIngresar.click()\n self.browser.implicitly_wait(3)\n linkEditar = self.browser.find_element_by_id('id_editar')\n linkEditar.click()\n nombre = self.browser.find_element_by_id('id_nombre')\n nombre.clear()\n nombre.send_keys('Betzy Editado')\n apellidos = self.browser.find_element_by_id('id_apellidos')\n apellidos.clear()\n apellidos.send_keys('Montanez Editado')\n experiencia = self.browser.find_element_by_id('id_aniosExperiencia')\n experiencia.clear()\n experiencia.send_keys('10')\n self.browser.find_element_by_xpath(\n \"//select[@id='id_tiposDeServicio']/option[text()='Desarrollador Web']\"\n ).click()\n telefono = self.browser.find_element_by_id('id_telefono')\n telefono.clear()\n telefono.send_keys('313555666')\n correo = self.browser.find_element_by_id('id_correo')\n correo.clear()\n 
correo.send_keys('ba.montanez01@uniandes.edu.co')\n imagen = self.browser.find_element_by_id('id_imagen')\n imagen.send_keys('C:\\\\chromedriver\\\\developer.jpg')\n nombreUsuario = self.browser.find_element_by_id('id_username')\n nombreUsuario.clear()\n nombreUsuario.send_keys('ba.montanez2')\n clave = self.browser.find_element_by_id('id_password')\n clave.clear()\n clave.send_keys('prueba1234')\n botonGrabar = self.browser.find_element_by_id('id_editar')\n botonGrabar.click()\n self.browser.implicitly_wait(3)\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"Betzy Editado Montanez Editado\"]')\n self.assertIn('Betzy Editado Montanez Editado', span.text)\n\n def test_Comentar(self):\n self.browser.get('http://localhost:8000')\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"Betzy Editado Montanez Editado\"]')\n span.click()\n self.browser.implicitly_wait(3)\n h2 = self.browser.find_element(By.XPATH,\n '//h2[text()=\"Betzy Editado Montanez Editado\"]')\n correo = self.browser.find_element_by_id('correo')\n correo.send_keys('prueba@prueba.com')\n comentario = self.browser.find_element_by_id('comentario')\n comentario.send_keys('Comentario Prueba')\n botonAceptar = self.browser.find_element_by_id('id_comentar')\n botonAceptar.click()\n self.browser.implicitly_wait(6)\n span = self.browser.find_element(By.XPATH,\n '//p[text()=\"Comentario Prueba\"]')\n self.assertIn('Comentario Prueba', span.text)\n\n def test_listado(self):\n self.browser.get('http://localhost:8000')\n self.browser.implicitly_wait(3)\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"Betzy Editado Montanez Editado\"]')\n self.assertIn('Betzy Editado Montanez Editado', span.text)\n self.browser.implicitly_wait(3)\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"pepito perez\"]')\n self.assertIn('pepito perez', span.text)\n\n def test_buscar(self):\n self.browser.get('http://localhost:8000')\n correo = self.browser.find_element_by_id('buscar')\n 
correo.send_keys('Betzy Editado Montanez Editado')\n botonBuscar = self.browser.find_element_by_id('id_buscar')\n botonBuscar.click()\n self.browser.implicitly_wait(6)\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"Betzy Editado Montanez Editado\"]')\n self.assertIn('Betzy Editado Montanez Editado', span.text)\n",
"step-4": "__author__ = 'asistente'\nfrom unittest import TestCase\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\n\n\nclass FunctionalTest(TestCase):\n\n def setUp(self):\n self.browser = webdriver.Chrome('C:\\\\chromedriver\\\\chromedriver.exe')\n self.browser.implicitly_wait(2)\n\n def tearDown(self):\n self.browser.quit()\n\n def test_title(self):\n self.browser.get('http://localhost:8000')\n self.assertIn('BuscoAyuda', self.browser.title)\n\n def test_registro(self):\n self.browser.get('http://localhost:8000')\n link = self.browser.find_element_by_id('id_register')\n link.click()\n nombre = self.browser.find_element_by_id('id_nombre')\n nombre.send_keys('Rafael')\n apellidos = self.browser.find_element_by_id('id_apellidos')\n apellidos.send_keys('Medrano')\n experiencia = self.browser.find_element_by_id('id_aniosExperiencia')\n experiencia.send_keys('7')\n self.browser.find_element_by_xpath(\n \"//select[@id='id_tiposDeServicio']/option[text()='Desarrollador Web']\"\n ).click()\n telefono = self.browser.find_element_by_id('id_telefono')\n telefono.send_keys('3135555555')\n correo = self.browser.find_element_by_id('id_correo')\n correo.send_keys('re.medrano@uniandes.edu.co')\n imagen = self.browser.find_element_by_id('id_imagen')\n imagen.send_keys('C:\\\\chromedriver\\\\developer.jpg')\n nombreUsuario = self.browser.find_element_by_id('id_username')\n nombreUsuario.send_keys('re.medrano')\n clave = self.browser.find_element_by_id('id_password')\n clave.send_keys('prueba123')\n botonGrabar = self.browser.find_element_by_id('id_grabar')\n botonGrabar.click()\n self.browser.implicitly_wait(3)\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"Rafael Medrano\"]')\n self.assertIn('Rafael Medrano', span.text)\n\n def test_verDetalle(self):\n self.browser.get('http://localhost:8000')\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"Betzy Editado Montanez Editado\"]')\n span.click()\n 
self.browser.implicitly_wait(3)\n h2 = self.browser.find_element(By.XPATH,\n '//h2[text()=\"Betzy Editado Montanez Editado\"]')\n self.assertIn('Betzy Editado Montanez Editado', h2.text)\n\n def test_login(self):\n self.browser.get('http://localhost:8000')\n link = self.browser.find_element_by_id('id_login')\n link.click()\n nombreUsuario = self.browser.find_element_by_id('username')\n nombreUsuario.send_keys('ba.montanez')\n clave = self.browser.find_element_by_id('password')\n clave.send_keys('prueba123')\n botonIngresar = self.browser.find_element_by_id('id_ingresar')\n botonIngresar.click()\n self.browser.implicitly_wait(3)\n span = self.browser.find_element(By.XPATH, '//span[text()=\" Logout\"]')\n self.assertIn('Logout', span.text)\n\n def test_Editar(self):\n self.browser.get('http://localhost:8000')\n link = self.browser.find_element_by_id('id_login')\n link.click()\n nombreUsuario = self.browser.find_element_by_id('username')\n nombreUsuario.send_keys('ba.montanez')\n claveIngreso = self.browser.find_element_by_id('password')\n claveIngreso.send_keys('prueba123')\n botonIngresar = self.browser.find_element_by_id('id_ingresar')\n botonIngresar.click()\n self.browser.implicitly_wait(3)\n linkEditar = self.browser.find_element_by_id('id_editar')\n linkEditar.click()\n nombre = self.browser.find_element_by_id('id_nombre')\n nombre.clear()\n nombre.send_keys('Betzy Editado')\n apellidos = self.browser.find_element_by_id('id_apellidos')\n apellidos.clear()\n apellidos.send_keys('Montanez Editado')\n experiencia = self.browser.find_element_by_id('id_aniosExperiencia')\n experiencia.clear()\n experiencia.send_keys('10')\n self.browser.find_element_by_xpath(\n \"//select[@id='id_tiposDeServicio']/option[text()='Desarrollador Web']\"\n ).click()\n telefono = self.browser.find_element_by_id('id_telefono')\n telefono.clear()\n telefono.send_keys('313555666')\n correo = self.browser.find_element_by_id('id_correo')\n correo.clear()\n 
correo.send_keys('ba.montanez01@uniandes.edu.co')\n imagen = self.browser.find_element_by_id('id_imagen')\n imagen.send_keys('C:\\\\chromedriver\\\\developer.jpg')\n nombreUsuario = self.browser.find_element_by_id('id_username')\n nombreUsuario.clear()\n nombreUsuario.send_keys('ba.montanez2')\n clave = self.browser.find_element_by_id('id_password')\n clave.clear()\n clave.send_keys('prueba1234')\n botonGrabar = self.browser.find_element_by_id('id_editar')\n botonGrabar.click()\n self.browser.implicitly_wait(3)\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"Betzy Editado Montanez Editado\"]')\n self.assertIn('Betzy Editado Montanez Editado', span.text)\n\n def test_Comentar(self):\n self.browser.get('http://localhost:8000')\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"Betzy Editado Montanez Editado\"]')\n span.click()\n self.browser.implicitly_wait(3)\n h2 = self.browser.find_element(By.XPATH,\n '//h2[text()=\"Betzy Editado Montanez Editado\"]')\n correo = self.browser.find_element_by_id('correo')\n correo.send_keys('prueba@prueba.com')\n comentario = self.browser.find_element_by_id('comentario')\n comentario.send_keys('Comentario Prueba')\n botonAceptar = self.browser.find_element_by_id('id_comentar')\n botonAceptar.click()\n self.browser.implicitly_wait(6)\n span = self.browser.find_element(By.XPATH,\n '//p[text()=\"Comentario Prueba\"]')\n self.assertIn('Comentario Prueba', span.text)\n\n def test_listado(self):\n self.browser.get('http://localhost:8000')\n self.browser.implicitly_wait(3)\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"Betzy Editado Montanez Editado\"]')\n self.assertIn('Betzy Editado Montanez Editado', span.text)\n self.browser.implicitly_wait(3)\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"pepito perez\"]')\n self.assertIn('pepito perez', span.text)\n\n def test_buscar(self):\n self.browser.get('http://localhost:8000')\n correo = self.browser.find_element_by_id('buscar')\n 
correo.send_keys('Betzy Editado Montanez Editado')\n botonBuscar = self.browser.find_element_by_id('id_buscar')\n botonBuscar.click()\n self.browser.implicitly_wait(6)\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"Betzy Editado Montanez Editado\"]')\n self.assertIn('Betzy Editado Montanez Editado', span.text)\n",
"step-5": "__author__ = 'asistente'\n\n#from __future__ import absolute_import\n\nfrom unittest import TestCase\nfrom selenium import webdriver\n\nfrom selenium.webdriver.common.by import By\n\nclass FunctionalTest(TestCase):\n\n def setUp(self):\n self.browser = webdriver.Chrome(\"C:\\\\chromedriver\\\\chromedriver.exe\")\n self.browser.implicitly_wait(2)\n\n def tearDown(self):\n self.browser.quit()\n\n def test_title(self):\n self.browser.get('http://localhost:8000')\n self.assertIn('BuscoAyuda', self.browser.title)\n\n def test_registro(self):\n self.browser.get('http://localhost:8000')\n link = self.browser.find_element_by_id('id_register')\n link.click()\n\n nombre = self.browser.find_element_by_id('id_nombre')\n nombre.send_keys('Rafael')\n\n apellidos = self.browser.find_element_by_id('id_apellidos')\n apellidos.send_keys('Medrano')\n\n experiencia = self.browser.find_element_by_id('id_aniosExperiencia')\n experiencia.send_keys('7')\n\n self.browser.find_element_by_xpath(\n \"//select[@id='id_tiposDeServicio']/option[text()='Desarrollador Web']\").click()\n telefono = self.browser.find_element_by_id('id_telefono')\n telefono.send_keys('3135555555')\n\n correo = self.browser.find_element_by_id('id_correo')\n correo.send_keys('re.medrano@uniandes.edu.co')\n\n imagen = self.browser.find_element_by_id('id_imagen')\n imagen.send_keys('C:\\chromedriver\\developer.jpg')\n\n nombreUsuario = self.browser.find_element_by_id('id_username')\n nombreUsuario.send_keys('re.medrano')\n\n clave = self.browser.find_element_by_id('id_password')\n clave.send_keys('prueba123')\n\n botonGrabar = self.browser.find_element_by_id('id_grabar')\n botonGrabar.click()\n self.browser.implicitly_wait(3)\n\n span = self.browser.find_element(By.XPATH, '//span[text()=\"Rafael Medrano\"]')\n self.assertIn('Rafael Medrano', span.text)\n\n def test_verDetalle(self):\n self.browser.get('http://localhost:8000')\n span = self.browser.find_element(By.XPATH, '//span[text()=\"Betzy Editado Montanez 
Editado\"]')\n span.click()\n self.browser.implicitly_wait(3)\n h2 = self.browser.find_element(By.XPATH, '//h2[text()=\"Betzy Editado Montanez Editado\"]')\n\n self.assertIn('Betzy Editado Montanez Editado', h2.text)\n\n def test_login(self):\n self.browser.get('http://localhost:8000')\n link = self.browser.find_element_by_id('id_login')\n link.click()\n\n nombreUsuario = self.browser.find_element_by_id('username')\n nombreUsuario.send_keys('ba.montanez')\n\n clave = self.browser.find_element_by_id('password')\n clave.send_keys('prueba123')\n\n botonIngresar = self.browser.find_element_by_id('id_ingresar')\n botonIngresar.click()\n\n self.browser.implicitly_wait(3)\n span = self.browser.find_element(By.XPATH, '//span[text()=\" Logout\"]')\n\n self.assertIn('Logout', span.text)\n\n\n def test_Editar(self):\n self.browser.get('http://localhost:8000')\n link = self.browser.find_element_by_id('id_login')\n link.click()\n\n nombreUsuario = self.browser.find_element_by_id('username')\n nombreUsuario.send_keys('ba.montanez')\n\n claveIngreso = self.browser.find_element_by_id('password')\n claveIngreso.send_keys('prueba123')\n\n botonIngresar = self.browser.find_element_by_id('id_ingresar')\n botonIngresar.click()\n\n self.browser.implicitly_wait(3)\n\n linkEditar = self.browser.find_element_by_id('id_editar')\n linkEditar.click()\n\n nombre = self.browser.find_element_by_id('id_nombre')\n nombre.clear()\n nombre.send_keys('Betzy Editado')\n\n apellidos = self.browser.find_element_by_id('id_apellidos')\n apellidos.clear()\n apellidos.send_keys('Montanez Editado')\n\n experiencia = self.browser.find_element_by_id('id_aniosExperiencia')\n experiencia.clear()\n experiencia.send_keys('10')\n\n self.browser.find_element_by_xpath(\n \"//select[@id='id_tiposDeServicio']/option[text()='Desarrollador Web']\").click()\n telefono = self.browser.find_element_by_id('id_telefono')\n telefono.clear()\n telefono.send_keys('313555666')\n\n correo = 
self.browser.find_element_by_id('id_correo')\n correo.clear()\n correo.send_keys('ba.montanez01@uniandes.edu.co')\n\n imagen = self.browser.find_element_by_id('id_imagen')\n imagen.send_keys('C:\\chromedriver\\developer.jpg')\n\n nombreUsuario = self.browser.find_element_by_id('id_username')\n nombreUsuario.clear()\n nombreUsuario.send_keys('ba.montanez2')\n\n clave = self.browser.find_element_by_id('id_password')\n clave.clear()\n clave.send_keys('prueba1234')\n\n botonGrabar = self.browser.find_element_by_id('id_editar')\n botonGrabar.click()\n\n self.browser.implicitly_wait(3)\n\n span = self.browser.find_element(By.XPATH, '//span[text()=\"Betzy Editado Montanez Editado\"]')\n self.assertIn('Betzy Editado Montanez Editado', span.text)\n\n def test_Comentar(self):\n self.browser.get('http://localhost:8000')\n span = self.browser.find_element(By.XPATH, '//span[text()=\"Betzy Editado Montanez Editado\"]')\n span.click()\n self.browser.implicitly_wait(3)\n h2 = self.browser.find_element(By.XPATH, '//h2[text()=\"Betzy Editado Montanez Editado\"]')\n\n correo = self.browser.find_element_by_id('correo')\n correo.send_keys('prueba@prueba.com')\n\n comentario = self.browser.find_element_by_id('comentario')\n comentario.send_keys('Comentario Prueba')\n\n botonAceptar = self.browser.find_element_by_id('id_comentar')\n botonAceptar.click()\n self.browser.implicitly_wait(6)\n\n span = self.browser.find_element(By.XPATH, '//p[text()=\"Comentario Prueba\"]')\n self.assertIn('Comentario Prueba', span.text)\n\n def test_listado(self):\n self.browser.get('http://localhost:8000')\n\n self.browser.implicitly_wait(3)\n span = self.browser.find_element(By.XPATH, '//span[text()=\"Betzy Editado Montanez Editado\"]')\n self.assertIn('Betzy Editado Montanez Editado', span.text)\n\n self.browser.implicitly_wait(3)\n span = self.browser.find_element(By.XPATH, '//span[text()=\"pepito perez\"]')\n self.assertIn('pepito perez', span.text)\n\n def test_buscar(self):\n 
self.browser.get('http://localhost:8000')\n\n correo = self.browser.find_element_by_id('buscar')\n correo.send_keys('Betzy Editado Montanez Editado')\n\n botonBuscar = self.browser.find_element_by_id('id_buscar')\n botonBuscar.click()\n self.browser.implicitly_wait(6)\n\n span = self.browser.find_element(By.XPATH, '//span[text()=\"Betzy Editado Montanez Editado\"]')\n self.assertIn('Betzy Editado Montanez Editado', span.text)",
"step-ids": [
6,
7,
9,
13,
14
]
}
|
[
6,
7,
9,
13,
14
] |
from PyQt5.QtWidgets import QWidget, QHBoxLayout, QGraphicsOpacityEffect, \
QPushButton
from PyQt5.QtCore import Qt
class ToolBar(QWidget):
    """Semi-transparent tool bar hosting the main window's quick actions.

    Holds a horizontal row of push buttons wired to handlers on the owning
    main window (open file, add text).
    """

    def __init__(self, parent):
        """Build and style the bar; *parent* is the owning main window."""
        super().__init__(parent)
        self._main_wnd = parent
        self.setAttribute(Qt.WA_StyledBackground, True)
        self.setObjectName("options")
        self.setStyleSheet("""
            #options, #closeButton {
                border-radius: 6px;
                background-color: rgb(0, 0, 0);
                color: #fff;
            }
            QToolBar {
                background-color: rgb(0, 0, 0);
                color: #fff;
            }
            """)
        self.setupWidgets()
        # Render the whole bar at 66% opacity so the canvas stays visible.
        opacity = QGraphicsOpacityEffect()
        opacity.setOpacity(0.66)
        self.setGraphicsEffect(opacity)
        self.setMinimumWidth(220)
        self.updateWidgets()
        self.connectSignals()
        self.setAcceptDrops(True)

    def mainWnd(self):
        """Return the owning main window."""
        return self._main_wnd

    def setupWidgets(self):
        """Create the horizontal button row and install it as the layout."""
        row = QHBoxLayout()
        row.setContentsMargins(6, 5, 12, 12)
        row.setSpacing(0)
        self._layout = row
        self._open_file = self.addButton("O", self._main_wnd.onOpenFile)
        row.addSpacing(8)
        self._add_text = self.addButton("T", self._main_wnd.onAddText)
        row.addStretch()
        self.setLayout(row)

    def addButton(self, text, action):
        """Append a push button labelled *text* whose click triggers *action*."""
        btn = QPushButton(text)
        btn.clicked.connect(action)
        self._layout.addWidget(btn)
        return btn

    def connectSignals(self):
        """Hook for signal wiring; nothing to connect yet."""
        pass

    def updateWidgets(self):
        """Hook for refreshing widget state; nothing to update yet."""
        pass
|
normal
|
{
"blob_id": "772e2e0a442c1b63330e9b526b76d767646b0c7c",
"index": 7819,
"step-1": "<mask token>\n\n\nclass ToolBar(QWidget):\n <mask token>\n\n def __init__(self, parent):\n super().__init__(parent)\n self._main_wnd = parent\n self.setAttribute(Qt.WA_StyledBackground, True)\n self.setObjectName('options')\n self.setStyleSheet(\n \"\"\"\n #options, #closeButton {\n border-radius: 6px;\n background-color: rgb(0, 0, 0);\n color: #fff;\n }\n QToolBar {\n background-color: rgb(0, 0, 0);\n color: #fff;\n }\n \"\"\"\n )\n self.setupWidgets()\n effect = QGraphicsOpacityEffect()\n effect.setOpacity(0.66)\n self.setGraphicsEffect(effect)\n self.setMinimumWidth(220)\n self.updateWidgets()\n self.connectSignals()\n self.setAcceptDrops(True)\n <mask token>\n\n def setupWidgets(self):\n self._layout = QHBoxLayout()\n self._layout.setContentsMargins(6, 5, 12, 12)\n self._layout.setSpacing(0)\n self._open_file = self.addButton('O', self._main_wnd.onOpenFile)\n self._layout.addSpacing(8)\n self._add_text = self.addButton('T', self._main_wnd.onAddText)\n self._layout.addStretch()\n self.setLayout(self._layout)\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ToolBar(QWidget):\n <mask token>\n\n def __init__(self, parent):\n super().__init__(parent)\n self._main_wnd = parent\n self.setAttribute(Qt.WA_StyledBackground, True)\n self.setObjectName('options')\n self.setStyleSheet(\n \"\"\"\n #options, #closeButton {\n border-radius: 6px;\n background-color: rgb(0, 0, 0);\n color: #fff;\n }\n QToolBar {\n background-color: rgb(0, 0, 0);\n color: #fff;\n }\n \"\"\"\n )\n self.setupWidgets()\n effect = QGraphicsOpacityEffect()\n effect.setOpacity(0.66)\n self.setGraphicsEffect(effect)\n self.setMinimumWidth(220)\n self.updateWidgets()\n self.connectSignals()\n self.setAcceptDrops(True)\n <mask token>\n\n def setupWidgets(self):\n self._layout = QHBoxLayout()\n self._layout.setContentsMargins(6, 5, 12, 12)\n self._layout.setSpacing(0)\n self._open_file = self.addButton('O', self._main_wnd.onOpenFile)\n self._layout.addSpacing(8)\n self._add_text = self.addButton('T', self._main_wnd.onAddText)\n self._layout.addStretch()\n self.setLayout(self._layout)\n\n def addButton(self, text, action):\n button = QPushButton(text)\n button.clicked.connect(action)\n self._layout.addWidget(button)\n return button\n <mask token>\n\n def updateWidgets(self):\n pass\n",
"step-3": "<mask token>\n\n\nclass ToolBar(QWidget):\n <mask token>\n\n def __init__(self, parent):\n super().__init__(parent)\n self._main_wnd = parent\n self.setAttribute(Qt.WA_StyledBackground, True)\n self.setObjectName('options')\n self.setStyleSheet(\n \"\"\"\n #options, #closeButton {\n border-radius: 6px;\n background-color: rgb(0, 0, 0);\n color: #fff;\n }\n QToolBar {\n background-color: rgb(0, 0, 0);\n color: #fff;\n }\n \"\"\"\n )\n self.setupWidgets()\n effect = QGraphicsOpacityEffect()\n effect.setOpacity(0.66)\n self.setGraphicsEffect(effect)\n self.setMinimumWidth(220)\n self.updateWidgets()\n self.connectSignals()\n self.setAcceptDrops(True)\n\n def mainWnd(self):\n return self._main_wnd\n\n def setupWidgets(self):\n self._layout = QHBoxLayout()\n self._layout.setContentsMargins(6, 5, 12, 12)\n self._layout.setSpacing(0)\n self._open_file = self.addButton('O', self._main_wnd.onOpenFile)\n self._layout.addSpacing(8)\n self._add_text = self.addButton('T', self._main_wnd.onAddText)\n self._layout.addStretch()\n self.setLayout(self._layout)\n\n def addButton(self, text, action):\n button = QPushButton(text)\n button.clicked.connect(action)\n self._layout.addWidget(button)\n return button\n <mask token>\n\n def updateWidgets(self):\n pass\n",
"step-4": "from PyQt5.QtWidgets import QWidget, QHBoxLayout, QGraphicsOpacityEffect, QPushButton\nfrom PyQt5.QtCore import Qt\n\n\nclass ToolBar(QWidget):\n \"\"\"\n Window for entering parameters\n \"\"\"\n\n def __init__(self, parent):\n super().__init__(parent)\n self._main_wnd = parent\n self.setAttribute(Qt.WA_StyledBackground, True)\n self.setObjectName('options')\n self.setStyleSheet(\n \"\"\"\n #options, #closeButton {\n border-radius: 6px;\n background-color: rgb(0, 0, 0);\n color: #fff;\n }\n QToolBar {\n background-color: rgb(0, 0, 0);\n color: #fff;\n }\n \"\"\"\n )\n self.setupWidgets()\n effect = QGraphicsOpacityEffect()\n effect.setOpacity(0.66)\n self.setGraphicsEffect(effect)\n self.setMinimumWidth(220)\n self.updateWidgets()\n self.connectSignals()\n self.setAcceptDrops(True)\n\n def mainWnd(self):\n return self._main_wnd\n\n def setupWidgets(self):\n self._layout = QHBoxLayout()\n self._layout.setContentsMargins(6, 5, 12, 12)\n self._layout.setSpacing(0)\n self._open_file = self.addButton('O', self._main_wnd.onOpenFile)\n self._layout.addSpacing(8)\n self._add_text = self.addButton('T', self._main_wnd.onAddText)\n self._layout.addStretch()\n self.setLayout(self._layout)\n\n def addButton(self, text, action):\n button = QPushButton(text)\n button.clicked.connect(action)\n self._layout.addWidget(button)\n return button\n\n def connectSignals(self):\n pass\n\n def updateWidgets(self):\n pass\n",
"step-5": "from PyQt5.QtWidgets import QWidget, QHBoxLayout, QGraphicsOpacityEffect, \\\n QPushButton\nfrom PyQt5.QtCore import Qt\n\n\nclass ToolBar(QWidget):\n \"\"\"\n Window for entering parameters\n \"\"\"\n\n def __init__(self, parent):\n super().__init__(parent)\n self._main_wnd = parent\n\n self.setAttribute(Qt.WA_StyledBackground, True)\n self.setObjectName(\"options\")\n self.setStyleSheet(\"\"\"\n #options, #closeButton {\n border-radius: 6px;\n background-color: rgb(0, 0, 0);\n color: #fff;\n }\n QToolBar {\n background-color: rgb(0, 0, 0);\n color: #fff;\n }\n \"\"\")\n\n self.setupWidgets()\n\n effect = QGraphicsOpacityEffect()\n effect.setOpacity(0.66)\n self.setGraphicsEffect(effect)\n\n self.setMinimumWidth(220)\n self.updateWidgets()\n self.connectSignals()\n\n self.setAcceptDrops(True)\n\n def mainWnd(self):\n return self._main_wnd\n\n def setupWidgets(self):\n self._layout = QHBoxLayout()\n self._layout.setContentsMargins(6, 5, 12, 12)\n self._layout.setSpacing(0)\n\n self._open_file = self.addButton(\"O\", self._main_wnd.onOpenFile)\n self._layout.addSpacing(8)\n self._add_text = self.addButton(\"T\", self._main_wnd.onAddText)\n\n self._layout.addStretch()\n\n self.setLayout(self._layout)\n\n def addButton(self, text, action):\n button = QPushButton(text)\n button.clicked.connect(action)\n self._layout.addWidget(button)\n return button\n\n def connectSignals(self):\n pass\n\n def updateWidgets(self):\n pass\n",
"step-ids": [
3,
5,
6,
9,
10
]
}
|
[
3,
5,
6,
9,
10
] |
# Command prefix: messages starting with this character are treated as commands.
config_prefix = "<"

# Image formats accepted for emote uploads.
# NOTE(review): "suported" is misspelled but kept — other modules may import it.
config_suported_types = ["PNG", "GIF", "JPEG"]

# Emote names may contain only letters, digits and underscores.
config_pattern = "^[A-Za-z0-9_]*$"

# Total storage budget in bytes (1 GB).
config_max_storage = 10 ** 9

# Maximum length of an emote name.
config_max_name_length = 20

config_message_by_line = 2

# Maximum characters allowed in a single outgoing message.
config_max_message_length = 2000

# Maximum emote file size in bytes (8 MB).
config_max_emote_length = 8 * 10 ** 6

# ASCII-art reply sent in response to a ping.
config_pong = """
 ,;;;!!!!!;;.
  :!!!!!!!!!!!!!!;
   :!!!!!!!!!!!!!!!!!;
    ;!!!!!!!!!!!!!!!!!!!;
     ;!!!!! P O N G !!!!!!!
      ;!!!!!!!!!!!!!!!!!!!!'
      ;!!!!!!!!!!!!!!!!!!!'
       :!!!!!!!!!!!!!!!!'
        ,!!!!!!!!!!!!!''
     ,;!!!'''''''
.!!!!'
!!!!`
`'
"""
|
normal
|
{
"blob_id": "dc2deb7d4c9cc126a6d80435fe9dbc16d6ac8941",
"index": 9397,
"step-1": "<mask token>\n",
"step-2": "config_prefix = '<'\nconfig_suported_types = ['PNG', 'GIF', 'JPEG']\nconfig_pattern = '^[A-Za-z0-9_]*$'\nconfig_max_storage = int(1000000000.0)\nconfig_max_name_length = 20\nconfig_message_by_line = 2\nconfig_max_message_length = 2000\nconfig_max_emote_length = 8 * int(1000000.0)\nconfig_pong = \"\"\"\n ,;;;!!!!!;;.\n :!!!!!!!!!!!!!!;\n :!!!!!!!!!!!!!!!!!;\n ;!!!!!!!!!!!!!!!!!!!;\n ;!!!!! P O N G !!!!!!!\n ;!!!!!!!!!!!!!!!!!!!!'\n ;!!!!!!!!!!!!!!!!!!!'\n :!!!!!!!!!!!!!!!!'\n ,!!!!!!!!!!!!!''\n ,;!!!'''''''\n.!!!!'\n!!!!`\n`'\n\"\"\"\n",
"step-3": "config_prefix = \"<\"\nconfig_suported_types = [\"PNG\", \"GIF\", \"JPEG\"]\nconfig_pattern = \"^[A-Za-z0-9_]*$\"\nconfig_max_storage = int(1E9)\nconfig_max_name_length = 20\nconfig_message_by_line = 2\nconfig_max_message_length = 2000\nconfig_max_emote_length = 8*int(1E6)\nconfig_pong = \"\"\"\n ,;;;!!!!!;;.\n :!!!!!!!!!!!!!!;\n :!!!!!!!!!!!!!!!!!;\n ;!!!!!!!!!!!!!!!!!!!;\n ;!!!!! P O N G !!!!!!!\n ;!!!!!!!!!!!!!!!!!!!!'\n ;!!!!!!!!!!!!!!!!!!!'\n :!!!!!!!!!!!!!!!!'\n ,!!!!!!!!!!!!!''\n ,;!!!'''''''\n.!!!!'\n!!!!`\n`'\n\"\"\"\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
    """Report Google vs. Pocket Sphinx accuracy on the saved predictions.

    Loads ./src/test/predictions.json, which holds three parallel lists
    keyed 'label', 'google' and 'pocket_sphinx', counts exact matches for
    each engine, and prints both accuracies to stdout.
    """
    with open('./src/test/predictions.json', 'r') as fh:
        data = json.load(fh)

    labels = data['label']
    total = len(labels)

    google = 0
    sphinx = 0
    for i, truth in enumerate(labels):
        # The prediction lists are parallel to the label list.
        if data['google'][i] == truth:
            google += 1
        if data['pocket_sphinx'][i] == truth:
            sphinx += 1

    print('Google %d out of %d: %.4f' % (google, total, google / total))
    print('Pocket Sphinx %d out of %d: %.4f' % (sphinx, total, sphinx / total))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
with open('./src/test/predictions.json', 'r') as f:
data = json.load(f)
total = len(data['label'])
google = 0
sphinx = 0
for i in range(len(data['label'])):
label = data['label'][i]
google_entry = data['google'][i]
sphinx_entry = data['pocket_sphinx'][i]
if google_entry == label:
google += 1
if sphinx_entry == label:
sphinx += 1
print('Google %d out of %d: %.4f' % (google, total, google / total))
print('Pocket Sphinx %d out of %d: %.4f' % (sphinx, total, sphinx / total))
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import json
def main():
with open('./src/test/predictions.json', 'r') as f:
data = json.load(f)
total = len(data['label'])
google = 0
sphinx = 0
for i in range(len(data['label'])):
label = data['label'][i]
google_entry = data['google'][i]
sphinx_entry = data['pocket_sphinx'][i]
if google_entry == label:
google += 1
if sphinx_entry == label:
sphinx += 1
print('Google %d out of %d: %.4f' % (google, total, google / total))
print('Pocket Sphinx %d out of %d: %.4f' % (sphinx, total, sphinx / total))
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import json
def main():
with open('./src/test/predictions.json', 'r') as f:
data = json.load(f)
total = len(data['label'])
google = 0
sphinx = 0
for i in range(len(data['label'])):
label = data['label'][i]
google_entry = data['google'][i]
sphinx_entry = data['pocket_sphinx'][i]
if google_entry == label:
google += 1
if sphinx_entry == label:
sphinx += 1
print('Google %d out of %d: %.4f' %(google, total, google/total))
print('Pocket Sphinx %d out of %d: %.4f' %(sphinx, total, sphinx/total))
if __name__ == "__main__":
main()
|
flexible
|
{
"blob_id": "9fc184fe3aa498138138403bef719c59b85b3a80",
"index": 4392,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n with open('./src/test/predictions.json', 'r') as f:\n data = json.load(f)\n total = len(data['label'])\n google = 0\n sphinx = 0\n for i in range(len(data['label'])):\n label = data['label'][i]\n google_entry = data['google'][i]\n sphinx_entry = data['pocket_sphinx'][i]\n if google_entry == label:\n google += 1\n if sphinx_entry == label:\n sphinx += 1\n print('Google %d out of %d: %.4f' % (google, total, google / total))\n print('Pocket Sphinx %d out of %d: %.4f' % (sphinx, total, sphinx / total))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n with open('./src/test/predictions.json', 'r') as f:\n data = json.load(f)\n total = len(data['label'])\n google = 0\n sphinx = 0\n for i in range(len(data['label'])):\n label = data['label'][i]\n google_entry = data['google'][i]\n sphinx_entry = data['pocket_sphinx'][i]\n if google_entry == label:\n google += 1\n if sphinx_entry == label:\n sphinx += 1\n print('Google %d out of %d: %.4f' % (google, total, google / total))\n print('Pocket Sphinx %d out of %d: %.4f' % (sphinx, total, sphinx / total))\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import json\n\n\ndef main():\n with open('./src/test/predictions.json', 'r') as f:\n data = json.load(f)\n total = len(data['label'])\n google = 0\n sphinx = 0\n for i in range(len(data['label'])):\n label = data['label'][i]\n google_entry = data['google'][i]\n sphinx_entry = data['pocket_sphinx'][i]\n if google_entry == label:\n google += 1\n if sphinx_entry == label:\n sphinx += 1\n print('Google %d out of %d: %.4f' % (google, total, google / total))\n print('Pocket Sphinx %d out of %d: %.4f' % (sphinx, total, sphinx / total))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import json\n\n\ndef main():\n with open('./src/test/predictions.json', 'r') as f:\n data = json.load(f)\n \n total = len(data['label'])\n google = 0\n sphinx = 0\n for i in range(len(data['label'])):\n label = data['label'][i]\n google_entry = data['google'][i]\n sphinx_entry = data['pocket_sphinx'][i]\n\n if google_entry == label:\n google += 1\n if sphinx_entry == label:\n sphinx += 1\n \n print('Google %d out of %d: %.4f' %(google, total, google/total))\n print('Pocket Sphinx %d out of %d: %.4f' %(sphinx, total, sphinx/total))\n\nif __name__ == \"__main__\":\n main()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Задание 1
# Выучите основные стандартные исключения, которые перечислены в данном уроке.
# Задание 2
# Напишите программу-калькулятор, которая поддерживает следующие операции: сложение, вычитание,
# умножение, деление и возведение в степень. Программа должна выдавать сообщения об ошибке и
# продолжать работу при вводе некорректных данных, делении на ноль и возведении нуля в
# отрицательную степень.
# Функция преобразования текстового списка в float
def list_convertion(user_list):
for index, item in enumerate(user_list):
user_list[index]=float(item)
return user_list
# Умножение
def multiplication(user_list):
multnum=1
for item in user_list:
multnum *= item
return multnum
# Деление. Здесь долго мучился, т.к. нужно первое число оставть и делить его на второе, т.е цикл со второго индекса
# пока не нашел в Интернете запись через slice напр.: for i in collection[1:]
def division(user_list):
divnum=user_list[0]
for item in user_list[1:]:
divnum /= item
return divnum
# Сложение
def adding(user_list):
sumnum=0
for item in user_list:
sumnum += item
return sumnum
# Вычитание. Логика та же, что и в делении
def subtraction(user_list):
subtractnum=user_list[0]
for item in user_list[1:]:
subtractnum -= item
return subtractnum
# Возведение в степень. Логика та же, что в делении
def powering(user_list):
pownum=user_list[0]
for item in user_list[1:]:
pownum **= item
return pownum
while True:
operation = input("Enter operation sign, please (*), (/), (+), (-), (^). \nTo quit, please enter 'done' > ")
if operation.lower() == "done": # Ключевым словом выхода из цикла будет done
print("Thank you for using the program!")
break
else:
try:
numbers_list = input("Enter the numbers separated by space > ").split(" ")
if len(numbers_list) < 2:
raise IndexError("You have entered less than 2 numbers")
else:
numbers_list = list_convertion(numbers_list) # Конвертация списка из str в float
try:
if "*" in operation: # Защита от ввода типа "*" или (*), т.е. проверяем есть ли во всей строке операция
print(f"Your multiplication result is {multiplication(numbers_list)}")
elif "^" in operation:
print(f"Your putting into power result is {powering(numbers_list)}")
elif "-" in operation:
print(f"Your subtraction result is {subtraction(numbers_list)}")
elif "+" in operation:
print(f"Your sum result is {adding(numbers_list)}")
elif "/" in operation:
print(f"Your division result is {division(numbers_list)}")
else:
raise ValueError("Unsupported operation, please try again")
except (ValueError, ZeroDivisionError) as e:
print(f"We have an issue. {e}")
except Exception as e:
print(f"We have an issue. {e}")
|
normal
|
{
"blob_id": "a8341bf422a4d31a83ff412c6aac75e5cb8c5e0f",
"index": 5876,
"step-1": "<mask token>\n\n\ndef adding(user_list):\n sumnum = 0\n for item in user_list:\n sumnum += item\n return sumnum\n\n\ndef subtraction(user_list):\n subtractnum = user_list[0]\n for item in user_list[1:]:\n subtractnum -= item\n return subtractnum\n\n\ndef powering(user_list):\n pownum = user_list[0]\n for item in user_list[1:]:\n pownum **= item\n return pownum\n\n\n<mask token>\n",
"step-2": "def list_convertion(user_list):\n for index, item in enumerate(user_list):\n user_list[index] = float(item)\n return user_list\n\n\ndef multiplication(user_list):\n multnum = 1\n for item in user_list:\n multnum *= item\n return multnum\n\n\n<mask token>\n\n\ndef adding(user_list):\n sumnum = 0\n for item in user_list:\n sumnum += item\n return sumnum\n\n\ndef subtraction(user_list):\n subtractnum = user_list[0]\n for item in user_list[1:]:\n subtractnum -= item\n return subtractnum\n\n\ndef powering(user_list):\n pownum = user_list[0]\n for item in user_list[1:]:\n pownum **= item\n return pownum\n\n\n<mask token>\n",
"step-3": "def list_convertion(user_list):\n for index, item in enumerate(user_list):\n user_list[index] = float(item)\n return user_list\n\n\ndef multiplication(user_list):\n multnum = 1\n for item in user_list:\n multnum *= item\n return multnum\n\n\ndef division(user_list):\n divnum = user_list[0]\n for item in user_list[1:]:\n divnum /= item\n return divnum\n\n\ndef adding(user_list):\n sumnum = 0\n for item in user_list:\n sumnum += item\n return sumnum\n\n\ndef subtraction(user_list):\n subtractnum = user_list[0]\n for item in user_list[1:]:\n subtractnum -= item\n return subtractnum\n\n\ndef powering(user_list):\n pownum = user_list[0]\n for item in user_list[1:]:\n pownum **= item\n return pownum\n\n\n<mask token>\n",
"step-4": "def list_convertion(user_list):\n for index, item in enumerate(user_list):\n user_list[index] = float(item)\n return user_list\n\n\ndef multiplication(user_list):\n multnum = 1\n for item in user_list:\n multnum *= item\n return multnum\n\n\ndef division(user_list):\n divnum = user_list[0]\n for item in user_list[1:]:\n divnum /= item\n return divnum\n\n\ndef adding(user_list):\n sumnum = 0\n for item in user_list:\n sumnum += item\n return sumnum\n\n\ndef subtraction(user_list):\n subtractnum = user_list[0]\n for item in user_list[1:]:\n subtractnum -= item\n return subtractnum\n\n\ndef powering(user_list):\n pownum = user_list[0]\n for item in user_list[1:]:\n pownum **= item\n return pownum\n\n\nwhile True:\n operation = input(\n \"\"\"Enter operation sign, please (*), (/), (+), (-), (^). \nTo quit, please enter 'done' > \"\"\"\n )\n if operation.lower() == 'done':\n print('Thank you for using the program!')\n break\n else:\n try:\n numbers_list = input('Enter the numbers separated by space > '\n ).split(' ')\n if len(numbers_list) < 2:\n raise IndexError('You have entered less than 2 numbers')\n else:\n numbers_list = list_convertion(numbers_list)\n try:\n if '*' in operation:\n print(\n f'Your multiplication result is {multiplication(numbers_list)}'\n )\n elif '^' in operation:\n print(\n f'Your putting into power result is {powering(numbers_list)}'\n )\n elif '-' in operation:\n print(\n f'Your subtraction result is {subtraction(numbers_list)}'\n )\n elif '+' in operation:\n print(f'Your sum result is {adding(numbers_list)}')\n elif '/' in operation:\n print(\n f'Your division result is {division(numbers_list)}'\n )\n else:\n raise ValueError(\n 'Unsupported operation, please try again')\n except (ValueError, ZeroDivisionError) as e:\n print(f'We have an issue. {e}')\n except Exception as e:\n print(f'We have an issue. {e}')\n",
"step-5": "# Задание 1\n# Выучите основные стандартные исключения, которые перечислены в данном уроке.\n# Задание 2\n# Напишите программу-калькулятор, которая поддерживает следующие операции: сложение, вычитание,\n# умножение, деление и возведение в степень. Программа должна выдавать сообщения об ошибке и\n# продолжать работу при вводе некорректных данных, делении на ноль и возведении нуля в\n# отрицательную степень.\n\n\n# Функция преобразования текстового списка в float\ndef list_convertion(user_list):\n for index, item in enumerate(user_list):\n user_list[index]=float(item)\n\n return user_list\n\n# Умножение\ndef multiplication(user_list):\n multnum=1\n for item in user_list:\n multnum *= item\n\n return multnum\n\n# Деление. Здесь долго мучился, т.к. нужно первое число оставть и делить его на второе, т.е цикл со второго индекса\n# пока не нашел в Интернете запись через slice напр.: for i in collection[1:]\ndef division(user_list):\n divnum=user_list[0]\n for item in user_list[1:]:\n divnum /= item\n\n return divnum\n\n# Сложение\ndef adding(user_list):\n sumnum=0\n for item in user_list:\n sumnum += item\n\n return sumnum\n\n# Вычитание. Логика та же, что и в делении\ndef subtraction(user_list):\n subtractnum=user_list[0]\n for item in user_list[1:]:\n subtractnum -= item\n\n return subtractnum\n\n# Возведение в степень. Логика та же, что в делении\ndef powering(user_list):\n pownum=user_list[0]\n for item in user_list[1:]:\n pownum **= item\n\n return pownum\n\nwhile True:\n operation = input(\"Enter operation sign, please (*), (/), (+), (-), (^). 
\\nTo quit, please enter 'done' > \")\n if operation.lower() == \"done\": # Ключевым словом выхода из цикла будет done\n print(\"Thank you for using the program!\")\n break\n else:\n try:\n numbers_list = input(\"Enter the numbers separated by space > \").split(\" \")\n if len(numbers_list) < 2:\n raise IndexError(\"You have entered less than 2 numbers\")\n else:\n numbers_list = list_convertion(numbers_list) # Конвертация списка из str в float\n try:\n if \"*\" in operation: # Защита от ввода типа \"*\" или (*), т.е. проверяем есть ли во всей строке операция\n print(f\"Your multiplication result is {multiplication(numbers_list)}\")\n elif \"^\" in operation:\n print(f\"Your putting into power result is {powering(numbers_list)}\")\n elif \"-\" in operation:\n print(f\"Your subtraction result is {subtraction(numbers_list)}\")\n elif \"+\" in operation:\n print(f\"Your sum result is {adding(numbers_list)}\")\n elif \"/\" in operation:\n print(f\"Your division result is {division(numbers_list)}\")\n else:\n raise ValueError(\"Unsupported operation, please try again\")\n except (ValueError, ZeroDivisionError) as e:\n print(f\"We have an issue. {e}\")\n except Exception as e:\n print(f\"We have an issue. {e}\")\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = [path('', views.artifact, name='artifacts'), path(
'<int:artifact_id>', views.detail, name='detail'), path('register/',
views.register, name='register')]
<|reserved_special_token_1|>
from django.contrib import admin
from django.urls import path
from . import views
urlpatterns = [path('', views.artifact, name='artifacts'), path(
'<int:artifact_id>', views.detail, name='detail'), path('register/',
views.register, name='register')]
<|reserved_special_token_1|>
from django.contrib import admin
from django.urls import path
from . import views
urlpatterns = [
path('', views.artifact, name="artifacts"),
path('<int:artifact_id>', views.detail, name="detail"),
path('register/', views.register, name="register")
]
|
flexible
|
{
"blob_id": "9b73037e8af7d4f91261cebf895b68650182fcd5",
"index": 2780,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('', views.artifact, name='artifacts'), path(\n '<int:artifact_id>', views.detail, name='detail'), path('register/',\n views.register, name='register')]\n",
"step-3": "from django.contrib import admin\nfrom django.urls import path\nfrom . import views\nurlpatterns = [path('', views.artifact, name='artifacts'), path(\n '<int:artifact_id>', views.detail, name='detail'), path('register/',\n views.register, name='register')]\n",
"step-4": "from django.contrib import admin\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.artifact, name=\"artifacts\"),\n path('<int:artifact_id>', views.detail, name=\"detail\"),\n path('register/', views.register, name=\"register\")\n]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from xgboost import XGBRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import pandas as pd
import numpy as np
from ghg import GHGPredictor
predictor = GHGPredictor()
dataset_df = pd.read_csv("db-wheat.csv", index_col=0)
# print(dataset_df.iloc[1])
dataset_df_2 = dataset_df.drop(columns=['Area', 'Year', 'Crop', 'Previous crop'])
# print(dataset_df_2)
dataset = dataset_df_2.to_numpy()
# print(dataset)
X, Y = dataset[:, :-1], dataset[:, -1:]
# print(X)
# print(Y)
seed = 10
test_size = 0.2
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=test_size, random_state=seed)
# print(len(X_train))
# print(len(X_test))
# print(len(Y_train))
# print(len(Y_test))
model = XGBRegressor()
model.fit(X_train, Y_train)
# print(model)
print(dataset_df_2.columns)
print(model.feature_importances_)
# print(X_test.shape)
y_pred = model.predict(X_test)
# predictions = [round(value) for value in y_pred]
Y_test = map(lambda x: x[0], Y_test)
# print(Y_test)
res = zip(y_pred, Y_test)
# print(list(res))
ghg_predictor = GHGPredictor()
def predict(model, row):
preds = []
# print(row)
# print(row.).shape)
for perc in range(-10, 11):
new_row = row.copy()
row_copy = row.copy()
# new_row = new_row.iloc[0]
new_row = new_row.drop(labels=['Area', 'Year', 'Crop', 'Previous crop', 'Yield'])
# print(new_row.labels)
# new_row = new_row.tolist()
# print(new_row)
# print(type(new_row))
nitrogen = new_row['N'] * ((100 + perc) / 100)
new_row['N'] = nitrogen
row_copy['N'] = nitrogen
new_row = np.array([new_row])
# print(new_row)
pred = model.predict(new_row)
row_df = pd.DataFrame([row_copy])
fuel_ghg = predictor.fuel_ghg_emissions(row_df["Area"], unit="kg")
fuel_ghg = fuel_ghg.values[0]
ms_ghg = predictor.managed_soils_ghg(row_df['N'], row_df['Manure'], row_df['Area'], row_df['Crop'], row_df['Yield'])
ms_ghg = ms_ghg.values[0]
sum_ghg = fuel_ghg + ms_ghg
area = row_df['Area'].iloc[0]
# print(area)
# print(sum_ghg)
# print(row_df['N'])
# print(sum_ghg)
# GHG
# fuel = ghg_predictor.fuel_ghg_emissions()
preds.append([nitrogen, pred[0], sum_ghg])
print('{:4}% | Yield: {:.2f} | Area {} | C02_ha {:.5f} | C02 {:.5f}'.format(100 + perc, pred[0], area, sum_ghg / area, sum_ghg))
return preds
# accuracy = accuracy_score(Y_test, predictions)
# print("Accuracy: %.2f%%" % (accuracy * 100.0))
import random
rand_ind = random.randrange(0, len(dataset))
rand_row = dataset_df.iloc[rand_ind]
while rand_row['N'] == 0:
rand_ind = random.randrange(0, len(dataset))
rand_row = dataset_df.iloc[rand_ind]
# rand_row = rand_row[:-1]
preds = predict(model, rand_row)
import matplotlib.pyplot as plt
fig, ax1 = plt.subplots()
n_amount = [x[0] for x in preds]
yield_p = [x[1] for x in preds]
ghg_p = [x[2] for x in preds]
color = 'tab:red'
ax1.set_xlabel('N')
ax1.set_ylabel('Yield (t)', color=color)
ax1.set_title(f'GHG and yield predictions (Area: {rand_row["Area"]} ha)')
ax1.plot(n_amount, yield_p, color=color)
ax1.tick_params(axis='y', labelcolor=color)
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
color = 'tab:blue'
ax2.set_ylabel('CO2 (kg)', color=color) # we already handled the x-label with ax1
ax2.plot(n_amount, ghg_p, color=color)
ax2.tick_params(axis='y', labelcolor=color)
print(n_amount)
fig.tight_layout() # otherwise the right y-label is slightly clipped
plt.show()
|
normal
|
{
"blob_id": "0ebd3ca5fd29b0f2f2149dd162b37f39668f1c58",
"index": 7397,
"step-1": "<mask token>\n\n\ndef predict(model, row):\n preds = []\n for perc in range(-10, 11):\n new_row = row.copy()\n row_copy = row.copy()\n new_row = new_row.drop(labels=['Area', 'Year', 'Crop',\n 'Previous crop', 'Yield'])\n nitrogen = new_row['N'] * ((100 + perc) / 100)\n new_row['N'] = nitrogen\n row_copy['N'] = nitrogen\n new_row = np.array([new_row])\n pred = model.predict(new_row)\n row_df = pd.DataFrame([row_copy])\n fuel_ghg = predictor.fuel_ghg_emissions(row_df['Area'], unit='kg')\n fuel_ghg = fuel_ghg.values[0]\n ms_ghg = predictor.managed_soils_ghg(row_df['N'], row_df['Manure'],\n row_df['Area'], row_df['Crop'], row_df['Yield'])\n ms_ghg = ms_ghg.values[0]\n sum_ghg = fuel_ghg + ms_ghg\n area = row_df['Area'].iloc[0]\n preds.append([nitrogen, pred[0], sum_ghg])\n print('{:4}% | Yield: {:.2f} | Area {} | C02_ha {:.5f} | C02 {:.5f}'\n .format(100 + perc, pred[0], area, sum_ghg / area, sum_ghg))\n return preds\n\n\n<mask token>\n",
"step-2": "<mask token>\nmodel.fit(X_train, Y_train)\nprint(dataset_df_2.columns)\nprint(model.feature_importances_)\n<mask token>\n\n\ndef predict(model, row):\n preds = []\n for perc in range(-10, 11):\n new_row = row.copy()\n row_copy = row.copy()\n new_row = new_row.drop(labels=['Area', 'Year', 'Crop',\n 'Previous crop', 'Yield'])\n nitrogen = new_row['N'] * ((100 + perc) / 100)\n new_row['N'] = nitrogen\n row_copy['N'] = nitrogen\n new_row = np.array([new_row])\n pred = model.predict(new_row)\n row_df = pd.DataFrame([row_copy])\n fuel_ghg = predictor.fuel_ghg_emissions(row_df['Area'], unit='kg')\n fuel_ghg = fuel_ghg.values[0]\n ms_ghg = predictor.managed_soils_ghg(row_df['N'], row_df['Manure'],\n row_df['Area'], row_df['Crop'], row_df['Yield'])\n ms_ghg = ms_ghg.values[0]\n sum_ghg = fuel_ghg + ms_ghg\n area = row_df['Area'].iloc[0]\n preds.append([nitrogen, pred[0], sum_ghg])\n print('{:4}% | Yield: {:.2f} | Area {} | C02_ha {:.5f} | C02 {:.5f}'\n .format(100 + perc, pred[0], area, sum_ghg / area, sum_ghg))\n return preds\n\n\n<mask token>\nwhile rand_row['N'] == 0:\n rand_ind = random.randrange(0, len(dataset))\n rand_row = dataset_df.iloc[rand_ind]\n<mask token>\nax1.set_xlabel('N')\nax1.set_ylabel('Yield (t)', color=color)\nax1.set_title(f\"GHG and yield predictions (Area: {rand_row['Area']} ha)\")\nax1.plot(n_amount, yield_p, color=color)\nax1.tick_params(axis='y', labelcolor=color)\n<mask token>\nax2.set_ylabel('CO2 (kg)', color=color)\nax2.plot(n_amount, ghg_p, color=color)\nax2.tick_params(axis='y', labelcolor=color)\nprint(n_amount)\nfig.tight_layout()\nplt.show()\n",
"step-3": "<mask token>\npredictor = GHGPredictor()\ndataset_df = pd.read_csv('db-wheat.csv', index_col=0)\ndataset_df_2 = dataset_df.drop(columns=['Area', 'Year', 'Crop',\n 'Previous crop'])\ndataset = dataset_df_2.to_numpy()\nX, Y = dataset[:, :-1], dataset[:, -1:]\nseed = 10\ntest_size = 0.2\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=\n test_size, random_state=seed)\nmodel = XGBRegressor()\nmodel.fit(X_train, Y_train)\nprint(dataset_df_2.columns)\nprint(model.feature_importances_)\ny_pred = model.predict(X_test)\nY_test = map(lambda x: x[0], Y_test)\nres = zip(y_pred, Y_test)\nghg_predictor = GHGPredictor()\n\n\ndef predict(model, row):\n preds = []\n for perc in range(-10, 11):\n new_row = row.copy()\n row_copy = row.copy()\n new_row = new_row.drop(labels=['Area', 'Year', 'Crop',\n 'Previous crop', 'Yield'])\n nitrogen = new_row['N'] * ((100 + perc) / 100)\n new_row['N'] = nitrogen\n row_copy['N'] = nitrogen\n new_row = np.array([new_row])\n pred = model.predict(new_row)\n row_df = pd.DataFrame([row_copy])\n fuel_ghg = predictor.fuel_ghg_emissions(row_df['Area'], unit='kg')\n fuel_ghg = fuel_ghg.values[0]\n ms_ghg = predictor.managed_soils_ghg(row_df['N'], row_df['Manure'],\n row_df['Area'], row_df['Crop'], row_df['Yield'])\n ms_ghg = ms_ghg.values[0]\n sum_ghg = fuel_ghg + ms_ghg\n area = row_df['Area'].iloc[0]\n preds.append([nitrogen, pred[0], sum_ghg])\n print('{:4}% | Yield: {:.2f} | Area {} | C02_ha {:.5f} | C02 {:.5f}'\n .format(100 + perc, pred[0], area, sum_ghg / area, sum_ghg))\n return preds\n\n\n<mask token>\nrand_ind = random.randrange(0, len(dataset))\nrand_row = dataset_df.iloc[rand_ind]\nwhile rand_row['N'] == 0:\n rand_ind = random.randrange(0, len(dataset))\n rand_row = dataset_df.iloc[rand_ind]\npreds = predict(model, rand_row)\n<mask token>\nfig, ax1 = plt.subplots()\nn_amount = [x[0] for x in preds]\nyield_p = [x[1] for x in preds]\nghg_p = [x[2] for x in preds]\ncolor = 
'tab:red'\nax1.set_xlabel('N')\nax1.set_ylabel('Yield (t)', color=color)\nax1.set_title(f\"GHG and yield predictions (Area: {rand_row['Area']} ha)\")\nax1.plot(n_amount, yield_p, color=color)\nax1.tick_params(axis='y', labelcolor=color)\nax2 = ax1.twinx()\ncolor = 'tab:blue'\nax2.set_ylabel('CO2 (kg)', color=color)\nax2.plot(n_amount, ghg_p, color=color)\nax2.tick_params(axis='y', labelcolor=color)\nprint(n_amount)\nfig.tight_layout()\nplt.show()\n",
"step-4": "from xgboost import XGBRegressor\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport pandas as pd\nimport numpy as np\nfrom ghg import GHGPredictor\npredictor = GHGPredictor()\ndataset_df = pd.read_csv('db-wheat.csv', index_col=0)\ndataset_df_2 = dataset_df.drop(columns=['Area', 'Year', 'Crop',\n 'Previous crop'])\ndataset = dataset_df_2.to_numpy()\nX, Y = dataset[:, :-1], dataset[:, -1:]\nseed = 10\ntest_size = 0.2\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=\n test_size, random_state=seed)\nmodel = XGBRegressor()\nmodel.fit(X_train, Y_train)\nprint(dataset_df_2.columns)\nprint(model.feature_importances_)\ny_pred = model.predict(X_test)\nY_test = map(lambda x: x[0], Y_test)\nres = zip(y_pred, Y_test)\nghg_predictor = GHGPredictor()\n\n\ndef predict(model, row):\n preds = []\n for perc in range(-10, 11):\n new_row = row.copy()\n row_copy = row.copy()\n new_row = new_row.drop(labels=['Area', 'Year', 'Crop',\n 'Previous crop', 'Yield'])\n nitrogen = new_row['N'] * ((100 + perc) / 100)\n new_row['N'] = nitrogen\n row_copy['N'] = nitrogen\n new_row = np.array([new_row])\n pred = model.predict(new_row)\n row_df = pd.DataFrame([row_copy])\n fuel_ghg = predictor.fuel_ghg_emissions(row_df['Area'], unit='kg')\n fuel_ghg = fuel_ghg.values[0]\n ms_ghg = predictor.managed_soils_ghg(row_df['N'], row_df['Manure'],\n row_df['Area'], row_df['Crop'], row_df['Yield'])\n ms_ghg = ms_ghg.values[0]\n sum_ghg = fuel_ghg + ms_ghg\n area = row_df['Area'].iloc[0]\n preds.append([nitrogen, pred[0], sum_ghg])\n print('{:4}% | Yield: {:.2f} | Area {} | C02_ha {:.5f} | C02 {:.5f}'\n .format(100 + perc, pred[0], area, sum_ghg / area, sum_ghg))\n return preds\n\n\nimport random\nrand_ind = random.randrange(0, len(dataset))\nrand_row = dataset_df.iloc[rand_ind]\nwhile rand_row['N'] == 0:\n rand_ind = random.randrange(0, len(dataset))\n rand_row = dataset_df.iloc[rand_ind]\npreds = predict(model, 
rand_row)\nimport matplotlib.pyplot as plt\nfig, ax1 = plt.subplots()\nn_amount = [x[0] for x in preds]\nyield_p = [x[1] for x in preds]\nghg_p = [x[2] for x in preds]\ncolor = 'tab:red'\nax1.set_xlabel('N')\nax1.set_ylabel('Yield (t)', color=color)\nax1.set_title(f\"GHG and yield predictions (Area: {rand_row['Area']} ha)\")\nax1.plot(n_amount, yield_p, color=color)\nax1.tick_params(axis='y', labelcolor=color)\nax2 = ax1.twinx()\ncolor = 'tab:blue'\nax2.set_ylabel('CO2 (kg)', color=color)\nax2.plot(n_amount, ghg_p, color=color)\nax2.tick_params(axis='y', labelcolor=color)\nprint(n_amount)\nfig.tight_layout()\nplt.show()\n",
"step-5": "from xgboost import XGBRegressor\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport pandas as pd\nimport numpy as np\n\nfrom ghg import GHGPredictor\n\npredictor = GHGPredictor()\n\ndataset_df = pd.read_csv(\"db-wheat.csv\", index_col=0)\n\n# print(dataset_df.iloc[1])\n\ndataset_df_2 = dataset_df.drop(columns=['Area', 'Year', 'Crop', 'Previous crop'])\n# print(dataset_df_2)\n\ndataset = dataset_df_2.to_numpy()\n\n# print(dataset)\n\nX, Y = dataset[:, :-1], dataset[:, -1:]\n\n# print(X)\n# print(Y)\n\nseed = 10\ntest_size = 0.2\n\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=test_size, random_state=seed)\n\n# print(len(X_train))\n# print(len(X_test))\n# print(len(Y_train))\n# print(len(Y_test))\n\nmodel = XGBRegressor()\nmodel.fit(X_train, Y_train)\n\n# print(model)\nprint(dataset_df_2.columns)\nprint(model.feature_importances_)\n\n# print(X_test.shape)\ny_pred = model.predict(X_test)\n# predictions = [round(value) for value in y_pred]\n\nY_test = map(lambda x: x[0], Y_test)\n# print(Y_test)\n\nres = zip(y_pred, Y_test)\n\n# print(list(res))\n\nghg_predictor = GHGPredictor()\n\ndef predict(model, row):\n preds = []\n # print(row)\n # print(row.).shape)\n for perc in range(-10, 11):\n new_row = row.copy()\n row_copy = row.copy()\n\n # new_row = new_row.iloc[0]\n new_row = new_row.drop(labels=['Area', 'Year', 'Crop', 'Previous crop', 'Yield'])\n # print(new_row.labels)\n # new_row = new_row.tolist()\n\n # print(new_row)\n # print(type(new_row))\n nitrogen = new_row['N'] * ((100 + perc) / 100)\n\n new_row['N'] = nitrogen\n row_copy['N'] = nitrogen\n new_row = np.array([new_row])\n # print(new_row)\n pred = model.predict(new_row)\n\n\n row_df = pd.DataFrame([row_copy])\n\n fuel_ghg = predictor.fuel_ghg_emissions(row_df[\"Area\"], unit=\"kg\")\n \n fuel_ghg = fuel_ghg.values[0]\n\n ms_ghg = predictor.managed_soils_ghg(row_df['N'], row_df['Manure'], row_df['Area'], row_df['Crop'], 
row_df['Yield'])\n\n ms_ghg = ms_ghg.values[0]\n\n\n sum_ghg = fuel_ghg + ms_ghg\n\n area = row_df['Area'].iloc[0]\n # print(area)\n\n # print(sum_ghg)\n # print(row_df['N'])\n\n # print(sum_ghg)\n\n # GHG\n # fuel = ghg_predictor.fuel_ghg_emissions()\n\n preds.append([nitrogen, pred[0], sum_ghg])\n\n print('{:4}% | Yield: {:.2f} | Area {} | C02_ha {:.5f} | C02 {:.5f}'.format(100 + perc, pred[0], area, sum_ghg / area, sum_ghg))\n\n return preds\n\n# accuracy = accuracy_score(Y_test, predictions)\n# print(\"Accuracy: %.2f%%\" % (accuracy * 100.0))\n\nimport random\n\nrand_ind = random.randrange(0, len(dataset))\nrand_row = dataset_df.iloc[rand_ind]\nwhile rand_row['N'] == 0:\n rand_ind = random.randrange(0, len(dataset))\n rand_row = dataset_df.iloc[rand_ind]\n# rand_row = rand_row[:-1]\n\npreds = predict(model, rand_row)\n\nimport matplotlib.pyplot as plt\n\nfig, ax1 = plt.subplots()\n\nn_amount = [x[0] for x in preds]\nyield_p = [x[1] for x in preds]\nghg_p = [x[2] for x in preds]\n\ncolor = 'tab:red'\nax1.set_xlabel('N')\nax1.set_ylabel('Yield (t)', color=color)\nax1.set_title(f'GHG and yield predictions (Area: {rand_row[\"Area\"]} ha)')\nax1.plot(n_amount, yield_p, color=color)\nax1.tick_params(axis='y', labelcolor=color)\n\nax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis\n\ncolor = 'tab:blue'\nax2.set_ylabel('CO2 (kg)', color=color) # we already handled the x-label with ax1\nax2.plot(n_amount, ghg_p, color=color)\nax2.tick_params(axis='y', labelcolor=color)\n\nprint(n_amount)\n\nfig.tight_layout() # otherwise the right y-label is slightly clipped\nplt.show()\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
api_id = '2168275'
api_hash = 'e011a9cb95b7e7e153aa5840985fc883'
<|reserved_special_token_1|>
api_id = "2168275"
api_hash = "e011a9cb95b7e7e153aa5840985fc883"
|
flexible
|
{
"blob_id": "c6d6fcc242e1b63104a3f3eb788880635257ff4c",
"index": 7503,
"step-1": "<mask token>\n",
"step-2": "api_id = '2168275'\napi_hash = 'e011a9cb95b7e7e153aa5840985fc883'\n",
"step-3": "api_id = \"2168275\"\napi_hash = \"e011a9cb95b7e7e153aa5840985fc883\"\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
while True:
print("Light Levels:" + input.light_level())
if input.light_level() < 6:
light.set_all(light.rgb(255, 0, 255))
elif input.light_level() < 13:
light.set_all(light.rgb(255, 0, 0))
else:
light.clear()
|
normal
|
{
"blob_id": "7277b045f85d58383f26ab0d3299feb166f45e36",
"index": 2575,
"step-1": "<mask token>\n",
"step-2": "while True:\n print('Light Levels:' + input.light_level())\n if input.light_level() < 6:\n light.set_all(light.rgb(255, 0, 255))\n elif input.light_level() < 13:\n light.set_all(light.rgb(255, 0, 0))\nelse:\n light.clear()\n",
"step-3": "while True:\n print(\"Light Levels:\" + input.light_level())\n if input.light_level() < 6:\n light.set_all(light.rgb(255, 0, 255))\n elif input.light_level() < 13:\n light.set_all(light.rgb(255, 0, 0))\nelse:\n light.clear()\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
"""empty message
Revision ID: 3e4ee9eaaeaa
Revises: 6d58871d74a0
Create Date: 2016-07-25 15:30:38.008238
"""
# revision identifiers, used by Alembic.
revision = '3e4ee9eaaeaa'
down_revision = '6d58871d74a0'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_index(op.f('ix_account_interface'), 'account', ['interface'], unique=False)
op.create_index(op.f('ix_account_mac'), 'account', ['mac'], unique=False)
op.create_index(op.f('ix_account_sub_int'), 'account', ['sub_int'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_account_sub_int'), table_name='account')
op.drop_index(op.f('ix_account_mac'), table_name='account')
op.drop_index(op.f('ix_account_interface'), table_name='account')
### end Alembic commands ###
|
normal
|
{
"blob_id": "db49313d2bc8b9f0be0dfd48c6065ea0ab3294cb",
"index": 4032,
"step-1": "<mask token>\n\n\ndef downgrade():\n op.drop_index(op.f('ix_account_sub_int'), table_name='account')\n op.drop_index(op.f('ix_account_mac'), table_name='account')\n op.drop_index(op.f('ix_account_interface'), table_name='account')\n",
"step-2": "<mask token>\n\n\ndef upgrade():\n op.create_index(op.f('ix_account_interface'), 'account', ['interface'],\n unique=False)\n op.create_index(op.f('ix_account_mac'), 'account', ['mac'], unique=False)\n op.create_index(op.f('ix_account_sub_int'), 'account', ['sub_int'],\n unique=False)\n\n\ndef downgrade():\n op.drop_index(op.f('ix_account_sub_int'), table_name='account')\n op.drop_index(op.f('ix_account_mac'), table_name='account')\n op.drop_index(op.f('ix_account_interface'), table_name='account')\n",
"step-3": "<mask token>\nrevision = '3e4ee9eaaeaa'\ndown_revision = '6d58871d74a0'\n<mask token>\n\n\ndef upgrade():\n op.create_index(op.f('ix_account_interface'), 'account', ['interface'],\n unique=False)\n op.create_index(op.f('ix_account_mac'), 'account', ['mac'], unique=False)\n op.create_index(op.f('ix_account_sub_int'), 'account', ['sub_int'],\n unique=False)\n\n\ndef downgrade():\n op.drop_index(op.f('ix_account_sub_int'), table_name='account')\n op.drop_index(op.f('ix_account_mac'), table_name='account')\n op.drop_index(op.f('ix_account_interface'), table_name='account')\n",
"step-4": "<mask token>\nrevision = '3e4ee9eaaeaa'\ndown_revision = '6d58871d74a0'\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n op.create_index(op.f('ix_account_interface'), 'account', ['interface'],\n unique=False)\n op.create_index(op.f('ix_account_mac'), 'account', ['mac'], unique=False)\n op.create_index(op.f('ix_account_sub_int'), 'account', ['sub_int'],\n unique=False)\n\n\ndef downgrade():\n op.drop_index(op.f('ix_account_sub_int'), table_name='account')\n op.drop_index(op.f('ix_account_mac'), table_name='account')\n op.drop_index(op.f('ix_account_interface'), table_name='account')\n",
"step-5": "\"\"\"empty message\n\nRevision ID: 3e4ee9eaaeaa\nRevises: 6d58871d74a0\nCreate Date: 2016-07-25 15:30:38.008238\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '3e4ee9eaaeaa'\ndown_revision = '6d58871d74a0'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_index(op.f('ix_account_interface'), 'account', ['interface'], unique=False)\n op.create_index(op.f('ix_account_mac'), 'account', ['mac'], unique=False)\n op.create_index(op.f('ix_account_sub_int'), 'account', ['sub_int'], unique=False)\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_index(op.f('ix_account_sub_int'), table_name='account')\n op.drop_index(op.f('ix_account_mac'), table_name='account')\n op.drop_index(op.f('ix_account_interface'), table_name='account')\n ### end Alembic commands ###\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# -*- coding: utf-8 -*-
# @Time : 2019/3/5 上午9:55
# @Author : yidxue
from src.handler.base.base_handler import BaseHandler
from src.utils.tools import read_model
from tornado.options import options
import os
module_path = os.path.abspath(os.path.join(os.curdir))
model_path = os.path.join(module_path, 'model')
class ReloadModelHandler(BaseHandler):
def __init__(self, application, request, **kwargs):
super(ReloadModelHandler, self).__init__(application, request, **kwargs)
def do_action(self):
model_name = self.get_argument('modelname', None)
if model_name is None:
for model_name in os.listdir(model_path):
if model_name.find(".model") == -1:
continue
model = read_model(os.path.join(model_path, model_name))
options.models[model_name] = model
self.set_result(result={"message": "server has reload all models"})
else:
model = read_model(os.path.join(model_path, model_name))
options.models[model_name] = model
self.set_result(result={"message": "server has reload {model}".format(model=model_name)})
|
normal
|
{
"blob_id": "a8ae59bb525c52ef852655f0ef1e32d96c8914d6",
"index": 1356,
"step-1": "<mask token>\n\n\nclass ReloadModelHandler(BaseHandler):\n\n def __init__(self, application, request, **kwargs):\n super(ReloadModelHandler, self).__init__(application, request, **kwargs\n )\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ReloadModelHandler(BaseHandler):\n\n def __init__(self, application, request, **kwargs):\n super(ReloadModelHandler, self).__init__(application, request, **kwargs\n )\n\n def do_action(self):\n model_name = self.get_argument('modelname', None)\n if model_name is None:\n for model_name in os.listdir(model_path):\n if model_name.find('.model') == -1:\n continue\n model = read_model(os.path.join(model_path, model_name))\n options.models[model_name] = model\n self.set_result(result={'message': 'server has reload all models'})\n else:\n model = read_model(os.path.join(model_path, model_name))\n options.models[model_name] = model\n self.set_result(result={'message': 'server has reload {model}'.\n format(model=model_name)})\n",
"step-3": "<mask token>\nmodule_path = os.path.abspath(os.path.join(os.curdir))\nmodel_path = os.path.join(module_path, 'model')\n\n\nclass ReloadModelHandler(BaseHandler):\n\n def __init__(self, application, request, **kwargs):\n super(ReloadModelHandler, self).__init__(application, request, **kwargs\n )\n\n def do_action(self):\n model_name = self.get_argument('modelname', None)\n if model_name is None:\n for model_name in os.listdir(model_path):\n if model_name.find('.model') == -1:\n continue\n model = read_model(os.path.join(model_path, model_name))\n options.models[model_name] = model\n self.set_result(result={'message': 'server has reload all models'})\n else:\n model = read_model(os.path.join(model_path, model_name))\n options.models[model_name] = model\n self.set_result(result={'message': 'server has reload {model}'.\n format(model=model_name)})\n",
"step-4": "from src.handler.base.base_handler import BaseHandler\nfrom src.utils.tools import read_model\nfrom tornado.options import options\nimport os\nmodule_path = os.path.abspath(os.path.join(os.curdir))\nmodel_path = os.path.join(module_path, 'model')\n\n\nclass ReloadModelHandler(BaseHandler):\n\n def __init__(self, application, request, **kwargs):\n super(ReloadModelHandler, self).__init__(application, request, **kwargs\n )\n\n def do_action(self):\n model_name = self.get_argument('modelname', None)\n if model_name is None:\n for model_name in os.listdir(model_path):\n if model_name.find('.model') == -1:\n continue\n model = read_model(os.path.join(model_path, model_name))\n options.models[model_name] = model\n self.set_result(result={'message': 'server has reload all models'})\n else:\n model = read_model(os.path.join(model_path, model_name))\n options.models[model_name] = model\n self.set_result(result={'message': 'server has reload {model}'.\n format(model=model_name)})\n",
"step-5": "# -*- coding: utf-8 -*-\n# @Time : 2019/3/5 上午9:55\n# @Author : yidxue\nfrom src.handler.base.base_handler import BaseHandler\nfrom src.utils.tools import read_model\nfrom tornado.options import options\nimport os\n\nmodule_path = os.path.abspath(os.path.join(os.curdir))\nmodel_path = os.path.join(module_path, 'model')\n\n\nclass ReloadModelHandler(BaseHandler):\n\n def __init__(self, application, request, **kwargs):\n super(ReloadModelHandler, self).__init__(application, request, **kwargs)\n\n def do_action(self):\n model_name = self.get_argument('modelname', None)\n if model_name is None:\n for model_name in os.listdir(model_path):\n if model_name.find(\".model\") == -1:\n continue\n model = read_model(os.path.join(model_path, model_name))\n options.models[model_name] = model\n self.set_result(result={\"message\": \"server has reload all models\"})\n else:\n model = read_model(os.path.join(model_path, model_name))\n options.models[model_name] = model\n self.set_result(result={\"message\": \"server has reload {model}\".format(model=model_name)})\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def process_resources(_res_iter):
for rows in _res_iter:
def process_rows(_rows):
for row in _rows:
for column in columns:
if column in row:
del row[column]
yield row
yield process_rows(rows)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for resource in datapackage['resources']:
fields = resource.get('schema', {}).get('fields')
if fields is not None:
fields = [field for field in fields if field['name'] not in columns]
resource['schema']['fields'] = fields
def process_resources(_res_iter):
for rows in _res_iter:
def process_rows(_rows):
for row in _rows:
for column in columns:
if column in row:
del row[column]
yield row
yield process_rows(rows)
spew(datapackage, process_resources(res_iter))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
params, datapackage, res_iter = ingest()
columns = params['columns']
for resource in datapackage['resources']:
fields = resource.get('schema', {}).get('fields')
if fields is not None:
fields = [field for field in fields if field['name'] not in columns]
resource['schema']['fields'] = fields
def process_resources(_res_iter):
for rows in _res_iter:
def process_rows(_rows):
for row in _rows:
for column in columns:
if column in row:
del row[column]
yield row
yield process_rows(rows)
spew(datapackage, process_resources(res_iter))
<|reserved_special_token_1|>
from datapackage_pipelines.wrapper import ingest, spew
params, datapackage, res_iter = ingest()
columns = params['columns']
for resource in datapackage['resources']:
fields = resource.get('schema', {}).get('fields')
if fields is not None:
fields = [field for field in fields if field['name'] not in columns]
resource['schema']['fields'] = fields
def process_resources(_res_iter):
for rows in _res_iter:
def process_rows(_rows):
for row in _rows:
for column in columns:
if column in row:
del row[column]
yield row
yield process_rows(rows)
spew(datapackage, process_resources(res_iter))
|
flexible
|
{
"blob_id": "17b3fb44d9e7a09fe3b807b47bdc0248b6960634",
"index": 4022,
"step-1": "<mask token>\n\n\ndef process_resources(_res_iter):\n for rows in _res_iter:\n\n def process_rows(_rows):\n for row in _rows:\n for column in columns:\n if column in row:\n del row[column]\n yield row\n yield process_rows(rows)\n\n\n<mask token>\n",
"step-2": "<mask token>\nfor resource in datapackage['resources']:\n fields = resource.get('schema', {}).get('fields')\n if fields is not None:\n fields = [field for field in fields if field['name'] not in columns]\n resource['schema']['fields'] = fields\n\n\ndef process_resources(_res_iter):\n for rows in _res_iter:\n\n def process_rows(_rows):\n for row in _rows:\n for column in columns:\n if column in row:\n del row[column]\n yield row\n yield process_rows(rows)\n\n\nspew(datapackage, process_resources(res_iter))\n",
"step-3": "<mask token>\nparams, datapackage, res_iter = ingest()\ncolumns = params['columns']\nfor resource in datapackage['resources']:\n fields = resource.get('schema', {}).get('fields')\n if fields is not None:\n fields = [field for field in fields if field['name'] not in columns]\n resource['schema']['fields'] = fields\n\n\ndef process_resources(_res_iter):\n for rows in _res_iter:\n\n def process_rows(_rows):\n for row in _rows:\n for column in columns:\n if column in row:\n del row[column]\n yield row\n yield process_rows(rows)\n\n\nspew(datapackage, process_resources(res_iter))\n",
"step-4": "from datapackage_pipelines.wrapper import ingest, spew\nparams, datapackage, res_iter = ingest()\ncolumns = params['columns']\nfor resource in datapackage['resources']:\n fields = resource.get('schema', {}).get('fields')\n if fields is not None:\n fields = [field for field in fields if field['name'] not in columns]\n resource['schema']['fields'] = fields\n\n\ndef process_resources(_res_iter):\n for rows in _res_iter:\n\n def process_rows(_rows):\n for row in _rows:\n for column in columns:\n if column in row:\n del row[column]\n yield row\n yield process_rows(rows)\n\n\nspew(datapackage, process_resources(res_iter))\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def partition_benthic(reach, runoff, runoff_mass, erosion_mass):
from .parameters import soil, stream_channel, benthic
try:
reach = self.region.flow_file.fetch(reach)
q, v, l = reach.q, reach.v, reach.l
except AttributeError:
return None, None, (None, None)
mean_runoff = runoff.mean()
baseflow = np.subtract(q, mean_runoff, out=np.zeros(self.i.n_dates),
where=q > mean_runoff)
total_flow = runoff + baseflow
mixing_cell = 40.0
cross_section = total_flow / v
width = stream_channel.a * np.power(cross_section, stream_channel.b)
depth = cross_section / width
surface_area = width * l
volume = np.array([depth * surface_area, benthic.depth * surface_area *
benthic.porosity])
runoff_conc = np.divide(runoff_mass, runoff, out=np.zeros(self.i.
n_dates), where=runoff != 0)
daily_conc = np.divide(runoff_mass + erosion_mass, mixing_cell, out=np.
zeros(self.i.n_dates), where=(runoff_mass + erosion_mass > 0.0) & (
mixing_cell > 0.0))
mass_input = np.vstack([runoff_mass + (1.0 - soil.prben) * erosion_mass,
soil.prben * erosion_mass]).T
fw1, fw2, theta, sed_conv_factor, omega = solute_holding_capacity(depth,
surface_area, self.i.koc)
k_adj = np.array(total_flow / mixing_cell + (self.i.deg_photolysis +
self.i.deg_hydrolysis) * fw1 + self.i.deg_wc * fw1 + self.i.
deg_benthic * (1 - fw1))
aqconc_avg_wb, daily_avg, daily_peak = concentration_loop(self.i.
n_dates, daily_conc, k_adj, volume, mass_input, fw1, fw2, omega,
theta, self.i.deg_aqueous)
return map(lambda x: x * 1000000.0, (runoff_conc, aqconc_avg_wb,
daily_avg, daily_peak))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def partition_benthic(reach, runoff, runoff_mass, erosion_mass):
from .parameters import soil, stream_channel, benthic
try:
reach = self.region.flow_file.fetch(reach)
q, v, l = reach.q, reach.v, reach.l
except AttributeError:
return None, None, (None, None)
mean_runoff = runoff.mean()
baseflow = np.subtract(q, mean_runoff, out=np.zeros(self.i.n_dates),
where=q > mean_runoff)
total_flow = runoff + baseflow
mixing_cell = 40.0
cross_section = total_flow / v
width = stream_channel.a * np.power(cross_section, stream_channel.b)
depth = cross_section / width
surface_area = width * l
volume = np.array([depth * surface_area, benthic.depth * surface_area *
benthic.porosity])
runoff_conc = np.divide(runoff_mass, runoff, out=np.zeros(self.i.
n_dates), where=runoff != 0)
daily_conc = np.divide(runoff_mass + erosion_mass, mixing_cell, out=np.
zeros(self.i.n_dates), where=(runoff_mass + erosion_mass > 0.0) & (
mixing_cell > 0.0))
mass_input = np.vstack([runoff_mass + (1.0 - soil.prben) * erosion_mass,
soil.prben * erosion_mass]).T
fw1, fw2, theta, sed_conv_factor, omega = solute_holding_capacity(depth,
surface_area, self.i.koc)
k_adj = np.array(total_flow / mixing_cell + (self.i.deg_photolysis +
self.i.deg_hydrolysis) * fw1 + self.i.deg_wc * fw1 + self.i.
deg_benthic * (1 - fw1))
aqconc_avg_wb, daily_avg, daily_peak = concentration_loop(self.i.
n_dates, daily_conc, k_adj, volume, mass_input, fw1, fw2, omega,
theta, self.i.deg_aqueous)
return map(lambda x: x * 1000000.0, (runoff_conc, aqconc_avg_wb,
daily_avg, daily_peak))
@njit
def concentration_loop(n_dates, daily_concentration, k_adj, daily_volume,
mass_input, fw1, fw2, omega, theta, deg_aq):
daily_peak = np.zeros((2, n_dates))
daily_avg = np.zeros((2, n_dates))
aqconc_avg_wb = np.zeros(n_dates)
exp_k = np.exp(-k_adj)
aqconc_wb = 0
antecedent_mass = np.zeros(2)
for day in range(daily_concentration.size):
daily_mass = antecedent_mass + mass_input[day]
daily_peak[0, day] = daily_mass[0] * fw1[day] / daily_volume[day, 0]
daily_peak[1, day] = daily_mass[1] * fw2[day] / daily_volume[day, 1]
aqconc_wb += daily_concentration[day]
aqconc_avg_wb[day] = aqconc_wb / k_adj[day] * (1 - exp_k[day])
aqconc_wb *= exp_k[day]
new_aqconc, wc_avg, benthic_avg = simultaneous_diffeq(k_adj[day],
deg_aq, omega, theta[day], daily_peak[:, day])
daily_avg[0, day] = wc_avg
daily_avg[1, day] = benthic_avg
antecedent_mass[0] = new_aqconc[0] / fw1[day] * daily_volume[day, 0]
antecedent_mass[1] = new_aqconc[1] / fw2[day] * daily_volume[day, 1]
return aqconc_avg_wb, daily_avg, daily_peak
<|reserved_special_token_0|>
def solute_holding_capacity(depth, surface_area, koc):
"""Calculates Solute Holding capacities and mass transfer between water column and benthic regions"""
from .parameters import benthic, water_column
vol1 = depth * surface_area
vol2a = benthic.depth * surface_area
vol2 = vol2a * benthic.porosity
kow = koc / 0.35
kpdoc1 = kow * 0.074
kpdoc2 = koc
xkpb = 0.436 * kow ** 0.907
vol1a = depth[0] * surface_area
m_sed_1 = water_column.sused * vol1a * 0.001
m_bio_1 = water_column.plmas * vol1a * 0.001
m_doc_1 = water_column.doc * vol1a * 0.001
kd_sed_1 = koc * water_column.froc * 0.001
kd_sed_2 = koc * benthic.froc * 0.001
kd_bio = xkpb / 1000.0
kd_doc_1 = kpdoc1 / 1000.0
kd_doc_2 = kpdoc2 / 1000.0
m_sed_2 = benthic.bulk_density * vol2a * 1000.0
m_bio_2 = benthic.bnmas * surface_area * 0.001
m_doc_2 = benthic.doc * vol2 * 0.001
capacity_1 = (kd_sed_1 * m_sed_1 + kd_bio * m_bio_1 + kd_doc_1 *
m_doc_1 + vol1)
capacity_2 = (kd_sed_2 * m_sed_2 + kd_bio * m_bio_2 + kd_doc_2 *
m_doc_2 + vol2)
fw1 = vol1 / capacity_1
fw2 = vol2 / capacity_2
theta = capacity_2 / capacity_1
sed_conv_factor = vol2 / fw2 / m_sed_2
omega = benthic.d_over_dx / benthic.depth
return fw1, fw2, theta, sed_conv_factor, omega
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def partition_benthic(reach, runoff, runoff_mass, erosion_mass):
from .parameters import soil, stream_channel, benthic
try:
reach = self.region.flow_file.fetch(reach)
q, v, l = reach.q, reach.v, reach.l
except AttributeError:
return None, None, (None, None)
mean_runoff = runoff.mean()
baseflow = np.subtract(q, mean_runoff, out=np.zeros(self.i.n_dates),
where=q > mean_runoff)
total_flow = runoff + baseflow
mixing_cell = 40.0
cross_section = total_flow / v
width = stream_channel.a * np.power(cross_section, stream_channel.b)
depth = cross_section / width
surface_area = width * l
volume = np.array([depth * surface_area, benthic.depth * surface_area *
benthic.porosity])
runoff_conc = np.divide(runoff_mass, runoff, out=np.zeros(self.i.
n_dates), where=runoff != 0)
daily_conc = np.divide(runoff_mass + erosion_mass, mixing_cell, out=np.
zeros(self.i.n_dates), where=(runoff_mass + erosion_mass > 0.0) & (
mixing_cell > 0.0))
mass_input = np.vstack([runoff_mass + (1.0 - soil.prben) * erosion_mass,
soil.prben * erosion_mass]).T
fw1, fw2, theta, sed_conv_factor, omega = solute_holding_capacity(depth,
surface_area, self.i.koc)
k_adj = np.array(total_flow / mixing_cell + (self.i.deg_photolysis +
self.i.deg_hydrolysis) * fw1 + self.i.deg_wc * fw1 + self.i.
deg_benthic * (1 - fw1))
aqconc_avg_wb, daily_avg, daily_peak = concentration_loop(self.i.
n_dates, daily_conc, k_adj, volume, mass_input, fw1, fw2, omega,
theta, self.i.deg_aqueous)
return map(lambda x: x * 1000000.0, (runoff_conc, aqconc_avg_wb,
daily_avg, daily_peak))
@njit
def concentration_loop(n_dates, daily_concentration, k_adj, daily_volume,
mass_input, fw1, fw2, omega, theta, deg_aq):
daily_peak = np.zeros((2, n_dates))
daily_avg = np.zeros((2, n_dates))
aqconc_avg_wb = np.zeros(n_dates)
exp_k = np.exp(-k_adj)
aqconc_wb = 0
antecedent_mass = np.zeros(2)
for day in range(daily_concentration.size):
daily_mass = antecedent_mass + mass_input[day]
daily_peak[0, day] = daily_mass[0] * fw1[day] / daily_volume[day, 0]
daily_peak[1, day] = daily_mass[1] * fw2[day] / daily_volume[day, 1]
aqconc_wb += daily_concentration[day]
aqconc_avg_wb[day] = aqconc_wb / k_adj[day] * (1 - exp_k[day])
aqconc_wb *= exp_k[day]
new_aqconc, wc_avg, benthic_avg = simultaneous_diffeq(k_adj[day],
deg_aq, omega, theta[day], daily_peak[:, day])
daily_avg[0, day] = wc_avg
daily_avg[1, day] = benthic_avg
antecedent_mass[0] = new_aqconc[0] / fw1[day] * daily_volume[day, 0]
antecedent_mass[1] = new_aqconc[1] / fw2[day] * daily_volume[day, 1]
return aqconc_avg_wb, daily_avg, daily_peak
@njit
def simultaneous_diffeq(gamma1, gamma2, omega, theta, daily_aq_peak):
"""
ANALYTICAL SOLUTION FOR THE TWO SIMULTANEOUS DIFFERENTIAL EQNS:
dm1/dt = Am1 + Bm2
dm2/dt = Em1 + Fm2
WITH INITIAL VALUES m1 AND m2 FOR m1 AND m2
mn1 IS OUTPUT VALUE FOR m1 AFTER TIME T
mn2 IS OUTPUT VALUE FOR m2 AFTER TIME T
mavg1 IS AVERAGE VALUE OF m1 OVER TIME T
"""
t_end = 86400.0
m1, m2 = daily_aq_peak
a = -gamma1 - omega * theta
b = omega * theta
e = omega
f = -gamma2 - omega
af = a + f
dif = 4 * (f * a - b * e)
bbb = np.sqrt(af * af - dif)
root1 = (af + bbb) / 2.0
root2 = (af - bbb) / 2.0
dd = (root1 - a) / b
ee = (root2 - a) / b
ff = ee - dd
x1 = (ee * m1 - m2) / ff
y1 = (m2 - dd * m1) / ff
rt1 = root1 * t_end
rt2 = root2 * t_end
exrt1 = np.exp(rt1)
exrt2 = np.exp(rt2)
ccc = x1 * exrt1
ddd = y1 * exrt2
mn = np.zeros(2)
mn[0] = ccc + ddd
mn[1] = dd * ccc + ee * ddd
gx = x1 / root1
hx = y1 / root2
term1 = gx * exrt1
term2 = hx * exrt2
term3 = -gx
term4 = -hx
mavg_wc = (term1 + term2 + term3 + term4) / t_end
mavg_ben = (term1 * dd + term2 * ee + term3 * dd + term4 * ee) / t_end
return mn, mavg_wc, mavg_ben
def solute_holding_capacity(depth, surface_area, koc):
"""Calculates Solute Holding capacities and mass transfer between water column and benthic regions"""
from .parameters import benthic, water_column
vol1 = depth * surface_area
vol2a = benthic.depth * surface_area
vol2 = vol2a * benthic.porosity
kow = koc / 0.35
kpdoc1 = kow * 0.074
kpdoc2 = koc
xkpb = 0.436 * kow ** 0.907
vol1a = depth[0] * surface_area
m_sed_1 = water_column.sused * vol1a * 0.001
m_bio_1 = water_column.plmas * vol1a * 0.001
m_doc_1 = water_column.doc * vol1a * 0.001
kd_sed_1 = koc * water_column.froc * 0.001
kd_sed_2 = koc * benthic.froc * 0.001
kd_bio = xkpb / 1000.0
kd_doc_1 = kpdoc1 / 1000.0
kd_doc_2 = kpdoc2 / 1000.0
m_sed_2 = benthic.bulk_density * vol2a * 1000.0
m_bio_2 = benthic.bnmas * surface_area * 0.001
m_doc_2 = benthic.doc * vol2 * 0.001
capacity_1 = (kd_sed_1 * m_sed_1 + kd_bio * m_bio_1 + kd_doc_1 *
m_doc_1 + vol1)
capacity_2 = (kd_sed_2 * m_sed_2 + kd_bio * m_bio_2 + kd_doc_2 *
m_doc_2 + vol2)
fw1 = vol1 / capacity_1
fw2 = vol2 / capacity_2
theta = capacity_2 / capacity_1
sed_conv_factor = vol2 / fw2 / m_sed_2
omega = benthic.d_over_dx / benthic.depth
return fw1, fw2, theta, sed_conv_factor, omega
<|reserved_special_token_1|>
benthic_params = {'depth': 0.05, 'porosity': 0.65, 'bulk_density': 1,
'froc': 0, 'doc': 5, 'bnmas': 0, 'd_over_dx': 1}
def partition_benthic(reach, runoff, runoff_mass, erosion_mass):
from .parameters import soil, stream_channel, benthic
try:
reach = self.region.flow_file.fetch(reach)
q, v, l = reach.q, reach.v, reach.l
except AttributeError:
return None, None, (None, None)
mean_runoff = runoff.mean()
baseflow = np.subtract(q, mean_runoff, out=np.zeros(self.i.n_dates),
where=q > mean_runoff)
total_flow = runoff + baseflow
mixing_cell = 40.0
cross_section = total_flow / v
width = stream_channel.a * np.power(cross_section, stream_channel.b)
depth = cross_section / width
surface_area = width * l
volume = np.array([depth * surface_area, benthic.depth * surface_area *
benthic.porosity])
runoff_conc = np.divide(runoff_mass, runoff, out=np.zeros(self.i.
n_dates), where=runoff != 0)
daily_conc = np.divide(runoff_mass + erosion_mass, mixing_cell, out=np.
zeros(self.i.n_dates), where=(runoff_mass + erosion_mass > 0.0) & (
mixing_cell > 0.0))
mass_input = np.vstack([runoff_mass + (1.0 - soil.prben) * erosion_mass,
soil.prben * erosion_mass]).T
fw1, fw2, theta, sed_conv_factor, omega = solute_holding_capacity(depth,
surface_area, self.i.koc)
k_adj = np.array(total_flow / mixing_cell + (self.i.deg_photolysis +
self.i.deg_hydrolysis) * fw1 + self.i.deg_wc * fw1 + self.i.
deg_benthic * (1 - fw1))
aqconc_avg_wb, daily_avg, daily_peak = concentration_loop(self.i.
n_dates, daily_conc, k_adj, volume, mass_input, fw1, fw2, omega,
theta, self.i.deg_aqueous)
return map(lambda x: x * 1000000.0, (runoff_conc, aqconc_avg_wb,
daily_avg, daily_peak))
@njit
def concentration_loop(n_dates, daily_concentration, k_adj, daily_volume,
mass_input, fw1, fw2, omega, theta, deg_aq):
daily_peak = np.zeros((2, n_dates))
daily_avg = np.zeros((2, n_dates))
aqconc_avg_wb = np.zeros(n_dates)
exp_k = np.exp(-k_adj)
aqconc_wb = 0
antecedent_mass = np.zeros(2)
for day in range(daily_concentration.size):
daily_mass = antecedent_mass + mass_input[day]
daily_peak[0, day] = daily_mass[0] * fw1[day] / daily_volume[day, 0]
daily_peak[1, day] = daily_mass[1] * fw2[day] / daily_volume[day, 1]
aqconc_wb += daily_concentration[day]
aqconc_avg_wb[day] = aqconc_wb / k_adj[day] * (1 - exp_k[day])
aqconc_wb *= exp_k[day]
new_aqconc, wc_avg, benthic_avg = simultaneous_diffeq(k_adj[day],
deg_aq, omega, theta[day], daily_peak[:, day])
daily_avg[0, day] = wc_avg
daily_avg[1, day] = benthic_avg
antecedent_mass[0] = new_aqconc[0] / fw1[day] * daily_volume[day, 0]
antecedent_mass[1] = new_aqconc[1] / fw2[day] * daily_volume[day, 1]
return aqconc_avg_wb, daily_avg, daily_peak
@njit
def simultaneous_diffeq(gamma1, gamma2, omega, theta, daily_aq_peak):
"""
ANALYTICAL SOLUTION FOR THE TWO SIMULTANEOUS DIFFERENTIAL EQNS:
dm1/dt = Am1 + Bm2
dm2/dt = Em1 + Fm2
WITH INITIAL VALUES m1 AND m2 FOR m1 AND m2
mn1 IS OUTPUT VALUE FOR m1 AFTER TIME T
mn2 IS OUTPUT VALUE FOR m2 AFTER TIME T
mavg1 IS AVERAGE VALUE OF m1 OVER TIME T
"""
t_end = 86400.0
m1, m2 = daily_aq_peak
a = -gamma1 - omega * theta
b = omega * theta
e = omega
f = -gamma2 - omega
af = a + f
dif = 4 * (f * a - b * e)
bbb = np.sqrt(af * af - dif)
root1 = (af + bbb) / 2.0
root2 = (af - bbb) / 2.0
dd = (root1 - a) / b
ee = (root2 - a) / b
ff = ee - dd
x1 = (ee * m1 - m2) / ff
y1 = (m2 - dd * m1) / ff
rt1 = root1 * t_end
rt2 = root2 * t_end
exrt1 = np.exp(rt1)
exrt2 = np.exp(rt2)
ccc = x1 * exrt1
ddd = y1 * exrt2
mn = np.zeros(2)
mn[0] = ccc + ddd
mn[1] = dd * ccc + ee * ddd
gx = x1 / root1
hx = y1 / root2
term1 = gx * exrt1
term2 = hx * exrt2
term3 = -gx
term4 = -hx
mavg_wc = (term1 + term2 + term3 + term4) / t_end
mavg_ben = (term1 * dd + term2 * ee + term3 * dd + term4 * ee) / t_end
return mn, mavg_wc, mavg_ben
def solute_holding_capacity(depth, surface_area, koc):
"""Calculates Solute Holding capacities and mass transfer between water column and benthic regions"""
from .parameters import benthic, water_column
vol1 = depth * surface_area
vol2a = benthic.depth * surface_area
vol2 = vol2a * benthic.porosity
kow = koc / 0.35
kpdoc1 = kow * 0.074
kpdoc2 = koc
xkpb = 0.436 * kow ** 0.907
vol1a = depth[0] * surface_area
m_sed_1 = water_column.sused * vol1a * 0.001
m_bio_1 = water_column.plmas * vol1a * 0.001
m_doc_1 = water_column.doc * vol1a * 0.001
kd_sed_1 = koc * water_column.froc * 0.001
kd_sed_2 = koc * benthic.froc * 0.001
kd_bio = xkpb / 1000.0
kd_doc_1 = kpdoc1 / 1000.0
kd_doc_2 = kpdoc2 / 1000.0
m_sed_2 = benthic.bulk_density * vol2a * 1000.0
m_bio_2 = benthic.bnmas * surface_area * 0.001
m_doc_2 = benthic.doc * vol2 * 0.001
capacity_1 = (kd_sed_1 * m_sed_1 + kd_bio * m_bio_1 + kd_doc_1 *
m_doc_1 + vol1)
capacity_2 = (kd_sed_2 * m_sed_2 + kd_bio * m_bio_2 + kd_doc_2 *
m_doc_2 + vol2)
fw1 = vol1 / capacity_1
fw2 = vol2 / capacity_2
theta = capacity_2 / capacity_1
sed_conv_factor = vol2 / fw2 / m_sed_2
omega = benthic.d_over_dx / benthic.depth
return fw1, fw2, theta, sed_conv_factor, omega
<|reserved_special_token_1|>
# Benthic Parameters - USEPA OPP defaults from EXAMS
benthic_params = {
"depth": 0.05, # benthic depth (m)
"porosity": 0.65, # benthic porosity
"bulk_density": 1, # bulk density, dry solid mass/total vol (g/cm3)
"froc": 0, # benthic organic carbon fraction
"doc": 5, # benthic dissolved organic carbon content (mg/L)
"bnmas": 0, # benthic biomass intensity (g/m2)
"d_over_dx": 1 # mass transfer coefficient for exchange between benthic and water column (m/s)
# (can be modified later if data exists)
}
def partition_benthic(reach, runoff, runoff_mass, erosion_mass):
from .parameters import soil, stream_channel, benthic
try:
reach = self.region.flow_file.fetch(reach)
q, v, l = reach.q, reach.v, reach.l
except AttributeError:
return None, None, (None, None)
mean_runoff = runoff.mean() # m3/d
baseflow = np.subtract(q, mean_runoff, out=np.zeros(self.i.n_dates), where=(q > mean_runoff))
total_flow = runoff + baseflow
mixing_cell = 40. # meters
cross_section = total_flow / v
width = stream_channel.a * np.power(cross_section, stream_channel.b)
depth = cross_section / width
surface_area = width * l
volume = np.array([(depth * surface_area), # Water column
(benthic.depth * surface_area * benthic.porosity)]) # Benthic zone
# Compute concentration in runoff of runoff mass and erosion mass
runoff_conc = np.divide(runoff_mass, runoff, out=np.zeros(self.i.n_dates), where=(runoff != 0))
daily_conc = np.divide(runoff_mass + erosion_mass, mixing_cell, out=np.zeros(self.i.n_dates),
where=(runoff_mass + erosion_mass > 0.0) & (mixing_cell > 0.0))
# Divide mass loading between water column and benthic zones
mass_input = np.vstack([runoff_mass + ((1. - soil.prben) * erosion_mass), # Water Column
soil.prben * erosion_mass]).T # Benthic
# Partition concentration into benthic and water column concentrations
# This needs to be cleaned up
# Compute benthic solute holding capacity
fw1, fw2, theta, sed_conv_factor, omega = solute_holding_capacity(depth, surface_area, self.i.koc)
k_adj = np.array((total_flow / mixing_cell) + (self.i.deg_photolysis + self.i.deg_hydrolysis) * fw1 + \
(self.i.deg_wc * fw1) + self.i.deg_benthic * (1 - fw1))
aqconc_avg_wb, daily_avg, daily_peak = \
concentration_loop(self.i.n_dates, daily_conc, k_adj, volume,
mass_input, fw1, fw2, omega, theta, self.i.deg_aqueous)
return map(lambda x: x * 1000000., (runoff_conc, aqconc_avg_wb, daily_avg, daily_peak))
@njit
def concentration_loop(n_dates, daily_concentration, k_adj, daily_volume, mass_input, fw1, fw2, omega, theta, deg_aq):
# Beginning day aquatic concentrations, considered Peak Aqueous Daily Conc in Water Column
daily_peak = np.zeros((2, n_dates))
daily_avg = np.zeros((2, n_dates))
aqconc_avg_wb = np.zeros(n_dates)
# Reset starting values
exp_k = np.exp(-k_adj)
aqconc_wb = 0
antecedent_mass = np.zeros(2) # mn
for day in range(daily_concentration.size):
# Add mass input to antecedent mass
daily_mass = antecedent_mass + mass_input[day]
# Convert to aqueous concentrations (peak) at beginning of day
# JCH - fw comes from solute_holding_capacity. Fraction going into each section. Should fw[0] + fw[1] = 1?
daily_peak[0, day] = daily_mass[0] * fw1[day] / daily_volume[day, 0]
daily_peak[1, day] = daily_mass[1] * fw2[day] / daily_volume[day, 1]
# Compute daily average concentration in the water body - when no Benthic layer considered
aqconc_wb += daily_concentration[day] # initial water body concentration for current time step
# Daily avg aq conc in water body, area under curve/t = Ci/k*(1-e^-k), NO benthic
aqconc_avg_wb[day] = aqconc_wb / k_adj[day] * (1 - exp_k[day])
# initial water body concentration for next time step
aqconc_wb *= exp_k[day]
# For simul diffeq soln: mn1,mn2,mavg1,mavg2 = new_aqconc1, new_aqconc2, aqconc_avg1[d], aqconc_avg2[d]
# Note: aqconc_avg1 and aqconc_avg2 are outputted - Daily avg aq conc in WC and Benthic regions
new_aqconc, wc_avg, benthic_avg = simultaneous_diffeq(k_adj[day], deg_aq, omega, theta[day], daily_peak[:, day])
daily_avg[0, day] = wc_avg
daily_avg[1, day] = benthic_avg
# Masses m1 and m2 after time step, t_end
antecedent_mass[0] = new_aqconc[0] / fw1[day] * daily_volume[day, 0]
antecedent_mass[1] = new_aqconc[1] / fw2[day] * daily_volume[day, 1]
return aqconc_avg_wb, daily_avg, daily_peak
@njit
def simultaneous_diffeq(gamma1, gamma2, omega, theta, daily_aq_peak):
    """
    ANALYTICAL SOLUTION FOR THE TWO SIMULTANEOUS DIFFERENTIAL EQNS:
              dm1/dt = Am1 + Bm2
              dm2/dt = Em1 + Fm2
    WITH INITIAL VALUES m1 AND m2 FOR m1 AND m2

    Coefficients are built from the physical parameters:
        gamma1: first-order loss rate in the water column
        gamma2: first-order loss rate in the benthic region
        omega:  water-column/benthic mass-transfer coefficient
        theta:  capacity ratio (benthic / water column)
        daily_aq_peak: length-2 initial concentrations (m1 water column, m2 benthic)

    Returns:
        mn:       length-2 array of m1, m2 after the time step (one day)
        mavg_wc:  average value of m1 (water column) over the time step
        mavg_ben: average value of m2 (benthic) over the time step
    """
    t_end = 86400.  # seconds, time step of ONE DAY
    m1, m2 = daily_aq_peak

    # Reduce the model parameters to the A, B, E, F coefficients of the linear system.
    a = -gamma1 - omega * theta
    b = omega * theta
    e = omega
    f = -gamma2 - omega

    # Eigenvalues of the 2x2 system matrix (roots of its characteristic polynomial).
    af = a + f
    dif = 4 * ((f * a) - (b * e))
    bbb = np.sqrt(af * af - dif)  # assumes a real discriminant — TODO confirm parameter ranges guarantee this

    root1 = (af + bbb) / 2.
    root2 = (af - bbb) / 2.

    # Eigenvector components and the constants fixed by the initial values.
    dd = (root1 - a) / b
    ee = (root2 - a) / b
    ff = ee - dd
    x1 = (ee * m1 - m2) / ff
    y1 = (m2 - dd * m1) / ff

    # Evaluate the exponential solution at t_end to get end-of-step concentrations.
    rt1 = root1 * t_end
    rt2 = root2 * t_end
    exrt1 = np.exp(rt1)
    exrt2 = np.exp(rt2)
    ccc = x1 * exrt1
    ddd = y1 * exrt2

    # values for m1 and m2 after time step t_end
    mn = np.zeros(2)
    mn[0] = ccc + ddd  # Water column
    mn[1] = dd * ccc + ee * ddd  # Benthic

    # AVERAGE DAILY CONCENTRATION SOLUTION: integral of the solution over [0, t_end],
    # divided by t_end. Set up for daily average, but changeable via the time step.
    gx = x1 / root1
    hx = y1 / root2

    term1 = gx * exrt1  # term3 = -X1/root1*exp(root1*T1)
    term2 = hx * exrt2  # term4 = -Y1/root2*exp(root2*T1
    term3 = -gx
    term4 = -hx

    mavg_wc = (term1 + term2 + term3 + term4) / t_end  # Water column
    mavg_ben = (term1 * dd + term2 * ee + term3 * dd + term4 * ee) / t_end  # Benthic

    return mn, mavg_wc, mavg_ben
def solute_holding_capacity(depth, surface_area, koc):
    """Compute solute holding capacities and water-column/benthic exchange terms.

    Partitions the pesticide between aqueous phase, sediment, biota, and DOC
    in both regions using default EXAMS relations, then derives the aqueous
    fractions and the littoral-to-benthic mass-transfer coefficient.

    Parameters:
        depth:        daily water-column depth series (depth[0] is the reference day)
        surface_area: daily water-body surface area series
        koc:          organic-carbon partitioning coefficient of the chemical

    Returns:
        fw1:             daily aqueous fraction in the water column
        fw2:             daily aqueous fraction in the benthic region
        theta:           daily capacity ratio (benthic / water column)
        sed_conv_factor: converts pore-water conc to total conc normalized to sediment mass
        omega:           water-column/benthic mass-transfer coefficient
    """
    from .parameters import benthic, water_column
    # Aqueous volumes: water column (~= total), bulk benthic, and benthic pore water.
    water_col_volume = depth * surface_area
    benthic_bulk_volume = benthic.depth * surface_area
    pore_water_volume = benthic_bulk_volume * benthic.porosity
    # Default EXAMS partitioning relations (see EXAMS 2.98 documentation).
    kow = koc / .35
    kpdoc_littoral = kow * .074
    kpdoc_benthic = koc
    kp_biota = 0.436 * kow ** .907
    # Littoral (water column) sorbent masses, based on the reference-day volume.
    reference_volume = depth[0] * surface_area
    sediment_mass_wc = water_column.sused * reference_volume * .001
    biota_mass_wc = water_column.plmas * reference_volume * .001
    doc_mass_wc = water_column.doc * reference_volume * .001
    # Partitioning coefficients of the individual media [m3/kg].
    kd_sediment_wc = koc * water_column.froc * .001
    kd_sediment_ben = koc * benthic.froc * .001
    kd_biota = kp_biota / 1000.
    kd_doc_wc = kpdoc_littoral / 1000.
    kd_doc_ben = kpdoc_benthic / 1000.
    # Benthic sorbent masses (sediment mass per EXAMS: BULKD/PCTWA*VOL2*100000).
    sediment_mass_ben = benthic.bulk_density * benthic_bulk_volume * 1000.
    biota_mass_ben = benthic.bnmas * surface_area * .001
    doc_mass_ben = benthic.doc * pore_water_volume * .001
    # Total solute holding capacity of each region: sorbed phases plus aqueous volume.
    capacity_wc = kd_sediment_wc * sediment_mass_wc + kd_biota * biota_mass_wc + kd_doc_wc * doc_mass_wc + water_col_volume
    capacity_ben = kd_sediment_ben * sediment_mass_ben + kd_biota * biota_mass_ben + kd_doc_ben * doc_mass_ben + pore_water_volume
    # Fraction of each region's solute that remains in the aqueous phase.
    fw1 = water_col_volume / capacity_wc
    fw2 = pore_water_volume / capacity_ben
    theta = capacity_ben / capacity_wc
    sed_conv_factor = pore_water_volume / fw2 / sediment_mass_ben
    # Littoral-to-benthic mass-transfer coefficient.
    omega = benthic.d_over_dx / benthic.depth
    return fw1, fw2, theta, sed_conv_factor, omega
|
flexible
|
{
"blob_id": "5890525b16b42578ac06e7ab2170c5613feea0a5",
"index": 6494,
"step-1": "<mask token>\n\n\ndef partition_benthic(reach, runoff, runoff_mass, erosion_mass):\n from .parameters import soil, stream_channel, benthic\n try:\n reach = self.region.flow_file.fetch(reach)\n q, v, l = reach.q, reach.v, reach.l\n except AttributeError:\n return None, None, (None, None)\n mean_runoff = runoff.mean()\n baseflow = np.subtract(q, mean_runoff, out=np.zeros(self.i.n_dates),\n where=q > mean_runoff)\n total_flow = runoff + baseflow\n mixing_cell = 40.0\n cross_section = total_flow / v\n width = stream_channel.a * np.power(cross_section, stream_channel.b)\n depth = cross_section / width\n surface_area = width * l\n volume = np.array([depth * surface_area, benthic.depth * surface_area *\n benthic.porosity])\n runoff_conc = np.divide(runoff_mass, runoff, out=np.zeros(self.i.\n n_dates), where=runoff != 0)\n daily_conc = np.divide(runoff_mass + erosion_mass, mixing_cell, out=np.\n zeros(self.i.n_dates), where=(runoff_mass + erosion_mass > 0.0) & (\n mixing_cell > 0.0))\n mass_input = np.vstack([runoff_mass + (1.0 - soil.prben) * erosion_mass,\n soil.prben * erosion_mass]).T\n fw1, fw2, theta, sed_conv_factor, omega = solute_holding_capacity(depth,\n surface_area, self.i.koc)\n k_adj = np.array(total_flow / mixing_cell + (self.i.deg_photolysis +\n self.i.deg_hydrolysis) * fw1 + self.i.deg_wc * fw1 + self.i.\n deg_benthic * (1 - fw1))\n aqconc_avg_wb, daily_avg, daily_peak = concentration_loop(self.i.\n n_dates, daily_conc, k_adj, volume, mass_input, fw1, fw2, omega,\n theta, self.i.deg_aqueous)\n return map(lambda x: x * 1000000.0, (runoff_conc, aqconc_avg_wb,\n daily_avg, daily_peak))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef partition_benthic(reach, runoff, runoff_mass, erosion_mass):\n from .parameters import soil, stream_channel, benthic\n try:\n reach = self.region.flow_file.fetch(reach)\n q, v, l = reach.q, reach.v, reach.l\n except AttributeError:\n return None, None, (None, None)\n mean_runoff = runoff.mean()\n baseflow = np.subtract(q, mean_runoff, out=np.zeros(self.i.n_dates),\n where=q > mean_runoff)\n total_flow = runoff + baseflow\n mixing_cell = 40.0\n cross_section = total_flow / v\n width = stream_channel.a * np.power(cross_section, stream_channel.b)\n depth = cross_section / width\n surface_area = width * l\n volume = np.array([depth * surface_area, benthic.depth * surface_area *\n benthic.porosity])\n runoff_conc = np.divide(runoff_mass, runoff, out=np.zeros(self.i.\n n_dates), where=runoff != 0)\n daily_conc = np.divide(runoff_mass + erosion_mass, mixing_cell, out=np.\n zeros(self.i.n_dates), where=(runoff_mass + erosion_mass > 0.0) & (\n mixing_cell > 0.0))\n mass_input = np.vstack([runoff_mass + (1.0 - soil.prben) * erosion_mass,\n soil.prben * erosion_mass]).T\n fw1, fw2, theta, sed_conv_factor, omega = solute_holding_capacity(depth,\n surface_area, self.i.koc)\n k_adj = np.array(total_flow / mixing_cell + (self.i.deg_photolysis +\n self.i.deg_hydrolysis) * fw1 + self.i.deg_wc * fw1 + self.i.\n deg_benthic * (1 - fw1))\n aqconc_avg_wb, daily_avg, daily_peak = concentration_loop(self.i.\n n_dates, daily_conc, k_adj, volume, mass_input, fw1, fw2, omega,\n theta, self.i.deg_aqueous)\n return map(lambda x: x * 1000000.0, (runoff_conc, aqconc_avg_wb,\n daily_avg, daily_peak))\n\n\n@njit\ndef concentration_loop(n_dates, daily_concentration, k_adj, daily_volume,\n mass_input, fw1, fw2, omega, theta, deg_aq):\n daily_peak = np.zeros((2, n_dates))\n daily_avg = np.zeros((2, n_dates))\n aqconc_avg_wb = np.zeros(n_dates)\n exp_k = np.exp(-k_adj)\n aqconc_wb = 0\n antecedent_mass = np.zeros(2)\n for day in range(daily_concentration.size):\n 
daily_mass = antecedent_mass + mass_input[day]\n daily_peak[0, day] = daily_mass[0] * fw1[day] / daily_volume[day, 0]\n daily_peak[1, day] = daily_mass[1] * fw2[day] / daily_volume[day, 1]\n aqconc_wb += daily_concentration[day]\n aqconc_avg_wb[day] = aqconc_wb / k_adj[day] * (1 - exp_k[day])\n aqconc_wb *= exp_k[day]\n new_aqconc, wc_avg, benthic_avg = simultaneous_diffeq(k_adj[day],\n deg_aq, omega, theta[day], daily_peak[:, day])\n daily_avg[0, day] = wc_avg\n daily_avg[1, day] = benthic_avg\n antecedent_mass[0] = new_aqconc[0] / fw1[day] * daily_volume[day, 0]\n antecedent_mass[1] = new_aqconc[1] / fw2[day] * daily_volume[day, 1]\n return aqconc_avg_wb, daily_avg, daily_peak\n\n\n<mask token>\n\n\ndef solute_holding_capacity(depth, surface_area, koc):\n \"\"\"Calculates Solute Holding capacities and mass transfer between water column and benthic regions\"\"\"\n from .parameters import benthic, water_column\n vol1 = depth * surface_area\n vol2a = benthic.depth * surface_area\n vol2 = vol2a * benthic.porosity\n kow = koc / 0.35\n kpdoc1 = kow * 0.074\n kpdoc2 = koc\n xkpb = 0.436 * kow ** 0.907\n vol1a = depth[0] * surface_area\n m_sed_1 = water_column.sused * vol1a * 0.001\n m_bio_1 = water_column.plmas * vol1a * 0.001\n m_doc_1 = water_column.doc * vol1a * 0.001\n kd_sed_1 = koc * water_column.froc * 0.001\n kd_sed_2 = koc * benthic.froc * 0.001\n kd_bio = xkpb / 1000.0\n kd_doc_1 = kpdoc1 / 1000.0\n kd_doc_2 = kpdoc2 / 1000.0\n m_sed_2 = benthic.bulk_density * vol2a * 1000.0\n m_bio_2 = benthic.bnmas * surface_area * 0.001\n m_doc_2 = benthic.doc * vol2 * 0.001\n capacity_1 = (kd_sed_1 * m_sed_1 + kd_bio * m_bio_1 + kd_doc_1 *\n m_doc_1 + vol1)\n capacity_2 = (kd_sed_2 * m_sed_2 + kd_bio * m_bio_2 + kd_doc_2 *\n m_doc_2 + vol2)\n fw1 = vol1 / capacity_1\n fw2 = vol2 / capacity_2\n theta = capacity_2 / capacity_1\n sed_conv_factor = vol2 / fw2 / m_sed_2\n omega = benthic.d_over_dx / benthic.depth\n return fw1, fw2, theta, sed_conv_factor, omega\n",
"step-3": "<mask token>\n\n\ndef partition_benthic(reach, runoff, runoff_mass, erosion_mass):\n from .parameters import soil, stream_channel, benthic\n try:\n reach = self.region.flow_file.fetch(reach)\n q, v, l = reach.q, reach.v, reach.l\n except AttributeError:\n return None, None, (None, None)\n mean_runoff = runoff.mean()\n baseflow = np.subtract(q, mean_runoff, out=np.zeros(self.i.n_dates),\n where=q > mean_runoff)\n total_flow = runoff + baseflow\n mixing_cell = 40.0\n cross_section = total_flow / v\n width = stream_channel.a * np.power(cross_section, stream_channel.b)\n depth = cross_section / width\n surface_area = width * l\n volume = np.array([depth * surface_area, benthic.depth * surface_area *\n benthic.porosity])\n runoff_conc = np.divide(runoff_mass, runoff, out=np.zeros(self.i.\n n_dates), where=runoff != 0)\n daily_conc = np.divide(runoff_mass + erosion_mass, mixing_cell, out=np.\n zeros(self.i.n_dates), where=(runoff_mass + erosion_mass > 0.0) & (\n mixing_cell > 0.0))\n mass_input = np.vstack([runoff_mass + (1.0 - soil.prben) * erosion_mass,\n soil.prben * erosion_mass]).T\n fw1, fw2, theta, sed_conv_factor, omega = solute_holding_capacity(depth,\n surface_area, self.i.koc)\n k_adj = np.array(total_flow / mixing_cell + (self.i.deg_photolysis +\n self.i.deg_hydrolysis) * fw1 + self.i.deg_wc * fw1 + self.i.\n deg_benthic * (1 - fw1))\n aqconc_avg_wb, daily_avg, daily_peak = concentration_loop(self.i.\n n_dates, daily_conc, k_adj, volume, mass_input, fw1, fw2, omega,\n theta, self.i.deg_aqueous)\n return map(lambda x: x * 1000000.0, (runoff_conc, aqconc_avg_wb,\n daily_avg, daily_peak))\n\n\n@njit\ndef concentration_loop(n_dates, daily_concentration, k_adj, daily_volume,\n mass_input, fw1, fw2, omega, theta, deg_aq):\n daily_peak = np.zeros((2, n_dates))\n daily_avg = np.zeros((2, n_dates))\n aqconc_avg_wb = np.zeros(n_dates)\n exp_k = np.exp(-k_adj)\n aqconc_wb = 0\n antecedent_mass = np.zeros(2)\n for day in range(daily_concentration.size):\n 
daily_mass = antecedent_mass + mass_input[day]\n daily_peak[0, day] = daily_mass[0] * fw1[day] / daily_volume[day, 0]\n daily_peak[1, day] = daily_mass[1] * fw2[day] / daily_volume[day, 1]\n aqconc_wb += daily_concentration[day]\n aqconc_avg_wb[day] = aqconc_wb / k_adj[day] * (1 - exp_k[day])\n aqconc_wb *= exp_k[day]\n new_aqconc, wc_avg, benthic_avg = simultaneous_diffeq(k_adj[day],\n deg_aq, omega, theta[day], daily_peak[:, day])\n daily_avg[0, day] = wc_avg\n daily_avg[1, day] = benthic_avg\n antecedent_mass[0] = new_aqconc[0] / fw1[day] * daily_volume[day, 0]\n antecedent_mass[1] = new_aqconc[1] / fw2[day] * daily_volume[day, 1]\n return aqconc_avg_wb, daily_avg, daily_peak\n\n\n@njit\ndef simultaneous_diffeq(gamma1, gamma2, omega, theta, daily_aq_peak):\n \"\"\"\n ANALYTICAL SOLUTION FOR THE TWO SIMULTANEOUS DIFFERENTIAL EQNS:\n dm1/dt = Am1 + Bm2\n dm2/dt = Em1 + Fm2\n WITH INITIAL VALUES m1 AND m2 FOR m1 AND m2\n mn1 IS OUTPUT VALUE FOR m1 AFTER TIME T\n mn2 IS OUTPUT VALUE FOR m2 AFTER TIME T\n mavg1 IS AVERAGE VALUE OF m1 OVER TIME T\n \"\"\"\n t_end = 86400.0\n m1, m2 = daily_aq_peak\n a = -gamma1 - omega * theta\n b = omega * theta\n e = omega\n f = -gamma2 - omega\n af = a + f\n dif = 4 * (f * a - b * e)\n bbb = np.sqrt(af * af - dif)\n root1 = (af + bbb) / 2.0\n root2 = (af - bbb) / 2.0\n dd = (root1 - a) / b\n ee = (root2 - a) / b\n ff = ee - dd\n x1 = (ee * m1 - m2) / ff\n y1 = (m2 - dd * m1) / ff\n rt1 = root1 * t_end\n rt2 = root2 * t_end\n exrt1 = np.exp(rt1)\n exrt2 = np.exp(rt2)\n ccc = x1 * exrt1\n ddd = y1 * exrt2\n mn = np.zeros(2)\n mn[0] = ccc + ddd\n mn[1] = dd * ccc + ee * ddd\n gx = x1 / root1\n hx = y1 / root2\n term1 = gx * exrt1\n term2 = hx * exrt2\n term3 = -gx\n term4 = -hx\n mavg_wc = (term1 + term2 + term3 + term4) / t_end\n mavg_ben = (term1 * dd + term2 * ee + term3 * dd + term4 * ee) / t_end\n return mn, mavg_wc, mavg_ben\n\n\ndef solute_holding_capacity(depth, surface_area, koc):\n \"\"\"Calculates Solute Holding capacities 
and mass transfer between water column and benthic regions\"\"\"\n from .parameters import benthic, water_column\n vol1 = depth * surface_area\n vol2a = benthic.depth * surface_area\n vol2 = vol2a * benthic.porosity\n kow = koc / 0.35\n kpdoc1 = kow * 0.074\n kpdoc2 = koc\n xkpb = 0.436 * kow ** 0.907\n vol1a = depth[0] * surface_area\n m_sed_1 = water_column.sused * vol1a * 0.001\n m_bio_1 = water_column.plmas * vol1a * 0.001\n m_doc_1 = water_column.doc * vol1a * 0.001\n kd_sed_1 = koc * water_column.froc * 0.001\n kd_sed_2 = koc * benthic.froc * 0.001\n kd_bio = xkpb / 1000.0\n kd_doc_1 = kpdoc1 / 1000.0\n kd_doc_2 = kpdoc2 / 1000.0\n m_sed_2 = benthic.bulk_density * vol2a * 1000.0\n m_bio_2 = benthic.bnmas * surface_area * 0.001\n m_doc_2 = benthic.doc * vol2 * 0.001\n capacity_1 = (kd_sed_1 * m_sed_1 + kd_bio * m_bio_1 + kd_doc_1 *\n m_doc_1 + vol1)\n capacity_2 = (kd_sed_2 * m_sed_2 + kd_bio * m_bio_2 + kd_doc_2 *\n m_doc_2 + vol2)\n fw1 = vol1 / capacity_1\n fw2 = vol2 / capacity_2\n theta = capacity_2 / capacity_1\n sed_conv_factor = vol2 / fw2 / m_sed_2\n omega = benthic.d_over_dx / benthic.depth\n return fw1, fw2, theta, sed_conv_factor, omega\n",
"step-4": "benthic_params = {'depth': 0.05, 'porosity': 0.65, 'bulk_density': 1,\n 'froc': 0, 'doc': 5, 'bnmas': 0, 'd_over_dx': 1}\n\n\ndef partition_benthic(reach, runoff, runoff_mass, erosion_mass):\n from .parameters import soil, stream_channel, benthic\n try:\n reach = self.region.flow_file.fetch(reach)\n q, v, l = reach.q, reach.v, reach.l\n except AttributeError:\n return None, None, (None, None)\n mean_runoff = runoff.mean()\n baseflow = np.subtract(q, mean_runoff, out=np.zeros(self.i.n_dates),\n where=q > mean_runoff)\n total_flow = runoff + baseflow\n mixing_cell = 40.0\n cross_section = total_flow / v\n width = stream_channel.a * np.power(cross_section, stream_channel.b)\n depth = cross_section / width\n surface_area = width * l\n volume = np.array([depth * surface_area, benthic.depth * surface_area *\n benthic.porosity])\n runoff_conc = np.divide(runoff_mass, runoff, out=np.zeros(self.i.\n n_dates), where=runoff != 0)\n daily_conc = np.divide(runoff_mass + erosion_mass, mixing_cell, out=np.\n zeros(self.i.n_dates), where=(runoff_mass + erosion_mass > 0.0) & (\n mixing_cell > 0.0))\n mass_input = np.vstack([runoff_mass + (1.0 - soil.prben) * erosion_mass,\n soil.prben * erosion_mass]).T\n fw1, fw2, theta, sed_conv_factor, omega = solute_holding_capacity(depth,\n surface_area, self.i.koc)\n k_adj = np.array(total_flow / mixing_cell + (self.i.deg_photolysis +\n self.i.deg_hydrolysis) * fw1 + self.i.deg_wc * fw1 + self.i.\n deg_benthic * (1 - fw1))\n aqconc_avg_wb, daily_avg, daily_peak = concentration_loop(self.i.\n n_dates, daily_conc, k_adj, volume, mass_input, fw1, fw2, omega,\n theta, self.i.deg_aqueous)\n return map(lambda x: x * 1000000.0, (runoff_conc, aqconc_avg_wb,\n daily_avg, daily_peak))\n\n\n@njit\ndef concentration_loop(n_dates, daily_concentration, k_adj, daily_volume,\n mass_input, fw1, fw2, omega, theta, deg_aq):\n daily_peak = np.zeros((2, n_dates))\n daily_avg = np.zeros((2, n_dates))\n aqconc_avg_wb = np.zeros(n_dates)\n exp_k = 
np.exp(-k_adj)\n aqconc_wb = 0\n antecedent_mass = np.zeros(2)\n for day in range(daily_concentration.size):\n daily_mass = antecedent_mass + mass_input[day]\n daily_peak[0, day] = daily_mass[0] * fw1[day] / daily_volume[day, 0]\n daily_peak[1, day] = daily_mass[1] * fw2[day] / daily_volume[day, 1]\n aqconc_wb += daily_concentration[day]\n aqconc_avg_wb[day] = aqconc_wb / k_adj[day] * (1 - exp_k[day])\n aqconc_wb *= exp_k[day]\n new_aqconc, wc_avg, benthic_avg = simultaneous_diffeq(k_adj[day],\n deg_aq, omega, theta[day], daily_peak[:, day])\n daily_avg[0, day] = wc_avg\n daily_avg[1, day] = benthic_avg\n antecedent_mass[0] = new_aqconc[0] / fw1[day] * daily_volume[day, 0]\n antecedent_mass[1] = new_aqconc[1] / fw2[day] * daily_volume[day, 1]\n return aqconc_avg_wb, daily_avg, daily_peak\n\n\n@njit\ndef simultaneous_diffeq(gamma1, gamma2, omega, theta, daily_aq_peak):\n \"\"\"\n ANALYTICAL SOLUTION FOR THE TWO SIMULTANEOUS DIFFERENTIAL EQNS:\n dm1/dt = Am1 + Bm2\n dm2/dt = Em1 + Fm2\n WITH INITIAL VALUES m1 AND m2 FOR m1 AND m2\n mn1 IS OUTPUT VALUE FOR m1 AFTER TIME T\n mn2 IS OUTPUT VALUE FOR m2 AFTER TIME T\n mavg1 IS AVERAGE VALUE OF m1 OVER TIME T\n \"\"\"\n t_end = 86400.0\n m1, m2 = daily_aq_peak\n a = -gamma1 - omega * theta\n b = omega * theta\n e = omega\n f = -gamma2 - omega\n af = a + f\n dif = 4 * (f * a - b * e)\n bbb = np.sqrt(af * af - dif)\n root1 = (af + bbb) / 2.0\n root2 = (af - bbb) / 2.0\n dd = (root1 - a) / b\n ee = (root2 - a) / b\n ff = ee - dd\n x1 = (ee * m1 - m2) / ff\n y1 = (m2 - dd * m1) / ff\n rt1 = root1 * t_end\n rt2 = root2 * t_end\n exrt1 = np.exp(rt1)\n exrt2 = np.exp(rt2)\n ccc = x1 * exrt1\n ddd = y1 * exrt2\n mn = np.zeros(2)\n mn[0] = ccc + ddd\n mn[1] = dd * ccc + ee * ddd\n gx = x1 / root1\n hx = y1 / root2\n term1 = gx * exrt1\n term2 = hx * exrt2\n term3 = -gx\n term4 = -hx\n mavg_wc = (term1 + term2 + term3 + term4) / t_end\n mavg_ben = (term1 * dd + term2 * ee + term3 * dd + term4 * ee) / t_end\n return mn, mavg_wc, 
mavg_ben\n\n\ndef solute_holding_capacity(depth, surface_area, koc):\n \"\"\"Calculates Solute Holding capacities and mass transfer between water column and benthic regions\"\"\"\n from .parameters import benthic, water_column\n vol1 = depth * surface_area\n vol2a = benthic.depth * surface_area\n vol2 = vol2a * benthic.porosity\n kow = koc / 0.35\n kpdoc1 = kow * 0.074\n kpdoc2 = koc\n xkpb = 0.436 * kow ** 0.907\n vol1a = depth[0] * surface_area\n m_sed_1 = water_column.sused * vol1a * 0.001\n m_bio_1 = water_column.plmas * vol1a * 0.001\n m_doc_1 = water_column.doc * vol1a * 0.001\n kd_sed_1 = koc * water_column.froc * 0.001\n kd_sed_2 = koc * benthic.froc * 0.001\n kd_bio = xkpb / 1000.0\n kd_doc_1 = kpdoc1 / 1000.0\n kd_doc_2 = kpdoc2 / 1000.0\n m_sed_2 = benthic.bulk_density * vol2a * 1000.0\n m_bio_2 = benthic.bnmas * surface_area * 0.001\n m_doc_2 = benthic.doc * vol2 * 0.001\n capacity_1 = (kd_sed_1 * m_sed_1 + kd_bio * m_bio_1 + kd_doc_1 *\n m_doc_1 + vol1)\n capacity_2 = (kd_sed_2 * m_sed_2 + kd_bio * m_bio_2 + kd_doc_2 *\n m_doc_2 + vol2)\n fw1 = vol1 / capacity_1\n fw2 = vol2 / capacity_2\n theta = capacity_2 / capacity_1\n sed_conv_factor = vol2 / fw2 / m_sed_2\n omega = benthic.d_over_dx / benthic.depth\n return fw1, fw2, theta, sed_conv_factor, omega\n",
"step-5": "# Benthic Parameters - USEPA OPP defaults from EXAMS\nbenthic_params = {\n \"depth\": 0.05, # benthic depth (m)\n \"porosity\": 0.65, # benthic porosity\n \"bulk_density\": 1, # bulk density, dry solid mass/total vol (g/cm3)\n \"froc\": 0, # benthic organic carbon fraction\n \"doc\": 5, # benthic dissolved organic carbon content (mg/L)\n \"bnmas\": 0, # benthic biomass intensity (g/m2)\n \"d_over_dx\": 1 # mass transfer coefficient for exchange between benthic and water column (m/s)\n # (can be modified later if data exists)\n}\ndef partition_benthic(reach, runoff, runoff_mass, erosion_mass):\n from .parameters import soil, stream_channel, benthic\n\n try:\n reach = self.region.flow_file.fetch(reach)\n q, v, l = reach.q, reach.v, reach.l\n except AttributeError:\n return None, None, (None, None)\n\n mean_runoff = runoff.mean() # m3/d\n baseflow = np.subtract(q, mean_runoff, out=np.zeros(self.i.n_dates), where=(q > mean_runoff))\n total_flow = runoff + baseflow\n mixing_cell = 40. # meters\n cross_section = total_flow / v\n width = stream_channel.a * np.power(cross_section, stream_channel.b)\n depth = cross_section / width\n surface_area = width * l\n volume = np.array([(depth * surface_area), # Water column\n (benthic.depth * surface_area * benthic.porosity)]) # Benthic zone\n\n # Compute concentration in runoff of runoff mass and erosion mass\n runoff_conc = np.divide(runoff_mass, runoff, out=np.zeros(self.i.n_dates), where=(runoff != 0))\n daily_conc = np.divide(runoff_mass + erosion_mass, mixing_cell, out=np.zeros(self.i.n_dates),\n where=(runoff_mass + erosion_mass > 0.0) & (mixing_cell > 0.0))\n\n # Divide mass loading between water column and benthic zones\n mass_input = np.vstack([runoff_mass + ((1. 
- soil.prben) * erosion_mass), # Water Column\n soil.prben * erosion_mass]).T # Benthic\n # Partition concentration into benthic and water column concentrations\n # This needs to be cleaned up\n # Compute benthic solute holding capacity\n fw1, fw2, theta, sed_conv_factor, omega = solute_holding_capacity(depth, surface_area, self.i.koc)\n\n k_adj = np.array((total_flow / mixing_cell) + (self.i.deg_photolysis + self.i.deg_hydrolysis) * fw1 + \\\n (self.i.deg_wc * fw1) + self.i.deg_benthic * (1 - fw1))\n\n aqconc_avg_wb, daily_avg, daily_peak = \\\n concentration_loop(self.i.n_dates, daily_conc, k_adj, volume,\n mass_input, fw1, fw2, omega, theta, self.i.deg_aqueous)\n\n return map(lambda x: x * 1000000., (runoff_conc, aqconc_avg_wb, daily_avg, daily_peak))\n\n@njit\ndef concentration_loop(n_dates, daily_concentration, k_adj, daily_volume, mass_input, fw1, fw2, omega, theta, deg_aq):\n # Beginning day aquatic concentrations, considered Peak Aqueous Daily Conc in Water Column\n daily_peak = np.zeros((2, n_dates))\n daily_avg = np.zeros((2, n_dates))\n aqconc_avg_wb = np.zeros(n_dates)\n\n # Reset starting values\n exp_k = np.exp(-k_adj)\n aqconc_wb = 0\n antecedent_mass = np.zeros(2) # mn\n\n for day in range(daily_concentration.size):\n # Add mass input to antecedent mass\n daily_mass = antecedent_mass + mass_input[day]\n\n # Convert to aqueous concentrations (peak) at beginning of day\n # JCH - fw comes from solute_holding_capacity. Fraction going into each section. 
Should fw[0] + fw[1] = 1?\n daily_peak[0, day] = daily_mass[0] * fw1[day] / daily_volume[day, 0]\n daily_peak[1, day] = daily_mass[1] * fw2[day] / daily_volume[day, 1]\n\n # Compute daily average concentration in the water body - when no Benthic layer considered\n aqconc_wb += daily_concentration[day] # initial water body concentration for current time step\n\n # Daily avg aq conc in water body, area under curve/t = Ci/k*(1-e^-k), NO benthic\n aqconc_avg_wb[day] = aqconc_wb / k_adj[day] * (1 - exp_k[day])\n\n # initial water body concentration for next time step\n aqconc_wb *= exp_k[day]\n\n # For simul diffeq soln: mn1,mn2,mavg1,mavg2 = new_aqconc1, new_aqconc2, aqconc_avg1[d], aqconc_avg2[d]\n # Note: aqconc_avg1 and aqconc_avg2 are outputted - Daily avg aq conc in WC and Benthic regions\n new_aqconc, wc_avg, benthic_avg = simultaneous_diffeq(k_adj[day], deg_aq, omega, theta[day], daily_peak[:, day])\n daily_avg[0, day] = wc_avg\n daily_avg[1, day] = benthic_avg\n\n # Masses m1 and m2 after time step, t_end\n antecedent_mass[0] = new_aqconc[0] / fw1[day] * daily_volume[day, 0]\n antecedent_mass[1] = new_aqconc[1] / fw2[day] * daily_volume[day, 1]\n\n return aqconc_avg_wb, daily_avg, daily_peak\n@njit\ndef simultaneous_diffeq(gamma1, gamma2, omega, theta, daily_aq_peak):\n \"\"\"\n ANALYTICAL SOLUTION FOR THE TWO SIMULTANEOUS DIFFERENTIAL EQNS:\n dm1/dt = Am1 + Bm2\n dm2/dt = Em1 + Fm2\n WITH INITIAL VALUES m1 AND m2 FOR m1 AND m2\n mn1 IS OUTPUT VALUE FOR m1 AFTER TIME T\n mn2 IS OUTPUT VALUE FOR m2 AFTER TIME T\n mavg1 IS AVERAGE VALUE OF m1 OVER TIME T\n \"\"\"\n\n t_end = 86400. 
# seconds, time step of ONE DAY\n m1, m2 = daily_aq_peak\n\n # Calculate constants for simultaneous_diffeq: A,B,E,F\n # This reduces the model equivalent parameters to the coefficients needed for solving simultaneous_diffeq\n a = -gamma1 - omega * theta\n b = omega * theta\n e = omega\n f = -gamma2 - omega\n\n af = a + f\n dif = 4 * ((f * a) - (b * e))\n bbb = np.sqrt(af * af - dif)\n\n root1 = (af + bbb) / 2.\n root2 = (af - bbb) / 2.\n\n dd = (root1 - a) / b\n ee = (root2 - a) / b\n ff = ee - dd\n x1 = (ee * m1 - m2) / ff\n y1 = (m2 - dd * m1) / ff\n\n # Calculate new concentrations for next step\n rt1 = root1 * t_end\n rt2 = root2 * t_end\n exrt1 = np.exp(rt1)\n exrt2 = np.exp(rt2)\n ccc = x1 * exrt1\n ddd = y1 * exrt2\n\n # values for m1 and m2 after time step t_end\n mn = np.zeros(2)\n mn[0] = ccc + ddd # Water column\n mn[1] = dd * ccc + ee * ddd # Benthic\n\n # AVERAGE DAILY CONCENTRATION SOLUTION: set up for daily average, but can be changed by adjusting time step\n gx = x1 / root1\n hx = y1 / root2\n\n term1 = gx * exrt1 # term3 = -X1/root1*exp(root1*T1)\n term2 = hx * exrt2 # term4 = -Y1/root2*exp(root2*T1\n term3 = -gx\n term4 = -hx\n\n mavg_wc = (term1 + term2 + term3 + term4) / t_end # Water column\n mavg_ben = (term1 * dd + term2 * ee + term3 * dd + term4 * ee) / t_end # Benthic\n\n return mn, mavg_wc, mavg_ben\n\n\ndef solute_holding_capacity(depth, surface_area, koc):\n \"\"\"Calculates Solute Holding capacities and mass transfer between water column and benthic regions\"\"\"\n\n from .parameters import benthic, water_column\n\n # Aqueous volumes in each region\n vol1 = depth * surface_area # total volume in water column, approximately equal to water volume alone\n vol2a = benthic.depth * surface_area # total benthic volume\n vol2 = vol2a * benthic.porosity # total benthic pore water volume\n\n # Default EXAMS conditions for partitioning\n kow = koc / .35 # DEFAULT EXAMS CONDITION ON Kow p.35\n kpdoc1 = kow * .074 # DEFAULT RELATION IN EXAMS 
(LITTORAL)\n kpdoc2 = koc # DEFAULT RELATION IN EXAMS (BENTHIC) p.16 of EXAMS 2.98 (or is it Kow*.46 ?)\n xkpb = 0.436 * kow ** .907 # DEFAULT RELATION IN EXAMS\n\n # mass in littoral region\n vol1a = depth[0] * surface_area # initial volume corresponding with suspended matter reference\n m_sed_1 = water_column.sused * vol1a * .001 # SEDIMENT MASS LITTORAL\n m_bio_1 = water_column.plmas * vol1a * .001 # BIOLOGICAL MASS LITTORAL\n m_doc_1 = water_column.doc * vol1a * .001 # DOC MASS LITTORAL\n\n # partitioning coefficients of individual media\n kd_sed_1 = koc * water_column.froc * .001 # Kd of sediment in littoral [m3/kg]\n kd_sed_2 = koc * benthic.froc * .001 # Kd of sediment in benthic\n kd_bio = xkpb / 1000. # Kd of biological organisms\n kd_doc_1 = kpdoc1 / 1000. # Kd of DOC in littoral region\n kd_doc_2 = kpdoc2 / 1000. # Kd of DOC in benthic region\n\n # mass in benthic region\n m_sed_2 = benthic.bulk_density * vol2a * 1000. # as defined by EXAMS parameters m_sed_2 = BULKD/PCTWA*VOL2*100000.\n m_bio_2 = benthic.bnmas * surface_area * .001\n m_doc_2 = benthic.doc * vol2 * .001\n\n # solute holding capacity in regions 1 and 2\n capacity_1 = kd_sed_1 * m_sed_1 + kd_bio * m_bio_1 + kd_doc_1 * m_doc_1 + vol1\n capacity_2 = kd_sed_2 * m_sed_2 + kd_bio * m_bio_2 + kd_doc_2 * m_doc_2 + vol2\n\n # Fraction going to water column and benthic\n fw1 = vol1 / capacity_1 # fw1 is daily, vol1 is daily\n fw2 = vol2 / capacity_2\n\n theta = capacity_2 / capacity_1\n\n sed_conv_factor = vol2 / fw2 / m_sed_2 # converts pore water to [Total Conc normalized to sed mass]\n\n # Omega mass transfer - Calculates littoral to benthic mass transfer coefficient\n omega = benthic.d_over_dx / benthic.depth # (m3/hr)/(3600 s/hr)\n\n return fw1, fw2, theta, sed_conv_factor, omega\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class TestComponents(unittest.TestCase):
def setUp(self):
self.cpu = CPU(cycles=5)
self.disk = DiskIO(cycles=5)
self.network = Network(cycles=5)
def test_cpu_length(self):
cpu_data = self.cpu.get_data(start=0, stop=1, noise=0.01)
self.assertEqual(cpu_data.shape[0], 100)
<|reserved_special_token_0|>
def test_network_length(self):
cpu_data = self.network.get_data(start=0, stop=1, noise=0.01)
self.assertEqual(cpu_data.shape[0], 100)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestComponents(unittest.TestCase):
def setUp(self):
self.cpu = CPU(cycles=5)
self.disk = DiskIO(cycles=5)
self.network = Network(cycles=5)
def test_cpu_length(self):
cpu_data = self.cpu.get_data(start=0, stop=1, noise=0.01)
self.assertEqual(cpu_data.shape[0], 100)
def test_disk_length(self):
cpu_data = self.disk.get_data(start=0, stop=1, noise=0.01)
self.assertEqual(cpu_data.shape[0], 100)
def test_network_length(self):
cpu_data = self.network.get_data(start=0, stop=1, noise=0.01)
self.assertEqual(cpu_data.shape[0], 100)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append('/home/adityas/UGA/SensorWeb/scripts/Summer2018/code')
<|reserved_special_token_0|>
class TestComponents(unittest.TestCase):
def setUp(self):
self.cpu = CPU(cycles=5)
self.disk = DiskIO(cycles=5)
self.network = Network(cycles=5)
def test_cpu_length(self):
cpu_data = self.cpu.get_data(start=0, stop=1, noise=0.01)
self.assertEqual(cpu_data.shape[0], 100)
def test_disk_length(self):
cpu_data = self.disk.get_data(start=0, stop=1, noise=0.01)
self.assertEqual(cpu_data.shape[0], 100)
def test_network_length(self):
cpu_data = self.network.get_data(start=0, stop=1, noise=0.01)
self.assertEqual(cpu_data.shape[0], 100)
def test_visualize(self):
cpu_data = self.cpu.get_data(start=0, stop=1, noise=0.01)
disk_data = self.disk.get_data(start=0, stop=1, noise=0.01)
network_data = self.network.get_data(start=0, stop=1, noise=0.01)
plotter.plot(cpu_data)
plotter.plot(disk_data)
plotter.plot(network_data)
plotter.show()
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
import unittest
import sys
import matplotlib.pyplot as plotter
import numpy
sys.path.append('/home/adityas/UGA/SensorWeb/scripts/Summer2018/code')
from simulator.component import CPU, DiskIO, Network
class TestComponents(unittest.TestCase):
def setUp(self):
self.cpu = CPU(cycles=5)
self.disk = DiskIO(cycles=5)
self.network = Network(cycles=5)
def test_cpu_length(self):
cpu_data = self.cpu.get_data(start=0, stop=1, noise=0.01)
self.assertEqual(cpu_data.shape[0], 100)
def test_disk_length(self):
cpu_data = self.disk.get_data(start=0, stop=1, noise=0.01)
self.assertEqual(cpu_data.shape[0], 100)
def test_network_length(self):
cpu_data = self.network.get_data(start=0, stop=1, noise=0.01)
self.assertEqual(cpu_data.shape[0], 100)
def test_visualize(self):
cpu_data = self.cpu.get_data(start=0, stop=1, noise=0.01)
disk_data = self.disk.get_data(start=0, stop=1, noise=0.01)
network_data = self.network.get_data(start=0, stop=1, noise=0.01)
plotter.plot(cpu_data)
plotter.plot(disk_data)
plotter.plot(network_data)
plotter.show()
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
import unittest
import sys
import matplotlib.pyplot as plotter
import numpy
sys.path.append("/home/adityas/UGA/SensorWeb/scripts/Summer2018/code")
from simulator.component import CPU, DiskIO, Network
class TestComponents(unittest.TestCase):
    """Smoke tests for the simulated CPU, disk-I/O and network components."""
    def setUp(self):
        # A fresh 5-cycle instance of each component before every test.
        self.cpu = CPU(cycles=5)
        self.disk = DiskIO(cycles=5)
        self.network = Network(cycles=5)
    def test_cpu_length(self):
        samples = self.cpu.get_data(start=0, stop=1, noise=0.01)
        self.assertEqual(samples.shape[0], 100)
    def test_disk_length(self):
        samples = self.disk.get_data(start=0, stop=1, noise=0.01)
        self.assertEqual(samples.shape[0], 100)
    def test_network_length(self):
        samples = self.network.get_data(start=0, stop=1, noise=0.01)
        self.assertEqual(samples.shape[0], 100)
    def test_visualize(self):
        # Visual sanity check: draw one trace per component on a shared figure.
        for component in (self.cpu, self.disk, self.network):
            plotter.plot(component.get_data(start=0, stop=1, noise=0.01))
        plotter.show()
if __name__ == "__main__":
unittest.main()
|
flexible
|
{
"blob_id": "4f54f3e306df3b861124adb4fe544089446e8021",
"index": 3453,
"step-1": "<mask token>\n\n\nclass TestComponents(unittest.TestCase):\n\n def setUp(self):\n self.cpu = CPU(cycles=5)\n self.disk = DiskIO(cycles=5)\n self.network = Network(cycles=5)\n\n def test_cpu_length(self):\n cpu_data = self.cpu.get_data(start=0, stop=1, noise=0.01)\n self.assertEqual(cpu_data.shape[0], 100)\n <mask token>\n\n def test_network_length(self):\n cpu_data = self.network.get_data(start=0, stop=1, noise=0.01)\n self.assertEqual(cpu_data.shape[0], 100)\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestComponents(unittest.TestCase):\n\n def setUp(self):\n self.cpu = CPU(cycles=5)\n self.disk = DiskIO(cycles=5)\n self.network = Network(cycles=5)\n\n def test_cpu_length(self):\n cpu_data = self.cpu.get_data(start=0, stop=1, noise=0.01)\n self.assertEqual(cpu_data.shape[0], 100)\n\n def test_disk_length(self):\n cpu_data = self.disk.get_data(start=0, stop=1, noise=0.01)\n self.assertEqual(cpu_data.shape[0], 100)\n\n def test_network_length(self):\n cpu_data = self.network.get_data(start=0, stop=1, noise=0.01)\n self.assertEqual(cpu_data.shape[0], 100)\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\nsys.path.append('/home/adityas/UGA/SensorWeb/scripts/Summer2018/code')\n<mask token>\n\n\nclass TestComponents(unittest.TestCase):\n\n def setUp(self):\n self.cpu = CPU(cycles=5)\n self.disk = DiskIO(cycles=5)\n self.network = Network(cycles=5)\n\n def test_cpu_length(self):\n cpu_data = self.cpu.get_data(start=0, stop=1, noise=0.01)\n self.assertEqual(cpu_data.shape[0], 100)\n\n def test_disk_length(self):\n cpu_data = self.disk.get_data(start=0, stop=1, noise=0.01)\n self.assertEqual(cpu_data.shape[0], 100)\n\n def test_network_length(self):\n cpu_data = self.network.get_data(start=0, stop=1, noise=0.01)\n self.assertEqual(cpu_data.shape[0], 100)\n\n def test_visualize(self):\n cpu_data = self.cpu.get_data(start=0, stop=1, noise=0.01)\n disk_data = self.disk.get_data(start=0, stop=1, noise=0.01)\n network_data = self.network.get_data(start=0, stop=1, noise=0.01)\n plotter.plot(cpu_data)\n plotter.plot(disk_data)\n plotter.plot(network_data)\n plotter.show()\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import unittest\nimport sys\nimport matplotlib.pyplot as plotter\nimport numpy\nsys.path.append('/home/adityas/UGA/SensorWeb/scripts/Summer2018/code')\nfrom simulator.component import CPU, DiskIO, Network\n\n\nclass TestComponents(unittest.TestCase):\n\n def setUp(self):\n self.cpu = CPU(cycles=5)\n self.disk = DiskIO(cycles=5)\n self.network = Network(cycles=5)\n\n def test_cpu_length(self):\n cpu_data = self.cpu.get_data(start=0, stop=1, noise=0.01)\n self.assertEqual(cpu_data.shape[0], 100)\n\n def test_disk_length(self):\n cpu_data = self.disk.get_data(start=0, stop=1, noise=0.01)\n self.assertEqual(cpu_data.shape[0], 100)\n\n def test_network_length(self):\n cpu_data = self.network.get_data(start=0, stop=1, noise=0.01)\n self.assertEqual(cpu_data.shape[0], 100)\n\n def test_visualize(self):\n cpu_data = self.cpu.get_data(start=0, stop=1, noise=0.01)\n disk_data = self.disk.get_data(start=0, stop=1, noise=0.01)\n network_data = self.network.get_data(start=0, stop=1, noise=0.01)\n plotter.plot(cpu_data)\n plotter.plot(disk_data)\n plotter.plot(network_data)\n plotter.show()\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "import unittest\nimport sys\nimport matplotlib.pyplot as plotter\nimport numpy\n\nsys.path.append(\"/home/adityas/UGA/SensorWeb/scripts/Summer2018/code\")\n\nfrom simulator.component import CPU, DiskIO, Network\n\n\nclass TestComponents(unittest.TestCase):\n\n def setUp(self):\n self.cpu = CPU(cycles=5)\n self.disk = DiskIO(cycles=5)\n self.network = Network(cycles=5)\n\n def test_cpu_length(self):\n cpu_data = self.cpu.get_data(start=0, stop=1, noise=0.01)\n self.assertEqual(cpu_data.shape[0], 100)\n\n def test_disk_length(self):\n cpu_data = self.disk.get_data(start=0, stop=1, noise=0.01)\n self.assertEqual(cpu_data.shape[0], 100)\n\n def test_network_length(self):\n cpu_data = self.network.get_data(start=0, stop=1, noise=0.01)\n self.assertEqual(cpu_data.shape[0], 100)\n\n def test_visualize(self):\n cpu_data = self.cpu.get_data(start=0, stop=1, noise=0.01)\n disk_data = self.disk.get_data(start=0, stop=1, noise=0.01)\n network_data = self.network.get_data(start=0, stop=1, noise=0.01)\n plotter.plot(cpu_data)\n plotter.plot(disk_data)\n plotter.plot(network_data)\n plotter.show()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"step-ids": [
4,
5,
7,
8,
9
]
}
|
[
4,
5,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def density(arr, ax=None, logx=False, logy=False, bins=25, mode='density',
    extent=None, contours=[], percentiles=True, relim=True, cmap=
    DEFAULT_CONT_COLORMAP, shading='auto', vmin=0.0, colorbar=False, **kwargs):
    """
    Creates diagrammatic representation of data density and/or frequency for either
    binary diagrams (X-Y) or ternary plots.
    Additional arguments are typically forwarded
    to respective :mod:`matplotlib` functions
    :func:`~matplotlib.pyplot.pcolormesh`,
    :func:`~matplotlib.pyplot.hist2d`,
    :func:`~matplotlib.pyplot.hexbin`,
    :func:`~matplotlib.pyplot.contour`, and
    :func:`~matplotlib.pyplot.contourf` (see Other Parameters, below).
    Parameters
    ----------
    arr : :class:`numpy.ndarray`
        Dataframe from which to draw data.
    ax : :class:`matplotlib.axes.Axes`, `None`
        The subplot to draw on.
    logx : :class:`bool`, `False`
        Whether to use a logspaced *grid* on the x axis. Values strictly >0 required.
    logy : :class:`bool`, `False`
        Whether to use a logspaced *grid* on the y axis. Values strictly >0 required.
    bins : :class:`int`, 20
        Number of bins used in the gridded functions (histograms, KDE evaluation grid).
    mode : :class:`str`, 'density'
        Different modes used here: ['density', 'hexbin', 'hist2d']
    extent : :class:`list`
        Predetermined extent of the grid for which to form the histogram/KDE. In the
        general form (xmin, xmax, ymin, ymax).
    contours : :class:`list`
        Contours to add to the plot, where :code:`mode='density'` is used.
    percentiles : :class:`bool`, `True`
        Whether contours specified are to be converted to percentiles.
    relim : :class:`bool`, :code:`True`
        Whether to relimit the plot based on xmin, xmax values.
    cmap : :class:`matplotlib.colors.Colormap`
        Colormap for mapping surfaces.
    vmin : :class:`float`, 0.
        Minimum value for colormap.
    shading : :class:`str`, 'auto'
        Shading to apply to pcolormesh.
    colorbar : :class:`bool`, False
        Whether to append a linked colorbar to the generated mappable image.
    {otherparams}
    Returns
    -------
    :class:`matplotlib.axes.Axes`
        Axes on which the densityplot is plotted.
    .. seealso::
        Functions:
            :func:`matplotlib.pyplot.pcolormesh`
            :func:`matplotlib.pyplot.hist2d`
            :func:`matplotlib.pyplot.contourf`
    Notes
    -----
    The default density estimates and derived contours are generated based on
    kernel density estimates. Assumptions around e.g. 95% of points lying within
    a 95% contour won't necessarily be valid for non-normally distributed data
    (instead, this represents the approximate 95% percentile on the kernel
    density estimate). Note that contours are currently only generated; for
    `mode="density"`; future updates may allow the use of a histogram
    basis, which would give results closer to 95% data percentiles.
    Todo
    ----
    * Allow generation of contours from histogram data, rather than just
        the kernel density estimate.
    * Implement an option and filter to 'scatter' points below the minimum threshold
        or maximum percentile contours.
    """
    # Default vmin: clip the lowest 2% of KDE heights (~98th percentile view)
    # when the caller left vmin unspecified.
    # NOTE(review): bitwise `&` on two bools — works, but `and` would be clearer.
    if (mode == 'density') & np.isclose(vmin, 0.0):
        vmin = 0.02
    # Three columns -> ternary projection; otherwise a standard binary plot.
    if arr.shape[-1] == 3:
        projection = 'ternary'
    else:
        projection = None
    ax = init_axes(ax=ax, projection=projection, **kwargs)
    pcolor, contour, contourf = get_axis_density_methods(ax)
    # Fully-transparent variant of the axes face colour, used for mesh edges.
    background_color = *ax.patch.get_facecolor()[:-1], 0.0
    if cmap is not None:
        if isinstance(cmap, str):
            cmap = plt.get_cmap(cmap)
        # Copy so set_under doesn't mutate the globally-registered colormap.
        cmap = copy.copy(cmap)
        cmap.set_under((1, 1, 1, 0))
    if mode == 'density':
        cbarlabel = 'Kernel Density Estimate'
    else:
        cbarlabel = 'Frequency'
    valid_rows = np.isfinite(arr).all(axis=-1)
    if mode in ['hexbin', 'hist2d'] and contours:
        raise NotImplementedError(
            "Contours are not currently implemented for 'hexbin' or 'hist2d' modes."
            )
    if arr.size > 0 and valid_rows.any():
        # Rows containing NaN/inf can't be plotted; drop them up front.
        arr = arr[valid_rows]
        if projection is None:
            x, y = arr.T
            grid = DensityGrid(x, y, bins=bins, logx=logx, logy=logy,
                extent=extent, **subkwargs(kwargs, DensityGrid))
            if mode == 'hexbin':
                mappable = ax.hexbin(x, y, gridsize=bins, cmap=cmap, extent
                    =grid.get_hex_extent(), xscale=['linear', 'log'][logx],
                    yscale=['linear', 'log'][logy], **subkwargs(kwargs, ax.
                    hexbin))
            elif mode == 'hist2d':
                _, _, _, im = ax.hist2d(x, y, bins=[grid.grid_xe, grid.
                    grid_ye], range=grid.get_range(), cmap=cmap, cmin=[0, 1
                    ][vmin > 0], **subkwargs(kwargs, ax.hist2d))
                mappable = im
            elif mode == 'density':
                zei = grid.kdefrom(arr, xtransform=[lambda x: x, np.log][
                    logx], ytransform=[lambda y: y, np.log][logy], mode=
                    'edges', **subkwargs(kwargs, grid.kdefrom))
                if percentiles:
                    # Convert the percentile-style vmin into a z-value cutoff.
                    vmin = percentile_contour_values_from_meshz(zei, [1.0 -
                        vmin])[1][0]
                    logger.debug('Updating `vmin` to percentile equiv: {:.2f}'
                        .format(vmin))
                if not contours:
                    # Continuous surface via pcolormesh over the bin edges.
                    mappable = pcolor(grid.grid_xei, grid.grid_yei, zei,
                        cmap=cmap, vmin=vmin, shading=shading, **subkwargs(
                        kwargs, pcolor))
                    mappable.set_edgecolor(background_color)
                    mappable.set_linestyle('None')
                    mappable.set_lw(0.0)
                else:
                    mappable = _add_contours(grid.grid_xei, grid.grid_yei,
                        zi=zei.reshape(grid.grid_xei.shape), ax=ax,
                        contours=contours, percentiles=percentiles, cmap=
                        cmap, vmin=vmin, **kwargs)
            if relim and extent is not None:
                ax.axis(extent)
        elif projection == 'ternary':
            # 'auto' can't be passed through to tripcolor.
            if shading == 'auto':
                shading = 'flat'
            # Zeros produce NaNs in the ternary heatmap calculations;
            # mask them out before renormalising rows to unit sum.
            arr[~(arr > 0).all(axis=1), :] = np.nan
            arr = close(arr)
            if mode == 'hexbin':
                raise NotImplementedError
            coords, zi, _ = ternary_heatmap(arr, bins=bins, mode=mode)
            if percentiles:
                # Convert the percentile-style vmin into a z-value cutoff.
                vmin = percentile_contour_values_from_meshz(zi, [1.0 - vmin])[1
                    ][0]
                logger.debug('Updating `vmin` to percentile equiv: {:.2f}'.
                    format(vmin))
            # Drop cells below the cutoff so the background stays transparent.
            fltr = (zi != 0) & (zi >= vmin)
            coords = coords[fltr.flatten(), :]
            zi = zi[fltr]
            if not contours:
                tri_poly_collection = pcolor(*coords.T, zi.flatten(), cmap=
                    cmap, vmin=vmin, shading=shading, **subkwargs(kwargs,
                    pcolor))
                mappable = tri_poly_collection
            else:
                mappable = _add_contours(*coords.T, zi=zi.flatten(), ax=ax,
                    contours=contours, percentiles=percentiles, cmap=cmap,
                    vmin=vmin, **kwargs)
            ax.set_aspect('equal')
    elif not arr.ndim in [0, 1, 2]:
        raise NotImplementedError
    if colorbar:
        cbkwargs = kwargs.copy()
        cbkwargs['label'] = cbarlabel
        add_colorbar(mappable, **cbkwargs)
    return ax
def _add_contours(*coords, zi=None, ax=None, contours=None, cmap=
    DEFAULT_CONT_COLORMAP, vmin=0.0, extent=None, **kwargs):
    """
    Add density-based contours to a plot.

    Parameters
    ----------
    coords : :class:`numpy.ndarray`
        Coordinate arrays for the grid (x, y for binary plots).
    zi : :class:`numpy.ndarray`
        Grid values (e.g. kernel density estimates) to contour.
    ax : :class:`matplotlib.axes.Axes`
        Axes to draw the contours on.
    contours : :class:`list` | :class:`int`
        Contour levels; interpreted as percentiles where ``percentiles=True``
        (the default, popped from ``kwargs``).
    cmap : :class:`matplotlib.colors.Colormap`
        Colormap for the contour surfaces.
    vmin : :class:`float`
        Minimum value for the colormap.
    extent : :class:`list`
        Extent of the grid in the general form (xmin, xmax, ymin, ymax).

    Returns
    -------
    Mappable contour set (usable with e.g. colorbars).
    """
    percentiles = kwargs.pop('percentiles', True)
    # `contours` previously defaulted to a mutable `[]`; `None` is equivalent
    # here, as both are falsy and fall through to any `levels` keyword.
    levels = contours or kwargs.get('levels', None)
    pcolor, contour, contourf = get_axis_density_methods(ax)
    if percentiles and not isinstance(levels, int):
        # Percentile-based contours: delegate the conversion of percentiles
        # to z-values (and the drawing) to plot_Z_percentiles.
        _cs = plot_Z_percentiles(*coords, zi=zi, ax=ax, percentiles=levels,
            extent=extent, cmap=cmap, **kwargs)
        mappable = _cs
    else:
        # Absolute-value contours: pick levels directly from the data range.
        if levels is None:
            levels = MaxNLocator(nbins=10).tick_values(zi.min(), zi.max())
        elif isinstance(levels, int):
            levels = MaxNLocator(nbins=levels).tick_values(zi.min(), zi.max())
        else:
            # Explicit non-integer levels with percentiles=False unsupported.
            raise NotImplementedError
        # Filled contours give the surface; line contours overlay boundaries.
        mappable = contourf(*coords, zi, extent=extent, levels=levels, cmap
            =cmap, vmin=vmin, **kwargs)
        contour(*coords, zi, extent=extent, levels=levels, cmap=cmap, vmin=
            vmin, **kwargs)
    return mappable
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logger = Handle(__name__)
def density(arr, ax=None, logx=False, logy=False, bins=25, mode='density',
    extent=None, contours=[], percentiles=True, relim=True, cmap=
    DEFAULT_CONT_COLORMAP, shading='auto', vmin=0.0, colorbar=False, **kwargs):
    """
    Creates diagrammatic representation of data density and/or frequency for either
    binary diagrams (X-Y) or ternary plots.
    Additional arguments are typically forwarded
    to respective :mod:`matplotlib` functions
    :func:`~matplotlib.pyplot.pcolormesh`,
    :func:`~matplotlib.pyplot.hist2d`,
    :func:`~matplotlib.pyplot.hexbin`,
    :func:`~matplotlib.pyplot.contour`, and
    :func:`~matplotlib.pyplot.contourf` (see Other Parameters, below).
    Parameters
    ----------
    arr : :class:`numpy.ndarray`
        Dataframe from which to draw data.
    ax : :class:`matplotlib.axes.Axes`, `None`
        The subplot to draw on.
    logx : :class:`bool`, `False`
        Whether to use a logspaced *grid* on the x axis. Values strictly >0 required.
    logy : :class:`bool`, `False`
        Whether to use a logspaced *grid* on the y axis. Values strictly >0 required.
    bins : :class:`int`, 20
        Number of bins used in the gridded functions (histograms, KDE evaluation grid).
    mode : :class:`str`, 'density'
        Different modes used here: ['density', 'hexbin', 'hist2d']
    extent : :class:`list`
        Predetermined extent of the grid for which to form the histogram/KDE. In the
        general form (xmin, xmax, ymin, ymax).
    contours : :class:`list`
        Contours to add to the plot, where :code:`mode='density'` is used.
    percentiles : :class:`bool`, `True`
        Whether contours specified are to be converted to percentiles.
    relim : :class:`bool`, :code:`True`
        Whether to relimit the plot based on xmin, xmax values.
    cmap : :class:`matplotlib.colors.Colormap`
        Colormap for mapping surfaces.
    vmin : :class:`float`, 0.
        Minimum value for colormap.
    shading : :class:`str`, 'auto'
        Shading to apply to pcolormesh.
    colorbar : :class:`bool`, False
        Whether to append a linked colorbar to the generated mappable image.
    {otherparams}
    Returns
    -------
    :class:`matplotlib.axes.Axes`
        Axes on which the densityplot is plotted.
    .. seealso::
        Functions:
            :func:`matplotlib.pyplot.pcolormesh`
            :func:`matplotlib.pyplot.hist2d`
            :func:`matplotlib.pyplot.contourf`
    Notes
    -----
    The default density estimates and derived contours are generated based on
    kernel density estimates. Assumptions around e.g. 95% of points lying within
    a 95% contour won't necessarily be valid for non-normally distributed data
    (instead, this represents the approximate 95% percentile on the kernel
    density estimate). Note that contours are currently only generated; for
    `mode="density"`; future updates may allow the use of a histogram
    basis, which would give results closer to 95% data percentiles.
    Todo
    ----
    * Allow generation of contours from histogram data, rather than just
        the kernel density estimate.
    * Implement an option and filter to 'scatter' points below the minimum threshold
        or maximum percentile contours.
    """
    # Default vmin: clip the lowest 2% of KDE heights (~98th percentile view)
    # when the caller left vmin unspecified.
    # NOTE(review): bitwise `&` on two bools — works, but `and` would be clearer.
    if (mode == 'density') & np.isclose(vmin, 0.0):
        vmin = 0.02
    # Three columns -> ternary projection; otherwise a standard binary plot.
    if arr.shape[-1] == 3:
        projection = 'ternary'
    else:
        projection = None
    ax = init_axes(ax=ax, projection=projection, **kwargs)
    pcolor, contour, contourf = get_axis_density_methods(ax)
    # Fully-transparent variant of the axes face colour, used for mesh edges.
    background_color = *ax.patch.get_facecolor()[:-1], 0.0
    if cmap is not None:
        if isinstance(cmap, str):
            cmap = plt.get_cmap(cmap)
        # Copy so set_under doesn't mutate the globally-registered colormap.
        cmap = copy.copy(cmap)
        cmap.set_under((1, 1, 1, 0))
    if mode == 'density':
        cbarlabel = 'Kernel Density Estimate'
    else:
        cbarlabel = 'Frequency'
    valid_rows = np.isfinite(arr).all(axis=-1)
    if mode in ['hexbin', 'hist2d'] and contours:
        raise NotImplementedError(
            "Contours are not currently implemented for 'hexbin' or 'hist2d' modes."
            )
    if arr.size > 0 and valid_rows.any():
        # Rows containing NaN/inf can't be plotted; drop them up front.
        arr = arr[valid_rows]
        if projection is None:
            x, y = arr.T
            grid = DensityGrid(x, y, bins=bins, logx=logx, logy=logy,
                extent=extent, **subkwargs(kwargs, DensityGrid))
            if mode == 'hexbin':
                mappable = ax.hexbin(x, y, gridsize=bins, cmap=cmap, extent
                    =grid.get_hex_extent(), xscale=['linear', 'log'][logx],
                    yscale=['linear', 'log'][logy], **subkwargs(kwargs, ax.
                    hexbin))
            elif mode == 'hist2d':
                _, _, _, im = ax.hist2d(x, y, bins=[grid.grid_xe, grid.
                    grid_ye], range=grid.get_range(), cmap=cmap, cmin=[0, 1
                    ][vmin > 0], **subkwargs(kwargs, ax.hist2d))
                mappable = im
            elif mode == 'density':
                zei = grid.kdefrom(arr, xtransform=[lambda x: x, np.log][
                    logx], ytransform=[lambda y: y, np.log][logy], mode=
                    'edges', **subkwargs(kwargs, grid.kdefrom))
                if percentiles:
                    # Convert the percentile-style vmin into a z-value cutoff.
                    vmin = percentile_contour_values_from_meshz(zei, [1.0 -
                        vmin])[1][0]
                    logger.debug('Updating `vmin` to percentile equiv: {:.2f}'
                        .format(vmin))
                if not contours:
                    # Continuous surface via pcolormesh over the bin edges.
                    mappable = pcolor(grid.grid_xei, grid.grid_yei, zei,
                        cmap=cmap, vmin=vmin, shading=shading, **subkwargs(
                        kwargs, pcolor))
                    mappable.set_edgecolor(background_color)
                    mappable.set_linestyle('None')
                    mappable.set_lw(0.0)
                else:
                    mappable = _add_contours(grid.grid_xei, grid.grid_yei,
                        zi=zei.reshape(grid.grid_xei.shape), ax=ax,
                        contours=contours, percentiles=percentiles, cmap=
                        cmap, vmin=vmin, **kwargs)
            if relim and extent is not None:
                ax.axis(extent)
        elif projection == 'ternary':
            # 'auto' can't be passed through to tripcolor.
            if shading == 'auto':
                shading = 'flat'
            # Zeros produce NaNs in the ternary heatmap calculations;
            # mask them out before renormalising rows to unit sum.
            arr[~(arr > 0).all(axis=1), :] = np.nan
            arr = close(arr)
            if mode == 'hexbin':
                raise NotImplementedError
            coords, zi, _ = ternary_heatmap(arr, bins=bins, mode=mode)
            if percentiles:
                # Convert the percentile-style vmin into a z-value cutoff.
                vmin = percentile_contour_values_from_meshz(zi, [1.0 - vmin])[1
                    ][0]
                logger.debug('Updating `vmin` to percentile equiv: {:.2f}'.
                    format(vmin))
            # Drop cells below the cutoff so the background stays transparent.
            fltr = (zi != 0) & (zi >= vmin)
            coords = coords[fltr.flatten(), :]
            zi = zi[fltr]
            if not contours:
                tri_poly_collection = pcolor(*coords.T, zi.flatten(), cmap=
                    cmap, vmin=vmin, shading=shading, **subkwargs(kwargs,
                    pcolor))
                mappable = tri_poly_collection
            else:
                mappable = _add_contours(*coords.T, zi=zi.flatten(), ax=ax,
                    contours=contours, percentiles=percentiles, cmap=cmap,
                    vmin=vmin, **kwargs)
            ax.set_aspect('equal')
    elif not arr.ndim in [0, 1, 2]:
        raise NotImplementedError
    if colorbar:
        cbkwargs = kwargs.copy()
        cbkwargs['label'] = cbarlabel
        add_colorbar(mappable, **cbkwargs)
    return ax
def _add_contours(*coords, zi=None, ax=None, contours=None, cmap=
    DEFAULT_CONT_COLORMAP, vmin=0.0, extent=None, **kwargs):
    """
    Add density-based contours to a plot.

    Parameters
    ----------
    coords : :class:`numpy.ndarray`
        Coordinate arrays for the grid (x, y for binary plots).
    zi : :class:`numpy.ndarray`
        Grid values (e.g. kernel density estimates) to contour.
    ax : :class:`matplotlib.axes.Axes`
        Axes to draw the contours on.
    contours : :class:`list` | :class:`int`
        Contour levels; interpreted as percentiles where ``percentiles=True``
        (the default, popped from ``kwargs``).
    cmap : :class:`matplotlib.colors.Colormap`
        Colormap for the contour surfaces.
    vmin : :class:`float`
        Minimum value for the colormap.
    extent : :class:`list`
        Extent of the grid in the general form (xmin, xmax, ymin, ymax).

    Returns
    -------
    Mappable contour set (usable with e.g. colorbars).
    """
    percentiles = kwargs.pop('percentiles', True)
    # `contours` previously defaulted to a mutable `[]`; `None` is equivalent
    # here, as both are falsy and fall through to any `levels` keyword.
    levels = contours or kwargs.get('levels', None)
    pcolor, contour, contourf = get_axis_density_methods(ax)
    if percentiles and not isinstance(levels, int):
        # Percentile-based contours: delegate the conversion of percentiles
        # to z-values (and the drawing) to plot_Z_percentiles.
        _cs = plot_Z_percentiles(*coords, zi=zi, ax=ax, percentiles=levels,
            extent=extent, cmap=cmap, **kwargs)
        mappable = _cs
    else:
        # Absolute-value contours: pick levels directly from the data range.
        if levels is None:
            levels = MaxNLocator(nbins=10).tick_values(zi.min(), zi.max())
        elif isinstance(levels, int):
            levels = MaxNLocator(nbins=levels).tick_values(zi.min(), zi.max())
        else:
            # Explicit non-integer levels with percentiles=False unsupported.
            raise NotImplementedError
        # Filled contours give the surface; line contours overlay boundaries.
        mappable = contourf(*coords, zi, extent=extent, levels=levels, cmap
            =cmap, vmin=vmin, **kwargs)
        contour(*coords, zi, extent=extent, levels=levels, cmap=cmap, vmin=
            vmin, **kwargs)
    return mappable
# Inject the combined matplotlib parameter documentation into the
# `{otherparams}` placeholder of the `density` docstring; the boolean toggle
# selects between an empty string and the generated section.
_add_additional_parameters = True
density.__doc__ = density.__doc__.format(otherparams=['',
    get_additional_params(density, plt.pcolormesh, plt.hist2d, plt.hexbin,
    plt.contour, plt.contourf, header='Other Parameters', indent=4,
    subsections=True)][_add_additional_parameters])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import copy
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import MaxNLocator
from ...comp.codata import close
from ...util.log import Handle
from ...util.meta import get_additional_params, subkwargs
from ...util.plot.axes import add_colorbar, init_axes
from ...util.plot.density import get_axis_density_methods, percentile_contour_values_from_meshz, plot_Z_percentiles
from ...util.plot.style import DEFAULT_CONT_COLORMAP
from .grid import DensityGrid
from .ternary import ternary_heatmap
logger = Handle(__name__)
def density(arr, ax=None, logx=False, logy=False, bins=25, mode='density',
    extent=None, contours=[], percentiles=True, relim=True, cmap=
    DEFAULT_CONT_COLORMAP, shading='auto', vmin=0.0, colorbar=False, **kwargs):
    """
    Creates diagrammatic representation of data density and/or frequency for either
    binary diagrams (X-Y) or ternary plots.
    Additional arguments are typically forwarded
    to respective :mod:`matplotlib` functions
    :func:`~matplotlib.pyplot.pcolormesh`,
    :func:`~matplotlib.pyplot.hist2d`,
    :func:`~matplotlib.pyplot.hexbin`,
    :func:`~matplotlib.pyplot.contour`, and
    :func:`~matplotlib.pyplot.contourf` (see Other Parameters, below).
    Parameters
    ----------
    arr : :class:`numpy.ndarray`
        Dataframe from which to draw data.
    ax : :class:`matplotlib.axes.Axes`, `None`
        The subplot to draw on.
    logx : :class:`bool`, `False`
        Whether to use a logspaced *grid* on the x axis. Values strictly >0 required.
    logy : :class:`bool`, `False`
        Whether to use a logspaced *grid* on the y axis. Values strictly >0 required.
    bins : :class:`int`, 20
        Number of bins used in the gridded functions (histograms, KDE evaluation grid).
    mode : :class:`str`, 'density'
        Different modes used here: ['density', 'hexbin', 'hist2d']
    extent : :class:`list`
        Predetermined extent of the grid for which to form the histogram/KDE. In the
        general form (xmin, xmax, ymin, ymax).
    contours : :class:`list`
        Contours to add to the plot, where :code:`mode='density'` is used.
    percentiles : :class:`bool`, `True`
        Whether contours specified are to be converted to percentiles.
    relim : :class:`bool`, :code:`True`
        Whether to relimit the plot based on xmin, xmax values.
    cmap : :class:`matplotlib.colors.Colormap`
        Colormap for mapping surfaces.
    vmin : :class:`float`, 0.
        Minimum value for colormap.
    shading : :class:`str`, 'auto'
        Shading to apply to pcolormesh.
    colorbar : :class:`bool`, False
        Whether to append a linked colorbar to the generated mappable image.
    {otherparams}
    Returns
    -------
    :class:`matplotlib.axes.Axes`
        Axes on which the densityplot is plotted.
    .. seealso::
        Functions:
            :func:`matplotlib.pyplot.pcolormesh`
            :func:`matplotlib.pyplot.hist2d`
            :func:`matplotlib.pyplot.contourf`
    Notes
    -----
    The default density estimates and derived contours are generated based on
    kernel density estimates. Assumptions around e.g. 95% of points lying within
    a 95% contour won't necessarily be valid for non-normally distributed data
    (instead, this represents the approximate 95% percentile on the kernel
    density estimate). Note that contours are currently only generated; for
    `mode="density"`; future updates may allow the use of a histogram
    basis, which would give results closer to 95% data percentiles.
    Todo
    ----
    * Allow generation of contours from histogram data, rather than just
        the kernel density estimate.
    * Implement an option and filter to 'scatter' points below the minimum threshold
        or maximum percentile contours.
    """
    # Default vmin: clip the lowest 2% of KDE heights (~98th percentile view)
    # when the caller left vmin unspecified.
    # NOTE(review): bitwise `&` on two bools — works, but `and` would be clearer.
    if (mode == 'density') & np.isclose(vmin, 0.0):
        vmin = 0.02
    # Three columns -> ternary projection; otherwise a standard binary plot.
    if arr.shape[-1] == 3:
        projection = 'ternary'
    else:
        projection = None
    ax = init_axes(ax=ax, projection=projection, **kwargs)
    pcolor, contour, contourf = get_axis_density_methods(ax)
    # Fully-transparent variant of the axes face colour, used for mesh edges.
    background_color = *ax.patch.get_facecolor()[:-1], 0.0
    if cmap is not None:
        if isinstance(cmap, str):
            cmap = plt.get_cmap(cmap)
        # Copy so set_under doesn't mutate the globally-registered colormap.
        cmap = copy.copy(cmap)
        cmap.set_under((1, 1, 1, 0))
    if mode == 'density':
        cbarlabel = 'Kernel Density Estimate'
    else:
        cbarlabel = 'Frequency'
    valid_rows = np.isfinite(arr).all(axis=-1)
    if mode in ['hexbin', 'hist2d'] and contours:
        raise NotImplementedError(
            "Contours are not currently implemented for 'hexbin' or 'hist2d' modes."
            )
    if arr.size > 0 and valid_rows.any():
        # Rows containing NaN/inf can't be plotted; drop them up front.
        arr = arr[valid_rows]
        if projection is None:
            x, y = arr.T
            grid = DensityGrid(x, y, bins=bins, logx=logx, logy=logy,
                extent=extent, **subkwargs(kwargs, DensityGrid))
            if mode == 'hexbin':
                mappable = ax.hexbin(x, y, gridsize=bins, cmap=cmap, extent
                    =grid.get_hex_extent(), xscale=['linear', 'log'][logx],
                    yscale=['linear', 'log'][logy], **subkwargs(kwargs, ax.
                    hexbin))
            elif mode == 'hist2d':
                _, _, _, im = ax.hist2d(x, y, bins=[grid.grid_xe, grid.
                    grid_ye], range=grid.get_range(), cmap=cmap, cmin=[0, 1
                    ][vmin > 0], **subkwargs(kwargs, ax.hist2d))
                mappable = im
            elif mode == 'density':
                zei = grid.kdefrom(arr, xtransform=[lambda x: x, np.log][
                    logx], ytransform=[lambda y: y, np.log][logy], mode=
                    'edges', **subkwargs(kwargs, grid.kdefrom))
                if percentiles:
                    # Convert the percentile-style vmin into a z-value cutoff.
                    vmin = percentile_contour_values_from_meshz(zei, [1.0 -
                        vmin])[1][0]
                    logger.debug('Updating `vmin` to percentile equiv: {:.2f}'
                        .format(vmin))
                if not contours:
                    # Continuous surface via pcolormesh over the bin edges.
                    mappable = pcolor(grid.grid_xei, grid.grid_yei, zei,
                        cmap=cmap, vmin=vmin, shading=shading, **subkwargs(
                        kwargs, pcolor))
                    mappable.set_edgecolor(background_color)
                    mappable.set_linestyle('None')
                    mappable.set_lw(0.0)
                else:
                    mappable = _add_contours(grid.grid_xei, grid.grid_yei,
                        zi=zei.reshape(grid.grid_xei.shape), ax=ax,
                        contours=contours, percentiles=percentiles, cmap=
                        cmap, vmin=vmin, **kwargs)
            if relim and extent is not None:
                ax.axis(extent)
        elif projection == 'ternary':
            # 'auto' can't be passed through to tripcolor.
            if shading == 'auto':
                shading = 'flat'
            # Zeros produce NaNs in the ternary heatmap calculations;
            # mask them out before renormalising rows to unit sum.
            arr[~(arr > 0).all(axis=1), :] = np.nan
            arr = close(arr)
            if mode == 'hexbin':
                raise NotImplementedError
            coords, zi, _ = ternary_heatmap(arr, bins=bins, mode=mode)
            if percentiles:
                # Convert the percentile-style vmin into a z-value cutoff.
                vmin = percentile_contour_values_from_meshz(zi, [1.0 - vmin])[1
                    ][0]
                logger.debug('Updating `vmin` to percentile equiv: {:.2f}'.
                    format(vmin))
            # Drop cells below the cutoff so the background stays transparent.
            fltr = (zi != 0) & (zi >= vmin)
            coords = coords[fltr.flatten(), :]
            zi = zi[fltr]
            if not contours:
                tri_poly_collection = pcolor(*coords.T, zi.flatten(), cmap=
                    cmap, vmin=vmin, shading=shading, **subkwargs(kwargs,
                    pcolor))
                mappable = tri_poly_collection
            else:
                mappable = _add_contours(*coords.T, zi=zi.flatten(), ax=ax,
                    contours=contours, percentiles=percentiles, cmap=cmap,
                    vmin=vmin, **kwargs)
            ax.set_aspect('equal')
    elif not arr.ndim in [0, 1, 2]:
        raise NotImplementedError
    if colorbar:
        cbkwargs = kwargs.copy()
        cbkwargs['label'] = cbarlabel
        add_colorbar(mappable, **cbkwargs)
    return ax
def _add_contours(*coords, zi=None, ax=None, contours=None, cmap=
    DEFAULT_CONT_COLORMAP, vmin=0.0, extent=None, **kwargs):
    """
    Add density-based contours to a plot.

    Parameters
    ----------
    coords : :class:`numpy.ndarray`
        Coordinate arrays for the grid (x, y for binary plots).
    zi : :class:`numpy.ndarray`
        Grid values (e.g. kernel density estimates) to contour.
    ax : :class:`matplotlib.axes.Axes`
        Axes to draw the contours on.
    contours : :class:`list` | :class:`int`
        Contour levels; interpreted as percentiles where ``percentiles=True``
        (the default, popped from ``kwargs``).
    cmap : :class:`matplotlib.colors.Colormap`
        Colormap for the contour surfaces.
    vmin : :class:`float`
        Minimum value for the colormap.
    extent : :class:`list`
        Extent of the grid in the general form (xmin, xmax, ymin, ymax).

    Returns
    -------
    Mappable contour set (usable with e.g. colorbars).
    """
    percentiles = kwargs.pop('percentiles', True)
    # `contours` previously defaulted to a mutable `[]`; `None` is equivalent
    # here, as both are falsy and fall through to any `levels` keyword.
    levels = contours or kwargs.get('levels', None)
    pcolor, contour, contourf = get_axis_density_methods(ax)
    if percentiles and not isinstance(levels, int):
        # Percentile-based contours: delegate the conversion of percentiles
        # to z-values (and the drawing) to plot_Z_percentiles.
        _cs = plot_Z_percentiles(*coords, zi=zi, ax=ax, percentiles=levels,
            extent=extent, cmap=cmap, **kwargs)
        mappable = _cs
    else:
        # Absolute-value contours: pick levels directly from the data range.
        if levels is None:
            levels = MaxNLocator(nbins=10).tick_values(zi.min(), zi.max())
        elif isinstance(levels, int):
            levels = MaxNLocator(nbins=levels).tick_values(zi.min(), zi.max())
        else:
            # Explicit non-integer levels with percentiles=False unsupported.
            raise NotImplementedError
        # Filled contours give the surface; line contours overlay boundaries.
        mappable = contourf(*coords, zi, extent=extent, levels=levels, cmap
            =cmap, vmin=vmin, **kwargs)
        contour(*coords, zi, extent=extent, levels=levels, cmap=cmap, vmin=
            vmin, **kwargs)
    return mappable
# Inject the combined matplotlib parameter documentation into the
# `{otherparams}` placeholder of the `density` docstring; the boolean toggle
# selects between an empty string and the generated section.
_add_additional_parameters = True
density.__doc__ = density.__doc__.format(otherparams=['',
    get_additional_params(density, plt.pcolormesh, plt.hist2d, plt.hexbin,
    plt.contour, plt.contourf, header='Other Parameters', indent=4,
    subsections=True)][_add_additional_parameters])
<|reserved_special_token_1|>
"""
Kernel density estimation plots for geochemical data.
"""
import copy
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import MaxNLocator
from ...comp.codata import close
from ...util.log import Handle
from ...util.meta import get_additional_params, subkwargs
from ...util.plot.axes import add_colorbar, init_axes
from ...util.plot.density import (
get_axis_density_methods,
percentile_contour_values_from_meshz,
plot_Z_percentiles,
)
from ...util.plot.style import DEFAULT_CONT_COLORMAP
from .grid import DensityGrid
from .ternary import ternary_heatmap
logger = Handle(__name__)
def density(
arr,
ax=None,
logx=False,
logy=False,
bins=25,
mode="density",
extent=None,
contours=[],
percentiles=True,
relim=True,
cmap=DEFAULT_CONT_COLORMAP,
shading="auto",
vmin=0.0,
colorbar=False,
**kwargs
):
"""
Creates diagramatic representation of data density and/or frequency for either
binary diagrams (X-Y) or ternary plots.
Additional arguments are typically forwarded
to respective :mod:`matplotlib` functions
:func:`~matplotlib.pyplot.pcolormesh`,
:func:`~matplotlib.pyplot.hist2d`,
:func:`~matplotlib.pyplot.hexbin`,
:func:`~matplotlib.pyplot.contour`, and
:func:`~matplotlib.pyplot.contourf` (see Other Parameters, below).
Parameters
----------
arr : :class:`numpy.ndarray`
Dataframe from which to draw data.
ax : :class:`matplotlib.axes.Axes`, `None`
The subplot to draw on.
logx : :class:`bool`, `False`
Whether to use a logspaced *grid* on the x axis. Values strictly >0 required.
logy : :class:`bool`, `False`
Whether to use a logspaced *grid* on the y axis. Values strictly >0 required.
bins : :class:`int`, 20
Number of bins used in the gridded functions (histograms, KDE evaluation grid).
mode : :class:`str`, 'density'
Different modes used here: ['density', 'hexbin', 'hist2d']
extent : :class:`list`
Predetermined extent of the grid for which to from the histogram/KDE. In the
general form (xmin, xmax, ymin, ymax).
contours : :class:`list`
Contours to add to the plot, where :code:`mode='density'` is used.
percentiles : :class:`bool`, `True`
Whether contours specified are to be converted to percentiles.
relim : :class:`bool`, :code:`True`
Whether to relimit the plot based on xmin, xmax values.
cmap : :class:`matplotlib.colors.Colormap`
Colormap for mapping surfaces.
vmin : :class:`float`, 0.
Minimum value for colormap.
shading : :class:`str`, 'auto'
Shading to apply to pcolormesh.
colorbar : :class:`bool`, False
Whether to append a linked colorbar to the generated mappable image.
{otherparams}
Returns
-------
:class:`matplotlib.axes.Axes`
Axes on which the densityplot is plotted.
.. seealso::
Functions:
:func:`matplotlib.pyplot.pcolormesh`
:func:`matplotlib.pyplot.hist2d`
:func:`matplotlib.pyplot.contourf`
Notes
-----
The default density estimates and derived contours are generated based on
kernel density estimates. Assumptions around e.g. 95% of points lying within
a 95% contour won't necessarily be valid for non-normally distributed data
(instead, this represents the approximate 95% percentile on the kernel
density estimate). Note that contours are currently only generated; for
`mode="density"`; future updates may allow the use of a histogram
basis, which would give results closer to 95% data percentiles.
Todo
----
* Allow generation of contours from histogram data, rather than just
the kernel density estimate.
* Implement an option and filter to 'scatter' points below the minimum threshold
or maximum percentile contours.
"""
if (mode == "density") & np.isclose(vmin, 0.0): # if vmin is not specified
vmin = 0.02 # 2% max height | 98th percentile
if arr.shape[-1] == 3:
projection = "ternary"
else:
projection = None
ax = init_axes(ax=ax, projection=projection, **kwargs)
pcolor, contour, contourf = get_axis_density_methods(ax)
background_color = (*ax.patch.get_facecolor()[:-1], 0.0)
if cmap is not None:
if isinstance(cmap, str):
cmap = plt.get_cmap(cmap)
cmap = copy.copy(cmap) # without this, it would modify the global cmap
cmap.set_under((1, 1, 1, 0))
if mode == "density":
cbarlabel = "Kernel Density Estimate"
else:
cbarlabel = "Frequency"
valid_rows = np.isfinite(arr).all(axis=-1)
if (mode in ["hexbin", "hist2d"]) and contours:
raise NotImplementedError(
"Contours are not currently implemented for 'hexbin' or 'hist2d' modes."
)
if (arr.size > 0) and valid_rows.any():
# Data can't be plotted if there's any nans, so we can exclude these
arr = arr[valid_rows]
if projection is None: # binary
x, y = arr.T
grid = DensityGrid(
x,
y,
bins=bins,
logx=logx,
logy=logy,
extent=extent,
**subkwargs(kwargs, DensityGrid)
)
if mode == "hexbin":
# extent values are exponents (i.e. 3 -> 10**3)
mappable = ax.hexbin(
x,
y,
gridsize=bins,
cmap=cmap,
extent=grid.get_hex_extent(),
xscale=["linear", "log"][logx],
yscale=["linear", "log"][logy],
**subkwargs(kwargs, ax.hexbin)
)
elif mode == "hist2d":
_, _, _, im = ax.hist2d(
x,
y,
bins=[grid.grid_xe, grid.grid_ye],
range=grid.get_range(),
cmap=cmap,
cmin=[0, 1][vmin > 0],
**subkwargs(kwargs, ax.hist2d)
)
mappable = im
elif mode == "density":
zei = grid.kdefrom(
arr,
xtransform=[lambda x: x, np.log][logx],
ytransform=[lambda y: y, np.log][logy],
mode="edges",
**subkwargs(kwargs, grid.kdefrom)
)
if percentiles: # 98th percentile
vmin = percentile_contour_values_from_meshz(zei, [1.0 - vmin])[1][0]
logger.debug(
"Updating `vmin` to percentile equiv: {:.2f}".format(vmin)
)
if not contours:
# pcolormesh using bin edges
mappable = pcolor(
grid.grid_xei,
grid.grid_yei,
zei,
cmap=cmap,
vmin=vmin,
shading=shading,
**subkwargs(kwargs, pcolor)
)
mappable.set_edgecolor(background_color)
mappable.set_linestyle("None")
mappable.set_lw(0.0)
else:
mappable = _add_contours(
grid.grid_xei,
grid.grid_yei,
zi=zei.reshape(grid.grid_xei.shape),
ax=ax,
contours=contours,
percentiles=percentiles,
cmap=cmap,
vmin=vmin,
**kwargs
)
if relim and (extent is not None):
ax.axis(extent)
elif projection == "ternary": # ternary
if shading == "auto":
shading = "flat" # auto cant' be passed to tripcolor
# zeros make nans in this case, due to the heatmap calculations
arr[~(arr > 0).all(axis=1), :] = np.nan
arr = close(arr)
if mode == "hexbin":
raise NotImplementedError
# density, histogram etc parsed here
coords, zi, _ = ternary_heatmap(arr, bins=bins, mode=mode)
if percentiles: # 98th percentile
vmin = percentile_contour_values_from_meshz(zi, [1.0 - vmin])[1][0]
logger.debug("Updating `vmin` to percentile equiv: {:.2f}".format(vmin))
# remove coords where H==0, as ax.tripcolor can't deal with variable alpha :'(
fltr = (zi != 0) & (zi >= vmin)
coords = coords[fltr.flatten(), :]
zi = zi[fltr]
if not contours:
tri_poly_collection = pcolor(
*coords.T,
zi.flatten(),
cmap=cmap,
vmin=vmin,
shading=shading,
**subkwargs(kwargs, pcolor)
)
mappable = tri_poly_collection
else:
mappable = _add_contours(
*coords.T,
zi=zi.flatten(),
ax=ax,
contours=contours,
percentiles=percentiles,
cmap=cmap,
vmin=vmin,
**kwargs
)
ax.set_aspect("equal")
else:
if not arr.ndim in [0, 1, 2]:
raise NotImplementedError
if colorbar:
cbkwargs = kwargs.copy()
cbkwargs["label"] = cbarlabel
add_colorbar(mappable, **cbkwargs)
return ax
def _add_contours(
    *coords,
    zi=None,
    ax=None,
    contours=None,
    cmap=DEFAULT_CONT_COLORMAP,
    vmin=0.0,
    extent=None,
    **kwargs
):
    """
    Add density-based contours to a plot.

    Parameters
    ----------
    coords
        Positional coordinate arrays forwarded to the axis contouring methods
        (x, y for binary axes; transformed coordinates for ternary axes).
    zi : :class:`numpy.ndarray`
        Values to contour (gridded, or flattened to match `coords`).
    ax : :class:`matplotlib.axes.Axes`
        Axes on which to draw the contours.
    contours : :class:`list` | :class:`int`
        Contour levels; interpreted as percentiles where ``percentiles=True``
        (supplied via ``kwargs``, defaulting to :code:`True`). Takes precedence
        over a ``levels`` keyword argument.
    cmap : :class:`matplotlib.colors.Colormap`
        Colormap for the contours.
    vmin : :class:`float`
        Minimum value for the colormap.
    extent : :class:`list`
        Extent of the grid in the general form (xmin, xmax, ymin, ymax).

    Returns
    -------
    Mappable (contour set or filled-contour collection), suitable for linking
    a colorbar.
    """
    percentiles = kwargs.pop("percentiles", True)
    # Get the contour levels; `contours` takes precedence over a `levels` kwarg.
    # NOTE: `levels` must be *popped* (not `get`) from kwargs — otherwise it is
    # forwarded again via **kwargs to contourf/contour below, raising
    # "got multiple values for keyword argument 'levels'".
    levels = contours or kwargs.pop("levels", None)
    pcolor, contour, contourf = get_axis_density_methods(ax)
    if percentiles and not isinstance(levels, int):
        # plot individual percentile contours
        _cs = plot_Z_percentiles(
            *coords,
            zi=zi,
            ax=ax,
            percentiles=levels,
            extent=extent,
            cmap=cmap,
            **kwargs
        )
        mappable = _cs
    else:
        # plot interval contours at explicit z-values
        if levels is None:
            levels = MaxNLocator(nbins=10).tick_values(zi.min(), zi.max())
        elif isinstance(levels, int):
            levels = MaxNLocator(nbins=levels).tick_values(zi.min(), zi.max())
        else:
            # explicit level values; matplotlib requires an increasing sequence
            levels = sorted(levels)
        # filled contours
        mappable = contourf(
            *coords, zi, extent=extent, levels=levels, cmap=cmap, vmin=vmin, **kwargs
        )
        # contour lines over the fill
        contour(
            *coords, zi, extent=extent, levels=levels, cmap=cmap, vmin=vmin, **kwargs
        )
    return mappable
# Whether to append the combined "Other Parameters" section (collected from the
# wrapped matplotlib functions) to the docstring of :func:`density`.
_add_additional_parameters = True

# Guard: under `python -OO` docstrings are stripped and `density.__doc__` is
# None, so formatting it would raise AttributeError.
if density.__doc__ is not None:
    density.__doc__ = density.__doc__.format(
        otherparams=(
            get_additional_params(
                density,
                plt.pcolormesh,
                plt.hist2d,
                plt.hexbin,
                plt.contour,
                plt.contourf,
                header="Other Parameters",
                indent=4,
                subsections=True,
            )
            # Lazy conditional (rather than bool-indexing a two-element list)
            # so the section is only generated when actually requested.
            if _add_additional_parameters
            else ""
        )
    )
|
flexible
|
{
"blob_id": "ae475dc95c6a099270cf65d4b471b4b430f02303",
"index": 8840,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef density(arr, ax=None, logx=False, logy=False, bins=25, mode='density',\n extent=None, contours=[], percentiles=True, relim=True, cmap=\n DEFAULT_CONT_COLORMAP, shading='auto', vmin=0.0, colorbar=False, **kwargs):\n \"\"\"\n Creates diagramatic representation of data density and/or frequency for either\n binary diagrams (X-Y) or ternary plots.\n Additional arguments are typically forwarded\n to respective :mod:`matplotlib` functions\n :func:`~matplotlib.pyplot.pcolormesh`,\n :func:`~matplotlib.pyplot.hist2d`,\n :func:`~matplotlib.pyplot.hexbin`,\n :func:`~matplotlib.pyplot.contour`, and\n :func:`~matplotlib.pyplot.contourf` (see Other Parameters, below).\n\n Parameters\n ----------\n arr : :class:`numpy.ndarray`\n Dataframe from which to draw data.\n ax : :class:`matplotlib.axes.Axes`, `None`\n The subplot to draw on.\n logx : :class:`bool`, `False`\n Whether to use a logspaced *grid* on the x axis. Values strictly >0 required.\n logy : :class:`bool`, `False`\n Whether to use a logspaced *grid* on the y axis. Values strictly >0 required.\n bins : :class:`int`, 20\n Number of bins used in the gridded functions (histograms, KDE evaluation grid).\n mode : :class:`str`, 'density'\n Different modes used here: ['density', 'hexbin', 'hist2d']\n extent : :class:`list`\n Predetermined extent of the grid for which to from the histogram/KDE. 
In the\n general form (xmin, xmax, ymin, ymax).\n contours : :class:`list`\n Contours to add to the plot, where :code:`mode='density'` is used.\n percentiles : :class:`bool`, `True`\n Whether contours specified are to be converted to percentiles.\n relim : :class:`bool`, :code:`True`\n Whether to relimit the plot based on xmin, xmax values.\n cmap : :class:`matplotlib.colors.Colormap`\n Colormap for mapping surfaces.\n vmin : :class:`float`, 0.\n Minimum value for colormap.\n shading : :class:`str`, 'auto'\n Shading to apply to pcolormesh.\n colorbar : :class:`bool`, False\n Whether to append a linked colorbar to the generated mappable image.\n\n {otherparams}\n\n Returns\n -------\n :class:`matplotlib.axes.Axes`\n Axes on which the densityplot is plotted.\n\n .. seealso::\n\n Functions:\n\n :func:`matplotlib.pyplot.pcolormesh`\n :func:`matplotlib.pyplot.hist2d`\n :func:`matplotlib.pyplot.contourf`\n\n Notes\n -----\n The default density estimates and derived contours are generated based on\n kernel density estimates. Assumptions around e.g. 95% of points lying within\n a 95% contour won't necessarily be valid for non-normally distributed data\n (instead, this represents the approximate 95% percentile on the kernel\n density estimate). 
Note that contours are currently only generated; for\n `mode=\"density\"`; future updates may allow the use of a histogram\n basis, which would give results closer to 95% data percentiles.\n\n Todo\n ----\n * Allow generation of contours from histogram data, rather than just\n the kernel density estimate.\n * Implement an option and filter to 'scatter' points below the minimum threshold\n or maximum percentile contours.\n \"\"\"\n if (mode == 'density') & np.isclose(vmin, 0.0):\n vmin = 0.02\n if arr.shape[-1] == 3:\n projection = 'ternary'\n else:\n projection = None\n ax = init_axes(ax=ax, projection=projection, **kwargs)\n pcolor, contour, contourf = get_axis_density_methods(ax)\n background_color = *ax.patch.get_facecolor()[:-1], 0.0\n if cmap is not None:\n if isinstance(cmap, str):\n cmap = plt.get_cmap(cmap)\n cmap = copy.copy(cmap)\n cmap.set_under((1, 1, 1, 0))\n if mode == 'density':\n cbarlabel = 'Kernel Density Estimate'\n else:\n cbarlabel = 'Frequency'\n valid_rows = np.isfinite(arr).all(axis=-1)\n if mode in ['hexbin', 'hist2d'] and contours:\n raise NotImplementedError(\n \"Contours are not currently implemented for 'hexbin' or 'hist2d' modes.\"\n )\n if arr.size > 0 and valid_rows.any():\n arr = arr[valid_rows]\n if projection is None:\n x, y = arr.T\n grid = DensityGrid(x, y, bins=bins, logx=logx, logy=logy,\n extent=extent, **subkwargs(kwargs, DensityGrid))\n if mode == 'hexbin':\n mappable = ax.hexbin(x, y, gridsize=bins, cmap=cmap, extent\n =grid.get_hex_extent(), xscale=['linear', 'log'][logx],\n yscale=['linear', 'log'][logy], **subkwargs(kwargs, ax.\n hexbin))\n elif mode == 'hist2d':\n _, _, _, im = ax.hist2d(x, y, bins=[grid.grid_xe, grid.\n grid_ye], range=grid.get_range(), cmap=cmap, cmin=[0, 1\n ][vmin > 0], **subkwargs(kwargs, ax.hist2d))\n mappable = im\n elif mode == 'density':\n zei = grid.kdefrom(arr, xtransform=[lambda x: x, np.log][\n logx], ytransform=[lambda y: y, np.log][logy], mode=\n 'edges', **subkwargs(kwargs, 
grid.kdefrom))\n if percentiles:\n vmin = percentile_contour_values_from_meshz(zei, [1.0 -\n vmin])[1][0]\n logger.debug('Updating `vmin` to percentile equiv: {:.2f}'\n .format(vmin))\n if not contours:\n mappable = pcolor(grid.grid_xei, grid.grid_yei, zei,\n cmap=cmap, vmin=vmin, shading=shading, **subkwargs(\n kwargs, pcolor))\n mappable.set_edgecolor(background_color)\n mappable.set_linestyle('None')\n mappable.set_lw(0.0)\n else:\n mappable = _add_contours(grid.grid_xei, grid.grid_yei,\n zi=zei.reshape(grid.grid_xei.shape), ax=ax,\n contours=contours, percentiles=percentiles, cmap=\n cmap, vmin=vmin, **kwargs)\n if relim and extent is not None:\n ax.axis(extent)\n elif projection == 'ternary':\n if shading == 'auto':\n shading = 'flat'\n arr[~(arr > 0).all(axis=1), :] = np.nan\n arr = close(arr)\n if mode == 'hexbin':\n raise NotImplementedError\n coords, zi, _ = ternary_heatmap(arr, bins=bins, mode=mode)\n if percentiles:\n vmin = percentile_contour_values_from_meshz(zi, [1.0 - vmin])[1\n ][0]\n logger.debug('Updating `vmin` to percentile equiv: {:.2f}'.\n format(vmin))\n fltr = (zi != 0) & (zi >= vmin)\n coords = coords[fltr.flatten(), :]\n zi = zi[fltr]\n if not contours:\n tri_poly_collection = pcolor(*coords.T, zi.flatten(), cmap=\n cmap, vmin=vmin, shading=shading, **subkwargs(kwargs,\n pcolor))\n mappable = tri_poly_collection\n else:\n mappable = _add_contours(*coords.T, zi=zi.flatten(), ax=ax,\n contours=contours, percentiles=percentiles, cmap=cmap,\n vmin=vmin, **kwargs)\n ax.set_aspect('equal')\n elif not arr.ndim in [0, 1, 2]:\n raise NotImplementedError\n if colorbar:\n cbkwargs = kwargs.copy()\n cbkwargs['label'] = cbarlabel\n add_colorbar(mappable, **cbkwargs)\n return ax\n\n\ndef _add_contours(*coords, zi=None, ax=None, contours=[], cmap=\n DEFAULT_CONT_COLORMAP, vmin=0.0, extent=None, **kwargs):\n \"\"\"\n Add density-based contours to a plot.\n \"\"\"\n percentiles = kwargs.pop('percentiles', True)\n levels = contours or kwargs.get('levels', 
None)\n pcolor, contour, contourf = get_axis_density_methods(ax)\n if percentiles and not isinstance(levels, int):\n _cs = plot_Z_percentiles(*coords, zi=zi, ax=ax, percentiles=levels,\n extent=extent, cmap=cmap, **kwargs)\n mappable = _cs\n else:\n if levels is None:\n levels = MaxNLocator(nbins=10).tick_values(zi.min(), zi.max())\n elif isinstance(levels, int):\n levels = MaxNLocator(nbins=levels).tick_values(zi.min(), zi.max())\n else:\n raise NotImplementedError\n mappable = contourf(*coords, zi, extent=extent, levels=levels, cmap\n =cmap, vmin=vmin, **kwargs)\n contour(*coords, zi, extent=extent, levels=levels, cmap=cmap, vmin=\n vmin, **kwargs)\n return mappable\n\n\n<mask token>\n",
"step-3": "<mask token>\nlogger = Handle(__name__)\n\n\ndef density(arr, ax=None, logx=False, logy=False, bins=25, mode='density',\n extent=None, contours=[], percentiles=True, relim=True, cmap=\n DEFAULT_CONT_COLORMAP, shading='auto', vmin=0.0, colorbar=False, **kwargs):\n \"\"\"\n Creates diagramatic representation of data density and/or frequency for either\n binary diagrams (X-Y) or ternary plots.\n Additional arguments are typically forwarded\n to respective :mod:`matplotlib` functions\n :func:`~matplotlib.pyplot.pcolormesh`,\n :func:`~matplotlib.pyplot.hist2d`,\n :func:`~matplotlib.pyplot.hexbin`,\n :func:`~matplotlib.pyplot.contour`, and\n :func:`~matplotlib.pyplot.contourf` (see Other Parameters, below).\n\n Parameters\n ----------\n arr : :class:`numpy.ndarray`\n Dataframe from which to draw data.\n ax : :class:`matplotlib.axes.Axes`, `None`\n The subplot to draw on.\n logx : :class:`bool`, `False`\n Whether to use a logspaced *grid* on the x axis. Values strictly >0 required.\n logy : :class:`bool`, `False`\n Whether to use a logspaced *grid* on the y axis. Values strictly >0 required.\n bins : :class:`int`, 20\n Number of bins used in the gridded functions (histograms, KDE evaluation grid).\n mode : :class:`str`, 'density'\n Different modes used here: ['density', 'hexbin', 'hist2d']\n extent : :class:`list`\n Predetermined extent of the grid for which to from the histogram/KDE. 
In the\n general form (xmin, xmax, ymin, ymax).\n contours : :class:`list`\n Contours to add to the plot, where :code:`mode='density'` is used.\n percentiles : :class:`bool`, `True`\n Whether contours specified are to be converted to percentiles.\n relim : :class:`bool`, :code:`True`\n Whether to relimit the plot based on xmin, xmax values.\n cmap : :class:`matplotlib.colors.Colormap`\n Colormap for mapping surfaces.\n vmin : :class:`float`, 0.\n Minimum value for colormap.\n shading : :class:`str`, 'auto'\n Shading to apply to pcolormesh.\n colorbar : :class:`bool`, False\n Whether to append a linked colorbar to the generated mappable image.\n\n {otherparams}\n\n Returns\n -------\n :class:`matplotlib.axes.Axes`\n Axes on which the densityplot is plotted.\n\n .. seealso::\n\n Functions:\n\n :func:`matplotlib.pyplot.pcolormesh`\n :func:`matplotlib.pyplot.hist2d`\n :func:`matplotlib.pyplot.contourf`\n\n Notes\n -----\n The default density estimates and derived contours are generated based on\n kernel density estimates. Assumptions around e.g. 95% of points lying within\n a 95% contour won't necessarily be valid for non-normally distributed data\n (instead, this represents the approximate 95% percentile on the kernel\n density estimate). 
Note that contours are currently only generated; for\n `mode=\"density\"`; future updates may allow the use of a histogram\n basis, which would give results closer to 95% data percentiles.\n\n Todo\n ----\n * Allow generation of contours from histogram data, rather than just\n the kernel density estimate.\n * Implement an option and filter to 'scatter' points below the minimum threshold\n or maximum percentile contours.\n \"\"\"\n if (mode == 'density') & np.isclose(vmin, 0.0):\n vmin = 0.02\n if arr.shape[-1] == 3:\n projection = 'ternary'\n else:\n projection = None\n ax = init_axes(ax=ax, projection=projection, **kwargs)\n pcolor, contour, contourf = get_axis_density_methods(ax)\n background_color = *ax.patch.get_facecolor()[:-1], 0.0\n if cmap is not None:\n if isinstance(cmap, str):\n cmap = plt.get_cmap(cmap)\n cmap = copy.copy(cmap)\n cmap.set_under((1, 1, 1, 0))\n if mode == 'density':\n cbarlabel = 'Kernel Density Estimate'\n else:\n cbarlabel = 'Frequency'\n valid_rows = np.isfinite(arr).all(axis=-1)\n if mode in ['hexbin', 'hist2d'] and contours:\n raise NotImplementedError(\n \"Contours are not currently implemented for 'hexbin' or 'hist2d' modes.\"\n )\n if arr.size > 0 and valid_rows.any():\n arr = arr[valid_rows]\n if projection is None:\n x, y = arr.T\n grid = DensityGrid(x, y, bins=bins, logx=logx, logy=logy,\n extent=extent, **subkwargs(kwargs, DensityGrid))\n if mode == 'hexbin':\n mappable = ax.hexbin(x, y, gridsize=bins, cmap=cmap, extent\n =grid.get_hex_extent(), xscale=['linear', 'log'][logx],\n yscale=['linear', 'log'][logy], **subkwargs(kwargs, ax.\n hexbin))\n elif mode == 'hist2d':\n _, _, _, im = ax.hist2d(x, y, bins=[grid.grid_xe, grid.\n grid_ye], range=grid.get_range(), cmap=cmap, cmin=[0, 1\n ][vmin > 0], **subkwargs(kwargs, ax.hist2d))\n mappable = im\n elif mode == 'density':\n zei = grid.kdefrom(arr, xtransform=[lambda x: x, np.log][\n logx], ytransform=[lambda y: y, np.log][logy], mode=\n 'edges', **subkwargs(kwargs, 
grid.kdefrom))\n if percentiles:\n vmin = percentile_contour_values_from_meshz(zei, [1.0 -\n vmin])[1][0]\n logger.debug('Updating `vmin` to percentile equiv: {:.2f}'\n .format(vmin))\n if not contours:\n mappable = pcolor(grid.grid_xei, grid.grid_yei, zei,\n cmap=cmap, vmin=vmin, shading=shading, **subkwargs(\n kwargs, pcolor))\n mappable.set_edgecolor(background_color)\n mappable.set_linestyle('None')\n mappable.set_lw(0.0)\n else:\n mappable = _add_contours(grid.grid_xei, grid.grid_yei,\n zi=zei.reshape(grid.grid_xei.shape), ax=ax,\n contours=contours, percentiles=percentiles, cmap=\n cmap, vmin=vmin, **kwargs)\n if relim and extent is not None:\n ax.axis(extent)\n elif projection == 'ternary':\n if shading == 'auto':\n shading = 'flat'\n arr[~(arr > 0).all(axis=1), :] = np.nan\n arr = close(arr)\n if mode == 'hexbin':\n raise NotImplementedError\n coords, zi, _ = ternary_heatmap(arr, bins=bins, mode=mode)\n if percentiles:\n vmin = percentile_contour_values_from_meshz(zi, [1.0 - vmin])[1\n ][0]\n logger.debug('Updating `vmin` to percentile equiv: {:.2f}'.\n format(vmin))\n fltr = (zi != 0) & (zi >= vmin)\n coords = coords[fltr.flatten(), :]\n zi = zi[fltr]\n if not contours:\n tri_poly_collection = pcolor(*coords.T, zi.flatten(), cmap=\n cmap, vmin=vmin, shading=shading, **subkwargs(kwargs,\n pcolor))\n mappable = tri_poly_collection\n else:\n mappable = _add_contours(*coords.T, zi=zi.flatten(), ax=ax,\n contours=contours, percentiles=percentiles, cmap=cmap,\n vmin=vmin, **kwargs)\n ax.set_aspect('equal')\n elif not arr.ndim in [0, 1, 2]:\n raise NotImplementedError\n if colorbar:\n cbkwargs = kwargs.copy()\n cbkwargs['label'] = cbarlabel\n add_colorbar(mappable, **cbkwargs)\n return ax\n\n\ndef _add_contours(*coords, zi=None, ax=None, contours=[], cmap=\n DEFAULT_CONT_COLORMAP, vmin=0.0, extent=None, **kwargs):\n \"\"\"\n Add density-based contours to a plot.\n \"\"\"\n percentiles = kwargs.pop('percentiles', True)\n levels = contours or kwargs.get('levels', 
None)\n pcolor, contour, contourf = get_axis_density_methods(ax)\n if percentiles and not isinstance(levels, int):\n _cs = plot_Z_percentiles(*coords, zi=zi, ax=ax, percentiles=levels,\n extent=extent, cmap=cmap, **kwargs)\n mappable = _cs\n else:\n if levels is None:\n levels = MaxNLocator(nbins=10).tick_values(zi.min(), zi.max())\n elif isinstance(levels, int):\n levels = MaxNLocator(nbins=levels).tick_values(zi.min(), zi.max())\n else:\n raise NotImplementedError\n mappable = contourf(*coords, zi, extent=extent, levels=levels, cmap\n =cmap, vmin=vmin, **kwargs)\n contour(*coords, zi, extent=extent, levels=levels, cmap=cmap, vmin=\n vmin, **kwargs)\n return mappable\n\n\n_add_additional_parameters = True\ndensity.__doc__ = density.__doc__.format(otherparams=['',\n get_additional_params(density, plt.pcolormesh, plt.hist2d, plt.hexbin,\n plt.contour, plt.contourf, header='Other Parameters', indent=4,\n subsections=True)][_add_additional_parameters])\n",
"step-4": "<mask token>\nimport copy\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.ticker import MaxNLocator\nfrom ...comp.codata import close\nfrom ...util.log import Handle\nfrom ...util.meta import get_additional_params, subkwargs\nfrom ...util.plot.axes import add_colorbar, init_axes\nfrom ...util.plot.density import get_axis_density_methods, percentile_contour_values_from_meshz, plot_Z_percentiles\nfrom ...util.plot.style import DEFAULT_CONT_COLORMAP\nfrom .grid import DensityGrid\nfrom .ternary import ternary_heatmap\nlogger = Handle(__name__)\n\n\ndef density(arr, ax=None, logx=False, logy=False, bins=25, mode='density',\n extent=None, contours=[], percentiles=True, relim=True, cmap=\n DEFAULT_CONT_COLORMAP, shading='auto', vmin=0.0, colorbar=False, **kwargs):\n \"\"\"\n Creates diagramatic representation of data density and/or frequency for either\n binary diagrams (X-Y) or ternary plots.\n Additional arguments are typically forwarded\n to respective :mod:`matplotlib` functions\n :func:`~matplotlib.pyplot.pcolormesh`,\n :func:`~matplotlib.pyplot.hist2d`,\n :func:`~matplotlib.pyplot.hexbin`,\n :func:`~matplotlib.pyplot.contour`, and\n :func:`~matplotlib.pyplot.contourf` (see Other Parameters, below).\n\n Parameters\n ----------\n arr : :class:`numpy.ndarray`\n Dataframe from which to draw data.\n ax : :class:`matplotlib.axes.Axes`, `None`\n The subplot to draw on.\n logx : :class:`bool`, `False`\n Whether to use a logspaced *grid* on the x axis. Values strictly >0 required.\n logy : :class:`bool`, `False`\n Whether to use a logspaced *grid* on the y axis. Values strictly >0 required.\n bins : :class:`int`, 20\n Number of bins used in the gridded functions (histograms, KDE evaluation grid).\n mode : :class:`str`, 'density'\n Different modes used here: ['density', 'hexbin', 'hist2d']\n extent : :class:`list`\n Predetermined extent of the grid for which to from the histogram/KDE. 
In the\n general form (xmin, xmax, ymin, ymax).\n contours : :class:`list`\n Contours to add to the plot, where :code:`mode='density'` is used.\n percentiles : :class:`bool`, `True`\n Whether contours specified are to be converted to percentiles.\n relim : :class:`bool`, :code:`True`\n Whether to relimit the plot based on xmin, xmax values.\n cmap : :class:`matplotlib.colors.Colormap`\n Colormap for mapping surfaces.\n vmin : :class:`float`, 0.\n Minimum value for colormap.\n shading : :class:`str`, 'auto'\n Shading to apply to pcolormesh.\n colorbar : :class:`bool`, False\n Whether to append a linked colorbar to the generated mappable image.\n\n {otherparams}\n\n Returns\n -------\n :class:`matplotlib.axes.Axes`\n Axes on which the densityplot is plotted.\n\n .. seealso::\n\n Functions:\n\n :func:`matplotlib.pyplot.pcolormesh`\n :func:`matplotlib.pyplot.hist2d`\n :func:`matplotlib.pyplot.contourf`\n\n Notes\n -----\n The default density estimates and derived contours are generated based on\n kernel density estimates. Assumptions around e.g. 95% of points lying within\n a 95% contour won't necessarily be valid for non-normally distributed data\n (instead, this represents the approximate 95% percentile on the kernel\n density estimate). 
Note that contours are currently only generated; for\n `mode=\"density\"`; future updates may allow the use of a histogram\n basis, which would give results closer to 95% data percentiles.\n\n Todo\n ----\n * Allow generation of contours from histogram data, rather than just\n the kernel density estimate.\n * Implement an option and filter to 'scatter' points below the minimum threshold\n or maximum percentile contours.\n \"\"\"\n if (mode == 'density') & np.isclose(vmin, 0.0):\n vmin = 0.02\n if arr.shape[-1] == 3:\n projection = 'ternary'\n else:\n projection = None\n ax = init_axes(ax=ax, projection=projection, **kwargs)\n pcolor, contour, contourf = get_axis_density_methods(ax)\n background_color = *ax.patch.get_facecolor()[:-1], 0.0\n if cmap is not None:\n if isinstance(cmap, str):\n cmap = plt.get_cmap(cmap)\n cmap = copy.copy(cmap)\n cmap.set_under((1, 1, 1, 0))\n if mode == 'density':\n cbarlabel = 'Kernel Density Estimate'\n else:\n cbarlabel = 'Frequency'\n valid_rows = np.isfinite(arr).all(axis=-1)\n if mode in ['hexbin', 'hist2d'] and contours:\n raise NotImplementedError(\n \"Contours are not currently implemented for 'hexbin' or 'hist2d' modes.\"\n )\n if arr.size > 0 and valid_rows.any():\n arr = arr[valid_rows]\n if projection is None:\n x, y = arr.T\n grid = DensityGrid(x, y, bins=bins, logx=logx, logy=logy,\n extent=extent, **subkwargs(kwargs, DensityGrid))\n if mode == 'hexbin':\n mappable = ax.hexbin(x, y, gridsize=bins, cmap=cmap, extent\n =grid.get_hex_extent(), xscale=['linear', 'log'][logx],\n yscale=['linear', 'log'][logy], **subkwargs(kwargs, ax.\n hexbin))\n elif mode == 'hist2d':\n _, _, _, im = ax.hist2d(x, y, bins=[grid.grid_xe, grid.\n grid_ye], range=grid.get_range(), cmap=cmap, cmin=[0, 1\n ][vmin > 0], **subkwargs(kwargs, ax.hist2d))\n mappable = im\n elif mode == 'density':\n zei = grid.kdefrom(arr, xtransform=[lambda x: x, np.log][\n logx], ytransform=[lambda y: y, np.log][logy], mode=\n 'edges', **subkwargs(kwargs, 
grid.kdefrom))\n if percentiles:\n vmin = percentile_contour_values_from_meshz(zei, [1.0 -\n vmin])[1][0]\n logger.debug('Updating `vmin` to percentile equiv: {:.2f}'\n .format(vmin))\n if not contours:\n mappable = pcolor(grid.grid_xei, grid.grid_yei, zei,\n cmap=cmap, vmin=vmin, shading=shading, **subkwargs(\n kwargs, pcolor))\n mappable.set_edgecolor(background_color)\n mappable.set_linestyle('None')\n mappable.set_lw(0.0)\n else:\n mappable = _add_contours(grid.grid_xei, grid.grid_yei,\n zi=zei.reshape(grid.grid_xei.shape), ax=ax,\n contours=contours, percentiles=percentiles, cmap=\n cmap, vmin=vmin, **kwargs)\n if relim and extent is not None:\n ax.axis(extent)\n elif projection == 'ternary':\n if shading == 'auto':\n shading = 'flat'\n arr[~(arr > 0).all(axis=1), :] = np.nan\n arr = close(arr)\n if mode == 'hexbin':\n raise NotImplementedError\n coords, zi, _ = ternary_heatmap(arr, bins=bins, mode=mode)\n if percentiles:\n vmin = percentile_contour_values_from_meshz(zi, [1.0 - vmin])[1\n ][0]\n logger.debug('Updating `vmin` to percentile equiv: {:.2f}'.\n format(vmin))\n fltr = (zi != 0) & (zi >= vmin)\n coords = coords[fltr.flatten(), :]\n zi = zi[fltr]\n if not contours:\n tri_poly_collection = pcolor(*coords.T, zi.flatten(), cmap=\n cmap, vmin=vmin, shading=shading, **subkwargs(kwargs,\n pcolor))\n mappable = tri_poly_collection\n else:\n mappable = _add_contours(*coords.T, zi=zi.flatten(), ax=ax,\n contours=contours, percentiles=percentiles, cmap=cmap,\n vmin=vmin, **kwargs)\n ax.set_aspect('equal')\n elif not arr.ndim in [0, 1, 2]:\n raise NotImplementedError\n if colorbar:\n cbkwargs = kwargs.copy()\n cbkwargs['label'] = cbarlabel\n add_colorbar(mappable, **cbkwargs)\n return ax\n\n\ndef _add_contours(*coords, zi=None, ax=None, contours=[], cmap=\n DEFAULT_CONT_COLORMAP, vmin=0.0, extent=None, **kwargs):\n \"\"\"\n Add density-based contours to a plot.\n \"\"\"\n percentiles = kwargs.pop('percentiles', True)\n levels = contours or kwargs.get('levels', 
None)\n pcolor, contour, contourf = get_axis_density_methods(ax)\n if percentiles and not isinstance(levels, int):\n _cs = plot_Z_percentiles(*coords, zi=zi, ax=ax, percentiles=levels,\n extent=extent, cmap=cmap, **kwargs)\n mappable = _cs\n else:\n if levels is None:\n levels = MaxNLocator(nbins=10).tick_values(zi.min(), zi.max())\n elif isinstance(levels, int):\n levels = MaxNLocator(nbins=levels).tick_values(zi.min(), zi.max())\n else:\n raise NotImplementedError\n mappable = contourf(*coords, zi, extent=extent, levels=levels, cmap\n =cmap, vmin=vmin, **kwargs)\n contour(*coords, zi, extent=extent, levels=levels, cmap=cmap, vmin=\n vmin, **kwargs)\n return mappable\n\n\n_add_additional_parameters = True\ndensity.__doc__ = density.__doc__.format(otherparams=['',\n get_additional_params(density, plt.pcolormesh, plt.hist2d, plt.hexbin,\n plt.contour, plt.contourf, header='Other Parameters', indent=4,\n subsections=True)][_add_additional_parameters])\n",
"step-5": "\"\"\"\nKernel desnity estimation plots for geochemical data.\n\"\"\"\nimport copy\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.ticker import MaxNLocator\n\nfrom ...comp.codata import close\nfrom ...util.log import Handle\nfrom ...util.meta import get_additional_params, subkwargs\nfrom ...util.plot.axes import add_colorbar, init_axes\nfrom ...util.plot.density import (\n get_axis_density_methods,\n percentile_contour_values_from_meshz,\n plot_Z_percentiles,\n)\nfrom ...util.plot.style import DEFAULT_CONT_COLORMAP\nfrom .grid import DensityGrid\nfrom .ternary import ternary_heatmap\n\nlogger = Handle(__name__)\n\n\ndef density(\n arr,\n ax=None,\n logx=False,\n logy=False,\n bins=25,\n mode=\"density\",\n extent=None,\n contours=[],\n percentiles=True,\n relim=True,\n cmap=DEFAULT_CONT_COLORMAP,\n shading=\"auto\",\n vmin=0.0,\n colorbar=False,\n **kwargs\n):\n \"\"\"\n Creates diagramatic representation of data density and/or frequency for either\n binary diagrams (X-Y) or ternary plots.\n Additional arguments are typically forwarded\n to respective :mod:`matplotlib` functions\n :func:`~matplotlib.pyplot.pcolormesh`,\n :func:`~matplotlib.pyplot.hist2d`,\n :func:`~matplotlib.pyplot.hexbin`,\n :func:`~matplotlib.pyplot.contour`, and\n :func:`~matplotlib.pyplot.contourf` (see Other Parameters, below).\n\n Parameters\n ----------\n arr : :class:`numpy.ndarray`\n Dataframe from which to draw data.\n ax : :class:`matplotlib.axes.Axes`, `None`\n The subplot to draw on.\n logx : :class:`bool`, `False`\n Whether to use a logspaced *grid* on the x axis. Values strictly >0 required.\n logy : :class:`bool`, `False`\n Whether to use a logspaced *grid* on the y axis. 
Values strictly >0 required.\n bins : :class:`int`, 20\n Number of bins used in the gridded functions (histograms, KDE evaluation grid).\n mode : :class:`str`, 'density'\n Different modes used here: ['density', 'hexbin', 'hist2d']\n extent : :class:`list`\n Predetermined extent of the grid for which to from the histogram/KDE. In the\n general form (xmin, xmax, ymin, ymax).\n contours : :class:`list`\n Contours to add to the plot, where :code:`mode='density'` is used.\n percentiles : :class:`bool`, `True`\n Whether contours specified are to be converted to percentiles.\n relim : :class:`bool`, :code:`True`\n Whether to relimit the plot based on xmin, xmax values.\n cmap : :class:`matplotlib.colors.Colormap`\n Colormap for mapping surfaces.\n vmin : :class:`float`, 0.\n Minimum value for colormap.\n shading : :class:`str`, 'auto'\n Shading to apply to pcolormesh.\n colorbar : :class:`bool`, False\n Whether to append a linked colorbar to the generated mappable image.\n\n {otherparams}\n\n Returns\n -------\n :class:`matplotlib.axes.Axes`\n Axes on which the densityplot is plotted.\n\n .. seealso::\n\n Functions:\n\n :func:`matplotlib.pyplot.pcolormesh`\n :func:`matplotlib.pyplot.hist2d`\n :func:`matplotlib.pyplot.contourf`\n\n Notes\n -----\n The default density estimates and derived contours are generated based on\n kernel density estimates. Assumptions around e.g. 95% of points lying within\n a 95% contour won't necessarily be valid for non-normally distributed data\n (instead, this represents the approximate 95% percentile on the kernel\n density estimate). 
Note that contours are currently only generated; for\n `mode=\"density\"`; future updates may allow the use of a histogram\n basis, which would give results closer to 95% data percentiles.\n\n Todo\n ----\n * Allow generation of contours from histogram data, rather than just\n the kernel density estimate.\n * Implement an option and filter to 'scatter' points below the minimum threshold\n or maximum percentile contours.\n \"\"\"\n if (mode == \"density\") & np.isclose(vmin, 0.0): # if vmin is not specified\n vmin = 0.02 # 2% max height | 98th percentile\n\n if arr.shape[-1] == 3:\n projection = \"ternary\"\n else:\n projection = None\n\n ax = init_axes(ax=ax, projection=projection, **kwargs)\n\n pcolor, contour, contourf = get_axis_density_methods(ax)\n background_color = (*ax.patch.get_facecolor()[:-1], 0.0)\n\n if cmap is not None:\n if isinstance(cmap, str):\n cmap = plt.get_cmap(cmap)\n cmap = copy.copy(cmap) # without this, it would modify the global cmap\n cmap.set_under((1, 1, 1, 0))\n\n if mode == \"density\":\n cbarlabel = \"Kernel Density Estimate\"\n else:\n cbarlabel = \"Frequency\"\n\n valid_rows = np.isfinite(arr).all(axis=-1)\n\n if (mode in [\"hexbin\", \"hist2d\"]) and contours:\n raise NotImplementedError(\n \"Contours are not currently implemented for 'hexbin' or 'hist2d' modes.\"\n )\n\n if (arr.size > 0) and valid_rows.any():\n # Data can't be plotted if there's any nans, so we can exclude these\n arr = arr[valid_rows]\n\n if projection is None: # binary\n x, y = arr.T\n grid = DensityGrid(\n x,\n y,\n bins=bins,\n logx=logx,\n logy=logy,\n extent=extent,\n **subkwargs(kwargs, DensityGrid)\n )\n if mode == \"hexbin\":\n # extent values are exponents (i.e. 
3 -> 10**3)\n mappable = ax.hexbin(\n x,\n y,\n gridsize=bins,\n cmap=cmap,\n extent=grid.get_hex_extent(),\n xscale=[\"linear\", \"log\"][logx],\n yscale=[\"linear\", \"log\"][logy],\n **subkwargs(kwargs, ax.hexbin)\n )\n\n elif mode == \"hist2d\":\n _, _, _, im = ax.hist2d(\n x,\n y,\n bins=[grid.grid_xe, grid.grid_ye],\n range=grid.get_range(),\n cmap=cmap,\n cmin=[0, 1][vmin > 0],\n **subkwargs(kwargs, ax.hist2d)\n )\n mappable = im\n\n elif mode == \"density\":\n zei = grid.kdefrom(\n arr,\n xtransform=[lambda x: x, np.log][logx],\n ytransform=[lambda y: y, np.log][logy],\n mode=\"edges\",\n **subkwargs(kwargs, grid.kdefrom)\n )\n\n if percentiles: # 98th percentile\n vmin = percentile_contour_values_from_meshz(zei, [1.0 - vmin])[1][0]\n logger.debug(\n \"Updating `vmin` to percentile equiv: {:.2f}\".format(vmin)\n )\n\n if not contours:\n # pcolormesh using bin edges\n mappable = pcolor(\n grid.grid_xei,\n grid.grid_yei,\n zei,\n cmap=cmap,\n vmin=vmin,\n shading=shading,\n **subkwargs(kwargs, pcolor)\n )\n mappable.set_edgecolor(background_color)\n mappable.set_linestyle(\"None\")\n mappable.set_lw(0.0)\n else:\n mappable = _add_contours(\n grid.grid_xei,\n grid.grid_yei,\n zi=zei.reshape(grid.grid_xei.shape),\n ax=ax,\n contours=contours,\n percentiles=percentiles,\n cmap=cmap,\n vmin=vmin,\n **kwargs\n )\n if relim and (extent is not None):\n ax.axis(extent)\n elif projection == \"ternary\": # ternary\n if shading == \"auto\":\n shading = \"flat\" # auto cant' be passed to tripcolor\n # zeros make nans in this case, due to the heatmap calculations\n arr[~(arr > 0).all(axis=1), :] = np.nan\n arr = close(arr)\n if mode == \"hexbin\":\n raise NotImplementedError\n # density, histogram etc parsed here\n coords, zi, _ = ternary_heatmap(arr, bins=bins, mode=mode)\n\n if percentiles: # 98th percentile\n vmin = percentile_contour_values_from_meshz(zi, [1.0 - vmin])[1][0]\n logger.debug(\"Updating `vmin` to percentile equiv: {:.2f}\".format(vmin))\n\n # remove 
coords where H==0, as ax.tripcolor can't deal with variable alpha :'(\n fltr = (zi != 0) & (zi >= vmin)\n coords = coords[fltr.flatten(), :]\n zi = zi[fltr]\n\n if not contours:\n tri_poly_collection = pcolor(\n *coords.T,\n zi.flatten(),\n cmap=cmap,\n vmin=vmin,\n shading=shading,\n **subkwargs(kwargs, pcolor)\n )\n\n mappable = tri_poly_collection\n else:\n mappable = _add_contours(\n *coords.T,\n zi=zi.flatten(),\n ax=ax,\n contours=contours,\n percentiles=percentiles,\n cmap=cmap,\n vmin=vmin,\n **kwargs\n )\n ax.set_aspect(\"equal\")\n else:\n if not arr.ndim in [0, 1, 2]:\n raise NotImplementedError\n\n if colorbar:\n cbkwargs = kwargs.copy()\n cbkwargs[\"label\"] = cbarlabel\n add_colorbar(mappable, **cbkwargs)\n\n return ax\n\n\ndef _add_contours(\n *coords,\n zi=None,\n ax=None,\n contours=[],\n cmap=DEFAULT_CONT_COLORMAP,\n vmin=0.0,\n extent=None,\n **kwargs\n):\n \"\"\"\n Add density-based contours to a plot.\n \"\"\"\n # get the contour levels\n percentiles = kwargs.pop(\"percentiles\", True)\n levels = contours or kwargs.get(\"levels\", None)\n pcolor, contour, contourf = get_axis_density_methods(ax)\n if percentiles and not isinstance(levels, int):\n # plot individual percentile contours\n _cs = plot_Z_percentiles(\n *coords,\n zi=zi,\n ax=ax,\n percentiles=levels,\n extent=extent,\n cmap=cmap,\n **kwargs\n )\n mappable = _cs\n else:\n # plot interval contours\n if levels is None:\n levels = MaxNLocator(nbins=10).tick_values(zi.min(), zi.max())\n elif isinstance(levels, int):\n levels = MaxNLocator(nbins=levels).tick_values(zi.min(), zi.max())\n else:\n raise NotImplementedError\n # filled contours\n mappable = contourf(\n *coords, zi, extent=extent, levels=levels, cmap=cmap, vmin=vmin, **kwargs\n )\n # contours\n contour(\n *coords, zi, extent=extent, levels=levels, cmap=cmap, vmin=vmin, **kwargs\n )\n return mappable\n\n\n_add_additional_parameters = True\n\ndensity.__doc__ = density.__doc__.format(\n otherparams=[\n \"\",\n 
get_additional_params(\n density,\n plt.pcolormesh,\n plt.hist2d,\n plt.hexbin,\n plt.contour,\n plt.contourf,\n header=\"Other Parameters\",\n indent=4,\n subsections=True,\n ),\n ][_add_additional_parameters]\n)\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class DynamicVPTree:
<|reserved_special_token_0|>
def __init__(self, dist_fn, min_tree_size=4):
"""
:param dist_fn: Metric distance function used for vp-trees
:param min_tree_size: Minimum number of nodes to form a tree (extra nodes are stored in a pool until the number is reached)
"""
self.dist_fn = dist_fn
self.trees = []
self.pool = []
self.min_tree_size = min_tree_size
def insert(self, item):
"""
Insert item into dynamic vp tree by first adding to pool, and then building a tree from the pool if min size reached
Then merge trees of equal sizes so that there are at most log(log (n)) trees, with the largest tree having roughly n/2 nodes
"""
self.pool.append(item)
if len(self.pool) == self.min_tree_size:
self.trees.append(_ExtendedVPTree(self.pool, self.dist_fn))
self.pool = []
while len(self.trees) > 1 and self.trees[-1].size == self.trees[-2
].size:
a = self.trees.pop()
b = self.trees.pop()
self.trees.append(_ExtendedVPTree(a.points + b.points, self.
dist_fn))
def nearest(self, query):
"""
Return node nearest to query by finding nearest node in each tree and returning the global minimum (including nodes in pool)
"""
nearest_trees = list(map(lambda t: t.get_nearest_neighbor(query),
self.trees))
distances_pool = list(zip(map(lambda x: self.dist_fn(x, query),
self.pool), self.pool))
best = None
best_cost = np.inf
for cost, near in (nearest_trees + distances_pool):
if cost <= best_cost:
best = near
best_cost = cost
return best
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DynamicVPTree:
"""
Dynamic vp-tree implemented using index folding
"""
def __init__(self, dist_fn, min_tree_size=4):
"""
:param dist_fn: Metric distance function used for vp-trees
:param min_tree_size: Minimum number of nodes to form a tree (extra nodes are stored in a pool until the number is reached)
"""
self.dist_fn = dist_fn
self.trees = []
self.pool = []
self.min_tree_size = min_tree_size
def insert(self, item):
"""
Insert item into dynamic vp tree by first adding to pool, and then building a tree from the pool if min size reached
Then merge trees of equal sizes so that there are at most log(log (n)) trees, with the largest tree having roughly n/2 nodes
"""
self.pool.append(item)
if len(self.pool) == self.min_tree_size:
self.trees.append(_ExtendedVPTree(self.pool, self.dist_fn))
self.pool = []
while len(self.trees) > 1 and self.trees[-1].size == self.trees[-2
].size:
a = self.trees.pop()
b = self.trees.pop()
self.trees.append(_ExtendedVPTree(a.points + b.points, self.
dist_fn))
def nearest(self, query):
"""
Return node nearest to query by finding nearest node in each tree and returning the global minimum (including nodes in pool)
"""
nearest_trees = list(map(lambda t: t.get_nearest_neighbor(query),
self.trees))
distances_pool = list(zip(map(lambda x: self.dist_fn(x, query),
self.pool), self.pool))
best = None
best_cost = np.inf
for cost, near in (nearest_trees + distances_pool):
if cost <= best_cost:
best = near
best_cost = cost
return best
def neighbourhood(self, query, radius):
"""
Return all nodes within distance radius of the given query, by collating neighbourhoods for each internal tree (and pool)
"""
tree_neighbourhood = lambda tree: list(map(lambda x: x[1], tree.
get_all_in_range(query, radius)))
neighbourhood_trees = list(itertools.chain.from_iterable(map(
tree_neighbourhood, self.trees)))
return neighbourhood_trees + list(filter(lambda x: self.dist_fn(x,
query) < radius, self.pool))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class _ExtendedVPTree(vptree.VPTree):
<|reserved_special_token_0|>
def __init__(self, points, dist_fn):
"""
:param points: List of points to add to the vp-tree
:param dist_fn: Metric distance function
"""
super().__init__(points, dist_fn)
self.points = points
self.size = len(points)
def get_n_nearest_neighbors(self, query, n_neighbors):
"""
Override parent method to use <= when finding nearest neighbours to ensure a neighbour is returned even at infinite/nan distance
"""
if not isinstance(n_neighbors, int) or n_neighbors < 1:
raise ValueError('n_neighbors must be strictly positive integer')
neighbors = vptree._AutoSortingList(max_size=n_neighbors)
nodes_to_visit = [(self, 0)]
furthest_d = np.inf
while len(nodes_to_visit) > 0:
node, d0 = nodes_to_visit.pop(0)
if node is None or d0 > furthest_d:
continue
d = self.dist_fn(query, node.vp)
if d <= furthest_d:
neighbors.append((d, node.vp))
furthest_d, _ = neighbors[-1]
if node._is_leaf():
continue
if node.left_min <= d <= node.left_max:
nodes_to_visit.insert(0, (node.left, 0))
elif node.left_min - furthest_d <= d <= node.left_max + furthest_d:
nodes_to_visit.append((node.left, node.left_min - d if d <
node.left_min else d - node.left_max))
if node.right_min <= d <= node.right_max:
nodes_to_visit.insert(0, (node.right, 0))
elif node.right_min - furthest_d <= d <= node.right_max + furthest_d:
nodes_to_visit.append((node.right, node.right_min - d if d <
node.right_min else d - node.right_max))
if len(neighbors) == 0:
neighbors = [(np.nan, point) for point in self.points[:n_neighbors]
]
return list(neighbors)
class DynamicVPTree:
"""
Dynamic vp-tree implemented using index folding
"""
def __init__(self, dist_fn, min_tree_size=4):
"""
:param dist_fn: Metric distance function used for vp-trees
:param min_tree_size: Minimum number of nodes to form a tree (extra nodes are stored in a pool until the number is reached)
"""
self.dist_fn = dist_fn
self.trees = []
self.pool = []
self.min_tree_size = min_tree_size
def insert(self, item):
"""
Insert item into dynamic vp tree by first adding to pool, and then building a tree from the pool if min size reached
Then merge trees of equal sizes so that there are at most log(log (n)) trees, with the largest tree having roughly n/2 nodes
"""
self.pool.append(item)
if len(self.pool) == self.min_tree_size:
self.trees.append(_ExtendedVPTree(self.pool, self.dist_fn))
self.pool = []
while len(self.trees) > 1 and self.trees[-1].size == self.trees[-2
].size:
a = self.trees.pop()
b = self.trees.pop()
self.trees.append(_ExtendedVPTree(a.points + b.points, self.
dist_fn))
def nearest(self, query):
"""
Return node nearest to query by finding nearest node in each tree and returning the global minimum (including nodes in pool)
"""
nearest_trees = list(map(lambda t: t.get_nearest_neighbor(query),
self.trees))
distances_pool = list(zip(map(lambda x: self.dist_fn(x, query),
self.pool), self.pool))
best = None
best_cost = np.inf
for cost, near in (nearest_trees + distances_pool):
if cost <= best_cost:
best = near
best_cost = cost
return best
def neighbourhood(self, query, radius):
"""
Return all nodes within distance radius of the given query, by collating neighbourhoods for each internal tree (and pool)
"""
tree_neighbourhood = lambda tree: list(map(lambda x: x[1], tree.
get_all_in_range(query, radius)))
neighbourhood_trees = list(itertools.chain.from_iterable(map(
tree_neighbourhood, self.trees)))
return neighbourhood_trees + list(filter(lambda x: self.dist_fn(x,
query) < radius, self.pool))
<|reserved_special_token_1|>
import vptree, itertools
import numpy as np
class _ExtendedVPTree(vptree.VPTree):
"""
VPTree class extended to include the list of points within the tree
"""
def __init__(self, points, dist_fn):
"""
:param points: List of points to add to the vp-tree
:param dist_fn: Metric distance function
"""
super().__init__(points, dist_fn)
self.points = points
self.size = len(points)
def get_n_nearest_neighbors(self, query, n_neighbors):
"""
Override parent method to use <= when finding nearest neighbours to ensure a neighbour is returned even at infinite/nan distance
"""
if not isinstance(n_neighbors, int) or n_neighbors < 1:
raise ValueError('n_neighbors must be strictly positive integer')
neighbors = vptree._AutoSortingList(max_size=n_neighbors)
nodes_to_visit = [(self, 0)]
furthest_d = np.inf
while len(nodes_to_visit) > 0:
node, d0 = nodes_to_visit.pop(0)
if node is None or d0 > furthest_d:
continue
d = self.dist_fn(query, node.vp)
if d <= furthest_d:
neighbors.append((d, node.vp))
furthest_d, _ = neighbors[-1]
if node._is_leaf():
continue
if node.left_min <= d <= node.left_max:
nodes_to_visit.insert(0, (node.left, 0))
elif node.left_min - furthest_d <= d <= node.left_max + furthest_d:
nodes_to_visit.append((node.left, node.left_min - d if d <
node.left_min else d - node.left_max))
if node.right_min <= d <= node.right_max:
nodes_to_visit.insert(0, (node.right, 0))
elif node.right_min - furthest_d <= d <= node.right_max + furthest_d:
nodes_to_visit.append((node.right, node.right_min - d if d <
node.right_min else d - node.right_max))
if len(neighbors) == 0:
neighbors = [(np.nan, point) for point in self.points[:n_neighbors]
]
return list(neighbors)
class DynamicVPTree:
"""
Dynamic vp-tree implemented using index folding
"""
def __init__(self, dist_fn, min_tree_size=4):
"""
:param dist_fn: Metric distance function used for vp-trees
:param min_tree_size: Minimum number of nodes to form a tree (extra nodes are stored in a pool until the number is reached)
"""
self.dist_fn = dist_fn
self.trees = []
self.pool = []
self.min_tree_size = min_tree_size
def insert(self, item):
"""
Insert item into dynamic vp tree by first adding to pool, and then building a tree from the pool if min size reached
Then merge trees of equal sizes so that there are at most log(log (n)) trees, with the largest tree having roughly n/2 nodes
"""
self.pool.append(item)
if len(self.pool) == self.min_tree_size:
self.trees.append(_ExtendedVPTree(self.pool, self.dist_fn))
self.pool = []
while len(self.trees) > 1 and self.trees[-1].size == self.trees[-2
].size:
a = self.trees.pop()
b = self.trees.pop()
self.trees.append(_ExtendedVPTree(a.points + b.points, self.
dist_fn))
def nearest(self, query):
"""
Return node nearest to query by finding nearest node in each tree and returning the global minimum (including nodes in pool)
"""
nearest_trees = list(map(lambda t: t.get_nearest_neighbor(query),
self.trees))
distances_pool = list(zip(map(lambda x: self.dist_fn(x, query),
self.pool), self.pool))
best = None
best_cost = np.inf
for cost, near in (nearest_trees + distances_pool):
if cost <= best_cost:
best = near
best_cost = cost
return best
def neighbourhood(self, query, radius):
"""
Return all nodes within distance radius of the given query, by collating neighbourhoods for each internal tree (and pool)
"""
tree_neighbourhood = lambda tree: list(map(lambda x: x[1], tree.
get_all_in_range(query, radius)))
neighbourhood_trees = list(itertools.chain.from_iterable(map(
tree_neighbourhood, self.trees)))
return neighbourhood_trees + list(filter(lambda x: self.dist_fn(x,
query) < radius, self.pool))
<|reserved_special_token_1|>
#Copyright (c) 2020 Ocado. All Rights Reserved.
import vptree, itertools
import numpy as np
class _ExtendedVPTree(vptree.VPTree):
    """
    VPTree class extended to include the list of points within the tree.

    Keeping ``points`` and ``size`` on the instance lets the dynamic wrapper
    below merge two static trees by rebuilding from their concatenated point
    lists, and lets equal-size trees be detected in O(1).
    """
    def __init__(self, points, dist_fn):
        """
        :param points: List of points to add to the vp-tree
        :param dist_fn: Metric distance function
        """
        super().__init__(points, dist_fn)
        self.points = points  # retained so merged trees can be rebuilt
        self.size = len(points)  # O(1) size check used by the folding scheme

    def get_n_nearest_neighbors(self, query, n_neighbors):
        """
        Override parent method to use <= when finding nearest neighbours to ensure a neighbour is returned even at infinite/nan distance

        Best-first traversal of the vp-tree: each queued entry is
        ``(node, lower_bound_on_distance)``; branches whose lower bound
        exceeds the current furthest kept distance are pruned.

        NOTE(review): relies on private internals of the ``vptree`` package
        (``_AutoSortingList``, ``_is_leaf``, ``left_min``/``right_max``) —
        verify against the installed vptree version before upgrading it.
        """
        if not isinstance(n_neighbors, int) or n_neighbors < 1:
            raise ValueError('n_neighbors must be strictly positive integer')
        # Auto-sorting, size-capped list: appending keeps the n best (smallest
        # distance) pairs; neighbors[-1] is always the current furthest kept.
        neighbors = vptree._AutoSortingList(max_size=n_neighbors)
        nodes_to_visit = [(self, 0)]
        furthest_d = np.inf
        while len(nodes_to_visit) > 0:
            node, d0 = nodes_to_visit.pop(0)
            if node is None or d0 > furthest_d:
                continue  # branch cannot contain anything closer; prune
            d = self.dist_fn(query, node.vp)
            if d <= furthest_d: #Replaced < with <=
                neighbors.append((d, node.vp))
                furthest_d, _ = neighbors[-1]
            if node._is_leaf():
                continue
            # Visit a child immediately (lower bound 0) when d falls inside its
            # distance band; otherwise enqueue it with its true lower bound.
            if node.left_min <= d <= node.left_max:
                nodes_to_visit.insert(0, (node.left, 0))
            elif node.left_min - furthest_d <= d <= node.left_max + furthest_d:
                nodes_to_visit.append((node.left,
                                       node.left_min - d if d < node.left_min
                                       else d - node.left_max))
            if node.right_min <= d <= node.right_max:
                nodes_to_visit.insert(0, (node.right, 0))
            elif node.right_min - furthest_d <= d <= node.right_max + furthest_d:
                nodes_to_visit.append((node.right,
                                       node.right_min - d if d < node.right_min
                                       else d - node.right_max))
        if len(neighbors) == 0:
            neighbors = [(np.nan, point) for point in self.points[:n_neighbors]] #Return any point(s) if query contains np.nan
        return list(neighbors)
class DynamicVPTree:
    """
    Dynamic vp-tree built from a logarithmic collection of static vp-trees.

    New points accumulate in a small pool; once the pool reaches
    ``min_tree_size`` it is frozen into a static ``_ExtendedVPTree``.
    Trees of equal size are then repeatedly merged, so at most O(log n)
    static trees exist at any time (the "index folding" scheme).
    """

    def __init__(self, dist_fn, min_tree_size=4):
        """
        :param dist_fn: Metric distance function used for vp-trees
        :param min_tree_size: Minimum number of nodes to form a tree (extra
            nodes are stored in a pool until that number is reached)
        """
        self.dist_fn = dist_fn
        self.trees = []
        self.pool = []
        self.min_tree_size = min_tree_size

    def insert(self, item):
        """
        Add ``item`` to the index.

        The item first lands in the pool; a full pool is frozen into a static
        tree, after which equal-sized trees are merged (like carry propagation
        when incrementing a binary counter).
        """
        self.pool.append(item)
        if len(self.pool) == self.min_tree_size:
            self.trees.append(_ExtendedVPTree(self.pool, self.dist_fn))
            self.pool = []
        while len(self.trees) > 1 and self.trees[-1].size == self.trees[-2].size:
            newer = self.trees.pop()
            older = self.trees.pop()
            merged = _ExtendedVPTree(newer.points + older.points, self.dist_fn)
            self.trees.append(merged)

    def nearest(self, query):
        """
        Return the stored point closest to ``query`` (pool points included),
        or None when the structure is empty.
        """
        candidates = [tree.get_nearest_neighbor(query) for tree in self.trees]
        candidates += [(self.dist_fn(point, query), point) for point in self.pool]
        winner = None
        winner_cost = np.inf
        for cost, point in candidates:
            if cost <= winner_cost:  # <= so later candidates win ties
                winner = point
                winner_cost = cost
        return winner

    def neighbourhood(self, query, radius):
        """
        Return every stored point within ``radius`` of ``query``, collating
        range results from each static tree and then the pool.
        """
        hits = []
        for tree in self.trees:
            hits.extend(point for _, point in tree.get_all_in_range(query, radius))
        hits.extend(point for point in self.pool
                    if self.dist_fn(point, query) < radius)
        return hits
|
flexible
|
{
"blob_id": "22e6616fb98ecfb256587c3767c7c289decc6bf6",
"index": 3049,
"step-1": "<mask token>\n\n\nclass DynamicVPTree:\n <mask token>\n\n def __init__(self, dist_fn, min_tree_size=4):\n \"\"\"\n :param dist_fn: Metric distance function used for vp-trees\n :param min_tree_size: Minimum number of nodes to form a tree (extra nodes are stored in a pool until the number is reached)\n \"\"\"\n self.dist_fn = dist_fn\n self.trees = []\n self.pool = []\n self.min_tree_size = min_tree_size\n\n def insert(self, item):\n \"\"\"\n Insert item into dynamic vp tree by first adding to pool, and then building a tree from the pool if min size reached\n Then merge trees of equal sizes so that there are at most log(log (n)) trees, with the largest tree having roughly n/2 nodes\n \"\"\"\n self.pool.append(item)\n if len(self.pool) == self.min_tree_size:\n self.trees.append(_ExtendedVPTree(self.pool, self.dist_fn))\n self.pool = []\n while len(self.trees) > 1 and self.trees[-1].size == self.trees[-2\n ].size:\n a = self.trees.pop()\n b = self.trees.pop()\n self.trees.append(_ExtendedVPTree(a.points + b.points, self.\n dist_fn))\n\n def nearest(self, query):\n \"\"\"\n Return node nearest to query by finding nearest node in each tree and returning the global minimum (including nodes in pool)\n \"\"\"\n nearest_trees = list(map(lambda t: t.get_nearest_neighbor(query),\n self.trees))\n distances_pool = list(zip(map(lambda x: self.dist_fn(x, query),\n self.pool), self.pool))\n best = None\n best_cost = np.inf\n for cost, near in (nearest_trees + distances_pool):\n if cost <= best_cost:\n best = near\n best_cost = cost\n return best\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass DynamicVPTree:\n \"\"\"\n Dynamic vp-tree implemented using index folding\n \"\"\"\n\n def __init__(self, dist_fn, min_tree_size=4):\n \"\"\"\n :param dist_fn: Metric distance function used for vp-trees\n :param min_tree_size: Minimum number of nodes to form a tree (extra nodes are stored in a pool until the number is reached)\n \"\"\"\n self.dist_fn = dist_fn\n self.trees = []\n self.pool = []\n self.min_tree_size = min_tree_size\n\n def insert(self, item):\n \"\"\"\n Insert item into dynamic vp tree by first adding to pool, and then building a tree from the pool if min size reached\n Then merge trees of equal sizes so that there are at most log(log (n)) trees, with the largest tree having roughly n/2 nodes\n \"\"\"\n self.pool.append(item)\n if len(self.pool) == self.min_tree_size:\n self.trees.append(_ExtendedVPTree(self.pool, self.dist_fn))\n self.pool = []\n while len(self.trees) > 1 and self.trees[-1].size == self.trees[-2\n ].size:\n a = self.trees.pop()\n b = self.trees.pop()\n self.trees.append(_ExtendedVPTree(a.points + b.points, self.\n dist_fn))\n\n def nearest(self, query):\n \"\"\"\n Return node nearest to query by finding nearest node in each tree and returning the global minimum (including nodes in pool)\n \"\"\"\n nearest_trees = list(map(lambda t: t.get_nearest_neighbor(query),\n self.trees))\n distances_pool = list(zip(map(lambda x: self.dist_fn(x, query),\n self.pool), self.pool))\n best = None\n best_cost = np.inf\n for cost, near in (nearest_trees + distances_pool):\n if cost <= best_cost:\n best = near\n best_cost = cost\n return best\n\n def neighbourhood(self, query, radius):\n \"\"\"\n Return all nodes within distance radius of the given query, by collating neighbourhoods for each internal tree (and pool)\n \"\"\"\n tree_neighbourhood = lambda tree: list(map(lambda x: x[1], tree.\n get_all_in_range(query, radius)))\n neighbourhood_trees = list(itertools.chain.from_iterable(map(\n tree_neighbourhood, 
self.trees)))\n return neighbourhood_trees + list(filter(lambda x: self.dist_fn(x,\n query) < radius, self.pool))\n",
"step-3": "<mask token>\n\n\nclass _ExtendedVPTree(vptree.VPTree):\n <mask token>\n\n def __init__(self, points, dist_fn):\n \"\"\"\n :param points: List of points to add to the vp-tree\n :param dist_fn: Metric distance function\n \"\"\"\n super().__init__(points, dist_fn)\n self.points = points\n self.size = len(points)\n\n def get_n_nearest_neighbors(self, query, n_neighbors):\n \"\"\"\n Override parent method to use <= when finding nearest neighbours to ensure a neighbour is returned even at infinite/nan distance\n \"\"\"\n if not isinstance(n_neighbors, int) or n_neighbors < 1:\n raise ValueError('n_neighbors must be strictly positive integer')\n neighbors = vptree._AutoSortingList(max_size=n_neighbors)\n nodes_to_visit = [(self, 0)]\n furthest_d = np.inf\n while len(nodes_to_visit) > 0:\n node, d0 = nodes_to_visit.pop(0)\n if node is None or d0 > furthest_d:\n continue\n d = self.dist_fn(query, node.vp)\n if d <= furthest_d:\n neighbors.append((d, node.vp))\n furthest_d, _ = neighbors[-1]\n if node._is_leaf():\n continue\n if node.left_min <= d <= node.left_max:\n nodes_to_visit.insert(0, (node.left, 0))\n elif node.left_min - furthest_d <= d <= node.left_max + furthest_d:\n nodes_to_visit.append((node.left, node.left_min - d if d <\n node.left_min else d - node.left_max))\n if node.right_min <= d <= node.right_max:\n nodes_to_visit.insert(0, (node.right, 0))\n elif node.right_min - furthest_d <= d <= node.right_max + furthest_d:\n nodes_to_visit.append((node.right, node.right_min - d if d <\n node.right_min else d - node.right_max))\n if len(neighbors) == 0:\n neighbors = [(np.nan, point) for point in self.points[:n_neighbors]\n ]\n return list(neighbors)\n\n\nclass DynamicVPTree:\n \"\"\"\n Dynamic vp-tree implemented using index folding\n \"\"\"\n\n def __init__(self, dist_fn, min_tree_size=4):\n \"\"\"\n :param dist_fn: Metric distance function used for vp-trees\n :param min_tree_size: Minimum number of nodes to form a tree (extra nodes are stored in a 
pool until the number is reached)\n \"\"\"\n self.dist_fn = dist_fn\n self.trees = []\n self.pool = []\n self.min_tree_size = min_tree_size\n\n def insert(self, item):\n \"\"\"\n Insert item into dynamic vp tree by first adding to pool, and then building a tree from the pool if min size reached\n Then merge trees of equal sizes so that there are at most log(log (n)) trees, with the largest tree having roughly n/2 nodes\n \"\"\"\n self.pool.append(item)\n if len(self.pool) == self.min_tree_size:\n self.trees.append(_ExtendedVPTree(self.pool, self.dist_fn))\n self.pool = []\n while len(self.trees) > 1 and self.trees[-1].size == self.trees[-2\n ].size:\n a = self.trees.pop()\n b = self.trees.pop()\n self.trees.append(_ExtendedVPTree(a.points + b.points, self.\n dist_fn))\n\n def nearest(self, query):\n \"\"\"\n Return node nearest to query by finding nearest node in each tree and returning the global minimum (including nodes in pool)\n \"\"\"\n nearest_trees = list(map(lambda t: t.get_nearest_neighbor(query),\n self.trees))\n distances_pool = list(zip(map(lambda x: self.dist_fn(x, query),\n self.pool), self.pool))\n best = None\n best_cost = np.inf\n for cost, near in (nearest_trees + distances_pool):\n if cost <= best_cost:\n best = near\n best_cost = cost\n return best\n\n def neighbourhood(self, query, radius):\n \"\"\"\n Return all nodes within distance radius of the given query, by collating neighbourhoods for each internal tree (and pool)\n \"\"\"\n tree_neighbourhood = lambda tree: list(map(lambda x: x[1], tree.\n get_all_in_range(query, radius)))\n neighbourhood_trees = list(itertools.chain.from_iterable(map(\n tree_neighbourhood, self.trees)))\n return neighbourhood_trees + list(filter(lambda x: self.dist_fn(x,\n query) < radius, self.pool))\n",
"step-4": "import vptree, itertools\nimport numpy as np\n\n\nclass _ExtendedVPTree(vptree.VPTree):\n \"\"\"\n VPTree class extended to include the list of points within the tree\n \"\"\"\n\n def __init__(self, points, dist_fn):\n \"\"\"\n :param points: List of points to add to the vp-tree\n :param dist_fn: Metric distance function\n \"\"\"\n super().__init__(points, dist_fn)\n self.points = points\n self.size = len(points)\n\n def get_n_nearest_neighbors(self, query, n_neighbors):\n \"\"\"\n Override parent method to use <= when finding nearest neighbours to ensure a neighbour is returned even at infinite/nan distance\n \"\"\"\n if not isinstance(n_neighbors, int) or n_neighbors < 1:\n raise ValueError('n_neighbors must be strictly positive integer')\n neighbors = vptree._AutoSortingList(max_size=n_neighbors)\n nodes_to_visit = [(self, 0)]\n furthest_d = np.inf\n while len(nodes_to_visit) > 0:\n node, d0 = nodes_to_visit.pop(0)\n if node is None or d0 > furthest_d:\n continue\n d = self.dist_fn(query, node.vp)\n if d <= furthest_d:\n neighbors.append((d, node.vp))\n furthest_d, _ = neighbors[-1]\n if node._is_leaf():\n continue\n if node.left_min <= d <= node.left_max:\n nodes_to_visit.insert(0, (node.left, 0))\n elif node.left_min - furthest_d <= d <= node.left_max + furthest_d:\n nodes_to_visit.append((node.left, node.left_min - d if d <\n node.left_min else d - node.left_max))\n if node.right_min <= d <= node.right_max:\n nodes_to_visit.insert(0, (node.right, 0))\n elif node.right_min - furthest_d <= d <= node.right_max + furthest_d:\n nodes_to_visit.append((node.right, node.right_min - d if d <\n node.right_min else d - node.right_max))\n if len(neighbors) == 0:\n neighbors = [(np.nan, point) for point in self.points[:n_neighbors]\n ]\n return list(neighbors)\n\n\nclass DynamicVPTree:\n \"\"\"\n Dynamic vp-tree implemented using index folding\n \"\"\"\n\n def __init__(self, dist_fn, min_tree_size=4):\n \"\"\"\n :param dist_fn: Metric distance function used for 
vp-trees\n :param min_tree_size: Minimum number of nodes to form a tree (extra nodes are stored in a pool until the number is reached)\n \"\"\"\n self.dist_fn = dist_fn\n self.trees = []\n self.pool = []\n self.min_tree_size = min_tree_size\n\n def insert(self, item):\n \"\"\"\n Insert item into dynamic vp tree by first adding to pool, and then building a tree from the pool if min size reached\n Then merge trees of equal sizes so that there are at most log(log (n)) trees, with the largest tree having roughly n/2 nodes\n \"\"\"\n self.pool.append(item)\n if len(self.pool) == self.min_tree_size:\n self.trees.append(_ExtendedVPTree(self.pool, self.dist_fn))\n self.pool = []\n while len(self.trees) > 1 and self.trees[-1].size == self.trees[-2\n ].size:\n a = self.trees.pop()\n b = self.trees.pop()\n self.trees.append(_ExtendedVPTree(a.points + b.points, self.\n dist_fn))\n\n def nearest(self, query):\n \"\"\"\n Return node nearest to query by finding nearest node in each tree and returning the global minimum (including nodes in pool)\n \"\"\"\n nearest_trees = list(map(lambda t: t.get_nearest_neighbor(query),\n self.trees))\n distances_pool = list(zip(map(lambda x: self.dist_fn(x, query),\n self.pool), self.pool))\n best = None\n best_cost = np.inf\n for cost, near in (nearest_trees + distances_pool):\n if cost <= best_cost:\n best = near\n best_cost = cost\n return best\n\n def neighbourhood(self, query, radius):\n \"\"\"\n Return all nodes within distance radius of the given query, by collating neighbourhoods for each internal tree (and pool)\n \"\"\"\n tree_neighbourhood = lambda tree: list(map(lambda x: x[1], tree.\n get_all_in_range(query, radius)))\n neighbourhood_trees = list(itertools.chain.from_iterable(map(\n tree_neighbourhood, self.trees)))\n return neighbourhood_trees + list(filter(lambda x: self.dist_fn(x,\n query) < radius, self.pool))\n",
"step-5": "#Copyright (c) 2020 Ocado. All Rights Reserved.\n\nimport vptree, itertools\nimport numpy as np\n\n\nclass _ExtendedVPTree(vptree.VPTree):\n \"\"\"\n VPTree class extended to include the list of points within the tree\n \"\"\"\n def __init__(self, points, dist_fn):\n \"\"\"\n :param points: List of points to add to the vp-tree\n :param dist_fn: Metric distance function\n \"\"\"\n super().__init__(points, dist_fn)\n self.points = points\n self.size = len(points)\n\n def get_n_nearest_neighbors(self, query, n_neighbors):\n \"\"\"\n Override parent method to use <= when finding nearest neighbours to ensure a neighbour is returned even at infinite/nan distance\n \"\"\"\n if not isinstance(n_neighbors, int) or n_neighbors < 1:\n raise ValueError('n_neighbors must be strictly positive integer')\n neighbors = vptree._AutoSortingList(max_size=n_neighbors)\n nodes_to_visit = [(self, 0)]\n furthest_d = np.inf\n while len(nodes_to_visit) > 0:\n node, d0 = nodes_to_visit.pop(0)\n if node is None or d0 > furthest_d:\n continue\n d = self.dist_fn(query, node.vp)\n if d <= furthest_d: #Replaced < with <=\n neighbors.append((d, node.vp))\n furthest_d, _ = neighbors[-1]\n if node._is_leaf():\n continue\n if node.left_min <= d <= node.left_max:\n nodes_to_visit.insert(0, (node.left, 0))\n elif node.left_min - furthest_d <= d <= node.left_max + furthest_d:\n nodes_to_visit.append((node.left,\n node.left_min - d if d < node.left_min\n else d - node.left_max))\n if node.right_min <= d <= node.right_max:\n nodes_to_visit.insert(0, (node.right, 0))\n elif node.right_min - furthest_d <= d <= node.right_max + furthest_d:\n nodes_to_visit.append((node.right,\n node.right_min - d if d < node.right_min\n else d - node.right_max))\n if len(neighbors) == 0:\n neighbors = [(np.nan, point) for point in self.points[:n_neighbors]] #Return any point(s) if query contains np.nan\n return list(neighbors)\n\n\nclass DynamicVPTree:\n \"\"\"\n Dynamic vp-tree implemented using index folding\n 
\"\"\"\n def __init__(self, dist_fn, min_tree_size=4):\n \"\"\"\n :param dist_fn: Metric distance function used for vp-trees\n :param min_tree_size: Minimum number of nodes to form a tree (extra nodes are stored in a pool until the number is reached)\n \"\"\"\n self.dist_fn = dist_fn\n self.trees = []\n self.pool = []\n self.min_tree_size = min_tree_size\n\n def insert(self, item):\n \"\"\"\n Insert item into dynamic vp tree by first adding to pool, and then building a tree from the pool if min size reached\n Then merge trees of equal sizes so that there are at most log(log (n)) trees, with the largest tree having roughly n/2 nodes\n \"\"\"\n self.pool.append(item)\n if len(self.pool) == self.min_tree_size:\n self.trees.append(_ExtendedVPTree(self.pool, self.dist_fn))\n self.pool = []\n while len(self.trees) > 1 and self.trees[-1].size == self.trees[-2].size:\n a = self.trees.pop()\n b = self.trees.pop()\n self.trees.append(_ExtendedVPTree(a.points + b.points, self.dist_fn))\n\n def nearest(self, query):\n \"\"\"\n Return node nearest to query by finding nearest node in each tree and returning the global minimum (including nodes in pool)\n \"\"\"\n nearest_trees = list(map(lambda t: t.get_nearest_neighbor(query), self.trees))\n distances_pool = list(zip(map(lambda x: self.dist_fn(x, query), self.pool), self.pool))\n best = None\n best_cost = np.inf\n for cost, near in nearest_trees + distances_pool:\n if cost <= best_cost:\n best = near\n best_cost = cost\n return best\n\n def neighbourhood(self, query, radius):\n \"\"\"\n Return all nodes within distance radius of the given query, by collating neighbourhoods for each internal tree (and pool)\n \"\"\"\n tree_neighbourhood = lambda tree: list(map(lambda x: x[1], tree.get_all_in_range(query, radius)))\n neighbourhood_trees = list(itertools.chain.from_iterable(map(tree_neighbourhood, self.trees)))\n return neighbourhood_trees + list(filter(lambda x: self.dist_fn(x, query) < radius, self.pool))\n",
"step-ids": [
4,
6,
9,
11,
12
]
}
|
[
4,
6,
9,
11,
12
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def IsContinuous(numbers):
if not numbers or len(numbers) < 1:
return False
numbers.sort()
number_of_zero = 0
number_of_gap = 0
for i in range(len(numbers)):
if numbers[i] == 0:
number_of_zero += 1
small = number_of_zero
big = small + 1
while big < len(numbers):
if numbers[small] == numbers[big]:
return False
number_of_gap += numbers[big] - numbers[small] - 1
small = big
big += 1
if number_of_gap <= number_of_zero:
return True
else:
return False
<|reserved_special_token_1|>
# flush in poker
def IsContinuous(numbers):
    """Return True if `numbers` can form a straight (poker flush check).

    Zeros are jokers and may fill any gap.  A duplicate non-joker card
    makes a straight impossible.

    Fixes vs. original: no longer sorts the caller's list in place
    (operates on a sorted copy), and drops the redundant
    ``len(numbers) < 1`` test.
    """
    if not numbers:
        return False
    cards = sorted(numbers)          # zeros (jokers) sort to the front
    jokers = cards.count(0)
    gaps = 0
    # Walk adjacent non-joker cards, accumulating the holes between them.
    for lo, hi in zip(cards[jokers:], cards[jokers + 1:]):
        if lo == hi:
            return False             # duplicate real card: never a straight
        gaps += hi - lo - 1
    # Jokers must cover every hole.
    return gaps <= jokers
|
flexible
|
{
"blob_id": "68a776d7fccc8d8496a944baff51d2a862fc7d31",
"index": 1259,
"step-1": "<mask token>\n",
"step-2": "def IsContinuous(numbers):\n if not numbers or len(numbers) < 1:\n return False\n numbers.sort()\n number_of_zero = 0\n number_of_gap = 0\n for i in range(len(numbers)):\n if numbers[i] == 0:\n number_of_zero += 1\n small = number_of_zero\n big = small + 1\n while big < len(numbers):\n if numbers[small] == numbers[big]:\n return False\n number_of_gap += numbers[big] - numbers[small] - 1\n small = big\n big += 1\n if number_of_gap <= number_of_zero:\n return True\n else:\n return False\n",
"step-3": "# flush in poker\ndef IsContinuous(numbers):\n if not numbers or len(numbers) < 1 :\n return False\n\n numbers.sort()\n number_of_zero = 0\n number_of_gap = 0\n for i in range(len(numbers)):\n if numbers[i] == 0:\n number_of_zero += 1\n\n small = number_of_zero\n big = small + 1\n while(big < len(numbers)):\n if numbers[small] == numbers[big]:\n return False\n\n number_of_gap += (numbers[big] - numbers[small] - 1)\n small = big\n big += 1\n\n if number_of_gap <= number_of_zero:\n return True\n else:\n return False\n\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def merge(p, n):
global vet
global aux
if n <= 1:
return 0
c = merge(p, n // 2) + merge(p + n // 2, n - n // 2)
d, a, b = 0, 0, n // 2
while d < n:
if a != n // 2 and (b == n or vet[p + a] < vet[p + b]):
aux[d] = vet[p + a]
a += 1
else:
aux[d] = vet[p + b]
c += n // 2 + a
b += 1
d += 1
for i in range(n):
vet[p + i] = aux[i]
return c
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def merge(p, n):
global vet
global aux
if n <= 1:
return 0
c = merge(p, n // 2) + merge(p + n // 2, n - n // 2)
d, a, b = 0, 0, n // 2
while d < n:
if a != n // 2 and (b == n or vet[p + a] < vet[p + b]):
aux[d] = vet[p + a]
a += 1
else:
aux[d] = vet[p + b]
c += n // 2 + a
b += 1
d += 1
for i in range(n):
vet[p + i] = aux[i]
return c
<|reserved_special_token_0|>
print(merge(0, entrada))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def merge(p, n):
global vet
global aux
if n <= 1:
return 0
c = merge(p, n // 2) + merge(p + n // 2, n - n // 2)
d, a, b = 0, 0, n // 2
while d < n:
if a != n // 2 and (b == n or vet[p + a] < vet[p + b]):
aux[d] = vet[p + a]
a += 1
else:
aux[d] = vet[p + b]
c += n // 2 + a
b += 1
d += 1
for i in range(n):
vet[p + i] = aux[i]
return c
entrada = int(input())
vet = [int(x) for x in input().split()]
aux = [0] * entrada
print(merge(0, entrada))
<|reserved_special_token_1|>
'''
vetor = ["pares de pregos ligados por uma linha"]
indice do vetor representa os pregos na vertical, e o
inteiro em cada pos, os pregos na horizontal.
i(vertical) e j(horizontal) entao:
vetor[i] = j
pregos a(vertical) e pregos b(horizontal)
se a>i and b<j or a<i and b>j
a e i(são indices) b e j(são os elemntos salvos na pos)
'''
def merge(p, n):
    """Sort vet[p:p+n] in place (merge sort, via global aux) and return the
    number of inversions — i.e. crossing nail-line pairs — in that slice.

    Bug fix: the original added ``n//2 + a`` when consuming from the right
    half, so even a sorted input reported nonzero crossings.  Taking an
    element from the right half crosses exactly the ``n//2 - a`` elements
    still waiting in the left half.  Ties now prefer the left half
    (``<=``) so equal values are not counted as crossings.
    """
    global vet
    global aux
    if n <= 1:
        return 0
    half = n // 2
    # Count crossings inside each half, then those spanning the halves.
    count = merge(p, half) + merge(p + half, n - half)
    d, a, b = 0, 0, half
    while d < n:
        if a != half and (b == n or vet[p + a] <= vet[p + b]):
            aux[d] = vet[p + a]
            a += 1
        else:
            aux[d] = vet[p + b]
            count += half - a    # remaining left-half elements all cross
            b += 1
        d += 1
    for i in range(n):
        vet[p + i] = aux[i]
    return count
entrada = int(input())
vet = [int(x) for x in input().split()]
aux = [0]*entrada
print(merge(0,entrada))
|
flexible
|
{
"blob_id": "fe081a422db6b7f10c89179beab852c6b74ec687",
"index": 2795,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef merge(p, n):\n global vet\n global aux\n if n <= 1:\n return 0\n c = merge(p, n // 2) + merge(p + n // 2, n - n // 2)\n d, a, b = 0, 0, n // 2\n while d < n:\n if a != n // 2 and (b == n or vet[p + a] < vet[p + b]):\n aux[d] = vet[p + a]\n a += 1\n else:\n aux[d] = vet[p + b]\n c += n // 2 + a\n b += 1\n d += 1\n for i in range(n):\n vet[p + i] = aux[i]\n return c\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef merge(p, n):\n global vet\n global aux\n if n <= 1:\n return 0\n c = merge(p, n // 2) + merge(p + n // 2, n - n // 2)\n d, a, b = 0, 0, n // 2\n while d < n:\n if a != n // 2 and (b == n or vet[p + a] < vet[p + b]):\n aux[d] = vet[p + a]\n a += 1\n else:\n aux[d] = vet[p + b]\n c += n // 2 + a\n b += 1\n d += 1\n for i in range(n):\n vet[p + i] = aux[i]\n return c\n\n\n<mask token>\nprint(merge(0, entrada))\n",
"step-4": "<mask token>\n\n\ndef merge(p, n):\n global vet\n global aux\n if n <= 1:\n return 0\n c = merge(p, n // 2) + merge(p + n // 2, n - n // 2)\n d, a, b = 0, 0, n // 2\n while d < n:\n if a != n // 2 and (b == n or vet[p + a] < vet[p + b]):\n aux[d] = vet[p + a]\n a += 1\n else:\n aux[d] = vet[p + b]\n c += n // 2 + a\n b += 1\n d += 1\n for i in range(n):\n vet[p + i] = aux[i]\n return c\n\n\nentrada = int(input())\nvet = [int(x) for x in input().split()]\naux = [0] * entrada\nprint(merge(0, entrada))\n",
"step-5": "'''\nvetor = [\"pares de pregos ligados por uma linha\"]\nindice do vetor representa os pregos na vertical, e o\ninteiro em cada pos, os pregos na horizontal.\n\ni(vertical) e j(horizontal) entao:\n vetor[i] = j\n\npregos a(vertical) e pregos b(horizontal)\n\nse a>i and b<j or a<i and b>j\n\na e i(são indices) b e j(são os elemntos salvos na pos)\n'''\n\ndef merge(p,n):\n global vet\n global aux\n if n <= 1:\n return 0\n c = merge(p,n//2) + merge(p+n//2,n-n//2)\n d,a,b = 0,0,n//2\n while d<n:\n if a != n//2 and (b == n or vet[p+a]<vet[p+b]):\n aux[d] = vet[p+a]\n a+=1\n else:\n aux[d] = vet[p+b]\n c+=n//2+a\n b+=1\n d+=1\n for i in range(n):\n vet[p+i] = aux[i]\n return c\n\nentrada = int(input())\nvet = [int(x) for x in input().split()]\naux = [0]*entrada\nprint(merge(0,entrada))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def readimg(dirs, imgname):
img = cv2.imread(dirs + imgname)
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
return img
def readimg_color(dirs, imgname):
img = cv2.imread(dirs + imgname)
img = cv2.normalize(img.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX)
return img
def sift_descriptor(img):
sift = cv2.xfeatures2d.SIFT_create()
kp, dsp = sift.detectAndCompute(img, None)
return kp, dsp
<|reserved_special_token_0|>
def calculate_distance(kp1, kp2, dsp1, dsp2, num_threshold):
dist = scipy.spatial.distance.cdist(dsp1, dsp2, 'sqeuclidean')
idx1 = np.where(dist < num_threshold)[0]
idx2 = np.where(dist < num_threshold)[1]
coord1 = np.array([kp1[idx].pt for idx in idx1])
coord2 = np.array([kp2[idx].pt for idx in idx2])
match_coords = np.concatenate((coord1, coord2), axis=1)
return match_coords
<|reserved_special_token_0|>
def compute_H(subset):
A = []
for i in range(subset.shape[0]):
p1 = np.append(subset[i][0:2], 1)
p2 = np.append(subset[i][2:4], 1)
row1 = [0, 0, 0, p1[0], p1[1], p1[2], -p2[1] * p1[0], -p2[1] * p1[1
], -p2[1] * p1[2]]
row2 = [p1[0], p1[1], p1[2], 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1
], -p2[0] * p1[2]]
A.append(row1)
A.append(row2)
A = np.array(A)
U, s, V = np.linalg.svd(A)
H = V[len(V) - 1].reshape(3, 3)
H = H / H[2, 2]
return H
def show_inlier_matches(img1, img2, inliers):
print('num of inliers shown in the matching: ' + str(len(inliers)))
h1, w1 = img1.shape
h2, w2 = img2.shape
vis = np.zeros((max(h1, h2), w1 + w2), np.uint8)
vis[:, :w1] = img1
vis[:h2, w1:] = img2
fig, ax = plt.subplots()
ax.imshow(vis)
ax.plot([inliers[:, 0], inliers[:, 2] + w1], [inliers[:, 1], inliers[:, 3]]
)
plt.show()
def ransac(img1, img2, matches, thres_ransac):
itertimes = 1000
inliners = 0
max_inliners = 0
for iter in range(0, itertimes):
subset_idx = random.sample(range(matches.shape[0]), k=4)
subset = matches[subset_idx]
H = compute_H(subset)
if np.linalg.matrix_rank(H) < 3:
continue
errors = get_errors(matches, H)
idx = np.where(errors < thres_ransac)[0]
inlinerspts = matches[idx]
inliners = len(inlinerspts)
if inliners >= max_inliners:
which_inliners = inlinerspts.copy()
max_inliners = inliners
best_H = H.copy()
avg_residual = sum(get_errors(matches[idx], H)) / inliners
print('num of inliners: ' + str(max_inliners) + ' average residual: ' +
str(avg_residual))
show_inlier_matches(img1, img2, which_inliners)
return best_H
def warp_images(image0, image1, H):
transform = skimage.transform.ProjectiveTransform(H)
warp = skimage.transform.warp
r, c = image1.shape[:2]
corners = np.array([[0, 0], [0, r], [c, 0], [c, r]])
warped_corners = transform(corners)
all_corners = np.vstack((warped_corners, corners))
corner_min = np.min(all_corners, axis=0)
corner_max = np.max(all_corners, axis=0)
output_shape = corner_max - corner_min
output_shape = np.ceil(output_shape[::-1])
offset = skimage.transform.SimilarityTransform(translation=-corner_min)
image0_ = warp(image0, offset.inverse, output_shape=output_shape, cval=-1)
image1_ = warp(image1, (transform + offset).inverse, output_shape=
output_shape, cval=-1)
image0_zeros = warp(image0, offset.inverse, output_shape=output_shape,
cval=0)
image1_zeros = warp(image1, (transform + offset).inverse, output_shape=
output_shape, cval=0)
overlap = (image0_ != -1.0).astype(int) + (image1_ != -1.0).astype(int)
overlap += (overlap < 1).astype(int)
merged = (image0_zeros + image1_zeros) / overlap
im = Image.fromarray((255 * merged).astype('uint8'), mode='RGB')
im = np.asarray(im)
return im
<|reserved_special_token_0|>
def main_3pic():
dirs = 'MP3_part1_data/' + 'pier/'
leftimg = readimg(dirs, '1.jpg')
midimg = readimg(dirs, '2.jpg')
rightimg = readimg(dirs, '3.jpg')
leftimgcolor = readimg_color(dirs, '1.jpg')
midimgcolor = readimg_color(dirs, '2.jpg')
rightimgcolor = readimg_color(dirs, '3.jpg')
stitched1 = main(leftimg, midimg, leftimgcolor, midimgcolor)
plt.imshow(stitched1)
plt.show()
grey_stitch1 = cv2.cvtColor(stitched1, cv2.COLOR_RGB2GRAY)
stitched2 = main(grey_stitch1, rightimg, stitched1, rightimgcolor)
plt.imshow(stitched2)
plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def readimg(dirs, imgname):
img = cv2.imread(dirs + imgname)
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
return img
def readimg_color(dirs, imgname):
img = cv2.imread(dirs + imgname)
img = cv2.normalize(img.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX)
return img
def sift_descriptor(img):
sift = cv2.xfeatures2d.SIFT_create()
kp, dsp = sift.detectAndCompute(img, None)
return kp, dsp
def show_sift(kp, img):
copyimg = img.copy()
copyimg = cv2.drawKeypoints(img, kp, copyimg)
plt.imshow(copyimg)
plt.show()
def calculate_distance(kp1, kp2, dsp1, dsp2, num_threshold):
dist = scipy.spatial.distance.cdist(dsp1, dsp2, 'sqeuclidean')
idx1 = np.where(dist < num_threshold)[0]
idx2 = np.where(dist < num_threshold)[1]
coord1 = np.array([kp1[idx].pt for idx in idx1])
coord2 = np.array([kp2[idx].pt for idx in idx2])
match_coords = np.concatenate((coord1, coord2), axis=1)
return match_coords
<|reserved_special_token_0|>
def compute_H(subset):
A = []
for i in range(subset.shape[0]):
p1 = np.append(subset[i][0:2], 1)
p2 = np.append(subset[i][2:4], 1)
row1 = [0, 0, 0, p1[0], p1[1], p1[2], -p2[1] * p1[0], -p2[1] * p1[1
], -p2[1] * p1[2]]
row2 = [p1[0], p1[1], p1[2], 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1
], -p2[0] * p1[2]]
A.append(row1)
A.append(row2)
A = np.array(A)
U, s, V = np.linalg.svd(A)
H = V[len(V) - 1].reshape(3, 3)
H = H / H[2, 2]
return H
def show_inlier_matches(img1, img2, inliers):
print('num of inliers shown in the matching: ' + str(len(inliers)))
h1, w1 = img1.shape
h2, w2 = img2.shape
vis = np.zeros((max(h1, h2), w1 + w2), np.uint8)
vis[:, :w1] = img1
vis[:h2, w1:] = img2
fig, ax = plt.subplots()
ax.imshow(vis)
ax.plot([inliers[:, 0], inliers[:, 2] + w1], [inliers[:, 1], inliers[:, 3]]
)
plt.show()
def ransac(img1, img2, matches, thres_ransac):
itertimes = 1000
inliners = 0
max_inliners = 0
for iter in range(0, itertimes):
subset_idx = random.sample(range(matches.shape[0]), k=4)
subset = matches[subset_idx]
H = compute_H(subset)
if np.linalg.matrix_rank(H) < 3:
continue
errors = get_errors(matches, H)
idx = np.where(errors < thres_ransac)[0]
inlinerspts = matches[idx]
inliners = len(inlinerspts)
if inliners >= max_inliners:
which_inliners = inlinerspts.copy()
max_inliners = inliners
best_H = H.copy()
avg_residual = sum(get_errors(matches[idx], H)) / inliners
print('num of inliners: ' + str(max_inliners) + ' average residual: ' +
str(avg_residual))
show_inlier_matches(img1, img2, which_inliners)
return best_H
def warp_images(image0, image1, H):
transform = skimage.transform.ProjectiveTransform(H)
warp = skimage.transform.warp
r, c = image1.shape[:2]
corners = np.array([[0, 0], [0, r], [c, 0], [c, r]])
warped_corners = transform(corners)
all_corners = np.vstack((warped_corners, corners))
corner_min = np.min(all_corners, axis=0)
corner_max = np.max(all_corners, axis=0)
output_shape = corner_max - corner_min
output_shape = np.ceil(output_shape[::-1])
offset = skimage.transform.SimilarityTransform(translation=-corner_min)
image0_ = warp(image0, offset.inverse, output_shape=output_shape, cval=-1)
image1_ = warp(image1, (transform + offset).inverse, output_shape=
output_shape, cval=-1)
image0_zeros = warp(image0, offset.inverse, output_shape=output_shape,
cval=0)
image1_zeros = warp(image1, (transform + offset).inverse, output_shape=
output_shape, cval=0)
overlap = (image0_ != -1.0).astype(int) + (image1_ != -1.0).astype(int)
overlap += (overlap < 1).astype(int)
merged = (image0_zeros + image1_zeros) / overlap
im = Image.fromarray((255 * merged).astype('uint8'), mode='RGB')
im = np.asarray(im)
return im
def main(leftimg, rightimg, leftimgcolor, rightimgcolor):
thres = 9000
thres_ransac = 1.0
kp1, dsp1 = sift_descriptor(leftimg)
kp2, dsp2 = sift_descriptor(rightimg)
matches = calculate_distance(kp1, kp2, dsp1, dsp2, thres)
H_matrix = ransac(leftimg, rightimg, matches, thres_ransac)
stitched_img = warp_images(rightimgcolor, leftimgcolor, H_matrix)
return stitched_img
<|reserved_special_token_0|>
def main_3pic():
dirs = 'MP3_part1_data/' + 'pier/'
leftimg = readimg(dirs, '1.jpg')
midimg = readimg(dirs, '2.jpg')
rightimg = readimg(dirs, '3.jpg')
leftimgcolor = readimg_color(dirs, '1.jpg')
midimgcolor = readimg_color(dirs, '2.jpg')
rightimgcolor = readimg_color(dirs, '3.jpg')
stitched1 = main(leftimg, midimg, leftimgcolor, midimgcolor)
plt.imshow(stitched1)
plt.show()
grey_stitch1 = cv2.cvtColor(stitched1, cv2.COLOR_RGB2GRAY)
stitched2 = main(grey_stitch1, rightimg, stitched1, rightimgcolor)
plt.imshow(stitched2)
plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def readimg(dirs, imgname):
img = cv2.imread(dirs + imgname)
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
return img
def readimg_color(dirs, imgname):
img = cv2.imread(dirs + imgname)
img = cv2.normalize(img.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX)
return img
def sift_descriptor(img):
sift = cv2.xfeatures2d.SIFT_create()
kp, dsp = sift.detectAndCompute(img, None)
return kp, dsp
def show_sift(kp, img):
copyimg = img.copy()
copyimg = cv2.drawKeypoints(img, kp, copyimg)
plt.imshow(copyimg)
plt.show()
def calculate_distance(kp1, kp2, dsp1, dsp2, num_threshold):
dist = scipy.spatial.distance.cdist(dsp1, dsp2, 'sqeuclidean')
idx1 = np.where(dist < num_threshold)[0]
idx2 = np.where(dist < num_threshold)[1]
coord1 = np.array([kp1[idx].pt for idx in idx1])
coord2 = np.array([kp2[idx].pt for idx in idx2])
match_coords = np.concatenate((coord1, coord2), axis=1)
return match_coords
def get_errors(matches, H):
num_pairs = len(matches)
p1 = np.concatenate((matches[:, 0:2], np.ones((1, num_pairs)).T), axis=1)
p2 = matches[:, 2:4]
transformed_p1 = np.zeros((num_pairs, 2))
for i in range(num_pairs):
transformed_p1[i] = (np.matmul(H, p1[i]) / np.matmul(H, p1[i])[-1])[0:2
]
errors = np.linalg.norm(p2 - transformed_p1, axis=1) ** 2
return errors
def compute_H(subset):
A = []
for i in range(subset.shape[0]):
p1 = np.append(subset[i][0:2], 1)
p2 = np.append(subset[i][2:4], 1)
row1 = [0, 0, 0, p1[0], p1[1], p1[2], -p2[1] * p1[0], -p2[1] * p1[1
], -p2[1] * p1[2]]
row2 = [p1[0], p1[1], p1[2], 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1
], -p2[0] * p1[2]]
A.append(row1)
A.append(row2)
A = np.array(A)
U, s, V = np.linalg.svd(A)
H = V[len(V) - 1].reshape(3, 3)
H = H / H[2, 2]
return H
def show_inlier_matches(img1, img2, inliers):
print('num of inliers shown in the matching: ' + str(len(inliers)))
h1, w1 = img1.shape
h2, w2 = img2.shape
vis = np.zeros((max(h1, h2), w1 + w2), np.uint8)
vis[:, :w1] = img1
vis[:h2, w1:] = img2
fig, ax = plt.subplots()
ax.imshow(vis)
ax.plot([inliers[:, 0], inliers[:, 2] + w1], [inliers[:, 1], inliers[:, 3]]
)
plt.show()
def ransac(img1, img2, matches, thres_ransac):
itertimes = 1000
inliners = 0
max_inliners = 0
for iter in range(0, itertimes):
subset_idx = random.sample(range(matches.shape[0]), k=4)
subset = matches[subset_idx]
H = compute_H(subset)
if np.linalg.matrix_rank(H) < 3:
continue
errors = get_errors(matches, H)
idx = np.where(errors < thres_ransac)[0]
inlinerspts = matches[idx]
inliners = len(inlinerspts)
if inliners >= max_inliners:
which_inliners = inlinerspts.copy()
max_inliners = inliners
best_H = H.copy()
avg_residual = sum(get_errors(matches[idx], H)) / inliners
print('num of inliners: ' + str(max_inliners) + ' average residual: ' +
str(avg_residual))
show_inlier_matches(img1, img2, which_inliners)
return best_H
def warp_images(image0, image1, H):
transform = skimage.transform.ProjectiveTransform(H)
warp = skimage.transform.warp
r, c = image1.shape[:2]
corners = np.array([[0, 0], [0, r], [c, 0], [c, r]])
warped_corners = transform(corners)
all_corners = np.vstack((warped_corners, corners))
corner_min = np.min(all_corners, axis=0)
corner_max = np.max(all_corners, axis=0)
output_shape = corner_max - corner_min
output_shape = np.ceil(output_shape[::-1])
offset = skimage.transform.SimilarityTransform(translation=-corner_min)
image0_ = warp(image0, offset.inverse, output_shape=output_shape, cval=-1)
image1_ = warp(image1, (transform + offset).inverse, output_shape=
output_shape, cval=-1)
image0_zeros = warp(image0, offset.inverse, output_shape=output_shape,
cval=0)
image1_zeros = warp(image1, (transform + offset).inverse, output_shape=
output_shape, cval=0)
overlap = (image0_ != -1.0).astype(int) + (image1_ != -1.0).astype(int)
overlap += (overlap < 1).astype(int)
merged = (image0_zeros + image1_zeros) / overlap
im = Image.fromarray((255 * merged).astype('uint8'), mode='RGB')
im = np.asarray(im)
return im
def main(leftimg, rightimg, leftimgcolor, rightimgcolor):
thres = 9000
thres_ransac = 1.0
kp1, dsp1 = sift_descriptor(leftimg)
kp2, dsp2 = sift_descriptor(rightimg)
matches = calculate_distance(kp1, kp2, dsp1, dsp2, thres)
H_matrix = ransac(leftimg, rightimg, matches, thres_ransac)
stitched_img = warp_images(rightimgcolor, leftimgcolor, H_matrix)
return stitched_img
def main_2pic():
dirs = 'MP3_part1_data/' + 'park/'
leftimg = readimg(dirs, 'left.jpg')
rightimg = readimg(dirs, 'right.jpg')
leftimgcolor = readimg_color(dirs, 'left.jpg')
rightimgcolor = readimg_color(dirs, 'right.jpg')
stitched_img = main(leftimg, rightimg, leftimgcolor, rightimgcolor)
plt.imshow(stitched_img)
plt.show()
def main_3pic():
dirs = 'MP3_part1_data/' + 'pier/'
leftimg = readimg(dirs, '1.jpg')
midimg = readimg(dirs, '2.jpg')
rightimg = readimg(dirs, '3.jpg')
leftimgcolor = readimg_color(dirs, '1.jpg')
midimgcolor = readimg_color(dirs, '2.jpg')
rightimgcolor = readimg_color(dirs, '3.jpg')
stitched1 = main(leftimg, midimg, leftimgcolor, midimgcolor)
plt.imshow(stitched1)
plt.show()
grey_stitch1 = cv2.cvtColor(stitched1, cv2.COLOR_RGB2GRAY)
stitched2 = main(grey_stitch1, rightimg, stitched1, rightimgcolor)
plt.imshow(stitched2)
plt.show()
if __name__ == '__main__':
main_3pic()
<|reserved_special_token_1|>
import time
import numpy as np
import matplotlib.pyplot as plt
import cv2
import matplotlib.image as mpimg
import random
import skimage
import scipy
from PIL import Image
def readimg(dirs, imgname):
img = cv2.imread(dirs + imgname)
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
return img
def readimg_color(dirs, imgname):
img = cv2.imread(dirs + imgname)
img = cv2.normalize(img.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX)
return img
def sift_descriptor(img):
sift = cv2.xfeatures2d.SIFT_create()
kp, dsp = sift.detectAndCompute(img, None)
return kp, dsp
def show_sift(kp, img):
copyimg = img.copy()
copyimg = cv2.drawKeypoints(img, kp, copyimg)
plt.imshow(copyimg)
plt.show()
def calculate_distance(kp1, kp2, dsp1, dsp2, num_threshold):
dist = scipy.spatial.distance.cdist(dsp1, dsp2, 'sqeuclidean')
idx1 = np.where(dist < num_threshold)[0]
idx2 = np.where(dist < num_threshold)[1]
coord1 = np.array([kp1[idx].pt for idx in idx1])
coord2 = np.array([kp2[idx].pt for idx in idx2])
match_coords = np.concatenate((coord1, coord2), axis=1)
return match_coords
def get_errors(matches, H):
num_pairs = len(matches)
p1 = np.concatenate((matches[:, 0:2], np.ones((1, num_pairs)).T), axis=1)
p2 = matches[:, 2:4]
transformed_p1 = np.zeros((num_pairs, 2))
for i in range(num_pairs):
transformed_p1[i] = (np.matmul(H, p1[i]) / np.matmul(H, p1[i])[-1])[0:2
]
errors = np.linalg.norm(p2 - transformed_p1, axis=1) ** 2
return errors
def compute_H(subset):
A = []
for i in range(subset.shape[0]):
p1 = np.append(subset[i][0:2], 1)
p2 = np.append(subset[i][2:4], 1)
row1 = [0, 0, 0, p1[0], p1[1], p1[2], -p2[1] * p1[0], -p2[1] * p1[1
], -p2[1] * p1[2]]
row2 = [p1[0], p1[1], p1[2], 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1
], -p2[0] * p1[2]]
A.append(row1)
A.append(row2)
A = np.array(A)
U, s, V = np.linalg.svd(A)
H = V[len(V) - 1].reshape(3, 3)
H = H / H[2, 2]
return H
def show_inlier_matches(img1, img2, inliers):
print('num of inliers shown in the matching: ' + str(len(inliers)))
h1, w1 = img1.shape
h2, w2 = img2.shape
vis = np.zeros((max(h1, h2), w1 + w2), np.uint8)
vis[:, :w1] = img1
vis[:h2, w1:] = img2
fig, ax = plt.subplots()
ax.imshow(vis)
ax.plot([inliers[:, 0], inliers[:, 2] + w1], [inliers[:, 1], inliers[:, 3]]
)
plt.show()
def ransac(img1, img2, matches, thres_ransac):
itertimes = 1000
inliners = 0
max_inliners = 0
for iter in range(0, itertimes):
subset_idx = random.sample(range(matches.shape[0]), k=4)
subset = matches[subset_idx]
H = compute_H(subset)
if np.linalg.matrix_rank(H) < 3:
continue
errors = get_errors(matches, H)
idx = np.where(errors < thres_ransac)[0]
inlinerspts = matches[idx]
inliners = len(inlinerspts)
if inliners >= max_inliners:
which_inliners = inlinerspts.copy()
max_inliners = inliners
best_H = H.copy()
avg_residual = sum(get_errors(matches[idx], H)) / inliners
print('num of inliners: ' + str(max_inliners) + ' average residual: ' +
str(avg_residual))
show_inlier_matches(img1, img2, which_inliners)
return best_H
def warp_images(image0, image1, H):
transform = skimage.transform.ProjectiveTransform(H)
warp = skimage.transform.warp
r, c = image1.shape[:2]
corners = np.array([[0, 0], [0, r], [c, 0], [c, r]])
warped_corners = transform(corners)
all_corners = np.vstack((warped_corners, corners))
corner_min = np.min(all_corners, axis=0)
corner_max = np.max(all_corners, axis=0)
output_shape = corner_max - corner_min
output_shape = np.ceil(output_shape[::-1])
offset = skimage.transform.SimilarityTransform(translation=-corner_min)
image0_ = warp(image0, offset.inverse, output_shape=output_shape, cval=-1)
image1_ = warp(image1, (transform + offset).inverse, output_shape=
output_shape, cval=-1)
image0_zeros = warp(image0, offset.inverse, output_shape=output_shape,
cval=0)
image1_zeros = warp(image1, (transform + offset).inverse, output_shape=
output_shape, cval=0)
overlap = (image0_ != -1.0).astype(int) + (image1_ != -1.0).astype(int)
overlap += (overlap < 1).astype(int)
merged = (image0_zeros + image1_zeros) / overlap
im = Image.fromarray((255 * merged).astype('uint8'), mode='RGB')
im = np.asarray(im)
return im
def main(leftimg, rightimg, leftimgcolor, rightimgcolor):
thres = 9000
thres_ransac = 1.0
kp1, dsp1 = sift_descriptor(leftimg)
kp2, dsp2 = sift_descriptor(rightimg)
matches = calculate_distance(kp1, kp2, dsp1, dsp2, thres)
H_matrix = ransac(leftimg, rightimg, matches, thres_ransac)
stitched_img = warp_images(rightimgcolor, leftimgcolor, H_matrix)
return stitched_img
def main_2pic():
dirs = 'MP3_part1_data/' + 'park/'
leftimg = readimg(dirs, 'left.jpg')
rightimg = readimg(dirs, 'right.jpg')
leftimgcolor = readimg_color(dirs, 'left.jpg')
rightimgcolor = readimg_color(dirs, 'right.jpg')
stitched_img = main(leftimg, rightimg, leftimgcolor, rightimgcolor)
plt.imshow(stitched_img)
plt.show()
def main_3pic():
dirs = 'MP3_part1_data/' + 'pier/'
leftimg = readimg(dirs, '1.jpg')
midimg = readimg(dirs, '2.jpg')
rightimg = readimg(dirs, '3.jpg')
leftimgcolor = readimg_color(dirs, '1.jpg')
midimgcolor = readimg_color(dirs, '2.jpg')
rightimgcolor = readimg_color(dirs, '3.jpg')
stitched1 = main(leftimg, midimg, leftimgcolor, midimgcolor)
plt.imshow(stitched1)
plt.show()
grey_stitch1 = cv2.cvtColor(stitched1, cv2.COLOR_RGB2GRAY)
stitched2 = main(grey_stitch1, rightimg, stitched1, rightimgcolor)
plt.imshow(stitched2)
plt.show()
if __name__ == '__main__':
main_3pic()
<|reserved_special_token_1|>
import time
import numpy as np
import matplotlib.pyplot as plt
import cv2
import matplotlib.image as mpimg
import random
import skimage
import scipy
from PIL import Image
def readimg(dirs, imgname):
    """Load dirs + imgname from disk and return it as a grayscale array.

    Bug fix: ``cv2.imread`` returns pixels in BGR channel order, so the
    grayscale conversion must use ``COLOR_BGR2GRAY``; the original
    ``COLOR_RGB2GRAY`` applied the red/blue luminance weights to the
    wrong channels.
    """
    img = cv2.imread(dirs + imgname)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return img
def readimg_color(dirs, imgname):
    """Load dirs + imgname as a float RGB image normalized to [0, 1].

    Bug fix: ``cv2.imread`` returns BGR, but downstream code displays the
    stitched result with ``matplotlib.pyplot.imshow`` (which expects RGB)
    and ``warp_images`` builds an RGB PIL image — so convert to RGB here
    to avoid swapped red/blue channels in the panorama.
    """
    img = cv2.imread(dirs + imgname)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = cv2.normalize(img.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX)
    return img
def sift_descriptor(img):
    """Detect SIFT keypoints in a grayscale image and compute their descriptors.

    Returns (keypoints, descriptors) as produced by OpenCV.
    """
    detector = cv2.xfeatures2d.SIFT_create()
    keypoints, descriptors = detector.detectAndCompute(img, None)
    return keypoints, descriptors
def show_sift(kp, img):
    """Display img with its SIFT keypoints drawn on a copy (img untouched)."""
    annotated = cv2.drawKeypoints(img, kp, img.copy())
    plt.imshow(annotated)
    plt.show()
def calculate_distance(kp1, kp2, dsp1, dsp2, num_threshold):
    """Return putative matches between two SIFT keypoint sets.

    A pair is kept when the squared Euclidean distance between its
    descriptors is below num_threshold.  Each output row is
    [x1, y1, x2, y2]: matched coordinates in image 1 and image 2.
    """
    # All pairwise squared Euclidean distances between descriptors.
    dist = scipy.spatial.distance.cdist(dsp1, dsp2, 'sqeuclidean')
    # Row/column index of every descriptor pair under the threshold.
    rows, cols = np.nonzero(dist < num_threshold)
    pts1 = np.array([kp1[r].pt for r in rows])
    pts2 = np.array([kp2[c].pt for c in cols])
    return np.concatenate((pts1, pts2), axis=1)
def get_errors(matches, H):
    """Squared reprojection error of every match under homography H.

    Each row of matches is [x1, y1, x2, y2]; (x1, y1) is mapped through H,
    dehomogenized, and compared against (x2, y2).
    """
    num_pairs = len(matches)
    # Homogeneous coordinates of the image-1 points: rows [x1, y1, 1].
    pts1_h = np.concatenate((matches[:, 0:2], np.ones((1, num_pairs)).T), axis=1)
    pts2 = matches[:, 2:4]
    # Project all points at once and dehomogenize by the w component.
    projected = pts1_h @ H.T
    projected = projected[:, :2] / projected[:, 2:3]
    return np.linalg.norm(pts2 - projected, axis=1) ** 2
def compute_H(subset):
    """Fit a homography to 4+ point pairs via the DLT and SVD.

    subset rows are [x1, y1, x2, y2].  Returns the 3x3 matrix H, scaled so
    that H[2, 2] == 1, mapping (x1, y1, 1) onto (x2, y2, 1) up to scale.
    """
    rows = []
    for x1, y1, x2, y2 in subset:
        # Two linear constraints per correspondence (standard DLT rows).
        rows.append([0.0, 0.0, 0.0, x1, y1, 1.0, -y2 * x1, -y2 * y1, -y2 * 1.0])
        rows.append([x1, y1, 1.0, 0.0, 0.0, 0.0, -x2 * x1, -x2 * y1, -x2 * 1.0])
    # The homography is the right singular vector of the smallest
    # singular value, i.e. the (approximate) null space of A.
    _, _, vt = np.linalg.svd(np.array(rows))
    H = vt[-1].reshape(3, 3)
    return H / H[2, 2]
def show_inlier_matches(img1, img2, inliers):
    """Display the two grayscale images side by side with inlier lines drawn.

    inliers rows are [x1, y1, x2, y2]; image-2 x coordinates are shifted by
    image 1's width so the lines connect corresponding points.
    """
    print('num of inliers shown in the matching: ' + str(len(inliers)))
    height1, width1 = img1.shape
    height2, width2 = img2.shape
    canvas = np.zeros((max(height1, height2), width1 + width2), np.uint8)
    canvas[:, :width1] = img1
    canvas[:height2, width1:] = img2
    figure, axes = plt.subplots()
    axes.imshow(canvas)
    axes.plot([inliers[:, 0], inliers[:, 2] + width1],
              [inliers[:, 1], inliers[:, 3]])
    plt.show()
def ransac(img1, img2, matches, thres_ransac):
    """Estimate a homography from noisy matches using RANSAC.

    Repeatedly fits a homography to a random 4-point subset, counts the
    matches whose squared reprojection error is below thres_ransac, and
    keeps the model with the most inliers.  Prints the inlier count and
    average residual, displays the inlier matches, and returns the best H.

    Fixes vs. original: ``best_H`` / ``which_inliners`` / ``avg_residual``
    were unbound (UnboundLocalError) if every sample was degenerate, and
    a winning iteration with zero inliers divided by zero.

    :raises ValueError: if no full-rank homography with >= 1 inlier is found.
    """
    itertimes = 1000
    max_inliners = 0
    best_H = None
    which_inliners = None
    avg_residual = float('inf')
    for iter in range(0, itertimes):
        # Minimal sample: 4 correspondences determine a homography.
        subset_idx = random.sample(range(matches.shape[0]), k=4)
        subset = matches[subset_idx]
        H = compute_H(subset)
        # Degenerate samples (e.g. collinear points) give a rank-deficient H.
        if np.linalg.matrix_rank(H) < 3:
            continue
        errors = get_errors(matches, H)
        idx = np.where(errors < thres_ransac)[0]
        inlinerspts = matches[idx]
        inliners = len(inlinerspts)
        # Keep ties (>=) like the original, but require at least one inlier
        # so avg_residual never divides by zero.
        if inliners >= max_inliners and inliners > 0:
            which_inliners = inlinerspts.copy()
            max_inliners = inliners
            best_H = H.copy()
            avg_residual = sum(get_errors(matches[idx], H)) / inliners
    if best_H is None:
        raise ValueError('RANSAC failed: no valid homography found')
    print('num of inliners: ' + str(max_inliners) + ' average residual: ' +
          str(avg_residual))
    show_inlier_matches(img1, img2, which_inliners)
    return best_H
# function provided by Maghav at Piazza @450
def warp_images(image0, image1, H):
    """Warp image1 into image0's frame using homography H and blend them.

    Returns the blended mosaic as a uint8 RGB array.  Pixels covered by
    both images are averaged; the cval=-1 warps serve only as coverage
    masks (valid image data is assumed to be >= 0 — TODO confirm inputs
    are normalized to [0, 1]).
    """
    transform = skimage.transform.ProjectiveTransform(H)
    warp = skimage.transform.warp
    r, c = image1.shape[:2]
    # Note that transformations take coordinates in (x, y) format,
    # not (row, column), in order to be consistent with most literature
    corners = np.array([[0, 0],
                        [0, r],
                        [c, 0],
                        [c, r]])
    # Warp the image corners to their new positions
    warped_corners = transform(corners)
    # Find the extents of both the reference image and the warped
    # target image
    all_corners = np.vstack((warped_corners, corners))
    corner_min = np.min(all_corners, axis=0)
    corner_max = np.max(all_corners, axis=0)
    output_shape = (corner_max - corner_min)
    # skimage's output_shape is (rows, cols); the extent is (x, y), so reverse.
    output_shape = np.ceil(output_shape[::-1])
    # Shift both images so the mosaic's top-left corner lands at (0, 0).
    offset = skimage.transform.SimilarityTransform(translation=-corner_min)
    # cval=-1 marks pixels that fall outside each source image (masks).
    image0_ = warp(image0, offset.inverse, output_shape=output_shape, cval=-1)
    image1_ = warp(image1, (transform + offset).inverse, output_shape=output_shape, cval=-1)
    # cval=0 versions carry the actual pixel values so they can be summed.
    image0_zeros = warp(image0, offset.inverse, output_shape=output_shape, cval=0)
    image1_zeros = warp(image1, (transform + offset).inverse, output_shape=output_shape, cval=0)
    # overlap = number of images covering each pixel, clamped to >= 1 so the
    # division below never hits zero in uncovered regions.
    overlap = (image0_ != -1.0 ).astype(int) + (image1_ != -1.0).astype(int)
    overlap += (overlap < 1).astype(int)
    merged = (image0_zeros+image1_zeros)/overlap
    im = Image.fromarray((255*merged).astype('uint8'), mode='RGB')
    im = np.asarray(im)
    return im
def main(leftimg, rightimg, leftimgcolor, rightimgcolor):
    """Stitch rightimg onto leftimg and return the color panorama.

    Grayscale inputs drive the SIFT matching; the color inputs are warped
    and blended into the returned mosaic.
    """
    # Squared-descriptor-distance match threshold and RANSAC inlier
    # threshold (tuned: 7000/0.5 for two images, 9000/1.0 for three).
    thres = 9000
    thres_ransac = 1.0
    keypoints_left, descriptors_left = sift_descriptor(leftimg)
    keypoints_right, descriptors_right = sift_descriptor(rightimg)
    matches = calculate_distance(keypoints_left, keypoints_right,
                                 descriptors_left, descriptors_right, thres)
    H_matrix = ransac(leftimg, rightimg, matches, thres_ransac)
    return warp_images(rightimgcolor, leftimgcolor, H_matrix)
def main_2pic():
    """Stitch the two 'park' images and display the resulting panorama."""
    dirs = 'MP3_part1_data/' + 'park/'
    left_gray = readimg(dirs, 'left.jpg')
    right_gray = readimg(dirs, 'right.jpg')
    left_color = readimg_color(dirs, 'left.jpg')
    right_color = readimg_color(dirs, 'right.jpg')
    panorama = main(left_gray, right_gray, left_color, right_color)
    plt.imshow(panorama)
    plt.show()
def main_3pic():
    """Stitch the three 'pier' images (left+mid first, then add right)."""
    dirs = 'MP3_part1_data/' + 'pier/'
    left_gray = readimg(dirs, '1.jpg')
    mid_gray = readimg(dirs, '2.jpg')
    right_gray = readimg(dirs, '3.jpg')
    left_color = readimg_color(dirs, '1.jpg')
    mid_color = readimg_color(dirs, '2.jpg')
    right_color = readimg_color(dirs, '3.jpg')
    first_pass = main(left_gray, mid_gray, left_color, mid_color)
    plt.imshow(first_pass)
    plt.show()
    # Grayscale version of the intermediate panorama for the second matching pass.
    first_pass_gray = cv2.cvtColor(first_pass, cv2.COLOR_RGB2GRAY)
    second_pass = main(first_pass_gray, right_gray, first_pass, right_color)
    plt.imshow(second_pass)
    plt.show()
# Entry point: runs the three-image demo by default; uncomment the
# main_2pic() call to run the two-image demo instead.
if __name__ == '__main__':
	#main_2pic()
	main_3pic()
|
flexible
|
{
"blob_id": "e08ab06be0957e5e173df798742abc493eac84d0",
"index": 6006,
"step-1": "<mask token>\n\n\ndef readimg(dirs, imgname):\n img = cv2.imread(dirs + imgname)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n return img\n\n\ndef readimg_color(dirs, imgname):\n img = cv2.imread(dirs + imgname)\n img = cv2.normalize(img.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX)\n return img\n\n\ndef sift_descriptor(img):\n sift = cv2.xfeatures2d.SIFT_create()\n kp, dsp = sift.detectAndCompute(img, None)\n return kp, dsp\n\n\n<mask token>\n\n\ndef calculate_distance(kp1, kp2, dsp1, dsp2, num_threshold):\n dist = scipy.spatial.distance.cdist(dsp1, dsp2, 'sqeuclidean')\n idx1 = np.where(dist < num_threshold)[0]\n idx2 = np.where(dist < num_threshold)[1]\n coord1 = np.array([kp1[idx].pt for idx in idx1])\n coord2 = np.array([kp2[idx].pt for idx in idx2])\n match_coords = np.concatenate((coord1, coord2), axis=1)\n return match_coords\n\n\n<mask token>\n\n\ndef compute_H(subset):\n A = []\n for i in range(subset.shape[0]):\n p1 = np.append(subset[i][0:2], 1)\n p2 = np.append(subset[i][2:4], 1)\n row1 = [0, 0, 0, p1[0], p1[1], p1[2], -p2[1] * p1[0], -p2[1] * p1[1\n ], -p2[1] * p1[2]]\n row2 = [p1[0], p1[1], p1[2], 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1\n ], -p2[0] * p1[2]]\n A.append(row1)\n A.append(row2)\n A = np.array(A)\n U, s, V = np.linalg.svd(A)\n H = V[len(V) - 1].reshape(3, 3)\n H = H / H[2, 2]\n return H\n\n\ndef show_inlier_matches(img1, img2, inliers):\n print('num of inliers shown in the matching: ' + str(len(inliers)))\n h1, w1 = img1.shape\n h2, w2 = img2.shape\n vis = np.zeros((max(h1, h2), w1 + w2), np.uint8)\n vis[:, :w1] = img1\n vis[:h2, w1:] = img2\n fig, ax = plt.subplots()\n ax.imshow(vis)\n ax.plot([inliers[:, 0], inliers[:, 2] + w1], [inliers[:, 1], inliers[:, 3]]\n )\n plt.show()\n\n\ndef ransac(img1, img2, matches, thres_ransac):\n itertimes = 1000\n inliners = 0\n max_inliners = 0\n for iter in range(0, itertimes):\n subset_idx = random.sample(range(matches.shape[0]), k=4)\n subset = matches[subset_idx]\n H = 
compute_H(subset)\n if np.linalg.matrix_rank(H) < 3:\n continue\n errors = get_errors(matches, H)\n idx = np.where(errors < thres_ransac)[0]\n inlinerspts = matches[idx]\n inliners = len(inlinerspts)\n if inliners >= max_inliners:\n which_inliners = inlinerspts.copy()\n max_inliners = inliners\n best_H = H.copy()\n avg_residual = sum(get_errors(matches[idx], H)) / inliners\n print('num of inliners: ' + str(max_inliners) + ' average residual: ' +\n str(avg_residual))\n show_inlier_matches(img1, img2, which_inliners)\n return best_H\n\n\ndef warp_images(image0, image1, H):\n transform = skimage.transform.ProjectiveTransform(H)\n warp = skimage.transform.warp\n r, c = image1.shape[:2]\n corners = np.array([[0, 0], [0, r], [c, 0], [c, r]])\n warped_corners = transform(corners)\n all_corners = np.vstack((warped_corners, corners))\n corner_min = np.min(all_corners, axis=0)\n corner_max = np.max(all_corners, axis=0)\n output_shape = corner_max - corner_min\n output_shape = np.ceil(output_shape[::-1])\n offset = skimage.transform.SimilarityTransform(translation=-corner_min)\n image0_ = warp(image0, offset.inverse, output_shape=output_shape, cval=-1)\n image1_ = warp(image1, (transform + offset).inverse, output_shape=\n output_shape, cval=-1)\n image0_zeros = warp(image0, offset.inverse, output_shape=output_shape,\n cval=0)\n image1_zeros = warp(image1, (transform + offset).inverse, output_shape=\n output_shape, cval=0)\n overlap = (image0_ != -1.0).astype(int) + (image1_ != -1.0).astype(int)\n overlap += (overlap < 1).astype(int)\n merged = (image0_zeros + image1_zeros) / overlap\n im = Image.fromarray((255 * merged).astype('uint8'), mode='RGB')\n im = np.asarray(im)\n return im\n\n\n<mask token>\n\n\ndef main_3pic():\n dirs = 'MP3_part1_data/' + 'pier/'\n leftimg = readimg(dirs, '1.jpg')\n midimg = readimg(dirs, '2.jpg')\n rightimg = readimg(dirs, '3.jpg')\n leftimgcolor = readimg_color(dirs, '1.jpg')\n midimgcolor = readimg_color(dirs, '2.jpg')\n rightimgcolor = 
readimg_color(dirs, '3.jpg')\n stitched1 = main(leftimg, midimg, leftimgcolor, midimgcolor)\n plt.imshow(stitched1)\n plt.show()\n grey_stitch1 = cv2.cvtColor(stitched1, cv2.COLOR_RGB2GRAY)\n stitched2 = main(grey_stitch1, rightimg, stitched1, rightimgcolor)\n plt.imshow(stitched2)\n plt.show()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef readimg(dirs, imgname):\n img = cv2.imread(dirs + imgname)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n return img\n\n\ndef readimg_color(dirs, imgname):\n img = cv2.imread(dirs + imgname)\n img = cv2.normalize(img.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX)\n return img\n\n\ndef sift_descriptor(img):\n sift = cv2.xfeatures2d.SIFT_create()\n kp, dsp = sift.detectAndCompute(img, None)\n return kp, dsp\n\n\ndef show_sift(kp, img):\n copyimg = img.copy()\n copyimg = cv2.drawKeypoints(img, kp, copyimg)\n plt.imshow(copyimg)\n plt.show()\n\n\ndef calculate_distance(kp1, kp2, dsp1, dsp2, num_threshold):\n dist = scipy.spatial.distance.cdist(dsp1, dsp2, 'sqeuclidean')\n idx1 = np.where(dist < num_threshold)[0]\n idx2 = np.where(dist < num_threshold)[1]\n coord1 = np.array([kp1[idx].pt for idx in idx1])\n coord2 = np.array([kp2[idx].pt for idx in idx2])\n match_coords = np.concatenate((coord1, coord2), axis=1)\n return match_coords\n\n\n<mask token>\n\n\ndef compute_H(subset):\n A = []\n for i in range(subset.shape[0]):\n p1 = np.append(subset[i][0:2], 1)\n p2 = np.append(subset[i][2:4], 1)\n row1 = [0, 0, 0, p1[0], p1[1], p1[2], -p2[1] * p1[0], -p2[1] * p1[1\n ], -p2[1] * p1[2]]\n row2 = [p1[0], p1[1], p1[2], 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1\n ], -p2[0] * p1[2]]\n A.append(row1)\n A.append(row2)\n A = np.array(A)\n U, s, V = np.linalg.svd(A)\n H = V[len(V) - 1].reshape(3, 3)\n H = H / H[2, 2]\n return H\n\n\ndef show_inlier_matches(img1, img2, inliers):\n print('num of inliers shown in the matching: ' + str(len(inliers)))\n h1, w1 = img1.shape\n h2, w2 = img2.shape\n vis = np.zeros((max(h1, h2), w1 + w2), np.uint8)\n vis[:, :w1] = img1\n vis[:h2, w1:] = img2\n fig, ax = plt.subplots()\n ax.imshow(vis)\n ax.plot([inliers[:, 0], inliers[:, 2] + w1], [inliers[:, 1], inliers[:, 3]]\n )\n plt.show()\n\n\ndef ransac(img1, img2, matches, thres_ransac):\n itertimes = 1000\n inliners = 0\n max_inliners = 0\n for iter in range(0, 
itertimes):\n subset_idx = random.sample(range(matches.shape[0]), k=4)\n subset = matches[subset_idx]\n H = compute_H(subset)\n if np.linalg.matrix_rank(H) < 3:\n continue\n errors = get_errors(matches, H)\n idx = np.where(errors < thres_ransac)[0]\n inlinerspts = matches[idx]\n inliners = len(inlinerspts)\n if inliners >= max_inliners:\n which_inliners = inlinerspts.copy()\n max_inliners = inliners\n best_H = H.copy()\n avg_residual = sum(get_errors(matches[idx], H)) / inliners\n print('num of inliners: ' + str(max_inliners) + ' average residual: ' +\n str(avg_residual))\n show_inlier_matches(img1, img2, which_inliners)\n return best_H\n\n\ndef warp_images(image0, image1, H):\n transform = skimage.transform.ProjectiveTransform(H)\n warp = skimage.transform.warp\n r, c = image1.shape[:2]\n corners = np.array([[0, 0], [0, r], [c, 0], [c, r]])\n warped_corners = transform(corners)\n all_corners = np.vstack((warped_corners, corners))\n corner_min = np.min(all_corners, axis=0)\n corner_max = np.max(all_corners, axis=0)\n output_shape = corner_max - corner_min\n output_shape = np.ceil(output_shape[::-1])\n offset = skimage.transform.SimilarityTransform(translation=-corner_min)\n image0_ = warp(image0, offset.inverse, output_shape=output_shape, cval=-1)\n image1_ = warp(image1, (transform + offset).inverse, output_shape=\n output_shape, cval=-1)\n image0_zeros = warp(image0, offset.inverse, output_shape=output_shape,\n cval=0)\n image1_zeros = warp(image1, (transform + offset).inverse, output_shape=\n output_shape, cval=0)\n overlap = (image0_ != -1.0).astype(int) + (image1_ != -1.0).astype(int)\n overlap += (overlap < 1).astype(int)\n merged = (image0_zeros + image1_zeros) / overlap\n im = Image.fromarray((255 * merged).astype('uint8'), mode='RGB')\n im = np.asarray(im)\n return im\n\n\ndef main(leftimg, rightimg, leftimgcolor, rightimgcolor):\n thres = 9000\n thres_ransac = 1.0\n kp1, dsp1 = sift_descriptor(leftimg)\n kp2, dsp2 = sift_descriptor(rightimg)\n matches = 
calculate_distance(kp1, kp2, dsp1, dsp2, thres)\n H_matrix = ransac(leftimg, rightimg, matches, thres_ransac)\n stitched_img = warp_images(rightimgcolor, leftimgcolor, H_matrix)\n return stitched_img\n\n\n<mask token>\n\n\ndef main_3pic():\n dirs = 'MP3_part1_data/' + 'pier/'\n leftimg = readimg(dirs, '1.jpg')\n midimg = readimg(dirs, '2.jpg')\n rightimg = readimg(dirs, '3.jpg')\n leftimgcolor = readimg_color(dirs, '1.jpg')\n midimgcolor = readimg_color(dirs, '2.jpg')\n rightimgcolor = readimg_color(dirs, '3.jpg')\n stitched1 = main(leftimg, midimg, leftimgcolor, midimgcolor)\n plt.imshow(stitched1)\n plt.show()\n grey_stitch1 = cv2.cvtColor(stitched1, cv2.COLOR_RGB2GRAY)\n stitched2 = main(grey_stitch1, rightimg, stitched1, rightimgcolor)\n plt.imshow(stitched2)\n plt.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef readimg(dirs, imgname):\n img = cv2.imread(dirs + imgname)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n return img\n\n\ndef readimg_color(dirs, imgname):\n img = cv2.imread(dirs + imgname)\n img = cv2.normalize(img.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX)\n return img\n\n\ndef sift_descriptor(img):\n sift = cv2.xfeatures2d.SIFT_create()\n kp, dsp = sift.detectAndCompute(img, None)\n return kp, dsp\n\n\ndef show_sift(kp, img):\n copyimg = img.copy()\n copyimg = cv2.drawKeypoints(img, kp, copyimg)\n plt.imshow(copyimg)\n plt.show()\n\n\ndef calculate_distance(kp1, kp2, dsp1, dsp2, num_threshold):\n dist = scipy.spatial.distance.cdist(dsp1, dsp2, 'sqeuclidean')\n idx1 = np.where(dist < num_threshold)[0]\n idx2 = np.where(dist < num_threshold)[1]\n coord1 = np.array([kp1[idx].pt for idx in idx1])\n coord2 = np.array([kp2[idx].pt for idx in idx2])\n match_coords = np.concatenate((coord1, coord2), axis=1)\n return match_coords\n\n\ndef get_errors(matches, H):\n num_pairs = len(matches)\n p1 = np.concatenate((matches[:, 0:2], np.ones((1, num_pairs)).T), axis=1)\n p2 = matches[:, 2:4]\n transformed_p1 = np.zeros((num_pairs, 2))\n for i in range(num_pairs):\n transformed_p1[i] = (np.matmul(H, p1[i]) / np.matmul(H, p1[i])[-1])[0:2\n ]\n errors = np.linalg.norm(p2 - transformed_p1, axis=1) ** 2\n return errors\n\n\ndef compute_H(subset):\n A = []\n for i in range(subset.shape[0]):\n p1 = np.append(subset[i][0:2], 1)\n p2 = np.append(subset[i][2:4], 1)\n row1 = [0, 0, 0, p1[0], p1[1], p1[2], -p2[1] * p1[0], -p2[1] * p1[1\n ], -p2[1] * p1[2]]\n row2 = [p1[0], p1[1], p1[2], 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1\n ], -p2[0] * p1[2]]\n A.append(row1)\n A.append(row2)\n A = np.array(A)\n U, s, V = np.linalg.svd(A)\n H = V[len(V) - 1].reshape(3, 3)\n H = H / H[2, 2]\n return H\n\n\ndef show_inlier_matches(img1, img2, inliers):\n print('num of inliers shown in the matching: ' + str(len(inliers)))\n h1, w1 = img1.shape\n h2, w2 = 
img2.shape\n vis = np.zeros((max(h1, h2), w1 + w2), np.uint8)\n vis[:, :w1] = img1\n vis[:h2, w1:] = img2\n fig, ax = plt.subplots()\n ax.imshow(vis)\n ax.plot([inliers[:, 0], inliers[:, 2] + w1], [inliers[:, 1], inliers[:, 3]]\n )\n plt.show()\n\n\ndef ransac(img1, img2, matches, thres_ransac):\n itertimes = 1000\n inliners = 0\n max_inliners = 0\n for iter in range(0, itertimes):\n subset_idx = random.sample(range(matches.shape[0]), k=4)\n subset = matches[subset_idx]\n H = compute_H(subset)\n if np.linalg.matrix_rank(H) < 3:\n continue\n errors = get_errors(matches, H)\n idx = np.where(errors < thres_ransac)[0]\n inlinerspts = matches[idx]\n inliners = len(inlinerspts)\n if inliners >= max_inliners:\n which_inliners = inlinerspts.copy()\n max_inliners = inliners\n best_H = H.copy()\n avg_residual = sum(get_errors(matches[idx], H)) / inliners\n print('num of inliners: ' + str(max_inliners) + ' average residual: ' +\n str(avg_residual))\n show_inlier_matches(img1, img2, which_inliners)\n return best_H\n\n\ndef warp_images(image0, image1, H):\n transform = skimage.transform.ProjectiveTransform(H)\n warp = skimage.transform.warp\n r, c = image1.shape[:2]\n corners = np.array([[0, 0], [0, r], [c, 0], [c, r]])\n warped_corners = transform(corners)\n all_corners = np.vstack((warped_corners, corners))\n corner_min = np.min(all_corners, axis=0)\n corner_max = np.max(all_corners, axis=0)\n output_shape = corner_max - corner_min\n output_shape = np.ceil(output_shape[::-1])\n offset = skimage.transform.SimilarityTransform(translation=-corner_min)\n image0_ = warp(image0, offset.inverse, output_shape=output_shape, cval=-1)\n image1_ = warp(image1, (transform + offset).inverse, output_shape=\n output_shape, cval=-1)\n image0_zeros = warp(image0, offset.inverse, output_shape=output_shape,\n cval=0)\n image1_zeros = warp(image1, (transform + offset).inverse, output_shape=\n output_shape, cval=0)\n overlap = (image0_ != -1.0).astype(int) + (image1_ != -1.0).astype(int)\n overlap 
+= (overlap < 1).astype(int)\n merged = (image0_zeros + image1_zeros) / overlap\n im = Image.fromarray((255 * merged).astype('uint8'), mode='RGB')\n im = np.asarray(im)\n return im\n\n\ndef main(leftimg, rightimg, leftimgcolor, rightimgcolor):\n thres = 9000\n thres_ransac = 1.0\n kp1, dsp1 = sift_descriptor(leftimg)\n kp2, dsp2 = sift_descriptor(rightimg)\n matches = calculate_distance(kp1, kp2, dsp1, dsp2, thres)\n H_matrix = ransac(leftimg, rightimg, matches, thres_ransac)\n stitched_img = warp_images(rightimgcolor, leftimgcolor, H_matrix)\n return stitched_img\n\n\ndef main_2pic():\n dirs = 'MP3_part1_data/' + 'park/'\n leftimg = readimg(dirs, 'left.jpg')\n rightimg = readimg(dirs, 'right.jpg')\n leftimgcolor = readimg_color(dirs, 'left.jpg')\n rightimgcolor = readimg_color(dirs, 'right.jpg')\n stitched_img = main(leftimg, rightimg, leftimgcolor, rightimgcolor)\n plt.imshow(stitched_img)\n plt.show()\n\n\ndef main_3pic():\n dirs = 'MP3_part1_data/' + 'pier/'\n leftimg = readimg(dirs, '1.jpg')\n midimg = readimg(dirs, '2.jpg')\n rightimg = readimg(dirs, '3.jpg')\n leftimgcolor = readimg_color(dirs, '1.jpg')\n midimgcolor = readimg_color(dirs, '2.jpg')\n rightimgcolor = readimg_color(dirs, '3.jpg')\n stitched1 = main(leftimg, midimg, leftimgcolor, midimgcolor)\n plt.imshow(stitched1)\n plt.show()\n grey_stitch1 = cv2.cvtColor(stitched1, cv2.COLOR_RGB2GRAY)\n stitched2 = main(grey_stitch1, rightimg, stitched1, rightimgcolor)\n plt.imshow(stitched2)\n plt.show()\n\n\nif __name__ == '__main__':\n main_3pic()\n",
"step-4": "import time\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\nimport matplotlib.image as mpimg\nimport random\nimport skimage\nimport scipy\nfrom PIL import Image\n\n\ndef readimg(dirs, imgname):\n img = cv2.imread(dirs + imgname)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n return img\n\n\ndef readimg_color(dirs, imgname):\n img = cv2.imread(dirs + imgname)\n img = cv2.normalize(img.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX)\n return img\n\n\ndef sift_descriptor(img):\n sift = cv2.xfeatures2d.SIFT_create()\n kp, dsp = sift.detectAndCompute(img, None)\n return kp, dsp\n\n\ndef show_sift(kp, img):\n copyimg = img.copy()\n copyimg = cv2.drawKeypoints(img, kp, copyimg)\n plt.imshow(copyimg)\n plt.show()\n\n\ndef calculate_distance(kp1, kp2, dsp1, dsp2, num_threshold):\n dist = scipy.spatial.distance.cdist(dsp1, dsp2, 'sqeuclidean')\n idx1 = np.where(dist < num_threshold)[0]\n idx2 = np.where(dist < num_threshold)[1]\n coord1 = np.array([kp1[idx].pt for idx in idx1])\n coord2 = np.array([kp2[idx].pt for idx in idx2])\n match_coords = np.concatenate((coord1, coord2), axis=1)\n return match_coords\n\n\ndef get_errors(matches, H):\n num_pairs = len(matches)\n p1 = np.concatenate((matches[:, 0:2], np.ones((1, num_pairs)).T), axis=1)\n p2 = matches[:, 2:4]\n transformed_p1 = np.zeros((num_pairs, 2))\n for i in range(num_pairs):\n transformed_p1[i] = (np.matmul(H, p1[i]) / np.matmul(H, p1[i])[-1])[0:2\n ]\n errors = np.linalg.norm(p2 - transformed_p1, axis=1) ** 2\n return errors\n\n\ndef compute_H(subset):\n A = []\n for i in range(subset.shape[0]):\n p1 = np.append(subset[i][0:2], 1)\n p2 = np.append(subset[i][2:4], 1)\n row1 = [0, 0, 0, p1[0], p1[1], p1[2], -p2[1] * p1[0], -p2[1] * p1[1\n ], -p2[1] * p1[2]]\n row2 = [p1[0], p1[1], p1[2], 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1\n ], -p2[0] * p1[2]]\n A.append(row1)\n A.append(row2)\n A = np.array(A)\n U, s, V = np.linalg.svd(A)\n H = V[len(V) - 1].reshape(3, 3)\n H = H / H[2, 2]\n return 
H\n\n\ndef show_inlier_matches(img1, img2, inliers):\n print('num of inliers shown in the matching: ' + str(len(inliers)))\n h1, w1 = img1.shape\n h2, w2 = img2.shape\n vis = np.zeros((max(h1, h2), w1 + w2), np.uint8)\n vis[:, :w1] = img1\n vis[:h2, w1:] = img2\n fig, ax = plt.subplots()\n ax.imshow(vis)\n ax.plot([inliers[:, 0], inliers[:, 2] + w1], [inliers[:, 1], inliers[:, 3]]\n )\n plt.show()\n\n\ndef ransac(img1, img2, matches, thres_ransac):\n itertimes = 1000\n inliners = 0\n max_inliners = 0\n for iter in range(0, itertimes):\n subset_idx = random.sample(range(matches.shape[0]), k=4)\n subset = matches[subset_idx]\n H = compute_H(subset)\n if np.linalg.matrix_rank(H) < 3:\n continue\n errors = get_errors(matches, H)\n idx = np.where(errors < thres_ransac)[0]\n inlinerspts = matches[idx]\n inliners = len(inlinerspts)\n if inliners >= max_inliners:\n which_inliners = inlinerspts.copy()\n max_inliners = inliners\n best_H = H.copy()\n avg_residual = sum(get_errors(matches[idx], H)) / inliners\n print('num of inliners: ' + str(max_inliners) + ' average residual: ' +\n str(avg_residual))\n show_inlier_matches(img1, img2, which_inliners)\n return best_H\n\n\ndef warp_images(image0, image1, H):\n transform = skimage.transform.ProjectiveTransform(H)\n warp = skimage.transform.warp\n r, c = image1.shape[:2]\n corners = np.array([[0, 0], [0, r], [c, 0], [c, r]])\n warped_corners = transform(corners)\n all_corners = np.vstack((warped_corners, corners))\n corner_min = np.min(all_corners, axis=0)\n corner_max = np.max(all_corners, axis=0)\n output_shape = corner_max - corner_min\n output_shape = np.ceil(output_shape[::-1])\n offset = skimage.transform.SimilarityTransform(translation=-corner_min)\n image0_ = warp(image0, offset.inverse, output_shape=output_shape, cval=-1)\n image1_ = warp(image1, (transform + offset).inverse, output_shape=\n output_shape, cval=-1)\n image0_zeros = warp(image0, offset.inverse, output_shape=output_shape,\n cval=0)\n image1_zeros = 
warp(image1, (transform + offset).inverse, output_shape=\n output_shape, cval=0)\n overlap = (image0_ != -1.0).astype(int) + (image1_ != -1.0).astype(int)\n overlap += (overlap < 1).astype(int)\n merged = (image0_zeros + image1_zeros) / overlap\n im = Image.fromarray((255 * merged).astype('uint8'), mode='RGB')\n im = np.asarray(im)\n return im\n\n\ndef main(leftimg, rightimg, leftimgcolor, rightimgcolor):\n thres = 9000\n thres_ransac = 1.0\n kp1, dsp1 = sift_descriptor(leftimg)\n kp2, dsp2 = sift_descriptor(rightimg)\n matches = calculate_distance(kp1, kp2, dsp1, dsp2, thres)\n H_matrix = ransac(leftimg, rightimg, matches, thres_ransac)\n stitched_img = warp_images(rightimgcolor, leftimgcolor, H_matrix)\n return stitched_img\n\n\ndef main_2pic():\n dirs = 'MP3_part1_data/' + 'park/'\n leftimg = readimg(dirs, 'left.jpg')\n rightimg = readimg(dirs, 'right.jpg')\n leftimgcolor = readimg_color(dirs, 'left.jpg')\n rightimgcolor = readimg_color(dirs, 'right.jpg')\n stitched_img = main(leftimg, rightimg, leftimgcolor, rightimgcolor)\n plt.imshow(stitched_img)\n plt.show()\n\n\ndef main_3pic():\n dirs = 'MP3_part1_data/' + 'pier/'\n leftimg = readimg(dirs, '1.jpg')\n midimg = readimg(dirs, '2.jpg')\n rightimg = readimg(dirs, '3.jpg')\n leftimgcolor = readimg_color(dirs, '1.jpg')\n midimgcolor = readimg_color(dirs, '2.jpg')\n rightimgcolor = readimg_color(dirs, '3.jpg')\n stitched1 = main(leftimg, midimg, leftimgcolor, midimgcolor)\n plt.imshow(stitched1)\n plt.show()\n grey_stitch1 = cv2.cvtColor(stitched1, cv2.COLOR_RGB2GRAY)\n stitched2 = main(grey_stitch1, rightimg, stitched1, rightimgcolor)\n plt.imshow(stitched2)\n plt.show()\n\n\nif __name__ == '__main__':\n main_3pic()\n",
"step-5": "import time\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\nimport matplotlib.image as mpimg\nimport random\nimport skimage\nimport scipy\nfrom PIL import Image\n\ndef readimg(dirs, imgname):\n img = cv2.imread(dirs + imgname)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n return img\n\ndef readimg_color(dirs, imgname):\n img = cv2.imread(dirs + imgname)\n img = cv2.normalize(img.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX)\n return img\n\ndef sift_descriptor(img):\n sift = cv2.xfeatures2d.SIFT_create()\n kp, dsp = sift.detectAndCompute(img, None)\n\n return kp, dsp\n\ndef show_sift(kp, img):\n # show the img with descriptors\n copyimg = img.copy()\n copyimg = cv2.drawKeypoints(img, kp, copyimg)\n plt.imshow(copyimg)\n plt.show()\n\ndef calculate_distance(kp1, kp2, dsp1, dsp2, num_threshold):\n # fast computation of Euclidean distance between each descriptors\n dist = scipy.spatial.distance.cdist(dsp1, dsp2, 'sqeuclidean')\n # find the matching coordinates\n idx1 = np.where(dist < num_threshold)[0]\n idx2 = np.where(dist < num_threshold)[1]\n coord1 = np.array([kp1[idx].pt for idx in idx1])\n coord2 = np.array([kp2[idx].pt for idx in idx2])\n # put into pairs of coords\n match_coords = np.concatenate((coord1, coord2), axis=1)\n\n return match_coords\n\ndef get_errors(matches, H):\n\t# difference between original img2 points and transformed img1 points with H\n num_pairs = len(matches)\n # all matching points in img1\n p1 = np.concatenate((matches[:, 0:2], np.ones((1, num_pairs)).T), axis=1)\n # all matching points in img2\n p2 = matches[:, 2:4]\n\n # Transform every point in p1 to estimate p2.\n transformed_p1 = np.zeros((num_pairs, 2))\n for i in range(num_pairs):\n transformed_p1[i] = (np.matmul(H, p1[i]) / np.matmul(H, p1[i])[-1])[0:2]\n\n # Compute error of each matching pair\n errors = np.linalg.norm(p2 - transformed_p1, axis=1) ** 2\n return errors\n\ndef compute_H(subset):\n # calculate the fitted homography\n A = []\n\n 
for i in range(subset.shape[0]):\n p1 = np.append(subset[i][0:2], 1)\n p2 = np.append(subset[i][2:4], 1)\n \n row1 = [0, 0, 0, p1[0], p1[1], p1[2], -p2[1]*p1[0], -p2[1]*p1[1], -p2[1]*p1[2]]\n row2 = [p1[0], p1[1], p1[2], 0, 0, 0, -p2[0]*p1[0], -p2[0]*p1[1], -p2[0]*p1[2]]\n A.append(row1)\n A.append(row2)\n\n A = np.array(A)\n\n U, s, V = np.linalg.svd(A)\n H = V[len(V)-1].reshape(3, 3)\n\n # normalize\n H = H / H[2, 2]\n return H\n\ndef show_inlier_matches(img1, img2, inliers):\n print(\"num of inliers shown in the matching: \" + str(len(inliers)))\n h1, w1 = img1.shape\n h2, w2 = img2.shape\n\n vis = np.zeros((max(h1, h2), w1 + w2), np.uint8)\n vis[:, :w1] = img1\n vis[:h2, w1:] = img2\n\n fig, ax = plt.subplots()\n ax.imshow(vis)\n ax.plot([inliers[:,0], inliers[:,2] + w1],[inliers[:,1], inliers[:,3]])\n plt.show()\n\ndef ransac(img1, img2, matches, thres_ransac):\n itertimes = 1000\n inliners = 0\n max_inliners = 0\n\n for iter in range(0, itertimes):\n subset_idx = random.sample(range(matches.shape[0]), k=4)\n subset = matches[subset_idx]\n\n H = compute_H(subset)\n\n # check if it is full rank\n if np.linalg.matrix_rank(H) < 3:\n continue\n\n # the norm of error caused if we choose the above subset\n errors = get_errors(matches, H)\n idx = np.where(errors < thres_ransac)[0]\n inlinerspts = matches[idx]\n\n # find the best number of inliners \n inliners = len(inlinerspts)\n if inliners >= max_inliners:\n which_inliners = inlinerspts.copy()\n max_inliners = inliners\n best_H = H.copy()\n \n avg_residual = sum(get_errors(matches[idx], H)) / inliners\n\n print(\"num of inliners: \" + str(max_inliners) + \" average residual: \" + str(avg_residual))\n show_inlier_matches(img1, img2, which_inliners)\n return best_H\n\n# function provided by Maghav at Piazza @450\ndef warp_images(image0, image1, H):\n transform = skimage.transform.ProjectiveTransform(H)\n warp = skimage.transform.warp\n\n r, c = image1.shape[:2]\n # Note that transformations take coordinates in (x, y) 
format,\n # not (row, column), in order to be consistent with most literature\n corners = np.array([[0, 0],\n [0, r],\n [c, 0],\n [c, r]])\n\n # Warp the image corners to their new positions\n warped_corners = transform(corners)\n\n # Find the extents of both the reference image and the warped\n # target image\n all_corners = np.vstack((warped_corners, corners))\n\n corner_min = np.min(all_corners, axis=0)\n corner_max = np.max(all_corners, axis=0)\n\n output_shape = (corner_max - corner_min)\n output_shape = np.ceil(output_shape[::-1])\n\n offset = skimage.transform.SimilarityTransform(translation=-corner_min)\n\n image0_ = warp(image0, offset.inverse, output_shape=output_shape, cval=-1)\n\n image1_ = warp(image1, (transform + offset).inverse, output_shape=output_shape, cval=-1)\n\n image0_zeros = warp(image0, offset.inverse, output_shape=output_shape, cval=0)\n\n image1_zeros = warp(image1, (transform + offset).inverse, output_shape=output_shape, cval=0)\n\n overlap = (image0_ != -1.0 ).astype(int) + (image1_ != -1.0).astype(int)\n overlap += (overlap < 1).astype(int)\n merged = (image0_zeros+image1_zeros)/overlap\n\n im = Image.fromarray((255*merged).astype('uint8'), mode='RGB')\n im = np.asarray(im)\n\n return im\n\ndef main(leftimg, rightimg, leftimgcolor, rightimgcolor):\n # using 7000, 0.5 for 2 pic; 9000, 1.0 for 3 pic\n thres = 9000\n thres_ransac = 1.0\n\n kp1, dsp1 = sift_descriptor(leftimg)\n kp2, dsp2 = sift_descriptor(rightimg)\n\n # get all matching points\n matches = calculate_distance(kp1, kp2, dsp1, dsp2, thres)\n\n H_matrix = ransac(leftimg, rightimg, matches, thres_ransac)\n\n stitched_img = warp_images(rightimgcolor, leftimgcolor, H_matrix)\n\n return stitched_img\n\ndef main_2pic():\n dirs = 'MP3_part1_data/' + 'park/'\n leftimg = readimg(dirs, 'left.jpg')\n rightimg = readimg(dirs, 'right.jpg')\n leftimgcolor = readimg_color(dirs, 'left.jpg')\n rightimgcolor = readimg_color(dirs, 'right.jpg')\n\n stitched_img = main(leftimg, rightimg, 
leftimgcolor, rightimgcolor)\n\n plt.imshow(stitched_img)\n plt.show()\n\ndef main_3pic():\n dirs = 'MP3_part1_data/' + 'pier/' # ledge pier hill\n leftimg = readimg(dirs, '1.jpg')\n midimg = readimg(dirs, '2.jpg')\n rightimg = readimg(dirs, '3.jpg')\n leftimgcolor = readimg_color(dirs, '1.jpg')\n midimgcolor = readimg_color(dirs, '2.jpg')\n rightimgcolor = readimg_color(dirs, '3.jpg')\n\n stitched1 = main(leftimg, midimg, leftimgcolor, midimgcolor)\n\n plt.imshow(stitched1)\n plt.show()\n\n grey_stitch1 = cv2.cvtColor(stitched1, cv2.COLOR_RGB2GRAY)\n\n stitched2 = main(grey_stitch1, rightimg, stitched1, rightimgcolor)\n\n plt.imshow(stitched2)\n plt.show()\n\nif __name__ == '__main__':\n\t#main_2pic()\n\tmain_3pic()\n \n",
"step-ids": [
9,
11,
14,
15,
16
]
}
|
[
9,
11,
14,
15,
16
] |
import csv
import pickle
import time
DECAY = 0.95
DEPTH = 2
def init_cache(g):
	'''
	Reset the simrank memoisation cache stored on graph g.
	'''
	g.cache = dict()
def return_and_cache(g, element, val):
	'''
	Store val in g's cache under key element, then hand val straight back.
	'''
	g.cache[element] = val
	return val
def simrank_impl(g, node1, node2, t, is_weighted):
	'''
	Recursive SimRank computation (weighted or unweighted) with
	memoisation in g.cache.

	:param g: graph whose authors dict maps node id -> object holding an
	          `edges` dict (neighbour id -> edge weight)
	:param node1: first node id
	:param node2: second node id
	:param t: remaining recursion depth
	:param is_weighted: if True, weight each neighbour pair by the
	                    product of its two edge weights
	:return: similarity score in [0, 1]
	'''
	# base cases: a node is maximally similar to itself; depth exhausted
	if node1 == node2:
		return 1
	if t == 0:
		return 0
	# memoisation: `in g.cache` is O(1); the original `in g.cache.keys()`
	# built a key list on every call under Python 2
	if (node1, node2, t) in g.cache:
		return g.cache[(node1, node2, t)]

	neighbours1 = g.authors[node1].edges
	neighbours2 = g.authors[node2].edges

	# every (weight product, neighbour-of-1, neighbour-of-2) combination
	if is_weighted:
		neighbours_mult = [(neighbours1[i] * neighbours2[j], i, j)
		                   for i in neighbours1 for j in neighbours2]
	else:
		neighbours_mult = [(1, i, j)
		                   for i in neighbours1 for j in neighbours2]

	# a node with no neighbours yields no pairs; similarity is 0
	# (the original divided by zero here)
	if not neighbours_mult:
		return return_and_cache(g, (node1, node2, t), 0)

	simrank_sum = sum(mult * simrank_impl(g, i, j, t - 1, is_weighted)
	                  for (mult, i, j) in neighbours_mult)
	normalize = sum(mult for (mult, i, j) in neighbours_mult)

	return return_and_cache(g, (node1, node2, t),
	                        (DECAY / normalize) * simrank_sum)
def simrank(g, node1, node2, depth=DEPTH):
	'''
	Unweighted SimRank between node1 and node2, timed and printed.

	:param g: prepared graph (cache is re-initialised here)
	:param node1: first node id
	:param node2: second node id
	:param depth: recursion depth for the SimRank computation
	:return: similarity score in [0, 1]
	'''
	init_cache(g)
	start = time.time()
	res = simrank_impl(g, node1, node2, depth, False)
	end = time.time()
	# parenthesised print behaves identically on Python 2 and 3;
	# the original `print "..."` statement is a SyntaxError on Python 3
	print("simrank took %f seconds" % (end - start))
	return res
def wsimrank(g, node1, node2, depth=DEPTH):
	'''
	Weighted SimRank between node1 and node2, timed and printed.

	:param g: prepared graph (cache is re-initialised here)
	:param node1: first node id
	:param node2: second node id
	:param depth: recursion depth for the SimRank computation
	:return: similarity score in [0, 1]
	'''
	init_cache(g)
	start = time.time()
	res = simrank_impl(g, node1, node2, depth, True)
	end = time.time()
	# parenthesised print behaves identically on Python 2 and 3;
	# the original `print "..."` statement is a SyntaxError on Python 3
	print("weighted simrank took %f seconds" % (end - start))
	return res
def read_neighbours(g):
	'''
	Load every author's pickled neighbour set from the "neighbourhood"
	directory into memory, storing it on author.neighbours.

	Progress is printed every 500 authors.

	:param g: graph whose authors dict maps author id -> author object
	'''
	for i, (auth_id, auth) in enumerate(g.authors.items()):
		# with-block closes the handle promptly; the original
		# pickle.load(open(...)) leaked the file object.  Pickle files
		# must be opened in binary mode.
		with open("neighbourhood/%s" % auth_id, 'rb') as fh:
			auth.neighbours = pickle.load(fh)
		if i % 500 == 0:
			print("reading neighbours, iteration %d out of %d"
			      % (i, len(g.authors)))
def experiment_phaze1():
	'''
	Load the preprocessed graph and its neighbour sets, then patch in
	the symmetric edge (weight 4) between authors 828114 and 14607 that
	is missing from the preprocessed data.

	:return: the prepared graph object
	'''
	print("loading graph")
	# pickle data must be read in binary mode; the original text-mode
	# open only worked on Python 2 with protocol-0 pickles, and the
	# file handle was never closed
	with open("processed_graph.pickle", "rb") as fh:
		g = pickle.load(fh)
	read_neighbours(g)
	# manually add the missing edge in both directions
	g.authors[828114].edges[14607] = 4
	g.authors[14607].edges[828114] = 4
	return g
def experiment_phaze2(g, file_name):
	'''
	Compute plain and weighted SimRank between the first author id in
	file_name and every other id listed there, printing progress and
	writing the results to "<file_name>.csv".

	Requires `import csv` at module level (the original referenced csv
	without importing it, raising NameError at the final write).

	:param g: prepared graph (see experiment_phaze1)
	:param file_name: text file with one author id per line; the first
	                  id is the reference author
	'''
	with open(file_name) as fh:
		ids = [int(line.strip()) for line in fh]
	main_auth = ids[0]
	area = ids[1:]

	results = [["name", "simrank", "weighted simrank"]]
	for aid in area:
		print("computing author %d %s:" % (aid, g.authors[aid].name))
		ws = str(wsimrank(g, main_auth, aid, 3))
		print("wsimrank: " + ws)
		s = str(simrank(g, main_auth, aid, 3))
		print("simrank: " + s)
		results.append([g.authors[aid].name, s, ws])

	# with-block ensures the output file is flushed and closed
	with open(file_name + ".csv", 'w') as out:
		csv.writer(out).writerows(results)
|
normal
|
{
"blob_id": "535ee547475fbc2e1c0ee59e3e300beda1489d47",
"index": 4215,
"step-1": "import pickle\nimport time\nDECAY = 0.95\nDEPTH = 2\n\ndef init_cache(g):\n\t'''\n\tInitialize simrank cache for graph g\n\t'''\n\tg.cache = {}\n\ndef return_and_cache(g, element, val):\n\t'''\n\tCode (and function name) is pretty self explainatory here\n\t'''\n\tg.cache[element] = val\n\treturn val\n\ndef simrank_impl(g, node1, node2, t, is_weighted):\n\t'''\n\tWeighted simrank implementation\n\t'''\n\t#print \"%d %d %d\"%(node1, node2, t)\n\tif node1 == node2:\n\t\treturn 1\n\tif t == 0:\n\t\treturn 0\n\tif (node1, node2, t) in g.cache.keys():\n\t\treturn g.cache[(node1, node2, t)]\n\t#if (node2 not in pickle.load(open(\"neighbourhood/%s\"%node1, 'rb'))):\n\t#if (node2 not in g.authors[node1].neighbours):\n\t#\treturn return_and_cache(g, (node1, node2, t), 0)\n\t\n\tneighbours1 = g.authors[node1].edges\n\tneighbours2 = g.authors[node2].edges\n\n\tif is_weighted:\n\t\tneighbours_mult = [(neighbours1[i]*neighbours2[j], i, j) for i in neighbours1.keys() for j in neighbours2.keys()]\n\telse:\n\t\tneighbours_mult = [(1, i, j) for i in neighbours1.keys() for j in neighbours2.keys()]\n\t\n\n\tsimrank_sum = sum([mult*simrank_impl(g, i, j, t-1, is_weighted) for (mult, i, j) in neighbours_mult])\n\tnormalize = sum([mult for (mult, i, j) in neighbours_mult])\n\n\treturn return_and_cache(g, (node1, node2, t), (DECAY/normalize)*simrank_sum)\n\ndef simrank(g, node1, node2, depth=DEPTH):\n\t'''\n\tNON-weighted variant\n\t'''\n\tinit_cache(g)\n\tstart = time.time()\n\tres = simrank_impl(g, node1, node2, depth, False)\n\tend = time.time()\n\tprint \"simrank took %f seconds\"%(end-start)\n\treturn res\n\ndef wsimrank(g, node1, node2, depth=DEPTH):\n\t'''\n\tweighted variant\n\t'''\n\tinit_cache(g)\n\tstart = time.time()\n\tres = simrank_impl(g, node1, node2, depth, True)\n\tend = time.time()\n\tprint \"weighted simrank took %f seconds\"%(end-start)\n\treturn res\n\n\n\ndef read_neighbours(g):\n\t'''\n\tRead neighbours of all nodes from disk into memory.\n\tNeighbours 
are assumed to be stored under the \"neighbours\" directory.\n\t'''\n\ti = 0\n\tfor auth_id, auth in g.authors.iteritems():\n\t\tauth.neighbours = pickle.load(open(\"neighbourhood/%s\"%auth_id, 'rb'))\n\t\tif (i % 500) == 0:\n\t\t\tprint \"reading neighbours, iteration %d out of %d\"%(i, len(g.authors))\n\t\ti += 1\n\n\ndef experiment_phaze1():\n\tprint \"loading graph\"\n\tg = pickle.load(open(\"processed_graph.pickle\"))\n\tread_neighbours(g)\n\tg.authors[828114].edges[14607] = 4\n\tg.authors[14607].edges[828114] = 4\n\treturn g\n\ndef experiment_phaze2(g, file_name):\n\tarea = map(int, map(str.strip, open(file_name).readlines()))\n\tmain_auth = area[0]\n\tarea = area[1:]\n\tresults = [[\"name\", \"simrank\", \"weighted simrank\"]]\n\tfor aid in area:\n\t\tprint \"computing author %d %s:\"%(aid, g.authors[aid].name)\n\t\tws = str(wsimrank(g, main_auth, aid, 3))\n\t\tprint \"wsimrank: \" + ws\n\t\ts = str(simrank(g, main_auth, aid, 3))\n\t\tprint \"simrank: \" + s\n\t\tresults.append([g.authors[aid].name, s, ws])\n\t\n\twr = csv.writer(open(file_name + \".csv\", 'w'))\n\twr.writerows(results)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def make_single_prediction(wav_file, model, is_game):
""" Predictions with model that is locally saved
:param wav_file: wav-file we want to predict
:param model: Trained model for our predictions
:return: None """
return reshape_and_predict(filepath=wav_file, saved_model=model,
is_game=is_game)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def make_predictions(file_list, model, is_game=False):
""" Predictions with model that is locally saved
:param file_list: path to files we want to predict
:param model: Trained model for our predictions
:return: None """
temp_list = []
for wav_file in glob.glob(file_list):
temp_list.append(reshape_and_predict(filepath=wav_file, saved_model
=model, is_game=is_game))
return temp_list
def make_single_prediction(wav_file, model, is_game):
""" Predictions with model that is locally saved
:param wav_file: wav-file we want to predict
:param model: Trained model for our predictions
:return: None """
return reshape_and_predict(filepath=wav_file, saved_model=model,
is_game=is_game)
<|reserved_special_token_1|>
import tensorflow.keras
from preprocessing_and_training.train import reshape_and_predict
import glob
<|reserved_special_token_0|>
def make_predictions(file_list, model, is_game=False):
""" Predictions with model that is locally saved
:param file_list: path to files we want to predict
:param model: Trained model for our predictions
:return: None """
temp_list = []
for wav_file in glob.glob(file_list):
temp_list.append(reshape_and_predict(filepath=wav_file, saved_model
=model, is_game=is_game))
return temp_list
def make_single_prediction(wav_file, model, is_game):
""" Predictions with model that is locally saved
:param wav_file: wav-file we want to predict
:param model: Trained model for our predictions
:return: None """
return reshape_and_predict(filepath=wav_file, saved_model=model,
is_game=is_game)
<|reserved_special_token_1|>
import tensorflow.keras
from preprocessing_and_training.train import reshape_and_predict
import glob
""" Script for prediction - testing and importing the trained model from train.py """
def make_predictions(file_list, model, is_game=False):
""" Predictions with model that is locally saved
:param file_list: path to files we want to predict
:param model: Trained model for our predictions
:return: None """
temp_list = []
for wav_file in glob.glob(file_list):
temp_list.append(reshape_and_predict(filepath=wav_file, saved_model=model, is_game=is_game))
return temp_list
def make_single_prediction(wav_file, model, is_game):
""" Predictions with model that is locally saved
:param wav_file: wav-file we want to predict
:param model: Trained model for our predictions
:return: None """
return reshape_and_predict(filepath=wav_file, saved_model=model, is_game=is_game)
|
flexible
|
{
"blob_id": "a17c448b068b28881f9d0c89be6037503eca3974",
"index": 5700,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef make_single_prediction(wav_file, model, is_game):\n \"\"\" Predictions with model that is locally saved\n\n :param wav_file: wav-file we want to predict\n :param model: Trained model for our predictions\n :return: None \"\"\"\n return reshape_and_predict(filepath=wav_file, saved_model=model,\n is_game=is_game)\n",
"step-3": "<mask token>\n\n\ndef make_predictions(file_list, model, is_game=False):\n \"\"\" Predictions with model that is locally saved\n\n :param file_list: path to files we want to predict\n :param model: Trained model for our predictions\n :return: None \"\"\"\n temp_list = []\n for wav_file in glob.glob(file_list):\n temp_list.append(reshape_and_predict(filepath=wav_file, saved_model\n =model, is_game=is_game))\n return temp_list\n\n\ndef make_single_prediction(wav_file, model, is_game):\n \"\"\" Predictions with model that is locally saved\n\n :param wav_file: wav-file we want to predict\n :param model: Trained model for our predictions\n :return: None \"\"\"\n return reshape_and_predict(filepath=wav_file, saved_model=model,\n is_game=is_game)\n",
"step-4": "import tensorflow.keras\nfrom preprocessing_and_training.train import reshape_and_predict\nimport glob\n<mask token>\n\n\ndef make_predictions(file_list, model, is_game=False):\n \"\"\" Predictions with model that is locally saved\n\n :param file_list: path to files we want to predict\n :param model: Trained model for our predictions\n :return: None \"\"\"\n temp_list = []\n for wav_file in glob.glob(file_list):\n temp_list.append(reshape_and_predict(filepath=wav_file, saved_model\n =model, is_game=is_game))\n return temp_list\n\n\ndef make_single_prediction(wav_file, model, is_game):\n \"\"\" Predictions with model that is locally saved\n\n :param wav_file: wav-file we want to predict\n :param model: Trained model for our predictions\n :return: None \"\"\"\n return reshape_and_predict(filepath=wav_file, saved_model=model,\n is_game=is_game)\n",
"step-5": "import tensorflow.keras\nfrom preprocessing_and_training.train import reshape_and_predict\nimport glob\n\n\"\"\" Script for prediction - testing and importing the trained model from train.py \"\"\"\n\n\ndef make_predictions(file_list, model, is_game=False):\n \"\"\" Predictions with model that is locally saved\n\n :param file_list: path to files we want to predict\n :param model: Trained model for our predictions\n :return: None \"\"\"\n temp_list = []\n for wav_file in glob.glob(file_list):\n temp_list.append(reshape_and_predict(filepath=wav_file, saved_model=model, is_game=is_game))\n\n return temp_list\n\n\ndef make_single_prediction(wav_file, model, is_game):\n \"\"\" Predictions with model that is locally saved\n\n :param wav_file: wav-file we want to predict\n :param model: Trained model for our predictions\n :return: None \"\"\"\n\n return reshape_and_predict(filepath=wav_file, saved_model=model, is_game=is_game)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
print("2 + 3 * 4 =")
print(2 + 3 * 4)
print("2 + (3 * 4) = ")
print(2 + (3 * 4))
|
normal
|
{
"blob_id": "58d137d614a0d5c11bf4325c1ade13f4f4f89f52",
"index": 3184,
"step-1": "<mask token>\n",
"step-2": "print('2 + 3 * 4 =')\nprint(2 + 3 * 4)\nprint('2 + (3 * 4) = ')\nprint(2 + 3 * 4)\n",
"step-3": "print(\"2 + 3 * 4 =\")\nprint(2 + 3 * 4)\n\nprint(\"2 + (3 * 4) = \")\nprint(2 + (3 * 4))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from opengever.propertysheets.assignment import get_document_assignment_slots
from opengever.propertysheets.assignment import get_dossier_assignment_slots
from opengever.propertysheets.storage import PropertySheetSchemaStorage
from plone.restapi.services import Service
LISTING_TO_SLOTS = {
u'dossiers': get_dossier_assignment_slots,
u'documents': get_document_assignment_slots,
}
class ListingCustomFieldsGet(Service):
"""API Endpoint which returns custom fields available for listings.
It returns a nested data structure with custom fields for each supported
listing, if available.
Custom fields are provided as follows:
- Custom field source are property sheets registerd for a type associated
with a listing
- Custom fields must be indexed in solr (i.e. everything but `Text`)
- If different sheets for the same type index to the same field, only the
last field is returned.
GET /@listing-custom-fields HTTP/1.1
"""
def reply(self):
solr_fields = {}
storage = PropertySheetSchemaStorage()
if not storage:
return solr_fields
for listing_name, slot_provider in LISTING_TO_SLOTS.items():
fields_by_listing = {}
for slot_name in slot_provider():
definition = storage.query(slot_name)
if definition is not None:
fields_by_listing.update(
definition.get_solr_dynamic_field_schema()
)
if fields_by_listing:
solr_fields[listing_name] = {
'properties': fields_by_listing
}
return solr_fields
|
normal
|
{
"blob_id": "ab352c9431fda19bc21a9f7ffa075303641cca45",
"index": 155,
"step-1": "<mask token>\n\n\nclass ListingCustomFieldsGet(Service):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ListingCustomFieldsGet(Service):\n <mask token>\n\n def reply(self):\n solr_fields = {}\n storage = PropertySheetSchemaStorage()\n if not storage:\n return solr_fields\n for listing_name, slot_provider in LISTING_TO_SLOTS.items():\n fields_by_listing = {}\n for slot_name in slot_provider():\n definition = storage.query(slot_name)\n if definition is not None:\n fields_by_listing.update(definition.\n get_solr_dynamic_field_schema())\n if fields_by_listing:\n solr_fields[listing_name] = {'properties': fields_by_listing}\n return solr_fields\n",
"step-3": "<mask token>\n\n\nclass ListingCustomFieldsGet(Service):\n \"\"\"API Endpoint which returns custom fields available for listings.\n\n It returns a nested data structure with custom fields for each supported\n listing, if available.\n Custom fields are provided as follows:\n - Custom field source are property sheets registerd for a type associated\n with a listing\n - Custom fields must be indexed in solr (i.e. everything but `Text`)\n - If different sheets for the same type index to the same field, only the\n last field is returned.\n\n GET /@listing-custom-fields HTTP/1.1\n \"\"\"\n\n def reply(self):\n solr_fields = {}\n storage = PropertySheetSchemaStorage()\n if not storage:\n return solr_fields\n for listing_name, slot_provider in LISTING_TO_SLOTS.items():\n fields_by_listing = {}\n for slot_name in slot_provider():\n definition = storage.query(slot_name)\n if definition is not None:\n fields_by_listing.update(definition.\n get_solr_dynamic_field_schema())\n if fields_by_listing:\n solr_fields[listing_name] = {'properties': fields_by_listing}\n return solr_fields\n",
"step-4": "from opengever.propertysheets.assignment import get_document_assignment_slots\nfrom opengever.propertysheets.assignment import get_dossier_assignment_slots\nfrom opengever.propertysheets.storage import PropertySheetSchemaStorage\nfrom plone.restapi.services import Service\nLISTING_TO_SLOTS = {u'dossiers': get_dossier_assignment_slots, u'documents':\n get_document_assignment_slots}\n\n\nclass ListingCustomFieldsGet(Service):\n \"\"\"API Endpoint which returns custom fields available for listings.\n\n It returns a nested data structure with custom fields for each supported\n listing, if available.\n Custom fields are provided as follows:\n - Custom field source are property sheets registerd for a type associated\n with a listing\n - Custom fields must be indexed in solr (i.e. everything but `Text`)\n - If different sheets for the same type index to the same field, only the\n last field is returned.\n\n GET /@listing-custom-fields HTTP/1.1\n \"\"\"\n\n def reply(self):\n solr_fields = {}\n storage = PropertySheetSchemaStorage()\n if not storage:\n return solr_fields\n for listing_name, slot_provider in LISTING_TO_SLOTS.items():\n fields_by_listing = {}\n for slot_name in slot_provider():\n definition = storage.query(slot_name)\n if definition is not None:\n fields_by_listing.update(definition.\n get_solr_dynamic_field_schema())\n if fields_by_listing:\n solr_fields[listing_name] = {'properties': fields_by_listing}\n return solr_fields\n",
"step-5": "from opengever.propertysheets.assignment import get_document_assignment_slots\nfrom opengever.propertysheets.assignment import get_dossier_assignment_slots\nfrom opengever.propertysheets.storage import PropertySheetSchemaStorage\nfrom plone.restapi.services import Service\n\n\nLISTING_TO_SLOTS = {\n u'dossiers': get_dossier_assignment_slots,\n u'documents': get_document_assignment_slots,\n}\n\n\nclass ListingCustomFieldsGet(Service):\n \"\"\"API Endpoint which returns custom fields available for listings.\n\n It returns a nested data structure with custom fields for each supported\n listing, if available.\n Custom fields are provided as follows:\n - Custom field source are property sheets registerd for a type associated\n with a listing\n - Custom fields must be indexed in solr (i.e. everything but `Text`)\n - If different sheets for the same type index to the same field, only the\n last field is returned.\n\n GET /@listing-custom-fields HTTP/1.1\n \"\"\"\n\n def reply(self):\n solr_fields = {}\n\n storage = PropertySheetSchemaStorage()\n if not storage:\n return solr_fields\n\n for listing_name, slot_provider in LISTING_TO_SLOTS.items():\n fields_by_listing = {}\n\n for slot_name in slot_provider():\n definition = storage.query(slot_name)\n if definition is not None:\n fields_by_listing.update(\n definition.get_solr_dynamic_field_schema()\n )\n\n if fields_by_listing:\n solr_fields[listing_name] = {\n 'properties': fields_by_listing\n }\n\n return solr_fields\n",
"step-ids": [
1,
2,
3,
5,
6
]
}
|
[
1,
2,
3,
5,
6
] |
class Pinnwand:
def __init__(self):
self.__zettel = []
def hefteAn(self, notiz):
prio = notiz.count('!')
self.__zettel.append((prio, notiz))
<|reserved_special_token_0|>
def __str__(self):
ausgabe = 'Notizen\n'
zettelListe = self.__zettel[:]
zettelListe.sort(reverse=True)
print('Zettelliste: ')
print(zettelListe)
for z in zettelListe:
ausgabe += z[1] + '\t'
ausgabe += '(Priorität: ' + str(z[0]) + ')' + '\n'
return ausgabe
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Pinnwand:
def __init__(self):
self.__zettel = []
def hefteAn(self, notiz):
prio = notiz.count('!')
self.__zettel.append((prio, notiz))
def entferne(self):
hoechste = 0
zettel = 0
for i in range(len(self.__zettel)):
if self.__zettel[i][0] > hoechste:
hoechste = self.__zettel[i][0]
zettel = i
print(self.__zettel[zettel][1])
del self.__zettel[zettel]
def __str__(self):
ausgabe = 'Notizen\n'
zettelListe = self.__zettel[:]
zettelListe.sort(reverse=True)
print('Zettelliste: ')
print(zettelListe)
for z in zettelListe:
ausgabe += z[1] + '\t'
ausgabe += '(Priorität: ' + str(z[0]) + ')' + '\n'
return ausgabe
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Pinnwand:
def __init__(self):
self.__zettel = []
def hefteAn(self, notiz):
prio = notiz.count('!')
self.__zettel.append((prio, notiz))
def entferne(self):
hoechste = 0
zettel = 0
for i in range(len(self.__zettel)):
if self.__zettel[i][0] > hoechste:
hoechste = self.__zettel[i][0]
zettel = i
print(self.__zettel[zettel][1])
del self.__zettel[zettel]
def __str__(self):
ausgabe = 'Notizen\n'
zettelListe = self.__zettel[:]
zettelListe.sort(reverse=True)
print('Zettelliste: ')
print(zettelListe)
for z in zettelListe:
ausgabe += z[1] + '\t'
ausgabe += '(Priorität: ' + str(z[0]) + ')' + '\n'
return ausgabe
<|reserved_special_token_0|>
while True:
print(menue)
eingabe = input('Ihre Wahl: ')
if eingabe in 'nN':
notiz = input('Notiz: ')
while notiz != '':
p.hefteAn(notiz)
notiz = input('Notiz: ')
elif eingabe in 'aA':
print(p)
elif eingabe in 'wW':
p.entferne()
elif eingabe in 'eE':
print('Tschüß!')
break
<|reserved_special_token_1|>
class Pinnwand:
def __init__(self):
self.__zettel = []
def hefteAn(self, notiz):
prio = notiz.count('!')
self.__zettel.append((prio, notiz))
def entferne(self):
hoechste = 0
zettel = 0
for i in range(len(self.__zettel)):
if self.__zettel[i][0] > hoechste:
hoechste = self.__zettel[i][0]
zettel = i
print(self.__zettel[zettel][1])
del self.__zettel[zettel]
def __str__(self):
ausgabe = 'Notizen\n'
zettelListe = self.__zettel[:]
zettelListe.sort(reverse=True)
print('Zettelliste: ')
print(zettelListe)
for z in zettelListe:
ausgabe += z[1] + '\t'
ausgabe += '(Priorität: ' + str(z[0]) + ')' + '\n'
return ausgabe
menue = """
(N)eue Notiz anheften (A)lle Notizen auflisten
(W)ichtigste Notiz entfernen (E)nde
"""
p = Pinnwand()
while True:
print(menue)
eingabe = input('Ihre Wahl: ')
if eingabe in 'nN':
notiz = input('Notiz: ')
while notiz != '':
p.hefteAn(notiz)
notiz = input('Notiz: ')
elif eingabe in 'aA':
print(p)
elif eingabe in 'wW':
p.entferne()
elif eingabe in 'eE':
print('Tschüß!')
break
<|reserved_special_token_1|>
#####################
# Aufgabe 2, 13.7 #
# v1.0 #
# baehll #
# 04.05.2018 #
#####################
class Pinnwand:
def __init__(self):
self.__zettel = []
def hefteAn(self, notiz):
#Analyse des Textes
prio = notiz.count("!")
self.__zettel.append((prio, notiz))
def entferne(self):
hoechste = 0
zettel = 0
for i in range(len(self.__zettel)):
if self.__zettel[i][0] > hoechste:
hoechste = self.__zettel[i][0]
zettel = i
print(self.__zettel[zettel][1])
del self.__zettel[zettel]
def __str__(self):
ausgabe = "Notizen\n"
zettelListe = self.__zettel[:]
zettelListe.sort(reverse=True)
print("Zettelliste: ")
print(zettelListe)
for z in zettelListe:
ausgabe += z[1] + "\t"
ausgabe += "(Priorität: " + str(z[0]) + ")" + "\n"
return ausgabe
menue = """
(N)eue Notiz anheften (A)lle Notizen auflisten
(W)ichtigste Notiz entfernen (E)nde
"""
p = Pinnwand()
while True:
print(menue)
eingabe = input("Ihre Wahl: ")
if eingabe in "nN":
notiz = input("Notiz: ")
while notiz != "":
p.hefteAn(notiz)
notiz = input("Notiz: ")
elif eingabe in "aA":
print(p)
elif eingabe in "wW":
p.entferne()
elif eingabe in "eE":
print("Tschüß!")
break
|
flexible
|
{
"blob_id": "382a3b8bcd07c7098cecf2b770e46dfff50eeb98",
"index": 2695,
"step-1": "class Pinnwand:\n\n def __init__(self):\n self.__zettel = []\n\n def hefteAn(self, notiz):\n prio = notiz.count('!')\n self.__zettel.append((prio, notiz))\n <mask token>\n\n def __str__(self):\n ausgabe = 'Notizen\\n'\n zettelListe = self.__zettel[:]\n zettelListe.sort(reverse=True)\n print('Zettelliste: ')\n print(zettelListe)\n for z in zettelListe:\n ausgabe += z[1] + '\\t'\n ausgabe += '(Priorität: ' + str(z[0]) + ')' + '\\n'\n return ausgabe\n\n\n<mask token>\n",
"step-2": "class Pinnwand:\n\n def __init__(self):\n self.__zettel = []\n\n def hefteAn(self, notiz):\n prio = notiz.count('!')\n self.__zettel.append((prio, notiz))\n\n def entferne(self):\n hoechste = 0\n zettel = 0\n for i in range(len(self.__zettel)):\n if self.__zettel[i][0] > hoechste:\n hoechste = self.__zettel[i][0]\n zettel = i\n print(self.__zettel[zettel][1])\n del self.__zettel[zettel]\n\n def __str__(self):\n ausgabe = 'Notizen\\n'\n zettelListe = self.__zettel[:]\n zettelListe.sort(reverse=True)\n print('Zettelliste: ')\n print(zettelListe)\n for z in zettelListe:\n ausgabe += z[1] + '\\t'\n ausgabe += '(Priorität: ' + str(z[0]) + ')' + '\\n'\n return ausgabe\n\n\n<mask token>\n",
"step-3": "class Pinnwand:\n\n def __init__(self):\n self.__zettel = []\n\n def hefteAn(self, notiz):\n prio = notiz.count('!')\n self.__zettel.append((prio, notiz))\n\n def entferne(self):\n hoechste = 0\n zettel = 0\n for i in range(len(self.__zettel)):\n if self.__zettel[i][0] > hoechste:\n hoechste = self.__zettel[i][0]\n zettel = i\n print(self.__zettel[zettel][1])\n del self.__zettel[zettel]\n\n def __str__(self):\n ausgabe = 'Notizen\\n'\n zettelListe = self.__zettel[:]\n zettelListe.sort(reverse=True)\n print('Zettelliste: ')\n print(zettelListe)\n for z in zettelListe:\n ausgabe += z[1] + '\\t'\n ausgabe += '(Priorität: ' + str(z[0]) + ')' + '\\n'\n return ausgabe\n\n\n<mask token>\nwhile True:\n print(menue)\n eingabe = input('Ihre Wahl: ')\n if eingabe in 'nN':\n notiz = input('Notiz: ')\n while notiz != '':\n p.hefteAn(notiz)\n notiz = input('Notiz: ')\n elif eingabe in 'aA':\n print(p)\n elif eingabe in 'wW':\n p.entferne()\n elif eingabe in 'eE':\n print('Tschüß!')\n break\n",
"step-4": "class Pinnwand:\n\n def __init__(self):\n self.__zettel = []\n\n def hefteAn(self, notiz):\n prio = notiz.count('!')\n self.__zettel.append((prio, notiz))\n\n def entferne(self):\n hoechste = 0\n zettel = 0\n for i in range(len(self.__zettel)):\n if self.__zettel[i][0] > hoechste:\n hoechste = self.__zettel[i][0]\n zettel = i\n print(self.__zettel[zettel][1])\n del self.__zettel[zettel]\n\n def __str__(self):\n ausgabe = 'Notizen\\n'\n zettelListe = self.__zettel[:]\n zettelListe.sort(reverse=True)\n print('Zettelliste: ')\n print(zettelListe)\n for z in zettelListe:\n ausgabe += z[1] + '\\t'\n ausgabe += '(Priorität: ' + str(z[0]) + ')' + '\\n'\n return ausgabe\n\n\nmenue = \"\"\"\n (N)eue Notiz anheften (A)lle Notizen auflisten\n (W)ichtigste Notiz entfernen (E)nde\n\"\"\"\np = Pinnwand()\nwhile True:\n print(menue)\n eingabe = input('Ihre Wahl: ')\n if eingabe in 'nN':\n notiz = input('Notiz: ')\n while notiz != '':\n p.hefteAn(notiz)\n notiz = input('Notiz: ')\n elif eingabe in 'aA':\n print(p)\n elif eingabe in 'wW':\n p.entferne()\n elif eingabe in 'eE':\n print('Tschüß!')\n break\n",
"step-5": "#####################\n# Aufgabe 2, 13.7 #\n# v1.0 #\n# baehll #\n# 04.05.2018 #\n#####################\n\nclass Pinnwand:\n\n def __init__(self):\n self.__zettel = []\n \n def hefteAn(self, notiz):\n #Analyse des Textes\n prio = notiz.count(\"!\")\n self.__zettel.append((prio, notiz))\n\n def entferne(self):\n hoechste = 0\n zettel = 0\n for i in range(len(self.__zettel)):\n if self.__zettel[i][0] > hoechste:\n hoechste = self.__zettel[i][0]\n zettel = i\n print(self.__zettel[zettel][1])\n del self.__zettel[zettel]\n \n def __str__(self):\n ausgabe = \"Notizen\\n\"\n zettelListe = self.__zettel[:]\n zettelListe.sort(reverse=True)\n print(\"Zettelliste: \")\n print(zettelListe)\n for z in zettelListe:\n ausgabe += z[1] + \"\\t\"\n ausgabe += \"(Priorität: \" + str(z[0]) + \")\" + \"\\n\"\n return ausgabe\n\nmenue = \"\"\"\n (N)eue Notiz anheften (A)lle Notizen auflisten\n (W)ichtigste Notiz entfernen (E)nde\n\"\"\"\n\np = Pinnwand()\n\nwhile True:\n print(menue)\n eingabe = input(\"Ihre Wahl: \")\n if eingabe in \"nN\":\n notiz = input(\"Notiz: \")\n while notiz != \"\":\n p.hefteAn(notiz)\n notiz = input(\"Notiz: \")\n elif eingabe in \"aA\":\n print(p)\n elif eingabe in \"wW\":\n p.entferne()\n elif eingabe in \"eE\":\n print(\"Tschüß!\")\n break",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def parse(query):
print('parsing the query...')
query = dnf_converter.convert(query)
cp_clause_list = []
clause_list = []
for cp in query['$or']:
clauses = []
if '$and' in cp:
for clause in cp['$and']:
clauses.append(clause)
clause_list.append(clause)
else:
clause = cp
clauses.append(clause)
clause_list.append(clause)
cp_clause_list.append({'cp': cp, 'clauses': clauses})
return cp_clause_list, clause_list
<|reserved_special_token_1|>
import dnf_converter
def parse(query):
print('parsing the query...')
query = dnf_converter.convert(query)
cp_clause_list = []
clause_list = []
for cp in query['$or']:
clauses = []
if '$and' in cp:
for clause in cp['$and']:
clauses.append(clause)
clause_list.append(clause)
else:
clause = cp
clauses.append(clause)
clause_list.append(clause)
cp_clause_list.append({'cp': cp, 'clauses': clauses})
return cp_clause_list, clause_list
<|reserved_special_token_1|>
import dnf_converter
def parse(query):
print("parsing the query...")
query = dnf_converter.convert(query)
cp_clause_list = []
clause_list = []
for cp in query["$or"]:
clauses = []
if "$and" in cp:
for clause in cp["$and"]:
clauses.append(clause)
clause_list.append(clause)
else:
clause = cp
clauses.append(clause)
clause_list.append(clause)
cp_clause_list.append({ "cp": cp, "clauses": clauses })
return cp_clause_list, clause_list
|
flexible
|
{
"blob_id": "999de0965efa3c1fe021142a105dcf28184cd5ba",
"index": 43,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef parse(query):\n print('parsing the query...')\n query = dnf_converter.convert(query)\n cp_clause_list = []\n clause_list = []\n for cp in query['$or']:\n clauses = []\n if '$and' in cp:\n for clause in cp['$and']:\n clauses.append(clause)\n clause_list.append(clause)\n else:\n clause = cp\n clauses.append(clause)\n clause_list.append(clause)\n cp_clause_list.append({'cp': cp, 'clauses': clauses})\n return cp_clause_list, clause_list\n",
"step-3": "import dnf_converter\n\n\ndef parse(query):\n print('parsing the query...')\n query = dnf_converter.convert(query)\n cp_clause_list = []\n clause_list = []\n for cp in query['$or']:\n clauses = []\n if '$and' in cp:\n for clause in cp['$and']:\n clauses.append(clause)\n clause_list.append(clause)\n else:\n clause = cp\n clauses.append(clause)\n clause_list.append(clause)\n cp_clause_list.append({'cp': cp, 'clauses': clauses})\n return cp_clause_list, clause_list\n",
"step-4": "import dnf_converter\n\ndef parse(query):\n\tprint(\"parsing the query...\")\n\tquery = dnf_converter.convert(query)\n\tcp_clause_list = []\n\tclause_list = []\n\tfor cp in query[\"$or\"]:\n\t\tclauses = []\n\t\tif \"$and\" in cp:\n\t\t\tfor clause in cp[\"$and\"]:\n\t\t\t\tclauses.append(clause)\n\t\t\t\tclause_list.append(clause)\n\t\telse:\n\t\t\tclause = cp\n\t\t\tclauses.append(clause)\n\t\t\tclause_list.append(clause)\n\t\tcp_clause_list.append({ \"cp\": cp, \"clauses\": clauses })\n\treturn cp_clause_list, clause_list",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/python
import serial
import time
import sys
senderId="\x01"
receiverId="\x00"
#openSerial just opens the serial connection
def openSerial(port):
#Some configuration for the serial port
ser = serial.Serial()
ser.baudrate = 300
ser.port = port
ser.bytesize = 8
ser.stopbits = 2
ser.open()
return ser
def initializePort(ser, payloadLen, sender, receiver, layerVersion="\x02"):
#SenderID
ser.write(sender)
#ReceiverID
ser.write(receiver)
#layerconfig: At the moment layer2
ser.write(layerVersion)
#payloadlen
ser.write(payloadLen)
#USART Protocol type: No one reads this field at the moment
ser.write("\x01")
def main():
if (len(sys.argv) < 2):
print "sender.py <port>"
sys.exit(1)
layerVersion = "\x02"
if (len(sys.argv) >= 3):
layerVersion = "\x03"
print "Use reliable transport"
ser = openSerial(sys.argv[1])
time.sleep(2)
initializePort(ser, payloadLen="\x01", sender="\x01", receiver="\x00", layerVersion=layerVersion)
time.sleep(0.5)
char = 1
while (char != "\x00"):
char = ser.read(1)
sys.stdout.write(char)
sys.stdout.flush()
print ""
while (char != "\x00"):
char = ser.read(1)
sys.stdout.write(char)
sys.stdout.flush()
ser.close()
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "bf1d54015a9ae529f4fda4fa9b9f7c874ec3b240",
"index": 4514,
"step-1": "#!/usr/bin/python\n\nimport serial\nimport time\nimport sys\n\nsenderId=\"\\x01\"\nreceiverId=\"\\x00\"\n\n#openSerial just opens the serial connection\ndef openSerial(port):\n\t#Some configuration for the serial port\n\tser = serial.Serial()\n\tser.baudrate = 300\n\tser.port = port\n\tser.bytesize = 8\n\tser.stopbits = 2\n\tser.open()\n\treturn ser\n\ndef initializePort(ser, payloadLen, sender, receiver, layerVersion=\"\\x02\"):\n\t#SenderID\n\tser.write(sender)\n\t#ReceiverID\n\tser.write(receiver)\n\t#layerconfig: At the moment layer2\n\tser.write(layerVersion)\n\t#payloadlen\n\tser.write(payloadLen)\n\t#USART Protocol type: No one reads this field at the moment\n\tser.write(\"\\x01\")\n\ndef main():\n\tif (len(sys.argv) < 2):\n\t\tprint \"sender.py <port>\"\n\t\tsys.exit(1)\n\n\tlayerVersion = \"\\x02\"\n\tif (len(sys.argv) >= 3):\n\t\tlayerVersion = \"\\x03\"\n\t\tprint \"Use reliable transport\"\n\n\tser = openSerial(sys.argv[1])\n\ttime.sleep(2)\n\tinitializePort(ser, payloadLen=\"\\x01\", sender=\"\\x01\", receiver=\"\\x00\", layerVersion=layerVersion)\n\ttime.sleep(0.5)\n\tchar = 1\n\twhile (char != \"\\x00\"):\n\t\tchar = ser.read(1)\n\t\tsys.stdout.write(char)\n\t\tsys.stdout.flush()\n\n\tprint \"\"\n\n\twhile (char != \"\\x00\"):\n\t\tchar = ser.read(1)\n\t\tsys.stdout.write(char)\n\t\tsys.stdout.flush()\n\tser.close()\n\nif __name__ == '__main__':\n main()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('core', '0003_auto_20200310_1620')]
operations = [migrations.AddField(model_name='tag', name='name', field=
models.CharField(choices=[('METHOD', 'METHOD'), ('FUNCTION',
'FUNCTION'), ('OPERATOR', 'OPERATOR'), ('HELPER FUNCTION',
'HELPER FUNCTION')], default='code', max_length=100)), migrations.
AddField(model_name='tag', name='slug', field=models.CharField(
default='code', max_length=100, unique=True))]
<|reserved_special_token_1|>
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('core', '0003_auto_20200310_1620')]
operations = [migrations.AddField(model_name='tag', name='name', field=
models.CharField(choices=[('METHOD', 'METHOD'), ('FUNCTION',
'FUNCTION'), ('OPERATOR', 'OPERATOR'), ('HELPER FUNCTION',
'HELPER FUNCTION')], default='code', max_length=100)), migrations.
AddField(model_name='tag', name='slug', field=models.CharField(
default='code', max_length=100, unique=True))]
<|reserved_special_token_1|>
# Generated by Django 3.0.4 on 2020-03-11 17:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0003_auto_20200310_1620'),
]
operations = [
migrations.AddField(
model_name='tag',
name='name',
field=models.CharField(choices=[('METHOD', 'METHOD'), ('FUNCTION', 'FUNCTION'), ('OPERATOR', 'OPERATOR'), ('HELPER FUNCTION', 'HELPER FUNCTION')], default='code', max_length=100),
),
migrations.AddField(
model_name='tag',
name='slug',
field=models.CharField(default='code', max_length=100, unique=True),
),
]
|
flexible
|
{
"blob_id": "ab12468b1da20c896e3578091fd9ba245dcfa0a4",
"index": 1350,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('core', '0003_auto_20200310_1620')]\n operations = [migrations.AddField(model_name='tag', name='name', field=\n models.CharField(choices=[('METHOD', 'METHOD'), ('FUNCTION',\n 'FUNCTION'), ('OPERATOR', 'OPERATOR'), ('HELPER FUNCTION',\n 'HELPER FUNCTION')], default='code', max_length=100)), migrations.\n AddField(model_name='tag', name='slug', field=models.CharField(\n default='code', max_length=100, unique=True))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('core', '0003_auto_20200310_1620')]\n operations = [migrations.AddField(model_name='tag', name='name', field=\n models.CharField(choices=[('METHOD', 'METHOD'), ('FUNCTION',\n 'FUNCTION'), ('OPERATOR', 'OPERATOR'), ('HELPER FUNCTION',\n 'HELPER FUNCTION')], default='code', max_length=100)), migrations.\n AddField(model_name='tag', name='slug', field=models.CharField(\n default='code', max_length=100, unique=True))]\n",
"step-5": "# Generated by Django 3.0.4 on 2020-03-11 17:48\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0003_auto_20200310_1620'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='tag',\n name='name',\n field=models.CharField(choices=[('METHOD', 'METHOD'), ('FUNCTION', 'FUNCTION'), ('OPERATOR', 'OPERATOR'), ('HELPER FUNCTION', 'HELPER FUNCTION')], default='code', max_length=100),\n ),\n migrations.AddField(\n model_name='tag',\n name='slug',\n field=models.CharField(default='code', max_length=100, unique=True),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Copyright (c) 2021, Omid Erfanmanesh, All rights reserved.
import math
import numpy as np
import pandas as pd
from data.based.based_dataset import BasedDataset
from data.based.file_types import FileTypes
class DengueInfection(BasedDataset):
def __init__(self, cfg, development):
super(DengueInfection, self).__init__(cfg=cfg, dataset_type=FileTypes.TSV, development=development)
if development:
self.total_cases()
self.extract_month()
self.extract_quarter()
self.week_start_date()
# self.six_month()
# self.week_split()
self.city()
self.cyclic_encoder(col='weekofyear',max_val=53)
self.cyclic_encoder(col='month', max_val=12)
self.persiann_precip_mm()
self.ncep_avg_temp_k()
self.ncep_diur_temp_rng_k()
self.ncep_max_air_temp_k()
self.ncep_min_air_temp_k()
self.ncep_air_temp_k()
self.ncep_dew_point_temp_k()
self.avg_temp_c()
self.diur_temp_rng_c()
self.max_temp_c()
self.min_temp_c()
self.precip_mm()
def cyclic_encoder(self, col, max_val):
self.df[col + '_sin'] = np.sin(2 * np.pi * self.df[col] / max_val)
self.df[col + '_cos'] = np.cos(2 * np.pi * self.df[col] / max_val)
return self.df
def fill_nan(self, col):
table = pd.pivot_table(self.df, values=col, index=['year', 'month'],
columns=['city'], aggfunc=np.mean)
self.df[col + '_no_nans'] = self.df[col]
for index, row in self.df.iterrows():
if math.isnan(row[col]):
query = table.query(f'year == "{row["year"]}" & month =="{row["month"]}"').reset_index()
city = row['city']
value = query[city]
if value.empty:
value = self.df.loc[self.df['year'] == row["year"]][col].mean()
self.df.loc[index, [col + '_no_nans']] = value
continue
self.df.loc[index, [col + '_no_nans']] = value[0]
def extract_month(self):
self.df['week_start_date'] = pd.to_datetime(self.df['week_start_date'])
self.df['month'] = self.df['week_start_date'].dt.month
def extract_quarter(self):
self.df['quarter'] = self.df['week_start_date'].dt.quarter
def week_split(self):
self.df['week_split'] = self.df['weekofyear'].apply(lambda x: 0 if x < 25 else 1)
def season_of_date(date):
year = str(date.year)
seasons = {'spring': pd.date_range(start='21/03/' + year, end='20/06/' + year),
'summer': pd.date_range(start='21/06/' + year, end='22/09/' + year),
'autumn': pd.date_range(start='23/09/' + year, end='20/12/' + year)}
if date in seasons['spring']:
return 'spring'
if date in seasons['summer']:
return 'summer'
if date in seasons['autumn']:
return 'autumn'
else:
return 'winter'
def kelvin_to_celsius(self, kelvin):
if kelvin is None:
return kelvin
return kelvin - 273.15
def year(self):
pass
def week_of_year(self):
pass
def week_start_date(self):
pass
def six_month(self):
self.df['six'] = self.df['month'].apply(lambda x: 1 if x > 6 else 0)
def persiann_precip_mm(self):
self.fill_nan(col='PERSIANN_precip_mm')
def ncep_air_temp_k(self):
self.df['NCEP_air_temp_c'] = self.df['NCEP_air_temp_k'].apply(lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_air_temp_c')
def ncep_avg_temp_k(self):
self.df['NCEP_avg_temp_c'] = self.df['NCEP_avg_temp_k'].apply(lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_avg_temp_c')
def ncep_dew_point_temp_k(self):
"""
dew point temperature in Kelvin degrees measured by NCEP CFSR;
:rtype: object
"""
self.df['NCEP_dew_point_temp_c'] = self.df['NCEP_dew_point_temp_k'].apply(
lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_dew_point_temp_c')
def ncep_max_air_temp_k(self):
self.df['NCEP_max_air_temp_c'] = self.df['NCEP_max_air_temp_k'].apply(
lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_max_air_temp_c')
def ncep_min_air_temp_k(self):
self.df['NCEP_min_air_temp_c'] = self.df['NCEP_min_air_temp_k'].apply(
lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_min_air_temp_c')
def ncep_precip_kg_per_m2(self):
self.fill_nan(col='NCEP_precip_kg_per_m2')
def ncep_humidity_percent(self):
self.fill_nan(col='NCEP_humidity_percent')
def ncep_precip_mm(self):
self.fill_nan(col='NCEP_precip_mm')
def ncep_humidity_g_per_kg(self):
self.fill_nan(col='NCEP_humidity_g_per_kg')
def ncep_diur_temp_rng_k(self):
self.df['NCEP_diur_temp_rng_c'] = self.df['NCEP_diur_temp_rng_k'].apply(
lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_diur_temp_rng_c')
def avg_temp_c(self):
self.fill_nan(col='avg_temp_c')
def diur_temp_rng_c(self):
self.fill_nan(col='diur_temp_rng_c')
def max_temp_c(self):
self.fill_nan(col='max_temp_c')
def min_temp_c(self):
self.fill_nan(col='min_temp_c')
def precip_mm(self):
self.fill_nan(col='precip_mm')
def total_cases(self):
self.df = self.df[self.df['total_cases'] < 41]
def city(self):
self.df = self.df[self.df['city'] != 'sj']
|
normal
|
{
"blob_id": "93ac8a1f795f7809a3e88b56ce90bf1d31706554",
"index": 1139,
"step-1": "<mask token>\n\n\nclass DengueInfection(BasedDataset):\n <mask token>\n\n def cyclic_encoder(self, col, max_val):\n self.df[col + '_sin'] = np.sin(2 * np.pi * self.df[col] / max_val)\n self.df[col + '_cos'] = np.cos(2 * np.pi * self.df[col] / max_val)\n return self.df\n\n def fill_nan(self, col):\n table = pd.pivot_table(self.df, values=col, index=['year', 'month'],\n columns=['city'], aggfunc=np.mean)\n self.df[col + '_no_nans'] = self.df[col]\n for index, row in self.df.iterrows():\n if math.isnan(row[col]):\n query = table.query(\n f'year == \"{row[\\'year\\']}\" & month ==\"{row[\\'month\\']}\"'\n ).reset_index()\n city = row['city']\n value = query[city]\n if value.empty:\n value = self.df.loc[self.df['year'] == row['year']][col\n ].mean()\n self.df.loc[index, [col + '_no_nans']] = value\n continue\n self.df.loc[index, [col + '_no_nans']] = value[0]\n <mask token>\n <mask token>\n\n def week_split(self):\n self.df['week_split'] = self.df['weekofyear'].apply(lambda x: 0 if \n x < 25 else 1)\n\n def season_of_date(date):\n year = str(date.year)\n seasons = {'spring': pd.date_range(start='21/03/' + year, end=\n '20/06/' + year), 'summer': pd.date_range(start='21/06/' + year,\n end='22/09/' + year), 'autumn': pd.date_range(start='23/09/' +\n year, end='20/12/' + year)}\n if date in seasons['spring']:\n return 'spring'\n if date in seasons['summer']:\n return 'summer'\n if date in seasons['autumn']:\n return 'autumn'\n else:\n return 'winter'\n\n def kelvin_to_celsius(self, kelvin):\n if kelvin is None:\n return kelvin\n return kelvin - 273.15\n <mask token>\n\n def week_of_year(self):\n pass\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def ncep_avg_temp_k(self):\n self.df['NCEP_avg_temp_c'] = self.df['NCEP_avg_temp_k'].apply(lambda\n k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_avg_temp_c')\n <mask token>\n\n def ncep_max_air_temp_k(self):\n self.df['NCEP_max_air_temp_c'] = self.df['NCEP_max_air_temp_k'].apply(\n 
lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_max_air_temp_c')\n\n def ncep_min_air_temp_k(self):\n self.df['NCEP_min_air_temp_c'] = self.df['NCEP_min_air_temp_k'].apply(\n lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_min_air_temp_c')\n <mask token>\n <mask token>\n\n def ncep_precip_mm(self):\n self.fill_nan(col='NCEP_precip_mm')\n\n def ncep_humidity_g_per_kg(self):\n self.fill_nan(col='NCEP_humidity_g_per_kg')\n\n def ncep_diur_temp_rng_k(self):\n self.df['NCEP_diur_temp_rng_c'] = self.df['NCEP_diur_temp_rng_k'\n ].apply(lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_diur_temp_rng_c')\n\n def avg_temp_c(self):\n self.fill_nan(col='avg_temp_c')\n <mask token>\n <mask token>\n <mask token>\n\n def precip_mm(self):\n self.fill_nan(col='precip_mm')\n <mask token>\n\n def city(self):\n self.df = self.df[self.df['city'] != 'sj']\n",
"step-2": "<mask token>\n\n\nclass DengueInfection(BasedDataset):\n\n def __init__(self, cfg, development):\n super(DengueInfection, self).__init__(cfg=cfg, dataset_type=\n FileTypes.TSV, development=development)\n if development:\n self.total_cases()\n self.extract_month()\n self.extract_quarter()\n self.week_start_date()\n self.city()\n self.cyclic_encoder(col='weekofyear', max_val=53)\n self.cyclic_encoder(col='month', max_val=12)\n self.persiann_precip_mm()\n self.ncep_avg_temp_k()\n self.ncep_diur_temp_rng_k()\n self.ncep_max_air_temp_k()\n self.ncep_min_air_temp_k()\n self.ncep_air_temp_k()\n self.ncep_dew_point_temp_k()\n self.avg_temp_c()\n self.diur_temp_rng_c()\n self.max_temp_c()\n self.min_temp_c()\n self.precip_mm()\n\n def cyclic_encoder(self, col, max_val):\n self.df[col + '_sin'] = np.sin(2 * np.pi * self.df[col] / max_val)\n self.df[col + '_cos'] = np.cos(2 * np.pi * self.df[col] / max_val)\n return self.df\n\n def fill_nan(self, col):\n table = pd.pivot_table(self.df, values=col, index=['year', 'month'],\n columns=['city'], aggfunc=np.mean)\n self.df[col + '_no_nans'] = self.df[col]\n for index, row in self.df.iterrows():\n if math.isnan(row[col]):\n query = table.query(\n f'year == \"{row[\\'year\\']}\" & month ==\"{row[\\'month\\']}\"'\n ).reset_index()\n city = row['city']\n value = query[city]\n if value.empty:\n value = self.df.loc[self.df['year'] == row['year']][col\n ].mean()\n self.df.loc[index, [col + '_no_nans']] = value\n continue\n self.df.loc[index, [col + '_no_nans']] = value[0]\n <mask token>\n <mask token>\n\n def week_split(self):\n self.df['week_split'] = self.df['weekofyear'].apply(lambda x: 0 if \n x < 25 else 1)\n\n def season_of_date(date):\n year = str(date.year)\n seasons = {'spring': pd.date_range(start='21/03/' + year, end=\n '20/06/' + year), 'summer': pd.date_range(start='21/06/' + year,\n end='22/09/' + year), 'autumn': pd.date_range(start='23/09/' +\n year, end='20/12/' + year)}\n if date in seasons['spring']:\n 
return 'spring'\n if date in seasons['summer']:\n return 'summer'\n if date in seasons['autumn']:\n return 'autumn'\n else:\n return 'winter'\n\n def kelvin_to_celsius(self, kelvin):\n if kelvin is None:\n return kelvin\n return kelvin - 273.15\n <mask token>\n\n def week_of_year(self):\n pass\n <mask token>\n <mask token>\n <mask token>\n\n def ncep_air_temp_k(self):\n self.df['NCEP_air_temp_c'] = self.df['NCEP_air_temp_k'].apply(lambda\n k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_air_temp_c')\n\n def ncep_avg_temp_k(self):\n self.df['NCEP_avg_temp_c'] = self.df['NCEP_avg_temp_k'].apply(lambda\n k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_avg_temp_c')\n <mask token>\n\n def ncep_max_air_temp_k(self):\n self.df['NCEP_max_air_temp_c'] = self.df['NCEP_max_air_temp_k'].apply(\n lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_max_air_temp_c')\n\n def ncep_min_air_temp_k(self):\n self.df['NCEP_min_air_temp_c'] = self.df['NCEP_min_air_temp_k'].apply(\n lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_min_air_temp_c')\n <mask token>\n <mask token>\n\n def ncep_precip_mm(self):\n self.fill_nan(col='NCEP_precip_mm')\n\n def ncep_humidity_g_per_kg(self):\n self.fill_nan(col='NCEP_humidity_g_per_kg')\n\n def ncep_diur_temp_rng_k(self):\n self.df['NCEP_diur_temp_rng_c'] = self.df['NCEP_diur_temp_rng_k'\n ].apply(lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_diur_temp_rng_c')\n\n def avg_temp_c(self):\n self.fill_nan(col='avg_temp_c')\n <mask token>\n <mask token>\n <mask token>\n\n def precip_mm(self):\n self.fill_nan(col='precip_mm')\n <mask token>\n\n def city(self):\n self.df = self.df[self.df['city'] != 'sj']\n",
"step-3": "<mask token>\n\n\nclass DengueInfection(BasedDataset):\n\n def __init__(self, cfg, development):\n super(DengueInfection, self).__init__(cfg=cfg, dataset_type=\n FileTypes.TSV, development=development)\n if development:\n self.total_cases()\n self.extract_month()\n self.extract_quarter()\n self.week_start_date()\n self.city()\n self.cyclic_encoder(col='weekofyear', max_val=53)\n self.cyclic_encoder(col='month', max_val=12)\n self.persiann_precip_mm()\n self.ncep_avg_temp_k()\n self.ncep_diur_temp_rng_k()\n self.ncep_max_air_temp_k()\n self.ncep_min_air_temp_k()\n self.ncep_air_temp_k()\n self.ncep_dew_point_temp_k()\n self.avg_temp_c()\n self.diur_temp_rng_c()\n self.max_temp_c()\n self.min_temp_c()\n self.precip_mm()\n\n def cyclic_encoder(self, col, max_val):\n self.df[col + '_sin'] = np.sin(2 * np.pi * self.df[col] / max_val)\n self.df[col + '_cos'] = np.cos(2 * np.pi * self.df[col] / max_val)\n return self.df\n\n def fill_nan(self, col):\n table = pd.pivot_table(self.df, values=col, index=['year', 'month'],\n columns=['city'], aggfunc=np.mean)\n self.df[col + '_no_nans'] = self.df[col]\n for index, row in self.df.iterrows():\n if math.isnan(row[col]):\n query = table.query(\n f'year == \"{row[\\'year\\']}\" & month ==\"{row[\\'month\\']}\"'\n ).reset_index()\n city = row['city']\n value = query[city]\n if value.empty:\n value = self.df.loc[self.df['year'] == row['year']][col\n ].mean()\n self.df.loc[index, [col + '_no_nans']] = value\n continue\n self.df.loc[index, [col + '_no_nans']] = value[0]\n\n def extract_month(self):\n self.df['week_start_date'] = pd.to_datetime(self.df['week_start_date'])\n self.df['month'] = self.df['week_start_date'].dt.month\n <mask token>\n\n def week_split(self):\n self.df['week_split'] = self.df['weekofyear'].apply(lambda x: 0 if \n x < 25 else 1)\n\n def season_of_date(date):\n year = str(date.year)\n seasons = {'spring': pd.date_range(start='21/03/' + year, end=\n '20/06/' + year), 'summer': 
pd.date_range(start='21/06/' + year,\n end='22/09/' + year), 'autumn': pd.date_range(start='23/09/' +\n year, end='20/12/' + year)}\n if date in seasons['spring']:\n return 'spring'\n if date in seasons['summer']:\n return 'summer'\n if date in seasons['autumn']:\n return 'autumn'\n else:\n return 'winter'\n\n def kelvin_to_celsius(self, kelvin):\n if kelvin is None:\n return kelvin\n return kelvin - 273.15\n <mask token>\n\n def week_of_year(self):\n pass\n <mask token>\n <mask token>\n <mask token>\n\n def ncep_air_temp_k(self):\n self.df['NCEP_air_temp_c'] = self.df['NCEP_air_temp_k'].apply(lambda\n k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_air_temp_c')\n\n def ncep_avg_temp_k(self):\n self.df['NCEP_avg_temp_c'] = self.df['NCEP_avg_temp_k'].apply(lambda\n k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_avg_temp_c')\n\n def ncep_dew_point_temp_k(self):\n \"\"\"\n dew point temperature in Kelvin degrees measured by NCEP CFSR;\n :rtype: object\n \"\"\"\n self.df['NCEP_dew_point_temp_c'] = self.df['NCEP_dew_point_temp_k'\n ].apply(lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_dew_point_temp_c')\n\n def ncep_max_air_temp_k(self):\n self.df['NCEP_max_air_temp_c'] = self.df['NCEP_max_air_temp_k'].apply(\n lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_max_air_temp_c')\n\n def ncep_min_air_temp_k(self):\n self.df['NCEP_min_air_temp_c'] = self.df['NCEP_min_air_temp_k'].apply(\n lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_min_air_temp_c')\n\n def ncep_precip_kg_per_m2(self):\n self.fill_nan(col='NCEP_precip_kg_per_m2')\n <mask token>\n\n def ncep_precip_mm(self):\n self.fill_nan(col='NCEP_precip_mm')\n\n def ncep_humidity_g_per_kg(self):\n self.fill_nan(col='NCEP_humidity_g_per_kg')\n\n def ncep_diur_temp_rng_k(self):\n self.df['NCEP_diur_temp_rng_c'] = self.df['NCEP_diur_temp_rng_k'\n ].apply(lambda k: self.kelvin_to_celsius(kelvin=k))\n 
self.fill_nan(col='NCEP_diur_temp_rng_c')\n\n def avg_temp_c(self):\n self.fill_nan(col='avg_temp_c')\n <mask token>\n <mask token>\n\n def min_temp_c(self):\n self.fill_nan(col='min_temp_c')\n\n def precip_mm(self):\n self.fill_nan(col='precip_mm')\n <mask token>\n\n def city(self):\n self.df = self.df[self.df['city'] != 'sj']\n",
"step-4": "<mask token>\n\n\nclass DengueInfection(BasedDataset):\n\n def __init__(self, cfg, development):\n super(DengueInfection, self).__init__(cfg=cfg, dataset_type=\n FileTypes.TSV, development=development)\n if development:\n self.total_cases()\n self.extract_month()\n self.extract_quarter()\n self.week_start_date()\n self.city()\n self.cyclic_encoder(col='weekofyear', max_val=53)\n self.cyclic_encoder(col='month', max_val=12)\n self.persiann_precip_mm()\n self.ncep_avg_temp_k()\n self.ncep_diur_temp_rng_k()\n self.ncep_max_air_temp_k()\n self.ncep_min_air_temp_k()\n self.ncep_air_temp_k()\n self.ncep_dew_point_temp_k()\n self.avg_temp_c()\n self.diur_temp_rng_c()\n self.max_temp_c()\n self.min_temp_c()\n self.precip_mm()\n\n def cyclic_encoder(self, col, max_val):\n self.df[col + '_sin'] = np.sin(2 * np.pi * self.df[col] / max_val)\n self.df[col + '_cos'] = np.cos(2 * np.pi * self.df[col] / max_val)\n return self.df\n\n def fill_nan(self, col):\n table = pd.pivot_table(self.df, values=col, index=['year', 'month'],\n columns=['city'], aggfunc=np.mean)\n self.df[col + '_no_nans'] = self.df[col]\n for index, row in self.df.iterrows():\n if math.isnan(row[col]):\n query = table.query(\n f'year == \"{row[\\'year\\']}\" & month ==\"{row[\\'month\\']}\"'\n ).reset_index()\n city = row['city']\n value = query[city]\n if value.empty:\n value = self.df.loc[self.df['year'] == row['year']][col\n ].mean()\n self.df.loc[index, [col + '_no_nans']] = value\n continue\n self.df.loc[index, [col + '_no_nans']] = value[0]\n\n def extract_month(self):\n self.df['week_start_date'] = pd.to_datetime(self.df['week_start_date'])\n self.df['month'] = self.df['week_start_date'].dt.month\n <mask token>\n\n def week_split(self):\n self.df['week_split'] = self.df['weekofyear'].apply(lambda x: 0 if \n x < 25 else 1)\n\n def season_of_date(date):\n year = str(date.year)\n seasons = {'spring': pd.date_range(start='21/03/' + year, end=\n '20/06/' + year), 'summer': 
pd.date_range(start='21/06/' + year,\n end='22/09/' + year), 'autumn': pd.date_range(start='23/09/' +\n year, end='20/12/' + year)}\n if date in seasons['spring']:\n return 'spring'\n if date in seasons['summer']:\n return 'summer'\n if date in seasons['autumn']:\n return 'autumn'\n else:\n return 'winter'\n\n def kelvin_to_celsius(self, kelvin):\n if kelvin is None:\n return kelvin\n return kelvin - 273.15\n <mask token>\n\n def week_of_year(self):\n pass\n <mask token>\n\n def six_month(self):\n self.df['six'] = self.df['month'].apply(lambda x: 1 if x > 6 else 0)\n\n def persiann_precip_mm(self):\n self.fill_nan(col='PERSIANN_precip_mm')\n\n def ncep_air_temp_k(self):\n self.df['NCEP_air_temp_c'] = self.df['NCEP_air_temp_k'].apply(lambda\n k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_air_temp_c')\n\n def ncep_avg_temp_k(self):\n self.df['NCEP_avg_temp_c'] = self.df['NCEP_avg_temp_k'].apply(lambda\n k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_avg_temp_c')\n\n def ncep_dew_point_temp_k(self):\n \"\"\"\n dew point temperature in Kelvin degrees measured by NCEP CFSR;\n :rtype: object\n \"\"\"\n self.df['NCEP_dew_point_temp_c'] = self.df['NCEP_dew_point_temp_k'\n ].apply(lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_dew_point_temp_c')\n\n def ncep_max_air_temp_k(self):\n self.df['NCEP_max_air_temp_c'] = self.df['NCEP_max_air_temp_k'].apply(\n lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_max_air_temp_c')\n\n def ncep_min_air_temp_k(self):\n self.df['NCEP_min_air_temp_c'] = self.df['NCEP_min_air_temp_k'].apply(\n lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_min_air_temp_c')\n\n def ncep_precip_kg_per_m2(self):\n self.fill_nan(col='NCEP_precip_kg_per_m2')\n <mask token>\n\n def ncep_precip_mm(self):\n self.fill_nan(col='NCEP_precip_mm')\n\n def ncep_humidity_g_per_kg(self):\n self.fill_nan(col='NCEP_humidity_g_per_kg')\n\n def ncep_diur_temp_rng_k(self):\n 
self.df['NCEP_diur_temp_rng_c'] = self.df['NCEP_diur_temp_rng_k'\n ].apply(lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_diur_temp_rng_c')\n\n def avg_temp_c(self):\n self.fill_nan(col='avg_temp_c')\n\n def diur_temp_rng_c(self):\n self.fill_nan(col='diur_temp_rng_c')\n <mask token>\n\n def min_temp_c(self):\n self.fill_nan(col='min_temp_c')\n\n def precip_mm(self):\n self.fill_nan(col='precip_mm')\n <mask token>\n\n def city(self):\n self.df = self.df[self.df['city'] != 'sj']\n",
"step-5": "# Copyright (c) 2021, Omid Erfanmanesh, All rights reserved.\n\n\nimport math\n\nimport numpy as np\nimport pandas as pd\n\nfrom data.based.based_dataset import BasedDataset\nfrom data.based.file_types import FileTypes\n\n\nclass DengueInfection(BasedDataset):\n def __init__(self, cfg, development):\n super(DengueInfection, self).__init__(cfg=cfg, dataset_type=FileTypes.TSV, development=development)\n\n if development:\n self.total_cases()\n self.extract_month()\n self.extract_quarter()\n self.week_start_date()\n # self.six_month()\n # self.week_split()\n self.city()\n self.cyclic_encoder(col='weekofyear',max_val=53)\n self.cyclic_encoder(col='month', max_val=12)\n self.persiann_precip_mm()\n\n self.ncep_avg_temp_k()\n self.ncep_diur_temp_rng_k()\n self.ncep_max_air_temp_k()\n self.ncep_min_air_temp_k()\n self.ncep_air_temp_k()\n self.ncep_dew_point_temp_k()\n\n self.avg_temp_c()\n self.diur_temp_rng_c()\n self.max_temp_c()\n self.min_temp_c()\n self.precip_mm()\n\n\n\n def cyclic_encoder(self, col, max_val):\n self.df[col + '_sin'] = np.sin(2 * np.pi * self.df[col] / max_val)\n self.df[col + '_cos'] = np.cos(2 * np.pi * self.df[col] / max_val)\n return self.df\n\n def fill_nan(self, col):\n table = pd.pivot_table(self.df, values=col, index=['year', 'month'],\n columns=['city'], aggfunc=np.mean)\n\n self.df[col + '_no_nans'] = self.df[col]\n\n for index, row in self.df.iterrows():\n if math.isnan(row[col]):\n query = table.query(f'year == \"{row[\"year\"]}\" & month ==\"{row[\"month\"]}\"').reset_index()\n city = row['city']\n value = query[city]\n\n if value.empty:\n value = self.df.loc[self.df['year'] == row[\"year\"]][col].mean()\n self.df.loc[index, [col + '_no_nans']] = value\n continue\n self.df.loc[index, [col + '_no_nans']] = value[0]\n\n def extract_month(self):\n self.df['week_start_date'] = pd.to_datetime(self.df['week_start_date'])\n self.df['month'] = self.df['week_start_date'].dt.month\n\n def extract_quarter(self):\n self.df['quarter'] = 
self.df['week_start_date'].dt.quarter\n\n def week_split(self):\n self.df['week_split'] = self.df['weekofyear'].apply(lambda x: 0 if x < 25 else 1)\n\n def season_of_date(date):\n year = str(date.year)\n seasons = {'spring': pd.date_range(start='21/03/' + year, end='20/06/' + year),\n 'summer': pd.date_range(start='21/06/' + year, end='22/09/' + year),\n 'autumn': pd.date_range(start='23/09/' + year, end='20/12/' + year)}\n if date in seasons['spring']:\n return 'spring'\n if date in seasons['summer']:\n return 'summer'\n if date in seasons['autumn']:\n return 'autumn'\n else:\n return 'winter'\n\n def kelvin_to_celsius(self, kelvin):\n if kelvin is None:\n return kelvin\n return kelvin - 273.15\n\n def year(self):\n pass\n\n def week_of_year(self):\n pass\n\n def week_start_date(self):\n pass\n\n def six_month(self):\n self.df['six'] = self.df['month'].apply(lambda x: 1 if x > 6 else 0)\n\n def persiann_precip_mm(self):\n self.fill_nan(col='PERSIANN_precip_mm')\n\n def ncep_air_temp_k(self):\n self.df['NCEP_air_temp_c'] = self.df['NCEP_air_temp_k'].apply(lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_air_temp_c')\n\n def ncep_avg_temp_k(self):\n self.df['NCEP_avg_temp_c'] = self.df['NCEP_avg_temp_k'].apply(lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_avg_temp_c')\n\n def ncep_dew_point_temp_k(self):\n \"\"\"\n dew point temperature in Kelvin degrees measured by NCEP CFSR;\n :rtype: object\n \"\"\"\n self.df['NCEP_dew_point_temp_c'] = self.df['NCEP_dew_point_temp_k'].apply(\n lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_dew_point_temp_c')\n\n def ncep_max_air_temp_k(self):\n self.df['NCEP_max_air_temp_c'] = self.df['NCEP_max_air_temp_k'].apply(\n lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_max_air_temp_c')\n\n def ncep_min_air_temp_k(self):\n self.df['NCEP_min_air_temp_c'] = self.df['NCEP_min_air_temp_k'].apply(\n lambda k: self.kelvin_to_celsius(kelvin=k))\n 
self.fill_nan(col='NCEP_min_air_temp_c')\n\n def ncep_precip_kg_per_m2(self):\n self.fill_nan(col='NCEP_precip_kg_per_m2')\n\n def ncep_humidity_percent(self):\n self.fill_nan(col='NCEP_humidity_percent')\n\n def ncep_precip_mm(self):\n self.fill_nan(col='NCEP_precip_mm')\n\n def ncep_humidity_g_per_kg(self):\n self.fill_nan(col='NCEP_humidity_g_per_kg')\n\n def ncep_diur_temp_rng_k(self):\n self.df['NCEP_diur_temp_rng_c'] = self.df['NCEP_diur_temp_rng_k'].apply(\n lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_diur_temp_rng_c')\n\n def avg_temp_c(self):\n self.fill_nan(col='avg_temp_c')\n\n def diur_temp_rng_c(self):\n self.fill_nan(col='diur_temp_rng_c')\n\n def max_temp_c(self):\n self.fill_nan(col='max_temp_c')\n\n def min_temp_c(self):\n self.fill_nan(col='min_temp_c')\n\n def precip_mm(self):\n self.fill_nan(col='precip_mm')\n\n def total_cases(self):\n self.df = self.df[self.df['total_cases'] < 41]\n\n def city(self):\n self.df = self.df[self.df['city'] != 'sj']\n\n",
"step-ids": [
16,
18,
22,
25,
33
]
}
|
[
16,
18,
22,
25,
33
] |
#!/usr/bin/env python
import rospy
import numpy as np
import time
import RPi.GPIO as GPIO
from ccn_raspicar_ros.msg import RaspiCarWheel
from ccn_raspicar_ros.msg import RaspiCarWheelControl
from ccn_raspicar_ros.srv import RaspiCarMotorControl
class MotorControl(object):
    """PWM H-bridge driver for the two-wheel RaspiCar.

    Each wheel is driven by a pair of GPIO pins (forward / backward channel);
    speed is set via PWM duty cycle.  ``balance`` trims a car that pulls to one
    side: the two base duty levels average to ``dc_level`` while
    ``r_level / l_level == balance``.

    ``control_pin`` order is (right-fwd, right-bwd, left-fwd, left-bwd), given
    as physical BOARD pin numbers.
    """

    def __init__(self, control_pin=(16, 18, 11, 13), t=0.1, dc_level=80, balance=1.0, pwm_freq=500):
        # NOTE: default changed from a list to a tuple -- the pins are only
        # indexed, and a tuple avoids the shared-mutable-default pitfall.
        self.control_pin = control_pin
        self.t = t  # default duration (seconds) of one motion pulse
        self.balance = balance
        # Split dc_level between the wheels so their mean stays dc_level while
        # right/left == balance.
        self.l_level = dc_level * 2 / (balance + 1)
        self.r_level = self.l_level * balance
        GPIO.setmode(GPIO.BOARD)
        # Plain loop instead of a side-effect list comprehension.
        for idx in range(4):
            GPIO.setup(control_pin[idx], GPIO.OUT, initial=GPIO.LOW)
        self.pwm_r1 = GPIO.PWM(control_pin[0], pwm_freq)  # right wheel, forward
        self.pwm_r2 = GPIO.PWM(control_pin[1], pwm_freq)  # right wheel, backward
        self.pwm_l1 = GPIO.PWM(control_pin[2], pwm_freq)  # left wheel, forward
        self.pwm_l2 = GPIO.PWM(control_pin[3], pwm_freq)  # left wheel, backward
        for pwm in (self.pwm_r1, self.pwm_r2, self.pwm_l1, self.pwm_l2):
            pwm.start(0)

    def _apply(self, r_fwd, r_bwd, l_fwd, l_bwd):
        """Set the four duty cycles (right-fwd, right-bwd, left-fwd, left-bwd)."""
        self.pwm_r1.ChangeDutyCycle(r_fwd)
        self.pwm_r2.ChangeDutyCycle(r_bwd)
        self.pwm_l1.ChangeDutyCycle(l_fwd)
        self.pwm_l2.ChangeDutyCycle(l_bwd)

    def _pulse(self, r_fwd, r_bwd, l_fwd, l_bwd, t):
        """Apply duty cycles, wait ``t`` seconds (``self.t`` when None), then stop."""
        self._apply(r_fwd, r_bwd, l_fwd, l_bwd)
        time.sleep(self.t if t is None else t)
        self.stop()

    def stop(self):
        """Cut drive to both wheels (duty cycle 0 on all four channels)."""
        self._apply(0, 0, 0, 0)

    def forward(self, speed=1.0, t=None):
        """Drive both wheels forward at ``speed`` (fraction of base level) for ``t`` s."""
        self._pulse(self.r_level * speed, 0, self.l_level * speed, 0, t)

    def backward(self, speed=0.8, t=None):
        """Drive both wheels backward at ``speed`` for ``t`` seconds."""
        self._pulse(0, self.r_level * speed, 0, self.l_level * speed, t)

    def turn_left(self, speed=0.6, t=None):
        """Pivot left by driving only the right wheel forward."""
        self._pulse(self.r_level * speed, 0, 0, 0, t)

    def turn_right(self, speed=0.6, t=None):
        """Pivot right by driving only the left wheel forward."""
        self._pulse(0, 0, self.l_level * speed, 0, t)

    def arbitrary_speed(self, speed=(1.0, 1.0), t=None):
        """Drive the wheels at independent signed speeds ``(right, left)``.

        Negative components run that wheel backward.  With ``t is None`` the
        duty cycles are left applied and the call returns immediately;
        otherwise the car stops after ``t`` seconds.
        """
        if speed[0] > 0:
            self.pwm_r1.ChangeDutyCycle(self.r_level * speed[0])
            self.pwm_r2.ChangeDutyCycle(0)
        elif speed[0] < 0:
            self.pwm_r1.ChangeDutyCycle(0)
            # Fix: duty cycles must be non-negative; the original passed the
            # negative product here, which RPi.GPIO rejects with ValueError.
            self.pwm_r2.ChangeDutyCycle(self.r_level * -speed[0])
        else:
            # Fix: a zero component previously left the wheel at its previous
            # duty cycle; explicitly stop that wheel instead.
            self.pwm_r1.ChangeDutyCycle(0)
            self.pwm_r2.ChangeDutyCycle(0)
        if speed[1] > 0:
            self.pwm_l1.ChangeDutyCycle(self.l_level * speed[1])
            self.pwm_l2.ChangeDutyCycle(0)
        elif speed[1] < 0:
            self.pwm_l1.ChangeDutyCycle(0)
            self.pwm_l2.ChangeDutyCycle(self.l_level * -speed[1])
        else:
            self.pwm_l1.ChangeDutyCycle(0)
            self.pwm_l2.ChangeDutyCycle(0)
        if t is None:
            return
        time.sleep(t)
        self.stop()

    def cleanup(self):
        """Stop the motors, stop the PWM channels and release the GPIO pins."""
        self.stop()
        for pwm in (self.pwm_r1, self.pwm_r2, self.pwm_l1, self.pwm_l2):
            pwm.stop()
        GPIO.cleanup()
# Shared state read by the motion-control loops and updated by ROS callbacks.
g_obstacle_detected = False  # True pauses motion; no writer visible in this file -- presumably set by an obstacle-detection callback, verify
g_proximity = np.zeros([3])  # proximity sensor readings; appears unused in this chunk
g_wheel_count = np.zeros([2])  # cumulative encoder ticks per wheel; rebound by callback_RaspiCarWheel (which index is which wheel: TODO confirm)
def turn_right_controlled(angle):
    """Rotate the car clockwise by ``angle`` degrees using encoder feedback.

    Pulses the motors until encoder channel 0 has advanced ``angle / 4.45``
    ticks (empirical ticks-per-degree calibration -- TODO confirm), correcting
    once with a left nudge if it overshoots.  Motion pauses while
    ``g_obstacle_detected`` is set.  Relies on the module globals
    ``g_wheel_count``, ``g_obstacle_detected`` and ``motor``.
    """
    start = g_wheel_count  # snapshot; the subscriber rebinds the global, so this stays fixed
    target = angle / 4.45
    while not rospy.is_shutdown():
        if g_obstacle_detected:
            time.sleep(0.1)
            continue
        # Read the shared counter once per pass; the original re-read the
        # global for each comparison, so a callback update landing between the
        # two reads could make the tests disagree with each other.
        progress = g_wheel_count[0] - start[0]
        if progress < target:
            motor.turn_right(speed=0.9, t=0.05)
        elif progress > target:
            motor.turn_left(speed=0.8, t=0.03)  # overshot: one corrective nudge, then done
            break
        else:
            break
        time.sleep(0.05)
def turn_left_controlled(angle):
    """Rotate the car counter-clockwise by ``angle`` degrees using encoder feedback.

    Pulses the motors until encoder channel 1 has advanced ``angle / 4.45``
    ticks (empirical ticks-per-degree calibration -- TODO confirm), correcting
    once with a right nudge if it overshoots.  Motion pauses while
    ``g_obstacle_detected`` is set.  Relies on the module globals
    ``g_wheel_count``, ``g_obstacle_detected`` and ``motor``.
    """
    start = g_wheel_count  # snapshot; the subscriber rebinds the global, so this stays fixed
    target = angle / 4.45
    while not rospy.is_shutdown():
        if g_obstacle_detected:
            time.sleep(0.1)
            continue
        # Read the shared counter once per pass; the original re-read the
        # global for each comparison, so a callback update landing between the
        # two reads could make the tests disagree with each other.
        progress = g_wheel_count[1] - start[1]
        if progress < target:
            motor.turn_left(speed=0.9, t=0.05)
        elif progress > target:
            motor.turn_right(speed=0.8, t=0.03)  # overshot: one corrective nudge, then done
            break
        else:
            break
        time.sleep(0.05)
def forward_controlled(distance):
    """Drive straight ahead for ``distance`` meters using encoder feedback.

    Advances in short pulses until the mean encoder count of the two wheels
    reaches ``distance / 0.0113`` ticks (~0.0113 m of travel per tick --
    TODO confirm calibration), steering back toward straight whenever one
    wheel gets ahead of the other.  Motion pauses while
    ``g_obstacle_detected`` is set.  Relies on the module globals
    ``g_wheel_count``, ``g_obstacle_detected`` and ``motor``.
    """
    start = g_wheel_count
    target_ticks = distance / 0.0113
    while not rospy.is_shutdown():
        if g_obstacle_detected:
            time.sleep(0.1)
            continue
        travelled = g_wheel_count - start
        if np.sum(travelled) / 2.0 >= target_ticks:
            break
        motor.forward(speed=1.0, t=0.05)
        # Positive skew: channel-0 wheel is ahead -> steer left; the nudge
        # duration grows with the size of the imbalance.
        skew = travelled[0] - travelled[1]
        if skew > 0:
            motor.turn_left(speed=0.7, t=0.03 + skew * 0.005)
        elif skew < 0:
            motor.turn_right(speed=0.7, t=0.03 - skew * 0.005)
        time.sleep(0.05)
def callback_RaspiCarWheel(data):
    """Subscriber callback: cache the latest encoder tick counts from RaspiCarWheel."""
    global g_wheel_count
    # Rebinds the global to a fresh array (rather than mutating in place), so
    # snapshots taken by the control loops keep their old values.
    g_wheel_count = np.array(data.wheel_count)
def handle_RaspiCarMotorControl_request(request):
print(request)
command = request.command
if command.startswith('test'):
return 'ack test', 'ok.'
elif command.startswith('fwd'):
try:
value = float(command.split(':')[1])
except KeyError:
value = 0.1
except ValueError:
value = 0
forward_controlled(value)
return 'ack fwd:%f' % value, 'ok.'
elif command.startswith('right'):
try:
value = float(command.split(':')[1])
except KeyError:
value = 10
except ValueError:
value = 0
turn_right_controlled(value)
return 'ack right:%f' % value, 'ok.'
elif command.startswith('left'):
try:
value = float(command.split(':')[1])
except KeyError:
value = 10
except ValueError:
value = 0
turn_left_controlled(value)
return 'ack left:%f' % value, 'ok.'
# elif data.startswith('obstacle'):
# global obstacle_detection_routine_stopper
# try:
# value = float(data.split(':')[1])
# except KeyError:
# if obstacle_detection_routine_stopper is None:
# value = 1
# else:
# value = 0
# except ValueError:
# value = 0
#
# if value > 0.0 and obstacle_detection_routine_stopper is None:
# obstacle_detection_routine_stopper = launch_obstacle_detection_routine()
# elif value == 0.0 and obstacle_detection_routine_stopper is not None:
# obstacle_detection_routine_stopper.set()
# obstacle_detection_routine_stopper = None
#
# connection.sendall(b'ack')
# rospy.loginfo('[tcp_server] sending ack to the client.')
else:
return 'error', 'ok.'
if __name__ == '__main__':
    # Bring the GPIO/PWM hardware up first so the `finally` clause can
    # always release the pins, even if ROS initialization fails.
    motor = MotorControl(dc_level=70, t=0.3)

    try:
        # BUG FIX: init_node must run before any rospy logging call —
        # the original logged "up and running" before initialization,
        # so the message never reached the ROS log.
        rospy.init_node('RaspiCarMotorControl_node', anonymous=False)
        rospy.loginfo('[motor_control] up and running...')
        rospy.Subscriber('RaspiCarWheel', RaspiCarWheel, callback_RaspiCarWheel)
        rospy.Service('RaspiCarMotorControl', RaspiCarMotorControl,
                      handle_RaspiCarMotorControl_request)
        rospy.spin()

    except rospy.ROSInterruptException as e:
        rospy.loginfo(e)

    finally:
        motor.cleanup()
|
normal
|
{
"blob_id": "2985360c1e2d03c619ea2994c609fdf8c033bebd",
"index": 9177,
"step-1": "<mask token>\n\n\nclass MotorControl(object):\n <mask token>\n <mask token>\n\n def forward(self, speed=1.0, t=None):\n self.pwm_r1.ChangeDutyCycle(self.r_level * speed)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(self.l_level * speed)\n self.pwm_l2.ChangeDutyCycle(0)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n <mask token>\n\n def turn_left(self, speed=0.6, t=None):\n self.pwm_r1.ChangeDutyCycle(self.r_level * speed)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(0)\n self.pwm_l2.ChangeDutyCycle(0)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n\n def turn_right(self, speed=0.6, t=None):\n self.pwm_r1.ChangeDutyCycle(0)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(self.l_level * speed)\n self.pwm_l2.ChangeDutyCycle(0)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n <mask token>\n\n def cleanup(self):\n self.stop()\n self.pwm_r1.stop()\n self.pwm_r2.stop()\n self.pwm_l1.stop()\n self.pwm_l2.stop()\n GPIO.cleanup()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MotorControl(object):\n\n def __init__(self, control_pin=[16, 18, 11, 13], t=0.1, dc_level=80,\n balance=1.0, pwm_freq=500):\n self.control_pin = control_pin\n self.t = t\n self.balance = balance\n self.l_level = dc_level * 2 / (balance + 1)\n self.r_level = self.l_level * balance\n GPIO.setmode(GPIO.BOARD)\n [GPIO.setup(control_pin[pin], GPIO.OUT, initial=GPIO.LOW) for pin in\n range(4)]\n self.pwm_r1 = GPIO.PWM(control_pin[0], pwm_freq)\n self.pwm_r2 = GPIO.PWM(control_pin[1], pwm_freq)\n self.pwm_l1 = GPIO.PWM(control_pin[2], pwm_freq)\n self.pwm_l2 = GPIO.PWM(control_pin[3], pwm_freq)\n self.pwm_r1.start(0)\n self.pwm_r2.start(0)\n self.pwm_l1.start(0)\n self.pwm_l2.start(0)\n\n def stop(self):\n self.pwm_r1.ChangeDutyCycle(0)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(0)\n self.pwm_l2.ChangeDutyCycle(0)\n\n def forward(self, speed=1.0, t=None):\n self.pwm_r1.ChangeDutyCycle(self.r_level * speed)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(self.l_level * speed)\n self.pwm_l2.ChangeDutyCycle(0)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n\n def backward(self, speed=0.8, t=None):\n self.pwm_r1.ChangeDutyCycle(0)\n self.pwm_r2.ChangeDutyCycle(self.r_level * speed)\n self.pwm_l1.ChangeDutyCycle(0)\n self.pwm_l2.ChangeDutyCycle(self.l_level * speed)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n\n def turn_left(self, speed=0.6, t=None):\n self.pwm_r1.ChangeDutyCycle(self.r_level * speed)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(0)\n self.pwm_l2.ChangeDutyCycle(0)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n\n def turn_right(self, speed=0.6, t=None):\n self.pwm_r1.ChangeDutyCycle(0)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(self.l_level * speed)\n self.pwm_l2.ChangeDutyCycle(0)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n\n def 
arbitrary_speed(self, speed=[1.0, 1.0], t=None):\n if 0 < speed[0]:\n self.pwm_r1.ChangeDutyCycle(self.r_level * speed[0])\n self.pwm_r2.ChangeDutyCycle(0)\n elif speed[0] < 0:\n self.pwm_r1.ChangeDutyCycle(0)\n self.pwm_r2.ChangeDutyCycle(self.r_level * speed[0])\n if 0 < speed[1]:\n self.pwm_l1.ChangeDutyCycle(self.l_level * speed[1])\n self.pwm_l2.ChangeDutyCycle(0)\n elif speed[1] < 0:\n self.pwm_l1.ChangeDutyCycle(0)\n self.pwm_l2.ChangeDutyCycle(self.l_level * speed[1])\n if t is None:\n return\n else:\n time.sleep(t)\n self.stop()\n\n def cleanup(self):\n self.stop()\n self.pwm_r1.stop()\n self.pwm_r2.stop()\n self.pwm_l1.stop()\n self.pwm_l2.stop()\n GPIO.cleanup()\n\n\n<mask token>\n\n\ndef turn_right_controlled(angle):\n wheel_last = g_wheel_count\n count = angle / 4.45\n while not rospy.is_shutdown():\n if not g_obstacle_detected:\n if g_wheel_count[0] - wheel_last[0] < count:\n motor.turn_right(speed=0.9, t=0.05)\n elif g_wheel_count[0] - wheel_last[0] > count:\n motor.turn_left(speed=0.8, t=0.03)\n break\n else:\n break\n time.sleep(0.05)\n else:\n time.sleep(0.1)\n\n\n<mask token>\n\n\ndef callback_RaspiCarWheel(data):\n global g_wheel_count\n g_wheel_count = np.array(data.wheel_count)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass MotorControl(object):\n\n def __init__(self, control_pin=[16, 18, 11, 13], t=0.1, dc_level=80,\n balance=1.0, pwm_freq=500):\n self.control_pin = control_pin\n self.t = t\n self.balance = balance\n self.l_level = dc_level * 2 / (balance + 1)\n self.r_level = self.l_level * balance\n GPIO.setmode(GPIO.BOARD)\n [GPIO.setup(control_pin[pin], GPIO.OUT, initial=GPIO.LOW) for pin in\n range(4)]\n self.pwm_r1 = GPIO.PWM(control_pin[0], pwm_freq)\n self.pwm_r2 = GPIO.PWM(control_pin[1], pwm_freq)\n self.pwm_l1 = GPIO.PWM(control_pin[2], pwm_freq)\n self.pwm_l2 = GPIO.PWM(control_pin[3], pwm_freq)\n self.pwm_r1.start(0)\n self.pwm_r2.start(0)\n self.pwm_l1.start(0)\n self.pwm_l2.start(0)\n\n def stop(self):\n self.pwm_r1.ChangeDutyCycle(0)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(0)\n self.pwm_l2.ChangeDutyCycle(0)\n\n def forward(self, speed=1.0, t=None):\n self.pwm_r1.ChangeDutyCycle(self.r_level * speed)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(self.l_level * speed)\n self.pwm_l2.ChangeDutyCycle(0)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n\n def backward(self, speed=0.8, t=None):\n self.pwm_r1.ChangeDutyCycle(0)\n self.pwm_r2.ChangeDutyCycle(self.r_level * speed)\n self.pwm_l1.ChangeDutyCycle(0)\n self.pwm_l2.ChangeDutyCycle(self.l_level * speed)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n\n def turn_left(self, speed=0.6, t=None):\n self.pwm_r1.ChangeDutyCycle(self.r_level * speed)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(0)\n self.pwm_l2.ChangeDutyCycle(0)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n\n def turn_right(self, speed=0.6, t=None):\n self.pwm_r1.ChangeDutyCycle(0)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(self.l_level * speed)\n self.pwm_l2.ChangeDutyCycle(0)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n\n def 
arbitrary_speed(self, speed=[1.0, 1.0], t=None):\n if 0 < speed[0]:\n self.pwm_r1.ChangeDutyCycle(self.r_level * speed[0])\n self.pwm_r2.ChangeDutyCycle(0)\n elif speed[0] < 0:\n self.pwm_r1.ChangeDutyCycle(0)\n self.pwm_r2.ChangeDutyCycle(self.r_level * speed[0])\n if 0 < speed[1]:\n self.pwm_l1.ChangeDutyCycle(self.l_level * speed[1])\n self.pwm_l2.ChangeDutyCycle(0)\n elif speed[1] < 0:\n self.pwm_l1.ChangeDutyCycle(0)\n self.pwm_l2.ChangeDutyCycle(self.l_level * speed[1])\n if t is None:\n return\n else:\n time.sleep(t)\n self.stop()\n\n def cleanup(self):\n self.stop()\n self.pwm_r1.stop()\n self.pwm_r2.stop()\n self.pwm_l1.stop()\n self.pwm_l2.stop()\n GPIO.cleanup()\n\n\n<mask token>\n\n\ndef turn_right_controlled(angle):\n wheel_last = g_wheel_count\n count = angle / 4.45\n while not rospy.is_shutdown():\n if not g_obstacle_detected:\n if g_wheel_count[0] - wheel_last[0] < count:\n motor.turn_right(speed=0.9, t=0.05)\n elif g_wheel_count[0] - wheel_last[0] > count:\n motor.turn_left(speed=0.8, t=0.03)\n break\n else:\n break\n time.sleep(0.05)\n else:\n time.sleep(0.1)\n\n\n<mask token>\n\n\ndef forward_controlled(distance):\n wheel_last = g_wheel_count\n count = distance / 0.0113\n while not rospy.is_shutdown():\n if not g_obstacle_detected:\n diff_of_both = g_wheel_count - wheel_last\n if np.sum(diff_of_both) / 2.0 < count:\n motor.forward(speed=1.0, t=0.05)\n else:\n break\n diff_between = diff_of_both[0] - diff_of_both[1]\n if diff_between > 0:\n motor.turn_left(speed=0.7, t=0.03 + diff_between * 0.005)\n elif diff_between < 0:\n motor.turn_right(speed=0.7, t=0.03 - diff_between * 0.005)\n time.sleep(0.05)\n else:\n time.sleep(0.1)\n\n\ndef callback_RaspiCarWheel(data):\n global g_wheel_count\n g_wheel_count = np.array(data.wheel_count)\n\n\ndef handle_RaspiCarMotorControl_request(request):\n print(request)\n command = request.command\n if command.startswith('test'):\n return 'ack test', 'ok.'\n elif command.startswith('fwd'):\n try:\n value = 
float(command.split(':')[1])\n except KeyError:\n value = 0.1\n except ValueError:\n value = 0\n forward_controlled(value)\n return 'ack fwd:%f' % value, 'ok.'\n elif command.startswith('right'):\n try:\n value = float(command.split(':')[1])\n except KeyError:\n value = 10\n except ValueError:\n value = 0\n turn_right_controlled(value)\n return 'ack right:%f' % value, 'ok.'\n elif command.startswith('left'):\n try:\n value = float(command.split(':')[1])\n except KeyError:\n value = 10\n except ValueError:\n value = 0\n turn_left_controlled(value)\n return 'ack left:%f' % value, 'ok.'\n else:\n return 'error', 'ok.'\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass MotorControl(object):\n\n def __init__(self, control_pin=[16, 18, 11, 13], t=0.1, dc_level=80,\n balance=1.0, pwm_freq=500):\n self.control_pin = control_pin\n self.t = t\n self.balance = balance\n self.l_level = dc_level * 2 / (balance + 1)\n self.r_level = self.l_level * balance\n GPIO.setmode(GPIO.BOARD)\n [GPIO.setup(control_pin[pin], GPIO.OUT, initial=GPIO.LOW) for pin in\n range(4)]\n self.pwm_r1 = GPIO.PWM(control_pin[0], pwm_freq)\n self.pwm_r2 = GPIO.PWM(control_pin[1], pwm_freq)\n self.pwm_l1 = GPIO.PWM(control_pin[2], pwm_freq)\n self.pwm_l2 = GPIO.PWM(control_pin[3], pwm_freq)\n self.pwm_r1.start(0)\n self.pwm_r2.start(0)\n self.pwm_l1.start(0)\n self.pwm_l2.start(0)\n\n def stop(self):\n self.pwm_r1.ChangeDutyCycle(0)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(0)\n self.pwm_l2.ChangeDutyCycle(0)\n\n def forward(self, speed=1.0, t=None):\n self.pwm_r1.ChangeDutyCycle(self.r_level * speed)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(self.l_level * speed)\n self.pwm_l2.ChangeDutyCycle(0)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n\n def backward(self, speed=0.8, t=None):\n self.pwm_r1.ChangeDutyCycle(0)\n self.pwm_r2.ChangeDutyCycle(self.r_level * speed)\n self.pwm_l1.ChangeDutyCycle(0)\n self.pwm_l2.ChangeDutyCycle(self.l_level * speed)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n\n def turn_left(self, speed=0.6, t=None):\n self.pwm_r1.ChangeDutyCycle(self.r_level * speed)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(0)\n self.pwm_l2.ChangeDutyCycle(0)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n\n def turn_right(self, speed=0.6, t=None):\n self.pwm_r1.ChangeDutyCycle(0)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(self.l_level * speed)\n self.pwm_l2.ChangeDutyCycle(0)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n\n def 
arbitrary_speed(self, speed=[1.0, 1.0], t=None):\n if 0 < speed[0]:\n self.pwm_r1.ChangeDutyCycle(self.r_level * speed[0])\n self.pwm_r2.ChangeDutyCycle(0)\n elif speed[0] < 0:\n self.pwm_r1.ChangeDutyCycle(0)\n self.pwm_r2.ChangeDutyCycle(self.r_level * speed[0])\n if 0 < speed[1]:\n self.pwm_l1.ChangeDutyCycle(self.l_level * speed[1])\n self.pwm_l2.ChangeDutyCycle(0)\n elif speed[1] < 0:\n self.pwm_l1.ChangeDutyCycle(0)\n self.pwm_l2.ChangeDutyCycle(self.l_level * speed[1])\n if t is None:\n return\n else:\n time.sleep(t)\n self.stop()\n\n def cleanup(self):\n self.stop()\n self.pwm_r1.stop()\n self.pwm_r2.stop()\n self.pwm_l1.stop()\n self.pwm_l2.stop()\n GPIO.cleanup()\n\n\n<mask token>\n\n\ndef turn_right_controlled(angle):\n wheel_last = g_wheel_count\n count = angle / 4.45\n while not rospy.is_shutdown():\n if not g_obstacle_detected:\n if g_wheel_count[0] - wheel_last[0] < count:\n motor.turn_right(speed=0.9, t=0.05)\n elif g_wheel_count[0] - wheel_last[0] > count:\n motor.turn_left(speed=0.8, t=0.03)\n break\n else:\n break\n time.sleep(0.05)\n else:\n time.sleep(0.1)\n\n\ndef turn_left_controlled(angle):\n wheel_last = g_wheel_count\n count = angle / 4.45\n while not rospy.is_shutdown():\n if not g_obstacle_detected:\n if g_wheel_count[1] - wheel_last[1] < count:\n motor.turn_left(speed=0.9, t=0.05)\n elif g_wheel_count[1] - wheel_last[1] > count:\n motor.turn_right(speed=0.8, t=0.03)\n break\n else:\n break\n time.sleep(0.05)\n else:\n time.sleep(0.1)\n\n\ndef forward_controlled(distance):\n wheel_last = g_wheel_count\n count = distance / 0.0113\n while not rospy.is_shutdown():\n if not g_obstacle_detected:\n diff_of_both = g_wheel_count - wheel_last\n if np.sum(diff_of_both) / 2.0 < count:\n motor.forward(speed=1.0, t=0.05)\n else:\n break\n diff_between = diff_of_both[0] - diff_of_both[1]\n if diff_between > 0:\n motor.turn_left(speed=0.7, t=0.03 + diff_between * 0.005)\n elif diff_between < 0:\n motor.turn_right(speed=0.7, t=0.03 - diff_between * 
0.005)\n time.sleep(0.05)\n else:\n time.sleep(0.1)\n\n\ndef callback_RaspiCarWheel(data):\n global g_wheel_count\n g_wheel_count = np.array(data.wheel_count)\n\n\ndef handle_RaspiCarMotorControl_request(request):\n print(request)\n command = request.command\n if command.startswith('test'):\n return 'ack test', 'ok.'\n elif command.startswith('fwd'):\n try:\n value = float(command.split(':')[1])\n except KeyError:\n value = 0.1\n except ValueError:\n value = 0\n forward_controlled(value)\n return 'ack fwd:%f' % value, 'ok.'\n elif command.startswith('right'):\n try:\n value = float(command.split(':')[1])\n except KeyError:\n value = 10\n except ValueError:\n value = 0\n turn_right_controlled(value)\n return 'ack right:%f' % value, 'ok.'\n elif command.startswith('left'):\n try:\n value = float(command.split(':')[1])\n except KeyError:\n value = 10\n except ValueError:\n value = 0\n turn_left_controlled(value)\n return 'ack left:%f' % value, 'ok.'\n else:\n return 'error', 'ok.'\n\n\nif __name__ == '__main__':\n motor = MotorControl(dc_level=70, t=0.3)\n rospy.loginfo('[motor_control] up and running...')\n try:\n rospy.init_node('RaspiCarMotorControl_node', anonymous=False)\n rospy.Subscriber('RaspiCarWheel', RaspiCarWheel, callback_RaspiCarWheel\n )\n s = rospy.Service('RaspiCarMotorControl', RaspiCarMotorControl,\n handle_RaspiCarMotorControl_request)\n rospy.spin()\n except rospy.ROSInterruptException as e:\n rospy.loginfo(e)\n finally:\n motor.cleanup()\n",
"step-5": "#!/usr/bin/env python\n\nimport rospy\nimport numpy as np\nimport time\nimport RPi.GPIO as GPIO\n\nfrom ccn_raspicar_ros.msg import RaspiCarWheel\nfrom ccn_raspicar_ros.msg import RaspiCarWheelControl\nfrom ccn_raspicar_ros.srv import RaspiCarMotorControl\n\n\nclass MotorControl(object):\n def __init__(self, control_pin=[16, 18, 11, 13], t=0.1, dc_level=80, balance=1.0, pwm_freq=500):\n self.control_pin = control_pin\n self.t = t\n self.balance = balance\n self.l_level = dc_level * 2 / (balance + 1)\n self.r_level = self.l_level * balance\n\n GPIO.setmode(GPIO.BOARD)\n [GPIO.setup(control_pin[pin], GPIO.OUT, initial=GPIO.LOW) for pin in range(4)]\n\n self.pwm_r1 = GPIO.PWM(control_pin[0], pwm_freq)\n self.pwm_r2 = GPIO.PWM(control_pin[1], pwm_freq)\n self.pwm_l1 = GPIO.PWM(control_pin[2], pwm_freq)\n self.pwm_l2 = GPIO.PWM(control_pin[3], pwm_freq)\n self.pwm_r1.start(0)\n self.pwm_r2.start(0)\n self.pwm_l1.start(0)\n self.pwm_l2.start(0)\n\n def stop(self):\n self.pwm_r1.ChangeDutyCycle(0)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(0)\n self.pwm_l2.ChangeDutyCycle(0)\n\n def forward(self, speed=1.0, t=None):\n self.pwm_r1.ChangeDutyCycle(self.r_level*speed)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(self.l_level*speed)\n self.pwm_l2.ChangeDutyCycle(0)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n\n def backward(self, speed=0.8, t=None):\n self.pwm_r1.ChangeDutyCycle(0)\n self.pwm_r2.ChangeDutyCycle(self.r_level*speed)\n self.pwm_l1.ChangeDutyCycle(0)\n self.pwm_l2.ChangeDutyCycle(self.l_level*speed)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n\n def turn_left(self, speed=0.6, t=None):\n self.pwm_r1.ChangeDutyCycle(self.r_level*speed)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(0)\n self.pwm_l2.ChangeDutyCycle(0)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n\n def turn_right(self, speed=0.6, t=None):\n 
self.pwm_r1.ChangeDutyCycle(0)\n self.pwm_r2.ChangeDutyCycle(0)\n self.pwm_l1.ChangeDutyCycle(self.l_level*speed)\n self.pwm_l2.ChangeDutyCycle(0)\n if t is None:\n time.sleep(self.t)\n else:\n time.sleep(t)\n self.stop()\n\n def arbitrary_speed(self, speed=[1.0, 1.0], t=None):\n if 0 < speed[0]:\n self.pwm_r1.ChangeDutyCycle(self.r_level * speed[0])\n self.pwm_r2.ChangeDutyCycle(0)\n elif speed[0] < 0:\n self.pwm_r1.ChangeDutyCycle(0)\n self.pwm_r2.ChangeDutyCycle(self.r_level * speed[0])\n if 0 < speed[1]:\n self.pwm_l1.ChangeDutyCycle(self.l_level * speed[1])\n self.pwm_l2.ChangeDutyCycle(0)\n elif speed[1] < 0:\n self.pwm_l1.ChangeDutyCycle(0)\n self.pwm_l2.ChangeDutyCycle(self.l_level * speed[1])\n if t is None:\n return\n else:\n time.sleep(t)\n self.stop()\n\n def cleanup(self):\n self.stop()\n self.pwm_r1.stop()\n self.pwm_r2.stop()\n self.pwm_l1.stop()\n self.pwm_l2.stop()\n GPIO.cleanup()\n\n\ng_obstacle_detected = False\ng_proximity = np.zeros([3])\ng_wheel_count = np.zeros([2])\n\n\ndef turn_right_controlled(angle):\n wheel_last = g_wheel_count\n count = angle / 4.45\n while not rospy.is_shutdown():\n if not g_obstacle_detected:\n\n if g_wheel_count[0] - wheel_last[0] < count:\n motor.turn_right(speed=0.9, t=0.05)\n elif g_wheel_count[0] - wheel_last[0] > count:\n motor.turn_left(speed=0.8, t=0.03)\n break\n else:\n break\n\n time.sleep(0.05)\n\n else:\n time.sleep(0.1)\n\n\ndef turn_left_controlled(angle):\n wheel_last = g_wheel_count\n count = angle / 4.45\n while not rospy.is_shutdown():\n if not g_obstacle_detected:\n \n if g_wheel_count[1] - wheel_last[1] < count:\n motor.turn_left(speed=0.9, t=0.05)\n elif g_wheel_count[1] - wheel_last[1] > count:\n motor.turn_right(speed=0.8, t=0.03)\n break\n else:\n break\n\n time.sleep(0.05)\n \n else:\n time.sleep(0.1)\n\n\ndef forward_controlled(distance):\n wheel_last = g_wheel_count\n count = distance / 0.0113\n while not rospy.is_shutdown(): \n if not g_obstacle_detected:\n \n diff_of_both = g_wheel_count 
- wheel_last\n\n if np.sum(diff_of_both)/2.0 < count:\n motor.forward(speed=1.0, t=0.05) \n else:\n break\n\n diff_between = diff_of_both[0] - diff_of_both[1]\n\n if diff_between > 0:\n motor.turn_left(speed=0.7, t=0.03 + diff_between * 0.005)\n elif diff_between < 0:\n motor.turn_right(speed=0.7, t=0.03 - diff_between * 0.005)\n \n time.sleep(0.05)\n \n else:\n time.sleep(0.1)\n\n\ndef callback_RaspiCarWheel(data):\n global g_wheel_count\n g_wheel_count = np.array(data.wheel_count)\n\n\ndef handle_RaspiCarMotorControl_request(request):\n\n print(request)\n\n command = request.command\n if command.startswith('test'):\n return 'ack test', 'ok.'\n\n elif command.startswith('fwd'):\n try:\n value = float(command.split(':')[1])\n except KeyError:\n value = 0.1\n except ValueError:\n value = 0\n forward_controlled(value)\n return 'ack fwd:%f' % value, 'ok.'\n\n elif command.startswith('right'):\n try:\n value = float(command.split(':')[1])\n except KeyError:\n value = 10\n except ValueError:\n value = 0\n turn_right_controlled(value)\n return 'ack right:%f' % value, 'ok.'\n elif command.startswith('left'):\n try:\n value = float(command.split(':')[1])\n except KeyError:\n value = 10\n except ValueError:\n value = 0\n turn_left_controlled(value)\n return 'ack left:%f' % value, 'ok.'\n # elif data.startswith('obstacle'):\n # global obstacle_detection_routine_stopper\n # try:\n # value = float(data.split(':')[1])\n # except KeyError:\n # if obstacle_detection_routine_stopper is None:\n # value = 1\n # else:\n # value = 0\n # except ValueError:\n # value = 0\n #\n # if value > 0.0 and obstacle_detection_routine_stopper is None:\n # obstacle_detection_routine_stopper = launch_obstacle_detection_routine()\n # elif value == 0.0 and obstacle_detection_routine_stopper is not None:\n # obstacle_detection_routine_stopper.set()\n # obstacle_detection_routine_stopper = None\n #\n # connection.sendall(b'ack')\n # rospy.loginfo('[tcp_server] sending ack to the client.')\n else:\n 
return 'error', 'ok.'\n\n\nif __name__ == '__main__':\n motor = MotorControl(dc_level=70, t=0.3)\n rospy.loginfo('[motor_control] up and running...')\n\n try:\n rospy.init_node('RaspiCarMotorControl_node', anonymous=False)\n rospy.Subscriber('RaspiCarWheel', RaspiCarWheel, callback_RaspiCarWheel)\n s = rospy.Service('RaspiCarMotorControl', RaspiCarMotorControl, handle_RaspiCarMotorControl_request)\n rospy.spin()\n\n except rospy.ROSInterruptException as e:\n rospy.loginfo(e)\n\n finally:\n motor.cleanup()\n\n",
"step-ids": [
5,
11,
13,
15,
18
]
}
|
[
5,
11,
13,
15,
18
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.